# main_sashi.py

import argparse
import json
import torch
import torch.optim as optim
import torch.nn.functional as F
import utils_sashi
from torch.nn.utils import clip_grad_norm
from clevr_dataset_sashi import ClevrDataset
from model_sashi import RN
from torchvision import transforms
from torch.utils.data import DataLoader
def train(data, model, optimizer, epoch, args):
    model.train()
    avg_loss = 0.0
    n_batches = 0
    for batch_idx, sample_batched in enumerate(data):
        # img, qst, label = utils_sashi.load_tensor_data(sample_batched, args.cuda, args.invert_questions, volatile=True)
        img, qst, label = sample_batched['image'], sample_batched['question'], sample_batched['answer']
        label = (label - 1).squeeze()
        if args.cuda:
            img, qst, label = img.cuda(), qst.cuda(), label.cuda()
        optimizer.zero_grad()
        output = model(img, qst)
        loss = F.nll_loss(output, label)
        loss.backward()
        # if args.clip_norm:
        #     clip_grad_norm(model.parameters(), args.clip_norm)
        optimizer.step()
        # avg_loss += loss.data[0]
        avg_loss += loss.item()
        n_batches += 1
        if batch_idx % args.log_interval == 0:
            avg_loss /= n_batches
            processed = batch_idx * args.batch_size
            n_samples = len(data) * args.batch_size
            progress = float(processed) / n_samples
            print('Train Epoch: {} [{}/{} ({:.0%})] Train loss: {}'.format(
                epoch, processed, n_samples, progress, avg_loss))
            avg_loss = 0.0
            n_batches = 0
def reload_loaders(clevr_dataset_train, clevr_dataset_test, train_bs, test_bs, state_description=False):
    clevr_train_loader = DataLoader(clevr_dataset_train, batch_size=train_bs, shuffle=False,
                                    num_workers=2, collate_fn=utils_sashi.collate_samples_from_pixels)
    clevr_test_loader = DataLoader(clevr_dataset_test, batch_size=test_bs, shuffle=False,
                                   num_workers=2, collate_fn=utils_sashi.collate_samples_from_pixels)
    return clevr_train_loader, clevr_test_loader
def initialize_dataset(clevr_dir, dictionaries):
    train_transforms = transforms.Compose([transforms.Resize((128, 128)),
                                           transforms.Pad(8),
                                           transforms.RandomCrop((128, 128)),
                                           transforms.RandomRotation(2.8),
                                           transforms.ToTensor()])
    # train_transforms = transforms.Compose([transforms.Resize((128, 128)),
    #                                        transforms.ToTensor()])
    test_transforms = transforms.Compose([transforms.Resize((128, 128)),
                                          transforms.ToTensor()])
    clevr_dataset_train = ClevrDataset(clevr_dir, True, dictionaries, train_transforms)
    clevr_dataset_test = ClevrDataset(clevr_dir, False, dictionaries, test_transforms)
    return clevr_dataset_train, clevr_dataset_test
def main(args):
    with open(args.config) as config_file:
        hyp = json.load(config_file)['hyperparams'][args.model]
    print('Loaded hyperparameters from configuration {}, model: {}: {}'.format(args.config, args.model, hyp))
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)
    print('Building dictionaries from all words in the dataset')
    dictionaries = utils_sashi.build_dictionaries(args.clevr_dir)
    print('Word dictionary completed!')
    print('Initialising CLEVR dataset...')
    clevr_dataset_train, clevr_dataset_test = initialize_dataset(args.clevr_dir, dictionaries)
    print('CLEVR dataset initialised')
    args.qdict_size = len(dictionaries[0])
    args.adict_size = len(dictionaries[1])
    model = RN(args, hyp)
    if args.cuda:
        model.cuda()
    bs = args.batch_size
    lr = args.lr
    optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=lr, weight_decay=1e-4)
    for epoch in range(args.epochs):
        clevr_train_loader, clevr_test_loader = reload_loaders(clevr_dataset_train, clevr_dataset_test,
                                                               bs, args.test_batch_size, hyp['state_description'])
        train(clevr_train_loader, model, optimizer, epoch, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch-size', type=int, default=100)
    parser.add_argument('--test-batch-size', type=int, default=100)
    parser.add_argument('--epochs', type=int, default=350)
    parser.add_argument('--lr', type=float, default=0.000005)
    # parser.add_argument('--clip-norm', type=int, default=50)
    parser.add_argument('--no-cuda', action='store_true', default=False)
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--log-interval', type=int, default=10)
    # parser.add_argument('--resume', type=str)
    parser.add_argument('--clevr-dir', type=str, default='/home/iki/sashi/robotkoop/CLEVR_v1.0/')
    parser.add_argument('--model', type=str, default='original-fp')
    parser.add_argument('--no-invert-questions', action='store_true', default=True)
    # parser.add_argument('--test', action='store_true', default=False)
    # parser.add_argument('--conv-transfer-learn', type=str)
    # parser.add_argument('--lr-max', type=float, default=0.0005)
    # parser.add_argument('--lr-gamma', type=float, default=2)
    # parser.add_argument('--lr-step', type=int, default=-1)
    # parser.add_argument('--bs-max', type=int, default=-1)
    # parser.add_argument('--bs-gamma', type=float, default=1)
    # parser.add_argument('--bs-step', type=int, default=20)
    # parser.add_argument('--dropout', type=float, default=-1)
    parser.add_argument('--config', type=str, default='config.json')
    parser.add_argument('--question-injection', type=int, default=-1)
    args = parser.parse_args()
    args.invert_questions = not args.no_invert_questions
    main(args)
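
# Hedged sketch (not part of the original file): main() above reads
# json.load(config_file)['hyperparams'][args.model] and later hyp['state_description'],
# so a minimal compatible config.json might look like the following. The inner
# hyperparameter names and values are hypothetical -- whatever RN(args, hyp) expects.
import json

example_config = {
    "hyperparams": {
        "original-fp": {                  # key must match the --model argument
            "state_description": False,   # consumed when building the data loaders
            "lstm_hidden": 128,           # hypothetical RN hyperparameter
            "g_layers": [256, 256, 256],  # hypothetical RN hyperparameter
        }
    }
}

with open("config_example.json", "w") as f:
    json.dump(example_config, f, indent=2)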

# src/platforms/android/utils.py

import json
import logging
import os
from jnius import autoclass, cast, jnius
logging.basicConfig(level=logging.DEBUG)
AndroidString = autoclass("java.lang.String")
Context = autoclass("android.content.Context")
Environment = autoclass("android.os.Environment")
File = autoclass("java.io.File")
FileProvider = autoclass('android.support.v4.content.FileProvider')
Intent = autoclass("android.content.Intent")
NotificationBuilder = autoclass("android.app.Notification$Builder")
NotificationManager = autoclass("android.app.NotificationManager")
PackageManager = autoclass("android.content.pm.PackageManager")
PendingIntent = autoclass("android.app.PendingIntent")
PythonActivity = autoclass("org.kivy.android.PythonActivity")
Timezone = autoclass("java.util.TimeZone")
Uri = autoclass("android.net.Uri")
ANDROID_VERSION = autoclass("android.os.Build$VERSION")
SDK_INT = ANDROID_VERSION.SDK_INT
def is_service_context():
    return "PYTHON_SERVICE_ARGUMENT" in os.environ


def get_service():
    assert is_service_context(), "Cannot get service, as we are not in a service context."
    PythonService = autoclass("org.kivy.android.PythonService")
    return PythonService.mService


def get_timezone_name():
    return Timezone.getDefault().getDisplayName()


def start_service(service_name, service_args):
    service = autoclass("org.learningequality.Kolibri.Service{}".format(service_name.title()))
    service.start(PythonActivity.mActivity, json.dumps(dict(service_args)))


def get_service_args():
    assert is_service_context(), "Cannot get service args, as we are not in a service context."
    return json.loads(os.environ.get("PYTHON_SERVICE_ARGUMENT") or "{}")


def get_version_name():
    return get_activity().getPackageManager().getPackageInfo(PythonActivity.getPackageName(), 0).versionName


def get_activity():
    if is_service_context():
        return cast("android.app.Service", get_service())
    else:
        return PythonActivity.mActivity


def is_app_installed(app_id):
    manager = get_activity().getPackageManager()
    try:
        manager.getPackageInfo(app_id, PackageManager.GET_ACTIVITIES)
    except jnius.JavaException:
        return False
    return True


# TODO: check for storage availability, allow user to choose sd card or internal
def get_home_folder():
    kolibri_home_file = get_activity().getExternalFilesDir(None)
    return os.path.join(kolibri_home_file.toString(), "KOLIBRI_DATA")


def send_whatsapp_message(msg):
    share_by_intent(msg=msg, app="com.whatsapp")


def share_by_intent(path=None, filename=None, msg=None, app=None, mimetype=None):
    assert path or msg or filename, "Must provide either a path, a filename, or a msg to share"
    sendIntent = Intent()
    sendIntent.setAction(Intent.ACTION_SEND)
    if path:
        uri = FileProvider.getUriForFile(
            Context.getApplicationContext(),
            "org.learningequality.Kolibri.fileprovider",
            File(path)
        )
        parcelable = cast("android.os.Parcelable", uri)
        sendIntent.putExtra(Intent.EXTRA_STREAM, parcelable)
        sendIntent.setType(AndroidString(mimetype or "*/*"))
        sendIntent.addFlags(Intent.FLAG_GRANT_READ_URI_PERMISSION)
    if msg:
        if not path:
            sendIntent.setType(AndroidString(mimetype or "text/plain"))
        sendIntent.putExtra(Intent.EXTRA_TEXT, AndroidString(msg))
    if app:
        sendIntent.setPackage(AndroidString(app))
    sendIntent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK)
    get_activity().startActivity(sendIntent)


def make_service_foreground(title, message):
    service = get_service()
    Drawable = autoclass("{}.R$drawable".format(service.getPackageName()))
    app_context = service.getApplication().getApplicationContext()
    if SDK_INT >= 26:
        NotificationChannel = autoclass("android.app.NotificationChannel")
        notification_service = cast(NotificationManager, get_activity().getSystemService(Context.NOTIFICATION_SERVICE))
        channel_id = get_activity().getPackageName()
        app_channel = NotificationChannel(channel_id, "Kolibri Background Server", NotificationManager.IMPORTANCE_DEFAULT)
        notification_service.createNotificationChannel(app_channel)
        notification_builder = NotificationBuilder(app_context, channel_id)
    else:
        notification_builder = NotificationBuilder(app_context)
    notification_builder.setContentTitle(AndroidString(title))
    notification_builder.setContentText(AndroidString(message))
    notification_intent = Intent(app_context, PythonActivity)
    notification_intent.setFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP | Intent.FLAG_ACTIVITY_SINGLE_TOP | Intent.FLAG_ACTIVITY_NEW_TASK)
    notification_intent.setAction(Intent.ACTION_MAIN)
    notification_intent.addCategory(Intent.CATEGORY_LAUNCHER)
    intent = PendingIntent.getActivity(service, 0, notification_intent, 0)
    notification_builder.setContentIntent(intent)
    notification_builder.setSmallIcon(Drawable.icon)
    notification_builder.setAutoCancel(True)
    new_notification = notification_builder.getNotification()
    service.startForeground(1, new_notification)
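
# Hedged usage sketch (assumptions: running on-device inside the Kivy app, and a
# service class org.learningequality.Kolibri.ServiceServer exists; the file path
# and service args below are hypothetical). Wrapped in a function so nothing runs
# at import time.
def _example_share_usage():
    share_by_intent(msg="Hello from Kolibri")                         # plain-text chooser
    share_by_intent(path="/sdcard/report.pdf", mimetype="application/pdf")
    send_whatsapp_message("Hello")                                    # pinned to com.whatsapp
    start_service("server", {"port": 8080})                           # resolves to ServiceServer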

# Testing/Python/TestMohrCoulombIsotropicMaterial.py

from __future__ import division
import sys
import numpy
from numpy import sqrt, pi
import vtk
from vtk.util.numpy_support import vtk_to_numpy, numpy_to_vtk
import vtkbone
import traceback
from math import atan
import unittest
class TestMohrCoulombIsotropicMaterial(unittest.TestCase):

    def test_simple(self):
        material = vtkbone.vtkboneMohrCoulombIsotropicMaterial()
        material.SetYoungsModulus(1234.5)
        material.SetPoissonsRatio(0.246)
        material.SetC(11111.1)
        material.SetPhi(0.1)
        self.assertEqual(material.GetYoungsModulus(), 1234.5)
        self.assertEqual(material.GetPoissonsRatio(), 0.246)
        self.assertEqual(material.GetC(), 11111.1)
        self.assertEqual(material.GetPhi(), 0.1)

    def test_set_yield_strengths(self):
        material = vtkbone.vtkboneMohrCoulombIsotropicMaterial()
        material.SetYoungsModulus(1234.5)
        material.SetPoissonsRatio(0.246)
        YT = 5000
        YC = 7000
        material.SetYieldStrengths(YT, YC)
        c = sqrt(YT*YC)/2
        phi = pi/2 - 2*atan(sqrt(YT/YC))
        self.assertEqual(material.GetYoungsModulus(), 1234.5)
        self.assertEqual(material.GetPoissonsRatio(), 0.246)
        self.assertAlmostEqual(material.GetC(), c, delta=1E-6)
        self.assertAlmostEqual(material.GetPhi(), phi, delta=1E-10)

    def test_copy(self):
        material = vtkbone.vtkboneMohrCoulombIsotropicMaterial()
        material.SetYoungsModulus(1234.5)
        material.SetPoissonsRatio(0.246)
        material.SetC(11111.1)
        material.SetPhi(0.1)
        copied_material = material.Copy()
        self.assertEqual(copied_material.GetYoungsModulus(), 1234.5)
        self.assertEqual(copied_material.GetPoissonsRatio(), 0.246)
        self.assertEqual(copied_material.GetC(), 11111.1)
        self.assertEqual(copied_material.GetPhi(), 0.1)

    def test_scaled_copy(self):
        material = vtkbone.vtkboneMohrCoulombIsotropicMaterial()
        material.SetYoungsModulus(1234.5)
        material.SetPoissonsRatio(0.246)
        material.SetC(11111.1)
        material.SetPhi(0.1)
        scaled_material = material.ScaledCopy(0.5)
        self.assertEqual(material.GetYoungsModulus(), 1234.5)
        self.assertEqual(material.GetPoissonsRatio(), 0.246)
        self.assertEqual(material.GetC(), 11111.1)
        self.assertEqual(material.GetPhi(), 0.1)
        self.assertEqual(scaled_material.GetYoungsModulus(), 0.5*1234.5)
        self.assertEqual(scaled_material.GetPoissonsRatio(), 0.246)
        self.assertEqual(scaled_material.GetC(), 0.5*11111.1)
        self.assertEqual(scaled_material.GetPhi(), 0.1)


if __name__ == '__main__':
    unittest.main()
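
# The conversion asserted in test_set_yield_strengths maps tensile/compressive
# yield strengths (YT, YC) to cohesion c and friction angle phi via
#     c = sqrt(YT * YC) / 2     and     phi = pi/2 - 2 * atan(sqrt(YT / YC)).
# A standalone numeric check of those formulas (plain Python, no vtkbone needed):
from math import atan, pi, sqrt

YT, YC = 5000.0, 7000.0
c = sqrt(YT * YC) / 2                    # ~2958.04
phi = pi / 2 - 2 * atan(sqrt(YT / YC))   # ~0.1674 rad
print(c, phi)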

# scripts/generate_msid_plots.py

from __future__ import division, print_function
import os
import sys
import glob
import time
import datetime as dt
from astropy.io import ascii
from astropy.table import Table
from astropy.table import vstack
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.dates import epoch2num
from hrcsentinel import hrccore as hrc
def main():
    script_location = os.getcwd()
    homedirectory = os.path.expanduser("~")
    msidcloud_directory = homedirectory + "/Dropbox/Work/HRCOps/MSIDCloud/"
    # print("Making sure you're in {}".format(goes_data_directory))
    os.chdir(msidcloud_directory)
    msidlist = glob.glob("*lifetime.csv")
    hrc.styleplots()
    for msid in msidlist:
        msidName, values, times = parse_msid(msid)
        plotit(msidName, values, times)
    # Return to the starting directory
    os.chdir(script_location)


def parse_msid(msid):
    # Make a string of just the MSID name. Use split to separate on underscores
    msidName = msid.split("_")[0]
    print("Plotting {}.".format(msidName))
    data = ascii.read(msid, format="fast_csv")
    # Simple check to see if this data is binned or full resolution
    if 'midvals' in data.colnames:
        values = data['midvals']
    else:
        values = data['vals']
    times = hrc.convert_chandra_time(data['times'])
    return msidName, values, times


def plotit(msidName, values, times):
    # Make a simple plot, save it rastered.
    # Check that the plots subdirectory exists. If not, create it.
    plot_dir = "plots/"
    if not os.path.exists(plot_dir):
        os.makedirs(plot_dir)
        print("Plot subdirectory 'plots/' not found. Creating it.")
    fig, ax = plt.subplots(figsize=(12, 8))
    ax.plot_date(times, values, markersize=1.0, label=msidName)
    ax.legend()
    fig.savefig(plot_dir + msidName + '.png', dpi=300)
    # Always close your plots so you don't needlessly consume too much memory
    plt.close()


if __name__ == '__main__':
    start_time = time.time()
    main()
    runtime = round((time.time() - start_time), 3)
    print("Finished in {} seconds".format(runtime))

# uplift/ensemble.py

from abc import ABCMeta, abstractmethod
import numpy as np
from joblib import Parallel, delayed
from sklearn.base import is_classifier
from sklearn.utils import check_X_y, check_scalar, check_random_state
from sklearn.ensemble import BaseEnsemble
from sklearn.utils.validation import check_is_fitted
from .base import BaseEstimator, RegressorMixin, ClassifierMixin
from .tree import DecisionTreeRegressor, DecisionTreeClassifier
def _generate_indexes(source_size, sample_size):
    return np.random.choice(np.arange(source_size),
                            sample_size,
                            replace=True)


def _parallel_fit(tree, X, y, w, max_samples, max_features):
    n_samples, n_features = X.shape
    samples_idx = _generate_indexes(n_samples, max_samples)
    if max_features < n_features:
        features_idx = _generate_indexes(n_features, max_features)
    else:
        features_idx = np.arange(n_features)
    tree.fit(X[samples_idx, :][:, features_idx], y[samples_idx], w[samples_idx])
    return tree


def _parallel_predict(tree, X):
    return tree.predict(X)
class BaseForest(BaseEnsemble, BaseEstimator, metaclass=ABCMeta):

    def __init__(self,
                 base_estimator,
                 *,
                 n_estimators: int,
                 estimator_params: tuple,
                 samples_by_estimator: int,
                 features_by_estimator: int,
                 n_jobs: int,
                 verbose: int,
                 random_state: int):
        super().__init__(base_estimator=base_estimator,
                         n_estimators=n_estimators,
                         estimator_params=estimator_params)
        self.samples_by_estimator = samples_by_estimator
        self.features_by_estimator = features_by_estimator
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.random_state = random_state

    def fit(self, X, y, w):
        X, y, w = self._validate_data(X, y, w, force_all_finite='allow-nan')
        n_samples, self.n_features_in_ = X.shape
        if isinstance(self.samples_by_estimator, int):
            n_samples_by_estimator = check_scalar(self.samples_by_estimator,
                                                  'samples_by_estimator',
                                                  int,
                                                  min_val=1,
                                                  max_val=n_samples)
        elif isinstance(self.samples_by_estimator, float):
            n_samples_by_estimator = check_scalar(self.samples_by_estimator,
                                                  'samples_by_estimator',
                                                  float,
                                                  min_val=0,
                                                  max_val=1,
                                                  include_boundaries='right')
            n_samples_by_estimator = int(n_samples_by_estimator * n_samples)
        elif self.samples_by_estimator is None:
            n_samples_by_estimator = n_samples
        else:
            raise ValueError('Invalid value for samples_by_estimator')
        if isinstance(self.features_by_estimator, int):
            n_features_by_estimator = check_scalar(self.features_by_estimator,
                                                   'features_by_estimator',
                                                   int,
                                                   min_val=1,
                                                   max_val=self.n_features_in_)
        elif isinstance(self.features_by_estimator, float):
            n_features_by_estimator = check_scalar(self.features_by_estimator,
                                                   'features_by_estimator',
                                                   float,
                                                   min_val=0,
                                                   max_val=1,
                                                   include_boundaries='right')
            n_features_by_estimator = int(n_features_by_estimator * self.n_features_in_)
        elif self.features_by_estimator is None:
            n_features_by_estimator = self.n_features_in_
        else:
            raise ValueError('Invalid value for features_by_estimator')
        random_state = check_random_state(self.random_state)
        self._validate_estimator()
        self.estimators_ = list()
        trees = [self._make_estimator(append=False, random_state=random_state)
                 for _ in range(self.n_estimators)]
        trees = Parallel(n_jobs=self.n_jobs,
                         verbose=self.verbose,
                         prefer='threads')(delayed(_parallel_fit)(tree,
                                                                  X, y, w,
                                                                  n_samples_by_estimator,
                                                                  n_features_by_estimator)
                                           for tree in trees)
        self.estimators_.extend(trees)
        return self

    def predict(self, X):
        check_is_fitted(self)
        X = self._validate_data(X, reset=False,
                                force_all_finite='allow-nan')
        preds = Parallel(n_jobs=self.n_jobs,
                         verbose=self.verbose,
                         prefer='threads')(delayed(_parallel_predict)(tree, X)
                                           for tree in self.estimators_)
        return sum(preds) / len(preds)
class RandomForestRegressor(BaseForest, RegressorMixin):

    def __init__(self,
                 *,
                 n_estimators: int = 100,
                 criterion: str = 'delta_delta_p',
                 splitter: str = 'fast',
                 max_depth: int = None,
                 min_samples_split: int = 40,
                 min_samples_leaf: int = 20,
                 min_samples_leaf_treated: int = 10,
                 min_samples_leaf_control: int = 10,
                 max_features: int = None,
                 max_leaf_nodes: int = None,
                 samples_by_estimator: int = None,
                 features_by_estimator: int = None,
                 n_jobs: int = None,
                 verbose: int = None,
                 random_state: int = None):
        super().__init__(base_estimator=DecisionTreeRegressor(),
                         n_estimators=n_estimators,
                         estimator_params=('criterion',
                                           'splitter',
                                           'max_depth',
                                           'min_samples_split',
                                           'min_samples_leaf',
                                           'min_samples_leaf_treated',
                                           'min_samples_leaf_control',
                                           'max_features',
                                           'max_leaf_nodes',
                                           'random_state'),
                         samples_by_estimator=samples_by_estimator,
                         features_by_estimator=features_by_estimator,
                         n_jobs=n_jobs,
                         verbose=verbose,
                         random_state=random_state)
        self.criterion = criterion
        self.splitter = splitter
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_samples_leaf_treated = min_samples_leaf_treated
        self.min_samples_leaf_control = min_samples_leaf_control
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
class RandomForestClassifier(BaseForest, ClassifierMixin):

    def __init__(self,
                 *,
                 n_estimators: int = 100,
                 criterion: str = 'kl_divergence',
                 splitter: str = 'fast',
                 max_depth: int = None,
                 min_samples_split: int = 40,
                 min_samples_leaf: int = 20,
                 min_samples_leaf_treated: int = 10,
                 min_samples_leaf_control: int = 10,
                 max_features: int = None,
                 max_leaf_nodes: int = None,
                 samples_by_estimator: int = None,
                 features_by_estimator: int = None,
                 n_jobs: int = None,
                 verbose: int = None,
                 random_state: int = None):
        super().__init__(base_estimator=DecisionTreeClassifier(),
                         n_estimators=n_estimators,
                         estimator_params=('criterion',
                                           'splitter',
                                           'max_depth',
                                           'min_samples_split',
                                           'min_samples_leaf',
                                           'min_samples_leaf_treated',
                                           'min_samples_leaf_control',
                                           'max_features',
                                           'max_leaf_nodes',
                                           'random_state'),
                         samples_by_estimator=samples_by_estimator,
                         features_by_estimator=features_by_estimator,
                         n_jobs=n_jobs,
                         verbose=verbose,
                         random_state=random_state)
        self.criterion = criterion
        self.splitter = splitter
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_samples_leaf_treated = min_samples_leaf_treated
        self.min_samples_leaf_control = min_samples_leaf_control
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
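
# The bagging in _parallel_fit above is plain numpy fancy indexing; a
# self-contained sketch of the same row/column subsampling pattern (no
# dependency on the local .tree estimators; array sizes are arbitrary):
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(8, 5))

rows = rng.choice(np.arange(8), 6, replace=True)  # bootstrap rows, with replacement
cols = rng.choice(np.arange(5), 3, replace=True)  # columns drawn with replacement too,
                                                  # mirroring _generate_indexes
X_sub = X[rows, :][:, cols]
print(X_sub.shape)  # (6, 3)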

# rcsdk/http/request_test.py

import unittest
from ..test import TestCase, Spy
from .request import Request
from .request_mock import RequestMock


class TestRequest(TestCase):

    def test_get_url_with_query_string(self):
        r = Request('GET', 'http://whatever', {'foo': 'bar', 'baz': 'qux'})
        self.assertEqual('http://whatever?foo=bar&baz=qux', r.get_url_with_query_string())

    def test_get_encoded_body(self):
        r1 = Request('POST', 'http://whatever', None, {'foo': 'bar', 'baz': 'qux'},
                     {'content-type': 'application/x-www-form-urlencoded'})
        r2 = Request('POST', 'http://whatever', None, {'foo': 'bar', 'baz': 'qux'},
                     {'content-type': 'application/json'})
        r3 = Request('POST', 'http://whatever', None, {'foo': 'bar', 'baz': 'qux'})
        r4 = Request('POST', 'http://whatever', None, 'foo-encoded-text', {'content-type': 'foo'})
        self.assertEqual('foo=bar&baz=qux', r1.get_encoded_body())
        self.assertEqual('{"foo": "bar", "baz": "qux"}', r2.get_encoded_body())
        self.assertEqual('{"foo": "bar", "baz": "qux"}', r3.get_encoded_body())  # JSON by default
        self.assertEqual('foo-encoded-text', r4.get_encoded_body())

    def test_is_methods(self):
        r1 = Request('GET', 'http://whatever')
        r2 = Request('POST', 'http://whatever')
        r3 = Request('PUT', 'http://whatever')
        r4 = Request('DELETE', 'http://whatever')
        self.assertTrue(r1.is_get() and not r1.is_post() and not r1.is_put() and not r1.is_delete())
        self.assertTrue(not r2.is_get() and r2.is_post() and not r2.is_put() and not r2.is_delete())
        self.assertTrue(not r3.is_get() and not r3.is_post() and r3.is_put() and not r3.is_delete())
        self.assertTrue(not r4.is_get() and not r4.is_post() and not r4.is_put() and r4.is_delete())

    def test_get_set_body(self):
        r = Request('GET', 'http://whatever')
        self.assertEqual('foo', r.set_body('foo').get_body())

    def test_get_set_query_params(self):
        r = Request('GET', 'http://whatever')
        self.assertEqual({'foo': 'bar'}, r.set_query_params({'foo': 'bar'}).get_query_params())

    def test_get_set_method(self):
        r = Request('GET', 'http://whatever')
        self.assertEqual('POST', r.set_method('POST').get_method())


if __name__ == '__main__':
    unittest.main()

# Client/Module/ClassBroadcast.py

from PyQt5.QtCore import QObject
import socket
import time
import struct
import base64
import subprocess
import logging
from Module.Packages import ClassBroadcastFlag
class ClassBroadcast(QObject):
    parent = None
    current_ip = None
    socket_ip = None
    socket_port = None
    socket_buffer_size = None
    socket_obj = None

    def __init__(self, parent, current_ip, socket_ip, socket_port, socket_buffer_size):
        super(ClassBroadcast, self).__init__(parent)
        self.parent = parent
        self.current_ip = current_ip
        self.socket_ip = socket_ip
        self.socket_port = socket_port
        self.socket_buffer_size = socket_buffer_size
        self.__init_socket_obj()

    def __init_socket_obj(self):
        self.socket_obj = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        self.socket_obj.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 255)
        self.socket_obj.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket_obj.bind(('', self.socket_port))
        self.socket_obj.setsockopt(
            socket.IPPROTO_IP,
            socket.IP_ADD_MEMBERSHIP,
            socket.inet_aton(self.socket_ip) + socket.inet_aton(self.current_ip)
        )

    @staticmethod
    def execute_remote_command(command):
        subprocess.call(command, shell=True)

    def batch_send_decode(self, unpacked_data):
        integer_length = struct.calcsize('!i')
        targets_length = struct.unpack('!i', unpacked_data[:integer_length])[0]
        targets = unpacked_data[integer_length:integer_length + targets_length].split(b'\x00')
        targets = [socket.inet_ntoa(item) for item in targets]
        if self.current_ip in targets:
            data = unpacked_data[integer_length + targets_length:]
            return data
        return None

    def start(self):
        payload_size = self.socket_buffer_size - struct.calcsize('!2i')
        while True:
            try:
                socket_data, socket_addr = self.socket_obj.recvfrom(self.socket_buffer_size)
                unpacked_flag, unpacked_length, unpacked_data = struct.unpack(f'!2i{payload_size}s', socket_data)
                unpacked_data = unpacked_data[:unpacked_length]
                if unpacked_flag in (
                    ClassBroadcastFlag.Message,
                    ClassBroadcastFlag.Command,
                    ClassBroadcastFlag.RemoteSpyStart,
                    ClassBroadcastFlag.RemoteQuit,
                    ClassBroadcastFlag.ClientFileRecieved
                ):
                    data = self.batch_send_decode(unpacked_data)
                    if data is None:
                        continue
                if unpacked_flag == ClassBroadcastFlag.Message:
                    message = base64.b64decode(data).decode('utf-8')
                    self.parent.message_recieved.emit(str(message))
                elif unpacked_flag == ClassBroadcastFlag.Command:
                    message = base64.b64decode(data).decode('utf-8')
                    self.execute_remote_command(str(message))
                elif unpacked_flag == ClassBroadcastFlag.RemoteSpyStart:
                    self.parent.start_remote_spy.emit()
                elif unpacked_flag == ClassBroadcastFlag.RemoteQuit:
                    self.parent.quit_self.emit()
                    return
                elif unpacked_flag == ClassBroadcastFlag.ClientFileRecieved:
                    self.parent.client_file_recieved.emit()
                elif unpacked_flag == ClassBroadcastFlag.StartScreenBroadcast:
                    self.parent.toggle_screen_broadcats.emit(True)
                elif unpacked_flag == ClassBroadcastFlag.StopScreenBroadcast:
                    self.parent.toggle_screen_broadcats.emit(False)
                elif unpacked_flag == ClassBroadcastFlag.ConsoleQuit:
                    self.parent.reset_all.emit()
                    return
            except Exception as e:
                logging.warning(f'Failed to decode socket data: {e}')
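
# Hedged sender-side sketch of the frame layout that start() and
# batch_send_decode() above expect (the flag value and buffer size here are
# hypothetical; the real ones come from Module.Packages and the console app):
import socket
import struct

def pack_broadcast_frame(flag, target_ips, payload, buffer_size=65000):
    # Inner block: targets length, NUL-joined packed IPv4 addresses, payload.
    # Note the decoder splits on b'\x00', which assumes no address byte is zero.
    packed_targets = b'\x00'.join(socket.inet_aton(ip) for ip in target_ips)
    inner = struct.pack('!i', len(packed_targets)) + packed_targets + payload
    # Outer block: flag, real length, body zero-padded to a fixed payload size.
    payload_size = buffer_size - struct.calcsize('!2i')
    return struct.pack(f'!2i{payload_size}s', flag, len(inner), inner)

# e.g. frame = pack_broadcast_frame(1, ['192.168.1.20'], b'aGVsbG8=')
# followed by sock.sendto(frame, (multicast_group, port)).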

# automol/tests/test_cart.py

import os
import pytest
import numpy
import automol.util.mat
import automol.util.vec
import automol.util.highd_mat
from ioformat import read_text_file
PATH = os.path.dirname(os.path.realpath(__file__))
MAT = (
    (-2.3779010433, 5.2665623735, 0.0368733734),
    (-1.7871641824, 4.2084900234, -0.6608528628),
    (-1.9843935085, 5.3116891951, 1.0755446343),
    (-2.1223381321, 6.2190137934, -0.4713637932),
    (-2.0307815005, 3.3844774267, -0.1645507122),
    (-3.4841693472, 5.1584160162, 0.0370840829)
)
def test__vec():
    """ test automol.util.vec
    """
    # Test angle calculators
    ref_perp = (-0.90180687, -0.40614043, -0.14762896)
    perp = automol.util.vec.unit_perpendicular(
        MAT[0], MAT[1], allow_parallel=False)
    assert numpy.allclose(perp, ref_perp)
    ref_perp = (0.000, 0.000, 0.000)
    perp = automol.util.vec.unit_perpendicular(
        MAT[0], MAT[0], allow_parallel=True)
    assert numpy.allclose(perp, ref_perp)
    with pytest.raises(ValueError):
        automol.util.vec.unit_perpendicular(
            MAT[0], MAT[0], allow_parallel=False)
    ref_angle = 0.28211376550390677
    angle = automol.util.vec.projected_central_angle(
        MAT[0], MAT[1], MAT[2])
    assert numpy.isclose(angle, ref_angle)
    # Test the string writer
    ref_vec_str = read_text_file(['data'], 'vec.dat', path=PATH)
    vec_str = automol.util.vec.string((MAT[0] + MAT[1]), num_per_row=3)
    assert vec_str == ref_vec_str
def test__mat():
    """ test automol.util.mat
    """
    # Various matrix builder functions
    rand_rot_mat = automol.util.mat.random_rotation_matrix()
    assert len(rand_rot_mat) == 3
    assert all(len(row) == 3 and all(isinstance(val, float) for val in row)
               for row in rand_rot_mat)
    ref_rot_mat = (
        (1.0, 0.0, 0.0),
        (0.0, -0.9922575676015892, 0.12419709955299955),
        (0.0, -0.12419709955299955, -0.9922575676015892)
    )
    rot_mat = automol.util.mat.rotation_matrix(
        (1.0, 0.0, 0.0), 30.0/numpy.pi)
    assert numpy.allclose(rot_mat, ref_rot_mat)
    ref_axis_align_mat = (
        (-0.41149979255451374, 0.9113875155894119, 0.006380999557419693),
        (0.0, 0.0, 0.0),
        (-0.9018068740366958, -0.4061404341551566, -0.14762895950464514)
    )
    axis_align_mat = automol.util.mat.axis_alignment_matrix(
        MAT[0], MAT[1])
    assert numpy.allclose(axis_align_mat, ref_axis_align_mat)
    ref_superimp_mat = (
        (0.9931073252359884, 0.07107456632823939, 0.0932000353212035),
        (-0.05722539832771756, 0.9879718232559653, -0.1436555959178673),
        (-0.10228926800429702, 0.13733201547176194, 0.9852293251713573)
    )
    xyz1 = (MAT[0], MAT[1])
    xyz2 = (MAT[2], MAT[3])
    superimp_mat = automol.util.mat.superimposition_matrix(
        xyz1, xyz2, keep_origin=True)
    assert numpy.allclose(superimp_mat, ref_superimp_mat)
    # Test the string writer
    ref_mat_str = read_text_file(['data'], 'mat.dat', path=PATH)
    mat_str = automol.util.mat.string(MAT)
    assert mat_str == ref_mat_str
def test__highd_mat():
    """ test automol.util.highd_mat.from_string
        test automol.util.highd_mat.string
        test automol.util.highd_mat.string_submat_3d
        test automol.util.highd_mat.string_submat_4d
    """

    def _chk_mat_strs(str1, str2):
        """ Check if two matrix strings are similar
        """
        match = True
        for line1, line2 in zip(str1.splitlines(), str2.splitlines()):
            vals1 = tuple(float(val) for val in line1.strip().split())
            vals2 = tuple(float(val) for val in line2.strip().split())
            if not numpy.allclose(vals1, vals2):
                match = False
        return match

    ref_3d_str = read_text_file(['data'], 'ch4_h.cubic', path=PATH)
    ref_4d_str = read_text_file(['data'], 'ch4_h.quartic', path=PATH)
    # Handle representations with full matrices and strings printed by indices
    test_3d_mat = automol.util.highd_mat.from_string(ref_3d_str)
    test_4d_mat = automol.util.highd_mat.from_string(ref_4d_str)
    test_3d_str = automol.util.highd_mat.string(test_3d_mat)
    test_4d_str = automol.util.highd_mat.string(test_4d_mat)
    assert _chk_mat_strs(test_3d_str, ref_3d_str)
    assert _chk_mat_strs(test_4d_str, ref_4d_str)
    # Handle string representations printed by submatrices
    test_3d_submat_str = automol.util.highd_mat.string_submat_3d(test_3d_mat)
    assert (test_3d_submat_str ==
            read_text_file(['data'], 'ch4_h.cubic_submat', path=PATH))
    test_4d_submat_str = automol.util.highd_mat.string_submat_4d(test_4d_mat)
    assert (test_4d_submat_str ==
            read_text_file(['data'], 'ch4_h.quartic_submat', path=PATH))
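
# The ref_rot_mat checked in test__mat is the standard right-handed rotation
# about the x-axis; a quick numpy cross-check independent of automol:
import numpy as np

def rotation_matrix_x(theta):
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[1.0, 0.0, 0.0],
                     [0.0, c, -s],
                     [0.0, s, c]])

# reproduces ref_rot_mat for the angle used above
print(rotation_matrix_x(30.0 / np.pi))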

# db_eplusout_reader

import os
from db_eplusout_reader.db_esofile import DBEsoFile, DBEsoFileCollection
from db_eplusout_reader.sql_reader import get_results_from_sql
def get_results(
file_or_path, variables, frequency, alike=False, start_date=None, end_date=None
):
r"""
Extract results from given file.
Use a single or list of 'Variable' named tuples to specify requested outputs.
v = Variable(
key="PEOPLE BLOCK1:ZONE2",
type="Zone Thermal Comfort Fanger Model",
units=None
)
When one (or more) of the 'Variable' fields is set to None, filtering on that
part of the variable is not applied.
Variable(None, None, None) returns all outputs.
Variable(None, None, "J") returns all 'energy' outputs.
Frequency defines the output interval - it can be one of "timestep", "hourly",
"daily", "monthly", "annual" and "runperiod". The constants module includes the
helpers TS, H, D, M, A and RP.
The function needs to be called once per frequency to gather results from
several intervals.
The optional 'alike' argument defines whether the variable search matches the
full string or just a substring (the search is always case insensitive).
The optional start and end date arguments slice the resulting array based on
timestamp data.
Examples
--------
from datetime import datetime
from db_eplusout_reader import Variable, get_results
from db_eplusout_reader.constants import D
variables = [
Variable("", "Electricity:Facility", "J"), # standard meter
Variable("Cumulative", "Electricity:Facility", "J"), # cumulative meter
Variable(None, None, None), # get all outputs
Variable("PEOPLE BLOCK1:ZONE2", "Zone Thermal Comfort Fanger Model PMV", ""),
Variable("PEOPLE BLOCK", "Zone Thermal Comfort Fanger Model PMV", "")
]
# get results for variables fully matching output variables
# the last variable above won't be found as variable 'key' does not fully match
# variables will be extracted from 'daily' interval results
# start and end date slicing is not applied
results = get_results(
r"C:\some\path\eplusout.sql",
variables=variables,
frequency=D,
alike=False
)
# 'alike' argument is set to True so even substring match is enough to match variable
# the last variable will be found ("PEOPLE BLOCK" matches "PEOPLE BLOCK1:ZONE2")
# start and end dates are specified so only 'May' data will be included
results = get_results(
r"C:\some\path\eplusout.sql",
variables=variables,
frequency=D,
alike=True,
start_date=datetime(2002, 5, 1, 0),
end_date=datetime(2002, 5, 31, 23, 59)
)
Parameters
----------
file_or_path : DBEsoFile, DBEsoFileCollection or PathLike
A processed EnergyPlus .eso file, path to unprocessed .eso file
or path to unprocessed .sql file.
variables : Variable or List of Variable
Requested output variables.
frequency : str
An output interval, this can be one of {TS, H, D, M, A, RP} constants.
alike : default False, bool
Specify if full string or only part of variable attribute
needs to match, filtering is case insensitive in both cases.
start_date : default None, datetime.datetime
Lower datetime interval boundary, inclusive.
end_date : default None, datetime.datetime
Upper datetime interval boundary, inclusive.
Returns
-------
ResultsDictionary : Dict of {Variable, list of float}
A dictionary like class with some properties to easily extract output values.
"""
if isinstance(file_or_path, str):
_, ext = os.path.splitext(file_or_path)
if ext == ".sql":
results = get_results_from_sql(
file_or_path,
variables,
frequency,
alike=alike,
start_date=start_date,
end_date=end_date,
)
elif ext == ".eso":
raise NotImplementedError("Sorry, this has not been implemented yet.")
else:
raise TypeError("Unsupported file type '{}' provided!".format(ext))
else:
if isinstance(file_or_path, (DBEsoFile, DBEsoFileCollection)):
raise NotImplementedError("Sorry, this has not been implemented yet.")
else:
raise TypeError(
"Unsupported class '{}' provided!".format(type(file_or_path).__name__)
)
return results | db_eplusout_reader/get_results.py | 0.637257 | 0.373419 |
from __future__ import absolute_import
import contextlib
from cStringIO import StringIO
from rdkit import Chem as C
from rdkit.Chem import inchi as Ci
from rdkit.Chem import Draw as CD
from rdalchemy.rdalchemy import tanimoto_threshold
from flask import (
abort,
current_app,
json,
render_template,
request,
send_file,
url_for,
)
from .models import (
MoleculeMixin,
coerse_to_mol,
mol_from_agg_id,
mol_from_lig_id,
Aggregator,
Ligand,
)
IMAGE_FORMAT_MIME_TYPES = {
'jpg': 'image/jpeg',
'png': 'image/png',
'gif': 'image/gif',
}
DOWNLOAD_FORMAT_MIMETYPES = {
'smi': 'chemical/x-daylight-smiles',
'sdf': 'chemical/x-mdl-sdfile',
'pdb': 'chemical/x-pdb',
}
SEARCH_INPUT_FORMATS = [
('aggregator', mol_from_agg_id),
('ligand', mol_from_lig_id),
('smiles', C.MolFromSmiles),
('smarts', C.MolFromSmarts),
('inchi', Ci.MolFromInchi),
('sdf', C.MolFromMolBlock),
('mol2', C.MolFromMol2Block),
('pdb', C.MolFromPDBBlock),
]
DOWNLOAD_FORMAT_WRITERS = {
'smi': lambda mol: mol_to_smiles_line(mol),
'inchi': Ci.MolToInchi,
'sdf': C.MolToMolBlock,
'pdb': C.MolToPDBBlock,
}
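# Note: 'inchi' has a writer but no entry in DOWNLOAD_FORMAT_MIMETYPES, so
# download_mol (below) currently rejects 'inchi' downloads with a 404.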
IMAGE_URL_ENDPOINTS = {
Aggregator: ('aggregator_image', 'agg_id', 'id'),
Ligand: ('ligand_image', 'lig_id', 'id'),
}
def get_mol_name(mol, default=None):
if mol.HasProp('_Name'):
return mol.GetProp('_Name')
else:
return default
def image_url_for(obj):
cls = type(obj)
try:
endpoint, param, attr = IMAGE_URL_ENDPOINTS[cls]
except KeyError:
return None
else:
params = {}
params[param] = getattr(obj, attr)
return url_for(endpoint, **params)
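# Illustrative note (not part of the original module): image_url_for returns
# None for unregistered types, so callers can guard on the result, e.g.
# url = image_url_for(obj) or DEFAULT_IMAGE_URL  # DEFAULT_IMAGE_URL is hypothetical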
def mol_to_smiles_line(mol):
name = get_mol_name(mol, default='')
return '{0} {1}'.format(C.MolToSmiles(mol, isomericSmiles=True), name)
def represent_mol(mol, format):
if format in IMAGE_FORMAT_MIME_TYPES:
return draw_mol(mol, format)
elif format in DOWNLOAD_FORMAT_MIMETYPES:
return download_mol(mol, format)
else:
abort(404)
def download_mol(mol, format):
format = format.lower()
try:
writer = DOWNLOAD_FORMAT_WRITERS[format]
mimetype = DOWNLOAD_FORMAT_MIMETYPES[format]
except KeyError:
abort(404)
buffer = StringIO()
buffer.write(writer(mol))
buffer.seek(0)
mol_name = get_mol_name(mol)
options = {}
if mol_name:
options.update(as_attachment=True,
attachment_filename="{0}.{1}".format(mol_name, format.lower()))
return send_file(buffer, mimetype, **options)
def draw_mol(mol, format='png'):
format = format.lower()
if format not in IMAGE_FORMAT_MIME_TYPES:
abort(404)
image_size = current_app.config.get('MOLECULE_DISPLAY_IMAGE_SIZE', (200,200))
image = CD.MolToImage(mol, size=image_size)
image_data = image_to_buffer(image, format)
mime_type = IMAGE_FORMAT_MIME_TYPES.get(format)
return send_file(image_data, mime_type)
def get_molecules_for_view(molecules, page_num, sorting=None, config=None):
config = config or current_app.config
per_page = config.get('MOLECULES_DISPLAY_PER_PAGE', 30)
if sorting:
ordered = molecules.order_by(sorting)
else:
ordered = molecules
paginated = ordered.paginate(page_num, per_page)
return paginated
def image_to_buffer(image, format='PNG'):
buf = StringIO()
image.save(buf, format.upper())
buf.seek(0)
return buf
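# A minimal usage sketch (an assumption, not part of the original module):
# render a molecule to in-memory PNG bytes. 'CCO' (ethanol) is an arbitrary
# example SMILES.
def _demo_image_to_buffer():
    mol = C.MolFromSmiles('CCO')
    image = CD.MolToImage(mol, size=(200, 200))
    return image_to_buffer(image, 'png').getvalue()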
def extract_query_mol(params, formats=SEARCH_INPUT_FORMATS, onerror_fail=True):
    mol, query, error = None, None, None  # pre-bind query: no format may match
for input_format, parser in formats:
try:
query = params[input_format]
mol = parser(str(query))
if mol is None:
raise ValueError("Failed to parse {}".format(input_format))
except KeyError:
pass
except ValueError as e:
error = "Invalid query term (reason: {})".format(str(e))
break
else:
break
else:
if onerror_fail:
abort(400)
else:
error = "Query parameter missing (expected one of: {}"\
.format(', '.join(fmt for fmt, _ in SEARCH_INPUT_FORMATS))
return mol, query, error
def get_similarity_parameters(this_request, onerror_fail=True, **kwargs):
default_search_cutoff = current_app.config.get('MOLECULE_SEARCH_TANIMOTO_CUTOFF', 0.50)
default_result_limit = current_app.config.get('MOLECULE_SEARCH_RESULT_LIMIT', None)
search_cutoff = float(this_request.args.get('cutoff', default_search_cutoff))
result_limit = this_request.args.get('count', default_result_limit)
query_structure, query_input, error = extract_query_mol(this_request.args, SEARCH_INPUT_FORMATS)
if result_limit is not None:
result_limit = int(result_limit)
if query_structure is not None:
query_molecule = coerse_to_mol(query_structure)
else:
query_molecule = None
if 'override_limit' in kwargs:
result_limit = kwargs['override_limit']
if error is not None and onerror_fail:
abort(400)
else:
return {
'cutoff': search_cutoff,
'limit': result_limit,
'query': query_molecule,
'mol': query_structure,
'raw': query_input,
'error': error,
}
@contextlib.contextmanager
def run_similar_molecules_query(result_type, params):
needle = params['query']
needle_fp = needle.bind.rdkit_fp # Force server-side fingerprint function
haystack = result_type.query # Searchable Aggregator dataset
haystack_fps = result_type.structure.rdkit_fp # Comparable fingerprint property
# Construct structural query sorted and limited by similarity with tanimoto scores annotated
similar = haystack.filter(haystack_fps.tanimoto_similar(needle_fp)) # Restrict to molecules with high Tc
similar = similar.order_by(haystack_fps.tanimoto_nearest_neighbors(needle_fp)) # Put highest Tc's first
similar = similar.add_columns(needle_fp.tanimoto(haystack_fps)) # Annotate results with Tc
if 'limit' in params:
similar = similar.limit(params['limit'])
# Run query with specified tanimoto threshold
if 'cutoff' in params:
similar = similar.with_transformation(tanimoto_threshold.set_in_session(params['cutoff']))
yield similar
def get_similar_molecules(result_type, query_structure=None, **params):
params.setdefault('cutoff', current_app.config.get('MOLECULE_SEARCH_TANIMOTO_CUTOFF', 0.50))
params.setdefault('limit', current_app.config.get('MOLECULE_SEARCH_RESULT_LIMIT', 10))
if query_structure is not None:
params.setdefault('mol', query_structure)
if 'mol' in params:
params.setdefault('query', coerse_to_mol(params['mol']))
with run_similar_molecules_query(result_type, params) as results:
annotated = annotate_tanimoto_similarity(results)
return annotated
def annotate_tanimoto_similarity(molecules_with_tc, attribute='tanimoto_similarity'):
for molecule, tc in molecules_with_tc:
setattr(molecule, attribute, tc)
setattr(molecule, attribute+'_percentage', int(100 * tc))
yield molecule
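# Note: annotate_tanimoto_similarity is a generator, so the attributes are
# attached lazily as results are consumed (e.g. by list() in aggregator_report).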
def aggregator_report(structure):
similarity_cutoff = current_app.config.get('AGGREGATOR_SIMILARITY_TANIMOTO_CUTOFF', 0.7)
logp_cutoff = current_app.config.get('AGGREGATOR_LOGP_CUTOFF', 3)
query_mol = coerse_to_mol(structure)
query_logp = query_mol.logp
similar_aggregators = get_similar_molecules(Aggregator,
mol=query_mol,
cutoff=similarity_cutoff,
limit=None)
similar_aggregators = list(similar_aggregators)
aggregator_tcs = [round(agg.tanimoto_similarity, 2) for agg in similar_aggregators]
max_tc = max(aggregator_tcs + [0])
num_similar = len(similar_aggregators)
has_similar_aggregators = num_similar > 0
high_logp = query_logp >= logp_cutoff
if max_tc == 1.0:
status = "known"
elif has_similar_aggregators and high_logp:
status = "likely"
elif has_similar_aggregators or high_logp:
status = "maybe"
else:
status = "requires testing"
return {
'query': query_mol,
'status': status,
'similar': similar_aggregators,
'num_similar': num_similar,
'logp': query_logp,
'max_tc': max_tc,
} | aggregatorcomparor/helpers.py | 0.462959 | 0.194406 |
import os
import time
class Vector:
def __init__(self,x,y):
self.x=x
self.y=y
def __add__(self,v):
return Vector(self.x+v.x,self.y+v.y)
def __eq__(self,v):
return self.x==v.x and self.y==v.y
def __str__(self):
return "(" + str(self.x) + ","+ str(self.y) + ")"
class Laberinto:
def __init__(self, path):
self.leer(path)
self.busca_robot()
self.visitados=[]
def imprimir(self):
for fila in self.lab:
print(''.join(fila))
def leer(self, path):
archivo = open(path,"r")
self.lab = []
for linea in archivo:
lab_linea = []
for c in linea.rstrip('\n'):
lab_linea.append(c)
self.lab.append(lab_linea)
archivo.close()
def busca_robot(self):
for i in range(len(self.lab)):
for j in range(len(self.lab[i])):
if self.lab[i][j] == '+':
self.pos = Vector(i,j)
def movimientos(self):
desp=[Vector(-1,0),Vector(1,0),Vector(0,-1),Vector(0,1)]
movs=[]
for d in desp:
nueva_pos=self.pos+d
if self.lab[nueva_pos.x][nueva_pos.y]!='x' and not nueva_pos in self.visitados:
movs.append(nueva_pos)
return movs
def marcar_ruta(self):
for v in self.visitados:
os.system('clear')
if self.lab[v.x][v.y]!='g':
self.lab[v.x][v.y]='*'
self.imprimir()
time.sleep(0.3)
def resolver(self):
if self.lab[self.pos.x][self.pos.y]=='o':
self.marcar_ruta()
return True
for m in self.movimientos():
if m not in self.visitados:
self.visitados.append(m)
posicion_original = self.pos
self.pos = m
if self.resolver():
return True
self.visitados.pop()
self.pos = posicion_original
return False
def resolver_optimo(self,i,limite):
if self.lab[self.pos.x][self.pos.y]=='o':
self.marcar_ruta()
return True
if i > limite:
return False
for m in self.movimientos():
if m not in self.visitados:
self.visitados.append(m)
posicion_original = self.pos
self.pos = m
if self.resolver_optimo(i+1,limite):
return True
self.visitados.pop()
self.pos = posicion_original
return False
# Main code
lab = Laberinto('maze_3.txt')
lab.imprimir()
#lab.resolver()
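# Iterative deepening: raising the depth limit step by step guarantees the
# first solution found uses the fewest moves.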
for lim in range(10000):
if lab.resolver_optimo(0,lim):
break | clase1117/laberinto.py | 0.133288 | 0.251643 |
from Domain.grade import Grade
from Undo.undo_service import FunctionCall, Operation
class GradeService:
"""
This is GradeService class. Here we perform specific functionalities and validate the input.
We raise value errors if something is wrong.
"""
def __init__(self, grade_repository, grade_validator, undo_service):
"""
This is the constructor for GradeService class. Here we have the grade repository and grade validators.
:param grade_repository: it is a GradeRepository type where we have the list of grades and
some functionalities (add, remove)
:param grade_validator: it is a GradeValidator type that holds the methods responsible for validation
"""
self.repository = grade_repository
self.validator = grade_validator
self.undo_service = undo_service
def create_grade(self, student_id, assignment_id, grade_value):
"""
Here we 'create' and validate the new grade.
:param student_id: natural number representing the unique code of a Student
:param assignment_id: natural number representing the unique code of an Assignment
:param grade_value: integer in [0,10], representing the mark of a student-assignment connection
"""
new_grade = Grade(student_id, assignment_id, grade_value)
self.validator.validate(new_grade)
self.repository.add_grade_student(new_grade)
undo = FunctionCall(self.remove_grade_function, student_id, assignment_id)
redo = FunctionCall(self.repository.add_grade_student, new_grade)
operation = Operation(undo, redo)
self.undo_service.record(operation)
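        # Recording the (undo, redo) pair lets the undo service roll back this
        # creation (by removing the grade) or replay it later.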
def remove_grade_function(self, student_id, assignment_id):
"""
Here we remove a grade by its student_id and by its assignment_id
:param student_id: natural number representing the unique code of a Student
:param assignment_id: natural number representing the unique code of an Assignment
:return:
"""
GRADE = self.repository.find_by_ids(student_id, assignment_id)
self.repository.remove(lambda grade: not(grade.student_id == student_id and grade.assignment_id == assignment_id))
return GRADE
def remove_grade(self, student_id, assignment_id):
"""
Here we remove a grade by its student and assignment id.
:param student_id: natural number representing the unique code of a student
:param assignment_id: natural number representing the unique code of an assignment
"""
GRADE = self.remove_grade_function(student_id, assignment_id)
undo = FunctionCall(self.repository.add_grade_student, GRADE)
redo = FunctionCall(self.remove_grade_function, student_id, assignment_id)
operation = Operation(undo, redo)
self.undo_service.record(operation)
def filter_grades_by_student_id(self, student_id):
"""
Here we filter grades list by student_id.
:param student_id: natural number representing the unique code of a student
:return: filter grades list by student_id
"""
return list(filter((lambda student: student.student_id == student_id), self.get_grades_list()))
def filter_grades_by_assignment_id(self, assignment_id):
"""
Here we filter grades list by assignment_id.
:param assignment_id: natural number representing the unique code of an assignment
:return: filter grades list by assignment_id
"""
return list(filter((lambda assignment: assignment.assignment_id == assignment_id), self.get_grades_list()))
def get_grades_list(self):
"""
Here we pass the grades list from the repository to the service, so that we can display the grades in the UI later
:return: grades repository
"""
return self.repository.get_grades_list_repository()
def give_a_mark(self, student_id, assignment_id, grade_value):
"""
Here we give a mark to a student's assignment and we create an operation with undo and redo functions.
:param student_id: the student's id which will be graded
:param assignment_id: the assignment id which will be graded
:param grade_value: the mark
"""
grade = self.repository.give_a_mark(student_id, assignment_id, grade_value)
self.validator.validate(grade)
undo = FunctionCall(self.reset_a_mark, grade.student_id, grade.assignment_id)
redo = FunctionCall(self.repository.give_a_mark, grade.student_id, grade.assignment_id, grade.grade_value)
operation = Operation(undo, redo)
self.undo_service.record(operation)
def reset_a_mark(self, student_id, assignment_id):
"""
Here we set the grade_value of a grade to 0.
:param student_id: the student's id which will be ungraded
:param assignment_id: the assignment id which will be ungraded
:return:
"""
grade = self.repository.find_by_ids(student_id, assignment_id)
grade.grade_value = 0
def init_grades(self):
self.repository.add_grade_student(Grade(1, 1, 0))
self.repository.add_grade_student(Grade(1, 2, 10))
self.repository.add_grade_student(Grade(1, 3, 9))
self.repository.add_grade_student(Grade(1, 6, 7))
self.repository.add_grade_student(Grade(2, 10, 0))
self.repository.add_grade_student(Grade(3, 10, 0))
self.repository.add_grade_student(Grade(5, 10, 8))
self.repository.add_grade_student(Grade(4, 7, 4))
self.repository.add_grade_student(Grade(4, 2, 0))
self.repository.add_grade_student(Grade(8, 10, 0))
self.repository.add_grade_student(Grade(10, 10, 0)) | Service/grade_service.py | 0.751283 | 0.561575 |
import urllib
import re
import pandas as pd
import datetime as dt
import zipfile
import StringIO
from extra import ProgressBar
import os
import yahooFinance as yf
from string import Template
import numpy as np
def fileName2date(fName):
'''convert filename to date'''
name = os.path.splitext(fName)[0]
m = re.findall('\d+',name)[0]
return dt.datetime.strptime(m,'%Y%m%d').date()
def date2fileName(date):
return 'BATSshvol%s.txt.zip' % date.strftime('%Y%m%d')
def downloadUrl(date):
s = Template('http://www.batstrading.com/market_data/shortsales/$year/$month/$fName-dl?mkt=bzx')
url = s.substitute(fName=date2fileName(date), year=date.year, month='%02d' % date.month)
return url
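# For example (derived from the template above):
# downloadUrl(dt.date(2012, 3, 1)) ->
# 'http://www.batstrading.com/market_data/shortsales/2012/03/BATSshvol20120301.txt.zip-dl?mkt=bzx'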
class BATS_Data(object):
def __init__(self, dataDir):
''' create class. dataDir: directory to which files are downloaded '''
self.dataDir = dataDir
self.shortRatio = None
self._checkDates()
def _checkDates(self):
''' update list of available dataset dates'''
self.dates = []
for fName in os.listdir(self.dataDir):
self.dates.append(fileName2date(fName))
def _missingDates(self):
''' check for missing dates based on spy data'''
print 'Getting yahoo data to determine business dates... ',
spy = yf.getHistoricData('SPY',sDate = (2010,1,1))
busDates = [d.date() for d in spy.index ]
print 'Date range: ', busDates[0] ,'-', busDates[-1]
missing = []
for d in busDates:
if d not in self.dates:
missing.append(d)
return missing
def updateDb(self):
print 'Updating database'
missing = self._missingDates()
for i, date in enumerate(missing):
source = downloadUrl(date)
dest = os.path.join(self.dataDir,date2fileName(date))
if not os.path.exists(dest):
print 'Downloading [%i/%i]' %(i,len(missing)), source
urllib.urlretrieve(source, dest)
else:
print 'x',
print 'Update done.'
self._checkDates()
def loadDate(self,date):
fName = os.path.join(self.dataDir, date2fileName(date))
zipped = zipfile.ZipFile(fName) # open zip file
lines = zipped.read(zipped.namelist()[0]) # read the first file in the archive into a string
buf = StringIO.StringIO(lines) # create buffer
df = pd.read_csv(buf,sep='|',index_col=1,parse_dates=False,dtype={'Date':object,'Short Volume':np.float32,'Total Volume':np.float32})
s = df['Short Volume']/df['Total Volume']
s.name = dt.datetime.strptime(df['Date'][-1],'%Y%m%d')
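        # one day's short-volume ratio per ticker; the series is named by its
        # trading date so loadData can stack days into a DataFrame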
return s
def loadData(self):
''' load data from zip files '''
data = []
pb = ProgressBar(len(self.dates)-1)
for idx, date in enumerate(self.dates):
data.append(self.loadDate(date))
pb.animate(idx)
self.shortRatio = pd.DataFrame(data)
return self.shortRatio | lib/bats.py | 0.150496 | 0.095055 |
import numpy as np
from modules import functions as f
flat = lambda x: [a for b in x for a in b]
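# 'flat' flattens a list of lists into a single list.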
class Vertex(object):
def __init__(self, number, coord):
self.number = number
self.coord = coord
def find_edges(self,main_edges):
self.edges = [[x[0], x[1]] for x in main_edges if self.number in x]
self.aux = [x for j in self.edges for x in j if x != self.number]
self.aux_conn = [[x[0], x[1]] for x in main_edges if (x[0] in self.aux) and (x[1] in self.aux)]
def find_planes(self):
edges = self.edges
aux_conn = self.aux_conn
tri = []
for x in edges:
for y in aux_conn:
if x[1] == y[0]:
tri.append(x+[y[1]])
elif x[0] == y[1]:
tri.append(y + [x[1]])
elif (x[0] == y[0]) and ([x[1], y[1]] in edges):
tri.append(x+[y[1]])
elif (x[0] == y[0]) and ([y[1], x[1]] in edges):
tri.append([x[0], y[1], x[1]])
elif (x[1] == y[1]) and ([x[0], y[0]] in edges):
tri.append([x[0], y[0], x[1]])
elif (x[1] == y[1]) and ([y[0], x[0]] in edges):
tri.append([y[0]]+x)
tri_red = []
for j in tri:
if j not in tri_red:
tri_red.append(j)
tri_red = [i[1:] + [i[0]] if i.index(self.number) == 1 else i for i in tri_red]
tri_red = [[i[2]] + i[:2] if i.index(self.number) == 2 else i for i in tri_red]
self.tri = tri_red
def icosahedron(i, n, k):
v = np.array([[0, 0, 1.9],
[1.618, -0.52, 0.85],
[1, 1.376, 0.85],
[0, 0, -1.9],
[-1.618, 0.52, -0.85],
[-1, -1.376, -0.85],
[0, -1.7, 0.85],
[1.618, 0.52, -0.85],
[-1, 1.376, 0.85],
[0, 1.7, -0.85],
[-1.618, -0.52, 0.85],
[1, -1.376, -0.85]])
def split(v, p):
edges = find_edges(v, p[0])
triang = make_poly(v, edges)
v_add1 = np.array([v[t].mean(axis=0) for t in triang])
v_add1 = v_add1 + v_add1 / p[1] * p[2]
v = np.vstack((v, v_add1))
return v
if n>=1:
v = split(v, [2.2, 1.5, 0.38])
if n>=2:
v = split(v, [1.5, 1.75, 0.15])
if n == 3:
v = split(v, [1.0, 1.85, 0.045])
v *= k
v += i
#print(max(flat([[f.distance(i, j) for i in v] for j in v])))
return v
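# The base vertices above describe an icosahedron (1.618 approximates the
# golden ratio); each split pass adds face centroids pushed slightly outward so
# the refined mesh stays roughly spherical.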
def find_edges(v, maxd):
edges = []
for idx, i in enumerate(v):
for jdx, j in enumerate(v):
if idx != jdx:
d = f.distance(i, j)
if d < maxd:
k = [idx, jdx]
k.sort()
if k not in edges:
edges.append(k)
return edges
def make_poly(v, edges):
vertices = [Vertex(i, v[i]) for i in range(0, v.shape[0])]
triang = []
for i in vertices:
i.find_edges(edges)
i.find_planes()
for j in i.tri:
j.sort()
if j not in triang:
triang.append(j)
return triang
def find_triang(pts, n):
if n == 0:
l = 3.3
elif n == 1:
l = 2.3
elif n == 2:
l = 1.6
elif n == 3:
l = 1.0
edges = find_edges(pts, l)
pts = np.array(pts)
triang = make_poly(pts, edges)
return triang
def neighs_dict(subset, atoms):
neighs = {}
for adx, a in enumerate(subset):
dist = [f.distance(x, a) for x in atoms]
neighs[adx] = [kx for kx, k in enumerate(dist) if (k <= 5) and (k > 0.1)]
return neighs
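# neighs_dict maps each subset atom index to the indices of atoms within 5 A,
# excluding near-zero self distances (below 0.1).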
def find_core(subset, atoms, atom_neighs):
ni, k = 1, 1.63
icos1 = icosahedron(np.array([0, 0, 0]), 0, k)
icos2 = icosahedron(np.array([0, 0, 0]), ni, k)
surf, core = [], []
for ax, a in enumerate(subset):
v = icos1 + a
ds = [min([f.distance(atoms[i],j) for i in atom_neighs[ax]]) for j in v]
ds = len([i for i in ds if i <= 3.11])
if icos1.shape[0] - ds == 0:
core.append(ax)
else:
v = icos2 + a
ds = [min([f.distance(atoms[i], j) for i in atom_neighs[ax]]) for j in v]
ds = [ix for ix, i in enumerate(ds) if i > 3.11]
if len(ds) <= 3:
core.append(ax)
return core | iNNterfaceDesign_scripts/modules/geometry_functions.py | 0.174551 | 0.554169 |
r"""
Bending of collimating mirror
-----------------------------
Uses :mod:`shadow` backend.
File: `\\examples\\withShadow\\03\\03_DCM_energy.py`
Influence onto energy resolution
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Pictures after monochromator,
:ref:`type 2 of global normalization<globalNorm>`. The nominal radius is 7.4
km. Watch the energy distribution when the bending radius is smaller or greater
than the nominal one.
+---------+---------+---------+---------+
| |VCMR1| | |VCMR2| | |VCMR3| | |
+---------+---------+---------+ |VCMR4| |
| |VCMR7| | |VCMR6| | |VCMR5| | |
+---------+---------+---------+---------+
.. |VCMR1| image:: _images/03VCM_R0496453_norm2.*
:scale: 35 %
.. |VCMR2| image:: _images/03VCM_R0568297_norm2.*
:scale: 35 %
.. |VCMR3| image:: _images/03VCM_R0650537_norm2.*
:scale: 35 %
.. |VCMR4| image:: _images/03VCM_R0744680_norm2.*
:scale: 35 %
:align: middle
.. |VCMR5| image:: _images/03VCM_R0852445_norm2.*
:scale: 35 %
.. |VCMR6| image:: _images/03VCM_R0975806_norm2.*
:scale: 35 %
.. |VCMR7| image:: _images/03VCM_R1117020_norm2.*
:scale: 35 %
Influence onto focusing
~~~~~~~~~~~~~~~~~~~~~~~
Pictures at the sample position,
:ref:`type 1 of global normalization<globalNorm>`
+----------+----------+----------+----------+
| |VCMRF1| | |VCMRF2| | |VCMRF3| | |
+----------+----------+----------+ |VCMRF4| |
| |VCMRF7| | |VCMRF6| | |VCMRF5| | |
+----------+----------+----------+----------+
.. |VCMRF1| image:: _images/04VCM_R0496453_norm1.*
:scale: 35 %
.. |VCMRF2| image:: _images/04VCM_R0568297_norm1.*
:scale: 35 %
.. |VCMRF3| image:: _images/04VCM_R0650537_norm1.*
:scale: 35 %
.. |VCMRF4| image:: _images/04VCM_R0744680_norm1.*
:scale: 35 %
:align: middle
.. |VCMRF5| image:: _images/04VCM_R0852445_norm1.*
:scale: 35 %
.. |VCMRF6| image:: _images/04VCM_R0975806_norm1.*
:scale: 35 %
.. |VCMRF7| image:: _images/04VCM_R1117020_norm1.*
:scale: 35 %
"""
__author__ = "<NAME>"
__date__ = "1 Mar 2012"
import sys
sys.path.append(r"c:\Alba\Ray-tracing\with Python")
import numpy as np
import xrt.plotter as xrtp
import xrt.runner as xrtr
import xrt.backends.shadow as shadow
def main():
plot1 = xrtp.XYCPlot('star.03')
plot1.caxis.offset = 6000
plot2 = xrtp.XYCPlot('star.04')
plot2.caxis.offset = 6000
plot1.xaxis.limits = [-15, 15]
plot1.yaxis.limits = [-15, 15]
plot1.yaxis.factor *= -1
plot2.xaxis.limits = [-1, 1]
plot2.yaxis.limits = [-1, 1]
plot2.yaxis.factor *= -1
textPanel1 = plot1.fig.text(
0.89, 0.82, '', transform=plot1.fig.transFigure,
size=14, color='r', ha='center')
textPanel2 = plot2.fig.text(
0.89, 0.82, '', transform=plot2.fig.transFigure,
size=14, color='r', ha='center')
#==========================================================================
threads = 4
#==========================================================================
start01 = shadow.files_in_tmp_subdirs('start.01', threads)
start04 = shadow.files_in_tmp_subdirs('start.04', threads)
rmaj0 = 476597.0
shadow.modify_input(start04, ('R_MAJ', str(rmaj0)))
angle = 4.7e-3
tIncidence = 90 - angle * 180 / np.pi
shadow.modify_input(
start01, ('T_INCIDENCE', str(tIncidence)),
('T_REFLECTION', str(tIncidence)))
shadow.modify_input(
start04, ('T_INCIDENCE', str(tIncidence)),
('T_REFLECTION', str(tIncidence)))
rmirr0 = 744680.
def plot_generator():
for rmirr in np.logspace(-1., 1., 7, base=1.5) * rmirr0:
shadow.modify_input(start01, ('RMIRR', str(rmirr)))
filename = 'VCM_R%07i' % rmirr
filename03 = '03' + filename
filename04 = '04' + filename
plot1.title = filename03
plot2.title = filename04
plot1.saveName = [filename03 + '.pdf', filename03 + '.png']
plot2.saveName = [filename04 + '.pdf', filename04 + '.png']
# plot1.persistentName = filename03 + '.pickle'
# plot2.persistentName = filename04 + '.pickle'
textToSet = 'collimating\nmirror\n$R =$ %.1f km' % (rmirr * 1e-5)
textPanel1.set_text(textToSet)
textPanel2.set_text(textToSet)
yield
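            # each yield hands control back to the xrt runner, which traces the
            # configured number of repeats for the current bending radius before
            # the generator advances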
def after():
# import subprocess
# subprocess.call(["python", "05-VFM-bending.py"],
# cwd='/home/kklementiev/Alba/Ray-tracing/with Python/05-VFM-bending')
pass
xrtr.run_ray_tracing(
[plot1, plot2], repeats=640, updateEvery=2,
energyRange=[5998, 6002], generator=plot_generator, threads=threads,
globalNorm=True, afterScript=after, backend='shadow')
#this is necessary to use multiprocessing in Windows, otherwise the new Python
#contexts cannot be initialized:
if __name__ == '__main__':
main() | examples/withShadow/04_06/04_dE_VCM_bending.py | r"""
Bending of collimating mirror
-----------------------------
Uses :mod:`shadow` backend.
File: `\\examples\\withShadow\\03\\03_DCM_energy.py`
Influence onto energy resolution
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Pictures after monochromator,
:ref:`type 2 of global normalization<globalNorm>`. The nominal radius is 7.4
km. Watch the energy distribution when the bending radius is smaller or greater
than the nominal one.
+---------+---------+---------+---------+
| |VCMR1| | |VCMR2| | |VCMR3| | |
+---------+---------+---------+ |VCMR4| |
| |VCMR7| | |VCMR6| | |VCMR5| | |
+---------+---------+---------+---------+
.. |VCMR1| image:: _images/03VCM_R0496453_norm2.*
:scale: 35 %
.. |VCMR2| image:: _images/03VCM_R0568297_norm2.*
:scale: 35 %
.. |VCMR3| image:: _images/03VCM_R0650537_norm2.*
:scale: 35 %
.. |VCMR4| image:: _images/03VCM_R0744680_norm2.*
:scale: 35 %
:align: middle
.. |VCMR5| image:: _images/03VCM_R0852445_norm2.*
:scale: 35 %
.. |VCMR6| image:: _images/03VCM_R0975806_norm2.*
:scale: 35 %
.. |VCMR7| image:: _images/03VCM_R1117020_norm2.*
:scale: 35 %
Influence onto focusing
~~~~~~~~~~~~~~~~~~~~~~~
Pictures at the sample position,
:ref:`type 1 of global normalization<globalNorm>`
+----------+----------+----------+----------+
| |VCMRF1| | |VCMRF2| | |VCMRF3| | |
+----------+----------+----------+ |VCMRF4| |
| |VCMRF7| | |VCMRF6| | |VCMRF5| | |
+----------+----------+----------+----------+
.. |VCMRF1| image:: _images/04VCM_R0496453_norm1.*
:scale: 35 %
.. |VCMRF2| image:: _images/04VCM_R0568297_norm1.*
:scale: 35 %
.. |VCMRF3| image:: _images/04VCM_R0650537_norm1.*
:scale: 35 %
.. |VCMRF4| image:: _images/04VCM_R0744680_norm1.*
:scale: 35 %
:align: middle
.. |VCMRF5| image:: _images/04VCM_R0852445_norm1.*
:scale: 35 %
.. |VCMRF6| image:: _images/04VCM_R0975806_norm1.*
:scale: 35 %
.. |VCMRF7| image:: _images/04VCM_R1117020_norm1.*
:scale: 35 %
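
The seven scanned radii follow from the generator's log-spaced factors. A
minimal sketch (plain numpy, independent of the ray-tracing run) that
reproduces the radii encoded in the image file names above::

    import numpy as np

    rmirr0 = 744680.  # nominal bending radius in cm (7.4 km)
    for rmirr in np.logspace(-1., 1., 7, base=1.5) * rmirr0:
        # prints VCM_R0496453 ... VCM_R1117020, matching the file names
        print('VCM_R%07i -> R = %.1f km' % (rmirr, rmirr * 1e-5))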
"""
| 0.788827 | 0.449211 |
import state.StatePart
import globals as G
import event.EventInfo
import pyglet
from pyglet.window import mouse
from . import UIPart
import Language
class UIPartLable(UIPart.UIPart):
def __init__(self, text, position, press=event.EventInfo.MousePressEventInfo(pyglet.window.mouse.LEFT),
anchor_lable="WS", anchor_window="WS", on_press=None, color=(0, 0, 0, 255), text_size=20):
"""
creates an new UIPartButton
:param text: the text of the lable
:param position: the position of the lable
:param press: the EventInfo for mouse lables and mods, no area
:param anchor_lable: the anchor on the lable
:param anchor_window: the anchor on the window
"""
super().__init__(position, 0, anchor_window=anchor_window, anchor_element=anchor_lable)
self.text = text
self.press: event.EventInfo.MousePressEventInfo = press
self.color = color
self.text_size = text_size
self.on_press = on_press
self.lable = pyglet.text.Label(text=text)
self.active = False
def bind_to_eventbus(self):
self.master[0].eventbus.subscribe("user:mouse:press", self.on_mouse_press)
self.master[0].eventbus.subscribe("render:draw:2d", self.on_draw_2d)
def get_real_position(self):
        self.bboxsize = self.lable.content_width, self.lable.content_height
return super().get_real_position()
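    # the clickable area is computed on the fly in on_mouse_press: the lable's
    # real position plus its rendered text extents define press.area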
def on_mouse_press(self, x, y, button, modifiers):
mx, my = self.get_real_position()
        sx, sy = self.lable.content_width, self.lable.content_height
self.press.area = ((mx, my), (mx+sx, my+sy))
if self.press.equals(x, y, button, modifiers):
if self.on_press:
self.on_press(x, y)
def on_draw_2d(self):
x, y = self.get_real_position()
wx, wy = self.lable.content_width, self.lable.content_height
        size = self.lable.content_width, self.lable.content_height
self.lable.x = x + size[0] // 2 - wx // 2
self.lable.y = y + size[1] // 2 - wy // 2
self.lable.color = self.color
self.lable.font_size = self.text_size
self.lable.text = Language.decode(self.text)
        self.lable.draw() | state/ui/UIPartLable.py | 0.522689 | 0.314577 |
import gfapy
import unittest
class TestApiGfa1Lines(unittest.TestCase):
def test_C(self):
fields=["C","1","+","2","-","12","12M","MQ:i:1232","NM:i:3","ab:Z:abcd"]
s="\t".join(fields)
gfapy.Line(s) # nothing raised
self.assertEqual(gfapy.line.edge.Containment, gfapy.Line(s).__class__)
self.assertEqual(fields[0], gfapy.Line(s).record_type)
self.assertEqual(fields[1], gfapy.Line(s).from_segment)
self.assertEqual(fields[2], gfapy.Line(s).from_orient)
self.assertEqual(fields[3], gfapy.Line(s).to_segment)
self.assertEqual(fields[4], gfapy.Line(s).to_orient)
self.assertEqual(12, gfapy.Line(s).pos)
self.assertEqual([gfapy.CIGAR.Operation(12,"M")], gfapy.Line(s).overlap)
self.assertEqual(1232, gfapy.Line(s).MQ)
self.assertEqual(3, gfapy.Line(s).NM)
self.assertEqual("abcd", gfapy.Line(s).ab)
    with self.assertRaises(gfapy.FormatError): gfapy.Line("\tH1")
with self.assertRaises(gfapy.FormatError): gfapy.Line("C\tH")
with self.assertRaises(gfapy.FormatError):
f=fields.copy(); f[2]="x"; gfapy.Line("\t".join(f),vlevel=1)
with self.assertRaises(gfapy.FormatError):
f=fields.copy(); f[4]="x"; gfapy.Line("\t".join(f),vlevel=1)
with self.assertRaises(gfapy.FormatError):
f=fields.copy(); f[5]="x"; gfapy.Line("\t".join(f),vlevel=1)
with self.assertRaises(gfapy.FormatError):
f=fields.copy(); f[6]="x"; gfapy.Line("\t".join(f),vlevel=1)
with self.assertRaises(gfapy.TypeError):
f=fields.copy(); f[7]="MQ:Z:1232"; gfapy.Line("\t".join(f),vlevel=1)
with self.assertRaises(gfapy.TypeError):
f=fields.copy(); f[8]="NM:Z:1232"; gfapy.Line("\t".join(f),vlevel=1)
def test_L(self):
fields=["L","1","+","2","-","12M","RC:i:1232","NM:i:3","ab:Z:abcd",
"FC:i:2321","KC:i:1212","MQ:i:40"]
s="\t".join(fields)
gfapy.Line(s) # nothing raised
self.assertEqual(gfapy.line.edge.Link, gfapy.Line(s).__class__)
self.assertEqual(fields[0], gfapy.Line(s).record_type)
self.assertEqual(fields[1], gfapy.Line(s).from_segment)
self.assertEqual(fields[2], gfapy.Line(s).from_orient)
self.assertEqual(fields[3], gfapy.Line(s).to_segment)
self.assertEqual(fields[4], gfapy.Line(s).to_orient)
self.assertEqual([gfapy.CIGAR.Operation(12,"M")],
gfapy.Line(s).overlap)
self.assertEqual(1232, gfapy.Line(s).RC)
self.assertEqual(3, gfapy.Line(s).NM)
self.assertEqual(2321, gfapy.Line(s).FC)
self.assertEqual(1212, gfapy.Line(s).KC)
self.assertEqual(40, gfapy.Line(s).MQ)
self.assertEqual("abcd", gfapy.Line(s).ab)
    with self.assertRaises(gfapy.FormatError): gfapy.Line("\tH1")
with self.assertRaises(gfapy.FormatError): gfapy.Line("L\tH")
with self.assertRaises(gfapy.FormatError):
f=fields.copy(); f[2]="x"; gfapy.Line("\t".join(f),vlevel=1)
with self.assertRaises(gfapy.FormatError):
f=fields.copy(); f[4]="x"; gfapy.Line("\t".join(f),vlevel=1)
with self.assertRaises(gfapy.FormatError):
f=fields.copy(); f[5]="x"; gfapy.Line("\t".join(f),vlevel=1)
with self.assertRaises(gfapy.TypeError):
f=fields.copy(); f[6]="RC:Z:1232"; gfapy.Line("\t".join(f),vlevel=1)
with self.assertRaises(gfapy.TypeError):
f=fields.copy(); f[7]="NM:Z:1232"; gfapy.Line("\t".join(f),vlevel=1)
def test_L_coords(self):
g = gfapy.Gfa(version="gfa1")
g.append("S\t1\t*\tLN:i:100")
g.append("L\t1\t+\t2\t-\t1M2D10M1I")
self.assertEqual(["87","100$"], [str(s) for s in g.dovetails[0].from_coords])
with self.assertRaises(gfapy.ValueError): g.dovetails[0].to_coords
g.append("S\t2\t*\tLN:i:100")
self.assertEqual(["88","100$"], [str(s) for s in g.dovetails[0].to_coords])
g.append("L\t3\t-\t4\t+\t10M2P3D1M")
self.assertEqual(["0","14"], [str(s) for s in g.dovetails[1].from_coords])
self.assertEqual(["0","11"], [str(s) for s in g.dovetails[1].to_coords])
def test_L_other(self):
l = gfapy.Line("L\t1\t+\t2\t-\t*")
self.assertEqual("2", l.other("1"))
self.assertEqual("1", l.other("2"))
with self.assertRaises(gfapy.NotFoundError): l.other("0")
def test_L_circular(self):
l = gfapy.Line("L\t1\t+\t2\t-\t*")
self.assertEqual(False, l.is_circular())
l = gfapy.Line("L\t1\t+\t1\t-\t*")
self.assertEqual(True, l.is_circular())
def test_S(self):
fields=["S","1","ACGTCACANNN","RC:i:1232","LN:i:11","ab:Z:abcd",
"FC:i:2321","KC:i:1212"]
s="\t".join(fields)
gfapy.Line(s) # nothing raised
self.assertEqual(gfapy.line.segment.GFA1, gfapy.Line(s).__class__)
self.assertEqual(fields[0], gfapy.Line(s).record_type)
self.assertEqual(fields[1], gfapy.Line(s).name)
self.assertEqual(fields[2], gfapy.Line(s).sequence)
self.assertEqual(1232, gfapy.Line(s).RC)
self.assertEqual(11, gfapy.Line(s).LN)
self.assertEqual(2321, gfapy.Line(s).FC)
self.assertEqual(1212, gfapy.Line(s).KC)
self.assertEqual("abcd", gfapy.Line(s).ab)
    with self.assertRaises(gfapy.FormatError): gfapy.Line("\tH1")
with self.assertRaises(gfapy.FormatError): gfapy.Line("S\tH")
with self.assertRaises(gfapy.FormatError):
f=fields.copy(); f[2]="!@#?"; gfapy.Line("\t".join(f),vlevel=1)
with self.assertRaises(gfapy.TypeError):
f=fields.copy(); f[3]="RC:Z:1232"; gfapy.Line("\t".join(f),version="gfa1")
f=["S","2","ACGTCACANNN","LN:i:3"]
with self.assertRaises(gfapy.InconsistencyError):
gfapy.Line("\t".join(f),vlevel=1, version="gfa1")
f=["S","2","ACGTCACANNN","LN:i:11"]
gfapy.Line("\t".join(f)) # nothing raised
f=["S","2","*","LN:i:3"]
gfapy.Line("\t".join(f)) # nothing raised
def test_forbidden_segment_names(self):
gfapy.Line("S\tA+B\t*") # nothing raised
gfapy.Line("S\tA-B\t*") # nothing raised
gfapy.Line("S\tA,B\t*") # nothing raised
with self.assertRaises(gfapy.FormatError):
gfapy.Line("S\tA+,B\t*",vlevel=1)
with self.assertRaises(gfapy.FormatError):
gfapy.Line("S\tA-,B\t*",vlevel=1)
def test_coverage(self):
l = gfapy.Line("S\t0\t*\tRC:i:600\tLN:i:100")
self.assertEqual(6, l.coverage())
self.assertEqual(6, l.try_get_coverage())
l = gfapy.Line("S\t0\t*\tRC:i:600")
self.assertEqual(None, l.coverage())
with self.assertRaises(gfapy.NotFoundError): l.try_get_coverage()
l = gfapy.Line("S\t0\t*\tLN:i:100")
self.assertEqual(None, l.coverage())
with self.assertRaises(gfapy.NotFoundError): l.try_get_coverage()
l = gfapy.Line("S\t0\t*\tFC:i:600\tLN:i:100")
self.assertEqual(None, l.coverage())
with self.assertRaises(gfapy.NotFoundError): l.try_get_coverage()
self.assertEqual(6, l.coverage(count_tag="FC"))
self.assertEqual(6, l.try_get_coverage(count_tag="FC"))
def test_P(self):
fields=["P","4","1+,2-,3+","9M2I3D1M,12M","ab:Z:abcd"]
s="\t".join(fields)
gfapy.Line(s) # nothing raised
self.assertEqual(gfapy.line.group.Path, gfapy.Line(s).__class__)
self.assertEqual(fields[0], gfapy.Line(s).record_type)
self.assertEqual(fields[1], gfapy.Line(s).path_name)
self.assertEqual([gfapy.OrientedLine("1","+"),gfapy.OrientedLine("2","-"),
gfapy.OrientedLine("3","+")],
gfapy.Line(s).segment_names)
self.assertEqual([[gfapy.CIGAR.Operation(9,"M"),
gfapy.CIGAR.Operation(2,"I"),
gfapy.CIGAR.Operation(3,"D"),
gfapy.CIGAR.Operation(1,"M")],
[gfapy.CIGAR.Operation(12,"M")]],
gfapy.Line(s).overlaps)
self.assertEqual("abcd", gfapy.Line(s).ab)
    with self.assertRaises(gfapy.FormatError): gfapy.Line("\tH1")
with self.assertRaises(gfapy.FormatError): gfapy.Line("P\tH")
with self.assertRaises(gfapy.FormatError):
f=fields.copy(); f[2]="1,2,3"; gfapy.Line("\t".join(f),vlevel=1)
with self.assertRaises(gfapy.InconsistencyError):
f=fields.copy(); f[2]="1+,2+"; f[3]="9M,12M,3M"
gfapy.Line("\t".join(f),vlevel=1)
f=fields.copy(); f[3]="*,*";
gfapy.Line("\t".join(f),vlevel=1)
f=fields.copy(); f[3]="9M2I3D1M,12M,12M";
gfapy.Line("\t".join(f),vlevel=3)
f=fields.copy(); f[3]="*";
gfapy.Line("\t".join(f),vlevel=1)
with self.assertRaises(gfapy.FormatError):
f=fields.copy(); f[3]="12,12"; gfapy.Line("\t".join(f),vlevel=1)
with self.assertRaises(gfapy.FormatError):
f=fields.copy(); f[3]="12M|12M"; gfapy.Line("\t".join(f),vlevel=1) | tests/test_api_gfa1_lines.py | import gfapy
import unittest
class TestApiGfa1Lines(unittest.TestCase):
def test_C(self):
fields=["C","1","+","2","-","12","12M","MQ:i:1232","NM:i:3","ab:Z:abcd"]
s="\t".join(fields)
gfapy.Line(s) # nothing raised
self.assertEqual(gfapy.line.edge.Containment, gfapy.Line(s).__class__)
self.assertEqual(fields[0], gfapy.Line(s).record_type)
self.assertEqual(fields[1], gfapy.Line(s).from_segment)
self.assertEqual(fields[2], gfapy.Line(s).from_orient)
self.assertEqual(fields[3], gfapy.Line(s).to_segment)
self.assertEqual(fields[4], gfapy.Line(s).to_orient)
self.assertEqual(12, gfapy.Line(s).pos)
self.assertEqual([gfapy.CIGAR.Operation(12,"M")], gfapy.Line(s).overlap)
self.assertEqual(1232, gfapy.Line(s).MQ)
self.assertEqual(3, gfapy.Line(s).NM)
self.assertEqual("abcd", gfapy.Line(s).ab)
with self.assertRaises(gfapy.FormatError): (str+gfapy.Line("\tH1"))
with self.assertRaises(gfapy.FormatError): gfapy.Line("C\tH")
with self.assertRaises(gfapy.FormatError):
f=fields.copy(); f[2]="x"; gfapy.Line("\t".join(f),vlevel=1)
with self.assertRaises(gfapy.FormatError):
f=fields.copy(); f[4]="x"; gfapy.Line("\t".join(f),vlevel=1)
with self.assertRaises(gfapy.FormatError):
f=fields.copy(); f[5]="x"; gfapy.Line("\t".join(f),vlevel=1)
with self.assertRaises(gfapy.FormatError):
f=fields.copy(); f[6]="x"; gfapy.Line("\t".join(f),vlevel=1)
with self.assertRaises(gfapy.TypeError):
f=fields.copy(); f[7]="MQ:Z:1232"; gfapy.Line("\t".join(f),vlevel=1)
with self.assertRaises(gfapy.TypeError):
f=fields.copy(); f[8]="NM:Z:1232"; gfapy.Line("\t".join(f),vlevel=1)
def test_L(self):
fields=["L","1","+","2","-","12M","RC:i:1232","NM:i:3","ab:Z:abcd",
"FC:i:2321","KC:i:1212","MQ:i:40"]
s="\t".join(fields)
gfapy.Line(s) # nothing raised
self.assertEqual(gfapy.line.edge.Link, gfapy.Line(s).__class__)
self.assertEqual(fields[0], gfapy.Line(s).record_type)
self.assertEqual(fields[1], gfapy.Line(s).from_segment)
self.assertEqual(fields[2], gfapy.Line(s).from_orient)
self.assertEqual(fields[3], gfapy.Line(s).to_segment)
self.assertEqual(fields[4], gfapy.Line(s).to_orient)
self.assertEqual([gfapy.CIGAR.Operation(12,"M")],
gfapy.Line(s).overlap)
self.assertEqual(1232, gfapy.Line(s).RC)
self.assertEqual(3, gfapy.Line(s).NM)
self.assertEqual(2321, gfapy.Line(s).FC)
self.assertEqual(1212, gfapy.Line(s).KC)
self.assertEqual(40, gfapy.Line(s).MQ)
self.assertEqual("abcd", gfapy.Line(s).ab)
with self.assertRaises(gfapy.FormatError): (str+gfapy.Line("\tH1"))
with self.assertRaises(gfapy.FormatError): gfapy.Line("L\tH")
with self.assertRaises(gfapy.FormatError):
f=fields.copy(); f[2]="x"; gfapy.Line("\t".join(f),vlevel=1)
with self.assertRaises(gfapy.FormatError):
f=fields.copy(); f[4]="x"; gfapy.Line("\t".join(f),vlevel=1)
with self.assertRaises(gfapy.FormatError):
f=fields.copy(); f[5]="x"; gfapy.Line("\t".join(f),vlevel=1)
with self.assertRaises(gfapy.TypeError):
f=fields.copy(); f[6]="RC:Z:1232"; gfapy.Line("\t".join(f),vlevel=1)
with self.assertRaises(gfapy.TypeError):
f=fields.copy(); f[7]="NM:Z:1232"; gfapy.Line("\t".join(f),vlevel=1)
def test_L_coords(self):
g = gfapy.Gfa(version="gfa1")
g.append("S\t1\t*\tLN:i:100")
g.append("L\t1\t+\t2\t-\t1M2D10M1I")
self.assertEqual(["87","100$"], [str(s) for s in g.dovetails[0].from_coords])
with self.assertRaises(gfapy.ValueError): g.dovetails[0].to_coords
g.append("S\t2\t*\tLN:i:100")
self.assertEqual(["88","100$"], [str(s) for s in g.dovetails[0].to_coords])
g.append("L\t3\t-\t4\t+\t10M2P3D1M")
self.assertEqual(["0","14"], [str(s) for s in g.dovetails[1].from_coords])
self.assertEqual(["0","11"], [str(s) for s in g.dovetails[1].to_coords])
def test_L_other(self):
l = gfapy.Line("L\t1\t+\t2\t-\t*")
self.assertEqual("2", l.other("1"))
self.assertEqual("1", l.other("2"))
with self.assertRaises(gfapy.NotFoundError): l.other("0")
def test_L_circular(self):
l = gfapy.Line("L\t1\t+\t2\t-\t*")
self.assertEqual(False, l.is_circular())
l = gfapy.Line("L\t1\t+\t1\t-\t*")
self.assertEqual(True, l.is_circular())
def test_S(self):
fields=["S","1","ACGTCACANNN","RC:i:1232","LN:i:11","ab:Z:abcd",
"FC:i:2321","KC:i:1212"]
s="\t".join(fields)
gfapy.Line(s) # nothing raised
self.assertEqual(gfapy.line.segment.GFA1, gfapy.Line(s).__class__)
self.assertEqual(fields[0], gfapy.Line(s).record_type)
self.assertEqual(fields[1], gfapy.Line(s).name)
self.assertEqual(fields[2], gfapy.Line(s).sequence)
self.assertEqual(1232, gfapy.Line(s).RC)
self.assertEqual(11, gfapy.Line(s).LN)
self.assertEqual(2321, gfapy.Line(s).FC)
self.assertEqual(1212, gfapy.Line(s).KC)
self.assertEqual("abcd", gfapy.Line(s).ab)
with self.assertRaises(gfapy.FormatError): (str+gfapy.Line("\tH1"))
with self.assertRaises(gfapy.FormatError): gfapy.Line("S\tH")
with self.assertRaises(gfapy.FormatError):
f=fields.copy(); f[2]="!@#?"; gfapy.Line("\t".join(f),vlevel=1)
with self.assertRaises(gfapy.TypeError):
f=fields.copy(); f[3]="RC:Z:1232"; gfapy.Line("\t".join(f),version="gfa1")
f=["S","2","ACGTCACANNN","LN:i:3"]
with self.assertRaises(gfapy.InconsistencyError):
gfapy.Line("\t".join(f),vlevel=1, version="gfa1")
f=["S","2","ACGTCACANNN","LN:i:11"]
gfapy.Line("\t".join(f)) # nothing raised
f=["S","2","*","LN:i:3"]
gfapy.Line("\t".join(f)) # nothing raised
def test_forbidden_segment_names(self):
gfapy.Line("S\tA+B\t*") # nothing raised
gfapy.Line("S\tA-B\t*") # nothing raised
gfapy.Line("S\tA,B\t*") # nothing raised
with self.assertRaises(gfapy.FormatError):
gfapy.Line("S\tA+,B\t*",vlevel=1)
with self.assertRaises(gfapy.FormatError):
gfapy.Line("S\tA-,B\t*",vlevel=1)
def test_coverage(self):
l = gfapy.Line("S\t0\t*\tRC:i:600\tLN:i:100")
self.assertEqual(6, l.coverage())
self.assertEqual(6, l.try_get_coverage())
l = gfapy.Line("S\t0\t*\tRC:i:600")
self.assertEqual(None, l.coverage())
with self.assertRaises(gfapy.NotFoundError): l.try_get_coverage()
l = gfapy.Line("S\t0\t*\tLN:i:100")
self.assertEqual(None, l.coverage())
with self.assertRaises(gfapy.NotFoundError): l.try_get_coverage()
l = gfapy.Line("S\t0\t*\tFC:i:600\tLN:i:100")
self.assertEqual(None, l.coverage())
with self.assertRaises(gfapy.NotFoundError): l.try_get_coverage()
self.assertEqual(6, l.coverage(count_tag="FC"))
self.assertEqual(6, l.try_get_coverage(count_tag="FC"))
def test_P(self):
fields=["P","4","1+,2-,3+","9M2I3D1M,12M","ab:Z:abcd"]
s="\t".join(fields)
gfapy.Line(s) # nothing raised
self.assertEqual(gfapy.line.group.Path, gfapy.Line(s).__class__)
self.assertEqual(fields[0], gfapy.Line(s).record_type)
self.assertEqual(fields[1], gfapy.Line(s).path_name)
self.assertEqual([gfapy.OrientedLine("1","+"),gfapy.OrientedLine("2","-"),
gfapy.OrientedLine("3","+")],
gfapy.Line(s).segment_names)
self.assertEqual([[gfapy.CIGAR.Operation(9,"M"),
gfapy.CIGAR.Operation(2,"I"),
gfapy.CIGAR.Operation(3,"D"),
gfapy.CIGAR.Operation(1,"M")],
[gfapy.CIGAR.Operation(12,"M")]],
gfapy.Line(s).overlaps)
self.assertEqual("abcd", gfapy.Line(s).ab)
with self.assertRaises(gfapy.FormatError): (str+gfapy.Line("\tH1"))
with self.assertRaises(gfapy.FormatError): gfapy.Line("P\tH")
with self.assertRaises(gfapy.FormatError):
f=fields.copy(); f[2]="1,2,3"; gfapy.Line("\t".join(f),vlevel=1)
with self.assertRaises(gfapy.InconsistencyError):
f=fields.copy(); f[2]="1+,2+"; f[3]="9M,12M,3M"
gfapy.Line("\t".join(f),vlevel=1)
f=fields.copy(); f[3]="*,*";
gfapy.Line("\t".join(f),vlevel=1)
f=fields.copy(); f[3]="9M2I3D1M,12M,12M";
gfapy.Line("\t".join(f),vlevel=3)
f=fields.copy(); f[3]="*";
gfapy.Line("\t".join(f),vlevel=1)
with self.assertRaises(gfapy.FormatError):
f=fields.copy(); f[3]="12,12"; gfapy.Line("\t".join(f),vlevel=1)
with self.assertRaises(gfapy.FormatError):
f=fields.copy(); f[3]="12M|12M"; gfapy.Line("\t".join(f),vlevel=1) | 0.406626 | 0.605857 |
from docfish.apps.main.models import (
ImageDescription,
TextDescription
)
from docfish.apps.main.utils import *
from docfish.apps.main.permission import has_collection_annotate_permission
from docfish.apps.main.navigation import get_next_to_describe
from docfish.apps.users.utils import (
get_team,
get_team_descriptions
)
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib import messages
from django.http import HttpResponse, JsonResponse
from django.http.response import (
HttpResponseRedirect,
HttpResponseForbidden,
Http404
)
from django.shortcuts import (
get_object_or_404,
render_to_response,
render,
redirect
)
@login_required
def collection_describe_image(request,cid):
'''collection_describe_image will return a new image to describe
'''
collection = get_collection(cid)
if collection.private == True:
if not has_collection_annotate_permission(request,collection):
messages.info(request, '''This collection is private. You must be a contributor
or member of the owner's institution to annotate.''')
return redirect("collections")
if collection.has_images():
next_image = get_next_to_describe(user=request.user,
collection=collection)
# Next image will never be none for a team, keeps cycling
if next_image == None:
messages.info(request,"You have finished describing images in this collection. Great work!")
return HttpResponseRedirect(collection.get_absolute_url())
description = get_description(user=request.user,
instance=next_image)
# Pass to view if we need to save a base for the image
context = {"entity":next_image.entity,
"image": next_image,
"collection":collection,
"description": description,
"nosidebar":"pancakes"}
template_type = sniff_template_extension(next_image.get_path())
return render(request, "annotate/images_description_%s.html" %(template_type), context)
messages.info(request,"This collection does not have any images to describe.")
return HttpResponseRedirect(collection.get_absolute_url())
@login_required
def video_describe_web(request):
    '''render the collaborative web video description view'''
tid=1
context = {'collaborate':'yes'}
team = get_team(tid,return_none=True)
return render(request, "collaborate/video_describe_web.html", context)
@login_required
def describe_image(request,cid,uid=None,tid=None):
'''describe_image will return a static view of an image to describe.
:param uid: the unique id of the image. This is always the image id that is desired to be
seen. In the case of a post, the last image's uid is obtained from the post data.
If this is a team description (tid is provided) the view returns uid. If not, the user
is redirected to the collection_describe_image that will randomly select the next.
'''
collaborate = True
collection = get_collection(cid)
team = get_team(tid,return_none=True)
if collection.private == True:
if not has_collection_annotate_permission(request,collection,team):
messages.info(request, '''This collection is private. You must be a contributor
or member of the owner's institution to annotate.''')
return redirect("collections")
# If it's a post, save the description
if request.method == "POST":
# Retrieve base64 encoded data
description_text = request.POST.get('description',None)
image_id = request.POST.get('image_id',None)
if description_text not in [None,''] and image_id not in [None,'']:
if team is not None:
description,created = ImageDescription.objects.get_or_create(team=team,
image__id=image_id,
description=description_text)
else:
description,created = ImageDescription.objects.get_or_create(creator=request.user,
image__id=image_id,
description=description_text)
description.save()
# Team description has (somewhat) controlled movement through images
if team is not None:
if collection.has_images():
if uid is None:
collaborate = False
image,next_image = get_next_to_describe(user=request.user,
collection=collection,
team=team,
N=2)
else:
image = get_image(uid)
next_image = get_next_to_describe(user=request.user,
collection=collection,
team=team,
skip=image.id)
description = get_description(user=request.user,
instance=image,
team=team)
# Pass to view if we need to save a base for the image
context = {"entity":image.entity,
"image": image,
"next_image": next_image,
"collection": collection,
"description": description,
"nosidebar":"pancakes",
"team":team}
if collaborate:
context["collaborate"] = "yes"
template_type = sniff_template_extension(image.get_path())
return render(request, "collaborate/images_description_%s.html" %(template_type), context)
messages.info(request,"This collection does not have any images to describe.")
return HttpResponseRedirect(collection.get_absolute_url())
# Regular markup (no team)
return redirect("collection_describe_image", cid=collection.id)
# Text
@login_required
def collection_describe_text(request,cid):
collection = get_collection(cid)
if collection.private == True:
        if not has_collection_annotate_permission(request,collection):
messages.info(request, '''This collection is private. You must be a contributor
or member of the owner's institution to annotate.''')
return redirect("collections")
if collection.has_text():
next_text = get_next_to_describe(user=request.user,
collection=collection,
get_images=False)
if next_text == None:
messages.info(request,"You have finished describing the text in this collection. Thanks!")
return redirect('collection_details',cid=cid)
description = get_description(user=request.user,
instance=next_text)
# Pass to view if we need to save a base for the image
context = {"entity":next_text.entity,
"text": next_text,
"collection":collection,
"description": description,
"nosidebar":"pancakes"}
return render(request, "annotate/text_description.html", context)
messages.info(request,"This collection does not have any text to describe.")
return HttpResponseRedirect(collection.get_absolute_url())
@login_required
def describe_text(request,cid,uid=None,tid=None):
'''describe_text will return a static view of text to describe.
'''
collaborate = True
team = get_team(tid,return_none=True)
collection = get_collection(cid)
if collection.private == True:
if not has_collection_annotate_permission(request,collection):
messages.info(request, '''This collection is private. You must be a contributor
or member of the owner's institution to annotate.''')
return redirect("collections")
if request.method == "POST":
# Retrieve base64 encoded data
description_text = request.POST.get('description',None)
text_id = request.POST.get('text_id',None)
if description_text not in [None,''] and text_id not in [None,'']:
if team:
description,created = TextDescription.objects.get_or_create(team=team,
text__id=text_id,
description=description_text)
else:
description,created = TextDescription.objects.get_or_create(creator=request.user,
text__id=text_id,
description=description_text)
description.save()
# Team description has (somewhat) controlled movement through texts
if team is not None:
if collection.has_text():
if uid == None:
collaborate = False
text, next_text = get_next_to_describe(user=request.user,
collection=collection,
team=team,
N=2,
get_images=False)
else:
text = get_text(uid)
next_text = get_next_to_describe(user=request.user,
collection=collection,
team=team,
get_images=False,
skip=text.id)
description = get_description(user=request.user,
instance=text,
team=team)
# Pass to view if we need to save a base for the image
context = {"entity":text.entity,
"text": text,
"next_text": next_text,
"collection": collection,
"description": description,
"nosidebar":"pancakes",
"team":team}
if collaborate:
context["collaborate"] = "yes"
return render(request, "collaborate/text_description.html", context)
messages.info(request,"This collection does not have any texts to describe.")
return HttpResponseRedirect(collection.get_absolute_url())
return redirect("collection_describe_text", cid=collection.id) | docfish/apps/main/views/describe.py | from docfish.apps.main.models import (
ImageDescription,
TextDescription
)
from docfish.apps.main.utils import *
from docfish.apps.main.permission import has_collection_annotate_permission
from docfish.apps.main.navigation import get_next_to_describe
from docfish.apps.users.utils import (
get_team,
get_team_descriptions
)
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib import messages
from django.http import HttpResponse, JsonResponse
from django.http.response import (
HttpResponseRedirect,
HttpResponseForbidden,
Http404
)
from django.shortcuts import (
get_object_or_404,
render_to_response,
render,
redirect
)
@login_required
def collection_describe_image(request,cid):
'''collection_describe_image will return a new image to describe
'''
collection = get_collection(cid)
if collection.private == True:
if not has_collection_annotate_permission(request,collection):
messages.info(request, '''This collection is private. You must be a contributor
or member of the owner's institution to annotate.''')
return redirect("collections")
if collection.has_images():
next_image = get_next_to_describe(user=request.user,
collection=collection)
# Next image will never be none for a team, keeps cycling
if next_image == None:
messages.info(request,"You have finished describing images in this collection. Great work!")
return HttpResponseRedirect(collection.get_absolute_url())
description = get_description(user=request.user,
instance=next_image)
# Pass to view if we need to save a base for the image
context = {"entity":next_image.entity,
"image": next_image,
"collection":collection,
"description": description,
"nosidebar":"pancakes"}
template_type = sniff_template_extension(next_image.get_path())
return render(request, "annotate/images_description_%s.html" %(template_type), context)
messages.info(request,"This collection does not have any images to describe.")
return HttpResponseRedirect(collection.get_absolute_url())
@login_required
def video_describe_web(request):
'''clear all annotations for a specific image'''
tid=1
context = {'collaborate':'yes'}
team = get_team(tid,return_none=True)
return render(request, "collaborate/video_describe_web.html", context)
@login_required
def describe_image(request,cid,uid=None,tid=None):
'''describe_image will return a static view of an image to describe.
:param uid: the unique id of the image. This is always the image id that is desired to be
seen. In the case of a post, the last image's uid is obtained from the post data.
If this is a team description (tid is provided) the view returns uid. If not, the user
is redirected to the collection_describe_image that will randomly select the next.
'''
collaborate = True
collection = get_collection(cid)
team = get_team(tid,return_none=True)
if collection.private == True:
if not has_collection_annotate_permission(request,collection,team):
messages.info(request, '''This collection is private. You must be a contributor
or member of the owner's institution to annotate.''')
return redirect("collections")
# If it's a post, save the description
if request.method == "POST":
# Retrieve base64 encoded data
description_text = request.POST.get('description',None)
image_id = request.POST.get('image_id',None)
if description_text not in [None,''] and image_id not in [None,'']:
if team is not None:
description,created = ImageDescription.objects.get_or_create(team=team,
image__id=image_id,
description=description_text)
else:
description,created = ImageDescription.objects.get_or_create(creator=request.user,
image__id=image_id,
description=description_text)
description.save()
# Team description has (somewhat) controlled movement through images
if team is not None:
if collection.has_images():
if uid is None:
collaborate = False
image,next_image = get_next_to_describe(user=request.user,
collection=collection,
team=team,
N=2)
else:
image = get_image(uid)
next_image = get_next_to_describe(user=request.user,
collection=collection,
team=team,
skip=image.id)
description = get_description(user=request.user,
instance=image,
team=team)
# Pass to view if we need to save a base for the image
context = {"entity":image.entity,
"image": image,
"next_image": next_image,
"collection": collection,
"description": description,
"nosidebar":"pancakes",
"team":team}
if collaborate:
context["collaborate"] = "yes"
template_type = sniff_template_extension(image.get_path())
return render(request, "collaborate/images_description_%s.html" %(template_type), context)
messages.info(request,"This collection does not have any images to describe.")
return HttpResponseRedirect(collection.get_absolute_url())
# Regular markup (no team)
return redirect("collection_describe_image", cid=collection.id)
# Text
@login_required
def collection_describe_text(request,cid):
collection = get_collection(cid)
if collection.private == True:
if not has_collection_annotate_permission(request,collection,team):
messages.info(request, '''This collection is private. You must be a contributor
or member of the owner's institution to annotate.''')
return redirect("collections")
if collection.has_text():
next_text = get_next_to_describe(user=request.user,
collection=collection,
get_images=False)
if next_text == None:
messages.info(request,"You have finished describing the text in this collection. Thanks!")
return redirect('collection_details',cid=cid)
description = get_description(user=request.user,
instance=next_text)
# Pass to view if we need to save a base for the image
context = {"entity":next_text.entity,
"text": next_text,
"collection":collection,
"description": description,
"nosidebar":"pancakes"}
return render(request, "annotate/text_description.html", context)
messages.info(request,"This collection does not have any text to describe.")
return HttpResponseRedirect(collection.get_absolute_url())
@login_required
def describe_text(request,cid,uid=None,tid=None):
'''describe_text will return a static view of text to describe.
'''
collaborate = True
team = get_team(tid,return_none=True)
collection = get_collection(cid)
if collection.private == True:
if not has_collection_annotate_permission(request,collection):
messages.info(request, '''This collection is private. You must be a contributor
or member of the owner's institution to annotate.''')
return redirect("collections")
if request.method == "POST":
# Retrieve base64 encoded data
description_text = request.POST.get('description',None)
text_id = request.POST.get('text_id',None)
if description_text not in [None,''] and text_id not in [None,'']:
if team:
description,created = TextDescription.objects.get_or_create(team=team,
text__id=text_id,
description=description_text)
else:
description,created = TextDescription.objects.get_or_create(creator=request.user,
text__id=text_id,
description=description_text)
description.save()
# Team description has (somewhat) controlled movement through texts
if team is not None:
if collection.has_text():
if uid == None:
collaborate = False
text, next_text = get_next_to_describe(user=request.user,
collection=collection,
team=team,
N=2,
get_images=False)
else:
text = get_text(uid)
next_text = get_next_to_describe(user=request.user,
collection=collection,
team=team,
get_images=False,
skip=text.id)
description = get_description(user=request.user,
instance=text,
team=team)
# Pass to view if we need to save a base for the image
context = {"entity":text.entity,
"text": text,
"next_text": next_text,
"collection": collection,
"description": description,
"nosidebar":"pancakes",
"team":team}
if collaborate:
context["collaborate"] = "yes"
return render(request, "collaborate/text_description.html", context)
messages.info(request,"This collection does not have any texts to describe.")
return HttpResponseRedirect(collection.get_absolute_url())
return redirect("collection_describe_text", cid=collection.id) | 0.400398 | 0.168823 |
import sys, os, shutil, json
import urllib.request, traceback
from io import BytesIO
from github import Github
from zipfile import ZipFile
from types import ModuleType
from . import predigame
from .utils import load_module
from predigame.constants import *
from pkg_resources import get_distribution
__version__ = get_distribution('predigame').version
def bootstrap():
if len(sys.argv) == 1:
print('Predigame Instructional Platform\n')
print('Running a Game:')
print(' pred some_file.py\n')
print('Create a New Game:')
print(' pred new some_game\n')
print('List Available Game Downloads:')
print(' pred list\n')
print('Download a Game:')
print(' pred pull some_game')
print(' pred fetch some_game\n')
sys.exit()
if sys.argv[1] == 'new':
new_game()
elif sys.argv[1] == 'list':
get_games()
elif sys.argv[1] == 'pull':
pull_game()
elif sys.argv[1] == 'fetch':
fetch_game()
else:
main()
def err():
print('Error: Invalid Python file provided')
sys.exit()
def main():
try:
        run_mod = sys.argv[1]
    except IndexError:
        err()
path = os.path.join(os.getcwd(), run_mod)
from . import api
code, mod = load_module(path, api)
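    # Harvest the game's configuration constants (WIDTH, HEIGHT, TITLE, ...)
    # by exec-ing the code once into a throwaway module; errors are ignored
    # because the engine is not initialized yet. Once the constants are known,
    # predigame.init() runs and the code is exec-ed a second time for real.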
dummy_mod = ModuleType('dummy')
try:
exec(code, dummy_mod.__dict__)
except:
pass
finally:
WIDTH = getattr(dummy_mod, 'WIDTH', 16)
HEIGHT = getattr(dummy_mod, 'HEIGHT', 16)
TITLE = getattr(dummy_mod, 'TITLE', 'Predigame')
SIZE = getattr(dummy_mod, 'SIZE', 50)
BACKGROUND = getattr(dummy_mod, 'BACKGROUND', (220, 220, 220))
FULLSCREEN = getattr(dummy_mod, 'FULLSCREEN', False)
COLLISIONS = getattr(dummy_mod, 'PIXEL_COLLISIONS', True)
predigame.init(path, WIDTH * SIZE, HEIGHT * SIZE, TITLE, grid = SIZE, bg = BACKGROUND, fullscreen = FULLSCREEN, collisions = COLLISIONS)
exec(code, mod.__dict__)
while True:
predigame.main_loop()
def pull_game():
if len(sys.argv) != 3:
print('Usage: pred pull <game>')
sys.exit()
game = sys.argv[2]
if os.path.exists(game):
prompt = input('{} already exists. Overwrite? (Y or N): '.format(game))
if prompt.upper() == 'Y':
shutil.rmtree(game)
else:
sys.exit()
try:
g = Github()
repo = g.get_organization('predigame').get_repo(game)
tags = repo.get_tags()
        tag_url = None
        for tag in tags:
            # pick the zipball of the release tag matching the installed version
            if tag.name == __version__:
                tag_url = tag.zipball_url
                break
if tag_url is not None:
print('Fetching game {} from {}'.format(game, tag_url))
with urllib.request.urlopen(tag_url) as response:
data = response.read()
with ZipFile(BytesIO(data)) as dnld:
prefix = dnld.namelist()[0].split('/')[0]
dnld.extractall()
os.rename(prefix, game)
            print('Download Complete!')
        else:
            print('No release of {} matches predigame version {}'.format(game, __version__))
    except Exception:
print('Unable to pull game {}. Does it exist?'.format(game))
traceback.print_exc()
def fetch_game():
"""
Similar to pull game but doesn't use the Github API and avoids stumbling into SSL verification issues.
"""
if len(sys.argv) != 3:
print('Usage: pred fetch <game>')
sys.exit()
game = sys.argv[2]
if os.path.exists(game):
prompt = input('{} already exists. Overwrite? (Y or N): '.format(game))
if prompt.upper() == 'Y':
shutil.rmtree(game)
else:
sys.exit()
url = 'http://predigame.io/games/' + game + '.zip'
try:
with urllib.request.urlopen(url) as response:
data = response.read()
with ZipFile(BytesIO(data)) as dnld:
dnld.extractall()
print('Download Complete!')
    except Exception:
print('Unable to fetch game {} from {}. Does it exist?'.format(game, url))
traceback.print_exc()
def get_games():
g = Github()
repos = g.get_organization('predigame').get_repos()
for repo in repos:
name = repo.full_name.replace('predigame/', '')
if name == 'predigame':
continue
desc = repo.description
tags = repo.get_tags()
version_match = False
for tag in tags:
tag_name = tag.name
tag_url = tag.zipball_url
if tag_name == __version__:
version_match = True
#print(' {} \t {}'.format(tag_name, tag_url))
if version_match:
print('{0:10} \t {1}'.format(name, desc))
def new_game():
if len(sys.argv) != 3:
print('Usage: pred new <game>')
sys.exit()
game = sys.argv[2]
if os.path.exists(game):
prompt = input('{} already exists. Overwrite? (Y or N): '.format(game))
if prompt.upper() == 'Y':
shutil.rmtree(game)
else:
sys.exit()
os.mkdir(game)
os.mkdir(game + '/backgrounds')
os.mkdir(game + '/images')
os.mkdir(game + '/sounds')
os.mkdir(game + '/actors')
file = open(game + '/game.py', 'w')
file.write('WIDTH = 30\n')
file.write('HEIGHT = 20\n')
file.write('TITLE = \'Simple Game\'\n')
    file.close() | predigame/__init__.py | 0.075155 | 0.065306 |
from datetime import timedelta
from django.db import models
from django.utils.translation import gettext_lazy
from mahjong_portal.models import BaseModel
from player.models import Player
from utils.tenhou.yakuman_list import YAKUMAN_CONST
class TenhouNickname(BaseModel):
RANKS = [
[0, u'新人'],
[1, u'9級'],
[2, u'8級'],
[3, u'7級'],
[4, u'6級'],
[5, u'5級'],
[6, u'4級'],
[7, u'3級'],
[8, u'2級'],
[9, u'1級'],
[10, u'初段'],
[11, u'二段'],
[12, u'三段'],
[13, u'四段'],
[14, u'五段'],
[15, u'六段'],
[16, u'七段'],
[17, u'八段'],
[18, u'九段'],
[19, u'十段'],
[20, u'天鳳位']
]
player = models.ForeignKey(Player, related_name='tenhou')
tenhou_username = models.CharField(max_length=8)
username_created_at = models.DateField()
rank = models.PositiveSmallIntegerField(choices=RANKS)
average_place = models.DecimalField(decimal_places=2, max_digits=10, default=0)
played_games = models.PositiveIntegerField(default=0)
month_average_place = models.DecimalField(decimal_places=2, max_digits=10, default=0)
month_played_games = models.PositiveIntegerField(default=0)
four_games_rate = models.DecimalField(decimal_places=2, max_digits=10, default=0)
pt = models.PositiveSmallIntegerField(default=0)
end_pt = models.PositiveSmallIntegerField(default=0)
last_played_date = models.DateField(null=True, blank=True)
is_main = models.BooleanField(default=True)
def __unicode__(self):
return self.tenhou_username
class Meta:
ordering = ['-rank']
db_table = 'player_tenhounickname'
def all_time_stat(self):
return self.statistics.filter(stat_type=TenhouStatistics.ALL_TIME)
def current_month_stat(self):
return self.statistics.filter(stat_type=TenhouStatistics.CURRENT_MONTH)
def latest_yakumans(self):
return self.yakumans.order_by('-date')
def prepare_latest_places(self):
return reversed(self.game_logs.order_by('-game_date')[:20])
class TenhouStatistics(models.Model):
KYU_LOBBY = 0
DAN_LOBBY = 1
UPPERDAN_LOBBY = 2
PHOENIX_LOBBY = 3
LOBBIES = [
[KYU_LOBBY, gettext_lazy('Kyu lobby')],
[DAN_LOBBY, gettext_lazy('Dan lobby')],
[UPPERDAN_LOBBY, gettext_lazy('Upperdan lobby')],
[PHOENIX_LOBBY, gettext_lazy('Phoenix lobby')],
]
ALL_TIME = 0
CURRENT_MONTH = 1
TYPES = [
[ALL_TIME, 'All time'],
[CURRENT_MONTH, 'Current month'],
]
tenhou_object = models.ForeignKey(TenhouNickname, related_name='statistics')
lobby = models.PositiveSmallIntegerField(choices=LOBBIES)
stat_type = models.PositiveSmallIntegerField(choices=TYPES, default=ALL_TIME)
played_games = models.PositiveIntegerField(default=0)
average_place = models.DecimalField(decimal_places=2, max_digits=10, default=0)
first_place = models.DecimalField(decimal_places=2, max_digits=10, default=0)
second_place = models.DecimalField(decimal_places=2, max_digits=10, default=0)
third_place = models.DecimalField(decimal_places=2, max_digits=10, default=0)
fourth_place = models.DecimalField(decimal_places=2, max_digits=10, default=0)
class Meta:
ordering = ['lobby']
db_table = 'portal_tenhou_statistics'
class CollectedYakuman(models.Model):
tenhou_object = models.ForeignKey(TenhouNickname, related_name='yakumans')
date = models.DateTimeField()
log_id = models.CharField(max_length=44)
yakuman_list = models.CharField(max_length=60)
class Meta:
db_table = 'portal_collected_yakuman'
def get_log_link(self):
return 'http://tenhou.net/0/?log={}'.format(self.log_id)
def yakuman_names(self):
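        # an empty yakuman_list is stored for kazoe yakuman (counted yakuman),
        # which has no individual yakuman ids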
if not self.yakuman_list:
return YAKUMAN_CONST.get('kazoe')
yakuman_list = [int(x) for x in self.yakuman_list.split(',')]
return ', '.join([str(YAKUMAN_CONST.get(x, x)) for x in yakuman_list])
class TenhouGameLog(models.Model):
tenhou_object = models.ForeignKey(TenhouNickname, related_name='game_logs')
lobby = models.PositiveSmallIntegerField(choices=TenhouStatistics.LOBBIES)
place = models.PositiveSmallIntegerField()
game_length = models.PositiveSmallIntegerField()
delta = models.SmallIntegerField(default=0)
rank = models.PositiveSmallIntegerField(choices=TenhouNickname.RANKS, null=True, blank=True, default=None)
next_rank = models.PositiveSmallIntegerField(choices=TenhouNickname.RANKS, null=True, blank=True, default=None)
game_date = models.DateTimeField()
game_rules = models.CharField(max_length=20)
class Meta:
unique_together = ['tenhou_object', 'game_date']
ordering = ['game_date']
db_table = 'portal_tenhou_game_log'
@property
def game_type(self):
return self.game_rules[2]
@property
def game_end_date(self):
return self.game_date + timedelta(minutes=self.game_length)
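    # 'success' renders a green badge (the player ranked up after this game),
    # 'danger' a red one (the rank stayed the same or dropped)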
@property
def badge_class(self):
if self.rank < self.next_rank:
return 'success'
else:
            return 'danger' | server/player/tenhou/models.py | 0.550607 | 0.086709 |
from IPython import embed
import numpy as np
from pypeit.core import procimg
def test_replace_columns():
y = np.zeros((10,3), dtype=float)
y[:,2] = 2
bad_col = np.array([False, True, False])
assert np.array_equal(procimg.replace_columns(y, bad_col, copy=True, replace_with='mean'),
procimg.replace_columns(y, bad_col, copy=True, replace_with='linear')), \
'Interpolation and mean should provide the same result.'
bad_col = np.array([False, True, True])
assert np.array_equal(procimg.replace_columns(y, bad_col, copy=True, replace_with='mean'),
np.zeros_like(y)), 'Should set everything to 0.'
bad_col = np.array([True, True, False])
assert np.array_equal(procimg.replace_columns(y, bad_col, copy=True, replace_with='mean'),
np.full_like(y, 2)), 'Should set everything to 2.'
y = np.zeros((10,4), dtype=float)
y[:,3] = 3
bad_col = np.array([False, True, True, False])
assert np.array_equal(procimg.replace_columns(y, bad_col, copy=True, replace_with='linear'),
np.repeat(np.arange(4),10).reshape(4,10).T), \
'Interpolation failed.'
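# In electrons, the read-noise variance per pixel is RN**2; converting the
# image to ADU divides counts by the gain, so the variance becomes (RN/gain)**2.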
def test_rn2_frame():
# Bogus image
datasec = np.ones((10,10), dtype=int)
datasec[5:] = 2
rn = np.array([2.5, 3.5])
gain = np.array([1.2, 1.5])
rnvar = procimg.rn2_frame(datasec, rn, digitization=False)
assert rnvar.shape == datasec.shape, 'Shape mismatch'
assert np.array_equal(np.unique(rnvar), rn**2), 'Bad RN variance calculation'
rnvar = procimg.rn2_frame(datasec, rn, units='ADU', gain=gain, digitization=False)
assert np.allclose(np.unique(rnvar), (rn/gain)**2), 'Bad RN variance calculation'
def test_sub_overscan():
datasec = np.zeros((10,10), dtype=int)
datasec[:5,:-3] = 1
datasec[5:,:-3] = 2
oscan = np.zeros((10,10), dtype=int)
oscan[:5,-3:] = 1
oscan[5:,-3:] = 2
raw = np.zeros((10,10), dtype=float)
raw[datasec == 1] = 10.
raw[datasec == 2] = 20.
raw[oscan == 1] = 9.
raw[oscan == 2] = 19.
raw_sub, _ = procimg.subtract_overscan(raw, datasec, oscan, method='median')
assert np.array_equal(raw_sub[datasec > 0], np.ones(np.sum(datasec > 0), dtype=float)), \
'Bad overscan subtraction'
var = np.ones((10,10), dtype=float)
raw_sub, var_sub = procimg.subtract_overscan(raw, datasec, oscan, method='median', var=var)
assert np.array_equal(var_sub[datasec > 0],
np.full(np.sum(datasec > 0), np.pi/2/15, dtype=float)), \
'Bad variance calculation'
def test_trim():
datasec = np.zeros((10,10), dtype=int)
datasec[:5,:-3] = 1
datasec[5:,:-3] = 2
_datasec = procimg.trim_frame(datasec, datasec < 1)
assert _datasec.shape == (10,7), 'Trimming error'
assert np.array_equal(datasec[datasec > 0], _datasec.flat), 'Values changed'
def test_var_model():
# Bogus image
datasec = np.ones((10,10), dtype=int)
datasec[5:] = 2
rn = np.array([2.5, 3.5])
rnvar = procimg.rn2_frame(datasec, rn)
assert np.array_equal(rnvar, procimg.base_variance(rnvar)), \
'Variance model with only rnvar is just rnvar'
counts = np.full(rnvar.shape, 10., dtype=float)
assert np.array_equal(rnvar, procimg.variance_model(rnvar)), \
'Variance model should just return input if no optional parameters are provided.'
base = procimg.base_variance(rnvar, darkcurr=10.)
base_t = procimg.base_variance(rnvar, darkcurr=5., exptime=2.*3600)
assert np.all(procimg.variance_model(rnvar, counts=counts) > rnvar), \
'Shot noise should increase the variance'
assert np.all(procimg.variance_model(base, counts=counts) > base), \
'Shot noise should increase the variance'
assert np.array_equal(
procimg.variance_model(base, counts=counts),
procimg.variance_model(base_t, counts=counts)), \
'Dark current should be equivalent'
assert np.all(procimg.base_variance(rnvar, proc_var=10.) > rnvar), \
'Processing variance should increase the total variance'
assert np.all(procimg.variance_model(rnvar, counts=counts, count_scale=0.5) <
procimg.variance_model(rnvar, counts=counts)), \
'Scaling should have decreased the noise.'
assert np.all(procimg.variance_model(rnvar, counts=counts, noise_floor=0.1) > rnvar), \
'Noise floor should have increased the variance.'
| pypeit/tests/test_procimg.py | (parsed_code: verbatim duplicate of the code column) | 0.73659 | 0.653099 |
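For context on the last test_replace_columns assertion above, a plain-numpy sketch of the expected linear-interpolation result (no pypeit needed):

import numpy as np

# A (10, 4) image whose columns read [0, ?, ?, 3]; linearly interpolating
# the two bad middle columns should give each row the values [0, 1, 2, 3].
expected = np.repeat(np.arange(4), 10).reshape(4, 10).T
print(expected.shape)  # (10, 4)
print(expected[0])     # [0 1 2 3]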
import queue as Q
def search(graph, start, end):
if start not in graph:
    raise TypeError(str(start) + ' not found in graph !')
if end not in graph:
    raise TypeError(str(end) + ' not found in graph !')
visited = []
queue = Q.PriorityQueue()
queue.put((0, [start]))
visited.append(start)
while not queue.empty():
    node = queue.get()
    current = node[1][-1]
    print(current)
    if end in node[1]:
        print("Path found: " + str(node[1]) + ", Cost = " + str(node[0]))
        break
    cost = node[0]
    for neighbor in graph[current]:
        temp = node[1][:]
        temp.append(neighbor)
        if neighbor not in visited:
            queue.put((cost + graph[current][neighbor], temp))
            # marking on enqueue can return a suboptimal path when a
            # longer-hop route to a node is cheaper
            visited.append(neighbor)
def readGraph():
lines = int( input() )
graph = {}
for line in range(lines):
line = input()
tokens = line.split()
node = tokens[0]
graph[node] = {}
for i in range(1, len(tokens) - 1, 2):
# print(node, tokens[i], tokens[i + 1])
# graph.addEdge(node, tokens[i], int(tokens[i + 1]))
graph[node][tokens[i]] = int(tokens[i + 1])
return graph
def main():
graph = readGraph()
search(graph, 'DeliSerdang', 'Pekanbaru')
if __name__ == "__main__":
main()
"""
Sample Map Input:
22
DeliSerdang TebingTinggi 82 SerdangBedagai 51
SerdangBedagai DeliSerdang 51 TebingTinggi 35
TebingTinggi DeliSerdang 82 SerdangBedagai 35 BatuBara 58 PematangSiantar 47
BatuBara TebingTinggi 58 Asahan 66
PematangSiantar TebingTinggi 47 TobaSamosir 126
TobaSamosir PematangSiantar 126 TapanuliUtara 96
Asahan BatuBara 66 TanjungBalai 32 LabuhanBatuUtara 81
TapanuliUtara TobaSamosir 96 TapanuliSelatan 106
TanjungBalai LabuhanBatuUtara 110 Asahan 32
TapanuliSelatan TapanuliUtara 106 PadangLawas 133
LabuhanBatuUtara TanjungBalai 110 LabuhanBatu 108 Asahan 81
LabuhanBatu LabuhanBatuUtara 108 LabuhanBatuSelatan 96
PadangLawas TapanuliSelatan 133 PadangLawasUtara 28
PadangLawasUtara PadangLawas 28 MandailingNatal 162
LabuhanBatuSelatan LabuhanBatu 96 RokanHilir 99 RokanHulu 189
MandailingNatal RokanHulu 365 PadangLawasUtara 162
RokanHulu LabuhanBatuSelatan 189 Kampar 106 MandailingNatal 365
RokanHilir LabuhanBatuSelatan 99 Bengkalis 213
Bengkalis RokanHilir 213 Siak 131
Kampar Pekanbaru 87 RokanHulu 106
Siak Bengkalis 131 Pekanbaru 91
Pekanbaru Siak 91 Kampar 87
""" | ai bismillah.py | import queue as Q
def search(graph, start, end):
if start not in graph:
raise TypeError(str(start) + ' not found in graph !')
return
if end not in graph:
raise TypeError(str(end) + ' not found in graph !')
return
visited = []
queue = Q.PriorityQueue()
queue.put((0, [start]))
visited.append(start)
while not queue.empty():
node = queue.get()
current = node[1][len(node[1]) - 1]
print(current)
if end in node[1]:
print("Path found: " + str(node[1]) + ", Cost = " + str(node[0]))
break
cost = node[0]
for neighbor in graph[current]:
temp = node[1][:]
temp.append(neighbor)
if(neighbor not in visited): queue.put((cost + graph[current][neighbor], temp))
visited.append(neighbor)
def readGraph():
lines = int( input() )
graph = {}
for line in range(lines):
line = input()
tokens = line.split()
node = tokens[0]
graph[node] = {}
for i in range(1, len(tokens) - 1, 2):
# print(node, tokens[i], tokens[i + 1])
# graph.addEdge(node, tokens[i], int(tokens[i + 1]))
graph[node][tokens[i]] = int(tokens[i + 1])
return graph
def main():
graph = readGraph()
search(graph, 'DeliSerdang', 'Pekanbaru')
if __name__ == "__main__":
main()
"""
Sample Map Input:
22
DeliSerdang TebingTinggi 82 SerdangBedagai 51
SerdangBedagai DeliSerdang 51 TebingTinggi 35
TebingTinggi DeliSerdang 82 SerdangBedagai 35 BatuBara 58 PematangSiantar 47
BatuBara TebingTinggi 58 Asahan 66
PematangSiantar TebingTinggi 47 TobaSamosir 126
TobaSamosir PematangSiantar 126 TapanuliUtara 96
Asahan BatuBara 66 TanjungBalai 32 LabuhanBatuUtara 81
TapanuliUtara TobaSamosir 96 TapanuliSelatan 106
TanjungBalai LabuhanBatuUtara 110 Asahan 32
TapanuliSelatan TapanuliUtara 106 PadangLawas 133
LabuhanBatuUtara TanjungBalai 110 LabuhanBatu 108 Asahan 81
LabuhanBatu LabuhanBatuUtara 108 LabuhanBatuSelatan 96
PadangLawas TapanuliSelatan 133 PadangLawasUtara 28
PadangLawasUtara PadangLawas 28 MandailingNatal 162
LabuhanBatuSelatan LabuhanBatu 96 RokanHilir 99 RokanHulu 189
MandailingNatal RokanHulu 365 PadangLawasUtara 162
RokanHulu LabuhanBatuSelatan 189 Kampar 106 MandailingNatal 365
RokanHilir LabuhanBatuSelatan 99 Bengkalis 213
Bengkalis RokanHilir 213 Siak 131
Kampar Pekanbaru 87 RokanHulu 106
Siak Bengkalis 131 Pekanbaru 91
Pekanbaru Siak 91 Kampar 87
""" | 0.1211 | 0.212191 |
from .layout import Layout
class Accumulator:
    """Implementation of an Accumulator
    Virtual representation of an Accumulator
    """
def __init__(self, min_bricks, max_bricks, min_cells, max_cells, constants):
# Set class attributes
self.min_bricks = min_bricks
self.max_bricks = max_bricks
self.min_cells = min_cells
self.max_cells = max_cells
self.constants = constants
self.brick_iterations = self.iterate_bricks()
self.brick_configurations = self.get_bricks_configurations()
self.brick_layouts = self.generate_brick_layouts(constants)
def base10_round(self, x, base=5):
return base * round(x/base)
def generate_brick_layouts(self, inputs):
driverMass = inputs['driverMass']
vehicleMass = inputs['vehicleMass']
accumBoxMass = inputs['accumBoxMass']
cellCoverMass = inputs['cellCoverMass']
cellMass = inputs['cellMass']
Vnom = inputs['nominalVoltage']
cellMaxVoltage = inputs['cellNominalVoltage']
cellCapacity = inputs['cellCapacity']
brick_layouts = []
for i in self.brick_iterations:
    for y in self.brick_configurations[i]:
        layout = Layout(i, y)
        brick_layouts.append(layout)
        layout.generate_cells(self.min_cells, self.max_cells)
        invalid_cell_layouts = []
        for z in layout.get_cells():
            z.set_car_mass(driverMass, vehicleMass, accumBoxMass, cellCoverMass, cellMass, i)
            z.set_accumulator_mass(accumBoxMass, cellCoverMass, cellMass, i)
            z.set_brick_mass(cellCoverMass, cellMass)
            z.confirm_rules_compliance(y['Series'], y['Parallel'], Vnom, cellMaxVoltage, cellCapacity, cellMass, cellCoverMass)
            if not z.get_rules_compliance():
                invalid_cell_layouts.append(z)
        layout.set_cells([x for x in layout.get_cells() if x not in invalid_cell_layouts])
        layout.set_invalid_cells(invalid_cell_layouts)
# Remove brick layouts with 0 possible cell layouts (iterate in reverse,
# down to and including index 0, so deletions do not shift unvisited items)
for i in range(len(brick_layouts) - 1, -1, -1):
    if len(brick_layouts[i].get_cells()) == 0:
        del brick_layouts[i]
return brick_layouts
def get_bricks_configurations(self):
brick_configurations = {}
for i in self.brick_iterations:
counter = 2
layout_possibilities = []
while counter <= i // 2:
bricks_in_series = i / counter
if bricks_in_series % 1 == 0:
bricks_in_series = int(bricks_in_series)
temp_layout = {}
temp_layout['Series'] = bricks_in_series
temp_layout['Parallel'] = i//bricks_in_series
if layout_possibilities:
    for y in layout_possibilities:
        if y['Series'] == temp_layout['Parallel'] and y['Parallel'] == temp_layout['Series']:
            break
    else:
        # for/else: append only when no mirrored layout was found
        layout_possibilities.append(temp_layout)
else:
    layout_possibilities.append(temp_layout)
counter += 1
brick_configurations[i] = list(layout_possibilities)
return brick_configurations
def iterate_bricks(self):
    # Keep only brick counts that can be split into a series/parallel layout,
    # i.e. counts with a divisor between 2 and i // 2 (inclusive, so that
    # e.g. 4 = 2 x 2 is kept). Building a new list avoids mutating the list
    # while iterating over it.
    return [i for i in range(self.min_bricks, self.max_bricks + 1)
            if i <= 1 or any(i % y == 0 for y in range(2, i // 2 + 1))]
def get_brick_layouts(self):
return self.brick_layouts
def set_simulation_iterations(self, simulation_iterations):
self.simulation_iterations = simulation_iterations
for num, i in enumerate(self.brick_layouts):
for y in i.cells:
car_mass = self.base10_round(y.get_car_mass())
y.set_lap_simulation(simulation_iterations[car_mass])
def get_simulation_iterations(self):
return self.simulation_iterations
def set_roadload_calcs(self):
for num, i in enumerate(self.brick_layouts):
for y in i.cells:
continue
| roadload/accumulator.py | (parsed_code: verbatim duplicate of the code column) | 0.488161 | 0.528655 |
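The series/parallel factorisation in get_bricks_configurations, restated as a pure function for a quick check:

def brick_configurations(n):
    # Enumerate (series, parallel) splits of n bricks; mirrored pairs such
    # as (2, 6) vs (6, 2) are only kept once, matching the class above.
    configs = []
    for counter in range(2, n // 2 + 1):
        if n % counter == 0:
            series, parallel = n // counter, counter
            if (parallel, series) not in configs:
                configs.append((series, parallel))
    return configs

print(brick_configurations(12))  # [(6, 2), (4, 3)]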
from __future__ import annotations
from dataclasses import dataclass, replace
from typing import Any, Generic, Tuple, Type, TypeVar
from pyckaxe.lib.pack.abc.resource import Resource
from pyckaxe.lib.pack.namespace import Namespace
__all__ = (
"InvalidResourceLocation",
"ResourceLocation",
"ClassifiedResourceLocation",
)
ST = TypeVar("ST", bound="ResourceLocation")
RT = TypeVar("RT", bound=Resource)
class InvalidResourceLocation(Exception):
def __init__(self, value: str):
super().__init__(f"Invalid resource location: {value}")
@dataclass(frozen=True)
class ResourceLocation:
"""A relative resource location, independent of any physical location."""
namespace: Namespace
parts: Tuple[str, ...]
@classmethod
def from_string(cls: Type[ST], name: str) -> ST:
try:
namespace_str, _, parts_str = name.partition(":")
namespace = Namespace(namespace_str)
parts = tuple(parts_str.split("/"))
return cls(namespace=namespace, parts=parts)
except Exception as ex:
raise InvalidResourceLocation(name) from ex
@classmethod
def declassify(
cls, classified: ClassifiedResourceLocation[Any]
) -> "ResourceLocation":
return cls(classified.namespace, classified.parts)
def __str__(self) -> str:
return self.name
def __repr__(self) -> str:
return str(self)
def __truediv__(self: ST, other: str) -> ST:
return self.extend(other)
def __rmatmul__(self, other: Type[RT]) -> ClassifiedResourceLocation[RT]:
return self.classify(other)
@property
def trail(self) -> str:
return "/".join(self.parts)
@property
def name(self) -> str:
return f"{self.namespace}:{self.trail}"
def extend(self: ST, *parts: str) -> ST:
return replace(self, parts=(*self.parts, *parts))
def classify(self, resource_class: Type[RT]) -> ClassifiedResourceLocation[RT]:
return ClassifiedResourceLocation(self.namespace, self.parts, resource_class)
@dataclass(frozen=True)
class ClassifiedResourceLocation(ResourceLocation, Generic[RT]):
"""
A resource location that is aware of the type of underlying resource.
"""
resource_class: Type[RT]
| pyckaxe/lib/pack/resource_location.py | (parsed_code: verbatim duplicate of the code column) | 0.873492 | 0.215289 |
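A small usage sketch for ResourceLocation, assuming pyckaxe is importable and that Namespace accepts a plain string (as from_string implies):

from pyckaxe.lib.pack.resource_location import ResourceLocation

loc = ResourceLocation.from_string("mypack:blocks/stone")
print(loc.name)                 # mypack:blocks/stone
print(loc.parts)                # ('blocks', 'stone')
print((loc / "polished").name)  # mypack:blocks/stone/polished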
import os, time
import src.utils.loader as loader
from distutils.dir_util import copy_tree
from src.BaseArgs import RefactorDSArgs
class RefactorDS:
def __init__(self, set_to_analyse, root_input_path, output_dir, path_participants_folder):
self.set_to_analyse = set_to_analyse # Google, OCR, PROGRAM or PROGRAM_OTHER
self.root_input_path = root_input_path
self.output_dir = output_dir
self.program_names = [file.split(".")[0] for file in os.listdir(path_participants_folder)]
# NOTE: keep the leading dot so that "program + extension" below forms a real filename
self.program_file_extension = "." + os.listdir(root_input_path)[0].split(".")[-1] \
    if ("." in os.listdir(root_input_path)[0]) else ""
self.path_participants_folder = path_participants_folder
def refactor_google(self):
"""
Refactor Google DS forlder dividing IDs per program
"""
for program in self.program_names:
input_participants_path = os.path.join(self.path_participants_folder, program + self.program_file_extension)
participant_names = [val.replace(" ","_") for sublist in loader.get_participants(input_participants_path) for val in sublist]
for participant in participant_names:
# check if participant in downloaded participants from Google:
input_participant_google_ds = os.path.join(self.root_input_path, participant)
output_participants_in_program_google = os.path.join(self.output_dir, program, participant)
# copy images from input_folder to output_folder:
if os.path.isdir(input_participant_google_ds):
copy_tree(input_participant_google_ds, output_participants_in_program_google)
def refactor_program(self, new_folder_name="OTHER"):
"""
Refactor Program folder addding an ID "new_folder_name" as the ID that represents all the participants
in the program
:param new_folder_name: subfolder ID for program
"""
for program in self.program_names:
program_input_path = os.path.join(self.root_input_path, program)
program_output_path = os.path.join(self.root_input_path, program, new_folder_name)
os.makedirs(program_output_path, exist_ok=True)
initial_n_imgs = len(os.listdir(program_input_path)) - 1
os.system("mv " + program_input_path + "/*.png " + program_output_path)
while len(os.listdir(program_output_path)) < initial_n_imgs:
    time.sleep(15)  # 15 seconds
def run_refactor(self):
"""
Run refactor
"""
if self.set_to_analyse == "Google":
self.refactor_google()
elif self.set_to_analyse == "PROGRAM":
self.refactor_program()
if __name__ == "__main__":
refactor_args_obj = RefactorDSArgs()
args = refactor_args_obj.parse()
refactorDS_obj = RefactorDS(args.set_to_analyse, args.root_input_path,
args.output_dir, args.program_participants_folder)
refactorDS_obj.run_refactor()
| src/FACE_DIARIZATION/A_DSbasics/RefactorDS.py | (parsed_code: verbatim duplicate of the code column) | 0.35869 | 0.127734 |
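refactor_program shells out to a POSIX mv and then polls the directory; a portable stdlib-only sketch of the same move (directory names here are illustrative):

import glob
import os
import shutil

def move_pngs(src_dir, dst_dir):
    # Move every top-level .png from src_dir into dst_dir synchronously,
    # so no sleep-based polling is needed afterwards
    os.makedirs(dst_dir, exist_ok=True)
    for path in glob.glob(os.path.join(src_dir, "*.png")):
        shutil.move(path, os.path.join(dst_dir, os.path.basename(path)))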
import json
import boto3
import os
import logging
from typing import Dict
from crhelper import CfnResource
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
personalize = boto3.client('personalize')
appconfig = boto3.client('appconfig')
helper = CfnResource()
appconfig_application_id = os.environ['AppConfigApplicationId']
appconfig_config_profile_id = os.environ['AppConfigConfigurationProfileId']
appconfig_environment_id = os.environ['AppConfigEnvironmentId']
appconfig_deployment_strategy_id = os.environ['AppConfigDeploymentStrategyId']
recipe_arn_type_mapping = {
"arn:aws:personalize:::recipe/aws-ecomm-customers-who-viewed-x-also-viewed": "related-items",
"arn:aws:personalize:::recipe/aws-ecomm-frequently-bought-together": "related-items",
"arn:aws:personalize:::recipe/aws-ecomm-popular-items-by-purchases": "recommend-items",
"arn:aws:personalize:::recipe/aws-ecomm-popular-items-by-views": "recommend-items",
"arn:aws:personalize:::recipe/aws-ecomm-recommended-for-you": "recommend-items",
"arn:aws:personalize:::recipe/aws-vod-because-you-watched-x": "related-items",
"arn:aws:personalize:::recipe/aws-vod-more-like-x": "related-items",
"arn:aws:personalize:::recipe/aws-vod-most-popular": "recommend-items",
"arn:aws:personalize:::recipe/aws-vod-top-picks": "recommend-items",
"arn:aws:personalize:::recipe/aws-hrnn": "recommend-items",
"arn:aws:personalize:::recipe/aws-hrnn-coldstart": "recommend-items",
"arn:aws:personalize:::recipe/aws-hrnn-metadata": "recommend-items",
"arn:aws:personalize:::recipe/aws-personalized-ranking": "rerank-items",
"arn:aws:personalize:::recipe/aws-popularity-count": "recommend-items",
"arn:aws:personalize:::recipe/aws-similar-items": "related-items",
"arn:aws:personalize:::recipe/aws-sims": "related-items",
"arn:aws:personalize:::recipe/aws-user-personalization": "recommend-items"
}
def generate_api_config(dataset_group_names_prop: str) -> Dict:
""" Generates personalization APIs app config based on recommenders, campaigns, and event trackers for the specified dataset groups
Arguments:
dataset_group_names_prop (string) - comma separated list of Personalize dataset group names to check for recommenders, campaigns,
and event trackers or "all" to check all dataset groups in the current account & region
"""
# Start with an empty base configuration that implements some general caching.
config = {
"version": "2",
"description": "This configuration was automatically generated based on the active recommenders/campaigns for a supplied list of dataset groups",
"cacheControl": {
"autoProvision": True,
"userSpecified": {
"maxAge": 10,
"directives": "private"
},
"syntheticUserSpecified": {
"maxAge": 300,
"directives": "public"
},
"noUserSpecified": {
"maxAge": 1200,
"directives": "public"
}
},
"namespaces": {}
}
dataset_group_names = [dsg.strip() for dsg in dataset_group_names_prop.split(',')]
all_dsgs = len(dataset_group_names) == 1 and dataset_group_names[0].lower() == 'all'
logger.info('Dataset group names: %s', dataset_group_names)
logger.info('Matching all dataset groups in current region for account: %s', all_dsgs)
logger.info('Looking up recommenders and matching to dataset group(s)')
paginator = personalize.get_paginator('list_recommenders')
for recommender_page in paginator.paginate():
for recommender in recommender_page['recommenders']:
dataset_group_name = recommender['datasetGroupArn'].split('/')[-1]
if all_dsgs or dataset_group_name in dataset_group_names:
action_type = recipe_arn_type_mapping.get(recommender['recipeArn'])
if not action_type:
# Perhaps a new recipe?
logger.error('Unable to determine action type for recipe %s for recommender %s; skipping recommender', recommender['recipeArn'], recommender['recommenderArn'])
continue
variation_name = recommender['recipeArn'].split('/')[-1].replace('aws-', 'personalize-')
variation_config = (config['namespaces']
.setdefault(dataset_group_name, {})
.setdefault('recommenders', {})
.setdefault(action_type, {})
.setdefault(recommender['name'], {})
.setdefault('variations', {})
.setdefault(variation_name, {})
)
variation_config['type'] = 'personalize-recommender'
variation_config['arn'] = recommender['recommenderArn']
logger.info('Looking up campaigns and matching to dataset group(s)')
paginator = personalize.get_paginator('list_campaigns')
for campaign_page in paginator.paginate():
for campaign in campaign_page['campaigns']:
response = personalize.describe_campaign(campaignArn = campaign['campaignArn'])
sv_arn = response['campaign']['solutionVersionArn']
response = personalize.describe_solution_version(solutionVersionArn = sv_arn)
dataset_group_name = response['solutionVersion']['datasetGroupArn'].split('/')[-1]
if all_dsgs or dataset_group_name in dataset_group_names:
recipe_arn = response['solutionVersion']['recipeArn']
action_type = recipe_arn_type_mapping.get(recipe_arn)
if not action_type:
# Perhaps a new recipe?
logger.error('Unable to determine action type for recipe %s for campaign %s; skipping campaign', recipe_arn, campaign['campaignArn'])
continue
variation_name = recipe_arn.split('/')[-1].replace('aws-', 'personalize-')
variation_config = (config['namespaces']
.setdefault(dataset_group_name, {})
.setdefault('recommenders', {})
.setdefault(action_type, {})
.setdefault(campaign['name'], {})
.setdefault('variations', {})
.setdefault(variation_name, {})
)
variation_config['type'] = 'personalize-campaign'
variation_config['arn'] = campaign['campaignArn']
logger.info('Looking up event trackers and matching to dataset group(s)')
paginator = personalize.get_paginator('list_event_trackers')
for event_tracker_page in paginator.paginate():
for event_tracker in event_tracker_page['eventTrackers']:
response = personalize.describe_event_tracker(eventTrackerArn = event_tracker['eventTrackerArn'])
dataset_group_name = response['eventTracker']['datasetGroupArn'].split('/')[-1]
if all_dsgs or dataset_group_name in dataset_group_names:
targets = (config['namespaces']
.setdefault(dataset_group_name, {})
.setdefault('eventTargets', [])
)
targets.append({
'type': 'personalize-event-tracker',
'trackingId': response['eventTracker']['trackingId']
})
return config
def create_and_deploy_hosted_config(config: Dict):
""" Creates and deploys a configuration to AppConfig as a hosted configuration version """
logger.info('Creating hosted configuration...')
logger.debug(config)
response = appconfig.create_hosted_configuration_version(
ApplicationId = appconfig_application_id,
ConfigurationProfileId = appconfig_config_profile_id,
Description = 'Generated configuration based on supplied list of dataset groups',
ContentType = 'application/json',
Content = json.dumps(config, indent = 4)
)
logger.debug(json.dumps(response, indent = 2, default = str))
config_version = response['VersionNumber']
logger.info('Starting deployment...')
response = appconfig.start_deployment(
ApplicationId = appconfig_application_id,
EnvironmentId = appconfig_environment_id,
DeploymentStrategyId = appconfig_deployment_strategy_id,
ConfigurationProfileId = appconfig_config_profile_id,
ConfigurationVersion = str(config_version),
Description = 'Automatic configuration deployment after generating configuration',
Tags={
'CreatedBy': 'Personalization-APIs-Solution'
}
)
logger.debug(json.dumps(response, indent = 2, default = str))
def generate_and_deploy_config(dataset_group_names_prop: str):
if dataset_group_names_prop.strip():
config = generate_api_config(dataset_group_names_prop)
if len(config['namespaces']) > 0:
create_and_deploy_hosted_config(config)
else:
logger.warning('No namespaces discovered in current account/region')
else:
logger.info('Dataset group name(s) not specified; skipping generation of configuration')
@helper.create
def create_resource(event, _):
generate_and_deploy_config(event['ResourceProperties']['DatasetGroupNames'])
@helper.delete
def delete_resource(event, _):
""" Delete hosted configuration versions
This is necessary here since hosted configurations are created outside of CloudFormation
and therefore need to cleaned up before depedent AppConfig resources can be deleted by
CloudFormation when the project is deleted.
"""
logger.info('Deleting all hosted configuration versions for application %s and config profile %s', appconfig_application_id, appconfig_config_profile_id)
page_count = 0
while page_count < 10:
response = appconfig.list_hosted_configuration_versions(
ApplicationId = appconfig_application_id,
ConfigurationProfileId = appconfig_config_profile_id,
MaxResults = 50 # no paginator and max is 50
)
if len(response['Items']) == 0:
break
for config_version in response['Items']:
logger.info('Deleting hosted configuration version %s', config_version["VersionNumber"])
response = appconfig.delete_hosted_configuration_version(
ApplicationId = appconfig_application_id,
ConfigurationProfileId = appconfig_config_profile_id,
VersionNumber = config_version['VersionNumber']
)
page_count += 1
def lambda_handler(event, context):
""" Entry point of function called from either CloudFormation or directly under test
"""
logger.debug('## ENVIRONMENT VARIABLES')
logger.debug(os.environ)
logger.debug('## EVENT')
logger.debug(event)
# If the event has a RequestType, we're being called by CFN as custom resource
if event.get('RequestType'):
logger.info('Function called from CloudFormation as custom resource')
helper(event, context)
else:
logger.info('Function called outside of CloudFormation')
# Called function directly (i.e. testing in Lambda console or called directly)
generate_and_deploy_config(event['ResourceProperties']['DatasetGroupNames'])
| src/generate_config_function/main.py | (parsed_code: verbatim duplicate of the code column) | 0.571767 | 0.137996 |
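A pure-Python demo of the nested setdefault chain that generate_api_config uses to grow the namespaces tree; the dataset group, recommender name, and ARN below are made up:

import json

config = {"namespaces": {}}
variation = (config["namespaces"]
             .setdefault("my-dataset-group", {})
             .setdefault("recommenders", {})
             .setdefault("recommend-items", {})
             .setdefault("recommended-for-you", {})
             .setdefault("variations", {})
             .setdefault("personalize-ecomm-recommended-for-you", {}))
variation["type"] = "personalize-recommender"
variation["arn"] = "arn:aws:personalize:us-east-1:111122223333:recommender/demo"
print(json.dumps(config, indent=2))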
import keras.backend as K
import keras.layers as kl
from ..common.blocks import ChannelSE
from ..common.blocks import GroupConv2D
bn_params = {
'epsilon': 9.999999747378752e-06,
}
def SEResNetBottleneck(filters, reduction=16, strides=1, **kwargs):
def layer(input):
x = input
residual = input
# bottleneck
x = kl.Conv2D(filters // 4, (1, 1), kernel_initializer='he_uniform', strides=strides, use_bias=False)(x)
x = kl.BatchNormalization(**bn_params)(x)
x = kl.Activation('relu')(x)
x = kl.ZeroPadding2D(1)(x)
x = kl.Conv2D(filters // 4, (3, 3),
kernel_initializer='he_uniform', use_bias=False)(x)
x = kl.BatchNormalization(**bn_params)(x)
x = kl.Activation('relu')(x)
x = kl.Conv2D(filters, (1, 1), kernel_initializer='he_uniform', use_bias=False)(x)
x = kl.BatchNormalization(**bn_params)(x)
# if number of filters or spatial dimensions changed
# make same manipulations with residual connection
x_channels = K.int_shape(x)[-1]
r_channels = K.int_shape(residual)[-1]
if strides != 1 or x_channels != r_channels:
residual = kl.Conv2D(x_channels, (1, 1), strides=strides,
kernel_initializer='he_uniform', use_bias=False)(residual)
residual = kl.BatchNormalization(**bn_params)(residual)
# apply attention module
x = ChannelSE(reduction=reduction)(x)
# add residual connection
x = kl.Add()([x, residual])
x = kl.Activation('relu')(x)
return x
return layer
def SEResNeXtBottleneck(filters, reduction=16, strides=1, groups=32, base_width=4, **kwargs):
def layer(input):
x = input
residual = input
width = (filters // 4) * base_width * groups // 64
# bottleneck
x = kl.Conv2D(width, (1, 1), kernel_initializer='he_uniform', use_bias=False)(x)
x = kl.BatchNormalization(**bn_params)(x)
x = kl.Activation('relu')(x)
x = kl.ZeroPadding2D(1)(x)
x = GroupConv2D(width, (3, 3), strides=strides, groups=groups,
kernel_initializer='he_uniform', use_bias=False)(x)
x = kl.BatchNormalization(**bn_params)(x)
x = kl.Activation('relu')(x)
x = kl.Conv2D(filters, (1, 1), kernel_initializer='he_uniform', use_bias=False)(x)
x = kl.BatchNormalization(**bn_params)(x)
# if number of filters or spatial dimensions changed
# make same manipulations with residual connection
x_channels = K.int_shape(x)[-1]
r_channels = K.int_shape(residual)[-1]
if strides != 1 or x_channels != r_channels:
residual = kl.Conv2D(x_channels, (1, 1), strides=strides,
kernel_initializer='he_uniform', use_bias=False)(residual)
residual = kl.BatchNormalization(**bn_params)(residual)
# apply attention module
x = ChannelSE(reduction=reduction)(x)
# add residual connection
x = kl.Add()([x, residual])
x = kl.Activation('relu')(x)
return x
return layer
def SEBottleneck(filters, reduction=16, strides=1, groups=64, is_first=False):
if is_first:
downsample_kernel_size = (1, 1)
padding = False
else:
downsample_kernel_size = (3, 3)
padding = True
def layer(input):
x = input
residual = input
# bottleneck
x = kl.Conv2D(filters // 2, (1, 1), kernel_initializer='he_uniform', use_bias=False)(x)
x = kl.BatchNormalization(**bn_params)(x)
x = kl.Activation('relu')(x)
x = kl.ZeroPadding2D(1)(x)
x = GroupConv2D(filters, (3, 3), strides=strides, groups=groups,
kernel_initializer='he_uniform', use_bias=False)(x)
x = kl.BatchNormalization(**bn_params)(x)
x = kl.Activation('relu')(x)
x = kl.Conv2D(filters, (1, 1), kernel_initializer='he_uniform', use_bias=False)(x)
x = kl.BatchNormalization(**bn_params)(x)
# if number of filters or spatial dimensions changed
# make same manipulations with residual connection
x_channels = K.int_shape(x)[-1]
r_channels = K.int_shape(residual)[-1]
if strides != 1 or x_channels != r_channels:
if padding:
residual = kl.ZeroPadding2D(1)(residual)
residual = kl.Conv2D(x_channels, downsample_kernel_size, strides=strides,
kernel_initializer='he_uniform', use_bias=False)(residual)
residual = kl.BatchNormalization(**bn_params)(residual)
# apply attention module
x = ChannelSE(reduction=reduction)(x)
# add residual connection
x = kl.Add()([x, residual])
x = kl.Activation('relu')(x)
return x
return layer
| classification_models/senet/blocks.py | (parsed_code: verbatim duplicate of the code column) | 0.831349 | 0.553204 |
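A sketch wiring one SEResNetBottleneck into a tiny Keras model, assuming the classification_models package (with its ChannelSE block) is importable:

import keras.layers as kl
from keras.models import Model
from classification_models.senet.blocks import SEResNetBottleneck

inputs = kl.Input(shape=(56, 56, 64))
# 64 input channels != 256 output filters, so the residual branch is
# projected with a 1x1 convolution inside the block
x = SEResNetBottleneck(filters=256, reduction=16, strides=1)(inputs)
model = Model(inputs, x)
model.summary()  # output shape: (None, 56, 56, 256)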
from django.views.generic import CreateView, UpdateView, DeleteView, DetailView, TemplateView, FormView
from django.core.urlresolvers import reverse_lazy, reverse
from django.contrib import messages
from django_tables2.views import SingleTableView
from fabric_bolt.core.mixins.views import MultipleGroupRequiredMixin, GroupRequiredMixin
from fabric_bolt.hosts import models, tables, forms
from fabric_bolt.hosts.utils import create_ssh_config
class HostList(MultipleGroupRequiredMixin, SingleTableView):
group_required = ['Admin', 'Deployer', ]
table_class = tables.HostTable
model = models.Host
class HostDetail(MultipleGroupRequiredMixin, DetailView):
group_required = ['Admin', 'Deployer', ]
model = models.Host
class HostCreate(MultipleGroupRequiredMixin, CreateView):
"""View for creating a host. Hosts let us know where we can shovel code to."""
group_required = ['Admin', 'Deployer', ]
model = models.Host
form_class = forms.HostCreateForm
template_name_suffix = '_create'
def form_valid(self, form):
"""First call the parent's form valid then let the user know it worked."""
form_valid_from_parent = super(HostCreate, self).form_valid(form)
messages.success(self.request, 'Host {} Successfully Created'.format(self.object))
return form_valid_from_parent
def get_success_url(self):
"""Send them back to the detail view for that host"""
return reverse('hosts_host_detail', kwargs={'pk': self.object.pk})
class HostUpdate(GroupRequiredMixin, UpdateView):
group_required = 'Admin'
model = models.Host
form_class = forms.HostUpdateForm
template_name_suffix = '_update'
def form_valid(self, form):
"""First call the parent's form valid then let the user know it worked."""
form_valid_from_parent = super(HostUpdate, self).form_valid(form)
messages.success(self.request, 'Host {} Successfully Updated'.format(self.object))
return form_valid_from_parent
def get_success_url(self):
    """Send them back to the detail view for that host"""
    return reverse('hosts_host_detail', kwargs={'pk': self.object.pk})
class HostDelete(GroupRequiredMixin, DeleteView):
group_required = 'Admin'
model = models.Host
success_url = reverse_lazy('hosts_host_list')
def delete(self, request, *args, **kwargs):
    messages.success(self.request, 'Host {} Successfully Deleted'.format(self.get_object()))
    # super().delete is already bound; passing self again would shift the arguments
    return super(HostDelete, self).delete(request, *args, **kwargs)
class SSHKeys(TemplateView):
template_name = 'hosts/ssh_configs.html'
def get_view(self, *args, **kwargs):
return super(SSHKeys, self).get(self.request, *args, **kwargs)
def post(self, *args, **kwargs):
"""Create the SSH file & then return the normal get method..."""
existing_ssh = models.SSHConfig.objects.all()
if existing_ssh.exists():
return self.get_view()
remote_user = self.request.POST.get('remote_user', 'root')
create_ssh_config(remote_user=remote_user)
return self.get_view()
def get_context_data(self, **kwargs):
ssh_configs = models.SSHConfig.objects.all()
return {
'ssh_configs': ssh_configs,
}
class SSHKeysCreate(FormView):
form_class = forms.CreateSSHConfig
template_name = 'hosts/host_ssh_config_create.html'
success_url = reverse_lazy('hosts_ssh_config')
def form_valid(self, form):
create_ssh_config(
name=form.cleaned_data.get('name'),
private_key_text=form.cleaned_data.get('private_key'),
public_key_text=form.cleaned_data.get('public_key'),
remote_user=form.cleaned_data.get('remote_user'),
)
return super(SSHKeysCreate, self).form_valid(form)
class SSHKeyDelete(DeleteView):
model = models.SSHConfig
success_url = reverse_lazy('hosts_ssh_config') | fabric_bolt/hosts/views.py | 0.709724 | 0.12552 |
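A hypothetical urls.py sketch for wiring the class-based views above; the first two route names mirror the reverse() calls in the file, while the URL patterns and the create route name are assumptions.
from django.conf.urls import url  # Django 1.x, consistent with django.core.urlresolvers above
from fabric_bolt.hosts import views

urlpatterns = [
    url(r'^$', views.HostList.as_view(), name='hosts_host_list'),
    url(r'^(?P<pk>\d+)/$', views.HostDetail.as_view(), name='hosts_host_detail'),
    url(r'^create/$', views.HostCreate.as_view(), name='hosts_host_create'),  # name assumed
]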
from typing import TypeVar, Generic, TYPE_CHECKING, Optional, Dict, Sequence
from threading import BoundedSemaphore
import pypipeline
from pypipeline.cellio.acellio.ainput import AInput
from pypipeline.cellio.icellio import IConnectionEntryPoint, IConnectionExitPoint
from pypipeline.cellio.connectionendpoint import ConnectionExitPoint, ConnectionEntryPoint
from pypipeline.validation import BoolExplained, FalseExplained, TrueExplained
from pypipeline.exceptions import NotDeployedException
if TYPE_CHECKING:
from pypipeline.cell import ICell, ICompositeCell
from pypipeline.connection import IConnection
T = TypeVar('T')
class InternalInput(AInput[T], IConnectionExitPoint[T], Generic[T]):
"""
InternalInput class.
An internal input is a type of input that can only be created on a composite cell.
It accepts no incoming connections and infinite outgoing (internal) connections.
Every time an internal input is pulled, it blocks and waits until a new value is set.
An IO is owned by its cell.
An IO is the controlling class in the IO-ICell relation, as IO of the cell.
An IConnectionExitPoint is the controlled class in the IConnection-IConnectionExitPoint relation, as the
source of the connection.
"""
PULL_TIMEOUT: float = 5.
def __init__(self, cell: "ICompositeCell", name: str):
"""
Args:
cell: the cell of which this IO will be part.
name: the name of this IO. Should be unique within the cell.
"""
super(InternalInput, self).__init__(cell, name)
self.__exit_point: ConnectionExitPoint[T] = ConnectionExitPoint(self, max_outgoing_connections=99999)
self.__value_is_acknowledged: BoundedSemaphore = BoundedSemaphore(1)
self._notify_observers_of_creation()
def can_have_as_cell(self, cell: "ICell") -> BoolExplained:
super_result = super(InternalInput, self).can_have_as_cell(cell)
if not super_result:
return super_result
if not isinstance(cell, pypipeline.cell.compositecell.icompositecell.ICompositeCell):
return FalseExplained(f"An InternalInput can only be created on an instance of ICompositeCell")
return TrueExplained()
def set_value(self, value: T) -> None:
self.logger.debug(f"{self}.set_value( {value} ) waiting for prev value to be acknowledged @ InternalInput")
while not self.__value_is_acknowledged.acquire(timeout=self.PULL_TIMEOUT):
self.logger.warning(f"{self}.set_value() waiting... @ InternalInput level")
if not self._is_deployed():
raise NotDeployedException(f"{self} is set while not deployed")
super(InternalInput, self)._set_value(value)
def _clear_value(self) -> None:
super(InternalInput, self)._clear_value()
try:
self.__value_is_acknowledged.release()
except ValueError:
# The value was not set...
pass
def clear_value(self) -> None:
self._clear_value()
def pull(self) -> T:
self.logger.debug(f"{self}.pull() @ InternalInput level")
if not self.value_is_set():
self._wait_for_value(interruption_frequency=self.PULL_TIMEOUT)
self.logger.debug(f"{self}.pull() got value @ InternalInput level")
value = self.get_value()
self.__exit_point._notify_new_value()
self._acknowledge_value()
return value
def is_provided(self) -> bool:
# Has no more info on whether it will be provided or not.
# (It will always be provided with a value, but this value may be None in case of unconnected scalable
# cell inputs -> differentiate?)
# -> It is required to be provided, otherwise a CloneCell would not be deployable.
return True
def get_incoming_connections(self) -> "Sequence[IConnection[T]]":
return ()
def has_as_incoming_connection(self, connection: "IConnection[T]") -> bool:
return False
def get_nb_incoming_connections(self) -> int:
return 0
def pull_as_connection(self, connection: "IConnection[T]") -> T:
return self.__exit_point.pull_as_connection(connection)
def all_outgoing_connections_have_pulled(self) -> bool:
return self.__exit_point.have_all_outgoing_connections_pulled()
def has_seen_value(self, connection: "IConnection[T]") -> bool:
return self.__exit_point.has_seen_value(connection)
def get_outgoing_connections(self) -> "Sequence[IConnection[T]]":
return self.__exit_point.get_outgoing_connections()
@classmethod
def can_have_as_outgoing_connection(cls, connection: "IConnection[T]") -> BoolExplained:
return ConnectionExitPoint.can_have_as_outgoing_connection(connection)
def can_have_as_nb_outgoing_connections(self, number_of_outgoing_connections: int) -> BoolExplained:
return self.__exit_point.can_have_as_nb_outgoing_connections(number_of_outgoing_connections)
def _add_outgoing_connection(self, connection: "IConnection[T]") -> None:
self.__exit_point._add_outgoing_connection(connection)
def _remove_outgoing_connection(self, connection: "IConnection[T]") -> None:
self.__exit_point._remove_outgoing_connection(connection)
def get_max_nb_outgoing_connections(self) -> int:
return self.__exit_point.get_max_nb_outgoing_connections()
def get_nb_outgoing_connections(self) -> int:
return self.__exit_point.get_nb_outgoing_connections()
def has_as_outgoing_connection(self, connection: "IConnection[T]") -> bool:
return self.__exit_point.has_as_outgoing_connection(connection)
def has_outgoing_connection_to(self, target: "IConnectionEntryPoint[T]") -> bool:
return self.__exit_point.has_outgoing_connection_to(target)
def get_outgoing_connection_to(self, target: "IConnectionEntryPoint[T]") -> "IConnection[T]":
return self.__exit_point.get_outgoing_connection_to(target)
def assert_has_proper_outgoing_connections(self) -> None:
self.__exit_point.assert_has_proper_outgoing_connections()
def has_initial_value(self) -> bool:
return self.__exit_point.has_initial_value()
def get_nb_available_pulls(self) -> Optional[int]:
raise Exception("Not supported by this class?")
def _get_connection_entry_point(self) -> Optional[ConnectionEntryPoint]:
return None
def _get_connection_exit_point(self) -> ConnectionExitPoint:
return self.__exit_point
def assert_is_valid(self) -> None:
super(InternalInput, self).assert_is_valid()
self.__exit_point.assert_is_valid()
def delete(self) -> None:
super(InternalInput, self).delete()
self.__exit_point.delete()
def __getstate__(self) -> Dict:
# called during pickling
new_state = super(InternalInput, self).__getstate__()
new_state["_InternalInput__value_is_acknowledged"] = None
return new_state
def __setstate__(self, state: Dict) -> None:
# called during unpickling
super(InternalInput, self).__setstate__(state)
self.__value_is_acknowledged = BoundedSemaphore(1) | pypipeline/cellio/compositeio/internalinput.py | 0.837055 | 0.272772 |
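A stripped-down sketch of the BoundedSemaphore handshake used by InternalInput: setting a value blocks until the previous one was acknowledged, and clearing releases the slot. The class and method names here are illustrative.
from threading import BoundedSemaphore

class Slot(object):
    def __init__(self, timeout=5.0):
        self._ack = BoundedSemaphore(1)  # at most one unacknowledged value
        self._timeout = timeout
        self._value = None

    def set_value(self, value):
        while not self._ack.acquire(timeout=self._timeout):
            pass  # previous value not yet acknowledged; keep waiting
        self._value = value

    def clear_value(self):
        self._value = None
        try:
            self._ack.release()
        except ValueError:
            pass  # nothing was pending, mirroring _clear_value above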
import random
import math
class Model:
def __init__(self):
self.allNodes = []
self.customers = []
self.matrix = []
self.capacity = -1
def BuildModel(self):
random.seed(1)
depot = Node(0, 0, 0, 50, 50)
self.allNodes.append(depot)
self.capacity = 3000
for i in range (0, 200):
id = i + 1
tp = random.randint(1, 3)
dem = random.randint(1, 5) * 100
x = random.randint(0, 100)
y = random.randint(0, 100)
cust = Node(id, tp, dem, x, y)
self.allNodes.append(cust)
self.customers.append(cust)
rows = len(self.allNodes)
self.matrix = [[0.0 for x in range(rows)] for y in range(rows)]
for i in range(0, len(self.allNodes)):
for j in range(0, len(self.allNodes)):
if j == 0:
self.matrix[i][j] = 0.0
else:
source = self.allNodes[i]
target = self.allNodes[j]
dx_2 = (source.x - target.x) ** 2
dy_2 = (source.y - target.y) ** 2
dist = round(math.sqrt(dx_2 + dy_2))
if self.allNodes[j].type == 1:
self.matrix[i][j] = (dist / 35) + 5.0 / 60.0
elif self.allNodes[j].type == 2:
self.matrix[i][j] = (dist / 35) + 15.0 / 60.0
elif self.allNodes[j].type == 3:
self.matrix[i][j] = (dist / 35) + 25.0 / 60.0
class Node:
def __init__(self, idd, tp, dem, xx, yy):
self.ID = idd
self.type = tp
self.demand = dem
self.x = xx
self.y = yy
self.isRouted = False
self.isTabuTillIterator = -1
self.occurences = 0
class Route:
def __init__(self, dp, cap):
self.sequenceOfNodes = []
self.sequenceOfNodes.append(dp)
self.sequenceOfNodes.append(dp)
self.cost = 0
self.capacity = cap
self.load = 0
def __eq__(self, other):
if not isinstance(other, Route):
return NotImplemented
return self.sequenceOfNodes == other.sequenceOfNodes and self.cost == other.cost and self.capacity == other.capacity and self.load == other.load | VRP_Model.py | 0.169406 | 0.141697 |
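Illustrative usage of the model above, assuming it runs in the same module; the exact values depend on the seeded RNG.
m = Model()
m.BuildModel()
print(len(m.customers))  # 200 generated customers
print(m.matrix[0][1])    # depot -> customer 1 travel time in hours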
"""Advent of Code 2015, Day 5: Doesn't He Have Intern-Elves For This?"""
import re
import aoc
import pytest
def is_nice(candidate):
"""Check if candidate string passes nice rules
Returns True if a string is nice, and False otherwise.
A 'nice' string has the following properties:
* It contains at least three vowels [aeiou]
* It contains one letter that appears twice in a row
* It does not contain the strings [ab, cd, pq, xy]
Nice strings must have all of these properties.
"""
vowels = set("aeiou")
enough_vowels = len([c for c in candidate if c in vowels]) >= 3
has_double_char = any(a == b for a, b in zip(candidate, candidate[1:]))
bad_strings = ["ab", "cd", "pq", "xy"]
no_bad_strings = not [bad for bad in bad_strings if bad in candidate]
return enough_vowels and has_double_char and no_bad_strings
@pytest.mark.parametrize(
"nice_string",
[
"ugknbfddgicrmopn",
"aaa",
],
)
def test_nice_strings(nice_string):
"""is_nice validates 'nice' strings matching certain rules
A 'nice' string has the following properties:
* It contains at least three vowels [aeiou]
* It contains one letter that appears twice in a row
* It does not contain the strings [ab, cd, pq, xy]
Nice strings must have all of these properties.
"""
assert is_nice(nice_string)
@pytest.mark.parametrize(
"naughty_string",
[
"jchzalrnumimnmhp", # no double letter
"haegwjzuvuyypxyu", # contains 'xy'
"dvszwmarrgswjxmb", # only one vowel
],
)
def test_naughty_strings(naughty_string):
"""is_nice rejects strings that are known to be invalid"""
# Explicit `is not None` check because `not (None)` returns True
assert is_nice(naughty_string) is not None
assert not is_nice(naughty_string)
def is_new_nice(candidate):
"""Check if candidate string passes new nice rules
Returns True if a string is nice, and False otherwise.
New nice strings have both of the following properties:
* Contain a pair of letters that repeats without overlapping
* Contains one letter that repeats after exactly one letter
"""
has_repeated_pair = re.search(r"(.{2}).*\1", candidate) is not None
has_repeat_one_apart = re.search(r"(.).\1", candidate) is not None
return has_repeated_pair and has_repeat_one_apart
@pytest.mark.parametrize(
"nice_string",
[
"qjhvhtzxzqqjkmpb",
"xxyxx",
],
)
def test_new_nice(nice_string):
"""is_new_nice validates according to second set of rules
New nice strings match the following rules:
* Contain a pair of letters that repeats without overlapping
* Contains one letter that repeats after exactly one letter
"""
assert is_new_nice(nice_string)
@pytest.mark.parametrize(
"naughty_string",
[
"uurcxstgmygtbstg",
"ieodomkazucvgmuy",
],
)
def test_new_nice_naughty_strings(naughty_string):
"""Strings that don't match the new nice rules return False"""
# Explicit `is not None` check because `not (None)` returns True
assert is_new_nice(naughty_string) is not None
assert not is_new_nice(naughty_string)
def main(puzzle_input):
# Part one
nice_strings = [s for s in puzzle_input if is_nice(s)]
print(f"Part one, number of nice strings: {len(nice_strings)}")
# Part two
new_nice_strings = [s for s in puzzle_input if is_new_nice(s)]
print(f"Part two, number of new nice strings: {len(new_nice_strings)}")
if __name__ == "__main__":
puzzle_input = aoc.load_puzzle_input(2015, 5).splitlines()
main(puzzle_input) | 2015/python/2015-05.py | 0.803097 | 0.516047 |
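A few quick, self-verifying examples of the two part-two regexes used in is_new_nice:
import re
assert re.search(r"(.{2}).*\1", "xyxy")         # the pair 'xy' repeats without overlapping
assert re.search(r"(.{2}).*\1", "aaa") is None  # overlapping 'aa' does not count
assert re.search(r"(.).\1", "xyx")              # 'x' repeats after exactly one letter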
from pennylane import math
from ..postprocessing import even_parity_ids
from ..qnodes import joint_probs_qnode, local_parity_expval_qnode
def post_process_I_3322_joint_probs(probs_vec):
"""Applies post-processing to multi-qubit probabilities in order to coarse-grain
them into the dichotomic parity observables required by the :math:`I_{3322}` inequality.
An :math:`N`-qubit circuit has :math:`2^N` measurement outcomes.
To construct the joint probability :math:`P(00|xy)` for binary outputs, the joint
probabilities can be partitioned into two sets, :math:`\\{Even\\}` and :math:`\\{Odd\\}`, which
denote the set of *Even* and *Odd* parity bit strings.
The :math:`2^N` joint probabilities are expressed as :math:`P(\\vec{a},\\vec{b}|x,y)` where
:math:`\\vec{a}` and :math:`\\vec{b}` are each :math:`N`-bit strings.
Since the :math:`I_{3322}` inequality only requires dichotomic probabilities :math:`P(00|xy)`,
our post-processing only needs to calculate this value.
To reduce the joint probabilities :math:`P(\\vec{a},\\vec{b}|x,y)` to dichotomic probabilities
:math:`P(00|x,y)` we aggregate the probabilities of even parity bit strings with
.. math::
P(00|xy) = \\{Even\\}_A\\{Even\\}_B
= \\sum_{\\vec{a}\\in \\{Even\\}} \\sum_{\\vec{b}\\in\\{Even\\}} P(\\vec{a},\\vec{b}|x,y).
:param n_qubits: The number of wires measured to obtain the joint probabilities.
:type n_qubits: int
:param probs_vec: A probability vector obtained by measuring all wires in the
computational basis.
:type probs_vec: list[float]
:returns: The dichotomic probability :math:`P(00|xy)`.
"""
n_local_qubits = int(math.log2(len(probs_vec)) / 2)
probs = math.reshape(probs_vec, (2**n_local_qubits, 2**n_local_qubits))
even_ids = even_parity_ids(n_local_qubits)
return sum([sum([probs[a, b] for b in even_ids]) for a in even_ids])
def I_3322_bell_inequality_cost(network_ansatz, **qnode_kwargs):
"""Constructs a cost function that maximizes the score of the :math:`I_{3322}` Bell inequality.
:param network_ansatz: A ``NetworkAnsatz`` class specifying the quantum network simulation.
:type network_ansatz: NetworkAnsatz
:returns: A cost function evaluated as ``cost(scenario_settings)`` where
the ``scenario_settings`` are obtained from the provided
``network_ansatz`` class.
"""
I_3322_joint_probs_qnode = joint_probs_qnode(network_ansatz, **qnode_kwargs)
I_3322_local_expval_qnode = local_parity_expval_qnode(network_ansatz, **qnode_kwargs)
def cost(scenario_settings):
score = 0
for (x, y, mult) in [
(0, 0, 1),
(0, 1, 1),
(0, 2, 1),
(1, 0, 1),
(1, 1, 1),
(1, 2, -1),
(2, 0, 1),
(2, 1, -1),
]:
settings = network_ansatz.qnode_settings(scenario_settings, [0], [x, y])
probs_vec_xy = I_3322_joint_probs_qnode(settings)
prob00_xy = post_process_I_3322_joint_probs(probs_vec_xy)
score += mult * prob00_xy
settings_00 = network_ansatz.qnode_settings(scenario_settings, [0], [0, 0])
settings_11 = network_ansatz.qnode_settings(scenario_settings, [0], [1, 1])
expval_00 = I_3322_local_expval_qnode(settings_00)
expval_11 = I_3322_local_expval_qnode(settings_11)
# - P_A(0|0)
score += -1 * (expval_00[0] + 1) / 2
# - 2 * P_B(0|0)
score += -2 * (expval_00[1] + 1) / 2
# - P_B(0|1)
score += -1 * (expval_11[1] + 1) / 2
return -(score)
return cost | src/qnetvo/cost/I_3322_bell_inequality.py | 0.899817 | 0.790449 |
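A plain-Python sketch of the parity coarse-graining performed by post_process_I_3322_joint_probs; even_parity_ids is reimplemented here from its apparent behavior, which is an assumption.
import math

def even_parity_ids(n):
    # bit strings of length n with an even number of 1s
    return [i for i in range(2 ** n) if bin(i).count('1') % 2 == 0]

def p00(probs_vec):
    n = int(math.log2(len(probs_vec)) / 2)
    dim = 2 ** n
    even = even_parity_ids(n)
    return sum(probs_vec[a * dim + b] for a in even for b in even)

# With one qubit per party, P(00|xy) is just the first entry.
assert p00([0.7, 0.1, 0.1, 0.1]) == 0.7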
from regression_tests import *
class bin_1(Test):
settings = TestSettings(
input='1-init_bin-bd92ce74.unpacked.elf',
args='--backend-no-opts'
)
def test_for_some_random_imported_functions(self):
assert self.out_c.has_comment_matching(r'.*int32_t.*\*\*.*__ctype_toupper_loc\(.*')
assert self.out_c.has_comment_matching(r'.*int __xstat\(.*')
assert self.out_c.has_comment_matching(r'.*int munmap\(.*')
assert self.out_c.has_comment_matching(r'.*long.*ptrace\(.*')
assert self.out_c.has_comment_matching(r'.*char.*\*.*strcat\(.*')
assert self.out_c.has_comment_matching(r'.*pid_t waitpid\(.*')
class bin_2(Test):
settings = TestSettings(
input='2-backdoor-2acf2bc7.so.elf'
)
def test_for_some_random_imported_functions(self):
assert self.out_c.has_comment_matching(r'.*int32_t.*\*\*.*__ctype_toupper_loc\(.*')
assert self.out_c.has_comment_matching(r'.*size_t fread\(.*')
assert self.out_c.has_comment_matching(r'.*int mkdir\(.*')
assert self.out_c.has_comment_matching(r'.*FILE \* popen\(.*')
assert self.out_c.has_comment_matching(r'.*long.*strtoul\(.*')
assert self.out_c.has_comment_matching(r'.*pid_t waitpid\(.*')
class bin_3(Test):
settings = TestSettings(
input='3-backdoor-753dc7cd.unpacked.elf',
args='--backend-no-opts'
)
def test_check_for_strings(self):
assert self.out_c.has_string_literal( '~`!@#$%^&*()_+{}|[]\\\\;:\'\\"<>,./?' )
assert self.out_c.has_string_literal( '/proc/sys/kernel/random/boot_id' )
assert self.out_c.has_string_literal( 'Rule: .*' )
assert self.out_c.has_string_literal( '%s/.config' )
assert self.out_c.has_string_literal( '%s/%s' )
assert self.out_c.has_string_literal( '%s/%s.%s.config' )
assert self.out_c.has_string_literal( '%s/.kde' )
assert self.out_c.has_string_literal( '/proc/self/cmdline' )
assert self.out_c.has_string_literal( '%s/autostart' )
assert self.out_c.has_string_literal( '%s/%s%s.desktop' )
assert self.out_c.has_string_literal( '%s/Autostart' )
assert self.out_c.has_string_literal( 'killall unix-daemon' )
def test_for_some_random_imported_functions(self):
assert self.out_c.has_comment_matching(r'.*int __xstat\(.*')
assert self.out_c.has_comment_matching(r'.*int.*connect\(.*')
assert self.out_c.has_comment_matching(r'.*void.*\*.*memcpy\(.*')
assert self.out_c.has_comment_matching(r'.*void pthread_exit\(.*')
assert self.out_c.has_comment_matching(r'.*sig.*signal\(.*')
assert self.out_c.has_comment_matching(r'.*int uname\(.*')
class bin_4(Test):
settings = TestSettings(
input='4-form_grabber-b794ce9e.so.elf'
)
def test_for_some_random_imported_functions(self):
assert self.out_c.has_comment_matching(r'.*int32_t.*\*\*.*__ctype_toupper_loc\(.*')
assert self.out_c.has_comment_matching(r'.*void.*\*.*dlsym\(.*')
assert self.out_c.has_comment_matching(r'.*int ftruncate\(.*')
assert self.out_c.has_comment_matching(r'.*void.*\*.*mmap\(.*')
assert self.out_c.has_comment_matching(r'.*int pthread_mutex_unlock\(.*')
assert self.out_c.has_comment_matching(r'.*long.*sysconf\(.*') | bugs/hand-of-thief-malware-output-quality/test.py | 0.478285 | 0.375936 |
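A hypothetical pattern for extending this suite with another sample; the input name and regex below are placeholders, not real test data.
class bin_5(Test):
    settings = TestSettings(
        input='5-sample.elf'
    )
    def test_for_some_random_imported_functions(self):
        assert self.out_c.has_comment_matching(r'.*int close\(.*')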
import ConfigParser, urllib, urllib2, time, datetime, threading, os, requests
import xml.etree.ElementTree as ET
config = ConfigParser.ConfigParser()
config.read('config.cfg')
#Set debug level
if config.get('config','debug') == 'yes': debug = True
else: debug = False
if config.get('config','advanced_debug') == 'yes': advancedDebug = True
else: advancedDebug = False
#Processes Dictionary of unique hashes returning a Dictionary of unique hashes with their WildFire results. The "list" input is only used to know where to copy a new file from.
def WildFire(list, unique, tanium_handler):
if debug: print ("\n\nMODULE WILDFIRE")
if debug: print ("FUNCTION wildfire.WildFire")
if debug: print (" Incuded hashes: " + str(len(list)))
if debug: print (" Unique included hashes: " + str(len(unique)))
new = {}
updated = {}
upload = {}
uploaded = {}
cached = {}
not_found = {}
wf_hashes = {}
uploaded_count = 0
#Read in WF results from local file cache
cached = Cache()
#If unique hash is not in cache add it to the new dictionary
for hash in unique:
if not hash in cached:
new[hash] = ''
#Check new hashes against WF
updated, upload = Check(new, 'no')
wf_upload = config.get('config','wf_upload')
if wf_upload != 'yes':
if debug: print ("\nFUNCTION wildfire.WildFire: Skipping file upload")
elif len(upload) > 0:
upload_list = Copy(list, upload, tanium_handler)
time.sleep(60)
uploaded_count = Upload(upload_list)
wf_wait_time = float(config.get('config','wf_wait_time'))
if debug: print ("\nFUNCTION wildfire.WildFire: Sleeping " + str(wf_wait_time) + " seconds.")
time.sleep(wf_wait_time)
uploaded, not_found = Check(upload, 'yes')
#Combine updated & uploaded Dictionaries then update the local file cache
updated.update(uploaded)
Update_Cache(updated)
#Combine cached and updated Dictionaries into wf_hashes and compute stats
wf_hashes.update(cached)
wf_hashes.update(updated)
wf_stats = {'wf_cache':len(unique)-len(new), 'wf_new':len(new), 'wf_uploaded':uploaded_count}
#Download malware reports
Download_Reports(wf_hashes)
return(wf_hashes, wf_stats)
#Read in WF results from local file cache
def Cache():
if debug: print ("\nFUNCTION wildfire.Cache")
file = open('wf_cache.txt')
hashes = {}
for line in file:
hash = line.rstrip()
list = hash.split(',')#Hash, Malware Status
hashes[list[0]] = [list[1], 'no', 'no']
file.close()
if debug: print (" Total hashes in cache: " + str(len(hashes)))
return(hashes)
#Update local cache file with new WF results
def Update_Cache(updated):
if debug: print ("\nFUNCTION wildfire.UpdateCache")
if debug: print (" Hashes to add to cache: " + str(len(updated)))
if len(updated)>0:
file = open('wf_cache.txt', 'a')
for hash in updated:
malware = updated[hash][0]
if (malware == 'yes' or malware == 'no' or malware == 'grayware'):
line = hash + ',' + malware + '\n'
file.write(line)
file.close()
#Check new hashes against WF
def Check(new, wf_upload):
if debug: print ("\nFUNCTION wildfire.Check")
if debug: print (" Hashes to check: " + str(len(new)))
updated = {}
upload = {}
for hash in new:
#Sample File: https://wildfire.paloaltonetworks.com/publicapi/test/pe
#malware no: 3ee766cf1827c5afa1ac3cccdd29d629
#malware yes: 2c4bb9f9cf82f797eba0e2cf26fc5275
#grayware: 455d55000d14b5cdd9e7e6773887a24b
#hash not found: 65ea57712340c09b1b0c427b4848ae05
try:
time.sleep(1)
malware = ''
apikey = config.get('config','wf_apikey')
url = config.get('config','wf_url')
values = {'hash' : hash,
'format' : 'xml',
'apikey' : apikey }
data = urllib.urlencode(values)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
results = response.read()
root = ET.fromstring(results)
#Return malware status from XML
malware = root[1][0].text
updated[hash] = [malware, 'yes', wf_upload]
except (urllib2.HTTPError) as malware:
upload[hash] = 'not found'
if advancedDebug: print (' ' + hash + ', ' + str(malware))
return(updated, upload)
#Copy files from source systems to central share. Share needs to be writable by Authenticated Computers.
def Copy(list, upload, tanium_handler):
if debug: print("\nFUNCTION wildfire.Copy")
if debug: print(" Files to copy: " + str(len(upload)))
upload_list = []
unique = {}
for i in list:
hash = i[4]
if hash in upload:
if not hash in unique:
unique[hash] = ''
upload_list.append(i)
length = len(upload_list)
x = 0
threads = []
while x < length:
try:
file = upload_list[x]
endpoint = file[0]
path = file[2] + "\\" + file[1]
#Check if list will be out of bounds
if x+1 < length:
next_endpoint = upload_list[x+1][0]
#If the next entry is for the same Endpoint append the file path so only one copy file package action is run per endpoint.
while endpoint == next_endpoint and x+1 < length:
x += 1
file = upload_list[x]
add_path = file[2] + "\\" + file[1]
path += '\\,' + add_path  # a literal '\,' separates multiple paths in the package parameter
if x+1 < length:
next_endpoint = upload_list[x+1][0]
#Use threading to call the copy file package so copies can run in parallel, since the Tanium targeting question takes 2 minutes to complete. https://pymotw.com/2/threading/
t = threading.Thread(target=Tanium_Copy, args=(tanium_handler,endpoint,path))
t.setDaemon(True)
threads.append(t)
time.sleep(5)
t.start()
x+=1
except:
print ("wildfire.Copy function FAILED")
return(upload_list)
#Execute Tanium's Copy File package
def Tanium_Copy(handler,endpoint,path):
if debug: print ("\nFUNCTION Tanium_Copy")
try:
if debug: print (' ' + endpoint + ': ' + path)
share_name = config.get('config','share_name')
kwargs = {}
kwargs["run"] = True
kwargs["action_filters"] = u'Computer Name, that contains:' + endpoint
kwargs["package"] = u'Copy Tools - Copy Files to Central Location{$1=SMB,$2=' + share_name + ',$3=0,$4=0,$5=' + path + ',$6=No,$7=0,$8=files}'
#This will take 2 minutes for tanium to complete the question
handler.deploy_action(**kwargs)
#response = handler.deploy_action(**kwargs)
if debug: print ("\nFUNCTION copyFileTanium END " + endpoint)
except:
print ("wildfire.Tanium_Copy function FAILED")
#Upload files for analysis to WildFire
def Upload(upload_list):
if debug: print ("\nFUNCTION wildfire.upload")
if debug: print (" Files to upload: " + str(len(upload_list)))
uploaded_count = 0
url = config.get('config','wf_submit')
now = datetime.datetime.now()
apikey = config.get('config','wf_apikey')
max_size = int(config.get('config','wf_size'))
local_share_path = config.get('config','local_share_path')
for file in upload_list:
try:
path = file[2] + "\\" + file[1]
computer = file[0]
name = computer.split('.', 1)[0]
folder = str(now.year) + '-' + '{:02d}'.format(now.month) + '-' + '{:02d}'.format(now.day) + '-' + name
path = local_share_path + "\\" + folder + path[2:]
path = path.replace("\\\\","\\")
#Verify the file exists and is less than the max size before uploading
exists = os.path.isfile(path)
size = os.path.getsize(path) < max_size
if(exists and size):
if advancedDebug: print "Uploading " + computer + ": " + path + " - " + file[2]
files = {'file': open(path, 'rb')}
time.sleep(3)
r = requests.post(url, files=files, data={'apikey':apikey})
#Count hashes of files uploaded to WildFire
uploaded_count += 1
if debug:
print (path)
print (file[2]) #Hash
print (r)
except:
print ("wildfire.Upload function FAILED for " + computer + ": " + path)
return(uploaded_count)
#Download WildFire PDF reports for all malware hashes
def Download_Reports(wf_hashes):
if debug: print ("\nFUNCTION wildfire.Download_Reports")
apikey = config.get('config','wf_apikey')
url = config.get('config','wf_url')
report_count = 0
for hash in wf_hashes:
try:
md5 = hash
wf_malware = wf_hashes[md5][0]
filename = md5 + '.pdf'
exists = os.path.isfile('reports\\' + filename)
if wf_malware == 'yes' and not exists:
values = {'hash' : md5,
'format' : 'pdf',
'apikey' : apikey }
data = urllib.urlencode(values)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
CHUNK = 16 * 1024
with open('reports\\' + filename, 'wb') as f:
while True:
chunk = response.read(CHUNK)
if not chunk:
break
f.write(chunk)
report_count += 1
except:
print (" Download_Reports failed for: " + md5)
if debug: print (" Malware reports downloaded: " + str(report_count)) | wildfire.py | import ConfigParser, urllib, urllib2, time, datetime, threading, os, requests
import xml.etree.ElementTree as ET
config = ConfigParser.ConfigParser()
config.read('config.cfg')
#Set debug level
if config.get('config','debug') == 'yes': debug = True
else: debug = False
if config.get('config','advanced_debug') == 'yes': advancedDebug = True
else: advancedDebug = False
#Processes Dictionary of unique hashes returning a Dictionary of unique hashes with their WildFire results. The "list" input is only used to know where to copy a new file from.
def WildFire(list, unique, tanium_handler):
if debug: print ("\n\nMODULE WILDFIRE")
if debug: print ("FUNCTION wildfire.WildFire")
if debug: print (" Incuded hashes: " + str(len(list)))
if debug: print (" Unique included hashes: " + str(len(unique)))
new = {}
updated = {}
upload = {}
uploaded = {}
cached = {}
not_found = {}
wf_hashes = {}
uploaded_count = 0
#Read in WF results from local file cache
cached = Cache()
#If unique hash is not in cache add it to the new dictionary
for hash in unique:
if not hash in cached:
new[hash] = ''
#Check new hashes against WF
updated, upload = Check(new, 'no')
wf_upload = config.get('config','wf_upload')
if wf_upload != 'yes':
if debug: print ("\nFUNCTION wildfire.WildFire: Skipping file upload")
elif len(upload) > 0:
upload_list = Copy(list, upload, tanium_handler)
time.sleep(60)
uploaded_count = Upload(upload_list)
wf_wait_time = float(config.get('config','wf_wait_time'))
if debug: print ("\nFUNCTION wildfire.WildFire: Sleeping " + str(wf_wait_time) + " seconds.")
time.sleep(wf_wait_time)
uploaded, not_found = Check(upload, 'yes')
#Combine updated & uploaded Dictionaries then update the local file cache
updated.update(uploaded)
Update_Cache(updated)
#Combine cached and updated Dictionaries into wf_hashes and compute stats
wf_hashes.update(cached)
wf_hashes.update(updated)
wf_stats = {'wf_cache':len(unique)-len(new), 'wf_new':len(new), 'wf_uploaded':uploaded_count}
#Download malware reports
Download_Reports(wf_hashes)
return(wf_hashes, wf_stats)
#Read in WF results from local file cache
def Cache():
if debug: print ("\nFUNCTION wildfire.Cache")
file = open('wf_cache.txt')
hashes = {}
for line in file:
hash = line.rstrip()
list = hash.split(',')#Hash, Malware Status
hashes[list[0]] = [list[1], 'no', 'no']
file.close()
if debug: print (" Total hashes in cache: " + str(len(hashes)))
return(hashes)
#Update local cache file with new WF results
def Update_Cache(updated):
if debug: print ("\nFUNCTION wildfire.UpdateCache")
if debug: print (" Hashes to add to cache: " + str(len(updated)))
if len(updated)>0:
file = open('wf_cache.txt', 'a')
for hash in updated:
malware = updated[hash][0]
if (malware == 'yes' or malware == 'no' or malware == 'grayware'):
line = hash + ',' + malware + '\n'
file.write(line)
file.close()
#Check new hashes against WF
def Check(new, wf_upload):
if debug: print ("\nFUNCTION wildfire.Check")
if debug: print (" Hashes to check: " + str(len(new)))
updated = {}
upload = {}
for hash in new:
#Sample File: https://wildfire.paloaltonetworks.com/publicapi/test/pe
#malware no: 3ee766cf1827c5afa1ac3cccdd29d629
#malware yes: 2c4bb9f9cf82f797eba0e2cf26fc5275
#grayware: 455d55000d14b5cdd9e7e6773887a24b
#hash not found: 65ea57712340c09b1b0c427b4848ae05
try:
time.sleep(1)
malware = ''
apikey = config.get('config','wf_apikey')
url = config.get('config','wf_url')
values = {'hash' : hash,
'format' : 'xml',
'apikey' : apikey }
data = urllib.urlencode(values)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
results = response.read()
root = ET.fromstring(results)
#Return malware status from XML
malware = root[1][0].text
updated[hash] = [malware, 'yes', wf_upload]
except (urllib2.HTTPError) as malware:
upload[hash] = 'not found'
if advancedDebug: print (' ' + hash + ', ' + str(malware))
return(updated, upload)
#Copy files from source systems to central share. Share needs to be writable by Authenticated Computers.
def Copy(list, upload, tanium_handler):
if debug: print("\nFUNCTION wildfire.Copy")
if debug: print(" Files to copy: " + str(len(upload)))
upload_list = []
unique = {}
for i in list:
hash = i[4]
if hash in upload:
if not hash in unique:
unique[hash] = ''
upload_list.append(i)
length = len(upload_list)
x = 0
threads = []
while x < length:
try:
file = upload_list[x]
endpoint = file[0]
path = file[2] + "\\" + file[1]
#Check if list will be out of bounds
if x+1 < length:
next_endpoint = upload_list[x+1][0]
#If the next entry is for the same Endpoint append the file path so only one copy file package action is run per endpoint.
while endpoint == next_endpoint and x+1 < length:
x += 1
file = upload_list[x]
add_path = file[2] + "\\" + file[1]
path += '\,' + add_path
if x+1 < length:
next_endpoint = upload_list[x+1][0]
#Use threading to call copy file package so they can be run in paralell due to the Tanium targeting question taking 2 minuets to complete. https://pymotw.com/2/threading/
t = threading.Thread(target=Tanium_Copy, args=(tanium_handler,endpoint,path))
t.setDaemon(True)
threads.append(t)
time.sleep(5)
t.start()
x+=1
except:
print ("wildfire.Copy function FAILED")
return(upload_list)
#Execute Tanium's Copy File package
def Tanium_Copy(handler,endpoint,path):
if debug: print ("\nFUNCTION Tanium_Copy")
try:
if debug: print (' ' + endpoint + ': ' + path)
share_name = config.get('config','share_name')
kwargs = {}
kwargs["run"] = True
kwargs["action_filters"] = u'Computer Name, that contains:' + endpoint
kwargs["package"] = u'Copy Tools - Copy Files to Central Location{$1=SMB,$2=' + share_name + ',$3=0,$4=0,$5=' + path + ',$6=No,$7=0,$8=files}'
#This will take 2 minutes for tanium to complete the question
handler.deploy_action(**kwargs)
#response = handler.deploy_action(**kwargs)
if debug: print ("\nFUNCTION copyFileTanium END " + endpoint)
except:
print ("wildfire.Tanium_Copy function FAILED")
#Upload files for analysis to WildFire
def Upload(upload_list):
if debug: print ("\nFUNCTION wildfire.upload")
if debug: print (" Files to upload: " + str(len(upload_list)))
uploaded_count = 0
url = config.get('config','wf_submit')
now = datetime.datetime.now()
apikey = config.get('config','wf_apikey')
max_size = int(config.get('config','wf_size'))
local_share_path = config.get('config','local_share_path')
for file in upload_list:
try:
path = file[2] + "\\" + file[1]
computer = file[0]
name = computer.split('.', 1)[0]
folder = str(now.year) + '-' + '{:02d}'.format(now.month) + '-' + '{:02d}'.format(now.day) + '-' + name
path = local_share_path + "\\" + folder + path[2:]
path = path.replace("\\\\","\\")
#Verify the file exists and is less than the max size before uploading
exists = os.path.isfile(path)
size = os.path.getsize(path) < max_size
if(exists and size):
if advancedDebug: print "Uploading " + computer + ": " + path + " - " + file[2]
files = {'file': open(path, 'rb')}
time.sleep(3)
r = requests.post(url, files=files, data={'apikey':apikey})
#Count hashes of files uploaded to WildFire
uploaded_count += 1
if debug:
print (path)
print (file[2]) #Hash
print (r)
except:
print ("wildfire.Upload function FAILED for " + computer + ": " + path)
return(uploaded_count)
#Download WildFire PDF reports for all malware hashes
def Download_Reports(wf_hashes):
if debug: print ("\nFUNCTION wildfire.Download_Reports")
apikey = config.get('config','wf_apikey')
url = config.get('config','wf_url')
report_count = 0
for hash in wf_hashes:
try:
md5 = hash
wf_malware = wf_hashes[md5][0]
filename = md5 + '.pdf'
exists = os.path.isfile('reports\\' + filename)
if wf_malware == 'yes' and not exists:
values = {'hash' : md5,
'format' : 'pdf',
'apikey' : apikey }
data = urllib.urlencode(values)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
CHUNK = 16 * 1024
with open('reports\\' + filename, 'wb') as f:
while True:
chunk = response.read(CHUNK)
if not chunk:
break
f.write(chunk)
report_count += 1
except:
print (" Download_Reports failed for: " + md5)
if debug: print (" Malware reports downloaded: " + str(report_count)) | 0.156846 | 0.081556 |
import logging
from util.aws import TrustedAdvisor, EC2Wrapper
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class LowUseReportParser:
"""Parses the Low Use report
Attributes:
session (obj): Boto3 AWS Session Object
advisor (obj): Wrapper for AWS TrustedAdvisor
ec2 (obj): Wrapper for AWS EC2
"""
def __init__(self, session):
self.session = session
self.advisor = TrustedAdvisor()
self.ec2 = EC2Wrapper(session)
def parse_low_use_report(self):
"""Parses the report
Returns:
list of dict: List of Low use instances with associated metadata
"""
report = self.advisor.get_low_use_instances()
list_of_instances = []
for instance in report:
instance_metadata = self.parse_metadata(instance['metadata'])
if not instance_metadata:
continue
list_of_instances.append(instance_metadata)
return list_of_instances
def parse_metadata(self, metadata):
"""Parses instance metadata
This function mainly formats the metadata to use key/value pairs instead of indexing for
a better understanding of what the metadata is further downstream
Args:
metadata (:obj:`list`): Metadata from Low Use report
Returns:
dict: Metadata of instance
"""
creator = self.ec2.get_creator_for_instance(metadata[1])
if creator is None:
return {}
usage_logs = metadata[5:19]
instance_usage = self.parse_instance_usage(usage_logs)
instance_metadata = {
'creator': creator,
'region': metadata[0],
'instance_id': metadata[1],
'instance_name': metadata[2],
'instance_type': metadata[3],
'cost': metadata[4],
'cpu_usage': instance_usage[0],
'network_usage': instance_usage[1],
'cpu_average': metadata[-3],
'network_average': metadata[-2],
'days_logged': self.get_number_of_days(metadata[-1])
}
return instance_metadata
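    # Illustrative row layout (an assumption inferred from the indexing above,
    # not from AWS documentation): a Trusted Advisor low-use entry looks like
    #   [region, instance_id, instance_name, instance_type, cost,
    #    <14 daily usage strings>, cpu_average, network_average, '14 days']
    # so metadata[5:19] are the daily entries consumed by parse_instance_usage.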
def parse_instance_usage(self, usage_logs):
"""Parsess instance usage
Args:
usage_logs (:obj:`list` of :obj:`str`): List of unformatted usage over at most 2 weeks
Returns:
list: lists of CPU usage and network IO over the given time frame.
"""
cpu_usage_over_time = []
network_io_over_time = []
for day in usage_logs:
if day is None: continue
usage = day.split(' ')
cpu_usage_over_time.append(usage[0])
network_io_over_time.append(usage[2])
return cpu_usage_over_time, network_io_over_time
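    # Format sketch (an assumption based on the split/indexing above): each
    # daily entry is expected to split into at least three space-separated
    # tokens, e.g. '2.1% cpu 0.33MB', with CPU at index 0 and network IO at
    # index 2.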
def get_number_of_days(self, day_string):
"""Get number of days in report
Used to determine how many days the instance has data for
Args:
day_string (str): String that has the amount of days
Returns:
int: number of days with data
"""
        return int(day_string.split(' ')[0]) | low_use/report_parser.py | 0.820505 | 0.250008 |
from __future__ import print_function, absolute_import, division
import logging
import time
from functools import wraps
def get_item_from_module(module_name, item_name):
"""Load classes/modules/functions/... from given config"""
try:
module = __import__(module_name, fromlist=[item_name])
item = getattr(module, item_name)
except ImportError as error:
message = 'Module "{modulename}" could not be loaded: {e}'
raise Exception(message.format(
modulename=module_name, e=error))
except AttributeError as error:
message = 'No item "{itemname}" in module "{modulename}": {e}'
raise Exception(message.format(
modulename=module_name,
itemname=item_name,
e=error))
return item
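# Illustrative call (standard-library names used only as an example):
#   loads = get_item_from_module('json', 'loads')
#   loads('{"a": 1}')  # -> {'a': 1}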
def dict_is_subset(small_dict, big_dict):
"""Return True if small_dict is a subset of big_dict, else False
For example, consider the dicts
small = {'a':42}
big = {'a':42, 'b': 43}
small is a subset of big because every key in small also appears in the
big and has the same value. The dict
small2 = {'a': 43}
is not a subset of big, because the values for 'a' differ.
Recursive comparison is supported for dicts of dicts.
One use case for this function is to filter USofA account data for accounts
that match certain criteria. See unit tests for examples.
"""
if not isinstance(big_dict, dict):
# This may happen when the function recursively calls itself. Other
# container types also support the 'in' operator, e.g.
# a = [42]
# 42 in a
# but
# a[42]
# will cause an exception.
return False
for key, value in small_dict.items():
if key not in big_dict:
return False
if isinstance(value, dict):
if not dict_is_subset(value, big_dict[key]):
return False
else:
if value != big_dict[key]:
return False
return True
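# Minimal self-check for dict_is_subset (a sketch added here, not part of the
# original module):
if __name__ == '__main__':
    assert dict_is_subset({'a': 42}, {'a': 42, 'b': 43})
    assert not dict_is_subset({'a': 43}, {'a': 42, 'b': 43})
    assert dict_is_subset({'x': {'y': 1}}, {'x': {'y': 1, 'z': 2}})  # recursive case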
def levelname_to_integer(level_name):
"""Translate human-readable log level name to an integer"""
level_name = level_name.lower()
level_translation = {'debug': logging.DEBUG, 'info': logging.INFO,
'warning': logging.WARNING, 'error': logging.ERROR,
'critical': logging.CRITICAL}
if level_name not in level_translation:
valid_levels = ", ".join(level_translation.keys())
raise Exception('Log level "{0}" is invalid, use one of {1}.'.format(
level_name, valid_levels))
return level_translation[level_name]
def retry(*args, **kwargs):
"""Retry the function call until it succeeds"""
attempts = kwargs.pop('attempts', 3)
delay = kwargs.pop('delay', 0)
if kwargs:
raise Exception("Unknown kwargs given: " + ", ".join(kwargs.keys()))
def retry_decorator(old_function):
"""Retry the function call until it succeeds"""
@wraps(old_function)
def call_with_retry(*args, **kwargs):
for attempt in range(attempts):
try:
return old_function(*args, **kwargs)
except Exception:
if attempt == attempts - 1:
raise
time.sleep(delay)
return call_with_retry
if args:
if len(args) > 1:
raise Exception("You cannot pass more than 1 argument to retry()")
return retry_decorator(args[0])
    return retry_decorator | src/main/python/pils/pils.py | 0.663887 | 0.179728 |
import itertools
def get_comp(graph, node):
"""Returns a set of nodes in this node's component"""
    unexplored = {node}  # a one-element set; set(node) would iterate over node
explored = set()
while unexplored:
node = unexplored.pop()
explored.add(node)
new_nbrs = graph.edges[node] - explored
unexplored.update(new_nbrs)
return explored
class Graph(object):
def __init__(self, nodes):
self.nodes = nodes
# {node -> set(node)}
self.edges = {n:set() for n in nodes}
def add_edge(self, n1, n2):
self.edges[n1].add(n2)
self.edges[n2].add(n1)
def connected_comps(self):
"""Returns a [set(node)]"""
comps = []
unvisited = set(self.nodes)
while unvisited:
# Start new component if necessary
node = unvisited.pop()
comp = get_comp(self, node)
unvisited.difference_update(comp)
comps.append(comp)
return comps
    def iter_comps(self):
        for c in self.connected_comps():
            # skip trivial components: a single vector of length 1
            if len(c) == 1 and next(iter(c)).len == 1:
                continue
            yield list(c)
class Vector(object):
def __init__(self, col, top_row, color, length):
self.col = col
self.top_row = top_row
self.color = color
self.len = length
def cell(self):
return (self.top_row, self.col)
@property
def bottom_row(self):
return self.top_row + self.len - 1
def intersects(self, other):
if self.color != other.color:
return False
if abs(self.col - other.col) != 1:
return False
if self.top_row > other.bottom_row:
return False
if self.bottom_row < other.top_row:
return False
return True
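    # Intersection sketch (illustrative values): two vectors touch when they
    # share a color, sit in adjacent columns, and their row spans overlap, e.g.
    #   a = Vector(col=0, top_row=0, color='r', length=3)   # rows 0..2
    #   b = Vector(col=1, top_row=2, color='r', length=2)   # rows 2..3
    # a.intersects(b) is True: same color, adjacent columns, rows overlap at 2.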
def grid_to_vecs(grid):
    # each column is a list of vectors (maximal same-color runs) from the top
    n_cols = len(grid[0]) if grid else 0
    columns = [[] for _ in range(n_cols)]
    for row_idx, row in enumerate(grid):
        for col_idx, color in enumerate(row):
            if color == '-':
                continue
            col = columns[col_idx]
            if col and col[-1].color == color and col[-1].top_row + col[-1].len == row_idx:
                # extend the current run
                col[-1].len += 1
            else:
                # start a new run at this cell
                col.append(Vector(col_idx, row_idx, color, 1))
    return columns
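# Example (sketch): for the 2x2 grid [['r', '-'], ['r', 'b']], grid_to_vecs
# returns columns[0] = [Vector(col=0, top_row=0, color='r', length=2)] and
# columns[1] = [Vector(col=1, top_row=1, color='b', length=1)]; each maximal
# vertical run of one color becomes a single Vector.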
def cols_to_graph(cols):
g = Graph([vec for col in cols for vec in col])
    for i in range(len(cols) - 1):
c1 = cols[i]
c2 = cols[i + 1]
for v1 in c1:
for v2 in c2:
if v1.intersects(v2):
g.add_edge(v1, v2)
return g
class Board(object):
def __init__(self, grid=None, cols=None):
# each column is a list of vectors starting from the top
if grid:
self.cols = grid_to_vecs(grid)
elif cols:
self.cols = [col[:] for col in cols]
#self.graph = cols_to_graph(self.cols)
def without(self, vecs):
"""Returns a new board with vecs (a set of Vectors) removed."""
new_cols = [col[:] for col in self.cols]
        vecs = sorted(vecs, key=lambda v: v.col)
for col_idx, vs in itertools.groupby(vecs, lambda v: v.col):
col = new_cols[col_idx]
for v in vs:
col.remove(v)
return Board(cols=new_cols)
def explore(board, steps=None):
    """Recursively explores every board reachable by repeatedly removing a
    connected component; steps is the list of (row, col) picks made so far."""
steps = steps or []
graph = cols_to_graph(board.cols)
for comp in graph.iter_comps():
new_board = board.without(comp)
explore(new_board, steps[:] + [comp[0].cell()])
def nextMove(grid):
    board = Board(grid=grid)
    print("")

if __name__ == '__main__':
    num_rows, num_cols, k = [int(i) for i in input().strip().split()]
    grid = [list(input().strip()) for _ in range(num_rows)]
    nextMove(grid) | fun/click_o_mania.py | 0.730963 | 0.386474 |
from __future__ import unicode_literals
from django.db import models
from authentication.models import User, Usergroup
from compute.models import Computer
from image.models import Image, Imagerevision
from managementnode.models import Managementnode
from core.models import State
class Request(models.Model):
stateid = models.ForeignKey(State, db_column='stateid', related_name="rel_si")
userid = models.ForeignKey(User, db_column='userid')
laststateid = models.ForeignKey(State, db_column='laststateid', related_name="rel_laststateid" )
logid = models.IntegerField()
forimaging = models.IntegerField()
test = models.IntegerField()
preload = models.IntegerField()
start = models.DateTimeField()
end = models.DateTimeField()
daterequested = models.DateTimeField()
datemodified = models.DateTimeField(blank=True, null=True)
checkuser = models.IntegerField()
class Meta:
db_table = 'request'
class Serverrequest(models.Model):
name = models.CharField(max_length=255)
serverprofileid = models.SmallIntegerField()
requestid = models.OneToOneField(Request, db_column='requestid')
fixedip = models.CharField(db_column='fixedIP', max_length=15, blank=True, null=True) # Field name made lowercase.
fixedmac = models.CharField(db_column='fixedMAC', max_length=17, blank=True, null=True) # Field name made lowercase.
admingroupid = models.ForeignKey(Usergroup, db_column='admingroupid', blank=True, null=True, related_name="rel_agi")
logingroupid = models.ForeignKey(Usergroup, db_column='logingroupid', blank=True, null=True, related_name="rel_login")
monitored = models.IntegerField()
class Meta:
db_table = 'serverrequest'
# Create your models here.
class Reservation(models.Model):
requestid = models.ForeignKey(Request, db_column='requestid')
computerid = models.ForeignKey(Computer, db_column='computerid')
imageid = models.ForeignKey(Image, db_column='imageid')
imagerevisionid = models.ForeignKey(Imagerevision, db_column='imagerevisionid')
managementnodeid = models.ForeignKey(Managementnode, db_column='managementnodeid')
remoteip = models.CharField(db_column='remoteIP', max_length=15, blank=True, null=True) # Field name made lowercase.
lastcheck = models.DateTimeField(blank=True, null=True)
pw = models.CharField(max_length=40, blank=True, null=True)
connectip = models.CharField(db_column='connectIP', max_length=15, blank=True, null=True) # Field name made lowercase.
connectport = models.SmallIntegerField(blank=True, null=True)
class Meta:
db_table = 'reservation'
class Reservationaccounts(models.Model):
reservationid = models.ForeignKey(Reservation, db_column='reservationid')
userid = models.ForeignKey(User, db_column='userid')
password = models.CharField(max_length=50, blank=True, null=True)
class Meta:
db_table = 'reservationaccounts'
unique_together = (('reservationid', 'userid'),)
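# Illustrative ORM usage (a sketch, not part of the original module): the
# legacy-named foreign keys are queried like any other Django field, e.g.
#   Reservation.objects.filter(requestid=req).select_related('computerid')
# fetches a request's reservations with their computers joined in one query.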
class Blockrequest(models.Model):
name = models.CharField(max_length=80)
imageid = models.ForeignKey(Image, db_column='imageid')
nummachines = models.IntegerField(db_column='numMachines') # Field name made lowercase.
groupid = models.ForeignKey(Usergroup, db_column='groupid', blank=True, null=True)
repeating = models.CharField(max_length=7)
ownerid = models.ForeignKey(User, db_column='ownerid')
managementnodeid = models.ForeignKey(Managementnode, db_column='managementnodeid', blank=True, null=True)
expiretime = models.DateTimeField(db_column='expireTime') # Field name made lowercase.
processing = models.IntegerField()
status = models.CharField(max_length=9)
comments = models.TextField(blank=True, null=True)
class Meta:
db_table = 'blockRequest'
class Blocktimes(models.Model):
blockrequestid = models.ForeignKey(Blockrequest, db_column='blockRequestid') # Field name made lowercase.
start = models.DateTimeField()
end = models.DateTimeField()
processed = models.IntegerField()
skip = models.IntegerField()
class Meta:
db_table = 'blockTimes'
class Blockcomputers(models.Model):
blocktimeid = models.ForeignKey(Blocktimes, db_column='blockTimeid') # Field name made lowercase.
computerid = models.ForeignKey(Computer, db_column='computerid')
imageid = models.ForeignKey(Image, db_column='imageid')
reloadrequestid = models.IntegerField()
class Meta:
db_table = 'blockComputers'
unique_together = (('blocktimeid', 'computerid'),)
class Blockwebdate(models.Model):
blockrequestid = models.ForeignKey(Blockrequest, db_column='blockRequestid') # Field name made lowercase.
start = models.DateField()
end = models.DateField()
days = models.IntegerField(blank=True, null=True)
weeknum = models.IntegerField(blank=True, null=True)
class Meta:
db_table = 'blockWebDate'
class Blockwebtime(models.Model):
blockrequestid = models.ForeignKey(Blockrequest, db_column='blockRequestid') # Field name made lowercase.
starthour = models.IntegerField()
startminute = models.IntegerField()
startmeridian = models.CharField(max_length=2)
endhour = models.IntegerField()
endminute = models.IntegerField()
endmeridian = models.CharField(max_length=2)
order = models.IntegerField()
class Meta:
        db_table = 'blockWebTime' | reservations/models.py | 0.559049 | 0.08882 |
import functools
import os
from kivy.app import App
from kivy.uix.label import Label
from kivy.uix.popup import Popup
def get_indent_str(indentation):
    '''Return a string consisting of `indentation` spaces
    '''
    return ' ' * indentation
def get_line_end_pos(string, line):
'''Returns the end position of line in a string
'''
_line = 0
_line_pos = -1
_line_pos = string.find('\n', _line_pos + 1)
while _line < line:
_line_pos = string.find('\n', _line_pos + 1)
_line += 1
return _line_pos
def get_line_start_pos(string, line):
'''Returns starting position of line in a string
'''
_line = 0
_line_pos = -1
_line_pos = string.find('\n', _line_pos + 1)
while _line < line - 1:
_line_pos = string.find('\n', _line_pos + 1)
_line += 1
return _line_pos
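# Position sketch (illustrative): for s = 'ab\ncd\nef'
#   get_line_end_pos(s, 0)   -> 2, the index of the '\n' that ends line 0
#   get_line_start_pos(s, 2) -> 5, the index of the '\n' just before line 2
# i.e. both helpers return newline offsets, not offsets within the line.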
def get_indent_level(string):
    '''Returns the indentation of the first indented line of string
    (0 if no line is indented)
    '''
    lines = string.splitlines()
    lineno = 0
    indent = 0
    total_lines = len(lines)
    while lineno < total_lines and indent == 0:
        line = lines[lineno]
        indent = len(line) - len(line.lstrip())
        lineno += 1
    return indent
def get_indentation(string):
'''Returns the number of indent spaces in a string
'''
count = 0
for s in string:
if s == ' ':
count += 1
else:
return count
return count
def get_kivy_designer_dir():
'''This function returns kivy-designer's config dir
'''
user_dir = os.path.join(App.get_running_app().user_data_dir,
'.kivy-designer')
if not os.path.exists(user_dir):
os.makedirs(user_dir)
return user_dir
def show_alert(title, msg, width=500, height=200):
lbl_message = Label(text=msg)
lbl_message.padding = [10, 10]
popup = Popup(title=title,
content=lbl_message,
size_hint=(None, None),
size=(width, height))
popup.open()
def show_message(*args, **kwargs):
'''Shortcut to display a message on status bar
'''
App.get_running_app().root.statusbar.show_message(*args, **kwargs)
def get_designer():
'''Return the Designer instance
'''
return App.get_running_app().root
def ignore_proj_watcher(f):
    '''Function decorator that makes the project watcher ignore file modifications
    '''
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        watcher = App.get_running_app().root.project_watcher
        watcher.stop()
        try:
            return f(*args, **kwargs)
        finally:
            # resume watching even if the wrapped function raises
            watcher.resume_watching()
    return wrapper | Parciales/practicas/kivy-designer-master/designer/helper_functions.py | 0.424173 | 0.237949 |
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
import time
import os
import xlwt,xlrd
# Define some required data
# wps_username=17334033685 # WPS account
# wps_password='<PASSWORD>' # WPS password
url = 'https://www.docer.com/login' # login page URL
FILE = "././" # storage path
def loginAndsearch(url,wps_username,wps_password):
    # 1. Create, configure, and launch Chrome
    # Create a Chrome instance
    options = webdriver.ChromeOptions()
    # Configure the browser
    prefs = {
        'profile.managed_default_content_settings.images': 2, # do not load images
        "credentials_enable_service": False, # browser popups
        "profile.password_manager_enabled": False, # disable the password-manager popup
        "download.default_directory": "e:\\WPSGET"
    }
    options.add_experimental_option('prefs', prefs) # pass the options in
    # Developer mode, so that sites cannot detect that Selenium is in use
    options.add_experimental_option('excludeSwitches', ['enable-automation'])
    # Launch Chrome
    browser = webdriver.Chrome(options=options)
    wait = WebDriverWait(browser, 10) # 10 s timeout
    # 2. Open the page
    browser.get(url)
    # Switch to the iframe
    browser.implicitly_wait(30)
    elementi = browser.find_element_by_class_name('ifm')
    browser.switch_to.frame(elementi)
    # Choose account/password login ("帐号密码" is the on-page tab text)
    account_click='//span[contains(text(),"帐号密码")]'
    browser.find_element_by_xpath(account_click).click()
    # Accept the agreement
    browser.find_element_by_xpath('//div[@class="dialog-footer-ok"]').click()
    # Enter account and password
    browser.find_element_by_xpath('//input[@id="email"]').send_keys(wps_username) # enter account
    browser.find_element_by_xpath('//input[@id="password"]').send_keys(wps_password) # enter password
    # Click the captcha
    browser.implicitly_wait(30)
    browser.find_element_by_xpath('//div[@id="rectMask"]').click()
    # Click login
    time.sleep(5)
    browser.find_element_by_xpath('//a[@id="login"]').click()
    browser.switch_to.default_content()
    # Back to the home page
    time.sleep(2)
    browser.implicitly_wait(30)
    browser.find_element_by_xpath('//a[@class="nav_li_a "]').click()
return browser
# Create a new folder
def creatFile(element, FILE=FILE):
    path = FILE
    title = element
    new_path = os.path.join(path, title)
    if not os.path.isdir(new_path):
        os.makedirs(new_path)
    return new_path
def creatExcel(model_label):
    workbook = xlwt.Workbook(encoding='utf-8') # new workbook
    sheet1 = workbook.add_sheet(model_label) # new sheet
    return workbook,sheet1
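# xlwt sketch (illustrative, made-up values): the pair returned above is used
# as, for example,
#   workbook, sheet = creatExcel('resume')
#   sheet.write(0, 0, 'title'); sheet.write(0, 1, 'https://example.com')
#   workbook.save('resume.xls')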
# PPT page handling
def PPTDownload(browser,model_label):
    print("ppt")
    # Create a folder named after the keyword
    creatFile(model_label, FILE=FILE+'ppt/')
    # Create an Excel index file named after the keyword
    workbook,sheet1=creatExcel(model_label)
    # Open the PPT template page
    browser.implicitly_wait(30)
    browser.find_element_by_xpath('//*[@id="App"]/div[2]/div[2]/ul/li[1]/ul/li[3]/a').click()
    time.sleep(3) # wait for the page to finish loading
    page = browser.find_element_by_xpath('//*[@id="App"]/div[2]/div[4]/div[2]/span').text
    print('Total pages:', page[1:])
    count=0
    for p in range(int(page[1:])):
        num=browser.find_element_by_xpath('//*[@id="App"]/div[2]/div[4]').get_attribute('len')
        print('page'+str(p+1)+':',num,'items')
        time.sleep(2)
        for i in range(1, int(num) + 1):
            browser.implicitly_wait(30)
            href = browser.find_element_by_xpath(
                '//*[@id="App"]/div[2]/div[4]/ul' + '/li[' + str(i) + ']/a').get_attribute('href')
            browser.implicitly_wait(30)
            title = browser.find_element_by_xpath(
                '//*[@id="App"]/div[2]/div[4]/ul' + '/li[' + str(i) + ']/a/div[2]').get_attribute('title')
            # Write the data
            sheet1.write(count, 0, title) # column 0: title
            sheet1.write(count, 1, href) # column 1: link
            count=count+1
            print(title, ' ', href)
        workbook.save(r'././' + 'ppt' + '/' + model_label + '/' + model_label + '.xls')
        browser.implicitly_wait(30)
        browser.find_element_by_xpath('//*[@id="App"]/div[2]/div[4]/div[2]/a[4]').click()
        time.sleep(2)
    return browser
# Word page handling
def WORDDownload(browser,model_label):
    print('word')
    # Create a folder named after the keyword
    creatFile(model_label, FILE=FILE + 'word/')
    # Create an Excel index file named after the keyword
    workbook, sheet1 = creatExcel(model_label)
    # Open the Word template page
    browser.implicitly_wait(30)
    browser.find_element_by_xpath('//*[@id="App"]/div[2]/div[2]/ul/li[1]/ul/li[2]/a').click()
    time.sleep(3) # wait for the page to finish loading
    page = browser.find_element_by_xpath('//*[@id="App"]/div[2]/div[4]/div[2]/span').text
    print('Total pages:', page[1:])
    count = 0
    for p in range(int(page[1:])):
        num = browser.find_element_by_xpath('//*[@id="App"]/div[2]/div[4]').get_attribute('len')
        print('page' + str(p + 1) + ':', num, 'items')
        time.sleep(2)
        for i in range(1, int(num) + 1):
            browser.implicitly_wait(30)
            href = browser.find_element_by_xpath(
                '//*[@id="App"]/div[2]/div[4]/ul' + '/li[' + str(i) + ']/a').get_attribute('href')
            browser.implicitly_wait(30)
            title = browser.find_element_by_xpath(
                '//*[@id="App"]/div[2]/div[4]/ul' + '/li[' + str(i) + ']/a/div[2]').get_attribute('title')
            # Write the data
            sheet1.write(count, 0, title) # column 0: title
            sheet1.write(count, 1, href) # column 1: link
            count = count + 1
            print(title, ' ', href)
        workbook.save(r'././' + 'word' + '/' + model_label + '/' + model_label + '.xls')
        browser.implicitly_wait(30)
        browser.find_element_by_xpath('//*[@id="App"]/div[2]/div[4]/div[2]/a[4]').click()
        time.sleep(2)
    return browser
# Excel page handling
def EXCELDownload(browser,model_label):
    print('excel')
    # Create a folder named after the keyword
    creatFile(model_label, FILE=FILE + 'excel/')
    # Create an Excel index file named after the keyword
    workbook, sheet1 = creatExcel(model_label)
    # Open the Excel template page
    browser.implicitly_wait(30)
    browser.find_element_by_xpath('//*[@id="App"]/div[2]/div[2]/ul/li[1]/ul/li[4]/a').click()
    time.sleep(3) # wait for the page to finish loading
    page = browser.find_element_by_xpath('//*[@id="App"]/div[2]/div[4]/div[2]/span').text
    print('Total pages:', page[1:])
    count = 0
    for p in range(int(page[1:])):
        num = browser.find_element_by_xpath('//*[@id="App"]/div[2]/div[4]').get_attribute('len')
        print('page' + str(p + 1) + ':', num, 'items')
        time.sleep(2)
        for i in range(1, int(num) + 1):
            browser.implicitly_wait(30)
            href = browser.find_element_by_xpath(
                '//*[@id="App"]/div[2]/div[4]/ul' + '/li[' + str(i) + ']/a').get_attribute('href')
            browser.implicitly_wait(30)
            title = browser.find_element_by_xpath(
                '//*[@id="App"]/div[2]/div[4]/ul' + '/li[' + str(i) + ']/a/div[2]').get_attribute('title')
            # Write the data
            sheet1.write(count, 0, title) # column 0: title
            sheet1.write(count, 1, href) # column 1: link
            count = count + 1
            print(title, ' ', href)
        workbook.save(r'././' + 'excel' + '/' + model_label + '/' + model_label + '.xls')
        browser.implicitly_wait(30)
        browser.find_element_by_xpath('//*[@id="App"]/div[2]/div[4]/div[2]/a[4]').click()
        time.sleep(2)
    return browser
# Rename downloaded files and move them into place
def oldTonew_PPT(name,flage_num):
    file_path = "e:/WPSGET/"
    file = os.listdir(file_path)
    # Change the name and path
    time.sleep(0.5)
    for f in range(len(file)):
        type = os.path.splitext(file[f])[1]
        if (type == '.pptx' or type == '.ppt' or type=='.dpt'):
            print(file[f])
            # Old file name
            oldname = file_path + file[f] # os.sep adds the system separator
            # New file name
            newname = 'e:/WPSGET/ppt/'+model_label+'/'+str(flage_num)+ name
            os.rename(oldname, newname) # rename the file with os.rename
            print('Download complete')
def oldTonew_WORD(name,flage_num):
    file_path = "e:/WPSGET/"
    file = os.listdir(file_path)
    # Change the name and path
    time.sleep(0.5)
    for f in range(len(file)):
        type = os.path.splitext(file[f])[1]
        if (type == '.docx' or type == '.doc' or type=='.wpt' or type=='.wps'):
            print(file[f])
            # Old file name
            oldname = file_path + file[f] # os.sep adds the system separator
            # New file name
            newname = 'e:/WPSGET/word/'+model_label+'/'+ str(flage_num)+name
            os.rename(oldname, newname) # rename the file with os.rename
            print('Download complete')
def oldTonew_EXCEL(name,flage_num):
    file_path = "e:/WPSGET/"
    file = os.listdir(file_path)
    # Change the name and path
    time.sleep(0.5)
    for f in range(len(file)):
        type = os.path.splitext(file[f])[1]
        if (type == '.xlsx' or type == '.xls' or type=='.csv' or type=='.ett'):
            print(file[f])
            # Old file name
            oldname = file_path + file[f] # os.sep adds the system separator
            # New file name
            newname = 'e:/WPSGET/excel/'+model_label+'/'+ str(flage_num)+name
            os.rename(oldname, newname) # rename the file with os.rename
            print('Download complete')
# Download
def Download(browser,type,model_label):
    excle_path = './'+type+'/'+model_label+'/'+model_label+'.xls' # path of the Excel index file
    data = xlrd.open_workbook(excle_path) # open the Excel file for reading
    sheet = data.sheet_by_index(0) # select the sheet to read by index
    nrows = sheet.nrows # total number of rows in the sheet
    print(nrows)
    for j in range(int(nrows)):
        name=sheet.row_values(j)[0]
        ll = sheet.row_values(j)[1]
        browser.get(ll)
        time.sleep(1)
        browser.implicitly_wait(40)
        # browser.find_element_by_xpath('//*[@id="dlBtn"]').click()
        ele = browser.find_element_by_xpath('//*[@id="dlBtn"]')
        browser.execute_script("arguments[0].click();", ele)
        if(type=='ppt'):
            time.sleep(15)
            oldTonew_PPT(name,j)
        elif(type=='word'):
            time.sleep(10)
            oldTonew_WORD(name,j)
        elif(type=='excel'):
            time.sleep(10)
            oldTonew_EXCEL(name, j)
    return 1
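# Note (sketch): the JavaScript click above ("arguments[0].click();") is used
# instead of ele.click() because Selenium's native click can fail when the
# download button is overlaid or scrolled out of view; executing the click
# inside the page bypasses those visibility checks.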
###################################################################
if __name__ == "__main__":
    # Prompt for account and password
    wps_username = input("Enter account: ")
    wps_password = input("Enter password: ")
    # Log in to WPS
    browser=loginAndsearch(url,wps_username,wps_password)
    # Enter the search keyword
    model_label = input("Enter keyword: ")
    browser.find_element_by_xpath('//div[@class="m-search-box header-banner__search"]/input').send_keys(
        model_label + '\n')
    time.sleep(1)
    # After the page switch, the window handle must be switched too, or elements cannot be found
    num =browser.window_handles # current window handles
    browser.switch_to.window(num[1]) # run the following steps on handle 2
    # Download by category, page by page
    model_type = input("Enter the template type to download: ")
    if(model_type=='ppt' or model_type=='PPT'):
        browser=PPTDownload(browser,model_label)
        flage=Download(browser,'ppt',model_label)
    elif(model_type=='word' or model_type=='WORD'):
        browser=WORDDownload(browser,model_label)
        flage = Download(browser,'word',model_label)
    elif(model_type=='excel' or model_type=='EXCEL'):
        browser=EXCELDownload(browser,model_label)
        flage = Download(browser, 'excel', model_label)
    else:
        print('Invalid template type') | code.py | 0.111471 | 0.084417 |
import unittest
import time
import numpy as np
from scipy.optimize import check_grad, approx_fprime
import matplotlib.pyplot as plt
from mrftools import *
class TestLearner(unittest.TestCase):
"""Test class for Learner and its subclasses"""
def set_up_learner(self, learner, latent=True):
"""
Provide synthetic training data for a learner.
:param learner: Learner object
:type learner: Learner
:param latent: Boolean value indicating whether to have latent variables in training data
:type latent: bool
:return: None
"""
d = 2
num_states = 4
np.random.seed(0)
if latent:
            labels = [{0: 2, 2: 1},
                      {1: 2, 2: 0},
                      {0: 2, 1: 3},
                      {0: 0, 1: 2, 2: 3}]
else:
labels = [{0: 2, 1: 3, 2: 1},
{0: 3, 1: 2, 2: 0},
{0: 2, 1: 3, 2: 1},
{0: 0, 1: 2, 2: 3}]
models = []
for i in range(len(labels)):
m = self.create_random_model(num_states, d)
models.append(m)
for model, states in zip(models, labels):
learner.add_data(states, model)
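    # Weight-vector sizing used throughout these tests (a derivation, assuming
    # mrftools' usual parameterization): unary weights take num_states * d =
    # 4 * 2 = 8 entries and edge weights num_states**2 * d = 16 * 2 = 32,
    # which is why each test builds np.zeros(8 + 32).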
def test_gradient(self):
"""
Test that the provided gradient is consistent with a numerically estimated gradient when some variables are
latent.
"""
weights = np.zeros(8 + 32)
learner = Learner(MatrixBeliefPropagator)
self.set_up_learner(learner, latent=True)
learner.start_time = time.time()
learner.set_regularization(0.0, 1.0)
gradient_error = check_grad(learner.subgrad_obj, learner.subgrad_grad, weights)
# numerical_grad = approx_fprime(weights, learner.subgrad_obj, 1e-4)
# analytical_grad = learner.subgrad_grad(weights)
# plt.plot(numerical_grad, 'r')
# plt.plot(analytical_grad, 'b')
# plt.show()
print("Gradient error: %e" % gradient_error)
assert gradient_error < 1e-1, "Gradient is wrong"
def test_fully_observed_gradient(self):
"""Test that the gradient is consistent with a numerically estimated gradient when all variables are observed"""
weights = np.zeros(8 + 32)
learner = Learner(MatrixBeliefPropagator)
self.set_up_learner(learner, latent=False)
learner.start_time = time.time()
learner.set_regularization(0.0, 1.0)
gradient_error = check_grad(learner.subgrad_obj, learner.subgrad_grad, weights)
# numerical_grad = approx_fprime(weights, learner.subgrad_obj, 1e-4)
# analytical_grad = learner.subgrad_grad(weights)
# plt.plot(numerical_grad, 'r')
# plt.plot(analytical_grad, 'b')
# plt.show()
print("Gradient error: %f" % gradient_error)
assert gradient_error < 1e-1, "Gradient is wrong"
def test_m_step_gradient(self):
"""Test that the gradient for the EM m-step is consistent with numerically estimated gradient."""
weights = np.zeros(8 + 32)
learner = EM(MatrixBeliefPropagator)
self.set_up_learner(learner, latent=True)
learner.set_regularization(0.0, 1.0)
learner.start = time.time()
learner.e_step(weights)
gradient_error = check_grad(learner.objective, learner.gradient, weights)
# numerical_grad = approx_fprime(weights, learner.objective, 1e-4)
# analytical_grad = learner.gradient(weights)
# plt.plot(numerical_grad, 'r')
# plt.plot(analytical_grad, 'b')
# plt.show()
print("Gradient error: %f" % gradient_error)
assert gradient_error < 1e-1, "Gradient is wrong"
def test_learner(self):
"""Test that the learner decreases the objective value and that it stays non-negative."""
weights = np.zeros(8 + 32)
learner = Learner(MatrixBeliefPropagator)
self.set_up_learner(learner, latent=True)
wr_obj = WeightRecord()
learner.learn(weights, callback=wr_obj.callback)
weight_record = wr_obj.weight_record
time_record = wr_obj.time_record
l = weight_record.shape[0]
        old_obj = np.inf
for i in range(l):
new_obj = learner.subgrad_obj(weight_record[i,:])
assert (new_obj <= old_obj + 1e-8), "subgradient objective is not decreasing"
old_obj = new_obj
assert new_obj >= 0, "Learner objective was not non-negative"
def test_EM(self):
"""Test that the EM learner decreases the objective value and that it stays non-negative."""
weights = np.zeros(8 + 32)
learner = EM(MatrixBeliefPropagator)
self.set_up_learner(learner, latent=True)
wr_obj = WeightRecord()
learner.learn(weights, callback=wr_obj.callback)
weight_record = wr_obj.weight_record
time_record = wr_obj.time_record
l = weight_record.shape[0]
old_obj = learner.subgrad_obj(weight_record[0,:])
new_obj = learner.subgrad_obj(weight_record[-1,:])
assert (new_obj <= old_obj), "EM objective did not decrease"
for i in range(l):
new_obj = learner.subgrad_obj(weight_record[i, :])
assert new_obj >= 0, "EM objective was not non-negative"
def test_paired_dual(self):
"""Test that the paired-dual learner decreases the objective value and that it stays non-negative."""
weights = np.zeros(8 + 32)
learner = PairedDual(MatrixBeliefPropagator)
self.set_up_learner(learner, latent=True)
wr_obj = WeightRecord()
learner.learn(weights, callback=wr_obj.callback)
weight_record = wr_obj.weight_record
time_record = wr_obj.time_record
l = weight_record.shape[0]
old_obj = learner.subgrad_obj(weight_record[0, :])
new_obj = learner.subgrad_obj(weight_record[-1, :])
assert (new_obj <= old_obj), "paired dual objective did not decrease"
for i in range(l):
new_obj = learner.subgrad_obj(weight_record[i, :])
assert new_obj >= 0, "Paired dual objective was not non-negative"
def test_primal_dual(self):
"""Test that the primal-dual learner decreases the objective value and that it stays non-negative."""
weights = np.zeros(8 + 32)
learner = PrimalDual(MatrixBeliefPropagator)
self.set_up_learner(learner, latent=True)
wr_obj = WeightRecord()
learner.learn(weights, callback=wr_obj.callback)
weight_record = wr_obj.weight_record
time_record = wr_obj.time_record
l = weight_record.shape[0]
old_obj = learner.subgrad_obj(weight_record[0, :])
new_obj = learner.subgrad_obj(weight_record[-1, :])
assert (new_obj <= old_obj), "Primal Dual objective did not decrease"
for i in range(l):
new_obj = learner.subgrad_obj(weight_record[i, :])
assert new_obj >= 0, "Primal Dual objective was not non-negative"
def test_overflow(self):
"""Initialize weights to a huge number and see if learner can escape it"""
weights = 1000 * np.random.randn(8 + 32)
learner = Learner(MatrixBeliefPropagator)
self.set_up_learner(learner)
assert not np.isnan(learner.subgrad_obj(weights)), \
"Objective for learner was not a number"
def create_random_model(self, num_states, d):
"""
        Create a random LogLinearModel with random features for all unary and edge potentials
:param num_states: cardinality of each variable
:type num_states: int
:param d: dimensionality of feature vectors
:type d: int
:return: random model
:rtype: LogLinearModel
"""
model = LogLinearModel()
model.declare_variable(0, num_states)
model.declare_variable(1, num_states)
model.declare_variable(2, num_states)
model.set_unary_weights(0, np.random.randn(num_states, d))
model.set_unary_weights(1, np.random.randn(num_states, d))
model.set_unary_weights(2, np.random.randn(num_states, d))
model.set_unary_features(0, np.random.randn(d))
model.set_unary_features(1, np.random.randn(d))
model.set_unary_features(2, np.random.randn(d))
model.set_all_unary_factors()
model.set_edge_factor((0, 1), np.zeros((num_states, num_states)))
model.set_edge_factor((1, 2), np.zeros((num_states, num_states)))
model.set_edge_features((0, 1), np.random.randn(d))
model.set_edge_features((1, 2), np.random.randn(d))
edge_probabilities = dict()
for edge in model.edge_potentials:
edge_probabilities[edge] = 0.75
model.tree_probabilities = edge_probabilities
return model
def test_early_stopping(self):
"""Test that early-stopping timer correctly stops learning"""
weights = np.zeros(8 + 32)
learner = Learner(MatrixBeliefPropagator)
self.set_up_learner(learner, latent=True)
start = time.time()
learner.learn(weights)
default_time = time.time() - start
print("Default learner took %f seconds." % default_time)
start = time.time()
learner.max_time = 0.0001
learner.learn(weights)
early_time = time.time() - start
print("Without early stopping: %f seconds. With early stopping %f seconds." % (default_time, early_time))
assert early_time < default_time, "Early stopping was no faster than default" | tests/test_learner.py | 0.82379 | 0.673007 |
from django import forms as django_forms
from django.test import TestCase
from teamspirit.core.models import Address
from teamspirit.profiles.models import Personal
from teamspirit.users.forms import CustomAuthenticationForm
from teamspirit.users.models import User
class UsersFormsTestCase(TestCase):
"""Test the forms in the app ``users``."""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.address = Address.objects.create(
label_first="1 rue de l'impasse",
label_second="",
postal_code="75000",
city="Paris",
country="France"
)
cls.personal = Personal.objects.create(
phone_number="01 02 03 04 05",
address=cls.address
)
cls.user = User.objects.create_user(
email="<EMAIL>",
password="<PASSWORD>",
first_name="Toto",
last_name="<NAME>",
personal=cls.personal
)
cls.inactive_user = User.objects.create_user(
email="<EMAIL>",
password="<PASSWORD>",
first_name="Titi",
last_name="<NAME>",
is_active=False,
personal=cls.personal
)
def test_custom_authentication_form_success(self):
"""Unit test - app ``users`` - form ``CustomAuthenticationForm`` #1
Test the authentication form with success.
"""
form_data = {
'username': '<EMAIL>',
'password': '<PASSWORD>'
}
form = CustomAuthenticationForm(data=form_data)
self.assertTrue(form.is_valid())
def test_custom_authentication_form_failure_wrong_username(self):
"""Unit test - app ``users`` - form ``CustomAuthenticationForm`` #2
Test the authentication form with a failure mode: a wrong username.
"""
form_data = {
'username': '<EMAIL>',
'password': '<PASSWORD>'
}
form = CustomAuthenticationForm(data=form_data)
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors,
{
'__all__':
[
"Saisissez un Courriel et un mot de passe valides. "
"Remarquez que chacun de ces champs est sensible à la "
"casse (différenciation des majuscules/minuscules)."
]
}
)
def test_custom_authentication_form_failure_wrong_password(self):
"""Unit test - app ``users`` - form ``CustomAuthenticationForm`` #3
Test the authentication form with a failure mode: a wrong password.
"""
form_data = {
'username': '<EMAIL>',
'password': '<PASSWORD>'
}
form = CustomAuthenticationForm(data=form_data)
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors,
{
'__all__':
[
"Saisissez un Courriel et un mot de passe valides. "
"Remarquez que chacun de ces champs est sensible à la "
"casse (différenciation des majuscules/minuscules)."
]
}
)
def test_custom_authentication_form_failure_inactive_user(self):
"""Unit test - app ``users`` - form ``CustomAuthenticationForm`` #4
Test the authentication form with a failure mode: an inactive user.
"""
form_data = {
'username': '<EMAIL>',
'password': '<PASSWORD>'
}
form = CustomAuthenticationForm(data=form_data)
self.assertFalse(form.is_valid())
# The original assertRaisesMessage(...) call built a context manager without
# entering it, so it asserted nothing. Assuming CustomAuthenticationForm
# inherits confirm_login_allowed() from Django's AuthenticationForm, exercise
# it explicitly:
with self.assertRaisesMessage(
    django_forms.ValidationError,
    "Ce compte est inactif."
):
    form.confirm_login_allowed(self.inactive_user) | tests/unit/users/test_forms.py | 0.683736 | 0.255901 |
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
from apiclient.http import MediaIoBaseDownload
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from io import StringIO
import io,os,codecs
#=========================================================
#CONVERT THE PDF STREAM TO UTF8 TEXT
def pdf_to_text2(fstream):
rsrcmgr = PDFResourceManager()
sio = StringIO()
codec = 'utf-8'
laparams = LAParams()
device = TextConverter(rsrcmgr, sio, codec=codec, laparams=laparams)
interpreter = PDFPageInterpreter(rsrcmgr, device)
for page in PDFPage.get_pages(fstream):
try:
    interpreter.process_page(page)
except Exception:
    # Skip pages pdfminer cannot parse instead of aborting the whole file.
    print('COULD NOT PROCESS PAGE')
text = sio.getvalue()
device.close()
sio.close()
return text
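#Illustrative sketch (added here; the file name is an assumption): the
#converter works on any binary stream, so a local PDF can be processed the
#same way as a downloaded Drive file.
def example_local_pdf(path='example.pdf'):
    with open(path, 'rb') as fstream:
        return pdf_to_text2(fstream)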
"""
=========================================================
CONVERT ALL THE PDF FILES IN THE GIVEN FOLDER INTO UTF8 TXT FILES
"""
def processFolder(folder_id,service):
if not os.path.exists(folder_id):os.mkdir(folder_id)
# Call the Drive v3 API
results = service.files().list(q="'"+folder_id+"' in parents",
pageSize=10, fields="nextPageToken, files(id, name)").execute()
items = results.get('files', [])
if not items:
print('No files found.')
else:
print('Files:')
for item in items:
fname=item['name']
if not fname.endswith('.pdf'):continue
print('processing {0} '.format(fname))#, item['id']))
file_id =item['id']
request = service.files().get_media(fileId=file_id) #mimeType='application/pdf')
fh = io.BytesIO()
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
print ("Download %d%%." % int(status.progress() * 100))
text=pdf_to_text2(fh)
fw=codecs.open(folder_id+'/'+fname+'.txt','w',encoding='utf8')
fw.write(text)
fw.close()
#===========================================
def main(folder_id):
#create a connection to google drive.
store = file.Storage('token.json')
creds = store.get()
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets('credentials.json', 'https://www.googleapis.com/auth/drive.readonly')
creds = tools.run_flow(flow, store)
service = build('drive', 'v3', http=creds.authorize(Http()))
processFolder(folder_id, service)
#===========================================
if __name__ == '__main__':
folder_id='1RyntetHv4TORxfkW7Ayf4osw6vjcIrdL'
main(folder_id) | gdrive_to_pdf.py | 0.248717 | 0.061171 |
import inspect
from abc import ABCMeta, abstractmethod
from copy import deepcopy as _deepcopy, copy as _copy
import sympy as sp
import wrapt
import itertools
from utils.func_utils import get_cached_func_spec, make_function
from structdict import StructDict, OrderedStructDict
import numpy as np
from numpy.lib.stride_tricks import as_strided as _as_strided
import scipy.linalg as scl
import scipy.sparse as scs
from collections import namedtuple as NamedTuple
from utils.decorator_utils import cache_hashable_args
import functools
import timeit
def is_scalar_like(val):
shape = getattr(val, 'shape', (1,))
return all([d==1 for d in shape])
def matmul(self, other, sparse=False):
    # `sparse` is accepted (and ignored) so the sparse backend in get_mat_ops
    # can bind it via functools.partial without a TypeError at call time.
    if any(map(is_scalar_like, (self, other))):
        return self * other
    else:
        return self @ other
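# Illustrative sketch, not part of the original module: scalar-like operands
# dispatch to elementwise `*`, genuine matrices to the `@` operator. The
# helper name `_example_matmul_dispatch` is an assumption for demonstration.
def _example_matmul_dispatch():
    scalar_like = np.array([[2.0]])            # shape (1, 1) counts as scalar-like
    scaled = matmul(scalar_like, np.eye(3))    # falls back to `*`, shape (3, 3)
    prod = matmul(np.eye(3), np.ones((3, 1)))  # uses `@`, shape (3, 1)
    return scaled, prod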
def atleast_2d_col(arr, dtype=None, order=None) -> np.ndarray:
arr = np.asanyarray(arr, dtype=dtype, order=order)
if arr.ndim == 0:
result = arr.reshape(1, 1)
elif arr.ndim == 1:
result = arr[:, np.newaxis]
else:
result = arr
return result
def _atleast_3d_col(arr, dtype=None, order=None):
arr = np.asanyarray(arr, dtype=dtype, order=order)
if arr.ndim == 0:
result = arr.reshape(1, 1, 1)
elif arr.ndim == 1:
result = arr[:, np.newaxis, np.newaxis]
elif arr.ndim == 2:
result = arr[np.newaxis, :]
else:
result = arr
return result
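# Illustrative sketch (helper name `_example_atleast_col` is an assumption):
# the public helper promotes scalars and 1-D vectors to column form, while the
# private 3-D variant adds a leading block axis used by the block_diag code.
def _example_atleast_col():
    col = atleast_2d_col([1.0, 2.0, 3.0])   # shape (3, 1)
    blocks = _atleast_3d_col(np.eye(2))     # shape (1, 2, 2)
    return col, blocks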
def block_diag_dense_same_shape(mats, format=None, dtype=None):
arrs = _atleast_3d_col(mats, dtype=dtype)
k, n, m = arrs.shape
arrs = arrs.reshape(k * n, m)
vals = np.zeros(shape=(k * n, k * m), dtype=arrs.dtype)
vals[:, :m] = arrs
item_size = arrs.itemsize
shape = (k, n, k * m)
strides = ((k * n - 1) * m * item_size, k * m * item_size, item_size)
strided = np.ascontiguousarray(_as_strided(vals, shape=shape, strides=strides))
block_diag = strided.reshape(n * k, m * k)
return block_diag
def block_diag_dense(mats, format=None, dtype=None):
# scl.block_diag is faster for large matrices or a large number of matrices.
a_mats = _atleast_3d_col(mats)
if a_mats.dtype != np.object_ and np.prod(a_mats.shape) < 720:
block_diag = block_diag_dense_same_shape(a_mats, format=format, dtype=dtype)
else:
block_diag = scl.block_diag(*a_mats)
if dtype is not None:
block_diag = block_diag.astype(dtype)
return block_diag
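# Hedged usage sketch (the helper name is an assumption): for a small sequence
# of equally shaped blocks, block_diag_dense assembles the block diagonal with
# the strided fast path above rather than scipy.linalg.block_diag.
def _example_block_diag():
    a = np.arange(4.0).reshape(2, 2)
    b = np.eye(2)
    return block_diag_dense([a, b])  # shape (4, 4), off-diagonal blocks zero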
def block_diag_test(a, number=1000):
def t1():
return block_diag_dense(a)
def t2():
return scl.block_diag(*a)
tt1 = timeit.timeit("t1()", globals=locals(), number=number)
print("block_diag_dense", tt1)
tt2 = timeit.timeit("t2()", globals=locals(), number=number)
print("scl.block_diag", tt2)
t1 = t1()
t2 = t2()
print("t1", t1.dtype)
print("t2", t2.dtype)
return np.array_equal(t1, t2)
def create_object_array(tup):
try:
obj_arr = np.empty(len(tup), dtype=np.object_)
except TypeError:
raise TypeError("tup must be array like.")
for ind, item in enumerate(tup):
obj_arr[ind] = item
return obj_arr
def block_toeplitz(c_tup, r_tup=None, sparse=False):
"""
Based on scipy.linalg.toeplitz method but applied in a block fashion.
"""
try:
c = np.array(c_tup)
except ValueError:
c = create_object_array(c_tup)
if r_tup is None:
if np.issubdtype(c.dtype, np.number):
r = c.conjugate()
else:
r = c
else:
try:
r = np.array(r_tup)
except ValueError:
r = create_object_array(r_tup)
c = _atleast_3d_col(c)
r = _atleast_3d_col(r)
# # Form an array containing a reversed c followed by r[1:] that can be strided to give us a Toeplitz matrix.
try:
vals = np.concatenate((c[::-1], r[1:]))
except ValueError as ve:
raise ValueError("Incompatible dimensions in c_tup or between c_tup and r_tup - " + ve.args[0])
stride_shp = (c.shape[0], c.shape[1], r.shape[0], r.shape[2])
out_shp = (c.shape[0] * c.shape[1], r.shape[0] * r.shape[2])
n, m, k = vals.strides
strided = np.ascontiguousarray(_as_strided(vals[c.shape[0] - 1:], shape=stride_shp, strides=(-n, m, n, k)))
np_toeplitz = strided.reshape(out_shp)
if sparse:
if np_toeplitz.dtype != np.object_:
return scs.csr_matrix(np_toeplitz)
elif all(isinstance(block, scs.csr_matrix) for block in np_toeplitz.flat):
v_stacked = [scs.bmat(np.atleast_2d(col).T).tocsc() for col in np_toeplitz.T]
return scs.bmat(np.atleast_2d(v_stacked)).tocsr()
else:
h_stacked = [scs.bmat(np.atleast_2d(row)).tocsr() for row in np_toeplitz]
return scs.bmat(np.atleast_2d(h_stacked).T).tocsc()
else:
return np_toeplitz
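# Hedged usage sketch (names are assumptions): with 2x2 blocks c0, c1, c2 down
# the first column and no r_tup, the first block row defaults to the
# conjugated column, giving a 6x6 block-Toeplitz matrix.
def _example_block_toeplitz():
    c0, c1, c2 = np.eye(2), 2 * np.eye(2), 3 * np.eye(2)
    return block_toeplitz((c0, c1, c2))  # shape (6, 6)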
def block_toeplitz_alt(c_tup, r_tup=None, sparse=False):
c = create_object_array(c_tup)
if r_tup is None:
try:
r = c.conjugate()
except AttributeError:
r = c
else:
r = create_object_array(r_tup)
# # Form a 1D array containing a reversed c followed by r[1:] that can be
# # strided to give us a Toeplitz matrix.
vals = np.concatenate((c[::-1], r[1:]))
out_shp = c.shape[0], r.shape[0]
n = vals.strides[0]
strided = _as_strided(vals[len(c) - 1:], shape=out_shp, strides=(-n, n))
np_toep = np.block(strided.tolist())
if sparse:
if all(isinstance(block, scs.csr_matrix) for block in np_toep.flat):
v_stacked = [scs.bmat(np.atleast_2d(col).T).tocsc() for col in np_toep.T]
return scs.bmat(np.atleast_2d(v_stacked)).tocsr()
else:
h_stacked = [scs.bmat(np.atleast_2d(row)).tocsr() for row in np_toep]
return scs.bmat(np.atleast_2d(h_stacked).T).tocsc()
else:
return np_toep
_MatOpsNames = ['package',
'linalg',
'sclinalg',
'block_diag',
'vmatrix',
'hmatrix',
'zeros',
'vstack',
'hstack',
'matmul']
_MatOpsNameTup = NamedTuple('MatOps', _MatOpsNames)
def pass_through(a):
return a
@cache_hashable_args(maxsize=2)
def get_mat_ops(sparse=False):
if sparse:
mat_ops = _MatOpsNameTup(
package=scs,
linalg=scs,
sclinalg=scs,
block_diag=scs.block_diag,
vmatrix=scs.csr_matrix,
hmatrix=scs.csc_matrix,
zeros=scs.csr_matrix,
vstack=scs.vstack,
hstack=scs.hstack,
matmul=functools.partial(matmul, sparse=True)
)
else:
mat_ops = _MatOpsNameTup(
package=np,
linalg=np.linalg,
sclinalg=scl,
block_diag=block_diag_dense,
vmatrix=np.atleast_2d,
hmatrix=np.atleast_2d,
zeros=np.zeros,
vstack=np.vstack,
hstack=np.hstack,
matmul=matmul
)
return mat_ops
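# Hedged usage sketch (function name is an assumption): get_mat_ops returns a
# namedtuple of backend operations so callers can stay agnostic about dense
# versus sparse storage.
def _example_mat_ops():
    dense_ops = get_mat_ops(sparse=False)
    stacked = dense_ops.vstack([np.eye(2), np.zeros((1, 2))])  # (3, 2) ndarray
    sparse_ops = get_mat_ops(sparse=True)
    empty = sparse_ops.zeros((2, 2))                           # empty csr_matrix
    return stacked, empty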
def get_expr_shape(expr):
try:
expr_shape = expr.shape
except AttributeError:
pass
else:
if len(expr_shape) <= 2:
return expr_shape
else:
raise NotImplementedError("Maximum supported dimension is 2, got {}".format(len(expr_shape)))
if expr is None:
return (0, 0)
elif np.isscalar(expr) or isinstance(expr, sp.Expr):
return (1, 1)
elif callable(expr):
expr = CallableMatrix(expr)
return expr.shape
else:
raise TypeError("Invalid expression type: '{0}', for expr: '{1!s}'".format(type(expr), expr))
def get_expr_shapes(*exprs, get_max_dim=False):
if not exprs:
return None
if isinstance(exprs[0], dict):
shapes = StructDict({expr_id: get_expr_shape(expr) for expr_id, expr in exprs[0].items()})
else:
shapes = [get_expr_shape(expr) for expr in exprs]
if get_max_dim:
shapes = list(shapes.values()) if isinstance(shapes, dict) else shapes
return tuple(np.maximum.reduce(shapes))
else:
return shapes
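# Hedged usage sketch (function name is an assumption): shapes are normalised
# to 2-D tuples, with None treated as an empty (0, 0) expression and scalars
# as (1, 1); get_max_dim reduces elementwise maxima across the shapes.
def _example_expr_shapes():
    shapes = get_expr_shapes(None, 3.0, np.ones((4, 2)))  # [(0, 0), (1, 1), (4, 2)]
    max_shape = get_expr_shapes(np.ones((4, 1)), np.ones((1, 3)), get_max_dim=True)  # (4, 3)
    return shapes, max_shape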
class CallableMatrixMeta(ABCMeta):
def __new__(cls, *args, **kwargs):
kls = super(CallableMatrixMeta, cls).__new__(cls, *args, **kwargs)
mro = kls.mro()
all_slots = set(itertools.chain.from_iterable(klass.__dict__.get("__slots__", ()) for klass in mro))
all_slots.discard('__dict__')
kls._all_slots = tuple(all_slots)
return kls
def __call__(cls, *args, **kwargs):
return cls.__new__(cls, *args, **kwargs)
class CallableMatrixBase(metaclass=CallableMatrixMeta):
@staticmethod
def constant_matrix_func(constant):
def _constant_matrix_func():
return constant
_constant_matrix_func.__qualname__ = _constant_matrix_func.__name__ = 'constant_matrix_func'
return _constant_matrix_func
@classmethod
def _constructor(cls, *args, **kwargs):
self = super(CallableMatrixBase, cls).__new__(cls)
self.__init__(*args, **kwargs)
return self
def _constructor_from_self(self):
obj = super(CallableMatrixBase, type(self)).__new__(type(self))
for attr in self._all_slots:
setattr(obj, attr, getattr(self, attr))
obj.__init__(self)
return obj
def copy(self):
return self._constructor_from_self()
__copy__ = copy
def deepcopy(self, memo=None):
return self._constructor_from_self()
__deepcopy__ = deepcopy
def __new__(cls, matrix, matrix_name=None):
matrix_func = cls._process_matrix_func(matrix)
nan_call = cls._nan_call(matrix_func)
if np.all(np.isfinite(nan_call)):
return CallableMatrixConstant._constructor(matrix_func, matrix_name, _nan_call=nan_call)
else:
return CallableMatrix._constructor(matrix_func, matrix_name, _nan_call=nan_call)
@abstractmethod
def __init__(self, *args, **kwargs):
super(CallableMatrixBase, self).__init__(*args, **kwargs)
@classmethod
def _process_matrix_func(cls, matrix):
if inspect.isfunction(matrix):
func = matrix
elif inspect.ismethod(matrix):
func = matrix.__func__
elif isinstance(matrix, (sp.Expr, sp.Matrix)):
system_matrix = sp.Matrix(matrix)
param_sym_tup = cls._get_param_sym_tup(system_matrix)
func = sp.lambdify(param_sym_tup, system_matrix, modules="numpy", dummify=False)
else:
func = cls.constant_matrix_func(atleast_2d_col(matrix))
return func
@staticmethod
def _nan_call(matrix_func):
f_spec = get_cached_func_spec(matrix_func, reset_cache=True)
kwargs = {param_name: np.NaN for param_name in f_spec.all_kw_params}
args = [np.NaN] * len(f_spec.pos_only_params)
try:
ret_val = atleast_2d_col(matrix_func(*args, **kwargs))
ret_val.setflags(write=False)
return ret_val
except TypeError:
msg = f"_nan_call() failed, it is likely that the matrix function does not have a constant shape.\n"
note = (
"Note: all callable expressions must return with a constant array shape that does not depend on its "
"arguments. Shape is determined by calling the function with all arguments set to a float with value "
"NaN.")
raise TypeError(msg + note)
@staticmethod
def _get_param_sym_tup(expr):
try:
sym_dict = {str(sym): sym for sym in expr.free_symbols}
param_sym_tup = tuple([sym_dict.get(sym) for sym in sorted(sym_dict.keys())])
except AttributeError:
param_sym_tup = ()
return param_sym_tup
class CallableMatrix(CallableMatrixBase, wrapt.decorators.AdapterWrapper):
__slots__ = ('_self_matrix_name', '_self_wrapped_name', '_self_adapter_spec', '_self_shape', '_self_size',
'_self_ndim', '_self_dtype', '_self_nbytes', '_self_itemsize', '_self_is_empty', '_self_is_all_zero',
'_self_is_constant')
def __init__(self, matrix, matrix_name=None, **kwargs):
if isinstance(matrix, type(self)):
matrix_func = matrix.__wrapped__
super(CallableMatrix, self).__init__(wrapped=matrix_func, wrapper=matrix._self_wrapper, enabled=None,
adapter=matrix._self_adapter)
else:
matrix_func = self._process_matrix_func(matrix)
self._self_matrix_name = matrix_name if matrix_name is not None else matrix_func.__name__
self._self_wrapped_name = matrix_func.__name__
matrix_func.__name__ = self._self_matrix_name
matrix_func.__qualname__ = (
"".join(matrix_func.__qualname__.rsplit('.', 1)[:-1] + ['.', matrix_func.__name__]).lstrip('.'))
self._self_wrapped_f_spec = get_cached_func_spec(matrix_func)
adapter = self._gen_callable_matrix_adapter(self._self_wrapped_f_spec)
self._self_adapter_spec = get_cached_func_spec(adapter, bypass_cache=True)
wrapper = self._matrix_wrapper
super(CallableMatrix, self).__init__(wrapped=matrix_func, wrapper=wrapper, enabled=None, adapter=adapter)
_nan_call = kwargs.get('_nan_call')
nan_call = _nan_call if _nan_call is not None else self._nan_call(matrix_func)
self._self_shape = get_expr_shape(nan_call)
self._self_size = nan_call.size
self._self_ndim = nan_call.ndim
self._self_dtype = nan_call.dtype
self._self_nbytes = nan_call.nbytes
self._self_itemsize = nan_call.itemsize
self._self_is_empty = not self._self_size
self._self_is_all_zero = np.all(nan_call == 0)
self._self_is_constant = np.all(np.isfinite(nan_call))
if self._self_is_constant:
if type(self) is CallableMatrix:
raise TypeError(f"Cannot initialize {type(self).__name__} object with constant matrix.")
self._self_constant = nan_call
else:
self._self_constant = None
def _matrix_wrapper(self, wrapped, instance, args, kwargs):
param_struct = kwargs.pop('param_struct', None)
if param_struct and self._self_wrapped_f_spec.all_kw_params:
try:
duplicates = set(kwargs).intersection(param_struct) if kwargs else None
kwargs.update(
{name: param_struct[name] for name in
set(self._self_wrapped_f_spec.all_kw_params).intersection(param_struct)})
except TypeError as te:
msg = f"'param_struct' must be dictionary like or None: {te.args[0]}"
raise TypeError(msg).with_traceback(te.__traceback__) from None
else:
if duplicates:
raise TypeError(
f"{wrapped.__name__}() got multiple values for argument '{duplicates.pop()}' - values in "
f"kwargs are duplicated in param_struct.")
try:
retval = wrapped(*args, **kwargs)
except TypeError as te:
msg = te.args[0].replace(self._self_wrapped_name, wrapped.__name__)
raise TypeError(msg).with_traceback(te.__traceback__) from None
if getattr(retval, 'ndim', 0) < 2:
retval = atleast_2d_col(retval)
if isinstance(retval, np.ndarray):
retval.setflags(write=False)
return retval
def _gen_callable_matrix_adapter(self, f_spec):
f_args_spec_struct = OrderedStructDict(f_spec.arg_spec._asdict()).deepcopy()
f_args_spec_struct.kwonlyargs.append('param_struct')
if f_args_spec_struct.kwonlydefaults:
f_args_spec_struct.kwonlydefaults.update({'param_struct': None})
else:
f_args_spec_struct.kwonlydefaults = {'param_struct': None}
f_args_spec = inspect.FullArgSpec(**f_args_spec_struct)
adapter = make_function(f_args_spec, name='adapter')
return adapter
def __reduce__(self):
return (type(self), (self.__wrapped__, self._self_matrix_name))
@property
def __name__(self):
return self._self_matrix_name
@property
def __class__(self):
return type(self)
@property
def _f_spec(self):
return self._self_adapter_spec
@_f_spec.setter
def _f_spec(self, f_spec):
self._self_adapter_spec = f_spec
@property
def __signature__(self):
return self._self_adapter_spec.signature
@property
def required_params(self):
return self._self_wrapped_f_spec.all_kw_params
@property
def matrix_name(self):
return self._self_matrix_name
@property
def shape(self):
return self._self_shape
@property
def size(self):
return self._self_size
@property
def ndim(self):
return self._self_ndim
@property
def dtype(self):
return self._self_dtype
@property
def nbytes(self):
return self._self_nbytes
@property
def itemsize(self):
return self._self_itemsize
@property
def is_empty(self):
return self._self_is_empty
@property
def is_all_zero(self):
return self._self_is_all_zero
@property
def is_constant(self):
return self._self_is_constant
def __repr__(self):
empty_str = f", shape={self._self_shape}" if not self._self_size else ""
return f"<{self.__class__.__name__} {self.__name__}{self.__signature__}{empty_str}>"
def __str__(self):
return self.__repr__()
def __dir__(self):
wrapped_dir = set(dir(self.__wrapped__))
added_dir = set(itertools.chain.from_iterable([kls.__dict__ for kls in type(self).mro()]))
rv = wrapped_dir | added_dir
return sorted(rv)
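# Hedged usage sketch (the symbol and helper names are assumptions): wrapping
# a sympy expression yields a callable that takes its free symbols as keyword
# arguments, or bundled in `param_struct`, and returns a read-only 2-D array;
# constant inputs are routed to CallableMatrixConstant by
# CallableMatrixBase.__new__.
def _example_callable_matrix():
    x = sp.Symbol('x')
    m = CallableMatrix(sp.Matrix([[x, 0], [0, 1]]), matrix_name='demo')
    return m(x=2.0), m(param_struct={'x': 3.0})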
class CallableMatrixConstant(CallableMatrix):
def __init__(self, matrix, matrix_name=None, **kwargs):
super(CallableMatrixConstant, self).__init__(matrix, matrix_name=matrix_name, **kwargs)
if not self.is_constant:
raise TypeError(f"Cannot initialize {type(self).__name__} object with non-constant matrix.")
def __call__(self, *, param_struct=None):
return self._self_constant | utils/matrix_utils.py | 0.519034 | 0.333368 |
from problem_solving.algorithms.bit_manipulation import *
def test_q1_lonely_integer(capsys, monkeypatch):
inputs = ["1",
"1"]
monkeypatch.setattr('builtins.input', lambda: inputs.pop(0))
q1_lonely_integer.main()
captured = capsys.readouterr()
output = "1\n"
assert captured.out == output
inputs = ["3",
"1 1 2"]
monkeypatch.setattr('builtins.input', lambda: inputs.pop(0))
q1_lonely_integer.main()
captured = capsys.readouterr()
output = "2\n"
assert captured.out == output
inputs = ["5",
"0 0 1 2 1"]
monkeypatch.setattr('builtins.input', lambda: inputs.pop(0))
q1_lonely_integer.main()
captured = capsys.readouterr()
output = "2\n"
assert captured.out == output
def test_q2_maximizing_xor(capsys, monkeypatch):
inputs = ["10", "15"]
monkeypatch.setattr('builtins.input', lambda: inputs.pop(0))
q2_maximizing_xor.main()
captured = capsys.readouterr()
output = "7\n"
assert captured.out == output
def test_q3_counter_game():
assert "Richard" == q3_counter_game.counterGame(6)
assert "Louise" == q3_counter_game.counterGame(132)
def test_q5_sum_vs_xor():
assert 2 == q5_sum_vs_xor.sumXor(5)
assert 1 == q5_sum_vs_xor.sumXor(0)
assert 1073741824 == q5_sum_vs_xor.sumXor(1000000000000000)
assert 4 == q5_sum_vs_xor.sumXor(10)
assert 1 == q5_sum_vs_xor.sumXor(1)
def test_q7_flipping_bits(capsys, monkeypatch):
inputs = ["3",
"2147483647",
"1",
"0"]
monkeypatch.setattr('builtins.input', lambda: inputs.pop(0))
q7_flipping_bits.main()
captured = capsys.readouterr()
output = "2147483648\n4294967294\n4294967295\n"
assert captured.out == output
inputs = ["2",
"4",
"123456"]
monkeypatch.setattr('builtins.input', lambda: inputs.pop(0))
q7_flipping_bits.main()
captured = capsys.readouterr()
output = "4294967291\n4294843839\n"
assert captured.out == output
def test_q11_winning_lottery_ticket(capsys, monkeypatch):
inputs = ["5",
"129300455",
"5559948277",
"012334556",
"56789",
"123456879"]
monkeypatch.setattr('builtins.input', lambda: inputs.pop(0))
q11_winning_lottery_ticket.main()
captured = capsys.readouterr()
output = "5\n\n"
assert captured.out == output | tests/test_problem_solving_algorithms_bit_manipulation.py | 0.648578 | 0.525004 |
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import UUID, JSON
# revision identifiers, used by Alembic (the id matches this file's name,
# bdd74fcd8a5c_initial.py).
revision = 'bdd74fcd8a5c'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.create_table('observr_user',
sa.Column('id', UUID, primary_key=True),
sa.Column('username', sa.String(255), unique=True, nullable=False),
sa.Column('email', sa.String(255), unique=True, nullable=False),
sa.Column('api_key', sa.String(255), unique=True, nullable=False),
sa.Column('password', sa.String(255), nullable=False),
sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
sa.Column('deleted_at', sa.DateTime(timezone=True), nullable=True),
)
op.create_table('observr_project',
sa.Column('id', UUID, primary_key=True),
sa.Column('name', sa.String(255), nullable=False),
sa.Column('url', sa.String(2048), nullable=False),
sa.Column('api_key', sa.String(255), unique=True, nullable=False),
sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
sa.Column('deleted_at', sa.DateTime(timezone=True), nullable=True),
sa.Column('user_id', UUID, sa.ForeignKey('observr_user.id', ondelete="CASCADE"), nullable=False),
)
op.create_table('observr_visit',
sa.Column('id', UUID, primary_key=True),
sa.Column('host', sa.String(255), nullable=False),
sa.Column('path', sa.String(2048), nullable=True),
sa.Column('referer', sa.String(255), nullable=True),
sa.Column('remote_addr', sa.String(255), nullable=False),
sa.Column('method', sa.String(10), nullable=False),
sa.Column('user_agent', sa.String(255), nullable=True),
sa.Column('status_code', sa.Integer, nullable=True),
sa.Column('protocol', sa.String(20), nullable=True),
sa.Column('data', JSON(), nullable=True),
sa.Column('headers', JSON(), nullable=True),
sa.Column('query_string', sa.String(255), nullable=True),
sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
sa.Column('project_id', UUID, sa.ForeignKey('observr_project.id', ondelete="CASCADE"), nullable=False),
)
op.create_table('observr_tag',
sa.Column('id', UUID, primary_key=True),
sa.Column('key', sa.String(255), unique=True, nullable=False),
sa.Column('value', sa.Text, nullable=False),
sa.Column('data', sa.Text, nullable=False),
sa.Column('seen_count', sa.Integer, nullable=False, server_default="0"),
sa.Column('project_id', UUID, sa.ForeignKey('observr_project.id', ondelete="CASCADE"), nullable=False),
sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
sa.Column('first_seen_at', sa.DateTime(timezone=True), nullable=False),
sa.Column('last_seen_at', sa.DateTime(timezone=True), nullable=False),
)
op.create_table('observr_group_tag',
sa.Column('id', UUID, primary_key=True),
sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
sa.Column('src_tag_id', UUID, sa.ForeignKey('observr_tag.id', ondelete="CASCADE"), nullable=False),
sa.Column('dst_tag_id', UUID, sa.ForeignKey('observr_tag.id', ondelete="CASCADE"), nullable=False),
)
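# Many-to-many join table between visits and tags.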
op.create_table('observr_visit_tag',
sa.Column('id', UUID, primary_key=True),
sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.func.now(), nullable=False),
sa.Column('visit_id', UUID, sa.ForeignKey('observr_visit.id', ondelete="CASCADE"), nullable=False),
sa.Column('tag_id', UUID, sa.ForeignKey('observr_tag.id', ondelete="CASCADE"), nullable=False),
)
def downgrade():
op.drop_table('observr_group_tag')
op.drop_table('observr_visit_tag')
op.drop_table('observr_tag')
op.drop_table('observr_visit')
op.drop_table('observr_project')
op.drop_table('observr_user') | alembic/versions/bdd74fcd8a5c_initial.py | 0.426322 | 0.137793 |
import os
import argparse
def get_common_args():
"""
Return an ArgumentParser instance with all of the common arguments added.
This is suitable for input as the parent argument of an ArgumentParser constructor.
"""
    ap = argparse.ArgumentParser(description='EMBERS common arguments.', add_help=False)
# Queue arguments
ap.add_argument('--service', metavar='SERVICE', type=str, default=os.environ.get('UPSTART_JOB', None), nargs='?',
required=False, help='Name of this service, for inferring the input and output queues.')
ap.add_argument('--pub', metavar='PUBLISH', type=str, nargs='+', required=False,
help='Queue names or URLs to publish on.')
ap.add_argument('--sub', metavar='SUBSCRIBE', type=str, nargs='+', required=False,
help='Queue names or URLs to subscribe to.')
ap.add_argument('--queue-conf', metavar='QUEUE_CONF', type=str, nargs='?', required=False,
help='Location of the queue configuration file.')
ap.add_argument("--log-stderr", action="store_true", default=False)
# SSH arguments
ap.add_argument('--tunnel', metavar='TUNNEL', type=str, required=False, nargs='?',
help='An SSH connection string, e.g. "embers@ec2-50-16-120-255.compute-1.amazonaws.com", to open a tunnel connection to the queue. Use with --key.')
ap.add_argument('--ssh-key', metavar='KEYFILE', type=str, required=False, nargs='?',
help='The path to an SSH-compatible key file to use as the connection credentials for --tunnel. Use with --tunnel.')
# AWS values
ap.add_argument('--aws-key', metavar='AWSKEY', type=str, default=os.environ.get('AWS_ACCESS_KEY_ID', None),
nargs='?', help='''The AWS key.''')
ap.add_argument('--aws-secret', metavar='AWSSECRET', type=str,
default=os.environ.get('AWS_SECRET_ACCESS_KEY', None), nargs='?', help='''The AWS key secret.''')
# S3 and SDB arguments
ap.add_argument('-b', '--bucket', metavar='BUCKET', type=str, nargs='?',
default=os.environ.get('S3_BUCKET', 'pythia-data'),
help='S3 data bucket to use')
ap.add_argument('--resbucket', metavar='RESOURCEBUCKET', type=str, nargs='?',
default='pythia-data',
help='S3 resource bucket to use')
ap.add_argument('-x', '--sdbsuffix', metavar='SDBSUFFIX', type=str, nargs='?',
default=os.environ.get('SDB_DOMAIN_SUFFIX', ''),
help='SDB suffix to use (added to domain name, so may want to start with a dash or underscore)')
ap.add_argument('--sdbdomain', metavar='SDBDOMAIN', type=str, nargs='?',
default='',
help='SDB domain to use (will be suffixed with SDBSUFFIX)')
# General stuff
ap.add_argument('--verbose', action="store_true", help="Write debug messages.")
ap.add_argument('--log_file', default=None, help="Location to write the log file")
ap.add_argument('--log_level', default='info', help="Logging level")
return ap
def get_parser(description=None):
"""
Create an ArgumentParser instance with the common arguments included.
See argparse.ArgumentParser.add_argument() for details on adding your own arguments.
Use parse_args() to get the argument values object. (See argparse.ArgumentParser.parse_args()).
    The simplest usage, if you only use default args, is 'args = get_parser().parse_args()'
"""
    return argparse.ArgumentParser(description=description, parents=[get_common_args()]) | twitter_countryGeo/twitter-geo/etool/args.py | 0.603114 | 0.176672 |
import json
import unittest
import requests
import urbanairship as ua
from mock import Mock
from tests import TEST_KEY, TEST_SECRET
class TestExperimentsReports(unittest.TestCase):
def test_experiment_overview(self):
mock_response = requests.Response()
mock_response._content = json.dumps(
{
"app_key": TEST_KEY,
"experiment_id": "24cf2af1-9961-4f3f-b301-75505c240358",
"push_id": "bb74f63c-c1c8-4618-800d-a04478e7d28c",
"created": "2021-11-12 13:44:09",
"sends": 6,
"direct_responses": 0,
"influenced_responses": 0,
"web_clicks": 0,
"web_sessions": 0,
"variants": [
{
"id": 0,
"name": "<NAME>",
"audience_pct": 80.0,
"sends": 6,
"direct_responses": 0,
"direct_response_pct": 0.0,
"indirect_responses": 0,
"indirect_response_pct": 0.0,
}
],
"control": {
"audience_pct": 20.0,
"sends": 1,
"responses": 0,
"response_rate_pct": 0.0,
},
}
).encode("UTF-8")
ua.Airship._request = Mock()
ua.Airship._request.side_effect = [mock_response]
airship = ua.Airship(TEST_KEY, TEST_SECRET)
overview = ua.ExperimentReport(airship).get_overview(
push_id="bb74f63c-c1c8-4618-800d-a04478e7d28c"
)
self.assertEqual(overview.get("app_key"), TEST_KEY)
self.assertEqual(overview.get("sends"), 6)
self.assertEqual(type(overview.get("variants")), list)
self.assertEqual(type(overview.get("control")), dict)
def test_variant_overview(self):
mock_response = requests.Response()
mock_response._content = json.dumps(
{
"app_key": TEST_KEY,
"experiment_id": "24cf2af1-9961-4f3f-b301-75505c240358",
"push_id": "bb74f63c-c1c8-4618-800d-a04478e7d28c",
"created": "2021-11-12 13:44:09",
"variant": 0,
"variant_name": "Test A",
"sends": 6,
"direct_responses": 0,
"influenced_responses": 0,
"platforms": {
"amazon": {
"type": "devicePlatformBreakdown",
"direct_responses": 0,
"influenced_responses": 0,
"sends": 0,
},
"ios": {
"type": "devicePlatformBreakdown",
"direct_responses": 0,
"influenced_responses": 0,
"sends": 5,
},
"web": {
"type": "webPlatformBreakdown",
"direct_responses": 0,
"indirect_responses": 0,
"sends": 0,
},
"android": {
"type": "devicePlatformBreakdown",
"direct_responses": 0,
"influenced_responses": 0,
"sends": 1,
},
},
}
).encode("UTF-8")
ua.Airship._request = Mock()
ua.Airship._request.side_effect = [mock_response]
airship = ua.Airship(TEST_KEY, TEST_SECRET)
variant = ua.ExperimentReport(airship).get_variant(
push_id="bb74f63c-c1c8-4618-800d-a04478e7d28c", variant_id=0
)
self.assertEqual(variant.get("app_key"), TEST_KEY)
self.assertEqual(variant.get("variant"), 0) | tests/reports/test_experiments_reports.py | import json
import unittest
import requests
import urbanairship as ua
from mock import Mock
from tests import TEST_KEY, TEST_SECRET
class TestExperimentsReports(unittest.TestCase):
def test_experiment_overview(self):
mock_response = requests.Response()
mock_response._content = json.dumps(
{
"app_key": TEST_KEY,
"experiment_id": "24cf2af1-9961-4f3f-b301-75505c240358",
"push_id": "bb74f63c-c1c8-4618-800d-a04478e7d28c",
"created": "2021-11-12 13:44:09",
"sends": 6,
"direct_responses": 0,
"influenced_responses": 0,
"web_clicks": 0,
"web_sessions": 0,
"variants": [
{
"id": 0,
"name": "<NAME>",
"audience_pct": 80.0,
"sends": 6,
"direct_responses": 0,
"direct_response_pct": 0.0,
"indirect_responses": 0,
"indirect_response_pct": 0.0,
}
],
"control": {
"audience_pct": 20.0,
"sends": 1,
"responses": 0,
"response_rate_pct": 0.0,
},
}
).encode("UTF-8")
ua.Airship._request = Mock()
ua.Airship._request.side_effect = [mock_response]
airship = ua.Airship(TEST_KEY, TEST_SECRET)
overview = ua.ExperimentReport(airship).get_overview(
push_id="bb74f63c-c1c8-4618-800d-a04478e7d28c"
)
self.assertEqual(overview.get("app_key"), TEST_KEY)
self.assertEqual(overview.get("sends"), 6)
self.assertEqual(type(overview.get("variants")), list)
self.assertEqual(type(overview.get("control")), dict)
def test_variant_overview(self):
mock_response = requests.Response()
mock_response._content = json.dumps(
{
"app_key": TEST_KEY,
"experiment_id": "24cf2af1-9961-4f3f-b301-75505c240358",
"push_id": "bb74f63c-c1c8-4618-800d-a04478e7d28c",
"created": "2021-11-12 13:44:09",
"variant": 0,
"variant_name": "Test A",
"sends": 6,
"direct_responses": 0,
"influenced_responses": 0,
"platforms": {
"amazon": {
"type": "devicePlatformBreakdown",
"direct_responses": 0,
"influenced_responses": 0,
"sends": 0,
},
"ios": {
"type": "devicePlatformBreakdown",
"direct_responses": 0,
"influenced_responses": 0,
"sends": 5,
},
"web": {
"type": "webPlatformBreakdown",
"direct_responses": 0,
"indirect_responses": 0,
"sends": 0,
},
"android": {
"type": "devicePlatformBreakdown",
"direct_responses": 0,
"influenced_responses": 0,
"sends": 1,
},
},
}
).encode("UTF-8")
ua.Airship._request = Mock()
ua.Airship._request.side_effect = [mock_response]
airship = ua.Airship(TEST_KEY, TEST_SECRET)
variant = ua.ExperimentReport(airship).get_variant(
push_id="bb74f63c-c1c8-4618-800d-a04478e7d28c", variant_id=0
)
self.assertEqual(variant.get("app_key"), TEST_KEY)
self.assertEqual(variant.get("variant"), 0) | 0.455199 | 0.229719 |
import websockets, asyncio, json
from threading import Thread
from abc import ABC, abstractmethod
ADDRESS = '127.0.0.1'
PORT = 4202
class OscillatorClient:
def __init__(self, *,
server_address=ADDRESS,
server_port=PORT):
self.uri = f'ws://{server_address}:{server_port}'
    def register(self, rule):
        rule.action = 'add'
        asyncio.run(self._post(rule))
    def unregister(self, rule):
        rule.action = 'remove'
        asyncio.run(self._post(rule))
    def unregister_all(self):
        rule = OscillatorRule(action='remove_all', pins=[], recipe='')
        asyncio.run(self._post(rule))
async def _post(self, rule):
async with websockets.connect(self.uri) as websocket:
await websocket.send(str(rule))
class Oscillator(ABC):
INFINITE = -1
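    # A recipe is a space-separated toggle script sent to the oscillator
    # server: each 'T' toggles the pin, and an integer following it is read
    # as the dwell time in milliseconds (inferred from the concrete recipes
    # below; the server defines the exact semantics).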
def __init__(self, *, loops=INFINITE):
self._client = OscillatorClient()
self.loops = loops
self.rule = None
    @abstractmethod
    def recipe(self):
        pass
def start(self, *, pins, timestamp=None):
if not isinstance(pins, list):
raise ValueError('"pins" argument must be of type "list".')
self.rule = OscillatorRule(pins=pins,
recipe=self.recipe(),
loops=self.loops,
timestamp=timestamp)
self._client.register(self.rule)
def stop(self):
if hasattr(self, 'rule') and self.rule is not None:
self._client.unregister(self.rule)
def __iter__(self):
return iter({
'rule': dict(self.rule),
'loops': self.loops,
}.items())
def __str__(self):
return f'Oscillator(rule={str(self.rule)}, loops={self.loops})'
class OscillatorRule:
def __init__(self, *,
pins,
recipe,
action='',
loops=Oscillator.INFINITE,
timestamp=None):
self.action = action
self.loops = loops
self.pins = pins
self.recipe = recipe
self.timestamp = timestamp
def __str__(self):
return (f'OscillatorRule('
f'action={self.action}, '
f'loops={self.loops}, '
f'pins={self.pins}, '
f'recipe={self.recipe}, '
f'timestamp={self.timestamp}'
f')')
def __iter__(self):
return iter({
'action': self.action,
'loops': self.loops,
'pins': self.pins,
'recipe': self.recipe,
'timestamp': self.timestamp \
if self.timestamp is not None \
else 0,
}.items())
class InvalidRecipeError(RuntimeError):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class Flash(Oscillator):
def __init__(self, *,
on_time=500,
off_time=500,
loops=Oscillator.INFINITE):
super().__init__(loops=loops)
self.on_time = on_time
self.off_time = off_time
def recipe(self):
return (f'T {self.on_time} '
f'T {self.off_time}')
class DoubleFlash(Oscillator):
def __init__(self, *,
burst_time=50,
off_time=500,
loops=Oscillator.INFINITE):
super().__init__(loops=loops)
self.burst_time = burst_time
self.off_time = off_time
def recipe(self):
return (f'T {self.burst_time} '
f'T {self.burst_time} '
f'T {self.burst_time} '
f'T {self.off_time}')
class TripleFlash(Oscillator):
def __init__(self, *,
burst_time=50,
off_time=500,
loops=Oscillator.INFINITE):
super().__init__(loops=loops)
self.burst_time = burst_time
self.off_time = off_time
def recipe(self):
return (f'T {self.burst_time} '
f'T {self.burst_time} '
f'T {self.burst_time} '
f'T {self.burst_time} '
f'T {self.burst_time} '
f'T {self.off_time}')
class AlwaysOn(Oscillator):
def __init__(self):
super().__init__()
def recipe(self):
return 'T'
class AlwaysOff(Oscillator):
def __init__(self):
super().__init__()
def recipe(self):
return ''
class Inline(Oscillator):
    def __init__(self,
                 *recipe,
                 loops=Oscillator.INFINITE):
        # A single integer argument means a one-shot pulse; otherwise the
        # caller-supplied loop count is used.
        super().__init__(loops=1
                         if len(recipe) == 1 and isinstance(recipe[0], int)
                         else loops)
        if len(recipe) == 1 and isinstance(recipe[0], str):
            self._recipe = recipe[0]
        elif len(recipe) == 1 and isinstance(recipe[0], int):
            self._recipe = f'T {recipe[0]} T'
        elif len(recipe) > 0:
            recipe = recipe[0] \
                if len(recipe) == 1 \
                and isinstance(recipe[0], list) \
                else recipe
            symbols = []
            for symbol in recipe:
                if isinstance(symbol, int):
                    symbols.append(f'T {symbol}')
                else:
                    raise InvalidRecipeError(f'Invalid argument: {symbol}')
            self._recipe = ' '.join(symbols)
    def recipe(self):
        return self._recipe
class Sos(Oscillator):
def __init__(self, *,
dot_time=100,
dash_time=200,
character_delay=30,
repeat_delay=800,
loops=1):
super().__init__(loops=loops)
self.dot_time = dot_time
self.dash_time = dash_time
self.character_delay = character_delay
self.repeat_delay = repeat_delay
def recipe(self):
return (f'T {self.dot_time} T {self.character_delay} '
f'T {self.dot_time} T {self.character_delay} '
f'T {self.dot_time} T {self.character_delay} '
f'T {self.dash_time} T {self.character_delay} '
f'T {self.dash_time} T {self.character_delay} '
f'T {self.dash_time} T {self.character_delay} '
f'T {self.dot_time} T {self.character_delay} '
f'T {self.dot_time} T {self.character_delay} '
f'T {self.dot_time} T {self.character_delay} '
f'T {self.dot_time} T {self.repeat_delay}')
class MorseCode(Oscillator):
DICT = {
'A':'.-', 'B':'-...',
'C':'-.-.', 'D':'-..', 'E':'.',
'F':'..-.', 'G':'--.', 'H':'....',
'I':'..', 'J':'.---', 'K':'-.-',
'L':'.-..', 'M':'--', 'N':'-.',
'O':'---', 'P':'.--.', 'Q':'--.-',
'R':'.-.', 'S':'...', 'T':'-',
'U':'..-', 'V':'...-', 'W':'.--',
'X':'-..-', 'Y':'-.--', 'Z':'--..',
'1':'.----', '2':'..---', '3':'...--',
'4':'....-', '5':'.....', '6':'-....',
'7':'--...', '8':'---..', '9':'----.',
'0':'-----', ', ':'--..--', '.':'.-.-.-',
'?':'..--..', '/':'-..-.', '-':'-....-',
'(':'-.--.', ')':'-.--.-'
}
def __init__(self,
message,
*,
dot_time=100,
dash_time=200,
character_delay=30,
word_delay=60,
repeat_delay=800,
loops=1):
super().__init__(loops=loops)
self._cipher = ''
self._recipe = ''
message = message.upper()
for letter in message:
if letter != ' ':
self._cipher += MorseCode.DICT[letter] + ' '
else:
self._cipher += ' '
for character in self._cipher:
if character == '.':
self._recipe += f'T {dot_time} '
elif character == '-':
self._recipe += f'T {dash_time} '
elif character != ' ':
raise RuntimeError(f'Invalid Morse code character: {character}')
if character == ' ':
self._recipe += f'T {word_delay} '
else:
self._recipe += f'T {character_delay} '
if self._cipher != '':
self._recipe += f'T {repeat_delay}'
def recipe(self):
return self._recipe
class Monostable(Oscillator):
def __init__(self, *,
on_time=3000):
super().__init__(loops=1)
self.on_time = on_time
def recipe(self):
        return f'T {self.on_time} T' | dubois/oscillators.py | 0.71602 | 0.099383 |
from json import load
from logging import getLogger
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import List
from kontrolilo.base_checker import BaseLicenseChecker
from kontrolilo.shared_main import shared_main
from kontrolilo.configuration import Configuration
from kontrolilo.configuration.package import Package
logger = getLogger(__name__)
class GradleLicenseChecker(BaseLicenseChecker):
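    # Injects the gradle-license-plugin via a Gradle init script, so the
    # project under scan needs no build-file changes.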
INIT_SCRIPT = '''
initscript {
    repositories {
        jcenter()
        google()
    }
    dependencies {
        classpath 'com.jaredsburrows:gradle-license-plugin:0.8.90'
    }
}
allprojects {
apply plugin: com.jaredsburrows.license.LicensePlugin
licenseReport {
generateCsvReport = false
generateHtmlReport = false
generateJsonReport = true
}
}
'''
def __init__(self) -> None:
super().__init__()
self.init_script = NamedTemporaryFile(prefix='init.gradle')
with open(self.init_script.name, 'w') as init_script_file:
init_script_file.write(self.INIT_SCRIPT)
def prepare_directory(self, directory: str):
pass
def get_license_checker_command(self, directory: str) -> str:
wrapper_path = Path(directory, 'gradlew')
        binary = 'gradle'  # fall back to a gradle on the PATH when no wrapper is present
if wrapper_path.exists():
binary = wrapper_path.absolute()
return f'{binary} -I {self.init_script.name} licenseReport'
def parse_packages(self, output: str, configuration: Configuration, directory: str) -> List[Package]:
packages = []
licenses_file_path = Path(directory, 'build', 'reports', 'licenses', 'licenseReport.json')
logger.debug('Loading license data from [%s]', licenses_file_path.absolute())
with open(licenses_file_path.absolute()) as license_file:
dependencies = load(license_file)
for dependency in dependencies:
artifact_name = dependency['dependency']
version = ''
index = artifact_name.rfind(':')
if index > -1:
version = artifact_name[index + 1:]
artifact_name = artifact_name[:index]
license_names = []
for license_entry in dependency['licenses']:
license_names.append(license_entry['license'])
license_names.sort()
packages.append(Package(artifact_name, version, ';'.join(license_names)))
logger.debug('Found %s packages in license file.', len(packages))
return packages
def main():
shared_main(GradleLicenseChecker())
if __name__ == '__main__':
    main() | src/kontrolilo/gradle.py | 0.562177 | 0.095265 |
from datetime import date, datetime
from dateutil.tz import tzoffset
from django.contrib.auth import login
from django.contrib.auth.forms import AuthenticationForm
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseNotFound, HttpResponseBadRequest
from django.http import JsonResponse
from django.shortcuts import render, redirect
from travel_planner.models import Trip
def home(request):
if request.user.is_anonymous:
return render(
request,
"homepage.html"
)
else:
if request.session.get("time_zone"):
time_zone = request.session.get("time_zone") * 60 * 60
today = datetime.now(tz=tzoffset("user", time_zone)).date()
else:
today = date.today()
return render(
request,
"homepage.html", {
"user": request.user,
"current_trips": Trip.objects.filter(
owner=request.user,
start_date__lte=today,
end_date__gte=today
).order_by("start_date", "-id"),
"upcoming_trips": Trip.objects.filter(
owner=request.user,
start_date__gt=today
).order_by("start_date", "-id"),
"past_trips": Trip.objects.filter(
owner=request.user,
end_date__lt=today
).order_by("start_date", "-id"),
"current_day": today
}
)
def convert_date(date_string):
if date_string:
return datetime.strptime(date_string, "%b. %d, %Y")
def save_trip(request):
trip_id = request.POST.get("trip-id")
    # .get() raises DoesNotExist rather than returning None, so use
    # .filter().first() to make the not-found branch reachable.
    trip = Trip.objects.filter(id=trip_id).first()
    if not trip:
        return HttpResponseNotFound('<h1>Trip not found</h1>')
if trip.owner != request.user:
raise PermissionDenied
if request.POST.get("delete"):
return remove_trip(request, trip)
trip.destination = request.POST.get("destination")
trip.start_date = convert_date(request.POST.get("start_date"))
if not request.POST.get("end_date"):
trip.end_date = trip.start_date
else:
trip.end_date = convert_date(request.POST.get("end_date"))
trip.comment = request.POST.get("comment")
trip.save()
return redirect('home')
def add_trip(request):
if request.user.is_anonymous:
return HttpResponseBadRequest('<h1>Must be logged in</h1>')
trip = Trip()
trip.owner = request.user
trip.start_date = datetime.now()
trip.end_date = datetime.now()
trip.save()
return redirect('home')
def remove_trip(request, trip):
trip.delete()
return redirect('home')
def ajax_login(request):
if request.POST.get("time_zone"):
request.session['time_zone'] = int(request.POST.get("time_zone"))
form = AuthenticationForm(request, data=request.POST)
if form.is_valid():
login(request, form.get_user())
return JsonResponse({
"OK": True,
"admin": form.get_user().is_staff
})
else:
return HttpResponseBadRequest("Incorrect username or password")
def ajax_change_password(request):
old_password = request.POST.get("old_password")
new_password = request.POST.get("new_password")
confirm_password = request.POST.get("confirm_password")
if not request.user.check_password(old_password):
return HttpResponseBadRequest('Original password not correct')
elif not new_password:
return HttpResponseBadRequest('New password must not be blank')
elif new_password != confirm_password:
return HttpResponseBadRequest("New password doesn't match")
else:
request.user.set_password(new_password)
request.user.save()
login(request, request.user)
return JsonResponse({"OK": True}) | travel_planner/views.py | from datetime import date, datetime
from dateutil.tz import tzoffset
from django.contrib.auth import login
from django.contrib.auth.forms import AuthenticationForm
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseNotFound, HttpResponseBadRequest
from django.http import JsonResponse
from django.shortcuts import render, redirect
from travel_planner.models import Trip
def home(request):
if request.user.is_anonymous:
return render(
request,
"homepage.html"
)
else:
if request.session.get("time_zone"):
time_zone = request.session.get("time_zone") * 60 * 60
today = datetime.now(tz=tzoffset("user", time_zone)).date()
else:
today = date.today()
return render(
request,
"homepage.html", {
"user": request.user,
"current_trips": Trip.objects.filter(
owner=request.user,
start_date__lte=today,
end_date__gte=today
).order_by("start_date", "-id"),
"upcoming_trips": Trip.objects.filter(
owner=request.user,
start_date__gt=today
).order_by("start_date", "-id"),
"past_trips": Trip.objects.filter(
owner=request.user,
end_date__lt=today
).order_by("start_date", "-id"),
"current_day": today
}
)
def convert_date(date_string):
if date_string:
return datetime.strptime(date_string, "%b. %d, %Y")
def save_trip(request):
trip_id = request.POST.get("trip-id")
trip = Trip.objects.get(id=trip_id)
if not trip:
return HttpResponseNotFound('<h1>Trip not found</h1>')
if trip.owner != request.user:
raise PermissionDenied
if request.POST.get("delete"):
return remove_trip(request, trip)
trip.destination = request.POST.get("destination")
trip.start_date = convert_date(request.POST.get("start_date"))
if not request.POST.get("end_date"):
trip.end_date = trip.start_date
else:
trip.end_date = convert_date(request.POST.get("end_date"))
trip.comment = request.POST.get("comment")
trip.save()
return redirect('home')
def add_trip(request):
if request.user.is_anonymous:
return HttpResponseBadRequest('<h1>Must be logged in</h1>')
trip = Trip()
trip.owner = request.user
trip.start_date = datetime.now()
trip.end_date = datetime.now()
trip.save()
return redirect('home')
def remove_trip(request, trip):
trip.delete()
return redirect('home')
def ajax_login(request):
if request.POST.get("time_zone"):
request.session['time_zone'] = int(request.POST.get("time_zone"))
form = AuthenticationForm(request, data=request.POST)
if form.is_valid():
login(request, form.get_user())
return JsonResponse({
"OK": True,
"admin": form.get_user().is_staff
})
else:
return HttpResponseBadRequest("Incorrect username or password")
def ajax_change_password(request):
old_password = request.POST.get("old_password")
new_password = request.POST.get("new_password")
confirm_password = request.POST.get("confirm_password")
if not request.user.check_password(old_password):
return HttpResponseBadRequest('Original password not correct')
elif not new_password:
return HttpResponseBadRequest('New password must not be blank')
elif new_password != confirm_password:
return HttpResponseBadRequest("New password doesn't match")
else:
request.user.set_password(new_password)
request.user.save()
login(request, request.user)
return JsonResponse({"OK": True}) | 0.377885 | 0.073198 |
import importlib
import logging
import os
from contextlib import contextmanager
import yadageschemas
from .steering_object import YadageSteering
from .strategies import get_strategy
log = logging.getLogger(__name__)
def run_workflow(*args, **kwargs):
"""
    Convenience wrapper around the steering context, for use when no
    additional settings are desired.
"""
with steering_ctx(*args, **kwargs):
pass
def execute_steering(
steering_object,
updateinterval=0.02,
loginterval=30,
default_trackers=True,
strategy=None,
strategyopts=None,
backend=None,
cache=None,
):
ys = steering_object
ys.adage_argument(
default_trackers=default_trackers,
trackevery=loginterval,
update_interval=updateinterval,
recursive_updates=True,
)
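    # cache is either the literal "checksums" (expanded below to
    # "checksums:<metadir>/cache.json") or a full cache spec passed to the
    # backend verbatim.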
if cache:
if cache == "checksums":
backend.enable_cache(
":".join([cache, os.path.join(ys.metadir, "cache.json")])
)
else:
backend.enable_cache(cache)
custom_tracker = os.environ.get("YADAGE_CUSTOM_TRACKER", None)
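    # Expected format: "some.module:TrackerClassName"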
if custom_tracker:
modulename, trackerclassname = custom_tracker.split(":")
module = importlib.import_module(modulename)
trackerclass = getattr(module, trackerclassname)
ys.adage_argument(additional_trackers=[trackerclass()])
if strategy is not None:
ys.adage_argument(**get_strategy(strategy, strategyopts))
ys.run_adage(backend)
@contextmanager
def steering_ctx(
dataarg,
workflow=None,
initdata=None,
toplevel=os.getcwd(),
backend=None,
controller="frommodel",
ctrlopts=None,
workflow_json=None,
cache=None,
dataopts=None,
updateinterval=0.02,
loginterval=30,
schemadir=yadageschemas.schemadir,
metadir=None,
strategy=None,
strategyopts=None,
validate=True,
visualize=True,
wflowopts=None,
accept_metadir=False,
modelsetup="inmem",
modelopts=None,
):
ys = YadageSteering.create(
metadir=metadir,
        accept_metadir=bool(accept_metadir or cache),
dataarg=dataarg,
dataopts=dataopts,
wflowopts=wflowopts,
workflow_json=workflow_json,
workflow=workflow,
toplevel=toplevel,
schemadir=schemadir,
validate=validate,
initdata=initdata,
modelsetup=modelsetup,
modelopts=modelopts,
controller=controller,
ctrlopts=ctrlopts,
)
yield ys
try:
execute_steering(
steering_object=ys,
updateinterval=updateinterval,
loginterval=loginterval,
default_trackers=visualize,
strategy=strategy,
strategyopts=strategyopts,
backend=backend,
cache=cache,
)
finally:
log.info("done. dumping workflow to disk.")
ys.serialize()
if visualize:
log.info("visualizing workflow.")
        ys.visualize() | yadage/steering_api.py | 0.448426 | 0.080177 |
from collections import namedtuple
import cv2
# ========================================
# Color Stuff
Color = namedtuple('Color', ['r', 'g', 'b'])
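# NOTE: OpenCV expects BGR channel order, so despite the (r, g, b) field
# names these tuples are written in BGR (e.g. RED is (0, 0, 255)).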
GREEN = Color(0, 255, 0)
RED = Color(0, 0, 255)
BLUE = Color(255, 75, 0)
WHITE = Color(255, 255, 255)
BLACK = Color(0, 0, 0)
YELLOW = Color(0, 255, 255)
TEAL = Color(255, 255, 0)
PINK = Color(255, 0, 255)
ORANGE = Color(0, 130, 255)
GRAY1 = Color(20, 20, 20)
GRAY2 = Color(50, 50, 50)
GRAY4 = Color(200, 200, 200)
font = cv2.FONT_HERSHEY_SIMPLEX
elapsedTime = 666
# HUD functions
def draw_HUD_FPS(frame, fps: int=0) -> None:
    if fps != 0:
cv2.putText(frame, "FPS", (5, 15), font, 0.25, GRAY4, 1)
cv2.putText(frame, str(fps), (25, 15), font, 0.5, GRAY4, 1)
def draw_HUD_elapsedTime(frame) -> None:
    if elapsedTime != 0:
cv2.putText(frame, "Elapsed", (530, 20), font, 0.25, GRAY2, 1)
cv2.putText(frame, str(elapsedTime), (530, 35), font, 0.5, GRAY2, 1)
def draw_HUD_elapsedGameTime(frame) -> None:
    if elapsedTime != 0:
cv2.putText(frame, "Game", (410, 20), font, 0.25, GRAY2, 1)
cv2.putText(frame, str(elapsedTime), (410, 35), font, 0.5, GREEN, 1)
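# NOTE: FifaFlags is referenced below but never defined or imported here;
# it is presumably supplied elsewhere in the xcv package (this file lives
# under WIP/).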
def draw_HUD_HomeAway(frame) -> None:
if FifaFlags.HomeAway == 1:
cv2.putText(frame, "Home", (275, 25), font, 0.5, GREEN, 1)
elif FifaFlags.HomeAway == 2:
cv2.putText(frame, "Away", (275, 25), font, 0.5, GREEN, 1)
def draw_HUD_DefendingSide(frame):
# # Display the detected game state
# cv2.putText(frame, "Game State", (10, 435), font, 0.5, GRAY2, 1)
# cv2.putText(frame, FifaFlags.gameStates[FifaFlags.State], (10, 470), font, 1, TEAL, 2)
# # Defense
# if FifaFlags.Defending == 1:
# cv2.putText(frame, "Defend Left", (275, 50), font, 0.5, GREEN, 1)
# elif FifaFlags.Defending == 2:
# cv2.putText(frame, "Defend Right", (275, 50), font, 0.5, GREEN, 1)
return
# # ===========================================================================
# # Controller
# # ===========================================================================
def draw_HUD_controller(frame, press:str=None) -> None:
# A
if press == 'a':
cv2.putText(frame, "A", (480, 470), font, 0.5, GREEN, 2)
cv2.circle(frame, (485, 465), 9, GREEN, 2)
else:
cv2.putText(frame, "A", (480, 470), font, 0.5, GRAY2, 2)
cv2.circle(frame, (485, 465), 9, GRAY2, 1)
# B
if press == 'b':
cv2.putText(frame, "B", (495, 455), font, 0.5, RED, 2)
cv2.circle(frame, (500, 450), 9, RED, 2)
else:
cv2.putText(frame, "B", (495, 455), font, 0.5, GRAY2, 2)
cv2.circle(frame, (500, 450), 9, GRAY2, 1)
# X
if press == 'x':
cv2.putText(frame, "X", (465, 455), font, 0.5, BLUE, 2)
cv2.circle(frame, (470, 450), 9, BLUE, 2)
else:
cv2.putText(frame, "X", (465, 455), font, 0.5, GRAY2, 2)
cv2.circle(frame, (470, 450), 9, GRAY2, 1)
# Y
if press == 'y':
cv2.putText(frame, "Y", (480, 440), font, 0.5, YELLOW, 2)
cv2.circle(frame, (485, 435), 9, YELLOW, 1)
else:
cv2.putText(frame, "Y", (480, 440), font, 0.5, GRAY2, 2)
cv2.circle(frame, (485, 435), 9, GRAY2, 1)
cv2.putText(frame, "Xbox", (270, 435), font, 0.5, GRAY2, 1)
# # D-Pad Display
# D Up
if press == '8':
cv2.rectangle(frame, (390, 440), (380, 450), YELLOW, 1)
else:
cv2.rectangle(frame, (390, 440), (380, 450), GRAY2, 1)
# D L
if press == '4':
cv2.rectangle(frame, (370, 450), (380, 460), YELLOW, 1)
else:
cv2.rectangle(frame, (370, 450), (380, 460), GRAY2, 1)
# D Dn
if press == '2':
cv2.rectangle(frame, (390, 460), (380, 470), YELLOW, 1)
else:
cv2.rectangle(frame, (390, 460), (380, 470), GRAY2, 1)
# D R
if press == '6':
cv2.rectangle(frame, (390, 450), (400, 460), YELLOW, 1)
else:
cv2.rectangle(frame, (390, 450), (400, 460), GRAY2, 1)
# LS Display
cv2.circle(frame, (350, 440), 1, YELLOW, 1)
cv2.circle(frame, (350, 440), 15, GRAY2, 1)
# RS Display
cv2.circle(frame, (440, 460), 1, YELLOW, 1)
cv2.circle(frame, (440, 460), 15, GRAY2, 1)
cv2.putText(frame, "Select", (270, 475), font, 0.5, GRAY2, 1)
if press == '3':
cv2.putText(frame, "Start", (270, 455), font, 0.5, YELLOW, 1)
else:
cv2.putText(frame, "Start", (270, 455), font, 0.5, GRAY2, 1) | xcv/WIP/hud.py | from collections import namedtuple
import cv2
# ========================================
# Color Stuff
Color = namedtuple('Color', ['r', 'g', 'b'])
GREEN = Color(0, 255, 0)
RED = Color(0, 0, 255)
BLUE = Color(255, 75, 0)
WHITE = Color(255, 255, 255)
BLACK = Color(0, 0, 0)
YELLOW = Color(0, 255, 255)
TEAL = Color(255, 255, 0)
PINK = Color(255, 0, 255)
ORANGE = Color(0, 130, 255)
GRAY1 = Color(20, 20, 20)
GRAY2 = Color(50, 50, 50)
GRAY4 = Color(200, 200, 200)
font = cv2.FONT_HERSHEY_SIMPLEX
elapsedTime = 666
# HUD functions
def draw_HUD_FPS(frame, fps: int=0) -> None:
if fps is not 0:
cv2.putText(frame, "FPS", (5, 15), font, 0.25, GRAY4, 1)
cv2.putText(frame, str(fps), (25, 15), font, 0.5, GRAY4, 1)
def draw_HUD_elapsedTime(frame) -> None:
if elapsedTime is not 0:
cv2.putText(frame, "Elapsed", (530, 20), font, 0.25, GRAY2, 1)
cv2.putText(frame, str(elapsedTime), (530, 35), font, 0.5, GRAY2, 1)
def draw_HUD_elapsedGameTime(frame) -> None:
if elapsedTime is not 0:
cv2.putText(frame, "Game", (410, 20), font, 0.25, GRAY2, 1)
cv2.putText(frame, str(elapsedTime), (410, 35), font, 0.5, GREEN, 1)
def draw_HUD_HomeAway(frame) -> None:
if FifaFlags.HomeAway == 1:
cv2.putText(frame, "Home", (275, 25), font, 0.5, GREEN, 1)
elif FifaFlags.HomeAway == 2:
cv2.putText(frame, "Away", (275, 25), font, 0.5, GREEN, 1)
def draw_HUD_DefendingSide(frame):
# # Display the detected game state
# cv2.putText(frame, "Game State", (10, 435), font, 0.5, GRAY2, 1)
# cv2.putText(frame, FifaFlags.gameStates[FifaFlags.State], (10, 470), font, 1, TEAL, 2)
# # Defense
# if FifaFlags.Defending == 1:
# cv2.putText(frame, "Defend Left", (275, 50), font, 0.5, GREEN, 1)
# elif FifaFlags.Defending == 2:
# cv2.putText(frame, "Defend Right", (275, 50), font, 0.5, GREEN, 1)
return
# # ===========================================================================
# # Controller
# # ===========================================================================
def draw_HUD_controller(frame, press:str=None) -> None:
# A
if press == 'a':
cv2.putText(frame, "A", (480, 470), font, 0.5, GREEN, 2)
cv2.circle(frame, (485, 465), 9, GREEN, 2)
else:
cv2.putText(frame, "A", (480, 470), font, 0.5, GRAY2, 2)
cv2.circle(frame, (485, 465), 9, GRAY2, 1)
# B
if press == 'b':
cv2.putText(frame, "B", (495, 455), font, 0.5, RED, 2)
cv2.circle(frame, (500, 450), 9, RED, 2)
else:
cv2.putText(frame, "B", (495, 455), font, 0.5, GRAY2, 2)
cv2.circle(frame, (500, 450), 9, GRAY2, 1)
# X
if press == 'x':
cv2.putText(frame, "X", (465, 455), font, 0.5, BLUE, 2)
cv2.circle(frame, (470, 450), 9, BLUE, 2)
else:
cv2.putText(frame, "X", (465, 455), font, 0.5, GRAY2, 2)
cv2.circle(frame, (470, 450), 9, GRAY2, 1)
# Y
if press == 'y':
cv2.putText(frame, "Y", (480, 440), font, 0.5, YELLOW, 2)
cv2.circle(frame, (485, 435), 9, YELLOW, 1)
else:
cv2.putText(frame, "Y", (480, 440), font, 0.5, GRAY2, 2)
cv2.circle(frame, (485, 435), 9, GRAY2, 1)
cv2.putText(frame, "Xbox", (270, 435), font, 0.5, GRAY2, 1)
# # D-Pad Display
# D Up
if press == '8':
cv2.rectangle(frame, (390, 440), (380, 450), YELLOW, 1)
else:
cv2.rectangle(frame, (390, 440), (380, 450), GRAY2, 1)
# D L
if press == '4':
cv2.rectangle(frame, (370, 450), (380, 460), YELLOW, 1)
else:
cv2.rectangle(frame, (370, 450), (380, 460), GRAY2, 1)
# D Dn
if press == '2':
cv2.rectangle(frame, (390, 460), (380, 470), YELLOW, 1)
else:
cv2.rectangle(frame, (390, 460), (380, 470), GRAY2, 1)
# D R
if press == '6':
cv2.rectangle(frame, (390, 450), (400, 460), YELLOW, 1)
else:
cv2.rectangle(frame, (390, 450), (400, 460), GRAY2, 1)
# LS Display
cv2.circle(frame, (350, 440), 1, YELLOW, 1)
cv2.circle(frame, (350, 440), 15, GRAY2, 1)
# RS Display
cv2.circle(frame, (440, 460), 1, YELLOW, 1)
cv2.circle(frame, (440, 460), 15, GRAY2, 1)
cv2.putText(frame, "Select", (270, 475), font, 0.5, GRAY2, 1)
if press == '3':
cv2.putText(frame, "Start", (270, 455), font, 0.5, YELLOW, 1)
else:
cv2.putText(frame, "Start", (270, 455), font, 0.5, GRAY2, 1) | 0.273963 | 0.333205 |
import logging
import re
import sys
from os import path
from subprocess import check_output
import pandas as pd
commit_re = re.compile(
r"""
^(?P<author>.*?)
\s
(?P<timestamp>\d+-\d+-\d+\s\d+:\d+:\d+\s-\d+)
\s
(?P<message>.*)$
""",
re.VERBOSE,
)
def get_commits(repo_path: str) -> str:
log_format = "%n###%n%ae %ai %s"
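    # Each commit is emitted as a "###" separator line followed by
    # "<author email> <ISO-8601 date> <subject>", which commit_re parses.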
command = ["git", "-C", repo_path, "log", "--stat", f"--pretty={log_format}"]
logging.debug("[wrangle.py#get_commits] command=" + " ".join(command))
return check_output(command).decode("utf8").strip()
def handle_section(text):
lines = text.split("\n")
data = re.match(commit_re, lines[0]).groupdict()
if len(lines) == 1:
return data
lines = "\n".join(lines[1:]).strip().split("\n")
files_changed = lines[-1].strip()
data["n_files_changed"] = int(re.search(r"^\d+", files_changed[0])[0])
if "insertions" in files_changed:
data["insertions"] = int(re.search("(\d+)\sinsertions", files_changed)[1])
if "deletions" in files_changed:
data["deletions"] = int(re.search("(\d+)\sdeletions", files_changed)[1])
return data
def get_commit_df(repo_path, use_cache=True) -> pd.DataFrame:
contents = get_commits(repo_path)
sections = [
section.strip() for section in contents.split("###\n") if section.strip()
]
df = pd.DataFrame([handle_section(section) for section in sections])
for col in "n_files_changed", "insertions", "deletions":
df[col] = df[col].fillna(0).astype(int)
df.timestamp = pd.to_datetime(df.timestamp, utc=True)
df = df.set_index("timestamp").tz_convert("America/Chicago").sort_index()
return df
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.prog = "python -m zgulde.gitlog"
parser.description = "Export git log information as a csv"
parser.add_argument(
"--repo-path",
help="Local path to the git repo to be analyzed (default this directory)",
default=".",
)
parser.add_argument(
"-v", "--verbose", help="increase output verbosity", action="count"
)
args = parser.parse_args()
if args.verbose is None:
loglevel = logging.WARN
elif args.verbose == 1:
loglevel = logging.INFO
elif args.verbose >= 2:
loglevel = logging.DEBUG
logging.basicConfig(
format="%(asctime)s:%(levelname)s:%(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=loglevel,
)
df = get_commit_df(args.repo_path)
    print(df.to_csv(index=True)) | zgulde/gitlog/to_csv.py | 0.281801 | 0.218795 |
import logging
from core.event import Event
from core.mouse import Mouse, Button
from core.keyboard import KeyBoard, KeyCode
from Configs import MOUSE_SCROLL_SPEED
from core.os import OS
class EventHandler:
@staticmethod
def handle(event):
""" Event responder / handler """
try:
            if not isinstance(event, Event):
                raise TypeError(f"Invalid event passed: {event!r}")
logging.debug(f"Received Event: {event.name}")
if event.name == Event.CONNECTION_TEST:
event.client.connection.close()
elif event.name == Event.MOVE_MOUSE:
Mouse.move(event.value['x'], event.value['y'])
x, y = Mouse.get_position()
if x < 1:
event_clipboard = Event(Event.CLIPBOARD, value=OS.get_clipboard_data(), client=event.client)
event_clipboard.client.send_event(event_clipboard)
event = Event(Event.CHANGE_POSITION, value=str(event.computer.get_position() - 1),
client=event.client)
event.client.send_event(event)
elif event.name == Event.CHANGE_POSITION:
event.computer.set_active_computer_position(int(event.value))
elif event.name == Event.MOUSE_LEFT_DOWN:
Mouse.mouse.press(Button.left)
elif event.name == Event.MOUSE_RIGHT_DOWN:
Mouse.mouse.press(Button.right)
elif event.name == Event.MOUSE_LEFT_UP:
Mouse.mouse.release(Button.left)
elif event.name == Event.MOUSE_RIGHT_UP:
Mouse.mouse.release(Button.right)
elif event.name == Event.MOUSE_WHEEL:
Mouse.mouse.scroll(0, int(event.value) * MOUSE_SCROLL_SPEED)
elif event.name == Event.KEY_DOWN:
KeyBoard.keyboard.press(KeyCode.from_vk(event.value))
elif event.name == Event.KEY_UP:
KeyBoard.keyboard.release(KeyCode.from_vk(event.value))
elif event.name == Event.CLIPBOARD:
OS.set_clipboard_data(event.value)
        except Exception as e:
            logging.exception("Error while handling event: %s", e)
| core/handlers/event_handler.py | 0.35209 | 0.11158 |
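
A dispatch sketch for the handler above, assuming it is importable per the repo_path; Event's constructor signature is inferred from the calls in this file and may differ in the real codebase.

from core.event import Event
from core.handlers.event_handler import EventHandler  # import path assumed

# Hypothetical values; this would perform a real scroll on the local machine.
event = Event(Event.MOUSE_WHEEL, value="2", client=None)
EventHandler.handle(event)  # scrolls by int("2") * MOUSE_SCROLL_SPEED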
from __future__ import annotations
from hashlib import sha1
from logging import getLogger
from pathlib import Path
from shutil import rmtree
from typing import Iterable, Iterator, NamedTuple, Optional
from dzdsu.constants import ITALIC
from dzdsu.constants import LINK
from dzdsu.constants import MODS_DIR
from dzdsu.constants import WORKSHOP_URL
__all__ = ['Mod', 'InstalledMod', 'mods_str', 'print_mods']
class Mod(NamedTuple):
"""A server mod."""
id: int
name: Optional[str] = None
enabled: bool = True
def __str__(self) -> str:
return LINK.format(url=self.url, text=self.name or self.id)
@classmethod
def from_id(cls, ident: int, *, name: Optional[str] = None) -> Mod:
"""Creates a mod from an ID."""
if ident == 0:
raise ValueError(f'Invalid mod ID: {ident}')
if ident < 0:
return cls(abs(ident), name, enabled=False)
return cls(ident, name)
@classmethod
def from_json(cls, json: dict[str, int | str]) -> Mod:
"""Creates a mod from a JSON-ish dict."""
return cls.from_id(json['id'], name=json.get('name'))
@classmethod
def from_value(cls, value: int | dict[str, int | str]) -> Mod:
"""Creates a mod from an int or JSON value."""
if isinstance(value, int):
return cls.from_id(value)
if isinstance(value, dict):
return cls.from_json(value)
raise TypeError(f'Cannot create mod from: {value} ({type(value)})')
@property
def path(self) -> Path:
"""Returns the relative path to the local mod directory."""
return MODS_DIR / str(self.id)
@property
def url(self) -> str:
"""Returns the Steam Workshop URL."""
return WORKSHOP_URL.format(self.id)
class InstalledMod(NamedTuple):
"""Represents an installed mod."""
mod: Mod
base_dir: Path
@property
def path(self) -> Path:
"""Returns the relative path to the local mod directory."""
return self.base_dir / self.mod.path
@property
def addons(self) -> Path:
"""Returns the path to the addons directory."""
return self.path / 'addons'
@property
def keys(self) -> Path:
"""Returns the path to the keys directory."""
return self.path / 'keys'
@property
def metadata(self) -> Path:
"""Returns the path to the metadata file."""
return self.path / 'meta.cpp'
@property
def sha1sum(self) -> str:
"""Returns the SHA-1 checksum."""
with self.metadata.open('rb') as file:
return sha1(file.read()).hexdigest()
@property
def pbos(self) -> Iterator[Path]:
"""Yields paths to the .pbo files."""
return self.addons.glob('*.pbo')
@property
def bikeys(self) -> Iterator[Path]:
"""Yields paths to the *.bikey files."""
return self.keys.glob('*.bikey')
def fix_paths(self) -> None:
"""Links paths to lower-case."""
if (addons := self.path / 'Addons').is_dir():
link_to_lowercase(addons)
if (keys := self.path / 'Keys').is_dir():
link_to_lowercase(keys)
for pbo in self.pbos:
link_to_lowercase(pbo)
def remove(self) -> None:
"""Removes this mod."""
rmtree(self.path)
def link_to_lowercase(path: Path) -> None:
"""Creates a symlink with the path names in lower case."""
if (filename := path.name) == (lower := filename.lower()):
return
if (symlink := path.parent / lower).exists():
return
    getLogger(__name__).debug('Linking "%s" to "%s".', filename, symlink)
symlink.symlink_to(filename)
def mods_str(mods: Iterable[Mod], sep: str = ';') -> str:
"""Returns a string representation of the given mods."""
return sep.join(str(mod.path) for mod in mods)
def print_mods(mods: Iterable[Mod]) -> None:
"""Lists the respective mods."""
for mod in mods:
        print(mod if mod.enabled else ITALIC.format(mod))

| dzdsu/mods.py | 0.926665 | 0.228791 |
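
A short sketch of the Mod sign convention above; the printed prefix depends on the MODS_DIR constant, so the expected output is only indicative.

from dzdsu.mods import Mod, mods_str

disabled = Mod.from_value(-123)           # negative ID => disabled mod
assert disabled == Mod(123, None, enabled=False)
mods = [Mod.from_value({"id": 456, "name": "SomeMod"}), disabled]
print(mods_str(mods))  # e.g. "mods/456;mods/123" -- disabled mods are not filtered out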
from functools import partial
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from dalle_pytorch.reversible import ReversibleSequence, SequentialSequence
from dalle_pytorch.attention import Attention, SparseAttention, SparseConvCausalAttention, SparseAxialCausalAttention
# helpers
def cast_tuple(val, depth):
return val if isinstance(val, tuple) else (val,) * depth
# classes
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
class GEGLU(nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim = -1)
return x * F.gelu(gates)
class FeedForward(nn.Module):
    def __init__(self, dim, dropout = 0., mult = 4):
        super().__init__()
        inner_dim = int(dim * mult)  # cast so nn.Linear never receives a float size
        self.net = nn.Sequential(
            nn.Linear(dim, inner_dim * 2),  # doubled so GEGLU can split into value / gate halves
            GEGLU(),
            nn.Dropout(dropout),
            nn.Linear(inner_dim, dim)
        )
def forward(self, x):
return self.net(x)
class Transformer(nn.Module):
def __init__(
self,
*,
dim,
depth,
seq_len,
reversible = False,
causal = True,
heads = 8,
dim_head = 64,
ff_mult = 4,
attn_dropout = 0.,
ff_dropout = 0.,
noncausal_attn_len = 0,
sparse_attn = False,
        sparse_attn_global_indices = ()  # avoid a mutable default argument
):
super().__init__()
layers = nn.ModuleList([])
sparse_layer = cast_tuple(sparse_attn, depth)
for _, sparse_attn in zip(range(depth), sparse_layer):
attn_class = Attention if not sparse_attn else partial(SparseAttention, sparse_attn_global_indices = sparse_attn_global_indices)
layers.append(nn.ModuleList([
PreNorm(dim, attn_class(dim, causal = causal, seq_len = seq_len, heads = heads, dim_head = dim_head, dropout = attn_dropout, noncausal_attn_len = noncausal_attn_len)),
PreNorm(dim, FeedForward(dim, mult = ff_mult, dropout = ff_dropout))
]))
execute_type = ReversibleSequence if reversible else SequentialSequence
route_attn = ((True, False),) * depth
attn_route_map = {'mask': route_attn}
self.layers = execute_type(layers, args_route = attn_route_map)
def forward(self, x, **kwargs):
        return self.layers(x, **kwargs)

| dalle_pytorch/transformer.py | 0.943673 | 0.352843 |
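
A shape sketch for the Transformer above with arbitrary hyperparameters; it assumes the stock dalle_pytorch Attention modules accept (batch, seq, dim) inputs with an optional mask.

import torch
from dalle_pytorch.transformer import Transformer

model = Transformer(dim=64, depth=2, seq_len=32, heads=4, dim_head=16)
x = torch.randn(1, 32, 64)    # (batch, seq_len, dim) token embeddings
out = model(x)
assert out.shape == x.shape   # residual attention/FF blocks preserve shape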
import os
import re
import getpass
import logging
import argparse
from pathlib import Path
import yaml
import json
from datetime import datetime
import numpy as np
import pandas as pd
import scipy.stats
from doframework.core.inputs import generate_id, setup_logger
from doframework.core.pwl import PWL
from doframework.core.sampler import D_sampler
def generate_dataset(obj_input: dict, obj_name: str, **kwargs):
input_prefix = 'objective'
input_suffix = 'json'
output_prefix = 'data'
output_suffix = 'csv'
    objective_id = re.match(rf'{input_prefix}_(\w+)\.{input_suffix}', obj_name).group(1)
assert objective_id == obj_input['objective_id'], 'Mismatch between file name recorded in json and file name.'
data_id = generate_id()
Ps = np.array(obj_input['f']['polyhedrons'])
Vs = np.array(obj_input['f']['values'])
f = PWL(Ps,Vs)
N = obj_input['data']['N']
noise = obj_input['data']['noise']
weights = obj_input['data']['weights']
policies = [np.array(policy) for policy in obj_input['data']['policies']]
covariances = [np.array(cov) for cov in obj_input['data']['covariances']]
data_hypothesis = obj_input['data']['hypothesis']
data_hypothesis_obj = getattr(scipy.stats,data_hypothesis)
D = D_sampler(f, data_hypothesis_obj, N, weights, noise, mean=policies, cov=covariances)
df = pd.DataFrame(D,columns=[f'x{i}' for i in range(D.shape[1]-1)]+['y'])
generated_file = ''.join(['_'.join([output_prefix,objective_id,data_id]),'.',output_suffix])
#### NOTE: add file name till rayvens allows to read file name from source bucket event
# df = pd.concat([df,pd.DataFrame([generated_file]*df.shape[0],columns=['generated_file_name'])],axis=1)
return df, generated_file
def main(data_root: str, args: argparse.Namespace, logger_name: str=None, is_raised=True):
for p in Path(os.path.join(data_root,'objectives')).rglob('*.json'):
try:
with open(os.path.join(data_root,'objectives',p.name)) as file:
obj_input = json.load(file)
obj_name = p.name
if logger_name:
log = logging.getLogger(logger_name)
log.info('Loaded {}.'.format(file.name))
obj_id = obj_name # obj_input['objective_id']
if logger_name:
log = logging.getLogger(logger_name)
log.info('Sampling {} datasets for {}.'.format(args.datasets,obj_id))
for i in range(args.datasets):
df, gen_data_file = generate_dataset(obj_input,obj_name)
gen_data_path = os.path.join(data_root,'data',gen_data_file)
df.to_csv(gen_data_path,index=False)
except IOError as e:
if logger_name:
log = logging.getLogger(logger_name)
log.error('Unable to load json from objective file.\n')
log.error(e)
if is_raised: raise e
except json.JSONDecodeError as e:
if logger_name:
log = logging.getLogger(logger_name)
                log.error('Error occurred while decoding objective json.\n')
log.error(e)
if is_raised: raise e
except Exception as e:
if logger_name:
log = logging.getLogger(logger_name)
log.error('Something went wrong while processing objective...\n')
log.error(e)
if is_raised: raise e
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--datasets", type=int, default=1, help="Number of datasets to generate.")
parser.add_argument("-l", "--logger", action="store_true", help="Enable logging.")
args = parser.parse_args()
configs_path = os.environ['HOME']
configs_file = 'configs.yaml'
with open(os.path.join(configs_path,configs_file),'r') as file:
try:
configs = yaml.safe_load(file)
except yaml.YAMLError as e:
            print('CRITICAL ... Unable to load configs yaml.\n')
print(e)
raise e
user = getpass.getuser()
data_root = configs[user]['data']
now = datetime.now().strftime('%Y-%m-%d_%H%M')
    log_file = 'generated_data_{}.log'.format(now)
    log_path = os.path.join(data_root,'logs',log_file)
    logger_name = 'generated_data_log' if args.logger else None
setup_logger(logger_name, log_path)
if logger_name:
log = logging.getLogger(logger_name)
log.info('Running on user %s', user)
log.info('Data root %s', data_root)
    main(data_root, args, logger_name)

| doframework/flow/datasets.py | 0.380874 | 0.169956 |
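
generate_dataset above reads a specific objective-JSON shape; the dict below reconstructs the expected keys from the parsing code, with every value a placeholder.

# Shape of the objective JSON that generate_dataset expects (values illustrative).
obj_input = {
    "objective_id": "abc123",
    "f": {
        "polyhedrons": [],        # converted with np.array and fed to PWL
        "values": [],
    },
    "data": {
        "N": 1000,                # number of samples to draw
        "noise": 0.1,
        "weights": [1.0],
        "policies": [[0.0, 0.0]],                 # one mean vector per component
        "covariances": [[[1.0, 0.0], [0.0, 1.0]]],
        "hypothesis": "multivariate_normal",      # a scipy.stats distribution name
    },
}
# generate_dataset(obj_input, "objective_abc123.json") then samples N points
# from f (plus noise) and returns (DataFrame, generated csv file name).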
from __future__ import annotations
from typing import (List, Callable, Any, Tuple, TypeVar)
from ..fable_modules.fable_library.array import (map as map_2, sort_in_place_by, find_index, sort, contains as contains_1)
from ..fable_modules.fable_library.list import (empty, FSharpList, map as map_1, is_empty, head, tail, append, of_array as of_array_1, cons, to_array, fold, map_indexed, singleton as singleton_1, of_array_with_tail, reverse)
from ..fable_modules.fable_library.map import (empty as empty_1, try_find, add, to_array as to_array_1)
from ..fable_modules.fable_library.option import (default_arg, Option, some)
from ..fable_modules.fable_library.seq import (to_list, delay, map, collect, singleton, of_list as of_list_1, append as append_1, empty as empty_3)
from ..fable_modules.fable_library.set import (of_array, of_list, contains)
from ..fable_modules.fable_library.string import (to_text, interpolate, join, printf)
from ..fable_modules.fable_library.util import (compare_primitives, compare, IEnumerable, equals, string_hash, get_enumerator)
from ..FableSedlex.code_gen import (Doc, word, Doc_op_Multiply_Z7CFFAC00, Doc_op_Addition_Z7CFFAC00, parens, vsep, seplist, Doc_op_RightShift_2AAA0F3C, bracket, empty as empty_2, align, indent)
from .analysis import (Analyzer, Sigma__get_GlobalVariables, Sigma__GetADTCases, Shape, Sigma__GetRecordTypes)
from .backends_common import (CodeGenOptions, NameMangling_IdentifierDescriptor__WithNameEnv_Z7613F24B, NameMangling_nameEnv, NameMangling_IdentifierDescriptor_Create_Z48C5CCEF, NameMangling_IdentifierDescriptor, NameMangling_maskChar, NameMangling_mangle)
from .exceptions import (UnsolvedTypeVariable, UnboundVariable, UnboundLexer, UnboundNonterminal)
from .grammar import (definition, symbol, position, monot, _007CTTuple_007C__007C, monot__prune, expr, node, lexerule, production as production_1)
from .resource_keys import ocaml_rts_file
from .utils import (is_lower, is_unicode, is_digit, is_upper, escape_string, List_tryLookup, capitalized)
__A = TypeVar("__A")
__B = TypeVar("__B")
def codegen(analyzer: Analyzer, cg_options: CodeGenOptions, lang_name: str, stmts: List[definition]) -> List[Tuple[str, Doc]]:
def arrow_354(x: str, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
return x
variable_renamer : Callable[[str], str] = default_arg(cg_options.rename_var, arrow_354)
def arrow_355(x_1: str, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
return x_1
constructor_renamer : Callable[[str], str] = default_arg(cg_options.rename_ctor, arrow_355)
def arrow_356(x_2: str, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
return x_2
field_renamer : Callable[[str], str] = default_arg(cg_options.rename_field, arrow_356)
def arrow_357(x_3: str, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
return x_3
type_renamer : Callable[[str], str] = default_arg(cg_options.rename_type, arrow_357)
start_rule_qualified_type : Option[str] = cg_options.start_rule_qualified_type
rts_file_string : str = cg_options.request_resource(ocaml_rts_file)
import_var_names : FSharpList[str] = empty()
import_type_names : FSharpList[str] = empty()
class ObjectExpr359:
@property
def Compare(self) -> Any:
def arrow_358(x_4: str, y: str) -> int:
return compare_primitives(x_4, y)
return arrow_358
abandoned_names : Any = of_array(["and", "as", "assert", "asr", "begin", "class", "constraint", "do", "done", "downto", "else", "end", "exception", "external", "false", "for", "fun", "function", "functor", "if", "in", "include", "inherit", "initializer", "land", "lazy", "let", "lor", "lsl", "lsr", "lxor", "match", "method", "mod", "module", "mutable", "new", "nonrec", "object", "of", "open", "or", "private", "rec", "sig", "struct", "then", "to", "true", "try", "type", "val", "virtual", "when", "while", "with"], ObjectExpr359())
class ObjectExpr361:
@property
def Compare(self) -> Any:
def arrow_360(x_5: symbol, y_1: symbol) -> int:
return compare(x_5, y_1)
return arrow_360
symmap : Any = empty_1(ObjectExpr361())
toplevel_transformer : FSharpList[Doc] = empty()
current_pos : position = analyzer.current_pos
lexer_maps : FSharpList[Tuple[str, Doc]] = empty()
def arrow_363(analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> IEnumerable[Tuple[str, str]]:
def arrow_362(k: Any) -> Tuple[str, str]:
return (k[0], variable_renamer(k[0]))
return map(arrow_362, Sigma__get_GlobalVariables(analyzer.Sigma))
global_scope : FSharpList[Tuple[str, str]] = to_list(delay(arrow_363))
def arrow_364(tuple: Tuple[str, str], analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
return tuple[1]
class ObjectExpr366:
@property
def Compare(self) -> Any:
def arrow_365(x_6: str, y_2: str) -> int:
return compare_primitives(x_6, y_2)
return arrow_365
def arrow_367(i: int, c: str, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> bool:
test : bool = True if (True if (is_lower(c)) else (is_unicode(c))) else (c == "_")
return test if (i == 0) else (True if (test) else (is_digit(c)))
def arrow_368(i_1: int, c_1: str, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
return to_text(interpolate("_X%P()_", [i_1])) if (is_digit(c_1)) else (c_1.lower() if (is_upper(c_1)) else (to_text(interpolate("_%P()_", [ord(c_1)]))))
ocaml_var_ident_descr : NameMangling_IdentifierDescriptor = NameMangling_IdentifierDescriptor__WithNameEnv_Z7613F24B(NameMangling_IdentifierDescriptor_Create_Z48C5CCEF(arrow_367, arrow_368), NameMangling_nameEnv(of_list(map_1(arrow_364, global_scope), ObjectExpr366())))
class ObjectExpr370:
@property
def Compare(self) -> Any:
def arrow_369(x_7: str, y_3: str) -> int:
return compare_primitives(x_7, y_3)
return arrow_369
def arrow_371(i_2: int, c_2: str, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> bool:
return is_upper(c_2) if (i_2 == 0) else (True if (True if (is_upper(c_2)) else (c_2 == "_")) else (is_digit(c_2)))
def arrow_372(i_3: int, c_3: str, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
return c_3.upper() if (is_lower(c_3)) else (NameMangling_maskChar(ord("A"), ord("Z"), ord(c_3)) + "_" if (i_3 == 0) else (("_" + NameMangling_maskChar(ord("A"), ord("Z"), ord(c_3))) + "_"))
sedlex_ident_descr : NameMangling_IdentifierDescriptor = NameMangling_IdentifierDescriptor__WithNameEnv_Z7613F24B(NameMangling_IdentifierDescriptor_Create_Z48C5CCEF(arrow_371, arrow_372), NameMangling_nameEnv(of_array(["EOF"], ObjectExpr370())))
class ObjectExpr374:
@property
def Compare(self) -> Any:
def arrow_373(x_8: str, y_4: str) -> int:
return compare_primitives(x_8, y_4)
return arrow_373
def arrow_375(i_4: int, c_4: str, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> bool:
return is_lower(c_4) if (i_4 == 0) else (True if (True if (is_lower(c_4)) else (c_4 == "_")) else (is_digit(c_4)))
def arrow_376(_arg1: int, c_5: str, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
return c_5.lower() if (is_upper(c_5)) else (("_" + NameMangling_maskChar(ord("a"), ord("z"), ord(c_5))) + "_")
menhir_ident_descr : NameMangling_IdentifierDescriptor = NameMangling_IdentifierDescriptor__WithNameEnv_Z7613F24B(NameMangling_IdentifierDescriptor_Create_Z48C5CCEF(arrow_375, arrow_376), NameMangling_nameEnv(of_array(["start"], ObjectExpr374())))
def mangle(idr: NameMangling_IdentifierDescriptor, n: str, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
return NameMangling_mangle(abandoned_names, idr, n)
def cg_symbol(x_9: symbol, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
nonlocal symmap
match_value : Option[str] = try_find(x_9, symmap)
if match_value is None:
valid_py_ident : str
if x_9.tag == 0:
define : str = x_9.fields[0]
valid_py_ident = mangle(sedlex_ident_descr, ("\"" + define) + "\"") if (x_9.fields[1]) else (mangle(sedlex_ident_descr, define))
elif x_9.tag == 1:
valid_py_ident = mangle(menhir_ident_descr, x_9.fields[0])
else:
raise Exception("macro not processed")
symmap = add(x_9, valid_py_ident, symmap)
return valid_py_ident
else:
return match_value
def name_of_named_term(n_2: str, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
return cg_symbol(symbol(0, n_2, False))
def try_lookup(key_mut: __A, x_10_mut: FSharpList[Tuple[__A, __B]], analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> Option[__B]:
while True:
(key, x_10) = (key_mut, x_10_mut)
if not is_empty(x_10):
if equals(head(x_10)[0], key):
return some(head(x_10)[1])
elif not is_empty(x_10):
key_mut = key
x_10_mut = tail(x_10)
continue
else:
raise Exception("Match failure")
else:
return None
break
def _cg_type(t: monot, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
if t.tag == 4:
return "\u0027" + t.fields[0]
elif t.tag == 0:
raise UnsolvedTypeVariable()
elif t.tag == 3:
def mapping(tupled_arg: Tuple[str, monot], t: monot=t) -> str:
return _cg_type(tupled_arg[1])
return (("(" + join(" * ", map_1(mapping, t.fields[0]))) + ") -\u003e ") + _cg_type(t.fields[1])
elif t.tag == 2:
if _007CTTuple_007C__007C(t.fields[0]) is not None:
return ("(" + join(" * ", map_1(_cg_type, t.fields[1]))) + ")"
elif t.tag == 2:
return (("(" + join(", ", map_1(_cg_type, t.fields[1]))) + ") ") + _cg_type(t.fields[0])
else:
raise Exception("Match failure")
else:
return type_renamer(t.fields[0])
def cg_type(t_1: monot, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
return _cg_type(monot__prune(t_1))
def cg_expr(scope: FSharpList[Tuple[str, str]], curr_expr: expr, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> Doc:
match_value_1 : node = curr_expr.node
if match_value_1.tag == 6:
match_value_2 : Option[str] = try_lookup(match_value_1.fields[0], scope)
if match_value_2 is not None:
return word(match_value_2)
else:
raise UnboundVariable(match_value_1.fields[0])
elif match_value_1.tag == 11:
if match_value_1.fields[0]:
return word("true")
else:
return word("false")
elif match_value_1.tag == 3:
return Doc_op_Multiply_Z7CFFAC00(Doc_op_Multiply_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(cg_expr(scope, match_value_1.fields[0]), word(":")), word(cg_type(curr_expr.t))), word(".")), word(match_value_1.fields[1]))
elif match_value_1.tag == 8:
return word(to_text(printf("%d"))(match_value_1.fields[0]))
elif match_value_1.tag == 10:
return word(to_text(printf("%f"))(match_value_1.fields[0]))
elif match_value_1.tag == 9:
return word(escape_string(match_value_1.fields[0]))
elif match_value_1.tag == 5:
def arrow_378(scope: FSharpList[Tuple[str, str]]=scope, curr_expr: expr=curr_expr) -> IEnumerable[Tuple[str, str]]:
def arrow_377(match_value_3: Tuple[str, monot]) -> IEnumerable[Tuple[str, str]]:
arg : str = match_value_3[0]
return singleton((arg, mangle(ocaml_var_ident_descr, arg)))
return collect(arrow_377, match_value_1.fields[0])
code : Doc = cg_expr(append(to_list(delay(arrow_378)), scope), match_value_1.fields[1])
def arrow_379(tupled_arg_1: Tuple[str, monot], scope: FSharpList[Tuple[str, str]]=scope, curr_expr: expr=curr_expr) -> Doc:
return word((tupled_arg_1[0] + ":") + cg_type(tupled_arg_1[1]))
anns : FSharpList[Doc] = map_1(arrow_379, match_value_1.fields[0])
return parens(vsep(of_array_1([Doc_op_Addition_Z7CFFAC00(word("fun"), Doc_op_Multiply_Z7CFFAC00(parens(seplist(word(", "), anns)), word("-\u003e"))), Doc_op_RightShift_2AAA0F3C(code, 4)])))
elif match_value_1.tag == 4:
value_0027 : Doc = cg_expr(scope, match_value_1.fields[1])
m_name : str = mangle(ocaml_var_ident_descr, match_value_1.fields[0])
body : Doc = cg_expr(cons((match_value_1.fields[0], m_name), scope), match_value_1.fields[2])
name_2 : Doc = Doc_op_Multiply_Z7CFFAC00(Doc_op_Multiply_Z7CFFAC00(word(m_name), word(":")), word(cg_type(match_value_1.fields[1].t)))
return vsep(of_array_1([Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("let"), name_2), word("=")), value_0027), word("in")), body]))
elif match_value_1.tag == 2:
def arrow_380(elt: expr, scope: FSharpList[Tuple[str, str]]=scope, curr_expr: expr=curr_expr) -> Doc:
return cg_expr(scope, elt)
return bracket(seplist(word(";"), map_1(arrow_380, match_value_1.fields[0])))
elif match_value_1.tag == 7:
return word(to_text(interpolate("$%P()", [match_value_1.fields[0]])))
elif match_value_1.tag == 1:
def arrow_381(elt_1: expr, scope: FSharpList[Tuple[str, str]]=scope, curr_expr: expr=curr_expr) -> Doc:
return cg_expr(scope, elt_1)
return parens(seplist(word(", "), map_1(arrow_381, match_value_1.fields[0])))
else:
f_0027 : Doc = cg_expr(scope, match_value_1.fields[0])
def arrow_382(x_12: expr, scope: FSharpList[Tuple[str, str]]=scope, curr_expr: expr=curr_expr) -> Doc:
return cg_expr(scope, x_12)
args_0027 : FSharpList[Doc] = map_1(arrow_382, match_value_1.fields[1])
return Doc_op_Multiply_Z7CFFAC00(f_0027, parens(seplist(word(", "), args_0027)))
def mk_lexer(def_: lexerule, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
op_dereference : Callable[[lexerule], str] = mk_lexer
(pattern_matching_result, s_6, e_2, e_3, e_4, e_5, s_7, xs, l, r_1, xs_1, e_6) = (None, None, None, None, None, None, None, None, None, None, None, None)
if def_.tag == 9:
if def_.fields[0].tag == 9:
pattern_matching_result = 1
e_2 = def_.fields[0].fields[0]
else:
pattern_matching_result = 1
e_2 = def_.fields[0]
elif def_.tag == 5:
pattern_matching_result = 2
e_3 = def_.fields[0]
elif def_.tag == 0:
pattern_matching_result = 3
elif def_.tag == 6:
pattern_matching_result = 4
e_4 = def_.fields[0]
elif def_.tag == 7:
pattern_matching_result = 5
e_5 = def_.fields[0]
elif def_.tag == 1:
pattern_matching_result = 6
elif def_.tag == 11:
pattern_matching_result = 7
s_7 = def_.fields[0]
elif def_.tag == 2:
pattern_matching_result = 8
xs = def_.fields[0]
elif def_.tag == 10:
pattern_matching_result = 9
l = def_.fields[0]
r_1 = def_.fields[1]
elif def_.tag == 4:
if is_empty(def_.fields[0]):
pattern_matching_result = 10
else:
pattern_matching_result = 11
xs_1 = def_.fields[0]
elif def_.tag == 8:
pattern_matching_result = 12
e_6 = def_.fields[0]
else:
pattern_matching_result = 0
s_6 = def_.fields[0]
if pattern_matching_result == 0:
return escape_string(s_6)
elif pattern_matching_result == 1:
return ("(" + op_dereference(e_2)) + ")"
elif pattern_matching_result == 2:
return to_text(interpolate("Compl(%P())", [op_dereference(e_3)]))
elif pattern_matching_result == 3:
return "(\u00270\u0027 .. \u00279\u0027)"
elif pattern_matching_result == 4:
return to_text(interpolate("Plus(%P())", [op_dereference(e_4)]))
elif pattern_matching_result == 5:
return to_text(interpolate("Star(%P())", [op_dereference(e_5)]))
elif pattern_matching_result == 6:
return "any"
elif pattern_matching_result == 7:
match_value_5 : Option[Doc] = List_tryLookup(s_7, lexer_maps)
if match_value_5 is None:
raise UnboundLexer(s_7)
else:
return "rule_" + name_of_named_term(s_7)
elif pattern_matching_result == 8:
return join(", ", to_array(map_1(mk_lexer, xs)))
elif pattern_matching_result == 9:
return to_text(interpolate("(%P() .. %P())", [l, r_1]))
elif pattern_matching_result == 10:
raise Exception("impossible: alternatives cannot be empty.")
elif pattern_matching_result == 11:
return join(" | ", to_array(map_1(mk_lexer, xs_1)))
elif pattern_matching_result == 12:
return to_text(interpolate("Opt(%P())", [op_dereference(e_6)]))
def mk_lexer_debug(def__1: lexerule, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
op_dereference_1 : Callable[[lexerule], str] = mk_lexer_debug
if def__1.tag == 9:
return op_dereference_1(def__1.fields[0])
elif def__1.tag == 5:
return to_text(interpolate("pnot(%P())", [op_dereference_1(def__1.fields[0])]))
elif def__1.tag == 0:
return "pnumber"
elif def__1.tag == 6:
return to_text(interpolate("pplus(%P())", [op_dereference_1(def__1.fields[0])]))
elif def__1.tag == 7:
return to_text(interpolate("pstar(%P())", [op_dereference_1(def__1.fields[0])]))
elif def__1.tag == 1:
return "pany"
elif def__1.tag == 11:
return def__1.fields[0]
elif def__1.tag == 2:
return to_text(interpolate("pseq([%P()])", [join(", ", of_list_1(map_1(mk_lexer_debug, def__1.fields[0])))]))
elif def__1.tag == 10:
return to_text(interpolate("pinterval(%P(), %P())", [def__1.fields[0], def__1.fields[1]]))
elif def__1.tag == 4:
if not is_empty(def__1.fields[0]):
def arrow_383(a_3: str, b_2: str, def__1: lexerule=def__1) -> str:
return to_text(interpolate("por(%P(), %P())", [a_3, b_2]))
return fold(arrow_383, op_dereference_1(head(def__1.fields[0])), map_1(mk_lexer_debug, tail(def__1.fields[0])))
else:
raise Exception("impossible: alternatives cannot be empty.")
elif def__1.tag == 8:
return to_text(interpolate("popt%P()", [op_dereference_1(def__1.fields[0])]))
else:
return to_text(interpolate("pstring(%P())", [escape_string(def__1.fields[0])]))
def cg_stmt(stmt: definition, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> Doc:
nonlocal current_pos, lexer_maps, import_var_names, import_type_names
if stmt.tag == 2:
decl_1 : dict[str, Any] = stmt.fields[0]
current_pos = decl_1["pos"]
lexer_maps = cons((decl_1["lhs"], word(mk_lexer(decl_1["define"]))), lexer_maps)
return empty_2
elif stmt.tag == 6:
current_pos = stmt.fields[0]["pos"]
return empty_2
elif stmt.tag == 3:
import_var_names = cons(variable_renamer(stmt.fields[0]["ident"]), import_var_names)
return vsep(empty())
elif stmt.tag == 5:
import_type_names = cons(type_renamer(stmt.fields[0]["ident"]), import_type_names)
return vsep(empty())
elif stmt.tag == 4:
return vsep(empty())
elif stmt.tag == 0:
raise Exception("macro not processed")
else:
decl : dict[str, Any] = stmt.fields[0]
current_pos = decl["pos"]
ntname_1 : str = cg_symbol(symbol(1, decl["lhs"]))
def mapping_3(i_7: int, e_1: Doc, stmt: definition=stmt) -> Doc:
return Doc_op_Addition_Z7CFFAC00(word(":") if (i_7 == 0) else (word("|")), e_1)
def arrow_387(stmt: definition=stmt) -> IEnumerable[Doc]:
def arrow_386(match_value_4: Tuple[position, production_1]) -> IEnumerable[Doc]:
nonlocal current_pos
current_pos = match_value_4[0]
def arrow_385(_unit: Any=None) -> Doc:
prod : production_1 = match_value_4[1]
def arrow_384(arg_1: symbol) -> Doc:
return word(cg_symbol(arg_1))
return Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(seplist(word(" "), map_1(arrow_384, prod.symbols)), word("{")), Doc_op_RightShift_2AAA0F3C(vsep(of_array_1([empty_2, Doc_op_RightShift_2AAA0F3C(cg_expr(global_scope, prod.action), 4), word("}")])), 12))
return singleton(arrow_385())
return collect(arrow_386, decl["define"])
body_4 : Doc = align(vsep(map_indexed(mapping_3, to_list(delay(arrow_387)))))
return Doc_op_Addition_Z7CFFAC00(word(ntname_1), body_4)
file_grammar : Doc = vsep(of_array_1(map_2(cg_stmt, stmts, None)))
filename_lexer : str = to_text(printf("%s_lexer"))(lang_name)
filename_parser : str = to_text(printf("%s_parser"))(lang_name)
filename_constructors : str = to_text(printf("%s_construct"))(lang_name)
filename_require : str = to_text(printf("%s_require"))(lang_name)
var_tokenizer : str = mangle(ocaml_var_ident_descr, "tokenizer")
var_lexbuf : str = mangle(ocaml_var_ident_descr, "lexbuf")
def arrow_412(analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> IEnumerable[Doc]:
doc_ctor_wrap_funcs : FSharpList[Tuple[str, str, FSharpList[Tuple[str, str]], Doc]] = empty()
def arrow_411(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_410(_unit: Any=None) -> IEnumerable[Doc]:
adt_cases : FSharpList[Tuple[str, Any]] = Sigma__GetADTCases(analyzer.Sigma)
def arrow_409(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_408(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_394(match_value_6: Tuple[str, Any]) -> IEnumerable[Doc]:
typename_0027 : str = type_renamer(match_value_6[0])
def arrow_393(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_391(match_value_7: Tuple[str, FSharpList[Tuple[str, monot]]]) -> IEnumerable[Doc]:
nonlocal doc_ctor_wrap_funcs
ctor_name : str = match_value_7[0]
def arrow_388(tupled_arg_2: Tuple[str, monot]) -> Tuple[str, str]:
return (field_renamer(tupled_arg_2[0]), cg_type(tupled_arg_2[1]))
fields_1 : FSharpList[Tuple[str, str]] = map_1(arrow_388, match_value_7[1])
ctor_name_0027 : str = constructor_renamer(ctor_name)
ret_t : Doc = word(typename_0027)
doc_ctor_wrap_funcs = cons((variable_renamer(ctor_name), ctor_name_0027, fields_1, ret_t), doc_ctor_wrap_funcs)
if is_empty(fields_1):
return singleton(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("|"), word(ctor_name_0027)), word("of")), word("unit")))
else:
def arrow_390(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_389(match_value_8: Tuple[str, str]) -> IEnumerable[Doc]:
return singleton(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word(match_value_8[0]), word(":")), word(match_value_8[1])))
return collect(arrow_389, fields_1)
ano_record_typ : Doc = seplist(word(";"), to_list(delay(arrow_390)))
return singleton(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("|"), word(ctor_name_0027)), word("of")), word("{")), ano_record_typ), word("}")))
def arrow_392(_unit: Any=None) -> IEnumerable[Doc]:
return singleton(empty_2)
return append_1(collect(arrow_391, to_array_1(match_value_6[1])), delay(arrow_392))
return append_1(singleton(word(to_text(interpolate("and %P() = ", [typename_0027])))), delay(arrow_393))
def arrow_407(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_403(match_value_9: Tuple[str, Shape]) -> IEnumerable[Doc]:
typename_1 : str = match_value_9[0]
shape : Shape = match_value_9[1]
typename_0027_1 : str = type_renamer(typename_1)
varname : str = variable_renamer(typename_1)
def mapping_4(s_10: str) -> str:
return "\u0027" + s_10
ret_t_1 : Doc = word(typename_0027_1) if (is_empty(shape.parameters)) else (Doc_op_Addition_Z7CFFAC00(parens(word(join(", ", map_1(mapping_4, shape.parameters)))), word(typename_0027_1)))
def arrow_396(_unit: Any=None) -> IEnumerable[Tuple[str, str]]:
def arrow_395(match_value_10: Tuple[str, monot]) -> IEnumerable[Tuple[str, str]]:
return singleton((field_renamer(match_value_10[0]), cg_type(match_value_10[1])))
return collect(arrow_395, shape.fields)
fields_2 : FSharpList[Tuple[str, str]] = to_list(delay(arrow_396))
def arrow_397(_unit: Any=None) -> IEnumerable[Doc]:
nonlocal doc_ctor_wrap_funcs
doc_ctor_wrap_funcs = cons((varname, "MK_" + typename_0027_1, fields_2, ret_t_1), doc_ctor_wrap_funcs)
return empty_3()
def arrow_402(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_399(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_398(match_value_11: Tuple[str, str]) -> IEnumerable[Doc]:
return singleton(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word(match_value_11[0]), word(":")), Doc_op_Multiply_Z7CFFAC00(word(match_value_11[1]), word(";"))))
return collect(arrow_398, fields_2)
def arrow_401(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_400(_unit: Any=None) -> IEnumerable[Doc]:
nonlocal doc_ctor_wrap_funcs
doc_ctor_wrap_funcs = cons((varname, "", fields_2, ret_t_1), doc_ctor_wrap_funcs)
return empty_3()
return append_1(singleton(word("}")), delay(arrow_400))
return append_1(singleton(Doc_op_RightShift_2AAA0F3C(vsep(to_list(delay(arrow_399))), 4)), delay(arrow_401))
return append_1(singleton(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("and"), ret_t_1), word("=")), word("MK_" + typename_0027_1)), word("of unit"))), delay(arrow_397)) if (is_empty(fields_2)) else (append_1(singleton(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("and"), ret_t_1), word("=")), word("{"))), delay(arrow_402)))
def arrow_406(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_405(match_value_12: Tuple[str, str, FSharpList[Tuple[str, str]], Doc]) -> IEnumerable[Doc]:
function_name : str = match_value_12[0]
fields_3 : FSharpList[Tuple[str, str]] = match_value_12[2]
ctor_name_1 : str = match_value_12[1]
if is_empty(fields_3):
return singleton(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("let"), word(function_name)), word("()")), word("=")), word(ctor_name_1)), word("()")))
else:
def arrow_404(arg_2: Tuple[str, str]) -> Doc:
return word(arg_2[0])
args_5 : FSharpList[Doc] = map_1(arrow_404, fields_3)
return singleton(vsep(of_array_1([Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("let"), word(function_name)), parens(seplist(word(", "), args_5))), word(":")), match_value_12[3]), word("=")), Doc_op_RightShift_2AAA0F3C(vsep(singleton_1(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word(ctor_name_1), word("{")), seplist(word(";"), args_5)), word("}")))), 4)])))
return collect(arrow_405, doc_ctor_wrap_funcs)
return append_1(collect(arrow_403, Sigma__GetRecordTypes(analyzer.Sigma)), delay(arrow_406))
return append_1(collect(arrow_394, adt_cases), delay(arrow_407))
return append_1(singleton(word("type ___used_t_head_90xasda")), delay(arrow_408))
return append_1(singleton(empty_2), delay(arrow_409))
return append_1(singleton(word(to_text(interpolate("open %P();;", [capitalized(filename_lexer)])))), delay(arrow_410))
return append_1(singleton(word(to_text(interpolate("open %P();;", [capitalized(filename_require)])))), delay(arrow_411))
file_constructors : Tuple[str, Doc] = (filename_constructors + ".ml", vsep(to_list(delay(arrow_412))))
token_names : FSharpList[str] = empty()
ReferencedNamedTokens : List[str] = list(analyzer.ReferencedNamedTokens)
def arrow_414(k_1: str, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> int:
def arrow_413(y_5: str) -> bool:
return k_1 == y_5
return find_index(arrow_413, analyzer.TokenFragments)
class ObjectExpr416:
@property
def Compare(self) -> Any:
def arrow_415(x_15: int, y_6: int) -> int:
return compare_primitives(x_15, y_6)
return arrow_415
sort_in_place_by(arrow_414, ReferencedNamedTokens, ObjectExpr416())
lexical_rule_defs : FSharpList[Doc] = empty()
tokenizer_cases : FSharpList[Doc] = empty()
class ObjectExpr418:
@property
def Compare(self) -> Any:
def arrow_417(x_16: str, y_7: str) -> int:
return compare_primitives(x_16, y_7)
return arrow_417
arr : List[str] = sort(list(analyzer.LiteralTokens), ObjectExpr418())
for idx_1 in range(0, (len(arr) - 1) + 1, 1):
k_2 : str = arr[idx_1]
v_2 : Doc = word(mk_lexer(lexerule(3, k_2)))
token_name : str = cg_symbol(symbol(0, k_2, True))
lexical_rule_name : str = "rule_" + token_name
lexical_rule_def : Doc = Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("let"), word(lexical_rule_name)), word("=")), bracket(Doc_op_Addition_Z7CFFAC00(word("%sedlex.regexp?"), v_2)))
lexical_rule_defs = cons(lexical_rule_def, lexical_rule_defs)
tokenizer_case : Doc = Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("|"), word(lexical_rule_name)), word(to_text(interpolate("-\u003e %P() (mktoken %P())", [token_name, var_lexbuf]))))
tokenizer_cases = cons(tokenizer_case, tokenizer_cases)
token_names = cons(token_name, token_names)
with get_enumerator(lexer_maps) as enumerator:
while enumerator.System_Collections_IEnumerator_MoveNext():
for_loop_var : Tuple[str, Doc] = enumerator.System_Collections_Generic_IEnumerator_00601_get_Current()
v_3 : Doc = for_loop_var[1]
k_3 : str = for_loop_var[0]
if contains(k_3, analyzer.IgnoreSet):
lexical_rule_name_1 : str = "rule_" + name_of_named_term(k_3)
lexical_rule_def_1 : Doc = Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("let"), word(lexical_rule_name_1)), word("=")), bracket(Doc_op_Addition_Z7CFFAC00(word("%sedlex.regexp?"), v_3)))
lexical_rule_defs = cons(lexical_rule_def_1, lexical_rule_defs)
tokenizer_case_1 : Doc = Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("|"), word(lexical_rule_name_1)), word(to_text(interpolate("-\u003e %P() %P()", [var_tokenizer, var_lexbuf]))))
tokenizer_cases = cons(tokenizer_case_1, tokenizer_cases)
else:
token_name_1 : str = name_of_named_term(k_3)
lexical_rule_name_2 : str = "rule_" + token_name_1
lexical_rule_def_2 : Doc = Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("let"), word(lexical_rule_name_2)), word("=")), bracket(Doc_op_Addition_Z7CFFAC00(word("%sedlex.regexp?"), v_3)))
lexical_rule_defs = cons(lexical_rule_def_2, lexical_rule_defs)
class ObjectExpr421:
@property
def Equals(self) -> Any:
def arrow_419(x_17: str, y_8: str) -> bool:
return x_17 == y_8
return arrow_419
@property
def GetHashCode(self) -> Any:
def arrow_420(x_17: str) -> int:
return string_hash(x_17)
return arrow_420
if contains_1(k_3, ReferencedNamedTokens, ObjectExpr421()):
tokenizer_case_2 : Doc = Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("|"), word(lexical_rule_name_2)), word(to_text(interpolate("-\u003e %P() (mktoken %P())", [token_name_1, var_lexbuf]))))
tokenizer_cases = cons(tokenizer_case_2, tokenizer_cases)
token_names = cons(token_name_1, token_names)
tokenizer_cases = of_array_with_tail([word(to_text(interpolate("| _ -\u003e _unknown_token %P()", [var_lexbuf]))), Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("|"), word("eof -\u003e")), word("EOF"))], tokenizer_cases)
token_names_1 : FSharpList[str] = reverse(token_names)
tokenizer_cases_1 : FSharpList[Doc] = reverse(tokenizer_cases)
def arrow_424(analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> IEnumerable[Doc]:
def arrow_422(tkn: str) -> Doc:
return Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("|"), word(tkn)), word("of")), word("tbnf_token"))
def arrow_423(_unit: Any=None) -> IEnumerable[Doc]:
return singleton(word("| EOF"))
return append_1(map(arrow_422, token_names_1), delay(arrow_423))
file_lexer : Tuple[str, Doc] = (filename_lexer + ".ml", vsep(of_array_1([word(rts_file_string), empty_2, word("type token ="), vsep(to_list(delay(arrow_424))), empty_2, vsep(lexical_rule_defs), empty_2, vsep(of_array_1([Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("let rec"), word(var_tokenizer)), word(var_lexbuf)), word("=")), align(indent(4, vsep(cons(word(to_text(interpolate("match%%sedlex %P() with", [var_lexbuf]))), tokenizer_cases_1))))]))])))
match_value_13 : Option[monot] = try_find("start", analyzer.Omega)
if match_value_13 is not None:
start_t : monot = match_value_13
start_name : str = cg_symbol(symbol(1, "start"))
start_t_1 : str = cg_type(monot__prune(start_t))
def arrow_437(analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> IEnumerable[Doc]:
def arrow_436(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_435(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_434(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_433(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_432(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_425(token_name_2: str) -> Doc:
return word(to_text(interpolate("%%token\u003ctbnf_token\u003e %P()", [token_name_2])))
def arrow_431(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_430(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_429(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_428(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_427(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_426(_unit: Any=None) -> IEnumerable[Doc]:
return singleton(file_grammar)
return append_1(singleton(word(to_text(printf("start : %s EOF { $1 }"))(start_name))), delay(arrow_426))
return append_1(singleton(empty_2), delay(arrow_427))
return append_1(singleton(word("%%")), delay(arrow_428))
return append_1(singleton(word(to_text(interpolate("%%start \u003c%P()\u003e start", [start_rule_qualified_type])))) if (start_rule_qualified_type is not None) else (singleton(word(to_text(interpolate("%%start \u003c%P()\u003e start", [start_t_1]))))), delay(arrow_429))
return append_1(singleton(word("%token EOF")), delay(arrow_430))
return append_1(map(arrow_425, token_names_1), delay(arrow_431))
return append_1(singleton(word("%}")), delay(arrow_432))
return append_1(singleton(word(to_text(interpolate("open %P();;", [capitalized(filename_constructors)])))), delay(arrow_433))
return append_1(singleton(word(to_text(interpolate("open %P();;", [capitalized(filename_lexer)])))), delay(arrow_434))
return append_1(singleton(word(to_text(interpolate("open %P();;", [capitalized(filename_require)])))), delay(arrow_435))
return append_1(singleton(word("%{")), delay(arrow_436))
return [file_constructors, (filename_parser + ".mly", vsep(to_list(delay(arrow_437)))), file_lexer]
else:
raise UnboundNonterminal("start") | _tbnf/src/backends_ocaml_menhir.py | from __future__ import annotations
from typing import (List, Callable, Any, Tuple, TypeVar)
from ..fable_modules.fable_library.array import (map as map_2, sort_in_place_by, find_index, sort, contains as contains_1)
from ..fable_modules.fable_library.list import (empty, FSharpList, map as map_1, is_empty, head, tail, append, of_array as of_array_1, cons, to_array, fold, map_indexed, singleton as singleton_1, of_array_with_tail, reverse)
from ..fable_modules.fable_library.map import (empty as empty_1, try_find, add, to_array as to_array_1)
from ..fable_modules.fable_library.option import (default_arg, Option, some)
from ..fable_modules.fable_library.seq import (to_list, delay, map, collect, singleton, of_list as of_list_1, append as append_1, empty as empty_3)
from ..fable_modules.fable_library.set import (of_array, of_list, contains)
from ..fable_modules.fable_library.string import (to_text, interpolate, join, printf)
from ..fable_modules.fable_library.util import (compare_primitives, compare, IEnumerable, equals, string_hash, get_enumerator)
from ..FableSedlex.code_gen import (Doc, word, Doc_op_Multiply_Z7CFFAC00, Doc_op_Addition_Z7CFFAC00, parens, vsep, seplist, Doc_op_RightShift_2AAA0F3C, bracket, empty as empty_2, align, indent)
from .analysis import (Analyzer, Sigma__get_GlobalVariables, Sigma__GetADTCases, Shape, Sigma__GetRecordTypes)
from .backends_common import (CodeGenOptions, NameMangling_IdentifierDescriptor__WithNameEnv_Z7613F24B, NameMangling_nameEnv, NameMangling_IdentifierDescriptor_Create_Z48C5CCEF, NameMangling_IdentifierDescriptor, NameMangling_maskChar, NameMangling_mangle)
from .exceptions import (UnsolvedTypeVariable, UnboundVariable, UnboundLexer, UnboundNonterminal)
from .grammar import (definition, symbol, position, monot, _007CTTuple_007C__007C, monot__prune, expr, node, lexerule, production as production_1)
from .resource_keys import ocaml_rts_file
from .utils import (is_lower, is_unicode, is_digit, is_upper, escape_string, List_tryLookup, capitalized)
__A = TypeVar("__A")
__B = TypeVar("__B")
def codegen(analyzer: Analyzer, cg_options: CodeGenOptions, lang_name: str, stmts: List[definition]) -> List[Tuple[str, Doc]]:
def arrow_354(x: str, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
return x
variable_renamer : Callable[[str], str] = default_arg(cg_options.rename_var, arrow_354)
def arrow_355(x_1: str, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
return x_1
constructor_renamer : Callable[[str], str] = default_arg(cg_options.rename_ctor, arrow_355)
def arrow_356(x_2: str, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
return x_2
field_renamer : Callable[[str], str] = default_arg(cg_options.rename_field, arrow_356)
def arrow_357(x_3: str, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
return x_3
type_renamer : Callable[[str], str] = default_arg(cg_options.rename_type, arrow_357)
start_rule_qualified_type : Option[str] = cg_options.start_rule_qualified_type
rts_file_string : str = cg_options.request_resource(ocaml_rts_file)
import_var_names : FSharpList[str] = empty()
import_type_names : FSharpList[str] = empty()
class ObjectExpr359:
@property
def Compare(self) -> Any:
def arrow_358(x_4: str, y: str) -> int:
return compare_primitives(x_4, y)
return arrow_358
abandoned_names : Any = of_array(["and", "as", "assert", "asr", "begin", "class", "constraint", "do", "done", "downto", "else", "end", "exception", "external", "false", "for", "fun", "function", "functor", "if", "in", "include", "inherit", "initializer", "land", "lazy", "let", "lor", "lsl", "lsr", "lxor", "match", "method", "mod", "module", "mutable", "new", "nonrec", "object", "of", "open", "or", "private", "rec", "sig", "struct", "then", "to", "true", "try", "type", "val", "virtual", "when", "while", "with"], ObjectExpr359())
class ObjectExpr361:
@property
def Compare(self) -> Any:
def arrow_360(x_5: symbol, y_1: symbol) -> int:
return compare(x_5, y_1)
return arrow_360
symmap : Any = empty_1(ObjectExpr361())
toplevel_transformer : FSharpList[Doc] = empty()
current_pos : position = analyzer.current_pos
lexer_maps : FSharpList[Tuple[str, Doc]] = empty()
def arrow_363(analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> IEnumerable[Tuple[str, str]]:
def arrow_362(k: Any) -> Tuple[str, str]:
return (k[0], variable_renamer(k[0]))
return map(arrow_362, Sigma__get_GlobalVariables(analyzer.Sigma))
global_scope : FSharpList[Tuple[str, str]] = to_list(delay(arrow_363))
def arrow_364(tuple: Tuple[str, str], analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
return tuple[1]
class ObjectExpr366:
@property
def Compare(self) -> Any:
def arrow_365(x_6: str, y_2: str) -> int:
return compare_primitives(x_6, y_2)
return arrow_365
def arrow_367(i: int, c: str, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> bool:
test : bool = True if (True if (is_lower(c)) else (is_unicode(c))) else (c == "_")
return test if (i == 0) else (True if (test) else (is_digit(c)))
def arrow_368(i_1: int, c_1: str, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
return to_text(interpolate("_X%P()_", [i_1])) if (is_digit(c_1)) else (c_1.lower() if (is_upper(c_1)) else (to_text(interpolate("_%P()_", [ord(c_1)]))))
ocaml_var_ident_descr : NameMangling_IdentifierDescriptor = NameMangling_IdentifierDescriptor__WithNameEnv_Z7613F24B(NameMangling_IdentifierDescriptor_Create_Z48C5CCEF(arrow_367, arrow_368), NameMangling_nameEnv(of_list(map_1(arrow_364, global_scope), ObjectExpr366())))
class ObjectExpr370:
@property
def Compare(self) -> Any:
def arrow_369(x_7: str, y_3: str) -> int:
return compare_primitives(x_7, y_3)
return arrow_369
def arrow_371(i_2: int, c_2: str, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> bool:
return is_upper(c_2) if (i_2 == 0) else (True if (True if (is_upper(c_2)) else (c_2 == "_")) else (is_digit(c_2)))
def arrow_372(i_3: int, c_3: str, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
return c_3.upper() if (is_lower(c_3)) else (NameMangling_maskChar(ord("A"), ord("Z"), ord(c_3)) + "_" if (i_3 == 0) else (("_" + NameMangling_maskChar(ord("A"), ord("Z"), ord(c_3))) + "_"))
sedlex_ident_descr : NameMangling_IdentifierDescriptor = NameMangling_IdentifierDescriptor__WithNameEnv_Z7613F24B(NameMangling_IdentifierDescriptor_Create_Z48C5CCEF(arrow_371, arrow_372), NameMangling_nameEnv(of_array(["EOF"], ObjectExpr370())))
class ObjectExpr374:
@property
def Compare(self) -> Any:
def arrow_373(x_8: str, y_4: str) -> int:
return compare_primitives(x_8, y_4)
return arrow_373
def arrow_375(i_4: int, c_4: str, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> bool:
return is_lower(c_4) if (i_4 == 0) else (True if (True if (is_lower(c_4)) else (c_4 == "_")) else (is_digit(c_4)))
def arrow_376(_arg1: int, c_5: str, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
return c_5.lower() if (is_upper(c_5)) else (("_" + NameMangling_maskChar(ord("a"), ord("z"), ord(c_5))) + "_")
menhir_ident_descr : NameMangling_IdentifierDescriptor = NameMangling_IdentifierDescriptor__WithNameEnv_Z7613F24B(NameMangling_IdentifierDescriptor_Create_Z48C5CCEF(arrow_375, arrow_376), NameMangling_nameEnv(of_array(["start"], ObjectExpr374())))
def mangle(idr: NameMangling_IdentifierDescriptor, n: str, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
return NameMangling_mangle(abandoned_names, idr, n)
def cg_symbol(x_9: symbol, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
nonlocal symmap
match_value : Option[str] = try_find(x_9, symmap)
if match_value is None:
valid_py_ident : str
if x_9.tag == 0:
define : str = x_9.fields[0]
valid_py_ident = mangle(sedlex_ident_descr, ("\"" + define) + "\"") if (x_9.fields[1]) else (mangle(sedlex_ident_descr, define))
elif x_9.tag == 1:
valid_py_ident = mangle(menhir_ident_descr, x_9.fields[0])
else:
raise Exception("macro not processed")
symmap = add(x_9, valid_py_ident, symmap)
return valid_py_ident
else:
return match_value
def name_of_named_term(n_2: str, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
return cg_symbol(symbol(0, n_2, False))
def try_lookup(key_mut: __A, x_10_mut: FSharpList[Tuple[__A, __B]], analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> Option[__B]:
while True:
(key, x_10) = (key_mut, x_10_mut)
if not is_empty(x_10):
if equals(head(x_10)[0], key):
return some(head(x_10)[1])
elif not is_empty(x_10):
key_mut = key
x_10_mut = tail(x_10)
continue
else:
raise Exception("Match failure")
else:
return None
break
def _cg_type(t: monot, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
if t.tag == 4:
return "\u0027" + t.fields[0]
elif t.tag == 0:
raise UnsolvedTypeVariable()
elif t.tag == 3:
def mapping(tupled_arg: Tuple[str, monot], t: monot=t) -> str:
return _cg_type(tupled_arg[1])
return (("(" + join(" * ", map_1(mapping, t.fields[0]))) + ") -\u003e ") + _cg_type(t.fields[1])
elif t.tag == 2:
if _007CTTuple_007C__007C(t.fields[0]) is not None:
return ("(" + join(" * ", map_1(_cg_type, t.fields[1]))) + ")"
elif t.tag == 2:
return (("(" + join(", ", map_1(_cg_type, t.fields[1]))) + ") ") + _cg_type(t.fields[0])
else:
raise Exception("Match failure")
else:
return type_renamer(t.fields[0])
def cg_type(t_1: monot, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
return _cg_type(monot__prune(t_1))
def cg_expr(scope: FSharpList[Tuple[str, str]], curr_expr: expr, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> Doc:
match_value_1 : node = curr_expr.node
if match_value_1.tag == 6:
match_value_2 : Option[str] = try_lookup(match_value_1.fields[0], scope)
if match_value_2 is not None:
return word(match_value_2)
else:
raise UnboundVariable(match_value_1.fields[0])
elif match_value_1.tag == 11:
if match_value_1.fields[0]:
return word("true")
else:
return word("false")
elif match_value_1.tag == 3:
return Doc_op_Multiply_Z7CFFAC00(Doc_op_Multiply_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(cg_expr(scope, match_value_1.fields[0]), word(":")), word(cg_type(curr_expr.t))), word(".")), word(match_value_1.fields[1]))
elif match_value_1.tag == 8:
return word(to_text(printf("%d"))(match_value_1.fields[0]))
elif match_value_1.tag == 10:
return word(to_text(printf("%f"))(match_value_1.fields[0]))
elif match_value_1.tag == 9:
return word(escape_string(match_value_1.fields[0]))
elif match_value_1.tag == 5:
def arrow_378(scope: FSharpList[Tuple[str, str]]=scope, curr_expr: expr=curr_expr) -> IEnumerable[Tuple[str, str]]:
def arrow_377(match_value_3: Tuple[str, monot]) -> IEnumerable[Tuple[str, str]]:
arg : str = match_value_3[0]
return singleton((arg, mangle(ocaml_var_ident_descr, arg)))
return collect(arrow_377, match_value_1.fields[0])
code : Doc = cg_expr(append(to_list(delay(arrow_378)), scope), match_value_1.fields[1])
def arrow_379(tupled_arg_1: Tuple[str, monot], scope: FSharpList[Tuple[str, str]]=scope, curr_expr: expr=curr_expr) -> Doc:
return word((tupled_arg_1[0] + ":") + cg_type(tupled_arg_1[1]))
anns : FSharpList[Doc] = map_1(arrow_379, match_value_1.fields[0])
return parens(vsep(of_array_1([Doc_op_Addition_Z7CFFAC00(word("fun"), Doc_op_Multiply_Z7CFFAC00(parens(seplist(word(", "), anns)), word("-\u003e"))), Doc_op_RightShift_2AAA0F3C(code, 4)])))
elif match_value_1.tag == 4:
value_0027 : Doc = cg_expr(scope, match_value_1.fields[1])
m_name : str = mangle(ocaml_var_ident_descr, match_value_1.fields[0])
body : Doc = cg_expr(cons((match_value_1.fields[0], m_name), scope), match_value_1.fields[2])
name_2 : Doc = Doc_op_Multiply_Z7CFFAC00(Doc_op_Multiply_Z7CFFAC00(word(m_name), word(":")), word(cg_type(match_value_1.fields[1].t)))
return vsep(of_array_1([Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("let"), name_2), word("=")), value_0027), word("in")), body]))
elif match_value_1.tag == 2:
def arrow_380(elt: expr, scope: FSharpList[Tuple[str, str]]=scope, curr_expr: expr=curr_expr) -> Doc:
return cg_expr(scope, elt)
return bracket(seplist(word(";"), map_1(arrow_380, match_value_1.fields[0])))
elif match_value_1.tag == 7:
return word(to_text(interpolate("$%P()", [match_value_1.fields[0]])))
elif match_value_1.tag == 1:
def arrow_381(elt_1: expr, scope: FSharpList[Tuple[str, str]]=scope, curr_expr: expr=curr_expr) -> Doc:
return cg_expr(scope, elt_1)
return parens(seplist(word(", "), map_1(arrow_381, match_value_1.fields[0])))
else:
f_0027 : Doc = cg_expr(scope, match_value_1.fields[0])
def arrow_382(x_12: expr, scope: FSharpList[Tuple[str, str]]=scope, curr_expr: expr=curr_expr) -> Doc:
return cg_expr(scope, x_12)
args_0027 : FSharpList[Doc] = map_1(arrow_382, match_value_1.fields[1])
return Doc_op_Multiply_Z7CFFAC00(f_0027, parens(seplist(word(", "), args_0027)))
def mk_lexer(def_: lexerule, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
op_dereference : Callable[[lexerule], str] = mk_lexer
(pattern_matching_result, s_6, e_2, e_3, e_4, e_5, s_7, xs, l, r_1, xs_1, e_6) = (None, None, None, None, None, None, None, None, None, None, None, None)
if def_.tag == 9:
if def_.fields[0].tag == 9:
pattern_matching_result = 1
e_2 = def_.fields[0].fields[0]
else:
pattern_matching_result = 1
e_2 = def_.fields[0]
elif def_.tag == 5:
pattern_matching_result = 2
e_3 = def_.fields[0]
elif def_.tag == 0:
pattern_matching_result = 3
elif def_.tag == 6:
pattern_matching_result = 4
e_4 = def_.fields[0]
elif def_.tag == 7:
pattern_matching_result = 5
e_5 = def_.fields[0]
elif def_.tag == 1:
pattern_matching_result = 6
elif def_.tag == 11:
pattern_matching_result = 7
s_7 = def_.fields[0]
elif def_.tag == 2:
pattern_matching_result = 8
xs = def_.fields[0]
elif def_.tag == 10:
pattern_matching_result = 9
l = def_.fields[0]
r_1 = def_.fields[1]
elif def_.tag == 4:
if is_empty(def_.fields[0]):
pattern_matching_result = 10
else:
pattern_matching_result = 11
xs_1 = def_.fields[0]
elif def_.tag == 8:
pattern_matching_result = 12
e_6 = def_.fields[0]
else:
pattern_matching_result = 0
s_6 = def_.fields[0]
if pattern_matching_result == 0:
return escape_string(s_6)
elif pattern_matching_result == 1:
return ("(" + op_dereference(e_2)) + ")"
elif pattern_matching_result == 2:
return to_text(interpolate("Compl(%P())", [op_dereference(e_3)]))
elif pattern_matching_result == 3:
return "(\u00270\u0027 .. \u00279\u0027)"
elif pattern_matching_result == 4:
return to_text(interpolate("Plus(%P())", [op_dereference(e_4)]))
elif pattern_matching_result == 5:
return to_text(interpolate("Star(%P())", [op_dereference(e_5)]))
elif pattern_matching_result == 6:
return "any"
elif pattern_matching_result == 7:
match_value_5 : Option[Doc] = List_tryLookup(s_7, lexer_maps)
if match_value_5 is None:
raise UnboundLexer(s_7)
else:
return "rule_" + name_of_named_term(s_7)
elif pattern_matching_result == 8:
return join(", ", to_array(map_1(mk_lexer, xs)))
elif pattern_matching_result == 9:
return to_text(interpolate("(%P() .. %P())", [l, r_1]))
elif pattern_matching_result == 10:
raise Exception("impossible: alternatives cannot be empty.")
elif pattern_matching_result == 11:
return join(" | ", to_array(map_1(mk_lexer, xs_1)))
elif pattern_matching_result == 12:
return to_text(interpolate("Opt(%P())", [op_dereference(e_6)]))
def mk_lexer_debug(def__1: lexerule, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> str:
op_dereference_1 : Callable[[lexerule], str] = mk_lexer_debug
if def__1.tag == 9:
return op_dereference_1(def__1.fields[0])
elif def__1.tag == 5:
return to_text(interpolate("pnot(%P())", [op_dereference_1(def__1.fields[0])]))
elif def__1.tag == 0:
return "pnumber"
elif def__1.tag == 6:
return to_text(interpolate("pplus(%P())", [op_dereference_1(def__1.fields[0])]))
elif def__1.tag == 7:
return to_text(interpolate("pstar(%P())", [op_dereference_1(def__1.fields[0])]))
elif def__1.tag == 1:
return "pany"
elif def__1.tag == 11:
return def__1.fields[0]
elif def__1.tag == 2:
return to_text(interpolate("pseq([%P()])", [join(", ", of_list_1(map_1(mk_lexer_debug, def__1.fields[0])))]))
elif def__1.tag == 10:
return to_text(interpolate("pinterval(%P(), %P())", [def__1.fields[0], def__1.fields[1]]))
elif def__1.tag == 4:
if not is_empty(def__1.fields[0]):
def arrow_383(a_3: str, b_2: str, def__1: lexerule=def__1) -> str:
return to_text(interpolate("por(%P(), %P())", [a_3, b_2]))
return fold(arrow_383, op_dereference_1(head(def__1.fields[0])), map_1(mk_lexer_debug, tail(def__1.fields[0])))
else:
raise Exception("impossible: alternatives cannot be empty.")
elif def__1.tag == 8:
return to_text(interpolate("popt%P()", [op_dereference_1(def__1.fields[0])]))
else:
return to_text(interpolate("pstring(%P())", [escape_string(def__1.fields[0])]))
def cg_stmt(stmt: definition, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> Doc:
nonlocal current_pos, lexer_maps, import_var_names, import_type_names
if stmt.tag == 2:
decl_1 : dict[str, Any] = stmt.fields[0]
current_pos = decl_1["pos"]
lexer_maps = cons((decl_1["lhs"], word(mk_lexer(decl_1["define"]))), lexer_maps)
return empty_2
elif stmt.tag == 6:
current_pos = stmt.fields[0]["pos"]
return empty_2
elif stmt.tag == 3:
import_var_names = cons(variable_renamer(stmt.fields[0]["ident"]), import_var_names)
return vsep(empty())
elif stmt.tag == 5:
import_type_names = cons(type_renamer(stmt.fields[0]["ident"]), import_type_names)
return vsep(empty())
elif stmt.tag == 4:
return vsep(empty())
elif stmt.tag == 0:
raise Exception("macro not processed")
else:
decl : dict[str, Any] = stmt.fields[0]
current_pos = decl["pos"]
ntname_1 : str = cg_symbol(symbol(1, decl["lhs"]))
def mapping_3(i_7: int, e_1: Doc, stmt: definition=stmt) -> Doc:
return Doc_op_Addition_Z7CFFAC00(word(":") if (i_7 == 0) else (word("|")), e_1)
def arrow_387(stmt: definition=stmt) -> IEnumerable[Doc]:
def arrow_386(match_value_4: Tuple[position, production_1]) -> IEnumerable[Doc]:
nonlocal current_pos
current_pos = match_value_4[0]
def arrow_385(_unit: Any=None) -> Doc:
prod : production_1 = match_value_4[1]
def arrow_384(arg_1: symbol) -> Doc:
return word(cg_symbol(arg_1))
return Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(seplist(word(" "), map_1(arrow_384, prod.symbols)), word("{")), Doc_op_RightShift_2AAA0F3C(vsep(of_array_1([empty_2, Doc_op_RightShift_2AAA0F3C(cg_expr(global_scope, prod.action), 4), word("}")])), 12))
return singleton(arrow_385())
return collect(arrow_386, decl["define"])
body_4 : Doc = align(vsep(map_indexed(mapping_3, to_list(delay(arrow_387)))))
return Doc_op_Addition_Z7CFFAC00(word(ntname_1), body_4)
file_grammar : Doc = vsep(of_array_1(map_2(cg_stmt, stmts, None)))
filename_lexer : str = to_text(printf("%s_lexer"))(lang_name)
filename_parser : str = to_text(printf("%s_parser"))(lang_name)
filename_constructors : str = to_text(printf("%s_construct"))(lang_name)
filename_require : str = to_text(printf("%s_require"))(lang_name)
var_tokenizer : str = mangle(ocaml_var_ident_descr, "tokenizer")
var_lexbuf : str = mangle(ocaml_var_ident_descr, "lexbuf")
def arrow_412(analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> IEnumerable[Doc]:
doc_ctor_wrap_funcs : FSharpList[Tuple[str, str, FSharpList[Tuple[str, str]], Doc]] = empty()
def arrow_411(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_410(_unit: Any=None) -> IEnumerable[Doc]:
adt_cases : FSharpList[Tuple[str, Any]] = Sigma__GetADTCases(analyzer.Sigma)
def arrow_409(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_408(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_394(match_value_6: Tuple[str, Any]) -> IEnumerable[Doc]:
typename_0027 : str = type_renamer(match_value_6[0])
def arrow_393(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_391(match_value_7: Tuple[str, FSharpList[Tuple[str, monot]]]) -> IEnumerable[Doc]:
nonlocal doc_ctor_wrap_funcs
ctor_name : str = match_value_7[0]
def arrow_388(tupled_arg_2: Tuple[str, monot]) -> Tuple[str, str]:
return (field_renamer(tupled_arg_2[0]), cg_type(tupled_arg_2[1]))
fields_1 : FSharpList[Tuple[str, str]] = map_1(arrow_388, match_value_7[1])
ctor_name_0027 : str = constructor_renamer(ctor_name)
ret_t : Doc = word(typename_0027)
doc_ctor_wrap_funcs = cons((variable_renamer(ctor_name), ctor_name_0027, fields_1, ret_t), doc_ctor_wrap_funcs)
if is_empty(fields_1):
return singleton(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("|"), word(ctor_name_0027)), word("of")), word("unit")))
else:
def arrow_390(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_389(match_value_8: Tuple[str, str]) -> IEnumerable[Doc]:
return singleton(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word(match_value_8[0]), word(":")), word(match_value_8[1])))
return collect(arrow_389, fields_1)
ano_record_typ : Doc = seplist(word(";"), to_list(delay(arrow_390)))
return singleton(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("|"), word(ctor_name_0027)), word("of")), word("{")), ano_record_typ), word("}")))
def arrow_392(_unit: Any=None) -> IEnumerable[Doc]:
return singleton(empty_2)
return append_1(collect(arrow_391, to_array_1(match_value_6[1])), delay(arrow_392))
return append_1(singleton(word(to_text(interpolate("and %P() = ", [typename_0027])))), delay(arrow_393))
def arrow_407(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_403(match_value_9: Tuple[str, Shape]) -> IEnumerable[Doc]:
typename_1 : str = match_value_9[0]
shape : Shape = match_value_9[1]
typename_0027_1 : str = type_renamer(typename_1)
varname : str = variable_renamer(typename_1)
def mapping_4(s_10: str) -> str:
return "\u0027" + s_10
ret_t_1 : Doc = word(typename_0027_1) if (is_empty(shape.parameters)) else (Doc_op_Addition_Z7CFFAC00(parens(word(join(", ", map_1(mapping_4, shape.parameters)))), word(typename_0027_1)))
def arrow_396(_unit: Any=None) -> IEnumerable[Tuple[str, str]]:
def arrow_395(match_value_10: Tuple[str, monot]) -> IEnumerable[Tuple[str, str]]:
return singleton((field_renamer(match_value_10[0]), cg_type(match_value_10[1])))
return collect(arrow_395, shape.fields)
fields_2 : FSharpList[Tuple[str, str]] = to_list(delay(arrow_396))
def arrow_397(_unit: Any=None) -> IEnumerable[Doc]:
nonlocal doc_ctor_wrap_funcs
doc_ctor_wrap_funcs = cons((varname, "MK_" + typename_0027_1, fields_2, ret_t_1), doc_ctor_wrap_funcs)
return empty_3()
def arrow_402(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_399(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_398(match_value_11: Tuple[str, str]) -> IEnumerable[Doc]:
return singleton(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word(match_value_11[0]), word(":")), Doc_op_Multiply_Z7CFFAC00(word(match_value_11[1]), word(";"))))
return collect(arrow_398, fields_2)
def arrow_401(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_400(_unit: Any=None) -> IEnumerable[Doc]:
nonlocal doc_ctor_wrap_funcs
doc_ctor_wrap_funcs = cons((varname, "", fields_2, ret_t_1), doc_ctor_wrap_funcs)
return empty_3()
return append_1(singleton(word("}")), delay(arrow_400))
return append_1(singleton(Doc_op_RightShift_2AAA0F3C(vsep(to_list(delay(arrow_399))), 4)), delay(arrow_401))
return append_1(singleton(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("and"), ret_t_1), word("=")), word("MK_" + typename_0027_1)), word("of unit"))), delay(arrow_397)) if (is_empty(fields_2)) else (append_1(singleton(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("and"), ret_t_1), word("=")), word("{"))), delay(arrow_402)))
def arrow_406(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_405(match_value_12: Tuple[str, str, FSharpList[Tuple[str, str]], Doc]) -> IEnumerable[Doc]:
function_name : str = match_value_12[0]
fields_3 : FSharpList[Tuple[str, str]] = match_value_12[2]
ctor_name_1 : str = match_value_12[1]
if is_empty(fields_3):
return singleton(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("let"), word(function_name)), word("()")), word("=")), word(ctor_name_1)), word("()")))
else:
def arrow_404(arg_2: Tuple[str, str]) -> Doc:
return word(arg_2[0])
args_5 : FSharpList[Doc] = map_1(arrow_404, fields_3)
return singleton(vsep(of_array_1([Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("let"), word(function_name)), parens(seplist(word(", "), args_5))), word(":")), match_value_12[3]), word("=")), Doc_op_RightShift_2AAA0F3C(vsep(singleton_1(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word(ctor_name_1), word("{")), seplist(word(";"), args_5)), word("}")))), 4)])))
return collect(arrow_405, doc_ctor_wrap_funcs)
return append_1(collect(arrow_403, Sigma__GetRecordTypes(analyzer.Sigma)), delay(arrow_406))
return append_1(collect(arrow_394, adt_cases), delay(arrow_407))
return append_1(singleton(word("type ___used_t_head_90xasda")), delay(arrow_408))
return append_1(singleton(empty_2), delay(arrow_409))
return append_1(singleton(word(to_text(interpolate("open %P();;", [capitalized(filename_lexer)])))), delay(arrow_410))
return append_1(singleton(word(to_text(interpolate("open %P();;", [capitalized(filename_require)])))), delay(arrow_411))
file_constructors : Tuple[str, Doc] = (filename_constructors + ".ml", vsep(to_list(delay(arrow_412))))
token_names : FSharpList[str] = empty()
ReferencedNamedTokens : List[str] = list(analyzer.ReferencedNamedTokens)
def arrow_414(k_1: str, analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> int:
def arrow_413(y_5: str) -> bool:
return k_1 == y_5
return find_index(arrow_413, analyzer.TokenFragments)
class ObjectExpr416:
@property
def Compare(self) -> Any:
def arrow_415(x_15: int, y_6: int) -> int:
return compare_primitives(x_15, y_6)
return arrow_415
sort_in_place_by(arrow_414, ReferencedNamedTokens, ObjectExpr416())
lexical_rule_defs : FSharpList[Doc] = empty()
tokenizer_cases : FSharpList[Doc] = empty()
class ObjectExpr418:
@property
def Compare(self) -> Any:
def arrow_417(x_16: str, y_7: str) -> int:
return compare_primitives(x_16, y_7)
return arrow_417
arr : List[str] = sort(list(analyzer.LiteralTokens), ObjectExpr418())
for idx_1 in range(0, (len(arr) - 1) + 1, 1):
k_2 : str = arr[idx_1]
v_2 : Doc = word(mk_lexer(lexerule(3, k_2)))
token_name : str = cg_symbol(symbol(0, k_2, True))
lexical_rule_name : str = "rule_" + token_name
lexical_rule_def : Doc = Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("let"), word(lexical_rule_name)), word("=")), bracket(Doc_op_Addition_Z7CFFAC00(word("%sedlex.regexp?"), v_2)))
lexical_rule_defs = cons(lexical_rule_def, lexical_rule_defs)
tokenizer_case : Doc = Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("|"), word(lexical_rule_name)), word(to_text(interpolate("-\u003e %P() (mktoken %P())", [token_name, var_lexbuf]))))
tokenizer_cases = cons(tokenizer_case, tokenizer_cases)
token_names = cons(token_name, token_names)
with get_enumerator(lexer_maps) as enumerator:
while enumerator.System_Collections_IEnumerator_MoveNext():
for_loop_var : Tuple[str, Doc] = enumerator.System_Collections_Generic_IEnumerator_00601_get_Current()
v_3 : Doc = for_loop_var[1]
k_3 : str = for_loop_var[0]
if contains(k_3, analyzer.IgnoreSet):
lexical_rule_name_1 : str = "rule_" + name_of_named_term(k_3)
lexical_rule_def_1 : Doc = Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("let"), word(lexical_rule_name_1)), word("=")), bracket(Doc_op_Addition_Z7CFFAC00(word("%sedlex.regexp?"), v_3)))
lexical_rule_defs = cons(lexical_rule_def_1, lexical_rule_defs)
tokenizer_case_1 : Doc = Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("|"), word(lexical_rule_name_1)), word(to_text(interpolate("-\u003e %P() %P()", [var_tokenizer, var_lexbuf]))))
tokenizer_cases = cons(tokenizer_case_1, tokenizer_cases)
else:
token_name_1 : str = name_of_named_term(k_3)
lexical_rule_name_2 : str = "rule_" + token_name_1
lexical_rule_def_2 : Doc = Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("let"), word(lexical_rule_name_2)), word("=")), bracket(Doc_op_Addition_Z7CFFAC00(word("%sedlex.regexp?"), v_3)))
lexical_rule_defs = cons(lexical_rule_def_2, lexical_rule_defs)
class ObjectExpr421:
@property
def Equals(self) -> Any:
def arrow_419(x_17: str, y_8: str) -> bool:
return x_17 == y_8
return arrow_419
@property
def GetHashCode(self) -> Any:
def arrow_420(x_17: str) -> int:
return string_hash(x_17)
return arrow_420
if contains_1(k_3, ReferencedNamedTokens, ObjectExpr421()):
tokenizer_case_2 : Doc = Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("|"), word(lexical_rule_name_2)), word(to_text(interpolate("-\u003e %P() (mktoken %P())", [token_name_1, var_lexbuf]))))
tokenizer_cases = cons(tokenizer_case_2, tokenizer_cases)
token_names = cons(token_name_1, token_names)
tokenizer_cases = of_array_with_tail([word(to_text(interpolate("| _ -\u003e _unknown_token %P()", [var_lexbuf]))), Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("|"), word("eof -\u003e")), word("EOF"))], tokenizer_cases)
token_names_1 : FSharpList[str] = reverse(token_names)
tokenizer_cases_1 : FSharpList[Doc] = reverse(tokenizer_cases)
def arrow_424(analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> IEnumerable[Doc]:
def arrow_422(tkn: str) -> Doc:
return Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("|"), word(tkn)), word("of")), word("tbnf_token"))
def arrow_423(_unit: Any=None) -> IEnumerable[Doc]:
return singleton(word("| EOF"))
return append_1(map(arrow_422, token_names_1), delay(arrow_423))
file_lexer : Tuple[str, Doc] = (filename_lexer + ".ml", vsep(of_array_1([word(rts_file_string), empty_2, word("type token ="), vsep(to_list(delay(arrow_424))), empty_2, vsep(lexical_rule_defs), empty_2, vsep(of_array_1([Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(Doc_op_Addition_Z7CFFAC00(word("let rec"), word(var_tokenizer)), word(var_lexbuf)), word("=")), align(indent(4, vsep(cons(word(to_text(interpolate("match%%sedlex %P() with", [var_lexbuf]))), tokenizer_cases_1))))]))])))
match_value_13 : Option[monot] = try_find("start", analyzer.Omega)
if match_value_13 is not None:
start_t : monot = match_value_13
start_name : str = cg_symbol(symbol(1, "start"))
start_t_1 : str = cg_type(monot__prune(start_t))
def arrow_437(analyzer: Analyzer=analyzer, cg_options: CodeGenOptions=cg_options, lang_name: str=lang_name, stmts: List[definition]=stmts) -> IEnumerable[Doc]:
def arrow_436(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_435(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_434(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_433(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_432(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_425(token_name_2: str) -> Doc:
return word(to_text(interpolate("%%token\u003ctbnf_token\u003e %P()", [token_name_2])))
def arrow_431(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_430(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_429(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_428(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_427(_unit: Any=None) -> IEnumerable[Doc]:
def arrow_426(_unit: Any=None) -> IEnumerable[Doc]:
return singleton(file_grammar)
return append_1(singleton(word(to_text(printf("start : %s EOF { $1 }"))(start_name))), delay(arrow_426))
return append_1(singleton(empty_2), delay(arrow_427))
return append_1(singleton(word("%%")), delay(arrow_428))
return append_1(singleton(word(to_text(interpolate("%%start \u003c%P()\u003e start", [start_rule_qualified_type])))) if (start_rule_qualified_type is not None) else (singleton(word(to_text(interpolate("%%start \u003c%P()\u003e start", [start_t_1]))))), delay(arrow_429))
return append_1(singleton(word("%token EOF")), delay(arrow_430))
return append_1(map(arrow_425, token_names_1), delay(arrow_431))
return append_1(singleton(word("%}")), delay(arrow_432))
return append_1(singleton(word(to_text(interpolate("open %P();;", [capitalized(filename_constructors)])))), delay(arrow_433))
return append_1(singleton(word(to_text(interpolate("open %P();;", [capitalized(filename_lexer)])))), delay(arrow_434))
return append_1(singleton(word(to_text(interpolate("open %P();;", [capitalized(filename_require)])))), delay(arrow_435))
return append_1(singleton(word("%{")), delay(arrow_436))
return [file_constructors, (filename_parser + ".mly", vsep(to_list(delay(arrow_437)))), file_lexer]
else:
raise UnboundNonterminal("start") | 0.517815 | 0.253896 |
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import numpy as np
import os
import sys
from pprint import pprint
from datetime import datetime
from datetime import timedelta
import pickle
import copy
from mpl_toolkits.basemap import Basemap
import mysql.connector
timezone = -8
endpointsWHI = []
#fire times
fire_time1 = [datetime.strptime('2009/07/27 00:00', '%Y/%m/%d %H:%M'), datetime.strptime('2009/08/08 00:00', '%Y/%m/%d %H:%M')] #row_datetimes following Takahama et al (2011) doi:10.5194/acp-11-6367-2011 #PST
fire_time2 = [datetime.strptime('2010/07/26 09:00', '%Y/%m/%d %H:%M'), datetime.strptime('2010/07/28 09:30', '%Y/%m/%d %H:%M')] #jason's BC clear report #PST
#database connection
cnx = mysql.connector.connect(user='root', password='<PASSWORD>', host='localhost', database='black_carbon')
cursor = cnx.cursor()
SP2_data_query = ('SELECT UNIX_UTC_6h_midtime FROM whi_gc_and_sp2_6h_mass_concs WHERE RH_threshold = 90 ORDER BY UNIX_UTC_6h_midtime')
cursor.execute(SP2_data_query)
dates = cursor.fetchall()
cnx.close()
date_times = []
for date in dates:
date_time = datetime.utcfromtimestamp(date[0])
date_times.append(date_time)
endpoints_LRT = []
endpoints_SPac = []
endpoints_NPac = []
endpoints_Cont = []
#CLUSLIST_file ='C:/HYSPLIT_argh/WHI_1h_10-day_working/even_hours/CLUSLIST_4'
CLUSLIST_file = 'C:/Users/<NAME>/Documents/Data/WHI long term record/HYSPLIT/clustering/CLUSLIST_10'
with open(CLUSLIST_file,'r') as f:
for line in f:
newline = line.split()
date = datetime(2000+int(newline[2]),int(newline[3]),int(newline[4]),int(newline[5]))
if (fire_time1[0] <= date < fire_time1[1]) or (fire_time2[0] <= date < fire_time2[1]):
continue
for date_time in date_times:
if date == date_time:
cluster = int(newline[0])
file = newline[7]
tdump_file = open(file, 'r')
endpoints = []
data_start = False
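                # HYSPLIT tdump files list header records first; endpoint rows
                # (lat/lon in columns 9 and 10) only start after the line whose
                # second field is 'PRESSURE', hence the data_start flag below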
for line in tdump_file:
newline = line.split()
if data_start == True:
lat = float(newline[9])
lon = float(newline[10])
endpoint = [lat, lon]
endpoints.append(endpoint)
if newline[1] == 'PRESSURE':
data_start = True
tdump_file.close()
if cluster in [4]: #N Can (Cont)
endpoints_Cont.append(endpoints)
if cluster in [6,8,9]: #S Pac
endpoints_SPac.append(endpoints)
if cluster in [2,7]: # W Pac/Asia (LRT)
endpoints_LRT.append(endpoints)
if cluster in [1,3,5,10]: #N Pac
endpoints_NPac.append(endpoints)
print(len(endpoints_NPac), len(endpoints_SPac), len(endpoints_Cont), len(endpoints_LRT))
#plotting
###set up the basemap instance
lat_pt = 57.06
lon_pt = -157.96
plt_lat_min = -10
plt_lat_max = 90#44.2
plt_lon_min = -220#-125.25
plt_lon_max = -50
m = Basemap(width=9000000,height=7000000,
rsphere=(6378137.00,6356752.3142),
resolution='l',area_thresh=1000.,projection='lcc',
lat_1=45.,lat_2=55,lat_0=lat_pt,lon_0=lon_pt)
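# one Basemap instance is reused for all four panels below: its draw*/plot
# methods render into the current matplotlib axes, so each add_subplot call
# switches the target panel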
fig = plt.figure(figsize=(10,8))
ax1 = fig.add_subplot(221)
ax1.set_xlabel('Northern Pacific')
ax1.xaxis.set_label_position('top')
m.drawmapboundary(fill_color='white')
m.drawcoastlines()
m.fillcontinents(color='#FFFFBF',lake_color='#ABD9E9',zorder=0)
m.drawcountries()
ax1.text(0.6, 0.05,'88 trajectories', transform=ax1.transAxes)
for row in endpoints_NPac:
np_endpoints = np.array(row)
lats = np_endpoints[:,0]
lons = np_endpoints[:,1]
x,y = m(lons,lats)
bt = m.plot(x,y,color='b')
ax2 = fig.add_subplot(222)
ax2.set_xlabel('Southern Pacific')
ax2.xaxis.set_label_position('top')
m.drawmapboundary(fill_color='white')
m.drawcoastlines()
m.fillcontinents(color='#FFFFBF',lake_color='#ABD9E9',zorder=0)
m.drawcountries()
ax2.text(0.6, 0.05,'34 trajectories', transform=ax2.transAxes)
for row in endpoints_SPac:
np_endpoints = np.array(row)
lats = np_endpoints[:,0]
lons = np_endpoints[:,1]
x,y = m(lons,lats)
bt = m.plot(x,y,color='g')
ax4 = fig.add_subplot(223)
ax4.set_xlabel('Western Pacific/Asia')
ax4.xaxis.set_label_position('top')
m.drawmapboundary(fill_color='white')
m.drawcoastlines()
m.fillcontinents(color='#FFFFBF',lake_color='#ABD9E9',zorder=0)
m.drawcountries()
ax4.text(0.6, 0.05,'18 trajectories', transform=ax4.transAxes)
for row in endpoints_LRT:
np_endpoints = np.array(row)
lats = np_endpoints[:,0]
lons = np_endpoints[:,1]
x,y = m(lons,lats)
bt = m.plot(x,y,color='orange')
ax5 = fig.add_subplot(224)
ax5.set_xlabel('Northern Canada')
ax5.xaxis.set_label_position('top')
m.drawmapboundary(fill_color='white')
m.drawcoastlines()
m.fillcontinents(color='#FFFFBF',lake_color='#ABD9E9',zorder=0)
m.drawcountries()
ax5.text(0.6, 0.05,'14 trajectories', transform=ax5.transAxes)
for row in endpoints_Cont:
np_endpoints = np.array(row)
lats = np_endpoints[:,0]
lons = np_endpoints[:,1]
x,y = m(lons,lats)
bt = m.plot(x,y,color='r')
plt.subplots_adjust(hspace=0.15)
plt.subplots_adjust(wspace=0.1)
#labels = ['Western Pacific/Asia (15%)','Southern Pacific (19%)','Georgia Basin/Puget Sound (4%)','Northern Pacific (48%)','Northern Canada (5%)']
os.chdir('C:/Users/<NAME>/Documents/Data/WHI long term record/HYSPLIT/')
plt.savefig('WHI_FT_all_6h_HYSPLIT_BTs-4clusters.png', bbox_inches='tight')
#plt.savefig('WHI_FT_all_2h_HYSPLIT_BTs_sep_maps_by_cluster.png', bbox_inches='tight')
plt.show() | WHI_long_term_v2_display_all_HYSPLIT_BTs_colored_by_cluster.py | 0.162247 | 0.248386 |
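The heart of the script above is the tdump endpoint parse. Here is a standalone sketch of that step, under the same assumptions the script makes (lat/lon in columns 9 and 10, data rows starting after the 'PRESSURE' header line); the file name in the usage comment is hypothetical.

import numpy as np

def read_tdump_endpoints(path):
    """Return an (N, 2) array of [lat, lon] rows from a HYSPLIT tdump file,
    assuming endpoint records begin after the 'PRESSURE' header line."""
    endpoints = []
    data_start = False
    with open(path) as f:
        for line in f:
            fields = line.split()
            if data_start:
                endpoints.append([float(fields[9]), float(fields[10])])
            elif len(fields) > 1 and fields[1] == 'PRESSURE':
                data_start = True
    return np.array(endpoints)

# pts = read_tdump_endpoints('tdump_example')  # hypothetical file name
# lats, lons = pts[:, 0], pts[:, 1]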
import asyncio
from . import common
class StreamSession(common.SessionLayer):
"""
Stream session layer. Uses asyncio's asynchronous networking.
"""
reader: asyncio.StreamReader = None
writer: asyncio.StreamWriter = None
    async def send(self, datagram: str):
        self.writer.write(bytes(datagram + "\n", "utf-8"))
        await self.writer.drain()
async def run(self):
datagram = b""
while True:
if self.reader.at_eof():
return
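            # readuntil() raises IncompleteReadError when EOF arrives before the
            # b"\n" separator; the partial bytes are buffered and the loop retries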
try:
datagram = datagram + await self.reader.readuntil()
asyncio.create_task(self.application.receive(datagram))
datagram = b""
except asyncio.IncompleteReadError as e:
datagram = datagram + e.partial
def __init__(self, reader, writer, *args, **kwargs):
self.reader = reader
self.writer = writer
super().__init__(*args, **kwargs)
async def connect_inet(host, port, *args, tls=False, tls_host=None, **kwargs):
"""
    Connects as a client to the specified Internet server using TCP and, if tls is set to True, TLS. If specified, tls_host is used for server name checks. All other arguments are passed through to the session and application layers.
"""
(reader, writer) = await asyncio.open_connection(host, port, ssl=tls, server_hostname=tls_host)
session = StreamSession(reader, writer, *args, **kwargs)
asyncio.create_task(session.run())
return session
async def server_inet(port, *args, **kwargs):
"""
Runs an Internet server using TCP. A separate session and application layer are created for each client. All arguments are passed through to the session and application layers each time they are created.
"""
async def launch(reader, writer):
await StreamSession(reader, writer, *args, **kwargs).run()
asyncio.create_task(asyncio.start_server(launch, port=port)) # TODO: Support TLS.
async def connect_unix(path, *args, **kwargs):
"""
Connects as a client to the specified Unix-domain sockets path. All other arguments are passed through to the session and application layers.
"""
(reader, writer) = await asyncio.open_unix_connection(path)
session = StreamSession(reader, writer, *args, **kwargs)
asyncio.create_task(session.run())
return session
async def server_unix(path, *args, **kwargs):
"""
Runs a Unix-domain sockets server on the specified path. A separate session and application layer are created for each client. All arguments are passed through to the session and application layers each time they are created.
"""
async def launch(reader, writer):
await StreamSession(reader, writer, *args, **kwargs).run()
    await asyncio.start_unix_server(launch, path) | chprops/stream.py | 0.469034 | 0.20343 |
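A hypothetical usage sketch for the module above. common.SessionLayer's constructor is not shown here, so the application= wiring below is an assumption rather than the confirmed API.

import asyncio
import chprops.stream as stream  # assumed import path

class EchoApp:
    # minimal application layer: just print every framed datagram
    async def receive(self, datagram: bytes):
        print("got", datagram)

async def main():
    # assumes SessionLayer forwards the application object from **kwargs
    session = await stream.connect_unix("/tmp/chprops.sock", application=EchoApp())
    await session.send("hello")

asyncio.run(main())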
import json
import os
import string
from urllib.request import urlopen
from dotenv import load_dotenv
def generate_keyword_mapping(queries: list) -> dict:
"""
Creates a mapping of keywords to queries.
:param queries: a list of queries with responses
:return: a dictionary of keywords to query indices
"""
keyword_to_queries = dict()
for i, question in enumerate(queries):
if question.get('query'):
keywords = generate_keywords(question.get("query"))
for keyword in keywords:
keyword_to_queries.setdefault(keyword, {})
keyword_to_queries[keyword].setdefault(i, 0)
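                # a keyword found in the query text itself counts 10x as much
                # as one found only in the response text (weighted below)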
keyword_to_queries[keyword][i] += 10
keywords = generate_keywords(question.get("response"))
for keyword in keywords:
keyword_to_queries.setdefault(keyword, {})
keyword_to_queries[keyword].setdefault(i, 0)
keyword_to_queries[keyword][i] += 1
return keyword_to_queries
def generate_keywords(query: str) -> list:
"""
Create a list of keywords from a query.
:param query: a search query
:return: the list of keywords from that query
"""
stop_words = ["", "is", "a", "the", "can",
"i", "to", "in", "by", "from", "be", "of",
"what", "where", "when", "why", "how", "which"]
keywords = query \
.translate(str.maketrans('', '', string.punctuation)) \
.lower() \
.split(" ")
keywords = [word for word in keywords if word not in stop_words]
return keywords
def search(keyword_to_queries: dict, keywords: list) -> list:
"""
Looks up the list of queries that satisfy a keyword.
:param keyword_to_queries: a mapping of keywords to query indices
:param keywords: a list of keywords to lookup
:return: a list of query indices
"""
query_count = dict()
for keyword in keywords:
query_indices = keyword_to_queries.get(keyword, {})
for i, weight in query_indices.items():
query_count.setdefault(i, 0)
query_count[i] += weight
best_matches = list(
dict(sorted(query_count.items(), key=lambda item: item[1], reverse=True)).keys())
return best_matches
def generate_similar_queries(queries: list, keyword_to_queries: dict) -> None:
"""
Generates a list of similar queries.
:param queries: a list of queries
:param keyword_to_queries: a mapping of keywords to query indices
"""
for i, query in enumerate(queries):
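        # entry 0 is skipped: it is never treated as a searchable query here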
if i > 0:
keywords = generate_keywords(query["query"])
top_ids = search(keyword_to_queries, keywords)
top_ids.remove(i)
query["similar_queries"] = top_ids
def create_md_link(url: str, text: str) -> str:
"""
Creates a markdown link.
:param url: the url to link to
:param text: the text to display
:return: the markdown link
"""
if url:
return f"[{text}]({url})"
return text
def load_knowledge() -> tuple[int, list]:
"""
Loads the bot's knowledge database. Prioritizes the
KNOWLEDGE_PATH environment variable. KNOWLEDGE_PATH
can be set to a local file or a remote URL. Otherwise,
uses the local queries file.
:return: a tuple of the type of knowledge database and the
knowledge database (0 for remote, 1 for local, 2 for default)
"""
if path := os.environ.get("KNOWLEDGE_PATH"):
try:
data = urlopen(path).read().decode("utf-8")
return 0, json.loads(data)
except:
return 1, json.load(open(path))
else:
return 2, json.load(open("queries.json"))
def refresh_knowledge() -> tuple[list, dict]:
"""
Generates useful information from the knowledge database.
Useful when initializing the bot or when the knowledge
database has been updated.
:return: a tuple of the knowledge database and a mapping of
keywords to query indices
"""
load_dotenv()
_, queries = load_knowledge()
keyword_mapping = generate_keyword_mapping(queries)
generate_similar_queries(queries, keyword_mapping)
    return queries, keyword_mapping | pymon_utils.py | 0.625324 | 0.416678 |
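A short usage sketch for the keyword pipeline above, run alongside pymon_utils; the two sample entries are invented for illustration, and slot 0 is a placeholder because generate_similar_queries never searches it.

queries = [
    {},  # placeholder slot; index 0 is never searched
    {"query": "How do I install the bot?", "response": "Run pip install pymon."},
    {"query": "Where is the knowledge file?", "response": "Set KNOWLEDGE_PATH."},
]
mapping = generate_keyword_mapping(queries)
hits = search(mapping, generate_keywords("install bot"))
print(hits)  # [1] -- entry 1 matches on both keywords, weighted by the query text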
import json
import os
import sys
import time
from dateutil.parser import parse
from prometheus_client import start_http_server, Summary
from prometheus_client.core import GaugeMetricFamily, REGISTRY
from urllib.request import Request, urlopen
COLLECTION_TIME = Summary("gitlab_jobs_collector_collect_seconds",
"Time spent to collect metrics from GitLab")
class GitLabJobsCollector:
"""gitlab jobs exporter"""
scopes = ["failed", "success"]
def __init__(self, url, project, token):
"""initalize target and project for collector"""
self._url = url.rstrip("/")
self._project = project
self._token = token
self._prometheus_metrics = {}
def collect(self):
"""collect interface used by REGISTRY"""
start = time.time()
self._setup_prometheus_metrics()
for scope in self.scopes:
latest = self._request_data(scope)
self._add_to_prometheus_metrics(scope, latest)
for scope in self.scopes:
for metric in self._prometheus_metrics[scope].values():
yield metric
duration = time.time() - start
COLLECTION_TIME.observe(duration)
def _setup_prometheus_metrics(self):
"""setup metrics we want to export"""
for scope in self.scopes:
self._prometheus_metrics[scope] = {
"id":
GaugeMetricFamily("gitlab_job_latest_id",
"latest GitLab job id",
labels=["project", "scope"]),
"duration":
GaugeMetricFamily("gitlab_job_latest_duration_seconds",
"latest GitLab job duration in seconds",
labels=["project", "scope"]),
"created_timestamp":
GaugeMetricFamily("gitlab_job_latest_created_timestamp_seconds",
"latest GitLab job created timestamp in unixtime",
labels=["project", "scope"]),
"finished_timestamp":
GaugeMetricFamily("gitlab_job_latest_finished_timestamp_seconds",
"latest GitLab job finished timestamp in unixtime",
labels=["project", "scope"]),
"started_timestamp":
GaugeMetricFamily("gitlab_job_latest_started_timestamp_seconds",
"latest GitLab job started timestamp in unixtime",
labels=["project", "scope"]),
}
def _request_data(self, scope):
"""request jobs from gitlab for a scope"""
request = Request(
"{0}/api/v4/projects/{1}/jobs?scope[]={2}".format(
self._url, self._project, scope))
request.add_header("PRIVATE-TOKEN", self._token)
# latest job is always the first item
return json.loads(urlopen(request).read().decode("utf-8"))[0]
def _add_to_prometheus_metrics(self, scope, data):
"""add compute data and scope for prometheus_metrics"""
        try:
            created = parse(data.get("created_at")).timestamp()
        except TypeError:
            created = 0
        try:
            finished = parse(data.get("finished_at")).timestamp()
        except TypeError:
            finished = 0
        try:
            started = parse(data.get("started_at")).timestamp()
        except TypeError:
            started = 0
self._prometheus_metrics[scope]["id"].add_metric([self._project, scope], data.get("id", 0))
self._prometheus_metrics[scope]["duration"].add_metric([self._project, scope], data.get("duration", 0))
self._prometheus_metrics[scope]["created_timestamp"].add_metric([self._project, scope], created)
self._prometheus_metrics[scope]["finished_timestamp"].add_metric([self._project, scope], finished)
self._prometheus_metrics[scope]["started_timestamp"].add_metric([self._project, scope], started) | gitlab_jobs_exporter/__init__.py | import json
import os
import sys
import time
from dateutil.parser import parse
from prometheus_client import start_http_server, Summary
from prometheus_client.core import GaugeMetricFamily, REGISTRY
from urllib.request import Request, urlopen
COLLECTION_TIME = Summary("gitlab_jobs_collector_collect_seconds",
"Time spent to collect metrics from GitLab")
class GitLabJobsCollector:
"""gitlab jobs exporter"""
scopes = ["failed", "success"]
def __init__(self, url, project, token):
"""initalize target and project for collector"""
self._url = url.rstrip("/")
self._project = project
self._token = token
self._prometheus_metrics = {}
def collect(self):
"""collect interface used by REGISTRY"""
start = time.time()
self._setup_prometheus_metrics()
for scope in self.scopes:
latest = self._request_data(scope)
self._add_to_prometheus_metrics(scope, latest)
for scope in self.scopes:
for metric in self._prometheus_metrics[scope].values():
yield metric
duration = time.time() - start
COLLECTION_TIME.observe(duration)
def _setup_prometheus_metrics(self):
"""setup metrics we want to export"""
for scope in self.scopes:
self._prometheus_metrics[scope] = {
"id":
GaugeMetricFamily("gitlab_job_latest_id",
"latest GitLab job id",
labels=["project", "scope"]),
"duration":
GaugeMetricFamily("gitlab_job_latest_duration_seconds",
"latest GitLab job duration in seconds",
labels=["project", "scope"]),
"created_timestamp":
GaugeMetricFamily("gitlab_job_latest_created_timestamp_seconds",
"latest GitLab job created timestamp in unixtime",
labels=["project", "scope"]),
"finished_timestamp":
GaugeMetricFamily("gitlab_job_latest_finished_timestamp_seconds",
"latest GitLab job finished timestamp in unixtime",
labels=["project", "scope"]),
"started_timestamp":
GaugeMetricFamily("gitlab_job_latest_started_timestamp_seconds",
"latest GitLab job started timestamp in unixtime",
labels=["project", "scope"]),
}
def _request_data(self, scope):
"""request jobs from gitlab for a scope"""
request = Request(
"{0}/api/v4/projects/{1}/jobs?scope[]={2}".format(
self._url, self._project, scope))
request.add_header("PRIVATE-TOKEN", self._token)
# latest job is always the first item
return json.loads(urlopen(request).read().decode("utf-8"))[0]
def _add_to_prometheus_metrics(self, scope, data):
"""add compute data and scope for prometheus_metrics"""
try: created = parse(data.get("created_at")).timestamp()
except TypeError: created = 0
try: finished = parse(data.get("finished_at")).timestamp()
except TypeError: finished = 0
try: started = parse(data.get("started_at")).timestamp()
except TypeError: started = 0
self._prometheus_metrics[scope]["id"].add_metric([self._project, scope], data.get("id", 0))
self._prometheus_metrics[scope]["duration"].add_metric([self._project, scope], data.get("duration", 0))
self._prometheus_metrics[scope]["created_timestamp"].add_metric([self._project, scope], created)
self._prometheus_metrics[scope]["finished_timestamp"].add_metric([self._project, scope], finished)
self._prometheus_metrics[scope]["started_timestamp"].add_metric([self._project, scope], started) | 0.337531 | 0.148016 |
# --- src/toil/test/lib/test_misc.py ---
import logging
import getpass
from toil.lib.misc import get_user_name
from toil.test import ToilTest
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
class UserNameAvailableTest(ToilTest):
"""
Make sure we can get user names when they are available.
"""
def test_get_user_name(self):
# We assume we have the user in /etc/passwd when running the tests.
real_user_name = getpass.getuser()
apparent_user_name = get_user_name()
self.assertEqual(apparent_user_name, real_user_name)
class UserNameUnvailableTest(ToilTest):
"""
Make sure we can get something for a user name when user names are not
available.
"""
def setUp(self):
super().setUp()
# Monkey patch getpass.getuser to fail
self.original_getuser = getpass.getuser
def fake_getuser():
raise KeyError('Fake key error')
getpass.getuser = fake_getuser
def tearDown(self):
# Fix the module we hacked up
getpass.getuser = self.original_getuser
super().tearDown()
def test_get_user_name(self):
apparent_user_name = get_user_name()
# Make sure we got something
self.assertTrue(isinstance(apparent_user_name, str))
self.assertNotEqual(apparent_user_name, '')
class UserNameVeryBrokenTest(ToilTest):
"""
Make sure we can get something for a user name when user name fetching is
broken in ways we did not expect.
"""
def setUp(self):
super().setUp()
# Monkey patch getpass.getuser to fail
self.original_getuser = getpass.getuser
def fake_getuser():
raise RuntimeError('Fake error that we did not anticipate')
getpass.getuser = fake_getuser
def tearDown(self):
# Fix the module we hacked up
getpass.getuser = self.original_getuser
super().tearDown()
def test_get_user_name(self):
apparent_user_name = get_user_name()
# Make sure we got something
self.assertTrue(isinstance(apparent_user_name, str))
        self.assertNotEqual(apparent_user_name, '')
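# These unittest-style tests can be run with the standard runner, e.g.
# `python -m unittest src.toil.test.lib.test_misc` (module path assumed
# from the file location above).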
# --- buildscripts/utilities.py ---
import os
import platform
import re
import subprocess
import time
import traceback
"""
Create conda environment with desired python and packages
"""
def create_conda_env(conda_activate, env_name, python, packages=None, channels=''):
    # Avoid the mutable default argument pitfall; None means no extra packages.
    packages_list = ' '.join(packages or [])
format_print(f'Setup conda {env_name} environment')
run_command(f'{conda_activate}conda remove -q -y --name {env_name} --all')
run_command(f'{conda_activate}conda create -q -y -n {env_name} python={python} {packages_list} {channels}')
"""
Create list of packages required for build and test from conda recipe
"""
def get_sdc_env(conda_activate, sdc_src, sdc_recipe, python, numpy, channels):
def create_env_list(packages, exclude=''):
env_list = []
env_set = set()
for item in packages:
package = re.search(r"[\w-]+" , item).group()
version = ''
if re.search(r"\d+\.[\d\*]*\.?[\d\*]*", item) and '<=' not in item and '>=' not in item:
version = '={}'.format(re.search(r"\d+\.[\d\*]*\.?[\d\*]*", item).group())
if package not in env_set and package not in exclude:
env_set.add(package)
env_list.append(f'{package}{version}')
return env_list
from ruamel_yaml import YAML
yaml=YAML()
sdc_recipe_render = os.path.join(sdc_src, 'sdc_recipe_render.yaml')
# Create environment with conda-build
sdc_render_env = 'sdc_render'
sdc_render_env_activate = get_activate_env_cmd(conda_activate, sdc_render_env)
format_print('Render sdc build and test environment using conda-build')
create_conda_env(conda_activate, sdc_render_env, python, ['conda-build'])
run_command('{} && {}'.format(sdc_render_env_activate,
' '.join([f'conda render --python={python}',
f'--numpy={numpy}',
f'{channels} -f {sdc_recipe_render} {sdc_recipe}'])))
with open(sdc_recipe_render, 'r') as recipe:
data = yaml.load(recipe)
build = data['requirements']['build']
host = data['requirements']['host']
run = data['requirements']['run']
test = data['test']['requires']
return {'build': create_env_list(build + host + run, 'vs2017_win-64'),
'test': create_env_list(run + test)}
"""
Return list of conda and wheel packages in build_output folder
"""
def get_sdc_build_packages(build_output):
    if platform.system() == 'Windows':
        os_dir = 'win-64'
    elif platform.system() == 'Linux':
        os_dir = 'linux-64'
    elif platform.system() == 'Darwin':
        os_dir = 'osx-64'
    else:
        raise ValueError(f'Unsupported platform: {platform.system()}')
sdc_packages = []
sdc_build_dir = os.path.join(build_output, os_dir)
for item in os.listdir(sdc_build_dir):
item_path = os.path.join(sdc_build_dir, item)
if os.path.isfile(item_path) and re.search(r'^sdc.*\.tar\.bz2$|^sdc.*\.whl$', item):
sdc_packages.append(item_path)
return sdc_packages
"""
Return platform specific activation cmd
"""
def get_activate_env_cmd(conda_activate, env_name):
if platform.system() == 'Windows':
return f'{conda_activate}activate {env_name}'
else:
return f'{conda_activate}source activate {env_name}'
"""
Return platform specific conda activation cmd
"""
def get_conda_activate_cmd(conda_prefix):
if 'CONDA_PREFIX' in os.environ:
return ''
else:
if platform.system() == 'Windows':
return '{} && '.format(os.path.join(conda_prefix, 'Scripts', 'activate.bat'))
else:
return 'source {} && '.format(os.path.join(conda_prefix, 'bin', 'activate'))
"""
Print format message with timestamp
"""
def format_print(msg, new_block=True):
if new_block:
print('='*80, flush=True)
print(f'{time.strftime("%d/%m/%Y %H:%M:%S")}: {msg}', flush=True)
"""
Execute command
"""
def run_command(command):
print('='*80, flush=True)
print(f'{time.strftime("%d/%m/%Y %H:%M:%S")}: {command}', flush=True)
print('-'*80, flush=True)
if platform.system() == 'Windows':
subprocess.check_call(command, stdout=None, stderr=None, shell=True)
else:
subprocess.check_call(command, executable='/bin/bash', stdout=None, stderr=None, shell=True)
"""
Set environment variable
"""
def set_environment_variable(key, value):
if key in os.environ:
os.environ[key] += os.pathsep + value
else:
        os.environ[key] = value
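# Example flow (a sketch; the conda prefix and environment details are
# assumed): resolve the activation prefix, create a build environment,
# and run a command inside it.
conda_activate = get_conda_activate_cmd(os.path.expanduser('~/miniconda3'))
create_conda_env(conda_activate, 'sdc_build', '3.9', ['conda-build'])
run_command('{} && conda list'.format(get_activate_env_cmd(conda_activate, 'sdc_build')))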
# --- expressions.py ---
__author__ = '<NAME>'
__date__ = '2021-03-19'
__copyright__ = '(C) 2021 by <NAME>'
from qgis.core import *
from qgis.gui import *
from qgis.utils import qgsfunction
from lftools.geocapt.cartography import (map_sistem,
MeridianConvergence,
SRC_Projeto,
ScaleFactor,
geom2PointList,
reprojectPoints,
areaGauss,
inom2mi as INOM2MI)
from lftools.geocapt.topogeo import (dd2dms as DD2DMS,
dms2dd as DMS2DD,
azimute, str2HTML,
geod2geoc,
geoc2enu)
from numpy import array, pi, sqrt, median
import numpy as np
from pyproj.crs import CRS
import unicodedata
import re
# https://qgis.org/pyqgis/3.2/core/Expression/QgsExpression.html
LOC = QgsApplication.locale()[:2]
def tr(*string):
    # Translate to Portuguese: arg[0] - English (default), arg[1] - Portuguese
if LOC == 'pt':
if len(string) == 2:
return string[1]
else:
return string[0]
else:
return string[0]
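# For example, tr('Area', 'Área') returns 'Área' when the QGIS locale is
# Portuguese and 'Area' otherwise; with a single argument it always
# returns that argument unchanged.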
@qgsfunction(args='auto', group='LF Tools')
def fieldstat(layer_name, field_name, type, feature, parent):
''' Returns the Aggregate function of a layer's field.
<h2>Example usage:</h2>
<ul>
<li>fieldstat('layer_name', 'field_name', 'sum') ->Sum of the values</li>
<li>fieldstat('layer_name', 'field_name', 'min') ->Min of the values</li>
<li>fieldstat('layer_name', 'field_name', 'max') ->Max of the values</li>
<li>fieldstat('layer_name', 'field_name', 'mean') ->Mean of the values</li>
<li>fieldstat('layer_name', 'field_name', 'std') ->Standard Deviation of the values</li>
<li>fieldstat('layer_name', 'field_name', 'median') ->Median of the values</li>
</ul>'''
lista = []
if len(QgsProject.instance().mapLayersByName(layer_name)) == 1:
layer = QgsProject.instance().mapLayersByName(layer_name)[0]
else:
layer = QgsProject.instance().mapLayer(layer_name)
for feat in layer.getFeatures():
att = feat[field_name]
if att:
lista += [float(att)]
if type == 'sum':
return float((array(lista)).sum())
elif type == 'min':
return float((array(lista)).min())
elif type == 'max':
return float((array(lista)).max())
elif type == 'mean':
return float((array(lista)).mean())
elif type == 'std':
return float((array(lista)).std())
elif type == 'median':
return float(median(array(lista)))
else:
return None
@qgsfunction(args='auto', group='LF Tools')
def coord2inom(lon, lat, ScaleD, feature, parent):
"""
Calculates the chart index from coordinates.
<h2>Example usage:</h2>
<ul>
<li>coord2inom(lon, lat, ScaleD) -> inom</li>
<li>coord2inom(-42.2, -13.4, 1000000) -> SA-23</li>
</ul>
"""
lon, lat = lon+1e-10, lat+1e-10
return map_sistem(lon, lat, ScaleD)
@qgsfunction(args='auto', group='LF Tools')
def dd2dms(dd, n_digits, feature, parent):
"""
Transform decimal degrees to degrees, minutes and seconds.
<h2>Example usage:</h2>
<ul>
<li>dd2dms(dd, 3) -> -12°12'34.741"</li>
</ul>
"""
return DD2DMS(dd, n_digits)
@qgsfunction(args='auto', group='LF Tools')
def dms2dd(txt, feature, parent):
"""
Transform degrees, minutes, seconds coordinate to decimal degrees.
<h2>Example usage:</h2>
<ul>
<li>dms2dd("dms") -> dd</li>
<li>dms2dd('-10d30m00.0s') -> -10.5</li>
</ul>
"""
return DMS2DD(txt)
@qgsfunction(args='auto', group='LF Tools')
def scalefactor(lon, lat, feature, parent):
"""
Calculates the Scale (Kappa) Factor based on a feature coordinates.
<h2>Example usage:</h2>
<ul>
<li>scalefactor("lon", "lat") -> 0.99138</li>
</ul>
"""
return ScaleFactor(lon, lat)
@qgsfunction(args='auto', group='LF Tools')
def meridianconv(lon, lat, feature, parent):
"""
Calculates the Meridian Convergence based on a feature coordinates.
<h2>Example usage:</h2>
<ul>
<li>meridianconv("lon", "lat") -> -0.3451</li>
</ul>
"""
SRC = QgsCoordinateReferenceSystem('EPSG:4326')
return MeridianConvergence(lon, lat, SRC)
@qgsfunction(args='auto', group='LF Tools')
def inom2mi(inom, feature, parent):
"""
Determines the MI from INOM.
<h2>Example usage:</h2>
<ul>
<li>inom2mi(inom) -> mi</li>
<li>inom2mi('SB-25-V-C-I') -> '900'</li>
</ul>
"""
dicionario = INOM2MI
inom_list = inom.split('-')
inom100k = ''
resto = ''
if len(inom_list) >= 5:
for k in range(5):
inom100k += inom_list[k]+'-'
if len(inom_list) > 5:
for k in range(5,len(inom_list)):
resto += inom_list[k]+'-'
if inom100k[:-1] in dicionario:
return dicionario[inom100k[:-1]]+'-'+resto[:-1]
else:
return None
else:
if inom100k[:-1] in dicionario:
return dicionario[inom100k[:-1]]
else:
return None
@qgsfunction(args='auto', group='LF Tools')
def projectCRS(output_type, feature, parent):
"""
Return the descriptive name or the EPSG code of the Project's CRS.
<h2>Example usage:</h2>
<ul>
<li>ProjectCRS('EPSG') -> EPSG:4674</li>
<li>ProjectCRS('') -> SIRGAS 2000 / UTM 25 S</li>
</ul>
"""
a = QgsProject.instance()
b = a.crs()
if output_type == 'EPSG':
return b.authid()
else:
return b.description()
@qgsfunction(args='auto', group='LF Tools')
def layerCRS(layer_name, output_type, feature, parent):
"""
Return the descriptive name or the EPSG code of a layer's CRS.
<h2>Example usage:</h2>
<ul>
<li>LayerCRS('EPSG') -> EPSG:4326</li>
<li>LayerCRS('') -> SIRGAS 2000 / UTM 23 S</li>
</ul>
"""
if len(QgsProject.instance().mapLayersByName(layer_name)) == 1:
layer = QgsProject.instance().mapLayersByName(layer_name)[0]
else:
layer = QgsProject.instance().mapLayer(layer_name)
b = layer.crs()
if output_type == 'EPSG':
return b.authid()
else:
return b.description()
@qgsfunction(args='auto', group='LF Tools')
def zonehemisf(lon, lat, feature, parent):
"""
Return the zone and hemisphere from longitude and latitude.
<h2>Example usage:</h2>
<ul>
<li>zonehemisf("lon", "lat") -> 25S</li>
</ul>
"""
    # Zone calculation
    fuso = round((183+lon)/6.0)
    # Hemisphere
    hemisf = 'N' if lat >= 0 else 'S'
return str(fuso) + hemisf
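# Worked example: for lon = -45.0 the zone is round((183 - 45) / 6) =
# round(23.0) = 23, and lat = -10.0 gives hemisphere 'S', so the
# function returns '23S'.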
@qgsfunction(args='auto', group='LF Tools')
def removespetialchar (palavra, feature, parent):
"""
    Removes accents and special characters.
    <h2>Example:</h2>
<ul>
<li>removespetialchar('coração') -> coracao </li>
<li>removespetialchar('gênesis') -> genesis</li>
</ul>
"""
    # Unicode normalization maps each character to its closest Latin equivalent.
    nfkd = unicodedata.normalize('NFKD', palavra)
    palavraSemAcento = u"".join([c for c in nfkd if not unicodedata.combining(c)])
    # A regular expression keeps only numbers, letters and spaces.
return re.sub('[^a-zA-Z0-9 \\\]', '', palavraSemAcento)
@qgsfunction(args='auto', group='LF Tools')
def areaLTP (layer_name, feature, parent):
"""
Calculates the area on the Local Tangent Plane (LTP), also known as Local Geodetic Coordinate System, which is a spatial reference system based on the tangent plane on the feature centroid defined by the local vertical direction.
    <p>Note: PolygonZ or MultiPolygonZ is required.</p>
    <h2>Example:</h2>
<ul>
<li>areaLTP('layer_name') -> 607503.4825 </li>
</ul>
"""
if len(QgsProject.instance().mapLayersByName(layer_name)) == 1:
layer = QgsProject.instance().mapLayersByName(layer_name)[0]
else:
layer = QgsProject.instance().mapLayer(layer_name)
crsUTM = layer.crs()
crsGeo = QgsCoordinateReferenceSystem(crsUTM.geographicCrsAuthId())
coordinateTransformer = QgsCoordinateTransform()
coordinateTransformer.setDestinationCrs(crsGeo)
coordinateTransformer.setSourceCrs(crsUTM)
geom = feature.geometry()
geomGeo = reprojectPoints(geom, coordinateTransformer)
if geom.isMultipart():
coords = geom2PointList(geom)[0][0]
coordsGeo = geomGeo.asMultiPolygon()[0][0]
else:
coords = geom2PointList(geom)[0]
coordsGeo = geomGeo.asPolygon()[0]
centroide = geomGeo.centroid().asPoint()
try:
alt = []
for pnt in coords[:-1]:
alt += [pnt.z()]
h0 = np.array(alt).mean()
lon0 = centroide.x()
lat0 = centroide.y()
        EPSG = int(crsGeo.authid().split(':')[-1])  # EPSG code of the QGIS geographic CRS
        proj_crs = CRS.from_epsg(EPSG)  # convert to a pyproj CRS
        a = proj_crs.ellipsoid.semi_major_metre
        f_inv = proj_crs.ellipsoid.inverse_flattening
        f = 1/f_inv
        # Rotation center (geocentric coordinates of the centroid)
        Xo, Yo, Zo = geod2geoc(lon0, lat0, h0, a, f)
        # Convert each vertex to local ENU coordinates
coordsSGL = []
for k, coord in enumerate(coordsGeo):
lon = coord.x()
lat = coord.y()
h = coords[k].z()
X, Y, Z = geod2geoc(lon, lat, h, a, f)
E, N, U = geoc2enu(X, Y, Z, lon0, lat0, Xo, Yo, Zo)
coordsSGL += [QgsPointXY(E, N)]
areaSGL = abs(areaGauss(coordsSGL))
return areaSGL
    except Exception:
        return 0
@qgsfunction(args='auto', group='LF Tools')
def deedtable(layer_name, ini, fim, titulo, fontsize, feature, parent):
"""
    Generates the Vertices and Sides Descriptive Table, also known as a Synthetic Deed Description, based on the sequence and code attributes in the point layer's attribute table.
    <p>Note: The table title must be inserted as a string.</p>
    <h2>Example:</h2>
<ul>
<li>deedtable('layer_name', start, end, 'title',fontsize) = HTML</li>
<li>deedtable('Limit Point', 1, 20, 'Area X',10) = HTML</li>
</ul>
"""
    # HTML templates
texto = '''<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<title>''' + tr('Synthetic deed description', str2HTML('Memorial Sintético')) + '''</title> </head>
<body>
<table
style="text-align: center; width: 100%; font-size: [FONTSIZE]px; font-family: Arial;"
border="1" cellpadding="0" cellspacing="0">
<tbody>
[CABECALHO]
[LINHAS]
</tbody>
</table>
<br>
</body>
</html>
'''
linha = '''<tr>
<td>Vn</td>
<td>En</td>
<td>Nn</td>
<td>hn</td>
<td>Ln</td>
<td>Az_n</td>
<td>Dn</td>
</tr>
'''
cabec = '''<tr>
<td colspan="7" rowspan="1">''' + tr('Synthetic deed description'.upper(), str2HTML('Memorial Sintético'.upper())) + '''[TITULO]</td>
</tr>
<tr>
<td colspan="1" rowspan="2">''' + tr('VERTEX', str2HTML('VÉRTICE')) + '''</td>
<td colspan="3" rowspan="1">''' + tr('COORDINATE', str2HTML('COORDENADA')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('SIDE', str2HTML('LADO')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('AZIMUTH', str2HTML('AZIMUTE')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('DISTANCE', str2HTML('DISTÂNCIA')) + '''
(m)</td>
</tr>
<tr>
<td>E</td>
<td>N</td>
<td>h</td>
</tr>'''
decimal = 2
format_num = '{:,.Xf}'.replace('X', str(decimal))
    # Point layer
if len(QgsProject.instance().mapLayersByName(layer_name)) == 1:
layer = QgsProject.instance().mapLayersByName(layer_name)[0]
else:
layer = QgsProject.instance().mapLayer(layer_name)
SRC = layer.crs()
pnts_UTM = {}
pnts_GEO = {}
    # Transform geographic coordinates to projected (UTM) coordinates
crsDest = QgsCoordinateReferenceSystem(SRC_Projeto('EPSG'))
coordinateTransformer = QgsCoordinateTransform()
coordinateTransformer.setDestinationCrs(crsDest)
coordinateTransformer.setSourceCrs(SRC)
for feat in layer.getFeatures():
pnt = feat.geometry().asPoint()
coord = geom2PointList(feat.geometry())
pnts_UTM[feat['ordem']] = [coordinateTransformer.transform(pnt), feat['tipo'], feat['codigo'], MeridianConvergence(pnt.x(), pnt.y(), crsDest) ]
pnts_GEO[feat['sequence']] = [QgsPoint(pnt.x(),pnt.y(),coord.z()), feat['tipo'], feat['codigo'] ]
    # Compute azimuths and distances
tam = len(pnts_UTM)
Az_lista, Az_Geo_lista, Dist = [], [], []
for k in range(tam):
pntA = pnts_UTM[k+1][0]
pntB = pnts_UTM[1 if k+2 > tam else k+2][0]
Az_lista += [(180/pi)*azimute(pntA, pntB)[0]]
ConvMerediana = pnts_UTM[k+1][3]
Az_Geo_lista += [(180/pi)*azimute(pntA, pntB)[0]+ConvMerediana]
Dist += [sqrt((pntA.x() - pntB.x())**2 + (pntA.y() - pntB.y())**2)]
LINHAS = ''
if fim == -1 or fim > tam:
fim = tam
for k in range(ini-1,fim):
linha0 = linha
itens = {'Vn': pnts_UTM[k+1][2],
'En':tr(format_num.format(pnts_UTM[k+1][0].x()), format_num.format(pnts_UTM[k+1][0].x()).replace(',', 'X').replace('.', ',').replace('X', '.')),
'Nn':tr(format_num.format(pnts_UTM[k+1][0].y()), format_num.format(pnts_UTM[k+1][0].y()).replace(',', 'X').replace('.', ',').replace('X', '.')),
'hn':tr(format_num.format(pnts_GEO[k+1][0].z()), format_num.format(pnts_GEO[k+1][0].z()).replace(',', 'X').replace('.', ',').replace('X', '.')),
'lonn':tr(DD2DMS(pnts_GEO[k+1][0].x(),decimal + 3), DD2DMS(pnts_GEO[k+1][0].x(),decimal + 3).replace('.', ',')),
'latn':tr(DD2DMS(pnts_GEO[k+1][0].y(),decimal + 3), DD2DMS(pnts_GEO[k+1][0].y(),decimal + 3).replace('.', ',')),
'Ln': pnts_UTM[k+1][2] + '/' + pnts_UTM[1 if k+2 > tam else k+2][2],
'Az_n':tr(DD2DMS(Az_lista[k],1), DD2DMS(Az_lista[k],1).replace('.', ',')),
'Dn':tr(format_num.format(Dist[k]), format_num.format(Dist[k]).replace(',', 'X').replace('.', ',').replace('X', '.'))
}
for item in itens:
linha0 = linha0.replace(item, itens[item])
LINHAS += linha0
resultado = texto.replace('[CABECALHO]', cabec).replace('[LINHAS]', LINHAS).replace('[TITULO]', str2HTML(titulo.upper())).replace('[FONTSIZE]', str(fontsize))
return resultado
@qgsfunction(args='auto', group='LF Tools')
def deedtable2(prefixo, titulo, decimal, fontsize, feature, parent):
"""
Generates the Vertices and Sides Descriptive Table, also known as Synthetic Deed Description, based on vertices of a polygon.
    <p>Note: Only for polygon layers in a projected CRS.</p>
    <h2>Example:</h2>
    <ul>
    <li>deedtable2('prefix', 'title', precision, fontsize) = HTML</li>
<li>deedtable2('V-', ' - Area X', 3, 12) = HTML</li>
</ul>
"""
geom = feature.geometry()
if geom.type() == 2 and geom:
if geom.isMultipart():
coords = geom.asMultiPolygon()[0][0]
else:
coords = geom.asPolygon()[0]
format_num = '{:,.Xf}'.replace('X', str(decimal))
pnts_UTM = {}
for k, coord in enumerate(coords[:-1]):
pnts_UTM[k+1] = [coord, prefixo, prefixo + '{:02}'.format(k+1) ]
        # Compute azimuths and distances
tam = len(pnts_UTM)
Az_lista, Dist = [], []
for k in range(tam):
pntA = pnts_UTM[k+1][0]
pntB = pnts_UTM[1 if k+2 > tam else k+2][0]
Az_lista += [(180/pi)*azimute(pntA, pntB)[0]]
Dist += [sqrt((pntA.x() - pntB.x())**2 + (pntA.y() - pntB.y())**2)]
linha = '''<tr>
<td>Vn</td>
<td>En</td>
<td>Nn</td>
<td>Ln</td>
<td>Az_n</td>
<td>Dn</td>
</tr>
'''
texto = '''<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<title>''' + tr('Synthetic deed description', str2HTML('Memorial Sintético')) + '''</title>
<link rel = "icon" href = "https://github.com/LEOXINGU/lftools/blob/main/images/lftools.png?raw=true" type = "image/x-icon">
</head>
<body>
<table
style="text-align: center; width: 100%; font-size: [FONTSIZE]px; font-family: Arial;"
border="1" cellpadding="0" cellspacing="0">
<tbody>
<tr>
<td colspan="6" rowspan="1">''' + tr('Synthetic deed description'.upper(), str2HTML('Memorial Sintético'.upper())) + '''[TITULO]</td>
</tr>
<tr>
<td colspan="1" rowspan="2">''' + tr('VERTEX', str2HTML('VÉRTICE')) + '''</td>
<td colspan="2" rowspan="1">''' + tr('COORDINATE', str2HTML('COORDENADA')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('SIDE', str2HTML('LADO')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('AZIMUTH', str2HTML('AZIMUTE')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('DISTANCE', str2HTML('DISTÂNCIA')) + '''
(m)</td>
</tr>
<tr>
<td>E</td>
<td>N</td>
</tr>
[LINHAS]
</tbody>
</table>
<br>
</body>
</html>
'''
LINHAS = ''
for k in range(tam):
linha0 = linha
itens = {'Vn': pnts_UTM[k+1][2],
'En': tr(format_num.format(pnts_UTM[k+1][0].x()), format_num.format(pnts_UTM[k+1][0].x()).replace(',', 'X').replace('.', ',').replace('X', '.')),
'Nn': tr(format_num.format(pnts_UTM[k+1][0].y()), format_num.format(pnts_UTM[k+1][0].y()).replace(',', 'X').replace('.', ',').replace('X', '.')),
'Ln': pnts_UTM[k+1][2] + '/' + pnts_UTM[1 if k+2 > tam else k+2][2],
'Az_n': tr(DD2DMS(Az_lista[k],1), DD2DMS(Az_lista[k],1).replace('.', ',')),
'Dn': tr(format_num.format(Dist[k]), format_num.format(Dist[k]).replace(',', 'X').replace('.', ',').replace('X', '.'))
}
for item in itens:
linha0 = linha0.replace(item, itens[item])
LINHAS += linha0
resultado = texto.replace('[LINHAS]', LINHAS).replace('[TITULO]', str2HTML(titulo.upper())).replace('[FONTSIZE]', str(fontsize))
return resultado
else:
return tr('Verify geometry', 'Verificar geometria')
@qgsfunction(args='auto', group='LF Tools')
def deedtable3(prefixo, titulo, decimal, fontsize, layer_name, tipo, azimuteDist, feature, parent):
"""
    Generates the Vertices and Sides Descriptive Table, also known as Synthetic Deed Description, based on vertices of a PolygonZ or MultiPolygonZ.
    <p>Note 1: A layer with a projected CRS is required.</p>
    <p>Note 2: Table types: 'proj' - projected, 'geo' - geographic, 'both' - both coordinate systems.</p>
    <p>Note 3: Set 1 or 0 for with or without azimuths and distances, respectively.</p>
    <h2>Example:</h2>
    <ul>
    <li>deedtable3('prefix', 'title', precision, fontsize, layer_name, type, azimuth_dist) = HTML</li>
<li>deedtable3('V-', ' - Area X', 3, 12, 'layer_name', 'proj', 1) = HTML</li>
<li>deedtable3('V-', ' - Area X', 3, 12, 'layer_name', 'geo', 0) = HTML</li>
<li>deedtable3('V-', ' - Area X', 3, 12, 'layer_name', 'both', 1) = HTML</li>
</ul>
"""
if len(QgsProject.instance().mapLayersByName(layer_name)) == 1:
layer = QgsProject.instance().mapLayersByName(layer_name)[0]
else:
layer = QgsProject.instance().mapLayer(layer_name)
crsUTM = layer.crs()
crsGeo = QgsCoordinateReferenceSystem(crsUTM.geographicCrsAuthId())
format_num = '{:,.Xf}'.replace('X', str(decimal))
coordinateTransformer = QgsCoordinateTransform()
coordinateTransformer.setDestinationCrs(crsGeo)
coordinateTransformer.setSourceCrs(crsUTM)
geom = feature.geometry()
if geom.type() == 2 and geom:
if geom.isMultipart():
coords = geom2PointList(geom)[0][0]
else:
coords = geom2PointList(geom)[0]
pnts_UTM = {}
pnts_GEO = {}
for k, coord in enumerate(coords[:-1]):
pnts_UTM[k+1] = [coord, prefixo, prefixo + '{:02}'.format(k+1)]
pnt = coordinateTransformer.transform(QgsPointXY(coord.x(), coord.y()))
pnts_GEO[k+1] = [QgsPoint(pnt.x(),pnt.y(),coord.z()), prefixo, prefixo + '{:02}'.format(k+1) ]
        # Compute azimuths and distances
tam = len(pnts_UTM)
Az_lista, Dist = [], []
for k in range(tam):
pntA = pnts_UTM[k+1][0]
pntB = pnts_UTM[1 if k+2 > tam else k+2][0]
Az_lista += [(180/pi)*azimute(pntA, pntB)[0]]
Dist += [sqrt((pntA.x() - pntB.x())**2 + (pntA.y() - pntB.y())**2)]
texto = '''<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<title>''' + tr('Synthetic deed description', str2HTML('Memorial Sintético')) + '''</title> </head>
<body>
<table
style="text-align: center; width: 100%; font-size: [FONTSIZE]px; font-family: Arial;"
border="1" cellpadding="0" cellspacing="0">
<tbody>
[CABECALHO]
[LINHAS]
</tbody>
</table>
<br>
</body>
</html>
'''
        # Header types
# UTM
if tipo == 'proj' and azimuteDist == 1:
linha = '''<tr>
<td>Vn</td>
<td>En</td>
<td>Nn</td>
<td>hn</td>
<td>Ln</td>
<td>Az_n</td>
<td>Dn</td>
</tr>
'''
cabec = '''<tr>
<td colspan="7" rowspan="1">''' + tr('Synthetic deed description'.upper(), str2HTML('Memorial Sintético'.upper())) + '''[TITULO]</td>
</tr>
<tr>
<td colspan="1" rowspan="2">''' + tr('VERTEX', str2HTML('VÉRTICE')) + '''</td>
<td colspan="3" rowspan="1">''' + tr('COORDINATE', str2HTML('COORDENADA')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('SIDE', str2HTML('LADO')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('AZIMUTH', str2HTML('AZIMUTE')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('DISTANCE', str2HTML('DISTÂNCIA')) + '''
(m)</td>
</tr>
<tr>
<td>E</td>
<td>N</td>
<td>h</td>
</tr>'''
        # UTM without azimuth and distance
if tipo == 'proj' and azimuteDist == 0:
linha = '''<tr>
<td>Vn</td>
<td>En</td>
<td>Nn</td>
<td>hn</td>
</tr>
'''
cabec = '''<tr>
<td colspan="4" rowspan="1">''' + tr('Synthetic deed description'.upper(), str2HTML('Memorial Sintético'.upper())) + '''[TITULO]</td>
</tr>
<tr>
<td colspan="1" rowspan="2">''' + tr('VERTEX', str2HTML('VÉRTICE')) + '''</td>
<td colspan="3" rowspan="1">''' + tr('COORDINATE', str2HTML('COORDENADA')) + '''</td>
</tr>
<tr>
<td>E</td>
<td>N</td>
<td>h</td>
</tr>'''
# GEO
if tipo == 'geo' and azimuteDist == 1:
linha = '''<tr>
<td>Vn</td>
<td>lonn</td>
<td>latn</td>
<td>hn</td>
<td>Ln</td>
<td>Az_n</td>
<td>Dn</td>
</tr>
'''
cabec = '''<tr>
<td colspan="7" rowspan="1">''' + tr('Synthetic deed description'.upper(), str2HTML('Memorial Sintético'.upper())) + '''[TITULO]</td>
</tr>
<tr>
<td colspan="1" rowspan="2">''' + tr('VERTEX', str2HTML('VÉRTICE')) + '''</td>
<td colspan="3" rowspan="1">''' + tr('COORDINATE', str2HTML('COORDENADA')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('SIDE', str2HTML('LADO')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('AZIMUTH', str2HTML('AZIMUTE')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('DISTANCE', str2HTML('DISTÂNCIA')) + '''
(m)</td>
</tr>
<tr>
<td>longitude</td>
<td>latitude</td>
<td>h</td>
</tr>'''
        # GEO without azimuth and distance
if tipo == 'geo' and azimuteDist == 0:
linha = '''<tr>
<td>Vn</td>
<td>lonn</td>
<td>latn</td>
<td>hn</td>
</tr>
'''
cabec = '''<tr>
<td colspan="4" rowspan="1">''' + tr('Synthetic deed description'.upper(), str2HTML('Memorial Sintético'.upper())) + '''[TITULO]</td>
</tr>
<tr>
<td colspan="1" rowspan="2">''' + tr('VERTEX', str2HTML('VÉRTICE')) + '''</td>
<td colspan="3" rowspan="1">''' + tr('COORDINATE', str2HTML('COORDENADA')) + '''</td>
(m)</td>
</tr>
<tr>
<td>longitude</td>
<td>latitude</td>
<td>h</td>
</tr>'''
        # UTM and GEO
if tipo == 'both' and azimuteDist == 1:
linha = '''<tr>
<td>Vn</td>
<td>lonn</td>
<td>latn</td>
<td>En</td>
<td>Nn</td>
<td>hn</td>
<td>Ln</td>
<td>Az_n</td>
<td>Dn</td>
</tr>
'''
cabec = '''<tr>
<td colspan="9" rowspan="1">''' + tr('Synthetic deed description'.upper(), str2HTML('Memorial Sintético'.upper())) + '''[TITULO]</td>
</tr>
<tr>
<td colspan="1" rowspan="2">''' + tr('VERTEX', str2HTML('VÉRTICE')) + '''</td>
<td colspan="5" rowspan="1">''' + tr('COORDINATE', str2HTML('COORDENADA')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('SIDE', str2HTML('LADO')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('AZIMUTH', str2HTML('AZIMUTE')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('DISTANCE', str2HTML('DISTÂNCIA')) + '''
(m)</td>
</tr>
<tr>
<td>longitude</td>
<td>latitude</td>
<td>E</td>
<td>N</td>
<td>h</td>
</tr>'''
        # UTM and GEO without azimuth and distance
if tipo == 'both' and azimuteDist == 0:
linha = '''<tr>
<td>Vn</td>
<td>lonn</td>
<td>latn</td>
<td>En</td>
<td>Nn</td>
<td>hn</td>
</tr>
'''
cabec = '''<tr>
<td colspan="6" rowspan="1">''' + tr('Synthetic deed description'.upper(), str2HTML('Memorial Sintético'.upper())) + '''[TITULO]</td>
</tr>
<tr>
<td colspan="1" rowspan="2">''' + tr('VERTEX', str2HTML('VÉRTICE')) + '''</td>
<td colspan="5" rowspan="1">''' + tr('COORDINATE', str2HTML('COORDENADA')) + '''</td>
</tr>
<tr>
<td>longitude</td>
<td>latitude</td>
<td>E</td>
<td>N</td>
<td>h</td>
</tr>'''
LINHAS = ''
for k in range(tam):
linha0 = linha
itens = {'Vn': pnts_UTM[k+1][2],
'En': tr(format_num.format(pnts_UTM[k+1][0].x()), format_num.format(pnts_UTM[k+1][0].x()).replace(',', 'X').replace('.', ',').replace('X', '.')),
'Nn': tr(format_num.format(pnts_UTM[k+1][0].y()), format_num.format(pnts_UTM[k+1][0].y()).replace(',', 'X').replace('.', ',').replace('X', '.')),
'hn': tr(format_num.format(pnts_UTM[k+1][0].z()), format_num.format(pnts_UTM[k+1][0].z()).replace(',', 'X').replace('.', ',').replace('X', '.')),
'lonn': tr(DD2DMS(pnts_GEO[k+1][0].x(),decimal + 3), DD2DMS(pnts_GEO[k+1][0].x(),decimal + 3).replace('.', ',')),
'latn': tr(DD2DMS(pnts_GEO[k+1][0].y(),decimal + 3), DD2DMS(pnts_GEO[k+1][0].y(),decimal + 3).replace('.', ',')),
'Ln': pnts_UTM[k+1][2] + '/' + pnts_UTM[1 if k+2 > tam else k+2][2],
'Az_n': tr(DD2DMS(Az_lista[k],1), DD2DMS(Az_lista[k],1).replace('.', ',')),
'Dn': tr(format_num.format(Dist[k]), format_num.format(Dist[k]).replace(',', 'X').replace('.', ',').replace('X', '.'))
}
for item in itens:
linha0 = linha0.replace(item, itens[item])
LINHAS += linha0
resultado = texto.replace('[CABECALHO]', cabec).replace('[LINHAS]', LINHAS).replace('[TITULO]', str2HTML(titulo.upper())).replace('[FONTSIZE]', str(fontsize))
return resultado
else:
return tr('Verify geometry', 'Verificar geometria') | expressions.py | __author__ = '<NAME>'
__date__ = '2021-03-19'
__copyright__ = '(C) 2021 by <NAME>'
from qgis.core import *
from qgis.gui import *
from qgis.utils import qgsfunction
from lftools.geocapt.cartography import (map_sistem,
MeridianConvergence,
SRC_Projeto,
ScaleFactor,
geom2PointList,
reprojectPoints,
areaGauss,
inom2mi as INOM2MI)
from lftools.geocapt.topogeo import (dd2dms as DD2DMS,
dms2dd as DMS2DD,
azimute, str2HTML,
geod2geoc,
geoc2enu)
from numpy import array, pi, sqrt, median
import numpy as np
from pyproj.crs import CRS
import unicodedata
import re
# https://qgis.org/pyqgis/3.2/core/Expression/QgsExpression.html
LOC = QgsApplication.locale()[:2]
def tr(*string):
# Traduzir para o portugês: arg[0] - english (translate), arg[1] - português
if LOC == 'pt':
if len(string) == 2:
return string[1]
else:
return string[0]
else:
return string[0]
@qgsfunction(args='auto', group='LF Tools')
def fieldstat(layer_name, field_name, type, feature, parent):
''' Returns the Aggregate function of a layer's field.
<h2>Example usage:</h2>
<ul>
<li>fieldstat('layer_name', 'field_name', 'sum') ->Sum of the values</li>
<li>fieldstat('layer_name', 'field_name', 'min') ->Min of the values</li>
<li>fieldstat('layer_name', 'field_name', 'max') ->Max of the values</li>
<li>fieldstat('layer_name', 'field_name', 'mean') ->Mean of the values</li>
<li>fieldstat('layer_name', 'field_name', 'std') ->Standard Deviation of the values</li>
<li>fieldstat('layer_name', 'field_name', 'median') ->Median of the values</li>
</ul>'''
lista = []
if len(QgsProject.instance().mapLayersByName(layer_name)) == 1:
layer = QgsProject.instance().mapLayersByName(layer_name)[0]
else:
layer = QgsProject.instance().mapLayer(layer_name)
for feat in layer.getFeatures():
att = feat[field_name]
if att:
lista += [float(att)]
if type == 'sum':
return float((array(lista)).sum())
elif type == 'min':
return float((array(lista)).min())
elif type == 'max':
return float((array(lista)).max())
elif type == 'mean':
return float((array(lista)).mean())
elif type == 'std':
return float((array(lista)).std())
elif type == 'median':
return float(median(array(lista)))
else:
return None
@qgsfunction(args='auto', group='LF Tools')
def coord2inom(lon, lat, ScaleD, feature, parent):
"""
Calculates the chart index from coordinates.
<h2>Example usage:</h2>
<ul>
<li>coord2inom(lon, lat, ScaleD) -> inom</li>
<li>coord2inom(-42.2, -13.4, 1000000) -> SA-23</li>
</ul>
"""
lon, lat = lon+1e-10, lat+1e-10
return map_sistem(lon, lat, ScaleD)
@qgsfunction(args='auto', group='LF Tools')
def dd2dms(dd, n_digits, feature, parent):
"""
Transform decimal degrees to degrees, minutes and seconds.
<h2>Example usage:</h2>
<ul>
<li>dd2dms(dd, 3) -> -12°12'34.741"</li>
</ul>
"""
return DD2DMS(dd, n_digits)
@qgsfunction(args='auto', group='LF Tools')
def dms2dd(txt, feature, parent):
"""
Transform degrees, minutes, seconds coordinate to decimal degrees.
<h2>Example usage:</h2>
<ul>
<li>dms2dd("dms") -> dd</li>
<li>dms2dd('-10d30m00.0s') -> -10.5</li>
</ul>
"""
return DMS2DD(txt)
@qgsfunction(args='auto', group='LF Tools')
def scalefactor(lon, lat, feature, parent):
"""
Calculates the Scale (Kappa) Factor based on a feature coordinates.
<h2>Example usage:</h2>
<ul>
<li>scalefactor("lon", "lat") -> 0.99138</li>
</ul>
"""
return ScaleFactor(lon, lat)
@qgsfunction(args='auto', group='LF Tools')
def meridianconv(lon, lat, feature, parent):
"""
Calculates the Meridian Convergence based on a feature coordinates.
<h2>Example usage:</h2>
<ul>
<li>meridianconv("lon", "lat") -> -0.3451</li>
</ul>
"""
SRC = QgsCoordinateReferenceSystem('EPSG:4326')
return MeridianConvergence(lon, lat, SRC)
@qgsfunction(args='auto', group='LF Tools')
def inom2mi(inom, feature, parent):
"""
Determines the MI from INOM.
<h2>Example usage:</h2>
<ul>
<li>inom2mi(inom) -> mi</li>
<li>inom2mi('SB-25-V-C-I') -> '900'</li>
</ul>
"""
dicionario = INOM2MI
inom_list = inom.split('-')
inom100k = ''
resto = ''
if len(inom_list) >= 5:
for k in range(5):
inom100k += inom_list[k]+'-'
if len(inom_list) > 5:
for k in range(5,len(inom_list)):
resto += inom_list[k]+'-'
if inom100k[:-1] in dicionario:
return dicionario[inom100k[:-1]]+'-'+resto[:-1]
else:
return None
else:
if inom100k[:-1] in dicionario:
return dicionario[inom100k[:-1]]
else:
return None
@qgsfunction(args='auto', group='LF Tools')
def projectCRS(output_type, feature, parent):
"""
Return the descriptive name or the EPSG code of the Project's CRS.
<h2>Example usage:</h2>
<ul>
<li>ProjectCRS('EPSG') -> EPSG:4674</li>
<li>ProjectCRS('') -> SIRGAS 2000 / UTM 25 S</li>
</ul>
"""
a = QgsProject.instance()
b = a.crs()
if output_type == 'EPSG':
return b.authid()
else:
return b.description()
@qgsfunction(args='auto', group='LF Tools')
def layerCRS(layer_name, output_type, feature, parent):
"""
Return the descriptive name or the EPSG code of a layer's CRS.
<h2>Example usage:</h2>
<ul>
<li>LayerCRS('EPSG') -> EPSG:4326</li>
<li>LayerCRS('') -> SIRGAS 2000 / UTM 23 S</li>
</ul>
"""
if len(QgsProject.instance().mapLayersByName(layer_name)) == 1:
layer = QgsProject.instance().mapLayersByName(layer_name)[0]
else:
layer = QgsProject.instance().mapLayer(layer_name)
b = layer.crs()
if output_type == 'EPSG':
return b.authid()
else:
return b.description()
@qgsfunction(args='auto', group='LF Tools')
def zonehemisf(lon, lat, feature, parent):
"""
Return the zone and hemisphere from longitude and latitude.
<h2>Example usage:</h2>
<ul>
<li>zonehemisf("lon", "lat") -> 25S</li>
</ul>
"""
# Calculo do Fuso
fuso = round((183+lon)/6.0)
# Hemisferio
hemisf = 'N' if lat>= 0 else 'S'
return str(fuso) + hemisf
@qgsfunction(args='auto', group='LF Tools')
def removespetialchar (palavra, feature, parent):
"""
Replaces special characters.
<h2>Examplo:</h2>
<ul>
<li>removespetialchar('coração') -> coracao </li>
<li>removespetialchar('gênesis') -> genesis</li>
</ul>
"""
# Unicode normalize transforma um caracter em seu equivalente em latin.
nfkd = unicodedata.normalize('NFKD', palavra)
palavraSemAcento = u"".join([c for c in nfkd if not unicodedata.combining(c)])
# Usa expressão regular para retornar a palavra apenas com números, letras e espaço
return re.sub('[^a-zA-Z0-9 \\\]', '', palavraSemAcento)
@qgsfunction(args='auto', group='LF Tools')
def areaLTP (layer_name, feature, parent):
"""
Calculates the area on the Local Tangent Plane (LTP), also known as Local Geodetic Coordinate System, which is a spatial reference system based on the tangent plane on the feature centroid defined by the local vertical direction.
<p>Note: PolygonZ or MultiPoligonZ is required.</p>
<h2>Examplo:</h2>
<ul>
<li>areaLTP('layer_name') -> 607503.4825 </li>
</ul>
"""
if len(QgsProject.instance().mapLayersByName(layer_name)) == 1:
layer = QgsProject.instance().mapLayersByName(layer_name)[0]
else:
layer = QgsProject.instance().mapLayer(layer_name)
crsUTM = layer.crs()
crsGeo = QgsCoordinateReferenceSystem(crsUTM.geographicCrsAuthId())
coordinateTransformer = QgsCoordinateTransform()
coordinateTransformer.setDestinationCrs(crsGeo)
coordinateTransformer.setSourceCrs(crsUTM)
geom = feature.geometry()
geomGeo = reprojectPoints(geom, coordinateTransformer)
if geom.isMultipart():
coords = geom2PointList(geom)[0][0]
coordsGeo = geomGeo.asMultiPolygon()[0][0]
else:
coords = geom2PointList(geom)[0]
coordsGeo = geomGeo.asPolygon()[0]
centroide = geomGeo.centroid().asPoint()
try:
alt = []
for pnt in coords[:-1]:
alt += [pnt.z()]
h0 = np.array(alt).mean()
lon0 = centroide.x()
lat0 = centroide.y()
EPSG = int(crsGeo.authid().split(':')[-1]) # pegando o EPGS do SRC do QGIS
proj_crs = CRS.from_epsg(EPSG) # transformando para SRC do pyproj
a=proj_crs.ellipsoid.semi_major_metre
f_inv = proj_crs.ellipsoid.inverse_flattening
f=1/f_inv
# CENTRO DE ROTAÇÃO
Xo, Yo, Zo = geod2geoc(lon0, lat0, h0, a, f)
# CONVERSÃO DAS COORDENADAS
coordsSGL = []
for k, coord in enumerate(coordsGeo):
lon = coord.x()
lat = coord.y()
h = coords[k].z()
X, Y, Z = geod2geoc(lon, lat, h, a, f)
E, N, U = geoc2enu(X, Y, Z, lon0, lat0, Xo, Yo, Zo)
coordsSGL += [QgsPointXY(E, N)]
areaSGL = abs(areaGauss(coordsSGL))
return areaSGL
except:
return 0
@qgsfunction(args='auto', group='LF Tools')
def deedtable(layer_name, ini, fim, titulo, fontsize, feature, parent):
"""
Generates the Vertices and Sides Descriptive Table, also known as Synthetic Deed Description, based on the attributes, sequence and code, in the point layer's attribute table.
<p>Note: The table title must be inserted as string.</p>
<h2>Exemple:</h2>
<ul>
<li>deedtable('layer_name', start, end, 'title',fontsize) = HTML</li>
<li>deedtable('Limit Point', 1, 20, 'Area X',10) = HTML</li>
</ul>
"""
# Templates HTML
texto = '''<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<title>''' + tr('Synthetic deed description', str2HTML('Memorial Sintético')) + '''</title> </head>
<body>
<table
style="text-align: center; width: 100%; font-size: [FONTSIZE]px; font-family: Arial;"
border="1" cellpadding="0" cellspacing="0">
<tbody>
[CABECALHO]
[LINHAS]
</tbody>
</table>
<br>
</body>
</html>
'''
linha = '''<tr>
<td>Vn</td>
<td>En</td>
<td>Nn</td>
<td>hn</td>
<td>Ln</td>
<td>Az_n</td>
<td>Dn</td>
</tr>
'''
cabec = '''<tr>
<td colspan="7" rowspan="1">''' + tr('Synthetic deed description'.upper(), str2HTML('Memorial Sintético'.upper())) + '''[TITULO]</td>
</tr>
<tr>
<td colspan="1" rowspan="2">''' + tr('VERTEX', str2HTML('VÉRTICE')) + '''</td>
<td colspan="3" rowspan="1">''' + tr('COORDINATE', str2HTML('COORDENADA')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('SIDE', str2HTML('LADO')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('AZIMUTH', str2HTML('AZIMUTE')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('DISTANCE', str2HTML('DISTÂNCIA')) + '''
(m)</td>
</tr>
<tr>
<td>E</td>
<td>N</td>
<td>h</td>
</tr>'''
decimal = 2
format_num = '{:,.Xf}'.replace('X', str(decimal))
# Camada de Pontos
if len(QgsProject.instance().mapLayersByName(layer_name)) == 1:
layer = QgsProject.instance().mapLayersByName(layer_name)[0]
else:
layer = QgsProject.instance().mapLayer(layer_name)
SRC = layer.crs()
pnts_UTM = {}
pnts_GEO = {}
# Transformacao de Coordenadas Geograficas para Projetadas no sistema UTM
crsDest = QgsCoordinateReferenceSystem(SRC_Projeto('EPSG'))
coordinateTransformer = QgsCoordinateTransform()
coordinateTransformer.setDestinationCrs(crsDest)
coordinateTransformer.setSourceCrs(SRC)
for feat in layer.getFeatures():
pnt = feat.geometry().asPoint()
coord = geom2PointList(feat.geometry())
pnts_UTM[feat['ordem']] = [coordinateTransformer.transform(pnt), feat['tipo'], feat['codigo'], MeridianConvergence(pnt.x(), pnt.y(), crsDest) ]
pnts_GEO[feat['sequence']] = [QgsPoint(pnt.x(),pnt.y(),coord.z()), feat['tipo'], feat['codigo'] ]
# Calculo dos Azimutes e Distancias
tam = len(pnts_UTM)
Az_lista, Az_Geo_lista, Dist = [], [], []
for k in range(tam):
pntA = pnts_UTM[k+1][0]
pntB = pnts_UTM[1 if k+2 > tam else k+2][0]
Az_lista += [(180/pi)*azimute(pntA, pntB)[0]]
ConvMerediana = pnts_UTM[k+1][3]
Az_Geo_lista += [(180/pi)*azimute(pntA, pntB)[0]+ConvMerediana]
Dist += [sqrt((pntA.x() - pntB.x())**2 + (pntA.y() - pntB.y())**2)]
LINHAS = ''
if fim == -1 or fim > tam:
fim = tam
for k in range(ini-1,fim):
linha0 = linha
itens = {'Vn': pnts_UTM[k+1][2],
'En':tr(format_num.format(pnts_UTM[k+1][0].x()), format_num.format(pnts_UTM[k+1][0].x()).replace(',', 'X').replace('.', ',').replace('X', '.')),
'Nn':tr(format_num.format(pnts_UTM[k+1][0].y()), format_num.format(pnts_UTM[k+1][0].y()).replace(',', 'X').replace('.', ',').replace('X', '.')),
'hn':tr(format_num.format(pnts_GEO[k+1][0].z()), format_num.format(pnts_GEO[k+1][0].z()).replace(',', 'X').replace('.', ',').replace('X', '.')),
'lonn':tr(DD2DMS(pnts_GEO[k+1][0].x(),decimal + 3), DD2DMS(pnts_GEO[k+1][0].x(),decimal + 3).replace('.', ',')),
'latn':tr(DD2DMS(pnts_GEO[k+1][0].y(),decimal + 3), DD2DMS(pnts_GEO[k+1][0].y(),decimal + 3).replace('.', ',')),
'Ln': pnts_UTM[k+1][2] + '/' + pnts_UTM[1 if k+2 > tam else k+2][2],
'Az_n':tr(DD2DMS(Az_lista[k],1), DD2DMS(Az_lista[k],1).replace('.', ',')),
'Dn':tr(format_num.format(Dist[k]), format_num.format(Dist[k]).replace(',', 'X').replace('.', ',').replace('X', '.'))
}
for item in itens:
linha0 = linha0.replace(item, itens[item])
LINHAS += linha0
resultado = texto.replace('[CABECALHO]', cabec).replace('[LINHAS]', LINHAS).replace('[TITULO]', str2HTML(titulo.upper())).replace('[FONTSIZE]', str(fontsize))
return resultado
@qgsfunction(args='auto', group='LF Tools')
def deedtable2(prefixo, titulo, decimal, fontsize, feature, parent):
"""
Generates the Vertices and Sides Descriptive Table, also known as Synthetic Deed Description, based on vertices of a polygon.
<p>Notes: Only for polygon layer in a projected CRS.</p>
<h2>Exemple:</h2>
<ul>
<li>deedtable2('preffix', 'title', precision, fontsize) = HTML</li>
<li>deedtable2('V-', ' - Area X', 3, 12) = HTML</li>
</ul>
"""
geom = feature.geometry()
if geom.type() == 2 and geom:
if geom.isMultipart():
coords = geom.asMultiPolygon()[0][0]
else:
coords = geom.asPolygon()[0]
format_num = '{:,.Xf}'.replace('X', str(decimal))
pnts_UTM = {}
for k, coord in enumerate(coords[:-1]):
pnts_UTM[k+1] = [coord, prefixo, prefixo + '{:02}'.format(k+1) ]
# Calculo dos Azimutes e Distancias
tam = len(pnts_UTM)
Az_lista, Dist = [], []
for k in range(tam):
pntA = pnts_UTM[k+1][0]
pntB = pnts_UTM[1 if k+2 > tam else k+2][0]
Az_lista += [(180/pi)*azimute(pntA, pntB)[0]]
Dist += [sqrt((pntA.x() - pntB.x())**2 + (pntA.y() - pntB.y())**2)]
linha = '''<tr>
<td>Vn</td>
<td>En</td>
<td>Nn</td>
<td>Ln</td>
<td>Az_n</td>
<td>Dn</td>
</tr>
'''
texto = '''<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<title>''' + tr('Synthetic deed description', str2HTML('Memorial Sintético')) + '''</title>
<link rel = "icon" href = "https://github.com/LEOXINGU/lftools/blob/main/images/lftools.png?raw=true" type = "image/x-icon">
</head>
<body>
<table
style="text-align: center; width: 100%; font-size: [FONTSIZE]px; font-family: Arial;"
border="1" cellpadding="0" cellspacing="0">
<tbody>
<tr>
<td colspan="6" rowspan="1">''' + tr('Synthetic deed description'.upper(), str2HTML('Memorial Sintético'.upper())) + '''[TITULO]</td>
</tr>
<tr>
<td colspan="1" rowspan="2">''' + tr('VERTEX', str2HTML('VÉRTICE')) + '''</td>
<td colspan="2" rowspan="1">''' + tr('COORDINATE', str2HTML('COORDENADA')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('SIDE', str2HTML('LADO')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('AZIMUTH', str2HTML('AZIMUTE')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('DISTANCE', str2HTML('DISTÂNCIA')) + '''
(m)</td>
</tr>
<tr>
<td>E</td>
<td>N</td>
</tr>
[LINHAS]
</tbody>
</table>
<br>
</body>
</html>
'''
LINHAS = ''
for k in range(tam):
linha0 = linha
itens = {'Vn': pnts_UTM[k+1][2],
'En': tr(format_num.format(pnts_UTM[k+1][0].x()), format_num.format(pnts_UTM[k+1][0].x()).replace(',', 'X').replace('.', ',').replace('X', '.')),
'Nn': tr(format_num.format(pnts_UTM[k+1][0].y()), format_num.format(pnts_UTM[k+1][0].y()).replace(',', 'X').replace('.', ',').replace('X', '.')),
'Ln': pnts_UTM[k+1][2] + '/' + pnts_UTM[1 if k+2 > tam else k+2][2],
'Az_n': tr(DD2DMS(Az_lista[k],1), DD2DMS(Az_lista[k],1).replace('.', ',')),
'Dn': tr(format_num.format(Dist[k]), format_num.format(Dist[k]).replace(',', 'X').replace('.', ',').replace('X', '.'))
}
for item in itens:
linha0 = linha0.replace(item, itens[item])
LINHAS += linha0
resultado = texto.replace('[LINHAS]', LINHAS).replace('[TITULO]', str2HTML(titulo.upper())).replace('[FONTSIZE]', str(fontsize))
return resultado
else:
return tr('Verify geometry', 'Verificar geometria')
@qgsfunction(args='auto', group='LF Tools')
def deedtable3(prefixo, titulo, decimal, fontsize, layer_name, tipo, azimuteDist, feature, parent):
"""
Generates the Vertices and Sides Descriptive Table, also known as Synthetic Deed Description, based on the vertices of a PolygonZ or MultiPolygonZ.
<p>Note 1: A layer with a projected CRS is required.</p>
<p>Note 2: Table types: 'proj' - projected, 'geo' - geographic, 'both' - both coordinate systems.</p>
<p>Note 3: Pass 1 or 0 to include or omit azimuths and distances, respectively.</p>
<h2>Example:</h2>
<ul>
<li>deedtable3('prefix', 'title', precision, fontsize, layer_name, type, azimuth_dist) = HTML</li>
<li>deedtable3('V-', ' - Area X', 3, 12, 'layer_name', 'proj', 1) = HTML</li>
<li>deedtable3('V-', ' - Area X', 3, 12, 'layer_name', 'geo', 0) = HTML</li>
<li>deedtable3('V-', ' - Area X', 3, 12, 'layer_name', 'both', 1) = HTML</li>
</ul>
"""
if len(QgsProject.instance().mapLayersByName(layer_name)) == 1:
layer = QgsProject.instance().mapLayersByName(layer_name)[0]
else:
layer = QgsProject.instance().mapLayer(layer_name)
crsUTM = layer.crs()
crsGeo = QgsCoordinateReferenceSystem(crsUTM.geographicCrsAuthId())
format_num = '{:,.Xf}'.replace('X', str(decimal))
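# Transformer from the layer's projected CRS to its corresponding geographic CRS (used for the lon/lat columns)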
coordinateTransformer = QgsCoordinateTransform()
coordinateTransformer.setDestinationCrs(crsGeo)
coordinateTransformer.setSourceCrs(crsUTM)
geom = feature.geometry()
if geom and geom.type() == 2: # type 2 = polygon; test geom first so a null geometry short-circuits
if geom.isMultipart():
coords = geom2PointList(geom)[0][0]
else:
coords = geom2PointList(geom)[0]
pnts_UTM = {}
pnts_GEO = {}
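# Keep each vertex in both projected (pnts_UTM) and geographic (pnts_GEO) coordinates, preserving the Z value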
for k, coord in enumerate(coords[:-1]):
pnts_UTM[k+1] = [coord, prefixo, prefixo + '{:02}'.format(k+1)]
pnt = coordinateTransformer.transform(QgsPointXY(coord.x(), coord.y()))
pnts_GEO[k+1] = [QgsPoint(pnt.x(),pnt.y(),coord.z()), prefixo, prefixo + '{:02}'.format(k+1) ]
# Compute azimuths and distances between consecutive vertices
tam = len(pnts_UTM)
Az_lista, Dist = [], []
for k in range(tam):
pntA = pnts_UTM[k+1][0]
pntB = pnts_UTM[1 if k+2 > tam else k+2][0]
Az_lista += [(180/pi)*azimute(pntA, pntB)[0]]
Dist += [sqrt((pntA.x() - pntB.x())**2 + (pntA.y() - pntB.y())**2)]
texto = '''<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<title>''' + tr('Synthetic deed description', str2HTML('Memorial Sintético')) + '''</title> </head>
<body>
<table
style="text-align: center; width: 100%; font-size: [FONTSIZE]px; font-family: Arial;"
border="1" cellpadding="0" cellspacing="0">
<tbody>
[CABECALHO]
[LINHAS]
</tbody>
</table>
<br>
</body>
</html>
'''
# Header variants by table type
# Projected (UTM) with azimuths and distances
if tipo == 'proj' and azimuteDist == 1:
linha = '''<tr>
<td>Vn</td>
<td>En</td>
<td>Nn</td>
<td>hn</td>
<td>Ln</td>
<td>Az_n</td>
<td>Dn</td>
</tr>
'''
cabec = '''<tr>
<td colspan="7" rowspan="1">''' + tr('Synthetic deed description'.upper(), str2HTML('Memorial Sintético'.upper())) + '''[TITULO]</td>
</tr>
<tr>
<td colspan="1" rowspan="2">''' + tr('VERTEX', str2HTML('VÉRTICE')) + '''</td>
<td colspan="3" rowspan="1">''' + tr('COORDINATE', str2HTML('COORDENADA')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('SIDE', str2HTML('LADO')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('AZIMUTH', str2HTML('AZIMUTE')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('DISTANCE', str2HTML('DISTÂNCIA')) + '''
(m)</td>
</tr>
<tr>
<td>E</td>
<td>N</td>
<td>h</td>
</tr>'''
# Projected (UTM) without azimuths and distances
if tipo == 'proj' and azimuteDist == 0:
linha = '''<tr>
<td>Vn</td>
<td>En</td>
<td>Nn</td>
<td>hn</td>
</tr>
'''
cabec = '''<tr>
<td colspan="4" rowspan="1">''' + tr('Synthetic deed description'.upper(), str2HTML('Memorial Sintético'.upper())) + '''[TITULO]</td>
</tr>
<tr>
<td colspan="1" rowspan="2">''' + tr('VERTEX', str2HTML('VÉRTICE')) + '''</td>
<td colspan="3" rowspan="1">''' + tr('COORDINATE', str2HTML('COORDENADA')) + '''</td>
</tr>
<tr>
<td>E</td>
<td>N</td>
<td>h</td>
</tr>'''
# Geographic with azimuths and distances
if tipo == 'geo' and azimuteDist == 1:
linha = '''<tr>
<td>Vn</td>
<td>lonn</td>
<td>latn</td>
<td>hn</td>
<td>Ln</td>
<td>Az_n</td>
<td>Dn</td>
</tr>
'''
cabec = '''<tr>
<td colspan="7" rowspan="1">''' + tr('Synthetic deed description'.upper(), str2HTML('Memorial Sintético'.upper())) + '''[TITULO]</td>
</tr>
<tr>
<td colspan="1" rowspan="2">''' + tr('VERTEX', str2HTML('VÉRTICE')) + '''</td>
<td colspan="3" rowspan="1">''' + tr('COORDINATE', str2HTML('COORDENADA')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('SIDE', str2HTML('LADO')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('AZIMUTH', str2HTML('AZIMUTE')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('DISTANCE', str2HTML('DISTÂNCIA')) + '''
(m)</td>
</tr>
<tr>
<td>longitude</td>
<td>latitude</td>
<td>h</td>
</tr>'''
# Geographic without azimuths and distances
if tipo == 'geo' and azimuteDist == 0:
linha = '''<tr>
<td>Vn</td>
<td>lonn</td>
<td>latn</td>
<td>hn</td>
</tr>
'''
cabec = '''<tr>
<td colspan="4" rowspan="1">''' + tr('Synthetic deed description'.upper(), str2HTML('Memorial Sintético'.upper())) + '''[TITULO]</td>
</tr>
<tr>
<td colspan="1" rowspan="2">''' + tr('VERTEX', str2HTML('VÉRTICE')) + '''</td>
<td colspan="3" rowspan="1">''' + tr('COORDINATE', str2HTML('COORDENADA')) + '''</td>
</tr>
<tr>
<td>longitude</td>
<td>latitude</td>
<td>h</td>
</tr>'''
# UTM and geographic with azimuths and distances
if tipo == 'both' and azimuteDist == 1:
linha = '''<tr>
<td>Vn</td>
<td>lonn</td>
<td>latn</td>
<td>En</td>
<td>Nn</td>
<td>hn</td>
<td>Ln</td>
<td>Az_n</td>
<td>Dn</td>
</tr>
'''
cabec = '''<tr>
<td colspan="9" rowspan="1">''' + tr('Synthetic deed description'.upper(), str2HTML('Memorial Sintético'.upper())) + '''[TITULO]</td>
</tr>
<tr>
<td colspan="1" rowspan="2">''' + tr('VERTEX', str2HTML('VÉRTICE')) + '''</td>
<td colspan="5" rowspan="1">''' + tr('COORDINATE', str2HTML('COORDENADA')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('SIDE', str2HTML('LADO')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('AZIMUTH', str2HTML('AZIMUTE')) + '''</td>
<td colspan="1" rowspan="2">''' + tr('DISTANCE', str2HTML('DISTÂNCIA')) + '''
(m)</td>
</tr>
<tr>
<td>longitude</td>
<td>latitude</td>
<td>E</td>
<td>N</td>
<td>h</td>
</tr>'''
# UTM and geographic without azimuths and distances
if tipo == 'both' and azimuteDist == 0:
linha = '''<tr>
<td>Vn</td>
<td>lonn</td>
<td>latn</td>
<td>En</td>
<td>Nn</td>
<td>hn</td>
</tr>
'''
cabec = '''<tr>
<td colspan="6" rowspan="1">''' + tr('Synthetic deed description'.upper(), str2HTML('Memorial Sintético'.upper())) + '''[TITULO]</td>
</tr>
<tr>
<td colspan="1" rowspan="2">''' + tr('VERTEX', str2HTML('VÉRTICE')) + '''</td>
<td colspan="5" rowspan="1">''' + tr('COORDINATE', str2HTML('COORDENADA')) + '''</td>
</tr>
<tr>
<td>longitude</td>
<td>latitude</td>
<td>E</td>
<td>N</td>
<td>h</td>
</tr>'''
LINHAS = ''
for k in range(tam):
linha0 = linha
itens = {'Vn': pnts_UTM[k+1][2],
'En': tr(format_num.format(pnts_UTM[k+1][0].x()), format_num.format(pnts_UTM[k+1][0].x()).replace(',', 'X').replace('.', ',').replace('X', '.')),
'Nn': tr(format_num.format(pnts_UTM[k+1][0].y()), format_num.format(pnts_UTM[k+1][0].y()).replace(',', 'X').replace('.', ',').replace('X', '.')),
'hn': tr(format_num.format(pnts_UTM[k+1][0].z()), format_num.format(pnts_UTM[k+1][0].z()).replace(',', 'X').replace('.', ',').replace('X', '.')),
'lonn': tr(DD2DMS(pnts_GEO[k+1][0].x(),decimal + 3), DD2DMS(pnts_GEO[k+1][0].x(),decimal + 3).replace('.', ',')),
'latn': tr(DD2DMS(pnts_GEO[k+1][0].y(),decimal + 3), DD2DMS(pnts_GEO[k+1][0].y(),decimal + 3).replace('.', ',')),
'Ln': pnts_UTM[k+1][2] + '/' + pnts_UTM[1 if k+2 > tam else k+2][2],
'Az_n': tr(DD2DMS(Az_lista[k],1), DD2DMS(Az_lista[k],1).replace('.', ',')),
'Dn': tr(format_num.format(Dist[k]), format_num.format(Dist[k]).replace(',', 'X').replace('.', ',').replace('X', '.'))
}
for item in itens:
linha0 = linha0.replace(item, itens[item])
LINHAS += linha0
resultado = texto.replace('[CABECALHO]', cabec).replace('[LINHAS]', LINHAS).replace('[TITULO]', str2HTML(titulo.upper())).replace('[FONTSIZE]', str(fontsize))
return resultado
else:
return tr('Verify geometry', 'Verificar geometria') | 0.497559 | 0.334005 |
import uuid
import ddt
from oslo_concurrency import processutils
from oslo_serialization import jsonutils
from werkzeug import exceptions as w_exceptions
from kuryr.lib import binding
from kuryr.lib import constants as lib_const
from kuryr.lib import exceptions
from kuryr.lib import utils as lib_utils
from kuryr_libnetwork import app
from kuryr_libnetwork.tests.unit import base
from kuryr_libnetwork import utils
@ddt.ddt
class TestKuryrLeaveFailures(base.TestKuryrFailures):
"""Unit tests for the failures for unbinding a Neutron port."""
def _invoke_leave_request(self, docker_network_id,
docker_endpoint_id):
data = {
'NetworkID': docker_network_id,
'EndpointID': docker_endpoint_id,
}
response = self.app.post('/NetworkDriver.Leave',
content_type='application/json',
data=jsonutils.dumps(data))
return response
def _port_unbind_with_exception(self, docker_endpoint_id,
neutron_port, ex):
fake_unbinding_response = ('fake stdout', '')
self.mox.StubOutWithMock(binding, 'port_unbind')
if ex:
binding.port_unbind(docker_endpoint_id, neutron_port).AndRaise(ex)
else:
binding.port_unbind(docker_endpoint_id, neutron_port).AndReturn(
fake_unbinding_response)
self.mox.ReplayAll()
return fake_unbinding_response
@ddt.data(exceptions.VethDeletionFailure,
processutils.ProcessExecutionError)
def test_leave_unbinding_failure(self, GivenException):
fake_docker_network_id = lib_utils.get_hash()
fake_docker_endpoint_id = lib_utils.get_hash()
fake_neutron_network_id = str(uuid.uuid4())
self._mock_out_network(fake_neutron_network_id, fake_docker_network_id)
fake_neutron_port_id = str(uuid.uuid4())
self.mox.StubOutWithMock(app.neutron, 'list_ports')
neutron_port_name = utils.get_neutron_port_name(
fake_docker_endpoint_id)
fake_neutron_v4_subnet_id = str(uuid.uuid4())
fake_neutron_v6_subnet_id = str(uuid.uuid4())
fake_neutron_ports_response = self._get_fake_ports(
fake_docker_endpoint_id, fake_neutron_network_id,
fake_neutron_port_id, lib_const.PORT_STATUS_ACTIVE,
fake_neutron_v4_subnet_id, fake_neutron_v6_subnet_id)
app.neutron.list_ports(name=neutron_port_name).AndReturn(
fake_neutron_ports_response)
fake_neutron_port = fake_neutron_ports_response['ports'][0]
fake_message = "fake message"
fake_exception = GivenException(fake_message)
self._port_unbind_with_exception(
fake_docker_endpoint_id, fake_neutron_port, fake_exception)
response = self._invoke_leave_request(
fake_docker_network_id, fake_docker_endpoint_id)
self.assertEqual(
w_exceptions.InternalServerError.code, response.status_code)
decoded_json = jsonutils.loads(response.data)
self.assertIn('Err', decoded_json)
self.assertIn(fake_message, decoded_json['Err'])
def test_leave_bad_request(self):
fake_docker_network_id = lib_utils.get_hash()
invalid_docker_endpoint_id = 'id-should-be-hexdigits'
response = self._invoke_leave_request(
fake_docker_network_id, invalid_docker_endpoint_id)
self.assertEqual(w_exceptions.BadRequest.code, response.status_code)
decoded_json = jsonutils.loads(response.data)
self.assertIn('Err', decoded_json)
# TODO(tfukushima): Add the better error message validation.
self.assertIn(invalid_docker_endpoint_id, decoded_json['Err'])
self.assertIn('EndpointID', decoded_json['Err']) | kuryr_libnetwork/tests/unit/test_leave.py | 0.54577 | 0.071559 |
import re
import time
import sys
import requests
from bs4 import BeautifulSoup
rude_words = [
"elf-skin",
"neat's-tongue",
"bull's-pizzle",
"stock-fish",
"three-inch fool",
"unable worm",
"lily-livered",
"tewkesbury mustard",
"pigeon-livered",
'blackguard',
"scurvy companion",
'blaggard',
'scullions',
'menial',
'scoundrel',
'cox-comb',
'knave',
'churl',
'doxy',
'glos pautonnier',
'skamelar',
'mand<NAME>',
'hedge-born',
'cumberground'
]
def find_rude_rhyme(word, cache):
# looks up a rude rhyme for the word; returns the substitute wrapped in ANSI bold escapes, or the word unchanged if none is cached
result = cache.get(word, word)
if result == word:
return result
return f'\x1b[1m{result}\x1b[0m'
def get_all_rhymes(word):
candidate = (re.sub(r'[^\w]', ' ', word)).split()[-1]
json_output = requests.get("https://api.datamuse.com/words", params={'rel_rhy': candidate}).json()
rhyming_words = []
for rhyme in json_output:
rhyming_words.append(rhyme['word'])
return rhyming_words
def get_sonnets():
result = requests.get("http://lib.ru/SHAKESPEARE/sonnets.txt")
html = result.content
soup = BeautifulSoup(html, features="lxml")
text = soup.get_text()
textlist = text.split("\n")
textiter = iter(textlist)
sonnet = False
while not sonnet:
line = next(textiter)
if "Sonnet " in line:
sonnet = True
next(textiter)
return textiter
def load_sonnet(textiter):
poem = []
while True:
line = next(textiter)
if "Sonnet " not in line:
poem.append(line)
else:
break
poem.pop()
poemsplit = [x.split(" ") for x in poem if x.strip()]
return poemsplit
def replace_words(sonnet, cache):
for ix, line in enumerate(sonnet):
word = line[-1][:-1]
rude_version = find_rude_rhyme(word, cache)
sonnet[ix][-1] = rude_version + line[-1][-1]
return sonnet
def main():
textiter = get_sonnets()
# print(requests.get("https://api.datamuse.com/words", params={'rel_rhy': 'cat'}).json())
print("PRECACHING...")
cache = {}
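# Map every word that rhymes with a rude word back to that rude word, so lookups while rewriting are O(1)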
for word in rude_words:
for rhyme in get_all_rhymes(word):
cache[rhyme] = word
while True:
print("---")
sonnet = load_sonnet(textiter)
rude_sonnet = replace_words(sonnet, cache)
joined = "\n".join(" ".join(l) for l in rude_sonnet)
if '\x1b' in joined:
print(joined)
input()
if __name__ == '__main__':
sys.exit(main()) | various/berude.py | 0.170992 | 0.200088 |
from brunodb.query import get_query_sql, get_base_query, get_where_clause, get_order_by_sql
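# Collapse runs of whitespace and lowercase so generated SQL can be compared while ignoring formatting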
def standardize(string):
string = ' '.join(string.split())
return string.lower().strip()
def test_base_sql():
table_name = 'foo'
sql = get_base_query(table_name)
assert standardize(sql) == "select * from foo"
def test_get_where_clause_nothing():
kwargs = {}
where_extra = ''
where_clause, where_vals = get_where_clause(kwargs, where_extra=where_extra)
assert where_clause == ''
assert where_vals == []
def test_get_where_with_kwargs():
kwargs = {'bar': 5}
where_extra = ''
where_clause, where_vals = get_where_clause(kwargs, where_extra=where_extra)
where_clause = standardize(where_clause)
assert where_clause == 'where bar = (?)'
assert where_vals == [5]
def test_get_where_with_where_extra():
kwargs = {}
where_extra = 'buzz > 9'
where_clause, where_vals = get_where_clause(kwargs, where_extra=where_extra)
where_clause = standardize(where_clause)
assert where_clause == 'where buzz > 9'
assert where_vals == []
def test_get_where_with_kwargs_and_where_extra():
kwargs = {'foo': 99, 'bar': 5}
where_extra = 'buzz > 9'
where_clause, where_vals = get_where_clause(kwargs, where_extra=where_extra)
where_clause = standardize(where_clause)
assert where_clause == 'where foo = (?) and bar = (?) and buzz > 9'
assert where_vals == [99, 5]
def test_order_by_sql_nothing():
order_by = ''
order_by_sql = get_order_by_sql(order_by)
assert order_by_sql == ''
def test_order_by_sql():
order_by = 'foo'
order_by_sql = get_order_by_sql(order_by)
order_by_sql = standardize(order_by_sql)
assert order_by_sql == 'order by foo'
order_by = ['foo']
order_by_sql = get_order_by_sql(order_by)
order_by_sql = standardize(order_by_sql)
assert order_by_sql == 'order by foo'
def test_order_by_sql_mult():
order_by = ('foo', 'bar')
order_by_sql = get_order_by_sql(order_by)
order_by_sql = standardize(order_by_sql)
assert order_by_sql == 'order by foo, bar'
order_by = ['foo', 'bar']
order_by_sql = get_order_by_sql(order_by)
order_by_sql = standardize(order_by_sql)
assert order_by_sql == 'order by foo, bar'
def test_order_by_sql_mult_desc():
order_by = ('-foo', 'bar')
order_by_sql = get_order_by_sql(order_by)
order_by_sql = standardize(order_by_sql)
assert order_by_sql == 'order by foo desc, bar'
order_by = ['foo', '-bar']
order_by_sql = get_order_by_sql(order_by)
order_by_sql = standardize(order_by_sql)
assert order_by_sql == 'order by foo, bar desc'
def test_query_sql():
table_name = 'foo'
sql, vals = get_query_sql(table_name)
assert standardize(sql) == "select * from foo"
assert vals == ()
def test_query_sql_complex():
table_name = 'foo'
fields = ['bar', 'buzz']
kwargs = {'jug': 19,
'bug': 'ant'}
where_extra = 'hug <= 77'
order_by = ('-buzz',)
query_sql, vals = get_query_sql(table_name,
fields=fields,
where_extra=where_extra,
count_table_rows=False,
order_by=order_by,
**kwargs)
sql = standardize(query_sql)
expected = "select bar, buzz from foo where jug = (?) and bug = (?) and hug <= 77 order by buzz desc"
assert sql == expected
assert vals == (19, 'ant')
def test_query_sql_complex_count():
table_name = 'foo'
fields = ['bar', 'buzz']
kwargs = {'jug': 19,
'bug': 'ant'}
where_extra = 'hug <= 77'
order_by = ('-buzz',)
query_sql, vals = get_query_sql(table_name,
where_extra=where_extra,
count_table_rows=True,
order_by=order_by,
**kwargs)
sql = standardize(query_sql)
expected = "select count(*) from foo where jug = (?) and bug = (?) and hug <= 77 order by buzz desc"
assert sql == expected
assert vals == (19, 'ant') | test/test_query.py | 0.635449 | 0.512449 |
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import TokenAuthentication
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.decorators import api_view, permission_classes,authentication_classes
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from django.shortcuts import render
from applications.gymkhana.models import Club_info,Club_member,Core_team,Session_info,Event_info,Club_budget,Club_report,Fest_budget,Registration_form,Voting_polls
from .serializers import Club_memberSerializer,Core_teamSerializer,Club_infoSerializer,Club_DetailsSerializer,Session_infoSerializer,event_infoserializer,club_budgetserializer,Club_reportSerializers,Fest_budgerSerializer,Registration_formSerializer,Voting_pollSerializer
from django.contrib.auth.models import User
def coordinator_club(request):
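# Collect serialized info for every club where the current user matches the first name token of the coordinator or co-coordinator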
club_info = []
for i in Club_info.objects.all():
co = (str(i.co_ordinator)).split(" ")
co_co=(str(i.co_coordinator)).split(" ")
if co[0]==str(request.user) or co_co[0] == str(request.user):
club_info.append(Club_infoSerializer(i).data)
return club_info
class core(APIView):
def get(self,request):
co=Core_team.objects.all()
serializer=Core_teamSerializer(co, many=True)
print(serializer.data)
return Response(serializer.data)
class clubname(APIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
def get(self, request):
clubname1 = Club_info.objects.all()
serializer = Club_infoSerializer(clubname1, many=True)
return Response(serializer.data)
class Club_Details(APIView):
def get(self, request):
clubdetail=Club_info.objects.all()
serializer=Club_DetailsSerializer(clubdetail, many=True)
return Response(serializer.data)
class session_details(APIView):
def get(self, request):
session = Session_info.objects.all()
serializer = Session_infoSerializer(session, many = True)
return Response(serializer.data)
class club_events(APIView):
def get(self, request):
clubevents=Event_info.objects.all()
serializer=event_infoserializer(clubevents, many = True)
return Response(serializer.data)
class club_budgetinfo(APIView):
def get(self, request):
clubbudget=Club_budget.objects.all()
serializer=club_budgetserializer(clubbudget, many=True)
return Response(serializer.data)
class club_report(APIView):
def get(self, request):
clubreport = Club_report.objects.all()
serializer = Club_reportSerializers(clubreport , many=True)
return Response(serializer.data)
class Fest_Budget(APIView):
def get(self, request):
festbudget=Fest_budget.objects.all()
serializer=Fest_budgerSerializer(festbudget, many=True)
return Response(serializer.data)
class Registraion_form(APIView):
def get(self, request):
registration=Registration_form.objects.all()
serializer=Registration_formSerializer(registration, many=True)
return Response(serializer.data)
class Voting_Polls(APIView):
def get(self, request):
votingpolls=Voting_polls.objects.all()
serializer=Voting_pollSerializer(votingpolls, many=True)
return Response(serializer.data) | FusionIIIT/applications/gymkhana/api/views.py | 0.477554 | 0.086942 |
import json
from utils.utils_aws import publish_to_sns
from utils.utils_db import db_list_all_unfixed_vulnerabilities, db_vulnerability_fixed
from utils.utils_dns import dns_deleted, vulnerable_ns, vulnerable_alias, vulnerable_cname
from utils.utils_requests import vulnerable_storage
def lambda_handler(event, context): # pylint:disable=unused-argument
vulnerabilities = db_list_all_unfixed_vulnerabilities()
json_data = {"Fixed": []}
for vulnerability in vulnerabilities:
domain = vulnerability["Domain"]["S"]
vulnerability_type = vulnerability["VulnerabilityType"]["S"]
resource_type = vulnerability["ResourceType"]["S"]
cloud = vulnerability["Cloud"]["S"]
account = vulnerability["Account"]["S"]
if vulnerability_type == "NS":
if dns_deleted(domain) or not vulnerable_ns(domain):
db_vulnerability_fixed(domain)
json_data["Fixed"].append(
{"Account": account, "Cloud": cloud, "Domain": domain, "ResourceType": resource_type}
)
elif "S3" in resource_type or "Google cloud storage" in resource_type:
if dns_deleted(domain) or not vulnerable_storage(domain, https_timeout=3, http_timeout=3):
db_vulnerability_fixed(domain)
json_data["Fixed"].append(
{"Account": account, "Cloud": cloud, "Domain": domain, "ResourceType": resource_type}
)
elif vulnerability_type == "CNAME":
if dns_deleted(domain) or not vulnerable_cname(domain):
db_vulnerability_fixed(domain)
json_data["Fixed"].append(
{"Account": account, "Cloud": cloud, "Domain": domain, "ResourceType": resource_type}
)
elif vulnerability_type == "Alias":
if dns_deleted(domain) or not vulnerable_alias(domain):
db_vulnerability_fixed(domain)
json_data["Fixed"].append(
{"Account": account, "Cloud": cloud, "Domain": domain, "ResourceType": resource_type}
)
if len(json_data["Fixed"]) == 0:
print("No new fixed vulnerabilities")
else:
print(json.dumps(json_data, sort_keys=True, indent=2))
publish_to_sns(json_data, "Domains no longer vulnerable to takeover") | terraform-modules/lambda/code/update/update.py | 0.192046 | 0.093471 |
from contextlib import closing
import socket
import numpy as np
import random
import string
# https://github.com/PSBPOSAS/dji-asdk-to-python/issues/2
import os
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if not on_rtd:
import cv2
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst # noqa: E402
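# Minimal usage sketch (assumes an H.264 RTP stream is already being sent to the chosen UDP port;
# the resolution and port below are only example values):
#   listener = StreamingListener(width=1280, height=720, port=5600)
#   listener.start()
#   frame = listener.getFrame()  # BGR numpy array once a frame has arrived, otherwise None
#   listener.stop()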
class StreamingListener(object):
def __init__(self, width=1920, height=1080, port=None):
self.width = width
self.height = height
self.streaming = False
self.pipe_name = self.rand_str(10)
if port is None:
self.port = StreamingListener.find_free_port()
else:
self.port = port
# Gstreamer
Gst.init(None)
self._frame = None
self.video_source = (
'udpsrc port=%s caps="application/x-rtp, \
encoding-name=(string)H264" ! queue '
% self.port
)
self.video_decode = "! rtph264depay ! queue ! h264parse ! avdec_h264 ! \
videoconvert ! video/x-raw,format=(string)BGR ! videoconvert"
# Create a sink to get data
self.video_sink_conf = (
"! appsink name=%s emit-signals=true \
sync=false max-buffers=2 drop=true" % self.pipe_name
)
self.video_pipe = None
self.video_sink = None
self.appsrc = None
# End Gstreamer
@staticmethod
def find_free_port():
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('localhost', 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
@staticmethod
def rand_str(n):
return ''.join(random.choices(
string.ascii_uppercase + string.digits, k=n)
)
@staticmethod
def _gst_to_opencv(sample):
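# Wrap the sample's raw buffer in a (height, width, 3) uint8 NumPy array (BGR, as requested by the pipeline caps)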
buf = sample.get_buffer()
caps = sample.get_caps()
array = np.ndarray(
(
caps.get_structure(0).get_value("height"),
caps.get_structure(0).get_value("width"),
3,
),
buffer=buf.extract_dup(0, buf.get_size()),
dtype=np.uint8,
)
return array
def _gst_callback(self, sink):
sample = sink.emit("pull-sample")
new_frame = self._gst_to_opencv(sample)
new_frame = cv2.resize(new_frame, (self.width, self.height))
self._frame = new_frame
return Gst.FlowReturn.OK
def start(self):
self.streaming = True
# Gstreamer
config = [
self.video_source,
self.video_decode,
self.video_sink_conf,
]
command = " ".join(config)
self.video_pipe = Gst.parse_launch(command)
self.video_pipe.set_state(Gst.State.PLAYING)
self.appsrc = self.video_pipe.get_child_by_name("source")
self.video_sink = self.video_pipe.get_by_name(self.pipe_name)
self.video_sink.connect("new-sample", self._gst_callback)
# End Gstreamer
def getFrame(self):
frame = self._frame
self._frame = None
return frame # hand back the latest frame once; subsequent calls return None until a new frame arrives
def stop(self):
self.streaming = False
self.video_pipe.set_state(Gst.State.NULL) | dji_asdk_to_python/utils/streaming_utils.py | 0.510985 | 0.087994 |
from multiprocessing import Pool
import numpy as np
class Provider:
def __init__(self, numofthreads=4):
self.transformers = []
self.numofthreads = numofthreads
self.pool = Pool(numofthreads, initializer=np.random.seed) # seed NumPy's RNG independently in every worker process at startup
self.poolresults = None
self.data = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.closePool()
def closePool(self):
self.pool.close()
self.pool.join()
def addTransformer(self, transformer):
self.transformers.append(transformer)
def getNextChunk(self, chunksize, **kwargs):
raise NotImplementedError()
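# Usage sketch with a hypothetical subclass (the subclass and any transformers must be
# defined at module level so multiprocessing can pickle them):
#   def double(batch, threadidx):
#       return batch * 2.0
#   class ArrayProvider(Provider):
#       def getNextChunk(self, chunksize, **kwargs):
#           return np.random.rand(chunksize, 8)
#   with ArrayProvider(numofthreads=4) as provider:
#       provider.addTransformer(double)
#       provider.prepareData(chunksize=20000)
#       data = provider.getData()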
def prepareData(self, chunksize=20000, **kwargs):
result = self.getNextChunk(chunksize, **kwargs)
if len(self.transformers) == 0:
self.data = result
return
if result is not None:
if isinstance(result, tuple) or isinstance(result, list):
batchsize = result[0].shape[0] // self.numofthreads
else:
batchsize = result.shape[0] // self.numofthreads
batches = []
for i in range(self.numofthreads - 1):
if isinstance(result, tuple) or isinstance(result, list):
batches.append([res[i * batchsize:(i + 1) * batchsize] for res in result])
else:
batches.append(result[i * batchsize:(i + 1) * batchsize])
if isinstance(result, tuple) or isinstance(result, list):
batches.append([res[(self.numofthreads - 1) * batchsize:] for res in result])
else:
batches.append(result[(self.numofthreads - 1) * batchsize:])
args = []
for i, batch in enumerate(batches):
arg = (self.transformers, batch, i)
args.append(arg)
else:
args = []
for i in range(self.numofthreads):
args.append((self.transformers, None, i))
self.poolresults = self.pool.starmap_async(self.worker, args)
def getData(self):
if self.poolresults is not None:
self.poolresults.wait()
results = [None] * self.numofthreads
for data in self.poolresults.get():
result, threadidx = data
results[threadidx] = result
self.poolresults = None
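# Stitch the per-worker results back together in worker order into contiguous preallocated arrays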
length = 0
if isinstance(results[0], tuple) or isinstance(results[0], list):
datshape = [res.shape[1:] for res in results[0]]
for res in results:
length += res[0].shape[0]
self.data = tuple(np.empty((length, )+shape, dtype=results[0][i].dtype)
for i, shape in enumerate(datshape))
idx = 0
for res in results:
for i, dat in enumerate(res):
self.data[i][idx:idx + dat.shape[0]] = dat
idx += res[0].shape[0]
else:
datshape = results[0].shape[1:]
for res in results:
length += res.shape[0]
self.data = np.empty((length, ) + datshape, dtype=np.float32)
idx = 0
for res in results:
self.data[idx:idx + res.shape[0]] = res
idx += res.shape[0]
return self.data
@staticmethod
def worker(transformers, batch, threadidx):
for transformer in transformers:
batch = transformer(batch, threadidx)
return batch, threadidx | Transformers/Provider.py | 0.260954 | 0.141726 |
"""Bevakning av sista minuten-tider hos Folktandvården."""
import re
import time
from email.message import EmailMessage
from pathlib import Path
from subprocess import PIPE, Popen
from urllib.parse import urljoin
import markdownify
import requests # https://docs.python-requests.org/en/master/
from bs4 import BeautifulSoup
import config
OUTPUT_FILE = "out.md"
def main():
"""Check for new bookable times at Folktandvården."""
if Path(OUTPUT_FILE).is_file():
with open(OUTPUT_FILE, "r") as f:
old_content = f.read()
else:
old_content = ""
url = "https://folktandvarden.vgregion.se/boka-besokstider/AvailableAppointments/"
data = [
('undefined', 'undefined'),
# Göteborg
('AppointmentSearchViewModel.SelectedClinincs', 'SE2321000131-E000000000144'),
('AppointmentSearchViewModel.SelectedClinincs', 'SE2321000131-E000000000146'),
('AppointmentSearchViewModel.SelectedClinincs', 'SE2321000131-E000000015795'),
('AppointmentSearchViewModel.SelectedClinincs', 'SE2321000131-E000000000147'),
('AppointmentSearchViewModel.SelectedClinincs', 'SE2321000131-E000000000148'),
('AppointmentSearchViewModel.SelectedClinincs', 'SE2321000131-E000000000149'),
('AppointmentSearchViewModel.SelectedClinincs', 'SE2321000131-E000000000155'),
('AppointmentSearchViewModel.SelectedClinincs', 'SE2321000131-E000000000231'),
('AppointmentSearchViewModel.SelectedClinincs', 'SE2321000131-E000000000152'),
('AppointmentSearchViewModel.SelectedClinincs', 'SE2321000131-E000000012121'),
('AppointmentSearchViewModel.SelectedClinincs', 'SE2321000131-E000000010399'),
('AppointmentSearchViewModel.SelectedClinincs', 'SE2321000131-E000000000154'),
('AppointmentSearchViewModel.SelectedClinincs', 'SE2321000131-E000000010587'),
('AppointmentSearchViewModel.SelectedClinincs', 'SE2321000131-E000000000157'),
('AppointmentSearchViewModel.SelectedClinincs', 'SE2321000131-E000000000158'),
('AppointmentSearchViewModel.SelectedClinincs', 'SE2321000131-E000000000160'),
('AppointmentSearchViewModel.SelectedClinincs', 'SE2321000131-E000000000161'),
('AppointmentSearchViewModel.SelectedClinincs', 'SE2321000131-E000000011956'),
('AppointmentSearchViewModel.SelectedClinincs', 'SE2321000131-E000000000232'),
('AppointmentSearchViewModel.SelectedClinincs', 'SE2321000131-E000000000164'),
('AppointmentSearchViewModel.SelectedClinincs', 'SE2321000131-E000000000165'),
('AppointmentSearchViewModel.SelectedClinincs', 'SE2321000131-E000000000166'),
# Mölndal
('AppointmentSearchViewModel.SelectedClinincs', 'SE2321000131-E000000000137'),
('AppointmentSearchViewModel.SelectedClinincs', 'SE2321000131-E000000000138'),
('AppointmentSearchViewModel.SelectedClinincs', 'SE2321000131-E000000000136'),
('AppointmentSearchViewModel.SelectedClinincs', 'SE2321000131-E000000005700'),
('AppointmentSearchViewModel.SelectedClinincs', 'SE2321000131-E000000000139'),
('isLastMinute', 'true'),
]
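# Browser-like headers captured from a real session; the hard-coded cookie is session-specific and will likely need refreshing for requests to keep succeeding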
headers = {
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-GB,en;q=0.9,sv;q=0.8,de-DE;q=0.7,de;q=0.6',
'cache-control': 'max-age=0',
'content-type': 'application/x-www-form-urlencoded',
'cookie': 'epi-cms-public-1-ext-got-adc-insert=3596527244.47873.0000; ASP.NET_SessionId=kusp0s0wthg1ox2s03hw51ox; TS01608e63=01c69f8384df64b985e5a250e16301ccb4dbe86ae07f2d85373a2678abef60e31284f38cf59e63934e0685c70b76014b8fc87299f6629a82f54ac81fa57e9b173f7fa41b678ea45d3811fbc397e88cc48271e46d5a; TSdbca8b71027=0857e74f39ab200050768e6df49cfb9be85d99810581f8d02ce7ccfb195fa1a05cd7e7cdfeb313b3087ebae0ad11300018f5fbf09d3b4175b5879546eead6c21debbe07e551170c188891b6859ea87e6ced3fa3fc03e8c7cee9a6a57a671e19d',
'origin': 'https://folktandvarden.vgregion.se',
'referer': 'https://folktandvarden.vgregion.se/',
'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="98", "Google Chrome";v="98"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Linux"',
'sec-fetch-dest': 'document',
'sec-fetch-mode': 'navigate',
'sec-fetch-site': 'same-origin',
'sec-fetch-user': '?1',
'upgrade-insecure-requests': '1',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.80 Safari/537.36'
}
# Try multiple times if request fails
for _x in range(5):
try:
response = requests.post(url, data, headers=headers)
# print(response.text)
# Parse HTML
soup = BeautifulSoup(response.text, "html.parser")
interesting_area = soup.find("main").find("div", {"class": "ftv-booking__tightblock"})
for tag in ["button", "form", "input"]:
for t in interesting_area.find_all(tag):
t.decompose()
interesting_area.find("div", "block__generic-body").unwrap()
interesting_area.find("div", "block__header").unwrap()
interesting_area.find("ol", "vgr-pagination").decompose()
# Make URLs absolute
for a in interesting_area.find_all("a"):
a["href"] = urljoin(url, a.get("href"))
# print(interesting_area)
# Convert to markdown
md = markdownify.markdownify(str(interesting_area), heading_style="ATX")
md = md.strip()
md = re.sub("\n+", "\n", md)
# Send email and save output if there is any new information
if (md != "# Lediga tider\nTyvärr finns det inga sista minuten-tider tillgängliga just nu.") and (md != old_content):
send_email(str(interesting_area))
with open(OUTPUT_FILE, "w") as f:
f.write(md)
break
except AttributeError:
time.sleep(10)
def send_email(content):
"""Compose and send email."""
sender = config.sender
subject = "Nya lediga tider hos Folktandvården"
msg = EmailMessage()
msg.set_content(content, subtype="html")
msg["Subject"] = subject
msg["From"] = sender
msg["To"] = config.receiver
p = Popen(["/usr/sbin/sendmail", "-t"], stdin=PIPE)
p.communicate(msg.as_bytes())
if __name__ == '__main__':
main() | bevakning.py | 0.352536 | 0.124772 |
import json
try:
from json import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
from kubernetes.client.models import V1Container
from scan.fetchers.cli.cli_fetcher import CliFetcher
from scan.fetchers.kube.kube_access import KubeAccess
from base.utils.exceptions import CredentialsError, HostAddressError, SshError
class KubeFetchContainers(KubeAccess, CliFetcher):
PROXY_ATTR = 'kube-proxy'
SANDBOX_ID_ATTR = 'io.kubernetes.sandbox.id'
ATTRIBUTES_TO_IGNORE = ['resources', 'liveness_probe', 'readiness_probe', 'security_context']
def get(self, parent_id) -> list:
pod_id = parent_id.replace('-containers', '')
pod_obj = self.inv.get_by_id(self.get_env(), pod_id)
if not pod_obj:
self.log.error('inventory has no pod with uid={}'.format(pod_id))
return []
# TODO: temporary
labels = pod_obj.get('labels', {})
if labels and labels.get('calipso-rancher-pod-for-kube-proxy'):
return [self.get_rancher_proxy_container(pod_obj)]
host = pod_obj['host']
pod_filter = 'spec.nodeName={}'.format(host)
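# The field selector narrows the cluster-wide pod listing to the pod's node before matching on uid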
pods = self.api.list_pod_for_all_namespaces(field_selector=pod_filter)
ret = []
if not pods or len(pods.items) == 0:
self.log.error('failed to find pod with nodeName={}'.format(host))
return []
pod = next((pod for pod in pods.items if pod.metadata.uid == pod_id),
None)
if not pod:
self.log.error('failed to find pod with uid={}'.format(pod_id))
return []
for container in pod.spec.containers:
ret.append(self.get_container(container, pod, pod_obj))
return ret
def get_rancher_proxy_container(self, pod_obj):
doc = {
'type': 'container',
'namespace': pod_obj['namespace'],
'host': pod_obj['host'],
'pod': {'id': pod_obj['id'], 'name': pod_obj['object_name']},
'id': '{}-kube-proxy'.format(pod_obj['id']),
'name': 'kube-proxy',
'container_id': 'kube-proxy'
}
self.update_container_config(doc=doc, pod_obj=pod_obj)
self.update_interface_link(doc=doc, pod_obj=pod_obj)
self.update_proxy_container_info(container=doc, pod_obj=pod_obj)
return doc
def get_container(self, container, pod, pod_obj):
doc = {
'type': 'container',
'environment': self.get_env(),
'namespace': pod.metadata.namespace,
**self.get_container_data(container)
}
self.update_container_status_data(doc, pod_obj)
doc.update({
'host': pod_obj['host'],
'pod': {
'id': pod_obj['id'],
'name': pod_obj['object_name']
},
'ip_address': pod_obj.get('pod_status', {}).get('pod_ip', ''),
'id': '{}-{}'.format(pod_obj['id'], doc['name'])
})
if doc.get('image'):
doc['image'] = {"name": doc['image']}
self.update_container_config(doc=doc, pod_obj=pod_obj)
self.update_interface_link(doc=doc, pod_obj=pod_obj)
self.update_proxy_container_info(container=doc, pod_obj=pod_obj)
return doc
@staticmethod
def _get_state_from_container_status(container_status):
for status_key in ('waiting', 'running', 'terminated'):
if container_status.get('state', {}).get(status_key):
return status_key
def update_container_status_data(self, doc: dict, pod_obj: dict):
container_statuses = pod_obj['pod_status']['container_statuses']
container_status = next((s for s in container_statuses
if s['name'] == doc['name']), None)
if not container_status:
self.log.error('failed to find container_status record '
'for container {} in pod {}'
.format(doc['name'], pod_obj['name']))
return
container_id = container_status['container_id']
if container_id is None:
doc['container_type'] = container_status['name']
doc['container_id'] = container_status['image']
else:
id_parts = container_id.split('://')
doc['container_type'] = id_parts[0]
doc['container_id'] = id_parts[1]
doc['container_status'] = container_status
doc['state'] = self._get_state_from_container_status(container_status)
@classmethod
def get_container_data(cls, container: V1Container):
return cls.class_to_dict(data_object=container, exclude=cls.ATTRIBUTES_TO_IGNORE)
def update_container_config(self, doc, pod_obj):
if doc.get('state') == 'waiting':
return
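# Runtime details come from "docker inspect" run over SSH on the pod's host; this assumes a Docker-based container runtime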
cmd = 'docker inspect {}'.format(doc['container_id'])
try:
output = self.run(cmd, ssh_to_host=pod_obj['host'])
data = json.loads(output)
except SshError as e:
self.log.warning('"docker inspect" cmd failed for container {}. '
'Error: {}'.format(doc['container_id'], e))
return
except JSONDecodeError as e:
self.log.error('error reading output of cmd: {}, {}'
.format(cmd, str(e)))
return
if not data:
return
data = data[0]
if 'State' in data:
doc['container_state'] = data['State']
doc['state'] = data['State']['Status'] # Prefer actual state to the one fetched from container_status
if 'Config' in data:
doc['config'] = data['Config']
self.update_container_sandbox(doc=doc, pod_obj=pod_obj)
def update_container_sandbox(self, doc: dict, pod_obj: dict) -> None:
sandbox_id = (doc.get('config', {}).get('Labels') or {}).get(self.SANDBOX_ID_ATTR)
if not sandbox_id:
return
cmd = 'docker inspect {}'.format(sandbox_id)
output = self.run(cmd, ssh_to_host=pod_obj['host'])
try:
data = json.loads(output)
except JSONDecodeError as e:
self.log.error('error reading output of cmd: {}, {}'
.format(cmd, str(e)))
return
if not data:
return
doc['sandbox'] = data[0]
self.find_network(doc)
def update_interface_link(self, doc: dict, pod_obj: dict):
if doc.get('state') != 'running' or doc['namespace'] == 'cattle-system' or doc['name'] == 'kubernetes-dashboard':
doc['vnic_index'] = ''
return
interface_name = 'vpp1' if 'VPP' in self.configuration.environment['mechanism_drivers'] else 'eth0'
file_name = "/sys/class/net/{}/iflink".format(interface_name)
cmd = 'docker exec {0} sh -c "test -f {1} && cat {1} || exit 0"'.format(doc['container_id'], file_name)
try:
output = self.run(cmd, ssh_to_host=pod_obj['host'])
doc['vnic_index'] = output.strip()
self.add_container_to_vnic(container=doc, pod_obj=pod_obj)
except (SshError, CredentialsError, HostAddressError):
doc['vnic_index'] = ''
# find the network attached to the sandbox and record its ID
def find_network(self, doc):
networks = doc['sandbox']['NetworkSettings']['Networks']
if not networks:
return
if isinstance(networks, dict):
network_names = list(networks.keys())
network = network_names[0]
network = networks[network]
else:
network = networks[0]
network_id = network['NetworkID']
network_obj = self.inv.get_by_id(self.get_env(), network_id)
if not network_obj:
return
doc['network'] = network_id
def add_container_to_vnic(self, container: dict, pod_obj: dict) -> None:
condition = {
'environment': self.get_env(),
'type': 'vnic',
'host': pod_obj['host'],
}
if 'VPP' in self.configuration.environment['mechanism_drivers']:
condition['ip_address'] = container['ip_address']
else:
condition['index'] = container['vnic_index']
vnic = self.inv.find_one(condition)
if not vnic:
return
# TODO: figure out this logic
# re-calc new ID and name path for vNIC and vNICs folder
# self.set_folder_parent(vnic, object_type='vnic',
# master_parent_id=container['id'],
# master_parent_type='container')
vnic_containers = vnic.get('containers', [])
vnic_container = {'id': container['container_id'], 'name': container['name']}
vnic_containers.append(vnic_container)  # list.append returns None, so assign the list itself
vnic['containers'] = vnic_containers
self.inv.set(vnic)
# self.inv.save_inventory_object(vnic,
# parent=container,
# environment=self.get_env())
def update_proxy_container_info(self, container, pod_obj):
if container['name'] != self.PROXY_ATTR:
return
container['container_app'] = self.PROXY_ATTR
self.update_proxy_container_config(container)
self.update_proxy_nat_tables(container)
container['vservices'] = self.get_proxy_container_vservices(pod_obj)
self.add_proxy_container_to_vservices(container)
def update_proxy_container_config(self, container: dict) -> None:
command = container.get('command')
if not command or not isinstance(command, list) or len(command) < 2:
self.log.error('unable to find {} command file '
'for container {}'
.format(self.PROXY_ATTR, container['id']))
return
conf_line = command[1]
if not isinstance(conf_line, str) \
or not conf_line.startswith('--config='):
self.log.error('unable to find {} command config file '
'for container {}'
.format(self.PROXY_ATTR, container['id']))
return
conf_file = conf_line[len('--config='):]
cmd = 'docker exec {} cat {}'.format(container['container_id'],
conf_file)
conf_file_contents = self.run(cmd=cmd, ssh_to_host=container['host'])
container['kube_proxy_config'] = conf_file_contents
def update_proxy_nat_tables(self, container: dict) -> None:
cmd = 'docker exec {} iptables -t nat -n -L' \
.format(container['container_id'])
nat_tables = self.run(cmd=cmd, ssh_to_host=container['host'])
container['nat_tables'] = nat_tables
def get_proxy_container_vservices(self, pod_obj: dict) -> list:
pods = self.inv.find_items({
'environment': self.get_env(),
'type': 'pod',
'host': pod_obj['host'],
'vservices': {'$exists': 1}
})
vservices = []
for pod in pods:
vservices.extend(pod['vservices'])
return vservices
def add_proxy_container_to_vservices(self, container: dict) -> None:
for vservice in list(container.get('vservices', [])):
self.add_proxy_container_to_vservice(container=container, vservice=vservice)
def add_proxy_container_to_vservice(self, container: dict, vservice: dict) -> None:
vservice_obj = self.inv.get_by_id(self.get_env(), vservice['id'])
if not vservice_obj:
self.log.error('failed to find vservice object with id {} '
'in container {} (id: {})'
.format(vservice['id'],
container['name'],
container['id']))
return
if self.PROXY_ATTR not in vservice_obj:
vservice_obj[self.PROXY_ATTR] = []
matches = [p for p in vservice_obj[self.PROXY_ATTR]
if p['id'] == container['id']]
if not matches:
proxy_data = dict(id=container['id'], name=container['name'],
host=container['host'])
vservice_obj[self.PROXY_ATTR].append(proxy_data)
self.inv.set(vservice_obj) | scan/fetchers/kube/kube_fetch_containers.py | 0.153708 | 0.115861
import argparse
import itertools
import re
import sys
import os
from os.path import join
from env import env
from ioutils import ioutils
from ioutils.ioutils import sprint, eprint
def _init():
_set_args()
_set_env()
def _set_args():
env.parser = argparse.ArgumentParser(
description="Compile various dotfiles using their input files.")
env.parser.add_argument(
'input_dir',
default=None,
help="input files directory")
env.parser.add_argument(
'-c',
'--clobber',
action='store_true',
help="Clobber any existing output files (don't back them up).")
env.parser.add_argument(
'-r',
'--revert',
action='store_true',
help="Revert dotfiles to most recent backup.")
env.parser.add_argument(
'-v',
'--verbose',
action='store_true',
help="Enable verbose output.")
env.parser.add_argument(
'-o',
'--output-dir',
nargs=1,
help="Specify output directory (default value is $HOME).")
env.parser.add_argument(
'-f',
'--file',
nargs=1,
help="Process only the specified dotfile.")
env.parser.add_argument(
'-e',
'--exclude',
nargs='+',
action='append',
metavar='INPUT_FILE',
help="Exclude the specified input file.")
env.parser.add_argument(
'--dry-run',
action='store_true',
help="Don't write anything to disk, but instead report what \
action(s) would be taken. Implies --verbose.")
env.parser.add_argument(
'--no-symlinks',
action='store_true',
help="Don't symlink output dotfiles (compile a new file instead)")
env.ARGS = env.parser.parse_args()
sprint("\nPreparing dotfiles with args: " + " ".join(sys.argv[1:]) + "\n")
def _set_env():
input_dir = env.ARGS.input_dir
if os.path.isdir(input_dir):
env.INPUT_DIR = input_dir
else:
eprint("Specified input directory {0} does not exist."
.format(input_dir))
sys.exit(1)
if env.ARGS.output_dir:
env.OUTPUT_DIR = env.ARGS.output_dir[0]
if env.INPUT_DIR == env.OUTPUT_DIR:
eprint("INPUT_DIR {0} cannot be the same as OUTPUT_DIR {1}"
.format(env.INPUT_DIR, env.OUTPUT_DIR))
sys.exit(1)
env.BACKUPS_DIR = join(env.INPUT_DIR, 'backups')
if env.ARGS.dry_run:
env.ARGS.verbose = True
sprint("Environment:")
sprint("\tinput_dir: " + env.INPUT_DIR)
sprint("\tbackups_dir: " + env.BACKUPS_DIR)
sprint("\toutput_dir: " + env.OUTPUT_DIR)
sprint("\targs: " + str(env.ARGS))
sprint("")
def _print_completion_message(processed_dotfiles):
pretty_list = ', '.join(processed_dotfiles)
sprint("Processed the following dotfiles: {0}"
.format(pretty_list))
def _sort_input_file_list(input_files):
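# Files with a numeric "NN-" prefix come first, reverse-sorted; un-prefixed files are appended last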
to_sort = []
not_to_sort = []
for file_name in input_files:
if not re.search(r'[0-9]{2}\-', file_name):
not_to_sort.append(file_name)
else:
to_sort.append(file_name)
result = sorted(to_sort, reverse=True)
result.extend(not_to_sort)
return result
def _get_dotfile_name(file_name):
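# Derive the dotfile name by stripping an ordering prefix and a variant suffix, e.g. "01-bashrc_local" -> ".bashrc"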
if '-' in file_name:
sidx = file_name.index('-') + 1
else:
sidx = 0
if '_' in file_name:
eidx = file_name.index('_')
else:
eidx = len(file_name)
if eidx < sidx:
sidx = 0
dotfile_name = '.' + file_name[sidx:eidx]
return dotfile_name
def _is_input_file_excluded(file_name):
if not env.ARGS.exclude:
return False
all_excludes = list(itertools.chain.from_iterable(env.ARGS.exclude))
return file_name in all_excludes
def _add_input_file_to_dict(dotfiles_dict, input_file):
if not _is_input_file_excluded(input_file):
file_key = _get_dotfile_name(input_file)
if file_key in dotfiles_dict:
dotfiles_dict[file_key].append(input_file)
else:
dotfiles_dict[file_key] = [input_file]
def _get_dotfiles_dict(input_dir):
dotfiles = {}
all_input_files = [item for item in os.listdir(input_dir)
if os.path.isfile(join(input_dir, item))]
for input_file in all_input_files:
_add_input_file_to_dict(dotfiles, input_file)
for dotfile in dotfiles:
dotfiles[dotfile] = _sort_input_file_list(dotfiles[dotfile])
return dotfiles
def _process_dotfile(dotfile, input_files):
sprint("Processing file: " + dotfile)
if env.ARGS.no_symlinks or len(input_files) > 1:
ioutils.compile_dotfile(dotfile, input_files)
else:
ioutils.create_symlink(join(env.INPUT_DIR, input_files[0]),
join(env.OUTPUT_DIR, dotfile))
sprint("Done with {0}\n".format(dotfile))
def _process_dotfiles(all_dotfiles_dict):
for dotfile in all_dotfiles_dict:
_process_dotfile(dotfile, all_dotfiles_dict[dotfile])
def _revert_dotfiles(file_names):
for dotfile in file_names:
ioutils.revert_dotfile(dotfile)
def main():
_init()
processed_dotfiles = []
all_dotfiles_dict = _get_dotfiles_dict(env.INPUT_DIR)
if env.ARGS.file:
dotfile = env.ARGS.file[0]
if not dotfile.startswith("."):
dotfile = "." + dotfile
if env.ARGS.revert:
ioutils.revert_dotfile(dotfile)
processed_dotfiles.append(dotfile)
else:
if dotfile in all_dotfiles_dict:
_process_dotfile(dotfile, all_dotfiles_dict[dotfile])
processed_dotfiles.append(dotfile)
else:
eprint(
"No input files found for {0}. Please double-check "
"the file name(s) and try again."
.format(dotfile))
sys.exit(1)
else:
all_dotfiles = list(all_dotfiles_dict)
if env.ARGS.revert:
_revert_dotfiles(all_dotfiles)
else:
_process_dotfiles(all_dotfiles_dict)
processed_dotfiles.extend(all_dotfiles)
_print_completion_message(processed_dotfiles)
if __name__ == '__main__':
main() | dotfilesmanager/dfm.py | 0.199581 | 0.071494
import random
import wx
globalItems = ['New', 'Open', 'Save', 'Save As...', 'Cut', 'Copy', 'Paste',
'Delete', 'Select All', 'Find', 'About', 'Help', 'Exit',
'Python is the Best!']
random.shuffle(globalItems)
globalOrder = []
length = len(globalItems)
# print(length)
for num in range(0, length):
globalOrder.append(num)
random.shuffle(globalOrder)
# print(len(globalOrder))
randomShuffleCheckedOnce = True
globalCheckedStrings = []
class MyRearrangeDialog(wx.RearrangeDialog):
def __init__(self, parent, message, title, order, items, log):
wx.RearrangeDialog.__init__(self, parent, message, title, order, items)
self.log = log
panel = wx.Panel(self)
sizer = wx.BoxSizer(wx.HORIZONTAL)
self.lc = self.GetList()
sizer.Add(wx.StaticText(panel, wx.ID_ANY,
"Number of checked boxes:"))
self.lenItems = len(items)
self.tc = wx.TextCtrl(panel, wx.ID_ANY, "%s" % self.lenItems,
style=wx.TE_READONLY)
self.lc.Bind(wx.EVT_CHECKLISTBOX, self.OnCheck)
self.lc.Bind(wx.EVT_LISTBOX, self.OnListBox)
self.lc.Bind(wx.EVT_CONTEXT_MENU, self.OnContextMenu)
sizer.Add(self.tc)
panel.SetSizer(sizer)
self.AddExtraControls(panel)
global randomShuffleCheckedOnce
global globalCheckedStrings
if randomShuffleCheckedOnce:
globalCheckedStrings = []
for i in range(0, self.lenItems):
flag = random.randint(0, 1)
# print(flag)
if flag:
self.lc.Check(item=i, check=False)
globalCheckedStrings.append(0)
else:
globalCheckedStrings.append(1)
randomShuffleCheckedOnce = False
else:
for i in range(0, self.lenItems):
if globalCheckedStrings[i]:
self.lc.Check(item=i, check=True)
else:
self.lc.Check(item=i, check=False)
self.checkedItems = self.lc.GetCheckedItems()
self.checkedStrings = self.lc.GetCheckedStrings()
#Update the TextCtrl
self.tc.SetValue("%s" % len(self.checkedItems))
def OnListBox(self, event):
self.log.write('You Selected %s\n' % (self.lc.GetString(event.GetSelection())))
def OnCheck(self, event):
self.log.write('You Checked %s %s\n' % (self.lc.GetString(event.GetSelection()),
self.lc.IsChecked(event.GetSelection())))
#Update the TextCtrl
self.checkedItems = self.lc.GetCheckedItems()
self.tc.SetValue("%s" % len(self.checkedItems))
def OnUnCheckOrCheckAll(self, event):
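# Both context-menu items share this handler; ID_CHECKALL (1001) is the only id ending in '1', which the check below relies on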
doWhat = str(event.GetId()).endswith('1')
# print('doWhat', doWhat)
for i in range(0, self.lenItems):
if doWhat:
self.lc.Check(i, True)
else:
self.lc.Check(i, False)
self.checkedItems = self.lc.GetCheckedItems()
self.tc.SetValue("%s" % len(self.checkedItems))
def OnContextMenu(self, event):
menu = wx.Menu()
ID_UNCHECKALL = 1000
ID_CHECKALL = 1001
mi1 = wx.MenuItem(menu, ID_UNCHECKALL, 'UnCheck All', 'UnCheck All')
mi2 = wx.MenuItem(menu, ID_CHECKALL, 'Check All', 'Check All')
menu.Append(mi1)
menu.Append(mi2)
menu.Bind(wx.EVT_MENU, self.OnUnCheckOrCheckAll, id=ID_UNCHECKALL)
menu.Bind(wx.EVT_MENU, self.OnUnCheckOrCheckAll, id=ID_CHECKALL)
self.PopupMenu(menu)
menu.Destroy()
#---------------------------------------------------------------------------
class TestPanel(wx.Panel):
def __init__(self, parent, log):
self.log = log
wx.Panel.__init__(self, parent, -1)
b = wx.Button(self, -1, "Create and Show a RearrangeDialog", (50, 50))
self.Bind(wx.EVT_BUTTON, self.OnButton, b)
def OnButton(self, evt):
global globalOrder
global globalItems
global globalCheckedStrings
rd = MyRearrangeDialog(self, message="Rearrangeify Stuff!",
title="This is a wx.RearrangeDialog",
order=globalOrder, items=globalItems, log=self.log)
if rd.ShowModal() == wx.ID_OK:
# print('GetOrder: ', rd.GetOrder())
globalOrder = list(range(rd.lenItems))
globalItems = []
globalCheckedStrings = []
for i in range(0, rd.lenItems):
# print(rd.lc.GetString(i))
globalItems.append(rd.lc.GetString(i))
# print(rd.lc.IsChecked(i))
if rd.lc.IsChecked(i):
globalCheckedStrings.append(1)
else:
globalCheckedStrings.append(0)
#---------------------------------------------------------------------------
def runTest(frame, nb, log):
win = TestPanel(nb, log)
return win
#---------------------------------------------------------------------------
overview = """\
A wx.RearrangeDialog is a dialog that allows the user to rearrange
the specified items, changing their order and enabling or
disabling them individually.
"""
if __name__ == '__main__':
import sys
import os
import run
run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:]) | demo/RearrangeDialog.py | 0.161849 | 0.057414
import numpy as np
from dataclasses import dataclass
from typing import List, NamedTuple, Tuple
from jiant.tasks.core import FeaturizationSpec
from jiant.tasks.utils import truncate_sequences, pad_to_max_seq_length
from jiant.utils.python.datastructures import BiMap
MAX_SUB_TOKEN_LENGTH = 5
MAX_CONCEPT_LENGTH = 512
MAX_RELATION_LENGTH = 512
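# Per-example caps: concept/relation-label sub tokens and the concept and relation sequences are truncated to these lengths before padding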
class Span(NamedTuple):
start: int
end: int # Use exclusive end, for consistency
def add(self, i: int):
return Span(start=self.start + i, end=self.end + i)
def to_slice(self):
return slice(*self)
def to_array(self):
return np.array([self.start, self.end])
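# Example: Span(2, 5).add(1) == Span(3, 6); tokens[Span(2, 5).to_slice()] selects tokens[2:5]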
@dataclass
class UnpaddedInputs:
unpadded_tokens: List
unpadded_segment_ids: List
cls_offset: int
@dataclass
class UnpaddedAMRInputs:
unpadded_concepts: List[List[str]]
unpadded_relation_ids: List[Tuple[int, int]]
unpadded_relation_labels: List[List[str]]
@dataclass
class InputSet:
input_ids: List
input_mask: List
segment_ids: List
@dataclass
class AMRInputSet:
concept_sub_token_ids: List[List[int]]
concept_sub_token_mask: List[List[int]]
relation_ids: List[Tuple[int, int]]
relation_id_mask: List[int]
relation_label_sub_token_ids: List[List[int]]
relation_label_sub_token_mask: List[List[int]]
def single_sentence_featurize(
guid: str,
input_tokens: List[str],
label_id: int,
tokenizer,
feat_spec: FeaturizationSpec,
data_row_class,
):
unpadded_inputs = construct_single_input_tokens_and_segment_ids(
input_tokens=input_tokens, tokenizer=tokenizer, feat_spec=feat_spec,
)
return create_generic_data_row_from_tokens_and_segments(
guid=guid,
unpadded_tokens=unpadded_inputs.unpadded_tokens,
unpadded_segment_ids=unpadded_inputs.unpadded_segment_ids,
label_id=label_id,
tokenizer=tokenizer,
feat_spec=feat_spec,
data_row_class=data_row_class,
)
def double_sentence_featurize(
guid: str,
input_tokens_a: List[str],
input_tokens_b: List[str],
label_id: int,
tokenizer,
feat_spec: FeaturizationSpec,
data_row_class,
):
"""Featurize an example for a two-input/two-sentence task, and return the example as a DataRow.
Args:
guid (str): human-readable identifier for interpretability and debugging.
input_tokens_a (List[str]): sequence of tokens in segment a.
input_tokens_b (List[str]): sequence of tokens in segment b.
label_id (int): int representing the label for the task.
tokenizer:
feat_spec (FeaturizationSpec): Tokenization-related metadata.
data_row_class (DataRow): DataRow class used in the task.
Returns:
DataRow representing an example.
"""
unpadded_inputs = construct_double_input_tokens_and_segment_ids(
input_tokens_a=input_tokens_a,
input_tokens_b=input_tokens_b,
tokenizer=tokenizer,
feat_spec=feat_spec,
)
return create_generic_data_row_from_tokens_and_segments(
guid=guid,
unpadded_tokens=unpadded_inputs.unpadded_tokens,
unpadded_segment_ids=unpadded_inputs.unpadded_segment_ids,
label_id=label_id,
tokenizer=tokenizer,
feat_spec=feat_spec,
data_row_class=data_row_class,
)
def double_sentence_with_amr_featurize(
guid: str,
input_tokens_a: List[str],
input_amr_concepts_a: List[List[str]],
input_amr_relation_ids_a: List[Tuple[int, int]],
input_amr_relation_labels_a: List[List[str]],
input_tokens_b: List[str],
input_amr_concepts_b: List[List[str]],
input_amr_relation_ids_b: List[Tuple[int, int]],
input_amr_relation_labels_b: List[List[str]],
label_id: int,
tokenizer,
feat_spec: FeaturizationSpec,
data_row_class,
):
"""Featurize an example for a two-input/two-sentence with AMR task, and return the example as a DataRow.
Args:
guid (str): human-readable identifier for interpretability and debugging.
input_tokens_a (List[str]): sequence of tokens in segment a.
input_amr_concepts_a (List[List[str]]): sequence of sub tokens of concepts in AMR a.
input_amr_relation_ids_a (List[(int, int)]): sequence of (source, target)
based on concept indices for relations in AMR a.
input_amr_relation_labels_a (List[List[str]]): sequence of sub tokens of relation labels in AMR a.
input_tokens_b (List[str]): sequence of tokens in segment b.
input_amr_concepts_b (List[List[str]]): sequence of sub tokens of concepts in AMR b.
input_amr_relation_ids_b (List[(int, int)]): sequence of (source, target)
based on concept indices for relations in AMR b.
input_amr_relation_labels_b (List[List[str]]): sequence of sub tokens of relation labels in AMR b.
label_id (int): int representing the label for the task.
tokenizer:
feat_spec (FeaturizationSpec): Tokenization-related metadata.
data_row_class (DataRow): DataRow class used in the task.
Returns:
DataRow representing an example.
"""
unpadded_inputs = construct_double_input_tokens_and_segment_ids(
input_tokens_a=input_tokens_a,
input_tokens_b=input_tokens_b,
tokenizer=tokenizer,
feat_spec=feat_spec,
)
unpadded_amr_inputs = construct_double_input_amr_concepts_and_relations(
input_amr_concepts_a=input_amr_concepts_a,
input_amr_relation_ids_a=input_amr_relation_ids_a,
input_amr_relation_labels_a=input_amr_relation_labels_a,
input_amr_concepts_b=input_amr_concepts_b,
input_amr_relation_ids_b=input_amr_relation_ids_b,
input_amr_relation_labels_b=input_amr_relation_labels_b,
tokenizer=tokenizer,
feat_spec=feat_spec,
)
return create_generic_data_row_with_amr(
guid=guid,
unpadded_tokens=unpadded_inputs.unpadded_tokens,
unpadded_segment_ids=unpadded_inputs.unpadded_segment_ids,
unpadded_concepts=unpadded_amr_inputs.unpadded_concepts,
unpadded_relation_ids=unpadded_amr_inputs.unpadded_relation_ids,
unpadded_relation_labels=unpadded_amr_inputs.unpadded_relation_labels,
label_id=label_id,
tokenizer=tokenizer,
feat_spec=feat_spec,
data_row_class=data_row_class,
)
def construct_single_input_tokens_and_segment_ids(
input_tokens: List[str], tokenizer, feat_spec: FeaturizationSpec
):
special_tokens_count = 2 # CLS, SEP
(input_tokens,) = truncate_sequences(
tokens_ls=[input_tokens], max_length=feat_spec.max_seq_length - special_tokens_count,
)
return add_cls_token(
unpadded_tokens=input_tokens + [tokenizer.sep_token],
unpadded_segment_ids=(
[feat_spec.sequence_a_segment_id]
+ [feat_spec.sequence_a_segment_id] * (len(input_tokens))
),
tokenizer=tokenizer,
feat_spec=feat_spec,
)
def construct_double_input_tokens_and_segment_ids(
input_tokens_a: List[str], input_tokens_b: List[str], tokenizer, feat_spec: FeaturizationSpec
):
"""Create token and segment id sequences, apply truncation, add separator and class tokens.
Args:
input_tokens_a (List[str]): sequence of tokens in segment a.
input_tokens_b (List[str]): sequence of tokens in segment b.
tokenizer:
feat_spec (FeaturizationSpec): Tokenization-related metadata.
Returns:
UnpaddedInputs: unpadded inputs with truncation applied and special tokens appended.
"""
if feat_spec.sep_token_extra:
maybe_extra_sep = [tokenizer.sep_token]
maybe_extra_sep_segment_id = [feat_spec.sequence_a_segment_id]
special_tokens_count = 4 # CLS, SEP-SEP, SEP
else:
maybe_extra_sep = []
maybe_extra_sep_segment_id = []
special_tokens_count = 3 # CLS, SEP, SEP
input_tokens_a, input_tokens_b = truncate_sequences(
tokens_ls=[input_tokens_a, input_tokens_b],
max_length=feat_spec.max_seq_length - special_tokens_count,
)
unpadded_tokens = (
input_tokens_a
+ [tokenizer.sep_token]
+ maybe_extra_sep
+ input_tokens_b
+ [tokenizer.sep_token]
)
unpadded_segment_ids = (
[feat_spec.sequence_a_segment_id] * len(input_tokens_a)
+ [feat_spec.sequence_a_segment_id]
+ maybe_extra_sep_segment_id
+ [feat_spec.sequence_b_segment_id] * len(input_tokens_b)
+ [feat_spec.sequence_b_segment_id]
)
return add_cls_token(
unpadded_tokens=unpadded_tokens,
unpadded_segment_ids=unpadded_segment_ids,
tokenizer=tokenizer,
feat_spec=feat_spec,
)
def construct_double_input_amr_concepts_and_relations(
input_amr_concepts_a: List[List[str]],
input_amr_relation_ids_a: List[Tuple[int, int]],
input_amr_relation_labels_a: List[List[str]],
input_amr_concepts_b: List[List[str]],
input_amr_relation_ids_b: List[Tuple[int, int]],
input_amr_relation_labels_b: List[List[str]],
tokenizer,
feat_spec: FeaturizationSpec,
):
""" Merge concepts, relation ids and labels from 2 AMRs, apply truncation.
Args:
input_amr_concepts_a (List[List[str]]): sequence of sub tokens of concepts in AMR a.
input_amr_relation_ids_a (List[(int, int)]):
sequence of (source, target) based on concept indices for relations in AMR a.
input_amr_relation_labels_a (List[List[str]]): sequence of sub tokens of relation labels in AMR a.
input_amr_concepts_b (List[List[str]]): sequence of sub tokens of concepts in AMR b.
input_amr_relation_ids_b (List[(int, int)]):
sequence of (source, target) based on concept indices for relations in AMR b.
input_amr_relation_labels_b (List[List[str]]): sequence of sub tokens of relation labels in AMR b.
tokenizer:
feat_spec (FeaturizationSpec): Tokenization-related metadata.
Returns:
UnpaddedAMRInputs: unpadded merged AMR inputs.
"""
# TODO: 1. truncate sub-token length; 2. truncate concept length, updating relation ids and labels accordingly; 3. merge the two AMRs, updating relation ids accordingly
input_amr_concepts_a = sum([truncate_sequences(tokens_ls=[concept], max_length=MAX_SUB_TOKEN_LENGTH)
for concept in input_amr_concepts_a], [])
input_amr_concepts_b = sum([truncate_sequences(tokens_ls=[concept], max_length=MAX_SUB_TOKEN_LENGTH)
for concept in input_amr_concepts_b], [])
input_amr_relation_labels_a = sum([truncate_sequences(tokens_ls=[label], max_length=MAX_SUB_TOKEN_LENGTH)
for label in input_amr_relation_labels_a], [])
input_amr_relation_labels_b = sum([truncate_sequences(tokens_ls=[label], max_length=MAX_SUB_TOKEN_LENGTH)
for label in input_amr_relation_labels_b], [])
input_amr_concepts_a, input_amr_concepts_b = truncate_sequences(
tokens_ls=[input_amr_concepts_a, input_amr_concepts_b], max_length=MAX_CONCEPT_LENGTH)
truncate_input_amr_relation_ids_a = []
truncate_input_amr_relation_labels_a = []
truncate_input_amr_relation_ids_b = []
truncate_input_amr_relation_labels_b = []
length_a = len(input_amr_concepts_a)
length_b = len(input_amr_concepts_b)
for relation_id, relation_label in zip(input_amr_relation_ids_a, input_amr_relation_labels_a):
source, target = relation_id
if source < length_a and target < length_a:
truncate_input_amr_relation_ids_a.append(relation_id)
truncate_input_amr_relation_labels_a.append(relation_label)
for relation_id, relation_label in zip(input_amr_relation_ids_b, input_amr_relation_labels_b):
source, target = relation_id
if source < length_b and target < length_b:
truncate_input_amr_relation_ids_b.append((source + length_a, target + length_a))
truncate_input_amr_relation_labels_b.append(relation_label)
truncate_input_amr_relation_ids_a, truncate_input_amr_relation_ids_b = truncate_sequences(
tokens_ls=[truncate_input_amr_relation_ids_a, truncate_input_amr_relation_ids_b],
max_length=MAX_RELATION_LENGTH)
truncate_input_amr_relation_labels_a, truncate_input_amr_relation_labels_b = truncate_sequences(
tokens_ls=[truncate_input_amr_relation_labels_a, truncate_input_amr_relation_labels_b],
max_length=MAX_RELATION_LENGTH)
return UnpaddedAMRInputs(
unpadded_concepts=input_amr_concepts_a + input_amr_concepts_b,
unpadded_relation_ids=truncate_input_amr_relation_ids_a + truncate_input_amr_relation_ids_b,
unpadded_relation_labels=truncate_input_amr_relation_labels_a + truncate_input_amr_relation_labels_b,
)
def add_cls_token(
unpadded_tokens: List[str],
unpadded_segment_ids: List[int],
tokenizer,
feat_spec: FeaturizationSpec,
):
"""Add class token to unpadded inputs.
Applies class token to end (or start) of unpadded inputs depending on FeaturizationSpec.
Args:
unpadded_tokens (List[str]): sequence of unpadded token strings.
unpadded_segment_ids (List[int]): sequence of unpadded segment ids.
tokenizer:
feat_spec (FeaturizationSpec): Tokenization-related metadata.
Returns:
UnpaddedInputs: unpadded inputs with class token appended.
"""
if feat_spec.cls_token_at_end:
return UnpaddedInputs(
unpadded_tokens=unpadded_tokens + [tokenizer.cls_token],
unpadded_segment_ids=unpadded_segment_ids + [feat_spec.cls_token_segment_id],
cls_offset=0,
)
else:
return UnpaddedInputs(
unpadded_tokens=[tokenizer.cls_token] + unpadded_tokens,
unpadded_segment_ids=[feat_spec.cls_token_segment_id] + unpadded_segment_ids,
cls_offset=1,
)
def create_generic_data_row_from_tokens_and_segments(
guid: str,
unpadded_tokens: List[str],
unpadded_segment_ids: List[int],
label_id: int,
tokenizer,
feat_spec: FeaturizationSpec,
data_row_class,
):
"""Creates an InputSet and wraps the InputSet into a DataRow class.
Args:
guid (str): human-readable identifier (for interpretability and debugging).
unpadded_tokens (List[str]): sequence of unpadded token strings.
unpadded_segment_ids (List[int]): sequence of unpadded segment ids.
label_id (int): int representing the label for the task.
tokenizer:
feat_spec (FeaturizationSpec): Tokenization-related metadata.
data_row_class (DataRow): data row class to wrap and return the inputs.
Returns:
DataRow: data row class containing model inputs.
"""
input_set = create_input_set_from_tokens_and_segments(
unpadded_tokens=unpadded_tokens,
unpadded_segment_ids=unpadded_segment_ids,
tokenizer=tokenizer,
feat_spec=feat_spec,
)
return data_row_class(
guid=guid,
input_ids=np.array(input_set.input_ids),
input_mask=np.array(input_set.input_mask),
segment_ids=np.array(input_set.segment_ids),
label_id=label_id,
tokens=unpadded_tokens,
)
def create_generic_data_row_with_amr(
guid: str,
unpadded_tokens: List[str],
unpadded_segment_ids: List[int],
unpadded_concepts: List[List[str]],
unpadded_relation_ids: List[Tuple[int, int]],
unpadded_relation_labels: List[List[str]],
label_id: int,
tokenizer,
feat_spec: FeaturizationSpec,
data_row_class,
):
"""Creates an InputSet and wraps the InputSet into a DataRow class.
Args:
guid (str): human-readable identifier (for interpretability and debugging).
unpadded_tokens (List[str]): sequence of unpadded token strings.
unpadded_segment_ids (List[int]): sequence of unpadded segment ids.
unpadded_concepts (List[List[str]]): sequence of unpadded sub tokens of AMR concepts.
unpadded_relation_ids (List[(int, int)]): sequence of unpadded (source, target)
based on concept indices for AMR relations.
unpadded_relation_labels (List[List[str]]): sequence of unpadded sub tokens of AMR relation labels.
label_id (int): int representing the label for the task.
tokenizer:
feat_spec (FeaturizationSpec): Tokenization-related metadata.
data_row_class (DataRow): data row class to wrap and return the inputs.
Returns:
DataRow: data row class containing model inputs.
"""
input_set = create_input_set_from_tokens_and_segments(
unpadded_tokens=unpadded_tokens,
unpadded_segment_ids=unpadded_segment_ids,
tokenizer=tokenizer,
feat_spec=feat_spec,
)
amr_input_set = create_amr_input_set(
unpadded_concepts=unpadded_concepts,
unpadded_relation_ids=unpadded_relation_ids,
unpadded_relation_labels=unpadded_relation_labels,
tokenizer=tokenizer,
feat_spec=feat_spec,
)
return data_row_class(
guid=guid,
input_ids=np.array(input_set.input_ids),
input_mask=np.array(input_set.input_mask),
segment_ids=np.array(input_set.segment_ids),
input_concept_ids=np.array(amr_input_set.concept_sub_token_ids),
input_concept_mask=np.array(amr_input_set.concept_sub_token_mask),
input_relation_ids=np.array(amr_input_set.relation_ids),
input_relation_id_mask=np.array(amr_input_set.relation_id_mask),
input_relation_label_ids=np.array(amr_input_set.relation_label_sub_token_ids),
input_relation_label_mask=np.array(amr_input_set.relation_label_sub_token_mask),
label_id=label_id,
tokens=unpadded_tokens,
)
def create_input_set_from_tokens_and_segments(
unpadded_tokens: List[str],
unpadded_segment_ids: List[int],
tokenizer,
feat_spec: FeaturizationSpec,
):
"""Create padded inputs for model.
Converts tokens to ids, makes input set (input ids, input mask, and segment ids), adds padding.
Args:
unpadded_tokens (List[str]): unpadded list of token strings.
unpadded_segment_ids (List[int]): unpadded list of segment ids.
tokenizer:
feat_spec (FeaturizationSpec): Tokenization-related metadata.
Returns:
Padded input set.
"""
assert len(unpadded_tokens) == len(unpadded_segment_ids)
input_ids = tokenizer.convert_tokens_to_ids(unpadded_tokens)
input_mask = [1] * len(input_ids)
input_set = pad_features_with_feat_spec(
input_ids=input_ids,
input_mask=input_mask,
unpadded_segment_ids=unpadded_segment_ids,
feat_spec=feat_spec,
)
return input_set
def create_amr_input_set(
unpadded_concepts: List[List[str]],
unpadded_relation_ids: List[Tuple[int, int]],
unpadded_relation_labels: List[List[str]],
tokenizer,
feat_spec: FeaturizationSpec,
):
"""Create padded inputs for model.
Converts tokens to ids, makes input set (input ids, input mask, and segment ids), adds padding.
Args:
unpadded_tokens (List[str]): unpadded list of token strings.
unpadded_segment_ids (List[int]): unpadded list of segment ids.
tokenizer:
feat_spec (FeaturizationSpec): Tokenization-related metadata.
Returns:
Padded amr input set.
"""
# TODO: 1. convert_tokens_to_ids; 2. apply 2-D padding to concepts and relation labels and generate masks
assert len(unpadded_relation_ids) == len(unpadded_relation_labels)
concept_sub_token_ids = [tokenizer.convert_tokens_to_ids(concept_sub_tokens)
for concept_sub_tokens in unpadded_concepts]
relation_label_sub_token_ids = [tokenizer.convert_tokens_to_ids(relation_label_sub_tokens)
for relation_label_sub_tokens in unpadded_relation_labels]
concept_sub_token_mask = [pad_to_max_seq_length(ls=[1] * len(sub_tokens),
max_seq_length=MAX_SUB_TOKEN_LENGTH,
pad_right=not feat_spec.pad_on_left)
for sub_tokens in concept_sub_token_ids]
concept_sub_token_ids = [pad_to_max_seq_length(ls=sub_tokens,
max_seq_length=MAX_SUB_TOKEN_LENGTH,
pad_idx=feat_spec.pad_token_id,
pad_right=not feat_spec.pad_on_left)
for sub_tokens in concept_sub_token_ids]
concept_sub_token_mask = pad_to_max_seq_length(ls=concept_sub_token_mask,
max_seq_length=feat_spec.max_seq_length,
pad_idx=[0] * MAX_SUB_TOKEN_LENGTH,
pad_right=not feat_spec.pad_on_left)
concept_sub_token_ids = pad_to_max_seq_length(ls=concept_sub_token_ids,
max_seq_length=feat_spec.max_seq_length,
pad_idx=[feat_spec.pad_token_id] * MAX_SUB_TOKEN_LENGTH,
pad_right=not feat_spec.pad_on_left)
relation_id_mask = pad_to_max_seq_length(ls=[1] * len(unpadded_relation_ids),
max_seq_length=feat_spec.max_seq_length,
pad_right=not feat_spec.pad_on_left)
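# (-1, -1) sentinels keep padded relation ids distinct from real concept indices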
relation_ids = pad_to_max_seq_length(ls=unpadded_relation_ids,
max_seq_length=feat_spec.max_seq_length,
pad_idx=[-1, -1],
pad_right=not feat_spec.pad_on_left)
relation_label_sub_token_mask = [pad_to_max_seq_length(ls=[1] * len(sub_tokens),
max_seq_length=MAX_SUB_TOKEN_LENGTH,
pad_right=not feat_spec.pad_on_left)
for sub_tokens in relation_label_sub_token_ids]
relation_label_sub_token_ids = [pad_to_max_seq_length(ls=sub_tokens,
max_seq_length=MAX_SUB_TOKEN_LENGTH,
pad_idx=feat_spec.pad_token_id,
pad_right=not feat_spec.pad_on_left)
for sub_tokens in relation_label_sub_token_ids]
relation_label_sub_token_mask = pad_to_max_seq_length(ls=relation_label_sub_token_mask,
max_seq_length=feat_spec.max_seq_length,
pad_idx=[0] * MAX_SUB_TOKEN_LENGTH,
pad_right=not feat_spec.pad_on_left)
relation_label_sub_token_ids = pad_to_max_seq_length(ls=relation_label_sub_token_ids,
max_seq_length=feat_spec.max_seq_length,
pad_idx=[feat_spec.pad_token_id] * MAX_SUB_TOKEN_LENGTH,
pad_right=not feat_spec.pad_on_left)
return AMRInputSet(
concept_sub_token_ids=concept_sub_token_ids,
concept_sub_token_mask=concept_sub_token_mask,
relation_ids=relation_ids,
relation_id_mask=relation_id_mask,
relation_label_sub_token_ids=relation_label_sub_token_ids,
relation_label_sub_token_mask=relation_label_sub_token_mask,
)
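# Shape sketch (inferred from the padding calls above): concept_sub_token_ids and
# relation_label_sub_token_ids come out as max_seq_length x MAX_SUB_TOKEN_LENGTH
# nested lists, relation_ids as a max_seq_length list of [source, target] pairs
# padded with [-1, -1], and each *_mask mirrors its ids with 1 for real entries
# and 0 for padding.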
def pad_features_with_feat_spec(
input_ids: List[int],
input_mask: List[int],
unpadded_segment_ids: List[int],
feat_spec: FeaturizationSpec,
):
"""Apply padding to feature set according to settings from FeaturizationSpec.
Args:
input_ids (List[int]): sequence of unpadded input ids.
input_mask (List[int]): unpadded input mask sequence.
unpadded_segment_ids (List[int]): sequence of unpadded segment ids.
feat_spec (FeaturizationSpec): Tokenization-related metadata.
Returns:
InputSet: input set containing padded input ids, input mask, and segment ids.
"""
return InputSet(
input_ids=pad_single_with_feat_spec(
ls=input_ids, feat_spec=feat_spec, pad_idx=feat_spec.pad_token_id,
),
input_mask=pad_single_with_feat_spec(
ls=input_mask, feat_spec=feat_spec, pad_idx=feat_spec.pad_token_mask_id,
),
segment_ids=pad_single_with_feat_spec(
ls=unpadded_segment_ids, feat_spec=feat_spec, pad_idx=feat_spec.pad_token_segment_id,
),
)
def pad_single_with_feat_spec(
ls: List[int], feat_spec: FeaturizationSpec, pad_idx: int, check=True
):
"""Apply padding to sequence according to settings from FeaturizationSpec.
Args:
ls (List[int]): sequence to pad.
feat_spec (FeaturizationSpec): metadata containing max sequence length and padding settings.
pad_idx (int): element to use for padding.
check (bool): if True, assert that the padded length does not exceed the max sequence length.
Returns:
Sequence with padding applied.
"""
return pad_to_max_seq_length(
ls=ls,
max_seq_length=feat_spec.max_seq_length,
pad_idx=pad_idx,
pad_right=not feat_spec.pad_on_left,
check=check,
)
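# Illustrative example (hypothetical values): pad_to_max_seq_length(ls=[5, 6],
# max_seq_length=4, pad_idx=0, pad_right=True) is expected to return [5, 6, 0, 0].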
def labels_to_bimap(labels):
"""Creates mappings from label to id, and from id to label. See details in docs for BiMap.
Args:
labels: sequence of labels to map to ids.
Returns:
Tuple[Dict, Dict]: mappings from labels to ids, and ids to labels.
"""
label2id, id2label = BiMap(a=labels, b=list(range(len(labels)))).get_maps()
return label2id, id2label | jiant/tasks/lib/templates/shared.py | 0.900639 | 0.492615 |
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import serializers
from teprunner.models import Project, EnvVar, Fixture, Case, CaseResult, Plan, PlanCase, PlanResult
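# Convention note: the camelCase attributes below are the names exposed by the API,
# while source="..." maps each one to the corresponding snake_case model field.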
class ProjectSerializer(serializers.ModelSerializer):
id = serializers.CharField(required=False)
envConfig = serializers.CharField(source="env_config")
gitRepository = serializers.CharField(source="git_repository", required=False, allow_blank=True)
gitBranch = serializers.CharField(source="git_branch", required=False, allow_blank=True)
lastSyncTime = serializers.SerializerMethodField(required=False)
class Meta:
model = Project
fields = ["id", "name", "envConfig", "gitRepository", "gitBranch", "lastSyncTime"]
def get_lastSyncTime(self, instance):
return instance.last_sync_time.strftime("%Y-%m-%d %H:%M:%S") if instance.last_sync_time else ""
class EnvVarSerializer(serializers.ModelSerializer):
id = serializers.CharField(required=False)
curProjectId = serializers.CharField(source="project_id")
curEnvName = serializers.CharField(source="env_name")
class Meta:
model = EnvVar
fields = ["id", "name", "value", "desc", "curProjectId", "curEnvName"]
class FixtureSerializer(serializers.ModelSerializer):
id = serializers.CharField(required=False)
creatorNickname = serializers.CharField(source="creator_nickname")
curProjectId = serializers.CharField(source="project_id")
class Meta:
model = Fixture
fields = ["id", "name", "desc", "code", "creatorNickname", "curProjectId"]
class CaseSerializer(serializers.ModelSerializer):
id = serializers.CharField(required=False)
creatorNickname = serializers.CharField(source="creator_nickname")
projectId = serializers.CharField(source="project_id")
filename = serializers.CharField(required=False)
source = serializers.CharField(required=False)
class Meta:
model = Case
fields = ["id", "desc", "code", "creatorNickname", "projectId", "filename", "source"]
class CaseListSerializer(serializers.ModelSerializer):
id = serializers.CharField(required=False)
creatorNickname = serializers.CharField(source="creator_nickname")
projectId = serializers.CharField(source="project_id")
result = serializers.SerializerMethodField(required=False)
elapsed = serializers.SerializerMethodField(required=False)
runEnv = serializers.SerializerMethodField(required=False)
runUserNickname = serializers.SerializerMethodField(required=False)
runTime = serializers.SerializerMethodField(required=False)
class Meta:
model = Case
fields = ["id", "desc", "code", "creatorNickname", "projectId",
"result", "elapsed", "runEnv", "runUserNickname", "runTime", "source"]
def _latest_case_result(self, instance):
# Most recent CaseResult for this case, or None if the case has never run.
return CaseResult.objects.filter(case_id=instance.id).order_by('-run_time').first()
def get_result(self, instance):
case_result = self._latest_case_result(instance)
return case_result.result if case_result else ""
def get_elapsed(self, instance):
case_result = self._latest_case_result(instance)
return case_result.elapsed if case_result else ""
def get_runEnv(self, instance):
case_result = self._latest_case_result(instance)
return case_result.run_env if case_result else ""
def get_runUserNickname(self, instance):
case_result = self._latest_case_result(instance)
return case_result.run_user_nickname if case_result else ""
def get_runTime(self, instance):
case_result = self._latest_case_result(instance)
return case_result.run_time.strftime("%Y-%m-%d %H:%M:%S") if case_result else ""
class CaseResultSerializer(serializers.ModelSerializer):
caseId = serializers.IntegerField(source="case_id")
runEnv = serializers.CharField(source="run_env")
runUserNickname = serializers.CharField(source="run_user_nickname")
runTime = serializers.SerializerMethodField(required=False)
class Meta:
model = CaseResult
fields = ["caseId", "result", "elapsed", "output", "runEnv", "runUserNickname", "runTime"]
def get_runTime(self, instance):
return instance.run_time.strftime("%Y-%m-%d %H:%M:%S")
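# Usage sketch (hypothetical data): serializing the most recent result for a case
# could look like
#   CaseResultSerializer(CaseResult.objects.filter(case_id=1).order_by('-run_time').first()).data
# which yields the camelCase keys listed in Meta.fields (caseId, runEnv, runTime, ...).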
class PlanSerializer(serializers.ModelSerializer):
id = serializers.CharField(required=False)
projectId = serializers.CharField(source="project_id")
taskStatus = serializers.CharField(source="task_status")
taskCrontab = serializers.CharField(source="task_crontab", required=False, allow_blank=True)
taskRunEnv = serializers.CharField(source="task_run_env", required=False, allow_blank=True)
caseNum = serializers.SerializerMethodField(required=False)
passedNum = serializers.SerializerMethodField(required=False)
failedNum = serializers.SerializerMethodField(required=False)
errorNum = serializers.SerializerMethodField(required=False)
elapsed = serializers.SerializerMethodField(required=False)
runEnv = serializers.SerializerMethodField(required=False)
runUserNickname = serializers.SerializerMethodField(required=False)
runTime = serializers.SerializerMethodField(required=False)
class Meta:
model = Plan
fields = ["id", "name", "projectId", "taskStatus", "taskCrontab", "taskRunEnv",
"caseNum", "passedNum", "failedNum", "errorNum", "elapsed", "runEnv", "runUserNickname", "runTime"]
def get_caseNum(self, instance):
plan_id = instance.id
try:
case_num = PlanCase.objects.filter(plan_id=plan_id).count()
except ObjectDoesNotExist:
return ""
return str(case_num)
def get_passedNum(self, instance):
plan_id = instance.id
try:
passed_num = 0
for plan_result in PlanResult.objects.filter(plan_id=plan_id):
if ("passed" in plan_result.result
and "failed" not in plan_result.result
and "error" not in plan_result.result):
passed_num += 1
except ObjectDoesNotExist:
return ""
return str(passed_num)
def get_failedNum(self, instance):
plan_id = instance.id
try:
failed_num = 0
for plan_result in PlanResult.objects.filter(plan_id=plan_id):
if "failed" in plan_result.result and "error" not in plan_result.result:
failed_num += 1
except ObjectDoesNotExist:
return ""
return str(failed_num)
def get_errorNum(self, instance):
plan_id = instance.id
try:
error_num = 0
for plan_result in PlanResult.objects.filter(plan_id=plan_id):
if "error" in plan_result.result:
error_num += 1
except ObjectDoesNotExist:
return ""
return str(error_num)
def get_elapsed(self, instance):
plan_id = instance.id
try:
total_elapsed = 0
for plan_result in PlanResult.objects.filter(plan_id=plan_id):
total_elapsed += float(plan_result.elapsed.replace("s", ""))
except ObjectDoesNotExist:
return ""
return str(total_elapsed)[:4] + "s"
def get_runEnv(self, instance):
plan_id = instance.id
try:
plan_results = PlanResult.objects.filter(plan_id=plan_id)
except ObjectDoesNotExist:
return ""
run_env = ""
if plan_results:
run_env = plan_results[0].run_env
return run_env
def get_runUserNickname(self, instance):
plan_id = instance.id
try:
plan_results = PlanResult.objects.filter(plan_id=plan_id)
except ObjectDoesNotExist:
return ""
run_user_nickname = ""
if plan_results:
run_user_nickname = plan_results[0].run_user_nickname
return run_user_nickname
def get_runTime(self, instance):
plan_id = instance.id
try:
plan_results = PlanResult.objects.filter(plan_id=plan_id)
except ObjectDoesNotExist:
return ""
run_time = ""
if plan_results:
run_time = plan_results.order_by('-run_time')[0].run_time.strftime("%Y-%m-%d %H:%M:%S")
return run_time
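# Note: QuerySet.filter() never raises ObjectDoesNotExist (only .get() does), so the
# try/except blocks in the getters above are purely defensive; an empty queryset is
# already handled by the falsy checks.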
class PlanCaseSerializer(serializers.ModelSerializer):
planId = serializers.CharField(source="plan_id")
caseId = serializers.CharField(source="case_id")
caseDesc = serializers.SerializerMethodField(required=False)
caseCreatorNickname = serializers.SerializerMethodField(required=False)
class Meta:
model = PlanCase
fields = ["planId", "caseId", "caseDesc", "caseCreatorNickname"]
def get_caseDesc(self, instance):
return Case.objects.get(id=instance.case_id).desc
def get_caseCreatorNickname(self, instance):
return Case.objects.get(id=instance.case_id).creator_nickname
class PlanResultSerializer(serializers.ModelSerializer):
planId = serializers.CharField(source="plan_id")
caseId = serializers.CharField(source="case_id")
caseDesc = serializers.SerializerMethodField(required=False)
caseCreatorNickname = serializers.SerializerMethodField(required=False)
runEnv = serializers.CharField(source="run_env")
runUserNickname = serializers.CharField(source="run_user_nickname")
runTime = serializers.SerializerMethodField()
class Meta:
model = PlanResult
fields = ["planId", "caseId", "caseDesc", "caseCreatorNickname",
"result", "elapsed", "output", "runEnv", "runUserNickname", "runTime"]
def get_caseDesc(self, instance):
return Case.objects.get(id=instance.case_id).desc
def get_caseCreatorNickname(self, instance):
return Case.objects.get(id=instance.case_id).creator_nickname
def get_runTime(self, instance):
return instance.run_time.strftime("%Y-%m-%d %H:%M:%S")
def to_representation(self, obj):
ret = super(PlanResultSerializer, self).to_representation(obj)
ret.pop('output')
return ret | teprunner/serializers.py | 0.487063 | 0.117775 |
from sqlalchemy import Column, Integer, Float, TIMESTAMP, Boolean, create_engine, text
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class RawData(Base):
"""AirSim RawData"""
__tablename__ = "rawdatas"
id = Column(Integer, primary_key=True)
gear = Column(Integer, nullable=False)
handbreak = Column(Boolean, nullable=False)
maxrpm = Column(Float, nullable=False)
rpm = Column(Float, nullable=False)
speed = Column(Float, nullable=False)
timestamp = Column(TIMESTAMP(True), nullable=False,
server_default=text('NOW()'))
pos_x = Column(Float, nullable=False)
pos_y = Column(Float, nullable=False)
pos_z = Column(Float, nullable=False)
aa_x = Column(Float, nullable=False)
aa_y = Column(Float, nullable=False)
aa_z = Column(Float, nullable=False)
av_x = Column(Float, nullable=False)
av_y = Column(Float, nullable=False)
av_z = Column(Float, nullable=False)
la_x = Column(Float, nullable=False)
la_y = Column(Float, nullable=False)
la_z = Column(Float, nullable=False)
lv_x = Column(Float, nullable=False)
lv_y = Column(Float, nullable=False)
lv_z = Column(Float, nullable=False)
def __init__(self, gear, handbreak, maxrpm, rpm, speed, pos_x, pos_y, pos_z, aa_x, aa_y, aa_z, av_x,
av_y, av_z, la_x, la_y, la_z, lv_x, lv_y, lv_z):
Base.__init__(self, gear=gear, handbreak=handbreak, maxrpm=maxrpm, rpm=rpm, speed=speed, pos_x=pos_x,
pos_y=pos_y, pos_z=pos_z, aa_x=aa_x, aa_y=aa_y, aa_z=aa_z,
av_x=av_x, av_y=av_y, av_z=av_z, la_x=la_x, la_y=la_y, la_z=la_z, lv_x=lv_x, lv_y=lv_y,
lv_z=lv_z)
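# Note: the declarative base already generates a keyword constructor, so the explicit
# __init__ above simply forwards the same keyword arguments to Base.__init__.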
engine = create_engine(
'mysql+pymysql://root:<EMAIL>ying_@cdb-rokmsrpe.bj.tencentcdb.com:10033/CarSim')
DBSession = sessionmaker(bind=engine)
def getData():
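# Loads every row, then returns the last 40 in query order; there is no explicit
# ORDER BY, so which rows count as "last" is database-dependent.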
session = DBSession()
raw_data = session.query(RawData).all()
return raw_data[-40:] if len(raw_data) > 40 else raw_data | app/model/utils.py | 0.525369 | 0.212845 |
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.image import Image
from kivy.clock import Clock
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.core.window import Window
from functools import partial
from random import randint, randrange
from models import *
WIDTH = Window.size[0] / 100
HEIGHT = Window.size[1] / 100
FPS = 0.01
VELOCITY = 1000
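# WIDTH/HEIGHT are 1% of the window size, used as coordinate units below.
# FPS is the Clock interval in seconds (0.01 ~= 100 updates/s), and VELOCITY
# scales the per-tick movement deltas: larger values mean slower motion.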
class BackGround(Widget):
def __init__(self, **kwargs):
super(BackGround, self).__init__(**kwargs)
self.rect = Image(source='graphics/road_mountain_summer/background.png', size=self.size, pos=self.pos,
allow_stretch=True, keep_ratio=False)
self.bg_rect = Image(source='graphics/road_mountain_summer/road.zip', anim_delay=VELOCITY / 1000 - 0.95,
size=self.size, pos=self.pos, allow_stretch=True, keep_ratio=False)
self.add_widget(self.rect)
self.add_widget(self.bg_rect)
class GameScreen(Screen):
def __init__(self, to_lose, **kwargs):
super(GameScreen, self).__init__(**kwargs)
self.score = 0
self.car = Car(pos=(WIDTH * 50, HEIGHT * 5), size_hint=(0.1, 0.17))
self.main_layout = FloatLayout(size_hint=(1, 1))
self.start_size = (0.1, 0.1)
self.offset = [WIDTH * 28.76, 0, WIDTH * -28.76]
self.offset_objects = [WIDTH * 43, WIDTH * -43]
self.start_coord = [(WIDTH * 42, HEIGHT * 62.22), (WIDTH * 50, HEIGHT * 62.22),
(WIDTH * 58, HEIGHT * 62.22)] # starting coordinates for obstacle spawns
self.start_coord_objects = [(WIDTH * 28, HEIGHT * 62.22), (WIDTH * 72, HEIGHT * 62.22)]
self.main_layout.add_widget(BackGround(pos=(0, 0), size=(WIDTH * 100, HEIGHT * 100)))
self.to_lose = to_lose
self.main_layout.add_widget(self.car)
btn_left = Button(size_hint=(0.5, 1), pos=(WIDTH * 0, HEIGHT * 0),
background_color=(0, 0, 0, 0), on_press=self.on_press_l)
btn_left.bind(state=self.move_to_left_state)
btn_right = Button(size_hint=(0.5, 1), pos=(WIDTH * 50, HEIGHT * 0),
background_color=(0, 0, 0, 0), on_press=self.on_press_r)
btn_right.bind(state=self.move_to_right_state)
self.main_layout.add_widget(btn_left)
self.main_layout.add_widget(btn_right)
self.score_label = Label(text=str(self.score), size_hint=(0.2, 0.2), pos=(WIDTH * 40, HEIGHT * 80))
self.main_layout.add_widget(self.score_label)
self.add_widget(self.main_layout)
def build(self):
global VELOCITY
self.score = 0
VELOCITY = 1000 # variable controlling the movement speed
self.obstructions = []
self.obstructions.append(Cow(pos=self.start_coord[0],
size_hint=self.start_size))
self.obstructions.append(Tractor(pos=self.start_coord[0],
size_hint=self.start_size))
self.spawner()
self.spawner_bckgd_objects()
Clock.schedule_interval(self.collision, FPS)
self.acc_fun = Clock.schedule_interval(self.acceleration, FPS)
self.score_label.text = str(self.score)
def collision(self, *args):
for obs in self.obstructions:
if self.car.collide_widget(obs):
print('game over')
for obs1 in self.obstructions:
self.main_layout.remove_widget(obs1)
for obj in self.bckgd_objects:
self.main_layout.remove_widget(obj)
for obj in self.right_bckgd_objects:
self.main_layout.remove_widget(obj)
for fun in self.moves:
fun.cancel()
self.acc_fun.cancel()
self.to_lose()
return False
# Functions below handle player movement
def on_press_r(self, instance):
self.func_r = Clock.schedule_interval(self.move_to_right, FPS)
self.car.rect.source = 'graphics/car0_right.png'
def on_press_l(self, instance):
self.func_l = Clock.schedule_interval(self.move_to_left, FPS)
self.car.rect.source = 'graphics/car0_left.png'
def move_to_right_state(self, instance, state):
if state != 'down':
self.func_r.cancel()
self.car.rect.source = 'graphics/car0.png'
def move_to_left_state(self, instance, state):
if state != 'down':
self.func_l.cancel()
self.car.rect.source = 'graphics/car0.png'
def move_to_right(self, value):
if self.car.pos[0] >= 80 * WIDTH:
return False
self.car.pos[0] += WIDTH
def move_to_left(self, value):
if self.car.pos[0] <= 10 * WIDTH:
return False
self.car.pos[0] -= WIDTH
# Functions below handle obstacle spawning and movement
def spawner(self, *args):
"""функция управляет спавном и движением препятсвий"""
Clock.schedule_once(self.spawn)
self.moves = []
for obs in self.obstructions:
self.moves.append(Clock.schedule_interval(partial(self.move, obs), FPS))
def spawn(self, *args):
"""стартовый спавн препятсвий"""
for obs in self.obstructions:
self.main_layout.add_widget(obs)
def move(self, obstruction, *args):
"""движение препятвий"""
if obstruction.pos[1] <= - (HEIGHT * randrange(30, 100, 3)): # Костыль
obstruction.num_way = randint(0, 2)
obstruction.pos = self.start_coord[obstruction.num_way]
obstruction.pos[0] += randrange(-4, 4, 1) * WIDTH
obstruction.size_hint = self.start_size
obstruction.pos[1] -= HEIGHT * 70.22 / VELOCITY
obstruction.pos[0] -= self.offset[obstruction.num_way] / VELOCITY
obstruction.size_hint[0] += 0.1 / VELOCITY
obstruction.size_hint[1] += 0.1 / VELOCITY
obstruction.pos[0] -= WIDTH * 5 / VELOCITY
obstruction.pos[1] -= HEIGHT * 5 / VELOCITY
def spawner_bckgd_objects(self, *args):
self.bckgd_objects = []
self.right_bckgd_objects = []
x = 0
for _ in range(4):
self.bckgd_objects.append(BackgroundObjects(size_hint=(0.1, 0.1), pos=self.start_coord_objects[0]))
for obj in self.bckgd_objects:
Clock.schedule_once(partial(self.spawn_bckgd_objects, obj), x)
Clock.schedule_once(partial(self.move_bckgd_objects, obj), x)
x += 4
def spawn_bckgd_objects(self, obj, *args):
self.main_layout.add_widget(obj)
def move_bckgd_objects(self, obj, *args):
self.moves.append(Clock.schedule_interval(partial(self.move_objects, obj), FPS))
def move_objects(self, obstruction, *args):
if obstruction.pos[1] <= - (HEIGHT * 20):
obstruction.num_way = randint(0, 1)
obstruction.num_sprite = randint(0, 2)
if obstruction.num_sprite == 1:
obstruction.rect.source = 'graphics/road_mountain_summer/tree.png'
elif obstruction.num_sprite == 2:
obstruction.rect.source = 'graphics/road_mountain_summer/tree1.png'
else:
obstruction.rect.source = 'graphics/road_mountain_summer/bush.png'
obstruction.pos = self.start_coord_objects[obstruction.num_way]
obstruction.pos[0] += randrange(-4, 4, 1) * WIDTH
obstruction.size_hint = self.start_size
self.score += 1
self.score_label.text = str(self.score)
obstruction.pos[1] -= HEIGHT * 70.22 / VELOCITY
obstruction.pos[0] -= self.offset_objects[obstruction.num_way] / VELOCITY
obstruction.size_hint[0] += 0.1 / VELOCITY
obstruction.size_hint[1] += 0.1 / VELOCITY
obstruction.pos[0] -= WIDTH * 5 / VELOCITY
obstruction.pos[1] -= HEIGHT * 5 / VELOCITY
def acceleration(self, *args):
global VELOCITY
acc = 1.0003 # TODO: tune to a suitable value
VELOCITY /= acc
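# Each tick VELOCITY shrinks by the factor acc; since all movement deltas divide
# by VELOCITY, obstacles and scenery gradually speed up as the game runs.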
class MainScreen(Screen):
def __init__(self, to_game, **kwargs):
super(MainScreen, self).__init__(**kwargs)
main_layout = FloatLayout()
main_layout.add_widget(Button(text='play', size_hint=(.2, .2), pos_hint={'x': .4, 'y': .4}, on_press=to_game))
self.add_widget(main_layout)
class LoseScreen(Screen):
def __init__(self, to_main, **kwargs):
super(LoseScreen, self).__init__(**kwargs)
main_layout = FloatLayout()
main_layout.add_widget(Button(text='ok', size_hint=(.2, .2), pos_hint={'x': .4, 'y': .4}, on_press=to_main))
self.add_widget(main_layout)
class MySM(ScreenManager):
def __init__(self, **kwargs):
super(MySM, self).__init__(**kwargs)
self.game_screen = GameScreen(name='game_screen', to_lose=self.go_to_lose)
self.add_widget(MainScreen(name='main_screen', to_game=self.go_to_game))
self.add_widget(self.game_screen)
self.add_widget(LoseScreen(name='lose_menu', to_main=self.go_to_main))
def go_to_game(self, *args):
self.current = 'game_screen'
self.game_screen.build()
def go_to_main(self, *args):
self.current = 'main_screen'
def go_to_lose(self, *args):
self.current = 'lose_menu'
class GameApp(App):
def build(self):
return MySM()
if __name__ == "__main__":
GameApp().run() | main.py | from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.image import Image
from kivy.clock import Clock
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.core.window import Window
from functools import partial
from random import randint, randrange
from models import *
WIDTH = Window.size[0] / 100
HEIGHT = Window.size[1] / 100
FPS = 0.01
VELOCITY = 1000
class BackGround(Widget):
def __init__(self, **kwargs):
super(BackGround, self).__init__(**kwargs)
self.rect = Image(source='graphics/road_mountain_summer/background.png', size=self.size, pos=self.pos,
allow_stretch=True, keep_ratio=False)
self.bg_rect = Image(source='graphics/road_mountain_summer/road.zip', anim_delay=VELOCITY / 1000 - 0.95,
size=self.size, pos=self.pos, allow_stretch=True, keep_ratio=False)
self.add_widget(self.rect)
self.add_widget(self.bg_rect)
class GameScreen(Screen):
def __init__(self, to_lose, **kwargs):
super(GameScreen, self).__init__(**kwargs)
self.score = 0
self.car = Car(pos=(WIDTH * 50, HEIGHT * 5), size_hint=(0.1, 0.17))
self.main_layout = FloatLayout(size_hint=(1, 1))
self.start_size = (0.1, 0.1)
self.offset = [WIDTH * 28.76, 0, WIDTH * -28.76]
self.offset_objects = [WIDTH * 43, WIDTH * -43]
self.start_coord = [(WIDTH * 42, HEIGHT * 62.22), (WIDTH * 50, HEIGHT * 62.22),
(WIDTH * 58, HEIGHT * 62.22)] # координаты стратовой позиции препятсвий
self.start_coord_objects = [(WIDTH * 28, HEIGHT * 62.22), (WIDTH * 72, HEIGHT * 62.22)]
self.main_layout.add_widget(BackGround(pos=(0, 0), size=(WIDTH * 100, HEIGHT * 100)))
self.to_lose = to_lose
self.main_layout.add_widget(self.car)
btn_left = Button(size_hint=(0.5, 1), pos=(WIDTH * 0, HEIGHT * 0),
background_color=(0, 0, 0, 0), on_press=self.on_press_l)
btn_left.bind(state=self.move_to_left_state)
btn_right = Button(size_hint=(0.5, 1), pos=(WIDTH * 50, HEIGHT * 0),
background_color=(0, 0, 0, 0), on_press=self.on_press_r)
btn_right.bind(state=self.move_to_right_state)
self.main_layout.add_widget(btn_left)
self.main_layout.add_widget(btn_right)
self.score_label = Label(text=str(self.score), size_hint=(0.2, 0.2), pos=(WIDTH * 40, HEIGHT * 80))
self.main_layout.add_widget(self.score_label)
self.add_widget(self.main_layout)
def build(self):
global VELOCITY
self.score = 0
VELOCITY = 1000 # controls the movement speed
self.obstructions = []
self.obstructions.append(Cow(pos=self.start_coord[0],
size_hint=self.start_size))
self.obstructions.append(Tractor(pos=self.start_coord[0],
size_hint=self.start_size))
self.spawner()
self.spawner_bckgd_objects()
Clock.schedule_interval(self.collision, FPS)
self.acc_fun = Clock.schedule_interval(self.acceleration, FPS)
self.score_label.text = str(self.score)
def collision(self, *args):
for obs in self.obstructions:
if self.car.collide_widget(obs):
print('game over')
for obs1 in self.obstructions:
self.main_layout.remove_widget(obs1)
for obj in self.bckgd_objects:
self.main_layout.remove_widget(obj)
for obj in self.right_bckgd_objects:
self.main_layout.remove_widget(obj)
for fun in self.moves:
fun.cancel()
self.acc_fun.cancel()
self.to_lose()
return False
# The functions below handle player movement
def on_press_r(self, instance):
self.func_r = Clock.schedule_interval(self.move_to_right, FPS)
self.car.rect.source = 'graphics/car0_right.png'
def on_press_l(self, instance):
self.func_l = Clock.schedule_interval(self.move_to_left, FPS)
self.car.rect.source = 'graphics/car0_left.png'
def move_to_right_state(self, instance, state):
if state != 'down':
self.func_r.cancel()
self.car.rect.source = 'graphics/car0.png'
def move_to_left_state(self, instance, state):
if state != 'down':
self.func_l.cancel()
self.car.rect.source = 'graphics/car0.png'
def move_to_right(self, value):
if self.car.pos[0] >= 80 * WIDTH:
return False
self.car.pos[0] += WIDTH
self.car.pos[1] += 0
self.car.size_hint[0] += 0
self.car.size_hint[1] += 0
def move_to_left(self, value):
if self.car.pos[0] <= 10 * WIDTH:
return False
self.car.pos[0] -= WIDTH
self.car.pos[1] -= 0
self.car.size_hint[0] += 0
self.car.size_hint[1] += 0
# The functions below handle spawning and moving the obstacles
def spawner(self, *args):
"""функция управляет спавном и движением препятсвий"""
Clock.schedule_once(self.spawn)
self.moves = []
for obs in self.obstructions:
self.moves.append(Clock.schedule_interval(partial(self.move, obs), FPS))
def spawn(self, *args):
"""стартовый спавн препятсвий"""
for obs in self.obstructions:
self.main_layout.add_widget(obs)
def move(self, obstruction, *args):
"""движение препятвий"""
if obstruction.pos[1] <= - (HEIGHT * randrange(30, 100, 3)): # kludge: a random threshold staggers respawns
obstruction.num_way = randint(0, 2)
obstruction.pos = self.start_coord[obstruction.num_way]
obstruction.pos[0] += randrange(-4, 4, 1) * WIDTH
obstruction.size_hint = self.start_size
obstruction.pos[1] -= HEIGHT * 70.22 / VELOCITY
obstruction.pos[0] -= self.offset[obstruction.num_way] / VELOCITY
obstruction.size_hint[0] += 0.1 / VELOCITY
obstruction.size_hint[1] += 0.1 / VELOCITY
obstruction.pos[0] -= WIDTH * 5 / VELOCITY
obstruction.pos[1] -= HEIGHT * 5 / VELOCITY
def spawner_bckgd_objects(self, *args):
self.bckgd_objects = []
self.right_bckgd_objects = []
x = 0
for _ in range(4):
self.bckgd_objects.append(BackgroundObjects(size_hint=(0.1, 0.1), pos=self.start_coord_objects[0]))
for obj in self.bckgd_objects:
Clock.schedule_once(partial(self.spawn_bckgd_objects, obj), x)
Clock.schedule_once(partial(self.move_bckgd_objects, obj), x)
x += 4
def spawn_bckgd_objects(self, obj, *args):
self.main_layout.add_widget(obj)
def move_bckgd_objects(self, obj, *args):
self.moves.append(Clock.schedule_interval(partial(self.move_objects, obj), FPS))
def move_objects(self, obstruction, *args):
if obstruction.pos[1] <= - (HEIGHT * 20):
obstruction.num_way = randint(0, 1)
obstruction.num_sprite = randint(0, 2)
if obstruction.num_sprite == 1:
obstruction.rect.source = 'graphics/road_mountain_summer/tree.png'
elif obstruction.num_sprite == 2:
obstruction.rect.source = 'graphics/road_mountain_summer/tree1.png'
else:
obstruction.rect.source = 'graphics/road_mountain_summer/bush.png'
obstruction.pos = self.start_coord_objects[obstruction.num_way]
obstruction.pos[0] += randrange(-4, 4, 1) * WIDTH
obstruction.size_hint = self.start_size
self.score += 1
self.score_label.text = str(self.score)
obstruction.pos[1] -= HEIGHT * 70.22 / VELOCITY
obstruction.pos[0] -= self.offset_objects[obstruction.num_way] / VELOCITY
obstruction.size_hint[0] += 0.1 / VELOCITY
obstruction.size_hint[1] += 0.1 / VELOCITY
obstruction.pos[0] -= WIDTH * 5 / VELOCITY
obstruction.pos[1] -= HEIGHT * 5 / VELOCITY
def acceleration(self, *args):
global VELOCITY
acc = 1.0003 # tune: pick a suitable value
VELOCITY /= acc
class MainScreen(Screen):
def __init__(self, to_game, **kwargs):
super(MainScreen, self).__init__(**kwargs)
main_layout = FloatLayout()
main_layout.add_widget(Button(text='play', size_hint=(.2, .2), pos_hint={'x': .4, 'y': .4}, on_press=to_game))
self.add_widget(main_layout)
class LoseScreen(Screen):
def __init__(self, to_main, **kwargs):
super(LoseScreen, self).__init__(**kwargs)
main_layout = FloatLayout()
main_layout.add_widget(Button(text='ok', size_hint=(.2, .2), pos_hint={'x': .4, 'y': .4}, on_press=to_main))
self.add_widget(main_layout)
class MySM(ScreenManager):
def __init__(self, **kwargs):
super(MySM, self).__init__(**kwargs)
self.game_screen = GameScreen(name='game_screen', to_lose=self.go_to_lose)
self.add_widget(MainScreen(name='main_screen', to_game=self.go_to_game))
self.add_widget(self.game_screen)
self.add_widget(LoseScreen(name='lose_menu', to_main=self.go_to_main))
def go_to_game(self, *args):
self.current = 'game_screen'
self.game_screen.build()
def go_to_main(self, *args):
self.current = 'main_screen'
def go_to_lose(self, *args):
self.current = 'lose_menu'
class GameApp(App):
def build(self):
return MySM()
if __name__ == "__main__":
GameApp().run() | 0.558447 | 0.167797 |
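The game above drives all motion through one Kivy idiom: Clock.schedule_interval(partial(callback, obj), FPS) calls callback(obj, dt) every FPS seconds, a callback that returns False unschedules itself, and stored events are stopped explicitly with .cancel(), as collision() does. A minimal self-contained sketch of that pattern (DemoApp and step are illustrative names, not taken from main.py):

from functools import partial
from kivy.app import App
from kivy.clock import Clock
from kivy.uix.widget import Widget

FPS = 0.01  # tick period in seconds, matching the game's constant

class DemoApp(App):
    def build(self):
        root = Widget()
        # partial pre-binds the widget; Kivy passes dt as the final argument
        self.event = Clock.schedule_interval(partial(self.step, root), FPS)
        return root

    def step(self, widget, dt):
        widget.x += 1
        if widget.x > 200:
            return False  # returning False cancels this scheduled callback

if __name__ == '__main__':
    DemoApp().run()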
import mock
from nova.api.openstack.compute import hosts
from nova.policies import base as base_policy
from nova.policies import hosts as policies
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.policies import base
class HostsPolicyTest(base.BasePolicyTest):
"""Test os-hosts APIs policies with all possible context.
This class defines the set of context with different roles
which are allowed and not allowed to pass the policy checks.
With those set of context, it will call the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(HostsPolicyTest, self).setUp()
self.controller = hosts.HostController()
self.req = fakes.HTTPRequest.blank('')
# With the legacy rule and scope checks disabled by default, system admin,
# legacy admin, and project admin are all able to perform host
# operations.
self.system_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
@mock.patch('nova.compute.api.HostAPI.service_get_all')
def test_list_hosts_policy(self, mock_get):
rule_name = policies.POLICY_NAME % 'list'
self.common_policy_auth(self.system_admin_authorized_contexts,
rule_name, self.controller.index,
self.req)
@mock.patch('nova.context.set_target_cell')
@mock.patch('nova.objects.HostMapping.get_by_host')
@mock.patch('nova.objects.ComputeNode.'
'get_first_node_by_host_for_old_compat')
@mock.patch('nova.compute.api.HostAPI.instance_get_all_by_host')
def test_show_host_policy(self, mock_get, mock_node, mock_map, mock_set):
rule_name = policies.POLICY_NAME % 'show'
self.common_policy_auth(self.system_admin_authorized_contexts,
rule_name, self.controller.show,
self.req, 11111)
def test_update_host_policy(self):
rule_name = policies.POLICY_NAME % 'update'
self.common_policy_auth(self.system_admin_authorized_contexts,
rule_name, self.controller.update,
self.req, 11111, body={})
@mock.patch('nova.compute.api.HostAPI.host_power_action')
def test_reboot_host_policy(self, mock_action):
rule_name = policies.POLICY_NAME % 'reboot'
self.common_policy_auth(self.system_admin_authorized_contexts,
rule_name, self.controller.reboot,
self.req, 11111)
@mock.patch('nova.compute.api.HostAPI.host_power_action')
def test_shutdown_host_policy(self, mock_action):
rule_name = policies.POLICY_NAME % 'shutdown'
self.common_policy_auth(self.system_admin_authorized_contexts,
rule_name, self.controller.shutdown,
self.req, 11111)
@mock.patch('nova.compute.api.HostAPI.host_power_action')
def test_startup_host_policy(self, mock_action):
rule_name = policies.POLICY_NAME % 'start'
self.common_policy_auth(self.system_admin_authorized_contexts,
rule_name, self.controller.startup,
self.req, 11111)
class HostsNoLegacyNoScopePolicyTest(HostsPolicyTest):
"""Test Hosts APIs policies with no legacy deprecated rules
and no scope checks which means new defaults only. In this case
system admin, legacy admin, and project admin will be able to perform
hosts Operations. Legacy admin will be allowed as policy is just admin
if no scope checks.
"""
without_deprecated_rules = True
class HostsScopeTypePolicyTest(HostsPolicyTest):
"""Test os-hosts APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
so that we can switch on the scope checking on oslo policy side.
It defines the set of context with scoped token
which are allowed and not allowed to pass the policy checks.
With those set of context, it will run the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(HostsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# With scope checks enabled, only the system admin is able to perform
# host operations.
self.system_admin_authorized_contexts = [self.system_admin_context]
class HostsScopeTypeNoLegacyPolicyTest(HostsScopeTypePolicyTest):
"""Test Hosts APIs policies with with no legacy deprecated rules
and scope checks enabled which means scope + new defaults. So
only system admin is able to perform hosts Operations.
"""
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_NAME % 'list':
base_policy.ADMIN,
policies.POLICY_NAME % 'show':
base_policy.ADMIN,
policies.POLICY_NAME % 'update':
base_policy.ADMIN,
policies.POLICY_NAME % 'reboot':
base_policy.ADMIN,
policies.POLICY_NAME % 'shutdown':
base_policy.ADMIN,
policies.POLICY_NAME % 'start':
base_policy.ADMIN} | nova/tests/unit/policies/test_hosts.py |
import mock
from nova.api.openstack.compute import hosts
from nova.policies import base as base_policy
from nova.policies import hosts as policies
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.policies import base
class HostsPolicyTest(base.BasePolicyTest):
"""Test os-hosts APIs policies with all possible context.
This class defines the set of context with different roles
which are allowed and not allowed to pass the policy checks.
With those set of context, it will call the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(HostsPolicyTest, self).setUp()
self.controller = hosts.HostController()
self.req = fakes.HTTPRequest.blank('')
# With the legacy rule and scope checks disabled by default, system admin,
# legacy admin, and project admin are all able to perform host
# operations.
self.system_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
@mock.patch('nova.compute.api.HostAPI.service_get_all')
def test_list_hosts_policy(self, mock_get):
rule_name = policies.POLICY_NAME % 'list'
self.common_policy_auth(self.system_admin_authorized_contexts,
rule_name, self.controller.index,
self.req)
@mock.patch('nova.context.set_target_cell')
@mock.patch('nova.objects.HostMapping.get_by_host')
@mock.patch('nova.objects.ComputeNode.'
'get_first_node_by_host_for_old_compat')
@mock.patch('nova.compute.api.HostAPI.instance_get_all_by_host')
def test_show_host_policy(self, mock_get, mock_node, mock_map, mock_set):
rule_name = policies.POLICY_NAME % 'show'
self.common_policy_auth(self.system_admin_authorized_contexts,
rule_name, self.controller.show,
self.req, 11111)
def test_update_host_policy(self):
rule_name = policies.POLICY_NAME % 'update'
self.common_policy_auth(self.system_admin_authorized_contexts,
rule_name, self.controller.update,
self.req, 11111, body={})
@mock.patch('nova.compute.api.HostAPI.host_power_action')
def test_reboot_host_policy(self, mock_action):
rule_name = policies.POLICY_NAME % 'reboot'
self.common_policy_auth(self.system_admin_authorized_contexts,
rule_name, self.controller.reboot,
self.req, 11111)
@mock.patch('nova.compute.api.HostAPI.host_power_action')
def test_shutdown_host_policy(self, mock_action):
rule_name = policies.POLICY_NAME % 'shutdown'
self.common_policy_auth(self.system_admin_authorized_contexts,
rule_name, self.controller.shutdown,
self.req, 11111)
@mock.patch('nova.compute.api.HostAPI.host_power_action')
def test_startup_host_policy(self, mock_action):
rule_name = policies.POLICY_NAME % 'start'
self.common_policy_auth(self.system_admin_authorized_contexts,
rule_name, self.controller.startup,
self.req, 11111)
class HostsNoLegacyNoScopePolicyTest(HostsPolicyTest):
"""Test Hosts APIs policies with no legacy deprecated rules
and no scope checks which means new defaults only. In this case
system admin, legacy admin, and project admin will be able to perform
hosts Operations. Legacy admin will be allowed as policy is just admin
if no scope checks.
"""
without_deprecated_rules = True
class HostsScopeTypePolicyTest(HostsPolicyTest):
"""Test os-hosts APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
so that we can switch on the scope checking on oslo policy side.
It defines the set of context with scoped token
which are allowed and not allowed to pass the policy checks.
With those set of context, it will run the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(HostsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# With scope checks enabled, only the system admin is able to perform
# host operations.
self.system_admin_authorized_contexts = [self.system_admin_context]
class HostsScopeTypeNoLegacyPolicyTest(HostsScopeTypePolicyTest):
"""Test Hosts APIs policies with with no legacy deprecated rules
and scope checks enabled which means scope + new defaults. So
only system admin is able to perform hosts Operations.
"""
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_NAME % 'list':
base_policy.ADMIN,
policies.POLICY_NAME % 'show':
base_policy.ADMIN,
policies.POLICY_NAME % 'update':
base_policy.ADMIN,
policies.POLICY_NAME % 'reboot':
base_policy.ADMIN,
policies.POLICY_NAME % 'shutdown':
base_policy.ADMIN,
policies.POLICY_NAME % 'start':
base_policy.ADMIN} | 0.606382 | 0.205814 |
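test_show_host_policy above stacks four mock.patch decorators; mock applies them bottom-up, so the bottom-most patch is delivered as the first mock argument (mock_get) and the top-most (mock_set) as the last. A standalone sketch of that ordering against stdlib targets, unrelated to nova:

from unittest import mock

@mock.patch('os.path.exists')  # top decorator -> last mock argument
@mock.patch('os.path.isdir')   # bottom decorator -> first mock argument
def check(mock_isdir, mock_exists):
    mock_isdir.return_value = True
    mock_exists.return_value = False
    return mock_isdir('x'), mock_exists('x')

print(check())  # (True, False): arguments pair with the patches bottom-up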
from modules import create_connect as db
from contextlib import closing
from modules import utils
class VaccinationPlan:
def __init__(self, vaccination_plan_id, min_age, max_age, start_date, end_date):
self.__vaccination_plan_id = vaccination_plan_id
self.__min_age = min_age
self.__max_age = max_age
self.__start_date = start_date
self.__end_date = end_date
def set_vaccination_plan_id(self, vaccination_plan_id):
self.__vaccination_plan_id = vaccination_plan_id
def get_vaccination_plan_id(self):
return self.__vaccination_plan_id
def set_min_age(self, min_age):
self.__min_age = min_age
def get_min_age(self):
return self.__min_age
def set_max_age(self, max_age):
self.__max_age = max_age
def get_max_age(self):
return self.__max_age
def set_start_date(self, start_date):
self.__start_date = start_date
def get_start_date(self):
return self.__start_date
def set_end_date(self, end_date):
self.__end_date = end_date
def get_end_date(self):
return self.__end_date
class VaccinationPlanManager:
"""
Creates one instance of Vaccination Plan.
Args:
vaccination_plan_id: identification of the Vaccination Plan (int)
minimum_age: minimum age of the Vaccination Plan (int)
maximum_age: maximum age of the Vaccination Plan (int)
start_date: start date of the Vaccination Plan (int)
end_date: end date of the Vaccination Plan (int)
"""
def create_vaccination_plan(self, vaccination_plan: VaccinationPlan):
try:
with db.create_or_connect() as con:
with closing(con.cursor()) as cursor:
cursor.execute("SELECT * from VaccinationPlan WHERE (?) BETWEEN minimum_age AND maximum_age OR (?) BETWEEN minimum_age AND maximum_age",
(vaccination_plan.get_min_age(), vaccination_plan.get_max_age(),))
rows = cursor.fetchall()
if rows:
return False
cursor.execute("INSERT INTO VaccinationPlan (vaccination_plan_id , minimum_age, maximum_age, start_date, end_date) VALUES(?, ?, ?, ?, ?)", (
vaccination_plan.get_vaccination_plan_id(),
vaccination_plan.get_min_age(),
vaccination_plan.get_max_age(),
vaccination_plan.get_start_date(),
vaccination_plan.get_end_date()))
return True
except Exception:
return False
"""
Consults the information of one instance of Vaccination Plan by vaccination_plan_id.
Args:
vaccination_plan_id: identification of the Vaccination Plan (int)
Returns:
a dictionary with the information of one instance of Vaccination Plan: the name of the field (key) with its corresponding value
"""
def consult_vaccination_plan(self, vaccination_plan_id):
try:
with db.create_or_connect() as con:
with closing(con.cursor()) as cursor:
cursor.execute("SELECT * from VaccinationPlan WHERE vaccination_plan_id = (?)", (vaccination_plan_id,))
record = cursor.fetchone()
return utils.dict_factory(cursor, record)
except Exception:
return {} | modules/vaccination_plan.py | from modules import create_connect as db
from contextlib import closing
from modules import utils
class VaccinationPlan:
def __init__(self, vaccination_plan_id, min_age, max_age, start_date, end_date):
self.__vaccination_plan_id = vaccination_plan_id
self.__min_age = min_age
self.__max_age = max_age
self.__start_date = start_date
self.__end_date = end_date
def set_vaccination_plan_id(self, vaccination_plan_id):
self.__vaccination_plan_id = vaccination_plan_id
def get_vaccination_plan_id(self):
return self.__vaccination_plan_id
def set_min_age(self, min_age):
self.__min_age = min_age
def get_min_age(self):
return self.__min_age
def set_max_age(self, max_age):
self.__max_age = max_age
def get_max_age(self):
return self.__max_age
def set_start_date(self, start_date):
self.__start_date = start_date
def get_start_date(self):
return self.__start_date
def set_end_date(self, end_date):
self.__end_date = end_date
def get_end_date(self):
return self.__end_date
class VaccinationPlanManager:
"""
Creates one instance of Vaccination Plan.
Args:
vaccination_plan_id: identification of the Vaccination Plan (int)
minimum_age: minimum age of the Vaccination Plan (int)
maximum_age: maximum age of the Vaccination Plan (int)
start_date: start date of the Vaccination Plan (int)
end_date: end date of the Vaccination Plan (int)
"""
def create_vaccination_plan(self, vaccination_plan: VaccinationPlan):
try:
with db.create_or_connect() as con:
with closing(con.cursor()) as cursor:
cursor.execute("SELECT * from VaccinationPlan WHERE (?) BETWEEN minimum_age AND maximum_age OR (?) BETWEEN minimum_age AND maximum_age",
(vaccination_plan.get_min_age(), vaccination_plan.get_max_age(),))
rows = cursor.fetchall()
if rows:
return False
cursor.execute("INSERT INTO VaccinationPlan (vaccination_plan_id , minimum_age, maximum_age, start_date, end_date) VALUES(?, ?, ?, ?, ?)", (
vaccination_plan.get_vaccination_plan_id(),
vaccination_plan.get_min_age(),
vaccination_plan.get_max_age(),
vaccination_plan.get_start_date(),
vaccination_plan.get_end_date()))
return True
except Exception:
return False
"""
Consults the information of one instance of Vaccination Plan by vaccination_plan_id.
Args:
vaccination_plan_id: identification of the Vaccination Plan (int)
Returns:
a dictionay with the information of one instance of Vaccination Plan: the name of the field (key) with its corresponding value
"""
def consult_vaccination_plan(self, vaccination_plan_id):
try:
with db.create_or_connect() as con:
with closing(con.cursor()) as cursor:
cursor.execute("SELECT * from VaccinationPlan WHERE vaccination_plan_id = (?)", (vaccination_plan_id,))
record = cursor.fetchone()
return utils.dict_factory(cursor, record)
except Exception:
return {} | 0.706393 | 0.135489 |
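create_vaccination_plan above first runs a BETWEEN query so that a plan whose age range overlaps an existing one is rejected, and both methods bind values through ? placeholders rather than string formatting. A hedged usage sketch; the id, ages, and dates are invented:

plan = VaccinationPlan(vaccination_plan_id=1, min_age=30, max_age=39,
                       start_date='2021-03-01', end_date='2021-03-31')
manager = VaccinationPlanManager()
if manager.create_vaccination_plan(plan):        # False on overlap or DB error
    print(manager.consult_vaccination_plan(1))   # stored row as a field dict
else:
    print('age range overlaps an existing plan')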
import numpy as np
import datetime
import pandas
import os
import glob
import h5py
import yaml
from scipy import interpolate
from ttools import utils, config
from ttools.satellite import SATELLITES
OMNI_COLUMNS = (
"rotation_number", "imf_id", "sw_id", "imf_n", "plasma_n", "b_mag", "b_vector_mag", "b_vector_lat_avg",
"b_vector_lon_avg", "bx", "by_gse", "bz_gse", "by_gsm", "bz_gsm", "b_mag_std", "b_vector_mag_std", "bx_std",
"by_std", "bz_std", "proton_temp", "proton_density", "plasma_speed", "plasma_lon_angle", "plasma_lat_angle",
"na_np_ratio", "flow_pressure", "temp_std", "density_std", "speed_std", "phi_v_std", "theta_v_std",
"na_np_ratio_std", "e_field", "plasma_beta", "alfven_mach_number", "kp", "r", "dst", "ae", "proton_flux_1",
"proton_flux_2", "proton_flux_4", "proton_flux_10", "proton_flux_30", "proton_flux_60", "proton_flux_flag", "ap",
"f107", "pcn", "al", "au", "magnetosonic_mach_number"
)
def get_gm_index_kyoto(fn=None):
if fn is None:
fn = config.kp_file
with open(fn, 'r') as f:
text = f.readlines()
ut_list = []
kp_list = []
ap_list = []
for line in text[1:]:
day = datetime.datetime.strptime(line[:8], '%Y%m%d')
dt = datetime.timedelta(hours=3)
uts = np.array([(day + i * dt).timestamp() for i in range(8)], dtype=int)
kp = []
# Kyoto thirds notation: each 3-hourly Kp is a digit followed by a sign
# character; '3+' means 3 + 1/3 and '3-' means 3 - 1/3.
for i in range(9, 25, 2):
num = float(line[i])
sign = line[i + 1]
if sign == '+':
num += 1 / 3
elif sign == '-':
num -= 1 / 3
kp.append(num)
kp_sum = float(line[25:27])
sign = line[27]
if sign == '+':
kp_sum += 1 / 3
elif sign == '-':
kp_sum -= 1 / 3
assert abs(kp_sum - sum(kp)) < .01
kp_list.append(kp)
ap = []
for i in range(28, 52, 3):
ap.append(float(line[i:i + 3]))
ap = np.array(ap, dtype=int)
Ap = float(line[52:55])  # daily Ap is parsed but not currently used
ut_list.append(uts)
ap_list.append(ap)
ut = np.concatenate(ut_list)
ap = np.concatenate(ap_list)
kp = np.concatenate(kp_list)
return pandas.DataFrame({'kp': kp, 'ap': ap, 'ut': ut}, index=pandas.to_datetime(ut, unit='s'))
def get_kp(times, fn=None):
if fn is None:
fn = config.kp_file
data = get_gm_index_kyoto(fn)
interpolator = interpolate.interp1d(data['ut'].values, data['kp'], kind='previous')
return interpolator(times.astype('datetime64[s]').astype(float))
def get_omni_data(fn=None):
if fn is None:
fn = config.omni_file
data = np.loadtxt(fn)
year = (data[:, 0] - 1970).astype('datetime64[Y]')
doy = (data[:, 1] - 1).astype('timedelta64[D]')
hour = data[:, 2].astype('timedelta64[h]')
datetimes = (year + doy + hour).astype('datetime64[s]')
dtindex = pandas.DatetimeIndex(datetimes)
df = pandas.DataFrame(data=data[:, 3:], index=dtindex, columns=OMNI_COLUMNS)
# OMNI flags missing data with all-9s sentinel values (e.g. 999.9); when a
# column's maximum is such a sentinel, replace those entries with NaN.
for field in df:
bad_val = df[field].max()
bad_val_str = str(int(np.floor(bad_val)))
if bad_val_str.count('9') == len(bad_val_str):
mask = df[field] == bad_val
df[field].loc[mask] = np.nan
return df
def get_borovsky_data(fn="E:\\borovsky_2020_data.txt"):
data = np.loadtxt(fn, skiprows=1)
year = (data[:, 1] - 1970).astype('datetime64[Y]')
doy = (data[:, 2] - 1).astype('timedelta64[D]')
hour = data[:, 3].astype('timedelta64[h]')
datetimes = (year + doy + hour).astype('datetime64[s]')
return datetimes.astype(int), data[:, 4:]
def get_madrigal_data(start_date, end_date, data_dir=None):
"""Gets madrigal TEC and timestamps assuming regular sampling. Fills in missing time steps.
Parameters
----------
start_date, end_date: np.datetime64
data_dir: str
Returns
-------
tec, times: numpy.ndarray
"""
if data_dir is None:
data_dir = config.madrigal_dir
dt = np.timedelta64(5, 'm')
dt_sec = dt.astype('timedelta64[s]').astype(int)
# Snap both window ends up to the next 5-minute grid point so that
# ref_times lines up exactly with the timestamps stored in the files.
start_date = (np.ceil(start_date.astype('datetime64[s]').astype(int) / dt_sec) * dt_sec).astype('datetime64[s]')
end_date = (np.ceil(end_date.astype('datetime64[s]').astype(int) / dt_sec) * dt_sec).astype('datetime64[s]')
ref_times = np.arange(start_date, end_date, dt)
ref_times_ut = ref_times.astype('datetime64[s]').astype(int)
tec = np.ones((config.madrigal_lat.shape[0], config.madrigal_lon.shape[0], ref_times_ut.shape[0])) * np.nan
file_dates = np.unique(ref_times.astype('datetime64[D]'))
file_dates = utils.decompose_datetime64(file_dates)
for i in range(file_dates.shape[0]):
y = file_dates[i, 0]
m = file_dates[i, 1]
d = file_dates[i, 2]
try:
fn = glob.glob(os.path.join(data_dir, f"gps{y - 2000:02d}{m:02d}{d:02d}g.*.hdf5"))[-1]
except IndexError:
print(f"{y}-{m}-{d} madrigal file doesn't exist")
continue
t, ut, lat, lon = open_madrigal_file(fn)
month_time_mask = np.in1d(ref_times_ut, ut)
day_time_mask = np.in1d(ut, ref_times_ut)
if not (np.all(lat == config.madrigal_lat) and np.all(lon == config.madrigal_lon)):
print(f"THIS FILE HAS MISSING DATA!!!!!!! {fn}")
lat_ind = np.argwhere(np.in1d(config.madrigal_lat, lat))[:, 0]
lon_ind = np.argwhere(np.in1d(config.madrigal_lon, lon))[:, 0]
time_ind = np.argwhere(month_time_mask)[:, 0]
lat_grid_ind, lon_grid_ind, time_grid_ind = np.meshgrid(lat_ind, lon_ind, time_ind)
tec[lat_grid_ind.ravel(), lon_grid_ind.ravel(), time_grid_ind.ravel()] = t[:, :, day_time_mask].ravel()
else:
# assume ut is increasing and has no repeating entries, basically that it is a subset of ref_times_ut
tec[:, :, month_time_mask] = t[:, :, day_time_mask]
return np.moveaxis(tec, -1, 0), ref_times
def open_madrigal_file(fn):
"""Open a madrigal file, return its data
Parameters
----------
fn: str
madrigal file name to open
Returns
-------
tec, timestamps, latitude, longitude: numpy.ndarray[float]
(X, Y, T), (T, ), (X, ), (Y, )
"""
with h5py.File(fn, 'r') as f:
tec = f['Data']['Array Layout']['2D Parameters']['tec'][()]
dtec = f['Data']['Array Layout']['2D Parameters']['dtec'][()]  # TEC uncertainty
timestamps = f['Data']['Array Layout']['timestamps'][()]
lat = f['Data']['Array Layout']['gdlat'][()]
lon = f['Data']['Array Layout']['glon'][()]
print(f"Opened madrigal file: {fn}, size: {tec.shape}")
return tec, timestamps, lat, lon
def get_swarm_data(start_date, end_date, data_dir=None):
"""Gets swarm and timestamps
Parameters
----------
start_date, end_date: np.datetime64
data_dir: str
Returns
-------
tec, times: numpy.ndarray
"""
if data_dir is None:
data_dir = config.swarm_dir
dt = np.timedelta64(500, 'ms')
dt_sec = dt.astype('timedelta64[ms]').astype(float)
start_date = (np.ceil(start_date.astype('datetime64[ms]').astype(float) / dt_sec) * dt_sec).astype('datetime64[ms]')
end_date = (np.ceil(end_date.astype('datetime64[ms]').astype(float) / dt_sec) * dt_sec).astype('datetime64[ms]')
ref_times = np.arange(start_date, end_date, dt)
ref_times_ut = ref_times.astype('datetime64[ms]').astype(float)
keys = ['n', 'mlat', 'mlon', 'mlt']#, 'viy']
data = {sat: {key: [] for key in keys} for sat in SATELLITES['swarm']}
file_dates = np.unique(ref_times.astype('datetime64[M]'))
file_dates = utils.decompose_datetime64(file_dates)
for i in range(file_dates.shape[0]):
y = file_dates[i, 0]
m = file_dates[i, 1]
fn = os.path.join(data_dir, "{year:04d}_{month:02d}_swarm.h5".format(year=y, month=m))
d, ut = open_swarm_file(fn)
in_time_mask = np.in1d(ut, ref_times_ut)
for sat in SATELLITES['swarm']:
for key in keys:
data[sat][key].append(d[sat][key][in_time_mask])
for sat in SATELLITES['swarm']:
for key in keys:
data[sat][key] = np.concatenate(data[sat][key], axis=0)
return data, ref_times
def open_swarm_file(fn):
"""Open a monthly SWARM file, return its data
Parameters
----------
fn: str
Returns
-------
n, times, mlat, mlt, mlon: numpy.ndarray
"""
data = {}
with h5py.File(fn, 'r') as f:
ut = f['ut_ms'][()]
for sat in SATELLITES['swarm']:
data[sat] = {
'n': f[f'/swarm{sat}/n'][()],
'mlat': f[f'/swarm{sat}/apex_lat'][()],
'mlon': f[f'/swarm{sat}/apex_lon'][()],
'mlt': f[f'/swarm{sat}/mlt'][()],
# 'viy': f[f'/swarm{sat}/Viy'][()],
}
print(f"Opened SWARM file: {fn}, size: {ut.shape}")
return data, ut
def get_tec_data(start_date, end_date, dt=np.timedelta64(1, 'h'), data_dir=None):
"""Gets TEC and timestamps
Parameters
----------
start_date, end_date: np.datetime64
data_dir: str
Returns
-------
tec, times: numpy.ndarray
"""
if data_dir is None:
data_dir = config.tec_dir
dt_sec = dt.astype('timedelta64[s]').astype(int)
start_date = (np.ceil(start_date.astype('datetime64[s]').astype(int) / dt_sec) * dt_sec).astype('datetime64[s]')
end_date = (np.ceil(end_date.astype('datetime64[s]').astype(int) / dt_sec) * dt_sec).astype('datetime64[s]')
ref_times = np.arange(start_date, end_date, dt)
ref_times_ut = ref_times.astype('datetime64[s]').astype(int)
tec = []
ssmlon = []
n_samples = []
file_dates = np.unique(ref_times.astype('datetime64[M]'))
file_dates = utils.decompose_datetime64(file_dates)
for i in range(file_dates.shape[0]):
y = file_dates[i, 0]
m = file_dates[i, 1]
fn = os.path.join(data_dir, "{year:04d}_{month:02d}_tec.h5".format(year=y, month=m))
t, ut, ss, n, std = open_tec_file(fn)
in_time_mask = np.in1d(ut, ref_times_ut)
tec.append(t[in_time_mask])
ssmlon.append(ss[in_time_mask])
n_samples.append(n[in_time_mask])
return np.concatenate(tec, axis=0), ref_times, np.concatenate(ssmlon), np.concatenate(n_samples)
def open_tec_file(fn):
"""Open a monthly TEC file, return its data
Parameters
----------
fn: str
Returns
-------
tec, times, ssmlon, n, std: numpy.ndarray
"""
with h5py.File(fn, 'r') as f:
tec = f['tec'][()]
n = f['n'][()]
times = f['times'][()]
std = f['std'][()]
ssmlon = f['ssmlon'][()]
print(f"Opened TEC file: {fn}, size: {tec.shape}")
return tec, times, ssmlon, n, std
def get_superdarn_data(start_date, end_date, dt=np.timedelta64(1, 'h'), data_dir=None):
"""Gets Superdarn flow data and timestamps
Parameters
----------
start_date, end_date: np.datetime64
data_dir: str
Returns
-------
fx, fy, times: numpy.ndarray
"""
if data_dir is None:
data_dir = config.superdarn_dir
dt_sec = dt.astype('timedelta64[s]').astype(int)
start_date = (np.ceil(start_date.astype('datetime64[s]').astype(int) / dt_sec) * dt_sec).astype('datetime64[s]')
end_date = (np.ceil(end_date.astype('datetime64[s]').astype(int) / dt_sec) * dt_sec).astype('datetime64[s]')
ref_times = np.arange(start_date, end_date, dt)
ref_times_ut = ref_times.astype('datetime64[s]').astype(int)
fx = []
fy = []
file_dates = np.unique(ref_times.astype('datetime64[M]'))
file_dates = utils.decompose_datetime64(file_dates)
for i in range(file_dates.shape[0]):
y = file_dates[i, 0]
m = file_dates[i, 1]
fn = os.path.join(data_dir, "{year:04d}_{month:02d}_superdarn.h5".format(year=y, month=m))
fx_month, fy_month, ut = open_superdarn_file(fn)
in_time_mask = np.in1d(ut, ref_times_ut)
fx.append(fx_month[in_time_mask])
fy.append(fy_month[in_time_mask])
return np.concatenate(fx, axis=0), np.concatenate(fy, axis=0), ref_times
def open_superdarn_file(fn):
"""Open a monthly superdarn file, return its data
Parameters
----------
fn: str
Returns
-------
fx, fy, times: numpy.ndarray
"""
with h5py.File(fn, 'r') as f:
fx = f['fx'][()]
fy = f['fy'][()]
times = f['time'][()]
print(f"Opened Superdarn file: {fn}, size: {fx.shape}")
return fx, fy, times
def get_arb_data(start_date, end_date, dt=np.timedelta64(1, 'h'), data_dir=None):
"""Gets auroral boundary mlat and timestamps
Parameters
----------
start_date, end_date: np.datetime64
data_dir: str
Returns
-------
arb_mlat, times: numpy.ndarray
"""
if data_dir is None:
data_dir = config.arb_dir
dt_sec = dt.astype('timedelta64[s]').astype(int)
start_date = (np.ceil(start_date.astype('datetime64[s]').astype(int) / dt_sec) * dt_sec).astype('datetime64[s]')
end_date = (np.ceil(end_date.astype('datetime64[s]').astype(int) / dt_sec) * dt_sec).astype('datetime64[s]')
ref_times = np.arange(start_date, end_date, dt)
ref_times_ut = ref_times.astype('datetime64[s]').astype(int)
arb_mlat = []
uts = []
file_dates = np.unique(ref_times.astype('datetime64[M]'))
file_dates = utils.decompose_datetime64(file_dates)
for i in range(file_dates.shape[0]):
y = file_dates[i, 0]
m = file_dates[i, 1]
fn = os.path.join(data_dir, f"{y:04d}_{m:02d}_arb.h5")
mlat, ut = open_arb_file(fn)
arb_mlat.append(mlat)
uts.append(ut)
uts = np.concatenate(uts)
arb_mlat = np.concatenate(arb_mlat, axis=0)
int_arb_mlat = np.empty((ref_times.shape[0], config.mlt_vals.shape[0]))
for i in range(config.mlt_vals.shape[0]):
int_arb_mlat[:, i] = np.interp(ref_times_ut, uts, arb_mlat[:, i])
return int_arb_mlat, ref_times
def open_arb_file(fn):
"""Open a monthly auroral boundary file, return its data
Parameters
----------
fn: str
Returns
-------
arb_mlat, times: numpy.ndarray
"""
with h5py.File(fn, 'r') as f:
arb_mlat = f['mlat'][()]
times = f['times'][()]
print(f"Opened ARB file: {fn}, size: {arb_mlat.shape}")
return arb_mlat, times
def get_dmsp_data(start_date, end_date, data_dir=None):
"""Gets dmsp flow data and timestamps
Parameters
----------
start_date, end_date: np.datetime64
data_dir: str
Returns
-------
dmsp data, times: numpy.ndarray
"""
if data_dir is None:
data_dir = config.dmsp_dir
dt = np.timedelta64(1, 's')
dt_sec = dt.astype('timedelta64[s]').astype(int)
start_date = (np.ceil(start_date.astype('datetime64[s]').astype(int) / dt_sec) * dt_sec).astype('datetime64[s]')
end_date = (np.ceil(end_date.astype('datetime64[s]').astype(int) / dt_sec) * dt_sec).astype('datetime64[s]')
ref_times = np.arange(start_date, end_date, dt)
ref_times_ut = ref_times.astype('datetime64[s]').astype(int)
file_dates = np.unique(ref_times.astype('datetime64[M]'))
file_dates = utils.decompose_datetime64(file_dates)
sats = ['dmsp15', 'dmsp16', 'dmsp17', 'dmsp18']
keys = ['mlat', 'mlt', 'ne', 'hor_ion_v', 'vert_ion_v']
data = {sat: {key: np.ones(ref_times.shape[0]) * np.nan for key in keys} for sat in sats}
for i in range(file_dates.shape[0]):
y = file_dates[i, 0]
m = file_dates[i, 1]
fn = os.path.join(data_dir, f"{y:04d}_{m:02d}_dmsp_flow.h5")
d, ut = open_dmsp_file(fn)
ref_mask = np.in1d(ref_times_ut, ut)
d_mask = np.in1d(ut, ref_times_ut)
for sat in sats:
for key in keys:
data[sat][key][ref_mask] = d[sat][key][d_mask]
return data, ref_times
def open_dmsp_file(fn):
"""Open a monthly dmsp ion flow file, return its data
Parameters
----------
fn: str
Returns
-------
data, times: numpy.ndarray
"""
sats = ['dmsp15', 'dmsp16', 'dmsp17', 'dmsp18']
keys = ['mlat', 'mlt', 'ne', 'hor_ion_v', 'vert_ion_v']
data = {}
with h5py.File(fn, 'r') as f:
ut = f['ut'][()]
for sat in sats:
data[sat] = {}
for key in keys:
data[sat][key] = f[f'/{sat}/{key}'][()]
print(f"Opened DMSP file: {fn}, size: {ut.shape}")
return data, ut
def write_h5(fn, **kwargs):
"""Writes an h5 file with data specified by kwargs.
Parameters
----------
fn: str
file path to write
**kwargs
"""
with h5py.File(fn, 'w') as f:
for key, value in kwargs.items():
f.create_dataset(key, data=value)
def write_yaml(fn, **kwargs):
with open(fn, 'w') as f:
yaml.safe_dump(kwargs, f) | ttools/io.py | import numpy as np
import datetime
import pandas
import os
import glob
import h5py
import yaml
from scipy import interpolate
from ttools import utils, config
from ttools.satellite import SATELLITES
OMNI_COLUMNS = (
"rotation_number", "imf_id", "sw_id", "imf_n", "plasma_n", "b_mag", "b_vector_mag", "b_vector_lat_avg",
"b_vector_lon_avg", "bx", "by_gse", "bz_gse", "by_gsm", "bz_gsm", "b_mag_std", "b_vector_mag_std", "bx_std",
"by_std", "bz_std", "proton_temp", "proton_density", "plasma_speed", "plasma_lon_angle", "plasma_lat_angle",
"na_np_ratio", "flow_pressure", "temp_std", "density_std", "speed_std", "phi_v_std", "theta_v_std",
"na_np_ratio_std", "e_field", "plasma_beta", "alfven_mach_number", "kp", "r", "dst", "ae", "proton_flux_1",
"proton_flux_2", "proton_flux_4", "proton_flux_10", "proton_flux_30", "proton_flux_60", "proton_flux_flag", "ap",
"f107", "pcn", "al", "au", "magnetosonic_mach_number"
)
def get_gm_index_kyoto(fn=None):
if fn is None:
fn = config.kp_file
with open(fn, 'r') as f:
text = f.readlines()
ut_list = []
kp_list = []
ap_list = []
for line in text[1:]:
day = datetime.datetime.strptime(line[:8], '%Y%m%d')
dt = datetime.timedelta(hours=3)
uts = np.array([(day + i * dt).timestamp() for i in range(8)], dtype=int)
kp = []
# Kyoto thirds notation: each 3-hourly Kp is a digit followed by a sign
# character; '3+' means 3 + 1/3 and '3-' means 3 - 1/3.
for i in range(9, 25, 2):
num = float(line[i])
sign = line[i + 1]
if sign == '+':
num += 1 / 3
elif sign == '-':
num -= 1 / 3
kp.append(num)
kp_sum = float(line[25:27])
sign = line[27]
if sign == '+':
kp_sum += 1 / 3
elif sign == '-':
kp_sum -= 1 / 3
assert abs(kp_sum - sum(kp)) < .01
kp_list.append(kp)
ap = []
for i in range(28, 52, 3):
ap.append(float(line[i:i + 3]))
ap = np.array(ap, dtype=int)
Ap = float(line[52:55])  # daily Ap is parsed but not currently used
ut_list.append(uts)
ap_list.append(ap)
ut = np.concatenate(ut_list)
ap = np.concatenate(ap_list)
kp = np.concatenate(kp_list)
return pandas.DataFrame({'kp': kp, 'ap': ap, 'ut': ut}, index=pandas.to_datetime(ut, unit='s'))
def get_kp(times, fn=None):
if fn is None:
fn = config.kp_file
data = get_gm_index_kyoto(fn)
interpolator = interpolate.interp1d(data['ut'].values, data['kp'], kind='previous')
return interpolator(times.astype('datetime64[s]').astype(float))
def get_omni_data(fn=None):
if fn is None:
fn = config.omni_file
data = np.loadtxt(fn)
year = (data[:, 0] - 1970).astype('datetime64[Y]')
doy = (data[:, 1] - 1).astype('timedelta64[D]')
hour = data[:, 2].astype('timedelta64[h]')
datetimes = (year + doy + hour).astype('datetime64[s]')
dtindex = pandas.DatetimeIndex(datetimes)
df = pandas.DataFrame(data=data[:, 3:], index=dtindex, columns=OMNI_COLUMNS)
# OMNI flags missing data with all-9s sentinel values (e.g. 999.9); when a
# column's maximum is such a sentinel, replace those entries with NaN.
for field in df:
bad_val = df[field].max()
bad_val_str = str(int(np.floor(bad_val)))
if bad_val_str.count('9') == len(bad_val_str):
mask = df[field] == bad_val
df[field].loc[mask] = np.nan
return df
def get_borovsky_data(fn="E:\\borovsky_2020_data.txt"):
data = np.loadtxt(fn, skiprows=1)
year = (data[:, 1] - 1970).astype('datetime64[Y]')
doy = (data[:, 2] - 1).astype('timedelta64[D]')
hour = data[:, 3].astype('timedelta64[h]')
datetimes = (year + doy + hour).astype('datetime64[s]')
return datetimes.astype(int), data[:, 4:]
def get_madrigal_data(start_date, end_date, data_dir=None):
"""Gets madrigal TEC and timestamps assuming regular sampling. Fills in missing time steps.
Parameters
----------
start_date, end_date: np.datetime64
data_dir: str
Returns
-------
tec, times: numpy.ndarray
"""
if data_dir is None:
data_dir = config.madrigal_dir
dt = np.timedelta64(5, 'm')
dt_sec = dt.astype('timedelta64[s]').astype(int)
# Snap both window ends up to the next 5-minute grid point so that
# ref_times lines up exactly with the timestamps stored in the files.
start_date = (np.ceil(start_date.astype('datetime64[s]').astype(int) / dt_sec) * dt_sec).astype('datetime64[s]')
end_date = (np.ceil(end_date.astype('datetime64[s]').astype(int) / dt_sec) * dt_sec).astype('datetime64[s]')
ref_times = np.arange(start_date, end_date, dt)
ref_times_ut = ref_times.astype('datetime64[s]').astype(int)
tec = np.ones((config.madrigal_lat.shape[0], config.madrigal_lon.shape[0], ref_times_ut.shape[0])) * np.nan
file_dates = np.unique(ref_times.astype('datetime64[D]'))
file_dates = utils.decompose_datetime64(file_dates)
for i in range(file_dates.shape[0]):
y = file_dates[i, 0]
m = file_dates[i, 1]
d = file_dates[i, 2]
try:
fn = glob.glob(os.path.join(data_dir, f"gps{y - 2000:02d}{m:02d}{d:02d}g.*.hdf5"))[-1]
except IndexError:
print(f"{y}-{m}-{d} madrigal file doesn't exist")
continue
t, ut, lat, lon = open_madrigal_file(fn)
month_time_mask = np.in1d(ref_times_ut, ut)
day_time_mask = np.in1d(ut, ref_times_ut)
if not (np.all(lat == config.madrigal_lat) and np.all(lon == config.madrigal_lon)):
print(f"THIS FILE HAS MISSING DATA!!!!!!! {fn}")
lat_ind = np.argwhere(np.in1d(config.madrigal_lat, lat))[:, 0]
lon_ind = np.argwhere(np.in1d(config.madrigal_lon, lon))[:, 0]
time_ind = np.argwhere(month_time_mask)[:, 0]
lat_grid_ind, lon_grid_ind, time_grid_ind = np.meshgrid(lat_ind, lon_ind, time_ind)
tec[lat_grid_ind.ravel(), lon_grid_ind.ravel(), time_grid_ind.ravel()] = t[:, :, day_time_mask].ravel()
else:
# assume ut is increasing and has no repeating entries, basically that it is a subset of ref_times_ut
tec[:, :, month_time_mask] = t[:, :, day_time_mask]
return np.moveaxis(tec, -1, 0), ref_times
def open_madrigal_file(fn):
"""Open a madrigal file, return its data
Parameters
----------
fn: str
madrigal file name to open
Returns
-------
tec, timestamps, latitude, longitude: numpy.ndarray[float]
(X, Y, T), (T, ), (X, ), (Y, )
"""
with h5py.File(fn, 'r') as f:
tec = f['Data']['Array Layout']['2D Parameters']['tec'][()]
dtec = f['Data']['Array Layout']['2D Parameters']['dtec'][()]  # TEC uncertainty
timestamps = f['Data']['Array Layout']['timestamps'][()]
lat = f['Data']['Array Layout']['gdlat'][()]
lon = f['Data']['Array Layout']['glon'][()]
print(f"Opened madrigal file: {fn}, size: {tec.shape}")
return tec, timestamps, lat, lon
def get_swarm_data(start_date, end_date, data_dir=None):
"""Gets swarm and timestamps
Parameters
----------
start_date, end_date: np.datetime64
data_dir: str
Returns
-------
tec, times: numpy.ndarray
"""
if data_dir is None:
data_dir = config.swarm_dir
dt = np.timedelta64(500, 'ms')
dt_sec = dt.astype('timedelta64[ms]').astype(float)
start_date = (np.ceil(start_date.astype('datetime64[ms]').astype(float) / dt_sec) * dt_sec).astype('datetime64[ms]')
end_date = (np.ceil(end_date.astype('datetime64[ms]').astype(float) / dt_sec) * dt_sec).astype('datetime64[ms]')
ref_times = np.arange(start_date, end_date, dt)
ref_times_ut = ref_times.astype('datetime64[ms]').astype(float)
keys = ['n', 'mlat', 'mlon', 'mlt']#, 'viy']
data = {sat: {key: [] for key in keys} for sat in SATELLITES['swarm']}
file_dates = np.unique(ref_times.astype('datetime64[M]'))
file_dates = utils.decompose_datetime64(file_dates)
for i in range(file_dates.shape[0]):
y = file_dates[i, 0]
m = file_dates[i, 1]
fn = os.path.join(data_dir, "{year:04d}_{month:02d}_swarm.h5".format(year=y, month=m))
d, ut = open_swarm_file(fn)
in_time_mask = np.in1d(ut, ref_times_ut)
for sat in SATELLITES['swarm']:
for key in keys:
data[sat][key].append(d[sat][key][in_time_mask])
for sat in SATELLITES['swarm']:
for key in keys:
data[sat][key] = np.concatenate(data[sat][key], axis=0)
return data, ref_times
def open_swarm_file(fn):
"""Open a monthly SWARM file, return its data
Parameters
----------
fn: str
Returns
-------
n, times, mlat, mlt, mlon: numpy.ndarray
"""
data = {}
with h5py.File(fn, 'r') as f:
ut = f['ut_ms'][()]
for sat in SATELLITES['swarm']:
data[sat] = {
'n': f[f'/swarm{sat}/n'][()],
'mlat': f[f'/swarm{sat}/apex_lat'][()],
'mlon': f[f'/swarm{sat}/apex_lon'][()],
'mlt': f[f'/swarm{sat}/mlt'][()],
# 'viy': f[f'/swarm{sat}/Viy'][()],
}
print(f"Opened SWARM file: {fn}, size: {ut.shape}")
return data, ut
def get_tec_data(start_date, end_date, dt=np.timedelta64(1, 'h'), data_dir=None):
"""Gets TEC and timestamps
Parameters
----------
start_date, end_date: np.datetime64
data_dir: str
Returns
-------
tec, times: numpy.ndarray
"""
if data_dir is None:
data_dir = config.tec_dir
dt_sec = dt.astype('timedelta64[s]').astype(int)
start_date = (np.ceil(start_date.astype('datetime64[s]').astype(int) / dt_sec) * dt_sec).astype('datetime64[s]')
end_date = (np.ceil(end_date.astype('datetime64[s]').astype(int) / dt_sec) * dt_sec).astype('datetime64[s]')
ref_times = np.arange(start_date, end_date, dt)
ref_times_ut = ref_times.astype('datetime64[s]').astype(int)
tec = []
ssmlon = []
n_samples = []
file_dates = np.unique(ref_times.astype('datetime64[M]'))
file_dates = utils.decompose_datetime64(file_dates)
for i in range(file_dates.shape[0]):
y = file_dates[i, 0]
m = file_dates[i, 1]
fn = os.path.join(data_dir, "{year:04d}_{month:02d}_tec.h5".format(year=y, month=m))
t, ut, ss, n, std = open_tec_file(fn)
in_time_mask = np.in1d(ut, ref_times_ut)
tec.append(t[in_time_mask])
ssmlon.append(ss[in_time_mask])
n_samples.append(n[in_time_mask])
return np.concatenate(tec, axis=0), ref_times, np.concatenate(ssmlon), np.concatenate(n_samples)
def open_tec_file(fn):
"""Open a monthly TEC file, return its data
Parameters
----------
fn: str
Returns
-------
tec, times, ssmlon, n, std: numpy.ndarray
"""
with h5py.File(fn, 'r') as f:
tec = f['tec'][()]
n = f['n'][()]
times = f['times'][()]
std = f['std'][()]
ssmlon = f['ssmlon'][()]
print(f"Opened TEC file: {fn}, size: {tec.shape}")
return tec, times, ssmlon, n, std
def get_superdarn_data(start_date, end_date, dt=np.timedelta64(1, 'h'), data_dir=None):
"""Gets Superdarn flow data and timestamps
Parameters
----------
start_date, end_date: np.datetime64
data_dir: str
Returns
-------
fx, fy, times: numpy.ndarray
"""
if data_dir is None:
data_dir = config.superdarn_dir
dt_sec = dt.astype('timedelta64[s]').astype(int)
start_date = (np.ceil(start_date.astype('datetime64[s]').astype(int) / dt_sec) * dt_sec).astype('datetime64[s]')
end_date = (np.ceil(end_date.astype('datetime64[s]').astype(int) / dt_sec) * dt_sec).astype('datetime64[s]')
ref_times = np.arange(start_date, end_date, dt)
ref_times_ut = ref_times.astype('datetime64[s]').astype(int)
fx = []
fy = []
file_dates = np.unique(ref_times.astype('datetime64[M]'))
file_dates = utils.decompose_datetime64(file_dates)
for i in range(file_dates.shape[0]):
y = file_dates[i, 0]
m = file_dates[i, 1]
fn = os.path.join(data_dir, "{year:04d}_{month:02d}_superdarn.h5".format(year=y, month=m))
fx_month, fy_month, ut = open_superdarn_file(fn)
in_time_mask = np.in1d(ut, ref_times_ut)
fx.append(fx_month[in_time_mask])
fy.append(fy_month[in_time_mask])
return np.concatenate(fx, axis=0), np.concatenate(fy, axis=0), ref_times
def open_superdarn_file(fn):
"""Open a monthly superdarn file, return its data
Parameters
----------
fn: str
Returns
-------
fx, fy, times: numpy.ndarray
"""
with h5py.File(fn, 'r') as f:
fx = f['fx'][()]
fy = f['fy'][()]
times = f['time'][()]
print(f"Opened Superdarn file: {fn}, size: {fx.shape}")
return fx, fy, times
def get_arb_data(start_date, end_date, dt=np.timedelta64(1, 'h'), data_dir=None):
"""Gets auroral boundary mlat and timestamps
Parameters
----------
start_date, end_date: np.datetime64
data_dir: str
Returns
-------
arb_mlat, times: numpy.ndarray
"""
if data_dir is None:
data_dir = config.arb_dir
dt_sec = dt.astype('timedelta64[s]').astype(int)
start_date = (np.ceil(start_date.astype('datetime64[s]').astype(int) / dt_sec) * dt_sec).astype('datetime64[s]')
end_date = (np.ceil(end_date.astype('datetime64[s]').astype(int) / dt_sec) * dt_sec).astype('datetime64[s]')
ref_times = np.arange(start_date, end_date, dt)
ref_times_ut = ref_times.astype('datetime64[s]').astype(int)
arb_mlat = []
uts = []
file_dates = np.unique(ref_times.astype('datetime64[M]'))
file_dates = utils.decompose_datetime64(file_dates)
for i in range(file_dates.shape[0]):
y = file_dates[i, 0]
m = file_dates[i, 1]
fn = os.path.join(data_dir, f"{y:04d}_{m:02d}_arb.h5")
mlat, ut = open_arb_file(fn)
arb_mlat.append(mlat)
uts.append(ut)
uts = np.concatenate(uts)
arb_mlat = np.concatenate(arb_mlat, axis=0)
int_arb_mlat = np.empty((ref_times.shape[0], config.mlt_vals.shape[0]))
for i in range(config.mlt_vals.shape[0]):
int_arb_mlat[:, i] = np.interp(ref_times_ut, uts, arb_mlat[:, i])
return int_arb_mlat, ref_times
def open_arb_file(fn):
"""Open a monthly auroral boundary file, return its data
Parameters
----------
fn: str
Returns
-------
arb_mlat, times: numpy.ndarray
"""
with h5py.File(fn, 'r') as f:
arb_mlat = f['mlat'][()]
times = f['times'][()]
print(f"Opened ARB file: {fn}, size: {arb_mlat.shape}")
return arb_mlat, times
def get_dmsp_data(start_date, end_date, data_dir=None):
"""Gets dmsp flow data and timestamps
Parameters
----------
start_date, end_date: np.datetime64
data_dir: str
Returns
-------
dmsp data, times: numpy.ndarray
"""
if data_dir is None:
data_dir = config.dmsp_dir
dt = np.timedelta64(1, 's')
dt_sec = dt.astype('timedelta64[s]').astype(int)
start_date = (np.ceil(start_date.astype('datetime64[s]').astype(int) / dt_sec) * dt_sec).astype('datetime64[s]')
end_date = (np.ceil(end_date.astype('datetime64[s]').astype(int) / dt_sec) * dt_sec).astype('datetime64[s]')
ref_times = np.arange(start_date, end_date, dt)
ref_times_ut = ref_times.astype('datetime64[s]').astype(int)
file_dates = np.unique(ref_times.astype('datetime64[M]'))
file_dates = utils.decompose_datetime64(file_dates)
sats = ['dmsp15', 'dmsp16', 'dmsp17', 'dmsp18']
keys = ['mlat', 'mlt', 'ne', 'hor_ion_v', 'vert_ion_v']
data = {sat: {key: np.ones(ref_times.shape[0]) * np.nan for key in keys} for sat in sats}
for i in range(file_dates.shape[0]):
y = file_dates[i, 0]
m = file_dates[i, 1]
fn = os.path.join(data_dir, f"{y:04d}_{m:02d}_dmsp_flow.h5")
d, ut = open_dmsp_file(fn)
ref_mask = np.in1d(ref_times_ut, ut)
d_mask = np.in1d(ut, ref_times_ut)
for sat in sats:
for key in keys:
data[sat][key][ref_mask] = d[sat][key][d_mask]
return data, ref_times
def open_dmsp_file(fn):
"""Open a monthly dmsp ion flow file, return its data
Parameters
----------
fn: str
Returns
-------
data, times: numpy.ndarray
"""
sats = ['dmsp15', 'dmsp16', 'dmsp17', 'dmsp18']
keys = ['mlat', 'mlt', 'ne', 'hor_ion_v', 'vert_ion_v']
data = {}
with h5py.File(fn, 'r') as f:
ut = f['ut'][()]
for sat in sats:
data[sat] = {}
for key in keys:
data[sat][key] = f[f'/{sat}/{key}'][()]
print(f"Opened DMSP file: {fn}, size: {ut.shape}")
return data, ut
def write_h5(fn, **kwargs):
"""Writes an h5 file with data specified by kwargs.
Parameters
----------
fn: str
file path to write
**kwargs
"""
with h5py.File(fn, 'w') as f:
for key, value in kwargs.items():
f.create_dataset(key, data=value)
def write_yaml(fn, **kwargs):
with open(fn, 'w') as f:
yaml.safe_dump(kwargs, f) | 0.455441 | 0.364156 |
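Every get_* loader in io.py shares one idiom: round the requested window up to a fixed cadence, build the reference grid with np.arange, then align file samples to that grid with a pair of np.in1d masks. A standalone sketch of just that idiom (snap_up is an invented helper name):

import numpy as np

dt = np.timedelta64(5, 'm')
dt_sec = dt.astype('timedelta64[s]').astype(int)  # 300 seconds

def snap_up(t):
    """Round a datetime64 up to the next multiple of dt."""
    secs = t.astype('datetime64[s]').astype(int)
    return (np.ceil(secs / dt_sec) * dt_sec).astype('datetime64[s]')

start = snap_up(np.datetime64('2015-01-01T00:03:10'))  # -> ...T00:05:00
ref_times = np.arange(start, start + np.timedelta64(1, 'h'), dt)
ref_ut = ref_times.astype('datetime64[s]').astype(int)
file_ut = ref_ut[::2]                  # pretend the file holds every other step

ref_mask = np.in1d(ref_ut, file_ut)    # grid slots the file can fill
file_mask = np.in1d(file_ut, ref_ut)   # file samples that land on the grid
print(ref_mask.sum() == file_mask.sum())  # True: aligned assignment is safe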
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server import util
class TermRespObj(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, code_id=None, code_sab=None, code=None, concept=None, tty=None, term=None, matched=None, rel_type=None, rel_sab=None): # noqa: E501
"""TermRespObj - a model defined in OpenAPI
:param code_id: The code_id of this TermRespObj. # noqa: E501
:type code_id: str
:param code_sab: The code_sab of this TermRespObj. # noqa: E501
:type code_sab: str
:param code: The code of this TermRespObj. # noqa: E501
:type code: str
:param concept: The concept of this TermRespObj. # noqa: E501
:type concept: str
:param tty: The tty of this TermRespObj. # noqa: E501
:type tty: str
:param term: The term of this TermRespObj. # noqa: E501
:type term: str
:param matched: The matched of this TermRespObj. # noqa: E501
:type matched: str
:param rel_type: The rel_type of this TermRespObj. # noqa: E501
:type rel_type: str
:param rel_sab: The rel_sab of this TermRespObj. # noqa: E501
:type rel_sab: str
"""
self.openapi_types = {
'code_id': str,
'code_sab': str,
'code': str,
'concept': str,
'tty': str,
'term': str,
'matched': str,
'rel_type': str,
'rel_sab': str
}
self.attribute_map = {
'code_id': 'code_id',
'code_sab': 'code_sab',
'code': 'code',
'concept': 'concept',
'tty': 'tty',
'term': 'term',
'matched': 'matched',
'rel_type': 'rel_type',
'rel_sab': 'rel_sab'
}
self._code_id = code_id
self._code_sab = code_sab
self._code = code
self._concept = concept
self._tty = tty
self._term = term
self._matched = matched
self._rel_type = rel_type
self._rel_sab = rel_sab
@classmethod
def from_dict(cls, dikt) -> 'TermRespObj':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The TermRespObj of this TermRespObj. # noqa: E501
:rtype: TermRespObj
"""
return util.deserialize_model(dikt, cls)
@property
def code_id(self):
"""Gets the code_id of this TermRespObj.
:return: The code_id of this TermRespObj.
:rtype: str
"""
return self._code_id
@code_id.setter
def code_id(self, code_id):
"""Sets the code_id of this TermRespObj.
:param code_id: The code_id of this TermRespObj.
:type code_id: str
"""
self._code_id = code_id
@property
def code_sab(self):
"""Gets the code_sab of this TermRespObj.
:return: The code_sab of this TermRespObj.
:rtype: str
"""
return self._code_sab
@code_sab.setter
def code_sab(self, code_sab):
"""Sets the code_sab of this TermRespObj.
:param code_sab: The code_sab of this TermRespObj.
:type code_sab: str
"""
self._code_sab = code_sab
@property
def code(self):
"""Gets the code of this TermRespObj.
:return: The code of this TermRespObj.
:rtype: str
"""
return self._code
@code.setter
def code(self, code):
"""Sets the code of this TermRespObj.
:param code: The code of this TermRespObj.
:type code: str
"""
self._code = code
@property
def concept(self):
"""Gets the concept of this TermRespObj.
:return: The concept of this TermRespObj.
:rtype: str
"""
return self._concept
@concept.setter
def concept(self, concept):
"""Sets the concept of this TermRespObj.
:param concept: The concept of this TermRespObj.
:type concept: str
"""
self._concept = concept
@property
def tty(self):
"""Gets the tty of this TermRespObj.
:return: The tty of this TermRespObj.
:rtype: str
"""
return self._tty
@tty.setter
def tty(self, tty):
"""Sets the tty of this TermRespObj.
:param tty: The tty of this TermRespObj.
:type tty: str
"""
self._tty = tty
@property
def term(self):
"""Gets the term of this TermRespObj.
:return: The term of this TermRespObj.
:rtype: str
"""
return self._term
@term.setter
def term(self, term):
"""Sets the term of this TermRespObj.
:param term: The term of this TermRespObj.
:type term: str
"""
self._term = term
@property
def matched(self):
"""Gets the matched of this TermRespObj.
:return: The matched of this TermRespObj.
:rtype: str
"""
return self._matched
@matched.setter
def matched(self, matched):
"""Sets the matched of this TermRespObj.
:param matched: The matched of this TermRespObj.
:type matched: str
"""
self._matched = matched
@property
def rel_type(self):
"""Gets the rel_type of this TermRespObj.
:return: The rel_type of this TermRespObj.
:rtype: str
"""
return self._rel_type
@rel_type.setter
def rel_type(self, rel_type):
"""Sets the rel_type of this TermRespObj.
:param rel_type: The rel_type of this TermRespObj.
:type rel_type: str
"""
self._rel_type = rel_type
@property
def rel_sab(self):
"""Gets the rel_sab of this TermRespObj.
:return: The rel_sab of this TermRespObj.
:rtype: str
"""
return self._rel_sab
@rel_sab.setter
def rel_sab(self, rel_sab):
"""Sets the rel_sab of this TermRespObj.
:param rel_sab: The rel_sab of this TermRespObj.
:type rel_sab: str
"""
self._rel_sab = rel_sab | server/openapi_server/models/term_resp_obj.py | 0.7641 | 0.079353 |
from datetime import datetime, timedelta
import gzip
import json
import logging
from queue import Queue, Empty
import random
import re
import struct
import time
from coapthon.messages.message import Message
from coapthon import defines
from coapthon.client.coap import CoAP
from coapthon.messages.request import Request
from coapthon.utils import generate_random_token
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_HOST, CONF_NAME, EVENT_HOMEASSISTANT_STOP
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import track_point_in_time
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = []
REQUIREMENTS = ['CoAPy==4.1.5']
CONF_UPDATE_TIME = 'update_time'
CONF_DISCOVER_TIME = 'discover_time'
CONF_DEVICE_CLEANUP_TIME = 'device_cleanup_time'
CONF_BATCH_SIZE = 'batch_size'
CONF_MAX_DATA_TRANSFERRED = 'max_data_transferred'
CONF_MONITORS = 'monitors'
SECONDS_IN_A_YEAR = 31536000
RUNNING = True
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_UPDATE_TIME,
default=timedelta(seconds=60)): cv.time_period,
vol.Optional(CONF_DISCOVER_TIME,
default=timedelta(minutes=5)): cv.time_period,
vol.Optional(CONF_DEVICE_CLEANUP_TIME,
default=timedelta(days=1)): cv.time_period,
vol.Optional(CONF_BATCH_SIZE, default=1): cv.positive_int,
vol.Optional(CONF_MAX_DATA_TRANSFERRED, default=120): cv.positive_int,
vol.Optional(CONF_MONITORS, default=[]):
vol.All(cv.ensure_list, [cv.string]),
})
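# Illustrative Home Assistant YAML for this platform (key names come from the
# schema above; the platform slug and values are hypothetical):
#   sensor:
#     - platform: prisms_wifi_sensor
#       update_time: 60
#       discover_time: 300
#       batch_size: 5
#       monitors:
#         - 192.168.1.50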
def setup_platform(hass, config, add_devices, discovery_info=None):
def next_data_time():
return dt_util.now() + config[CONF_UPDATE_TIME]
def next_discover_time():
return dt_util.now() + config[CONF_DISCOVER_TIME]
# Device name => device
devices = {}
device_cleanup_time = config[CONF_DEVICE_CLEANUP_TIME]
def data_action(now):
now = dt_util.now()
def active(device):
if now - device.last_discovered > device_cleanup_time:
_LOGGER.warning("Skipping device %s - %s: Hasn't been "
"seen since %s", device.name, device.address,
device.last_discovered)
return False
else:
return True
device_list = list(devices.values())
device_list = [device for device in device_list if active(device)]
for device in device_list:
try:
get_data(device,
config[CONF_BATCH_SIZE],
config[CONF_MAX_DATA_TRANSFERRED])
except Exception as exp:
_LOGGER.exception(
"Error occurred while getting data from %s: %s",
device,
exp)
# Schedule again
next = next_data_time()
_LOGGER.debug("Scheduling to get data at %s", next)
track_point_in_time(hass, data_action, next)
# Schedule a time to update
next = next_data_time()
_LOGGER.debug("Scheduling to get data at %s", next)
track_point_in_time(hass, data_action, next)
# Connect to multicast address
discover_client = Client(server=('172.16.58.3', 5683))
provided_devices = config[CONF_MONITORS]
def discover_action(now):
try:
discover(discover_client,
devices,
provided_devices,
add_devices,
hass)
except Exception as exp:
_LOGGER.exception(
"Error occurred while discovering devices: %s",
exp)
# Schedule again
next = next_discover_time()
_LOGGER.debug("Scheduling to discover at %s", next)
track_point_in_time(hass, discover_action, next)
# Start discovery
next = dt_util.now() + timedelta(seconds=5)
_LOGGER.debug("Scheduling to discover at %s", next)
track_point_in_time(hass, discover_action, next)
def stop(event):
global RUNNING
RUNNING = False
_LOGGER.info("Shutting down Air Quality component")
if discover_client is not None:
discover_client.stop()
device_list = list(devices.values())
for device in device_list:
device.client.stop()
_LOGGER.debug("Done shutting down Air Quality component")
# Register to know when home assistant is stopping
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop)
def discover(discover_client, devices, provided_devices, add_devices, hass):
_LOGGER.info("Looking for new Air Quality devices")
# Send a message to discover new devices
responses = discover_client.multicast_discover()
_LOGGER.debug("Finished multicast discovering devices (%s)", len(responses))
# Discover clients that have been provided
for device in provided_devices:
_LOGGER.debug("Discovering %s", device)
client = Client(server=(device, 5683))
response = client.discover()
if response is not None:
responses.append(response)
client.stop()
if not RUNNING:
_LOGGER.debug("Stop discovering new devices")
return
now = dt_util.now()
_LOGGER.info("Processing discovered sensors (%s):", len(responses))
for response in responses:
if response is None:
# This means that we are trying to exit in the middle of discovery
break
# Get the hostname
m = re.search("</name=(.*?)>", response.payload.decode('utf8'))
if m is None:
_LOGGER.warning("Couldn't find hostname in response: %s", response)
continue
name = m.group(1)
address = response.source[0]
_LOGGER.info("\tFound device: %s - %s", name, address)
if name in devices:
_LOGGER.info("\tDevice has already been discovered")
devices[name].last_discovered = now
if devices[name].address != address:
_LOGGER.warning("\tAddress of device has changed!")
devices[name].address = address
continue
# Add the new device to home assistant
sensors = []
callbacks = []
# Create a special sensor that keeps track of how many
# packets are received from a sensor
data_points_sensor = AirQualitySensor(name, 'data_points_received', hass)
add_devices([data_points_sensor])
if RUNNING:
devices[name] = PrismsDevice(address,
name,
add_devices,
data_points_sensor.update,
hass)
def get_data(device, batch_size, max_data_transferred):
_LOGGER.info("Getting new data from %s (%s) at %s",
device.name,
device.address,
dt_util.now())
try:
total_packets = 0
while True:
data = None
_LOGGER.info("ACKing %s and requesting %s (%s - %s)",
device.ack,
batch_size,
device.name,
device.address)
payload = struct.pack('!HH', device.ack, batch_size)
response = device.client.get('data', payload=payload)
if response is None:
device.ack = 0
_LOGGER.info(
"Did not receive a response from sensor %s - %s",
device.name, device.address)
break
if len(response.payload) == 0:
device.ack = 0
_LOGGER.info(
"Received an empty payload from %s - %s",
device.name, device.address)
break
data = json.loads(gzip.decompress(response.payload).decode())
_LOGGER.info("Received data from %s: %s samples", device.name, len(data))
_LOGGER.debug("Data (%s): %s (%s - %s - %s)",
len(data),
data,
device.name,
device.address,
response.mid)
device.ack = len(data)
total_packets += device.ack
now = time.time()
device.packet_received_cb({'data_points_received': (len(data), 'num'),
'sequence': (0, 'sequence'),
'sampletime': (now, 's')})
# For each new piece of data, notify everyone that has
# registered a callback
for d in data:
# Make sure the timestamp makes sense
if abs(now - d['sampletime'][0]) >= SECONDS_IN_A_YEAR:
_LOGGER.warning(
"Sample time is too far off: %s. Data: %s",
d['sampletime'][0],
d)
_LOGGER.debug("Updating data for %s - %s", device.name, device.address)
device.update_data(d)
# If we get all of the data we ask for, then let's request more
# right away
if device.ack != batch_size:
_LOGGER.info(
"%s - %s: Stopping because acks (%s) != size (%s)",
device.name, device.address, device.ack, batch_size)
time.sleep(1)
break
# Let's give the system some time to catch up
# We will try again after CONF_UPDATE_TIME amount of time
_LOGGER.info("%s (total_packets) >= %s (max_data_transferred)",
total_packets,
max_data_transferred)
if total_packets >= max_data_transferred:
_LOGGER.info(
"%s - %s: Stopping because total_packets (%s) > %s",
device.name, device.address, total_packets, max_data_transferred)
time.sleep(1)
break
except Exception:
device.ack = 0
_LOGGER.exception(
"Unable to receive data or unpack data: %s (%s - %s)",
data, device.name, device.address)
time.sleep(1)
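# Wire format used by get_data, as inferred from the code above: the request
# payload packs (ack, batch_size) as two big-endian unsigned shorts, and the
# response payload is a gzip-compressed JSON list of samples. For example:
#   struct.pack('!HH', 3, 5)                       # -> b'\x00\x03\x00\x05'
#   json.loads(gzip.decompress(payload).decode())  # -> list of sample dicts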
class PrismsDevice(object):
def __init__(self, address, name, add_devices_cb, packet_received_cb, hass):
self._address = address
self.name = name
self.add_devices_cb = add_devices_cb
self.packet_received_cb = packet_received_cb
self.hass = hass
self.ignore_sensors = ['ip_address', 'name']
self.sensors = {}
self.ack = 0
self.last_discovered = dt_util.now()
self.client = Client(server=(address, 5683))
@property
def address(self):
return self._address
@address.setter
def address(self, new_address):
_LOGGER.debug("Updating address from {} to {}",
self._address,
new_address)
self._address = new_address
_LOGGER.debug("Stopping client of old address")
self.client.stop()
_LOGGER.debug("Creating a new client with new address")
self.client = Client(server=(new_address, 5683))
def update_data(self, data):
for key, value in data.items():
if key in self.ignore_sensors:
# Some data we don't care about
continue
if key not in self.sensors:
sensor = AirQualitySensor(self.name, key, self.hass)
self.add_devices_cb([sensor])
self.sensors[key] = sensor
_LOGGER.debug("Calling update on %s (%s - %s)", key, self.name, self.address)
self.sensors[key].update(data)
time.sleep(0.05)
class AirQualitySensor(Entity):
def __init__(self, monitor_name, sensor_name, hass):
self._monitor_name = monitor_name
self._name = sensor_name
self.hass = hass
self._data = None
def update(self, data):
self._data = data
self.schedule_update_ha_state()
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format(self._monitor_name, self._name)
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
if self._data is None:
return None
return self._data[self._name][1]
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self._data is None:
return None
return {'sequence': self._data['sequence'][0],
'sample_time': dt_util.utc_from_timestamp(self._data['sampletime'][0])}
@property
def state(self):
"""Return the state of the entity."""
if self._data is None:
return None
return self._data[self._name][0]
@property
def force_update(self):
return True
class Client(object):
def __init__(self, server):
self.server = server
self.protocol = CoAP(self.server,
random.randint(1, 65535),
self._wait_response,
self._timeout)
self.queue = Queue()
self.running = True
def _wait_response(self, message):
if message.code != defines.Codes.CONTINUE.number:
self.queue.put(message)
def _timeout(self, message):
_LOGGER.warning("Timed out trying to send message: %s", message)
self.queue.put(None)
def stop(self):
self.running = False
self.protocol.stop()
self.queue.put(None)
def get(self, path, payload=None): # pragma: no cover
request = Request()
request.destination = self.server
request.code = defines.Codes.GET.number
request.uri_path = path
request.payload = payload
# Clear out queue before sending a request. It is possible that an old
# response was received between requests. We don't want the requests
# and responses to be mismatched. I expect the protocol to take care of
# that, but I don't have confidence in the CoAP library.
try:
while True:
self.queue.get_nowait()
except Empty:
pass
self.protocol.send_message(request)
response = self.queue.get(block=True)
_LOGGER.debug("%s: Got response to GET request with MID: %s", self.server[0], request.mid)
return response
def discover(self):
request = Request()
request.destination = self.server
request.code = defines.Codes.GET.number
request.uri_path = defines.DISCOVERY_URL
try:
while True:
self.queue.get_nowait()
except Empty:
pass
self.protocol.send_message(request)
response = self.queue.get(block=True)
return response
def multicast_discover(self): # pragma: no cover
request = Request()
request.destination = self.server
request.code = defines.Codes.GET.number
request.uri_path = defines.DISCOVERY_URL
self.protocol.send_message(request)
first_response = self.queue.get(block=True)
if first_response is None:
# The message timed out
return []
responses = [first_response]
try:
# Keep trying to get more responses if they come in
while self.running:
responses.append(self.queue.get(block=True, timeout=10))
except Empty:
pass
return responses | sensor/prisms_wifi_sensor.py | 0.418222 | 0.10316 |
from __future__ import division
from __future__ import absolute_import
import numpy as np
from .constants import k_erg, ls_km
def effective_area(N, d_dish=7., band=6):
"""
The effective area.
Parameters
----------
N : int
The number of antennas.
d_dish : float
Diameter of the dish, units: m.
band : int
The number of the band.
Returns
-------
aeff : float
The effective area of the telescope, units: m.
"""
itaDict_12 = {
3: 0.71,
4: 0.70,
5: 0.69,
6: 0.68,
7: 0.63,
8: 0.60,
9: 0.43,
10: 0.31
}
itaDict_7 = {
3: 0.71,
4: 0.71,
5: 0.70,
6: 0.69,
7: 0.66,
8: 0.64,
9: 0.52,
10: 0.42
}
if d_dish == 7.:
itaDict = itaDict_7
elif d_dish == 12.:
itaDict = itaDict_12
else:
raise ValueError("The dish diameter ({0}) is not correct!".format(d_dish))
if band not in itaDict:
raise ValueError("The band ({0}) is not recognized!".format(band))
aeff = np.pi * (d_dish / 2.)**2. * itaDict[band]
return aeff
def omega(theta):
"""
Calculate the beam solid angle.
Parameters
----------
theta : float
The spatial resolution, units: arcsec.
Returns
-------
omg : float
The beam solid angle, units: steradian.
"""
theta *= np.pi / 180 / 3600 # Convert to radian
omg = np.pi * theta**2 / (4 * np.log(2))
return omg
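# Worked example: a 1 arcsec beam gives
#   theta = pi / (180 * 3600) ~ 4.848e-6 rad
#   omega = pi * theta**2 / (4 * ln 2) ~ 2.66e-11 sr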
def sigma_S(Tsys, wr=1.1, Aeff=1., fs=0, itaq=0.96, itac=0.88, N=10,
npol=2, delta_nu=1., tint=1):
"""
The formula to calculate the point-source sensitivity.
Please refer to the ALMA Technical Handbook.
Parameters
----------
Tsys : float
The system temperature, unit: K.
wr : float
Robust weighting factor, default: 1.1.
Aeff : float
Effective area, units: m^2.
fs : float
Shadowing fraction.
itaq : float
Quantization efficiency, default: 0.96.
itac : float
Correlator efficiency, default: 0.88.
N : int
The number of antennas.
npol : int
The number of polarization, 1 or 2, default: 2.
delta_nu : float
Resolution element width, units: GHz.
tint : float
Integration time, units: minutes.
Returns
------
sigma : float
The sensitivity, units: Jy.
"""
Aeff *= 1e4 # Convert to cm^2
delta_nu *= 1e9 # Convert to Hz
tint *= 60 # Convert to second
sigma = (wr * 2 * k_erg * Tsys) / (itaq * itac * Aeff * (1 - fs) * \
(N * (N - 1) * npol * delta_nu * tint)**0.5)
sigma *= 1e23 # Convert to Jy
return sigma
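# The expression implemented above is the standard interferometric
# point-source sensitivity,
#   sigma = w_r * 2 * k * Tsys /
#           (eta_q * eta_c * Aeff * (1 - fs) * sqrt(N * (N - 1) * npol * dnu * t)),
# evaluated in CGS units and converted to Jy via 1 Jy = 1e-23 erg s^-1 cm^-2 Hz^-1.
# Minimal usage sketch (assumes k_erg from .constants is Boltzmann's constant in erg/K):
#   sigma_S(Tsys=100., Aeff=effective_area(10, 7., 6), N=10, delta_nu=0.002, tint=60)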
def Sensitivity_ALMA(band, N, Tsys, tint, delta_nu, array="ACA", **kws_sig):
"""
Calculate the sensitivity of ALMA.
Parameters
----------
band : int
The number of the band.
N : int
The number of antennas.
Tsys : float
The system temperature, unit: K.
tint : float
Integration time, units: minutes.
delta_nu : float
Resolution element width, units: GHz.
array : string
The type of the array, "ACA" or "12m", default: "ACA".
**kws_sig : other parameters of sigma_S.
Returns
-------
sigma : float
The sensitivity, unit: Jy.
"""
if array == "ACA":
d_dish = 7
if N > 20:
raise ValueError("The number of antennas ({0}) is likely wrong!!".format(N))
elif array == "12m":
d_dish = 12
else:
raise ValueError("The array type ({0}) is not recognized!".format(array))
Aeff = effective_area(N, d_dish, band)
kws_sig["N"] = N
kws_sig["tint"] = tint
kws_sig["delta_nu"] = delta_nu
if not "Aeff" in kws_sig:
kws_sig["Aeff"] = Aeff
sigma = sigma_S(Tsys, **kws_sig)
return sigma | sgAstro/AlmaSensitivityCalculator.py | 0.940358 | 0.621828 |
# -*- coding: utf-8 -*-
from datetime import datetime
from django.db import models
# Create your models here.
class Target(models.Model):
uid = models.CharField(max_length=20, verbose_name=u"爬取用户")
cookie = models.TextField(verbose_name=u"设置cookie")
isScrapy = models.IntegerField(default=0, verbose_name=u"是否爬取")
group = models.IntegerField(default=0, verbose_name=u"用户分组")
add_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")
class Meta:
verbose_name = u"爬虫初始"
verbose_name_plural = verbose_name
def __str__(self):
return "{0}".format(self.uid)
class UserInfo(models.Model):
"""User profile information."""
_id = models.CharField(max_length=200, verbose_name=u"用户id", primary_key=True)  # user ID
Image = models.TextField(verbose_name=u"用户头像", blank=True)  # avatar
nick_name = models.CharField(max_length=30, verbose_name=u"昵称")  # nickname
gender = models.CharField(max_length=6, choices=(("male", u"男"), ("female", u"女")), default="female", verbose_name=u"性别")  # gender
labels = models.CharField(max_length=500, verbose_name=u"标签", blank=True)  # tags
province = models.CharField(max_length=30, verbose_name=u"所在省", blank=True)  # province
city = models.CharField(max_length=30, verbose_name=u"所在城市", blank=True)  # city
brief_introduction = models.CharField(max_length=500, verbose_name=u"简介", blank=True)  # short bio
birthday = models.DateField(verbose_name=u"生日", null=True, blank=True)  # birthday
constellation = models.CharField(max_length=30, verbose_name=u"星座", blank=True)  # zodiac sign
tweets_num = models.IntegerField(default=0, verbose_name=u'微博数')  # number of posts
fans_num = models.IntegerField(default=0, verbose_name=u'粉丝数')  # number of followers
follows_num = models.IntegerField(default=0, verbose_name=u'关注数', blank=True)  # number of accounts followed
sex_orientation = models.CharField(max_length=30, verbose_name=u"性取向", blank=True)  # sexual orientation
sentiment = models.CharField(max_length=30, verbose_name=u"感情状况", blank=True)  # relationship status
vip_level = models.CharField(max_length=30, verbose_name=u"会员等级", blank=True)  # membership level
authentication = models.CharField(max_length=30, verbose_name=u"认证", blank=True)  # verification status
person_url = models.CharField(max_length=30, verbose_name=u"首页链接", blank=True)  # profile page URL
crawl_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")  # crawl timestamp
class Meta:
verbose_name = u"用户信息"
verbose_name_plural = verbose_name
def __str__(self):
return "{0}".format(self.nick_name)
class TweetsInfo(models.Model):
"""Weibo post information."""
_id = models.CharField(max_length=50, verbose_name=u"微博ID", primary_key=True)  # post ID
user_id = models.CharField(max_length=200, verbose_name=u"用户信息")  # owning user
content = models.TextField(verbose_name=u"微博内容")  # post content
created_at = models.DateTimeField(verbose_name=u"发表时间", blank=True)  # publish time
weibo_url = models.TextField(verbose_name=u"weibo的URL", blank=True)
# Co_oridinates = models.CharField(max_length=300, verbose_name=u"定位坐标", blank=True)  # geo coordinates
# Tools = models.CharField(max_length=300, verbose_name=u"发布工具", blank=True)  # publishing tool/platform
like_num = models.IntegerField(default=0, verbose_name=u'点赞数', blank=True)  # like count
comment_num = models.IntegerField(default=0, verbose_name=u'评论数', blank=True)  # comment count
repost_num = models.IntegerField(default=0, verbose_name=u'转载数', blank=True)  # repost count
crawl_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")  # crawl timestamp
class Meta:
verbose_name = u"微博信息"
verbose_name_plural = verbose_name
def __str__(self):
return "{0}".format(self._id)
class RelationshipsInfo(models.Model):
_id = models.CharField(max_length=50, verbose_name=u"用户关系ID", primary_key=True)
fan_id = models.CharField(max_length=50, verbose_name=u"关注者的用户ID")
followed_id = models.CharField(max_length=50, verbose_name=u"被关注者的用户ID")
crawl_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")
class Meta:
verbose_name = u"用户关系"
verbose_name_plural = verbose_name
def __str__(self):
return "{0}".format(self._id)
class CommentInfo(models.Model):
_id = models.CharField(max_length=50, verbose_name=u"评论的ID", primary_key=True)
comment_user_id = models.CharField(max_length=50, verbose_name=u"评论的用户ID")
weibo_url = models.TextField(verbose_name=u"weibo的URL", blank=True)
content = models.TextField(verbose_name=u"评论内容", blank=True)
created_at = models.CharField(max_length=30, verbose_name=u"评论创建时间", blank=True)
crawl_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")
class Meta:
verbose_name = u"评论内容"
verbose_name_plural = verbose_name
def __str__(self):
return "{0}".format(self._id) | src/ScrapydAPI/models.py | from django.db import models
# Create your models here.
from django.db import models
# Create your models here.
# -*- utf-8 -*-
from datetime import datetime
from django.db import models
# Create your models here.
class Target(models.Model):
uid = models.CharField(max_length=20, verbose_name=u"爬取用户")
cookie = models.TextField(verbose_name=u"设置cookie")
isScrapy = models.IntegerField(default=0, verbose_name=u"是否爬取")
group = models.IntegerField(default=0, verbose_name=u"用户分组")
add_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")
class Meta:
verbose_name = u"爬虫初始"
verbose_name_plural = verbose_name
def __str__(self):
return "{0}".format(self.uid)
class UserInfo(models.Model):
""" 个人信息 """
_id = models.CharField(max_length=200, verbose_name=u"用户id", primary_key=True) # 用户ID
Image = models.TextField(verbose_name=u"用户头像", blank=True) # 用户头像
nick_name = models.CharField(max_length=30, verbose_name=u"昵称") #昵称
gender = models.CharField(max_length=6, choices=(("male", u"男"), ("female", u"女")), default="female",verbose_name=u"性别") # 性别
labels = models.CharField(max_length=500, verbose_name=u"标签", blank=True) # 所在省
province = models.CharField(max_length=30, verbose_name=u"所在省", blank=True) # 所在省
city = models.CharField(max_length=30, verbose_name=u"所在城市", blank=True) # 所在城市
brief_introduction = models.CharField(max_length=500, verbose_name=u"简介", blank=True) # 简介
birthday = models.DateField(verbose_name=u"生日", null=True, blank=True) # 生日
constellation = models.CharField(max_length=30, verbose_name=u"星座", blank=True) # 所在城市
tweets_num = models.IntegerField(default=0, verbose_name=u'微博数') # 微博数
fans_num = models.IntegerField(default=0, verbose_name=u'关注数') # 关注数
follows_num = models.IntegerField(default=0, verbose_name=u'粉丝数', blank=True) # 粉丝数
sex_orientation = models.CharField(max_length=30, verbose_name=u"性取向", blank=True) # 性取向
sentiment = models.CharField(max_length=30, verbose_name=u"感情状况", blank=True) # 感情状况
vip_level = models.CharField(max_length=30, verbose_name=u"会员等级", blank=True) # 会员等级
authentication = models.CharField(max_length=30, verbose_name=u"认证", blank=True) # 认证
person_url = models.CharField(max_length=30, verbose_name=u"首页链接", blank=True) # 首页链接
crawl_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")
class Meta:
verbose_name = u"用户信息"
verbose_name_plural = verbose_name
def __str__(self):
return "{0}".format(self.nick_name)
class TweetsInfo(models.Model):
""" 微博信息 """
_id = models.CharField(max_length=50, verbose_name=u"微博ID", primary_key=True) # 微博内容ID标签
user_id = models.CharField(max_length=200, verbose_name=u"用户信息") # 用户信息
content = models.TextField(verbose_name=u"微博内容") # 微博内容
created_at = models.DateTimeField(verbose_name=u"发表时间", blank=True) # 发表时间
weibo_url = models.TextField(verbose_name=u"weibo的URL", blank=True)
# Co_oridinates = models.CharField(max_length=300, verbose_name=u"定位坐标", blank=True) # 定位坐标
# Tools = models.CharField(max_length=300, verbose_name=u"发布工具", blank=True) # 发布工具/平台
like_num = models.IntegerField(default=0, verbose_name=u'点赞数', blank=True) # 点赞数
comment_num = models.IntegerField(default=0, verbose_name=u'评论数', blank=True) # 评论数
repost_num = models.IntegerField(default=0, verbose_name=u'转载数', blank=True) # 转载数
crawl_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")
class Meta:
verbose_name = u"微博信息"
verbose_name_plural = verbose_name
def __str__(self):
return "{0}".format(self._id)
class RelationshipsInfo(models.Model):
_id = models.CharField(max_length=50, verbose_name=u"用户关系ID", primary_key=True)
fan_id = models.CharField(max_length=50, verbose_name=u"关注者的用户ID")
followed_id = models.CharField(max_length=50, verbose_name=u"被关注者的用户ID")
crawl_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")
class Meta:
verbose_name = u"用户关系"
verbose_name_plural = verbose_name
def __str__(self):
return "{0}".format(self._id)
class CommentInfo(models.Model):
_id = models.CharField(max_length=50, verbose_name=u"评论的ID", primary_key=True)
comment_user_id = models.CharField(max_length=50, verbose_name=u"评论的用户ID")
weibo_url = models.TextField(verbose_name=u"weibo的URL", blank=True)
content = models.TextField(verbose_name=u"评论内容", blank=True)
created_at = models.CharField(max_length=30, verbose_name=u"评论创建时间", blank=True)
crawl_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")
class Meta:
verbose_name = u"评论内容"
verbose_name_plural = verbose_name
def __str__(self):
return "{0}".format(self._id) | 0.302288 | 0.108519 |
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout
from tensorflow.keras.layers import LSTM
from tensorflow.keras.optimizers import RMSprop, Adam
import numpy as np
class NN:
def __init__(self, modelFilePath):
# Create a sorted list of the characters
self.chars = ['\n', ' ', '!', '"', '#', '$', '%', '&', "'", '(', ')', '*', '+', ',', '-', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?', '@', '\\', '^', '_', '`', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~', 'ì', 'í', '\u200d', '’', '☀', '☝', '☠', '☹', '♀', '♂', '✅', '✊', '✌', '❤', '️', '🌞', '🌵', '🍉', '🍕', '🍺', '🍻', '🎂', '🎆', '🐢', '👀', '👋', '👌', '👍', '👎', '👻', '💀', '💙', '💩', '💪', '💯', '💸', '📑', '📓', '📔', '📕', '📗', '📘', '📙', '📚', '🔖', '🔥', '🔫', '🖕', '🖖', '😀', '😁', '😂', '😃', '😄', '😆', '😇', '😉', '😊', '😋', '😍', '😎', '😏', '😐', '😑', '😒', '😔', '😕', '😛', '😜', '😠', '😢', '😥', '😦', '😩', '😬', '😭', '😮', '😯', '😲', '😴', '😶', '🙁', '🙂', '🙃', '🙄', '🙏', '🤑', '🤔', '🤖', '🤘', '🤙', '🤞', '🤟', '🤢', '🤣', '🤦', '🤨', '🤪', '🤫', '🤭', '🤮', '🤷', '🥫', '🥲', '🥳', '🥺', '🧐', '🧟']
# Create a dictionary where given a character, you can look up the index and vice versa
self.char_indices = dict((c, i) for i, c in enumerate(self.chars))
self.indices_char = dict((i, c) for i, c in enumerate(self.chars))
# cut the text in semi-redundant sequences of self.maxlen characters
self.maxlen = 15 # The window size
self.model = Sequential()
self.model.add(LSTM(128, input_shape=(self.maxlen, len(self.chars))))
self.model.add(Dense(len(self.chars)))
self.model.add(Activation('softmax'))
self.model.compile(loss='categorical_crossentropy', optimizer='adam', run_eagerly=False)
self.model.load_weights(modelFilePath)
def sample(self, preds, temperature=1.0):
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
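# Temperature sampling: the softmax outputs are re-weighted as
# p_i' ~ p_i**(1/T), so T < 1 sharpens the distribution and T > 1 flattens it.
# e.g. preds=[0.8, 0.2] at T=0.5 -> [0.64, 0.04], renormalised -> [0.941, 0.059].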
def getResponse(self, seed):
# Left-pad the seed with spaces, then keep only the last maxlen characters.
sentence = ((" " * self.maxlen) + seed)[-self.maxlen:]
variance = 0.2  # sampling temperature
generated = ''
window = sentence
for i in range(280):
x = np.zeros((1, self.maxlen, len(self.chars)))
for t, char in enumerate(window):
if char in self.char_indices:  # skip characters outside the vocabulary
x[0, t, self.char_indices[char]] = 1.
preds = self.model.predict(x, verbose=0)[0]
next_index = self.sample(preds, variance)
next_char = self.indices_char[next_index]
generated += next_char
window = window[1:] + next_char
return generated | noncommands/NN.py | 0.872538 | 0.451085 |
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.model_selection import train_test_split, StratifiedKFold, GridSearchCV, cross_val_score
from sklearn.metrics import accuracy_score
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
df = pd.read_csv('data/telecom_churn.csv')
df.head()
# Convert Yes/No to 1/0
## pd.factorize gives output of array and index, hence [0] to take the array only
df['International plan'] = pd.factorize(df['International plan'], sort = True)[0]
df['Voice mail plan'] = pd.factorize(df['Voice mail plan'], sort = True)[0]
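# For reference:
#   pd.factorize(pd.Series(['No', 'Yes', 'No']), sort=True)
#   -> (array([0, 1, 0]), Index(['No', 'Yes'], dtype='object'))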
# Convert boolean (T/F) to integer
df['Churn'] = df['Churn'].astype('int')
df.dtypes # `State` is the only non-numeric variable now
# Remove states, but store it just in case
states = df.pop('State')
# Create X and y
y = df.pop('Churn')
X = df
# Build train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=17)
# Setup and fit model
tree = DecisionTreeClassifier(max_depth=5, random_state=17)
tree.fit(X_train, y_train)
# Generate predictions and assess model accuracy
pred = tree.predict(X_test)
accuracy_score(y_test, pred)
# Generate decision tree dot file
import pydotplus
def tree_graph_to_png(tree, feature_names, png_file_to_save):
tree_str = export_graphviz(tree, feature_names=feature_names,
filled=True, out_file=None)
graph = pydotplus.graph_from_dot_data(tree_str)
graph.write_png(png_file_to_save)
tree_graph_to_png(tree=tree, feature_names=df.columns, png_file_to_save='data/my_tree.png')
# Hyper-parameter tuning
tree_params = {'max_depth': range(1,11),
'max_features': range(4,19)}
tree_grid = GridSearchCV(tree, tree_params,
cv=5, n_jobs=-1, verbose=True)
tree_grid.fit(X_train, y_train)
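# This grid evaluates 10 depths x 15 feature counts = 150 candidates,
# each cross-validated over 5 folds (750 fits in total).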
# Evaluate results of cross validation
tree_grid.best_params_
tree_grid.best_score_
tree_grid.best_estimator_
# Assess accuracy of "best" model from CV
pred_CV = tree_grid.predict(X_test)
accuracy_score(y_true=y_test, y_pred=pred_CV)
# Generate decision tree graph
tree_graph_to_png(tree=tree_grid, feature_names=df.columns, png_file_to_save='data/my_tree_CV.png') | ML_course/Decision_Trees/decision_tree_churn.py | 0.62681 | 0.445349 |
import torch
from megatron import mpu
# TODO: use functions from megatron/p2p
def recv_from_prev_pipeline_rank_(recv_buffer=None):
"""Receive from previous pipeline stage and update the
input buffer inplace."""
if not mpu.is_pipeline_first_stage():
assert recv_buffer is not None
recv_prev_op = torch.distributed.P2POp(
torch.distributed.irecv, recv_buffer,
mpu.get_pipeline_model_parallel_prev_rank())
reqs = torch.distributed.batch_isend_irecv([recv_prev_op])
for req in reqs:
req.wait()
# To protect against race condition when using batch_isend_irecv().
torch.cuda.synchronize()
# TODO: use functions from megatron/p2p
def send_to_next_pipeline_rank(tensor=None):
"""Send output to the next pipeline stage."""
if not mpu.is_pipeline_last_stage():
assert tensor is not None
send_next_op = torch.distributed.P2POp(
torch.distributed.isend, tensor,
mpu.get_pipeline_model_parallel_next_rank())
reqs = torch.distributed.batch_isend_irecv([send_next_op])
for req in reqs:
req.wait()
# To protect against race condition when using batch_isend_irecv().
torch.cuda.synchronize()
def _is_cuda(tensor):
"""Check if a tensor is not none and is cuda."""
assert tensor is not None
assert tensor.is_cuda
def _is_cuda_contiguous(tensor):
"""Check if a tensor is not none, is cuda, and is contiguous."""
_is_cuda(tensor)
assert tensor.is_contiguous()
def broadcast_from_last_pipeline_stage(size, dtype, tensor=None):
"""Broadcast a tensor from last pipeline stage to all ranks."""
is_last_stage = mpu.is_pipeline_last_stage()
# If first stage and last state are the same, then there is no
# pipeline parallelism and no need to communicate.
if mpu.is_pipeline_first_stage() and is_last_stage:
return tensor
if is_last_stage:
_is_cuda_contiguous(tensor)
else:
tensor = torch.empty(size,
dtype=dtype,
device=torch.cuda.current_device())
# Get the group and corresponding source rank.
src = mpu.get_pipeline_model_parallel_last_rank()
group = mpu.get_pipeline_model_parallel_group()
torch.distributed.broadcast(tensor, src, group)
return tensor
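# Typical call pattern (illustrative): every rank passes the expected size and
# dtype, and only the last pipeline stage supplies the tensor, e.g.
#   tokens = broadcast_from_last_pipeline_stage(
#       (batch, seq_len), torch.int64,
#       tokens if mpu.is_pipeline_last_stage() else None)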
def broadcast_from_last_to_first_pipeline_stage(size, dtype, tensor=None):
"""Broadcast tensor values from last stage into the first stage."""
is_last_stage = mpu.is_pipeline_last_stage()
is_first_stage = mpu.is_pipeline_first_stage()
# If first stage and last state are the same, then there is no
# pipeline parallelism and no need to communicate.
if is_first_stage and is_last_stage:
return tensor
# Only first and last stage pipeline stages need to be involved.
if is_last_stage or is_first_stage:
if is_last_stage:
_is_cuda_contiguous(tensor)
else:
tensor = torch.empty(size,
dtype=dtype,
device=torch.cuda.current_device())
src = mpu.get_pipeline_model_parallel_last_rank()
group = mpu.get_embedding_group()
# Broadcast from last stage into the first stage.
torch.distributed.broadcast(tensor, src, group)
else:
tensor = None
return tensor
def copy_from_last_to_first_pipeline_stage(size, dtype, tensor=None):
"""Copy tensor values from last stage into the first stage.
Note that the input tensor is updated in place."""
is_last_stage = mpu.is_pipeline_last_stage()
is_first_stage = mpu.is_pipeline_first_stage()
# If first stage and last state are the same, then there is no
# pipeline parallelism and no need to communicate.
if is_first_stage and is_last_stage:
return
# Only first and last stage pipeline stages need to be involved.
if is_last_stage or is_first_stage:
_is_cuda(tensor)
is_contiguous = tensor.is_contiguous()
src = mpu.get_pipeline_model_parallel_last_rank()
group = mpu.get_embedding_group()
if is_contiguous:
tensor_ = tensor
else:
if is_last_stage:
tensor_ = tensor.contiguous()
else:
tensor_ = torch.empty(size,
dtype=dtype,
device=torch.cuda.current_device())
# Broadcast from last stage into the first stage.
torch.distributed.broadcast(tensor_, src, group)
# Update the first stage tensor
if is_first_stage and not is_contiguous:
tensor[...] = tensor_
def broadcast_tensor(size, dtype, tensor=None, rank=0):
""" Given size and type of a tensor on all ranks and the tensor value
only on a specific rank, broadcast from that rank to all other ranks.
"""
if torch.distributed.get_rank() == rank:
_is_cuda_contiguous(tensor)
else:
tensor = torch.empty(size,
dtype=dtype,
device=torch.cuda.current_device())
torch.distributed.broadcast(tensor, rank)
return tensor
def broadcast_list(size, dtype, list_values=None, rank=0):
"""Broadcast a list of values with a given type."""
tensor = None
if torch.distributed.get_rank() == rank:
tensor = torch.tensor(list_values, dtype=dtype,
device=torch.cuda.current_device())
return broadcast_tensor(size, dtype, tensor=tensor, rank=rank)
def broadcast_int_list(size, int_list=None, rank=0):
"""Broadcast a list of interger values."""
return broadcast_list(size, torch.int64, list_values=int_list, rank=rank)
def broadcast_float_list(size, float_list=None, rank=0):
"""Broadcast a list of float values."""
return broadcast_list(size, torch.float32, list_values=float_list,
                          rank=rank) | megatron/text_generation/communication.py | 0.363308 | 0.533701
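The helpers above all follow one allocate-then-broadcast pattern: non-source ranks allocate an empty buffer of the agreed size and dtype, then every rank joins the same broadcast. A minimal standalone sketch of that pattern, assuming a single-process gloo group for illustration (demo_broadcast and the port are illustrative, not part of the Megatron API):
import os
import torch
import torch.distributed as dist

def demo_broadcast(size, dtype, tensor=None, src=0):
    # Ranks other than src receive into a freshly allocated buffer.
    if dist.get_rank() != src:
        tensor = torch.empty(size, dtype=dtype)
    else:
        assert tensor is not None and tensor.is_contiguous()
    dist.broadcast(tensor, src)
    return tensor

if __name__ == "__main__":
    os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
    os.environ.setdefault("MASTER_PORT", "29500")
    dist.init_process_group("gloo", rank=0, world_size=1)
    print(demo_broadcast((2, 3), torch.float32, torch.ones(2, 3)))
    dist.destroy_process_group()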
from __future__ import annotations
from src.compiler import generate_c_code_from_AST # type: ignore[import]
import subprocess
from pathlib import Path
from typing import Union
from src import core, parsing, CEAst, typecheck # type: ignore[import]
import sys
import shlex
def read_file(filepath: Union[Path, str]) -> str:
with open(filepath, "r") as f:
return f.read()
def usage() -> None:
print("[USAGE]")
print("python corpe.py <FILEPATH>")
print("Optional flags:")
print(" -r (run the generated executable)")
print("Compiler flags:")
print(" -O0 (no optimizations, default)")
print(" -O1 (some optimizations)")
print(" -O2 (more optimizations)")
print(" -O3 (even more optimizations)")
print(" -Ofast (using experimental features)")
sys.exit(1)
def consume_arg(arg: str) -> bool:
if arg in sys.argv:
sys.argv.remove(arg)
return True
return False
def get_optimization_flag() -> str:
ret = "-O0"
if consume_arg("-O0"):
ret = "-O0"
if consume_arg("-O1"):
ret = "-O1"
if consume_arg("-O2"):
ret = "-O2"
if consume_arg("-O3"):
ret = "-O3"
if consume_arg("-Ofast"):
ret = "-Ofast"
return ret
def echo_and_call(cmd: list[str]) -> int:
print(f"[CMD] {shlex.join(cmd)}")
return subprocess.call(cmd)
if __name__ == "__main__":
sys.argv.extend(["test.ce", "-r"])
if len(sys.argv) < 2 or any(
x in sys.argv for x in ["-h", "--h", "-help", "--help"]
):
usage()
filepath: str = sys.argv.pop(1)
base_filename: str = (
filepath[: -len(core.EXTENSION)]
if filepath.endswith(core.EXTENSION)
else filepath
)
run: bool = "-r" in sys.argv
optimization_flag: str = get_optimization_flag()
CEAst.run_checks()
with open(base_filename + ".c", "w") as out:
print(f"[INFO] parsing {filepath}...")
ast = CEAst.makeAST(parsing.parse_file(filepath), Path(filepath))
print(f"[INFO] type checking {filepath}...")
typecheck.typecheck_AST(ast)
print("[INFO] generating C code...")
out.write(generate_c_code_from_AST(ast, core.STACK_SIZE))
print("[INFO] compiling with GCC compiler...")
if echo_and_call(
["gcc", base_filename + ".c", "-o", base_filename + ".exe", optimization_flag]
):
run = False
if run:
print("[INFO] running the executable...")
echo_and_call([base_filename + ".exe"]) | corpe.py |
from __future__ import annotations
from src.compiler import generate_c_code_from_AST # type: ignore[import]
import subprocess
from pathlib import Path
from typing import Union
from src import core, parsing, CEAst, typecheck # type: ignore[import]
import sys
import shlex
def read_file(filepath: Union[Path, str]) -> str:
with open(filepath, "r") as f:
return f.read()
def usage() -> None:
print("[USAGE]")
print("python corpe.py <FILEPATH>")
print("Optional flags:")
print(" -r (run the generated executable)")
print("Compiler flags:")
print(" -O0 (no optimizations, default)")
print(" -O1 (some optimizations)")
print(" -O2 (more optimizations)")
print(" -O3 (even more optimizations)")
print(" -Ofast (using experimental features)")
sys.exit(1)
def consume_arg(arg: str) -> bool:
if arg in sys.argv:
sys.argv.remove(arg)
return True
return False
def get_optimization_flag() -> str:
ret = "-O0"
if consume_arg("-O0"):
ret = "-O0"
if consume_arg("-O1"):
ret = "-O1"
if consume_arg("-O2"):
ret = "-O2"
if consume_arg("-O3"):
ret = "-O3"
if consume_arg("-Ofast"):
ret = "-Ofast"
return ret
def echo_and_call(cmd: list[str]) -> int:
print(f"[CMD] {shlex.join(cmd)}")
return subprocess.call(cmd)
if __name__ == "__main__":
sys.argv.extend(["test.ce", "-r"])
if len(sys.argv) < 2 or any(
x in sys.argv for x in ["-h", "--h", "-help", "--help"]
):
usage()
filepath: str = sys.argv.pop(1)
base_filename: str = (
filepath[: -len(core.EXTENSION)]
if filepath.endswith(core.EXTENSION)
else filepath
)
run: bool = "-r" in sys.argv
optimization_flag: str = get_optimization_flag()
CEAst.run_checks()
with open(base_filename + ".c", "w") as out:
print(f"[INFO] parsing {filepath}...")
ast = CEAst.makeAST(parsing.parse_file(filepath), Path(filepath))
print(f"[INFO] type checking {filepath}...")
typecheck.typecheck_AST(ast)
print("[INFO] generating C code...")
out.write(generate_c_code_from_AST(ast, core.STACK_SIZE))
print("[INFO] compiling with GCC compiler...")
if echo_and_call(
["gcc", base_filename + ".c", "-o", base_filename + ".exe", optimization_flag]
):
run = False
if run:
print("[INFO] running the executable...")
echo_and_call([base_filename + ".exe"]) | 0.473414 | 0.130313 |
from SNDG.BioMongo.Process.BioMongoDB import BioMongoDB
from SNDG.BioMongo.Model.SeqCollection import SeqCollection, SeqColDruggabilityParam
from SNDG.WebServices.Offtargeting import Offtargeting
from SNDG import init_log, mkdir, execute
from SNDG.WebServices import PROXIES
import os
PROXIES["ftp_proxy"] = "http://proxy.fcen.uba.ar:8080"
init_log()
mdb = BioMongoDB("tdr",port=27018)
off_props = {"human_offtarget": SeqColDruggabilityParam(**{
"target": "protein",
"defaultGroupOperation": "max",
"defaultValue": 0.6,
"name": "human_offtarget",
"defaultOperation": ">",
"_cls": "SeqColDruggabilityParam",
"uploader": "demo",
"_class": "ar.com.bia.entity.SeqCollectionDoc",
"type": "number",
"options": [],
"description": "This score reflects the results of a blastp search of the pathogen protein in the human proteome database (Gencode v17) with the scale 1 - max(alignment identity), so when a protein has no hit in the human proteome, the value is 1, and if it has 2 hits, one with an identity of 0.4 and other with 0.6, the score is 0.4 (human_offtarget = 1 - 0.6, uses the max identity)."
}),
"gut_microbiome": SeqColDruggabilityParam(**{
"target": "protein",
"name": "gut_microbiome",
"_cls": "SeqColDruggabilityParam",
"uploader": "demo",
"_class": "ar.com.bia.entity.SeqCollectionDoc",
"type": "number",
"options": [],
"description": "Number of gut microbiome organisms that have at least one hit (blast: identity > 40% evalue 1e-5)"
}),
"gut_microbiome_norm": SeqColDruggabilityParam(**{
"target": "protein",
"name": "gut_microbiome_norm",
"_cls": "SeqColDruggabilityParam",
"uploader": "demo",
"_class": "ar.com.bia.entity.SeqCollectionDoc",
"type": "number",
"options": [],
"description": "gut_microbiome normalized by the total number of compared bacteria (226 - https://doi.org/10.1038/s41598-018-28916-7 - Supplementary Table S1)"
}),
"hit_in_deg": SeqColDruggabilityParam(**{
"target": "protein",
"defaultGroupOperation": "avg",
"defaultValue": "Yes",
"name": "hit_in_deg",
"defaultOperation": "equal",
"_cls": "SeqColDruggabilityParam",
"uploader": "demo",
"_class": "ar.com.bia.entity.SeqCollectionDoc",
"type": "value",
"options": [
"No",
"Yes"
],
"description": "Has a hit in Database of Essential Genes"
})
}
from SNDG.Sequence import read_blast_table
from tqdm import tqdm
cols = list(SeqCollection.objects(name__nin=["cruzi","pdb"]))
cpus = 4
db = mdb.db
for seqCol in tqdm(cols):
mkdir("/data/organismos/" + seqCol.name + "/contigs")
proteome = "/data/organismos/" + seqCol.name + "/contigs/genoma.fasta"
if not os.path.exists(proteome):
mdb.protein_fasta(proteome, seqCol.name)
out = "/data/organismos/" + seqCol.name + "/annotation/offtarget/"
mkdir(out)
if not seqCol.has_druggability_param("human_offtarget"):
seqCol.druggabilityParams.append(off_props["human_offtarget"])
db = "/data/databases/human/gencode.v17.pc_translations.fa"
execute(
"blastp -evalue 1e-5 -max_hsps 1 -outfmt 6 -max_target_seqs 1 -db {db} -query {query} -out {out} -num_threads {cpus}",
db=db, query=proteome, out=out + "human_offtarget.tbl", cpus=cpus)
mdb.db.proteins.update({"organism": seqCol.name},
{"$set": {"search.human_offtarget": 1}}, multi=True)
for _, r in tqdm(read_blast_table(out + "human_offtarget.tbl").iterrows()):
mdb.db.proteins.update({"organism": seqCol.name, "gene": r["query"]},
{"$set": {"search.human_offtarget": 1 - r.identity / 100}})
if not seqCol.has_druggability_param("gut_microbiome"):
seqCol.druggabilityParams.append(off_props["gut_microbiome"])
seqCol.druggabilityParams.append(off_props["gut_microbiome_norm"])
db = "/data/databases/human/gut_microbiota.fasta"
execute(
"blastp -evalue 1e-5 -max_hsps 1 -outfmt 6 -db {db} -query {query} -out {out} -num_threads {cpus}",
db=db, query=proteome, out=out + "gut_microbiome.tbl", cpus=cpus)
mdb.db.proteins.update({"organism": seqCol.name},
{"$set": {"search.gut_microbiome": 0,
"search.gut_microbiome_norm": 0
}}, multi=True)
prot_off = Offtargeting.count_organism_from_microbiome_blast(out + "gut_microbiome.tbl", db)
for locus_tag, organisms in tqdm(prot_off.items()):
assert len(organisms) <= 226
mdb.db.proteins.update({"organism": seqCol.name, "gene": locus_tag},
{"$set": {"search.gut_microbiome": len(organisms),
"search.gut_microbiome_norm": len(organisms) / 226.0
}})
if not seqCol.has_druggability_param("hit_in_deg"):
seqCol.druggabilityParams.append(off_props["hit_in_deg"])
db = "/data/databases/deg/degaa-p.dat"
if seqCol.name in ["cruzi", "LMajor", "SC_MAS", "PVIVAX", "TGONDII"]:
db = "/data/databases/deg/degaa-e.dat"
execute(
"blastp -evalue 1e-5 -max_hsps 1 -qcov_hsp_perc 80 -outfmt 6 -db {db} -query {query} -out {out} -num_threads {cpus}",
db=db, query=proteome, out=out + "deg.tbl", cpus=cpus)
mdb.db.proteins.update({"organism": seqCol.name},
{"$set": {"search.hit_in_deg": "No"}}, multi=True)
for _, r in tqdm(read_blast_table(out + "deg.tbl").iterrows()):
if (r.identity / 100.0) > 0.7:
mdb.db.proteins.update({"organism": seqCol.name, "gene": r["query"]},
{"$set": {"search.hit_in_deg": "Yes"}})
seqCol.save()
from SNDG.BioMongo.Process.Index import build_statistics
    build_statistics(mdb.db,seqCol.name) | scripts/pathogen/06-08-2018-offtargets.py | 0.483161 | 0.35301
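The human_offtarget score updated row by row above reduces to a single groupby over the BLAST table; a self-contained sketch with pandas, assuming the standard outfmt-6 column layout (the column names and file path are assumptions, since read_blast_table's exact output is not shown here):
import pandas as pd

# Standard BLAST outfmt-6 column order; the input path is illustrative.
cols = ["query", "subject", "identity", "length", "mismatch", "gapopen",
        "qstart", "qend", "sstart", "send", "evalue", "bitscore"]
hits = pd.read_csv("human_offtarget.tbl", sep="\t", names=cols)
# Proteins with no hit never appear in the table; the script above pre-seeds
# their score to 1 before applying these per-query updates.
score = 1.0 - hits.groupby("query")["identity"].max() / 100.0
print(score.head())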
import pandas as pd
import numpy as np
class DataTransformer(object):
def __init__(
self,
transforms,
):
self.transforms = transforms
def __call__(
self,
data_ids,
controls=None,
):
if controls is None:
controls = {}
data = {
"id":data_ids,
}
data.update(controls)
for transform in self.transforms:
data = transform(data)
return data
class DataFormatter(object):
def __init__(
self,
format_dict,
):
self.format_dict = format_dict
def __call__(self, data_dict):
output = {}
for key in self.format_dict.keys():
extractor = self.format_dict[key]
if extractor is None:
output[key] = data_dict[key]
else:
output[key] = extractor(data_dict)
return output
class DataFrameTransformWrapper(object):
def __init__(
self,
data_frame,
target_column,
output_key,
as_array=True,
):
self.data_frame = data_frame
self.target_column = target_column
self.output_key = output_key
self.as_array = as_array
def __call__(self, data_in):
ids = data_in["id"]
targ = self.data_frame[self.target_column].loc[ids]
if self.as_array:
targ = targ.values
        # Put the extracted data back into the data dictionary and return the whole dictionary.
data_in[self.output_key] = targ
return data_in
class FunctionTransformWrapper(object):
def __init__(
self,
transform_function,
input_key,
output_key,
):
self.transform_function = transform_function
self.input_key = input_key
self.output_key = output_key
def __call__(self, data_in):
targ = self.transform_function(data_in[self.input_key])
data_in[self.output_key] = targ
        return data_in | toxicity/pipeline/transforms.py | 0.535341 | 0.319254
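Illustrative end-to-end use of the classes above, with a toy DataFrame indexed by sample id (all values are made up):
import pandas as pd

df = pd.DataFrame({"tox": [0.1, 0.9]}, index=["a", "b"])
pipeline = DataTransformer([
    DataFrameTransformWrapper(df, "tox", "label"),
    FunctionTransformWrapper(lambda x: x > 0.5, "label", "is_toxic"),
])
print(pipeline(["a", "b"]))
# -> {'id': ['a', 'b'], 'label': array([0.1, 0.9]), 'is_toxic': array([False,  True])}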
import unittest
from PyStacks.PyStacks.template import templateCF
class TestTemplate(unittest.TestCase):
def test_templateCF_S3(self):
resources = {
's3': {
'S3Bucket': {
'name': 'stuff.holder',
'accesscontrol': 'PublicRead',
'versioning': True,
'tags': {
'Name': 'Api'
},
'notices': {
'lamda': [{
'event': 's3:ObjectCreated:*',
'function': 'somelambdaarn'
}]
}
}
},
's3_policies': {
'S3BucketPolicies': {
'policy': '"what": "on earth"'
}
}
}
expected = {
'S3BucketPolicies': {
'Type': 'AWS::S3::BucketPolicy',
'Properties': {
'what': 'on earth'
}
},
'S3Bucket': {
'Type': 'AWS::S3::Bucket',
'Properties': {
'AccessControl': 'PublicRead',
'NotificationConfiguration': {
'LambdaConfigurations': [
{
'Event': 's3:ObjectCreated:*',
'Function': 'somelambdaarn'
}
]
},
'VersioningConfiguration': {
'Status': 'Enabled'
},
'BucketName': 'stuff.holder',
'Tags': [
{
'Key': 'Name',
'Value': 'Api'
}
]
}
}
}
        self.maxDiff = None  # None (not the string 'None') shows the full diff on failure
actual = templateCF(resources, 'resources')
self.assertDictEqual(actual, expected)
if __name__ == '__main__':
    unittest.main() | PyStacks/test/templates/test_s3.py | 0.508056 | 0.380414
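To exercise just this template test, one option is loading it by dotted name, assuming the repository root is on sys.path (the dotted path follows the repo layout above):
import unittest

suite = unittest.defaultTestLoader.loadTestsFromName(
    "PyStacks.test.templates.test_s3")
unittest.TextTestRunner(verbosity=2).run(suite)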
from flask import Flask, json, request
import logging
from flask_cors import CORS, cross_origin
from jinja2 import Environment, FileSystemLoader
import yaml
from gpapi import gpapi
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
# CORS allows the frontend to connect to this service.
# Health-check endpoint for the microservice.
@app.route('/', methods=['GET'])
@cross_origin()
def status():
    print("Ok!")
    return "Hello Netmi!"
# Endpoint for testing show commands on devices.
@app.route('/show', methods=['POST'])
@cross_origin()
def getshow():
    try:
        json_data = request.get_json()  # JSON body of the POST
        show = json_data["show"]  # "show" command parameter
        connection = gpapi(json_data)  # build the connection
        connection.connect()  # open the session
        parse = connection.showconfig(show)  # parsed dict for the requested show command
        connection.disconnect()  # close the session
        data = {"data": parse}  # final dictionary
        s = 200
    except Exception:
        data = {"data": ""}
        s = 400
    response = app.response_class(response=json.dumps(data),  # return the result as JSON
                                  status=s,
                                  mimetype='application/json')
    return response
# Endpoint that renders parsed output through a YAML template.
@app.route('/getparsingyaml', methods=['POST'])
@cross_origin()
def getparsingyaml():
    try:
        json_data = request.get_json()  # JSON body of the POST
        show = json_data["show"]  # "show" command parameter
        plantilla = json_data["plantilla"]  # template parameter
        connection = gpapi(json_data)  # build the connection
        connection.connect()  # open the session
        parse = connection.showconfig(show)  # parsed dict for the show command
        data = {"data": parse}  # final dictionary
        try:
            show2 = json_data["show2"]  # optional second show command
            parse2 = connection.showconfig(show2)  # parsed dict for show2
            data = {"data": parse, "data2": parse2}  # final dictionary
        except Exception:
            data = {"data": parse}  # final dictionary
        connection.disconnect()  # close the session
        env = Environment(loader=FileSystemLoader(  # template working directory
            './'), trim_blocks=True, lstrip_blocks=True)
        template = env.get_template(plantilla)  # template to use
        doc = template.render(data)  # render
        s = 200
    except Exception:
        doc = "No data"
        s = 400
    response = app.response_class(response=doc,  # return the rendered YAML document
                                  status=s,
                                  mimetype='text/yaml')
    return response
# Endpoint that renders parsed output through a CFG template.
@app.route('/getparsingcfg', methods=['POST'])
@cross_origin()
def getparsingcfg():
    try:
        json_data = request.get_json()  # JSON body of the POST
        show = json_data["show"]  # "show" command parameter
        plantilla = json_data["plantilla"]  # template parameter
        connection = gpapi(json_data)  # build the connection
        connection.connect()  # open the session
        parse = connection.showconfig(show)  # parsed dict for the show command
        data = {"data": parse}  # final dictionary
        try:
            show2 = json_data["show2"]  # optional second show command
            parse2 = connection.showconfig(show2)  # parsed dict for show2
            data = {"data": parse, "data2": parse2}  # final dictionary
        except Exception:
            data = {"data": parse}  # final dictionary
        connection.disconnect()  # close the session
        env = Environment(loader=FileSystemLoader(  # template working directory
            './'), trim_blocks=True, lstrip_blocks=True)
        template = env.get_template(plantilla)  # template to use
        doc = template.render(data)  # render
        s = 200
    except Exception:
        doc = "No data"
        s = 400
    response = app.response_class(response=doc,  # return the rendered config document
                                  status=s,
                                  mimetype='text/plain')
    return response
# Static routes per VRF.
@app.route('/getvrfstaticroute', methods=['POST'])
@cross_origin()
def getvrfstaticroute():
    try:
        json_data = request.get_json()  # JSON body of the POST
        plantilla = json_data["plantilla"]  # template parameter
        connection = gpapi(json_data)  # build the connection
        connection.connect()  # open the session
        parse = connection.showconfig("show vrf")  # parsed dict for "show vrf"
        vrf = {}
        for v in parse['vrf']:
            try:
                cmd = "show ip static route vrf " + v  # per-VRF static route command
                a = connection.showconfig(cmd)  # parsed dict for this VRF's static routes
                vrf[v] = a['vrf'][v]  # collect results per VRF
            except Exception:
                pass  # skip VRFs without static routes
        data = {"data": vrf}  # final dictionary
        connection.disconnect()  # close the session
        env = Environment(loader=FileSystemLoader(  # template working directory
            './'), trim_blocks=True, lstrip_blocks=True)
        template = env.get_template(plantilla)  # template to use
        doc = template.render(data)  # render
        s = 200
    except Exception:
        doc = "No data"
        s = 400
    response = app.response_class(response=doc,  # return the rendered text document
                                  status=s,
                                  mimetype='text/plain')
    return response
@app.errorhandler(500)
def server_error(e):
    # Log the error and stacktrace.
    logging.exception('An error occurred during a request.')
    return 'An internal error occurred.', 500
if __name__ == "__main__":
    app.run(host='127.0.0.1', port=8080, debug=True)  # microservice host and port | src/app.py | 0.264453 | 0.085556
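An illustrative client call against the /show endpoint above; the host, port, and the connection keys inside the body are assumptions about what gpapi expects:
import requests

body = {"show": "show version", "ip": "192.0.2.1",
        "username": "admin", "password": "admin"}
r = requests.post("http://127.0.0.1:8080/show", json=body)
print(r.status_code, r.json())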
from typing import List
from xbox.webapi.api.provider.baseprovider import BaseProvider
from xbox.webapi.api.provider.profile.models import ProfileResponse, ProfileSettings
class ProfileProvider(BaseProvider):
PROFILE_URL = "https://profile.xboxlive.com"
HEADERS_PROFILE = {"x-xbl-contract-version": "3"}
SEPARATOR = ","
async def get_profiles(self, xuid_list: List[str], **kwargs) -> ProfileResponse:
"""
Get profile info for list of xuids
Args:
xuid_list (list): List of xuids
Returns:
:class:`ProfileResponse`: Profile Response
"""
post_data = {
"settings": [
ProfileSettings.GAME_DISPLAY_NAME,
ProfileSettings.APP_DISPLAY_NAME,
ProfileSettings.APP_DISPLAYPIC_RAW,
ProfileSettings.GAMERSCORE,
ProfileSettings.GAMERTAG,
ProfileSettings.GAME_DISPLAYPIC_RAW,
ProfileSettings.ACCOUNT_TIER,
ProfileSettings.TENURE_LEVEL,
ProfileSettings.XBOX_ONE_REP,
ProfileSettings.PREFERRED_COLOR,
ProfileSettings.LOCATION,
ProfileSettings.BIOGRAPHY,
ProfileSettings.WATERMARKS,
ProfileSettings.REAL_NAME,
],
"userIds": xuid_list,
}
url = self.PROFILE_URL + "/users/batch/profile/settings"
resp = await self.client.session.post(
url, json=post_data, headers=self.HEADERS_PROFILE, **kwargs
)
resp.raise_for_status()
return ProfileResponse.parse_raw(await resp.text())
async def get_profile_by_xuid(self, target_xuid: str, **kwargs) -> ProfileResponse:
"""
        Get a user profile by XUID
Args:
target_xuid: XUID to get profile for
Returns:
:class:`ProfileResponse`: Profile Response
"""
url = self.PROFILE_URL + f"/users/xuid({target_xuid})/profile/settings"
params = {
"settings": self.SEPARATOR.join(
[
ProfileSettings.GAMERTAG,
ProfileSettings.MODERN_GAMERTAG,
ProfileSettings.MODERN_GAMERTAG_SUFFIX,
ProfileSettings.UNIQUE_MODERN_GAMERTAG,
ProfileSettings.REAL_NAME_OVERRIDE,
ProfileSettings.BIOGRAPHY,
ProfileSettings.LOCATION,
ProfileSettings.GAMERSCORE,
ProfileSettings.GAME_DISPLAYPIC_RAW,
ProfileSettings.TENURE_LEVEL,
ProfileSettings.ACCOUNT_TIER,
ProfileSettings.XBOX_ONE_REP,
ProfileSettings.PREFERRED_COLOR,
ProfileSettings.WATERMARKS,
ProfileSettings.IS_QUARANTINED,
]
)
}
resp = await self.client.session.get(
url, params=params, headers=self.HEADERS_PROFILE, **kwargs
)
resp.raise_for_status()
return ProfileResponse.parse_raw(await resp.text())
async def get_profile_by_gamertag(self, gamertag: str, **kwargs) -> ProfileResponse:
"""
        Get a user profile by gamertag
Args:
gamertag: Gamertag to get profile for
Returns:
:class:`ProfileResponse`: Profile Response
"""
url = self.PROFILE_URL + f"/users/gt({gamertag})/profile/settings"
params = {
"settings": self.SEPARATOR.join(
[
ProfileSettings.GAMERTAG,
ProfileSettings.MODERN_GAMERTAG,
ProfileSettings.MODERN_GAMERTAG_SUFFIX,
ProfileSettings.UNIQUE_MODERN_GAMERTAG,
ProfileSettings.REAL_NAME_OVERRIDE,
ProfileSettings.BIOGRAPHY,
ProfileSettings.LOCATION,
ProfileSettings.GAMERSCORE,
ProfileSettings.GAME_DISPLAYPIC_RAW,
ProfileSettings.TENURE_LEVEL,
ProfileSettings.ACCOUNT_TIER,
ProfileSettings.XBOX_ONE_REP,
ProfileSettings.PREFERRED_COLOR,
ProfileSettings.WATERMARKS,
ProfileSettings.IS_QUARANTINED,
]
)
}
resp = await self.client.session.get(
url, params=params, headers=self.HEADERS_PROFILE, **kwargs
)
resp.raise_for_status()
        return ProfileResponse.parse_raw(await resp.text()) | xbox/webapi/api/provider/profile/__init__.py | 0.824462 | 0.115761
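A hedged usage sketch for the provider above, assuming it is mounted as client.profile on an authenticated xbox-webapi XblClient and that the parsed response exposes profile_users (session and sign-in setup are elided; the gamertag is illustrative):
import asyncio

async def show_profile(client):
    # client: an already-authenticated xbox.webapi XblClient (setup not shown).
    profile = await client.profile.get_profile_by_gamertag("SomeGamertag")
    print(profile.profile_users[0].settings)

# asyncio.run(show_profile(client))  # requires a signed-in client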
from django.core.urlresolvers import reverse
from hvad.test_utils.context_managers import LanguageOverride
from hvad.test_utils.testcase import NaniTestCase
from hvad.test_utils.request_factory import RequestFactory
from testproject.app.models import Normal, Related
from hvad.views import TranslatableUpdateView
class ViewsTest(NaniTestCase):
def setUp(self):
with LanguageOverride("en"):
self.object = Normal.objects.language().create(shared_field="test", translated_field="translated test")
self.rf = RequestFactory()
self.request = self.rf.post('/url/')
def test_update_view_get(self):
with LanguageOverride("en"):
            response = self.client.get(reverse('update_normal', args=[self.object.id]))
            self.assertEqual(response.status_code, 200)
            response = self.client.get(reverse('update_normal_slug', kwargs={'slug': self.object.shared_field}))
            self.assertEqual(response.status_code, 200)
            response = self.client.get(reverse('update_normal', args=[self.object.id]) + "?%s=da" % TranslatableUpdateView.query_language_key)
            self.assertEqual(response.status_code, 200)
            response = self.client.get(reverse('update_normal', args=[self.object.id * 100]) + "?%s=da" % TranslatableUpdateView.query_language_key)
            self.assertEqual(response.status_code, 404)
def test_update_view_post(self):
with LanguageOverride("en"):
translated_string = u"some english translation"
response = self.client.post(reverse('update_normal', args=[self.object.id]),
data={
'shared_field': 'some value',
'translated_field': translated_string,
})
            self.assertEqual(response.status_code, 302)
            obj = Normal.objects.language().filter(pk=self.object.id).get()
            self.assertEqual(obj.translated_field, translated_string)
            translated_string = u"svenne banan æøå"
            response = self.client.post(reverse('update_normal', args=[self.object.id]) + "?%s=da" % TranslatableUpdateView.query_language_key, data={
                'shared_field': 'some value',
                'translated_field': translated_string,
            })
            # A successful update redirects, matching the assertion above.
            self.assertEqual(response.status_code, 302)
            obj = Normal.objects.language("da").filter(pk=self.object.id).get()
            self.assertEqual(obj.translated_field, translated_string) | hvad/tests/views.py | 0.439747 | 0.254781
import time
from drowsy.log import Loggable
import requests
class KodiRpcClient(Loggable):
def __init__(self, base_url, username, password):
"""
:param str base_url:
:param username:
:param password:
"""
self.username = username
self.password = password
self.base_url = base_url
self.req_counter = 140
self.req_session = requests.Session()
if not self.base_url.endswith("/"):
self.base_url += "/"
def post_rpc(self, method, params):
data = {
"jsonrpc": "2.0",
"method": method,
"params": params,
"id": self.req_counter
}
url = self.base_url + f"jsonrpc?{method}"
result = self.req_session.post(
url,
auth=(self.username, self.password),
headers={"Connection": "keep-alive"},
json=[data])
self.req_counter += 1
return result
def get_monitor(self):
return self.post_rpc(
method="Settings.GetSettingValue",
params={"setting": "videoscreen.monitor"}
).json()[0]["result"]["value"]
def set_monitor(self, value):
current = self.get_monitor()
if current != value:
self.post_rpc(
method="Settings.SetSettingValue",
params={"setting": "videoscreen.monitor", "value": value}
)
def get_fullscreen(self):
response = self.post_rpc(
method="Settings.GetSettingValue",
params={"setting": "videoscreen.screen"}
).json()
return response[0]["result"]["value"]
def set_fullscreen(self):
current = self.get_fullscreen()
if current != 0:
return self.post_rpc(
method="Settings.SetSettingValue",
params={"setting": "videoscreen.screen", "value": 0}
)
def execute_action(self, action):
result = self.post_rpc(
method="Input.ExecuteAction",
params={"action": action}
)
return result
def play_pause_toggle(self):
result = self.post_rpc(
method="Player.PlayPause",
params={"playerid": 1, "play": "toggle"}
)
return result
def play_next(self):
result = self.post_rpc(
method="Player.GoTo",
params={"playerid": 1, "to": "next"}
)
return result
def play_video(self, media_id, media_type, resume_time=None):
media_ids = []
if isinstance(media_id, list):
media_ids = media_id
media_id = media_ids[0]
media_ids.pop(0)
# TODO - Get playlist id? Assuming 1..
playlist_id = 1
clear_playlist = self.post_rpc(
method="Playlist.Clear",
params=[playlist_id]
)
if media_type == "movie":
media_id_key = "movieid"
else:
media_id_key = "episodeid"
playlist_insert = self.post_rpc(
method="Playlist.Insert",
params=[playlist_id, 0, {media_id_key: media_id}]
)
player_open = self.post_rpc(
method="Player.Open",
params={
"item": {
"position": 0,
"playlistid": playlist_id
},
"options": {
"resume": {"hours": 1, "minutes": 0, "seconds": 8}
}
}
)
gui_set_fullscreen = self.post_rpc(
method="GUI.SetFullscreen",
params=[True]
)
if resume_time:
time.sleep(.5)
player_seek = self.post_rpc(
method="Player.Seek",
params={
"playerid": 1,
"value": {"seconds": int(resume_time)}
}
)
        for i, next_id in enumerate(media_ids):  # enumerate: queue each remaining item after the current entry
queue_insert = self.post_rpc(
method="Playlist.Insert",
params=[playlist_id, (i + 1), {media_id_key: next_id}]
)
return
def play_mlb(self, list_index, is_home, game_status):
open_mlb = self.post_rpc(
method="Addons.ExecuteAddon",
params=["plugin.video.mlbtv"]
)
time.sleep(1)
self.execute_action("pageup")
time.sleep(.1)
self.post_rpc(
method="Input.down",
params=[]
)
time.sleep(.1)
select_today = self.post_rpc(
method="Input.Select",
params=[]
)
time.sleep(3)
self.execute_action("pageup")
for i in range(0, list_index+3):
self.post_rpc(
method="Input.Down",
params=[]
)
time.sleep(.1)
select_game = self.post_rpc(
method="Input.Select",
params=[]
)
time.sleep(3)
        for i in range(0, (1 + 1) if not is_home else 0):  # two downs for the away feed, none when home
self.post_rpc(
method="Input.Down",
params=[]
)
time.sleep(.1)
select_feed = self.post_rpc(
method="Input.Select",
params=[]
)
time.sleep(3)
# select the live feed
if game_status != "Final":
for i in range(0, 2):
self.post_rpc(
method="Input.Down",
params=[]
)
time.sleep(.1)
select_start = self.post_rpc(
method="Input.Select",
params=[]
)
        return | bender_mc/kodi/rpc_client.py | 0.221856 | 0.145085
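For reference, what a single post_rpc call above puts on the wire: a one-element JSON-RPC batch, POSTed with basic auth to <base_url>jsonrpc?<method> (the host in the trailing comment is illustrative):
import json

payload = [{
    "jsonrpc": "2.0",
    "method": "Player.PlayPause",
    "params": {"playerid": 1, "play": "toggle"},
    "id": 140,
}]
print(json.dumps(payload, indent=2))
# POSTed with basic auth to e.g. http://kodi-host:8080/jsonrpc?Player.PlayPause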
"""API for interacting with dtfClient application"""
from __future__ import absolute_import
from __future__ import print_function
import os
import socket
import struct
from dtf.adb import DtfAdb
import dtf.logging as log
CMD_DOWNLOAD = 'd'
CMD_UPLOAD = 'u'
CMD_EXECUTE = 'e'
RESP_OK = chr(0)
RESP_ERROR = chr(1)
RESP_NO_EXIST = chr(-1 % 256)
RESP_NO_READ = chr(-2 % 256)
RESP_EXISTS = chr(-3 % 256)
RESP_NO_WRITE = chr(-4 % 256)
ERR_SOCK = -1
SIZE_LONG = 8
SIZE_INTEGER = 4
SIZE_FILENAME = 256
SIZE_CMD = 512
SIZE_TRANSFER = 1024
TAG = "dtfClient"
DTF_SOCKET = "dtf_socket"
FORWARD_SOCKET = "localabstract:" + DTF_SOCKET
def bytes_to_int(byte_stream):
"""Convert bytes to integer"""
return struct.unpack(">L", byte_stream)[0]
def bytes_to_long(byte_stream):
"""Convert bytes to long"""
return struct.unpack(">Q", byte_stream)[0]
def long_to_bytes(long_in):
"""Convert a long into byte stream"""
return struct.pack(">Q", long_in)
class DtfClient(object):
"""Python class for dtfClient"""
serial = ''
stdout = None
stderr = None
adb = None
def __init__(self):
"""Object initialization"""
self.adb = DtfAdb()
def __enable_forward(self):
"""Setup forwarding for talking to dtfClient"""
self.adb.add_forward(FORWARD_SOCKET, FORWARD_SOCKET)
def __disable_forward(self):
"""Remove forwarding rule"""
self.adb.remove_forward(FORWARD_SOCKET)
@classmethod
def __sock_connect(cls, socket_name,
socket_family=socket.AF_UNIX,
socket_type=socket.SOCK_STREAM):
""" Connect to socket_name.
First try abstract and fall back to filesystem
"""
# Create an unbound and not-connected socket.
try:
sock = socket.socket(socket_family, socket_type)
except socket.error as err:
log.e(TAG, "Socket creation failed: " + err.message)
return None
try:
log.d(TAG, "Connecting to abstract socket...")
# \0 denotes an abstract socket
sock.connect('\0' + socket_name)
except socket.error:
# abstract socket connection failed - it probably doesn't exist
# see jakev/dtf GitHub Issue #35
log.d(TAG, "Connecting to abstract socket failed. Does it exist?")
try:
log.d(TAG, "Connecting to filesystem socket...")
sock.connect('/tmp/' + socket_name)
except socket.error as err:
log.d(TAG, "Connecting to filesystem socket failed: "
+ err.message)
log.e(TAG, "Connecting to socket failed, giving up.")
return None
else:
log.d(TAG, "Connected to filesystem socket!")
else:
log.d(TAG, "Connected to abstract socket!")
return sock
@classmethod
    def __safe_recv(cls, sock, size, response=None):
        """Handle any issues receiving data."""
        try:
            return sock.recv(size)
        except socket.error as err:
            log.e(TAG, "Error calling recv(): %s" % err)
            return response
def __do_download(self, remote_file_name, local_file_name):
"""Download a file using the dtfClient"""
# Get a connected socket
sock = self.__sock_connect(DTF_SOCKET)
if sock is None:
log.e(TAG, "Cannot __do_download, socket failure.")
return ERR_SOCK
sock.send(CMD_DOWNLOAD)
resp_code = self.__safe_recv(sock, 1, response=RESP_ERROR)
if resp_code != RESP_OK:
log.e(TAG, "Server rejected download request!")
return resp_code
padded_file_name = remote_file_name.ljust(SIZE_FILENAME, '\0')
log.d(TAG, "Sending filename to server")
sock.send(padded_file_name)
log.d(TAG, "Filename sent.")
binary_file_size = sock.recv(SIZE_LONG)
# This is an error
if len(binary_file_size) == 1:
return binary_file_size
long_file_size = bytes_to_long(binary_file_size)
log.d(TAG, "File size from server: %d" % long_file_size)
sock.send(RESP_OK)
local_f = open(local_file_name, 'wb')
bytes_left = long_file_size
transfer_success = False
while True:
if bytes_left <= SIZE_TRANSFER:
local_buf = self.__safe_recv(sock, bytes_left)
if local_buf is None:
break
local_f.write(local_buf)
local_f.close()
transfer_success = True
break
else:
local_buf = self.__safe_recv(sock, SIZE_TRANSFER)
if local_buf is None:
break
local_f.write(local_buf)
bytes_left -= SIZE_TRANSFER
        if not transfer_success:
            local_f.close()
            log.e(TAG, "Error downloading file!")
            return RESP_ERROR
sock.send(RESP_OK)
log.d(TAG, "Transfer complete!")
return RESP_OK
# pylint:disable=too-many-return-statements
    def __do_upload(self, local_file_name, remote_file_name):
        """Upload a file using the dtfClient"""
# Get a connected socket
sock = self.__sock_connect(DTF_SOCKET)
if sock is None:
log.e(TAG, "Cannot __do_upload, socket failure.")
return ERR_SOCK
statinfo = os.stat(local_file_name)
file_size = statinfo.st_size
local_f = open(local_file_name, 'rb')
sock.send(CMD_UPLOAD)
resp_code = self.__safe_recv(sock, 1, response=RESP_ERROR)
if resp_code != RESP_OK:
log.e(TAG, "Server rejected upload request!")
return resp_code
log.d(TAG, "Sending filesize to server")
sock.send(long_to_bytes(file_size))
        resp = self.__safe_recv(sock, 1, response=RESP_ERROR)
if resp != RESP_OK:
log.e(TAG, "Error submitting filesize!")
return resp
padded_file_name = remote_file_name.ljust(SIZE_FILENAME, '\0')
log.d(TAG, "Sending the filename...")
sock.send(padded_file_name)
resp = self.__safe_recv(sock, 1, response=RESP_ERROR)
if resp != RESP_OK:
log.e(TAG, "Error with filename!")
return resp
bytes_left = file_size
while True:
if bytes_left <= SIZE_TRANSFER:
sock.send(local_f.read(bytes_left))
local_f.close()
break
else:
sock.send(local_f.read(SIZE_TRANSFER))
bytes_left -= SIZE_TRANSFER
resp = self.__safe_recv(sock, 1, response=RESP_ERROR)
if resp != RESP_OK:
log.e(TAG, "Error uploading file!")
return resp
return RESP_OK
    def __do_execute(self, command_string):
        """Execute a command using the dtfClient"""
response = None
# Get a connected socket
sock = self.__sock_connect(DTF_SOCKET)
if sock is None:
log.e(TAG, "Cannot __do_execute, socket failure.")
return ("", ERR_SOCK)
sock.send(CMD_EXECUTE)
resp_code = self.__safe_recv(sock, 1, response=RESP_ERROR)
if resp_code != RESP_OK:
log.e(TAG, "Server rejected execute request!")
return (response, resp_code)
full_command = command_string.ljust(SIZE_CMD, '\0')
log.d(TAG, "Sending execute string to server")
sock.send(full_command)
log.d(TAG, "Command sent.")
binary_cmd_size = sock.recv(SIZE_INTEGER)
        # A lone status byte here means the server reported an error
        if len(binary_cmd_size) == 1:
            return (response, binary_cmd_size)
int_cmd_size = bytes_to_int(binary_cmd_size)
sock.send(RESP_OK)
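        # Size acknowledged; unless empty, the output is streamed back
        # in SIZE_TRANSFER-byte chunks.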
if int_cmd_size == 0:
log.d(TAG, "Response is empty string!")
return ("", RESP_OK)
bytes_left = int_cmd_size
response = ""
transfer_success = False
while True:
if bytes_left <= SIZE_TRANSFER:
local_buf = self.__safe_recv(sock, bytes_left)
if local_buf is None:
break
response += local_buf
transfer_success = True
break
else:
local_buf = self.__safe_recv(sock, SIZE_TRANSFER)
if local_buf is None:
break
response += local_buf
bytes_left -= SIZE_TRANSFER
        if not transfer_success:
            log.e(TAG, "Error receiving command output!")
            return ("", RESP_ERROR)
sock.send(RESP_OK)
log.d(TAG, "Command complete!")
return (response, RESP_OK)
# Public API Starts here
def upload_file(self, local_file_name, remote_file):
"""Upload a file using the dtfClient"""
self.__enable_forward()
resp_code = self.__do_upload(local_file_name, remote_file)
self.__disable_forward()
return resp_code
def download_file(self, remote_file_name, local_file):
"""Download a file using the dtfClient"""
self.__enable_forward()
resp_code = self.__do_download(remote_file_name, local_file)
self.__disable_forward()
return resp_code
def execute_command(self, cmd_string):
"""Execute command using dtfClient"""
if cmd_string == "":
return (None, None)
self.__enable_forward()
output, resp_code = self.__do_execute(cmd_string)
self.__disable_forward()
return (output, resp_code)
def set_to_usb(self):
"""Set current connection to USB mode"""
self.adb.usb()
def set_to_wifi(self, ip_addr, port):
"""Set current connection to TCP and connect"""
if ip_addr is None or port is None:
log.e(TAG, "IP and port cannot be none!")
return None
self.adb.tcpip(port)
if self.adb.connect(ip_addr, port) is None:
            raise IOError("Unable to connect to %s:%s" % (ip_addr, port))
# End public API
# Source: python-dtf/dtf/client.py
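# Example usage: a minimal sketch, not part of the original module. It
# assumes a device is connected over adb with the dtfClient server running;
# the file paths and the 'id' command below are hypothetical placeholders.
if __name__ == '__main__':

    client = DtfClient()

    # Push a local file to the device, checking the one-byte status code.
    if client.upload_file('local.txt', '/data/local/tmp/remote.txt') == RESP_OK:
        print('Upload complete')

    # Run a shell command on the device and capture its output.
    output, resp_code = client.execute_command('id')
    if resp_code == RESP_OK:
        print('Command output: %s' % output)

    # Pull the file back down to a new local name.
    if client.download_file('/data/local/tmp/remote.txt', 'copy.txt') == RESP_OK:
        print('Download complete')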