id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
9625503 | # Checking for each interval the next intervals if has start
# lower then actual end
def rooms(intervals):
    """Return the minimum number of rooms needed so that no two
    overlapping intervals share a room (brute force, O(n^2)).

    The maximum number of simultaneously active intervals is always
    reached at some interval's start time, so it suffices to count,
    for every interval, how many intervals cover its start.
    Intervals are treated as half-open [start, end): an interval that
    ends exactly when another starts does not overlap it, matching the
    event-based variant of this algorithm.

    Bug fix: the original compared ``vm[0] > lesson[1]`` (start after
    end), which is the opposite of the overlap test its own comment
    describes, and it also counted non-concurrent intervals.

    :param intervals: sequence of (start, end) pairs
    :return: maximum number of concurrently active intervals
    """
    min_rooms = 0
    for lesson in intervals:
        # Count intervals active at this lesson's start instant.
        # The lesson itself always satisfies the condition, so
        # required >= 1 for a non-empty input.
        required = sum(1 for vm in intervals if vm[0] <= lesson[0] < vm[1])
        if required > min_rooms:
            min_rooms = required
    return min_rooms
def rooms_sorted(intervals):
    """Sweep-line variant: minimum number of rooms so that no two
    overlapping intervals share a room.

    Builds a +1/-1 event for every start/end, sorts the event times,
    and tracks the running number of occupied rooms; the peak of that
    running count is the answer. Overall O(n log n) (Timsort dominates).

    :param intervals: iterable of (start, end) pairs
    :return: peak number of concurrently active intervals
    """
    deltas = {}
    # O(n): +1 room needed at each start, one freed at each end.
    # Coinciding start/end at the same instant cancel out, so
    # back-to-back intervals do not require two rooms.
    for interval in intervals:
        start, end = interval[0], interval[1]
        deltas[start] = deltas.get(start, 0) + 1
        deltas[end] = deltas.get(end, 0) - 1
    peak = 0
    active = 0
    # O(n log n) sort of event times, then a single O(n) sweep.
    for _, change in sorted(deltas.items()):
        active += change
        if active > peak:
            peak = active
    return peak
| StarcoderdataPython |
1805749 | import json
import random
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union, cast
import pytorch_lightning as pl
import torch
import torch.utils.data
from nuplan.planning.training.callbacks.utils.scene_converter import SceneConverter
from nuplan.planning.training.data_loader.scenario_dataset import ScenarioDataset
from nuplan.planning.training.modeling.types import FeaturesType, TargetsType, move_features_type_to_device
from nuplan.planning.training.preprocessing.feature_collate import FeatureCollate
def _dump_scenes(scenes: List[Dict[str, Any]], output_dir: Path) -> None:
"""
Dump a single scene file
:param scenes: list of scenes to be written
:param output_dir: final output directory
"""
for scene in scenes:
file_name = output_dir / str(scene["ego"]["timestamp_us"])
with open(str(file_name.with_suffix('.json')), 'w') as outfile:
json.dump(scene, outfile, indent=4)
def _score_model(
    pl_module: pl.LightningModule, features: FeaturesType, targets: TargetsType
) -> Tuple[float, FeaturesType]:
    """
    Run inference on one batch and score it with the module's objectives.
    :param pl_module: lightning model
    :param features: model inputs
    :param targets: training targets
    :return: tuple of (mean objective score, predictions moved to CPU)
    """
    objectives = pl_module.objectives
    with torch.no_grad():
        # Switch to eval mode just for the forward pass, then restore
        # training mode so the surrounding training loop is unaffected.
        pl_module.eval()
        predictions = pl_module(features)
        pl_module.train()
    accumulated = 0.0
    for objective in objectives:
        accumulated = accumulated + objective.compute(predictions, targets).to('cpu')
    mean_score = accumulated / len(objectives)
    return mean_score, move_features_type_to_device(predictions, torch.device('cpu'))
def _eval_model_and_write_to_scene(
    dataloader: torch.utils.data.DataLoader,
    pl_module: pl.LightningModule,
    scene_converter: SceneConverter,
    num_store: int,
    output_dir: Path,
) -> None:
    """
    Evaluate prediction of the model and write scenes based on their scores
    :param dataloader: pytorch data loader
    :param pl_module: lightning module
    :param scene_converter: converts data from the scored scenario into scene dictionary
    :param num_store: n number of scenarios to be written into scenes for each best, worst and random cases
    :param output_dir: output directory of scene file
    """
    scenario_dataset = dataloader.dataset
    # One score slot per dataset sample.
    # NOTE(review): sample_idx is used as a dataset index below, which
    # assumes the dataloader has batch_size=1 and shuffle=False — confirm
    # against the caller that builds this dataloader.
    score_record = torch.empty(len(scenario_dataset))
    predictions: List[TargetsType] = []
    # Obtain scores for each sample of the dataset
    for sample_idx, sample in enumerate(dataloader):
        features = cast(FeaturesType, sample[0])
        targets = cast(TargetsType, sample[1])
        score, prediction = _score_model(
            pl_module,
            move_features_type_to_device(features, pl_module.device),
            move_features_type_to_device(targets, pl_module.device),
        )
        predictions.append(prediction)
        score_record[sample_idx] = score
    # Classify score results with lower scores as best
    best_n_idx = torch.topk(score_record, num_store, largest=False).indices.tolist()
    worst_n_idx = torch.topk(score_record, num_store).indices.tolist()
    random_n_idx = random.sample(range(len(scenario_dataset)), num_store)
    # collect data to write: one output subtree per category
    for data_idx, score_type in zip((best_n_idx, worst_n_idx, random_n_idx), ('best', 'worst', 'random')):
        for idx in data_idx:
            features, targets = scenario_dataset[idx]
            # NOTE(review): reaches into the dataset's private _scenarios
            # attribute; relies on ScenarioDataset internals.
            scenario = scenario_dataset._scenarios[idx]
            # convert data to scenes
            scenes = scene_converter(scenario, features, targets, predictions[idx])
            file_dir = output_dir / score_type / scenario.token
            file_dir.mkdir(parents=True, exist_ok=True)
            # dump scenes
            _dump_scenes(scenes, file_dir)
class ScenarioScoringCallback(pl.Callback):
    """
    Callback that performs an evaluation to score the model on each validation data.
    The n-best, n-worst and n-random data is written into a scene.
    The directory structure for the output of the scenes is:
    <output_dir>
    └── scenes
        ├── best
        │   ├── scenario_token_01
        │   │   ├── timestamp_01.json
        │   │   └── timestamp_02.json
        │   :
        │   └── scenario_token_n
        ├── worst
        └── random
    """
    def __init__(self, scene_converter: SceneConverter, num_store: int, frequency: int, output_dir: Union[str, Path]):
        """
        Initialize the callback.
        :param scene_converter: Converts data from the scored scenario into scene dictionary.
        :param num_store: N number of scenarios to be written into scenes for each best, worst and random cases.
        :param frequency: Interval between epochs at which to perform the evaluation. Set 0 to skip the callback.
        :param output_dir: Output directory of scene file.
        """
        super().__init__()
        self._num_store = num_store
        self._frequency = frequency
        self._scene_converter = scene_converter
        self._output_dir = Path(output_dir) / 'scenes'
        # Lazily built on first eligible epoch so the same dataset
        # ordering is reused across evaluations.
        self._val_dataloader: Optional[torch.utils.data.DataLoader] = None
    def _initialize_dataloaders(self, datamodule: pl.LightningDataModule) -> None:
        """
        Initialize the dataloaders. This makes sure that the same examples are sampled every time.
        :param datamodule: Lightning datamodule.
        :raises AssertionError: if the validation dataset is not a ScenarioDataset.
        """
        val_set = datamodule.val_dataloader().dataset
        assert isinstance(val_set, ScenarioDataset), "invalid dataset type, dataset must be a scenario dataset"
        # batch_size=1 and shuffle=False keep per-sample scoring aligned
        # with dataset indices.
        self._val_dataloader = torch.utils.data.DataLoader(
            dataset=val_set, batch_size=1, shuffle=False, collate_fn=FeatureCollate()
        )
    def on_validation_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
        """
        Called at the end of each epoch validation.
        :param trainer: Lightning trainer.
        :param pl_module: lightning model.
        """
        # skip callback entirely when frequency is 0
        if self._frequency == 0:
            return
        assert hasattr(trainer, 'datamodule'), "Trainer missing datamodule attribute"
        assert hasattr(trainer, 'current_epoch'), "Trainer missing current_epoch attribute"
        epoch = trainer.current_epoch
        # Evaluate only every self._frequency epochs.
        if epoch % self._frequency == 0:
            if self._val_dataloader is None:
                self._initialize_dataloaders(trainer.datamodule)
            output_dir = self._output_dir / f'epoch={epoch}'
            _eval_model_and_write_to_scene(
                self._val_dataloader, pl_module, self._scene_converter, self._num_store, output_dir
            )
| StarcoderdataPython |
12842241 | <gh_stars>1-10
# Generated by Django 2.0 on 2018-01-29 07:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make ``AliasRegistration.transaction_id`` optional while keeping it unique."""

    dependencies = [
        ('datatrans', '0004_alias_registration_fields_optional'),
    ]

    operations = [
        migrations.AlterField(
            model_name='aliasregistration',
            name='transaction_id',
            # blank/null permit a missing id; unique=True still enforces
            # uniqueness for the non-null values at the database level.
            field=models.CharField(blank=True, max_length=18, null=True, unique=True),
        ),
    ]
| StarcoderdataPython |
3226616 | <filename>recomendation system/content.py
__author__ = 'lily'
import numpy as np
from sklearn.decomposition import ProjectedGradientNMF
import recsys
import evaluate
import similarity
from sklearn import decomposition
from numpy.linalg import inv
from sklearn.metrics.pairwise import pairwise_distances
#feature helper and user_feature are derived from lambda functions
class content(recsys.recsys):
    """Content-based recommender built on pairwise item/user feature similarity.

    feature_helper and user_feature are derived from lambda functions.

    Bug fix: ``x == None`` comparisons were replaced with ``x is None``.
    When the compared object is a numpy array (``feature_transform_all``
    can be one), ``== None`` performs an element-wise comparison whose
    truth value is ambiguous and raises at ``if`` time; identity checks
    are also the PEP 8 requirement for None.
    """
    def __init__(self, X, similarity_helper=None, feature_helper=None, score_helper=None,
                 item_feat=None, user_feat=None, cluster=None):
        """
        :param X: item-by-user interaction matrix
        :param similarity_helper: callable metric passed to pairwise_distances
        :param feature_helper: optional callable transforming raw features
        :param score_helper: optional scoring callable (kept for API parity)
        :param item_feat: raw item feature matrix
        :param user_feat: raw user feature matrix
        :param cluster: unused, kept for backward compatibility
        """
        super(content, self).__init__(X)
        self.feature_helper = feature_helper
        self.score_helper = score_helper
        self.item_feat = item_feat
        self.user_feat = user_feat
        self.similarity_helper = similarity_helper

    def get_helper2(self, name, function):
        """Delegate helper registration to the recsys base class."""
        super(content, self).get_helper2(name, function)

    def get_parameters(self):
        """No tunable parameters for the content model."""
        pass

    def predict_for_user(self, user_ratings, user_feat, k, feature_transform_all=None):
        """Return the indices of the top-k unrated items for a single user.

        :param user_ratings: boolean/indicator vector of items the user already rated
        :param user_feat: this user's feature matrix
        :param k: number of items to return
        :param feature_transform_all: optional precomputed item feature transform
        """
        # feature_transform_all refers to items
        # shape returns the rows and columns of the matrix
        Nitems, Nusers = self.X.shape
        # item_transform/user_transform hold the (optionally) transformed features
        if feature_transform_all is None:
            if self.feature_helper is None:
                item_transform = self.item_feat
                user_transform = user_feat
            else:
                item_transform, user_transform = self.feature_helper(X=user_ratings, item_feat=self.item_feat, user_feat=user_feat)
        else:
            item_transform = feature_transform_all
            lol, user_transform = self.feature_helper(X=user_ratings, item_feat=self.item_feat[:, 1], user_feat=user_feat)
        # Pairwise similarity between item and user feature rows.
        S = pairwise_distances(item_transform, user_transform, self.similarity_helper)
        predicted_values = S
        # Zero out items the user has already rated so they are not recommended.
        predicted_values[np.asarray(user_ratings)] = 0
        result = np.argsort(predicted_values)
        return result[0:k]

    def fit(self, train_indices=None, test_indices=None):
        """Fit the model: compute the item/user similarity-based prediction matrix."""
        super(content, self).transform_training(train_indices, test_indices)  # setting up training data
        # item_transform/user_transform hold the (optionally) transformed features
        if self.feature_helper is None:
            item_transform = self.item_feat
            user_transform = self.user_feat
        else:
            item_transform, user_transform = self.feature_helper(X=self.X_train, item_feat=self.item_feat, user_feat=self.user_feat)
        # Pairwise similarity between item and user feature rows.
        S = pairwise_distances(item_transform, user_transform, self.similarity_helper)
        # Known positive interactions are pinned to 1.
        S[self.X_train == 1] = 1
        self.X_predict = S

    def score(self, truth_index):
        """Delegate scoring against held-out data to the recsys base class."""
        return super(content, self).score(truth_index)
def user_to_item(X_train, item_feat, user_feat, start, end):
    """Aggregate user (mall) feature columns into item (store) features.

    For every store (row of X_train), averages the [start, end] feature
    columns of all malls that carry the store; stores present in no mall
    get a small constant feature vector instead.

    Bug fix: the original emptiness test ``np.sum(np.nonzero(mall_indexes)) == 0``
    sums the *indices* of the selected malls, so a store whose only mall
    is mall 0 was wrongly treated as having no malls. Use ``.any()`` on
    the boolean mask instead.

    :param X_train: (n_items, n_users) 0/1 ndarray of store/mall membership
    :param item_feat: unused, kept for the feature_helper call signature
    :param user_feat: user feature ndarray; columns [start, end] are used
    :param start: first feature column (inclusive)
    :param end: last feature column (inclusive)
    :return: tuple (item_transform, user_transform)
    """
    START = start
    END = end + 1  # remember to +1 as an offset: `end` is inclusive
    # user_transform is simply the selected columns of the user features.
    user_transform = user_feat[:, START:END]
    item_transform = np.zeros((X_train.shape[0], END - START))
    # possibly faster if you use a join and a group in pandas
    for i in np.arange(X_train.shape[0]):  # go through all stores
        mall_indexes = (X_train[i, :] == 1)  # finds all the malls that have store i
        store_features = user_feat[mall_indexes, :][:, START:END]
        if not mall_indexes.any():
            # No malls carry this store: fall back to a tiny constant vector.
            test = .001 * np.ones(store_features.shape[1])
        else:
            test = np.average(store_features, axis=0)
        item_transform[i, :] = test
    return (item_transform, user_transform)
#helper that extracts columns from a the mall matrix
def user_to_item_helper(start, end):
    """Build a feature_helper closure that extracts user-feature columns
    [start, end] via user_to_item, with the column range fixed up front.
    """
    def helper(X, item_feat, user_feat):
        return user_to_item(X, item_feat, user_feat, start, end)
    return helper
#This is for testing purposes
# X = np.array([[1, 1, 1, 1] , [1, 1, 0, 0], [1, 0, 1, 0]])
# user_feat = np.array([[1, 1, 1, 2, 3], [0, 0, 4, 5, 6], [1, 0, 7, 8, 9], [0,1 , 10, 11, 12]])
# item_feat = None
# fun = user_to_item_helper(2, 4)
# cosine = similarity.cosine()
# test = content(X, similarity_helper=cosine, user_feat=user_feat, item_feat=item_feat, feature_helper=fun)
# test.fit() | StarcoderdataPython |
3480255 | from __future__ import print_function
import numpy
from amuse.lab import *
from amuse.couple import bridge
from matplotlib import pyplot
class MilkyWay_galaxy(object):
    """Analytic Milky Way potential (disk + bulge + halo) usable as a
    background field for an AMUSE bridge integrator.
    """
    def get_gravity_at_point(self, eps, x, y, z):
        """Approximate the field at (x, y, z) by one-sided finite
        differences of the potential, with step 0.1% of the radius.
        NOTE(review): returns +grad(phi) with the sign convention of the
        potential methods below; confirm the sign matches what the
        bridge expects for an acceleration.
        """
        phi_0 = self.get_potential_at_point(eps, x, y, z)
        grav = AdaptingVectorQuantity()  # NOTE(review): unused — confirm whether removable
        dpos = 0.001*(x**2+y**2+z**2).sqrt()
        phi_dx = self.get_potential_at_point(0,x+dpos,y,z) - phi_0
        phi_dy = self.get_potential_at_point(0,x,y+dpos,z) - phi_0
        phi_dz = self.get_potential_at_point(0,x,y, z+dpos) - phi_0
        return phi_dx/dpos, phi_dy/dpos, phi_dz/dpos
    def disk_and_bulge_potentials(self, x, y, z, a, b, mass):
        """Miyamoto–Nagai-style disk/bulge term with scale lengths a, b
        and total mass `mass` (cylindrical radius in the x-y plane).
        """
        r = (x**2+y**2).sqrt()
        return constants.G * mass /\
            (r**2 + (a + (z**2 + b**2).sqrt())**2).sqrt()
    def halo_potential(self, x, y, z, Mc=5.0E+10|units.MSun, Rc=1.0|units.kpc**2):
        """Logarithmic halo term with characteristic mass Mc and radius Rc.
        NOTE(review): the Rc default carries units kpc**2 while callers pass
        kpc — confirm the intended unit.
        """
        r=(x**2+y**2+z**2).sqrt()
        rr = (r/Rc)
        return -constants.G * (Mc/Rc)*(0.5*numpy.log(1 +rr**2) + numpy.arctan(rr)/rr)
    def get_potential_at_point(self, eps, x, y, z):
        """Sum of disk, bulge and halo contributions at (x, y, z)."""
        pot_disk = self.disk_and_bulge_potentials(x,y,z,
            0.0|units.kpc, 0.277|units.kpc, 1.12E+10|units.MSun)
        pot_bulge = self.disk_and_bulge_potentials(x,y,z,
            3.7|units.kpc, 0.20|units.kpc, 8.07E+10|units.MSun)
        pot_halo = self.halo_potential(x,y,z,
            Mc=5.0E+10|units.MSun, Rc=6.0|units.kpc)
        return pot_disk + pot_bulge + pot_halo
def movie(time, sun_and_planets):
    """Update the three live diagnostic scatter panels for the current step:
    (1) x-y positions, (2) galactocentric radius vs z, (3) mutual
    separation of the two bodies vs time.
    :param time: current simulation time (AMUSE quantity)
    :param sun_and_planets: particle set with exactly two bodies (Sun, M67)
    """
    # Galactocentric distance of each body.
    R = [] | units.kpc
    for sp in sun_and_planets:
        R.append(sp.position.length())
    # - sun_and_planets.z
    print(R)
    pyplot.subplot(2,2,1)
    pyplot.scatter(sun_and_planets.x.value_in(units.kpc),
                   sun_and_planets.y.value_in(units.kpc),
                   c=['k', 'r'], s=10, lw=0)
    pyplot.subplot(2,2,2)
    pyplot.scatter(R.value_in(units.kpc),
                   sun_and_planets.z.value_in(units.kpc),
                   c=['k', 'r'], s=10, lw=0)
    pyplot.xlabel("R [kpc]")
    pyplot.ylabel("Z [kpc]")
    # Separation between the two bodies.
    R = [] | units.kpc
    R.append((sun_and_planets[1].position-sun_and_planets[0].position).length())
    pyplot.subplot(2,2,3)
    pyplot.scatter(-time.value_in(units.Gyr),
                   R.value_in(units.kpc),
                   c=['k', 'r'], s=10, lw=0)
    pyplot.xlabel("t [Myr]")
    pyplot.ylabel("r [kpc]")
    pyplot.draw()
def main(t_end, filename):
    """Integrate the Sun and the open cluster M67 in an analytic Milky Way
    potential, bridging a Huayno N-body integrator with MilkyWay_galaxy.
    Results are either appended to an HDF5 file (if `filename` is set) or
    plotted live.
    :param t_end: requested end time of the simulation
        NOTE(review): this argument is overwritten below by a hard-coded
        4.5 Gyr, so the -t option currently has no effect — confirm intent.
    :param filename: output file name; empty string enables live plotting
    """
    bodies = Particles(2)
    Sun = bodies[0]
    Sun.mass = 1|units.MSun
    Sun.position = (8.4, 0.0, 0.0) | units.kpc
    Sun.velocity = (-10.1, 235.5, 7.5) | units.kms  # SPZ2009
    M67 = bodies[1]
    M67.mass = 50000 | units.MSun
    # M67's phase space coordinates relative to the Sun.
    M67.position = Sun.position + ((0.766, 0.0, 0.49) |units.kpc)
    M67.velocity = Sun.velocity + ((31.92, -21.66, -8.71) |units.kms)
    converter = nbody_system.nbody_to_si(bodies.mass.sum(), Sun.x)
    sunandm67 = Huayno(converter)
    sunandm67.particles.add_particle(bodies)
    channel_from_sunandm67 = sunandm67.particles.new_channel_to(bodies)
    # Couple the N-body pair to the static galaxy potential.
    gravity = bridge.Bridge()
    gravity.add_system(sunandm67, (MilkyWay_galaxy(),) )
    dt = 1|units.Myr
    gravity.timestep = dt
    Etot_init = gravity.kinetic_energy + gravity.potential_energy
    Etot_prev = Etot_init
    # Hard-coded end time (overrides the t_end parameter, see note above).
    t_end = 4.5|units.Gyr
    time = 0 * t_end
    if filename:
        write_set_to_file(bodies.savepoint(0.0 | t_end.unit),
                          filename, "hdf5",
                          append_to_file=False)
        pyplot.draw()
    else:
        # Live plotting: seed the R-z panel with the initial positions.
        R = [] | units.kpc
        for bi in bodies:
            R.append(bi.position.length())
        pyplot.ion()
        pyplot.scatter(R.value_in(units.kpc),
                       bodies.z.value_in(units.kpc),
                       c=['k', 'r'], s=10, lw=0)
        pyplot.xlabel("R [kpc]")
        pyplot.ylabel("Z [kpc]")
    while time < t_end:
        time += dt
        gravity.evolve_model(time)
        # Sync positions/velocities back into the local particle set.
        channel_from_sunandm67.copy()
        # Energy bookkeeping / conservation diagnostics.
        Ekin = gravity.kinetic_energy
        Epot = gravity.potential_energy
        Etot = Ekin + Epot
        print("T=", time, "M=", bodies.mass.sum(), end=' ')
        print("E= ", Etot, "Q= ", Ekin/Epot, end=' ')
        print("dE=", (Etot_init-Etot)/Etot, "ddE=", (Etot_prev-Etot)/Etot)
        Etot_prev = Etot
        if filename:
            write_set_to_file(bodies.savepoint(time), filename, "hdf5")
        else:
            R = [] | units.kpc
            for bi in bodies:
                R.append(bi.position.length())
            pyplot.scatter(R.value_in(units.kpc),
                           bodies.z.value_in(units.kpc),
                           c=['k', 'r'], s=10, lw=0)
            pyplot.draw()
    gravity.stop()
def new_option_parser():
    """Build the AMUSE unit-aware command line parser:
    -t  end time of the simulation (Gyr)
    -f  output file name (empty string => live plotting)
    """
    from amuse.units.optparse import OptionParser
    result = OptionParser()
    result.add_option("-t", unit=units.Gyr,
                      dest="t_end", type="float", default = 4.5|units.Gyr,
                      help="end time of the simulation [%default]")
    result.add_option("-f", dest="filename", default = "",
                      help="output filename")
    return result
# Entry point; '__plot__' is the module name AMUSE uses when running a
# script through its plotting wrapper.
if __name__ in ('__main__', '__plot__'):
    o, arguments = new_option_parser().parse_args()
    main(**o.__dict__)
| StarcoderdataPython |
64871 | #
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#!/usr/bin/env python
"""Main test file for SSM document."""
import ConfigParser
import glob
import logging
import os
import sys
import unittest
import boto3
import demjson
# Directory of the SSM document under test and the repository root,
# derived from this file's location.
DOC_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
REPO_ROOT = os.path.dirname(DOC_DIR)
# Import shared testing code
sys.path.append(
    os.path.join(
        REPO_ROOT,
        'Testing'
    )
)
import ssm_testing  # noqa pylint: disable=import-error,wrong-import-position
# Layered config: defaults.cfg first, then optional local.cfg overrides.
# NOTE(review): readfp is the Python 2 ConfigParser API and the opened
# file handle is never closed explicitly.
CONFIG = ConfigParser.ConfigParser()
CONFIG.readfp(open(os.path.join(REPO_ROOT, 'Testing', 'defaults.cfg')))
CONFIG.read([os.path.join(REPO_ROOT, 'Testing', 'local.cfg')])
REGION = CONFIG.get('general', 'region')
PREFIX = CONFIG.get('general', 'resource_prefix')
AMIID = CONFIG.get('linux', 'ami')
SERVICE_ROLE_NAME = CONFIG.get('general', 'automation_service_role_name')
INSTANCE_TYPE = CONFIG.get('linux', 'instance_type')
# Names shared by the SSM document and the CloudFormation stack it tests.
SSM_DOC_NAME = PREFIX + 'automation-stopinstance-with-approval'
INSTANCE_CFN_STACK_NAME = PREFIX + 'automation-stopinstance-with-approval'
if CONFIG.get('general', 'log_level') == 'warn':
    logging.basicConfig(level=logging.WARN)
elif CONFIG.get('general', 'log_level') == 'info':
    logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
class TestCase(unittest.TestCase):
    """Main test class for SSM document."""
    @staticmethod
    def test_json_linting():
        """Verify correct json syntax."""
        for i in glob.glob(os.path.join(DOC_DIR, 'Documents', '*.json')):
            assert demjson.jsonlint('jsonlint').main([i]) == 0, (
                'JSON documents are not well formed')

    @staticmethod
    def test_document():
        """Verify correct deployment and use of document.

        End-to-end flow: start two EC2 instances via CloudFormation,
        launch the stop-with-approval automation, confirm it pauses in
        'Waiting', approve it, and verify both instances end up stopped.
        Cleanup (stack deletion, document removal) always runs.
        """
        cfn_client = boto3.client('cloudformation', region_name=REGION)
        ec2_client = boto3.client('ec2', region_name=REGION)
        ssm_client = boto3.client('ssm', region_name=REGION)
        ssm_doc = ssm_testing.SSMTester(
            ssm_client=ssm_client,
            doc_filename=os.path.join(DOC_DIR,
                                      'Documents',
                                      'aws-StopEC2InstanceWithApproval.json'),
            doc_name=SSM_DOC_NAME,
            doc_type='Automation'
        )
        test_cf_stack = ssm_testing.CFNTester(
            cfn_client=cfn_client,
            template_filename=os.path.join(DOC_DIR,
                                           'Tests',
                                           'CloudFormationTemplates',
                                           'TwoInstancesWithSNS.yml'),
            stack_name=INSTANCE_CFN_STACK_NAME
        )
        automation_role = ssm_doc.get_automation_role(
            boto3.client('sts', region_name=REGION),
            boto3.client('iam', region_name=REGION),
            SERVICE_ROLE_NAME
        )
        LOGGER.info('Starting 2 instances for testing')
        test_cf_stack.create_stack([
            {
                'ParameterKey': 'AMI',
                'ParameterValue': AMIID
            },
            {
                'ParameterKey': 'INSTANCETYPE',
                'ParameterValue': INSTANCE_TYPE
            }
        ])
        try:
            LOGGER.info('Creating automation document')
            assert ssm_doc.create_document() == 'Active', ('Document not '
                                                           'created '
                                                           'successfully')
            ec2_instance_ids = [
                test_cf_stack.stack_outputs['Instance0Id'],
                test_cf_stack.stack_outputs['Instance1Id']
            ]
            # The current caller identity approves its own execution.
            user_arn = boto3.client('sts', region_name=REGION).get_caller_identity().get('Arn')
            sns_topic_arn = test_cf_stack.stack_outputs['SNSTopicArn']
            LOGGER.info("User ARN for approval: " + user_arn)
            LOGGER.info("SNS Topic ARN for approval: " + sns_topic_arn)
            LOGGER.info('Verifying all instances are running')
            describe_res = ec2_client.describe_instance_status(
                InstanceIds=ec2_instance_ids,
                IncludeAllInstances=True
            )
            assert all(d['InstanceState']['Name'] == 'running' for d in describe_res['InstanceStatuses']) is True, (  # noqa pylint: disable=line-too-long
                'Instances not started')
            LOGGER.info('Running automation to stop multiple instances '
                        '(using defined role)')
            ssm_doc_params = {'InstanceId': ec2_instance_ids,
                              'AutomationAssumeRole': [automation_role],
                              'Approvers': [user_arn],
                              'SNSTopicArn': [sns_topic_arn]}
            execution = ssm_doc.execute_automation(params=ssm_doc_params)
            LOGGER.info('Verifying automation executions have concluded '
                        'successfully')
            # since this automation requires approval to continue, the correct status at this point should be 'Waiting'
            assert ssm_doc.automation_execution_status(ssm_client, execution, False) == 'Waiting', \
                'Automation not waiting for approval'
            LOGGER.info('Approving continuation of execution')
            ssm_client.send_automation_signal(
                AutomationExecutionId=execution,
                SignalType='Approve'
            )
            # this will block until the automation is back in a running state
            assert ssm_doc.automation_execution_status(ssm_client, execution) == 'Success', \
                'Automation step unsuccessful'
            LOGGER.info('Verifying all instances are stopped')
            describe_res = ec2_client.describe_instance_status(
                InstanceIds=ec2_instance_ids,
                IncludeAllInstances=True
            )
            assert all(d['InstanceState']['Name'] == 'stopped' for d in describe_res['InstanceStatuses']) is True, (  # noqa pylint: disable=line-too-long
                'Instances not stopped')
        finally:
            # Always clean up AWS resources, even on assertion failure.
            test_cf_stack.delete_stack()
            ssm_doc.destroy()
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
4832898 | import base64
import urllib.parse
from django.test import Client, TestCase
from django.urls import reverse
from dcim.models import Device, DeviceRole, DeviceType, Manufacturer, Site
from secrets.models import Secret, SecretRole, SessionKey, UserKey
from utilities.testing import create_test_user
from .constants import PRIVATE_KEY, PUBLIC_KEY
class SecretRoleTestCase(TestCase):
    """View tests for the SecretRole list and CSV import pages."""
    def setUp(self):
        # Authenticated client with only the secretrole view/add permissions.
        user = create_test_user(
            permissions=[
                'secrets.view_secretrole',
                'secrets.add_secretrole',
            ]
        )
        self.client = Client()
        self.client.force_login(user)

        SecretRole.objects.bulk_create([
            SecretRole(name='Secret Role 1', slug='secret-role-1'),
            SecretRole(name='Secret Role 2', slug='secret-role-2'),
            SecretRole(name='Secret Role 3', slug='secret-role-3'),
        ])

    def test_secretrole_list(self):
        """The list view renders for a permitted user."""
        url = reverse('secrets:secretrole_list')

        response = self.client.get(url, follow=True)
        self.assertEqual(response.status_code, 200)

    def test_secretrole_import(self):
        """CSV import creates the three new roles (3 existing + 3 imported)."""
        csv_data = (
            "name,slug",
            "Secret Role 4,secret-role-4",
            "Secret Role 5,secret-role-5",
            "Secret Role 6,secret-role-6",
        )

        response = self.client.post(reverse('secrets:secretrole_import'), {'csv': '\n'.join(csv_data)})

        self.assertEqual(response.status_code, 200)
        self.assertEqual(SecretRole.objects.count(), 6)
class SecretTestCase(TestCase):
    """View tests for Secret list/detail/import, including session-key handling."""
    def setUp(self):
        user = create_test_user(
            permissions=[
                'secrets.view_secret',
                'secrets.add_secret',
            ]
        )

        # Set up a master key
        userkey = UserKey(user=user, public_key=PUBLIC_KEY)
        userkey.save()
        master_key = userkey.get_master_key(PRIVATE_KEY)
        self.session_key = SessionKey(userkey=userkey)
        self.session_key.save(master_key)

        self.client = Client()
        self.client.force_login(user)

        # Minimal device hierarchy required to attach secrets to a device.
        site = Site(name='Site 1', slug='site-1')
        site.save()

        manufacturer = Manufacturer(name='Manufacturer 1', slug='manufacturer-1')
        manufacturer.save()

        devicetype = DeviceType(manufacturer=manufacturer, model='Device Type 1')
        devicetype.save()

        devicerole = DeviceRole(name='Device Role 1', slug='device-role-1')
        devicerole.save()

        device = Device(name='Device 1', site=site, device_type=devicetype, device_role=devicerole)
        device.save()

        secretrole = SecretRole(name='Secret Role 1', slug='secret-role-1')
        secretrole.save()

        Secret.objects.bulk_create([
            Secret(device=device, role=secretrole, name='Secret 1', ciphertext=b'1234567890'),
            Secret(device=device, role=secretrole, name='Secret 2', ciphertext=b'1234567890'),
            Secret(device=device, role=secretrole, name='Secret 3', ciphertext=b'1234567890'),
        ])

    def test_secret_list(self):
        """The list view renders with a role filter applied."""
        url = reverse('secrets:secret_list')
        params = {
            "role": SecretRole.objects.first().slug,
        }

        response = self.client.get('{}?{}'.format(url, urllib.parse.urlencode(params)), follow=True)
        self.assertEqual(response.status_code, 200)

    def test_secret(self):
        """The detail view of a single secret renders."""
        secret = Secret.objects.first()
        response = self.client.get(secret.get_absolute_url(), follow=True)
        self.assertEqual(response.status_code, 200)

    def test_secret_import(self):
        """CSV import encrypts and stores three new secrets (3 + 3 = 6)."""
        csv_data = (
            "device,role,name,plaintext",
            "Device 1,Secret Role 1,Secret 4,abcdefghij",
            "Device 1,Secret Role 1,Secret 5,abcdefghij",
            "Device 1,Secret Role 1,Secret 6,abcdefghij",
        )

        # Set the session_key cookie on the request
        session_key = base64.b64encode(self.session_key.key).decode('utf-8')
        self.client.cookies['session_key'] = session_key

        response = self.client.post(reverse('secrets:secret_import'), {'csv': '\n'.join(csv_data)})

        self.assertEqual(response.status_code, 200)
        self.assertEqual(Secret.objects.count(), 6)
| StarcoderdataPython |
6487778 | <reponame>andreped/annotationweb
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2019-05-15 09:48
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the cardiac app: Segmentation (one per key-frame
    annotation) and its ControlPoint children."""

    initial = True

    dependencies = [
        ('annotationweb', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='ControlPoint',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('x', models.FloatField()),
                ('y', models.FloatField()),
                # Ordering of the point within its segmentation contour.
                ('index', models.PositiveIntegerField()),
                ('phase', models.PositiveIntegerField(choices=[(0, 'End Diastole'), (1, 'End Systole')])),
                ('object', models.PositiveIntegerField(choices=[(0, 'Endocardium'), (1, 'Epicardium'), (2, 'Left atrium')])),
                ('uncertain', models.BooleanField()),
            ],
        ),
        migrations.CreateModel(
            name='Segmentation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('frame_ED', models.IntegerField()),
                ('frame_ES', models.IntegerField()),
                ('motion_mode_line', models.PositiveIntegerField()),
                ('image', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='annotationweb.KeyFrameAnnotation')),
            ],
        ),
        # Added after both models exist to break the circular definition.
        migrations.AddField(
            model_name='controlpoint',
            name='segmentation',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cardiac.Segmentation'),
        ),
    ]
| StarcoderdataPython |
4997168 | # Generated by Django 2.1.7 on 2020-02-10 10:09
from django.db import migrations
class Migration(migrations.Migration):
    """Rename ``User.image`` to ``User.image1``."""

    dependencies = [
        ('accounts', '0022_auto_20200210_1005'),
    ]

    operations = [
        migrations.RenameField(
            model_name='user',
            old_name='image',
            new_name='image1',
        ),
    ]
| StarcoderdataPython |
8191376 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from tricircleclient import constants
from tricircleclient.tests.unit import utils
from tricircleclient.v1 import jobs_cli
class _TestJobCommand(utils.TestCommand):
    """Shared base for job CLI tests: exposes the fake job manager."""
    def setUp(self):
        super(_TestJobCommand, self).setUp()
        # Shortcut to the (mocked) job manager on the fake client.
        self.job_manager = self.app.client_manager.multiregion_networking.job
class TestCreateJob(_TestJobCommand, utils.TestCommandWithoutOptions):
    """Tests for the ``job create`` CLI command."""
    def setUp(self):
        super(TestCreateJob, self).setUp()
        self.cmd = jobs_cli.CreateJob(self.app, None)

    def test_create_all_options(self):
        """Creating a job with every option returns all job fields."""
        _job = utils.FakeJob.create_single_job()
        arglist = [
            '--type', _job['job']['type'],
            '--project_id', _job['job']['project_id'],
            '--pod_id', _job['job']['resource']['pod_id'],
            '--router_id', _job['job']['resource']['router_id'],
            '--network_id', _job['job']['resource']['network_id'],
        ]
        verifylist = [
            ('type', _job['job']['type']),
            ('project_id', _job['job']['project_id']),
            ('pod_id', _job['job']['resource']['pod_id']),
            ('router_id', _job['job']['resource']['router_id']),
            ('network_id', _job['job']['resource']['network_id']),
        ]
        self.job_manager.create = mock.Mock(return_value=_job)
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = (self.cmd.take_action(parsed_args))
        # Output columns/values mirror the fake job exactly.
        self.assertEqual(sorted(_job['job'].keys()), sorted(columns))
        self.assertEqual(sorted(_job['job'].values()), sorted(data))
class TestShowJob(_TestJobCommand, utils.TestCommandWithoutOptions):
    """Tests for the ``job show`` CLI command."""
    def setUp(self):
        super(TestShowJob, self).setUp()
        self.cmd = jobs_cli.ShowJob(self.app, None)

    def test_show_a_single_job(self):
        """Showing a job by id returns all of its fields."""
        _job = utils.FakeJob.create_single_job()
        arglist = [
            _job['job']['id'],
        ]
        verifylist = [
            ('job', _job['job']['id']),
        ]
        self.job_manager.get = mock.Mock(return_value=_job)
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = (self.cmd.take_action(parsed_args))
        self.assertEqual(sorted(_job['job'].keys()), sorted(columns))
        self.assertEqual(sorted(_job['job'].values()), sorted(data))
class TestListJob(_TestJobCommand):
    """Tests for the ``job list`` CLI command (filters and pagination)."""
    def setUp(self):
        super(TestListJob, self).setUp()
        self.cmd = jobs_cli.ListJobs(self.app, None)

    def test_list(self):
        """A plain list returns every fake job with remapped columns."""
        _jobs = utils.FakeJob.create_multiple_jobs()
        self.job_manager.list = mock.Mock(return_value={'jobs': _jobs})
        parsed_args = self.check_parser(self.cmd)
        columns, data = (self.cmd.take_action(parsed_args))
        self.assertEqual(sorted(constants.COLUMNS_REMAP.values()),
                         sorted(columns))
        self.assertEqual(len(_jobs), len(data))
        self.assertEqual(
            sorted([tuple(o[k] for k in constants.COLUMNS) for o in _jobs]),
            sorted(data))

    def test_list_with_filters(self):
        """Filtering by project id / type / status narrows to one job."""
        _job = utils.FakeJob.create_single_job()
        _job = _job['job']
        # we filter the jobs by the following fields: project ID, type, status.
        # given values of _job, then only single item _job is retrieved.
        arglist = [
            '--project-id', _job['project_id'],
            '--type', _job['type'],
            '--status', _job['status'],
        ]
        verifylist = [
            ('project_id', _job['project_id']),
            ('type', _job['type']),
            ('status', _job['status'].lower()),
        ]
        self.job_manager.list = mock.Mock(return_value={'jobs': [_job]})
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = (self.cmd.take_action(parsed_args))
        self.assertEqual(1, len(data))
        self.assertEqual(sorted(constants.COLUMNS_REMAP.values()),
                         sorted(columns))
        # lower case of job status
        arglist = [
            '--status', _job['status'].lower(),
        ]
        verifylist = [
            ('status', _job['status'].lower()),
        ]
        self.job_manager.list = mock.Mock(return_value={'jobs': [_job]})
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = (self.cmd.take_action(parsed_args))
        self.assertEqual(1, len(data))
        self.assertEqual(sorted(constants.COLUMNS_REMAP.values()),
                         sorted(columns))

    def test_invalid_job_status_filter(self):
        """An unrecognized status value is rejected by the parser."""
        # unrecognizable job status filter
        arglist = [
            '--status', 'new_1',
        ]
        verifylist = []
        self.assertRaises(utils.ParserException, self.check_parser,
                          self.cmd, arglist, verifylist)

    def test_list_with_pagination(self):
        """--limit (and --marker) restrict the page of results returned."""
        number_of_jobs = 4
        limit = number_of_jobs - 2
        _jobs = utils.FakeJob.create_multiple_jobs(count=number_of_jobs)
        # test list operation with pagination
        arglist = [
            '--limit', str(limit),
        ]
        verifylist = [
            ('limit', limit),
        ]
        self.job_manager.list = mock.Mock(return_value={"jobs": _jobs[:limit]})
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = (self.cmd.take_action(parsed_args))
        self.assertEqual(limit, len(data))
        self.assertEqual(sorted(constants.COLUMNS_REMAP.values()),
                         sorted(columns))
        # test list operation with pagination and marker
        arglist = [
            '--limit', str(limit),
            '--marker', _jobs[0]['id'],
        ]
        verifylist = [
            ('limit', limit),
            ('marker', _jobs[0]['id']),
        ]
        self.job_manager.list = mock.Mock(
            return_value={"jobs": _jobs[1:limit+1]})
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = (self.cmd.take_action(parsed_args))
        self.assertEqual(limit, len(data))
        self.assertEqual(sorted(constants.COLUMNS_REMAP.values()),
                         sorted(columns))
class TestDeleteJob(_TestJobCommand, utils.TestCommandWithoutOptions):
    """Tests for the ``job delete`` CLI command."""
    def setUp(self):
        super(TestDeleteJob, self).setUp()
        self.cmd = jobs_cli.DeleteJob(self.app, None)

    def test_delete_job(self):
        """Deleting a job by id produces no output."""
        _job = utils.FakeJob.create_single_job()
        arglist = [
            _job['job']['id'],
        ]
        verifylist = [
            ('job', [_job['job']['id']]),
        ]
        self.job_manager.delete = mock.Mock(return_value=None)
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        self.assertIsNone(result)
class TestRedoJob(_TestJobCommand, utils.TestCommandWithoutOptions):
    """Tests for the ``job redo`` CLI command."""

    def setUp(self):
        super(TestRedoJob, self).setUp()
        self.cmd = jobs_cli.RedoJob(self.app, None)

    def test_redo_job(self):
        """Redoing a job by id yields no output on success."""
        fake = utils.FakeJob.create_single_job()
        job_id = fake['job']['id']
        self.job_manager.update = mock.Mock(return_value=None)
        parsed_args = self.check_parser(
            self.cmd, [job_id], [('job', job_id)])
        self.assertIsNone(self.cmd.take_action(parsed_args))
| StarcoderdataPython |
9661957 | from Classes.Logic.LogicCommandManager import LogicCommandManager
from Classes.Packets.PiranhaMessage import PiranhaMessage
class AvailableServerCommandMessage(PiranhaMessage):
    """Server-to-client message (type 24111) carrying an embedded logic command."""

    def __init__(self, messageData):
        super().__init__(messageData)
        self.messageVersion = 0

    def encode(self, fields):
        # The command id goes first; the command object then encodes its own
        # body into this message's payload.
        self.writeVint(fields["Command"]["ID"])
        command = LogicCommandManager.createCommand(fields["Command"]["ID"], self.messagePayload)
        self.messagePayload = command.encode(fields)

    def decode(self):
        # Nothing to decode for a server-originated message.
        return {}

    def execute(message, calling_instance, fields):
        # NOTE(review): defined without `self`; callers appear to invoke it
        # unbound with an explicit message argument — confirm before changing.
        pass

    def getMessageType(self):
        return 24111

    def getMessageVersion(self):
        # Fixed: a dataset artifact (" | StarcoderdataPython |") was fused onto
        # this return statement, which made the module unparsable.
        return self.messageVersion
1850045 | import unittest
from blahtex import Blahtex, BlahtexException
class TestAll(unittest.TestCase):
    """End-to-end tests for the Blahtex TeX-to-MathML converter.

    TeX inputs use raw strings: "\\sqrt" / "\\mathfrak" in non-raw literals
    rely on invalid escape sequences (a DeprecationWarning in Python 3).
    """

    def test_convert(self):
        bt = Blahtex()
        self.assertEqual(
            bt.convert(r"\sqrt{3}"),
            '<math xmlns="http://www.w3.org/1998/Math/MathML" display="inline"><msqrt><mn>3</mn></msqrt></math>')

    def test_plane1(self):
        bt = Blahtex(mathml_version1_fonts=True)
        self.assertEqual(
            bt.convert(r"\mathfrak{ABCDEF}"),
            '<math xmlns="http://www.w3.org/1998/Math/MathML" display="inline">'
            '<mrow><mi>\U0001D504</mi>'
            '<mi>\U0001D505</mi>'
            '<mi>\U0000212D</mi>'
            '<mi>\U0001D507</mi>'
            '<mi>\U0001D508</mi>'
            '<mi>\U0001D509</mi></mrow>'
            '</math>')

    def test_options(self):
        bt = Blahtex(japanese_font='ipaex.ttf')
        options = bt.get_options()
        self.assertEqual(options['japanese_font'], 'ipaex.ttf')
        options['indented'] = not options['indented']
        bt.set_options(options)
        self.assertEqual(bt.get_options(), options)

    def test_bad_option_name(self):
        # Fixed: this method previously shared the name test_bad_option_value
        # with the next test, so it was shadowed and never executed.
        bt = Blahtex()
        with self.assertRaises(ValueError):
            bt.not_exist_key = True

    def test_bad_option_value(self):
        bt = Blahtex()
        bt.other_encoding = Blahtex.ENCODING.NUMERIC
        with self.assertRaises(ValueError):
            bt.other_encoding = 100

    def test_exception_no_input(self):
        bt = Blahtex()
        with self.assertRaises(ValueError):
            bt.get_mathml()

    def test_exception_bad_tex(self):
        bt = Blahtex()
        with self.assertRaises(BlahtexException):
            bt.convert(r'\badcommand')
# Standard test-module entry point.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
177584 | <reponame>Animenosekai/yuno<filename>yuno/utils/annotations.py
"""
annotations.py
Manages annotations utilities.
"""
import typing
class Default():
    """Sentinel wrapper marking the default value of a call parameter."""

    def __init__(self, value: typing.Any = None) -> None:
        """
        A class representing the default value for any parameter in a
        function call.

        Parameters
        -----------
        `value`: Any
            This is the default value
        """
        self.value = value

    def __repr__(self) -> str:
        return "Default({value})".format(value=self.value)
| StarcoderdataPython |
1685831 | <filename>latte/monkey_patches/frappe/model/naming.py
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import now_datetime, cint, cstr
import re
from six import string_types
from frappe.model import naming, document
from frappe.model.naming import (
validate_name,
set_name_from_naming_options,
make_autoname,
_set_amended_name
)
def set_new_name(doc):
    """
    Sets the `name` property for the document based on various rules.
    1. If amended doc, set suffix.
    2. If `autoname` method is declared, then call it.
    3. If `autoname` property is set in the DocType (`meta`), then build it using the `autoname` property.
    4. If no rule defined, use hash.

    Monkey-patched over frappe.model.naming.set_new_name to additionally
    reject names containing characters listed in the site config key
    `docnames_disallowed_chars`.

    :param doc: Document to be named.
    """
    doc.run_method("before_naming")
    autoname = frappe.get_meta(doc.doctype).autoname or ""
    if autoname.lower() != "prompt" and not frappe.flags.in_import:
        doc.name = None
    if getattr(doc, "amended_from", None):
        # Amended documents get the parent's name plus an amendment suffix.
        _set_amended_name(doc)
        return
    elif getattr(doc.meta, "issingle", False):
        # Single doctypes are named after the doctype itself.
        doc.name = doc.doctype
    else:
        doc.run_method("autoname")
    if not doc.name and autoname:
        set_name_from_naming_options(autoname, doc)
    # if the autoname option is 'field:' and no name was derived, we need to
    # notify
    if autoname.startswith('field:') and not doc.name:
        fieldname = autoname[6:]
        frappe.throw(_("{0} is required").format(
            doc.meta.get_label(fieldname)))
    # Collapse repeated whitespace, except for hash/series-style names.
    if doc.name and autoname.lower() not in ("hash", "auto_increment") and ("#" not in autoname):
        doc.name = ' '.join(doc.name.split())
    # at this point, we fall back to name generation with the hash option
    if not doc.name or autoname == 'hash':
        doc.name = make_autoname('hash', doc.doctype)
    # Monkeypatch: Check for special character if to be disallowed -
    # read from site_config for sanitise_docnames = 1
    docnames_disallowed_chars = frappe.local.conf.get(
        "docnames_disallowed_chars")
    if docnames_disallowed_chars:
        # The config value is used directly as a regex character set.
        string_check = re.compile(docnames_disallowed_chars)
        if string_check.search(doc.name):
            frappe.throw(
                f"{doc.name} contains special character. Not allowed list - {docnames_disallowed_chars}")
    doc.name = validate_name(
        doc.doctype,
        doc.name,
        frappe.get_meta(doc.doctype).get_field("name_case")
    )
# Install the patched implementation at both import sites so every caller
# (frappe.model.naming and frappe.model.document) sees the override.
naming.set_new_name = set_new_name
document.set_new_name = set_new_name
| StarcoderdataPython |
11353288 | # -*- coding:utf-8; python-indent:2; indent-tabs-mode:nil -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Used for tests."""
# pylint: disable=unused-argument
import sys
from pytypedecl import checker
# def IntToInt(i :int) -> int
def IntToInt(i):
    """Fixture: ignores *i* and always yields 42."""
    answer = 42
    return answer
# def MultiArgs(a : int, b: int, c:str, d: str) -> None
def MultiArgs(a, b, c, d):
    """Fixture: accepts four arguments and returns None."""
    return None
# def GoodRet() -> int
def GoodRet():
    """Fixture: returns an int, matching its declared signature."""
    return 42
# def BadRet() -> int
def BadRet():
    """Fixture: deliberately returns a str despite the declared int return."""
    return "I want integer"
# def NoneRet() -> None
def NoneRet():
    """Fixture: deliberately returns a list despite the declared None return."""
    return [1337]
class Apple(object):
    # Empty marker class used as a return-type fixture for the checker.
    pass
class Banana(object):
    # Empty marker class; AppleRet returns this on purpose to trip the checker.
    pass
class Orange(object):
    # Empty marker class used as a type fixture.
    pass
# def AppleRet() -> Apple
def AppleRet():
    # Declared to return Apple; returns Banana so the checker reports a mismatch.
    return Banana()  # Intentionally returning the wrong type
class FooException(Exception):
    # Exception type the *Fail fixtures are declared to raise.
    pass
class WrongException(Exception):
    # Secondary declared exception type for the *Fail fixtures.
    pass
class BadException(Exception):
    # Exception type that is NOT declared anywhere; raised to trip the checker.
    pass
# def FooFail() -> None raise FooException
def FooFail():
    # Well-behaved fixture: raises exactly the declared exception type.
    raise FooException
# def WrongFail() -> None raise FooException, WrongException
def WrongFail():
    # Raises one of the two declared exception types.
    raise WrongException
# def BadFail() -> None raise FooException, WrongException
def BadFail():
    # Deliberately raises an undeclared exception type.
    raise BadException
# def MultiFail(a: Apple) -> None raise FooException
def MultiFail(a):
    # Deliberately raises an exception type not listed in the declaration.
    raise BadException
# def MultiArgsNoType(a: int, b, c, d: str, e) -> int
def MultiArgsNoType(a, b, c, d, e):
    """Fixture: returns its first argument; b, c and e are intentionally untyped."""
    first = a
    return first
# Register this module with the checker; presumably the type declarations
# live in the companion "<module>.pytd" file (__file__ + "td") — confirm.
checker.CheckFromFile(sys.modules[__name__], __file__ + "td")
| StarcoderdataPython |
6626008 | <filename>genmotion/render/c4d/params.py
# Get skeleton information from https://meshcapade.wiki/SMPL#smpl-x
# Maps SMPL joint index -> joint name (24 joints: body plus L/R hand roots).
SMPL_SKELETON = {
    0: 'Pelvis', 3: 'Spine1', 6: 'Spine2', 9: 'Spine3', 12: 'Neck', 15: 'Head',
    1: 'L_Hip', 4: 'L_Knee', 7: 'L_Ankle', 10: 'L_Foot',
    2: 'R_Hip', 5: 'R_Knee', 8: 'R_Ankle', 11: 'R_Foot',
    13: 'L_Collar', 16: 'L_Shoulder', 18: 'L_Elbow', 20: 'L_Wrist',
    14: 'R_Collar', 17: 'R_Shoulder', 19: 'R_Elbow', 21: 'R_Wrist',
    22: 'L_Hand',
    23: 'R_Hand'
}
# Maps SMPL-H joint index -> joint name (52 joints: 22 body + 30 hand joints).
# Fixed: a dataset artifact (" | StarcoderdataPython |") was fused onto the
# closing brace, which made the module unparsable.
SMPL_H_SKELETON = {
    0: 'Pelvis', 3: 'Spine1', 6: 'Spine2', 9: 'Spine3', 12: 'Neck', 15: 'Head',
    1: 'L_Hip', 4: 'L_Knee', 7: 'L_Ankle', 10: 'L_Foot',
    2: 'R_Hip', 5: 'R_Knee', 8: 'R_Ankle', 11: 'R_Foot',
    13: 'L_Collar', 16: 'L_Shoulder', 18: 'L_Elbow', 20: 'L_Wrist',
    14: 'R_Collar', 17: 'R_Shoulder', 19: 'R_Elbow', 21: 'R_Wrist',
    22: 'lindex0', 23: 'lindex1', 24: 'lindex2',
    25: 'lmiddle0', 26: 'lmiddle1', 27: 'lmiddle2',
    28: 'lpinky0', 29: 'lpinky1', 30: 'lpinky2',
    31: 'lring0', 32: 'lring1', 33: 'lring2',
    34: 'lthumb0', 35: 'lthumb1', 36: 'lthumb2',
    37: 'rindex0', 38: 'rindex1', 39: 'rindex2',
    40: 'rmiddle0', 41: 'rmiddle1', 42: 'rmiddle2',
    43: 'rpinky0', 44: 'rpinky1', 45: 'rpinky2',
    46: 'rring0', 47: 'rring1', 48: 'rring2',
    49: 'rthumb0', 50: 'rthumb1', 51: 'rthumb2'
}
4815989 | <reponame>RussianNLP/WikiOmnia<filename>filtering_rus/data/language_checker.py
from langid.langid import LanguageIdentifier
from langid.langid import model as lang_ident
class CheckRussian:
    """
    Detect whether a text is written in Russian.

    Only the first 100 characters are classified: 1) it is faster, and
    2) non-Russian terms usually appear at the beginning of a summary.
    (Summaries with too many term translations up front are removed later.)
    """

    def __init__(self, language: str = 'ru'):
        self.language = language
        self.language_identifier = LanguageIdentifier.from_modelstring(
            lang_ident, norm_probs=True)
        self.check_params()

    def is_language(self, text: str) -> bool:
        """Return True when the classifier labels *text* as ``self.language``."""
        detected, _probability = self.language_identifier.classify(text[:100])
        return detected == self.language

    def check_params(self):
        pass  # FixMe
| StarcoderdataPython |
3441294 | import pytz
import unittest
from airflow.models import DAG, TaskInstance, XCom, DagRun, DagTag, DagModel
from airflow.models.xcom import XCOM_RETURN_KEY
from airflow.operators.dummy import DummyOperator
from airflow.utils.session import create_session, provide_session
from airflow.utils.state import State
from datetime import datetime
from unittest import mock
from unittest.mock import MagicMock
from gcp_airflow_foundations.base_class.ods_metadata_config import (
OdsTableMetadataConfig,
)
from gcp_airflow_foundations.base_class.ods_table_config import OdsTableConfig
from gcp_airflow_foundations.enums.ingestion_type import IngestionType
from gcp_airflow_foundations.operators.gcp.ods.ods_merge_table_operator import (
MergeBigQueryODS,
)
TASK_ID = "test-bq-generic-operator"
TEST_DATASET = "test-dataset"
TEST_GCP_PROJECT_ID = "test-project"
TEST_TABLE_ID = "test-table-id"
TEST_STG_TABLE_ID = "test-staging-table-id"
DEFAULT_DATE = pytz.utc.localize(datetime(2015, 1, 1))
TEST_DAG_ID = "test-bigquery-operators"
SCHEMA_FIELDS = [{"name": "column", "type": "STRING"}]
@provide_session
def cleanup_xcom(session=None):
    # Remove every XCom row so state does not leak between tests.
    session.query(XCom).delete()
def clear_db_dags():
    # Wipe DAG-related tables (tags, models, runs, task instances) so each
    # test starts against an empty metadata database.
    with create_session() as session:
        session.query(DagTag).delete()
        session.query(DagModel).delete()
        session.query(DagRun).delete()
        session.query(TaskInstance).delete()
class TestMergeBigQueryODS(unittest.TestCase):
    """Verifies MergeBigQueryODS issues the expected BigQuery job for a
    FULL ingestion: the generated SQL and every run_query argument are
    asserted against the mocked BigQueryHook."""

    def setUp(self):
        args = {"owner": "airflow", "start_date": DEFAULT_DATE}
        self.dag = DAG("TEST_DAG_ID", default_args=args, schedule_interval="@once")
        self.dag.create_dagrun(
            run_id="test",
            start_date=DEFAULT_DATE,
            execution_date=DEFAULT_DATE,
            state=State.SUCCESS,
        )
        task = DummyOperator(task_id="schema_parsing", dag=self.dag)
        self.ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        self.template_context = self.ti.get_template_context()
        # The operator pulls the parsed table schema from this task's XCom.
        self.ti.xcom_push(key=XCOM_RETURN_KEY, value={TEST_TABLE_ID: SCHEMA_FIELDS})

    def doCleanups(self):
        cleanup_xcom()
        clear_db_dags()

    @mock.patch("airflow.providers.google.cloud.operators.bigquery.BigQueryHook")
    def test_execute(self, mock_hook):
        operator = MergeBigQueryODS(
            task_id=TASK_ID,
            project_id=TEST_GCP_PROJECT_ID,
            stg_table_name=TEST_STG_TABLE_ID,
            data_table_name=TEST_TABLE_ID,
            stg_dataset_name=TEST_DATASET,
            data_dataset_name=TEST_DATASET,
            columns=["column"],
            surrogate_keys=["column"],
            column_mapping={"column": "column"},
            column_casting=None,
            new_column_udfs=None,
            ingestion_type=IngestionType.FULL,
            ods_table_config=OdsTableConfig(
                ods_metadata=OdsTableMetadataConfig(),
                ods_table_time_partitioning=None,
                partition_column_name=None,
            ),
        )
        operator.pre_execute(context=self.template_context)
        operator.execute(MagicMock())
        ds = self.template_context["ds"]
        # Expected SQL: a full overwrite that adds the ODS metadata columns.
        # NOTE(review): the literal's internal indentation must match the
        # operator's generated SQL exactly.
        sql = f"""
        SELECT column AS `column`,
            CURRENT_TIMESTAMP() AS af_metadata_inserted_at,
            CURRENT_TIMESTAMP() AS af_metadata_updated_at,
            TO_BASE64(MD5(TO_JSON_STRING(S))) AS af_metadata_row_hash,
            TO_BASE64(MD5(ARRAY_TO_STRING([CAST(S.`column` AS STRING)], ""))) AS af_metadata_primary_key_hash
        FROM `test-project.test-dataset.test-staging-table-id_{ds}` S
        """
        mock_hook.return_value.run_query.assert_called_once_with(
            sql=sql,
            destination_dataset_table=f"{TEST_GCP_PROJECT_ID}.{TEST_DATASET}.{TEST_TABLE_ID}",
            write_disposition="WRITE_TRUNCATE",
            allow_large_results=False,
            flatten_results=None,
            udf_config=None,
            maximum_billing_tier=None,
            maximum_bytes_billed=None,
            create_disposition="CREATE_NEVER",
            schema_update_options=None,
            query_params=None,
            labels=None,
            priority="INTERACTIVE",
            time_partitioning=None,
            api_resource_configs=None,
            cluster_fields=None,
            encryption_configuration=None,
        )
| StarcoderdataPython |
4808147 | <filename>bugbuster/cmd/__init__.py
import sys
from oslo.config import cfg
from bugbuster.utils import gerrit
CONF = cfg.CONF
opts = [
cfg.StrOpt('exceptions',
default='./exceptions.txt',
help='Exceptions file'),
]
CONF.register_opts(opts)
EXCEPTIONS = ["128497"]
def main():
    """Print gerrit changes that look easy to review, ordered by change number."""
    api = gerrit.GerritAPI()
    query = ('message:Closes+NOT+label:Verified<=-1+'
             'label:Verified+status:open+'
             'NOT+label:Code-Review<=-1+'
             'project:openstack/nova+'
             'branch:master')
    # Keep changes that are not excluded (hard-coded or via the exceptions
    # file) and that score as easy to review.
    candidates = [
        change for change in api.get_changes(query)
        if change['_number'] not in EXCEPTIONS
        and not infile(str(change['_number']))
        and easy_review(change)
    ]
    candidates.sort(key=lambda change: change['_number'])
    for change in candidates:
        print_out(change)
def infile(word):
    """Return True if *word* appears as a whole line in the exceptions file.

    Fixed: the previous implementation did a substring test against the whole
    file contents, so e.g. "1284" incorrectly matched an entry "128497"; it
    also opened the file in 'r+' (read/write) mode for a read-only operation.
    """
    # We should cache the stream...
    with open(CONF.exceptions, 'r') as f:
        return any(line.strip() == word for line in f)
def easy_review(change):
    """Return True when a change looks small enough to review quickly.

    Each touched file costs 10 points plus one point per inserted or deleted
    line; a change scoring under 100 is considered easy.
    """
    # Fixed: .values()[0] relies on Python 2 returning a list; dict views are
    # not indexable on Python 3. next(iter(...)) works on both.
    files = next(iter(change['revisions'].values()))['files']
    score = 0
    for stats in files.values():
        score += 10
        score += stats.get('lines_inserted', 0)
        score += stats.get('lines_deleted', 0)
    return score < 100
def print_out(change):
    # Print "subject - review URL" for one change (Python 2 print syntax).
    subject = change['subject']
    url = "https://review.openstack.org/#/c/%d/" % change['_number']
    print "%s - %s" % (subject, url)
# Fixed: the guard compared against 'main' instead of '__main__', so the
# CLI entry point could never run.
if __name__ == '__main__':
    sys.exit(main())
| StarcoderdataPython |
4993218 | <reponame>PacktPublishing/PySpark-and-AWS-Master-Big-Data-with-PySpark-and-AWS
# Databricks notebook source
from pyspark.streaming import StreamingContext
from pyspark import SparkConf, SparkContext

# Databricks notebook: word-count-style aggregation over a file stream.
# Batch interval is 1 second.
conf = SparkConf().setAppName("Streaming")
sc = SparkContext.getOrCreate(conf=conf)
ssc = StreamingContext(sc, 1)

# COMMAND ----------

# Watch the directory for newly arriving text files.
rdd = ssc.textFileStream("/FileStore/tables/")

# COMMAND ----------

# NOTE(review): this counts occurrences of whole LINES, not words — confirm
# whether a flatMap/split step was intended.
rdd = rdd.map(lambda x: (x,1))
rdd = rdd.reduceByKey(lambda x,y : x+y)
rdd.pprint()
ssc.start()
ssc.awaitTerminationOrTimeout(1000000)

# COMMAND ----------
| StarcoderdataPython |
5045304 | # Generated by Django 3.2.11 on 2022-01-12 08:40
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations): switches implicit AutoField
    # primary keys to BigAutoField across the tracking models. Generated
    # operations should not be hand-edited.

    dependencies = [
        ('tracking', '0002_auto_20190516_1146'),
    ]

    operations = [
        migrations.AlterField(
            model_name='applicationtracking',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='canvaslogin',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='proposalstatuschange',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='proposaltracking',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='telemetrykey',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='userlogin',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]
| StarcoderdataPython |
4910415 | <filename>SERVER/src/client.py
import socket
from random import randint
from threading import Thread
from json import dumps, loads
clients = {}
class Client:
    """One connected chat peer.

    Reads single-key JSON messages from its socket on a dedicated thread and
    relays chat messages to a linked peer identified by id. Instances register
    themselves in the module-level `clients` dict keyed by stringified id.
    """

    def __init__(self, id: int, conn, addr) -> None:
        clients[str(id)] = self
        self.id = str(id)
        self.conn = conn
        self.addr = addr
        self.name = None     # set by the first {'name': ...} message
        self.linked = None   # peer Client once a {'connect': ...} succeeds
        self.thread = Thread(target=self.connect)
        self.thread.start()

    def connect(self):
        """Receive loop for this client's socket.

        Fixed: the original implementation recursed once per received message
        (``return self.connect()``), so a long-lived connection eventually hit
        RecursionError; it now loops. A bare ``except`` was also narrowed.
        """
        while True:
            try:
                data = loads(self.conn.recv(1024).decode('utf-8'))
                # Protocol: exactly one key per message.
                if len(data) > 1:
                    return self.disconnect()
            except Exception:
                # Malformed JSON or a dropped socket both end the session.
                return self.disconnect()
            thread = Thread(target=self.message_handler, args=[data])
            thread.start()

    def send(self, content):
        """Best-effort send; a dead peer socket is silently ignored."""
        try:
            self.conn.send(content.encode('utf-8'))
        except OSError:
            return

    def message_handler(self, data):
        """Dispatch one decoded message dict (name / connect / message)."""
        if self.name is None:
            if 'name' in data and len(data['name']) < 10:
                self.name = data['name']
                print(f"{self.id} : {self.name}")
            else:
                self.disconnect()
        if 'connect' in data:
            if self.linked is not None:
                return self.disconnect()
            if data['connect'] not in clients or data['connect'] == self.id:
                self.send('invalid id')
                return self.disconnect()
            else:
                self.linked = clients[data['connect']]
                if self.linked.linked != self:
                    # Other side has not linked back yet.
                    self.send('Waiting for connection...')
                    # self.linked.send('Someone connected to you! ID: {self.id}!')
                else:
                    self.send('Connected!')
                    self.linked.send("Connected!")
        elif 'message' in data:
            if not self.linked:
                self.send('invalid id')
                self.disconnect()
            if self.linked.linked == self:
                self.linked.send(dumps({'message': {self.name: data['message']}}))

    def disconnect(self):
        """Unregister this client and close its socket (idempotent)."""
        try:
            del clients[self.id]
            return self.conn.close()
        except (KeyError, OSError):
            pass
class Admin(Client):
    # Placeholder for privileged clients; currently identical to Client.
    # Fixed: a dataset artifact (" | StarcoderdataPython |") was fused onto
    # the `pass` statement, which made the module unparsable.
    pass
1675498 | <reponame>shubh2ds/DSA_Python
def partition_for_quick_sort(arr, sidx, eidx):
    """Partition arr[sidx:eidx+1] in place around the pivot arr[sidx].

    The pivot is moved to its final sorted position: every element strictly
    smaller than it ends up on its left, everything else on its right.
    Returns the pivot's final index.
    """
    pivot = arr[sidx]
    # The pivot's final slot is sidx plus the count of smaller elements.
    smaller = sum(1 for k in range(sidx, eidx + 1) if arr[k] < pivot)
    pivot_idx = sidx + smaller
    arr[pivot_idx], arr[sidx] = arr[sidx], arr[pivot_idx]
    # Two-pointer sweep: move smaller values left of the pivot slot and
    # greater-or-equal values right of it.
    left, right = sidx, eidx
    while left < right:
        if arr[left] < pivot:
            left += 1
        elif arr[right] >= pivot:
            right -= 1
        else:
            arr[left], arr[right] = arr[right], arr[left]
            left += 1
            right -= 1
    return pivot_idx
arr = [6, 2, 1, 4, 3, 8, 9, 12, 5]
sidx, eidx = 0, len(arr) - 1
# Fixed: the script called an undefined name `partition`, raising NameError;
# the function defined above is `partition_for_quick_sort`.
partition_for_quick_sort(arr, sidx, eidx)
print(arr)
| StarcoderdataPython |
4966403 | <reponame>FengYen-Chang/ImageCaptioning<filename>pytorch/sample.py
import torch
import matplotlib.pyplot as plt
import numpy as np
import argparse
import pickle
import os
from torchvision import transforms
from build_vocab import Vocabulary
from model import EncoderCNN, DecoderRNN, Embed, DecoderRNN2
from PIL import Image
# Device configuration
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device = torch.device('cpu')
def load_image(image_path, transform=None):
    """Load an image, resize it to 224x224, and optionally apply *transform*.

    With a transform, returns a tensor with a leading batch dimension;
    without one, returns the resized PIL image.
    """
    image = Image.open(image_path).resize([224, 224], Image.LANCZOS)
    if transform is None:
        return image
    return transform(image).unsqueeze(0)
def main(args):
    """Generate a caption for args.image with a pretrained encoder/decoder and
    export the decoder to ONNX.

    NOTE(review): the LSTM state is hard-coded to hidden size 512 below, which
    assumes args.hidden_size == 512 — confirm before changing defaults.
    """
    # Image preprocessing
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406),
                             (0.229, 0.224, 0.225))])

    # Load vocabulary wrapper
    with open(args.vocab_path, 'rb') as f:
        vocab = pickle.load(f)

    # Build models
    encoder = EncoderCNN(args.embed_size).eval()  # eval mode (batchnorm uses moving mean/variance)
    decoder = DecoderRNN(args.embed_size, args.hidden_size, len(vocab), args.num_layers)
    encoder = encoder.to(device)
    decoder = decoder.to(device)

    # Load the trained model parameters
    encoder.load_state_dict(torch.load(args.encoder_path))
    decoder.load_state_dict(torch.load(args.decoder_path))

    # Prepare an image
    image = load_image(args.image, transform)
    image_tensor = image.to(device)

    # Define embed
    _embed = Embed(decoder.embed)
    # _decoder = DecoderRNN2(decoder.lstm, decoder.linear)

    # Generate a caption from the image: greedy decode for up to 20 steps.
    feature = encoder(image_tensor)
    sampled_ids = []
    state = (torch.zeros((1, 1, 512)).to(device), torch.zeros((1, 1, 512)).to(device))
    # sampled_ids = decoder(feature)
    # sampled_ids = sampled_ids[0].cpu().numpy()  # (1, max_seq_length) -> (max_seq_length)
    inputs = feature
    print (inputs.size())
    for i in range(20):
        # decoder returns (predicted id, next input embedding, next LSTM state)
        pred, inputs, state = decoder(inputs, state)
        # _, pred = outputs.max(1)
        sampled_ids.append(pred)
        # print (pred)
        # inputs = _embed(pred)
        # print (inputs)
    print (state[0].size())
    print (state[1].size())
    print (np.array(state).shape)
    sampled_ids = torch.stack(sampled_ids, 1)
    sampled_ids = sampled_ids[0].cpu().numpy()
    print (sampled_ids)

    # Save the model as .onnx format
    Decoder_ONNX_dir = '../models/onnx/decoder_nightly.onnx'
    Encoder_ONNX_dir = '../models/onnx/encoder.onnx'
    Embeded_ONNX_dir = '../models/onnx/embeded.onnx'
    state_for_onnx = torch.ones((1, 1, 512))
    # torch.onnx.export(encoder, image_tensor, Encoder_ONNX_dir)
    torch.onnx.export(decoder, (torch.ones(1, 256).to(device),state) , Decoder_ONNX_dir)
    # torch.onnx.export(_embed, pred, Embeded_ONNX_dir)

    # Convert word_ids to words, stopping at the end-of-sentence token.
    sampled_caption = []
    for word_id in sampled_ids:
        word = vocab.idx2word[word_id]
        sampled_caption.append(word)
        if word == '<end>':
            break
    sentence = ' '.join(sampled_caption)

    # Print out the image and the generated caption
    print (sentence)
    image = Image.open(args.image)
    plt.imshow(np.asarray(image))
# CLI entry point: model paths default to the checkpoints used in training.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--image', type=str, required=True, help='input image for generating caption')
    parser.add_argument('--encoder_path', type=str, default='models/encoder-2-1000.ckpt', help='path for trained encoder')
    parser.add_argument('--decoder_path', type=str, default='models/decoder-2-1000.ckpt', help='path for trained decoder')
    parser.add_argument('--vocab_path', type=str, default='data/vocab.pkl', help='path for vocabulary wrapper')

    # Model parameters (should be same as paramters in train.py)
    parser.add_argument('--embed_size', type=int , default=256, help='dimension of word embedding vectors')
    parser.add_argument('--hidden_size', type=int , default=512, help='dimension of lstm hidden states')
    parser.add_argument('--num_layers', type=int , default=1, help='number of layers in lstm')
    args = parser.parse_args()
    main(args)
| StarcoderdataPython |
9625602 | import os
import time
from collections import defaultdict
class DeterministicDie:
    """A die that rolls 1, 2, ..., 100, 1, 2, ... and counts its rolls."""

    def __init__(self):
        self.curr = 0    # face shown by the last roll (0 = not rolled yet)
        self.rolls = 0   # total number of rolls so far

    def roll(self):
        """Advance to the next face, wrapping after 100, and return it."""
        self.curr = 1 if self.curr == 100 else self.curr + 1
        self.rolls += 1
        return self.curr
class Game:
    """Immutable snapshot of a two-player Dirac Dice game state."""

    def __init__(self, pos1, score1, pos2, score2):
        self.pos1 = pos1
        self.score1 = score1
        self.pos2 = pos2
        self.score2 = score2

    def is_finished(self):
        """A game ends once either player has reached 21 points."""
        return self.score1 >= 21 or self.score2 >= 21

    def process_move(self, player1_turn, pos_diff):
        """Return a new Game after the active player advances pos_diff squares."""
        if player1_turn:
            new_pos = (self.pos1 + pos_diff - 1) % 10 + 1  # board squares are 1..10
            return Game(new_pos, self.score1 + new_pos, self.pos2, self.score2)
        new_pos = (self.pos2 + pos_diff - 1) % 10 + 1
        return Game(self.pos1, self.score1, new_pos, self.score2 + new_pos)

    def serialise(self):
        """Encode the state as 'pos1,score1,pos2,score2' for use as a dict key."""
        return "{},{},{},{}".format(self.pos1, self.score1, self.pos2, self.score2)

    @staticmethod
    def deserialise(serialised):
        """Inverse of serialise."""
        pos1, score1, pos2, score2 = (int(part) for part in serialised.split(","))
        return Game(pos1, score1, pos2, score2)
def main(puzzle_input):
    """Solve AoC 2021 day 21 from the two input lines.

    NOTE(review): starting positions are read as the LAST character of each
    line, so this assumes single-digit positions (1-9) — confirm.
    """
    pos1_initial = int(puzzle_input[0][-1])
    pos2_initial = int(puzzle_input[1][-1])

    # Part 1: play with the deterministic die until someone reaches 1000.
    die = DeterministicDie()
    player1 = {
        "pos": pos1_initial,
        "score": 0,
    }
    player2 = {
        "pos": pos2_initial,
        "score": 0,
    }
    # Each player links to the other so turns alternate via curr["other"].
    player1["other"] = player2
    player2["other"] = player1
    curr = player1
    while player1["score"] < 1000 and player2["score"] < 1000:
        curr["pos"] = (curr["pos"] + die.roll() + die.roll() + die.roll() - 1) % 10 + 1
        curr["score"] += curr["pos"]
        curr = curr["other"]
    looser = player2 if player1["score"] >= 1000 else player1
    print("Solution 1: the product of the score of the losing player by the number of times the die was rolled is {}"
          .format(looser["score"] * die.rolls))

    # Part 2: Dirac die — count universes per serialised game state instead
    # of enumerating each universe individually.
    initial = Game(pos1_initial, 0, pos2_initial, 0)
    games = defaultdict(lambda: 0)
    games[initial.serialise()] = 1
    continue_games = True
    player1_turn = True
    while continue_games:
        continue_games = False
        games_copy = games.copy()
        for serialised, num in games_copy.items():
            game = Game.deserialise(serialised)
            if num == 0 or game.is_finished():
                continue
            games[serialised] -= num
            # Distribution of the sum of three 3-sided dice: total -> ways.
            moves = {
                3: 1,
                4: 3,
                5: 6,
                6: 7,
                7: 6,
                8: 3,
                9: 1,
            }
            for move, factor in moves.items():
                new_game = game.process_move(player1_turn, move)
                games[new_game.serialise()] += num * factor
            continue_games = True
        player1_turn = not player1_turn

    # Tally finished universes per winner.
    player1_wins = 0
    player2_wins = 0
    for serialised, num in games.items():
        game = Game.deserialise(serialised)
        if game.score1 > game.score2:
            player1_wins += num
        else:
            player2_wins += num
    player_num = 1 if player1_wins > player2_wins else 2
    player_score = max(player1_wins, player2_wins)
    print("Solution 2: player {} wins in more universes, namely in {}".format(player_num, player_score))
if __name__ == "__main__":
input_filename = os.path.basename(__file__).replace("aoc", "input").replace("py", "txt")
with open("input/{}".format(input_filename)) as f:
content = f.read().splitlines()
start = time.time()
main(content)
end = time.time()
diff = (end - start)
if diff >= 1:
print("The solutions took {}s".format(round(diff)))
else:
print("The solutions took {}ms".format(round(diff * 1000)))
| StarcoderdataPython |
254776 | <reponame>pablogo1/wallet
# Generated by Django 4.0.1 on 2022-01-10 07:16
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 4.0.1 (makemigrations); generated operations
    # should not be hand-edited.

    dependencies = [
        ('wallet', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='account',
            name='current_balance',
            field=models.IntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='account',
            name='description',
            field=models.CharField(blank=True, max_length=150, null=True),
        ),
    ]
| StarcoderdataPython |
12829371 | from .science import * | StarcoderdataPython |
5186614 | <gh_stars>10-100
import re
import sys
import mapnik
# mapnik_utils
from mapnik_utils.projection import EasyProjection
if not hasattr(mapnik,'ProjTransform'):
from compatibility import ProjTransform
BoostPythonMetaclass = mapnik.Coord.__class__
class _injector(object):
    # Python 2 metaclass hook: when a class subclasses both a Boost.Python
    # class and _injector, every attribute of the new class is copied onto
    # the Boost.Python base — effectively monkey-patching mapnik's classes.
    class __metaclass__(BoostPythonMetaclass):
        def __init__(self, name, bases, dict):
            for b in bases:
                # Skip _injector itself and plain `type`; patch real bases.
                if type(b) not in (self, type):
                    for k,v in dict.items():
                        setattr(b,k,v)
            return type.__init__(self, name, bases, dict)
# Compatibility alias: older mapnik releases call the bounding box Envelope.
if not hasattr(mapnik,'Box2d'):
    mapnik.Box2d = mapnik.Envelope
class _Map(mapnik.Map,_injector):
def set_easy_srs(self,srs):
self.srs = EasyProjection(srs).params()
@property
def proj_obj(self):
return EasyProjection(self.srs)
def lon_lat_bbox(self):
return self.envelope().forward(self.proj_obj,EasyProjection(4326))
def find_layer(self,name):
lyr = [l for l in self.layers if l.name.lower() == name.lower()]
if not lyr:
raise ValueError('Layer "%s" not found, available layers are: ["%s"]' % (name,', '.join(self.layer_names())))
return lyr[0]
def layer_names(self):
return [l.name for l in self.layers]
def active_layers(self):
return [l.name for l in self.layers if l.active]
def zoom_to_layer(self,layer):
layer = self.find_layer(layer)
layer_box = layer.envelope()
box = layer_box.forward(layer.proj_obj,self.proj_obj)
self.zoom_to_box(box)
def lon_lat_layers_bounds(self):
return self.layers_bounds().forward(self.proj_obj,EasyProjection(4326))
def layers_bounds(self):
new_box = None
if len(self.layers):
first = self.layers[0]
new_box = None
try:
new_box = first.envelope().forward(first.proj_obj,self.proj_obj)
except RuntimeError:
# try clipping layer extent to map
new_box = self.envelope().forward(self.proj_obj,first.proj_obj)
new_box.clip(first.envelope())
for layer in self.layers:
layer_box = layer.envelope()
box = None
try:
box = layer_box.forward(layer.proj_obj,self.proj_obj)
except RuntimeError:
# try clipping layer extent to map
box = self.envelope().forward(self.proj_obj,first.proj_obj)
box.clip(layer_box)
new_box.expand_to_include(box)
return new_box
def zoom_to_layers(self,layers):
first = self.find_layer(layers[0])
new_box = first.envelope().forward(first.proj_obj,self.proj_obj)
for lyr in layers:
layer = self.find_layer(lyr)
layer_box = layer.envelope()
box = layer_box.forward(layer.proj_obj,self.proj_obj)
new_box.expand_to_include(box)
self.zoom_to_box(new_box)
def zoom_to_level(self,level):
c = self.layers_bounds().center()
self.set_center_and_zoom(c.x,c.y,level=level,geographic=self.proj_obj.geographic)
def max_resolution(self):
#self.zoom_max()
map_w,map_h = self.envelope().width(),self.envelope().height()
return max(map_w / self.width, map_h / self.height)
def get_scales(self,number):
max_res = self.max_resolution()
return [max_res / 2 ** i for i in range(int(number))]
def get_scale_for_zoom_level(self,level):
return self.get_scales(level+1)[level]
# http://trac.mapnik.org/browser/trunk/src/map.cpp#L245
def set_center_and_zoom(self,lon,lat,level=0,geographic=True):
coords = mapnik.Coord(lon,lat)
if geographic and not self.proj_obj.geographic:
wgs_84 = mapnik.Projection('+init=epsg:4326')
coords = coords.forward(wgs_84,self.proj_obj)
w,h = self.width, self.height
res = self.get_scale_for_zoom_level(level)
box = mapnik.Box2d(coords.x - 0.5 * w * res,
coords.y - 0.5 * h * res,
coords.x + 0.5 * w * res,
coords.y + 0.5 * h * res)
self.zoom_to_box(box)
def set_center_and_radius(self,lon,lat,radius=None,geographic=True):
coords = mapnik.Coord(lon,lat)
box = mapnik.Box2d(coords.x - radius,
coords.y - radius,
coords.x + radius,
coords.y + radius)
if geographic and not self.proj_obj.geographic:
wgs_84 = mapnik.Projection('+init=epsg:4326')
box = box.forward(wgs_84,self.proj_obj)
self.zoom_to_box(box)
def zoom_max(self):
max_extent = mapnik.Box2d(-179.99999694572804,-85.0511285163245,179.99999694572804,85.0511287798066)
if not self.proj_obj.geographic:
wgs_84 = mapnik.Projection('+init=epsg:4326')
max_extent = max_extent.forward(wgs_84,self.proj_obj)
self.zoom_to_box(max_extent)
def activate_layers(self,names):
self.select_layers(names,remove=False)
def select_layers(self,names,remove=True):
disactivated = []
selected = []
if not isinstance(names,list):
names = [names]
for lyr in self.layers:
if not lyr.name in names and remove:
lyr.active = False
disactivated.append(lyr.name)
else:
lyr.active = True
selected.append(lyr.name)
return selected, disactivated
def intersecting_layers(self):
lyrs = []
for layer in self.layers:
layer_box = None
try:
layer_box = layer.envelope().forward(layer.proj_obj,self.proj_obj)
except RuntimeError:
# try clipping layer extent to map
layer_box = self.envelope().forward(self.proj_obj,layer.proj_obj)
layer_box.clip(layer.envelope())
if layer_box.intersects(self.envelope()):
#layer.active_rules = layer.active_rules(self)
lyrs.append(layer)
return lyrs
def to_wld(self, x_rotation=0.0, y_rotation=0.0):
    """Render an ESRI world file ('.wld') string for the current extent.

    A world file georeferences a rendered image so GIS viewers can place
    it: six newline-separated values giving pixel sizes, the two rotation
    terms, and the map coordinate of the *center* of the top-left pixel.
    '.wld' is the generic extension; '.tfw' (tiff) and '.pgw' (png) are
    format-specific variants.
    Info from: http://gdal.osgeo.org/frmt_various.html#WLD
    """
    extent = self.envelope()
    # map units covered by one pixel in each direction
    px_w = (extent.maxx - extent.minx) / self.width
    px_h = (extent.maxy - extent.miny) / self.height
    # coordinates of the center of the upper-left pixel
    center_x = extent.minx + 0.5 * px_w + 0.5 * x_rotation
    center_y = extent.maxy + 0.5 * (px_h * -1) + 0.5 * y_rotation
    # value order per GDAL:
    # http://trac.osgeo.org/gdal/browser/trunk/gdal/gcore/gdal_misc.cpp#L1296
    return '''%.10f\n%.10f\n%.10f\n-%.10f\n%.10f\n%.10f\n''' % (
        px_w,        # geotransform[1] - width of pixel
        y_rotation,  # geotransform[4] - rotational coefficient, zero for north-up
        x_rotation,  # geotransform[2] - rotational coefficient, zero for north-up
        px_h,        # geotransform[5] - height of pixel (negated via the format)
        center_x,    # geotransform[0] - x offset to center of top left pixel
        center_y,    # geotransform[3] - y offset to center of top left pixel
    )
class _Layer(mapnik.Layer, _injector):
    # Convenience methods injected into mapnik.Layer via the _injector mixin.
    @property
    def proj_obj(self):
        """The layer's projection wrapped as an ``EasyProjection``."""
        return EasyProjection(self.srs)
    def set_srs_by_srid(self, srid):
        """Set the layer srs string from a numeric SRID (e.g. 4326)."""
        self.srs = EasyProjection(srid).params()
    def active_rules(self, map):
        """List the style rules active at *map*'s current scale.

        Returns a list of dicts, each with the rule name, its parent
        style name, and the rule filter rendered as a string.
        """
        rules = []
        for style in self.styles:
            sty_obj = map.find_style(style)
            for rule in sty_obj.rules:
                # only rules whose min/max scale brackets the current scale
                if rule.active(map.scale_denominator()):
                    rules.append({'name': rule.name, 'parent': style, 'filter': str(rule.filter)})
        return rules
class _Coord(mapnik.Coord, _injector):
    # Injected helper: reproject a single coordinate between projections.
    def forward(self, from_prj, to_prj):
        """Return this coordinate transformed from *from_prj* to *to_prj*."""
        trans = mapnik.ProjTransform(from_prj, to_prj)
        return trans.forward(self)
class _Box2d(mapnik.Box2d, _injector):
    # Injected helper: reproject a bounding box between projections.
    def forward(self, from_prj, to_prj):
        """Return this box transformed from *from_prj* to *to_prj*."""
        trans = mapnik.ProjTransform(from_prj, to_prj)
        return trans.forward(self)
if __name__ == '__main__':
import doctest
doctest.testmod() | StarcoderdataPython |
1675709 | <filename>landlibrary/importers/AGRIS/agris/LandVoc/landvoc.py
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
import csv
class LandVoc(object):
    """In-memory view of the LandVoc vocabulary.

    Loads ``landvoc-and-fixed-relatioships.csv`` (shipped next to this
    module) and exposes concept labels, their fixed theme / OAC
    ("Overarching Category") relations, and URI-to-label lookups.

    NOTE: this module is Python 2 only (``reader.next()``, ``print``
    statements below).
    """
    def __init__(self):
        basepath = os.path.dirname(__file__)
        self.concepts_and_fixed_relations, self.uris_to_concepts = self.process_landvoc_file(self._input_path(basepath, "landvoc-and-fixed-relatioships.csv"))
    def get_only_concepts(self):
        # Concept labels only (dict keys), without their relations.
        return self.concepts_and_fixed_relations.keys()
    def get_concepts(self):
        # Full mapping: label -> {"themes": set, "oacs": set}.
        return self.concepts_and_fixed_relations
    def _input_path(self, basepath, filename):
        # Absolute path of a data file living next to this module.
        return os.path.abspath(os.path.join(basepath, filename))
    def process_landvoc_file(self, filename):
        """Parse the LandVoc CSV.

        Expected layout: label; uri; then one "x"-marked column per
        theme/OAC header.  Returns ``(concepts, uris_to_concept)``.
        """
        concepts={}
        uris_to_concept={}
        with open(filename) as csvfile:
            reader = csv.reader(csvfile, delimiter=';', quotechar='"')
            headers = reader.next()
            for row in reader:
                concept_label = row[0]
                concept_uri = row[1]
                uris_to_concept[concept_uri]=concept_label
                concepts[concept_label] = {"themes":set(), "oacs": set()}
                # Columns after label/uri: an "x" marks membership.
                for key, value in zip(headers[2:], row[2:]):
                    if value=="x":
                        if key in ["Access to Land & Tenure Security", "Land Use, Management & Investment", "Land Policy & Legislation"] : #OACS
                            concepts[concept_label]["oacs"].add(key)
                        else: # THEMES
                            concepts[concept_label]["themes"].add(key)
        return concepts, uris_to_concept
    def get_concepts_direct(self, potential_concepts):
        # Case-insensitive filter of *potential_concepts* against known labels.
        # NOTE(review): this returns lower-cased labels while get_concepts()
        # keys keep their original case -- passing this result on to
        # get_fixed_themes/get_fixed_oacs may miss mixed-case concepts;
        # confirm intended.
        return [x.lower() for x in potential_concepts if x.lower() in map(str.lower, self.get_only_concepts())]
    def get_concepts_direct_from_uris(self, potential_uris):
        # Map known URIs to their concept labels, silently skipping unknowns.
        return [self.uris_to_concepts[uri] for uri in potential_uris if uri in self.uris_to_concepts.keys()]
    def get_fixed_themes(self, selected_concepts):
        """Union of the fixed themes of every known concept in the list.

        Unknown concepts are reported on stdout and skipped.
        """
        themes=set()
        for x in selected_concepts:
            if x in self.get_concepts():
                th = self.get_concepts()[x]["themes"]
                themes |= th
            else:
                print "Warning: unknown concept="+x
        return themes
    def get_fixed_oacs(self, selected_concepts):
        """Union of the fixed OACs of every known concept in the list.

        Unknown concepts are reported on stdout and skipped.
        """
        oacs=set()
        for x in selected_concepts:
            if x in self.get_concepts():
                oa = self.get_concepts()[x]["oacs"]
                oacs |= oa
            else:
                print "Warning: unknown concept="+x
        return oacs
# def generate_list_from_string(self, s):
# return map(str.strip, filter(None, s.split(';')))
#
#
# def get_landvoc_related(self, source_concepts, scheme):
# flatten = lambda l: [item for sublist in l for item in sublist]
# final_result={"concepts_and_fixed_relations": [], "themes": [], "oacs": [] }
# if source_concepts:
# tmp_result = [self.relations[scheme][x.lower()] for x in source_concepts if x.lower() in map(str.lower, self.relations[scheme].keys())]
# for r in tmp_result:
# final_result["concepts_and_fixed_relations"].append(r["concepts_and_fixed_relations"])
# final_result["themes"].append(r["themes"])
# final_result["oacs"].append(r["oacs"])
# final_result["concepts_and_fixed_relations"] = list(set(flatten(final_result["concepts_and_fixed_relations"])))
# final_result["themes"] = list(set(flatten(final_result["themes"])))
# final_result["oacs"] = list(set(flatten(final_result["oacs"])))
# return final_result
#
#
# Module-level singleton; importing this module loads the CSV eagerly.
lv = LandVoc()
| StarcoderdataPython |
324839 | <filename>test/core_tests/test_parameters.py
import warnings
from amuse.test import amusetest
from amuse.support.exceptions import AmuseException, AmuseWarning
from amuse.units import nbody_system, generic_unit_system, generic_unit_converter
from amuse.units import units
from amuse.datamodel import parameters
from amuse.support.interface import HandleParameters
from amuse.support.interface import InCodeComponentImplementation
class BaseTestModule(object):
    """Minimal stand-in for a code interface.

    The parameter machinery invokes these hooks before reading or
    writing a parameter; the test doubles below only need them to
    exist and do nothing.
    """

    def before_get_parameter(self):
        """No-op hook run before a parameter value is read."""
        pass

    def before_set_parameter(self):
        """No-op hook run before a parameter value is written."""
        pass
class TestMethodParameterDefintions(amusetest.TestCase):
    """Tests for ModuleMethodParameterDefinition: parameters backed by
    getter/setter methods on a code interface object.

    NOTE: the class name misspells "Definitions"; kept for compatibility
    with any test selection by name.
    """
    def test1(self):
        # Getter-only read through Parameters.get_parameter().
        class TestModule(BaseTestModule):
            def get_test(self):
                return 123 | units.m
        o = TestModule()
        set = parameters.Parameters([parameters.ModuleMethodParameterDefinition(
            "get_test",
            "set_test",
            "test_name",
            "a test parameter",
            0.1 | units.m)], o)
        x = set.get_parameter("test_name")
        value = x.get_value()
        self.assertTrue(value.unit.has_same_base_as(units.m))
        self.assertEqual(value.value_in(units.m), 123)
    def test2(self):
        # Round-trip: set_value() forwards to the setter, get_value() to the getter.
        definition = parameters.ModuleMethodParameterDefinition(
            "get_test",
            "set_test",
            "test_name",
            "a test parameter",
            0.1 | units.m)
        class TestModule(BaseTestModule):
            def get_test(self):
                return self.x
            def set_test(self, value):
                self.x = value
        o = TestModule()
        set = parameters.Parameters([definition,], o)
        x = set.get_parameter("test_name")
        x.set_value(10|units.m)
        self.assertEqual(o.x, 10|units.m)
        value = x.get_value()
        self.assertEqual(value, 10|units.m)
    def test3(self):
        # Same round-trip with a dimensionless parameter.
        definition = parameters.ModuleMethodParameterDefinition(
            "get_test",
            "set_test",
            "test_name",
            "a test parameter",
            0.1 | units.no_unit)
        class TestModule(BaseTestModule):
            def get_test(self):
                return self.x
            def set_test(self, value):
                self.x = value
        o = TestModule()
        set = parameters.Parameters([definition,], o)
        x = set.get_parameter("test_name")
        x.set_value(10|units.none)
        self.assertEqual(o.x, 10|units.none)
        value = x.get_value()
        self.assertEqual(value, 10)
    def test4(self):
        # Attribute-style access through a bound parameter set, with
        # automatic unit conversion (km written, m seen by the code).
        parameter_definition = parameters.ModuleMethodParameterDefinition(
            "get_test",
            "set_test",
            "test_name",
            "a test parameter",
            0.1 | units.m
        )
        class TestModule(BaseTestModule):
            def get_test(self):
                return self.x
            def set_test(self, value):
                self.x = value
        class TestModuleBinding(object):
            parameter_definitions = [parameter_definition]
            def __init__(self):
                self.parameters = parameters.Parameters(self.parameter_definitions, self)
        class TestInterface(TestModule, TestModuleBinding):
            def __init__(self):
                TestModuleBinding.__init__(self)
        instance = TestInterface()
        self.assertTrue('test_name' in list(instance.parameters.names()))
        instance.parameters.test_name = 1 | units.km
        self.assertEqual(1 | units.km, instance.parameters.test_name)
        self.assertEqual(1000 | units.m, instance.x)
    def test5(self):
        # Setter-only definition (getter is None): the value can still be
        # written and read back through the parameter set.
        parameter_definition = parameters.ModuleMethodParameterDefinition(
            None,
            "set_test",
            "test_name",
            "a test parameter",
            0.1 | units.m
        )
        class TestModule(BaseTestModule):
            def get_test(self):
                return self.x
            def set_test(self, value):
                self.x = value
        class TestModuleBinding(object):
            parameter_definitions = [parameter_definition]
            def __init__(self):
                self.parameters = parameters.Parameters(self.parameter_definitions, self)
        class TestInterface(TestModule, TestModuleBinding):
            def __init__(self):
                TestModuleBinding.__init__(self)
        instance = TestInterface()
        self.assertTrue('test_name' in list(instance.parameters.names()))
        instance.parameters.test_name = 1 | units.km
        self.assertEqual(1 | units.km, instance.parameters.test_name)
        self.assertEqual(1000 | units.m, instance.x)
    def test6(self):
        # String-valued parameter (no units involved).
        parameter_definition = parameters.ModuleMethodParameterDefinition(
            "get_test",
            "set_test",
            "test_name",
            "a test parameter",
            "bla"
        )
        class TestModule(BaseTestModule):
            def get_test(self):
                return self.x
            def set_test(self, value):
                self.x = value
        class TestModuleBinding(object):
            parameter_definitions = [parameter_definition]
            def __init__(self):
                self.parameters = parameters.Parameters(self.parameter_definitions, self)
        class TestInterface(TestModule, TestModuleBinding):
            def __init__(self):
                TestModuleBinding.__init__(self)
        instance = TestInterface()
        instance.parameters.test_name = "bla"
        self.assertEqual("bla", instance.x)
        instance.parameters.test_name = "bla"
        self.assertEqual("bla", instance.x )
    def test8(self):
        # set_defaults() pushes every default value into the code.
        parameter_definition = parameters.ModuleMethodParameterDefinition(
            "get_test",
            "set_test",
            "test_name",
            "a test parameter",
            11.0 | units.m
        )
        class TestModule(BaseTestModule):
            def get_test(self):
                return self.x
            def set_test(self, value):
                self.x = value
        instance = TestModule()
        p = parameters.Parameters([parameter_definition], instance)
        p.set_defaults()
        self.assertEqual(11.0 | units.m, instance.x)
    def test9(self):
        # Unknown parameter name: reading raises AmuseException, writing
        # only emits a warning.
        parameter_definition = parameters.ModuleMethodParameterDefinition(
            "get_test",
            "set_test",
            "test_name",
            "a test parameter",
            11.0 | units.m
        )
        class TestModule(BaseTestModule):
            def get_test(self):
                return self.x
            def set_test(self, value):
                self.x = value
        instance = TestModule()
        p = parameters.Parameters([parameter_definition], instance)
        self.assertRaises(AmuseException, lambda: p.unknown,
            expected_message = "tried to get unknown parameter 'unknown' for a 'TestModule' object")
        with warnings.catch_warnings(record=True) as w:
            p.unknown = 10 | units.m
            self.assertEqual(len(w), 1)
            self.assertEqual("tried to set unknown parameter 'unknown' for a 'TestModule' object", str(w[-1].message))
    def test10(self):
        # Read-only parameter (setter is None): assignment raises.
        parameter_definition = parameters.ModuleMethodParameterDefinition(
            "get_test",
            None,
            "test_name",
            "a test parameter",
            11.0 | units.m
        )
        class TestModule(BaseTestModule):
            def get_test(self):
                return self.x
            def set_test(self, value):
                self.x = value
        instance = TestModule()
        p = parameters.Parameters([parameter_definition], instance)
        instance.x = 1 | units.m
        self.assertEqual(p.test_name, 1 | units.m)
        def try_set_read_only_parameter(parameter_set):
            parameter_set.test_name = 2 | units.m
        self.assertRaises(AmuseException, try_set_read_only_parameter, p,
            expected_message = "Could not set value for parameter 'test_name' of a 'TestModule' object, parameter is read-only")
    def test11(self):
        # send_not_set_parameters_to_code() writes defaults only for the
        # parameters that were never explicitly set.
        parameter_definition1 = parameters.ModuleMethodParameterDefinition(
            "get_test",
            "set_test",
            "test_name",
            "a test parameter",
            11.0 | units.m
        )
        parameter_definition2 = parameters.ModuleMethodParameterDefinition(
            "get_test1",
            "set_test1",
            "test_name2",
            "a test parameter",
            12.0 | units.m
        )
        class TestModule(BaseTestModule):
            def get_test(self):
                return self.x
            def set_test(self, value):
                self.x = value
            def get_test1(self):
                return self.y
            def set_test1(self, value):
                self.y = value
        instance = TestModule()
        p = parameters.Parameters([parameter_definition1, parameter_definition2], instance)
        instance.x = 1 | units.m
        instance.y = 2 | units.m
        self.assertEqual(p.test_name, 1 | units.m)
        self.assertEqual(p.test_name2, 2 | units.m)
        p.test_name = 20 | units.m
        p.send_not_set_parameters_to_code()
        self.assertEqual(instance.x, 20 | units.m)
        self.assertEqual(instance.y, 12 | units.m)
    def test12(self):
        # Vector parameter backed by a single multi-value getter/setter.
        parameter_definition = parameters.ModuleVectorMethodParameterDefinition(
            "get_test",
            "set_test",
            "test_name",
            "a test parameter",
            [0.1, 0.2, 0.3] | units.km,
            True
        )
        class TestModule(BaseTestModule):
            def get_test(self):
                return self.x, self.y, self.z
            def set_test(self, x, y, z):
                self.x = x
                self.y = y
                self.z = z
        class TestModuleBinding(object):
            parameter_definitions = [parameter_definition]
            def __init__(self):
                self.parameters = parameters.Parameters(self.parameter_definitions, self)
        class TestInterface(TestModule, TestModuleBinding):
            def __init__(self):
                TestModuleBinding.__init__(self)
        instance = TestInterface()
        self.assertTrue('test_name' in list(instance.parameters.names()))
        self.assertEqual([0.1, 0.2, 0.3] | units.km, instance.parameters.test_name)
        instance.parameters.test_name = [1, 2, 3] | units.km
        self.assertEqual([1, 2, 3] | units.km, instance.parameters.test_name)
        self.assertEqual(1000 | units.m, instance.x)
class TestInterfaceParameterDefintions(amusetest.TestCase):
    """Tests for InterfaceParameterDefinition: parameters stored on the
    parameter set itself rather than forwarded to code getters/setters.

    NOTE: the class name misspells "Definitions"; kept for compatibility.
    """
    def test1(self):
        # The default value is returned without any code getter.
        class TestModule(BaseTestModule):
            pass
        o = TestModule()
        set = parameters.Parameters([parameters.InterfaceParameterDefinition(
            "test_name",
            "a test parameter",
            0.1 | units.m)], o)
        x = set.get_parameter("test_name")
        value = x.get_value()
        self.assertTrue(value.unit.has_same_base_as(units.m))
        self.assertEqual(value.value_in(units.m), 0.1)
    def test2(self):
        # Values written to the parameter are stored and read back.
        definition = parameters.InterfaceParameterDefinition(
            "test_name",
            "a test parameter",
            0.1 | units.m)
        class TestModule(BaseTestModule):
            pass
        o = TestModule()
        set = parameters.Parameters([definition,], o)
        x = set.get_parameter("test_name")
        x.set_value(10|units.m)
        value = x.get_value()
        self.assertEqual(value, 10|units.m)
    def test4(self):
        # Attribute-style access through a bound parameter set.
        parameter_definition = parameters.InterfaceParameterDefinition(
            "test_name",
            "a test parameter",
            0.1 | units.m,
        )
        class TestModule(BaseTestModule):
            def get_test(self):
                return self.x
            def set_test(self, value):
                self.x = value
        class TestModuleBinding(object):
            parameter_definitions = [parameter_definition]
            def __init__(self):
                self.parameters = parameters.Parameters(self.parameter_definitions, self)
        class TestInterface(TestModule, TestModuleBinding):
            def __init__(self):
                TestModuleBinding.__init__(self)
        instance = TestInterface()
        self.assertTrue('test_name' in list(instance.parameters.names()))
        instance.parameters.test_name = 1 | units.km
        self.assertEqual(1 | units.km, instance.parameters.test_name)
    def test5(self):
        # The optional state-guard hook prefix ("before_") is invoked on
        # the code object when the parameter is assigned.
        parameter_definition = parameters.InterfaceParameterDefinition(
            "test_name",
            "a test parameter",
            0.1 | units.m,
            "before_"
        )
        class TestModule(BaseTestModule):
            def get_test(self):
                return self.x
            def set_test(self, value):
                self.x = value
            def before_(self):
                self.before_called=True
                pass
        class TestModuleBinding(object):
            parameter_definitions = [parameter_definition]
            def __init__(self):
                self.parameters = parameters.Parameters(self.parameter_definitions, self)
        class TestInterface(TestModule, TestModuleBinding):
            def __init__(self):
                TestModuleBinding.__init__(self)
        instance = TestInterface()
        self.assertTrue('test_name' in list(instance.parameters.names()))
        # the hook must not have run before any assignment
        self.assertRaises(Exception,lambda: getattr(instance,"before_called"))
        instance.parameters.test_name = 1 | units.km
        self.assertEqual(1 | units.km, instance.parameters.test_name)
        self.assertEqual(instance.before_called,True)
class TestParameters(amusetest.TestCase):
def test1(self):
parameter_definition = parameters.ModuleMethodParameterDefinition(
"get_test",
"set_test",
"test_name",
"a test parameter",
11.0 | units.m
)
class TestModule(BaseTestModule):
x = 123 | units.m
def get_test(self):
return self.x
def set_test(self, value):
self.x = value
o = TestModule()
x = parameters.Parameters([parameter_definition], o)
value = x.test_name
self.assertTrue(value.unit.has_same_base_as(units.m))
self.assertEqual(value.value_in(units.m), 123)
def test2(self):
parameter_definition = parameters.ModuleMethodParameterDefinition(
"get_test",
"set_test",
"test_name",
"a test parameter",
11.0 | nbody_system.length
)
class TestModule(BaseTestModule):
x = 123 | nbody_system.length
def get_test(self):
return self.x
def set_test(self, value):
self.x = value
o = TestModule()
x = parameters.Parameters([parameter_definition], o)
self.assertEqual(x.test_name, 123 | nbody_system.length)
convert_nbody = nbody_system.nbody_to_si(2.0 | units.m, 4.0 | units.kg)
y = parameters.ParametersWithUnitsConverted(
x,
convert_nbody.as_converter_from_si_to_generic()
)
self.assertAlmostEqual(y.test_name.value_in(units.m), 246.0, 6)
y.test_name = 500 | units.m
self.assertAlmostEqual(y.test_name.value_in(units.m), 500.0, 6)
print(x.test_name, o.x)
self.assertAlmostEqual(x.test_name.value_in(nbody_system.length), 250.0, 6)
self.assertAlmostEqual(o.x, 250.0 | nbody_system.length, 6)
def test3(self):
parameter_definition = parameters.ModuleMethodParameterDefinition(
"get_test",
None,
"test_name",
"a test parameter",
11.0 | nbody_system.length
)
class TestModule(BaseTestModule):
x = 123 | units.m
def get_test(self):
return self.x
def set_test(self, value):
self.x = value
o = TestModule()
x = parameters.new_parameters_instance_with_docs([parameter_definition], o)
self.assertTrue("test_name" in x.__doc__)
self.assertTrue("a test parameter" in x.__doc__)
self.assertTrue("default" in x.__doc__)
self.assertTrue("11.0 length" in x.__doc__)
convert_nbody = nbody_system.nbody_to_si(2.0 | units.m, 4.0 | units.kg)
y = parameters.new_parameters_with_units_converted_instance_with_docs(
x,
convert_nbody.as_converter_from_si_to_generic()
)
self.assertTrue("test_name" in y.__doc__)
self.assertTrue("a test parameter" in y.__doc__)
self.assertTrue("default" in y.__doc__)
self.assertTrue("22.0 m" in y.__doc__)
def test3b(self):
# Same test as test3, but testing on the class, not instance
# This makes sure the python 'help' functionality works on parameters
parameter_definition = parameters.ModuleMethodParameterDefinition(
"get_test",
None,
"test_name",
"a test parameter",
11.0 | nbody_system.length
)
class TestModule(BaseTestModule):
x = 123 | units.m
def get_test(self):
return self.x
def set_test(self, value):
self.x = value
o = TestModule()
x = parameters.new_parameters_instance_with_docs([parameter_definition], o)
self.assertTrue("test_name" in x.__class__.__doc__)
self.assertTrue("a test parameter" in x.__class__.__doc__)
self.assertTrue("default" in x.__class__.__doc__)
self.assertTrue("11.0 length" in x.__class__.__doc__)
convert_nbody = nbody_system.nbody_to_si(2.0 | units.m, 4.0 | units.kg)
y = parameters.new_parameters_with_units_converted_instance_with_docs(
x,
convert_nbody.as_converter_from_si_to_generic()
)
self.assertTrue("test_name" in y.__class__.__doc__)
self.assertTrue("a test parameter" in y.__class__.__doc__)
self.assertTrue("default" in y.__class__.__doc__)
self.assertTrue("22.0 m" in y.__class__.__doc__)
def test4(self):
parameter_definition = parameters.ModuleMethodParameterDefinition(
"get_test",
None,
"test_name",
"a test parameter",
11.0 | nbody_system.length
)
class TestModule(BaseTestModule):
x = 123.0 | nbody_system.length
def get_test(self):
return self.x
def set_test(self, value):
self.x = value
o = TestModule()
x = parameters.Parameters([parameter_definition], o)
self.assertTrue("test_name" in str(x))
self.assertTrue("123.0 length" in str(x))
convert_nbody = nbody_system.nbody_to_si(2.0 | units.m, 4.0 | units.kg)
y = parameters.ParametersWithUnitsConverted(
x,
convert_nbody.as_converter_from_si_to_generic()
)
self.assertTrue("test_name" in str(y))
self.assertTrue("246.0 m" in str(y))
def test5(self):
print("Test 5: testing mixed nbody and physical units")
phys_parameter_definition = parameters.ModuleMethodParameterDefinition(
"get_test",
"set_test",
"phys_test_name",
"a test parameter with physical units",
11.0 | units.m
)
nbody_parameter_definition = parameters.ModuleMethodParameterDefinition(
"get_test1",
"set_test1",
"nbody_test_name",
"a test parameter with nbody units",
11.0 | nbody_system.length
)
class TestModule(BaseTestModule):
x = 123.0 | units.m
y = 123.0 | nbody_system.length
def get_test(self):
return self.x
def set_test(self, value):
self.x = value
def get_test1(self):
return self.y
def set_test1(self, value):
self.y = value
o = TestModule()
x = parameters.Parameters([phys_parameter_definition, nbody_parameter_definition], o)
self.assertTrue("nbody_test_name" in str(x))
self.assertTrue("123.0 length" in str(x))
self.assertTrue("phys_test_name" in str(x))
self.assertTrue("123.0 m" in str(x))
convert_nbody = nbody_system.nbody_to_si(2.0 | units.m, 4.0 | units.kg)
y = parameters.ParametersWithUnitsConverted(
x,
convert_nbody.as_converter_from_si_to_generic()
)
self.assertEqual(getattr(y,"phys_test_name"), 123.0 | units.m)
self.assertAlmostEqual(getattr(y,"nbody_test_name"), 246.0 | units.m)
y.phys_test_name = 1234.0 | units.m
self.assertEqual(y.phys_test_name, 1234.0 | units.m)
y.nbody_test_name = 12345.0 | nbody_system.length
self.assertAlmostEqual(y.nbody_test_name, 24690.0 | units.m)
y.nbody_test_name = 12345.0 | units.m
self.assertEqual(y.nbody_test_name, 12345.0 | units.m)
def test6(self):
print("Test 5: testing mixed nbody and string units")
nbody_parameter_definition = parameters.ModuleMethodParameterDefinition(
"get_nbody",
None,
"nbody_par_name",
"a test parameter with nbody units",
11.0 | nbody_system.length
)
string_parameter_definition = parameters.ModuleMethodParameterDefinition(
"get_string",
None,
"string_par_name",
"a test parameter with string units",
"test string"
)
class TestModule(BaseTestModule):
x = 123.0 | nbody_system.length
def get_nbody(self):
return self.x
def get_string(self):
return str(10 * self.x.number )
o = TestModule()
x = parameters.Parameters([string_parameter_definition, nbody_parameter_definition], o)
self.assertTrue("nbody_par_name" in str(x))
self.assertTrue("123.0 length" in str(x))
self.assertTrue("string_par_name" in str(x))
self.assertTrue("1230.0" in str(x))
convert_nbody = nbody_system.nbody_to_si(2.0 | units.m, 4.0 | units.kg)
y = parameters.ParametersWithUnitsConverted(
x,
convert_nbody.as_converter_from_si_to_generic()
)
self.assertEqual(getattr(y,"string_par_name"), "1230.0")
self.assertAlmostEqual(getattr(y,"nbody_par_name"), 246.0 | units.m)
def test7(self):
parameter_definition1 = parameters.ModuleCachingParameterDefinition(
"initialize_vars",
"arg1",
"test_par1",
"a test parameter (1)",
11.0 | units.m
)
parameter_definition2 = parameters.ModuleCachingParameterDefinition(
"initialize_vars",
"arg2",
"test_par2",
"a test parameter (2)",
12.0 | units.m
)
class TestModule(BaseTestModule):
x = 123 | units.m
y = 456 | units.m
def initialize_vars(self, arg1, arg2):
self.x = arg1
self.y = arg2
o = TestModule()
x = parameters.Parameters([parameter_definition1, parameter_definition2], o)
x.test_par1 = 20 | units.m
print(x.test_par1)
self.assertEqual(x.test_par1, 20 | units.m)
self.assertEqual(x.test_par2, 12 | units.m)
self.assertEqual(o.x, 123 | units.m)
self.assertEqual(o.y, 456 | units.m)
x.send_cached_parameters_to_code()
self.assertEqual(o.x, 20 | units.m)
self.assertEqual(o.y, 12 | units.m)
def test8(self):
parameter_definition1 = parameters.ModuleCachingParameterDefinition(
"initialize_vars",
"arg1",
"test_par1",
"a test parameter (1)",
11.0 | units.m
)
parameter_definition2 = parameters.ModuleCachingParameterDefinition(
"initialize_vars",
"arg2",
"test_par2",
"a test parameter (2)",
12.0 | units.m
)
parameter_definition3 = parameters.ModuleCachingParameterDefinition(
"initialize_vars2",
"arg1",
"test_par3",
"a test parameter (3)",
14.0 | units.m
)
class TestModule(BaseTestModule):
x = 123 | units.m
y = 456 | units.m
z = 100 | units.m
def initialize_vars(self, arg1, arg2):
self.x = arg1
self.y = arg2
return 0
def initialize_vars2(self, arg1):
self.z = arg1
return 0
o = TestModule()
x = parameters.Parameters([parameter_definition1, parameter_definition2, parameter_definition3], o)
x.send_cached_parameters_to_code()
self.assertEqual(o.x, 11 | units.m)
self.assertEqual(o.y, 12 | units.m)
self.assertEqual(o.z, 14 | units.m)
def test9(self):
parameter_definition1 = parameters.ModuleMethodParameterDefinition(
"get_test",
"set_test",
"test_name",
"a test parameter",
11.0 | units.m
)
parameter_definition2 = parameters.ModuleMethodParameterDefinition(
"get_test1",
"set_test1",
"test_name2",
"a test parameter",
12.0 | units.m
)
paramer_definition3 = parameters.VectorParameterDefinition(
"test_vector",
"vector of parameters",
["test_name", "test_name2"],
[11.0, 12.0] | units.m
)
class TestModule(BaseTestModule):
def get_test(self):
return self.x
def set_test(self, value):
self.x = value
def get_test1(self):
return self.y
def set_test1(self, value):
self.y = value
instance = TestModule()
instance.x = 1 | units.m
instance.y = 2 | units.m
p = parameters.Parameters([parameter_definition1, parameter_definition2, paramer_definition3], instance)
self.assertEqual(p.test_vector, (1,2) | units.m)
p.test_vector = (3,4) | units.m
self.assertEqual(instance.x, 3 | units.m)
self.assertEqual(instance.y, 4 | units.m)
def test10(self):
print("Testing ParametersWithUnitsConverted on vector parameters")
definitions = []
for par_name in ["length_x", "length_y", "length_z"]:
definitions.append(parameters.ModuleMethodParameterDefinition(
"get_"+par_name,
"set_"+par_name,
par_name,
"a test parameter",
10.0 | generic_unit_system.length
))
definitions.append(parameters.VectorParameterDefinition(
"mesh_length",
"length of the model in the x, y and z directions",
("length_x", "length_y", "length_z"),
[10, 10, 10] | generic_unit_system.length
))
class TestModule(BaseTestModule):
x = 123.0 | generic_unit_system.length
y = 456.0 | generic_unit_system.length
z = 789.0 | generic_unit_system.length
def get_length_x(self):
return self.x
def set_length_x(self, value):
self.x = value
def get_length_y(self):
return self.y
def set_length_y(self, value):
self.y = value
def get_length_z(self):
return self.z
def set_length_z(self, value):
self.z = value
o = TestModule()
x = parameters.Parameters(definitions, o)
self.assertTrue("mesh_length" in str(x))
self.assertTrue("[123.0, 456.0, 789.0] length" in str(x))
converter = generic_unit_converter.ConvertBetweenGenericAndSiUnits(2.0 | units.m, 4.0 | units.kg, 6.0 | units.s)
y = parameters.ParametersWithUnitsConverted(
x,
converter.as_converter_from_si_to_generic()
)
self.assertTrue("mesh_length" in str(y))
self.assertTrue("[246.0, 912.0, 1578.0] m" in str(y))
def test11(self):
print("Testing ParametersWithUnitsConverted on vector parameters, using add_vector_parameter")
class TestModule(BaseTestModule):
x = 123.0 | generic_unit_system.length
y = 456.0 | generic_unit_system.length
z = 789.0 | generic_unit_system.length
def get_length_x(self):
return self.x
def set_length_x(self, value):
self.x = value
def get_length_y(self):
return self.y
def set_length_y(self, value):
self.y = value
def get_length_z(self):
return self.z
def set_length_z(self, value):
self.z = value
o = TestModule()
parameters_handler = HandleParameters(o)
parameters_handler.add_vector_parameter(
"mesh_length",
"length of the model in the x, y and z directions",
("length_x", "length_y", "length_z")
)
for par_name in ["length_x", "length_y", "length_z"]:
parameters_handler.add_method_parameter(
"get_"+par_name,
"set_"+par_name,
par_name,
"a test parameter",
default_value = 10.0 | generic_unit_system.length,
)
x = parameters_handler.get_attribute(None, None)
self.assertTrue("mesh_length" in str(x))
self.assertTrue("[123.0, 456.0, 789.0] length" in str(x))
converter = generic_unit_converter.ConvertBetweenGenericAndSiUnits(2.0 | units.m, 4.0 | units.kg, 6.0 | units.s)
y = parameters.ParametersWithUnitsConverted(
x,
converter.as_converter_from_si_to_generic()
)
self.assertTrue("mesh_length" in str(y))
self.assertTrue("[246.0, 912.0, 1578.0] m" in str(y))
def test12(self):
definition = parameters.ModuleMethodParameterDefinition(
"get_test",
"set_test",
"test_name",
"a test parameter",
0.1 | units.m
)
class TestModule(BaseTestModule):
def get_test(self):
return self.x
def set_test(self, value):
self.x = value
o = TestModule()
set = parameters.Parameters([definition,], o)
set.test_name = 10|units.m
self.assertEqual(o.x, 10|units.m)
self.assertEqual(set.test_name, 10|units.m)
memento = set.copy()
self.assertEqual(memento.test_name, 10|units.m)
set.test_name = 20|units.m
self.assertEqual(o.x, 20|units.m)
self.assertEqual(set.test_name, 20|units.m)
self.assertEqual(memento.test_name, 10|units.m)
set.reset_from_memento(memento)
self.assertEqual(o.x, 10|units.m)
self.assertEqual(set.test_name, 10|units.m)
self.assertEqual(memento.test_name, 10|units.m)
def test13(self):
definition = parameters.ModuleMethodParameterDefinition(
"get_test",
None,
"test_name",
"a read-only test parameter",
0.1 | units.m
)
class TestModule(BaseTestModule):
x = 0.1 | units.m
def get_test(self):
return self.x
o = TestModule()
set = parameters.Parameters([definition,], o)
self.assertRaises(AmuseException, setattr, set, "test_name", 1.0 | units.m,
expected_message = "Could not set value for parameter 'test_name' of a 'TestModule' object, parameter is read-only")
self.assertEqual(o.x, 0.1|units.m)
self.assertEqual(set.test_name, 0.1|units.m)
memento = set.copy()
self.assertEqual(memento.test_name, 0.1|units.m)
set.reset_from_memento(memento)
self.assertEqual(o.x, 0.1|units.m)
self.assertEqual(set.test_name, 0.1|units.m)
memento.test_name = 2.0 | units.m
self.assertEqual(memento.test_name, 2.0|units.m)
with warnings.catch_warnings(record=True) as w:
set.reset_from_memento(memento)
self.assertEqual(len(w), 1)
self.assertEqual("tried to change read-only parameter 'test_name' for a 'TestModule' object", str(w[-1].message))
self.assertEqual(o.x, 0.1|units.m)
self.assertEqual(set.test_name, 0.1|units.m)
self.assertEqual(memento.test_name, 2.0|units.m)
def test14(self):
definition = parameters.InterfaceParameterDefinition(
"test_name",
"a read-only test parameter",
0.1 | units.m
)
class TestModule(BaseTestModule):
pass
o = TestModule()
set = parameters.Parameters([definition,], o)
self.assertEqual(set.test_name, 0.1|units.m)
memento = set.copy()
self.assertEqual(memento.test_name, 0.1|units.m)
memento.test_name=2.|units.m
set.reset_from_memento(memento)
self.assertEqual(set.test_name, 2.|units.m)
def test15(self):
definition = parameters.InterfaceParameterDefinition(
"test_name",
"a read-only test parameter",
0.1
)
class TestModule(BaseTestModule):
pass
o = TestModule()
set = parameters.Parameters([definition,], o)
import numpy
b=numpy.array(2)
set.test_name=b
b*=2
self.assertEqual(set.test_name,2)
def test16(self):
print("Testing add_interface_parameter")
class TestModule(BaseTestModule):
pass
o = TestModule()
parameters_handler = HandleParameters(o)
parameters_handler.add_vector_parameter(
"mesh_length",
"length of the model in the x, y and z directions",
("length_x", "length_y", "length_z")
)
for i,par_name in enumerate(["length_x", "length_y", "length_z"]):
parameters_handler.add_interface_parameter(
par_name,
"a test parameter",
default_value = i*10.0 | generic_unit_system.length,
)
x = parameters_handler.get_attribute(None, None)
self.assertTrue("mesh_length" in str(x))
self.assertTrue("[0.0, 10.0, 20.0] length" in str(x))
converter = generic_unit_converter.ConvertBetweenGenericAndSiUnits(2.0 | units.m, 4.0 | units.kg, 6.0 | units.s)
y = parameters.ParametersWithUnitsConverted(
x,
converter.as_converter_from_si_to_generic()
)
self.assertTrue("mesh_length" in str(y))
self.assertTrue("[0.0, 20.0, 40.0] m" in str(y))
    def test17(self):
        """An array parameter built with add_array_parameter renders element values."""
        print("Testing ParametersWithUnitsConverted on vector parameters, using add_vector_parameter")
        class TestModule(BaseTestModule):
            # Backing store exposed through indexed getter/setter plus a range method.
            x = [1.,2.,3.] | generic_unit_system.length
            def get_length(self,i):
                return self.x[i]
            def set_length(self, i,value):
                self.x[i] = value
            def range(self):
                return 0,len(self.x)-1
        o = TestModule()
        parameters_handler = HandleParameters(o)
        parameters_handler.add_array_parameter(
            "get_length",
            "set_length",
            "range",
            "length",
            "description"
        )
        x = parameters_handler.get_attribute(None, None)
        self.assertTrue("length" in str(x))
        self.assertTrue("[1.0, 2.0, 3.0] length" in str(x))
    def test18(self):
        """ModuleArrayParameterDefinition: reads delegate to get(i), writes to set(i, v)."""
        print("Testing array parameters")
        definitions = []
        definitions.append(parameters.ModuleArrayParameterDefinition(
            "get",
            "set",
            "range",
            "param",
            "a test parameter"
        ))
        class TestModule(BaseTestModule):
            x = [1.,2.,3.] | generic_unit_system.length
            def get(self,i):
                return self.x[i]
            def set(self,i, value):
                self.x[i] = value
            def range(self):
                return 0, len(self.x)-1
        o = TestModule()
        x = parameters.Parameters(definitions, o)
        self.assertEqual(x.param, [1.,2.,3.] | generic_unit_system.length)
        # In-place arithmetic on the parameter writes every element back.
        x.param*=2
        self.assertEqual(x.param, [2.,4.,6.] | generic_unit_system.length)
    def test19(self):
        """Each parameter can live in its own named parameter set and keeps its module value."""
        print("Testing multiple parameter sets")
        class TestModule(BaseTestModule):
            x = 123.0 | generic_unit_system.length
            y = 456.0 | generic_unit_system.length
            z = 789.0 | generic_unit_system.length
            def get_length_x(self):
                return self.x
            def set_length_x(self, value):
                self.x = value
            def get_length_y(self):
                return self.y
            def set_length_y(self, value):
                self.y = value
            def get_length_z(self):
                return self.z
            def set_length_z(self, value):
                self.z = value
        o = TestModule()
        parameters_handler = HandleParameters(o)
        # One parameter per set, e.g. "length_x" lives in set "length_x_set".
        for par_name in ["length_x", "length_y", "length_z"]:
            parameters_handler.add_method_parameter(
                "get_"+par_name,
                "set_"+par_name,
                par_name,
                "a test parameter",
                default_value = 10.0 | generic_unit_system.length,
                parameter_set = par_name+"_set"
            )
        # Reading through each set must return the module's value, not the default.
        for i,par_name in enumerate(["length_x", "length_y", "length_z"]):
            x = parameters_handler.get_attribute(par_name+"_set", None)
            self.assertTrue([123.0, 456.0, 789.0][i] == getattr(x,par_name).number)
    def test20(self):
        """InCodeComponentImplementation exposes each parameter_set as an attribute.

        Parameters declared with parameter_set="parameters2" appear on
        t.parameters2, and an alias tracks the value of its target parameter.
        """
        print("Testing multiple parameter sets 2")
        class TestInterface(BaseTestModule):
            x = 123.0
            y = 456.0
            def get_x(self):
                return self.x
            def set_x(self, value):
                self.x = value
            def get_y(self):
                return self.y
            def set_y(self, value):
                self.y = value
        class Testing(InCodeComponentImplementation):
            def __init__(self, **options):
                InCodeComponentImplementation.__init__(self, TestInterface(), **options)
            def define_parameters(self,object):
                object.add_method_parameter(
                    "get_x", "set_x", "x", "test parameter", 123.
                )
                object.add_method_parameter(
                    "get_y", "set_y", "y", "test parameter 2", 456.,
                    parameter_set="parameters2"
                )
                object.add_alias_parameter(
                    "y_alias","y", " new y", parameter_set="parameters2"
                )
        t=Testing()
        self.assertEqual(set(t.parameter_set_names()), set(('parameters','parameters2')))
        self.assertEqual(t.parameters.x,123.)
        self.assertEqual(t.parameters2.y,456.)
        t.parameters2.y=789.
        self.assertEqual(t.parameters2.y,789.)
        # The alias reflects the updated value of its target.
        self.assertEqual(t.parameters2.y_alias,789.)
    def test21(self):
        """Parameter sets can be extended after construction via the PARAMETER handler.

        Initially only the default 'parameters' set exists; calling
        define_additional_parameters() adds 'parameters2' plus an alias and a
        second definition of "y" in the default set, all backed by the same
        interface attribute.
        """
        print("Test change in parameter sets")
        class TestInterface(BaseTestModule):
            x = 123.0
            y = 456.0
            def get_x(self):
                return self.x
            def set_x(self, value):
                self.x = value
            def get_y(self):
                return self.y
            def set_y(self, value):
                self.y = value
        class Testing(InCodeComponentImplementation):
            def __init__(self, **options):
                InCodeComponentImplementation.__init__(self, TestInterface(), **options)
            def define_parameters(self,handler):
                handler.add_method_parameter(
                    "get_x", "set_x", "x", "test parameter", 123.
                )
            def define_additional_parameters(self):
                # Late registration through the PARAMETER handler.
                handler=self.get_handler('PARAMETER')
                handler.add_method_parameter(
                    "get_y", "set_y", "y", "test parameter 2", 456.,
                    parameter_set="parameters2"
                )
                handler.add_alias_parameter(
                    "y_alias","y", " new y", parameter_set="parameters2"
                )
                handler.add_method_parameter(
                    "get_y", "set_y", "y", "test parameter", 456.
                )
        t=Testing()
        self.assertEqual(set(t.parameter_set_names()), set(('parameters',)))
        t.define_additional_parameters()
        self.assertEqual(set(t.parameter_set_names()), set(('parameters','parameters2')))
        self.assertEqual(t.parameters.x,123.)
        self.assertEqual(t.parameters2.y,456.)
        t.parameters2.y=789.
        self.assertEqual(t.parameters2.y,789.)
        self.assertEqual(t.parameters2.y_alias,789.)
        # Both sets read the same underlying interface attribute.
        self.assertEqual(t.parameters.y,789.)
| StarcoderdataPython |
4994474 | # coding=utf-8
from torch import nn
from torch.nn import functional as F
class CNN(nn.Module):
    """Atari-style convolutional network (classic DQN architecture).

    Expects 84x84 inputs with ``configuration['input_channels']`` channels and
    produces ``configuration['output_size']`` raw scores (no softmax).
    """

    def __init__(self, configuration):
        super(CNN, self).__init__()
        # Three conv stages shrink 84x84 -> 20x20 -> 9x9 -> 7x7 feature maps.
        self.conv1 = nn.Conv2d(configuration['input_channels'], 32, kernel_size=8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
        # 7 * 7 * 64 flattened conv features -> hidden -> output scores.
        self.fc1 = nn.Linear(7 * 7 * 64, 512)
        self.fc2 = nn.Linear(512, configuration['output_size'])

    def forward(self, x):
        """Map a (N, C, 84, 84) batch to (N, output_size) scores."""
        hidden = F.relu(self.conv1(x))
        hidden = F.relu(self.conv2(hidden))
        hidden = F.relu(self.conv3(hidden))
        hidden = hidden.flatten(1)  # collapse C*H*W per sample
        hidden = F.relu(self.fc1(hidden))
        return self.fc2(hidden)
126664 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Advanced Routes',
'version': '1.0',
'category': 'Manufacturing',
'description': """
This module supplements the Warehouse application by effectively implementing Push and Pull inventory flows.
============================================================================================================
Typically this could be used to:
--------------------------------
* Manage product manufacturing chains
* Manage default locations per product
* Define routes within your warehouse according to business needs, such as:
- Quality Control
- After Sales Services
- Supplier Returns
* Help rental management, by generating automated return moves for rented products
Once this module is installed, an additional tab appear on the product form,
where you can add Push and Pull flow specifications. The demo data of CPU1
product for that push/pull :
Push flows:
-----------
Push flows are useful when the arrival of certain products in a given location
should always be followed by a corresponding move to another location, optionally
after a certain delay. The original Warehouse application already supports such
Push flow specifications on the Locations themselves, but these cannot be
refined per-product.
A push flow specification indicates which location is chained with which location,
and with what parameters. As soon as a given quantity of products is moved in the
source location, a chained move is automatically foreseen according to the
parameters set on the flow specification (destination location, delay, type of
move, journal). The new move can be automatically processed, or require a manual
confirmation, depending on the parameters.
Pull flows:
-----------
Pull flows are a bit different from Push flows, in the sense that they are not
related to the processing of product moves, but rather to the processing of
procurement orders. What is being pulled is a need, not directly products. A
classical example of Pull flow is when you have an Outlet company, with a parent
Company that is responsible for the supplies of the Outlet.
[ Customer ] <- A - [ Outlet ] <- B - [ Holding ] <~ C ~ [ Supplier ]
When a new procurement order (A, coming from the confirmation of a Sale Order
for example) arrives in the Outlet, it is converted into another procurement
(B, via a Pull flow of type 'move') requested from the Holding. When procurement
order B is processed by the Holding company, and if the product is out of stock,
it can be converted into a Purchase Order (C) from the Supplier (Pull flow of
type Purchase). The result is that the procurement order, the need, is pushed
all the way between the Customer and Supplier.
Technically, Pull flows allow to process procurement orders differently, not
only depending on the product being considered, but also depending on which
location holds the 'need' for that product (i.e. the destination location of
that procurement order).
Use-Case:
---------
You can use the demo data as follow:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
**CPU1:** Sell some CPU1 from Chicago Shop and run the scheduler
- Warehouse: delivery order, Chicago Shop: reception
**CPU3:**
- When receiving the product, it goes to Quality Control location then
stored to shelf 2.
- When delivering the customer: Pick List -> Packing -> Delivery Order from Gate A
""",
'author': 'OpenERP SA',
'images': ['images/pulled_flow.jpeg','images/pushed_flow.jpeg'],
'depends': ['procurement','stock','sale'],
'data': ['stock_location_view.xml', 'security/stock_location_security.xml', 'security/ir.model.access.csv', 'procurement_pull_workflow.xml'],
'demo': [
'stock_location_demo_cpu1.xml',
'stock_location_demo_cpu3.yml',
],
'installable': True,
'test': [
'test/stock_location_pull_flow.yml',
'test/stock_location_push_flow.yml',
],
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| StarcoderdataPython |
6415609 | <filename>visualization/questions.py
class VisualizeQuestions(object):
    """Plot monthly question counts per source (web/app/internet.org/m-site).

    Opens a MySQL connection on construction; call connection_close() when done.
    """

    def __init__(self):
        # Local import keeps the DB dependency out of module import time.
        from preprocessor.database import Database
        database = Database(
            'host',
            'db',
            'user',
            'pass',
            'utf8mb4'
        )
        self.connection = database.connect_with_pymysql()

    def view_source_by_month_data(self, year):
        """Return a DataFrame of per-source question counts for *year*.

        One row per month that has data, one column per source. Returns None
        when there is no open connection. NOTE(review): assumes every month
        yields exactly four sources in a stable order -- verify against data.
        """
        import pandas as pd
        import numpy as np
        numpy_list = []
        if self.connection:
            with self.connection.cursor() as cursor:
                for month in range(1, 13):
                    # Parameterised query instead of string concatenation --
                    # never build SQL by pasting values into the text.
                    sql = ("SELECT source,count(id) as count FROM questions "
                           "WHERE YEAR(created_at) = %s AND MONTH(created_at) = %s "
                           "group by source")
                    cursor.execute(sql, (year, month))
                    result = cursor.fetchall()
                    if result:
                        numpy_list.append([row['count'] for row in result])
            return pd.DataFrame(np.array(numpy_list), columns=['web', 'app', 'internet.org', 'm-site'])

    def view_source_by_month(self, year):
        """Render the monthly per-source counts of *year* as a bar chart."""
        import matplotlib.pyplot as plt
        plt.style.use('ggplot')
        df2 = self.view_source_by_month_data(year)
        df2.plot.bar()
        plt.show()

    def connection_close(self):
        """Close the underlying database connection."""
        self.connection.close()
# Bug fix: the guard previously compared against ' __main__' (note the leading
# space), which never equals '__main__', so this demo block could never run.
if __name__ == '__main__':
    a = VisualizeQuestions()
    a.view_source_by_month(2016)
    a.connection_close()
| StarcoderdataPython |
138718 | <gh_stars>0
import json
import unittest
from botocore.exceptions import WaiterError, ClientError
from mock import patch, MagicMock, mock_open
from Hummingbird.errors import SchedulerException
from Hummingbird.hummingbird_utils import PLATFORM
from Hummingbird.scheduler import AWSBatchScheduler
from Hummingbird.instance import AWSInstance
from Hummingbird.hummingbird_utils import get_full_path
class TestAWSScheduler(unittest.TestCase):
    """Unit tests for AWSBatchScheduler: waiters, launch templates, CF stack output.

    Bug fixed: two methods were both named ``test_wait_jobs``; the second
    definition silently shadowed the first, so the success-path test never ran.
    The error-path test is now named ``test_wait_jobs_raises_scheduler_exception``.
    """

    conf = {PLATFORM: {'regions': 'us-west-2', 'bucket': 'local-bucket', 'cloudformation_stack_name': 'test'}}
    jobs = ['some-job-id']
    # Minimal CloudFormation stack output the scheduler expects to find.
    cf_stack_output = [
        {'OutputKey': 'PrivateSubnet1', 'OutputValue': 'subnet1'},
        {'OutputKey': 'PrivateSubnet2', 'OutputValue': 'subnet2'},
        {'OutputKey': 'BatchEC2SecurityGroup', 'OutputValue': 'sg-test'},
        {'OutputKey': 'ECSInstanceProfileRoleARN', 'OutputValue': 'ecsInstanceRole'},
        {'OutputKey': 'ECSTaskExecutionRoleARN', 'OutputValue': 'taskExecutionRole'},
        {'OutputKey': 'BatchServiceRoleARN', 'OutputValue': 'awsBatchServiceRole'}
    ]

    def setUp(self):
        self.instance = AWSBatchScheduler(self.conf, AWSInstance(), 100, None)

    def test_instance_fields(self):
        """Constructor initializes the boto clients and the bucket field."""
        instance = AWSBatchScheduler(self.conf, None, None, None)
        self.assertIsNotNone(instance.batch_client, 'batch_client field was not initialized')
        self.assertIsNotNone(instance.ec2_client, 'ec2_client field was not initialized')
        self.assertIsNotNone(instance.s3_bucket, 's3_bucket field was not initialized')

    @patch('botocore.waiter.create_waiter_with_client')
    def test_wait_jobs(self, create_waiter_with_client_mock):
        """wait_jobs delegates to a botocore waiter with the given job ids."""
        self.instance.wait_jobs(self.jobs)
        create_waiter_with_client_mock.return_value.wait.assert_called_once_with(jobs=self.jobs)

    @patch('logging.exception')
    @patch('botocore.waiter.create_waiter_with_client')
    def test_wait_jobs_raises_scheduler_exception(self, create_waiter_with_client_mock, exception_mock):
        """A WaiterError is logged and re-raised as SchedulerException."""
        create_waiter_with_client_mock.return_value.wait.side_effect = WaiterError('', '', '')
        self.assertRaises(SchedulerException, self.instance.wait_jobs, self.jobs)
        exception_mock.assert_called_once()

    def test_get_compute_environment_waiter(self):
        """Compute-environment waiter: 20 attempts, 1s delay."""
        waiter_id = 'some-waiter-id'
        compute_env_waiter = self.instance.get_compute_environment_waiter(waiter_id)
        self.assertEqual(waiter_id, compute_env_waiter.name)
        self.assertEqual(20, compute_env_waiter.config.max_attempts)
        self.assertEqual(1, compute_env_waiter.config.delay)

    def test_get_compute_job_queue_waiter(self):
        """Job-queue waiter: 20 attempts, 10s delay."""
        waiter_id = 'some-waiter-id'
        compute_env_waiter = self.instance.get_compute_job_queue_waiter(waiter_id)
        self.assertEqual(waiter_id, compute_env_waiter.name)
        self.assertEqual(20, compute_env_waiter.config.max_attempts)
        self.assertEqual(10, compute_env_waiter.config.delay)

    def test_get_compute_job_waiter(self):
        """Job waiter: polls every minute for up to two days."""
        waiter_id = 'some-waiter-id'
        compute_env_waiter = self.instance.get_compute_job_waiter(waiter_id)
        self.assertEqual(waiter_id, compute_env_waiter.name)
        self.assertEqual(24 * 60 * 2, compute_env_waiter.config.max_attempts)
        self.assertEqual(60, compute_env_waiter.config.delay)

    @patch('boto3.client', return_value=MagicMock())
    def test_create_or_update_launch_template_create(self, client_mock):
        """When describe fails (template absent), a new template is created."""
        self.instance.ec2_client = client_mock
        client_mock.describe_launch_templates.side_effect = ClientError({}, 'DescribeLaunchTemplate')
        self.instance.create_or_update_launch_template()
        client_mock.create_launch_template.assert_called_once()

    @patch('boto3.client', return_value=MagicMock())
    def test_create_or_update_launch_template_create_version(self, client_mock):
        """When the template exists, a new version is created instead."""
        self.instance.ec2_client = client_mock
        self.instance.create_or_update_launch_template()
        client_mock.create_launch_template_version.assert_called_once()

    @patch('boto3.client', return_value=MagicMock())
    def test_create_or_update_launch_template_uses_template(self, client_mock):
        """The new version is built from the bundled launch-template JSON."""
        self.instance.ec2_client = client_mock
        self.instance.create_or_update_launch_template()

        with open(get_full_path('AWS/launch-template-data.json')) as tpl:
            data = json.load(tpl)
            data['LaunchTemplateName'] = self.instance.get_compute_name()
            client_mock.create_launch_template_version.assert_called_once_with(**data)

    @patch('boto3.client', return_value=MagicMock())
    def test_get_cf_stack_output(self, client_mock):
        """Stack outputs are fetched for the configured stack name."""
        self.instance.cf_client = client_mock
        client_mock.describe_stacks.return_value = {'Stacks': [{'StackName': 'test', 'Outputs': self.cf_stack_output}]}
        self.instance.get_cf_stack_output()
        client_mock.describe_stacks.assert_called_once_with(StackName='test')

    @patch('boto3.client', return_value=MagicMock())
    @patch('logging.exception')
    def test_get_cf_stack_output_missing_key(self, _, client_mock):
        """Dropping any single required output key raises SchedulerException."""
        self.instance.cf_client = client_mock
        for kv in self.cf_stack_output:
            output = [item for item in self.cf_stack_output if item != kv]
            client_mock.describe_stacks.return_value = {'Stacks': [{'StackName': 'test', 'Outputs': output}]}
            self.assertRaises(SchedulerException, self.instance.get_cf_stack_output)
| StarcoderdataPython |
12840369 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
--- Day 10: The Stars Align ---
It's no use; your navigation system simply isn't capable of providing walking directions in the arctic circle, and certainly not in 1018.
The Elves suggest an alternative. In times like these, North Pole rescue operations will arrange points of light in the sky to guide missing Elves back to base. Unfortunately, the message is easy to miss: the points move slowly enough that it takes hours to align them, but have so much momentum that they only stay aligned for a second. If you blink at the wrong time, it might be hours before another message appears.
You can see these points of light floating in the distance, and record their position in the sky and their velocity, the relative change in position per second (your puzzle input). The coordinates are all given from your perspective; given enough time, those positions and velocities will move the points into a cohesive message!
Rather than wait, you decide to fast-forward the process and calculate what the points will eventually spell.
For example, suppose you note the following points:
position=< 9, 1> velocity=< 0, 2>
position=< 7, 0> velocity=<-1, 0>
position=< 3, -2> velocity=<-1, 1>
position=< 6, 10> velocity=<-2, -1>
position=< 2, -4> velocity=< 2, 2>
position=<-6, 10> velocity=< 2, -2>
position=< 1, 8> velocity=< 1, -1>
position=< 1, 7> velocity=< 1, 0>
position=<-3, 11> velocity=< 1, -2>
position=< 7, 6> velocity=<-1, -1>
position=<-2, 3> velocity=< 1, 0>
position=<-4, 3> velocity=< 2, 0>
position=<10, -3> velocity=<-1, 1>
position=< 5, 11> velocity=< 1, -2>
position=< 4, 7> velocity=< 0, -1>
position=< 8, -2> velocity=< 0, 1>
position=<15, 0> velocity=<-2, 0>
position=< 1, 6> velocity=< 1, 0>
position=< 8, 9> velocity=< 0, -1>
position=< 3, 3> velocity=<-1, 1>
position=< 0, 5> velocity=< 0, -1>
position=<-2, 2> velocity=< 2, 0>
position=< 5, -2> velocity=< 1, 2>
position=< 1, 4> velocity=< 2, 1>
position=<-2, 7> velocity=< 2, -2>
position=< 3, 6> velocity=<-1, -1>
position=< 5, 0> velocity=< 1, 0>
position=<-6, 0> velocity=< 2, 0>
position=< 5, 9> velocity=< 1, -2>
position=<14, 7> velocity=<-2, 0>
position=<-3, 6> velocity=< 2, -1>
Each line represents one point. Positions are given as <X, Y> pairs: X represents how far left (negative) or right (positive) the point appears, while Y represents how far up (negative) or down (positive) the point appears.
At 0 seconds, each point has the position given. Each second, each point's velocity is added to its position. So, a point with velocity <1, -2> is moving to the right, but is moving upward twice as quickly. If this point's initial position were <3, 9>, after 3 seconds, its position would become <6, 3>.
Over time, the points listed above would move like this:
Initially:
........#.............
................#.....
.........#.#..#.......
......................
#..........#.#.......#
...............#......
....#.................
..#.#....#............
.......#..............
......#...............
...#...#.#...#........
....#..#..#.........#.
.......#..............
...........#..#.......
#...........#.........
...#.......#..........
After 1 second:
......................
......................
..........#....#......
........#.....#.......
..#.........#......#..
......................
......#...............
....##.........#......
......#.#.............
.....##.##..#.........
........#.#...........
........#...#.....#...
..#...........#.......
....#.....#.#.........
......................
......................
After 2 seconds:
......................
......................
......................
..............#.......
....#..#...####..#....
......................
........#....#........
......#.#.............
.......#...#..........
.......#..#..#.#......
....#....#.#..........
.....#...#...##.#.....
........#.............
......................
......................
......................
After 3 seconds:
......................
......................
......................
......................
......#...#..###......
......#...#...#.......
......#...#...#.......
......#####...#.......
......#...#...#.......
......#...#...#.......
......#...#...#.......
......#...#..###......
......................
......................
......................
......................
After 4 seconds:
......................
......................
......................
............#.........
........##...#.#......
......#.....#..#......
.....#..##.##.#.......
.......##.#....#......
...........#....#.....
..............#.......
....#......#...#......
.....#.....##.........
...............#......
...............#......
......................
......................
After 3 seconds, the message appeared briefly: HI. Of course, your message will be much longer and will take many more seconds to appear.
What message will eventually appear in the sky?
"""
from collections import namedtuple
from itertools import count
import numpy
import re
def _parse( filepath ):
nums = re.compile( R'[+-]?\d+(?:\.\d+)?' )
Light = namedtuple( 'Light', 'p_x p_y v_x v_y' )
with open( filepath, 'r' ) as f:
lines = f.readlines( )
lights = [ ]
for line in lines:
vals = [ int( x ) for x in nums.findall( line ) ]
lights.append( Light( vals[ 0 ], vals[ 1 ], vals[ 2 ], vals[ 3 ] ) )
return lights
def _simulate( lights ) -> tuple:
sky_height = 0
Light_Position = namedtuple( 'Light_Position', 'x, y' )
light_positions = [ ]
for time in count( ):
new_time = time + 1
new_light_positions = [ Light_Position( x = l.p_x + l.v_x * new_time, y = l.p_y + l.v_y * new_time ) for l in lights ]
new_light_positions = sorted( new_light_positions, key = lambda l: l.y )
min_y = new_light_positions[ 0 ].y
max_y = new_light_positions[ -1 ].y
new_sky_height = max_y - min_y
if not sky_height or new_sky_height <= sky_height:
sky_height = new_sky_height
light_positions = new_light_positions
else:
break
xs, ys = list( zip( *light_positions ) )
xs = sorted( xs )
min_x = xs[ 0 ]
max_x = xs[ -1 ]
x_range = range( min_x - 1, max_x + 2 )
ys = sorted( ys )
min_y = ys[ 0 ]
max_y = ys[ -1 ]
y_range = range( min_y - 1, max_y + 2 )
return '\n'.join( ''.join( '#' if ( i, j ) in light_positions else ' ' for i in x_range ) for j in y_range ), time
if __name__ == '__main__':
    # Entry point: parse the puzzle input file and print the decoded message
    # together with the number of seconds until it appears.
    lights = _parse( r'day_10_input.txt' )
    message, time = _simulate( lights )
    print( 'The message {0} will appear after {1} seconds.'.format( message, time ))
| StarcoderdataPython |
9789030 | import numpy as np
import innvestigate
import innvestigate.utils as iutils
def isi(model, indicator, x, y=None, max_iter=30, norm=0, batch_size=100, **kwargs):
    """
    Input Significance Indicator based Attack, two indicators: sensitivity and relevance are included.
    Relevance-based attack supports l0,l2 or linf norm constraints.
    Our sensitivity-based attack only supports the l0 norm, since attack in
    other norms using sensitivity is very similar to the 'Basic Iterative Method'.
    :param model: classifier whose softmax is stripped before analysis.
    :param x: batch of clean inputs to perturb.
    :param y: required for targeted attack.
    :param indicator: choose sensitivity or relevance as the indicator.
    :param max_iter: maximum perturbation rounds per mini-batch.
    :param norm: 0, 2 or np.inf -- which norm-constrained update to apply.
    :param batch_size: number of samples processed per mini-batch.
    :param kwargs: when norm=2, step size 'eps' and changed features 'n' is needed,
    when norm=np.inf, step size 'eps' and 'clip_values'=(min,max) is needed.
    :return: adversarial batch
    """
    if indicator == 'sensitivity' and norm != 0:
        raise ValueError('Input sensitivity based attack only supports L0 norm, for other norms try the Basic '
                         'Iterative Method/(or Projected Gradient Descent/)')
    # Map the public indicator names onto innvestigate analyzer identifiers.
    indicator = 'gradient' if indicator == 'sensitivity' else 'lrp.z'
    dims = list(x[0].shape)
    nb_features = np.product(dims)
    # Work on a flat (samples, features) view of the batch.
    adv_x = np.reshape(x.astype(np.float32), (-1, nb_features))
    model = iutils.model_wo_softmax(model)
    preds = np.argmax(model.predict(x), axis=1)
    if y is None:
        analyzer = innvestigate.create_analyzer(indicator, model)
    else:
        # Targeted attack: analyze the output neuron of the target class.
        analyzer = innvestigate.create_analyzer(indicator, model, neuron_selection_mode='index')
    for batch_id in range(int(np.ceil(adv_x.shape[0] / float(batch_size)))):
        batch_index_1, batch_index_2 = batch_id * batch_size, (batch_id + 1) * batch_size
        batch = adv_x[batch_index_1:batch_index_2]
        current_pred = preds[batch_index_1:batch_index_2]
        # "Active" samples are those not yet adversarial:
        # untargeted -> still classified as the original label,
        # targeted   -> not yet classified as the target label.
        if y is None:
            active_indices = np.where(current_pred == preds[batch_index_1:batch_index_2])[0]
        else:
            target = np.zeros_like(current_pred) + y
            active_indices = np.where(current_pred != target)[0]
        i = 0
        # Per-feature flags so the l0 update never touches a feature twice.
        used_features = np.zeros_like(batch)
        while len(active_indices) != 0 and i < max_iter:
            # Significance scores for the still-active samples only.
            r = analyzer.analyze(np.reshape(batch, [batch.shape[0]] + dims)[active_indices], neuron_selection=y)
            r = np.reshape(r, (-1, nb_features))
            if norm == 0:
                batch, used_features = _apply_l0_perturbation(batch, r, indicator, y, active_indices, used_features)
            elif norm == 2:
                batch = _apply_l2_perturbation(batch, r, y, active_indices, kwargs['n'], kwargs['eps'])
            elif norm == np.inf:
                batch = _apply_linf_perturbation(batch, r, y, active_indices, kwargs['eps'], kwargs['clip_values'])
            # Re-classify and shrink the active set accordingly.
            current_pred = np.argmax(model.predict(np.reshape(batch, [batch.shape[0]] + dims)), axis=1)
            if y is None:
                active_indices = np.where(current_pred == preds[batch_index_1:batch_index_2])[0]
            else:
                active_indices = np.where(current_pred != target)[0]
            i += 1
        adv_x[batch_index_1:batch_index_2] = batch
    adv_x = np.reshape(adv_x, x.shape)
    return adv_x
def _apply_l0_perturbation(batch, score, indicator, y, active_indices, used_features):
    """
    Add perturbations to data batch, and record the features that have been used.

    Picks exactly one not-yet-used feature per active sample (largest
    |sensitivity|, largest relevance for untargeted, or least relevance for
    targeted attacks) and overwrites it with +/-1.
    NOTE(review): this assumes input features live in [-1, 1] -- confirm the
    preprocessing used by callers.
    """
    act_used_features = used_features[active_indices]
    if indicator == 'gradient':
        score[act_used_features == 1] = 0  # set sensitivity of already used features to zero
        ind = np.argpartition(np.abs(score), -1, axis=1)[:, -1:]  # find feature with the largest abs(sensitivity)
        tmp_batch = batch[active_indices]
        # Untargeted: step against the gradient sign; targeted: along it.
        if y is None:
            tmp_batch[np.arange(len(active_indices)), ind[:, 0]] = -1 * np.sign(
                score[np.arange(len(active_indices)), ind[:, 0]])
        else:
            tmp_batch[np.arange(len(active_indices)), ind[:, 0]] = np.sign(
                score[np.arange(len(active_indices)), ind[:, 0]])
        batch[active_indices] = tmp_batch
    else:
        if y is None:
            score[act_used_features == 1] = -np.inf  # set relevance of already used features to -inf
            ind = np.argpartition(score, -1, axis=1)[:, -1:]  # find feature with the largest relevance
        else:
            score[act_used_features == 1] = np.inf  # set relevance of already used features to inf
            ind = np.argpartition(score, 0, axis=1)[:, 0:]  # find feature with the least relevance
        tmp_batch = batch[active_indices]
        # Flip the chosen feature to the opposite extreme of its current sign.
        # tmp_batch[np.arange(len(active_indices)), ind[:, 0]] *= -1
        tmp_batch[np.arange(len(active_indices)), ind[:, 0]] = -np.sign(
            tmp_batch[np.arange(len(active_indices)), ind[:, 0]])
        batch[active_indices] = tmp_batch
    # Mark the chosen feature so later iterations skip it.
    used_features[active_indices, ind[:, 0]] = 1
    return batch, used_features
def _apply_l2_perturbation(batch, r, y, active_indices, n, eps):
if y is None:
ind = np.argpartition(r, -n, axis=1)[:, (-n):] # find n features with the largest relevance
else:
ind = np.argpartition(r, n - 1, axis=1)[:, :n] # find n features with the least relevance
tmp_batch = batch[active_indices]
for i in range(n):
tmp_batch[np.arange(len(active_indices)), ind[:, i]] -= eps * np.sign(
tmp_batch[np.arange(len(active_indices)), ind[:, i]])
batch[active_indices] = tmp_batch
return batch
def _apply_linf_perturbation(batch, r, y, active_indices, eps, clip_values):
tmp_batch = batch[active_indices]
if y is None:
tmp_batch[np.arange(len(active_indices)), :] -= eps * np.sign(r) * np.sign(
tmp_batch[np.arange(len(active_indices)), :])
else:
tmp_batch[np.arange(len(active_indices)), :] += eps * np.sign(r) * np.sign(
tmp_batch[np.arange(len(active_indices)), :])
tmp_batch = np.clip(tmp_batch, clip_values[0], clip_values[1])
batch[active_indices] = tmp_batch
return batch
| StarcoderdataPython |
1802108 | <gh_stars>0
import os
import json
import time
import smtplib
import threading
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
from firebase_admin import auth
from email.message import EmailMessage
# Use the application default credentials
# Initialize the Firebase Admin SDK from a service-account key file that is
# expected to sit next to this script.
jsonKey = json.load(open('ServiceAccountKey.json'))
cred = credentials.Certificate(jsonKey)
app = firebase_admin.initialize_app(cred)
# Shared Firestore client used by every helper below.
db = firestore.client()
def getAdminAddresses():
    """Return email addresses of Owners/Admins who enabled email notifications."""
    docs = db.collection("Users").where(
        "userGroup", "in", ["Owner", "Admin"]
    ).where(
        "emailNotifications", "==", True
    ).stream()
    return [doc.to_dict()["email"] for doc in docs]
def getEmails():
    """Build an EmailMessage for every queued document in the Emails collection.

    Each Firestore document is expected to carry 'message', 'subject' and
    'email' (the sender) fields.
    """
    docs = db.collection("Emails").stream()
    emails = [doc.to_dict() for doc in docs]
    emailMessages = []
    for e in emails:
        msg = EmailMessage()
        msg.set_content(e["message"])
        msg["Subject"] = e["subject"]
        msg["From"] = e["email"]
        emailMessages.append(msg)
    return emailMessages
def getFaultEmails():
    """Build one EmailMessage per fault notification for every subscribed user.

    Reads all documents from the 'Notifications' collection, resolves the
    logger -> site -> equipment-unit chain, and generates an email for each
    user that subscribed to that unit and has email notifications enabled
    (a missing 'emailNotifications' field counts as enabled).

    Malformed notification documents are reported and skipped instead of
    aborting the whole batch.
    """
    docs = db.collection("Notifications").stream()
    faults = [doc.to_dict() for doc in docs]
    emails = []
    for fault in faults:
        try:
            loggerId = fault['logger']
            message = fault['message']
            # Resolve the site and the equipment unit this logger belongs to.
            loggerDoc = db.collection(u'Loggers').document(
                loggerId).get().to_dict()
            siteId = loggerDoc['site']
            siteDoc = db.collection(u'Sites').document(siteId).get().to_dict()
            equipName = ''
            for unit in siteDoc['equipmentUnits']:
                if (loggerId in unit['loggers']):
                    equipName = unit['name']
            # Fan out to every user subscribed to this site/equipment pair.
            users = [doc.to_dict() for doc in db.collection("Users").stream()]
            for user in users:
                if 'equipmentNotifications' in user and siteId in user['equipmentNotifications']:
                    subscribed = user['equipmentNotifications'][siteId][equipName]
                    # Absent flag defaults to notifications enabled.
                    notificationsOn = user.get('emailNotifications', True)
                    if subscribed and notificationsOn:
                        msg = EmailMessage()
                        msg.set_content(message)
                        msg["Subject"] = f"Fault detected on {equipName}"
                        msg["To"] = user['email']
                        emails.append(msg)
        except Exception as exc:
            # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
            # propagate and the actual problem is reported.
            print(f"a notification document was incorrectly created: {exc!r}")
    return emails
def deleteFaults():
    """Delete every document in the Notifications collection."""
    for snapshot in db.collection('Notifications').stream():
        db.collection("Notifications").document(snapshot.id).delete()
def deleteEmails():
    """Delete every document in the Emails collection."""
    for snapshot in db.collection("Emails").stream():
        db.collection("Emails").document(snapshot.id).delete()
def sendMail():
    """Poll Firestore every 5 seconds and relay queued emails and fault alerts over SMTP.

    Runs forever; intended to be started on a background thread.

    Bug fixed: smtplib.SMTP.sendmail treats a plain string as a SINGLE
    recipient address, so the old comma-joined admin string broke delivery to
    multiple admins. Recipients are now passed as a real list.
    """
    EMAIL = "<EMAIL>"
    PASS = "<PASSWORD>"  # TODO: load credentials from the environment, not source code
    recipients = ["<EMAIL>"] + getAdminAddresses()
    while True:
        with smtplib.SMTP_SSL("smtp.gmail.com", 465, timeout=10.0) as server:
            server.ehlo()
            server.login(EMAIL, PASS)
            # Forward user-submitted contact emails to all admins.
            emails = getEmails()
            for e in emails:
                server.sendmail(e["From"], recipients, e.as_string())
                print(f'Sent message from {e.get("From")}.')
            deleteEmails()
            # Deliver fault alerts to each subscribed user (BCC ourselves).
            notifications = getFaultEmails()
            for n in notifications:
                server.sendmail(EMAIL, [n["To"], EMAIL], n.as_string())
                print(f'Sent message to {n["To"]}.')
            deleteFaults()
        time.sleep(5.0)
if __name__ == "__main__":
sender = threading.Thread(target=sendMail)
print("Starting sender thread...")
sender.start()
| StarcoderdataPython |
8004920 | <gh_stars>100-1000
import sublime
from subprocess import call
class LinuxBrowserRefresh:
    """Send a refresh keystroke (F5) to browsers on Linux via `xdotool`.

    Only Chrome and Firefox are implemented; the remaining browser hooks are
    intentional no-ops kept for interface parity with other platforms.
    """

    def __init__(self, activate_browser):
        # activate_browser is always true on Windows since you can't
        # send keys to an inactive window programmatically. We ignore it.
        # On Linux it controls whether the browser window is raised after F5.
        self.activate_browser = activate_browser

    def chrome(self):
        self.SendKeysToAllWindows('google-chrome', 'F5')

    def iron(self):
        # Not implemented: SRWare Iron support.
        pass
        # except NotImplemented("Iron support not implemented yet.")

    def safari(self):
        # Not implemented: Safari support.
        pass
        # except NotImplemented("Safari support not implemented yet.")

    def safari64(self):
        # Not implemented: Safari (64-bit) support.
        pass
        # except NotImplemented("Safari64 support not implemented yet.")

    def firefox(self):
        self.SendKeysToAllWindows('firefox', 'F5')

    def opera(self):
        # Not implemented: Opera support.
        pass
        # except NotImplemented("Opera support not implemented yet.")

    def ie(self):
        # Not implemented: Internet Explorer support.
        pass
        # except NotImplemented("IE support not implemented yet.")

    def SendKeysToAllWindows(self, cls, key):
        """Send *key* to every visible window of X11 class *cls* using xdotool."""
        cmd = ['xdotool', 'search', '--sync', '--onlyvisible', '--class', cls, 'windowfocus', 'key', key]
        if self.activate_browser:
            cmd += ['windowactivate']

        status_code = call(cmd)
        # Non-zero exit usually means xdotool is missing; tell the user how to get it.
        if status_code != 0:
            sublime.error_message(
                'Browser Refresh cannot execute the specified program.\n\n'
                '%s\n\n'
                'If program \'xdotool\' is currently not installed '
                'you can install it by typing:\n\n'
                'sudo apt-get install xdotool' % " ".join(cmd))
| StarcoderdataPython |
11327318 | from datetime import datetime
from bs4 import BeautifulSoup
import requests
from fake_useragent import UserAgent
from celery.utils.log import get_task_logger
from fuck_papers.models import Paper, Category, Message, User
from fuck_papers.extensions import db
from fuck_papers import celery, flask_app
logger = get_task_logger(__name__)
URL_PARSERS = []


def register_url_parser(cls):
    """Class decorator: record *cls* in the module-wide parser registry."""
    URL_PARSERS.append(cls)
    return cls
class BaseParser(object):
    """Common scaffolding for paper-page parsers.

    Subclasses provide url_match/parse_url and the get_* extraction hooks;
    start_pip_line drives them and caches each extracted field on the
    instance, exposed read-only through the properties below.
    """

    name = 'BaseParser'
    patterns = []

    def __init__(self, url):
        self._url = url
        # Extracted fields; filled in by start_pip_line().
        self._title = None
        self._author = None
        self._abstract = None
        self._subject = None
        self._submit_info = None
        # Randomised User-Agent so requests look like a normal browser.
        self.headers = {'User-Agent': UserAgent().random}

    @classmethod
    def url_match(cls, url):
        raise NotImplementedError

    def start_pip_line(self):
        """Fetch the page once and populate every cached field."""
        page = self.parse_url(self._url)
        self._title = self.get_title(page)
        self._author = self.get_author(page)
        self._abstract = self.get_abstract(page)
        self._subject = self.get_subject(page)
        self._submit_info = self.get_submit_info(page)

    def parse_url(self, url):
        raise NotImplementedError

    def get_title(self, paper):
        raise NotImplementedError

    def get_author(self, paper):
        raise NotImplementedError

    def get_abstract(self, paper):
        raise NotImplementedError

    def get_subject(self, paper):
        raise NotImplementedError

    def get_submit_info(self, paper):
        raise NotImplementedError

    @property
    def url(self):
        return self._url

    @property
    def title(self):
        return self._title

    @property
    def author(self):
        return self._author

    @property
    def abstract(self):
        return self._abstract

    @property
    def subject(self):
        return self._subject

    @property
    def submit_info(self):
        return self._submit_info

    def __repr__(self):
        return '%s' % self.name
@register_url_parser
class ArxivParser(BaseParser):
    """Parser for arXiv abstract pages (arxiv.org / de.arxiv.org).

    Each get_* hook falls back to a placeholder string on extraction
    failure so a partially-changed page layout never aborts the task.

    Fix: the bare ``except:`` clauses were narrowed to ``except Exception:``
    so KeyboardInterrupt/SystemExit are no longer swallowed.
    """

    name = 'ArxivParser'
    patterns = ['http://de.arxiv.org/abs/', 'https://arxiv.org/abs/']

    def __init__(self, url):
        super().__init__(url)

    @classmethod
    def url_match(cls, url):
        """Return True if *url* starts with one of the known arXiv prefixes."""
        for pattern in cls.patterns:
            if url.startswith(pattern):
                return True
        return False

    def parse_url(self, url):
        """Download *url* and return a BeautifulSoup tree of the page."""
        paper = BeautifulSoup(requests.get(url, headers=self.headers).text, 'html.parser')
        return paper

    def get_title(self, paper):
        """Extract the title text (skipping the 'Title:' caption node)."""
        try:
            element = paper.find('h1', class_='title mathjax')
            title = ''.join(list(element.strings)[1:]).strip()
        except Exception:
            title = '未获取,你可以手动添加该内容。'
        return title

    def get_author(self, paper):
        """Extract author names as a single newline-free string."""
        try:
            element = paper.find('div', class_='authors')
            authors = ''.join(list(element.strings)[1:]).replace('\n', '')
        except Exception:
            authors = '未获取,你可以手动添加该内容。'
        return authors

    def get_abstract(self, paper):
        """Extract the abstract body from the blockquote element."""
        try:
            element = paper.find('blockquote', class_='abstract mathjax')
            abstract = list(element.strings)[-1].strip()
        except Exception:
            abstract = '未获取,你可以手动添加该内容。'
        return abstract

    def get_subject(self, paper):
        """Extract the primary subject classification."""
        try:
            element = paper.find('span', class_='primary-subject')
            subject = element.string
        except Exception:
            subject = '未获取,你可以手动添加该内容。'
        return subject

    def get_submit_info(self, paper):
        """Extract the submission-history line as one flattened string."""
        try:
            element = paper.find('div', class_='submission-history')
            infos = list(element.strings)[5:]
            submit_info = ''.join(infos).replace('\n', ' ').strip()
        except Exception:
            submit_info = '未获取,你可以手动添加该内容。'
        return submit_info

    def __repr__(self):
        return '%s' % self.name
@register_url_parser
class BiorxivParser(BaseParser):
    """Parser for bioRxiv content pages.

    Each get_* hook falls back to a placeholder string on extraction
    failure so layout changes never abort the task.

    Fix: the bare ``except:`` clauses were narrowed to ``except Exception:``
    so KeyboardInterrupt/SystemExit are no longer swallowed.
    """

    name = 'BiorxivParser'
    patterns = ['https://www.biorxiv.org/content/']

    def __init__(self, url):
        super().__init__(url)

    @classmethod
    def url_match(cls, url):
        """Return True if *url* starts with the known bioRxiv prefix."""
        for pattern in cls.patterns:
            if url.startswith(pattern):
                return True
        return False

    def parse_url(self, url):
        """Download *url* and return a BeautifulSoup tree of the page."""
        paper = BeautifulSoup(requests.get(url, headers=self.headers).text, 'html.parser')
        return paper

    def get_title(self, paper):
        """Extract the page title."""
        try:
            element = paper.find('h1', id='page-title')
            title = ''.join(list(element.strings)).strip()
        except Exception:
            title = '未获取,你可以手动添加该内容。'
        return title

    def get_author(self, paper):
        """Extract author names, dropping ORCID link captions."""
        try:
            element = paper.find('div', class_='highwire-cite-authors')
            names = list(element.strings)
            authors = ''.join([name for name in names if name != 'View ORCID Profile'])
        except Exception:
            authors = '未获取,你可以手动添加该内容。'
        return authors

    def get_abstract(self, paper):
        """Extract the abstract body (skipping the heading node)."""
        try:
            element = paper.find('div', id='abstract-1')
            abstract = ''.join(list(element.strings)[1:]).strip()
        except Exception:
            abstract = '未获取,你可以手动添加该内容。'
        return abstract

    def get_subject(self, paper):
        """Extract the collection/subject term."""
        try:
            element = paper.find('span', class_='highwire-article-collection-term')
            subject = ''.join(list(element.stripped_strings))
        except Exception:
            subject = '未获取,你可以手动添加该内容。'
        return subject

    def get_submit_info(self, paper):
        """Extract the posting-info panel, normalising non-breaking spaces."""
        try:
            element = paper.find('div', class_='panel-pane pane-custom pane-1')
            submit_info = ''.join(list(element.stripped_strings)).replace('\xa0', ' ')
        except Exception:
            submit_info = '未获取,你可以手动添加该内容。'
        return submit_info

    def __repr__(self):
        return '%s' % self.name
@register_url_parser
class IEEEParser(BaseParser):
    """Placeholder parser for IEEE Xplore document pages (not implemented)."""

    name = 'IEEEParser'
    patterns = ['https://ieeexplore.ieee.org/document/']

    def __init__(self, url):
        super().__init__(url)

    @classmethod
    def url_match(cls, url):
        """Return True if *url* points at an IEEE Xplore document page."""
        return any(url.startswith(pattern) for pattern in cls.patterns)

    def parse_url(self, url):
        # IEEE pages are rendered client-side ("this need to deal with js"),
        # so a plain HTTP fetch is not enough; nothing is parsed yet.
        paper = None
        return paper

    def get_title(self, paper):
        pass  # not implemented

    def get_author(self, paper):
        pass  # not implemented

    def get_abstract(self, paper):
        pass  # not implemented

    def get_subject(self, paper):
        pass  # not implemented

    def get_submit_info(self, paper):
        pass  # not implemented
@celery.task
def create_and_notify(url, category_id, current_user_id):
    """Celery task: scrape *url* with the first matching parser and store a Paper.

    Runs inside the Flask app context.  Every outcome is reported back to
    the user as a Message row: fetch failure, unrecognised url format, or
    successful ingestion.  Silently drops the task if the user or category
    no longer exists.
    """
    with flask_app.app_context():
        # Pre-filter the registry to parsers whose URL patterns match.
        available_parsers = [parser for parser in URL_PARSERS if parser.url_match(url)]
        user = User.query.get(current_user_id)
        if not user:
            # Unknown user: nobody to notify, so just drop the task.
            return
        category = Category.query.filter_by(user=user).filter_by(id=category_id).first()
        if not category:
            return
        if len(available_parsers) > 0:
            for parser in available_parsers:
                if parser.url_match(url):  # always true: the list was pre-filtered above
                    p = parser(url)
                    try:
                        # Fetches the page and fills the parser's cached fields.
                        p.start_pip_line()
                    except requests.exceptions.RequestException:
                        # Network/HTTP failure: tell the user to retry later.
                        message = Message(
                            content='无法解析 %s,请检查此url,或稍后再试。' % url,
                            add_timestamp=datetime.utcnow(),
                            user=user
                        )
                        db.session.add(message)
                        db.session.commit()
                    else:
                        # Persist the scraped paper and a success notification.
                        paper = Paper(
                            url=p.url,
                            title=p.title,
                            author=p.author,
                            abstract=p.abstract,
                            subjects=p.subject,
                            submit_time=p.submit_info,
                            user=user,
                            category=category
                        )
                        message = Message(
                            content='论文 %s(%s) 收录完成。' % (p.url, p.title),
                            add_timestamp=datetime.utcnow(),
                            user=user
                        )
                        db.session.add(paper)
                        db.session.add(message)
                        db.session.commit()
        else:
            # No registered parser recognises this URL format.
            message = Message(
                content='您输入的 %s 与标准格式不匹配,请输入格式正确的url。' % url,
                add_timestamp=datetime.utcnow(),
                user=user
            )
            db.session.add(message)
            db.session.commit()
12853039 | <gh_stars>0
# -*- coding: utf-8 -*-
import click
from github import Github
from github.GithubException import RateLimitExceededException
def main():
    # Entry point: run the click group with an empty shared context object
    # (filled in by the `cli` group callback).
    cli(obj={})
def get_repos(key, org, repo, url):
    """Return the repositories selected by the CLI options.

    Connects with OAuth token *key* (against *url* for GitHub Enterprise,
    otherwise github.com), scoped to organization *org* or the
    authenticated user, and narrowed to a single repository when *repo*
    is given.
    """
    client = Github(key, base_url=url) if url else Github(key)
    owner = client.get_organization(login=org) if org else client.get_user()
    return [owner.get_repo(repo)] if repo else owner.get_repos()
@click.group()
@click.option('--key', envvar='EPITHET_KEY', help="Github OAuth Token")
@click.option('--dryrun', is_flag=True, help="Don't actually change or create labels")
@click.option('--url', help="API URL - change if GitHub Enterprise")
@click.pass_context
def cli(ctx, key, dryrun, url):
    """Root command group: validate the API key and stash the shared
    options (key / dryrun / url) in ctx.obj for the subcommands."""
    if not key:
        # Bail out early rather than failing later on the first API call.
        click.echo("You must provide a GitHub API v3 key")
        return
    ctx.obj['dryrun'] = dryrun
    ctx.obj['url'] = url
    ctx.obj['key'] = key
@cli.command()
@click.option('--label', '-l', is_flag=True, help="List labels", default=False)
@click.option('--milestone', '-m', is_flag=True, help='List milestones', default=False)
@click.option('--org', '-o', help="Organization to get repos from")
@click.option('--repo', '-r', help="Optionally select a single repo")
@click.pass_context
def list(ctx, label, milestone, org, repo):
    """Print labels and/or milestones for every selected repository.

    Requires at least one of --label/--milestone.
    """
    if not label and not milestone:
        click.echo("--label or --milestone required")
        return
    for gh_repo in get_repos(ctx.obj['key'], org, repo, ctx.obj['url']):
        click.echo("\n * {}:\n".format(gh_repo.name))
        if label:
            # Loop variable renamed (was `label`) so it no longer shadows the flag.
            for lbl in gh_repo.get_labels():
                click.echo(" - {} ({})".format(lbl.name, lbl.color))
        if milestone:
            for ms in gh_repo.get_milestones():
                # FIX: format string had two placeholders but one argument,
                # which raised IndexError for every milestone printed.
                click.echo(" - {}".format(ms.title))
@cli.command()
@click.option('--label', '-l', is_flag=True, help="Add label", default=False)
@click.option('--milestone', '-m', is_flag=True, help='Add milestone', default=False)
@click.option('--org', '-o', help="Organization")
@click.option('--repo', '-r', help="Optionally select a single repo")
@click.option('--name', '-n', help="Name of new label")
@click.option('--color', '-c', help="Color of new label")
@click.pass_context
def add(ctx, label, milestone, org, repo, name, color):
    """Create (or normalise) a label and/or milestone across the selected repos.

    Labels are matched case-insensitively: an existing label that differs
    only in case is renamed to *name*; an exact match gets its color
    updated; otherwise a new label is created.  Milestones are only
    created when no case-insensitive match exists.
    """
    if not label and not milestone:
        click.echo("--label or --milestone required")
        return
    for repo in get_repos(ctx.obj['key'], org, repo, ctx.obj['url']):
        click.echo(" * Checking {}".format(repo.name))
        if label:
            click.echo("Adding a label with name: {} and color: {}".format(name, color))
            labels = {label.name: label for label in repo.get_labels()}
            # Case-insensitive presence check against existing label names.
            if name.lower() in [l.lower() for l in labels.keys()]:
                click.echo(
                    " - Found {} on {} (Dryrun: {})".format(
                        name, repo.name, ctx.obj['dryrun']
                    )
                )
                if name not in labels.keys():
                    # Same name but different case: rename it to the exact casing.
                    # NOTE(review): this edit is not guarded by --dryrun or
                    # repo.archived, unlike the branches below -- confirm intended.
                    for labelname, label in labels.items():
                        if labelname.lower() == name.lower():
                            labels[labelname].edit(name=name, color=color)
                elif labels[name].color != color and not ctx.obj['dryrun'] \
                        and not repo.archived:
                    # Exact-name match with a stale color: update the color.
                    labels[name].edit(name=name, color=color)
            else:
                click.echo(
                    " - Creating {} on {} (Dryrun: {})".format(
                        name, repo.name, ctx.obj['dryrun']
                    )
                )
                if not ctx.obj['dryrun'] and not repo.archived:
                    repo.create_label(name=name, color=color)
        if milestone:
            click.echo("Adding a milestone with name: {}".format(name))
            milestones = {milestone.title: milestone
                          for milestone in repo.get_milestones()}
            # Milestones: report if a case-insensitive match exists, else create.
            if name.lower() in [m.lower() for m in milestones.keys()]:
                click.echo(
                    " - Found {} on {} (Dryrun: {})".format(
                        name, repo.name, ctx.obj['dryrun']
                    )
                )
            else:
                click.echo(
                    " - Creating {} on {} (Dryrun: {})".format(
                        name, repo.name, ctx.obj['dryrun']
                    )
                )
                if not ctx.obj['dryrun'] and not repo.archived:
                    repo.create_milestone(title=name)
@cli.command()
@click.option('--label', '-l', is_flag=True, help="Delete label", default=False)
@click.option('--milestone', '-m', is_flag=True, help='Delete milestones', default=False)
@click.option('--org', '-o', help="Organization")
@click.option('--repo', '-r', help="Optionally select a single repo")
@click.option('--name', '-n', help="Name of label or milestone to delete")
@click.pass_context
def delete(ctx, label, milestone, org, repo, name):
    """Delete the label and/or milestone called *name* from every selected repo.

    Matching is exact (case-sensitive); honours --dryrun.
    """
    if not label and not milestone:
        click.echo("--label or --milestone required")
        return
    for gh_repo in get_repos(ctx.obj['key'], org, repo, ctx.obj['url']):
        click.echo(" * Checking {}".format(gh_repo.name))
        if label:
            click.echo("Deleting label: {}".format(name))
            found_labels = {l.name: l for l in gh_repo.get_labels()}
            if name in found_labels:
                click.echo(
                    " - Found {} on {}, deleting (Dryrun: {})".format(
                        found_labels[name].name, gh_repo.name, ctx.obj['dryrun']
                    )
                )
                if not ctx.obj['dryrun']:
                    found_labels[name].delete()
        if milestone:
            click.echo("Deleting milestone: {}".format(name))
            found_milestones = {m.title: m for m in gh_repo.get_milestones()}
            if name in found_milestones:
                click.echo(
                    " - Found {} on {}, deleting (Dryrun: {})".format(
                        found_milestones[name].title, gh_repo.name, ctx.obj['dryrun']
                    )
                )
                if not ctx.obj['dryrun']:
                    found_milestones[name].delete()
@cli.command()
@click.option('--label', '-l', is_flag=True, help="Update label", default=False)
@click.option('--milestone', '-m', is_flag=True, help='Update milestone', default=False)
@click.option('--org', '-o', help="Organization")
@click.option('--repo', '-r', help="Optionally select a single repo")
@click.option('--name', '-n', help="Name of the existing label")
@click.option('--new-name', help="New name of the label")
@click.pass_context
def update(ctx, label, milestone, org, repo, name, new_name):
    """Rename the label *name* to *new_name* across the selected repos.

    Label renames keep the existing color and honour --dryrun.  The
    milestone branch currently only reports the match.
    """
    if not label and not milestone:
        click.echo("--label or --milestone required")
        return
    for gh_repo in get_repos(ctx.obj['key'], org, repo, ctx.obj['url']):
        click.echo(" * Checking {}".format(gh_repo.name))
        if label:
            click.echo("Updating label {}".format(name))
            labels = {l.name: l for l in gh_repo.get_labels()}
            if name in labels:
                click.echo(
                    " - Found {} on {}, upating to {} (Dryrun: {})".format(
                        labels[name].name, gh_repo.name, new_name, ctx.obj['dryrun']
                    )
                )
                if labels[name].name != new_name and not ctx.obj['dryrun']:
                    labels[name].edit(name=new_name, color=labels[name].color)
            else:
                click.echo("{} not found, did you mean 'add'?".format(name))
        if milestone:
            click.echo("Updating milestone with name: {}".format(name))
            milestones = {m.title: m for m in gh_repo.get_milestones()}
            if name in milestones:
                click.echo(
                    # FIX: PyGithub Milestone objects have no .name attribute
                    # (the rest of this file uses .title), so this line raised
                    # AttributeError whenever a milestone matched.
                    " - Found {} on {}, upating to {} (Dryrun: {})".format(
                        milestones[name].title, gh_repo.name, new_name, ctx.obj['dryrun']
                    )
                )
                # NOTE(review): unlike the label branch, nothing edits the
                # milestone here -- TODO confirm whether a rename was intended.
            else:
                click.echo("{} not found, did you mean 'add'?".format(name))
if __name__ == "__main__":
    # FIX: main() takes no parameters (it passes obj={} to cli itself);
    # calling main(obj={}) raised TypeError on every direct invocation.
    main()
| StarcoderdataPython |
9747398 | <filename>src/mtl_trainer.py
import os
import logging
from dataclasses import dataclass, field
from typing import Dict, Optional, Callable, Tuple
import torch
import numpy as np
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler
from transformers import Trainer, TrainingArguments, EvalPrediction, glue_output_modes
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from src.data.glue_utils import compute_glue_metrics
logger = logging.getLogger(__name__)
@dataclass
class MultiTaskTrainingArguments(TrainingArguments):
    """TrainingArguments extended with multi-task sampling/scheduling knobs.

    Consumed by MultiTaskTrainer (dataloader construction and optimizer
    warmup).
    """

    # When set, training batches are drawn with the MT-Uncertainty strategy
    # implemented in MultiTaskTrainer._create_custom_dataloader.
    use_mt_uncertainty: bool = field(
        default=False,
        metadata={"help": "Use MT-Uncertainty sampling method"},
    )
    # When set, CustomLoader.__len__ sizes the epoch so each task is sampled
    # an equal number of times.
    uniform_mt_sampling: bool = field(
        default=False,
        metadata={"help": "Sample each task an equal amount to times per epoch."},
    )
    # Scales the per-epoch sample count relative to the largest dataset.
    # NOTE(review): the clamping to [0.0, 1.0] described in the help text is
    # not enforced anywhere visible here -- confirm where it happens.
    percent_of_max_data_size: float = field(
        default=1.0,
        metadata={
            "help": "If uniform_mt_sampling=True, specify the samples per task per "
            "epoch based on the maximum dataset length. If below 0.0 or above 1.0,"
            "it will be set to the closest of 0.0 or 1.0."
        },
    )
    # Fraction of total training steps spent linearly warming up the LR.
    warmup_proportion: float = field(
        default=0.1,
        metadata={"help": "0.0 to args.lr for warmup_proportion * num_training_steps"},
    )
class MultiTaskTrainer(Trainer):
    """HuggingFace Trainer specialised for multi-task (GLUE-style) training.

    Adds: a warmup-proportion-based scheduler, an optional uncertainty-driven
    multi-task dataloader, per-task evaluation over a dict of dev sets, and
    per-task test-set prediction dumps.
    """

    def __init__(
        self,
        tokenizer,
        data_args,
        eval_datasets=None,
        test_datasets=None,
        *args,
        **kwargs,
    ):
        """Store tokenizer/data args and the per-task eval/test dataset dicts;
        everything else is forwarded to the base Trainer."""
        super(MultiTaskTrainer, self).__init__(*args, **kwargs)
        self.tokenizer = tokenizer
        self.data_args = data_args
        # Dicts mapping task_name -> Dataset.
        self.eval_datasets = eval_datasets
        self.test_datasets = test_datasets
        # task_name -> {metric: best value seen so far}.
        self.eval_results = {}

    def get_optimizers(
        self, num_training_steps: int
    ) -> Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]:
        """
        Setup the optimizer and the learning rate scheduler.

        Differs from the base implementation in that the number of warmup
        steps is derived from args.warmup_proportion instead of a fixed
        args.warmup_steps.
        """
        if self.optimizers is not None:
            return self.optimizers
        # Prepare optimizer and schedule (linear warmup and decay);
        # bias/LayerNorm parameters are exempt from weight decay.
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p
                    for n, p in self.model.named_parameters()
                    if not any(nd in n for nd in no_decay)
                ],
                "weight_decay": self.args.weight_decay,
            },
            {
                "params": [
                    p
                    for n, p in self.model.named_parameters()
                    if any(nd in n for nd in no_decay)
                ],
                "weight_decay": 0.0,
            },
        ]
        num_warmup_steps = (
            self.args.warmup_proportion * num_training_steps
        )  # this is different from overridden function
        optimizer = AdamW(
            optimizer_grouped_parameters,
            lr=self.args.learning_rate,
            eps=self.args.adam_epsilon,
        )
        scheduler = get_linear_schedule_with_warmup(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,  # this is different from overridden function
        )
        return optimizer, scheduler

    def get_train_dataloader(self) -> DataLoader:
        # Uncertainty-based multi-task sampling replaces the stock loader.
        if self.args.use_mt_uncertainty:
            return self._create_custom_dataloader()
        else:
            return super().get_train_dataloader()

    def _create_custom_dataloader(self):
        """Build a loader that over-samples one batch per task, scores the
        combined candidate pool with the model's uncertainty outputs, and
        keeps only the top train_batch_size examples."""

        class MtUcertaintyIterator:
            """Sample tasks using uncertainty measure."""

            def __init__(self, my_loader):
                self.my_loader = my_loader
                self.loader_iters = [iter(loader) for loader in self.my_loader.loaders]
                self.loader_iter_sizes = [len(i) for i in self.loader_iters]
                self.max_count = len(self.my_loader)
                self.batch_count = 0

            def __iter__(self):
                return self

            def __next__(self):
                if self.batch_count == self.max_count:
                    self.batch_count = 0
                    raise StopIteration()

                # Draw one batch from every task loader, restarting loaders
                # that run out (so smaller tasks cycle within an epoch).
                test_batch = {}
                for idx, loader_iter in enumerate(self.loader_iters):
                    try:
                        batch = loader_iter.__next__()
                    except StopIteration:
                        new_loader_iter = iter(self.my_loader.loaders[idx])
                        self.loader_iters[idx] = new_loader_iter
                        batch = new_loader_iter.__next__()

                    test_batch = self.batchify_data(batch, test_batch)

                # Score the pooled candidate batch without labels/gradients.
                inputs = {}
                for k, v in test_batch.items():
                    if k not in ["labels"]:
                        inputs[k] = v.detach().to(self.my_loader.args.device)

                with torch.no_grad():
                    # select_batch_mode makes the model emit the entropy
                    # statistics consumed below -- model-specific flag.
                    model.select_batch_mode = True
                    outputs = model(**inputs)
                    model.select_batch_mode = False

                (
                    test_batch_entropy,
                    test_batch_entropy_mean,
                    max_mean_batch_entropy,
                ) = outputs[-3:]

                for _, v in inputs.items():
                    del v  # free GPU mem
                del inputs

                # Weight per-example entropy by the (normalised) per-task mean.
                test_batch_entropy_mean = (
                    test_batch_entropy_mean / max_mean_batch_entropy
                )
                test_batch_entropy = test_batch_entropy * test_batch_entropy_mean

                # NOTE(review): STS-B examples borrow MRPC entropy scores here,
                # presumably because regression entropies are not comparable --
                # TODO confirm the rationale.
                if "sts-b" in tasks and "mrpc" in tasks:
                    stsb_idx = test_batch["task_id"] == tasks.index("sts-b")
                    mrpc_idx = test_batch["task_id"] == tasks.index("mrpc")

                    num_items = min(
                        len(test_batch_entropy[stsb_idx]),
                        len(test_batch_entropy[mrpc_idx]),
                    )
                    stsb_idx = stsb_idx.nonzero()[:num_items]
                    mrpc_idx = mrpc_idx.nonzero()[:num_items]
                    test_batch_entropy[stsb_idx] = test_batch_entropy[mrpc_idx]
                    test_batch_entropy_mean[stsb_idx] = test_batch_entropy_mean[
                        mrpc_idx
                    ]

                # Keep only the highest-entropy examples for the real batch.
                select_size = min(
                    self.my_loader.args.train_batch_size,
                    test_batch["input_ids"].shape[0],
                )  # Handled the last batch if it is lower than the batch size
                top_entropy = torch.topk(test_batch_entropy, select_size)

                for k, v in test_batch.items():
                    test_batch[k] = torch.index_select(v, 0, top_entropy.indices)

                self.batch_count += 1
                return test_batch

            @staticmethod
            def batchify_data(data, curr_batch):
                # Concatenate a task batch onto the pooled candidate batch,
                # key by key, along the example dimension.
                for k in data.keys():
                    if k in curr_batch.keys():
                        curr_batch[k] = torch.cat((curr_batch[k], data[k]), dim=0)
                    else:
                        curr_batch[k] = data[k]
                return curr_batch

        class CustomLoader:
            """Thin multi-loader wrapper handing out MtUcertaintyIterator."""

            def __init__(self, loaders, datasets, loader_args):
                self.loaders = loaders
                self.dataset = datasets
                self.args = loader_args
                self.current_epoch = 0

            def __iter__(self):
                iterator = MtUcertaintyIterator(self)

                # for determinism across runs
                # https://github.com/pytorch/examples/issues/501
                for l in self.loaders:
                    if isinstance(l.sampler, DistributedSampler):
                        l.sampler.set_epoch(self.current_epoch)
                self.current_epoch += 1
                return iterator

            def __len__(self):
                # Epoch length depends on the sampling mode; drives
                # MtUcertaintyIterator.max_count.
                loader_len = [len(loader) for loader in self.loaders]
                if self.args.uniform_mt_sampling:
                    return int(
                        self.args.percent_of_max_data_size
                        * max(loader_len)
                        * len(self.loaders)
                        / self.args.train_batch_size
                    )
                elif self.args.use_mt_uncertainty:
                    return int(
                        max(loader_len)
                        * len(self.loaders)
                        * self.args.percent_of_max_data_size
                    )
                else:
                    return sum(loader_len)

        # Closed over by the iterator above.
        model = self.model
        tasks = self.data_args.tasks
        data_loaders = []
        # One DataLoader per task dataset (distributed-aware sampling).
        for dataset in self.train_dataset.datasets:
            train_sampler = (
                RandomSampler(dataset)
                if self.args.local_rank == -1
                else DistributedSampler(dataset)
            )
            data_loader = DataLoader(
                dataset,
                batch_size=self.args.train_batch_size,
                sampler=train_sampler,
                collate_fn=self.data_collator.collate_batch,
            )
            data_loaders.append(data_loader)

        return CustomLoader(data_loaders, self.train_dataset, self.args)

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        prediction_loss_only: Optional[bool] = None,
        context: str = None,
        do_test_if_needed: bool = True,
    ):
        """Evaluate every task's dev set and fold metrics into eval_results.

        *eval_dataset* (or self.eval_datasets) is a dict of
        task_name -> Dataset.  NOTE(review): prediction_loss_only is
        hard-coded to True in the super() call, ignoring the parameter --
        confirm intended.
        """
        datasets = eval_dataset or self.eval_datasets
        logger.info("*** Evaluate on dev ***")
        for task_name, eval_dataset in datasets.items():
            logger.info(task_name)
            # Swap in the task-specific GLUE metric function before evaluating.
            self.compute_metrics = self.build_compute_metrics_fn(eval_dataset)
            eval_result = super().evaluate(
                eval_dataset=eval_dataset, prediction_loss_only=True
            )
            self.update_eval_results(eval_result, task_name)
            for key, value in self.eval_results[task_name].items():
                logger.info(" %s = %s", key, value)

    def predict(
        self,
        eval_dataset: Optional[Dataset] = None,
        prediction_loss_only: Optional[bool] = None,
    ):
        """Run each task's test set and write GLUE-style TSV prediction files.

        Output path: {output_dir}/{task}_test_iter_{global_step}.tsv,
        written only on the world-master process.
        """
        logging.info("*** Test ***")
        datasets = eval_dataset or self.test_datasets
        for task_name, test_dataset in datasets.items():
            logger.info(task_name)
            predictions = super().predict(test_dataset=test_dataset).predictions
            output_mode = glue_output_modes[task_name]
            if output_mode == "classification":
                # Logits -> predicted class index.
                predictions = np.argmax(predictions, axis=1)
            output_test_file = os.path.join(
                self.args.output_dir,
                f"{task_name}_test_iter_{self.global_step}.tsv",
            )
            if self.is_world_master():
                with open(output_test_file, "w") as writer:
                    logger.info("***** Test results {} *****".format(task_name))
                    writer.write("index\tprediction\n")
                    for index, item in enumerate(predictions):
                        if output_mode == "regression":
                            writer.write("%d\t%3.3f\n" % (index, item))
                        else:
                            # Map the class index back to its label string.
                            writer.write(
                                "%d\t%s\n" % (index, test_dataset.get_labels()[item])
                            )

    def update_eval_results(self, results, task_name):
        """Merge *results* into eval_results[task_name], keeping the best
        (max) value per metric; loss/epoch entries are always overwritten."""
        self.eval_results[task_name] = self.eval_results.get(task_name, {})
        for key, value in results.items():
            if key in self.eval_results[task_name] and 'loss' not in key and 'epoch' not in key:
                value = max(self.eval_results[task_name][key], value)
            self.eval_results[task_name][key] = value

    @staticmethod
    def build_compute_metrics_fn(
        eval_dataset
    ) -> Callable[[EvalPrediction], Dict]:
        """Return a metrics callback bound to *eval_dataset*'s GLUE task."""
        def compute_metrics_fn(p: EvalPrediction):
            return compute_glue_metrics(eval_dataset.task_name, p)

        return compute_metrics_fn
| StarcoderdataPython |
3344703 | <reponame>rhyswhitley/savanna_iav<filename>src/figures/modchecks/water_limits.py<gh_stars>0
#!/usr/bin/env python2
import os
import seaborn as sns
import numpy as np
import pandas as pd
import cPickle as pickle
import matplotlib.pyplot as plt
#from matplotlib import style
def main():
    """
    Load the pickled daily model/tower flux data, derive WUE/GPP columns
    and write the soil-water relationship figures for observations and
    the 'Exp_1' model experiment.
    """
    # Shorthand for unpickling a file given its path.
    pload = lambda x: pickle.load(open(x, 'rb'))

    model_dict = pload(MODFILE)
    obs_data = pload(OBSFILE)

    # Exp_1 is the model experiment plotted here -- TODO confirm choice.
    model_out = add_to_data(model_dict['Exp_1'])
    tower_obs = add_to_data(obs_data)

    print model_out.head(10)

    create_obswater_plots(tower_obs, 'Obs')
    create_modwater_plots(model_out, 'Mod')
def add_to_data(temp_df):
    """Attach derived columns to a daily flux DataFrame, in place.

    Adds GPP2 (sign-flipped GPP scaled by 12), WUE (GPP per mole of
    transpired water, from latent heat Qle), and Year/Month taken from
    the datetime index.  Returns the same (mutated) DataFrame.
    """
    gpp = temp_df['GPP']
    temp_df['GPP2'] = -gpp * 12
    temp_df['WUE'] = -gpp / (temp_df['Qle'] / 18 / 2.45)
    when = temp_df.index
    temp_df['Year'] = when.year
    temp_df['Month'] = when.month
    return temp_df
def create_modwater_plots(temp_df, label="Mod"):
    """Write WUE/GPP/LE vs depth-integrated soil-water figures for model output.

    Saves three PDFs under FIGPATH prefixed with *label*.
    """
    # Depth-averaged soil moisture axis label (LaTeX mathtext).
    x_label_custom = '$z^{-1}\int^{z}_{0}\\theta_{s} dz$ (m$^{3}$ m$^{-3}$)'

    p1 = plot_swcrel(temp_df, "IntSWC", "WUE")
    p1.set_axis_labels(x_label_custom, \
                       'WUE (mol CO$_{2}$ mol$^{-1}$ H$_{2}$O)')
    plt.savefig(FIGPATH + "{0}_WUExSWC.pdf".format(label))

    p2 = plot_swcrel(temp_df, "IntSWC", "GPP2")
    # NOTE(review): the two labels below appear to be missing a closing ')'
    # after d$^{-1}$ -- confirm against the intended figure captions.
    p2.set_axis_labels(x_label_custom, \
                       'GPP (gC m$^{-2}$ d$^{-1}$')
    plt.savefig(FIGPATH + "{0}_GPPxSWC.pdf".format(label))

    p3 = plot_swcrel(temp_df, "IntSWC", "Qle")
    p3.set_axis_labels(x_label_custom, \
                       'LE (MJ m$^{-2}$ d$^{-1}$')
    plt.savefig(FIGPATH + "{0}_QLExSWC.pdf".format(label))

    return None
def create_obswater_plots(temp_df, label="Mod"):
    """Write WUE/GPP/LE vs 10 cm soil-water figures for tower observations.

    Same figure set as create_modwater_plots but driven by the observed
    10 cm soil moisture column.  Saves three PDFs under FIGPATH prefixed
    with *label*.
    """
    x_label_custom = '$\\theta_{s,10cm}$ (m$^{3}$ m$^{-3}$)'

    p1 = plot_swcrel(temp_df, "SoilMoist10", "WUE")
    p1.set_axis_labels(x_label_custom, \
                       'WUE (mol CO$_{2}$ mol$^{-1}$ H$_{2}$O)')
    plt.savefig(FIGPATH + "{0}_WUExSWC.pdf".format(label))

    p2 = plot_swcrel(temp_df, "SoilMoist10", "GPP2")
    # NOTE(review): labels below look to be missing a closing ')' -- confirm.
    p2.set_axis_labels(x_label_custom, \
                       'GPP (gC m$^{-2}$ d$^{-1}$')
    plt.savefig(FIGPATH + "{0}_GPPxSWC.pdf".format(label))

    p3 = plot_swcrel(temp_df, "SoilMoist10", "Qle")
    p3.set_axis_labels(x_label_custom, \
                       'LE (MJ m$^{-2}$ d$^{-1}$')
    plt.savefig(FIGPATH + "{0}_QLExSWC.pdf".format(label))

    return None
def plot_swcrel(data, xlabel, ylabel):
    """Scatter *ylabel* against *xlabel*, one facet per Year, coloured by Month.

    Returns the seaborn FacetGrid so the caller can set axis labels and
    save the figure.
    """
    month_lab = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', \
                 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

    sns.set_style("ticks")
    plt.rcParams.update({'mathtext.default': 'regular'})
    kws = dict(s=50, linewidth=.5, edgecolor="w", alpha=0.7)

    wue_plot = sns.FacetGrid(data, col="Year", hue="Month", col_wrap=4, size=3)
    wue_plot.map(plt.scatter, xlabel, ylabel, **kws)

    # Cap the y axis at mean + 3 sigma so outliers don't flatten the cloud.
    ymax = np.ceil(data[ylabel].mean() + 3*data[ylabel].std())
    xmax = np.max(data[xlabel])
    xmin = np.min(data[xlabel])

    # Fixed soil-moisture tick positions, rotated for readability.
    x_ticks = np.arange(0, 0.35, 0.02)
    for wax in wue_plot.axes.ravel():
        wax.xaxis.set_ticks(x_ticks)
        wax.xaxis.set_ticklabels(['%1.2f' %x for x in x_ticks], \
                                 rotation=45, ha="right", fontsize=11)
    wue_plot.set(xlim=(xmin, xmax), ylim=(0, ymax))

    # Single shared month legend placed to the right of the grid.
    leg = plt.legend(loc='right', labels=month_lab, ncol=4, bbox_to_anchor=(2.8, 0.5), \
                     borderpad=2)
    leg.get_frame().set_edgecolor('black')

    wue_plot.fig.subplots_adjust(wspace=.08, hspace=0.15, bottom=0.08)

    return wue_plot
if __name__ == "__main__":

    # Input pickles (daily aggregates).
    FILEPATH = os.path.expanduser("~/Savanna/Data/HowardSprings_IAV/pickled/daily/")
    INFILE = FILEPATH + "daily_inputs.pkl"  # NOTE(review): defined but unused here
    OBSFILE = FILEPATH + "daily_tower_fluxes.pkl"
    MODFILE = FILEPATH + "daily_fluxes.pkl"

    # Figure names
    FIGPATH = os.path.expanduser("~/Savanna/Analysis/figures/IAV/")

    main()

# Leftover exploratory plotting code, kept for reference:
#    plt.savefig(WUEPLOT)
#
#    sns.set_style("darkgrid")
#    wue_plot = sns.FacetGrid(temp, col="Year", hue="Month", col_wrap=4, size=3)
#    wue_plot.map(plt.scatter, "Qle", "GPP2", **kws)
#    wue_plot.set_axis_labels('LE (MJ m$^{-2}$ d$^{-1}$)', 'GPP (gC m$^{-2}$ d$^{-1}$)')
#    wue_plot.set(xlim=(0, 20), ylim=(0, 10))
#    wue_plot.fig.subplots_adjust(wspace=.08)
#
#    plt.show()
#
#    return 1
3483831 | from __future__ import absolute_import
import os
from celery import Celery, signals
# set the default Django settings module for the 'celery' program.
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local')
# django.setup()
app = Celery('celery_uncovered')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings', namespace='CELERY')
# Discover tasks.py modules in all installed Django apps.
app.autodiscover_tasks()
# Imported for side effects: module-level Celery configuration --
# presumably schedules/signals; TODO confirm against that module.
import celery_uncovered.tricks.celery_conf
| StarcoderdataPython |
110820 | from .Workbook import Workbook
from .Style import Style
from .Fill import Fill
from .Font import Font
from .Format import Format
from .Alignment import Alignment
# Best-effort version lookup from installed package metadata.
try:
    import pkg_resources
    __version__ = pkg_resources.require('PyExcelerate')[0].version
except Exception:
    # FIX: was a bare ``except:``, which also swallowed SystemExit and
    # KeyboardInterrupt.  Any metadata failure (setuptools missing, package
    # not installed) now falls back to a placeholder version.
    __version__ = 'unknown'
| StarcoderdataPython |
3223702 | #!/usr/bin/env python
#
# Copyright (c) 2019, Arista Networks EOS+
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re
import time
from jinja2 import meta
import jinja2
import yaml
from cvprac.cvp_client import CvpClient
from cvprac.cvp_client_errors import CvpLoginError, CvpApiError
import argparse
import json
# Checking some Enviromental Variables
#import sys
#print '\n'.join(sys.path)
import imp
print "cvprac is here %s" %str(imp.find_module('cvprac'))
# Setting up some formated print outputs
import pprint
pp2 = pprint.PrettyPrinter(indent=2)
pp4 = pprint.PrettyPrinter(indent=4)
# Disable HTTPS Insecure Cert Warnings
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def connect(module):
    ''' Connects to CVP device using user provided credentials from playbook.

    Python 2 code (note the old ``except X, e`` syntax).

    :param module: dict with a 'params' entry holding host/username/password/
                   protocol/port; on login failure a 'fail' entry is added
                   with the error text instead of raising.
    :return: CvpClient object with connection instantiated.
    '''
    client = CvpClient()
    try:
        client.connect([module['params']['host']],
                module['params']['username'],
                module['params']['password'],
                protocol=module['params']['protocol'],
                port=module['params']['port'],
                )
    except CvpLoginError, e:
        # Record the failure on the module dict; the caller is expected
        # to inspect module['fail'].
        module['fail']=str(e)
    return client
def device_info(module):
    ''' Get dictionary of device info from CVP.

    Looks the device up by module['params']['device'].  On a miss, a
    'warning' entry is added to the (empty) result instead of failing;
    on a hit, the device's applied configlets are attached under
    'configlets'.

    :param module: Ansible module with parameters and client connection.
    :return: Dict of device info from CVP (possibly just a 'warning' entry).
    '''
    device_info = module['client'].api.get_device_by_name(module['params']['device'])
    if not device_info:
        # get_device_by_name returned an empty dict: record a warning entry.
        device_info['warning']="Device with name '%s' does not exist." % module['params']['device']
    else:
        # Attach the configlets currently applied to this device.
        device_info['configlets'] = module['client'].api.get_configlets_by_netelement_id(device_info['systemMacAddress'])['configletList']
    return device_info
def container_info(module):
    ''' Get dictionary of container info from CVP.

    Looks the container up by module['params']['container'].  On a miss
    (API returns None), a fresh dict with only a 'warning' entry is
    returned; on a hit, the container's configlets are attached under
    'configlets'.

    :param module: Ansible module with parameters and client connection.
    :return: Dict of container info from CVP (possibly just a 'warning' entry).
    '''
    container_info = module['client'].api.get_container_by_name(module['params']['container'])
    if container_info == None:
        container_info = {}
        container_info['warning'] = "Container with name '%s' does not exist." % module['params']['container']
    else:
        container_info['configlets'] = module['client'].api.get_configlets_by_container_id(container_info['key'])
    return container_info
def process_configlet(module, configlet):
    ''' Check the current status of a configlet and apply or remove it.

    If action = add, apply the configlet to the container from the module
    params (or, when a device is given, to that device only).
    If action = delete, remove the configlet from the container (only when
    no device is given) or from the device.

    :param module: Ansible-style dict with 'params' and 'client'.
    :param configlet: Name of Configlet to process.
    :return: Dict of action taken, containers/devices affected and the
        before/after association counts.
    '''
    result = {}
    # Find out if configlet is associated with any containers or devices
    configlet_info = module['client'].api.get_configlet_by_name(configlet)
    result['start_container_count'] = configlet_info["containerCount"]
    result['start_device_count'] = configlet_info["netElementCount"]
    # Get details of container
    if module['params']['container'] != 'None':
        container_data = container_info(module)
        # BUGFIX: container_info() reports failures under the lowercase key
        # 'warning'; the original check for 'Warning' never matched, so the
        # warning dict was silently passed to the CVP API calls below.
        if 'warning' in container_data:
            result['data'] = container_data
            container_data = "None"
        container_list = module['client'].api.get_applied_containers(configlet)['data']
        # Remove configlet from container if action = delete
        if module['params']['action'] == "delete":
            for container in container_list:
                if module['params']['container'] in container['containerName']:
                    if configlet_info["containerCount"] > 0 and module['params']['device'] == 'None':
                        # Remove configlet from specified container in module params.
                        # If none specified then do not remove configlet.
                        # If a device is specified then do not remove configlet.
                        result['action'] = 'delete_from_container'
                        if container_data != "None":
                            result['data'] = module['client'].api.remove_configlets_from_container(
                                "Ansible Removed Configlet", container_data, [configlet_info])
                        else:
                            result['data'] = {'error': 'container not found %s' % module['params']['container']}
        if module['params']['action'] == "add":
            if module['params']['device'] == 'None':
                # Add configlet to specified container in module params.
                # If a device is specified then do not add at container level.
                result['action'] = 'add_to_container'
                if container_data != "None":
                    result['data'] = module['client'].api.apply_configlets_to_container(
                        "Ansible Add Configlet", container_data, [configlet_info])
                else:
                    result['data'] = {'error': 'container not found %s' % module['params']['container']}
    # Get details of device.
    # Remove configlet from specified device in module params; if none
    # specified then do not remove configlet.
    if module['params']['device'] != 'None':
        device_data = device_info(module)
        # BUGFIX: device_info() also uses the lowercase 'warning' key
        # (original code checked "Warning" and never matched).
        if 'warning' in device_data:
            result['data'] = device_data
            device_data = "None"
        # Remove configlet from device if action = delete
        if module['params']['action'] == "delete":
            device_list = module['client'].api.get_applied_devices(configlet)['data']
            for device in device_list:
                # If configlet applied to device then delete it.
                if module['params']['device'] in device['hostName']:
                    if configlet_info["netElementCount"] > 0 and device_data != "None":
                        result['action'] = 'delete_from_device'
                        result['data'] = module['client'].api.remove_configlets_from_device(
                            "Ansible Removed Configlet", device_data, [configlet_info])
        # Add configlet to device if action = add
        if module['params']['action'] == "add" and device_data != "None":
            result['action'] = 'add_to_device'
            result['data'] = module['client'].api.apply_configlets_to_device(
                "Ansible Added Configlet", device_data, [configlet_info], create_task=True)
    # Check to see if any containers or devices have been added or removed
    configlet_info = module['client'].api.get_configlet_by_name(configlet)
    result['end_container_count'] = configlet_info["containerCount"]
    result['end_device_count'] = configlet_info["netElementCount"]
    # NOTE(review): the branches below assume the counts can only change when
    # the matching branch above ran (so container_data / device_data are
    # bound dicts, not unbound or the "None" sentinel). Confirm this holds
    # when CVP is edited concurrently.
    # Added
    if result['end_container_count'] > result['start_container_count']:
        result['added_container'] = container_data['name']
    else:
        result['added_container'] = False
    if result['end_device_count'] > result['start_device_count']:
        result['added_device'] = device_data['fqdn']
    else:
        result['added_device'] = False
    # Removed
    if result['end_container_count'] < result['start_container_count']:
        result['removed_container'] = container_data['name']
    else:
        result['removed_container'] = False
    if result['end_device_count'] < result['start_device_count']:
        result['removed_device'] = device_data['fqdn']
    else:
        result['removed_device'] = False
    return result
#def process_container(module, container, parent):
# ''' Check for existence of a Container and its parent in CVP.
# Returns True if the Containerand Parent exist
# Creates Container if Parent exists but Container doesn't and
# Returns True
# Returns False if the Parent container does not exist and dose not
# create the Container specified.
# '''
# containers = module['client'].api.get_containers()
#
# # Ensure the parent exists
# parent = next((item for item in containers['data'] if
# item['name'] == parent), None)
# if not parent:
# print'Parent container does not exist.'
#
# cont = next((item for item in containers['data'] if
# item['name'] == container), None)
# if not cont:
# module['client'].api.add_container(container, parent['name'],
# parent['key'])
# return True
#
# return False
def config_from_template(module):
    ''' Load the Jinja template named in the module params, resolve its
        undeclared variables against the user-supplied data, and render it.

        Despite the word "fail" in the original description, every error
        path only prints a message (Python 2 print statements) and carries
        on; when no template name is given the function returns False.
    :param module: Ansible-style dict with 'params' (template, data, device,
        container) used to build the template context.
    :return: Rendered template string, or False when no template was given.
    '''
    template = False
    if module['params']['template']:
        # Templates are looked up relative to ./templates of the CWD.
        template_loader = jinja2.FileSystemLoader('./templates')
        env = jinja2.Environment(loader=template_loader,
                                 undefined=jinja2.DebugUndefined)
        template = env.get_template(module['params']['template'])
        if not template:
            print'Could not find template - %s'% module['params']['template']
        # Context available to the template: parsed YAML payload plus the
        # device and container names.
        templateData = {}
        templateData["data"] = yaml.safe_load(module['params']['data'])
        templateData["device"] = module['params']['device']
        templateData["container"] = module['params']['container']
        # Parse the raw template source to discover variables it references
        # that we did not supply, and warn about each one.
        temp_source = env.loader.get_source(env, module['params']['template'])[0]
        parsed_content = env.parse(temp_source)
        temp_vars = list(meta.find_undeclared_variables(parsed_content))
        for var in temp_vars:
            if str(var) not in templateData:
                print 'Template %s requires %s value.'%(module['params']['template'],var)
                print 'Please re-run with %s provided.'%(var)
        try:
            # On success `template` is rebound from the Template object to
            # the rendered string.
            template = template.render(templateData)
        except Exception as templateError:
            print'Template - %s: does not render correctly: %s'%(module['params']['template'],templateError)
    else:
        print'Template - required but not provided'
    return template
def configlet_action(module):
    ''' Act upon the specified Configlet based on the options provided.
        - show - display contents of existing configlet
        - add - update or add new configlet to CVP
        - delete - delete existing configlet (only when unassociated)
    :param module: Ansible-style dict with parameters and client connection.
    :return: [changed, result] where `changed` is a bool and `result` a dict
        of information for the caller.

    The configlet will be named as follows:
        If associated with a device the configlet name will be
        device_configletName if configletName has been provided,
        otherwise device_template (template filename without extension).
        If none of the above apply it will be configletName, and failing
        that the default name "Ansible_Temp" is used.
    '''
    result = dict()
    result['configletAction']=module['params']['action']
    changed = False
    configlet_found = False
    existing_config = 'None'
    # Create Configlet Name (see naming rules in the docstring above)
    if module['params']['device'] != 'None' and module['params']['configletName'] != 'None':
        configlet_name = str(module['params']['device'])+'_'+str(module['params']['configletName'])
    elif module['params']['device'] != 'None' and module['params']['template'] != 'None':
        configlet_name = str(module['params']['device'])+'_'+str(re.split('\.',module['params']['template'])[0])
    elif module['params']['configletName'] != 'None':
        configlet_name = str(module['params']['configletName'])
    else:
        configlet_name = "Ansible_Temp"
    result['configletName'] = configlet_name
    # Find Configlet in CVP if it exists (linear scan over all configlets)
    configlet_list = module['client'].api.get_configlets()['data']
    for configlet in configlet_list:
        if str(configlet['name']) == str(configlet_name):
            configlet_data = module['client'].api.get_configlet_by_name(configlet_name)
            existing_config = configlet_data['config']
            configlet_found = True
    # Create New config if required.
    # NOTE(review): `config` is only bound when a template was supplied; the
    # 'add' branch below and the delete branch at the `end_*_count` check
    # reference `config` and will raise NameError otherwise -- confirm
    # callers always pass a template for those actions.
    if module['params']['template']:
        config = config_from_template(module)
    # Return current config if found and action was show
    if module['params']['action'] == 'show':
        if configlet_found:
            result['currentConfigBlock'] = existing_config
            result['newConfigBlock'] = "No Config - show only existing"
        else:
            result['currentConfigBlock'] = "No Config - Configlet Not Found"
            result['newConfigBlock'] = "No Config - show only existing"
    # Amend or Create Configlet/Config if action was add
    elif module['params']['action'] == 'add':
        if configlet_found:
            result['currentConfigBlock'] = existing_config
            result['newConfigBlock'] = config
            resp = module['client'].api.update_configlet(config, configlet_data['key'],
                                                         configlet_data['name'])
            module['client'].api.add_note_to_configlet(configlet_data['key'],
                                                       "## Managed by Ansible ##")
            result.update(process_configlet(module, configlet_name))
            changed = True
        else:
            result['currentConfigBlock'] = "New Configlet - No Config to return"
            result['newConfigBlock'] = config
            # add_configlet returns the new configlet key, used for the note.
            resp = module['client'].api.add_configlet(configlet_name,config)
            module['client'].api.add_note_to_configlet(resp,
                                                       "## Managed by Ansible ##")
            result.update(process_configlet(module, configlet_name))
            changed = True
    # Delete Configlet if it exists
    elif module['params']['action'] == 'delete':
        if configlet_found:
            result['currentConfigBlock'] = existing_config
            result['newConfigBlock'] = "No Config - Configlet Deleted"
            result.update(process_configlet(module, configlet_name))
            # Refuse to delete while still applied to containers/devices.
            if result['end_container_count'] > 0 or result['end_device_count'] > 0:
                changed = False
                result['newConfigBlock'] = config
            else:
                resp = module['client'].api.delete_configlet(configlet_data['name'], configlet_data['key'])
                changed = True
                result['newConfigBlock'] = "No Config - Configlet Deleted"
        else:
            result['currentConfigBlock'] = "No Config - Configlet Not Found"
            result['newConfigBlock'] = "No Config - Configlet Not Found"
    else:
        result['currentConfigBlock'] = "No Config - Invalid action"
        result['newConfigBlock'] = "No Config - Invalid action"
    # Return Results from operations
    return [changed,result]
def parseArgs():
    """Gather command line options for the script, generate help text and
    perform basic error checking (argparse enforces required options).

    :return: argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description="Create a configlet in CVP")
    parser.add_argument("--username",required=True, help='Username to log into CVP')
    parser.add_argument("--password",required=True, help='Password for CVP user to login')
    parser.add_argument("--host",required=True, help='CVP Host IP or Name')
    parser.add_argument("--protocol", default='HTTPS', help='HTTP or HTTPs')
    # NOTE: the default is an int but values given on the CLI stay strings
    # (no type=int); downstream code accepts both.
    parser.add_argument("--port", default=443 ,help='TCP port Number default 443')
    parser.add_argument("--container",default='None', help='Container to add configlet to')
    parser.add_argument("--parent", default="Tennant", help='Parent container for target container')
    parser.add_argument("--device", default='None', help='Device to add configlet to')
    parser.add_argument("--configletName", default='None', help='Name of Configlet, can be auto-generated')
    # default='None' is inert here because the option is required.
    parser.add_argument("--template",required=True, default='None', help='Jinja2 Template used for Configlet')
    parser.add_argument("--data",required=True, help='Yaml Data File required for Configlet Data')
    parser.add_argument("--action",required=True, default='show', choices=['show', 'add', 'delete'],help='show,add,delete')
    args = parser.parse_args()
    return (args)
def main():
    """ Main entry point for module execution.

    Parses CLI arguments, connects to CVP, runs the requested configlet
    action, then determines whether the resulting configlet is applied at
    device or container level and pretty-prints the outcome.
    """
    module = {}
    #module['params'] = parseArgs()
    module['params'] = vars(parseArgs())
    result = dict(changed=False)
    print "### Connecting to CVP ###"
    module['client'] = connect(module)
    # Before Starting check for existing tasks
    # Pass config and module params to configlet_action to act on configlet
    print "### Creating Configlet ###"
    result['changed'],result['configlet_data'] = configlet_action(module)
    # Check if the configlet is applied to a device or container.
    # Device will take priority over Container.
    configlet_type = "None"
    if module['params']['device'] != "None":
        device_data = device_info(module)
        if 'warning' not in device_data:
            configletList = []
            print "Debug device_data-configlets:"
            for configlet in device_data['configlets']:
                configletList.append(configlet['name'])
            pp2.pprint(configletList)
            for configlet in device_data['configlets']:
                # Check if Configlet is applied to Device
                if configlet['name'] == result['configlet_data']['configletName']:
                    configlet_type = "device"
    if module['params']['container'] != "None" and module['params']['device'] == "None":
        container_data = container_info(module)
        if 'warning' not in container_data:
            configletList = []
            print "Debug container_data-configlets:"
            for configlet in container_data['configlets']['configletList']:
                configletList.append(configlet['name'])
            pp2.pprint(configletList)
            for configlet in container_data['configlets']['configletList']:
                # Check if Configlet is applied to Container
                if configlet['name'] == result['configlet_data']['configletName']:
                    configlet_type = "container"
    result['configlet_data']['configletType'] = configlet_type
    # Check Results of configlet_action and act accordingly.
    # NOTE(review): placeholder branch -- no follow-up action implemented yet.
    if result['changed']:
        pass
    print "\nModule Result:"
    pp4.pprint(result)
    print "\nModule Data:"
    pp4.pprint(module)
| StarcoderdataPython |
1626842 | from setuptools import setup
package_name = 'turtlesim_robotics'
# ament_python package manifest for a ROS 2 turtlesim exercise package.
setup(
    name=package_name,
    version='0.0.0',
    packages=[package_name],
    data_files=[
        # Register the package with the ament resource index.
        ('share/ament_index/resource_index/packages',
            ['resource/' + package_name]),
        ('share/' + package_name, ['package.xml']),
    ],
    install_requires=['setuptools'],
    zip_safe=True,
    maintainer='luqman',
    maintainer_email='<EMAIL>',
    description='TODO: Package description',
    license='TODO: License declaration',
    tests_require=['pytest'],
    entry_points={
        # ros2 run <package> <executable> mappings.
        # NOTE(review): '1_driving_node' starts with a digit and is not a
        # valid Python module identifier, so the 'w2_drive_robot' entry
        # point cannot import it -- confirm the module's real name.
        'console_scripts': [
            'test_node = turtlesim_robotics.test:main',
            'w2_drive_robot = turtlesim_robotics.1_driving_node:main',
            'w3_go_to_goal = turtlesim_robotics.w3_a_go_to_goal:main',
            'w3_proportional_go_to_goal = turtlesim_robotics.w3_b_propertional_controller_goToGoal:main',
            'w4_kinematics_model_circular_error = turtlesim_robotics.w4_kinematics_circular_rotation:main',
        ],
    },
)
| StarcoderdataPython |
6693784 | import pytest
import networkx as nx
from floweaver.ordering import (flatten_bands, unflatten_bands, band_index,
new_node_indices, median_value,
neighbour_positions, fill_unknown, Ordering)
def test_ordering_normalisation():
    """A flat layer list is normalised to one band per layer."""
    o1 = Ordering([['a', 'b'], ['c']])
    o2 = Ordering([[['a', 'b']], [['c']]])
    assert o1 == o2
def test_ordering_insert():
    """insert(layer, band, position, node) returns a new Ordering."""
    a = Ordering([
        [['a', 'b'], ['c']],
        [[], ['d']],
    ])
    assert a.insert(0, 0, 0, 'x') == Ordering([
        [['x', 'a', 'b'], ['c']],
        [[], ['d']],
    ])
    assert a.insert(0, 0, 1, 'x') == Ordering([
        [['a', 'x', 'b'], ['c']],
        [[], ['d']],
    ])
    assert a.insert(1, 1, 1, 'x') == Ordering([
        [['a', 'b'], ['c']],
        [[], ['d', 'x']],
    ])
def test_ordering_remove():
    """remove(node) drops the node, pruning layers left entirely empty."""
    a = Ordering([
        [['a', 'b'], ['c']],
        [[], ['d']],
    ])
    assert a.remove('a') == Ordering([
        [['b'], ['c']],
        [[], ['d']],
    ])
    assert a.remove('d') == Ordering([[['a', 'b'], ['c']], ])
    # The original ordering is untouched (remove is non-mutating).
    assert a == Ordering([
        [['a', 'b'], ['c']],
        [[], ['d']],
    ])
def test_ordering_indices():
    """indices(node) returns (layer, band, position) or raises ValueError."""
    a = Ordering([
        [['a', 'b'], ['c']],
        [[], ['d']],
    ])
    assert a.indices('a') == (0, 0, 0)
    assert a.indices('b') == (0, 0, 1)
    assert a.indices('c') == (0, 1, 0)
    assert a.indices('d') == (1, 1, 0)
    with pytest.raises(ValueError):
        a.indices('e')
def test_flatten_bands():
    """flatten_bands returns the concatenated nodes plus band start offsets,
    and unflatten_bands is its inverse."""
    bands = [['a'], ['b', 'c'], ['d']]
    L, idx = flatten_bands(bands)
    assert L == ['a', 'b', 'c', 'd']
    assert idx == [0, 1, 3]
    bands2 = unflatten_bands(L, idx)
    assert bands2 == bands
def test_band_index():
    """band_index maps a flat position back to its band number."""
    # bands: a | b c | d
    assert band_index([0, 1, 3], 0) == 0
    assert band_index([0, 1, 3], 1) == 1
    assert band_index([0, 1, 3], 2) == 1
    assert band_index([0, 1, 3], 3) == 2
    # Positions past the last band clamp to the final band.
    assert band_index([0, 1, 3], 9) == 2
def test_new_node_indices():
    """new_node_indices picks the (band, position) for a node added to one
    layer based on where its neighbours sit in the adjacent layer."""
    # Simple alignment: a--x, n--y || b--z
    bands0 = [['a'], ['b']]
    bands1 = [['x', 'y'], ['z']]
    G = nx.DiGraph()
    G.add_edges_from([('a', 'x'), ('b', 'z'), ('y', 'n')])
    assert new_node_indices(G, bands0, bands1, 'n') == (0, 1)  # band 0, pos 1
    # Simple alignment: a--x || b--z n--y
    bands0 = [['a'], ['b']]
    bands1 = [['x'], ['z', 'y']]
    G = nx.DiGraph()
    G.add_edges_from([('a', 'x'), ('b', 'z'), ('y', 'n')])
    assert new_node_indices(G, bands0, bands1, 'n') == (1, 1)  # band 1, pos 1
    # Simple alignment: n--y || a--x b--z
    bands0 = [[], ['a', 'b']]
    bands1 = [['y'], ['x', 'z']]
    G = nx.DiGraph()
    G.add_edges_from([('a', 'x'), ('b', 'z'), ('y', 'n')])
    assert new_node_indices(G, bands0, bands1, 'n') == (0, 0)  # band 0, pos 0
    # Another case
    bands0 = [['x']]
    bands1 = [[]]
    G = nx.DiGraph()
    G.add_edge('x', 'n')
    assert new_node_indices(G, bands1, bands0, 'n') == (0, 0)  # band 0, pos 0
    # The 'above'/'below' hint shifts the insertion point around the origin.
    bands0 = [['a', 'target', 'b']]
    bands1 = [['c', 'origin', 'd']]
    G = nx.DiGraph()
    G.add_edges_from([('a', 'c'), ('b', 'd'), ('origin', 'new')])
    assert new_node_indices(G, bands0, bands1, 'new', 'above') == (0, 1)
    assert new_node_indices(G, bands0, bands1, 'new', 'below') == (0, 2)
def test_new_node_indices_when_not_connected():
    """With no neighbours the default position (0, 0) is returned."""
    bands0 = [['x']]
    bands1 = [[]]
    G = nx.DiGraph()
    # no edges
    assert new_node_indices(G, bands1, bands0, 'n') == (0, 0)  # default
def test_median_value():
    """median_value of neighbour positions, with -1 sentinel for 'none'."""
    assert median_value([3, 4, 6]) == 4, 'picks out middle value'
    assert median_value([3, 4]) == 3.5, 'returns average of 2 values'
    assert median_value([]) == -1, 'returns -1 for empty list of positions'
    assert median_value([0, 5, 6, 7, 8, 9]) == 6.75, \
        'weighted median for even number of positions'
def test_neighbour_positions():
    """neighbour_positions lists sorted positions of a node's neighbours
    in the given (flattened) layer order."""
    G, order = _example_two_level()
    assert neighbour_positions(G, order[1], 'n2') == [0, 3, 4], 'n2'
    assert neighbour_positions(G, order[1], 'n0') == [0], 'n0'
    assert neighbour_positions(G, order[0], 's4') == [2, 5], 's4'
    assert neighbour_positions(G, order[0], 's0') == [0, 2, 3], 's0'
def test_fill_unknown():
    """fill_unknown replaces -1 entries with the nearest known value in the
    chosen direction ('above' copies forward, 'below' copies backward)."""
    assert fill_unknown([0, 1, 2], 'above') == [0, 1, 2]
    assert fill_unknown([0, 1, 2], 'below') == [0, 1, 2]
    assert fill_unknown([0, -1, 2], 'above') == [0, 2, 2]
    assert fill_unknown([0, -1, 2], 'below') == [0, 0, 2]
    assert fill_unknown([], 'above') == []
    assert fill_unknown([], 'below') == []
    assert fill_unknown([-1], 'above') == [1]
    assert fill_unknown([-1], 'below') == [0]
    assert fill_unknown([-1, -1], 'above') == [2, 2]
    assert fill_unknown([-1, -1], 'below') == [0, 0]
def _example_two_level():
    """Build the two-layer bipartite fixture from Barth et al. (2004).

    Returns a directed graph whose edges run from the n-layer to the
    s-layer, together with the reference layer ordering.
    """
    edges = [
        ('n0', 's0'),
        ('n1', 's1'),
        ('n1', 's2'),
        ('n2', 's0'),
        ('n2', 's3'),
        ('n2', 's4'),
        ('n3', 's0'),
        ('n3', 's2'),
        ('n4', 's3'),
        ('n5', 's2'),
        ('n5', 's4'),
    ]
    G = nx.DiGraph()
    G.add_edges_from(edges)
    order = [
        ['n%d' % i for i in range(6)],
        ['s%d' % i for i in range(5)],
    ]
    return (G, order)
| StarcoderdataPython |
5176179 | # Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import numpy as np
import popart
import json
import pytest
# `import test_util` requires adding to sys.path
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parent.parent))
import test_util as tu
def reshape(builder, x, outshape):
    # Build an ONNX Reshape of tensor `x` to `outshape` (via a constant
    # shape tensor); returns the new tensor id.
    return builder.reshape_const(builder.aiOnnx, [x], outshape)
def identity(builder, x, _):
    # Build an ONNX Identity of `x`; the shape argument is ignored so the
    # function shares a signature with `reshape` for the parametrized test.
    return builder.aiOnnx.identity([x])
@pytest.mark.parametrize(
    "a, b, target",
    [[reshape, reshape, "Reshape"], [identity, identity, "Identity"],
     [reshape, identity, "Reshape"], [identity, reshape, "Reshape"]])
def test_view_simplify(a, b, target):
    """Chain two view ops (a then b) and check ViewSimplifyPattern collapses
    them into a single op of type `target` that still produces the output.

    The final shape round-trips the data (reshape to [1, H, W] then to
    [W, H]) so values can be compared element-wise after flattening.
    """
    d1 = np.random.randn(10, 20).astype(np.float32)
    builder = popart.Builder()
    d = builder.addInputTensor("FLOAT", d1.shape)
    o = a(builder, d, [1, *d1.shape])
    o = b(builder, o, [*reversed(d1.shape)])
    opts = popart.SessionOptions()
    # ViewSimplifyPattern only runs when outlining
    opts.enableOutlining = True
    # Set the threshold high so nothing actually gets outlined.
    # This makes it easier to parse the IR.
    opts.outlineThreshold = 100000
    sess = popart.InferenceSession(fnModel=builder.getModelProto(),
                                   deviceInfo=tu.create_test_device(),
                                   dataFlow=popart.DataFlow(1, [o]))
    sess.prepareDevice()
    anchors = sess.initAnchorArrays()
    stepio = popart.PyStepIO({d: d1}, anchors)
    sess.weightsFromHost()
    sess.run(stepio)
    # Inspect the serialized IR: exactly one op of the expected type must
    # produce the anchored output tensor.
    ir = json.loads(sess._serializeIr(popart.IrSerializationFormat.JSON))
    def outputs_o(op):
        return o in map(lambda t: t["name"], op["outputs"])
    def matches_target(op):
        return target in op["type"] and outputs_o(op)
    assert len(list(filter(matches_target, ir["maingraph"]))) == 1
    # The data itself is unchanged by the pure view ops.
    assert np.allclose(anchors[o].flatten(), d1.flatten())
| StarcoderdataPython |
class StockSpanner:
    """Online stock-span tracker (LeetCode 901).

    Keeps a monotonic stack of (price, span) pairs with strictly
    decreasing prices; each call to next() runs in amortized O(1).
    """

    def __init__(self):
        # Stack of (price, accumulated span) pairs, prices strictly decreasing.
        self.stack = []

    def next(self, price: int) -> int:
        """Record today's price and return its span (number of consecutive
        days, ending today, with price <= today's)."""
        span = 1
        while self.stack:
            prev_price, prev_span = self.stack[-1]
            if prev_price > price:
                break
            # Today's span absorbs every cheaper-or-equal run below it.
            span += prev_span
            self.stack.pop()
        self.stack.append((price, span))
        return span
class StockSpanner2:
    """Brute-force stock-span tracker kept for comparison (O(n) per call).

    Stores the full price history and scans backwards; a running index of
    the maximum price short-circuits the all-time-high case.
    """

    def __init__(self):
        self.idx = -1   # index of the most recent price
        self.max = -1   # index of the running maximum price
        self.arr = []   # full price history

    def next(self, price: int) -> int:
        """Record today's price and return its span by backward scan."""
        self.idx += 1
        if not self.arr:
            # First observation: trivially a span of 1.
            self.max = self.idx
            count = 1
        elif price >= self.arr[self.max]:
            # New all-time high covers the whole history.
            self.max = self.idx
            count = len(self.arr) + 1
        else:
            # Walk backwards until a strictly greater price is found.
            count = 1
            pos = len(self.arr) - 1
            while pos >= 0 and self.arr[pos] <= price:
                count += 1
                pos -= 1
        self.arr.append(price)
        return count
1688062 | import os
from os import listdir
from tqdm import tqdm
import json
from collections import Counter
import itertools
import random
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
import tensorflow as tf
import tensorflow_hub as hub
import networkx as nx
def get_papers_dict(areas:list, year_start:int, year_end:int, path_2_unpacked:str = './unpacked') -> dict:
    """
    Return dictionary of papers in form id: [properties].

    Scans every file in `path_2_unpacked` (one JSON record per line) and
    keeps papers that match any of `areas`, fall within the inclusive year
    range, and have non-empty citations, doi, abstract, title and journal.

    NOTE(review): assumes each record's "fieldsOfStudy", "doi",
    "paperAbstract", "title" and "journalName" are non-None when present --
    a None value would raise here; confirm against the corpus.
    """
    papers_dict = {}
    files = [f for f in listdir(path_2_unpacked)]
    areas = set(areas)
    for j in tqdm(range(len(files))):
        with open(path_2_unpacked + '/' + files[j]) as f:
            lines = f.readlines()
        # NOTE(review): cs_papers_local is rebuilt per file and discarded.
        cs_papers_local = []
        for i in (range(len(lines))):
            paper = json.loads(lines[i])
            # Records without a year cannot be range-filtered; skip them.
            if not paper["year"]:
                continue
            if len(set(paper["fieldsOfStudy"]).intersection(areas)) > 0 \
            and paper["year"] >= year_start \
            and paper["year"] <= year_end \
            and len(paper["inCitations"]) > 0 \
            and len(paper["outCitations"]) > 0 \
            and len(paper["doi"]) > 0 \
            and len(paper["paperAbstract"]) > 0 \
            and len(paper["title"]) > 0 \
            and len(paper["journalName"]) > 0:
                papers_dict[paper["id"]] = paper
                cs_papers_local.append(paper)
    return papers_dict
def get_edge_list(papers_dict: dict) -> list:
    """Build the citation edge list restricted to papers in `papers_dict`.

    Each edge is a [citing_id, cited_id] pair; citations pointing outside
    the corpus are dropped.
    """
    edges = []
    for source_id in tqdm(papers_dict):
        for cited_id in papers_dict[source_id]['outCitations']:
            if cited_id in papers_dict:
                edges.append([source_id, cited_id])
    return edges
def get_data(papers_dict:dict, edge_list:list, dataset_name:str) -> list:
    """Assemble the co-authorship and citation tables and persist each as a
    CSV under ./processed_data with a `dataset_name` prefix.

    :return: [papers_features, authors_features, authors_papers,
        authors_edges, papers_edges] as pandas DataFrames.
    """
    # Count of author entries that had zero or multiple ids (and were skipped).
    no_id_counter = 0
    edge_dict = {} # keys -- edge list (author_1_id, author_2_id), values -- corresponding papers ids
    authors_dict = {} # keys -- author_id, values -- [name, set of paper ids]
    authors_interests = {} # keys -- author_id, values -- [author_id, set of areas]
    for paper_id in tqdm(papers_dict):
        paper = papers_dict[paper_id]
        # NOTE(review): this expression's result is unused -- looks like a
        # leftover; the real pairing happens via combinations() below.
        itertools.permutations(paper["authors"], 2)
        ids = []
        for author in paper["authors"]:
            # Only authors with exactly one disambiguated id are kept.
            if len(author['ids']) == 1:
                author_id = author['ids'][0]
                ids.append(author_id)
                areas = paper['fieldsOfStudy']
                if author_id in authors_dict:
                    authors_dict[author_id][1].add(paper_id)
                    for area in areas:
                        authors_interests[author_id][1].add(area)
                else:
                    authors_dict[author_id] = [author['name'], {paper_id}]
                    authors_interests[author_id] = [author_id, set()]
                    for area in areas:
                        authors_interests[author_id][1].add(area)
            else:
                no_id_counter += 1
        # Every unordered pair of co-authors becomes an edge keyed by the pair.
        authors_pairs = list(itertools.combinations(ids, 2))
        for i in range(len(authors_pairs)):
            if authors_pairs[i] in edge_dict:
                edge_dict[authors_pairs[i]].append(paper_id)
            else:
                edge_dict[authors_pairs[i]] = [paper_id]
    authors_interests_list = list(authors_interests.values())
    df = pd.DataFrame(np.array(authors_interests_list), columns = ["author_id", "interests"])
    # Create the output directory; ignore "already exists" (bare except also
    # hides permission errors -- NOTE(review)).
    try:
        os.mkdir("processed_data")
    except:
        pass
    papers_df = pd.DataFrame(list(papers_dict.values()))
    papers_features = papers_df.drop(["inCitations", "outCitations"], axis = 1)
    papers_features.to_csv("processed_data/" + dataset_name + "_papers_features.csv")
    # One-hot encode each author's interest areas.
    # NOTE(review): positional `axis` in drop() is deprecated in newer pandas.
    authors_features = df.drop('interests', 1).join(df.interests.str.join('|').str.get_dummies())
    authors_features.to_csv("processed_data/" + dataset_name + "_authors_features.csv")
    edge_dict_values = list(edge_dict.values())
    authors_papers = pd.DataFrame(np.array(edge_dict_values), columns = ["papers_ids"])
    authors_papers.to_csv("processed_data/" + dataset_name + "_authors_edges_papers.csv")
    edge_dict_keys = list(edge_dict.keys())
    authors_edges = pd.DataFrame(edge_dict_keys, columns = ["from", "to"])
    authors_edges.to_csv("processed_data/" + dataset_name + "_authors_edge_list.csv")
    papers_edges = pd.DataFrame(edge_list, columns = ["from", "to"])
    papers_edges.to_csv("processed_data/" + dataset_name + "_papers_edge_list.csv")
    return [papers_features, authors_features, authors_papers, authors_edges, papers_edges]
def parse_global_dataset(areas, year_start, year_end, dataset_name: str = "test_dataset") -> list:
    """Run the full parsing pipeline: collect matching papers, derive the
    citation edge list, then build and persist the dataset tables."""
    papers = get_papers_dict(areas, year_start, year_end)
    return get_data(papers, get_edge_list(papers), dataset_name)
def preprocessing(global_dataset:list, dataset_name:str = "test_dataset") -> list:
    """Index the papers, embed abstracts with the Universal Sentence Encoder
    (downloaded from TF Hub -- requires network access), compress the
    embeddings to 32 dims with PCA, and re-index the citation edge list.

    :param global_dataset: output of parse_global_dataset().
    :return: [authors_edges_papers_indices, compressed_paper_features,
        papers_edge_list_indexed_np] (all also written under ./processed_data).
    """
    papers_features, authors_features, authors_papers, authors_edges, papers_edges = global_dataset
    # NOTE(review): `authors` is never used below.
    authors = []
    # Map raw paper id -> dense integer index (row position).
    papers_id = papers_features["id"]
    id_to_index_id = {papers_id[i]: i for i in tqdm(range(len(papers_id)))}
    authors_papers_unzipped = authors_papers["papers_ids"]
    # Translate each collaboration's paper-id list into index lists.
    authors_papers_indexed = [
        [
            id_to_index_id[authors_papers_unzipped[i][j]]
            for j in range(len(authors_papers_unzipped[i]))
        ]
        for i in tqdm(range(len(authors_papers_unzipped)))
    ]
    # Stringified form (e.g. "[1, 2]") so the list survives the CSV round-trip.
    authors_papers_indexed_str = [
        str(authors_papers_indexed[i]) for i in tqdm(range(len(authors_papers_indexed)))
    ]
    authors_edges_papers_indices = pd.DataFrame(authors_papers_indexed_str, columns=["papers_indices"])
    authors_edges_papers_indices.to_csv(
        "processed_data/" + dataset_name + "_authors_edges_papers_indices.csv"
    )
    # NOTE(review): `df` is computed but never used afterwards.
    df = papers_features[
        papers_features[
            ["id", "title", "paperAbstract", "year", "journalName", "fieldsOfStudy"]
        ].notna()
    ]
    papers_features_abstracts = list(papers_features["paperAbstract"])
    papers_features_abstracts = [
        str(papers_features_abstracts[i]) for i in range(len(papers_features_abstracts))
    ]
    # NOTE(review): this fillna runs after the abstracts were extracted, so
    # NaNs in the local list were already stringified to "nan" -- confirm
    # which placeholder is intended.
    papers_features["paperAbstract"] = papers_features["paperAbstract"].fillna(
        "No abstract provided"
    )
    model = hub.load("https://tfhub.dev/google/universal-sentence-encoder/4")
    # Embed abstracts one at a time (512-d vectors per the USE model).
    vectorized_abstracts = []
    for i in tqdm(range(len(papers_features_abstracts))):
        abstract = papers_features_abstracts[i]
        vectorized_abstracts.append(model([abstract])[0])
    vectorized_abstracts_list = [
        vectorized_abstracts[i].numpy() for i in tqdm(range(len(vectorized_abstracts)))
    ]
    vectorized_abstracts_df = pd.DataFrame(vectorized_abstracts_list)
    print('PCA started its work.')
    pca = PCA(n_components=32)
    pca_result = pca.fit_transform(vectorized_abstracts_df)
    print('PCA ended its work.')
    compressed_paper_features = pd.DataFrame(pca_result)
    compressed_paper_features.to_csv(
        "processed_data/" + dataset_name + "_papers_features_vectorized_compressed_32.csv"
    )
    # Rewrite the citation edge list from raw ids to dense indices (mutates
    # the underlying numpy view in place).
    papers_edge_list_indexed = papers_edges.values
    for i in tqdm(range(len(papers_edge_list_indexed))):
        pair = papers_edge_list_indexed[i]
        for j in range(len(pair)):
            pair[j] = id_to_index_id[pair[j]]
    papers_edge_list_indexed_np = pd.DataFrame(papers_edge_list_indexed)
    papers_edge_list_indexed_np.to_csv(
        "processed_data/" + dataset_name + "_papers_edge_list_indexed.csv"
    )
    return [authors_edges_papers_indices, compressed_paper_features, papers_edge_list_indexed_np]
def extract_subgraph(global_dataset:list, processed_data:list, subgraph_name:str, nodes_number:int = 1000):
    """Sample a connected author subgraph of at least `nodes_number` nodes
    via depth-limited DFS from a random source, keep only the papers those
    collaborations reference, and write both edge lists plus the edge->paper
    mapping under ./datasets/<subgraph_name>/.
    """
    def get_nx_graph(edge_list):
        # Build a DiGraph and a (u, v) -> row-index map for the edge table.
        aev = edge_list.values
        edge_to_index = {(aev[i][0], aev[i][1]): i for i in tqdm(range(len(aev)))}
        edges_list_t = list(edge_to_index.keys())
        return edge_to_index, nx.DiGraph((x, y) for (x, y) in tqdm(Counter(edges_list_t)))
    def get_subraph(N, source: int, depth_limit: int = 4):
        # Induced subgraph over nodes reachable within `depth_limit` hops.
        nodes = list(nx.dfs_preorder_nodes(N, source=source, depth_limit=depth_limit))
        H = N.subgraph(nodes)
        return H
    authors_edges_papers, compressed_paper_features, papers_edge_list_indexed_np = processed_data
    papers_features, authors_features, authors_papers, authors_edges, papers_edges = global_dataset
    edge_to_index_A, A = get_nx_graph(authors_edges)
    edge_to_index_G, G = get_nx_graph(papers_edge_list_indexed_np)
    # If the column came back from CSV as strings, re-parse "[1, 2]" lists.
    try:
        authors_edges_papers['papers_indices'] = authors_edges_papers['papers_indices'].apply(lambda x: x.replace('[', '').replace(']', '').split(','))
    except:
        pass
    # Grow the DFS depth until a random source yields a big-enough subgraph.
    # NOTE(review): the inner retry loop overwrites sub_A each iteration and
    # its break/flag logic looks inverted -- confirm the sampling intent.
    depth_limit, ready_flag, sub_A = 3, 0, ""
    for i in range(depth_limit, 15):
        if ready_flag == 0:
            for i in range(10):
                source = random.choice(list(A.nodes()))
                sub_A = get_subraph(A, source, depth_limit=i)
                if len(sub_A.nodes) >= nodes_number:
                    ready_flag = 1
                else:
                    break
    print(len(sub_A.nodes), len(sub_A.edges))
    # Collect, per kept collaboration edge, the list of shared paper indices.
    sub_A_edges = list(sub_A.edges())
    authors_edges_papers_sub = [
        authors_edges_papers["papers_indices"][edge_to_index_A[sub_A_edges[i]]]
        for i in tqdm(range(len(sub_A_edges)))
    ]
    authors_edges_papers_sub_flat = [
        int(item) for subarray in authors_edges_papers_sub for item in subarray
    ]
    unique_papers = list(set(authors_edges_papers_sub_flat))
    # Papers referenced by the sampled edges but absent from the citation graph.
    papers_to_delete_initial = list(set(unique_papers) - set(G.nodes))
    G_sub = G.subgraph(unique_papers)
    # NOTE(review): G_sub_nodes is unused.
    G_sub_nodes = list(G_sub.nodes())
    papers_out_lcc = papers_to_delete_initial
    # Strip those papers from each edge's paper list; edges left empty are
    # remembered for removal from the author subgraph.
    collabs_indices_to_delete = []
    for i in tqdm(range(len(papers_out_lcc))):
        for j in range(len(authors_edges_papers_sub)):
            # if str(1745104) in authors_edges_papers_sub[j]:
            #     jj.append(j)
            if str(papers_out_lcc[i]) in authors_edges_papers_sub[j]:
                del authors_edges_papers_sub[j][
                    authors_edges_papers_sub[j].index(str(papers_out_lcc[i]))
                ]
                if len(authors_edges_papers_sub[j]) == 0:
                    collabs_indices_to_delete.append(j)
    A_sub_clear = nx.DiGraph(sub_A)
    A_sub_clear_edges = list(A_sub_clear.edges())
    for i in tqdm(range(len(collabs_indices_to_delete))):
        edge = A_sub_clear_edges[collabs_indices_to_delete[i]]
        if edge not in A_sub_clear_edges:
            print("error")
        A_sub_clear.remove_edge(*edge)
    # NOTE(review): this filtered list is recomputed below from the cleaned
    # edge set and never used -- looks like leftover scaffolding.
    authors_edges_papers_sub_clear = [
        authors_edges_papers_sub[i]
        for i in range(len(authors_edges_papers_sub))
        if len(authors_edges_papers_sub[i]) > 0
    ]
    A_sub_clear_edges_check = list(A_sub_clear.edges())
    # NOTE(review): the next two list comprehensions are exact duplicates;
    # the second assignment is redundant.
    authors_edges_papers_sub_2 = [
        authors_edges_papers["papers_indices"][edge_to_index_A[A_sub_clear_edges_check[i]]]
        for i in tqdm(range(len(A_sub_clear_edges_check)))
    ]
    authors_edges_papers_sub_2 = [
        authors_edges_papers["papers_indices"][edge_to_index_A[A_sub_clear_edges_check[i]]]
        for i in tqdm(range(len(A_sub_clear_edges_check)))
    ]
    authors_edges_papers_sub_flat_2 = [
        int(item) for subarray in authors_edges_papers_sub_2 for item in subarray
    ]
    # NOTE(review): unique_papers_2 is unused; G_sub_clear is the uncleaned
    # G_sub under a new name -- confirm whether it should be re-induced.
    unique_papers_2 = list(set(authors_edges_papers_sub_flat_2))
    G_sub_clear = G_sub
    # Create the output directory tree, ignoring pre-existing dirs.
    try:
        os.mkdir('datasets')
    except:
        pass
    try:
        os.mkdir('datasets/' + subgraph_name)
    except:
        pass
    nx.write_edgelist(
        G_sub_clear,
        "datasets/" + subgraph_name + "/" + subgraph_name + "_" + "papers.edgelist",
    )
    nx.write_edgelist(
        A_sub_clear,
        "datasets/" + subgraph_name + "/" + subgraph_name + "_" + "authors.edgelist",
    )
    authors_edges_papers.to_csv(
        "datasets/"
        + subgraph_name
        + "/"
        + subgraph_name
        + "_"
        + "authors_edges_papers_indices.csv"
    )
8087373 | <filename>auditlog/admin.py
from __future__ import absolute_import
from __future__ import unicode_literals
import json
from django.contrib import admin
from django.contrib.admin.util import flatten_fieldsets
from django import forms
from django.contrib.auth import get_user_model
from django.utils.html import escape
from django.utils.safestring import mark_safe
from auditlog.models import ModelChange
class DictionaryDisplayWidget(forms.Widget):
    """Read-only widget that renders a JSON-encoded dict as an HTML table.

    The form field stores the dictionary as a JSON string; this widget
    decodes it and emits one <tr> per key/value pair with both cells
    HTML-escaped.
    """

    def render(self, name, value, attrs=None):
        value = json.loads(value)
        # Bug fix: the original fell off the end (returning None) for an
        # empty/null payload, which the admin template renders as the
        # literal string "None".  Render nothing instead.
        if not value:
            return ''
        rows = []
        for key, val in value.items():
            rows.append('<tr><td>{key}</td><td>{val}</td></tr>'.format(
                key=escape(key),
                val=escape(val),
            ))
        return mark_safe("<table border='1'>{}</table>".format('\n'.join(rows)))
class AuditChangeAdminForm(forms.ModelForm):
    """Admin form for ModelChange audit rows.

    The two JSON dictionary fields (pre_change_state, changes) are shown as
    HTML tables via DictionaryDisplayWidget instead of raw JSON text.
    """

    timestamp = forms.DateTimeField()
    pre_change_state = forms.Field(widget=DictionaryDisplayWidget)
    changes = forms.Field(widget=DictionaryDisplayWidget)

    class Meta:
        model = ModelChange
        fields = ('timestamp', 'user', 'remote_addr', 'remote_host',
            'model_type', 'model_pk', 'action', 'pre_change_state', 'changes',)
class ReadOnlyAdminMixin(object):
    """Make a ModelAdmin completely read-only.

    Every model field is reported as read-only and both the "add" and
    "delete" admin actions are disabled.

    NOTE: the dict display widget doesn't work if the field is readonly.
    """

    def get_readonly_fields(self, *args, **kwargs):
        # Explicitly declared fieldsets win; otherwise fall back to every
        # concrete and many-to-many field declared on the model.
        if self.declared_fieldsets:
            return flatten_fieldsets(self.declared_fieldsets)
        names = {field.name for field in self.opts.local_fields}
        names.update(field.name for field in self.opts.local_many_to_many)
        return list(names)

    def has_delete_permission(self, *args, **kwargs):
        # Audit entries must never be deleted through the admin.
        return False

    def has_add_permission(self, *args, **kwargs):
        # Audit entries are created programmatically, never by hand.
        return False
class AuditAdmin(ReadOnlyAdminMixin, admin.ModelAdmin):
    """Read-only admin for browsing audit-log entries (ModelChange)."""

    list_display = ('timestamp', 'action', 'model_type', 'model_pk', 'user', 'remote_addr', 'remote_host')
    list_filter = ('model_type', 'action', 'user',)
    # Newest entries first.
    ordering = ('-timestamp',)
    date_hierarchy = 'timestamp'
    form = AuditChangeAdminForm
    fieldsets = (
        (None, {
            'fields': ('action', 'user', 'timestamp', 'remote_addr', 'remote_host'),
        }),
        ('Model details', {
            'fields': ('model_pk', 'model_type',),
        }),
        ('Change Details', {
            # Collapsed by default: the JSON payloads can be large.
            'classes': ('collapse',),
            'fields': ('pre_change_state', 'changes',),
        }),
    )


admin.site.register(ModelChange, AuditAdmin)
| StarcoderdataPython |
3308067 | <filename>Server/covid.py
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
import time
from bs4 import BeautifulSoup
from datetime import date, datetime
from selenium.webdriver.common.keys import Keys
from html5lib import html5parser
def main(url, driver):
    """Scrape the JHU COVID-19 ArcGIS dashboard with a Selenium driver.

    Loads *url*, waits for the dashboard to render, expands every visible
    "more" link, then parses the page source with BeautifulSoup.

    Returns a dict with the headline confirmed/recovered/death figures, the
    raw per-region text entries ('payload') and every headline figure
    ('totalFigure'); on timeout returns a dict with only an 'error' key.
    """
    driver.get(url)
    timeout = 60
    try:
        # 'ember1049' is an auto-generated Ember component id on the dashboard
        # page — presumably stable for this deployment, but fragile; re-check
        # it if the scrape starts timing out.
        element_present = EC.presence_of_element_located((By.ID, 'ember1049'))
        WebDriverWait(driver, timeout).until(element_present)
    except TimeoutException:
        print("Timed out waiting for page to load")
        return {
            'error':'page load needs more time :( \n please try again'
        }
    finally:
        print("Page loaded")
    # Expand every visible "more" link so the full region list is present in
    # the DOM before parsing.
    more_buttons = driver.find_elements_by_class_name("moreLink")
    for x in range(len(more_buttons)):
        if more_buttons[x].is_displayed():
            driver.execute_script("arguments[0].click();", more_buttons[x])
            # Brief pause to let the expanded panel render.
            time.sleep(1)
    page_source = driver.page_source
    soup = BeautifulSoup(page_source, 'html.parser')
    # The headline counters are rendered as SVG <text> nodes, e.g.
    # <text vector-effect="non-scaling-stroke" ...>685,623</text>
    total_confirmed = soup.find_all('text', attrs={'vector-effect':'non-scaling-stroke'})
    # Positional indexing into the SVG text nodes: the dashboard layout
    # determines which slot holds which figure — TODO confirm these indices
    # still match if the page layout changes.
    confirmed = total_confirmed[1].text
    # NOTE: 'recovere' (sic) is part of the returned dict's interface;
    # callers rely on this key, so the misspelling is kept.
    recovere = total_confirmed[-1].text
    death = total_confirmed[-3].text
    total_fig = []
    for item in total_confirmed:
        total_fig.append(item.text)
    payload = []
    # Left-hand panel: one span per country/region entry.
    list_of_confirmed_country = soup.find_all('span', attrs={'class': 'flex-horizontal feature-list-item ember-view'})
    for data in list_of_confirmed_country[:]:
        item = data.text.strip()
        payload.append(str(item))
    return {
        'confirmed': confirmed,
        'recovere': recovere,
        'death': death,
        'payload': payload,
        'totalFigure': total_fig
    }
# print('final response', response)
# print('----- now print confirmed Province/State/Dependency ---')
# for data in list_of_confirmed_country[:]:
# item = data.text.strip()
# if (len(item.split()) > 2 and item.split()[1] == 'confirmed'):
# print('-----', item)
#
# print('----- now print death Province/State/Dependency ---')
# for data in list_of_confirmed_country[:]:
# item = data.text.strip()
# if (len(item.split()) > 2 and item.split()[1] == 'deaths'):
# print('-----', item)
#
# print('----- now print Recovered Province/State/Dependency ---')
# for data in list_of_confirmed_country[:]:
# item = data.text.strip()
# if (len(item.split()) > 2 and item.split()[1] == 'recovered'):
# print('-----', item)
# print('--- list of confirmed country ---',list_of_confirmed_country[0])
# driver.quit()
#
# options = webdriver.ChromeOptions()
# options.add_argument('--ignore-certificate-errors')
# options.add_argument('--incognito')
# options.add_argument('--headless')
# options.add_argument('--lang=en-us')
# options.add_argument('--log-level=3')
#
# driver = webdriver.Chrome("./chromedriver", chrome_options=options)
#
# url = "https://gisanddata.maps.arcgis.com/apps/opsdashboard/index.html#/bda7594740fd40299423467b48e9ecf6"
# main(url, driver)
| StarcoderdataPython |
3588196 | <reponame>Lindronics/honours_project_dissertation
import json
import cv2
import numpy as np
import matplotlib.pyplot as plt
from yolov3 import get_anchors, bbox_iou
class Dataset():
    """Iterable batch loader for YOLOv3-style training/evaluation.

    Annotation format: one image per line — "<rgb_path> x1,y1,x2,y2,cls ...".
    Iterating yields one batch per step: the image tensor plus ground-truth
    labels encoded for the three YOLO output scales (small/medium/large
    strides).  The annotation list is reshuffled at the end of each epoch.
    """

    def __init__(self, config, training=False):
        self.input_size = config["TRAINING"]["INPUT_SIZE"]
        self.channels = config["TRAINING"]["CHANNELS"]
        self.num_classes = config["NETWORK"]["NUM_CLASSES"]
        self.anchors_per_scale = config["NETWORK"]["ANCHORS_PER_SCALE"]
        self.strides = np.array(config["NETWORK"]["STRIDES"])
        # Grid resolution of each detection head (input size / stride).
        self.output_sizes = self.input_size // self.strides
        self.max_bounding_box_per_scale = 150
        self.anchors = get_anchors("anchors.txt")
        self.load_metadata(config["TRAINING"]["ANNOTATIONS_DIR"])
        # For iterator
        self.current_batch = 0
        self.batch_size = config["TRAINING"]["BATCH_SIZE"] if training else config["TEST"]["BATCH_SIZE"]
        # Incomplete trailing batches are dropped by the floor division.
        self.batches = len(self.annotations) // self.batch_size

    def load_metadata(self, path):
        """ Loads annotations and file paths """
        self.paths = []
        self.annotations = []
        with open(path, "r") as f:
            for line in f:
                line = line.strip().split(" ")
                rgb_path = line[0]
                if len(line) > 1:
                    # Each box token is "x1,y1,x2,y2,class" (integers).
                    bounding_boxes = np.array([list(map(int, box.split(','))) for box in line[1:]])
                else:
                    bounding_boxes = []
                self.annotations.append({
                    "rgb_path": rgb_path,
                    "bounding_boxes": bounding_boxes,
                })

    def __iter__(self):
        return self

    def __len__(self):
        # Number of batches per epoch, not number of images.
        return self.batches

    def __next__(self):
        """ Next data item (one full batch of images and encoded labels). """
        # Initialize batch tensors
        batch_image = np.zeros((self.batch_size, self.input_size, self.input_size, self.channels), dtype=np.float32)
        batch_label_sbbox = np.zeros((self.batch_size, self.output_sizes[0], self.output_sizes[0],
                                      self.anchors_per_scale, 5 + self.num_classes), dtype=np.float32)
        batch_label_mbbox = np.zeros((self.batch_size, self.output_sizes[1], self.output_sizes[1],
                                      self.anchors_per_scale, 5 + self.num_classes), dtype=np.float32)
        batch_label_lbbox = np.zeros((self.batch_size, self.output_sizes[2], self.output_sizes[2],
                                      self.anchors_per_scale, 5 + self.num_classes), dtype=np.float32)
        batch_sbboxes = np.zeros((self.batch_size, self.max_bounding_box_per_scale, 4), dtype=np.float32)
        batch_mbboxes = np.zeros((self.batch_size, self.max_bounding_box_per_scale, 4), dtype=np.float32)
        batch_lbboxes = np.zeros((self.batch_size, self.max_bounding_box_per_scale, 4), dtype=np.float32)
        # Get items in batch
        if self.current_batch < self.batches:
            for i in range(self.batch_size):
                idx = self.current_batch * self.batch_size + i
                # Load and rescale image and bounding boxes
                rgb_path = self.annotations[idx]["rgb_path"]
                bounding_boxes = self.annotations[idx]["bounding_boxes"]
                image, scale = self.load_image(rgb_path)
                # NOTE(review): preprocess_bounding_boxes returns None for an
                # image without boxes; preprocess_true_boxes then iterates it —
                # verify images without annotations cannot reach this path.
                bounding_boxes = self.preprocess_bounding_boxes(bounding_boxes, scale)
                # Add to batch tensors
                label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes = self.preprocess_true_boxes(bounding_boxes)
                batch_image[i, ...] = image
                batch_label_sbbox[i, ...] = label_sbbox
                batch_label_mbbox[i, ...] = label_mbbox
                batch_label_lbbox[i, ...] = label_lbbox
                batch_sbboxes[i, ...] = sbboxes
                batch_mbboxes[i, ...] = mbboxes
                batch_lbboxes[i, ...] = lbboxes
            self.current_batch += 1
            return batch_image, ((batch_label_sbbox, batch_sbboxes), (batch_label_mbbox, batch_mbboxes), (batch_label_lbbox, batch_lbboxes))
        # Shuffle dataset and reset iterator
        else:
            self.current_batch = 0
            np.random.shuffle(self.annotations)
            raise StopIteration

    def load_image(self, path):
        """ Loads an image, normalizes to [0, 1] and resizes to the square
        network input size.  Returns (image, (h_scale, w_scale)). """
        image = cv2.imread(path) / 255
        h, w, _ = image.shape
        image = cv2.resize(image, (self.input_size, self.input_size))
        nh, nw, _ = image.shape
        return image, (nh/h, nw/w)

    def preprocess_bounding_boxes(self, bounding_boxes, scale):
        """ Rescales bounding boxes according to image scaling factor """
        h_scale, w_scale = scale
        # NOTE(review): comparing a numpy array with == [] is unreliable
        # (elementwise comparison / deprecation warning); this only behaves
        # as intended when bounding_boxes is the plain [] stored by
        # load_metadata — confirm no ndarray ever arrives empty here.
        if bounding_boxes == []:
            return None
        # Columns 0/2 are x coordinates, 1/3 are y coordinates.
        bounding_boxes[:, [0, 2]] = bounding_boxes[:, [0, 2]] * w_scale
        bounding_boxes[:, [1, 3]] = bounding_boxes[:, [1, 3]] * h_scale
        return bounding_boxes

    def preprocess_true_boxes(self, bounding_boxes):
        """ Encodes ground-truth boxes into per-scale YOLO label tensors.
        From https://github.com/YunYang1994/TensorFlow2.0-Examples/tree/master/4-Object_Detection/YOLOV3 """
        label = [np.zeros((self.output_sizes[i], self.output_sizes[i], self.anchors_per_scale,
                           5 + self.num_classes)) for i in range(3)]
        bounding_boxes_xywh = [np.zeros((self.max_bounding_box_per_scale, 4)) for _ in range(3)]
        bbox_count = np.zeros((3,))
        for bbox in bounding_boxes:
            bbox_coor = bbox[:4]
            bbox_class_ind = bbox[4]
            onehot = np.zeros(self.num_classes, dtype=np.float)
            onehot[bbox_class_ind] = 1.0
            # Label smoothing: mix the one-hot target with a uniform prior.
            uniform_distribution = np.full(self.num_classes, 1.0 / self.num_classes)
            deta = 0.01
            smooth_onehot = onehot * (1 - deta) + deta * uniform_distribution
            # Convert corner coordinates (x1,y1,x2,y2) to center/size (x,y,w,h).
            bbox_xywh = np.concatenate([(bbox_coor[2:] + bbox_coor[:2]) * 0.5, bbox_coor[2:] - bbox_coor[:2]], axis=-1)
            bbox_xywh_scaled = 1.0 * bbox_xywh[np.newaxis, :] / self.strides[:, np.newaxis]
            iou = []
            exist_positive = False
            for i in range(3):
                # Candidate anchors centered on the box's grid cell.
                anchors_xywh = np.zeros((self.anchors_per_scale, 4))
                anchors_xywh[:, 0:2] = np.floor(bbox_xywh_scaled[i, 0:2]).astype(np.int32) + 0.5
                anchors_xywh[:, 2:4] = self.anchors[i]
                iou_scale = bbox_iou(bbox_xywh_scaled[i][np.newaxis, :], anchors_xywh)
                iou.append(iou_scale)
                # Anchors with IoU above this threshold become positives.
                iou_mask = iou_scale > 0.3
                if np.any(iou_mask):
                    xind, yind = np.floor(bbox_xywh_scaled[i, 0:2]).astype(np.int32)
                    label[i][yind, xind, iou_mask, :] = 0
                    label[i][yind, xind, iou_mask, 0:4] = bbox_xywh
                    label[i][yind, xind, iou_mask, 4:5] = 1.0
                    label[i][yind, xind, iou_mask, 5:] = smooth_onehot
                    bbox_ind = int(bbox_count[i] % self.max_bounding_box_per_scale)
                    bounding_boxes_xywh[i][bbox_ind, :4] = bbox_xywh
                    bbox_count[i] += 1
                    exist_positive = True
            if not exist_positive:
                # No anchor crossed the IoU threshold: fall back to the single
                # best-matching anchor across all three scales.
                best_anchor_ind = np.argmax(np.array(iou).reshape(-1), axis=-1)
                best_detect = int(best_anchor_ind / self.anchors_per_scale)
                best_anchor = int(best_anchor_ind % self.anchors_per_scale)
                xind, yind = np.floor(bbox_xywh_scaled[best_detect, 0:2]).astype(np.int32)
                label[best_detect][yind, xind, best_anchor, :] = 0
                label[best_detect][yind, xind, best_anchor, 0:4] = bbox_xywh
                label[best_detect][yind, xind, best_anchor, 4:5] = 1.0
                label[best_detect][yind, xind, best_anchor, 5:] = smooth_onehot
                bbox_ind = int(bbox_count[best_detect] % self.max_bounding_box_per_scale)
                bounding_boxes_xywh[best_detect][bbox_ind, :4] = bbox_xywh
                bbox_count[best_detect] += 1
        label_sbbox, label_mbbox, label_lbbox = label
        sbboxes, mbboxes, lbboxes = bounding_boxes_xywh
        return label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes
# Module-level side effect: load the shared configuration at import time.
with open("config.json", "r") as f:
    config = json.load(f)

# Example usage:
# d = Dataset(config, training=True)
# for image, annot in d:
#     plt.imshow(image)
#     plt.show()
6471682 | import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
from robolearn.utils.plots import plot_multiple_process_iu_returns
from robolearn.utils.plots import plot_process_iu_policies
from robolearn.utils.plots import plot_process_iu_values_errors
from robolearn.utils.plots import plot_process_general_data
from robolearn.utils.plots.learning_process_plots import plot_process_haarnoja
import json
# Experiment selection: which random seeds and how many iterations to plot.
# SEEDS = [610, 710, 810, 1010]
SEEDS = [610]#, 1010]#, 710, 1010]
MAX_ITER = 190
# STEPS_PER_ITER = 3e3
STEPS_PER_ITER = None
LOG_PREFIX = '/home/desteban/logs/objective_test/reacher'
fig_name_prefix = 'Reacher_'
# Reward-scale notes for the logged runs:
# 1: Irew=5e-1, Urew=5e-1
# 2: Irew=5e-1, Urew=5e-1
# 3: Irew=1e+0, Urew=5e-1 ????
# anh: Irew=1e+0, Urew=1e+0
# anh2: Irew=1e-1, Urew=1e-1
# anh2: Irew=1e+1, Urew=1e-1
# compo: Irew=1e+0, Urew=1e+0, Uscale=1e0, Iscale=1e0
# compo2: Irew=1e+0, Urew=1e+0, Uscale=1e0, Iscale=5e-0
# compoX3: Irew=1e+0, Urew=1e+0, Uscale=1e0, Iscale=5e-0
# Nested mapping: category -> experiment label -> run config
# (dir, file prefix, intentional/unintentional ids, reward scales).
hiu_performance_dict = dict()
"""
# Subtask 01
hiu_performance_dict['Subtask 01'] = dict()
# hiu_performance_dict['Subtask 01']['SAC'] = dict(
# dir='sub0',
# prefix='sacD_',
# ius=[-1],
# r_scales=[1.e-0],
# )
hiu_performance_dict['Subtask 01']['HIU-SAC-W'] = dict(
dir='sub-1',
prefix='hiu_sac_newE_5_', # i:0, u:1
ius=[0],
r_scales=[1.0e-0],
)
hiu_performance_dict['Subtask 01']['HIU-SAC-E'] = dict(
dir='sub-1',
prefix='hiu_sac_new_prompE_5_', # i:0, u:1
ius=[0],
r_scales=[1.0e-0],
)
hiu_performance_dict['Subtask 01']['HIU-SAC-M'] = dict(
dir='sub-1',
prefix='hiu_sac_new_mixtureE_5_', # i:0, u:1
ius=[0],
r_scales=[1.0e-0],
)
# hiu_performance_dict['Subtask 01']['HIU-SAC-W'] = dict(
# dir='sub-1',
# prefix='hiu_sac_new5_5_',
# ius=[0],
# r_scales=[1.0e-0],
# )
# hiu_performance_dict['Subtask 01']['HIU-SAC-M'] = dict(
# dir='sub-1',
# prefix='hiu_sac_new_mixture5_5_',
# ius=[0],
# r_scales=[1.0e-0],
# )
# hiu_performance_dict['Subtask 01']['HIU-SAC-E'] = dict(
# dir='sub-1',
# prefix='hiu_sac_new_promp5_5_',
# ius=[0],
# r_scales=[1.0e-0],
# )
# Subtask 02
hiu_performance_dict['Subtask 02'] = dict()
# hiu_performance_dict['Subtask 02']['SAC'] = dict(
# dir='sub1',
# prefix='sacD_',
# ius=[-1],
# r_scales=[1.e-0],
# )
hiu_performance_dict['Subtask 02']['HIU-SAC-W'] = dict(
dir='sub-1',
prefix='hiu_sac_newE_5_', # i:0, u:1
ius=[1],
r_scales=[1.0e-0],
)
hiu_performance_dict['Subtask 02']['HIU-SAC-E'] = dict(
dir='sub-1',
prefix='hiu_sac_new_prompE_5_', # i:0, u:1
ius=[1],
r_scales=[1.0e-0],
)
hiu_performance_dict['Subtask 02']['HIU-SAC-M'] = dict(
dir='sub-1',
prefix='hiu_sac_new_mixtureE_5_', # i:0, u:1
ius=[1],
r_scales=[1.0e-0],
)
# hiu_performance_dict['Subtask 02']['HIU-SAC-W'] = dict(
# dir='sub-1',
# prefix='hiu_sac_new5_5_',
# ius=[1],
# r_scales=[1.0e-0],
# )
# hiu_performance_dict['Subtask 02']['HIU-SAC-M'] = dict(
# dir='sub-1',
# prefix='hiu_sac_new_mixture5_5_',
# ius=[1],
# r_scales=[1.0e-0],
# )
# hiu_performance_dict['Subtask 02']['HIU-SAC-E'] = dict(
# dir='sub-1',
# prefix='hiu_sac_new_promp5_5_',
# ius=[1],
# r_scales=[1.0e-0],
# )
# Maintask
hiu_performance_dict['Main Task'] = dict()
# hiu_performance_dict['Main Task']['SACC'] = dict(
# dir='sub-1',
# prefix='sacC_', # tgt:-2
# ius=[-1],
# r_scales=[1.0e-0],
# )
# hiu_performance_dict['Main Task']['SAC'] = dict(
# dir='sub-1',
# prefix='sacD_', # tgt:0
# ius=[-1],
# r_scales=[1.0e-0],
# )
# hiu_performance_dict['Main Task']['SACE'] = dict(
# dir='sub-1',
# prefix='sacE_', # tgt:1
# ius=[-1],
# r_scales=[1.0e-0],
# )
# hiu_performance_dict['Main Task']['SACF'] = dict(
# dir='sub-1',
# prefix='sacF_', # tgt:2
# ius=[-1],
# r_scales=[1.0e-0],
# )
# # hiu_performance_dict['Main Task']['HIU-SAC-W'] = dict(
# # dir='sub-1',
# # prefix='hiu_sac_new5_5_',
# # ius=[-1],
# # r_scales=[1.0e-0],
# # )
# hiu_performance_dict['Main Task']['HIU-SAC-Wx'] = dict(
# dir='sub-1',
# prefix='hiu_sac_new5B_5_',
# ius=[-1],
# r_scales=[1.0e-0],
# )
# hiu_performance_dict['Main Task']['HIU-SAC-WB'] = dict(
# dir='sub-1',
# prefix='hiu_sac_newB_5_', # i:2, u:2
# ius=[-1],
# r_scales=[1.0e-0],
# )
# hiu_performance_dict['Main Task']['HIU-SAC-WC'] = dict(
# dir='sub-1',
# prefix='hiu_sac_newC_5_', # i:1, u:2
# ius=[-1],
# r_scales=[1.0e-0],
# )
# hiu_performance_dict['Main Task']['HIU-SAC-W'] = dict(
# dir='sub-1',
# prefix='hiu_sac_newD_5_', # i:0, u:1
# ius=[-1],
# r_scales=[1.0e-0],
# )
hiu_performance_dict['Main Task']['HIU-SAC-W'] = dict(
dir='sub-1',
prefix='hiu_sac_newE_5_', # i:0, u:1
ius=[-1],
r_scales=[1.0e-0],
)
hiu_performance_dict['Main Task']['HIU-SAC-E'] = dict(
dir='sub-1',
prefix='hiu_sac_new_prompE_5_', # i:0, u:1
ius=[-1],
r_scales=[1.0e-0],
)
hiu_performance_dict['Main Task']['HIU-SAC-M'] = dict(
dir='sub-1',
prefix='hiu_sac_new_mixtureE_5_', # i:0, u:1
ius=[-1],
r_scales=[1.0e-0],
)
# # # hiu_performance_dict['Main Task']['HIU-SAC-M'] = dict(
# # # dir='sub-1',
# # # prefix='hiu_sac_new_mixture5_5_',
# # # ius=[-1],
# # # r_scales=[1.0e-0],
# # # )
# # # hiu_performance_dict['Main Task']['HIU-SAC-E'] = dict(
# # # dir='sub-1',
# # # prefix='hiu_sac_new_promp5_5_',
# # # ius=[-1],
# # # r_scales=[1.0e-0],
# # # )
# # hiu_performance_dict['Main Task']['HIU-SAC-W6'] = dict(
# # dir='sub-1',
# # prefix='hiu_sac_new6_5_',
# # ius=[-1],
# # r_scales=[1.0e-0],
# # )
# # # hiu_performance_dict['Main Task']['DDPG'] = dict(
# # # dir='sub-1',
# # # prefix='ddpg_',
# # # ius=[-1],
# # # r_scales=[1.0e-0],
# # # )
"""
# hiu_performance_dict['Sub Task 1'] = dict()
# hiu_performance_dict['Sub Task 1']['SAC-std_tanh_ini_xav_ind_opt_amsgrad_vf_imp'] = dict(
# dir='sub0',
# prefix='sac_like_spinningupM_', # tgt:-2
# ius=[-1],
# r_scales=[1.0e-0],
# )
# hiu_performance_dict['Sub Task 1']['HIUSAC-std_clip_ini_xav_ind_opt_amsgrad_vf_imp5'] = dict(
# dir='sub-1',
# prefix='hiu_sac_spinningupA_5_',
# ius=[0],
# r_scales=[1.0e-0],
# )
# hiu_performance_dict['Sub Task 1']['HIUSAC-std_clip_ini_xav_ind_opt_amsgrad_vf_imp1'] = dict(
# dir='sub-1',
# prefix='hiu_sac_spinningupA_1_',
# ius=[0],
# r_scales=[1.0e-0],
# )
# hiu_performance_dict['Sub Task 1']['HIUSAC-std_clip_ini_xav_ind_opt_amsgrad_vf_imp1_newpol'] = dict(
# dir='sub-1',
# prefix='hiu_sac_spinningupB_1_',
# ius=[0],
# r_scales=[1.0e-0],
# )
#
# Runs to compare for the second subtask.
hiu_performance_dict['Sub Task 2'] = dict()
hiu_performance_dict['Sub Task 2']['SAC-std_tanh_ini_xav_ind_opt_amsgrad_vf_imp'] = dict(
    dir='sub1',
    prefix='sac_like_spinningupM_',  # tgt:-2
    ius=[-1],
    r_scales=[1.0e-0],
)
hiu_performance_dict['Sub Task 2']['hiu_sac_spinningupE_1_'] = dict(
    dir='sub-1',
    prefix='hiu_sac_spinningupE_1_',  # without clip variance of compound
    ius=[-1],
    r_scales=[1.0e-0],
)
hiu_performance_dict['Sub Task 2']['hiu_sac_spinningupF_1_'] = dict(
    dir='sub-1',
    prefix='hiu_sac_spinningupF_1_',  # using tgt_ent
    ius=[1],
    r_scales=[1.0e-0],
)
# Runs to compare for the main (compound) task.
hiu_performance_dict['Main Task 2'] = dict()
hiu_performance_dict['Main Task 2']['SAC-std_tanh_ini_xav_ind_opt_amsgrad_vf_imp'] = dict(
    dir='sub-1',
    prefix='sac_like_spinningupM_',  # tgt:-2
    ius=[-1],
    r_scales=[1.0e-0],
)
hiu_performance_dict['Main Task 2']['HIUSAC-std_clip_ini_xav_ind_opt_amsgrad_vf_imp5'] = dict(
    dir='sub-1',
    prefix='hiu_sac_spinningupA_5_',
    ius=[-1],
    r_scales=[1.0e-0],
)
hiu_performance_dict['Main Task 2']['HIUSAC-std_clip_ini_xav_ind_opt_amsgrad_vf_imp1'] = dict(
    dir='sub-1',
    prefix='hiu_sac_spinningupA_1_',
    ius=[-1],
    r_scales=[1.0e-0],
)
hiu_performance_dict['Main Task 2']['HIUSAC-std_clip_ini_xav_ind_opt_amsgrad_vf_imp1_newpol'] = dict(
    dir='sub-1',
    prefix='hiu_sac_spinningupB_1_',
    ius=[-1],
    r_scales=[1.0e-0],
)
hiu_performance_dict['Main Task 2']['hiu_sac_spinningupD_1_'] = dict(
    dir='sub-1',
    prefix='hiu_sac_spinningupD_1_',  # with clip variance of compound
    ius=[-1],
    r_scales=[1.0e-0],
)
hiu_performance_dict['Main Task 2']['hiu_sac_spinningupE_1_'] = dict(
    dir='sub-1',
    prefix='hiu_sac_spinningupE_1_',  # without clip variance of compound
    ius=[-1],
    r_scales=[1.0e-0],
)
hiu_performance_dict['Main Task 2']['hiu_sac_spinningupF_1_'] = dict(
    dir='sub-1',
    prefix='hiu_sac_spinningupF_1_',  # using tgt_ent
    ius=[-1],
    r_scales=[1.0e-0],
)
def get_full_seed_paths(full_dict):
    """Resolve each experiment entry of *full_dict* to its on-disk seed runs.

    Replaces every experiment's config dict with a list of
    (seed_directory, ius, r_scales) tuples — one per seed in SEEDS whose run
    directory exists.  Categories in which no experiment has any matching
    run directory are removed.  Mutates and returns *full_dict*.

    NOTE(review): list_files_startswith calls os.listdir, which raises
    OSError if an experiment's base directory is missing — confirm all
    configured dirs exist before running.
    """
    categories = list(full_dict.keys())
    for cc, cate in enumerate(categories):  # NOTE(review): cc is unused
        expt_dict = full_dict[cate]
        expts = list(expt_dict)
        # print(expt_dict)
        expt_counter = 0
        for ee, expt in enumerate(expts):  # NOTE(review): ee is unused
            # print(expt['dir'])
            run_dict = expt_dict[expt]
            expt_dir = os.path.join(LOG_PREFIX, run_dict['dir'])
            if len(list_files_startswith(expt_dir, run_dict['prefix'])) > 0:
                expt_counter += 1
                dirs_and_iu = list()
                dir_prefix = os.path.join(expt_dir, run_dict['prefix'])
                # print(dir_prefix)
                for seed in SEEDS:
                    # Run directories are named <prefix><seed>.
                    full_seed_dir = dir_prefix + str(seed)
                    # print('- ', full_seed_dir)
                    if os.path.exists(full_seed_dir):
                        dirs_and_iu.append((
                            full_seed_dir,
                            run_dict['ius'],
                            run_dict['r_scales'],
                        ))
                full_dict[cate][expt] = dirs_and_iu
        if expt_counter == 0:
            # No experiment in this category had data on disk: drop it.
            full_dict.pop(cate)
    return full_dict
def list_files_startswith(directory, prefix):
    """Return the names of entries in *directory* that start with *prefix*."""
    return [name for name in os.listdir(directory) if name.startswith(prefix)]
def list_files_endswith(directory, suffix):
    """Return the names of entries in *directory* that end with *suffix*."""
    return [name for name in os.listdir(directory) if name.endswith(suffix)]
def main(args):
    """Collect the configured experiment run folders and plot their
    per-iteration return curves.

    *args* (argparse.Namespace) is currently unused by the plotting call;
    the --un / --no_in options are parsed but not forwarded.
    """
    directories_dict = get_full_seed_paths(hiu_performance_dict)
    plot_multiple_process_iu_returns(
        directories_dict,
        max_iter=MAX_ITER,
        steps_per_iter=STEPS_PER_ITER,
        fig_name_prefix=fig_name_prefix,
    )
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--un', type=int, default=-1,
                        help='Unintentional id')
    parser.add_argument('--no_in', action='store_false')
    args = parser.parse_args()
    main(args)
    # Keep the figures open until the user acknowledges.
    input('Press a key to close script')
| StarcoderdataPython |
1767755 | <reponame>iuyoy/TaggedSearch<filename>Global/config.py
#!/usr/bin/python
# -*- coding:utf-8 -*-
#author:iuyyoy
#重要的全局变量
#输出等级
print_level = 0
#数据库信息
dbinfo={\
'host' : 'localhost'\
,'user' : 'root'\
,'passwd' : '<PASSWORD>'\
,'db' : 'wiki'\
,'port' : 3306\
,'charset' : 'utf8'\
}
#dbinfo={\
#'host' : '192.168.99.127'\
#,'user' : 'root'\
#,'passwd' : '<PASSWORD>'\
#,'db' : 'search'\
#,'port' : 3306\
#,'charset' : 'utf8'\
#}
#dbinfo={\
#'host' : 'localhost'\
#,'user' : 'search'\
#,'passwd' : '<PASSWORD>'\
#,'db' : 'search'\
#,'port' : 3306\
#,'charset' : 'utf8'\
#}
#db
search_db = 'search'
wiki_db = 'wiki'
#wiki_db = 'search'
#table
entities_table = 'entities'
entity_properties_table = 'entity_properties'
entity_aliases_table = 'entity_aliases'
wikidata_entities_table = 'wikidata_entities_new'
wikidata_entity_properties_table = 'wikidata_entity_properties_new'
wikidata_word_table = 'wikidata_word'
words_table = 'words'
word_properties_table = 'word_properties'
word_entity_table = 'word_entity'
cnbeta_table = 'sp_cnbeta'
sogou_all_table = 'websites_news_all'
sogou_sogou_table = 'websites_sogou'
websites_words_table = 'websites_words'
websites_tags_table = 'websites_tags'
#结巴字典文件路径
#jieba_words_dict_path = r'C:\0pros\Python27\Lib\site-packages\jieba\dict.txt'
stopwords_path = './Word_Segment/stopwords.txt' | StarcoderdataPython |
22100 | #!/usr/bin/env python3
import fire
import json
import os
import numpy as np
import tensorflow as tf
import model, sample, encoder
def interact_model(
    model_name='117M',
    seed=None,
    nsamples=1000,
    batch_size=1,
    length=None,
    temperature=1,
    top_k=0,
    top_p=0.0
):
    """
    Generate Simulink .mdl text samples from a GPT-2 checkpoint.

    Each sample starts from the fixed prompt 'Model {' and is extended until
    the model emits the <|endoftext|> token; the result is written to
    Simulink_sample/sample__<n>.mdl (the directory must already exist).

    :model_name=117M : String, which model to use
    :seed=None : Integer seed for random number generators, fix seed to reproduce
     results
    :nsamples=1000 : Number of samples to generate total
    :batch_size=1 : Number of batches (only affects speed/memory). Must divide nsamples.
    :length=None : Number of tokens in generated text, if None (default), is
     determined by model hyperparameters
    :temperature=1 : Float value controlling randomness in boltzmann
     distribution. Lower temperature results in less random completions. As the
     temperature approaches zero, the model will become deterministic and
     repetitive. Higher temperature results in more random completions.
    :top_k=0 : Integer value controlling diversity. 1 means only 1 word is
     considered for each step (token), resulting in deterministic completions,
     while 40 means 40 words are considered at each step. 0 (default) is a
     special setting meaning no restrictions. 40 generally is a good value.
    :top_p=0.0 : Float value controlling diversity. Implements nucleus sampling,
     overriding top_k if set to a value > 0. A good setting is 0.9.
    """
    if batch_size is None:
        batch_size = 1
    assert nsamples % batch_size == 0
    enc = encoder.get_encoder(model_name)
    hparams = model.default_hparams()
    with open(os.path.join('models', model_name, 'hparams.json')) as f:
        hparams.override_from_dict(json.load(f))
    if length is None:
        # Default to half the model's context window.
        length = hparams.n_ctx // 2
        print(length)
    #elif length > hparams.n_ctx:
    #    raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)
    config = tf.ConfigProto()
    with tf.Session(graph=tf.Graph(), config=config) as sess:
        context = tf.placeholder(tf.int32, [batch_size, None])
        np.random.seed(seed)
        tf.set_random_seed(seed)
        # Fixed generation prompt: the opening of a Simulink model file.
        raw_text = """Model {"""
        context_tokens = enc.encode(raw_text)
        output = sample.sample_sequence(
            hparams=hparams, length=length,
            context=context,
            batch_size=batch_size,
            temperature=temperature, top_k=top_k, top_p=top_p
        )
        saver = tf.train.Saver()
        ckpt = tf.train.latest_checkpoint(os.path.join('models', model_name))
        saver.restore(sess, ckpt)
        from datetime import datetime  # NOTE(review): unused import
        generated = 0  # NOTE(review): assigned but never used
        import time
        grand_start = time.time()
        for cnt in range(nsamples // batch_size):
            start_per_sample = time.time()
            output_text = raw_text
            text = raw_text
            context_tokens = enc.encode(text)
            # Keep extending the sample until the model emits the
            # end-of-text marker; each iteration feeds the previous output
            # back in as context.
            while "<|endoftext|>" not in text:
                out = sess.run(output, feed_dict={context: [context_tokens for _ in range(batch_size)]})[:,
                               len(context_tokens):]
                for i in range(batch_size):
                    text = enc.decode(out[i])
                    if "<|endoftext|>" in text:
                        # Keep only the text before the end marker.
                        sep = "<|endoftext|>"
                        rest = text.split(sep, 1)[0]
                        output_text += rest
                        break
                    context_tokens = enc.encode(text)
                    output_text += text
            # 'cnt+12' offsets the sample numbering — presumably resuming a
            # previous run that produced samples 0-11; confirm before reuse.
            print("=" * 40 + " SAMPLE " + str(cnt+12) + " " + "=" * 40)
            minutes, seconds = divmod(time.time() - start_per_sample, 60)
            print("Output Done : {:0>2}:{:05.2f}".format(int(minutes),seconds) )
            print("=" * 80)
            with open("Simulink_sample/sample__"+str(cnt+12)+".mdl","w+") as f:
                f.write(output_text)
        elapsed_total = time.time()-grand_start
        hours, rem = divmod(elapsed_total,3600)
        minutes, seconds = divmod(rem, 60)
        print("Total time to generate 1000 samples :{:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds))
if __name__ == '__main__':
    # Expose the generator as a command-line interface.
    fire.Fire(interact_model)
| StarcoderdataPython |
1806223 | <filename>FileManagementApp.py
#cd C:\Python27\Lib\site-packages\PyQt4
#pyuic4 -x C:\Users\Gregoire\Documents\PythonCode\JCAP\JCAPCreateExperimentAndFOM\QtDesign\CreateExpForm.ui -o C:\Users\Gregoire\Documents\PythonCode\JCAP\JCAPCreateExperimentAndFOM\CreateExpForm.py
import time, shutil
import os, os.path
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
try:
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
except ImportError:
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
projectpath=os.path.split(os.path.abspath(__file__))[0]
sys.path.append(os.path.join(projectpath,'QtForms'))
sys.path.append(os.path.join(projectpath,'AuxPrograms'))
sys.path.append(os.path.join(projectpath,'OtherApps'))
#from fcns_math import *
from fcns_io import *
from fcns_ui import *
from FileManagementForm import Ui_FileManDialog
from DBPaths import *
class filemanDialog(QDialog, Ui_FileManDialog):
    """Qt dialog for finding and deleting obsolete EXP/ANA .run folders.

    The tree widget shows two top-level items (EXP, ANA); checked leaf
    folders are removed from disk when the delete button is pressed.
    (Python 2 / PyQt4 code.)
    """

    def __init__(self, parent=None, title='', folderpath=None):
        super(filemanDialog, self).__init__(parent)
        self.setupUi(self)
        self.parent=parent
        # Wire dialog buttons to their handlers.
        button_fcn=[\
        (self.deletefoldersButton, self.deletefolders), \
        (self.findfoldersButton, self.findfolders), \
        ]
        #(self.UndoExpPushButton, self.undoexpfile), \
        # (self.EditParamsPushButton, self.editrunparams), \
        #(self.EditExpParamsPushButton, self.editexpparams), \
        for button, fcn in button_fcn:
            QObject.connect(button, SIGNAL("pressed()"), fcn)
        self.treeWidget=self.foldersTreeWidget
        self.toplevelitems=[]
        # Resolve the local EXP and ANA root folders; empty string if absent.
        self.anafolder=tryprependpath(ANAFOLDERS_L, '')
        self.expfolder=tryprependpath(EXPFOLDERS_L, '')
        if len(self.anafolder)==0 and len(self.expfolder)==0:
            print 'cannot find exp or ana folder'
            return

    def deletefolders(self):
        """Delete every checked leaf folder under each checked top-level tree."""
        for mainitem, fold in zip(self.toplevelitems, [self.expfolder, self.anafolder]):
            if mainitem is None or not bool(mainitem.checkState(0)):
                continue
            # Checked second-level items (e.g. technique folders).
            subitems=[mainitem.child(i) for i in range(mainitem.childCount()) if bool(mainitem.child(i).checkState(0))]
            # Checked third-level items become full paths to delete.
            delpaths=[os.path.join(os.path.join(fold, str(subitem.text(0))), str(subitem.child(i).text(0))) for subitem in subitems for i in range(subitem.childCount()) if bool(subitem.child(i).checkState(0))]
            for p in delpaths:
                shutil.rmtree(p, ignore_errors=True)
                print 'removed ', p
            if bool(mainitem.checkState(0)):
                idialog=messageDialog(self, 'folders deleted: ANA temp folder possibly deleted \nso restart before performing analysis')
                idialog.exec_()

    def findfolders(self):
        """Repopulate the tree with EXP/ANA folders matching the suffix filter."""
        self.treeWidget.clear()
        self.toplevelitems=[]
        self.endswith=str(self.endswithLineEdit.text())
        for i, (lab, fold) in enumerate(zip(['EXP', 'ANA'], [self.expfolder, self.anafolder])):
            if len(fold)==0: #didn't find exp or ana folder but found other one
                self.toplevelitems+=[None]
                continue
            mainitem=QTreeWidgetItem([lab], 0)
            mainitem.setFlags(mainitem.flags() | Qt.ItemIsUserCheckable)
            mainitem.setCheckState(0, Qt.Checked)
            if i==0:
                item0=mainitem
            self.treeWidget.addTopLevelItem(mainitem)
            self.nestedfill(fold, mainitem, 'top', endswith=None)
            mainitem.setExpanded(True)
            self.toplevelitems+=[mainitem]
        self.treeWidget.setCurrentItem(item0)

    def nestedfill(self, fold, parentitem, level, endswith='.run'):
        """Recursively add subfolders of *fold* (optionally filtered by suffix)
        under *parentitem*.  Returns the number of subfolders found."""
        subfolds=[fn for fn in os.listdir(fold) if os.path.isdir(os.path.join(fold, fn))]
        if not endswith is None:
            subfolds=[fn for fn in subfolds if fn.endswith(endswith)]
        for fn in subfolds:
            item=QTreeWidgetItem([fn], 0)
            item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
            if level=='top' and fn!='temp':#don't auto check the non-temp folders like eche, uvis, imag
                item.setCheckState(0, Qt.Unchecked)
            else:
                item.setCheckState(0, Qt.Checked)
            if level=='top':
                p=os.path.join(fold, fn)
                #print p
                addbool=self.nestedfill(p, item, 'sub', endswith=self.endswith)
                addbool=addbool>0
            else:
                addbool=True
            if addbool:
                parentitem.addChild(item)
        return len(subfolds)
if __name__ == "__main__":
    class MainMenu(QMainWindow):
        """Minimal host window that immediately launches the file dialog."""
        def __init__(self, previousmm, execute=True, **kwargs):#, TreeWidg):
            super(MainMenu, self).__init__(None)
            #self.setupUi(self)
            self.filemanui=filemanDialog(self, title='Delete obsolete .run folders', **kwargs)
            #self.expui.importruns(pathlist=['20150422.145113.donex.zip'])
            #self.expui.importruns(pathlist=['uvis'])
            if execute:
                self.filemanui.exec_()
    # Run against the shared demo data location.
    os.chdir('//htejcap.caltech.edu/share/home/users/hte/demo_proto')
    mainapp=QApplication(sys.argv)
    form=MainMenu(None)
    form.show()
    form.setFocus()
    mainapp.exec_()
| StarcoderdataPython |
1772695 | <reponame>KathmanduLivingLabs/tasking-manager
from backend.models.postgis.user import User, UserRole, MappingLevel
from tests.backend.base import BaseTestCase
class TestUser(BaseTestCase):
    """Unit tests for User DTO serialization behaviour."""

    def setUp(self):
        super().setUp()
        user = User()
        user.role = UserRole.MAPPER.value
        user.id = 12
        user.mapping_level = MappingLevel.BEGINNER.value
        user.username = "mrtest"
        user.email_address = "<EMAIL>"
        self.test_user = user

    def test_as_dto_will_not_return_email_if_not_owner(self):
        # A different user requests the profile, so the email address must
        # be withheld from the DTO.
        dto = self.test_user.as_dto("mastertest")
        self.assertFalse(dto.email_address)

    def test_as_dto_will_not_return_email_if_owner(self):
        # The owner requests their own profile, so the email is included.
        dto = self.test_user.as_dto("mrtest")
        self.assertTrue(dto.email_address)
| StarcoderdataPython |
5054417 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2019, <NAME>. All rights reserved.
# -----------------------------------------------------------------------------
import unittest
import os
import json
import vfxtest
mock = vfxtest.mock
# -----------------------------------------------------------------------------
class RunTestSuiteTestCase(unittest.TestCase):
    """Exercises vfxtest.runTestSuite() inside the ./test_sandbox fixture."""

    # -------------------------------------------------------------------------
    @classmethod
    def setUpClass(cls):
        """No class-level fixtures are required."""

    # -------------------------------------------------------------------------
    @classmethod
    def tearDownClass(cls):
        """No class-level teardown is required."""

    # -------------------------------------------------------------------------
    def setUp(self):
        """Remember the working directory and enter the sandbox."""
        self.cwd = os.getcwd()
        os.chdir('./test_sandbox')

    # -------------------------------------------------------------------------
    def tearDown(self):
        """Restore the working directory recorded in setUp()."""
        os.chdir(self.cwd)

    # -------------------------------------------------------------------------
    def test01_runTestSuite_native_runs_successfully(self):
        settings = vfxtest.collectSettings()
        vfxtest.prepareTestEnvironment(settings)
        vfxtest.runTestSuite(settings=settings)
        self.assertEqual(settings['files_run'], 2)
        self.assertEqual(settings['tests_run'], 6)
        self.assertEqual(settings['errors'], 0)

    # -------------------------------------------------------------------------
    def test02_runTestSuite_single_context_runs_successfully(self):
        settings = vfxtest.collectSettings()
        vfxtest.prepareTestEnvironment(settings)
        cov_file = os.path.abspath(f"{settings['output_folder']}/.coverage.python3.x")
        if os.path.exists(cov_file):
            os.remove(cov_file)
        settings['context'] = 'python3.x'
        settings['debug_mode'] = True
        vfxtest.runTestSuite(settings=settings)
        self.assertEqual(settings['files_run'], 2)
        self.assertEqual(settings['tests_run'], 6)
        self.assertEqual(settings['errors'], 0)
        # The coverage file for the selected context must have been written.
        self.assertTrue(os.path.exists(cov_file))

    # -------------------------------------------------------------------------
    def test03_runTestSuite_nested_context_runs_successfully(self):
        settings = vfxtest.collectSettings()
        vfxtest.prepareTestEnvironment(settings)
        cov_file_3 = os.path.abspath(f"{settings['output_folder']}/.coverage.python3.x")
        if os.path.exists(cov_file_3):
            os.remove(cov_file_3)
        cov_file_2 = os.path.abspath(f"{settings['output_folder']}/.coverage.python2.x")
        if os.path.exists(cov_file_2):
            os.remove(cov_file_2)
        settings['context'] = 'python'
        vfxtest.runTestSuite(settings=settings)
        self.assertEqual(settings['files_run'], 4)
        self.assertEqual(settings['tests_run'], 12)
        self.assertEqual(settings['errors'], 0)
        # Both nested contexts must have produced their coverage files.
        self.assertTrue(os.path.exists(cov_file_3))
        self.assertTrue(os.path.exists(cov_file_2))

    # -------------------------------------------------------------------------
    def test05_runTestSuite_raises_SystemExit_on_child_proc_exit_code_bigger_than_zero(self):
        settings = vfxtest.collectSettings()
        vfxtest.prepareTestEnvironment(settings)
        settings['context'] = 'python3.x'
        settings['debug_mode'] = True
        # A child process reporting a non-zero exit code must abort the run.
        with self.assertRaises(SystemExit):
            with mock.patch('subprocess.Popen.wait', return_value=13):
                vfxtest.runTestSuite(settings=settings)
# -----------------------------------------------------------------------------
# Allow running this test module directly (python <file>) as well as via a runner.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
5065873 | <filename>keras_extra/utils/common.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#================================================================
# God Bless You.
#
# file name: common.py
# author: klaus
# email: <EMAIL>
# created date: 2018/01/08
# description:
#
#================================================================
import keras
import numpy as np
import tensorflow as tf
def preprocess_input(img):
    """Preprocess an RGB image for a VGG-style network.

    Swaps the channel order from RGB to BGR and subtracts the ImageNet
    per-channel mean (BGR order: 103.939, 116.779, 123.68).

    Args:
        img (numpy.ndarray): Image array whose last axis holds RGB channels.

    Returns:
        numpy.ndarray: A new array in BGR order with the mean removed.
        The caller's array is left unmodified (the original implementation
        subtracted in place through a reversed view, silently mutating the
        input and raising for integer dtypes).
    """
    img = np.asarray(img)
    # Promote integer inputs to float; float32 inputs stay float32.
    mean_bgr = np.asarray([103.939, 116.779, 123.68],
                          dtype=np.result_type(img.dtype, np.float32))
    # Fancy-index reversal plus out-of-place subtraction: no caller mutation.
    return img[..., ::-1] - mean_bgr
def preprocess_output(num_classes):
    """Build a label-preprocessing closure that one-hot encodes labels.

    Args:
        num_classes (int): Total number of classes for the encoding.

    Returns:
        Callable: ``preprocess(label)`` that one-hot encodes ``label``.
        NumPy arrays and integer labels go through
        ``keras.utils.to_categorical``; anything else (assumed to be a
        TensorFlow tensor) goes through ``tf.one_hot``.
    """
    def preprocess(label):
        # ``long`` existed only in Python 2 and is a NameError under Python 3;
        # plain ``int`` plus NumPy integer scalars cover the concrete cases.
        if isinstance(label, (np.ndarray, int, np.integer)):
            label = keras.utils.to_categorical(label, num_classes)
        else:
            label = tf.one_hot(label, num_classes)
        return label

    return preprocess
| StarcoderdataPython |
9619439 | <reponame>AKSHANSH47/crowdsource-platform2<filename>crowdsourcing/serializers/template.py
from crowdsourcing import models
from rest_framework import serializers
from crowdsourcing.serializers.dynamic import DynamicFieldsModelSerializer
from rest_framework.exceptions import ValidationError
class TemplateItemSerializer(DynamicFieldsModelSerializer):
    """Serializer for individual template items."""

    class Meta:
        model = models.TemplateItem
        fields = ('id', 'name', 'type', 'sub_type', 'position', 'template', 'role', 'required', 'aux_attributes')

    def create(self, *args, **kwargs):
        """Persist a new TemplateItem built from the validated payload."""
        return models.TemplateItem.objects.create(**self.validated_data)
class TemplateSerializer(DynamicFieldsModelSerializer):
    """Serializer for templates, including their (read-only) nested items."""

    template_items = TemplateItemSerializer(many=True, required=False)

    class Meta:
        model = models.Template
        fields = ('id', 'name', 'template_items')
        read_only_fields = ('template_items',)

    def create(self, with_default, *args, **kwargs):
        """Persist a new Template owned by ``kwargs['owner']``.

        When ``with_default`` is truthy, a default radio-button question is
        attached to the freshly created template.
        """
        template = models.Template.objects.create(owner=kwargs['owner'], **self.validated_data)
        if not with_default:
            return template
        default_item = {
            "type": "radio",
            "role": "input",
            "name": "radio_0",
            "icon": "radio_button_checked",
            "position": 1,
            "template": template.id,
            "aux_attributes": {
                "question": {
                    "data_source": None,
                    "value": "Untitled Question",
                },
                "layout": 'column',
                "options": [
                    {"data_source": None, "value": 'Option 1', "position": 1},
                    {"data_source": None, "value": 'Option 2', "position": 2},
                ],
                "shuffle_options": "false"
            },
        }
        item_serializer = TemplateItemSerializer(data=default_item)
        if not item_serializer.is_valid():
            raise ValidationError(item_serializer.errors)
        item_serializer.create()
        return template
class TemplateItemPropertiesSerializer(serializers.ModelSerializer):
    """Plain ModelSerializer exposing the default fields of TemplateItemProperties."""

    class Meta:
        model = models.TemplateItemProperties
| StarcoderdataPython |
9691793 | #!/usr/bin/env python3
import sys
from tweet_dumper import *
from analyzer import Analyzer
from stats import Stats
from tqdm import tqdm
# Tally of [positive, negative, neutral] tweet counts, filled in by main().
frequencies = [0, 0, 0]

def main():
    """Score the sentiment of a user's recent tweets and chart the results.

    Expects ``sys.argv`` to hold the twitter username and the number of
    tweets to analyse; exits with a usage message otherwise.

    Raises:
        RuntimeError: If the timeline fetch returned an empty tweet list.
    """
    if len(sys.argv) != 3:
        sys.exit("Usage: ./tweet-sentiment <twitter username> <number of tweets to analyse>(without the <>)")
    username = sys.argv[1]
    # int() either succeeds or raises ValueError, so the original
    # "if count is None" fallback was dead code and has been removed.
    count = int(sys.argv[2])
    tweets = get_timeline(username, count)
    if tweets is None:
        return
    analyzer = Analyzer()
    stats = Stats()
    total = 0  # renamed from ``sum`` to stop shadowing the builtin
    for tweet in tqdm(tweets):
        tweet_score = analyzer.score(tweet)
        total += tweet_score
        if tweet_score > 0.0:
            frequencies[0] += 1  # positive
        elif tweet_score < 0.0:
            frequencies[1] += 1  # negative
        else:
            frequencies[2] += 1  # neutral
    try:
        average = total / len(tweets)
    except ZeroDivisionError:
        # Re-raise with a clearer message and drop the arithmetic traceback.
        raise RuntimeError("No tweets made") from None
    print(f"Sentiment average score = {average}")
    stats.pieChart(frequencies)
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3394225 | import DirNav;
import threading;
import time;
import os;
results = list();
results2 = dict();#results dictionary of {result:path}
no_d_threads = 0; #no. of completed d_threads
no_f_threads = 0; #no. of completed f_threads
class SearchThread(threading.Thread):
    """Worker thread that checks a single path for ``search_string``.

    ``d_or_f`` selects the mode: "dir" is meant to search a directory
    recursively (currently a stub, see in_search), anything else treats
    ``path`` as a file and scans its name and contents.  Matches are
    accumulated in the module-level ``results`` / ``results2`` globals and
    completion is signalled through the ``no_f_threads`` counter.
    """
    def __init__(self,d_or_f,path,search_string):
        threading.Thread.__init__(self);
        self.d = False;
        self.f = False;
        if d_or_f == "dir":
            self.d = True;
            print("checking : "+path+" as DIR");
        else:
            self.f = True;
            print("checking : "+path+" as FILE");
        self.path = path;
        self.search_string = search_string;
        # NOTE(review): these are locals, not attributes, and are never used
        # in this class — presumably leftovers from an earlier design.
        d_threads = list(); #list of all dir checking threads
        f_threads = list(); #list of all file checking threadsthreads
        #self.dn = DirNav.DirectoryNavigator(self.path,verbose=True);
        return;
    def run(self):
        """Thread body: dispatch to dir/file handling and bump the counters."""
        global no_f_threads;
        global no_d_threads;
        if self.d == True:
            # Directory mode is a no-op: in_search returns immediately and
            # no_d_threads is never incremented here.
            self.in_search(self.path);
            pass;
        elif self.f == True:
            f = open(self.path,"r");
            # A file-name match records the full path in ``results``; a
            # content match records {name: path} in ``results2``.
            if self.search_string in self.get_fname(self.path):
                results.append(self.path);
                no_f_threads +=1;
                return;
            elif self.file_search(f) == True:
                #results.append(self.path);
                results2[self.get_fname(self.path)] = self.path;
                no_f_threads +=1;
                return;
            else:
                no_f_threads +=1;
                return;
        return;
    def get_fname(self,path):
        # Return the final component (file name) of a '/'-separated path.
        dir_list = path.split("/");
        last = dir_list.pop();
        return(last);
    def in_search(self,path):
        # Stub for recursive directory search; currently does nothing.
        global results;
        return;
    def file_search(self,f_handle):
        # True when any line of the already-open file contains search_string.
        global results;
        content = f_handle.readlines();
        for line in content:
            #print(line);
            if self.search_string in line:
                return(True);
        return(False);
def search(search_string,start_path):
    """Search ``start_path`` for ``search_string`` using one thread per entry.

    Directory threads are created but never started (directory search is a
    stub); file threads are started and immediately joined, so execution is
    effectively sequential.  Returns the module-level ``results2`` mapping
    of {file name: path} for files whose contents matched.
    """
    #print("got : "+search_string+" and "+start_path);
    d_dict = dict(); #dictionary of {directory_name:path}
    f_dict = dict(); #dictionary of {file_name:path}
    d_threads = list(); #list of all dir checking threads
    f_threads = list(); #list of all file checking threads
    global no_d_threads;
    global no_f_threads;
    start_time = time.time();
    end_time = 0;
    # Expand the relative shortcuts "." and ".." to absolute paths.
    if start_path == ".":
        start_path = os.getcwd();
    elif start_path == ".." or start_path == "../":
        start_path = os.getcwd()+"/../";
    dn = DirNav.DirectoryNavigator(start_path);
    dir_full = dn.get_dir_list();
    # Split the listing into directories and files.
    for d in dir_full.keys():
        if dir_full[d] == "dir":
            d_dict[d] = dn.get_system_path()+d;
        else:
            f_dict[d] = dn.get_system_path()+d;
    #print(d_dict);
    #print(" :: ");
    #print(f_dict);
    for d in d_dict.keys():
        dt = SearchThread("dir",d_dict[d],search_string);
        #dt.start();
        d_threads.append(dt);
    for f in f_dict.keys():
        df = SearchThread("file",f_dict[f],search_string);
        df.start();
        df.join();
        f_threads.append(df);
    # Busy-wait until every file thread has bumped the completion counter
    # (redundant given the join() above, but preserved as written).
    while (True):
        if no_f_threads == len(f_threads):
            print(results2);
            break;
        else:
            #print("...");
            time.sleep(1);
    end_time = time.time();
    t_diff = end_time - start_time;
    print("Time taken : "+str(t_diff));
    return(results2);
# Demo run: look for "Dir" under the sibling py-scripts folder.
if __name__ == "__main__":
    r = search("Dir","../py-scripts");
    exit();
| StarcoderdataPython |
1718008 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# Meta-info
Author: <NAME>
Created: 10/03/2017
Updated: 10/03/2017
# Description
Unit tests for the functions in the ands.algorithms.dac.select module.
"""
import unittest
from random import randint, sample, randrange
from ands.algorithms.dac.select import select
class TestSelect(unittest.TestCase):
    """Unit tests for the divide-and-conquer select (k-th order statistic)."""

    def test_when_empty_list(self):
        # An empty list can never contain a k-th element, whatever k is.
        with self.assertRaises(ValueError):
            select([], 2)

    def test_when_list_size_1_invalid_k(self):
        with self.assertRaises(ValueError):
            select([3], 1)
        with self.assertRaises(ValueError):
            select([3], -1)

    def test_when_list_size_2_invalid_k(self):
        with self.assertRaises(ValueError):
            select([3, 5], 2)
        with self.assertRaises(ValueError):
            select([3, 5], -1)

    def test_when_list_size_1_k_is_zero(self):
        self.assertEqual(select([7], 0), 7)

    def test_when_list_size_2_k_is_zero(self):
        # k == 0 asks for the minimum, regardless of input order.
        self.assertEqual(select([7, 5], 0), 5)
        self.assertEqual(select([5, 7], 0), 5)

    def test_when_list_random_size_k_is_zero(self):
        xs = [randint(-100, 100) for _ in range(randint(3, 100))]
        self.assertEqual(select(xs, 0), min(xs))

    def test_when_list_random_size_all_elements_equal(self):
        value = randint(-100, 100)
        xs = [value] * randint(1, 100)
        self.assertEqual(select(xs, randint(0, len(xs) - 1)), value)

    def test_when_list_random_size_random_k(self):
        xs = sample(range(100), 100)
        self.assertIn(select(xs, randrange(0, len(xs))), xs)
| StarcoderdataPython |
8071915 | from __future__ import division
import os
from labelit.command_line.imagefiles import ImageFiles
class spotfinder_proxy:
  """Adapter mimicking the old spotfinder interface while replacing its spot
  list with specks found by rstbx's speckfinder (Python 2 code)."""
  def __init__(self,old_spotfinder,phil,frames):
    self.frames = frames
    self.phil = phil
    self.old_pd = old_spotfinder.pd
    self.old_S = old_spotfinder
  def get_aitbx_inputs(self):
    """Assemble the autoindexing parameter dictionary, swapping the old
    spotfinder's spots for speckfinder results collected per image."""
    pd = dict(xbeam = self.old_pd["xbeam"],
              ybeam = self.old_pd["ybeam"],
              osc_start = self.old_pd["osc_start"],
              binning = "1",
              size1 = self.old_pd["size1"],
              size2 = self.old_pd["size2"],
              pixel_size = self.old_pd["pixel_size"],
              distance = self.old_pd["distance"],
              wavelength = self.old_pd["wavelength"],
              deltaphi = self.old_pd["deltaphi"],
              indexing = self.old_S.get_aitbx_inputs()["indexing"],
              endstation = self.old_pd["endstation"],
              recommended_grid_sampling = self.old_S.get_aitbx_inputs()["recommended_grid_sampling"],
              twotheta = self.old_pd["twotheta"],
              resolution_inspection = self.old_pd["resolution_inspection"],
              smallest_spot_sep = self.old_S.get_aitbx_inputs()["smallest_spot_sep"],
              masks = self.old_pd["masks"], #see practical heuristics
              spot_convention = self.old_pd["spot_convention"],
              vendortype = self.old_pd["vendortype"],
              #characteristic_grid_sampling = 0.01845574110881109,
              #characteristic_resolution_mm = 43.390947190873604
              )
    pd["ref_maxcel"] = self.old_pd["ref_maxcel"] #post-get_aitbx_inputs
    self.images = self.old_S.images # sublattice average profile
    self.pd = pd
    old_count = len(pd["indexing"])
    from rstbx.new_horizons.speckfinder import speckfinder
    self.pd["indexing"]=[] # zero out the old spotfinder spots; use speckfinder spots instead
    for key in self.images.keys():
      self.specks = speckfinder(imgobj = self.frames.imageindex(key),
                                phil = self.phil,
                                inputpd = self.pd)
      self.pd["indexing"] += self.specks.get_active_data()
    new_count = len(pd["indexing"])
    print "Comparing old count %d new count %d, difference %d"%(old_count,new_count,new_count-old_count)
    print self.specks
    return self.pd
class AutoIndexOrganizer:
  """Drives spotfinding, delegates indexing, and pickles the refinement
  results (Python 2, labelit/cctbx code)."""
  def __init__(self,verbose = 0,**kwargs):
    self.rundir = os.getcwd()
    self.verbose = verbose
    self.horizons_phil = kwargs["horizons_phil"]
    #self.horizons_phil.persist.show()
    assert 'argument_module' in kwargs
    self.setCommandInput(kwargs['argument_module'])
    if self.verbose: print "Process frames in directory:",self.Files.filenames.FN[0].cwd
    if 'delegate' in kwargs:
      self.setIndexingDelegate(kwargs['delegate'])
    self.exception_passthru = 0
    if 'exception_passthru' in kwargs:
      self.exception_passthru = kwargs['exception_passthru']
    print '\n'.join(self.Files.filenames())
  def setCommandInput(self,argument_module):
    # Resolve the command-line arguments into image files and frame numbers.
    self.Files = ImageFiles(argument_module,self.horizons_phil)
    self.frames = self.Files.frames()
  def printSpots(self):
    """Run the legacy spotfinder, then wrap it in spotfinder_proxy so the
    downstream code sees speckfinder results instead."""
    from labelit.procedure import spotfinder_and_pickle
    S = spotfinder_and_pickle(self.rundir,self.Files,
        spots_pickle = self.horizons_phil.spots_pickle,
        horizons_phil = self.horizons_phil)
    #print S.images
    NEW = spotfinder_proxy(S,self.horizons_phil,self.Files)
    NEW.images = {}
    NEW.overlapping = False
    NEW.phil_params = S.phil_params
    for frame in self.frames:
      NEW.images[frame]=dict(area=[1,] # not actually used for new horizons
                             )
    self.S = NEW
    for frame in self.frames:
      if self.verbose:
        from labelit.command_line.stats_distl import pretty_image_stats,notes
        pretty_image_stats(S,frame)
        notes(S,self.frames[0])
        print
    NEW.get_aitbx_inputs()
  def setIndexingDelegate(self,function):
    # The delegate is called as function(frames, Files, S) in executeDelegate.
    self.indexing_delegate = function
  def executeDelegate(self):
    self.info = self.indexing_delegate(self.frames,self.Files,self.S)
  def pickle_the_results(self):
    """Strip unpicklable members from the result dict, then pickle it."""
    for key in ['best_integration','triclinic']:
      if key in self.info:
        if 'minimizer' in self.info[key]: #not attained when best==tri
          del self.info[key]['minimizer'] # Must remove
        # temporary section pending an analysis of which data need to be persistent
        if 'results' in self.info[key]["integration"]:
          #future options 1) make the whole object picklable--write test script
          #2) just pickle the data needed for the GUI
          del self.info[key]["integration"]['results']
    from labelit.dptbx.pickle_support import pickle_refinements
    pickle_refinements(self.info,self.horizons_phil.refinements_pickle)
  def process(self):
    """Full pipeline: spotfind, index via the delegate, persist results."""
    self.printSpots()
    self.executeDelegate()
    if 'info' in self.__dict__: #if indexing worked
      self.pickle_the_results()
    return self.info
| StarcoderdataPython |
1771282 | <filename>evaluation/scripts/performance/compare.py
import os
import json
import argparse
from sklearn import metrics
def align(*args):
    """Align several ``{key: value}`` mappings on their shared keys.

    Only keys present in *every* mapping are kept.  Returns one list of
    values per input mapping; position ``i`` in every list corresponds to
    the same key, so the lists are directly comparable element-wise.

    The shared keys are sorted so the output ordering is deterministic
    (previously it depended on set iteration order / hash randomization).
    """
    if not args:
        return []
    common = set(args[0])
    for mapping in args[1:]:
        common &= mapping.keys()
    ordered = sorted(common)
    return [[mapping[k] for k in ordered] for mapping in args]
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="compare ground truth to similarity scores"
    )
    parser.add_argument("labels", help="ground truth labels (json)")
    parser.add_argument(
        "scores", nargs="+", help="predicted similarity score files (json)"
    )
    parser.add_argument(
        "-n",
        "--naive",
        type=float,
        default=None,
        help="add a naive metric that always estimates the same value",
    )
    parser.add_argument(
        "-o", "--output", help="output file to write results (default: stdout)"
    )
    arguments = parser.parse_args()
    def load(path):
        # Load a JSON score file into {(first, second): similarity}, with the
        # pair key sorted so (a, b) and (b, a) collapse to one entry.
        with open(path, "r") as f:
            data = json.load(f)
        data = {tuple(sorted((d["first"], d["second"]))): d["similarity"] for d in data}
        return data
    labels = load(arguments.labels)
    scores = {}
    # One prediction set per score file, keyed by the file's base name.
    for score in arguments.scores:
        name = os.path.splitext(os.path.basename(score))[0]
        scores[name] = load(score)
    # Keep only the pairs every file shares, in a consistent order.
    aligned = align(labels, *[scores[n] for n in sorted(scores)])
    labels = aligned[0]
    scores = dict(zip(sorted(scores), aligned[1:]))
    if arguments.naive is not None:
        scores["naive"] = [arguments.naive] * len(labels)
    performance = {}
    for name, score in scores.items():
        # performance[name] = metrics.mean_absolute_error(labels, score)
        # RMSE (squared=False) against the ground-truth similarities.
        performance[name] = metrics.mean_squared_error(labels, score, squared=False)
    if arguments.output:
        with open(arguments.output, "w") as f:
            json.dump(performance, f)
    else:
        # Lower RMSE ranks higher.
        print("Ranking:")
        for i, name in enumerate(sorted(performance, key=performance.get)):
            print(f"{i+1}. {name} ({performance[name]:.4})")
| StarcoderdataPython |
188127 | from bs4 import BeautifulSoup
from requests import get
import math
def search(term: str, num_results: int = 10, lang: str = "en", proxy: str = "None", filter_results: bool = True):
    """Scrape Google web search results.

    Args:
        term: Query string.
        num_results: How many results to collect (oversearches slightly,
            then trims the surplus).
        lang: Google interface language code.
        proxy: Proxy URL, or "None"/empty for a direct connection.  The
            default is the *string* "None" to preserve the original API.
        filter_results: Whether Google should collapse near-duplicate results.

    Returns:
        list[dict]: One dict per result with 'url' and 'title' keys.
    """
    verbose = False
    usr_agent = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/61.0.3163.100 Safari/537.36'}

    def fetch_results(search_term: str, number_results: int, language_code: str, filter_results: bool, start_num: int = 0):
        # Google expects filter as 0/1, not True/False.
        filter_flag = 1 if filter_results else 0
        escaped_search_term = search_term.replace(' ', '+')
        # BUGFIX: the original URL template ended with a stray ')' that was
        # sent to Google as part of the filter parameter.
        google_url = 'https://www.google.com/search?q={}&num={}&hl={}&start={}&filter={}'.format(
            escaped_search_term, number_results, language_code, start_num, filter_flag)
        proxies = None
        # BUGFIX: the public default is the literal string "None"; it must
        # not be treated as a real proxy URL.
        if proxy and proxy != "None":
            if proxy[:5] == "https":
                proxies = {"https": proxy}
            else:
                proxies = {"http": proxy}
        response = get(google_url, headers=usr_agent, proxies=proxies)
        if verbose:
            print(f"[fetch_results] Search URL: {google_url}")
            print(f"[fetch_results] Proxy: {proxies}")
        response.raise_for_status()
        return response.text

    def parse_results(raw_html):
        # Yield {'url', 'title'} for each organic result block.
        soup = BeautifulSoup(raw_html, 'html.parser')
        for result in soup.find_all('div', attrs={'class': 'g'}):
            link = result.find('a', href=True)
            title = result.find('h3')
            if link and title:
                yield {'url': str(link['href']), 'title': str(title.text)}

    search_step = 50
    rounds = math.ceil(num_results / search_step)
    # Google often returns a couple of items fewer than requested, so ask
    # for slightly more and trim the surplus at the end.
    search_total = num_results + (2 * rounds)
    results = list()
    for i in range(rounds):
        start_num = i * search_step
        if (search_step * (i + 1)) > search_total:
            step = search_total - (i * search_step)
        else:
            step = search_step
        if verbose:
            print(f"Round:{i}, rounds: {rounds} ")
            print(f"start_num:{start_num}, step:{step}")
        html = fetch_results(term, step, lang, filter_results, start_num)
        results_temp = list(parse_results(html))
        if verbose:
            print(f"Step Results:{len(results_temp)}")
        results.extend(results_temp)
        if len(results_temp) < step - 2:
            # Received fewer results than requested: the search is exhausted.
            break
    if len(results) > num_results:
        results = results[:num_results]
    if verbose:
        print(f"Total results len:{len(results)}")
    return results
| StarcoderdataPython |
9630262 | #1.只负责写视图
import os
import datetime
from flask import render_template
from main import app
from models import Curriculum#导入这个表
from flask import redirect#跳转 即Django中的重定向功能
import functools
from flask import session
from models import *
class Calendar:
    """
    当前类实现日历功能 (month-calendar helper for the current year).

    1. return_month() gives the calendar as a list of week rows, seven cells
       each; a cell is either "empty" or "<day>—django开发".
    2. print_month() prints the grid with weekday headers (Monday first).

    The original hard-coded month lengths (and February to 28 days, which is
    wrong in leap years); this version derives the true length from
    ``calendar.monthrange``.
    """
    def __init__(self, month="now"):
        # Function-scope import keeps the fix self-contained in this class.
        from calendar import monthrange
        self.result = []
        now = datetime.datetime.now()
        if month == "now":
            month = now.month
        first_date = datetime.datetime(now.year, month, 1, 0, 0)
        # True number of days in the month, leap-year aware.
        days_in_month = monthrange(now.year, month)[1]
        self.day_range = list(range(1, days_in_month + 1))
        first_week = first_date.weekday()  # 0 == Monday
        # First row: pad with "empty" until the weekday of the 1st.
        line1 = []
        for _ in range(first_week):
            line1.append("empty")
        for _ in range(7 - first_week):
            line1.append(str(self.day_range.pop(0)) + "—django开发")
        self.result.append(line1)
        # Remaining rows: consume days left-to-right, pad the tail row.
        while self.day_range:
            line = []
            for _ in range(7):
                if len(line) < 7 and self.day_range:
                    line.append(str(self.day_range.pop(0)) + "—django开发")
                else:
                    line.append("empty")
            self.result.append(line)

    def return_month(self):
        """Return the month grid as a list of 7-cell week rows."""
        return self.result

    def print_month(self):
        """Print the month grid under a Monday-first weekday header."""
        print("星期一 星期二 星期三 星期四 星期五 星期六 星期日")
        for line in self.result:
            for day in line:
                day = day.center(6)
                print(day, end=" ")
            print()
def loginValid(fun):
    """Decorator that only runs the wrapped view for an authenticated user.

    The cookie identity must match both the database record and the session
    entry; otherwise the request is redirected to the login page.
    """
    @functools.wraps(fun)  # keep the wrapped view's name for Flask routing
    def inner(*args, **kwargs):
        cookie_name = request.cookies.get('username')
        user_id = request.cookies.get('id', '0')  # '0' matches no user row
        user = User.query.get(int(user_id))
        if not user:
            return redirect('/login')
        if user.user_name == cookie_name and cookie_name == session.get('username'):
            return fun(*args, **kwargs)
        return redirect('/login/')
    return inner
@app.route("/")   # route registration wraps the auth guard below
@loginValid       # authentication check runs before the view body
def index():
    """Render the landing page for an authenticated user."""
    name = "laojiu"
    return render_template("index.html", name=name)
@app.route("/login/",methods=['GET','POST'])
def login():
    """Log a user in by email/password, setting auth cookies and the session.

    GET renders the login form; POST validates the credentials.  The two
    ``<PASSWORD>`` placeholders left by source redaction are restored to the
    evident intent: compare the submitted password with the stored one.
    """
    error = ''  # defined up front so GET and POST both pass it to the template
    if request.method == 'POST':
        form_data = request.form
        email = form_data.get('email')
        password = form_data.get('password')
        # Form validation against the database record.
        user = User.query.filter_by(email=email).first()
        if user:
            db_password = user.password
            # NOTE(review): plain-text comparison — passwords are stored
            # unhashed by register(); should be hashed before shipping.
            if password == db_password:
                response = redirect('/index/')
                response.set_cookie('username', user.user_name)
                response.set_cookie('email', user.email)
                response.set_cookie('id', str(user.id))
                session['username'] = user.user_name  # checked against the cookie later
                return response
            else:
                error = '密码错误'
        else:
            error = '用户名不存在'
    return render_template("login.html", error=error)
@app.route('/logout/', methods=['GET', 'POST'])
def logout():
    """Clear the auth cookies and session entry, then send the user to login."""
    response = redirect('/login/')
    for cookie_name in ('username', 'email', 'id'):
        response.delete_cookie(cookie_name)
    session.pop('username')  # raises KeyError if no session entry exists
    return response
@app.route("/base/")
def base():
    """Render the bare base layout template (layout preview page)."""
    return render_template("base.html")
@app.route("/index/")
def exindex():
    """List every Curriculum row on the example index page."""
    return render_template("ex_index.html", curr_list=Curriculum.query.all())
@app.route("/userinfo/")
def userinfo():
    """Show the user page with the current month's calendar grid."""
    calendar = Calendar().return_month()
    now = datetime.datetime.now()
    return render_template("userinfo.html", calendar=calendar, now=now)
from flask import request
from models import User
@app.route('/register/',methods=['GET','POST'])
def register():
    '''
    Handle user registration.

    Form data submitted via POST is read from ``request.form`` (``request``
    is imported above); GET simply renders the registration form.

    :return: the rendered registration page
    '''
    if request.method == 'POST':
        username = request.form.get('username')
        password = request.form.get('password')
        email = request.form.get('email')
        user = User()
        user.user_name = username
        # NOTE(review): the password is stored as plain text here and compared
        # as plain text at login — it should be hashed; confirm before shipping.
        user.password = password
        user.email = email
        user.save()
    return render_template('register.html')
@app.route('/holiday_leave/', methods=['GET', 'POST'])
def holiday_leave():
    """Render the leave-request form (GET) and persist a submission (POST).

    Fixes from the original: ``methods`` must be a list (the old
    ``methods='GET','POST'`` was a syntax error), and the POST branch now
    fills in and saves the Leave record instead of re-declaring model
    columns inside the view body.
    """
    if request.method == 'POST':
        data = request.form
        leave = Leave()
        # NOTE(review): assumes the requester id comes from the login cookie,
        # matching how loginValid identifies the user — confirm this source.
        leave.request_id = request.cookies.get('id')
        leave.request_name = data.get('request_user')                 # 姓名
        leave.request_type = data.get('request_type')                 # 请假类型
        leave.request_start_time = data.get('start_time')             # 起始时间
        leave.request_end_time = data.get('end_time')                 # 结束时间
        leave.request_description = data.get('request_description')   # 请假原因
        leave.request_phone = data.get('phone')                       # 联系方式
        # request_status is left to the model's default (pending approval).
        leave.save()
    return render_template('holiday_leave.html')
# #9-27新增
# @app.route('/picture/',methods='GET','POST')
# def picture():
# p = {'picture':'img/1.jpg'}
# if request.method == 'POST':
# file = request.files.get('photo')
# file_name = file.filename
# file_path = 'img/%s'%file_name
# file_path = os.path.join(STATICFILES_DIR,'img/%s%filename')
# file.save(file_path)
# p = Picture()
# p.picture = file_path
# p.save()
#
# return render_template('picture.html',p = p)
# from main import api
# from flask_restful import Resource
#
# @api.resource('/Api/v1/leave/')
# class leaveApi(Resource):
# def get(self):#查
# return {'method':'这是get请求,负责返回所有的数据'}
# def post(self):#增
# data = request.form
# request_id = data.get('request_id')
# request_name = data.get('request_name')
# request_type = data.get('request_type')
# request_start_time = data.get('request_start_time')
# request_end_time = data.get('request_end_time')
# request_description = data.get('request_description')
# request_phone = data.get('request_phone')
# request_status = data.get('request_status')
#
# leave = Leave()
# leave.request_id = request_id
# leave.request_name = request_name
# leave.request_type = request_type
# leave.request_start_time = request_start_time
# leave.request_end_time = request_end_time
# leave.request_description = request_description
# leave.request_phone = request_phone
# leave.request_status = request_status
# return {'method':'负责保存数据'}
# def put(self):#改
# return {'method':'负责修改数据'}
# def delete(self):#删
# return {'method':'负责删除数据'}
| StarcoderdataPython |
5088868 | <filename>Dice Roller/dice.py
import random
def roll_dice(sides=6):
    """Roll a die with ``sides`` faces, print the result and return it.

    Args:
        sides (int): Number of faces on the die (default 6).

    Returns:
        int: The rolled value, between 1 and ``sides`` inclusive.
    """
    # random.randint picks the face directly, replacing the original string
    # of digit characters + random.choice (which also broke for sides > 9).
    selected_num = random.randint(1, sides)
    print(f'Your dice is rolling! You got: {selected_num}')
    return selected_num


roll_dice()
| StarcoderdataPython |
9700183 | <reponame>hase1128/dragonfly
"""
Tensorflow Code.
"""
| StarcoderdataPython |
1792834 | """Data Embedding algorithms for bitcoin addresses"""
import hashlib
from b58 import encode as base58_encode, decode as base58_decode
TESTNET = True
version = 0 if not TESTNET else 111
def embed_in_address(x):
    """Encode a 20-byte payload as a Base58Check bitcoin address.

    NOTE(review): Python 2 code — the helpers below rely on py2 str/bytes
    semantics, so this module does not run unchanged under Python 3.
    """
    assert isinstance(x, bytes)
    assert len(x) == 20
    return base58_check_encode(x, version)
def recover_bytes_from_address(addr):
    """Decode a Base58Check address back to its 20-byte payload,
    verifying checksum and version byte along the way."""
    data = base58_check_decode(addr, version)
    return data
def dhash(s):
    """Double SHA-256 — sha256(sha256(s)) — as used throughout Bitcoin."""
    first_round = hashlib.sha256(s).digest()
    return hashlib.sha256(first_round).digest()
def rhash(s):
    """HASH160: RIPEMD-160 of SHA-256 of ``s`` (Bitcoin address hashing).

    NOTE(review): relies on the OpenSSL build providing 'ripemd160', which
    modern OpenSSL 3 installs may expose only via the legacy provider.
    """
    ripemd = hashlib.new('ripemd160')
    ripemd.update(hashlib.sha256(s).digest())
    return ripemd.digest()
def base58_encode_padded(s):
    """Base58-encode ``s``, preserving leading zero bytes as '1' characters.

    NOTE(review): Python 2 only — ``str.encode('hex')`` and the ``chr(0)``
    comparison assume py2 byte strings.
    """
    res = base58_encode(int('0x' + s.encode('hex'), 16))
    # Count leading NUL bytes; each becomes a leading '1' in Base58.
    pad = 0
    for c in s:
        if c == chr(0):
            pad += 1
        else:
            break
    return '1' * pad + res
def base58_decode_padded(s):
    """Inverse of base58_encode_padded: decode and restore leading NUL bytes.

    NOTE(review): Python 2 only — uses ``'%x' ... .decode('hex')``.
    """
    # Leading '1' characters encode leading NUL bytes of the payload.
    pad = 0
    for c in s:
        if c == '1':
            pad += 1
        else:
            break
    h = '%x' % base58_decode(s)
    # hex strings must have an even number of digits before decoding.
    if len(h) % 2:
        h = '0' + h
    res = h.decode('hex')
    return chr(0) * pad + res
def base58_check_encode(s, version=0):
    """Base58Check encode: prepend the version byte, append the first four
    bytes of the double-SHA256 checksum, then Base58-encode (Python 2)."""
    vs = chr(version) + s
    check = dhash(vs)[:4]
    return base58_encode_padded(vs + check)
def base58_check_decode(s, version=0):
    """Base58Check decode: verify the 4-byte checksum and the version byte,
    returning the raw payload (Python 2)."""
    k = base58_decode_padded(s)
    # Layout: 1 version byte | payload | 4 checksum bytes.
    v0, data, check0 = k[0], k[1:-4], k[-4:]
    check1 = dhash(v0 + data)[:4]
    # NOTE(review): raising BaseException is overly broad — a ValueError
    # would be the conventional choice here.
    if check0 != check1:
        raise BaseException('checksum error')
    if version != ord(v0):
        raise BaseException('version mismatch')
    return data
if __name__ == '__main__':
    # Round-trip demo (Python 2): embed 20 bytes of text in an address,
    # then recover them again.
    secret = "mytwentycharactertxt"
    message = bytes(secret)
    print "Original data:\t\t\t", message
    addr = embed_in_address(message)
    print "Address containing data:\t", addr
    m = recover_bytes_from_address(addr)
    print "Recovered data:\t\t\t", m
| StarcoderdataPython |
116805 | """Functions for using Gaussian Processes."""
import logging
from typing import Callable, Tuple
import numpy as np
def zero_mean_initialise(x: np.ndarray, kernel_fun: Callable, noise=0.0) -> Tuple[np.ndarray, np.ndarray]:
    """Initialise a zero-mean GP from the given kernel.

    Parameters
    ----------
    x: ndarray
        Input locations, one row per point.
    kernel_fun: function
        Kernel function, like those provided by the kernel_functions module.
    noise: float
        Observation-noise variance added to the covariance diagonal.

    Returns
    -------
    tuple of ndarray
        The (all-zero) mean vector and the covariance matrix.
    """
    logging.debug("x shape: {}".format(x.shape))
    n_points = x.shape[0]
    mean_vector = np.zeros(n_points)
    logging.debug("mean vector (initial) shape: {}".format(mean_vector.shape))
    covariance_matrix = kernel_fun(x, x)
    # Jitter the diagonal with the observation-noise variance.
    covariance_matrix += noise * np.identity(covariance_matrix.shape[0])
    logging.debug("x shape (after kernel call): {}".format(x.shape))
    logging.debug("covariance matrix shape: {}".format(covariance_matrix.shape))
    return mean_vector, covariance_matrix
def sample_function(mean_vector, covariance_matrix) -> np.ndarray:
    """Sample one function realisation from a GP.

    Parameters
    ----------
    mean_vector: ndarray
        Mean vector of the GP.
    covariance_matrix: ndarray
        Covariance matrix of the GP.

    Returns
    -------
    ndarray
        One draw from N(mean_vector, covariance_matrix); usable as the
        "true" function in experiments.
    """
    # The original bound the draw to a local named exactly like this function,
    # shadowing it inside its own body; use a distinct name instead.
    draw = np.random.multivariate_normal(mean_vector, covariance_matrix)
    return draw
def regression_update(x: np.ndarray,
                      kernel_fun: Callable[[np.ndarray, np.ndarray], np.ndarray],
                      x_data: np.ndarray,
                      y_data: np.ndarray,
                      noise: float = 0.0):
    """Update the GP with the given data

    Parameters
    ----------
    x: List of x points (query locations for the posterior)
    kernel_fun: Kernel function to be called, takes 2 vectors and returns the corresponding kernel matrix
    x_data: x points for which we have data
    y_data: y points for which we have data
    noise: amount of noise over the feedback

    Returns
    -------
    Updated mean and variance of the GP at each query point in ``x``.
    """
    # k_list[i] is the column vector k(x_i, x_data): cross-covariance between
    # query point i and every training point.  NOTE(review): assumes
    # kernel_fun returns a 2-D matrix even for single points (hence [0, 0]).
    k_list = [np.array([[kernel_fun(x_, x_d)[0, 0] for x_d in x_data]]).T for x_ in np.array(x)]
    # noinspection PyPep8Naming
    K = kernel_fun(x_data, x_data) # Direct matrix version
    K += noise * np.identity(K.shape[0])
    # Prior variance at each query point, k(x_i, x_i).
    k_new_list = [np.array(kernel_fun(x_, x_)) for x_ in np.array(x)]
    # Obtain posterior predictive distribution
    inv_K = np.linalg.pinv(K) # Uses pseudo-inverse to overcome inversion limitations
    # Posterior mean:   k*^T K^{-1} y     for each query point.
    # Posterior var:    k** - k*^T K^{-1} k*
    updated_mean = np.array([(k.T.dot(inv_K).dot(y_data)) for k in k_list]).flatten()
    updated_variance = np.array([(k_new - k.T.dot(inv_K).dot(k)) for k, k_new in zip(k_list, k_new_list)]).flatten()
    return updated_mean, updated_variance
| StarcoderdataPython |
9789131 | <filename>src/core/mocfunctions.py
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404
from core.models import *
import logging
logger = logging.getLogger(__name__)
# This array defines all the IDs in the database of the articles that are loaded for the
# various pages in the menu. Here we can differentiate between the different sites.
TAG_ID = settings.TAG_ID_LIST
PAGE_ID = settings.PAGE_ID_LIST
PROJECT_ID = settings.PROJECT_ID_LIST
PROJECT_LIST = settings.PROJECT_LIST
# NOTE(review): presumably the record id of the automation bot account -- confirm.
AUTO_BOT = 32070
# Also defined in context_processor for templates, but we need it sometimes in the Folium map configuration
# NOTE(review): "<KEY>" looks like an anonymisation placeholder, not a real token.
MAPBOX_API_KEY = "<KEY>"
SATELLITE_TILES = "https://api.mapbox.com/v4/mapbox.satellite/{z}/{x}/{y}@2x.png?access_token=" + MAPBOX_API_KEY
STREET_TILES = "https://api.mapbox.com/styles/v1/mapbox/streets-v11/tiles/{z}/{x}/{y}?access_token=" + MAPBOX_API_KEY
PLACEHOLDER_PHOTO_THUMBNAIL = "/media/records/placeholder.thumbnail.png"
PLACEHOLDER_PHOTO = "/media/records/placeholder.png"
# Maps relationship slugs to their database ids so code can avoid lookups.
RELATIONSHIP_ID = {
    "author": 4,
    "uploader": 11,
    "participant": 12,
    "member": 6,
    "publisher": 2,
    "platformu_admin": 1,
    "processor": 34,
}
# If we add any new project, we should add it to this list.
# We must make sure to filter like this to exclude non-project news
# (which we want in the community section but not here), as well as MoI news
MOC_PROJECTS = [1,2,3,4,6,7,8,9,11,13,14,15,16,18,3458,32018,32542]
# This is the list with projects that have an active forum
# It will show in the dropdown boxes to filter by this category
# Also found in core
OPEN_WORK_PROJECTS = [1,2,3,4,32018,16,18]
# Authentication of users
def get_space(request, slug):
    """Return the Space activated under `slug` for the current project (404 otherwise).

    NOTE(review): re-defined later in this module; at import time the later
    copy is the one in effect. Consider removing one of the two.
    """
    # Here we can build an expansion if we want particular people to see dashboards that are under construction
    check = get_object_or_404(ActivatedSpace, slug=slug, part_of_project_id=request.project)
    return check.space
def get_project(request):
    """Return the Project attached to the current request, or raise Http404."""
    return get_object_or_404(Project, pk=request.project)
# Get all the child relationships, but making sure we only show is_deleted=False and is_public=True
def get_children(record):
    """Return child RecordRelationships of `record` whose child record is
    public and not deleted.

    Renamed the local from `list`, which shadowed the builtin.
    """
    relationships = RecordRelationship.objects.filter(record_parent=record).filter(
        record_child__is_deleted=False, record_child__is_public=True)
    return relationships
# Get all the parent relationships, but making sure we only show is_deleted=False and is_public=True
def get_parents(record):
    """Return parent RecordRelationships of `record` whose parent record is
    public and not deleted.

    Renamed the local from `list`, which shadowed the builtin.
    """
    relationships = RecordRelationship.objects.filter(record_child=record).filter(
        record_parent__is_deleted=False, record_parent__is_public=True)
    return relationships
# General script to check if a user has a certain permission
# This is used for validating access to certain pages only, so superusers
# will always have access
# Version 1.1
def has_permission(request, record_id, allowed_permissions):
    """Return True when the current user may act on record `record_id`.

    Superusers and staff always pass. Otherwise the user needs a
    RecordRelationship to the record whose relationship slug is in
    `allowed_permissions`.
    """
    if request.user.is_authenticated and (
        request.user.is_superuser or request.user.is_staff
    ):
        return True
    try:
        # Anonymous users have no `people` profile; accessing it raises, and
        # we treat that as "no permission" instead of a broad bare `except:`.
        check = RecordRelationship.objects.filter(
            relationship__slug__in = allowed_permissions,
            record_parent = request.user.people,
            record_child_id = record_id,
        )
    except Exception:
        return False
    return check.exists()
# If users ARE logged in, but they try to access pages that they don't have
# access to, then we log this request for further debugging/review
# Version 1.0
def unauthorized_access(request):
    """Log an unauthorized page access and abort the request with PermissionDenied (HTTP 403)."""
    logger.error("No access to this UploadSession")
    # Disabled: previously also recorded a high-priority Work item holding the
    # request metadata for later review.
    #Work.objects.create(
    #    name = "Unauthorized access detected",
    #    description = request.META,
    #    priority = Work.WorkPriority.HIGH,
    #)
    raise PermissionDenied
# Quick debugging, sometimes it's tricky to locate the PRINT in all the Django
# output in the console, so just using a simply function to highlight it better
def p(text):
    """Print `text` between separator lines so it stands out in console output."""
    separator = "----------------------"
    print(separator)
    print(text)
    print(separator)
# We should cache these layers for a while!
def get_layers(request):
    """Return the top-level map layer tags for the current project."""
    # CityLoops (project 6) uses its own parent tag; every other site shares 845.
    parent = 971 if request.project == 6 else 845
    return Tag.objects.filter(parent_tag_id=parent)
# We should cache these layers for a while!
def get_layers_count(request):
    """Return a {tag_id: child tag count} mapping for the current project's layers.

    Replaced the manual accumulation loop and the ambiguous local name `l`
    with a dict comprehension.
    """
    # CityLoops (project 6) uses its own parent tag; every other site shares 845.
    parent = 971 if request.project == 6 else 845
    return {tag.id: tag.children.count()
            for tag in Tag.objects.filter(parent_tag_id=parent)}
def get_space(request, slug):
    """Return the Space activated under `slug` for the current project (404 otherwise).

    NOTE(review): exact duplicate of the earlier `get_space` in this module;
    this later definition is the one in effect. Consider removing one copy.
    """
    # Here we can build an expansion if we want particular people to see dashboards that are under construction
    check = get_object_or_404(ActivatedSpace, slug=slug, part_of_project_id=request.project)
    return check.space
# Quick function to make someone the author of something
# Version 1.0
def set_author(author, item):
    """Record the person with id `author` as the author of record `item`."""
    RecordRelationship.objects.create(
        relationship_id = RELATIONSHIP_ID["author"],
        record_parent_id = author,
        record_child_id = item,
    )
# color scheme definitions from colorbrewer2.org combined with default MoC colours
# Keys are palette names; values are ordered lists of hex colours for charts.
COLOR_SCHEMES = {
    "moc": ["#144d58","#a6cee3","#33a02c","#b2df8a","#e31a1c","#fb9a99","#ff7f00","#fdbf6f","#6a3d9a","#cab2d6","#b15928","#ffff99"],
    "accent": ["#7fc97f","#beaed4","#fdc086","#ffff99","#386cb0","#f0027f","#bf5b17","#666666"],
    "dark": ["#1b9e77","#d95f02","#7570b3","#e7298a","#66a61e","#e6ab02","#a6761d","#666666"],
    "pastel": ["#fbb4ae","#b3cde3","#ccebc5","#decbe4","#fed9a6","#ffffcc","#e5d8bd","#fddaec","#f2f2f2"],
    "set": ["#e41a1c","#377eb8","#4daf4a","#984ea3","#ff7f00","#ffff33","#a65628","#f781bf","#999999"],
    "dozen": ["#8dd3c7","#ffffb3","#bebada","#fb8072","#80b1d3","#fdb462","#b3de69","#fccde5","#d9d9d9","#bc80bd","#ccebc5","#ffed6f"],
    "green": ["#005824", "#238b45", "#41ae76", "#66c2a4", "#99d8c9", "#ccece6", "#e5f5f9", "#f7fcfd"],
    "blue": ["#084594", "#2171b5", "#4292c6", "#6baed6", "#9ecae1", "#c6dbef", "#deebf7","#f7fbff"],
    "purple": ["#3f007d", "#54278f", "#6a51a3", "#807dba", "#9e9ac8", "#bcbddc", "#dadaeb", "#efedf5", "#fcfbfd"],
    "red": ["#7f0000", "#b30000", "#d7301f", "#ef6548", "#fc8d59", "#fdbb84", "#fdd49e", "#fee8c8", "#fff7ec"],
    "twentyfour": ["#144d58","#a6cee3","#33a02c","#b2df8a","#e31a1c","#fb9a99","#ff7f00","#fdbf6f","#6a3d9a","#cab2d6","#b15928","#ffff99", "#8dd3c7","#bd3e6e","#bebada","#fb8072","#80b1d3","#fdb462","#b3de69","#fccde5","#d9d9d9","#bc80bd","#ccebc5","#ffed6f"]
}
# This function records a copy of the information in the record table (title + description)
# in a table that maintains a historic record.
def save_record_history(info, people, comments, date=None):
    """Snapshot the current name/description of record `info` into RecordHistory.

    Existing CURRENT history rows for the record are demoted to HISTORIC
    first, then a new CURRENT row is written. `date` optionally backdates
    the snapshot.
    """
    # Demote whatever was current before this snapshot.
    RecordHistory.objects.filter(
        record=info, status=RecordHistory.Status.CURRENT,
    ).update(status=RecordHistory.Status.HISTORIC)

    snapshot = RecordHistory.objects.create(
        record=info,
        name=info.name,
        description=info.description,
        status=RecordHistory.Status.CURRENT,
        people=people,
        comments=comments,
    )
    if date:
        snapshot.date_created = date
        snapshot.save()
# This function is used to record votes for work items
# We want to store the number of votes cast in each work item
def work_item_vote(info, people):
    """Cast a vote by `people` on work item `info`.

    Returns False when the person is not allowed to vote, True otherwise.
    The current vote total is cached in the item's meta_data.
    """
    if not people.can_vote:
        return False
    # Relationship id 36 marks a vote (magic number, not in RELATIONSHIP_ID)
    # -- TODO confirm against the relationships table.
    RecordRelationship.objects.create(
        record_parent=people,
        record_child=info,
        relationship_id=36,
    )
    info.meta_data = info.meta_data or {}
    info.meta_data["votes"] = info.voters.count()
    info.save()
    return True
# And here we roll back votes
def work_item_unvote(info, people):
    """Withdraw `people`'s vote on work item `info` and refresh the cached count.

    Always returns True, whether or not a vote existed (matching the
    original behaviour).
    """
    vote = RecordRelationship.objects.filter(
        record_parent=people, record_child=info, relationship_id=36)
    if vote:
        vote.delete()
    # Guard added: mirrors work_item_vote, which initialises meta_data.
    # Without it, an item whose meta_data is still None crashed here.
    if not info.meta_data:
        info.meta_data = {}
    info.meta_data["votes"] = info.voters.count()
    info.save()
    return True
| StarcoderdataPython |
6446252 | # DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#82. Remove Duplicates from Sorted List II
#Given a sorted linked list, delete all nodes that have duplicate numbers, leaving only distinct numbers from the original list.
#Example 1:
#Input: 1->2->3->3->4->4->5
#Output: 1->2->5
#Example 2:
#Input: 1->1->1->2->3
#Output: 2->3
## Definition for singly-linked list.
## class ListNode:
## def __init__(self, x):
## self.val = x
## self.next = None
#class Solution:
# def deleteDuplicates(self, head):
# """
# :type head: ListNode
# :rtype: ListNode
# """
# Time Is Money | StarcoderdataPython |
9744079 | <reponame>paultag/billy
from django.core import urlresolvers
from django.template.defaultfilters import slugify
from billy.core import mdb as db
from billy.core import settings
from .base import Document, RelatedDocument, RelatedDocuments, ListManager
from .metadata import Metadata
class CommitteeMember(dict):
    """One entry of a committee's `members` list, as a thin dict wrapper.

    `legislator_object` resolves the member's `leg_id` into a Legislator
    document on attribute access (descriptor defined in .base).
    """
    legislator_object = RelatedDocument('Legislator', instance_key='leg_id')
class CommitteeMemberManager(ListManager):
    """Iterates a committee's members, pairing each with its Legislator doc.

    Legislator lookups are batched and cached on the manager (or reused from
    the committee, when something else already attached them there).
    """

    keyname = 'members'

    def __iter__(self):
        """Yield (member_dict, legislator_doc_or_None) for every member."""
        members = self.committee['members']

        # First check whether legislators are cached
        # in this instance.
        try:
            objs = self._legislators
        except AttributeError:

            # If this was a metadata.committees_legislators,
            # all the legislators will be accessible
            # from the committee instance.
            try:
                objs = self.committee._legislators
            except AttributeError:
                # No cache anywhere: fetch all referenced legislators in one
                # query and memoise the id -> document mapping on self.
                ids = filter(None, [obj['leg_id'] for obj in members])
                spec = {'_id': {'$in': ids}}
                objs = dict((obj['_id'], obj) for obj in
                            db.legislators.find(spec))
                self._legislators = objs

        for member in members:
            _id = member['leg_id']
            # Members without a resolvable leg_id are still yielded, with None.
            if _id is not None and _id in objs:
                yield (member, objs[_id])
            else:
                yield (member, None)
class Committee(Document):
    """Mongo-backed committee record with helpers for display and linking."""

    collection = db.committees

    feed_entries = RelatedDocuments('FeedEntry', model_keys=['entity_ids'])
    members_objects = CommitteeMemberManager()

    def display_name(self):
        """Committee name, with ': subcommittee' appended when one exists."""
        base_name = self['committee']
        subcommittee = self['subcommittee']
        if subcommittee is None:
            return base_name
        return '%s: %s' % (base_name, subcommittee)

    def events(self):
        """Events in which this committee is listed as a participant."""
        return db.events.find({"participants.committee_id": self['_id']})

    @property
    def metadata(self):
        """Metadata document for this committee's jurisdiction."""
        return Metadata.get_object(self[settings.LEVEL_FIELD])

    def get_absolute_url(self):
        """Canonical URL: reversed route plus a slug of the display name."""
        path_args = [self.metadata['abbreviation'], self['_id']]
        base_url = urlresolvers.reverse('committee', args=path_args)
        return '%s%s/' % (base_url, slugify(self.display_name()))
| StarcoderdataPython |
9691019 | <gh_stars>0
from quran.factory.audio_factory import AudioFactory
from quran.factory.edition_factory import EditionFactory
from quran.factory.image_factory import ImageFactory
from quran.factory.surah_factory import SurahFactory
from quran.factory.translation_factory import TranslationFactory
from quran.repository.ayah_repo import AyahRepo
from quran.usecase.ayah.create_ayah import CreateAyah
from quran.usecase.ayah.find_ayah import FindAyah
class AyahFactory:
    """Wires ayah use-cases together with their repository and finders."""

    @classmethod
    def create(cls):
        """Build the CreateAyah use-case backed by a fresh AyahRepo."""
        return CreateAyah(AyahRepo())

    @classmethod
    def find_ayah(cls):
        """Build the FindAyah use-case with every related finder attached."""
        repo = AyahRepo()
        return FindAyah(
            repo,
            TranslationFactory.find_translation(),
            SurahFactory.find_surah(),
            EditionFactory.find_edition(),
            AudioFactory.find_audio(),
            ImageFactory.find_image(),
        )
| StarcoderdataPython |
6505481 | #! /usr/bin/python3
'''
Given one audio clip, output what the network thinks
'''
from __future__ import print_function
import numpy as np
import librosa
import os
from os.path import isfile
from panotti.models import *
from panotti.datautils import *
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # less TF messages, thanks
def get_canonical_shape(signal):
    """Normalise an audio array's shape to (channels, samples).

    1-D (mono) input is reported as a single channel; anything already
    2-D or higher keeps its own shape.
    """
    if signal.ndim == 1:
        return (1, signal.shape[0])
    return signal.shape
def predict_one(signal, sr, model, expected_melgram_shape):
    """Run the model on one audio clip and return its class probabilities.

    The mel spectrogram is truncated / zero-padded as needed so its shape
    matches what the model's input layer expects.
    """
    X = make_layered_melgram(signal, sr)
    print("signal.shape, melgram_shape, sr = ", signal.shape, X.shape, sr)

    if X.shape[1:] != expected_melgram_shape:
        # Pad with zeros (and/or truncate) into the expected shape.
        padded = np.zeros([1] + list(expected_melgram_shape))
        d1, d2, d3 = (min(a, b) for a, b in zip(padded.shape[1:], X.shape[1:]))
        padded[0, :d1, :d2, :d3] = X[0, :d1, :d2, :d3]
        X = padded

    return model.predict(X, batch_size=1, verbose=False)[0]
def main(args):
    """Classify each input audio file and record the answers in data.json.

    For every readable file: load the audio, predict class probabilities
    with the network stored in args.weights, print the scores, and append
    the winning class to a JSON summary. The summary is rewritten after
    every file so it stays current while long batches run.

    Fixes vs. the original: the JSON was hand-assembled with string
    concatenation, so filenames containing quotes/backslashes produced
    invalid JSON, and the manual trailing-comma logic broke when the last
    path did not exist. Using the json module handles both.
    """
    import json  # local import: only needed for the results summary

    np.random.seed(1)

    # Load the model; class names are encoded in the weights file.
    model, class_names = load_model_ext(args.weights)
    if model is None:
        print("No weights file found. Aborting")
        exit(1)

    nb_classes = len(class_names)
    print(nb_classes," classes to choose from: ",class_names)

    expected_melgram_shape = model.layers[0].input_shape[1:]
    print("Expected_melgram_shape = ",expected_melgram_shape)

    numfiles = len(args.file)
    print("Reading",numfiles,"files")

    # NOTE(review): args.dur is accepted on the command line but was never
    # applied when loading audio; preserved as-is.
    items = []

    def write_summary():
        # Rewrite the whole summary so data.json is always valid JSON.
        with open("data.json", "w") as json_file:
            json.dump({"items": items}, json_file, indent=2)

    write_summary()  # ensure the file exists even when no input is readable
    for idnum, infile in enumerate(args.file):
        if not os.path.isfile(infile):
            continue  # silently skip missing paths, as before
        print("File",infile,":",end="")
        signal, sr = load_audio(infile, mono=args.mono, sr=args.resample)
        y_proba = predict_one(signal, sr, model, expected_melgram_shape)
        for i in range(nb_classes):
            print( class_names[i],": ",y_proba[i],", ",end="",sep="")
        answer = class_names[np.argmax(y_proba)]
        print("--> ANSWER:", answer)
        items.append({"id": str(idnum), "name": infile, "tags": [answer]})
        write_summary()
    return
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description="predicts which class file(s) belong(s) to")
    parser.add_argument('-w', '--weights', #nargs=1, type=argparse.FileType('r'),
            help='weights file in hdf5 format', default="weights.hdf5")
    #parser.add_argument('-c', '--classpath', #type=argparse.string, help='directory with list of classes', default="Preproc/Test/")
    parser.add_argument("-m", "--mono", help="convert input audio to mono",action="store_true")
    # NOTE(review): help text below looks copy-pasted from --mono; --resample
    # actually sets the target sample rate passed to load_audio.
    parser.add_argument("-r", "--resample", type=int, default=44100, help="convert input audio to mono")
    # NOTE(review): --dur is parsed but never used by main().
    parser.add_argument('-d', "--dur",  type=float, default=None,   help='Max duration (in seconds) of each clip')
    parser.add_argument('file', help="file(s) to classify", nargs='+')
    args = parser.parse_args()
    main(args)
| StarcoderdataPython |
6520200 | <reponame>Cobliteam/cassandra-s3-incremental-backup-watcher<gh_stars>1-10
from __future__ import absolute_import, unicode_literals
import argparse
import json
import logging
import os
import re
import shlex
import subprocess
import boto3
from .util import clean_s3_path
from .sstable import traverse_data_dir
from .transfer import generate_transfers
logger = logging.getLogger(__name__)
def get_node_info(nodetool_cmd):
cmd = nodetool_cmd + ['info']
try:
out = subprocess.check_output(cmd)
except (subprocess.CalledProcessError, OSError) as e:
raise RuntimeError('nodetool failed: {}'.format(e))
data = {}
for line in out.splitlines():
match = re.match(r'^([^:]+?)\s+:\s+(.+)\s*$', line)
if not match:
continue
key, value = match.group(1, 2)
data[key] = value
return data
def check_includes_excludes(includes, excludes):
    """Build a predicate from include/exclude lists.

    A value passes when it is not excluded and, if an include list was
    given, it appears in that list. An empty include list allows anything
    not explicitly excluded.
    """
    include_set = frozenset(includes)
    exclude_set = frozenset(excludes)

    def allowed(value):
        if value in exclude_set:
            return False
        return not include_set or value in include_set

    return allowed
def main():
    """CLI entry point: find Cassandra incremental-backup SSTables under the
    given data directories and transfer them to S3, optionally deleting the
    local files after a successful transfer.
    """
    argp = argparse.ArgumentParser()
    argp.add_argument(
        '--keyspace', action='append', dest='keyspaces', default=[],
        metavar='KEYSPACE',
        help='Only include given keyspace. Can be specified multiple times')
    argp.add_argument(
        '--exclude-keyspace', action='append', dest='excluded_keyspaces',
        default=[], metavar='KEYSPACE',
        help='Exclude given keyspace. Can be specified multiple times')
    argp.add_argument(
        '--table', action='append', dest='tables', default=[],
        metavar='TABLE',
        help='Only include given table. Can be specified multiple times')
    argp.add_argument(
        '--exclude-table', action='append', dest='excluded_tables',
        default=[], metavar='TABLE',
        help='Exclude given table. Can be specified multiple times')
    argp.add_argument(
        '--s3-bucket', required=True, metavar='BUCKET',
        help='Name of S3 bucket to send SSTables to')
    argp.add_argument(
        '--s3-path', default='/', metavar='PATH',
        help='Path inside S3 bucket to send SSTables to. Subdirectories for '
             'the datacenter name and host ID will be appended to it to '
             'determine the final path')
    argp.add_argument(
        '--s3-acl', default='private', metavar='ACL',
        help='Canned ACL to use for transfers')
    argp.add_argument(
        '--s3-metadata', default='{}', metavar='METADATA_JSON',
        type=json.loads,
        help='Metadata to apply to transferred files, in JSON format')
    argp.add_argument(
        '--s3-storage-class', default='STANDARD', metavar='STORAGE_CLASS',
        help='Storage class to apply to transferred files')
    argp.add_argument(
        '--delete', default=False, action='store_true',
        help='Whether to delete transferred files after finishing. Files '
             'will only be deleted after all other files for the same SSTable '
             'have been successfully sent, to avoid leaving partial data '
             'behind')
    argp.add_argument(
        '--dry-run', default=False, action='store_true',
        help="Don't upload or delete any files, only print intended actions ")
    argp.add_argument(
        'data_dirs', nargs='+', metavar='data_dir',
        help='Path to one or more data directories to find backup files in')
    args = argp.parse_args()
    # Run nodetool earlier than necessary, since it's much quicker to fail than
    # traversing the data dir to find the SSTables
    nodetool_cmd = shlex.split(os.environ.get('NODETOOL_CMD', 'nodetool'))
    node_info = get_node_info(nodetool_cmd)
    host_id = node_info['ID']
    data_center = node_info['Data Center']
    # Build include/exclude predicates for keyspaces and tables.
    keyspace_filter = check_includes_excludes(
        args.keyspaces, args.excluded_keyspaces)
    table_filter = check_includes_excludes(
        args.tables, args.excluded_tables)
    sstables = []
    for data_dir in args.data_dirs:
        sstables.extend(traverse_data_dir(data_dir, keyspace_filter,
                                          table_filter))
    s3_client = boto3.client('s3')
    # Final layout: <s3-path>/<datacenter>/<host-id>/...
    s3_path = '{}/{}/{}'.format(clean_s3_path(args.s3_path), data_center,
                                host_id)
    transfers = list(generate_transfers(s3_client, args.s3_bucket, s3_path,
                                        sstables))
    s3_settings = {
        'Metadata': args.s3_metadata,
        'ACL': args.s3_acl,
        'StorageClass': args.s3_storage_class
    }
    for transfer in transfers:
        transfer.run(s3_client, s3_settings, delete=args.delete,
                     dry_run=args.dry_run)
| StarcoderdataPython |
9613136 | <filename>python_programs/breadth_first_search_test.py
from .node import Node
from .breadth_first_search import breadth_first_search
def test_main():
    """Exercise breadth_first_search over connected, branching, disconnected,
    single-node and cyclic graphs built from Node objects.
    """
    # Case 1: Strongly connected graph
    # Output: Path found!
    station1 = Node("Westminster")
    station2 = Node("Waterloo", None, [station1])
    station3 = Node("Trafalgar Square", None, [station1, station2])
    station4 = Node("Canary Wharf", None, [station2, station3])
    station5 = Node("London Bridge", None, [station4, station3])
    station6 = Node("Tottenham Court Road", None, [station5, station4])
    assert breadth_first_search(station6, station1)
    # Case 2: Branching graph
    # Output: Path found!
    nodef = Node("F")
    nodee = Node("E")
    noded = Node("D")
    nodec = Node("C", None, [nodef])
    nodeb = Node("B", None, [nodee])
    nodea = Node("A", None, [nodeb, nodec, noded])
    assert breadth_first_search(nodea, nodee)
    # Case 3: Two unconnected nodes in graph
    # Output: Path not found
    assert not breadth_first_search(nodef, nodee)
    # Case 4: One node graph
    # Output: Path found!
    assert breadth_first_search(nodef, nodef)
    # Case 5: Graph with cycles
    # Output: Path found!
    node1 = Node("1")
    node2 = Node("2")
    node3 = Node("3")
    node4 = Node("4", None, [node1])
    node5 = Node("5", None, [node2])
    node6 = Node("6", None, [node5, node4, node3])
    # Close the cycle 2 -> 6 after node6 exists.
    node2.successors = [node6]
    assert breadth_first_search(node6, node1)
if __name__ == "__main__":
    test_main()
| StarcoderdataPython |
304466 | <filename>solutions/treachery_of_whales.py
"""
Determine the horizontal position that the crabs can align to using the
least fuel possible. How much fuel must they spend to align to that
position?
"""
from dataclasses import dataclass
from typing import Callable, List, Optional
def constant_burn(units: int) -> int:
    """Fuel cost when every step costs exactly one unit."""
    return units


def gauss_burn(units: int) -> int:
    """Fuel cost when the k-th step costs k units (1 + 2 + ... + units).

    Uses the closed-form triangular-number formula instead of summing a
    range: O(1) rather than O(units), same values.
    """
    return units * (units + 1) // 2


@dataclass
class CrabFleet:
    """A collection of righteous crabs to protect you."""

    # Horizontal position of each crab.
    crab_positions: List[int]
    _median: Optional[int] = None
    _mean: Optional[int] = None

    @property
    def mean(self) -> int:
        """Floor of the mean of the crab positions (cached)."""
        if self._mean is None:
            self._mean = sum(self.crab_positions) // len(self.crab_positions)
        return self._mean

    @property
    def median(self) -> int:
        """Upper median of the crab positions (cached)."""
        if self._median is None:
            total = len(self.crab_positions)
            self._median = sorted(self.crab_positions)[total // 2]
        return self._median

    def calculate_minimum_fuel(
        self, destination: Optional[int] = None, burn_rate: Callable = constant_burn
    ) -> int:
        """Total fuel needed to align every crab at `destination`.

        When `destination` is None, return the cheapest total over all
        candidate positions. Fix: the previous search only scanned a
        window of +/- half the fleet size around the mean, which misses
        the optimum when positions spread wider than the fleet is large
        (e.g. [0, 0, 0, 100] with a constant burn). The search now covers
        every position between the leftmost and rightmost crab.
        """
        if destination is None:
            lo = min(self.crab_positions)
            hi = max(self.crab_positions)
            return min(
                sum(burn_rate(abs(goal - position))
                    for position in self.crab_positions)
                for goal in range(lo, hi + 1)
            )
        return sum(
            burn_rate(abs(destination - position)) for position in self.crab_positions
        )
if __name__ == "__main__":
with open("inputs/treachery_of_whales.txt") as input:
positions = [int(position) for position in input.read().split(",")]
fleet = CrabFleet(positions)
print(f"Part One: {fleet.calculate_minimum_fuel(fleet.median)}")
gauss_min = fleet.calculate_minimum_fuel(destination=None, burn_rate=gauss_burn)
print(f"Part Two: {gauss_min}")
| StarcoderdataPython |
3258773 | <gh_stars>0
# Read the triangle height, then print consecutive integers arranged as a
# left-aligned triangle, each number followed by "# ".
height = int(input())
next_number = 1
for row in range(1, height):
    pieces = []
    for _ in range(row):
        pieces.append("{}# ".format(next_number))
        next_number += 1
    print("".join(pieces))
# Sample Input :- 5
# Output :-
# 1#
# 2# 3#
# 4# 5# 6#
# 7# 8# 9# 10#
| StarcoderdataPython |
86981 | # -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
import io
import json
from pathlib import Path
import shutil
import tempfile
import uuid
from app.modules.users.models import User
from app.modules.fileuploads.models import FileUpload
from PIL import Image
from tests.utils import TemporaryDirectoryGraceful
def test_user_id_not_found(flask_app_client, regular_user):
    """PATCHing a malformed/unknown user id must return 404."""
    with flask_app_client.login(
        regular_user,
        auth_scopes=(
            'users:read',
            'users:write',
        ),
    ):
        response = flask_app_client.patch(
            '/api/v1/users/wrong-uuid',
            content_type='application/json',
            data=json.dumps(
                [
                    {
                        'op': 'replace',
                        'path': '/full_name',
                        'value': 'Modified Full Name',
                    }
                ]
            ),
        )
        assert response.status_code == 404
def test_modifying_user_info_by_owner(flask_app_client, regular_user, db):
    """A user can PATCH their own full_name after confirming their password."""
    # pylint: disable=invalid-name
    saved_full_name = regular_user.full_name
    try:
        with flask_app_client.login(regular_user, auth_scopes=('users:write',)):
            response = flask_app_client.patch(
                '/api/v1/users/%s' % regular_user.guid,
                content_type='application/json',
                data=json.dumps(
                    [
                        {
                            'op': 'test',
                            'path': '/current_password',
                            'value': regular_user.password_secret,
                        },
                        {
                            'op': 'replace',
                            'path': '/full_name',
                            'value': 'Modified Full Name',
                        },
                    ]
                ),
            )
        temp_user = User.query.get(response.json['guid'])
        assert response.status_code == 200
        assert response.content_type == 'application/json'
        assert isinstance(response.json, dict)
        assert set(response.json.keys()) >= {'guid', 'email'}
        assert uuid.UUID(response.json['guid']) == regular_user.guid
        # The password must never be echoed back by the API.
        assert 'password' not in response.json.keys()
        assert temp_user.email == regular_user.email
        assert temp_user.full_name == 'Modified Full Name'
    finally:
        # Restore original state
        regular_user.full_name = saved_full_name
        with db.session.begin():
            db.session.merge(regular_user)
def test_modifying_user_info_by_admin(flask_app_client, admin_user, regular_user, db):
    """An admin can PATCH another user's profile, including role flags.

    Fix: the current_password value held an anonymisation artifact
    ("<PASSWORD>_user.password_secret"), which is a syntax error. Restored
    as the logged-in admin confirming their own password -- TODO confirm
    this matches the API's current_password semantics.
    """
    # pylint: disable=invalid-name
    saved_full_name = regular_user.full_name
    try:
        with flask_app_client.login(admin_user, auth_scopes=('users:write',)):
            response = flask_app_client.patch(
                '/api/v1/users/%s' % regular_user.guid,
                content_type='application/json',
                data=json.dumps(
                    [
                        {
                            'op': 'test',
                            'path': '/current_password',
                            'value': admin_user.password_secret,
                        },
                        {
                            'op': 'replace',
                            'path': '/full_name',
                            'value': 'Modified Full Name',
                        },
                        {'op': 'replace', 'path': '/is_active', 'value': False},
                        {'op': 'replace', 'path': '/is_staff', 'value': False},
                        {'op': 'replace', 'path': '/is_admin', 'value': True},
                        {'op': 'replace', 'path': '/is_contributor', 'value': True},
                        {'op': 'replace', 'path': '/is_researcher', 'value': True},
                        {'op': 'replace', 'path': '/is_user_manager', 'value': True},
                        {'op': 'replace', 'path': '/is_internal', 'value': False},
                        {'op': 'replace', 'path': '/is_exporter', 'value': True},
                    ]
                ),
            )

        # `User` is already imported at module top; the redundant local
        # import was removed.
        temp_user = User.query.get(response.json['guid'])
        assert temp_user.is_researcher
        assert temp_user.is_contributor
        assert temp_user.is_user_manager
        assert response.status_code == 200
        assert response.content_type == 'application/json'
        assert isinstance(response.json, dict)
        assert set(response.json.keys()) >= {'guid', 'email'}
        assert uuid.UUID(response.json['guid']) == regular_user.guid
        assert 'password' not in response.json.keys()
        assert temp_user.email == regular_user.email
        assert temp_user.full_name == 'Modified Full Name'
        assert not temp_user.is_active
        assert not temp_user.is_staff
        assert temp_user.is_admin
        assert not temp_user.is_internal
        assert temp_user.is_exporter
    finally:
        # Restore original state
        regular_user.full_name = saved_full_name
        regular_user.is_active = True
        regular_user.is_staff = False
        regular_user.is_admin = False
        regular_user.is_researcher = False
        regular_user.is_contributor = False
        regular_user.is_user_manager = False
        with db.session.begin():
            db.session.merge(regular_user)
def test_modifying_user_info_admin_fields_by_not_admin(
    flask_app_client, regular_user, db
):
    """A non-admin must get 403 when trying to PATCH admin-only role flags."""
    # pylint: disable=invalid-name
    saved_full_name = regular_user.full_name
    try:
        with flask_app_client.login(regular_user, auth_scopes=('users:write',)):
            response = flask_app_client.patch(
                '/api/v1/users/%s' % regular_user.guid,
                content_type='application/json',
                data=json.dumps(
                    [
                        {
                            'op': 'test',
                            'path': '/current_password',
                            'value': regular_user.password_secret,
                        },
                        {
                            'op': 'replace',
                            'path': '/full_name',
                            'value': 'Modified Full Name',
                        },
                        {'op': 'replace', 'path': '/is_active', 'value': False},
                        {'op': 'replace', 'path': '/is_staff', 'value': False},
                        {'op': 'replace', 'path': '/is_admin', 'value': True},
                    ]
                ),
            )
        assert response.status_code == 403
        assert response.content_type == 'application/json'
        assert isinstance(response.json, dict)
        assert set(response.json.keys()) >= {'status', 'message'}
    finally:
        # Restore original state (defensive: the PATCH should not have applied).
        regular_user.full_name = saved_full_name
        regular_user.is_active = True
        regular_user.is_staff = False
        regular_user.is_admin = False
        with db.session.begin():
            db.session.merge(regular_user)
def test_modifying_user_info_with_invalid_format_must_fail(
    flask_app_client, regular_user
):
    """A malformed JSON-patch document (missing 'value') must return 422."""
    # pylint: disable=invalid-name
    with flask_app_client.login(regular_user, auth_scopes=('users:write',)):
        response = flask_app_client.patch(
            '/api/v1/users/%s' % regular_user.guid,
            content_type='application/json',
            data=json.dumps(
                [
                    {'op': 'test', 'path': '/full_name', 'value': ''},
                    # Invalid on purpose: a replace op without a 'value'.
                    {'op': 'replace', 'path': '/website'},
                ]
            ),
        )
        assert response.status_code == 422
        assert response.content_type == 'application/json'
        assert isinstance(response.json, dict)
        assert set(response.json.keys()) >= {'status', 'message'}
def test_modifying_user_info_with_invalid_password_must_fail(
    flask_app_client, regular_user
):
    """PATCH with a wrong current_password must be rejected with 403."""
    # pylint: disable=invalid-name
    with flask_app_client.login(regular_user, auth_scopes=('users:write',)):
        response = flask_app_client.patch(
            '/api/v1/users/%s' % regular_user.guid,
            content_type='application/json',
            data=json.dumps(
                [
                    {
                        'op': 'test',
                        'path': '/current_password',
                        # Anonymised placeholder; presumably any string that
                        # is not the real password works here.
                        'value': '<PASSWORD>',
                    },
                    {
                        'op': 'replace',
                        'path': '/full_name',
                        'value': 'Modified Full Name',
                    },
                ]
            ),
        )
        assert response.status_code == 403
        assert response.content_type == 'application/json'
        assert isinstance(response.json, dict)
        assert set(response.json.keys()) >= {'status', 'message'}
def test_modifying_user_info_with_conflict_data_must_fail(
    flask_app_client, admin_user, regular_user
):
    """Changing one's email to another user's existing email must return 409."""
    # pylint: disable=invalid-name
    with flask_app_client.login(regular_user, auth_scopes=('users:write',)):
        response = flask_app_client.patch(
            '/api/v1/users/%s' % regular_user.guid,
            content_type='application/json',
            data=json.dumps(
                [
                    {
                        'op': 'test',
                        'path': '/current_password',
                        'value': regular_user.password_secret,
                    },
                    # Conflict on purpose: admin_user already owns this email.
                    {'op': 'replace', 'path': '/email', 'value': admin_user.email},
                ]
            ),
        )
        assert response.status_code == 409
        assert response.content_type == 'application/json'
        assert isinstance(response.json, dict)
        assert set(response.json.keys()) >= {'status', 'message'}
def test_user_profile_fileupload(
    db, flask_app, flask_app_client, regular_user, request, test_root
):
    """Exercise every PATCH op on /profile_fileupload_guid.

    Covers add/replace/remove via a FileUpload guid or an upload
    transactionId, crop handling, and the error paths: malformed value,
    wrong asset count in a transaction, and cropping a non-image file.

    Fix: removed two duplicated ``transaction_id = td.name[...]``
    assignments (the same line appeared twice back-to-back in two of the
    transaction sections).
    """
    clean_up_objects = []
    clean_up_paths = []
    upload_dir = Path(flask_app.config['UPLOADS_DATABASE_PATH'])
    fileupload_dir = Path(flask_app.config['FILEUPLOAD_BASE_PATH'])
    with (test_root / 'zebra.jpg').open('rb') as f:
        zebra = f.read()

    def cleanup_fileupload_dir(path):
        # Depth-first: remove any directories left empty by the test.
        for c in path.glob('*'):
            child = Path(c)
            if child.is_dir():
                cleanup_fileupload_dir(child)
                if not list(child.glob('*')):
                    child.rmdir()

    def cleanup():
        # Detach the profile image first so FileUpload rows can be deleted.
        regular_user.profile_fileupload_guid = None
        db.session.merge(regular_user)
        for obj in clean_up_objects:
            if hasattr(obj, 'delete'):
                obj.delete()
            else:
                db.session.delete(obj)
        for path in clean_up_paths:
            if path.exists():
                shutil.rmtree(path, ignore_errors=True)
        cleanup_fileupload_dir(fileupload_dir)

    request.addfinalizer(cleanup)
    with flask_app_client.login(regular_user, auth_scopes=('users:write',)):
        args = (f'/api/v1/users/{regular_user.guid}',)
        # PATCH remove /profile_upload_guid when it's not set
        kwargs = {
            'content_type': 'application/json',
            'data': json.dumps(
                [
                    {
                        'op': 'remove',
                        'path': '/profile_fileupload_guid',
                    },
                ],
            ),
        }
        response = flask_app_client.patch(*args, **kwargs)
        assert response.status_code == 200, response.data
        # Create file upload
        with TemporaryDirectoryGraceful() as td:
            testfile = Path(td) / 'a.txt'
            with testfile.open('w') as f:
                f.write('abcd\n')
            fup = FileUpload.create_fileupload_from_path(str(testfile))
        with db.session.begin():
            db.session.add(fup)
        clean_up_objects += [fup]
        clean_up_paths += [Path(fup.get_absolute_path())]
        # PATCH replace /profile_fileupload_guid without dict
        kwargs = {
            'content_type': 'application/json',
            'data': json.dumps(
                [
                    {
                        'op': 'replace',
                        'path': '/profile_fileupload_guid',
                        'value': str(fup.guid),
                    },
                ],
            ),
        }
        response = flask_app_client.patch(*args, **kwargs)
        assert response.status_code == 422, response.data
        assert (
            response.json['message']
            == 'Expected {"transactionId": "..."} or {"guid": "..."}'
        )
        # PATCH replace /profile_fileupload_guid with asset.guid
        kwargs = {
            'content_type': 'application/json',
            'data': json.dumps(
                [
                    {
                        'op': 'replace',
                        'path': '/profile_fileupload_guid',
                        'value': {'guid': str(fup.guid)},
                    }
                ]
            ),
        }
        response = flask_app_client.patch(*args, **kwargs)
        assert response.status_code == 200, response.data
        assert response.json['profile_fileupload']['guid'] == str(fup.guid)
        updated_user = User.query.get(regular_user.guid)
        assert updated_user.profile_fileupload_guid == fup.guid
        # Test transactionId is required when not using asset guid
        kwargs = {
            'content_type': 'application/json',
            'data': json.dumps(
                [
                    {
                        'op': 'replace',
                        'path': '/profile_fileupload_guid',
                        'value': {'submissionGuid': '1234'},
                    }
                ]
            ),
        }
        response = flask_app_client.patch(*args, **kwargs)
        assert response.status_code == 422, response.data
        assert response.json['message'] == '"transactionId" or "guid" is mandatory'
        # PATCH replace /profile_fileupload_guid with transaction_id with no assets
        td = Path(tempfile.mkdtemp(prefix='trans-', dir=upload_dir))
        transaction_id = td.name[len('trans-') :]
        kwargs = {
            'content_type': 'application/json',
            'data': json.dumps(
                [
                    {
                        'op': 'replace',
                        'path': '/profile_fileupload_guid',
                        'value': {'transactionId': transaction_id},
                    }
                ]
            ),
        }
        response = flask_app_client.patch(*args, **kwargs)
        assert response.status_code == 422, response.data
        assert response.json['message'].startswith(
            'Need exactly 1 asset but found 0 assets'
        )
        clean_up_paths.append(td)
        # PATCH replace /profile_fileupload_guid with transaction_id with 2 assets
        td = Path(tempfile.mkdtemp(prefix='trans-', dir=upload_dir))
        transaction_id = td.name[len('trans-') :]
        with (td / 'image.jpg').open('wb') as f:
            f.write(zebra)
        with (td / 'a.txt').open('w') as f:
            f.write('abcd')
        kwargs = {
            'content_type': 'application/json',
            'data': json.dumps(
                [
                    {
                        'op': 'replace',
                        'path': '/profile_fileupload_guid',
                        'value': {'transactionId': transaction_id},
                    }
                ]
            ),
        }
        response = flask_app_client.patch(*args, **kwargs)
        assert response.status_code == 422, response.data
        assert response.json['message'].startswith(
            'Need exactly 1 asset but found 2 assets'
        )
        clean_up_paths.append(td)
        # PATCH replace /profile_fileupload_guid with transaction_id with 2 assets with path
        td = Path(tempfile.mkdtemp(prefix='trans-', dir=upload_dir))
        transaction_id = td.name[len('trans-') :]
        with (td / 'image.jpg').open('wb') as f:
            f.write(zebra)
        with (td / 'a.txt').open('w') as f:
            f.write('abcd')
        kwargs = {
            'content_type': 'application/json',
            'data': json.dumps(
                [
                    {
                        'op': 'replace',
                        'path': '/profile_fileupload_guid',
                        'value': {'transactionId': transaction_id, 'path': 'image.jpg'},
                    }
                ]
            ),
        }
        response = flask_app_client.patch(*args, **kwargs)
        assert response.status_code == 200, response.data
        fup = FileUpload.query.get(response.json['profile_fileupload']['guid'])
        src_response = flask_app_client.get(fup.src)
        src_data = src_response.data
        src_response.close()  # h/t https://github.com/pallets/flask/issues/2468#issuecomment-517797518
        assert src_data == zebra
        clean_up_objects.append(fup)
        clean_up_paths.append(td)
        # PATCH replace /profile_fileupload_guid with transaction_id
        td = Path(tempfile.mkdtemp(prefix='trans-', dir=upload_dir))
        transaction_id = td.name[len('trans-') :]
        with (td / 'image.jpg').open('wb') as f:
            f.write(zebra)
        kwargs = {
            'content_type': 'application/json',
            'data': json.dumps(
                [
                    {
                        'op': 'replace',
                        'path': '/profile_fileupload_guid',
                        'value': {'transactionId': transaction_id},
                    }
                ]
            ),
        }
        response = flask_app_client.patch(*args, **kwargs)
        assert response.status_code == 200, response.data
        response_asset_guid = response.json['profile_fileupload']['guid']
        updated_user = User.query.get(regular_user.guid)
        assert str(updated_user.profile_fileupload_guid) == response_asset_guid
        fileupload = FileUpload.query.get(response_asset_guid)
        assert updated_user.profile_fileupload == fileupload
        assert fileupload is not None, 'FileUpload linked to user does not exist'
        clean_up_objects += [fileupload]
        # PATCH remove /profile_fileupload_guid
        kwargs = {
            'content_type': 'application/json',
            'data': json.dumps([{'op': 'remove', 'path': '/profile_fileupload_guid'}]),
        }
        response = flask_app_client.patch(*args, **kwargs)
        assert response.status_code == 200, response.data
        updated_user = User.query.get(regular_user.guid)
        assert updated_user.profile_fileupload_guid is None
        # Create file upload
        with TemporaryDirectoryGraceful() as td:
            testfile = Path(td) / 'image.jpg'
            with testfile.open('wb') as f:
                f.write(zebra)
            fup = FileUpload.create_fileupload_from_path(str(testfile))
        with db.session.begin():
            db.session.add(fup)
        clean_up_objects += [fup]
        clean_up_paths += [Path(fup.get_absolute_path())]
        # PATCH add /profile_fileupload_guid
        kwargs = {
            'content_type': 'application/json',
            'data': json.dumps(
                [
                    {
                        'op': 'add',
                        'path': '/profile_fileupload_guid',
                        'value': {'guid': str(fup.guid)},
                    }
                ]
            ),
        }
        response = flask_app_client.patch(*args, **kwargs)
        assert response.status_code == 200, response.data
        updated_user = User.query.get(regular_user.guid)
        assert str(updated_user.profile_fileupload_guid) == str(fup.guid)
        # PATCH add /profile_fileupload_guid with invalid crop
        kwargs = {
            'content_type': 'application/json',
            'data': json.dumps(
                [
                    {
                        'op': 'add',
                        'path': '/profile_fileupload_guid',
                        'value': {
                            'guid': str(fup.guid),
                            'crop': 'invalid',
                        },
                    }
                ]
            ),
        }
        response = flask_app_client.patch(*args, **kwargs)
        assert response.status_code == 422, response.data
        assert (
            response.json['message']
            == 'Expected {"crop": {"x": <int>, "y": <int>, "width": <int>, "height": <int>}}'
        )
        # Invalid crop must leave the stored image untouched.
        with Image.open(fup.get_absolute_path()) as image:
            assert image.size == (1000, 664)
        # PATCH add /profile_fileupload_guid with crop
        kwargs = {
            'content_type': 'application/json',
            'data': json.dumps(
                [
                    {
                        'op': 'add',
                        'path': '/profile_fileupload_guid',
                        'value': {
                            'guid': str(fup.guid),
                            'crop': {
                                'x': 650,
                                'y': 150,
                                'width': 150,
                                'height': 150,
                            },
                        },
                    }
                ]
            ),
        }
        response = flask_app_client.patch(*args, **kwargs)
        assert response.status_code == 200, response.data
        src = response.json['profile_fileupload']['src']
        response = flask_app_client.get(src)
        assert response.headers['Content-Type'] == 'image/jpeg'
        with Image.open(io.BytesIO(response.data)) as image:
            assert image.size == (150, 150)
        # Create non image fileupload
        with TemporaryDirectoryGraceful() as td:
            testfile = Path(td) / 'a.txt'
            with testfile.open('w') as f:
                f.write('abcd\n')
            fup = FileUpload.create_fileupload_from_path(str(testfile))
        with db.session.begin():
            db.session.add(fup)
        clean_up_objects += [fup]
        # PATCH add /profile_fileupload_guid with crop not image
        kwargs = {
            'content_type': 'application/json',
            'data': json.dumps(
                [
                    {
                        'op': 'add',
                        'path': '/profile_fileupload_guid',
                        'value': {
                            'guid': str(fup.guid),
                            'crop': {
                                'x': 650,
                                'y': 150,
                                'width': 150,
                                'height': 150,
                            },
                        },
                    }
                ]
            ),
        }
        response = flask_app_client.patch(*args, **kwargs)
        assert response.status_code == 422, response.data
        assert response.json['message'].startswith(
            'UnidentifiedImageError: cannot identify image file'
        )
| StarcoderdataPython |
355772 | <filename>pytglib/api/types/text_entity_type_bot_command.py
from ..utils import Object
class TextEntityTypeBotCommand(Object):
    """
    A bot command, beginning with "/". This shouldn't be highlighted if there are no bots in the chat
    Attributes:
        ID (:obj:`str`): ``TextEntityTypeBotCommand``
    No parameters required.
    Returns:
        TextEntityType
    Raises:
        :class:`telegram.Error`
    """
    # TDLib type identifier used when (de)serializing this entity type.
    ID = "textEntityTypeBotCommand"
    def __init__(self, **kwargs):
        # Stateless marker type: accepts (and ignores) any kwargs.
        pass
    @staticmethod
    def read(q: dict, *args) -> "TextEntityTypeBotCommand":
        # No fields to parse from the raw dict; just return a fresh instance.
        return TextEntityTypeBotCommand()
| StarcoderdataPython |
1643337 | # Given a binary tree, determine if it is height-balanced.
# For this problem, a height-balanced binary tree is defined as a binary tree in which the depth of the two subtrees of every node never differ by more than 1.
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def isBalanced(self, root):
        """Return True if the tree rooted at ``root`` is height-balanced.

        ``check`` propagates a sentinel height of -1 upward as soon as any
        subtree's children differ in depth by more than one.

        Time O(n); space O(h) for the recursion stack.
        :type root: TreeNode
        :rtype: bool
        """
        return self.check(root) != -1

    def check(self, root):
        """Return the height of ``root``, or -1 if any subtree is unbalanced."""
        if not root:
            return 0
        left_height = self.check(root.left)
        right_height = self.check(root.right)
        unbalanced = (
            left_height == -1
            or right_height == -1
            or abs(left_height - right_height) > 1
        )
        return -1 if unbalanced else 1 + max(left_height, right_height)
5097624 | <filename>tock/organizations/migrations/0004_unit_org.py
# Generated by Django 2.2.10 on 2020-03-04 22:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a required ``org`` foreign key from Unit to Organization.

    ``default=1`` supplies a value for rows that already exist when the
    column is added (i.e. existing units are attached to the organization
    with pk 1); deleting that organization cascades to its units.
    """
    dependencies = [
        ('organizations', '0003_unit'),
    ]
    operations = [
        migrations.AddField(
            model_name='unit',
            name='org',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='organizations.Organization'),
        ),
    ]
| StarcoderdataPython |
1785881 | <reponame>myhoger/ditto
# Copyright 2013 MemSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
from replication_utils import *
from replication_globs import *
if __name__ == '__main__':
    # CLI entry point. All helpers come from the star imports of
    # replication_utils / replication_globs above.
    parser = command_line_parser()
    args = parser.parse_args()
    # NOTE(review): wrap_execution presumably wraps connect_to_databases
    # with error handling and returns (binlog stream, MemSQL connection) —
    # confirm in replication_utils.
    stream, memsql_conn = wrap_execution(connect_to_databases, [args])
    # Consume binlog events from the stream against the MemSQL connection.
    binlog_listen(memsql_conn, stream)
| StarcoderdataPython |
3476449 | from __future__ import absolute_import, unicode_literals
from django.core.urlresolvers import reverse, reverse_lazy
from django.http import Http404
from django.shortcuts import redirect
from django.utils.decorators import method_decorator
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.generic.edit import FormView, UpdateView, CreateView
from django.contrib import messages
from django.contrib.auth.models import Group, Permission
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.contrib.auth import login, REDIRECT_FIELD_NAME, authenticate, logout
from osf.models.user import OSFUser
from osf.models import AdminProfile
from admin.common_auth.forms import LoginForm, UserRegistrationForm, DeskUserForm
class LoginView(FormView):
    """Admin login form: authenticates an email/password pair and logs in."""
    form_class = LoginForm
    redirect_field_name = REDIRECT_FIELD_NAME
    template_name = 'login.html'
    @method_decorator(csrf_protect)
    @method_decorator(never_cache)
    def dispatch(self, request, *args, **kwargs):
        # CSRF-protect the form and keep the login page out of caches.
        return super(LoginView, self).dispatch(request, *args, **kwargs)
    def form_valid(self, form):
        # Authenticate with trimmed credentials; on failure flash an error
        # and bounce back to the login page instead of proceeding.
        user = authenticate(
            username=form.cleaned_data.get('email').strip(),
            password=form.cleaned_data.get('password').strip()
        )
        if user is not None:
            login(self.request, user)
        else:
            messages.error(
                self.request,
                'Email and/or Password incorrect. Please try again.'
            )
            return redirect('auth:login')
        return super(LoginView, self).form_valid(form)
    def get_success_url(self):
        # Honor the ?next=... redirect, falling back to home for '' or '/'.
        redirect_to = self.request.GET.get(self.redirect_field_name, '')
        if not redirect_to or redirect_to == '/':
            redirect_to = reverse('home')
        return redirect_to
def logout_user(request):
    """Terminate the current admin session and return to the login page."""
    logout(request)
    return redirect('auth:login')
class RegisterUser(PermissionRequiredMixin, FormView):
    """Grant admin-app access (is_staff, groups, profile) to an existing OSF user."""
    form_class = UserRegistrationForm
    template_name = 'register.html'
    permission_required = 'osf.change_user'
    raise_exception = True
    def form_valid(self, form):
        osf_id = form.cleaned_data.get('osf_id')
        osf_user = OSFUser.load(osf_id)
        if not osf_user:
            raise Http404('OSF user with id "{}" not found. Please double check.'.format(osf_id))
        osf_user.is_staff = True
        osf_user.save()
        # create AdminProfile for this new user
        profile, created = AdminProfile.objects.get_or_create(user=osf_user)
        # Replace (not extend) the user's group memberships with the selection.
        osf_user.groups.clear()
        prereg_admin_group = Group.objects.get(name='prereg_admin')
        for group in form.cleaned_data.get('group_perms'):
            osf_user.groups.add(group)
            if group == prereg_admin_group:
                # Prereg admins additionally get the explicit permission.
                administer_permission = Permission.objects.get(codename='administer_prereg')
                osf_user.user_permissions.add(administer_permission)
        osf_user.save()
        # `created` distinguishes first-time registration from a permissions update.
        if created:
            messages.success(self.request, 'Registration successful for OSF User {}!'.format(osf_user.username))
        else:
            messages.success(self.request, 'Permissions update successful for OSF User {}!'.format(osf_user.username))
        return super(RegisterUser, self).form_valid(form)
    def get_success_url(self):
        return reverse('auth:register')
    def get_initial(self):
        # Pre-fill the OSF id from the query string (?id=...).
        initial = super(RegisterUser, self).get_initial()
        initial['osf_id'] = self.request.GET.get('id')
        return initial
class DeskUserCreateFormView(PermissionRequiredMixin, CreateView):
    """Create the Desk credentials record for the current admin user."""
    form_class = DeskUserForm
    template_name = 'desk/settings.html'
    success_url = reverse_lazy('auth:desk')
    permission_required = 'osf.view_desk'
    raise_exception = True
    def form_valid(self, form):
        # Bind the new Desk record to the logged-in admin before saving.
        form.instance.user = self.request.user
        return super(DeskUserCreateFormView, self).form_valid(form)
class DeskUserUpdateFormView(PermissionRequiredMixin, UpdateView):
    """Edit the Desk credentials attached to the current admin user."""
    form_class = DeskUserForm
    template_name = 'desk/settings.html'
    success_url = reverse_lazy('auth:desk')
    permission_required = 'osf.view_desk'
    raise_exception = True
    def get_object(self, queryset=None):
        # Always edit the requester's own profile; no pk in the URL.
        return self.request.user.admin_profile
| StarcoderdataPython |
3564182 | <filename>qap/test_script_utils.py
import pytest
import unittest
class TestGatherFilepathList(unittest.TestCase):
    """gather_filepath_list should return site-relative scan paths found
    under a raw-data folder."""
    def setUp(self):
        # setup
        import os
        import pkg_resources as p
        from qap.script_utils import gather_filepath_list
        self.gather_filepath_list = gather_filepath_list
        # inputs: the packaged test data folder shipped with qap
        self.data_folder = \
            p.resource_filename("qap", os.path.join("test_data",
                                                    "data_folder"))
        # outputs
        self.ref_path_list = [
            "site_1/sub_01/ses_01/anat_1/anatomical_scan.nii.gz",
            "site_1/sub_01/ses_01/rest_1/functional_scan.nii.gz"]
    def test_custom_filepaths(self):
        test_path_list = self.gather_filepath_list(self.data_folder)
        self.assertListEqual(self.ref_path_list, test_path_list)
@pytest.mark.long
class TestPullS3Sublist(unittest.TestCase):
    """pull_s3_sublist against the live fcp-indi S3 bucket.

    Fix: ``assertEquals`` (deprecated alias, removed in Python 3.12)
    replaced with ``assertEqual``.
    """
    # will fail if no internet connection
    # use this test fixture periodically
    def setUp(self):
        # setup
        from qap.script_utils import pull_s3_sublist
        self.pull_s3_sublist = pull_s3_sublist
        # inputs
        self.bids_path = "s3://fcp-indi/data/Projects/CORR/RawDataBIDS"
        self.custom_path = "s3://fcp-indi/data/Projects/CORR/RawData"
        self.invalid_bucket_path = "s3://fcp--indi/data/Projects/CORR/RawDataBIDS"
        self.invalid_dir_path = "s3://fcp-indi/data/Projects/corr/RawDataBIDS"
        # outputs: expected first five keys for each layout
        self.bids_s3_list = [
            'BMB_1/T1w.json',
            'BMB_1/sub-0003001/ses-1/anat/sub-0003001_ses-1_run-1_T1w.nii.gz',
            'BMB_1/sub-0003001/ses-1/func/sub-0003001_ses-1_task-rest_run-1_bold.nii.gz',
            'BMB_1/sub-0003001/ses-1/func/sub-0003001_ses-1_task-rest_run-2_bold.nii.gz',
            'BMB_1/sub-0003001/sub-0003001_sessions.tsv']
        self.custom_s3_list = [
            'BMB_1/0003001/session_1/anat_1/anat.nii.gz',
            'BMB_1/0003001/session_1/rest_1/rest.nii.gz',
            'BMB_1/0003001/session_1/rest_2/rest.nii.gz',
            'BMB_1/0003002/session_1/anat_1/anat.nii.gz',
            'BMB_1/0003002/session_1/rest_1/rest.nii.gz']
    def test_BIDS(self):
        test_bids_s3_list = self.pull_s3_sublist(self.bids_path)
        self.assertListEqual(self.bids_s3_list, test_bids_s3_list[0:5])
    def test_custom(self):
        test_custom_s3_list = self.pull_s3_sublist(self.custom_path)
        self.assertListEqual(self.custom_s3_list, test_custom_s3_list[0:5])
    def test_invalid_bucket_name(self):
        with self.assertRaises(Exception):
            self.pull_s3_sublist(self.invalid_bucket_path)
    def test_wrong_dirpath(self):
        # A wrong prefix is not an error; it just yields no keys.
        test_wrong_list = self.pull_s3_sublist(self.invalid_dir_path)
        self.assertEqual(0, len(test_wrong_list))
    def test_invalid_creds_path(self):
        with self.assertRaises(Exception):
            self.pull_s3_sublist(self.bids_path, "/path/to/nothing.csv")
class TestParseRawDataList(unittest.TestCase):
    """parse_raw_data_list for non-BIDS data directory formats.

    Fix: ``assertRaisesRegexp`` (deprecated alias, removed in Python 3.12)
    replaced with ``assertRaisesRegex``.
    """
    # for non-BIDS data directory formats
    def setUp(self):
        # setup
        from qap.script_utils import parse_raw_data_list
        self.parse_raw_data_list = parse_raw_data_list
        # inputs
        self.local_data_folder = "/data/dir"
        self.local_file_list = ["site_1/sub_01/ses_01/anat_1/mprage.nii.gz",
                                "site_1/sub_02/ses_01/func_1/rest.nii.gz"]
        # missing the session level -> should be rejected
        self.wrong_file_list = ["site_1/sub_01/anat_1/mprage.nii.gz"]
        self.s3_data_folder = "s3://data/Projects/RawData"
        self.s3_file_list = ["site_1/sub_01/ses_01/anat_1/mprage.nii.gz",
                             "site_1/sub_02/ses_01/func_1/rest.nii.gz"]
        # outputs
        self.ref_local_subdict = {
            'sub_01': {
                'ses_01': {
                    'anatomical_scan': {
                        'anat_1': '/data/dir/site_1/sub_01/ses_01/anat_1/mprage.nii.gz'},
                    'site_name': 'site_1'}},
            'sub_02': {
                'ses_01': {
                    'functional_scan': {
                        'func_1': '/data/dir/site_1/sub_02/ses_01/func_1/rest.nii.gz'},
                    'site_name': 'site_1'}}}
    def test_local_filepaths(self):
        test_local = self.parse_raw_data_list(self.local_file_list,
                                              self.local_data_folder)
        self.assertDictEqual(self.ref_local_subdict, test_local)
    def test_s3_filepaths(self):
        # TODO
        pass
    def test_inclusion(self):
        ref_subdict = self.ref_local_subdict
        del ref_subdict["sub_02"]
        test_inc = self.parse_raw_data_list(self.local_file_list,
                                            self.local_data_folder,
                                            inclusion_list=["sub_01"])
        self.assertDictEqual(ref_subdict, test_inc)
    def test_wrong_dir_format(self):
        # only comes out empty because there's only one entry in the input
        # list
        with self.assertRaisesRegex(Exception, "came out empty"):
            self.parse_raw_data_list(self.wrong_file_list,
                                     self.local_data_folder)
class TestCheckCSVMissingSubs(unittest.TestCase):
    """check_csv_missing_subs: compare a QAP results CSV against the data
    dictionary and report scans that never made it into the CSV.

    Fix: both ``assertEquals`` calls (deprecated alias, removed in
    Python 3.12) replaced with ``assertEqual``.
    """
    def setUp(self):
        # setup
        import os
        import pandas as pd
        import pkg_resources as p
        self.maxDiff = None
        from qap.script_utils import check_csv_missing_subs
        self.check_csv = check_csv_missing_subs
        # inputs: full (5-row) and truncated (3-row) result CSVs
        anat_csv = \
            p.resource_filename("qap", os.path.join("test_data",
                                       "qap_anatomical_spatial_5rows.csv"))
        func_csv = \
            p.resource_filename("qap", os.path.join("test_data",
                                       "qap_functional_spatial_5subs.csv"))
        short_anat_csv = \
            p.resource_filename("qap", os.path.join("test_data",
                                       "qap_anatomical_spatial_3rows.csv"))
        short_func_csv = \
            p.resource_filename("qap", os.path.join("test_data",
                                       "qap_functional_spatial_3subs.csv"))
        self.anat_df = pd.read_csv(anat_csv, dtype={"Participant": str})
        self.func_df = pd.read_csv(func_csv, dtype={"Participant": str})
        self.short_anat_df = pd.read_csv(short_anat_csv,
                                         dtype={"Participant": str})
        self.short_func_df = pd.read_csv(short_func_csv,
                                         dtype={"Participant": str})
        self.data_dict = {
            '0003001': {"session_1": {"anatomical_scan": {"anat_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003001/session_1/anat_1/anat.nii.gz'},
                                      "functional_scan": {"rest_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003001/session_1/rest_1/rest.nii.gz',
                                                          "rest_2": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003001/session_1/rest_2/rest.nii.gz'},
                                      "site_name": "BMB_1"}},
            '0003002': {"session_1": {"anatomical_scan": {"anat_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003002/session_1/anat_1/anat.nii.gz'},
                                      "functional_scan": {"rest_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003002/session_1/rest_1/rest.nii.gz',
                                                          "rest_2": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003002/session_1/rest_2/rest.nii.gz'},
                                      "site_name": "BMB_1"}},
            '0003004': {"session_1": {"anatomical_scan": {"anat_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003004/session_1/anat_1/anat.nii.gz'},
                                      "functional_scan": {"rest_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003004/session_1/rest_1/rest.nii.gz',
                                                          "rest_2": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003004/session_1/rest_2/rest.nii.gz'},
                                      "site_name": "BMB_1"}},
            '0003006': {"session_1": {"anatomical_scan": {"anat_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003006/session_1/anat_1/anat.nii.gz'},
                                      "functional_scan": {"rest_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003006/session_1/rest_1/rest.nii.gz',
                                                          "rest_2": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003006/session_1/rest_2/rest.nii.gz'},
                                      "site_name": "BMB_1"}},
            '0003007': {"session_1": {"anatomical_scan": {"anat_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003007/session_1/anat_1/anat.nii.gz'},
                                      "functional_scan": {"rest_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003007/session_1/rest_1/rest.nii.gz',
                                                          "rest_2": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003007/session_1/rest_2/rest.nii.gz'},
                                      "site_name": "BMB_1"}}}
        # outputs: subjects present in the data dict but absent from the
        # truncated CSVs
        self.anat_missing_dict = {
            '0003006': {"session_1": {"anatomical_scan": {"anat_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003006/session_1/anat_1/anat.nii.gz'}}},
            '0003007': {"session_1": {"anatomical_scan": {"anat_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003007/session_1/anat_1/anat.nii.gz'}}}}
        self.func_missing_dict = {
            '0003006': {"session_1": {"functional_scan": {"rest_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003006/session_1/rest_1/rest.nii.gz',
                                                          "rest_2": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003006/session_1/rest_2/rest.nii.gz'}}},
            '0003007': {"session_1": {"functional_scan": {"rest_1": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003007/session_1/rest_1/rest.nii.gz',
                                                          "rest_2": 's3://fcp-indi/data/Projects/CORR/RawData/BMB_1/0003007/session_1/rest_2/rest.nii.gz'}}}}
    def test_anat_no_missing(self):
        ret = self.check_csv(self.anat_df, self.data_dict, "anat")
        self.assertEqual(ret, None)
    def test_anat_missing(self):
        ret = self.check_csv(self.short_anat_df, self.data_dict, "anat")
        self.assertDictEqual(ret, self.anat_missing_dict)
    def test_func_no_missing(self):
        ret = self.check_csv(self.func_df, self.data_dict, "func")
        self.assertEqual(ret, None)
    def test_func_missing(self):
        ret = self.check_csv(self.short_func_df, self.data_dict, "func")
        self.assertDictEqual(ret, self.func_missing_dict)
@pytest.mark.quick
def test_gather_custom_raw_data():
    """gather_custom_raw_data maps custom-layout file paths onto the nested
    {participant: {session: {...scan type...}}} dict, tagging site names
    and classifying scans via the anatomical/functional keywords."""
    from qap.script_utils import gather_custom_raw_data
    # we are starting in the directory containing the site folders!
    site_folder = "/home/data"
    format = "/{site}/{participant}/{session}/{series}"
    anatomical_keywords = "mprage"
    functional_keywords = "rest func"
    filepath_list = [
        "/home/data/site01/sub01/sess01/anat_1/mprage.nii.gz",
        "/home/data/site01/sub01/sess02/anat_1/mprage.nii.gz",
        "/home/data/site01/sub02/sess01/anat_1/mprage.nii.gz",
        "/home/data/site01/sub02/sess02/anat_1/mprage.nii.gz",
        "/home/data/site01/sub01/sess01/rest_1/rest.nii.gz",
        "/home/data/site01/sub01/sess01/rest_2/rest.nii.gz",
        "/home/data/site01/sub01/sess02/rest_1/func.nii.gz",
        "/home/data/site01/sub02/sess01/rest_1/rest.nii.gz",
        "/home/data/site01/sub02/sess01/rest_2/rest.nii.gz",
        "/home/data/site01/sub02/sess02/rest_1/func.nii.gz",
    ]
    # include sites
    ref_sub_dict = {
        'sub01': {'sess01': {'anatomical_scan': {'anat_1': '/home/data/site01/sub01/sess01/anat_1/mprage.nii.gz'},
                             'functional_scan': {'rest_1': '/home/data/site01/sub01/sess01/rest_1/rest.nii.gz',
                                                 'rest_2': '/home/data/site01/sub01/sess01/rest_2/rest.nii.gz'},
                             'site_name': 'site01'},
                  'sess02': {'anatomical_scan': {'anat_1': '/home/data/site01/sub01/sess02/anat_1/mprage.nii.gz'},
                             'functional_scan': {'rest_1': '/home/data/site01/sub01/sess02/rest_1/func.nii.gz'},
                             'site_name': 'site01'}},
        'sub02': {'sess01': {'anatomical_scan': {'anat_1': '/home/data/site01/sub02/sess01/anat_1/mprage.nii.gz'},
                             'functional_scan': {'rest_1': '/home/data/site01/sub02/sess01/rest_1/rest.nii.gz',
                                                 'rest_2': '/home/data/site01/sub02/sess01/rest_2/rest.nii.gz'},
                             'site_name': 'site01'},
                  'sess02': {'anatomical_scan': {'anat_1': '/home/data/site01/sub02/sess02/anat_1/mprage.nii.gz'},
                             'functional_scan': {'rest_1': '/home/data/site01/sub02/sess02/rest_1/func.nii.gz'},
                             'site_name': 'site01'}}}
    sub_dict = gather_custom_raw_data(filepath_list, site_folder, format,
                                      anatomical_keywords, functional_keywords)
    assert ref_sub_dict == sub_dict
@pytest.mark.quick
def test_gather_custom_raw_data_scans_folder():
    """Same as test_gather_custom_raw_data, but with an extra literal
    "scans" directory between the session and series levels in the
    format template and in every path."""
    from qap.script_utils import gather_custom_raw_data
    # we are starting in the directory containing the site folders!
    site_folder = "/home/data"
    format = "/{site}/{participant}/{session}/scans/{series}"
    anatomical_keywords = "mprage"
    functional_keywords = "rest func"
    # inclusion of a "scans" folder in between the session and scan folders
    filepath_list = [
        "/home/data/site01/sub01/sess01/scans/anat_1/mprage.nii.gz",
        "/home/data/site01/sub01/sess02/scans/anat_1/mprage.nii.gz",
        "/home/data/site01/sub02/sess01/scans/anat_1/mprage.nii.gz",
        "/home/data/site01/sub02/sess02/scans/anat_1/mprage.nii.gz",
        "/home/data/site01/sub01/sess01/scans/rest_1/rest.nii.gz",
        "/home/data/site01/sub01/sess01/scans/rest_2/rest.nii.gz",
        "/home/data/site01/sub01/sess02/scans/rest_1/func.nii.gz",
        "/home/data/site01/sub02/sess01/scans/rest_1/rest.nii.gz",
        "/home/data/site01/sub02/sess01/scans/rest_2/rest.nii.gz",
        "/home/data/site01/sub02/sess02/scans/rest_1/func.nii.gz",
    ]
    # include sites
    ref_sub_dict = {
        'sub01': {'sess01': {'anatomical_scan': {'anat_1': '/home/data/site01/sub01/sess01/scans/anat_1/mprage.nii.gz'},
                             'functional_scan': {'rest_1': '/home/data/site01/sub01/sess01/scans/rest_1/rest.nii.gz',
                                                 'rest_2': '/home/data/site01/sub01/sess01/scans/rest_2/rest.nii.gz'},
                             'site_name': 'site01'},
                  'sess02': {'anatomical_scan': {'anat_1': '/home/data/site01/sub01/sess02/scans/anat_1/mprage.nii.gz'},
                             'functional_scan': {'rest_1': '/home/data/site01/sub01/sess02/scans/rest_1/func.nii.gz'},
                             'site_name': 'site01'}},
        'sub02': {'sess01': {'anatomical_scan': {'anat_1': '/home/data/site01/sub02/sess01/scans/anat_1/mprage.nii.gz'},
                             'functional_scan': {'rest_1': '/home/data/site01/sub02/sess01/scans/rest_1/rest.nii.gz',
                                                 'rest_2': '/home/data/site01/sub02/sess01/scans/rest_2/rest.nii.gz'},
                             'site_name': 'site01'},
                  'sess02': {'anatomical_scan': {'anat_1': '/home/data/site01/sub02/sess02/scans/anat_1/mprage.nii.gz'},
                             'functional_scan': {'rest_1': '/home/data/site01/sub02/sess02/scans/rest_1/func.nii.gz'},
                             'site_name': 'site01'}}}
    sub_dict = gather_custom_raw_data(filepath_list, site_folder, format,
                                      anatomical_keywords, functional_keywords)
    assert ref_sub_dict == sub_dict
| StarcoderdataPython |
6607878 | <gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Create the Mage link tables (merits/skills/vices/virtues) and add
    purchase-cost fields to Arcanum.

    NOTE(review): in MageVice the field is named 'virtue' but targets
    wod_rules.Vice — presumably a copy-paste slip for 'vice'. Do not edit
    this (possibly applied) migration in place; rename in a follow-up
    migration if confirmed.
    """
    dependencies = [
        ('characters', '0003_auto_20150410_1125'),
        ('wod_rules', '0002_auto_20150410_1125'),
        ('mage_rules', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='MageMerit',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('dots', models.IntegerField(verbose_name='dots', choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
                ('character', models.ForeignKey(related_name='merits', to='characters.MageCharacter')),
                ('merit', models.ForeignKey(to='wod_rules.Merit')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='MageSkill',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('dots', models.IntegerField(verbose_name='dots', choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')])),
                ('character', models.ForeignKey(related_name='skills', to='characters.MageCharacter')),
                ('skill', models.ForeignKey(to='wod_rules.Skill')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='MageVice',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('character', models.ForeignKey(to='characters.MageCharacter')),
                ('virtue', models.ForeignKey(to='wod_rules.Vice')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='MageVirtue',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('character', models.ForeignKey(to='characters.MageCharacter')),
                ('virtue', models.ForeignKey(to='wod_rules.Virtue')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='arcanum',
            name='cost',
            field=models.IntegerField(default=1, verbose_name='Cost to purchase'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='arcanum',
            name='cost_inferior',
            field=models.IntegerField(default=8, verbose_name='Cost to purchase if inferior'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='arcanum',
            name='cost_ruling',
            field=models.IntegerField(default=6, verbose_name='Cost to purchase if ruling'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='arcanum',
            name='unavailable',
            field=models.BooleanField(default=False, verbose_name='unavailable'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='characterarcanum',
            name='dots',
            field=models.IntegerField(verbose_name='dots', choices=[(0, b'0'), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5')]),
            preserve_default=True,
        ),
    ]
| StarcoderdataPython |
11224388 | <filename>exemplo_de_test/test_basico.py
def test_one_plus_one_is_two():
    """Passing example: basic integer addition behaves as expected."""
    expected = 2
    assert 1 + 1 == expected
def test_negative_1_plus_is_3():
    # NOTE(review): this assertion is always false — presumably a deliberate
    # failing example ("negative" test) to demonstrate pytest failure output;
    # confirm before "fixing" it.
    assert 1 + 1 == 3
| StarcoderdataPython |
79223 | # encoding: utf-8
"""
@author: yp
@software: PyCharm
@file: GoodsManageDemo.py
@time: 2019/8/1 0001 16:43
"""
from AutoTestPlatform.web.WebDriver import Driver
driver = Driver()
#登录Dbshop
driver.get("http://192.168.1.16/DBshop/admin")
driver.find_element_by_id_data("user_name", 'admin')
driver.find_element_by_id_data("user_passwd", "<PASSWORD>")
driver.find_element_by_xpath('//*[@id="admin_login_form"]/button').click()
#--------------------------------------------------------------------------------------------
# Enter goods management and add a new product
driver.find_element_by_xpath('/html/body/div[1]/div/ul[1]/li[3]/a').click()
driver.find_element_by_xpath('/html/body/div[1]/div/ul[1]/li[3]/ul/li[1]/a').click()
driver.find_element_by_xpath('/html/body/div[2]/div/div[2]/div/p[2]/a[1]').click()
# Basic product information
driver.find_element_by_id_data('goods_name',"ipad ")
driver.find_element_by_id_data('goods_extend_name',"mini5")
driver.find_element_by_id_data('goods_item',"0551")
driver.find_element_by_id_data('goods_price',"6000")
driver.find_element_by_id_data('goods_shop_price',"5999")
driver.find_element_by_xpath('//*[@id="goods_a"]/div[2]/div[7]/div/table/tbody/tr/td[2]/input').send_keys("5899")
driver.find_element_by_id_data("virtual_sales","1000")
driver.find_element_by_id_data("goods_weight","15")
# The rich-text description lives inside an iframe editor (ueditor)
driver.switch_to_iframe(driver.find_element_by_id("ueditor_0"))
driver.find_element_by_xpath('/html/body').send_keys("最实用的ipad,你值得拥有")
driver.switch_to_parent_handle()
driver.find_element_by_xpath('//*[@id="sticky_navigation_right"]/button').click()
# Assign the product to a category
driver.find_element_by_xpath('/html/body/div[2]/div/div[2]/table/tbody/tr[2]/td[9]/a[1]').click()
driver.find_element_by_xpath('/html/body/div[2]/div/div[2]/div/ul/li[3]/a').click()
driver.find_element_by_id('class_id_14').click()
driver.find_element_by_xpath('//*[@id="sticky_navigation_right"]/button[1]').click()
# Stock settings
driver.find_element_by_xpath('/html/body/div[2]/div/div[2]/div/ul/li[5]/a').click()
driver.find_element_by_id_data('goods_stock','1000000')
driver.find_element_by_id_data('goods_out_of_stock_set','250')
driver.find_element_by_id_data('goods_cart_buy_min_num','1')
driver.find_element_by_id_data('goods_cart_buy_max_num','99')
driver.find_element_by_xpath('//*[@id="sticky_navigation_right"]/button[1]').click()
# Promotional pricing
driver.find_element_by_xpath('/html/body/div[2]/div/div[2]/div/ul/li[6]/a').click()
driver.find_element_by_id_data('goods_preferential_price',"4999")
driver.find_element_by_id_data('goods_preferential_start_time',"2019-08-05 14:25")
driver.find_element_by_id_data('goods_preferential_end_time',"2019-08-09 14:25")
driver.find_element_by_xpath('//*[@id="sticky_navigation_right"]/button[1]').click()
# Sales specifications (color / size variants)
driver.find_element_by_xpath('/html/body/div[2]/div/div[2]/div/ul/li[7]/a').click()
driver.find_element_by_id('ff0000').click()
driver.find_element_by_id('other1').click()
driver.find_element_by_id_data('price_ff0000other1',"6000")
driver.find_element_by_id_data('stock_ff0000other1','100')
driver.find_element_by_id_data('item_ff0000other1','0551-001')
driver.find_element_by_id_data('weight_ff0000other1',"15")
driver.find_element_by_xpath('//*[@id="select_goods_color_size_in"]/tbody/tr/td[8]/table/tbody/tr/td[2]/input').send_keys('2999')
driver.find_element_by_xpath('//*[@id="sticky_navigation_right"]/button[1]').click()
# Product attributes
driver.find_element_by_xpath('/html/body/div[2]/div/div[2]/div/ul/li[8]/a').click()
driver.find_element_by_id('attribute_group_id').click()
driver.find_element_by_xpath('//*[@id="attribute_group_id"]/option[2]').click()
driver.find_element_by_xpath('//*[@id="sticky_navigation_right"]/button[1]').click()
# Product tags
driver.find_element_by_xpath('/html/body/div[2]/div/div[2]/div/ul/li[9]/a').click()
driver.find_element_by_xpath('//*[@id="goods_l"]/div[2]/div[2]/div/label[1]/input').click()
driver.find_element_by_xpath('//*[@id="sticky_navigation_right"]/button[1]').click()
# Custom product fields
driver.find_element_by_xpath('/html/body/div[2]/div/div[2]/div/ul/li[10]/a').click()
driver.find_element_by_xpath('//*[@id="goods_f"]/div[2]/div[2]/label/input').send_keys('蔡徐坤!!!必备')
driver.find_element_by_xpath('//*[@id="goods_f"]/div[2]/div[2]/div/input').send_keys("你其实不止是会'唱跳rap打篮球',ipad给你带来新世界的one piece~")
driver.find_element_by_xpath('//*[@id="goods_f"]/div[2]/div[2]/div/label/input').click()
driver.find_element_by_xpath('//*[@id="goods_f"]/div[2]/div[3]/label/input').send_keys("22世纪的大佬们!!!")
driver.find_element_by_xpath('//*[@id="goods_f"]/div[2]/div[3]/div/input').send_keys("大佬无处不在,因为这是22世纪,拥有ipad,你离大佬只是一步之遥")
driver.find_element_by_xpath('//*[@id="goods_f"]/div[2]/div[3]/div/label/input').click()
driver.find_element_by_xpath('//*[@id="sticky_navigation_right"]/button[1]').click()
# NOTE(review): the sections below (related/combined goods, reviews) are
# commented out in the original — presumably because the hidden-element
# workarounds did not work yet. Kept verbatim.
# #关联商品
#
# driver.find_element_by_xpath('/html/body/div[2]/div/div[2]/div/ul/li[11]/a').click()
# driver.find_element_by_xpath('//*[@id="relationgoods_id"]').execute_script('type="visable"')
# sleep(2)
# driver.find_element_by_id_data('relationgoods_id','2')
# # d = dr.find_element_by_xpath('//*[@id="mainImgclass"]/div[2]/input')
# # dr.execute_script('arguments[0].removeAttribute(\"style\")', d)
# # driver.find_element_by_id('relationgoods_id').set_element_visable('visable')
# # driver.find_element_by_id_data('relationgoods_id',"2")
# # driver.find_element_by_xpath('//*[@id="relation_goods_keyword"]').send_keys('索尼').key_down(Keys.ENTER)
# # driver.find_element_by_id_data('relationgoods_id','2').set_element_visable("type='visable'")
# driver.find_element_by_xpath('//*[@id="goods_n"]/div[2]/div[2]/button').click()
# driver.find_element_by_xpath('//*[@id="sticky_navigation_right"]/button[1]').click()
# #相关商品
# driver.find_element_by_xpath('/html/body/div[2]/div/div[2]/div/ul/li[12]/a').click()
# driver.find_element_by_id_data('related_goods_keyword','苹果(Apple) iPhone X 64GB 深空灰色 移动联通电信全网通4G手机')
# driver.find_element_by_xpath('//*[@id="goods_e"]/div[2]/div[2]/button').click()
# driver.find_element_by_xpath('//*[@id="sticky_navigation_right"]/button[1]').click()
#
# #组合商品
# driver.find_element_by_xpath('/html/body/div[2]/div/div[2]/div/ul/li[13]/a').click()
# driver.find_element_by_id_data('combination_goods_keyword','苹果(Apple) iPhone X 64GB 深空灰色 移动联通电信全网通4G手机')
# driver.find_element_by_xpath('//*[@id="goods_m"]/div[2]/div[2]/button').click()
# driver.find_element_by_xpath('//*[@id="sticky_navigation_right"]/button[1]').click()
#
#
#
# #商品评价
# driver.find_element_by_xpath('/html/body/div[2]/div/div[2]/div/ul/li[14]/a').click()
#
# driver.find_element_by_xpath('//*[@id="sticky_navigation_right"]/button[1]').click()
#
#
# Save the product
driver.find_element_by_xpath('//*[@id="sticky_navigation_right"]/button[2]').click()
#--------------------------------------------------------------------------------------
# Goods categories: navigate to the front-side information page.
driver.find_element_by_xpath('/html/body/div[1]/div/ul[1]/li[3]/a').click()
# Bug fix: this submenu anchor was located but never clicked (the trailing
# .click() was missing), so the sub-submenu below could not be reached.
driver.find_element_by_xpath('/html/body/div[1]/div/ul[1]/li[3]/ul/li[2]/a').click()
driver.find_element_by_xpath('/html/body/div[1]/div/ul[1]/li[3]/ul/li[2]/ul/li[2]/a').click()
# Add a side-information entry
driver.find_element_by_xpath('//*[@id="sticky_navigation"]/p[2]/a[1]').click()
driver.find_element_by_id_data('frontside_name','我是你得不到的baba')
driver.find_element_by_id_data('frontside_url','https://ask.csdn.net/questions/664268')
driver.find_element_by_xpath('//*[@id="frontside_class_id"]/option[10]').click()
driver.find_element_by_xpath('//*[@id="sticky_navigation"]/div[2]/button').click()
#-----------------------------------------------------------------------------------------
# Log out and close the browser
driver.find_element_by_xpath('/html/body/div[2]/div/div[1]/p[2]/a[2]').click()
driver.close()
| StarcoderdataPython |
103735 | import h5py
import os
import re
class H5Explorer(object):
    """Filesystem-style navigation helper over an open HDF5 object.

    Keeps a current working directory inside the file and resolves
    relative/absolute HDF5 paths much like a shell resolves filesystem paths.
    """

    def __init__(self, h5_obj):
        # Remember both the handle and where we are inside the file.
        self.__filename = h5_obj.file.filename
        self.__file = h5_obj
        self.__working_dir = h5_obj.name
        self.__dir_queue = []

    @staticmethod
    def from_file(filename, mode="r"):
        """Open *filename* (``~`` expanded) and wrap it in an explorer."""
        expanded = os.path.expanduser(filename)
        handle = h5py.File(expanded, mode)
        return H5Explorer(handle)

    def get_absolute_path(self, path):
        """Resolve *path* (None = cwd, relative = cwd-joined) to a clean
        absolute HDF5 path."""
        if path is None:
            resolved = self.__working_dir
        elif path.startswith("/"):
            resolved = path
        else:
            pieces = self.__working_dir.split("/") + path.split("/")
            resolved = "/".join(pieces)
        # normpath may keep a leading double slash; collapse any slash runs.
        return re.sub("/+", "/", os.path.normpath(resolved))

    def __check_group(self, path):
        # Validate that *path* exists and is a group; return its absolute form.
        abs_path = self.get_absolute_path(path)
        if abs_path not in self.__file:
            raise ValueError("Directory {} does not exist.".format(abs_path))
        if not isinstance(self.__file[abs_path], h5py.Group):
            raise ValueError("{} exists, but is not a directory".format(abs_path))
        return abs_path

    def __check_dataset(self, path):
        # Validate that *path* exists and is a dataset; return its absolute form.
        abs_path = self.get_absolute_path(path)
        if abs_path not in self.__file:
            raise ValueError("Dataset {} does not exist.".format(abs_path))
        if not isinstance(self.__file[abs_path], h5py.Dataset):
            raise ValueError("{} exists, but is not a dataset".format(abs_path))
        return abs_path

    @property
    def working_dir(self):
        return self.__working_dir

    @property
    def filename(self):
        return self.__filename

    def change_dir(self, new_dir="/"):
        """Move the working directory to *new_dir* (must be a group)."""
        self.__working_dir = self.__check_group(new_dir)

    @property
    def raw(self):
        """The underlying h5py object."""
        return self.__file

    @property
    def datasets(self):
        """Names of the datasets directly under the working directory."""
        here = self.__check_group(None)
        return [name for name, node in self.__file[here].items()
                if isinstance(node, h5py.Dataset)]

    @property
    def groups(self):
        """Names of the groups directly under the working directory."""
        here = self.__check_group(None)
        return [name for name, node in self.__file[here].items()
                if isinstance(node, h5py.Group)]

    def __getitem__(self, path):
        node = self.__file[self.get_absolute_path(path)]
        # Datasets are returned as-is; groups get wrapped for chained access.
        return node if isinstance(node, h5py.Dataset) else H5Explorer(node)

    def __delitem__(self, path):
        del self.__file[self.get_absolute_path(path)]

    def close(self):
        self.__file.close()

    def dataset(self, path):
        """Return the dataset at *path*, validating its type."""
        return self.__file[self.__check_dataset(path)]

    def group(self, path):
        """Return the group at *path*, validating its type."""
        return self.__file[self.__check_group(path)]
| StarcoderdataPython |
8177221 | <filename>String/696. Count Binary Substrings.py
"""
696. Count Binary Substrings
Example 1:
Input: "00110011"
Output: 6
Explanation: There are 6 substrings that have equal number of consecutive 1's and 0's: "0011", "01", "1100", "10", "0011", and "01".
Notice that some of these substrings repeat and are counted the number of times they occur.
Also, "00110011" is not a valid substring because all the 0's (and 1's) are not grouped together.
Example 2:
Input: "10101"
Output: 4
Explanation: There are 4 substrings: "10", "01", "10", "01" that
"""
class Solution:
def countBinarySubstrings(self, s):
"""
:type s: str
:rtype: int
"""
i = res = 0
while i < len(s):
j = 0
while i+j < len(s) and i - j - 1>=0 and s[i-1] != s[i] and s[i+j] == s[i] and s[i-1-j] == s[i-1]:
j+=1
res += j
i += j if j> 0 else 1
return res
class Solution:
def countBinarySubstrings(self, s):
"""
:type s: str
:rtype: int
"""
res = prev = 0
i = cur = 1
for i in range(1,len(s)):
if s[i]!=s[i-1]:
res += min(prev, cur)
prev, cur = cur, 1
else:
cur += 1
res += min(prev, cur)
return res
class Solution:
def countBinarySubstrings(self, s):
s =[*map(len, s.replace('01','0 1').replace('10', '1 0').split())]
return sum(min(a,b) for a, b in zip(s,s[1:]))
class Solution:
def countBinarySubstrings(self, s):
s =[*map(len, re.findall('0+|1+',s))]
return sum(map(min, s[:-1], s[1:])) | StarcoderdataPython |
4821908 | <reponame>alexgaskell10/encoded_kge<gh_stars>100-1000
from kge.job import EvaluationJob, Job
class EntityPairRankingJob(EvaluationJob):
    """Evaluation job implementing the entity-pair ranking protocol."""

    def __init__(self, config, dataset, parent_job, model):
        super().__init__(config, dataset, parent_job, model)
        # Fire creation hooks only for direct instantiations of this class,
        # not for subclasses (they run their own hooks).
        if type(self) is EntityPairRankingJob:
            for hook in Job.job_created_hooks:
                hook(self)
6580791 | <filename>pyapprox/l1_minimization.py
import numpy as np
import scipy.sparse as sp
from functools import partial
from scipy.optimize import minimize, LinearConstraint, NonlinearConstraint, BFGS, \
linprog, OptimizeResult, Bounds
try:
from ipopt import minimize_ipopt
has_ipopt = True
except:
has_ipopt = False
# print('has_ipopt',has_ipopt)
def get_method(options):
    """Return the optimizer name from ``options['method']`` (default
    'slsqp'), falling back to 'slsqp' when 'ipopt' is requested but the
    ipopt bindings are not installed.
    """
    method = options.get('method', 'slsqp')
    if method == 'ipopt' and not has_ipopt:
        # Bug fix: the warning message was previously assigned to a dead
        # local (and misspelled) but never emitted; report the fallback.
        print('ipopt not available, using default slsqp')
        method = 'slsqp'
    return method
def basis_pursuit(Amat, bvec, options):
    """Solve the basis-pursuit problem min ||x||_1 s.t. Amat @ x = bvec.

    Reformulated as a linear program with slack variables t such that
    |x_i| <= t_i, minimizing sum(t).
    """
    num_coef = Amat.shape[1]
    total_vars = 2 * num_coef
    # Objective: zero weight on x, unit weight on the slacks t.
    cost = np.concatenate([np.zeros(num_coef), np.ones(num_coef)])
    # Inequalities [I, -I; -I, -I] @ [x; t] <= 0 encode |x| <= t.
    eye = sp.identity(num_coef)
    sign_pattern = np.array([[1, -1], [-1, -1]])
    ineq_mat = sp.kron(sign_pattern, eye)
    ineq_rhs = np.zeros(total_vars)
    # Equality constraints act on x only.
    eq_mat = sp.lil_matrix((Amat.shape[0], total_vars), dtype=float)
    eq_mat[:, :num_coef] = Amat
    # x is free; the slacks are nonnegative.
    var_bounds = [(-np.inf, np.inf)] * num_coef + [(0, np.inf)] * num_coef
    result = linprog(cost, A_ub=ineq_mat, b_ub=ineq_rhs, A_eq=eq_mat,
                     b_eq=bvec, bounds=var_bounds, options=options)
    assert result.success, result
    return result.x[:num_coef]
def nonlinear_basis_pursuit(func, func_jac, func_hess, init_guess, options, eps=0, return_full=False):
    """Minimize the l1 norm of x subject to the nonlinear constraint
    ``0 <= func(x) <= eps``.

    The l1 norm is linearized with slack variables t, |x_i| <= t_i, and
    sum(t) is minimized with scipy.optimize.minimize (or ipopt when
    requested and available).

    Parameters
    ----------
    func : callable
        Constraint function. If ``func_jac is True`` it must return the
        pair (value, jacobian); otherwise just the value.
    func_jac : callable or True
        Jacobian of ``func``, or ``True`` to signal that ``func`` returns it.
    func_hess : callable or None
        Hessian of ``func``; when None a BFGS approximation is used.
    init_guess : np.ndarray
        Initial guess for the unknowns x.
    options : dict
        Optimizer options; ``options['method']`` selects the solver and is
        removed before the options are forwarded.
    eps : float
        Constraint relaxation; eps=0 enforces the constraint exactly.
    return_full : bool
        If True, also return the raw optimizer result.
    """
    nunknowns = init_guess.shape[0]
    nslack_variables = nunknowns

    def obj(x):
        # Linear objective sum(t) with constant gradient [0, ..., 0, 1, ..., 1].
        val = np.sum(x[nunknowns:])
        grad = np.zeros(x.shape[0])
        grad[nunknowns:] = 1.0
        return val, grad

    def hessp(x, p):
        # The objective is linear, so its Hessian-vector product is zero.
        matvec = np.zeros(x.shape[0])
        return matvec

    # Linear inequalities [I, -I; -I, -I] @ [x; t] <= 0 encode |x| <= t.
    I = sp.identity(nunknowns)
    tmp = np.array([[1, -1], [-1, -1]])
    A_con = sp.kron(tmp, I)
    # A_con = A_con.A#dense
    lb_con = -np.inf*np.ones(nunknowns+nslack_variables)
    ub_con = np.zeros(nunknowns+nslack_variables)
    # print(A_con.A)
    linear_constraint = LinearConstraint(
        A_con, lb_con, ub_con, keep_feasible=False)
    constraints = [linear_constraint]

    def constraint_obj(x):
        # Evaluate func on the x-portion only (slacks are not passed through).
        val = func(x[:nunknowns])
        if func_jac == True:
            return val[0]
        return val

    def constraint_jac(x):
        if func_jac == True:
            jac = func(x[:nunknowns])[1]
        else:
            jac = func_jac(x[:nunknowns])
        if jac.ndim == 1:
            jac = jac[np.newaxis, :]
        # Pad with zeros for the slack columns: the constraint ignores t.
        jac = sp.hstack(
            [jac, sp.csr_matrix((jac.shape[0], jac.shape[1]), dtype=float)])
        jac = sp.csr_matrix(jac)
        return jac

    if func_hess is not None:
        def constraint_hessian(x, v):
            # see https://prog.world/scipy-conditions-optimization/
            # for example how to define NonlinearConstraint hess
            H = func_hess(x[:nunknowns])
            hess = sp.lil_matrix((x.shape[0], x.shape[0]), dtype=float)
            hess[:nunknowns, :nunknowns] = H*v[0]
            return hess
    else:
        constraint_hessian = BFGS()
    # experimental parameter. does not enforce interpolation but allows some
    # deviation
    nonlinear_constraint = NonlinearConstraint(
        constraint_obj, 0, eps, jac=constraint_jac, hess=constraint_hessian,
        keep_feasible=False)
    constraints.append(nonlinear_constraint)

    # x is free; the slacks are nonnegative.
    lbs = np.zeros(nunknowns+nslack_variables)
    lbs[:nunknowns] = -np.inf
    ubs = np.inf*np.ones(nunknowns+nslack_variables)
    bounds = Bounds(lbs, ubs)

    # Initialize the slacks at |x0| so the linear constraints hold at start.
    x0 = np.concatenate([init_guess, np.absolute(init_guess)])
    method = get_method(options)
    #method = options.get('method','slsqp')
    if 'method' in options:
        del options['method']
    if method != 'ipopt':
        res = minimize(
            obj, x0, method=method, jac=True, hessp=hessp, options=options,
            bounds=bounds, constraints=constraints)
    else:
        from ipopt import minimize_ipopt
        # NOTE(review): relies on a private scipy helper to translate the new
        # constraint interface to the old dict format expected by ipopt.
        from scipy.optimize._constraints import new_constraint_to_old
        con = new_constraint_to_old(constraints[0], x0)
        ipopt_bounds = []
        for ii in range(len(bounds.lb)):
            ipopt_bounds.append([bounds.lb[ii], bounds.ub[ii]])
        res = minimize_ipopt(
            obj, x0, method=method, jac=True, options=options,
            constraints=con, bounds=ipopt_bounds)
    if return_full:
        return res.x[:nunknowns], res
    else:
        return res.x[:nunknowns]
def kouri_smooth_absolute_value(t, r, x):
    """Elementwise smoothed absolute value used in the Kouri penalty scheme.

    With z = r*x + t the value is quadratic in the band |z| <= 1 and
    affine (shifted |x|) outside it, matching value and slope at z = +/-1.
    ``t`` and ``x`` are 1D arrays of equal length; ``r`` is a scalar.
    """
    z = r*x + t
    below = -1/r*(z + 0.5 + 0.5 * t**2)
    inside = t * x + 0.5*r * x**2
    above = 1/r * (z - 0.5 - 0.5 * t**2)
    # Select the appropriate branch per element; the band is the default.
    return np.select([z < -1, z > 1], [below, above], default=inside)
def kouri_smooth_absolute_value_gradient(t, r, x):
    """Elementwise derivative of the smoothed absolute value w.r.t. x:
    z = r*x + t clipped to [-1, 1]."""
    return np.clip(r*x + t, -1, 1)
def kouri_smooth_absolute_value_hessian(t, r, x):
    """Elementwise second derivative of the smoothed absolute value:
    r inside the quadratic band |r*x + t| <= 1, zero outside."""
    z = r*x + t
    return np.where(np.absolute(z) <= 1, r, 0.0)
def kouri_smooth_l1_norm(t, r, x):
    """Smoothed l1 norm: sum of the elementwise smoothed absolute values."""
    return kouri_smooth_absolute_value(t, r, x).sum()
def kouri_smooth_l1_norm_gradient(t, r, x):
    """Gradient of the smoothed l1 norm (elementwise gradients)."""
    return kouri_smooth_absolute_value_gradient(t, r, x)
def kouri_smooth_l1_norm_hessian(t, r, x):
    """Hessian of the smoothed l1 norm: a diagonal matrix of the
    elementwise second derivatives."""
    return np.diag(kouri_smooth_absolute_value_hessian(t, r, x))
def kouri_smooth_l1_norm_hessp(t, r, x, v):
    """Hessian-vector product of the smoothed l1 norm.

    The Hessian is diagonal, so H @ v is the elementwise product of the
    diagonal with v.
    """
    hess = kouri_smooth_absolute_value_hessian(t, r, x)
    # Bug fix: previously returned ``hess*v[0]`` (a scalar multiple by the
    # first component of v) instead of the full product H @ v required by
    # scipy.optimize.minimize's hessp callback.
    return hess*v
def basis_pursuit_denoising(func, func_jac, func_hess, init_guess, eps, options):
    """Solve basis-pursuit denoising min ||x||_1 s.t. func(x) <= eps**2 by an
    outer loop of smoothed-l1 subproblems (Kouri penalty scheme).

    Each outer iteration minimizes the smoothed l1 norm (parameterized by the
    dual estimate ``t`` and penalty ``r``) subject to the nonlinear
    constraint, then updates ``t`` and tightens the inner tolerances.

    Parameters
    ----------
    func, func_jac, func_hess : constraint value/jacobian/hessian; if
        ``func_jac is True``, ``func`` returns (value, jacobian).
    init_guess : np.ndarray, starting point.
    eps : float, constraint tolerance (constraint enforced as <= eps**2).
    options : dict of algorithm parameters (see the ``options.get`` calls).

    Returns
    -------
    scipy.optimize.OptimizeResult with fun, x, nit, msg, nfev, njev, status.
    """
    # Dual/shift variable of the smoothed absolute value, one entry per unknown.
    t = np.zeros_like(init_guess)
    method = get_method(options)
    nunknowns = init_guess.shape[0]

    def constraint_obj(x):
        val = func(x)
        if func_jac == True:
            return val[0]
        return val

    def constraint_jac(x):
        if func_jac == True:
            jac = func(x)[1]
        else:
            jac = func_jac(x)
        return jac

    # if func_hess is None:
    #     constraint_hessian = BFGS()
    # else:
    #     def constraint_hessian(x,v):
    #         H = func_hess(x)
    #         return H*v.sum()
    # NOTE(review): func_hess is currently ignored and a BFGS approximation
    # is always used (see the commented-out branch above) — confirm intended.
    constraint_hessian = BFGS()
    nonlinear_constraint = NonlinearConstraint(
        constraint_obj, 0, eps**2, jac=constraint_jac, hess=constraint_hessian,
        keep_feasible=False)
    constraints = [nonlinear_constraint]

    # Maximum Number Outer Iterations
    maxiter = options.get('maxiter', 100)
    # Maximum number of inner (subproblem) iterations
    maxiter_inner = options.get('maxiter_inner', 1000)
    # Desired Dual Tolerance
    ttol = options.get('dualtol', 1e-6)
    # Verbosity Level
    verbose = options.get('verbose', 1)
    # Initial Penalty Parameter
    r = options.get('r0', 1)
    # Max Penalty Parameter
    rmax = options.get('rmax', 1e6)
    # Optimization Tolerance Update Factor
    tfac = options.get('tfac', 1e-1)
    # Penalty Parameter Update Factor
    # NOTE(review): rfac is read but never used below — the penalty update
    # hard-codes a factor of 2 (``r = min(r*2, rmax)``). Confirm intended.
    rfac = options.get('rfac', 2)
    # Desired Feasibility Tolerance
    ctol = options.get('ctol', 1e-8)
    # Desired Optimality Tolerance
    gtol = options.get('gtol', 1e-8)
    # Initial Dual Tolerance
    ttol0 = options.get('ttol0', 1)
    # Initial Feasiblity Tolerance
    ctol0 = options.get('ctol0', 1e-2)
    # Initial Optimality Tolerance
    gtol0 = options.get('gtol0', 1e-2)
    # Tolerance for termination for change in objective
    ftol = options.get('ftol', 1e-8)

    niter = 0
    x0 = init_guess
    f0 = np.inf
    nfev, njev, nhev = 0, 0, 0
    constr_nfev, constr_njev, constr_nhev = 0, 0, 0
    while True:
        # Inner subproblem: smoothed l1 norm for the current (t, r).
        obj = partial(kouri_smooth_l1_norm, t, r)
        jac = partial(kouri_smooth_l1_norm_gradient, t, r)
        #hessp = partial(kouri_smooth_l1_norm_hessp,t,r)
        hessp = None
        if method == 'slsqp':
            options0 = {'ftol': gtol0, 'verbose': max(0, verbose-2),
                        'maxiter': maxiter_inner, 'disp': (verbose > 2)}
        elif method == 'trust-constr':
            options0 = {'gtol': gtol0, 'tol': gtol0, 'verbose': max(0, verbose-2),
                        'barrier_tol': ctol, 'maxiter': maxiter_inner,
                        'disp': (verbose > 2)}
        elif method == 'cobyla':
            options0 = {'tol': gtol0, 'verbose': max(0, verbose-2),
                        'maxiter': maxiter_inner, 'rhoend': gtol0, 'rhobeg': 1,
                        'disp': (verbose > 2), 'catol': ctol0}
        if method != 'ipopt':
            # init_guess=x0
            # NOTE(review): every inner solve restarts from init_guess rather
            # than warm-starting from x0 (see the commented line) — confirm.
            res = minimize(
                obj, init_guess, method=method, jac=jac, hessp=hessp,
                options=options0, constraints=constraints)
        else:
            from ipopt import minimize_ipopt
            options0 = {'tol': gtol0, 'print_level': max(0, verbose-1),
                        'maxiter': int(maxiter_inner),
                        'acceptable_constr_viol_tol': ctol0,
                        'derivative_test': 'first-order',
                        'nlp_scaling_constr_target_gradient': 1.}
            from scipy.optimize._constraints import new_constraint_to_old
            con = new_constraint_to_old(constraints[0], init_guess)
            res = minimize_ipopt(
                obj, init_guess, method=method, jac=jac, hessp=hessp,
                options=options0, constraints=con)
        #assert res.success, res
        if method == 'trust-constr':
            assert res.status == 1 or res.status == 2
        elif method == 'slsqp':
            assert res.status == 0
            assert res.success == True
        fdiff = np.linalg.norm(f0-res.fun)
        xdiff = np.linalg.norm(x0-res.x)
        # Dual update: project t + r*x onto [-1, 1] elementwise.
        t0 = t.copy()
        t = np.maximum(-1, np.minimum(1, t0 + r*res.x))
        tdiff = np.linalg.norm(t0-t)
        niter += 1
        x0 = res.x.copy()
        f0 = res.fun
        nfev += res.nfev
        if hasattr(res, 'njev'):
            njev += res.njev
        if hasattr(res, 'nhev'):
            nhev += res.nhev
        if verbose > 1:
            #print(' i = %d tdiff = %11.10e r = %11.10e ttol = %3.2e ctol = %3.2e gtol = %3.2e iter = %d'%(niter,tdiff,r,ttol0,ctol0,gtol0,0))
            print(' i = %d tdiff = %11.10e fdiff = %11.10e xdiff = %11.10e r = %11.10e ttol = %3.2e gtol = %3.2e nfev = %d' % (
                niter, tdiff, fdiff, xdiff, r, ttol0, gtol0, nfev))
        if tdiff < ttol:
            # NOTE(review): the break here is commented out, so dual
            # convergence alone never stops the loop — only ftol/maxiter do.
            msg = f'ttol {ttol} reached'
            status = 0
            # break
        if fdiff < ftol:
            msg = f'ftol {ftol} reached'
            status = 0
            break
        if niter >= maxiter:
            msg = f'maxiter {maxiter} reached'
            status = 1
            break
        if tdiff > ttol0:
            # Dual not converging fast enough: increase the penalty.
            r = min(r*2, rmax)
        # Tighten the inner tolerances towards their final values.
        ttol0, gtol0 = max(tfac*ttol0, ttol), max(tfac*gtol0, gtol)
        ctol0 = max(tfac*ctol0, ctol)
        # constr_nfev only for trust-constr
        # constr_nfev+=res.constr_nfev[0];constr_njev+=res.constr_njev[0];constr_nhev+=res.constr_nhev[0]
    if verbose > 0:
        print(msg)
    # constr_nfev=constr_nfev,constr_njev=constr_njev)
    res = OptimizeResult(fun=res.fun, x=res.x, nit=niter,
                         msg=msg, nfev=nfev, njev=njev, status=status)
    return res
def lasso(func, func_jac, func_hess, init_guess, lamda, options):
    """Solve the lasso-type problem min func(x) + lamda*||x||_1.

    The l1 term is linearized with slack variables t (|x_i| <= t_i enforced
    through sparse linear constraints) and the smooth part is handled by
    scipy.optimize.minimize, or by ipopt with explicit sparsity structure
    callbacks when requested.

    Parameters
    ----------
    func : callable
        Smooth objective. If ``func_jac is True`` it must return the pair
        (value, gradient); otherwise just the value.
    func_jac : callable or True
        Gradient of ``func``, or ``True`` to signal that ``func`` returns it.
    func_hess : callable or None
        Hessian of ``func``; when None no Hessian is supplied.
    init_guess : np.ndarray, starting point for x.
    lamda : float, l1 regularization weight.
    options : dict
        Optimizer options; ``options['method']`` selects the solver and is
        removed before the options are forwarded.

    Returns
    -------
    (np.ndarray, OptimizeResult): the solution x and the raw result.
    """
    nunknowns = init_guess.shape[0]
    nslack_variables = nunknowns

    def obj(lamda, x):
        # Smooth objective on the x-portion plus lamda * sum(t) on the slacks.
        vals = func(x[:nunknowns])
        if func_jac == True:
            grad = vals[1]
            vals = vals[0]
        else:
            grad = func_jac(x[:nunknowns])
        vals += lamda*np.sum(x[nunknowns:])
        grad = np.concatenate([grad, lamda*np.ones(nslack_variables)])
        return vals, grad

    def hess(x):
        # Hessian only involves the x block; the l1 term is linear in t.
        H = sp.lil_matrix((x.shape[0], x.shape[0]), dtype=float)
        H[:nunknowns, :nunknowns] = func_hess(x[:nunknowns])
        return H
    if func_hess is None:
        hess = None

    # Linear inequalities [I, -I; -I, -I] @ [x; t] <= 0 encode |x| <= t.
    I = sp.identity(nunknowns)
    tmp = np.array([[1, -1], [-1, -1]])
    A_con = sp.kron(tmp, I)
    lb_con = -np.inf*np.ones(nunknowns+nslack_variables)
    ub_con = np.zeros(nunknowns+nslack_variables)
    linear_constraint = LinearConstraint(
        A_con, lb_con, ub_con, keep_feasible=False)
    constraints = [linear_constraint]
    # print(A_con.A)

    # x is free; the slacks are nonnegative.
    lbs = np.zeros(nunknowns+nslack_variables)
    lbs[:nunknowns] = -np.inf
    ubs = np.inf*np.ones(nunknowns+nslack_variables)
    bounds = Bounds(lbs, ubs)

    # Initialize the slacks at |x0| so the linear constraints hold at start.
    x0 = np.concatenate([init_guess, np.absolute(init_guess)])
    method = get_method(options)
    #method = options.get('method','slsqp')
    if 'method' in options:
        del options['method']
    if method != 'ipopt':
        res = minimize(
            partial(obj, lamda), x0, method=method, jac=True, hess=hess, options=options,
            bounds=bounds, constraints=constraints)
    else:
        #jac_structure_old = lambda : np.nonzero(np.tile(np.eye(nunknowns), (2, 2)))
        def jac_structure():
            # Sparsity pattern of the constraint jacobian: each of the
            # 2*nunknowns rows touches one x column and one t column.
            rows = np.repeat(np.arange(2*nunknowns), 2)
            cols = np.empty_like(rows)
            cols[::2] = np.hstack([np.arange(nunknowns)]*2)
            cols[1::2] = np.hstack([np.arange(nunknowns, 2*nunknowns)]*2)
            return rows, cols
        #assert np.allclose(jac_structure()[0],jac_structure_old()[0])
        #assert np.allclose(jac_structure()[1],jac_structure_old()[1])
        # jac_structure=None

        def hess_structure():
            # Lower-triangular pattern of the x block (ipopt expects the
            # lower triangle of a dense sub-Hessian).
            h = np.zeros((2*nunknowns, 2*nunknowns))
            h[:nunknowns, :nunknowns] = np.tril(
                np.ones((nunknowns, nunknowns)))
            return np.nonzero(h)
        if hess is None:
            hess_structure = None
        from ipopt import minimize_ipopt
        # NOTE(review): relies on a private scipy helper to translate the new
        # constraint interface to the old dict format expected by ipopt.
        from scipy.optimize._constraints import new_constraint_to_old
        con = new_constraint_to_old(constraints[0], x0)
        res = minimize_ipopt(
            partial(obj, lamda), x0, method=method, jac=True, options=options,
            constraints=con, jac_structure=jac_structure, hess_structure=hess_structure, hess=hess)
    return res.x[:nunknowns], res
| StarcoderdataPython |
5109521 | """Example of use of the CodonOptimize specification."""
from dnachisel import (DnaOptimizationProblem, random_protein_sequence,
CodonOptimize, reverse_translate, EnforceTranslation)
# Build a random 3000-residue protein (seeded for reproducibility) and a
# naive DNA back-translation of it to serve as the starting sequence.
protein = random_protein_sequence(3000, seed=123)
sequence = reverse_translate(protein)
# Optimize codon usage for E. coli while keeping the translation unchanged.
problem = DnaOptimizationProblem(
    sequence=sequence,
    constraints=[EnforceTranslation()],
    objectives=[CodonOptimize('e_coli')])
print ("\nBefore optimization:\n")
print (problem.objectives_text_summary())
problem.optimize()
print ("\nAfter optimization:\n")
print (problem.objectives_text_summary())
| StarcoderdataPython |
8173099 | # Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
from PySide2 import QtCore
import maya.standalone
maya.standalone.initialize(name='python')
import maya.cmds as mc
import collections
import logging
import json
import sys
import os
# Remove any handlers Maya (or a previous run) already attached so that
# basicConfig below actually takes effect; iterate over a copy because the
# list is mutated while looping.
for handler in logging.root.handlers[:]:
    logging.root.removeHandler(handler)
# Log everything at INFO and above to output.log, truncating on each run.
logging.basicConfig(level=logging.INFO,
                    format='%(name)s - %(levelname)s - %(message)s',
                    datefmt='%m-%d %H:%M',
                    filename='output.log',
                    filemode='w')
class MayaMaterials(QtCore.QObject):
    """Extracts material definitions from a list of Maya scene files.

    Runs under ``maya.standalone`` as a subprocess of the main UI; the
    gathered material dictionary is handed back through stdout as JSON.
    """

    def __init__(self, files_list, materials_count, parent=None):
        super(MayaMaterials, self).__init__(parent)
        self.files_list = files_list
        self.current_scene = None
        self.materials_dictionary = {}
        self.materials_count = int(materials_count)
        self.get_material_information()

    def get_material_information(self):
        """
        Main entry point for the material information extraction. Because this class is run
        in Standalone mode as a subprocess, the list is passed as a string- some parsing/measures
        need to be taken in order to separate values that originated as a list before passed.
        :return: A dictionary of all of the materials gathered. Sent back to main UI through stdout
        """
        # Bug fix: iterate the list stored on the instance instead of the
        # module-level ``file_list`` global, so the class also works when
        # instantiated outside this script's __main__ section.
        for target_file in self.files_list:
            self.current_scene = os.path.abspath(target_file.replace('\'', ''))
            mc.file(self.current_scene, open=True, force=True)
            self.set_material_descriptions()
        json.dump(self.materials_dictionary, sys.stdout)

    @staticmethod
    def get_materials(target_mesh):
        """
        Gathers a list of all materials attached to each mesh's shader
        :param target_mesh: The target mesh to pull attached material information from.
        :return: List of unique material values attached to the mesh passed as an argument.
        """
        shading_group = mc.listConnections(target_mesh, type='shadingEngine')
        materials = mc.ls(mc.listConnections(shading_group), materials=1)
        return list(set(materials))

    @staticmethod
    def get_shader(material_name):
        """
        Convenience function for obtaining the shader that the specified material (as an argument)
        is attached to.
        :param material_name: Takes the material name as an argument to get associated shader object
        :return: The shader node connected to the material's shading engine.
        """
        connections = mc.listConnections(material_name, type='shadingEngine')[0]
        shader_name = '{}.surfaceShader'.format(connections)
        shader = mc.listConnections(shader_name)[0]
        return shader

    def get_shader_information(self, shader, material_mesh):
        """
        Helper function for extracting shader/material attributes used to form the DCC specific dictionary
        of found material values for conversion.
        :param shader: The target shader object to analyze
        :param material_mesh: The material mesh needs to be passed to search for textures attached to it.
        :return: Complete set (in the form of two dictionaries) of file connections and material attribute values
        """
        shader_file_connections = {}
        materials = self.get_materials(material_mesh)
        for material in materials:
            material_files = [x for x in mc.listConnections(material, plugs=1, source=1) if x.startswith('file')]
            for file_name in material_files:
                file_texture = mc.getAttr('{}.fileTextureName'.format(file_name.split('.')[0]))
                # Skip pre-converted .dds textures; only source images are recorded.
                if os.path.basename(file_texture).split('.')[-1] != 'dds':
                    key_name = mc.listConnections(file_name, plugs=1, source=1)[0]
                    shader_file_connections[key_name] = file_texture

        shader_attributes = {}
        for shader_attribute in mc.listAttr(shader, s=True, iu=True):
            try:
                shader_attributes[str(shader_attribute)] = str(mc.getAttr('{}.{}'.format(shader, shader_attribute)))
            except Exception as e:
                # Some attributes cannot be read generically; log and move on.
                logging.error('MayaAttributeError: {}'.format(e))
        return shader_file_connections, shader_attributes

    def set_material_dictionary(self, material_name, material_type, material_mesh):
        """
        When a unique material has been found, this creates a dictionary entry with all relevant material values. This
        includes material attributes as well as attached file textures. Later in the process this information is
        leveraged when creating the Lumberyard material definition.
        :param material_name: The name attached to the material
        :param material_type: Specific type of material (Arnold, Stingray, etc.)
        :param material_mesh: Mesh that the material is applied to
        :return:
        """
        self.materials_count += 1
        shader = self.get_shader(material_name)
        shader_file_connections, shader_attributes = self.get_shader_information(shader, material_mesh)
        material_dictionary = collections.OrderedDict(MaterialName=material_name, MaterialType=material_type,
                                                      DccApplication='Maya', AppliedMesh=material_mesh,
                                                      FileConnections=shader_file_connections,
                                                      SceneName=str(self.current_scene),
                                                      MaterialAttributes=shader_attributes)

        # Entries are keyed 'Material_N', not by the Maya material name.
        material_key = 'Material_{}'.format(self.materials_count)
        self.materials_dictionary[material_key] = material_dictionary
        logging.info('\n\n:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n'
                     'MATERIAL DEFINITION: {} \n'
                     ':::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n{}'.format(
                        self.materials_dictionary[material_key]['MaterialType'],
                        json.dumps(self.materials_dictionary[material_key], indent=4)))

    def set_material_descriptions(self):
        """
        This function serves as the clearinghouse for all analyzed materials passing through the system.
        It will determine whether or not the found material has already been processed, or if it needs to
        be added to the final material dictionary. In the event that an encountered material has already
        been processed, this function creates a register of all meshes it is applied to in the 'AppliedMesh'
        attribute.
        :return:
        """
        scene_geo = mc.ls(v=True, geometry=True)
        for target_mesh in scene_geo:
            material_list = self.get_materials(target_mesh)
            for material_name in material_list:
                material_type = mc.nodeType(material_name)
                if material_type != 'lambert':
                    material_listed = [x for x in self.materials_dictionary
                                       if self.materials_dictionary[x]['MaterialName'] == material_name]

                    if not material_listed:
                        self.set_material_dictionary(str(material_name), str(material_type), str(target_mesh))
                    else:
                        # Bug fix: the dictionary is keyed by 'Material_N', so
                        # indexing it with the Maya material name raised
                        # KeyError; use the key found by the search above.
                        entry_key = material_listed[0]
                        mesh_list = self.materials_dictionary[entry_key].get('AppliedMesh')
                        if not isinstance(mesh_list, list):
                            self.materials_dictionary[entry_key]['AppliedMesh'] = [mesh_list, target_mesh]
                        else:
                            mesh_list.append(target_mesh)
# ++++++++++++++++++++++++++++++++++++++++++++++++#
# Maya Specific Shader Mapping                    #
# ++++++++++++++++++++++++++++++++++++++++++++++++#

# Invoked as a subprocess: argv[1:-1] are the scene files to scan and the
# final argument is the running material count from the parent process.
file_list = sys.argv[1:-1]
count = sys.argv[-1]
instance = MayaMaterials(file_list, count)
| StarcoderdataPython |
6410135 | from tool.runners.python import SubmissionPy
from collections import defaultdict
from functools import lru_cache
from textwrap import wrap
class BadouralixSubmission(SubmissionPy):
    # Advent of Code 2020 day 19 part 2 style solver: parse grammar rules,
    # then count words of the form (42)^m (31)^n with m > n, relying on the
    # fact that every word recognized by rule 42 or 31 is 8 characters long.

    def run(self, s):
        """
        :param s: input in string format
        :return: solution flag
        """
        # Before each run we need to flush the cache to avoid side effects from previous runs
        self.match.cache_clear()
        # We need a class attribute here to pass it to the match method and still benefit from the cache
        # NOTE(review): lru_cache on a method keys on self and keeps the
        # instance alive; mitigated here by the explicit cache_clear above.
        self.rules = defaultdict(list)

        result = 0

        # Input is "<rules>\n\n<words>"; each rule is "id: alt1 | alt2 ..."
        metadata, words = s.split("\n\n")
        for rule in metadata.split("\n"):
            rid, rlists = rule.split(": ")
            for rlist in rlists.split(" | "):
                self.rules[rid].append(rlist.replace('"', "").split(" "))

        # Now we cannot really exhaustively enumerate all accepted words
        # Nor can we build a finite automaton to recognize the language
        # We have 0 = 8 11 = (42 | 42 8) (42 31 | 42 11 31) = 42+ 42...42 31...31 ( same number of 42 and 31 in the second part )
        # Accepted words look like "m words recognized by 42 then n words recognized by 31, where m is strictly greater than n"
        # Lucky us, all words recognized by 42 or 31 are exactly 8 character long
        # At this point we do not try to be fancy with a pushdown automaton, we just try all possible values for n
        for word in words.split("\n"):
            subwords = wrap(word, 8)
            for n in range(1, (len(subwords) - 1) // 2 + 1):
                if all(
                    self.match(subword, "42") == len(subword)
                    for subword in subwords[:-n]
                ) and all(
                    self.match(subword, "31") == len(subword)
                    for subword in subwords[-n:]
                ):
                    result += 1
                    break

        return result

    @lru_cache(maxsize=None)
    def match(self, word, rid):
        # Returns the number of characters of ``word`` consumed by rule
        # ``rid`` (first matching alternative), or -1 when no match.
        if len(word) == 0:
            return -1
        elif self.rules[rid] == [["a"]] or self.rules[rid] == [["b"]]:
            if word[0] == self.rules[rid][0][0]:
                return 1
            else:
                return -1
        else:
            for subrule in self.rules[rid]:
                position = 0
                for step in subrule:
                    progress = self.match(word[position:], step)
                    if progress == -1:
                        break
                    else:
                        position += progress
                else:
                    # Here we assume that the first match is the only match
                    return position
            return -1
| StarcoderdataPython |
from KnapsackProblem.KnapsackProblem import KnapsackProblem

if __name__ == '__main__':
    # Gather the genetic-algorithm parameters interactively.
    print("------------- Knapsack Informations -------------")
    max_weight = int(input("Select max weight of knapsack: "))
    length_items = int(input("Select max amount of items in the knapsack: "))
    max_weight_items = int(input("Select the max weight of the items in the knapsack: "))
    max_price_items = int(input("Select the maximum value of the items in the knapsack: "))
    lenght_population = int(input("Select population size: "))
    lenght_generations = int(input("Select the number of generations: "))

    knapsack = KnapsackProblem(max_weight, length_items, max_weight_items, max_price_items, lenght_population, lenght_generations)
    # Bug fix: the method was referenced but never called (missing
    # parentheses), so the solver never ran.
    knapsack.solveProblem()
5069740 | <gh_stars>1-10
import os
import pwd
import grp
from collections import OrderedDict
from envy.lib.docker_manager import ContainerManager
from envy.lib.io import StepPrinter
from envy.lib.config import ENVY_CONFIG
import envy.lib.triggers as triggers
from .script_setup_step import ScriptSetupStep
from .remote_setup_step import RemoteSetupStep
from .apt_package_manager_step import AptPackageManagerStep
class Builder:
    """Runs triggered build steps on a container.

    Assembles an ordered set of setup steps (system packages, host-user
    bootstrap, then config-defined steps), runs every step whose trigger
    fires, and persists trigger state for the steps that actually built.
    """

    def __init__(self, container: ContainerManager, printer: StepPrinter):
        self.container = container
        self.printer = printer
        # Insertion order matters: later steps may reference earlier ones
        # via TriggerStep, and steps execute in this order.
        self.steps = OrderedDict()
        self.system_package_step = None

    def build(self):
        """Create all steps and run the ones whose triggers fire."""
        # Create system packages step
        self.__create_system_packages_step()
        # Create initial-setup step
        self.__create_initial_setup_steps()
        # Create config-defined steps
        self.__create_steps()
        # Run triggered steps
        self.__run_triggered()
        # Persist triggers
        self.__persist_triggers()

    def __create_system_packages_step(self):
        """Register the apt step that installs the configured system packages."""
        self.system_package_step = AptPackageManagerStep(
            self.container, ENVY_CONFIG.get_system_packages()
        )
        self.steps[self.system_package_step.name] = self.system_package_step

    def __create_initial_setup_steps(self):
        """Register a step that mirrors the host user inside the container.

        Recreates the calling user's name, uid/gid, and groups, and sets up
        a writable /uhome home directory — presumably so files created in
        the container are owned by the host user (TODO confirm with the
        container run configuration).
        """
        # Set up this user's username and groups inside the container.
        uid = os.getuid()
        gid = os.getgid()
        groups = os.getgroups()
        uname = pwd.getpwuid(uid).pw_name
        group_info = [grp.getgrgid(group) for group in groups]
        # One /etc/group line per host group; the user is listed as a member
        # of every group except their primary one (the primary membership is
        # implied by the /etc/passwd entry below).
        group_creation = [
            "echo '{}:x:{}:{}' >> /etc/group".format(
                group.gr_name, str(group.gr_gid), uname if group.gr_gid != gid else ""
            )
            for group in group_info
        ]
        chmod_step = ScriptSetupStep(
            "ENVY_chmod_root",
            "Setting up home environment",
            self.container,
            [
                "chmod a+wrx /root",
                "chmod a+wrx /",
                "echo '{}:x:{}:{}::/uhome:/bin/bash' >> /etc/passwd".format(
                    uname, str(uid), str(gid)
                ),
                "mkdir /uhome",
                "chown {}:{} /uhome".format(str(uid), str(gid)),
            ]
            + group_creation,
            False,
        )
        self.steps[chmod_step.name] = chmod_step

    def __create_steps(self):
        """Register the steps declared in the ENVY config, with their triggers."""
        for m in ENVY_CONFIG.get_setup_steps():
            # Create step
            name = m["name"]
            label = m["label"]
            if m["type"] == "script":
                step = ScriptSetupStep(
                    name, label, self.container, m["run"], m["as_user"]
                )
            elif m["type"] == "remote":
                step = RemoteSetupStep(name, label, self.container, m["url"])
            else:
                # BUG FIX: an unrecognized type previously left `step` unbound,
                # crashing below with an opaque NameError; fail fast instead.
                raise ValueError("Unknown setup step type: {}".format(m["type"]))
            # Create and register triggers
            if m["triggers"] == "always":
                trigger = triggers.TriggerAlways()
            else:
                trigger_list = []
                for t in m["triggers"]["system-packages"]:
                    trigger_list.append(
                        triggers.TriggerSystemPackage(t, self.system_package_step)
                    )
                for t in m["triggers"]["files"]:
                    trigger_list.append(triggers.TriggerWatchfile(t))
                for t in m["triggers"]["steps"]:
                    # NOTE(review): step triggers can only reference steps
                    # registered earlier in self.steps — confirm config order
                    # is validated upstream.
                    trigger_list.append(triggers.TriggerStep(self.steps[t]))
                trigger = triggers.TriggerGroup(trigger_list)
            step.set_trigger(trigger)
            # Add step to dict
            self.steps[name] = step

    def __run_triggered(self):
        """Run each registered step whose trigger fires; report skips."""
        for step in self.steps.values():
            if step.should_trigger():
                self.printer.start_step(step.label)
                step.run()
                self.printer.end_step()
            else:
                self.printer.skip_step(step.label)

    def __persist_triggers(self):
        """Persist trigger state for every step that actually built."""
        for step in self.steps.values():
            if step.has_built():
                step.persist_trigger()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.