id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
3542819 | import StructPy.cross_sections as xs
import StructPy.structural_classes as sc
import StructPy.materials as ma
# Define a generic cross-section (area A=30, strong-axis inertia Ix=700) and
# a steel material (E = 29000 ksi) for the frame below.
xs1 = xs.generalSection(A=30, Ix=700)
s1 = sc.Structure(xs1, ma.Steel(E=29000))
# Three nodes forming an L-shaped frame: 144 units tall, 144 units wide.
s1.addNode(0, 0)
s1.addNode(0, 144)
s1.addNode(144, 144)
# Members connect node indices 0-1 (column) and 1-2 (beam).
s1.addMember(0, 1)
s1.addMember(1, 2)
m1 = s1.members[0]
# Global element stiffness via T' * k_local * T.
# NOTE(review): '*' here assumes m1.T / m1.kframe are numpy matrix objects
# (elementwise '*' on ndarrays would be wrong) -- confirm against StructPy.
stiff1 = m1.T.T * m1.kframe * m1.T
s1.plot()
# NOTE(review): printProperties() presumably prints and returns None, so this
# rebinds xs1 to None -- confirm whether the return value is actually wanted.
xs1 = xs.AISC('W8X48').printProperties()
| StarcoderdataPython |
3238395 | <filename>nipy/io/nibcompat.py<gh_stars>0
""" Compatibility functions for older versions of nibabel
Nibabel <= 1.3.0 do not have these attributes:
* header
* affine
* dataobj
The equivalents for these older versions of nibabel are:
* obj.get_header()
* obj.get_affine()
* obj._data
With old nibabel, getting unscaled data used `read_img_data(img,
prefer="unscaled")`. Newer nibabel should prefer the `get_unscaled` method on
the image proxy object
"""
import numpy as np
import nibabel as nib
def get_dataobj(img):
    """ Return data object for nibabel image

    Parameters
    ----------
    img : ``SpatialImage`` instance
        Instance of nibabel ``SpatialImage`` class

    Returns
    -------
    dataobj : object
        ``ArrayProxy`` or ndarray object containing data for `img`
    """
    # Newer nibabel exposes ``dataobj``; fall back to the private ``_data``
    # attribute used by nibabel <= 1.3.0.
    if hasattr(img, 'dataobj'):
        return img.dataobj
    return img._data
def get_header(img):
    """ Return header from nibabel image

    Parameters
    ----------
    img : ``SpatialImage`` instance
        Instance of nibabel ``SpatialImage`` class

    Returns
    -------
    header : object
        header object from `img`
    """
    # Newer nibabel exposes ``header``; older releases only provide the
    # ``get_header()`` accessor.
    if hasattr(img, 'header'):
        return img.header
    return img.get_header()
def get_affine(img):
    """ Return affine from nibabel image

    Parameters
    ----------
    img : ``SpatialImage`` instance
        Instance of nibabel ``SpatialImage`` class

    Returns
    -------
    affine : object
        affine object from `img`
    """
    # Newer nibabel exposes ``affine``; older releases only provide the
    # ``get_affine()`` accessor.
    if hasattr(img, 'affine'):
        return img.affine
    return img.get_affine()
def get_unscaled_data(img):
    """ Get the data from a nibabel image, maybe without applying scaling

    Parameters
    ----------
    img : ``SpatialImage`` instance
        Instance of nibabel ``SpatialImage`` class

    Returns
    -------
    data : ndarray
        Data as loaded from image, not applying scaling if this can be avoided
    """
    # Newer nibabel: the array proxy class knows how to return raw data.
    if hasattr(nib.AnalyzeImage.ImageArrayProxy, 'get_unscaled'):
        try:
            return img.dataobj.get_unscaled()
        except AttributeError:
            # img.dataobj is a plain ndarray (in-memory image, no proxy):
            # materialize it as-is, no scaling was ever applied.
            return np.array(img.dataobj)
    # Old nibabel (<= 1.3.0): ask read_img_data to skip scaling if possible.
    return nib.loadsave.read_img_data(img, prefer='unscaled')
| StarcoderdataPython |
9623710 | <filename>python/raydp/spark/tf/dataset.py
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
from typing import List, Union
import numpy as np
import pandas as pd
import tensorflow as tf
from raydp.spark.context import save_to_ray
from raydp.spark.resource_manager.spark_cluster import SharedDataset
from raydp.spark.utils import divide_blocks
class _Dataset:
    """Common machinery for exposing tabular data as a ``tf.data.Dataset``.

    Normalizes the feature/label column, dtype and shape specifications
    (scalars are promoted to single-element lists, missing dtypes/shapes get
    float32 / scalar defaults) and knows how to turn one pandas DataFrame
    into a ``(features_tuple, label)`` dataset.  Subclasses implement
    :meth:`setup`.
    """

    def __init__(self,
                 feature_columns: List[str],
                 feature_types: List[tf.DType],
                 feature_shapes: List[tf.TensorShape],
                 label_column: str,
                 label_type: tf.DType,
                 label_shape: tf.TensorShape,
                 shuffle: bool):
        self._feature_columns: List[str] = feature_columns
        self._feature_types: List[tf.DType] = feature_types
        self._feature_shapes: List[tf.TensorShape] = feature_shapes
        self._label_column: str = label_column
        self._label_type: tf.DType = label_type
        self._label_shape: tf.TensorShape = label_shape
        self._shuffle: bool = shuffle
        self._resolved: bool = False
        self._resolved_data_set: SharedDataset = None
        self._check_and_convert()

    def _check_and_convert(self):
        """Promote scalar arguments to lists, validate sizes/types, fill defaults."""
        # convert to list for convenience (use the builtin ``list`` for
        # isinstance checks; ``typing.List`` was inconsistent with the rest
        # of this method)
        if not isinstance(self._feature_columns, list):
            self._feature_columns = [self._feature_columns]
        if self._feature_shapes:
            if not isinstance(self._feature_shapes, list):
                self._feature_shapes = [self._feature_shapes]
            assert len(self._feature_columns) == len(self._feature_shapes), \
                "The feature_shapes size must match the feature_columns"
        if self._feature_types:
            if not isinstance(self._feature_types, list):
                self._feature_types = [self._feature_types]
            assert len(self._feature_columns) == len(self._feature_types), \
                "The feature_types size must match the feature_columns"
            # Validate once; the original looped over the indices and
            # re-checked the whole list on every iteration.
            assert all(isinstance(dtype, tf.DType) for dtype in self._feature_types), \
                "All value in feature_types should be tf.DType instance"
        # Defaults: scalar shape and float32 dtype for anything unspecified.
        if not self._feature_shapes:
            self._feature_shapes = [tf.TensorShape(([]))] * len(self._feature_columns)
        if not self._feature_types:
            self._feature_types = [tf.float32] * len(self._feature_columns)
        if not self._label_type:
            self._label_type = tf.float32
        if not self._label_shape:
            self._label_shape = tf.TensorShape(([]))

    def _create_dataset_from_pandas(self, df: pd.DataFrame) -> tf.data.Dataset:
        """Convert one pandas DataFrame into a ``((features...), label)`` dataset."""
        tensors: List[tf.Tensor] = []
        # Prepend a -1 (row count) dimension so reshape can absorb any number
        # of rows.  Plain for-loop instead of a side-effect list comprehension.
        feature_shapes = [shape.as_list() for shape in self._feature_shapes]
        for shape in feature_shapes:
            shape.insert(0, -1)
        label_shape = self._label_shape.as_list()
        label_shape.insert(0, -1)
        for col, tp, shape in zip(self._feature_columns,
                                  self._feature_types,
                                  feature_shapes):
            col_t = tf.convert_to_tensor(df[col], dtype=tp)
            col_t = tf.reshape(col_t, shape)
            tensors.append(col_t)
        label_tensor = tf.convert_to_tensor(df[self._label_column], self._label_type)
        label_tensor = tf.reshape(label_tensor, label_shape)
        return tf.data.Dataset.from_tensor_slices((tuple(tensors), label_tensor))

    def setup(self, config) -> tf.data.Dataset:
        """Build the dataset for training; implemented by subclasses."""
        pass
class PandasDataset(_Dataset):
    """A tf.data.Dataset source backed by a single in-memory pandas DataFrame."""

    def __init__(self,
                 df: pd.DataFrame,
                 feature_columns: List[str],
                 feature_types: List[tf.DType],
                 feature_shapes: List[tf.TensorShape],
                 label_column: str,
                 label_type: tf.DType,
                 label_shape: tf.TensorShape,
                 shuffle: bool):
        # See _Dataset for the meaning of the column/type/shape arguments.
        super(PandasDataset, self).__init__(
            feature_columns, feature_types, feature_shapes, label_column,
            label_type, label_shape, shuffle)
        self._df = df

    def setup(self, config) -> tf.data.Dataset:
        """Build the dataset from the DataFrame, batched by config["batch_size"]."""
        batch_size = config["batch_size"]
        return self._create_dataset_from_pandas(self._df).batch(batch_size)
class RayDataset(_Dataset):
    """A tf.data.Dataset source backed by a Spark/koalas DataFrame saved to Ray."""
    # TODO: currently, we do not support multiple outputs model

    def __init__(self,
                 df: Union['pyspark.sql.DataFrame', 'koalas.DataFrame'],
                 feature_columns: List[str],
                 feature_types: List[tf.DType],
                 feature_shapes: List[tf.TensorShape],
                 label_column: str,
                 label_type: tf.DType,
                 label_shape: tf.TensorShape,
                 shuffle: bool):
        """
        Transfer Spark DataFrame to Tensorflow Dataset
        :param df: the Spark DataFrame or koalas DataFrame
        :param feature_columns: the feature columns, also it is the Model input name
        :param feature_types: the type requirements for the given Model input
        :param feature_shapes: the shape requirements for the given Model input
        :param label_column: the label column
        :param label_type: the label type
        :param label_shape: the label shape
        :param shuffle: whether shuffle the data set
        """
        super(RayDataset, self).__init__(
            feature_columns, feature_types, feature_shapes, label_column,
            label_type, label_shape, shuffle)
        self._data_set: SharedDataset = save_to_ray(df)

    def setup(self, config) -> tf.data.Dataset:
        """Build the (repeated, batched) dataset for this worker.

        Uses the distributed path when a TF_CONFIG cluster spec is present in
        the environment, otherwise reads every partition locally.
        """
        if "TF_CONFIG" in os.environ:
            dataset = self._setup_distributed_dataset()
        else:
            dataset = self._setup_single_node()
        batch_size = config["batch_size"]
        return dataset.repeat().batch(batch_size)

    def _setup_single_node(self) -> tf.data.Dataset:
        """Concatenate every partition of the shared dataset into one dataset."""
        self._resolved_data_set = self._data_set
        self._resolved_data_set.resolve()
        self._resolved = True
        datasets: List[tf.data.Dataset] = []
        # we assume the SharedDataset is not the subset
        partition_sizes = self._resolved_data_set.partition_sizes()
        for i in range(len(partition_sizes)):
            pdf = self._resolved_data_set[i]
            datasets.append(self._create_dataset_from_pandas(pdf))
        assert len(datasets) > 0
        # Concatenate all partition datasets.
        result = datasets[0]
        for i in range(1, len(datasets)):
            # BUG FIX: Dataset.concatenate returns a new dataset; the result
            # must be re-assigned or every partition after the first is lost.
            result = result.concatenate(datasets[i])
        if self._shuffle:
            # BUG FIX: Dataset.shuffle requires a buffer_size argument; use
            # the total row count so the shuffle covers the whole dataset.
            result = result.shuffle(buffer_size=sum(partition_sizes))
        return result

    def _setup_distributed_dataset(self) -> tf.data.Dataset:
        """Build a generator-backed dataset over this worker's share of blocks."""
        tf_config = json.loads(os.environ["TF_CONFIG"])
        world_size = len(tf_config["cluster"]["worker"])
        world_rank = tf_config["task"]["index"]
        blocks, block_sizes = divide_blocks(
            self._data_set.partition_sizes(), world_size, world_rank, self._shuffle, False)
        self._resolved_data_set: SharedDataset = self._data_set.subset(blocks)
        self._resolved_data_set.resolve()
        self._resolved = True
        outer = self

        def make_generator():
            # Optionally shuffle the block order, then the rows within each
            # block, yielding one (features, label) pair at a time.
            indexes = list(range(len(blocks)))
            if outer._shuffle:
                np.random.shuffle(indexes)
            for i in indexes:
                block_index = blocks[i]
                pdf: pd.DataFrame = outer._data_set[block_index]
                features = [pdf[col].values for col in outer._feature_columns]
                label = pdf[outer._label_column].values
                inner_indexes = list(range(block_sizes[i]))
                if outer._shuffle:
                    np.random.shuffle(inner_indexes)
                for j in inner_indexes:
                    yield tuple(f[j] for f in features), label[j]

        output_shapes = (tuple(self._feature_shapes), self._label_shape)
        output_types = (tuple(self._feature_types), self._label_type)
        return tf.data.Dataset.from_generator(generator=make_generator,
                                              output_types=output_types,
                                              output_shapes=output_shapes)
| StarcoderdataPython |
6624878 | # coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding=utf-8
"""Tests for tensorflow_datasets.core.features.text_feature."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_datasets.core import features
from tensorflow_datasets.core import test_utils
from tensorflow_datasets.core.features.text import text_encoder
tf.compat.v1.enable_eager_execution()
class TextFeatureTest(test_utils.FeatureExpectationsTestCase):
    """Tests for the ``features.Text`` feature connector."""

    def test_text(self):
        """Plain (unencoded) text is serialized as UTF-8 bytes."""
        nonunicode_text = 'hello world'
        unicode_text = u'你好'
        self.assertFeature(
            feature=features.Text(),
            shape=(),
            dtype=tf.string,
            tests=[
                # Non-unicode
                test_utils.FeatureExpectationItem(
                    value=nonunicode_text,
                    expected=tf.compat.as_bytes(nonunicode_text),
                ),
                # Unicode
                test_utils.FeatureExpectationItem(
                    value=unicode_text,
                    expected=tf.compat.as_bytes(unicode_text),
                ),
                # Empty string
                test_utils.FeatureExpectationItem(
                    value='',
                    expected=tf.compat.as_bytes(''),
                ),
            ],
        )

    def test_text_encoded(self):
        """ByteTextEncoder shifts each UTF-8 byte by +1 (0 is reserved)."""
        unicode_text = u'你好'
        # Unicode integer-encoded by byte
        self.assertFeature(
            feature=features.Text(encoder=text_encoder.ByteTextEncoder()),
            shape=(None,),
            dtype=tf.int64,
            tests=[
                test_utils.FeatureExpectationItem(
                    value=unicode_text,
                    expected=[i + 1 for i in [228, 189, 160, 229, 165, 189]],
                ),
                # Empty string
                test_utils.FeatureExpectationItem(
                    value='',
                    expected=[],
                ),
            ],
        )

    def test_text_conversion(self):
        """ints2str is the inverse of str2ints."""
        text_f = features.Text(encoder=text_encoder.ByteTextEncoder())
        text = u'你好'
        self.assertEqual(text, text_f.ints2str(text_f.str2ints(text)))

    def test_save_load_metadata(self):
        """An encoder round-tripped through save/load produces the same ids."""
        text_f = features.Text(
            encoder=text_encoder.ByteTextEncoder(additional_tokens=['HI']))
        text = u'HI 你好'
        ids = text_f.str2ints(text)
        self.assertEqual(1, ids[0])
        with test_utils.tmp_dir(self.get_temp_dir()) as data_dir:
            feature_name = 'dummy'
            text_f.save_metadata(data_dir, feature_name)
            new_f = features.Text()
            new_f.load_metadata(data_dir, feature_name)
            # BUG FIX: compare against the freshly *loaded* feature; the
            # original compared text_f's output with itself, which could
            # never fail and did not exercise load_metadata at all.
            self.assertEqual(ids, new_f.str2ints(text))
if __name__ == '__main__':
test_utils.main()
| StarcoderdataPython |
3524902 | <gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2018-02-05 18:56
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``info`` app.

    Drops the five per-section ``*_block_active`` boolean flags (concept,
    contacts, news, references, services) from the ``MenuLinks`` model.
    """

    dependencies = [
        ('info', '0021_info_logo_header'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='menulinks',
            name='concept_block_active',
        ),
        migrations.RemoveField(
            model_name='menulinks',
            name='contacts_block_active',
        ),
        migrations.RemoveField(
            model_name='menulinks',
            name='news_block_active',
        ),
        migrations.RemoveField(
            model_name='menulinks',
            name='references_block_active',
        ),
        migrations.RemoveField(
            model_name='menulinks',
            name='services_block_active',
        ),
    ]
| StarcoderdataPython |
8097631 | import sys
from django.core.exceptions import ValidationError, NON_FIELD_ERRORS
# Abort early with a readable message if Django is not installed, instead of
# failing later with an obscure ImportError traceback.
# NOTE(review): catches broad ``Exception`` and prints to stdout; consider
# narrowing to ImportError and writing to stderr.
try:
    from django.db import models
except Exception:
    print("There was an error loading django modules. Do you have django?")
    sys.exit()
class FeedSource(models.Model):
    """An RSS/Atom feed provider from which Feed entries are pulled."""
    # Human-readable name of the provider.
    name = models.TextField(null=False)
    # Whether the source is currently enabled.
    status = models.BooleanField(default=True)
    # URL of the feed itself.
    link = models.TextField(null=False, default='')
    # URL of the provider's logo image.
    logo_link = models.TextField(default='')
    last_active_on = models.DateTimeField(auto_now_add=True)
    details = models.TextField()

    def __str__(self):
        return self.name

    def clean(self):
        # NOTE(review): with ``or`` this only rejects a record when name,
        # link AND details are all empty; if the intent is that every field
        # is required, the condition should use ``and`` -- confirm.
        if not (self.name or self.link or self.details):
            raise ValidationError(
                {NON_FIELD_ERRORS: 'Insufficient Data'}
            )

    def save(self, *args, **kwargs):
        # Run full model validation on every save.
        self.full_clean()
        return super(FeedSource, self).save(*args, **kwargs)
class Feed(models.Model):
    """A single entry (article) fetched from a FeedSource."""
    # Identifier provided by the feed itself; unique across all sources.
    feed_id = models.CharField(max_length=100, null=False, default='', unique=True)
    title = models.TextField(null=False, default='')
    summary = models.TextField()
    author = models.CharField(max_length=100, blank=True, default='')
    added_on = models.DateTimeField(auto_now_add=True)
    source = models.ForeignKey(FeedSource, on_delete=models.CASCADE)
    # URL-safe unique identifier used in routes.
    slug = models.CharField(max_length=255, null=False, unique=True)
    # Canonical link to the article.
    link = models.TextField(blank=True, default='')
    # Additional links (serialized).
    links = models.TextField()

    def __str__(self):
        return self.title

    def clean(self):
        # NOTE(review): ``or`` only rejects when all four values are empty;
        # use ``and`` if each field is meant to be mandatory -- confirm.
        if not (self.feed_id or self.title or self.slug or self.source):
            raise ValidationError(
                {NON_FIELD_ERRORS: 'Insufficient Data'}
            )

    def save(self, *args, **kwargs):
        # Run full model validation on every save.
        self.full_clean()
        return super(Feed, self).save(*args, **kwargs)
class FeedDetail(models.Model):
    """One-to-one extension of Feed holding the full content as JSON text."""
    feed = models.OneToOneField(Feed, on_delete=models.CASCADE)
    content_json = models.TextField()

    def __str__(self):
        return self.feed.title

    def clean(self):
        # NOTE(review): ``or`` only rejects when both values are empty;
        # use ``and`` if both fields are meant to be mandatory -- confirm.
        if not (self.feed or self.content_json):
            raise ValidationError(
                {NON_FIELD_ERRORS: 'Insufficient Data'}
            )

    def save(self, *args, **kwargs):
        # Run full model validation on every save.
        self.full_clean()
        return super(FeedDetail, self).save(*args, **kwargs)
class Comment(models.Model):
    """A user comment attached to a Feed entry."""
    # Commenting user's identifier (plain string, not a FK).
    user = models.CharField(max_length=200, null=False)
    feed = models.ForeignKey(Feed, on_delete=models.CASCADE)
    text = models.TextField()
    added_on = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.user

    def clean(self):
        # NOTE(review): ``or`` only rejects when all three values are empty;
        # use ``and`` if each field is meant to be mandatory -- confirm.
        if not (self.user or self.feed or self.text):
            raise ValidationError(
                {NON_FIELD_ERRORS: 'Insufficient Data'}
            )

    def save(self, *args, **kwargs):
        # Run full model validation on every save.
        self.full_clean()
        return super(Comment, self).save(*args, **kwargs)
class Bookmarked(models.Model):
    """Marks a Feed entry as bookmarked by a user."""
    feed = models.ForeignKey(Feed, on_delete=models.CASCADE)
    # Bookmarking user's identifier (plain string, not a FK).
    user = models.CharField(max_length=200, null=False)

    def __str__(self):
        return self.feed.title

    def clean(self):
        # NOTE(review): ``or`` only rejects when both values are empty;
        # use ``and`` if both fields are meant to be mandatory -- confirm.
        if not (self.feed or self.user):
            raise ValidationError(
                {NON_FIELD_ERRORS: 'Insufficient Data'}
            )

    def save(self, *args, **kwargs):
        # Run full model validation on every save.
        self.full_clean()
        return super(Bookmarked, self).save(*args, **kwargs)
| StarcoderdataPython |
8048237 | <filename>test/test_storage.py<gh_stars>0
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from conftest import Mock
import responses
class TestStorage(object):
    """Tests for storage operations of the UpCloud API client.

    Every test registers canned HTTP responses via ``Mock`` / ``responses``,
    so no real API calls are made; ``manager`` is a pytest fixture wrapping
    the API client.
    """

    @responses.activate
    def test_get_storage(self, manager):
        data = Mock.mock_get("storage/01d4fcd4-e446-433b-8a9c-551a1284952e")
        storage = manager.get_storage("01d4fcd4-e446-433b-8a9c-551a1284952e")
        assert type(storage).__name__ == "Storage"
        assert storage.uuid == "01d4fcd4-e446-433b-8a9c-551a1284952e"

    @responses.activate
    def test_get_storages(self, manager):
        data = Mock.mock_get("storage/public")
        storages = manager.get_storages("public")
        for storage in storages:
            assert type(storage).__name__ == "Storage"

    @responses.activate
    def test_get_templates(self, manager):
        data = Mock.mock_get("storage/template")
        templates = manager.get_templates()
        for template in templates:
            assert type(template) is dict

    @responses.activate
    def test_storage_create(self, manager):
        Mock.mock_post("storage")
        storage = manager.create_storage(666, "maxiops", "My data collection", "fi-hel1")
        assert type(storage).__name__ == "Storage"
        assert storage.size == 666
        assert storage.tier == "maxiops"
        assert storage.title == "My data collection"
        assert storage.zone == "fi-hel1"

    @responses.activate
    def test_clone_storage(self, manager):
        data = Mock.mock_get("storage/01d4fcd4-e446-433b-8a9c-551a1284952e")
        storage = manager.get_storage("01d4fcd4-e446-433b-8a9c-551a1284952e")
        Mock.mock_post("storage/01d4fcd4-e446-433b-8a9c-551a1284952e/clone")
        cloned_storage = storage.clone('cloned-storage-test', 'fi-hel1')
        assert type(cloned_storage).__name__ == "Storage"
        assert cloned_storage.size == 666
        assert cloned_storage.tier == "maxiops"
        assert cloned_storage.title == "cloned-storage-test"
        assert cloned_storage.zone == "fi-hel1"

    @responses.activate
    def test_cancel_clone_storage(self, manager):
        data = Mock.mock_get("storage/01d4fcd4-e446-433b-8a9c-551a1284952e")
        storage = manager.get_storage("01d4fcd4-e446-433b-8a9c-551a1284952e")
        Mock.mock_post("storage/01d4fcd4-e446-433b-8a9c-551a1284952e/clone")
        cloned_storage = manager.clone_storage(storage, 'cloned-storage-test', 'fi-hel1')
        # Cancelling a clone returns an empty body.
        Mock.mock_post("storage/01d3e9ad-8ff5-4a52-9fa2-48938e488e78/cancel", empty_content=True)
        res = cloned_storage.cancel_cloning()
        assert res == {}

    @responses.activate
    def test_load_cd_rom(self, manager):
        data = Mock.mock_post("server/00798b85-efdc-41ca-8021-f6ef457b8531/cdrom/load", ignore_data_field=True)
        storage_devices = manager.load_cd_rom("00798b85-efdc-41ca-8021-f6ef457b8531", "01ec5c26-a25d-4752-94e4-27bd88b62816")
        assert len(storage_devices) == 2

    @responses.activate
    def test_eject_cd_rom(self, manager):
        data = Mock.mock_post("server/00798b85-efdc-41ca-8021-f6ef457b8531/cdrom/eject", ignore_data_field=True, empty_payload=True)
        storage_devices = manager.eject_cd_rom("00798b85-efdc-41ca-8021-f6ef457b8531")
        assert len(storage_devices) == 1

    @responses.activate
    def test_create_storage_backup(self, manager):
        data = Mock.mock_get("storage/01d4fcd4-e446-433b-8a9c-551a1284952e")
        storage = manager.get_storage("01d4fcd4-e446-433b-8a9c-551a1284952e")
        data = Mock.mock_post("storage/01d4fcd4-e446-433b-8a9c-551a1284952e/backup")
        storage_backup = storage.create_backup("test-backup")
        assert storage_backup.title == "test-backup"
        assert storage_backup.size == 666
        assert storage_backup.zone == "fi-hel1"

    @responses.activate
    def test_restore_storage_backup(self, manager):
        data = Mock.mock_get("storage/01350eec-6ebf-4418-abe4-e8bb1d5c9643")
        storage_backup = manager.get_storage("01350eec-6ebf-4418-abe4-e8bb1d5c9643")
        data = Mock.mock_post("storage/01350eec-6ebf-4418-abe4-e8bb1d5c9643/restore", empty_content=True)
        res = storage_backup.restore_backup()
        assert res == {}

    @responses.activate
    def test_templatize_storage(self, manager):
        data = Mock.mock_get("storage/01d4fcd4-e446-433b-8a9c-551a1284952e")
        storage = manager.get_storage("01d4fcd4-e446-433b-8a9c-551a1284952e")
        data = Mock.mock_post("storage/01d4fcd4-e446-433b-8a9c-551a1284952e/templatize")
        storage_template = storage.templatize("my server template")
        assert storage_template.title == "my server template"
        assert storage_template.type == "template"

    @responses.activate
    def test_create_storage_import(self, manager):
        data = Mock.mock_post("storage/01d4fcd4-e446-433b-8a9c-551a1284952e/import", ignore_data_field=True)
        storage_import = manager.create_storage_import("01d4fcd4-e446-433b-8a9c-551a1284952e", 'direct_upload')
        assert storage_import.state == "prepared"
        assert storage_import.source == "direct_upload"

    @responses.activate
    def test_upload_file_for_storage_import(self, manager):
        data = Mock.mock_post("storage/01d4fcd4-e446-433b-8a9c-551a1284952e/import", ignore_data_field=True)
        storage_import = manager.create_storage_import("01d4fcd4-e446-433b-8a9c-551a1284952e", 'direct_upload')
        # The upload endpoint lives on a separate host, so it is mocked with
        # a full URL rather than an API path.
        data = Mock.mock_put("https://fi-hel1.img.upcloud.com/uploader/session/07a6c9a3-300e-4d0e-b935-624f3dbdff3f", ignore_data_field=True, empty_payload=True, call_api=False)
        res = manager.upload_file_for_storage_import(storage_import, 'test/json_data/test_file.json')
        assert res.get("written_bytes") == 909500125
        assert res.get("md5sum") == "5cc6f7e7a1c52303ac3137d62410eec5"
        assert res.get("sha256sum") == "bdf14d897406939c11a73d0720ca75c709e756d437f8be9ee26af6b58ede3bd7"

    @responses.activate
    def test_get_storage_import_details(self, manager):
        data = Mock.mock_get("storage/01d4fcd4-e446-433b-8a9c-551a1284952e/import")
        storage_import = manager.get_storage_import_details("01d4fcd4-e446-433b-8a9c-551a1284952e")
        assert storage_import.state == "pending"
        assert storage_import.uuid == "07a6c9a3-300e-4d0e-b935-624f3dbdff3f"

    @responses.activate
    def test_cancel_storage_import(self, manager):
        data = Mock.mock_post("storage/01d4fcd4-e446-433b-8a9c-551a1284952e/import/cancel", empty_payload=True, ignore_data_field=True)
        storage_import = manager.cancel_storage_import("01d4fcd4-e446-433b-8a9c-551a1284952e")
        assert storage_import.state == "cancelling"
        assert storage_import.uuid == "07a6c9a3-300e-4d0e-b935-624f3dbdff3f"

    @responses.activate
    def test_storage_update(self, manager):
        Mock.mock_put("storage/01d4fcd4-e446-433b-8a9c-551a1284952e")
        storage = manager.modify_storage("01d4fcd4-e446-433b-8a9c-551a1284952e", title="my bigger data collection", size=15)
        assert type(storage).__name__ == "Storage"
        assert storage.size == 15
        assert storage.title == "my bigger data collection"

    @responses.activate
    def test_storage_update_oop(self, manager):
        # Same update as above, but through the object-oriented save() API.
        data = Mock.mock_get("storage/01d4fcd4-e446-433b-8a9c-551a1284952e")
        storage = manager.get_storage("01d4fcd4-e446-433b-8a9c-551a1284952e")
        Mock.mock_put("storage/01d4fcd4-e446-433b-8a9c-551a1284952e")
        storage.title = "my bigger data collection"
        storage.size = 15
        storage.save()
        assert storage.title == "my bigger data collection"
        assert storage.size == 15

    @responses.activate
    def test_storage_delete(self, manager):
        Mock.mock_delete("storage/01d4fcd4-e446-433b-8a9c-551a1284952e")
        res = manager.delete_storage("01d4fcd4-e446-433b-8a9c-551a1284952e")
        assert res == {}

    @responses.activate
    def test_storage_delete_oop(self, manager):
        # Same delete as above, but through the object-oriented destroy() API.
        data = Mock.mock_get("storage/01d4fcd4-e446-433b-8a9c-551a1284952e")
        storage = manager.get_storage("01d4fcd4-e446-433b-8a9c-551a1284952e")
        Mock.mock_delete("storage/01d4fcd4-e446-433b-8a9c-551a1284952e")
        storage.destroy()
        # just assert no errors
| StarcoderdataPython |
1907793 | <reponame>mrjmad/gnu_linux_mag_drf<filename>hall_of_cards/cardsgame/views.py<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import (print_function, division, absolute_import, unicode_literals)
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import viewsets
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import detail_route, list_route, permission_classes
from rest_framework.response import Response
from .models import Card, CardType
from .serializers import CardSerializer, CardTypeSerializer, ModelCardSerializer, ModelCardTypeSerializer
class CardViewSet(viewsets.ModelViewSet):
    """Authenticated CRUD endpoints for Card objects."""
    permission_classes = (IsAuthenticated,)
    queryset = Card.objects.all()
    serializer_class = CardSerializer
class CardTypeViewSet(viewsets.ModelViewSet):
    """Open (default-permission) CRUD endpoints for CardType objects."""
    queryset = CardType.objects.all()
    serializer_class = CardTypeSerializer
class ModelCardViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Card plus two read-only extra routes.

    ``count`` returns the total number of cards; ``modified`` returns the
    modification timestamp of a single card.
    """
    queryset = Card.objects.all()
    serializer_class = ModelCardSerializer

    @list_route()
    def count(self, request):
        """Return the total number of Card rows."""
        return Response(Card.objects.count())

    @detail_route()
    def modified(self, request, pk):
        """Return the card's ``modified`` timestamp, or 400 if it doesn't exist."""
        # Keep the try body minimal: only the lookup can raise.
        try:
            card = Card.objects.get(pk=pk)
        except ObjectDoesNotExist:
            return Response('Card not Exist', status=status.HTTP_400_BAD_REQUEST)
        return Response(card.modified)

    # NOTE: the original defined ``create`` only to call super() with the same
    # arguments; the redundant override has been removed (behavior unchanged).
class ModelCardTypeViewSet(viewsets.ModelViewSet):
    """Open (default-permission) CRUD endpoints for CardType objects."""
    queryset = CardType.objects.all()
    serializer_class = ModelCardTypeSerializer
| StarcoderdataPython |
4980126 | <gh_stars>0
import requests
from printing import printin
def testCase1(serverIP,username,password):
    '''
    Demonstrate a user that creates an account and then leaves the app.

    :param serverIP: host/IP of the Palermo API server (port 8080 assumed)
    :param username: username for the new account (must not already exist)
    :param password: password used for both requests
    :return: Nothing
    '''
    print("\nTest case #1 Create Acc and leave the App \n")
    # NOTE(review): 'tester' is assumed to be a pre-existing account, so the
    # server should answer 406 (duplicate username) -- confirm the fixture.
    r = requests.post("http://" + serverIP + ":8080/palermo/api/v1/user/newUser",
                      json={"username": "tester", "password": password})
    printin(r.status_code == 406, "Adding user with already existed username")
    # Creating a genuinely new user should succeed with 200.
    r = requests.post("http://" + serverIP + ":8080/palermo/api/v1/user/newUser",
                      json={"username": username, "password": password})
    printin(r.status_code == 200, "Adding correct user")
    # The response body is the new user's id, echoed back to log out.
    userID = r.json()
    r = requests.post("http://" + serverIP + ":8080/palermo/api/v1/user/logOut", json=userID)
    printin(r.status_code == 200, "Log out")
1775253 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import platform
from selenium import webdriver
from bs4 import BeautifulSoup
# Check Python version
# print(platform.python_version())
# Using the right PhantomJS for the corresponding OS.
# NOTE(review): the path is relative to the current working directory, so the
# script must be launched from the project root -- confirm.
if platform.system() == "Windows":
    PHANTOMJS_EXE = "./PhantomJS/phantomjs.exe"
else:
    PHANTOMJS_EXE = "./PhantomJS/phantomjs"
def main():
    """Scrape finished ATP US Open 2015 matches and print the first one's HTML."""
    # Use PhantomJS to browse the page, alternatively we can use
    # browser = webdriver.Firefox()
    browser = webdriver.PhantomJS(PHANTOMJS_EXE)
    browser.get('http://www.scoreboard.com/en/tennis/atp-singles/us-open-2015/results/')
    # Parse the html source
    soup = BeautifulSoup(browser.page_source, "html.parser")
    # Get all the games (rows marked as finished)
    games = soup.find_all('tr', {'class': 'stage-finished'})
    # Print out the html for the first game
    print(games[0].prettify())
if __name__ == "__main__":
    main()
    # print("This Python script was started as the main program.")
else:
    # Imported as a module: this script is meant to be executed directly,
    # so warn the importer instead of doing anything.
    print("This Python script is to be called as main(), not as the module ", __name__ + ".")
| StarcoderdataPython |
11368859 | from height_over_bottom import HeightOverBottom
from network_loss import NetworkLoss
from thruster_out import ThrusterOut
from bus_voltage import BusVoltage
from odom_kill import OdomKill
from kill import Kill
from hw_kill import HwKill
| StarcoderdataPython |
6702241 | <filename>tests/test_regrid_periodicity.py
import requests
import cdms2
import os
import sys
import basetest
import numpy as np
import basetest
import ssl
modFile = "model_ANN_climo.nc"
obsFile = "GPCP_ANN_climo.nc"
class TestRegrid(basetest.CDMSBaseTest):
    """Regression test for ESMF regridding of model data onto an obs grid,
    checking longitude periodicity is honored.

    setUp downloads two sample netCDF climatology files from uvcdat.llnl.gov;
    tearDown deletes them.  NOTE(review): the tests require network access.
    """

    def setUp(self):
        super(TestRegrid, self).setUp()
        # NOTE(review): ``context`` is created but never used -- the requests
        # below perform default SSL verification.
        context = ssl._create_unverified_context()
        myurl = "http://uvcdat.llnl.gov/cdat/sample_data/" + obsFile
        r = requests.get(myurl, stream=True)
        if r.status_code == 200:
            with open(obsFile, 'wb') as f:
                for chunk in r:
                    f.write(chunk)
        myurl = "http://uvcdat.llnl.gov/cdat/sample_data/" + modFile
        r = requests.get(myurl, stream=True)
        if r.status_code == 200:
            with open(modFile, 'wb') as f:
                for chunk in r:
                    f.write(chunk)

    def tearDown(self):
        super(TestRegrid, self).tearDown()
        os.remove(obsFile)
        os.remove(modFile)

    def testPeriodicity(self):
        reference_data_set = obsFile
        test_data_set = modFile
        f_obs = cdms2.open(reference_data_set)
        f_mod = cdms2.open(test_data_set)
        obs = f_obs('PRECT')
        # Model precipitation = convective + large-scale, converted from m/s
        # to mm/day.
        mod = (f_mod('PRECC') + f_mod('PRECL')) * 3600.0 * 24.0 * 1000.0
        mod.units = 'mm/day'
        # Obs and model longitudes start at different offsets.
        self.assertEqual(
            [obs.getLongitude()[0], obs.getLongitude()[-1]], [1.25, 358.75])
        self.assertEqual(
            [mod.getLongitude()[0], mod.getLongitude()[-1]], [0., 358.59375])
        obs_grid = obs.getGrid()
        # Regrid model to obs grid using 'linear'
        mod_reg = mod.regrid(
            obs_grid,
            regridTool='esmf',
            regridMethod='linear',
            periodicity=1,
            ignoreDegenerate=True)
        # (Exact-value assertions on mod_reg rows were previously here but
        # commented out; the grid-bound checks below are what is verified.)
        self.assertEqual([mod_reg.getLongitude()[0],
                          mod_reg.getLongitude()[-1]], [1.25, 358.75])
        # Regrid model to obs grid using 'conservative'
        mod_reg = mod.regrid(obs_grid, regridTool='esmf', regridMethod='conservative',
                             periodicity=1)
        # (Exact-value assertions on mod_reg rows were previously here but
        # commented out; the grid-bound checks below are what is verified.)
        self.assertEqual([mod_reg.getLongitude()[0],
                          mod_reg.getLongitude()[-1]], [1.25, 358.75])
if __name__ == "__main__":
basetest.run()
#
| StarcoderdataPython |
11294288 | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 17 12:59:29 2019
@author: ap18525
"""
import os

import pandas as pd
import numpy as np
from netCDF4 import Dataset
def read_csv_data(folder_path, file_name, column_name=None):
    """Read dated weather data from a CSV file.

    Parameters
    ----------
    folder_path : str
        Directory containing the file.
    file_name : str
        Name of the CSV file. The file must have a 'Date' column in
        day-first format (e.g. 17/12/2019) as its first column.
    column_name : str, optional
        Single data column to extract. When None, all columns except
        the first ('Date') are returned as a 2-D array.

    Returns
    -------
    tuple
        (dates, outputs) where dates is a pandas DatetimeIndex and
        outputs is a numpy array of the requested column(s).
    """
    # os.path.join instead of string concatenation so the function also
    # works with trailing separators / on Windows.
    data = pd.read_csv(os.path.join(folder_path, file_name))
    # Dates are stored day-first (e.g. 17/12/2019).
    # (The original comment about subtracting 24 h was copied from the
    # netCDF reader and does not apply here - no offset is needed.)
    dates = pd.to_datetime(np.array(data['Date']),
                           format='%d/%m/%Y')
    if isinstance(column_name, str):
        outputs = np.array(data[column_name])
    else:
        outputs = np.array(data[data.columns[1:]])
    return dates, outputs
def read_netcdf_data(folder_path, file_name, variable_name):
    """Read one variable plus its date axis from a netCDF file.

    Returns a (dates, values) tuple: dates as a pandas DatetimeIndex and
    values as a numpy array of the requested variable.
    """
    dataset = Dataset(folder_path + "//" + file_name, "r")
    # Times are stored as hours since 1900-01-01, and each day is
    # stamped with the *following* midnight; subtract 24 h so the
    # timestamp labels the day the data actually belongs to.
    dates = pd.to_datetime(dataset['time'][:] - 24,
                           unit='h',
                           origin=pd.Timestamp('01-01-1900'))
    values = np.array(dataset.variables[variable_name][:])
    return dates, values
209931 | # Using cryptographic-appropriate methods to generate random data
# that may be sensitive. secrets module introduced in Python 3.6
import os
import secrets
# os.urandom draws bytes straight from the OS CSPRNG, so the result is
# safe to use for sensitive purposes.
raw_bytes = os.urandom(8)
print([hex(b) for b in raw_bytes])

# secrets.choice behaves like random.choice but is cryptographically
# strong.
moves = ["rock", "paper", "scissors"]
print(secrets.choice(moves))

# The token helpers produce random material in three convenient forms:
# raw bytes, a hexadecimal string, and a URL-safe string.
print(secrets.token_bytes())
print(secrets.token_hex())
print(secrets.token_urlsafe())
| StarcoderdataPython |
6520488 | import unittest
from app.models import NewsSource
class NewsSourceTest(unittest.TestCase):
    """Unit tests for the NewsSource model class."""

    def setUp(self) -> None:
        """Create a fresh NewsSource fixture before each test case."""
        self.new_source = NewsSource("County-news", "County news", "Kenya", "Source for the best news", "https://abcnews.go.com")

    def test_instance_of_NewsSource(self):
        """The fixture should be an instance of NewsSource."""
        # assertIsInstance gives a clearer failure message than
        # assertTrue(isinstance(...)).
        self.assertIsInstance(self.new_source, NewsSource)
| StarcoderdataPython |
143423 | # import csv
# import PyPDF2
# import nltk
# from tika import parser
# from spacy.en import English
#
# #nltk.download('punkt')
# #nltk.download('averaged_perceptron_tagger')
# #from nltk.corpus import brown
# #nltk.download('brown')
#
# raw = parser.from_file('C://Users//bvjan//Documents//data.pdf')
# my = raw['content']
# print(my)
import re

# Keep only the words that contain "cat"; ".*cat" with match() is
# equivalent to searching for "cat" anywhere in the string.
mylist = ["dog", "cat", "catwild", "thundercat", "cow", "hooo"]
r = re.compile(".*cat")
newlist = [word for word in mylist if r.match(word)]
print(newlist)
| StarcoderdataPython |
3206771 | #!/home/wiseman/anaconda3/bin/python
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2019 University of Southampton
# All Rights Reserved.
# 12/05/2018
##############################################################################
__author__ = "<NAME> <<EMAIL>>"
__version__ = "0.2"
__date__ = "26/11/19"
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib
# Select the non-interactive Agg backend *before* pyplot is imported so
# the script can run headless (e.g. on a cluster node with no display).
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import datetime
import configparser
import os
import logging
import argparse
import glob
from time import gmtime, strftime
from astropy.table import Table
from astropy.io import fits
from astropy.time import Time
from des_stacks import des_stack as stack
from des_stacks.utils.loop_stack import iterate_source_loop, init_source_loop
sns.set_color_codes(palette='colorblind')
# define some DES specific lists
# Observing seasons ("minus years"); 'none' means no year restriction.
all_years = ['none','1','2','3','4'] # add 5 when available
# The ten DES supernova survey fields.
all_fields = ['SN-X1','SN-X2','SN-X3','SN-C1','SN-C2','SN-C3','SN-E1','SN-E2','SN-S1','SN-S2']
# Chip numbers 1-61 (arange excludes the upper bound).
all_chips = np.arange(1,62)
all_bands = ['g','r','i','z']
class optimiser():
    """Grid-search driver for DES SN stack quality cuts.

    For every requested field/band/year/chip it builds stacks over a
    grid of teff and PSF cuts (via des_stacks) and records the limiting
    magnitude and the PSF of each stack so the best cut combination can
    be chosen.
    """

    def __init__(self):
        # _parser() stores its results on self.parsed / self.plot and
        # returns None, so the original `parsed = self._parser()`
        # assignment was dead code.
        self._parser()

    def _parser(self):
        """Parse command-line options into the self.parsed dict."""
        parser = argparse.ArgumentParser(description='Stack some DES SN images')
        parser.add_argument('-f','--field', help = 'Field(s) to stack. Separate with space or comma (e.g. X2 X3)',nargs='?',required=False,default='X2')
        parser.add_argument('-b', '--band', help = 'Bands(s) to stack. Separate with space or comma (e.g. g r)',nargs='?',required=False,default='r')
        parser.add_argument('-my','--minusyears', help = 'Which minus years to stack (e.g. 1,2,3,4,none)',nargs='?',required=False,default='1')
        parser.add_argument('-ch','--chips', help = 'Which chips to stack (e.g. [1,5] = 1,3,4)',nargs=1,required=False,default='All')
        parser.add_argument('-wd','--workdir', help = 'Working directory [coadding]', default = 'coadding')
        parser.add_argument('-l','--looptype', help ='Parameters to optimize (can be "psf", "depth", or a comma separated list of those")',required = False, default = 'depth')
        parser.add_argument('-pr','--psfrange',help = 'Range to optimize psf in (min,max): [1.5,3]',required=False,default= '1.5,3.0')
        parser.add_argument('-tr','--teffrange',help = 'Range to optimize teff in (min,max): [0,0.5]',required=False,default= '0.0,0.5')
        parser.add_argument('-st','--step',help = 'Size of step in the cut you want to optimize over (psf,teff): [0.25,0.01]',required = False, default = '0.25,0.01')
        parser.add_argument('-pl','--plot',help='Plot a heatmap of where the best cuts are?',required = False,action = 'store_true')
        parser.add_argument('-t','--tidy',help = 'Tidy up temporary files after?',action = 'store_true')
        args = parser.parse_args()
        parsed = {}
        # Multi-valued options may arrive comma-separated, space-separated,
        # or as a bare value; try each form in turn.  (Bare `except:`
        # clauses narrowed to `except Exception:` so KeyboardInterrupt /
        # SystemExit are not swallowed.)
        try:
            fields = args.field.split(',')
        except Exception:
            try:
                fields = args.field[0].split(' ')
            except Exception:
                fields = args.field
        for i in range(len(fields)):
            try:
                field = fields[i]
                field = 'SN-' + field
                fields[i] = field
            except Exception:
                fields = 'SN-' + fields[0]
        parsed['fields'] = fields
        try:
            bands = args.band.split(',')
        except Exception:
            try:
                bands = args.band[0].split(' ')
            except Exception:
                bands = args.band
        parsed['bands'] = bands
        try:
            mys = args.minusyears.split(',')
        except Exception:
            try:
                mys = args.minusyears[0].split(' ')
            except Exception:
                mys = args.minusyears
        parsed['mys'] = mys
        if args.chips != 'All':
            try:
                chips = args.chips[0].split(',')
            except Exception:
                if args.chips[0][0] == '[':
                    # NOTE(review): np.arange excludes the upper bound,
                    # so "[1,5]" yields chips 1-4; confirm this matches
                    # the "[1,5] = 1,3,4" example in the help text.
                    chip_bounds = args.chips[0][1:-1].split(',')
                    chips = np.arange(int(chip_bounds[0]), int(chip_bounds[-1]))
                else:
                    chips = args.chips[0].split(' ')
        else:
            chips = args.chips
        parsed['chips'] = chips
        print('Parsed chips as %s' % chips)
        if not args.workdir:
            workdir = 'current'
        else:
            workdir = args.workdir
        parsed['workdir'] = workdir
        try:
            loop_types = args.looptype.split(',')
            parsed['looptype'] = loop_types
        except Exception:
            parsed['looptype'] = 'depth'
        try:
            parsed['teffrange'] = args.teffrange.split(',')
        except Exception:
            parsed['teffrange'] = [0.0, 0.5]
        try:
            parsed['psfrange'] = args.psfrange.split(',')
        except Exception:
            parsed['psfrange'] = [1.5, 3.0]
        try:
            parsed['step'] = args.step.split(',')
        except Exception:
            parsed['step'] = [0.25, 0.01]
        parsed['tidy'] = args.tidy
        self.parsed = parsed
        self.plot = args.plot

    def optimise(self, f, b, y, ch):
        """Stack over the (teff, psf) cut grid and return the best cuts.

        Returns a dict {'depth': [row, col], 'psf': [row, col]} holding,
        for each quality measure, the grid position whose stack
        maximises that measure.
        """
        t0, t1, ts = float(self.parsed['teffrange'][0]), float(self.parsed['teffrange'][1]), float(self.parsed['step'][1])
        p0, p1, ps = float(self.parsed['psfrange'][0]), float(self.parsed['psfrange'][1]), float(self.parsed['step'][0])
        wd, lt = self.parsed['workdir'], self.parsed['looptype'][0]
        print(t0, t1, ts)
        print(p0, p1, ps)
        print(lt)
        teff_range = np.arange(t0, t1, ts)
        psf_range = np.arange(p0, p1, ps)
        # One quality table per measure: rows are PSF cuts, columns are
        # teff cuts.
        lim_df = pd.DataFrame(index=[str(r) for r in psf_range], columns=[str(r) for r in teff_range])
        psf_df = pd.DataFrame(index=[str(r) for r in psf_range], columns=[str(r) for r in teff_range])
        lim_df.name = 'depth'
        psf_df.name = 'psf'
        for psf_cut in psf_range:
            for teff_cut in teff_range:
                lim, psf = self.do_stack(f, b, y, ch, wd, cuts={'zp': None, 'teff': teff_cut, 'psf': psf_cut})
                lim_df.loc[str(psf_cut), str(teff_cut)] = lim
                psf_df.loc[str(psf_cut), str(teff_cut)] = psf
        best = {'depth': None, 'psf': None}
        for df in [lim_df, psf_df]:
            # np.float was removed from NumPy (1.20 deprecation, removed
            # in 1.24); the builtin float is the documented replacement.
            # NOTE(review): np.argmax returns the grid *position* of the
            # best row/column, not the cut value itself - confirm that
            # consumers of best_teff.csv expect positions.
            best[df.name] = [float(np.argmax(df.max(axis=1))), float(np.argmax(df.max(axis=0)))]
        if self.plot:
            f1, ax1 = plt.subplots()
            lim_df = lim_df.astype(float)
            psf_df = psf_df.astype(float)
            sns.heatmap(lim_df, ax=ax1, cmap='Oranges', cbar_kws={'label': 'Limiting Magnitude'})
            ax1.set_xlabel('$\\tau_{effective} cut$')
            ax1.set_ylabel('PSF cut')
            plt.savefig('/media/data3/wiseman/des/coadding/optimise/optimize_teff_%s_%s_%s_%s.pdf'%(f,b,y,ch[0]))
            plt.close()
            f2, ax2 = plt.subplots()
            sns.heatmap(psf_df, ax=ax2, cmap='Blues', cbar_kws={'label': 'Limiting Magnitude'})
            ax2.set_xlabel('$\\tau_{effective} cut$')
            ax2.set_ylabel('PSF cut')
            plt.savefig('/media/data3/wiseman/des/coadding/optimise/optimize_psf_%s_%s_%s_%s.pdf'%(f,b,y,ch[0]))
            # Close the second figure as well - the original leaked it.
            plt.close()
        return best

    def do_stack(self, f, b, y, ch, wd, cuts):
        """Build (or reuse) one stack for `cuts`; return (limiting mag, PSF).

        Existing stacked images and source catalogues on disk are reused
        so repeated grid points are cheap.
        """
        print('Making stack of', f, b, y, ch, wd, cuts)
        s = stack.Stack(f, b, y, ch, wd, cuts, db=True)
        scifile = os.path.join(s.band_dir, 'ccd_%s_%s_%.2f_%s_clipweighted_sci.fits'%(ch[0],b,cuts['teff'],cuts['psf']))
        if not os.path.isfile(scifile):
            print('Did not find a file for these cuts; doing stack')
            s.do_my_stack(cuts=cuts, final=True)
        else:
            print('Found a stacked file for these cuts; going to source')
        s.ana_dir = os.path.join(s.band_dir, ch[0], 'ana')
        sourcename = os.path.join(s.ana_dir, 'MY%s_%s_%s_%s_%.2f_%s_clipweighted_sci.sourcecat' %(y,f,b,ch[0],cuts['teff'],cuts['psf']))
        print('Looking for file under the name: %s' % sourcename)
        if os.path.isfile(sourcename):
            print('Found a sourcecat for these cuts at: %s' % sourcename)
            s.sourcecats = [sourcename]
            s.cuts = cuts
        else:
            print('No sourcecat yet; running source extractor')
            print('Sending %s to run_stack_source' % cuts)
            s.run_stack_source(cuts=cuts, final=True)
        s.cutstring = '%s_%s' % (cuts['teff'], cuts['psf'])
        # Element [2] of init_phot's per-chip output is used as the
        # sky-limited limiting magnitude here.
        skylim = s.init_phot()[ch[0]][2]
        # Load the quality file once (the original read it twice).
        qual = np.loadtxt(os.path.join(s.band_dir, ch[0], 'ana', '%s_ana.qual' % s.cutstring))
        psf = qual[2]
        psf_err = qual[3]
        # Cache the quality numbers next to the analysis products.
        np.savetxt(os.path.join(s.ana_dir, '%s_limmags.txt' % s.cutstring), np.array([skylim, psf, psf_err]))
        return (skylim, psf)
def main():
    """Optimise every requested field/band/year/chip combination and
    append the best cuts found to the running CSV summaries."""
    o = optimiser()
    parsed = o.parsed
    chips = [[str(chip)] for chip in parsed['chips'][0].split(',')]
    best_teff_df = pd.read_csv('/media/data3/wiseman/des/coadding/optimise/best_teff.csv', header=0)
    best_psf_df = pd.read_csv('/media/data3/wiseman/des/coadding/optimise/best_psf.csv', header=0)
    for y in parsed['mys']:
        for f in parsed['fields']:
            for b in parsed['bands']:
                for ch in chips:
                    print('Sending chip %s to optimize' % ch)
                    best = o.optimise(f, b, y, ch)
                    # DataFrame.append was removed in pandas 2.0;
                    # pd.concat is the supported equivalent.
                    best_teff_df = pd.concat(
                        [best_teff_df,
                         pd.DataFrame([[f, b, ch, best['depth'][0], best['depth'][1]]],
                                      columns=best_teff_df.columns)])
                    best_psf_df = pd.concat(
                        [best_psf_df,
                         pd.DataFrame([[f, b, ch, best['psf'][0], best['psf'][1]]],
                                      columns=best_psf_df.columns)])
    best_teff_df.to_csv('/media/data3/wiseman/des/coadding/optimise/best_teff.csv', index=False)
    best_psf_df.to_csv('/media/data3/wiseman/des/coadding/optimise/best_psf.csv', index=False)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
# CS+ startup python script.
# NOTE: `common` is a global injected by the CS+ IDE's scripting host;
# it is not imported here.
# Temporarily force exceptions on while probing whether this script has
# already run, then restore the previous setting.
ThrowExceptSave = common.ThrowExcept
common.ThrowExcept = True
try:
    # Raises NameError on the very first run, which leaves
    # ScriptStarted set to False below.
    if ScriptStarted == True:
        pass
except:
    ScriptStarted = False
common.ThrowExcept = ThrowExceptSave
del ThrowExceptSave
# First run only: register this file with the IDE's hook mechanism.
if ScriptStarted == False:
    ScriptStarted = True
    common.Hook(__file__)
def AfterDownload():
    # Hook run by CS+ after a program download: (re)installs a breakpoint
    # on _vAssertCalled so assertion failures stop the debugger.
    # `common`, `debugger`, `BreakCondition`, `BreakType` and `DebugTool`
    # are globals injected by the CS+ scripting host.
    # Silence host exceptions/output while we manipulate breakpoints;
    # restored at the end.
    ThrowExceptSave = common.ThrowExcept
    ViewOutputSave = common.ViewOutput
    common.ThrowExcept = False
    common.ViewOutput = False
    # Prepare a Breakpoint Object
    bp = BreakCondition()
    bp.Address = "_vAssertCalled"
    # A software breakpoint is only set when NOT on the simulator.
    if debugger.DebugTool.GetType() != DebugTool.Simulator:
        bp.BreakType = BreakType.Software
    # Search an existing Breakpoint Object
    bi = None
    for bi in debugger.Breakpoint.Information():
        if bi.BreakType == bp.BreakType and bi.Address1 == bp.Address:
            break;
    else:
        bi = None
    # When it is found, re-create it keeping enabled/disabled to re-evaluate
    # the event address value of it. On the other hand, when it is not found,
    # create new one ENABLED.
    if bi != None:
        debugger.Breakpoint.Delete(bi.Number)
    bp_number = debugger.Breakpoint.Set(bp)
    if bi != None:
        if bi.Enable != True:
            debugger.Breakpoint.Disable(bp_number)
    # Restore the host's exception/output settings saved above.
    common.ThrowExcept = ThrowExceptSave
    common.ViewOutput = ViewOutputSave
    return
| StarcoderdataPython |
3376928 | <filename>clipmon.py
#!/usr/local/bin/python3
import time, sys, os, subprocess, re, traceback
from datetime import datetime
import pyperclip
import conf
def clip_str_to_path_line(clip_str, path_exists=os.path.exists):
    """Turn clipboard text into an editor-ready "path:line" string.

    Returns None for multi-line clips or when no matcher finds an
    existing file. `path_exists` is injectable for testing.
    """
    # Ignore clips spanning more than two lines - they are not a
    # single traceback/location reference.
    if clip_str.count('\n') > 1:
        return None
    for matcher in (test_replacements, test_partial_path):
        located = matcher(clip_str, path_exists)
        if located:
            return located
    return None
def test_replacements(clip_str, path_exists):
    """Rewrite the clip via conf.find_replace_map, then extract an
    absolute path plus line number.

    Tries the "path ... line N" / "path(N" form first, then the plain
    "path:N" form; returns "path:N" when the path exists, else None.
    """
    rewritten = clip_str
    for find_regex, replace_str in conf.find_replace_map:
        rewritten = re.sub(find_regex, replace_str, rewritten)
    patterns = (
        # absolute path + file extension, then "line N" or "(N"
        r'((?:~|/)[^@^:^\\^\(]+\.[a-z]{2,3}).*(?:line.*?|\()([0-9]+)',
        # absolute path + file extension, then ":N"
        r'((?:~|/)[^@^:^\\^\(]+\.[a-z]{2,3}):([0-9]+)',
    )
    for pattern in patterns:
        match = re.search(pattern, rewritten)
        if match and path_exists(os.path.expanduser(match.group(1))):
            return ':'.join([match.group(1), match.group(2)])
    return None
def test_partial_path(clip_str, path_exists):
    """Resolve a relative "path ... line N" / "path:N" reference against
    each configured project directory; return "fullpath:N" or None."""
    match = re.search(
        r'([a-zA-Z_/\-\.0-9]+/[a-zA-Z_0-9\-]+\.[a-z]{2,3}).*?(line.*?|:)([0-9]+)', clip_str)
    if not match:
        return None
    partial_path = match.group(1)
    if partial_path.startswith('./'):
        partial_path = partial_path.replace('./', '')
    # Try the relative path under every known project root.
    for proj_dir in conf.curr_proj_dirs:
        candidate = os.path.join(proj_dir, partial_path)
        if path_exists(os.path.expanduser(candidate)):
            return ':'.join([candidate, match.group(3)])
    return None
if __name__ == '__main__':
    # Poll the clipboard once a second; when a new single-line value
    # that resolves to a file location appears, open it in the editor.
    try:
        clip_str = None
        is_first_run = True
        while True:
            prev_value = clip_str
            try:
                if not is_first_run:
                    time.sleep(1)
                is_first_run = False
                clip_str = pyperclip.paste()
                # (the value that was initially on clipboard before running script)
                if prev_value is None:
                    prev_value = clip_str
            except UnicodeDecodeError:
                # Non-text clipboard contents - just poll again.
                pass
            else:
                if clip_str == prev_value:
                    continue
                print('new value:', clip_str)
                path_line = clip_str_to_path_line(clip_str)
                if path_line:
                    subprocess.Popen([conf.editor_cmd, path_line])
    except Exception as e:
        # BUG FIX: the original imported the Python 2 modules `Tkinter`
        # and `tkMessageBox`, which do not exist under the python3
        # interpreter named in the shebang, so the error dialog itself
        # crashed.  Use the Python 3 names instead.
        import tkinter
        from tkinter import messagebox
        window = tkinter.Tk()
        window.wm_withdraw()
        exception_str = traceback.format_exc()
        print('exception_str:', exception_str)
        messagebox.showinfo(title="Error", message="{}\n{}".format(
            str(e), exception_str))
        sys.stderr.write(str(datetime.now()) + '\n')
        raise
| StarcoderdataPython |
3265298 | <gh_stars>0
from typing import Iterable
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.module_loading import import_string
def get_all_dynamic_perm_names() -> Iterable[str]:
    """Return the names of all dynamic permissions declared in
    settings.DYNAMIC_PERMISSIONS."""
    perm_config = settings.DYNAMIC_PERMISSIONS
    return perm_config.keys()
def get_funcs_for_dynamic_perm_name(key: str) -> Iterable[str]:
    """Return the dotted-path strings configured for permission *key*.

    Propagates KeyError when *key* is not a configured permission.
    """
    perm_config = settings.DYNAMIC_PERMISSIONS
    return perm_config[key]
def get_dynamic_perms():
    """Map each dynamic permission name to its list of imported callables.

    Returns
    -------
    dict
        {permission_name: [callable, ...]} built from
        settings.DYNAMIC_PERMISSIONS.

    Raises
    ------
    ImproperlyConfigured
        If any configured dotted path cannot be imported.
    """
    try:
        return {
            perm: [import_string(func_path)
                   for func_path in get_funcs_for_dynamic_perm_name(perm)]
            for perm in get_all_dynamic_perm_names()
        }
    except ImportError as e:
        # Chain the original ImportError so the offending dotted path
        # stays visible in the traceback.
        raise ImproperlyConfigured(e) from e
| StarcoderdataPython |
1801030 |
import sys
import os
# \todo need to figure out where to put the lib/tnn.so binary so that it
# adheres to usual import semantics.
# Make the compiled extension in ../bin importable before importing it.
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'bin'))
import tnnlib
# just for testing
from .conv_einsumfunc import *
| StarcoderdataPython |
# Quiz definition: each entry maps a question id to its prompt text and
# the score awarded for a "yes" answer ("no" always scores 0).
# NOTE(review): several prompts read "Are your hero ..." - likely meant
# "Is your hero ..."; confirm intended wording before changing this
# user-visible text.
questions = {
    "q1" : {
        "question": "Has your hero ever had a nationality other than the U.S.? ",
        "yes" : 500,
        "no" : 0
    },
    "q2" : {
        "question" : "Are your hero a student? ",
        "yes" : 200,
        "no" : 0
    },
    "q3" : {
        "question" : "Are your hero a superhero and also CEO? ",
        "yes" : 100,
        "no" : 0
    },
    "q4" : {
        "question" : "Do your hero use shields among weapons? ",
        "yes" : 300,
        "no" : 0
    },
    "q5" : {
        "question" : "Are your hero over 100 years old? ",
        "yes" : 300,
        "no" : 0
    },
    "q6" : {
        "question" : "Are your hero married? ",
        "yes" : 200,
        "no" : 0
    }
}
| StarcoderdataPython |
1850931 | <gh_stars>0
"""Main implementation."""
import argparse
import os
from livereload import Server
from . import __version__
from .sphinx import SPHINX_BUILD_OPTIONS, SphinxBuilder
from .watcher import LivereloadWatchdogWatcher
from .utils import find_free_port
# Filename patterns that never trigger a rebuild: Python bytecode
# caches and Kate editor swap files.
DEFAULT_IGNORE_REGEX = [
    r"__pycache__/.*\.py",
    r".*\.pyc",
    r".*\.kate-swp",
]
def get_parser():
    """Get the application's argument parser.

    Note: this also handles SPHINX_BUILD_OPTIONS, which later get
    forwarded to sphinx-build as-is.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--port", type=int, default=8000)
    parser.add_argument("-H", "--host", type=str, default="127.0.0.1")
    # --re-ignore / --ignore / --watch may be given multiple times.
    parser.add_argument("-r", "--re-ignore", action="append", default=[])
    parser.add_argument("-i", "--ignore", action="append", default=[])
    parser.add_argument(
        "--poll", dest="use_polling", action="store_true", default=False
    )
    parser.add_argument(
        "--no-initial", dest="initial_build", action="store_false", default=True
    )
    parser.add_argument(
        "-B", "--open-browser", dest="openbrowser", action="store_true", default=False
    )
    parser.add_argument("-s", "--delay", dest="delay", type=int, default=5)
    parser.add_argument(
        "-z",
        "--watch",
        action="append",
        metavar="DIR",
        default=[],
        help=(
            "Specify additional directories to watch. May be" " used multiple times."
        ),
        dest="additional_watched_dirs",
    )
    parser.add_argument(
        "--version", action="version", version="sphinx-autobuild {}".format(__version__)
    )
    # Mirror every sphinx-build option so main() can forward it verbatim:
    # options without a metavar are repeatable flags (counted), the rest
    # accept one or more values.
    for opt, meta in SPHINX_BUILD_OPTIONS:
        if meta is None:
            parser.add_argument(
                "-{0}".format(opt), action="count", help="See `sphinx-build -h`"
            )
        else:
            parser.add_argument(
                "-{0}".format(opt),
                action="append",
                metavar=meta,
                help="See `sphinx-build -h`",
            )
    parser.add_argument("sourcedir")
    parser.add_argument("outdir")
    parser.add_argument("filenames", nargs="*", help="See `sphinx-build -h`")
    return parser
def main():
    """Actual application logic: build once, then watch and serve."""
    parser = get_parser()
    args = parser.parse_args()
    srcdir = os.path.realpath(args.sourcedir)
    outdir = os.path.realpath(args.outdir)
    # Reassemble the pass-through sphinx-build options verbatim.
    build_args = []
    for arg, meta in SPHINX_BUILD_OPTIONS:
        val = getattr(args, arg)
        if not val:
            continue
        opt = "-{0}".format(arg)
        if meta is None:
            build_args.extend([opt] * val)
        else:
            for v in val:
                build_args.extend([opt, v])
    build_args.extend([srcdir, outdir])
    build_args.extend(args.filenames)
    # Never watch the sphinx log file or the doctree cache, otherwise
    # every build would trigger another rebuild.
    ignored = args.ignore
    if args.w:  # Logfile
        ignored.append(os.path.realpath(args.w[0]))
    if args.d:  # Doctrees
        ignored.append(os.path.realpath(args.d[0]))
    # exist_ok avoids the check-then-create race in the original
    # `if not os.path.exists(outdir): os.makedirs(outdir)`.
    os.makedirs(outdir, exist_ok=True)
    re_ignore = args.re_ignore + DEFAULT_IGNORE_REGEX
    # Find the free port
    portn = args.port or find_free_port()
    builder = SphinxBuilder(outdir, build_args, ignored, re_ignore)
    server = Server(watcher=LivereloadWatchdogWatcher(use_polling=args.use_polling))
    server.watch(srcdir, builder)
    for dirpath in args.additional_watched_dirs:
        dirpath = os.path.realpath(dirpath)
        server.watch(dirpath, builder)
    server.watch(outdir)
    if args.initial_build:
        builder.build()
    if args.openbrowser is True:
        server.serve(port=portn, host=args.host, root=outdir, open_url_delay=args.delay)
    else:
        server.serve(port=portn, host=args.host, root=outdir)
| StarcoderdataPython |
6438441 | <reponame>mariajosearias/talleres-for-estructuras-control-
# Print every capital, then every country, whose name starts with "U".
# Fixes over the original: the file is read once instead of twice, the
# first file handle (which was never closed) is managed with `with`,
# and names no longer keep the trailing newline that the
# character-by-character slicing used to include.
with open('paises.txt', 'r') as archivo:
    paises = []
    capitales = []
    for linea in archivo:
        # Each line looks like "Pais: Capital".
        # NOTE(review): assumes a ":" separator followed by a space,
        # matching the original `index(":") + 2` parsing - confirm
        # against the data file.
        nombre, _, capital = linea.partition(':')
        paises.append(nombre.strip())
        capitales.append(capital.strip())

for capital in capitales:
    if capital.startswith('U'):
        print(capital)

for pais in paises:
    if pais.startswith('U'):
        print(pais)
| StarcoderdataPython |
1765246 | import requests
from collections import namedtuple
import datetime
_YNABTransactionBase = namedtuple(
    "YNABTransactionBase",
    ("date", "amount", "payee_name", "import_id", "is_foreign", "is_cleared"),
)


class YNABTransaction(_YNABTransactionBase):
    """An immutable YNAB transaction record."""

    @classmethod
    def from_up_transaction_data(cls, transaction):
        """Build a YNABTransaction from Up API transaction data.

        `transaction` is the dict form of a single transaction's JSON
        as returned by the Up API.
        """
        attrs = transaction["attributes"]
        created = datetime.datetime.fromisoformat(attrs["createdAt"])
        return cls(
            date=created.date().isoformat(),
            # Convert from cents to millidollars.
            amount=attrs["amount"]["valueInBaseUnits"] * 10,
            payee_name=attrs["description"],
            import_id="up0:{}".format(transaction["id"].replace("-", "")),
            is_foreign=attrs["foreignAmount"] is not None,
            # Even if it's pending, Up counts it as part of the
            # available value, so it is always cleared.
            is_cleared=True,
        )
class YNABClient:
    """Thin wrapper around the YNAB v1 REST API using a bearer token."""

    def __init__(self, api_token):
        # Personal access token; sent as a Bearer header on every call.
        self.api_token = api_token
        self.headers = {"Authorization": f"Bearer {self.api_token}"}
    @staticmethod
    def ynab_url(endpoint):
        """Return the full URL corresponding to the specified YNAB API endpoint."""
        return "https://api.youneedabudget.com/v1" + endpoint
    def ynab_get(self, endpoint, **kwargs):
        """Use requests.get to get data from the specified Up API endpoint."""
        # NOTE(review): docstring says "Up API" but the URL is YNAB's -
        # confirm intended wording.
        return requests.get(
            YNABClient.ynab_url(endpoint), headers=self.headers, **kwargs
        )
    def is_authenticated(self):
        # Probe the /user endpoint to check the token.
        r = self.ynab_get("/user")
        # 401 means not authenticated properly, 200 means good to go
        if r.status_code == 401:
            return False
        elif r.status_code == 200:
            return True
        # If it's neither 200 nor 401, raise it as an error
        # NOTE(review): a non-error status other than 200/401 (e.g. a
        # redirect) falls through raise_for_status and returns None,
        # which callers will treat as falsy - confirm this is intended.
        r.raise_for_status()
    def account_id_from_name(self, name):
        # Look up the account id matching `name` in the last-used budget.
        # Raises ValueError when zero or multiple accounts match.
        r = self.ynab_get("/budgets/last-used/accounts")
        r.raise_for_status()
        matching_ids = [
            acc["id"] for acc in r.json()["data"]["accounts"] if acc["name"] == name
        ]
        if len(matching_ids) == 0:
            raise ValueError(f"no accounts found for name {name}")
        elif len(matching_ids) > 1:
            raise ValueError(f"more than one account found for {name}")
        else:
            return matching_ids[0]
    def create_transactions(self, account_id, transactions, foreign_flag=None):
        """Create the provided YNABTransactions in the account with the specified ID.

        foreign_flag is the colour of the flag that should be set for transactions in a
        foreign currency.

        Returns a list of IDs that had already been imported.
        """
        # Serialize each YNABTransaction into the JSON shape the YNAB
        # bulk-create endpoint expects.
        transactions_data = [
            {
                "date": tx.date,
                "amount": tx.amount,
                "payee_name": tx.payee_name,
                "import_id": tx.import_id,
                "flag": (foreign_flag if tx.is_foreign else None),
                "account_id": account_id,
                "cleared": ("cleared" if tx.is_cleared else "uncleared"),
            }
            for tx in transactions
        ]
        r = requests.post(
            YNABClient.ynab_url("/budgets/last-used/transactions"),
            headers=self.headers,
            json={"transactions": transactions_data},
        )
        r.raise_for_status()
        json_data = r.json()["data"]
        return json_data["duplicate_import_ids"]
| StarcoderdataPython |
8161487 | <gh_stars>1-10
"""A game state for a level loaded from a file."""
# Copyright © 2014 <NAME> <<EMAIL>>
# License: MIT, see the LICENSE file.
import sfml as sf
from pymazing import world, level_loader, color, light, camera, coordinate_grid, renderer, matrix
class GameStateLoadedLevel:
    """Game state that plays a level loaded from a TGA file.

    Holds the world (lights), camera, level meshes and a set of render
    toggles that are flipped with the F1-F8 keys.
    """

    def __init__(self, config):
        # Build the world with one ambient, one diffuse and one
        # specular light (the point lights start near each other).
        self.world = world.World()
        self.world.ambient_light.color = color.from_int(255, 255, 255)
        self.world.ambient_light.intensity = 0.2
        diffuse_light = light.Light()
        diffuse_light.position[0] = 100
        diffuse_light.position[1] = 150
        diffuse_light.position[2] = 50
        diffuse_light.color = color.from_int(255, 255, 255)
        diffuse_light.intensity = 0.4
        specular_light = light.Light()
        specular_light.position[0] = 100
        specular_light.position[1] = 150
        specular_light.position[2] = 59
        specular_light.color = color.from_int(255, 255, 255)
        specular_light.intensity = 0.4
        specular_light.shininess = 8.0
        self.world.diffuse_lights.append(diffuse_light)
        self.world.specular_lights.append(specular_light)
        self.camera = camera.Camera(config)
        self.camera.position[0] = 4
        self.camera.position[1] = 3
        self.camera.position[2] = 6
        # Level geometry comes from the TGA named in the config.
        blocks = level_loader.generate_blocks_from_tga(config["game"]["level_file"])
        self.meshes = level_loader.generate_partial_meshes(blocks)
        self.coordinate_grid = coordinate_grid.CoordinateGrid()
        # Render/debug toggles (flipped by the F-keys in update()).
        self.render_wireframe = False
        self.do_backface_culling = True
        self.render_coordinate_grid = False
        self.render_meshes = True
        self.rotate_lights = False
        # Tracks per-key release state for is_key_pressed_once().
        self.key_released = dict()
    def is_key_pressed_once(self, key_code):
        """
        Determine if a key is pressed and signal it only once - key needs to be released before this returns true again.
        """
        if sf.Keyboard.is_key_pressed(key_code):
            if self.key_released.get(key_code):
                self.key_released[key_code] = False
                return True
        else:
            # Key is up again: re-arm it for the next press.
            self.key_released[key_code] = True
        return False
    def update(self, time_step, mouse_delta):
        """Advance the camera, optionally rotate the lights, and apply
        any F1-F8 toggle presses for this frame."""
        self.camera.update(time_step, mouse_delta)
        if self.rotate_lights:
            # Rotate both point lights around the Y axis at a rate
            # proportional to the time step.
            light_rotation_matrix = matrix.create_rotation_matrix_y(0.5 * time_step)
            self.world.diffuse_lights[0].position = light_rotation_matrix.dot(self.world.diffuse_lights[0].position)
            self.world.specular_lights[0].position = light_rotation_matrix.dot(self.world.specular_lights[0].position)
        if self.is_key_pressed_once(sf.Keyboard.F1):
            self.render_wireframe = not self.render_wireframe
        if self.is_key_pressed_once(sf.Keyboard.F2):
            self.do_backface_culling = not self.do_backface_culling
        if self.is_key_pressed_once(sf.Keyboard.F3):
            self.render_coordinate_grid = not self.render_coordinate_grid
        if self.is_key_pressed_once(sf.Keyboard.F4):
            self.render_meshes = not self.render_meshes
        if self.is_key_pressed_once(sf.Keyboard.F5):
            self.world.ambient_light_enabled = not self.world.ambient_light_enabled
        if self.is_key_pressed_once(sf.Keyboard.F6):
            self.world.diffuse_lights_enabled = not self.world.diffuse_lights_enabled
        if self.is_key_pressed_once(sf.Keyboard.F7):
            self.world.specular_lights_enabled = not self.world.specular_lights_enabled
        if self.is_key_pressed_once(sf.Keyboard.F8):
            self.rotate_lights = not self.rotate_lights
    def render(self, framebuffer, interpolation):
        """Draw the coordinate grid and the level meshes into the
        framebuffer according to the current toggles."""
        if self.render_coordinate_grid:
            self.coordinate_grid.render(self.camera, framebuffer)
        if self.render_meshes:
            # NOTE(review): the meshes are rendered in two batches
            # (first mesh, then the rest) - presumably a draw-order
            # requirement; confirm before merging the calls.
            renderer.render_meshes(self.meshes[:1], self.world, self.camera, framebuffer, do_backface_culling=self.do_backface_culling, render_wireframe=self.render_wireframe)
            renderer.render_meshes(self.meshes[1:], self.world, self.camera, framebuffer, do_backface_culling=self.do_backface_culling, render_wireframe=self.render_wireframe)
| StarcoderdataPython |
4965339 | <filename>classification/demos.py
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import ListedColormap
import dataset_gen
from .regression import (linear_regression, multivariate_regression,
polynomial_regression)
from .classification import (binary_classifier, multinomial_classifier,
knn_classifier)
def demo_lin_reg():
    """Fit a 1-D linear regression to synthetic data, print the fitted
    vs true parameters, and save a plot to demos/demo-lin-reg.png.

    NOTE(review): assumes the demos/ directory already exists -
    savefig raises otherwise; confirm.
    """
    # Generate dataset
    m = 100
    X, y, W_true, b_true = dataset_gen.polynomial_fit(m, 1)
    # Regression
    epochs = 1000
    W, b = linear_regression(X, y, alpha=0.005, k=8, epochs=epochs)
    print("Simple linear regression")
    print(f"  Number of examples: {m}")
    print(f"  Epochs: {epochs}")
    print(f"  Actual weight: {W_true.ravel()}")
    print(f"  Actual bias: {b_true.ravel()}")
    print(f"  Regression coeff: {W.ravel()}")
    print(f"  Regression bias: {b.ravel()}\n")
    # Predicted values along the training inputs for the fit line.
    reg_y = np.dot(X, W) + b
    # Plot data
    fig = plt.figure()
    plt.plot(X, y, "x", X, reg_y, "-")
    plt.savefig('demos/demo-lin-reg.png')
    plt.close(fig)
def demo_mul_reg():
    """Fit a two-feature multivariate regression, print fitted vs true
    parameters, and save a 3-D scatter plot to demos/demo-mul-reg.png."""
    # Generate dataset
    m = 100
    X, y, W_true, b_true = dataset_gen.multivariate_fit(m)
    # Regression
    epochs = 1000
    W, b = multivariate_regression(X, y, alpha=0.005, k=8, epochs=epochs)
    print("Multivariate regression")
    print(f"  Number of examples: {m}")
    print(f"  Epochs: {epochs}")
    print(f"  Actual weights: {W_true.ravel()}")
    print(f"  Actual bias: {b_true.ravel()}")
    print(f"  Regression weights: {W.ravel()}")
    print(f"  Regression bias: {b.ravel()}\n")
    # Plot data
    fig = plt.figure()
    # NOTE(review): constructing Axes3D(fig) directly is deprecated in
    # newer matplotlib (use fig.add_subplot(projection='3d')) - confirm
    # the pinned matplotlib version before changing.
    ax = Axes3D(fig)
    ax.scatter(X[:, 0], X[:, 1], y)
    # Sample a line through feature space and plot the fitted plane's
    # values along it.
    fX = X[:, 0].ravel()
    fX = np.linspace(fX.min(), fX.max(), 100)
    fY = X[:, 1].ravel()
    fY = np.linspace(fY.min(), fY.max(), 100)
    fZ = np.dot(np.vstack((fX, fY)).T, W) + b
    ax.scatter(fX, fY, fZ)
    plt.savefig('demos/demo-mul-reg.png')
    plt.close(fig)
def demo_poly_reg():
    """Fit a cubic polynomial regression, print fitted vs true
    parameters, and save a plot to demos/demo-poly-reg.png."""
    # Generate dataset
    m, p = 100, 3
    X, y, W_true, b_true = dataset_gen.polynomial_fit(m, p)
    # Regression
    epochs = 1000
    W, b = polynomial_regression(X, y, p=p, alpha=0.005, k=8, epochs=epochs)
    print(f"Polynomial regression (p = {p})")
    print(f"  Number of examples: {m}")
    print(f"  Epochs: {epochs}")
    print(f"  Actual weights: {W_true.ravel()}")
    print(f"  Actual bias: {b_true.ravel()}")
    print(f"  Regression weights: {W.ravel()}")
    print(f"  Regression bias: {b.ravel()}\n")
    # Expand X into its powers [X, X^2, ..., X^p] to evaluate the fit.
    rX = np.power(X, np.arange(1, p+1))
    ry = np.dot(rX, W) + b
    # Plot data
    fig = plt.figure()
    plt.plot(X, y, "x", X, ry, "-")
    plt.savefig('demos/demo-poly-reg.png')
    plt.close(fig)
def demo_binary_classifier():
    """Train a binary classifier on synthetic blobs, print its test
    accuracy, and save a decision-region plot to
    demos/demo-bin-classifier.png."""
    # Generate dataset
    m = 500
    X_train, X_test, y_train, y_test = dataset_gen.binary_classification(m)
    # Regression
    epochs = 1000
    model = binary_classifier(X_train, y_train, threshold=0.5, epochs=epochs)
    y_hat = model(X_test)
    print("Binary classification")
    print(f"  Number of examples: {m}")
    print(f"  Epochs: {epochs}")
    print(f"  Accuracy: {np.mean(y_hat == y_test)}\n")
    # Create color mesh
    # h is the mesh step; the grid extends 0.5 beyond the test data.
    h = 0.02
    x_min, x_max = X_test[:, 0].min() - .5, X_test[:, 0].max() + .5
    y_min, y_max = X_test[:, 1].min() - .5, X_test[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    cmap = ListedColormap(plt.cm.tab10.colors[:2])
    # Use model to predict and plot mesh values
    fig = plt.figure()
    z = model(np.hstack((xx.reshape(-1, 1), yy.reshape(-1, 1)))).reshape(xx.shape)
    plt.contourf(xx, yy, z, cmap=cmap, alpha=.8)
    # Plot data
    plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test.ravel(),
                cmap=cmap, s=25, alpha=0.8, edgecolors='k')
    plt.savefig('demos/demo-bin-classifier.png')
    plt.close(fig)
def demo_multi_classifier():
    """Demo: multinomial classification on synthetic blob data (3 classes).

    Prints the test accuracy and saves a decision-region plot to
    demos/demo-multi-classifier.png.
    """
    # Generate dataset
    m = 500
    n_blobs = 3
    X_train, X_test, y_train, y_test = dataset_gen.multinomial_classification(
        m, n_blobs)
    # Regression
    epochs = 1000
    model = multinomial_classifier(X_train, y_train, epochs=epochs)
    y_hat = model(X_test)
    print("Multinomial classification")
    print(f" Number of examples: {m}")
    print(f" Epochs: {epochs}")
    print(f" Accuracy: {np.mean(y_hat == y_test)}\n")
    # Create color mesh covering the test data, padded by 0.5 on each side
    h = 0.02
    x_min, x_max = X_test[:, 0].min() - .5, X_test[:, 0].max() + .5
    y_min, y_max = X_test[:, 1].min() - .5, X_test[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    cmap = ListedColormap(plt.cm.tab10.colors[:n_blobs])
    # Use model to predict and plot mesh values (decision regions)
    fig = plt.figure()
    z = model(np.hstack((xx.reshape(-1, 1), yy.reshape(-1, 1)))).reshape(xx.shape)
    plt.contourf(xx, yy, z, cmap=cmap, alpha=.8)
    # Plot data
    plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test.ravel(),
                cmap=cmap, s=25, alpha=0.8, edgecolors='k')
    plt.savefig('demos/demo-multi-classifier.png')
    plt.close(fig)
def demo_knn_classifier():
    """Demo: k-nearest-neighbours classification on synthetic blob data.

    Prints the test accuracy and saves a decision-region plot to
    demos/demo-knn-classifier.png.
    """
    # Generate dataset
    m = 500
    n_blobs = 5
    X_train, X_test, y_train, y_test = dataset_gen.multinomial_classification(
        m, n_blobs)
    # Fit the (lazy) KNN model. Bug fix: unlike the other demos there is no
    # iterative training here, so the unused `epochs` variable and the
    # misleading "Epochs: 1000" report were removed.
    model = knn_classifier(X_train, y_train, default_k=n_blobs)
    y_hat = model(X_test)
    print("K-nearest neighbours classification")
    print(f" Number of examples: {m}")
    print(f" Accuracy: {np.mean(y_hat == y_test)}\n")
    # Create color mesh covering the test data, padded by 0.5 on each side
    h = 0.1
    x_min, x_max = X_test[:, 0].min() - .5, X_test[:, 0].max() + .5
    y_min, y_max = X_test[:, 1].min() - .5, X_test[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    cmap = ListedColormap(plt.cm.tab10.colors[:n_blobs])
    # Use model to predict and plot mesh values (decision regions)
    fig = plt.figure()
    z = model(np.hstack((xx.reshape(-1, 1), yy.reshape(-1, 1)))).reshape(xx.shape)
    plt.pcolormesh(xx, yy, z, cmap=cmap, alpha=.8)
    # Plot data
    plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test.ravel(),
                cmap=cmap, s=25, alpha=0.8, edgecolors='k')
    plt.savefig('demos/demo-knn-classifier.png')
    plt.close(fig)
# Run all demos end-to-end. NOTE(review): these execute on import as well;
# consider guarding with `if __name__ == "__main__":`.
demo_lin_reg()
demo_mul_reg()
demo_poly_reg()
demo_binary_classifier()
demo_multi_classifier()
demo_knn_classifier()
| StarcoderdataPython |
9627396 | '''
Created on Nov 6, 2011
@author: cryan
Rough script for testing the python simulator
'''
import numpy as np
import matplotlib.pyplot as plt
from PySim.SystemParams import SystemParams
from PySim.QuantumSystems import Hamiltonian, Dissipator
from PySim.PulseSequence import PulseSequence
from PySim.Simulation import simulate_sequence_stack, simulate_sequence
from PySim.QuantumSystems import SCQubit
if __name__ == '__main__':
    # Recreate the Bell-Rabi spectroscopy experiment for two coupled
    # three-level superconducting qubits.
    # Setup the system
    systemParams = SystemParams()
    # First the two qubits
    Q1 = SCQubit(numLevels=3, omega=4.863e9-1e6, delta=-300e6, name='Q1', T1=5.2e-6)
    systemParams.add_sub_system(Q1)
    Q2 = SCQubit(numLevels=3, omega=5.193e9-1e6, delta=-313.656e6, name='Q2', T1=4.4e-6)
    systemParams.add_sub_system(Q2)
    # Add a 2MHz ZZ interaction
    systemParams.add_interaction('Q1', 'Q2', 'ZZ', -2e6)
    # Create the full Hamiltonian
    systemParams.create_full_Ham()
    # Single-qubit Pauli operators for the controls.
    # Bug fix: Y previously mixed Q1.loweringOp with Q2.raisingOp; like X,
    # both terms must act on the same single-qubit operator space (Q1's),
    # since Y is expanded per-qubit below.
    X = 0.5*(Q1.loweringOp + Q1.raisingOp)
    Y = 0.5*(-1j*Q1.loweringOp + 1j*Q1.raisingOp)
    # The cross-coupling from Q1 drive to Q2
    crossCoupling = 1
    # Add the Q1 drive Hamiltonians (the same drive also reaches Q2, scaled
    # by crossCoupling).
    systemParams.add_control_ham(inphase = Hamiltonian(systemParams.expand_operator('Q1', X) + crossCoupling*systemParams.expand_operator('Q2', X)),
                                 quadrature = Hamiltonian(systemParams.expand_operator('Q1', Y) + crossCoupling*systemParams.expand_operator('Q2', Y)))
    # Setup the measurement operator: a weighted sum of two-qubit level
    # projectors modelling the readout voltage per joint level.
    systemParams.measurement = 0.5*np.kron(Q1.levelProjector(0), Q2.levelProjector(0)) + 0.67*np.kron(Q1.levelProjector(1), Q2.levelProjector(0)) + \
        0.64*np.kron(Q1.levelProjector(0), Q2.levelProjector(1)) + 0.72*np.kron(Q1.levelProjector(0), Q2.levelProjector(2)) + \
        0.75*np.kron(Q1.levelProjector(1), Q2.levelProjector(1)) + 0.78*np.kron(Q1.levelProjector(1), Q2.levelProjector(2))
    # Add the T1 dissipators
    systemParams.dissipators.append(Dissipator(systemParams.expand_operator('Q1', Q1.T1Dissipator)))
    systemParams.dissipators.append(Dissipator(systemParams.expand_operator('Q2', Q2.T1Dissipator)))
    # Setup the initial state as the ground state
    rhoIn = np.zeros((systemParams.dim, systemParams.dim))
    rhoIn[0,0] = 1
    # Sweep the drive frequency around the Bell-Rabi resonance and the
    # drive amplitude; one Gaussian pulse per (frequency, amplitude) pair.
    freqSweep = 1e9*np.linspace(5.02, 5.040, 20)
    ampSweep = np.linspace(-1,1,80)
    x = np.linspace(-2,2,100)
    pulseAmps = (np.exp(-x**2)).reshape((1,x.size))
    rabiFreq = 100e6
    pulseSeqs = []
    for freq in freqSweep:
        for controlAmp in ampSweep:
            tmpPulseSeq = PulseSequence()
            tmpPulseSeq.add_control_line(freq=-freq, phase=0)
            tmpPulseSeq.controlAmps = rabiFreq*controlAmp*pulseAmps
            tmpPulseSeq.timeSteps = 5e-9*np.ones(x.size)
            # Interaction frame rotating at the drive frequency on both qubits
            tmpMat = freq*Q1.numberOp
            tmpPulseSeq.H_int = Hamiltonian(systemParams.expand_operator('Q1', tmpMat) + systemParams.expand_operator('Q2', tmpMat))
            pulseSeqs.append(tmpPulseSeq)
    # Lindblad simulation over the full stack; reshape into the 2D sweep grid.
    results = simulate_sequence_stack(pulseSeqs, systemParams, rhoIn, simType='lindblad')[0]
    results.resize((freqSweep.size, ampSweep.size))
| StarcoderdataPython |
246125 |
import unittest
from katas.kyu_8.even_or_odd import even_or_odd
class EvenOrOddTestCases(unittest.TestCase):
    """Checks even_or_odd for small non-negative integers."""

    def test_equals(self):
        # 0 is even
        self.assertEqual("Even", even_or_odd(0))

    def test_equals_2(self):
        # 1 is odd
        self.assertEqual("Odd", even_or_odd(1))

    def test_equals_3(self):
        # 2 is even
        self.assertEqual("Even", even_or_odd(2))
| StarcoderdataPython |
5184301 | #!/usr/bin/env python
# coding:utf-8
from __future__ import unicode_literals
import os
import sys
# 模块自己的PYTHON_PATH, 让代码找到正确的tools_lib. =>要在所有import前做!!!
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)))
from tools_lib.common_util.log import init_log
# 配置全局logging. => 配完PYTHON_PATH,在所有的import前做!!!
init_log(os.path.dirname(os.path.abspath(__file__)))
import logging
import tornado.ioloop
import tornado.web
import tornado.websocket
import tornado.httpserver
from tornado.options import define, options, parse_command_line
from tools_lib.host_info import DEBUG
from handlers import normal, websocket
urls = [
(r'/', normal.IndexHandler),
(r'/web_socket', websocket.WebSocketHandler),
]
class Application(tornado.web.Application):
    """Tornado application wiring up `urls` with custom request logging."""

    def __init__(self):
        # Enable debug/autoreload only on hosts flagged as development.
        if DEBUG is True:
            settings = {
                "debug": True,
                "autoreload": True,
            }
        else:
            settings = {
                "autoreload": False,
            }
        tornado.web.Application.__init__(self, urls, **settings)

    def log_request(self, handler):
        """Writes a completed HTTP request to the logs.
        By default writes to the python root logger. To change
        this behavior either subclass Application and override this method,
        or pass a function in the application settings dictionary as
        ``log_function``.
        """
        if "log_function" in self.settings:
            self.settings["log_function"](handler)
            return
        # Log level escalates with the response status class (2xx/3xx -> info,
        # 4xx -> warning, 5xx -> error).
        if handler.get_status() < 400:
            log_method = logging.info
        elif handler.get_status() < 500:
            log_method = logging.warning
        else:
            log_method = logging.error
        request_time = 1000.0 * handler.request.request_time()  # seconds -> msecs
        # NOTE(review): `handler._headers._dict` reaches into Tornado internals
        # to read the response Content-Length; verify against the public
        # HTTPHeaders API of the Tornado version in use.
        log_method("[timeit] [%s] [%s][%s][%s bytes] [%s]: [%d msecs]" %
                   (handler.request.remote_ip, handler.request.method, handler.request.uri,
                    handler._headers._dict.get('Content-Length', 0), handler.get_status(), request_time))
if __name__ == '__main__':
    # Command-line option: --port (defaults to 8888).
    define("port", default=8888, type=int)
    parse_command_line()
    app = Application()
    # xheaders=True so the server trusts proxy-provided X-Real-Ip /
    # X-Forwarded-For headers when reporting the remote ip.
    server = tornado.httpserver.HTTPServer(app, xheaders=True)
    server.listen(options.port)
    io_loop_ = tornado.ioloop.IOLoop.current()
    try:
        logging.info('application (%s) will start at %s:%s...' % (os.path.abspath(__file__), '0.0.0.0', options.port))
        io_loop_.start()
    except KeyboardInterrupt:
        # Stop the event loop cleanly on Ctrl-C.
        io_loop_.stop()
| StarcoderdataPython |
367888 | import pytest
from scripts import Database
from scripts import Accessor
def test_connection_success():
    """Database.connect should complete without raising."""
    try:
        conn = Database.Database.connect()
    except Exception as exc:
        # Bug fix: the original caught the undefined name `MyError`, which
        # itself raised NameError whenever connect() failed.
        pytest.fail("Unhandled exception: {}".format(exc))
def test_api():
    """The rentfaster search API should return a parsable JSON payload."""
    page = "1"
    # Bug fix: the url fragments were split across separate statements, so
    # only the first fragment was ever assigned to `url`; join them into a
    # single expression.
    url = ('https://www.rentfaster.ca/api/search.json?keywords=&proximity_type=location-proximity'
           '&cur_page=' + page +
           '&beds=&type=&price_range_adv[from]=null&price_range_adv[to]=null&novacancy=0&city_id=1')
    # Access object
    scr = Accessor.Accessor(url)
    data = scr.get_json()
    assert data is not None
| StarcoderdataPython |
376831 | from abc import ABC, abstractmethod
class Entity(ABC):  # pragma: no cover
    """Abstract description of a single Telegram message entity."""

    @abstractmethod
    def offset(self) -> int:
        """Position at which the entity starts, as an int."""

    @abstractmethod
    def length(self) -> int:
        """Number of positions the entity spans, as an int."""

    @abstractmethod
    def as_html_str(self) -> str:
        """HTML rendering of the entity, as a str."""
class EncodingAwareText(ABC):  # pragma: no cover
    """Abstract Telegram message text that is aware of utf-16 encoding."""

    @abstractmethod
    def text_of(self, start: int, stop: int = None) -> str:
        """Slice the text between *start* and *stop*.

        Only positive indices are supported; when *stop* is None the slice
        runs to the end of the string.
        """

    @abstractmethod
    def entity_text(self, entity: Entity) -> str:
        """Shortcut for :meth:`text_of` over the span covered by *entity*."""
8196705 | #import dependencies
import pandas as pd
from bs4 import BeautifulSoup as bs
from splinter import Browser
import requests
def init_browser():
    """Create and return a headless Chrome Splinter browser.

    Bug fix: the original created the browser but its return statement was
    commented out, so every caller received None and crashed on first use.
    """
    ex_path = {'executable_path': '../Mission_to_Mars/chromedriver.exe'}  # path relative to repo checkout
    return Browser('chrome', **ex_path, headless=True)
# Module-level accumulator shared by the scrape functions below.
mars_data={}
#dictionary to be exported to mongod
#NEWS
def mars_scrape_news():
    """Scrape the latest NASA Mars news headline and teaser.

    Results are stored in the shared mars_data dict (keys 'news_headline'
    and 'news_body'), which is also returned.
    """
    try:
        browser=init_browser()
        nasa_url='https://mars.nasa.gov/news/'
        browser.visit(nasa_url)
        html=browser.html
        soup=bs(html,'html.parser')
        # The newest article is the first slide of the results list.
        slide=soup.select_one('ul.item_list li.slide')
        headline=slide.find('div',class_='content_title').get_text()
        body=slide.find('div',class_='article_teaser_body').get_text()
        mars_data['news_headline']=headline
        mars_data['news_body']=body
        return mars_data
    finally:
        browser.quit()
#FEATURED IMAGE
def mars_scrape_image():
    """Scrape the JPL featured-image page for the full-size image URL.

    The URL is stored in the shared mars_data dict (key 'feat_img_link'),
    which is also returned.
    """
    try:
        browser=init_browser()
        image_url='https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
        browser.visit(image_url)
        # Click through "full image" and then "more info" to reach the
        # page that embeds the full-resolution image.
        elem1=browser.find_by_id("full_image")
        elem1.click()
        more_info=browser.links.find_by_partial_text('more info')
        more_info.click()
        html_image=browser.html
        soup2=bs(html_image,'html.parser')
        # The src attribute is site-relative; prefix the JPL host.
        img_rel=soup2.select_one('figure.lede a img').get("src")
        feat_img_link=f'https://www.jpl.nasa.gov{img_rel}'
        mars_data['feat_img_link']=feat_img_link
        return mars_data
    finally:
        browser.quit()
#MARS WEATHER
def mars_scrape_faq():
    """Scrape the Mars facts table and return it as an HTML table string."""
    # pd.read_html fetches the page itself, so no Splinter browser is
    # needed here (the original opened one and never used it).
    url3 = 'https://space-facts.com/mars/'
    mars_df = pd.read_html(url3)
    # Bug fix: DataFrame.rename returns a new frame (the original discarded
    # the result) and the integer columns must be keyed with ints, not the
    # strings "0"/"1".
    mars1 = mars_df[0].rename(columns={0: "Description", 1: "Mars"})
    return mars1.to_html(classes="table table striped")
#<NAME>
def mars_scrape_hemi():
    """Scrape title and full-resolution image URL for each Mars hemisphere.

    Stores the list of {"title", "img_url"} dicts in the shared mars_data
    dict and returns it.
    """
    try:
        browser = init_browser()
        hemi_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
        browser.visit(hemi_url)
        html_hemi = browser.html
        soup = bs(html_hemi, 'html.parser')
        items = soup.find_all('div', class_='item')
        hemisphere_image_urls = []
        hemi_main = 'https://astrogeology.usgs.gov'
        for i in items:
            title = i.find('h3').text
            # Follow each item link to its detail page for the wide image.
            partial_img_url = i.find('a', class_='itemLink product-item')['href']
            browser.visit(hemi_main + partial_img_url)
            partial_img_html = browser.html
            soup = bs(partial_img_html, 'html.parser')
            img_url = hemi_main + soup.find('img', class_='wide-image')['src']
            hemisphere_image_urls.append({"title": title, "img_url": img_url})
        # Bug fix: previously only the constant base URL was stored; keep the
        # scraped results instead (same key for backward compatibility).
        mars_data['hemi_main'] = hemisphere_image_urls
        return mars_data
    finally:
        browser.quit()
11946 | # -*- coding: utf-8 -*-
"""
manage
~~~~~~
Flask-Script Manager
"""
import os
from flask.ext.script import Manager
from flask.ext.migrate import MigrateCommand
from fbone import create_app
from fbone.extensions import db
from fbone.utils import PROJECT_PATH, MALE
from fbone.modules.user import User, ADMIN, ACTIVE
from fbone.modules.movies import Movie
from fbone.modules.user.commands import CreateUserCommand, DeleteUserCommand, ListUsersCommand
app = create_app()
manager = Manager(create_app)
manager.add_option('-c', '--config', dest='config', required=False)
manager.add_command('create_user', CreateUserCommand())
manager.add_command('delete_user', DeleteUserCommand())
manager.add_command('list_users', ListUsersCommand())
manager.add_command('db', MigrateCommand)
@manager.command
def initdb():
    """Init/reset database."""
    # Drop and recreate the full schema from the current models.
    db.drop_all()
    db.create_all()
    # Seed a default admin account so a fresh install is immediately usable.
    admin = User(
        name=u'admin',
        fullname=u'<NAME>',
        email=u'<EMAIL>',
        password=u'<PASSWORD>',
        role_code=ADMIN,
        status_code=ACTIVE,
        gender_code=MALE,
        bio=u'FSU Grad. Go Noles!')
    db.session.add(admin)
    db.session.commit()
@manager.command
def tests():
    """Run the tests."""
    # Imported lazily so pytest is only required when this command is used.
    import pytest
    exit_code = pytest.main([os.path.join(PROJECT_PATH, 'tests'), '--verbose'])
    return exit_code
if __name__ == "__main__":
    # Dispatch to the Flask-Script manager CLI.
    manager.run()
| StarcoderdataPython |
11256366 | <reponame>MacarielAerial/eos<gh_stars>1-10
"""
Ordinally encodes categorical features of a dataframe
as a preprocessing step for AutoEncoder later to embed categorical features
"""
import logging
from typing import Dict, List
from numpy import ndarray
from pandas import DataFrame
from sklearn.preprocessing import OrdinalEncoder
log = logging.getLogger(__name__)
class PreprocessEncoder:
    """Ordinally encodes the categorical columns of a dataframe.

    Preprocessing step so that an AutoEncoder can later embed the
    categorical features. Categorical column names are expected in
    ``df_input.attrs["cat_feats"]``.
    """

    def __init__(self, df_input: DataFrame) -> None:
        # Consistency fix: use the module logger instead of print(), matching
        # the logging already done in ordinal_encode().
        log.info(f"PreprocessEncoder: Initiated with dataframe of shape {df_input.shape}")
        self.df_input = df_input
        self.df_attrs: Dict[str, str] = df_input.attrs
        self._find_cat_subset_df()
        # Work on a copy so the caller's dataframe is left untouched.
        self.df_output = df_input.copy()

    def _find_cat_subset_df(self) -> None:
        # Select only the categorical columns named in df.attrs["cat_feats"].
        self.df_cat_feats = self.df_input[self.df_attrs["cat_feats"]]

    def ordinal_encode(self) -> None:
        """Replace the categorical columns of df_output with ordinal codes."""
        encoder: OrdinalEncoder = OrdinalEncoder()
        encoded_df: ndarray = encoder.fit_transform(self.df_cat_feats)
        self.df_output[self.df_attrs["cat_feats"]] = encoded_df
        # Keep the fitted category lists so codes can be mapped back to labels.
        self._categories = [category.tolist() for category in encoder.categories_]
        log.info(
            f"PreprocessEncoder: Ordinally encoded {len(self._categories)} features"
        )

    @property
    def df_encoded(self) -> DataFrame:
        """Dataframe with categorical columns ordinally encoded."""
        return self.df_output

    @property
    def categories(self) -> Dict[str, List[str]]:
        """Mapping of categorical column name -> ordered list of categories."""
        columns: List[str] = list(self.df_cat_feats.columns)
        categories: Dict[str, List[str]] = dict(zip(columns, self._categories))
        return categories
| StarcoderdataPython |
4962788 | <reponame>nuuuwan/news_lk2<gh_stars>1-10
import os
import shutil
from utils import hashx
from news_lk2._utils import log
REPO_NAME = 'news_lk2'
GIT_REPO_URL = f'https://github.com/nuuuwan/{REPO_NAME}.git'
DIR_ROOT = '/tmp'
DIR_REPO = os.path.join(DIR_ROOT, REPO_NAME)
DIR_ARTICLES = os.path.join(
DIR_REPO,
'articles',
)
SALT = '5568445278803347'
HASH_LENGTH = 8
IGNORE_LIST = ['.git', '.gitignore', '.DS_Store']
SHARD_NAME_LENGTH = 2
ARTICLE_FILE_ONLY_LEN = HASH_LENGTH + 5
def get_dir_article_shard(file_name_only, dir_prefix=''):
    """Return the shard directory holding the given article file name."""
    # Article files are sharded into sub-directories named after the first
    # SHARD_NAME_LENGTH characters of the file name.
    assert len(file_name_only) == ARTICLE_FILE_ONLY_LEN
    shard_name = file_name_only[:SHARD_NAME_LENGTH]
    return os.path.join(DIR_ARTICLES + dir_prefix, shard_name)
def get_hash(url):
    # Stable article id: salted md5 of the url, truncated to HASH_LENGTH chars.
    return hashx.md5(url + SALT)[:HASH_LENGTH]
def get_article_file_only(url):
    """Return the file name (without directory) for the article at `url`."""
    return '{}.json'.format(get_hash(url))
def get_article_file(url, dir_prefix=''):
    """Return the full path of the article file for `url`, creating its
    shard directory if needed."""
    file_name_only = get_article_file_only(url)
    dir_article_shard = get_dir_article_shard(file_name_only, dir_prefix)
    # Improvement: os.makedirs(exist_ok=True) replaces the previous
    # `os.system('mkdir -p ...')` — portable, race-free, no shell involved.
    os.makedirs(dir_article_shard, exist_ok=True)
    return os.path.join(dir_article_shard, file_name_only)
def git_checkout():
    """Clone the news_lk2 repo into DIR_REPO and switch to its 'data' branch."""
    # Start from a clean slate: remove any previous checkout.
    if os.path.exists(DIR_REPO):
        shutil.rmtree(DIR_REPO)
    os.mkdir(DIR_REPO)
    # Clone and check out the 'data' branch, which holds the article files.
    os.system(
        '; '.join([
            f'cd {DIR_ROOT}',
            f'git clone {GIT_REPO_URL}',
            'cd news_lk2',
            'git checkout data',
        ])
    )
    log.debug(f'Cloned {GIT_REPO_URL} [data] to {DIR_REPO}')
def get_article_files():
    """Return the full paths of every article file across all shard dirs."""
    return [
        os.path.join(DIR_ARTICLES, shard_name, file_name)
        for shard_name in os.listdir(DIR_ARTICLES)
        for file_name in os.listdir(os.path.join(DIR_ARTICLES, shard_name))
    ]
| StarcoderdataPython |
5003810 | <filename>algorithm/BOJ/I:O/11721.py
# BOJ 11721: print the input string ten characters per line.
word = input()
for start in range(0, len(word), 10):
    print(word[start:start + 10])
| StarcoderdataPython |
40331 | <reponame>vicrsp/rto
from setuptools import setup, find_packages
# Distribution metadata for rtotools; packages live under src/ (src-layout).
setup(
    name='rtotools',
    version='0.1.0',
    description='Real-Time Optimization Tools',
    url='https://github.com/vicrsp/rto',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    package_dir={"": "src"},
    packages=find_packages(where="src"),
    python_requires=">=3.8.5",
    install_requires=['pandas',
                      'numpy',
                      'matplotlib',
                      'seaborn',
                      'sklearn',
                      'bunch',
                      'scipy'
                      ],
    classifiers=[
        'Development Status :: 1 - Planning',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
    ],
)
6659291 | <gh_stars>1-10
# proxy module
from __future__ import absolute_import
from apptools.appscripting.scriptable import *
| StarcoderdataPython |
5002971 | <reponame>pots007/fbpic<filename>fbpic/lpa_utils/laser/longitudinal_laser_profiles.py
# Copyright 2016, FBPIC contributors
# Authors: <NAME>, <NAME>
# License: 3-Clause-BSD-LBNL
"""
This file is part of the Fourier-Bessel Particle-In-Cell code (FB-PIC)
It defines a set of common longitudinal laser profiles.
"""
import numpy as np
from scipy.constants import c
# Generic classes
# ---------------
class LaserLongitudinalProfile(object):
    """Common base for all 1D longitudinal laser profiles.

    A longitudinal profile describes the on-axis complex envelope of a
    pulse and can be combined with a 2D transverse laser profile to obtain
    a full 3D profile, valid under the paraxial approximation.
    Subclasses must override :meth:`evaluate` with the same signature.
    """

    def __init__(self, propagation_direction, gpu_capable=False):
        """Store the propagation direction and GPU-capability flag.

        (Each subclass should call this method at initialization.)

        Parameters
        ----------
        propagation_direction: int
            +1 for a laser travelling towards positive z, -1 for one
            travelling towards negative z; any other value is rejected.
        gpu_capable: bool
            Whether the profile works with cupy arrays on GPU (usually the
            case when only standard arithmetic and numpy operations are
            used). Default: False.
        """
        assert propagation_direction in (-1, 1)
        self.propag_direction = float(propagation_direction)
        self.gpu_capable = gpu_capable

    def evaluate(self, z, t):
        """Return the complex longitudinal laser profile at (z, t).

        The profile should be valid for any z and t; under the paraxial
        approximation this holds when the function is a simple translation
        at c*t along z (diffraction is handled by the transverse profile).
        This base implementation is a placeholder returning zeros.

        Parameters
        ----------
        z: ndarray (meters)
            Longitudinal positions at which to calculate the profile
            (in the lab frame).
        t: ndarray or float (seconds)
            Time at which to calculate the profile (in the lab frame).

        Returns
        -------
        ndarray of complex numbers with the same shape as z.
        """
        return np.zeros_like(z, dtype='complex')

    def squared_profile_integral(self):
        """Return the integral of |f(z)|^2 over the whole z axis.

        .. math::
            \\int_{-\\infty}^\\infty \\,dz|f(z)|^2

        The base class carries no profile, so the integral is zero;
        subclasses override this with the analytic value.
        """
        return 0
# Particular classes for each longitudinal laser profile
# ------------------------------------------------------
class GaussianChirpedLongitudinalProfile(LaserLongitudinalProfile):
    """Class that calculates a Gaussian chirped longitudinal laser profile."""

    def __init__(self, tau, z0, lambda0=0.8e-6, cep_phase=0.,
                 phi2_chirp=0., propagation_direction=1):
        """
        Define the complex longitudinal profile of a Gaussian laser pulse.

        At the focus and for zero chirp, this translates to a laser with an
        axial electric field:

        .. math::
            E(z,t) \propto \exp\left( \\frac{(z-z_0-ct)^2}{c^2\\tau^2} \\right)
            \cos[ k_0( z - z_0 - ct ) - \phi_{cep} ]

        where :math:`k_0 = 2\pi/\\lambda_0` is the wavevector, :math:`\\tau`
        is the laser duration, :math:`\phi_{cep}` is the CEP phase.

        Note that, for a transform-limited pulse, the peak field amplitude of
        the profile is unity. For a non-zero chirp, the peak amplitude is
        reduced while keeping the pulse energy constant.

        Parameters
        ----------
        tau: float (in second)
            The duration of the laser (in the lab frame),
            defined as :math:`\\tau` in the above formula.

        z0: float (in meter)
            The initial position of the centroid of the laser
            (in the lab frame), defined as :math:`z_0` in the above formula.

        lambda0: float (in meter), optional
            The wavelength of the laser (in the lab frame), defined as
            :math:`\\lambda_0` in the above formula.
            Default: 0.8 microns (Ti:Sapph laser).

        cep_phase: float (in radian), optional
            The Carrier Enveloppe Phase (CEP), defined as :math:`\phi_{cep}`
            in the above formula (i.e. the phase of the laser
            oscillation, at the position where the laser enveloppe is maximum)

        phi2_chirp: float (in second^2)
            The amount of temporal chirp, at focus (in the lab frame)
            Namely, a wave packet centered on the frequency
            :math:`(\omega_0 + \delta \omega)` will reach its peak intensity
            at :math:`z(\delta \omega) = z_0 - c \phi^{(2)} \, \delta \omega`.
            Thus, a positive :math:`\phi^{(2)}` corresponds to positive chirp,
            i.e. red part of the spectrum in the front of the pulse and blue
            part of the spectrum in the back.

        propagation_direction: int, optional
            Indicates in which direction the laser propagates.
            This should be either 1 (laser propagates towards positive z)
            or -1 (laser propagates towards negative z).
        """
        # Initialize propagation direction and mark the profile as GPU capable
        LaserLongitudinalProfile.__init__(self,propagation_direction,
                                          gpu_capable=True)

        # Set and store the parameters
        self.k0 = 2*np.pi/lambda0  # central wavevector
        self.z0 = z0
        self.cep_phase = cep_phase
        self.phi2_chirp = phi2_chirp
        self.inv_ctau2 = 1. / (c * tau) ** 2  # 1/(c*tau)^2, reused in evaluate

    def evaluate(self, z, t):
        """
        See the docstring of LaserLongitudinalProfile.evaluate
        """
        # The formula for the longitudinal laser profile (in complex numbers)
        # is obtained by defining the Fourier transform of the laser at focus
        # E(\omega) = exp( -(\omega-\omega_0)^2(\tau^2/4 + i \phi^(2)/2) )
        # and then by taking the inverse Fourier transform in t.
        prop_dir = self.propag_direction
        # Stretch factor due to chirp (complex; reduces the peak amplitude
        # below while keeping the pulse energy constant, per the class doc)
        stretch_factor = 1 - 2j * self.phi2_chirp * c ** 2 * self.inv_ctau2
        # Calculate the argument of the complex exponential
        exp_argument = - 1j * self.cep_phase \
                       + 1j * self.k0 * (prop_dir * (z - self.z0) - c * t) \
                       - 1. / stretch_factor * self.inv_ctau2 * \
                       (prop_dir * (z - self.z0) - c * t) ** 2
        # Get the longitudinal profile
        profile = np.exp(exp_argument) / stretch_factor ** 0.5

        return profile

    def squared_profile_integral(self):
        """
        See the docstring of LaserLongitudinalProfile.squared_profile_integral
        """
        # Analytic Gaussian integral: sqrt(pi/2) * c*tau
        return (0.5 * np.pi * 1./self.inv_ctau2)**.5
| StarcoderdataPython |
3558024 | from setuptools import setup, find_packages
import pushoverflow
# Distribution metadata; version/author/license are read from the package
# itself and requirements come from requirements.txt.
setup(
    name="pushoverflow",
    version=pushoverflow.__version__,
    author=pushoverflow.__author__,
    author_email="<EMAIL>",
    description="Pushover Notifications for StackExchange Sites",
    long_description=open("README.rst").read(),
    url="https://github.com/amcintosh/PushOverflow",
    download_url=("https://github.com/amcintosh/PushOverflow/tarball/%s" %
                  pushoverflow.__version__),
    keywords=["stackexchange", "pushover", "notifications"],
    license=pushoverflow.__license__,
    packages=find_packages(exclude=["*.test", "*.test.*"]),
    include_package_data=True,
    install_requires=open("requirements.txt").readlines(),
    entry_points={
        "console_scripts": [
            "pushoverflow=pushoverflow.cli:main"
        ]
    },
    test_suite="tests"
)
| StarcoderdataPython |
6588592 | import re
import ipaddress
import logging
from collections import Counter
logger = logging.getLogger(__name__)
class LogParser:
    """Parse a log file and collect lines matching a specific ip/subnet."""

    def __init__(self, ip, log_file, top):
        """
        Args:
            ip (str): ip address or subnet (CIDR) to search for
            log_file (str): log file name to parse
            top (int): number of records in the top-talkers list (0 disables)
        """
        self._ip_to_check = ip
        self._log_file = log_file
        self._top = top
        self._ip = None
        # Matches `<ip> ... [...] "<METHOD> ..." <status> ...` (common log format);
        # only the bound `match` method is kept.
        self._log_regex = re.compile(
            r'^(?P<ip_addr>^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}).*\[.*\] "(?P<method>[A-Z]{3,4}).*" (?P<status>[0-9]{3}).*'
        ).match
        self._matches = {
            'requested_ip': [],
        }
        self._top_ip = []
        self._output = []

    def _check_params(self):
        """Validate constructor parameters.

        Raises:
            RuntimeError: if the ip/subnet is invalid or the file unreadable
        """
        if self._ip_to_check is None:
            raise RuntimeError('Please specify an ip/subnet for filtering')
        if self._log_file is None:
            raise RuntimeError('Please specify a log file')
        # First try to parse the filter as a plain host address...
        try:
            self._ip = ipaddress.ip_address(self._ip_to_check)
        except ValueError:
            pass
        except Exception as e:
            logger.error('Cannot parse {} as ip address. Error {}'.format(self._ip_to_check, e))
            raise RuntimeError
        # ...and fall back to a subnet (CIDR) if that failed.
        if self._ip is None:
            try:
                self._ip = ipaddress.ip_network(self._ip_to_check)
            except ValueError:
                logger.error('IP {} is not a valid ip or subnet address'.format(self._ip_to_check))
                raise RuntimeError
            except Exception as e:
                logger.error('Cannot parse {} as subnet address. Error {}'.format(self._ip_to_check, e))
                raise RuntimeError
        # Make sure the log file exists and is readable.
        try:
            with open(self._log_file, 'r'):
                pass
        except Exception as e:
            raise RuntimeError(e)

    def _filter_ip(self, matched_ip):
        """Check whether a matched ip equals (or lies inside) the searched
        ip/subnet.

        Args:
            matched_ip (str): IP address matched by the log line regex
        Returns:
            True if equal/contained, False otherwise
        """
        try:
            log_ip = ipaddress.ip_address(matched_ip.strip())
        except Exception as e:
            # Bug fix: the exception was previously not bound to a name
            # (`except Exception:`), so formatting the message below raised
            # a NameError on `e` whenever an invalid ip was encountered.
            logger.warning('Matched IP {} cannot be converted to IPv4Address class. Error: {}'.format(matched_ip, e))
            return False
        if isinstance(self._ip, ipaddress.IPv4Address):
            return self._ip == log_ip
        # Otherwise the filter is a network: test containment.
        return log_ip in self._ip

    def _parse_line(self, line):
        """Apply the ip filter to one log line, recording any match.

        Args:
            line (str): current line of the log file
        """
        match = self._log_regex(line)
        if match is None:
            return
        try:
            matched_ip = match.group('ip_addr')
            if self._filter_ip(matched_ip):
                self._matches['requested_ip'].append(line)
                if self._top > 0:
                    self._top_ip.append(matched_ip)
        except Exception:
            # Typo fix in the message ("liune" -> "line").
            logger.debug('Cannot get ip address from log line')

    def print_output(self, matched_lines, top=None):
        """Print matched lines and, when top > 0, the top-talker ips.

        NOTE(review): both parameters are currently unused (output is built
        from internal state); they are kept for interface compatibility.
        """
        for k, v in self._matches.items():
            self._output.extend(v)
        for line in self._output:
            print(line.strip())
        if self._top > 0:
            print('\n--------- Top {} IP ---------'.format(self._top))
            for ip in Counter(self._top_ip).most_common(self._top):
                print('{}\t\t{}'.format(ip[0], ip[1]))

    def parse(self):
        """Run the parser over the whole log file.

        Returns:
            The matches dict; when top > 0, a (matches, top_list) tuple.
        Raises:
            RuntimeError: if parameter validation fails
        """
        try:
            self._check_params()
        except Exception as e:
            raise RuntimeError(e)
        with open(self._log_file, 'r') as f:
            for line in f:
                self._parse_line(line)
        if self._top > 0:
            return self._matches, Counter(self._top_ip).most_common(self._top)
        return self._matches
| StarcoderdataPython |
9776091 | import numpy as np
import torch
if __name__ == "__main__":
    # NOTE(review): `resnet` and `h5py` are used below but neither is
    # imported/defined in this file (only numpy and torch are imported) —
    # as written this script raises NameError; confirm the intended imports.
    labels = []
    model = resnet()
    model.eval()
    # Aggregation matrix loaded from disk; multiplied with the class
    # probabilities below.
    aggre = np.load('../utils/cluster_v3.npy')
    images = h5py.File('path/to/video_h5', 'r')['video']
    with torch.no_grad():
        for im in images:
            # Normalise with the standard ImageNet mean/std constants,
            # then add a batch dim and reorder HWC -> NCHW.
            im = (im/255.0-np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])
            im = torch.FloatTensor(im).unsqueeze(0)
            im = im.permute(0, 3, 1, 2)
            prob = model(im).detach()
            # NOTE(review): torch.topk returns a (values, indices) tuple;
            # indexing `prob` with that tuple looks wrong — presumably the
            # intent is to zero the 990 smallest entries along dim 1. Verify.
            prob[torch.topk(prob, dim=1, k=990, largest=False)] = 0
            prediction = prob.numpy() * aggre
            prediction = np.sum(prediction, 1)
            # Normalise so the largest score is 1.
            prediction = prediction / np.max(prediction)
            labels.append(prediction)
    np.save('labels_v', labels)
| StarcoderdataPython |
3237213 | <reponame>rickdg/vivi<gh_stars>1-10
# -*- coding: utf-8 -*-
import lxml.objectify
import pytest
import zeit.cms.content.interfaces
import zeit.cms.content.sources
import zeit.content.video.interfaces
import zeit.content.video.testing
import zeit.content.video.video
import zope.component
class TestVideo(zeit.content.video.testing.TestCase):
    """Integration tests for the Video content type."""

    def test_security_should_allow_access_to_id_prefix(self):
        # The security proxy must not block read access to id_prefix.
        import zeit.cms.testing
        import zope.security.management
        from zope.security.proxy import ProxyFactory
        factory = zeit.content.video.testing.video_factory(self)
        next(factory)
        video = next(factory)  # in repository
        zope.security.management.endInteraction()
        with zeit.cms.testing.interaction('zope.mgr'):
            proxied = ProxyFactory(video)
            self.assertEqual('vid', proxied.id_prefix)

    def test_has_advertisement_defaults_to_true(self):
        # For bw-compat to videos imported before we recognized the field.
        factory = zeit.content.video.testing.video_factory(self)
        video = next(factory)
        self.assertEqual(True, video.has_advertisement)
@pytest.mark.parametrize(
    'title,supertitle,result', [
        (u'Äch bön oin Börlünär.', u'Kennedy said:',
         u'kennedy-said-aech-boen-oin-boerluenaer'),
        (None, u'Kennedy said:', u'kennedy-said'),
        (u'Äch bön oin Börlünär.', None, u'aech-boen-oin-boerluenaer')])
def test_seo_slug_returns_url_normalized_version_of_title_and_supertitle(
        title, supertitle, result):
    # Per the cases above, the slug joins supertitle and title (skipping
    # None values) and url-normalizes the result.
    video = zeit.content.video.video.Video()
    video.title = title
    video.supertitle = supertitle
    assert result == video.seo_slug
class TestAuthorshipsProperty(zeit.content.video.testing.TestCase):
    """Covers reference conversion when assigning to Video.authorships."""

    def test_authorships_property_converts_IAuthor_to_IReference(
            self):
        from zeit.cms.content.interfaces import IReference
        from zeit.content.author.author import Author
        from zeit.content.video.video import Video
        self.repository['author'] = Author()
        video = Video()
        # Assigning raw Author objects should wrap each in an IReference.
        video.authorships = (self.repository['author'],)
        self.assertEqual(
            [True], [IReference.providedBy(x) for x in video.authorships])
        self.assertEqual(
            [self.repository['author']], [x.target for x in video.authorships])

    def test_authorships_property_passes_IReference_without_conversion(self):
        from zeit.cms.content.interfaces import IReference
        from zeit.content.author.author import Author
        from zeit.content.video.video import Video
        self.repository['author'] = Author()
        video = Video()
        # Assigning ready-made references must keep them as-is.
        video.authorships = (
            video.authorships.create(self.repository['author']),)
        self.assertEqual(
            [True], [IReference.providedBy(x) for x in video.authorships])
        self.assertEqual(
            [self.repository['author']], [x.target for x in video.authorships])
| StarcoderdataPython |
11225750 | """
This is the new gateway program to all of the cgns_utils.
Run cgns_utils -help to get a list of all available options. The basic
idea is as follows::
| write new file
read cngs file -> Do some operations on it -> | .or.
| write modified file
Developed by Dr. <NAME>
"""
import sys
import os
import shutil
import tempfile
import argparse
import pickle
import numpy
from cgnsutilities.cgnsutilities import (
Block,
Boco,
BC,
Grid,
explodeGrid,
readGrid,
explodeByZoneName,
write_tecplot_file,
simpleCart,
combineGrids,
convertPlot3d,
mirrorGrid,
splitGrid,
mergeGrid,
divideGrid,
libcgns_utils,
)
# Force argparse's help formatter to wrap at 120 columns instead of the
# detected terminal width, so --help output is consistent across shells.
os.environ["COLUMNS"] = "120"
def get_parser():
    """Build and return the ``argparse.ArgumentParser`` for ``cgns_utils``.

    Every supported operation is registered as its own subcommand; the chosen
    subcommand name is stored in ``args.mode`` so ``main`` can dispatch on it.

    Returns
    -------
    argparse.ArgumentParser
        Fully configured parser with one subparser per operation.
    """
    # List out all of the possible options here.
    parser = argparse.ArgumentParser(prog="cgns_utils")
    subparsers = parser.add_subparsers(help="Choose one of the listed operations to perform", dest="mode")
    # ------------- Options for 'scale' mode --------------------
    p_scale = subparsers.add_parser("scale", help="Scale a grid by a constant factor")
    p_scale.add_argument("gridFile", help="Name of input CGNS file")
    p_scale.add_argument("scale", help="scale factor", type=float)
    p_scale.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    # ------------- Options for 'flip' mode --------------------
    p_flip = subparsers.add_parser("flip", help="Flip a grid about a plane defined by an axis")
    p_flip.add_argument("gridFile", help="Name of input CGNS file")
    p_flip.add_argument("axis", help="Flip the mesh about plane defined by axis: 'x', 'y', 'z'")
    p_flip.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    # ------------- Options for 'coarsen' mode --------------------
    p_coarsen = subparsers.add_parser("coarsen", help="Coarsen a grid uniformly")
    p_coarsen.add_argument("gridFile", help="Name of input CGNS file")
    p_coarsen.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    # ------------- Options for 'refine' mode --------------------
    p_refine = subparsers.add_parser("refine", help="Refine a grid uniformly")
    p_refine.add_argument("gridFile", help="Name of input CGNS file")
    p_refine.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    p_refine.add_argument(
        "--axes",
        nargs="+",
        help="Refine mesh only along specified axis or axes (default: %(default)s)",
        default=["i", "j", "k"],
    )
    # ------------- Options for 'extractSurface' mode --------------------
    p_extract = subparsers.add_parser("extractSurface", help="Extract a wall surface from file")
    p_extract.add_argument("gridFile", help="Name of input CGNS file")
    p_extract.add_argument("surfFile", help="Name of plot3d surface file")
    # ------------- Options for 'extractSpecifiedSurface' mode --------------------
    p_extract_spec = subparsers.add_parser(
        "extractSpecifiedSurface", help="Extract a surface from a specified set of layers in a cgns block"
    )
    p_extract_spec.add_argument("gridFile", help="Name of input CGNS file")
    p_extract_spec.add_argument("surfFile", help="Name of plot3d surface file")
    p_extract_spec.add_argument("blockID", help="cgns block number to extract from")
    p_extract_spec.add_argument("imin", help="lower i bound,use 0-based numbering")
    p_extract_spec.add_argument("imax", help="upper i bound,use 0-based numbering")
    p_extract_spec.add_argument("jmin", help="lower j bound,use 0-based numbering")
    p_extract_spec.add_argument("jmax", help="upper j bound,use 0-based numbering")
    p_extract_spec.add_argument("kmin", help="lower k bound,use 0-based numbering")
    p_extract_spec.add_argument("kmax", help="upper k bound,use 0-based numbering")
    # ------------- Options for 'mirror' mode --------------------
    p_mirror = subparsers.add_parser(
        "mirror", help="Mirror a grid about a plane defined by an axis. This doubles the grid size"
    )
    p_mirror.add_argument("gridFile", help="Name of input CGNS file")
    p_mirror.add_argument("axis", help="Mirror about plane defined by axis: 'x', 'y', 'z'")
    # type=float so a user-supplied tolerance is converted instead of staying a string
    p_mirror.add_argument("tol", nargs="?", type=float, default=1e-12, help="Tolerance for node merge")
    p_mirror.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    # ------------- Options for 'split' mode --------------------
    p_split = subparsers.add_parser(
        "split", help="Face-match a grid. If the grid is already faced matched, this will have no effect"
    )
    p_split.add_argument("gridFile", help="Name of input CGNS file")
    p_split.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    p_split.add_argument(
        "--splitFile",
        nargs="?",
        default=None,
        help="""Add additional splits specified in split file. Each
        line must contain a block index (1 based), idim (1, 2, or 3),
        and a 1-based index of the block to split at""",
    )
    # ------------- Options for 'merge' mode --------------------
    p_merge = subparsers.add_parser(
        "merge",
        help="Automatically merge as many blocks as possible. Boundary conditions and family information is kept.",
    )
    p_merge.add_argument("gridFile", help="Name of input CGNS file")
    p_merge.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    # ------------- Options for 'connect' mode --------------------
    p_connect = subparsers.add_parser(
        "connect", help="Determine the block-to-block connectivity information for a point-matched grid"
    )
    p_connect.add_argument("gridFile", help="Name of input CGNS file")
    # type=float so a user-supplied tolerance is converted instead of staying a string
    p_connect.add_argument("tol", nargs="?", type=float, default=1e-12, help="Tolerance for node merge")
    p_connect.add_argument(
        "--connectSelf",
        help="only check for connection on-block (periodic type)",
        action="store_true",
        dest="connectSelf",
        default=False,
    )
    p_connect.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    # ------------- Options for 'divide' mode --------------------
    p_divide = subparsers.add_parser("divide", help="Divide all blocks in the grid into 8 sub-blocks")
    p_divide.add_argument("gridFile", help="Name of input CGNS file")
    p_divide.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    # ------------- Options for 'autoBC' mode --------------------
    p_bc = subparsers.add_parser(
        "autoBC", help="Try to determine boundary conditions for blocks. Only suitable for external flow applications."
    )
    p_bc.add_argument("gridFile", help="Name of input CGNS file")
    p_bc.add_argument("sym", help="Normal for possible symmetry plane.", choices=["x", "y", "z"])
    p_bc.add_argument("radius", help="Radius of sphere containing bodies", type=float, default=10.0)
    p_bc.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    p_bc.add_argument("--xOffset", nargs="?", default=0.0, type=float, help="x-coordinate of sphere origin")
    p_bc.add_argument("--yOffset", nargs="?", default=0.0, type=float, help="y-coordinate of sphere origin")
    p_bc.add_argument("--zOffset", nargs="?", default=0.0, type=float, help="z-coordinate of sphere origin")
    # ------------ Options for 'overwriteFamilies' mode --------------------
    p_fam = subparsers.add_parser(
        "overwriteFamilies", help="Overwrite family information", formatter_class=argparse.RawTextHelpFormatter
    )
    p_fam.add_argument("gridFile", help="Name of input CGNS file")
    p_fam.add_argument(
        "familyFile",
        help="""File containing additional family information. The file must consist of one or more lines containing the following data:
    <blockID> <faceID> <family>
    where:
    blockID - is the block index *IN 1 BASED NUMBERING*
    faceID - one of iLow, iHigh, jLow, jHigh, kLow, or kHigh
    family - the family name.
    To find blockID of any mesh using Tecplot,
    1. Load the mesh with Advanced options > One Tecplot zone per non-poly CGNS zone/solution
    2. Use the Zone Number for the blockID
    Examples:
    7 kLow wing
    4 jHigh sym
    """,
    )
    p_fam.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    # ------------ Options for 'writeSubfaceFamily' mode --------------------
    # NOTE: the "Familiy" misspelling in the subcommand name is kept as-is so
    # existing scripts that invoke it keep working.
    p_fam = subparsers.add_parser("writeSubfaceFamiliy", help="""Overwrite the family information on a subface.""")
    p_fam.add_argument("gridFile", help="Name of input CGNS file")
    p_fam.add_argument(
        "familyFile",
        help="""file containing data for the new families and face division.
        Format is 1st line: 1-based blockID, 2nd line: {ilow, ihigh, etc}, subsequent lines,
        one per line: range (as 6 ints separated by commas, not spaces), newFamilyName""",
    )
    p_fam.add_argument(
        "outFile",
        nargs="?",
        default=None,
        help="""Optional output file. ***NOTE*** It is highly recommended that an output file
        is specified as this method will overwrite existing boundary conditions on a face, and it is
        up to the user to supply subfaces which sufficiently replace it.""",
    )
    # ------------ Options for 'copyFamilyInfo' mode --------------------
    p_fam = subparsers.add_parser("copyFamilyInfo", help="Copy family information from two otherwise identical grids")
    p_fam.add_argument("gridFile", help="Name of CGNS file to which family information is to be copied")
    p_fam.add_argument("sourceFile", help="Name of output CGNS file which contains family information")
    p_fam.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    # ------------ Options for 'removeBC' mode --------------------
    p_rem = subparsers.add_parser("removeBC", help="Remove all BC")
    p_rem.add_argument("gridFile", help="Name of input CGNS file")
    p_rem.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    # ------------ Options for 'overwriteBCFamilyWithBC' mode --------------------
    p_sub = subparsers.add_parser(
        "overwriteBCFamilyWithBC",
        help="Overwrite boundary conditions based on family name",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    p_sub.add_argument("gridFile", help="Name of input CGNS file")
    p_sub.add_argument("familyName", help="The BC family to overwrite")
    p_sub.add_argument("newBCType", help="The new boundary condition to apply")
    p_sub.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    p_sub.add_argument(
        "--blockIDs",
        type=int,
        nargs="+",
        default=None,
        help="The 1-based indices of the blocks to overwrite. By default, BCs are overwritten on all blocks.",
    )
    # ------------ Options for 'overwriteBC' mode --------------------
    p_sub = subparsers.add_parser(
        "overwriteBC", help="Overwrite boundary condition information", formatter_class=argparse.RawTextHelpFormatter
    )
    p_sub.add_argument("gridFile", help="Name of input CGNS file")
    # Shared between 'overwriteBC' and 'writebcinfo' help text below.
    bcFile_txt = """
    The file must consist of one or more lines containing the following data:
    <blockID> <faceID> <BCType> <family> [dataset]
    where:
    blockID - is the block index *IN 1 BASED NUMBERING*
    faceID - one of iLow, iHigh, jLow, jHigh, kLow or kHigh
    BCType - one of the supported CGNS boundary conditions. See below for supported types
    family - the family name.
    Supported BC types are : bcfarfield, bcsymmetryplane, bcwall, bcwallinviscid, bcwallviscous
    bcwallviscousheatflux, bcwallviscousisothermal, bcoutflow, bcoutflowsubsonic
    bcinflow, bcinflowsubsonic, bcinflowsupersonic
    Optionally, additional datasets may be specified. These
    can be used to set additional boundary condition data.
    The format of the dataset line is as follows:
    <BCSetName> <BCSetType> <DirNeuArr> <DataArrName1> <dataArr1>, ..., <DataArrNameN> <dataArrN>
    where:
    BCSetName - bc dataset name
    BCSetType - bc dataset type. This is in most cases the same type as the BCType specified
    DirNeuArr - can have one of two options: Dirichlet or Neumann
    DataArrNameN - name of first property specified. This can be a range of things. Refer to ICEM or ADflow for supported BC properties
    dataArrN - the actual data for the property. Either a scalar or a flattened nodal array. If an array is passed, the solver will convert the 1D array to the (possibly) 2D BC face.
    To find blockID of any mesh using Tecplot,
    1. Load the mesh with Advanced options > One Tecplot zone per non-poly CGNS zone/solution
    2. Use the Zone Number for the blockID
    Examples:
    7 kLow bcwallviscous wing
    4 jHigh bcsymmetryplane sym
    5 khigh bcoutflowsubsonic turb_inlet BCDataSet_1 BCInFlowSubsonic Dirichlet PressureStagnation 1234.0 TemperatureStagnation 4556.0
    """
    p_sub.add_argument(
        "bcFile",
        help="File containing additional bc info." + bcFile_txt,
    )
    p_sub.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    # ------------ Options for 'writebcinfo' mode --------------------
    p_sub = subparsers.add_parser(
        "writebcinfo",
        help="Writes boundary condition information to a file.",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    p_sub.add_argument("gridFile", help="Name of input CGNS file")
    p_sub.add_argument(
        "bcOutFile",
        default=None,
        help="A file containing bc info." + bcFile_txt,
    )
    # ------------ Options for 'rebunch' mode --------------------
    p_bunch = subparsers.add_parser("rebunch", help="Rebunch offwall spacing (experimental)")
    p_bunch.add_argument("gridFile", help="Name of input CGNS file")
    p_bunch.add_argument("spacing", help="The desired off-wall spacing", type=float)
    p_bunch.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    p_bunch.add_argument(
        "--extraCells",
        help="Number of additional cells to use in re-bunching. *SHOULD BE A MG NUMBER*.",
        type=int,
        default=0,
    )
    p_bunch.add_argument("--nodes", help="Only rebunch the first 'nodes' in the offwall direction", type=int, default=1)
    # ------------ Options for 'cgns2plot3d' mode --------------------
    p3d = subparsers.add_parser("cgns2plot3d", help="Convert a cgns file to a plain plot3d file")
    p3d.add_argument("gridFile", help="Name of input CGNS file")
    p3d.add_argument("plot3dFile", help="Name of output plot3d file")
    # ------------ Options for 'plot3dtocgns' mode --------------------
    p3dtoc = subparsers.add_parser(
        "plot3d2cgns",
        help="""Convert a multiblock, unformatted fortran, big-endian, multiblock plot3d file to a plain
        cgns file. This specific format is widely used at NASA and Boeing.""",
    )
    p3dtoc.add_argument("plot3dFile", help="Name of input plot3d file")
    p3dtoc.add_argument("gridFile", help="Name of output CGNS file")
    # ------------ Options for 'randomize' mode --------------------
    p_ran = subparsers.add_parser("randomize", help="Randomize the block orientation and order. Useful for testing.")
    p_ran.add_argument("gridFile", help="Name of input CGNS file")
    p_ran.add_argument(
        "seed",
        type=int,
        default=0,
        help="Seed for random generator. Specifying a seed will make process deterministic.",
    )
    p_ran.add_argument("--keepRHS", help="Keep right hand coordinate system", action="store_true", default=False)
    p_ran.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    # ------------ Options for 'reorder' mode --------------------
    p_reorder = subparsers.add_parser(
        "reorder",
        help="""Sort blocks in an alpha-numerical order. It can also add extra digits
        to the integers at the end of the block names to facilitate ordering.""",
    )
    p_reorder.add_argument("gridFile", help="Name of input CGNS file")
    p_reorder.add_argument(
        "intDigits",
        type=int,
        default=5,
        help="""Number of digits used for the integers. When CGNSlib generates a CGNS file
        (when converting from a plot3d file, for instance), it does not add extra digits to the integers
        when naming zones. This becomes a problem when you have more than 10 zones because the ordering will be:
        Zone1, Zone11, Zone12, ..., Zone19, Zone2, Zone21, ...
        This method will add extra digits to the zone names to give the correct ordering.""",
    )
    p_reorder.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    # ------------ Options for 'symmZero' mode --------------------
    p_sym = subparsers.add_parser("symmZero", help="Hard-zero any nodes on symmetry plane BCs.")
    p_sym.add_argument("gridFile", help="Name of input CGNS file")
    p_sym.add_argument("sym", help="Normal for possible symmetry plane.", choices=["x", "y", "z"])
    p_sym.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    # ------------ Options for 'symmZeroNoBC' mode --------------------
    p_symnobc = subparsers.add_parser(
        "symmZeroNoBC",
        help="Hard-zero any nodes within a given tolerance of the symmetry plane. BCs are not taken into account.",
    )
    p_symnobc.add_argument("gridFile", help="Name of input CGNS file")
    p_symnobc.add_argument("sym", help="Normal for possible symmetry plane.", choices=["x", "y", "z"])
    p_symnobc.add_argument(
        "--tol",
        help="Distance tolerance to bring nodes to symmetry plane (default: %(default)s)",
        type=float,
        default=1.0e-5,
    )
    p_symnobc.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    # ------------ Options for 'timeCombine' mode --------------------
    p_tc = subparsers.add_parser(
        "timeCombine", help="Combine cgns files from time accurate simulation into unsteady tecplot file."
    )
    p_tc.add_argument("baseName", help="baseName of the files. Use %%d to denote the counter.")
    p_tc.add_argument("outFile", nargs="?", default=None, help="Output file name. If not given, unsteady.plt is used")
    # ------------ Options for 'double2D' mode --------------------
    p_dd = subparsers.add_parser("double2D", help="Take a 2d mesh one cell wide and make it two cells wide.")
    p_dd.add_argument("gridFile", help="Name of input CGNS file")
    p_dd.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    # ------------ Options for 'combine' mode --------------------
    p_dd = subparsers.add_parser("combine", help="Take 2 or more cgns files and combine them into a single file.")
    p_dd.add_argument("gridFiles", metavar="files", type=str, nargs="+", help="Name of CGNS files to combine")
    p_dd.add_argument("outFile", type=str, help="Output CGNS file name")
    # ------------ Options for 'removeBlocks' mode --------------------
    p_rm = subparsers.add_parser(
        "removeBlocks",
        help="""Remove blocks from a cgns file. The user should ensure that the final mesh
        is still valid in terms of boundary conditions and connectivities.""",
    )
    p_rm.add_argument("gridFile", help="Name of input CGNS file")
    p_rm.add_argument(
        "blockIDs",
        metavar="files",
        type=int,
        nargs="+",
        help="IDs (integers) of the blocks that will be removed. The integers should be 1-indexed",
    )
    p_rm.add_argument("outFile", type=str, help="Output CGNS file name")
    # ------------ Options for 'explode' mode --------------------
    p_exp = subparsers.add_parser(
        "explode", help="Take one multiblock cgns file and explodes it into single-block cgns files."
    )
    p_exp.add_argument("gridFile", type=str, help="Name of input multiblock CGNS file")
    p_exp.add_argument(
        "outFile",
        nargs="?",
        default=None,
        help="""Optional reference to name output files. An integer will be added to the end.
        If none is given, the input filename will be used as reference.
        All connectivity information between different blocks is lost in this step, only
        internal connectivity remains.""",
    )
    # ------------ Options for 'explodeKmin' mode --------------------
    p_expkmin = subparsers.add_parser(
        "explodeKmin",
        help="Take one multiblock cgns file and explodes it into single-block plot3d files that contains only the K=1 faces.",
    )
    p_expkmin.add_argument("gridFile", type=str, help="Name of input multiblock CGNS file")
    p_expkmin.add_argument(
        "outFile",
        nargs="?",
        default=None,
        help="""Optional reference to name output files. An integer will be added to the end.
        if none is given, the input filename will be used as reference.""",
    )
    # ------------ Options for 'explodeByZoneName' mode --------------------
    p_expkmin = subparsers.add_parser(
        "explodeByZoneName",
        help="""Take one multiblock cgns file and explode it into multiple multiblock
        cgns files based on the zone name from the blocks.""",
    )
    p_expkmin.add_argument("gridFile", type=str, help="Name of input multiblock CGNS file")
    # ------------ Options for 'cartesian' mode --------------------
    p_cart = subparsers.add_parser(
        "cartesian", help="Generates a background cartesian mesh", formatter_class=argparse.RawTextHelpFormatter
    )
    p_cart.add_argument("gridFile", help="Name of input CGNS file")
    p_cart.add_argument(
        "cartFile",
        help="""File containing background mesh info. The file must consist of
    4 lines containing the following data:
    <extensionXneg> <extensionYneg> <extensionZneg>
    <extensionXpos> <extensionYpos> <extensionZpos>
    <numNodesX> <numNodesY> <numNodesZ>
    <weightGRX> <weightGRY> <weightGRZ>
    where:
    extension is the distance of the cartesian box
    face to the corresponding bounding box face divided by the
    bounding box length. We need 2 values of extension per
    direction as we have two parallel faces for each one of them.
    numNodes is the number of nodes that should be used along the
    edges of the cartesian mesh. If you want one symmetry plane
    in the z direction, for instance, you need to set one of the
    extensionZ values to 0. If you want two symmetry planes in
    the z direction, (e.g. to run a 2D case) you need to set both
    extensionZ values to 0.
    weightGR are values between 0.0 and 1.0 used to balance edge
    growth ratio and cell volume resolution mismatch during the
    optimization. If weightGR = 0, the optimizer will not care
    about the growth ratios at the farfield and will just try
    to match the bounding box resolution. If weightGR = 1, the
    optimizer will not care about the bounding box resolution
    and will just try to get an uniform growth ratio. This
    results in an uniform mesh.
    example:
    10 10 0
    10 10 10
    65 65 65
    0.1 0.1 0.1
    """,
    )
    p_cart.add_argument(
        "outFile",
        help="""Name of output CGNS file. The output file contains only one cartesian block.
        The input mesh is not included and BCs are applied.""",
    )
    # ------------ Options for 'simpleCart' mode --------------------
    p_sub = subparsers.add_parser("simpleCart", help="Generates a background cartesian mesh")
    p_sub.add_argument("gridFile", help="Name of input CGNS file")
    p_sub.add_argument("dh", help="Uniform spacing size", type=float)
    p_sub.add_argument("hExtra", help="Extension in each dimension", type=float)
    p_sub.add_argument("nExtra", help="Number of nodes to use for extension", type=int)
    p_sub.add_argument("sym", help="Normal for possible sym plane", type=str)
    p_sub.add_argument("mgcycle", help="Minimum MG cycle to enforce", type=int)
    p_sub.add_argument("outFile", help="Name of output CGNS file")
    # ------------ Options for 'explicitCart' mode --------------------
    p_sub = subparsers.add_parser("explicitCart", help="Generates a background cartesian mesh")
    p_sub.add_argument("xmin", type=float, help="min x coordinate")
    p_sub.add_argument("ymin", type=float, help="min y coordinate")
    p_sub.add_argument("zmin", type=float, help="min z coordinate")
    p_sub.add_argument("xmax", type=float, help="max x coordinate")
    p_sub.add_argument("ymax", type=float, help="max y coordinate")
    p_sub.add_argument("zmax", type=float, help="max z coordinate")
    p_sub.add_argument("dh", help="Uniform spacing size", type=float)
    p_sub.add_argument("hExtra", help="Extension in each dimension", type=float)
    p_sub.add_argument("nExtra", help="Number of nodes to use for extension", type=int)
    p_sub.add_argument("sym", help="Normal for possible sym plane", type=str)
    p_sub.add_argument("mgcycle", help="Minimum MG cycle to enforce", type=int)
    p_sub.add_argument("outFile", help="Name of output CGNS file")
    # ------------ Options for 'simpleOCart' mode --------------------
    p_sub = subparsers.add_parser("simpleOCart", help="Generates a background cartesian mesh surrounding by an OMesh")
    p_sub.add_argument("gridFile", help="Name of input CGNS file")
    p_sub.add_argument("dh", help="Uniform cartesian spacing size", type=float)
    p_sub.add_argument("hExtra", help='Extension in "O" dimension', type=float)
    p_sub.add_argument("nExtra", help="Number of nodes to use for extension", type=int)
    p_sub.add_argument("sym", help="Normal for possible sym plane", type=str)
    p_sub.add_argument("mgcycle", help="Minimum MG cycle to enforce", type=int)
    p_sub.add_argument("outFile", help="Name of output CGNS file")
    # ------------ Options for 'translate' mode --------------------
    p_t = subparsers.add_parser("translate", help="Translate a grid.")
    p_t.add_argument("gridFile", help="Name of input CGNS file")
    p_t.add_argument("dx", help="x-displacement", type=float)
    p_t.add_argument("dy", help="y-displacement", type=float)
    p_t.add_argument("dz", help="z-displacement", type=float)
    p_t.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    # ------------ Options for 'rotate' mode --------------------
    p_rot = subparsers.add_parser("rotate", help="Rotate a grid around a given direction.")
    p_rot.add_argument("gridFile", help="Name of input CGNS file")
    p_rot.add_argument("vx", help="x-component of the rotation axis", type=float)
    p_rot.add_argument("vy", help="y-component of the rotation axis", type=float)
    p_rot.add_argument("vz", help="z-component of the rotation axis", type=float)
    p_rot.add_argument("theta", help="rotation angle [deg]", type=float)
    p_rot.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    # ------------ Options for 'autoOversetBC' mode
    p_obc = subparsers.add_parser(
        "autoOversetBC",
        help="Automatically generate connectivity and boundary conditions"
        "for an overset near field mesh generated by pyHyp. It assumes the surface is a "
        "BCWallViscous and the outer boundary is a BCOverset condition."
        "Only used with pyHyp hyperbolically generated meshes.",
    )
    p_obc.add_argument("gridFile", help="Name of input CGNS file")
    p_obc.add_argument("sym", help="Normal for possible symmetry plane.", choices=["x", "y", "z", "none"])
    p_obc.add_argument(
        "--connectSelf",
        help="only check for connection on-block (periodic type)",
        action="store_true",
        dest="connectSelf",
        default=False,
    )
    p_obc.add_argument("--tol", type=float, default=1e-12, help="Tolerance for connect")
    p_obc.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    # ------------ Options for 'autoNearfieldBC' mode
    p_anf = subparsers.add_parser(
        "autoNearfieldBC",
        help="Automatically generate connectivity and boundary conditions"
        "for an overset near field mesh with possible symmetry plane.",
    )
    p_anf.add_argument("gridFile", help="Name of input CGNS file")
    p_anf.add_argument("sym", help="Normal for possible symmetry plane.", choices=["x", "y", "z", "none"])
    p_anf.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    # ------------ Options for 'autoFarfieldBC' mode
    p_anf = subparsers.add_parser(
        "autoFarfieldBC",
        help="Automatically generate connectivity and boundary conditions"
        "for an overset farfield mesh with possible symmetry plane.",
    )
    p_anf.add_argument("gridFile", help="Name of input CGNS file")
    p_anf.add_argument("sym", help="Normal for possible symmetry plane.", choices=["x", "y", "z"])
    p_anf.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    # ------------ Options for 'fillOpenBCs' mode
    p_fbc = subparsers.add_parser(
        "fillOpenBCs",
        help="Adds a given BC to the faces that are not face-matched and also that do not have any previously-assigned BCs.",
    )
    p_fbc.add_argument("gridFile", help="Name of input CGNS file")
    p_fbc.add_argument(
        "bocoType",
        help="""Boundary condition type. Supported types are:
        bcfarfield, bcsymmetryplane, bcwall, bcwallinviscid, bcwallviscous, bcwallviscousheatflux,
        bcwallviscousisothermal, bcoutflow, bcoutflowsubsonic, bcoutflowsupersonic, bcinflow, bcinflowsubsonic,
        bcinflowsupersonic and bcoverset""",
    )
    p_fbc.add_argument("famName", help="Family name for the new BCs.")
    p_fbc.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    # ------------ Options for 'extractConv' mode --------------------
    p_conv = subparsers.add_parser(
        "extractConv",
        help="Reads the convergence history node of a CGNS file and saves the data in a pickle or tecplot file.",
    )
    p_conv.add_argument("gridFile", type=str, help="Name of input CGNS file.")
    p_conv.add_argument("outType", choices=["pickle", "tecplot"], help="The type of convergence data output file.")
    p_conv.add_argument(
        "outFile",
        nargs="?",
        default=None,
        help="The convergence data will be saved to this filename. If none is given, the grid filename will be used as reference.",
    )
    # ------------ Options for 'include' mode
    p_inc = subparsers.add_parser(
        "include", help="Write a new file including only the zones specified the given numbers/ranges."
    )
    p_inc.add_argument("gridFile", help="Name of input CGNS file")
    p_inc.add_argument(
        "rangeSpec",
        help="""Range to extract. Comma separated list. Ranges can given like 6-8. Must be 1 based.
        Example: rangeSpec=4,5,9-16,19""",
    )
    p_inc.add_argument("outFile", help="Output file")
    # ------------ Options for 'section' mode
    p_sec = subparsers.add_parser(
        "section",
        help="For cgns files with 1 domain ONLY, write a subsection of the zone. Boundary conditions/B2Bs are deleted.",
    )
    p_sec.add_argument("gridFile", help="Name of input CGNS file")
    p_sec.add_argument("iStart", type=int)
    p_sec.add_argument("iEnd", type=int)
    p_sec.add_argument("jStart", type=int)
    p_sec.add_argument("jEnd", type=int)
    p_sec.add_argument("kStart", type=int)
    p_sec.add_argument("kEnd", type=int)
    p_sec.add_argument("outFile", nargs="?", default=None, help="Optional output file")
    # ------------ Options for 'info' mode
    p_info = subparsers.add_parser("info", help="Print some metrics for the CGNS file.")
    p_info.add_argument("gridFile", help="Name of input CGNS file")
    # ------------ Options for 'extrude' mode
    p_p2D = subparsers.add_parser(
        "extrude",
        help="""Takes a true 2D mesh (planar) and extrude it in one direction to make
        it a 3D mesh, one cell wide. This method assumes that BCs are already set in the CGNS file.
        BCs are retained and symmetry BCs are applied on planar surfaces.""",
    )
    p_p2D.add_argument(
        "gridFile",
        help="Name of input CGNS file. Note that the planar grid should not have symmetry BCs set on the plane.",
    )
    p_p2D.add_argument("direction", help="Direction which to extrude the grid", choices=["x", "y", "z"])
    p_p2D.add_argument("outFile", nargs="?", default=None, help="Optional output CGNS file")
    # ------------ Options for 'revolve' mode
    p_p2R = subparsers.add_parser(
        "revolve",
        help="""Takes a true 2D mesh (planar) and revolves it about specified axis to make
        a 3D axisymmetric mesh, one cell wide. This method assumes that BCs are already set in the CGNS file.
        BCs are retained and symmetry BCs are applied on planar surfaces. Output should be a wedge shape.""",
    )
    p_p2R.add_argument(
        "gridFile",
        help="Name of input CGNS file. Note that the planar grid should not have symmetry BCs set on the plane.",
    )
    p_p2R.add_argument(
        "normalDirection",
        help="""This is the direction in which the plane normal points in.
        Example: If supplied data is in xz plane, the normal points in y""",
        choices=["x", "y", "z"],
    )
    p_p2R.add_argument(
        "axis",
        help="Axis which to rotate about Example: If supplied data is in xz plane, you would give either x or z",
        choices=["x", "y", "z"],
    )
    p_p2R.add_argument(
        "startAngle", type=float, help="Rotation starting angle given in degrees. Typically this is a small quantity."
    )
    p_p2R.add_argument(
        "endAngle", type=float, help="Rotation ending angle given in degrees. Typically this is a small quantity."
    )
    p_p2R.add_argument("nThetas", type=int, help="number of angular locations to put points", default=2)
    p_p2R.add_argument("outFile", nargs="?", default=None, help="Optional output CGNS file")
    # ------------ Options for 'testBlock' mode
    p_test = subparsers.add_parser(
        "testBlock", help="Creates a single block mesh with specified dimensions. Used to quickly generate a test grid."
    )
    p_test.add_argument("nx", help="Number of nodes in x", type=int)
    p_test.add_argument("ny", help="Number of nodes in y", type=int)
    p_test.add_argument("nz", help="Number of nodes in z", type=int)
    p_test.add_argument("outFile", help="Name of output file")
    # ------------- Options for 'blockSizes' mode --------------------
    p_blockSizes = subparsers.add_parser("blockSizes", help="Print the sizes of each block in the mesh")
    p_blockSizes.add_argument("gridFile", help="Name of input CGNS file")
    # return the parser
    return parser
def main():
    """Command-line entry point for the CGNS utilities.

    Parses arguments via ``get_parser()`` and dispatches on ``args.mode``.
    Modes that handle their own output (timeCombine, testBlock, combine,
    explicitCart, plot3d2cgns, the extract/print modes, ...) exit early;
    the remaining modes transform ``curGrid`` in place and fall through to
    the common ``writeToCGNS`` call at the bottom.
    """
    parser = get_parser()
    # Get the arguments we need!
    args = parser.parse_args()
    # -------------------------------------------
    # Selection of the task
    # -------------------------------------------
    # The time combine is special. First we generate the list of files we
    # need to deal with.
    if args.mode == "timeCombine":
        # Get the directory name where the baseName is:
        path = os.path.dirname(os.path.abspath(args.baseName))
        # Get the full list of files in this directory:
        allFiles = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
        files = []
        # NOTE(review): assumes args.baseName contains exactly one "%d"
        # placeholder; parts[1] raises IndexError otherwise — confirm.
        parts = args.baseName.split("%d")
        maxLength = 0
        for f in allFiles:
            if (parts[0] == "" or parts[0] in f) and (parts[1] == "" or parts[1] in f):
                # Make sure there is a .cgns in there somewhere
                if ".cgns" in f:
                    files.append(f)
                    maxLength = max(maxLength, len(f))
        files = sorted(files)
        if args.outFile is None:
            outFile = "unsteady.plt"
        else:
            outFile = args.outFile
        # Now we make a character array of the file names, and hand it off to
        # fortran for all the actual reading/writing.
        fileNames = numpy.zeros((len(files), 256), "c")
        for i in range(len(files)):
            fileNames[i, 0 : len(files[i])] = files[i]
        libcgns_utils.utils.time_combine(fileNames, outFile)
        sys.exit(0)
    if args.mode == "testBlock":
        # Build a unit-cube single-block grid of the requested node counts.
        nx = args.nx
        ny = args.ny
        nz = args.nz
        X = numpy.zeros((nx, ny, nz, 3))
        Xcart = []
        Xcart.append(numpy.linspace(0, 1, nx))
        Xcart.append(numpy.linspace(0, 1, ny))
        Xcart.append(numpy.linspace(0, 1, nz))
        Xx, Xy, Xz = numpy.meshgrid(Xcart[0], Xcart[1], Xcart[2], indexing="ij")
        X[:, :, :, 0] = Xx
        X[:, :, :, 1] = Xy
        X[:, :, :, 2] = Xz
        b = Block("domain.00001", [nx, ny, nz], X)
        # Add bocos so we can run it:
        b.addBoco(Boco("iMin", BC["bcfarfield"], [[1, 1], [1, ny], [1, nz]], "far"))
        b.addBoco(Boco("iMax", BC["bcfarfield"], [[nx, nx], [1, ny], [1, nz]], "far"))
        b.addBoco(Boco("jMin", BC["bcsymmetryplane"], [[1, nx], [1, 1], [1, nz]], "sym"))
        b.addBoco(Boco("jMax", BC["bcsymmetryplane"], [[1, nx], [ny, ny], [1, nz]], "sym"))
        b.addBoco(Boco("kMin", BC["bcwallviscous"], [[1, nx], [1, ny], [1, 1]], "wall"))
        b.addBoco(Boco("kMax", BC["bcfarfield"], [[1, nx], [1, ny], [nz, nz]], "far"))
        g = Grid()
        g.addBlock(b)
        g.writeToCGNS(args.outFile)
        sys.exit(0)
    # The 'combine' function is done first since it is the only function
    # that reads multiple cgns files.
    if args.mode == "combine":
        grids = []
        for fName in args.gridFiles:
            grid = readGrid(fName)
            grids.append(grid)
        combinedGrid = combineGrids(grids)
        combinedGrid.writeToCGNS(args.outFile)
        # This task is now finished
        sys.exit(0)
    if args.mode == "explicitCart":
        # This task doesn't have args.gridFile so do it first
        xMin = [args.xmin, args.ymin, args.zmin]
        xMax = [args.xmax, args.ymax, args.zmax]
        simpleCart(xMin, xMax, args.dh, args.hExtra, args.nExtra, args.sym, args.mgcycle, args.outFile)
        sys.exit(0)
    if args.mode == "plot3d2cgns":
        # This doesn't read a cgns file so do this first too.
        convertPlot3d(args.plot3dFile, args.gridFile)
        sys.exit(0)
    # Get the current working grid 'curGrid' by reading the input
    curGrid = readGrid(args.gridFile)
    # The following are "special" and done first since they do not
    # have a CGNS output.
    if args.mode == "extractSurface":
        curGrid.extractSurface(args.surfFile)
        sys.exit(0)
    if args.mode == "extractSpecifiedSurface":
        curGrid.extractSpecifiedSurface(
            args.surfFile, args.blockID, args.imin, args.imax, args.jmin, args.jmax, args.kmin, args.kmax
        )
        sys.exit(0)
    if args.mode == "cgns2plot3d":
        curGrid.writePlot3d(args.plot3dFile)
        sys.exit(0)
    if args.mode == "blockSizes":
        curGrid.printBlockInfo()
        sys.exit(0)
    # Determine if we have an output file:
    try:
        if args.outFile is None:
            # Determine where to put a file:
            dirpath = tempfile.mkdtemp()
            # Define a temp output file
            outFileName = os.path.join(dirpath, "tmp.cgns")
        else:
            outFileName = args.outFile
    except Exception:
        # Some modes have no 'outFile' argument; those modes exit before the
        # final write. (Bug fix: this branch previously assigned 'outFile'
        # instead of 'outFileName', which caused a NameError at the final
        # writeToCGNS call whenever this path was taken.)
        outFileName = None
    # Perform one of the following actions:
    if args.mode == "flip":
        curGrid.flip(args.axis)
    elif args.mode == "scale":
        curGrid.scale(args.scale)
    elif args.mode == "mirror":
        curGrid = mirrorGrid(curGrid, args.axis, args.tol)
    elif args.mode == "coarsen":
        curGrid.coarsen()
    elif args.mode == "refine":
        curGrid.refine(args.axes)
    elif args.mode == "split":
        curGrid = splitGrid(curGrid, args.splitFile)
    elif args.mode == "merge":
        curGrid = mergeGrid(curGrid)
    elif args.mode == "connect":
        if args.connectSelf:
            curGrid.connectSelfOnly(args.tol)
        else:
            curGrid.connect(args.tol)
    elif args.mode == "divide":
        curGrid = divideGrid(curGrid)
    elif args.mode == "autoBC":
        curGrid.autoBC(args.radius, args.sym, [args.xOffset, args.yOffset, args.zOffset])
    elif args.mode == "overwriteFamilies":
        curGrid.overwriteFamilies(args.familyFile)
    elif args.mode == "writeSubfaceFamily":
        curGrid.writeSubfaceFamily(args.familyFile)
    elif args.mode == "copyFamilyInfo":
        sourceGrid = readGrid(args.sourceFile)
        curGrid.copyFamilyInfo(sourceGrid)
    elif args.mode == "overwriteBCFamilyWithBC":
        curGrid.overwriteBCFamilyWithBC(args.familyName, args.newBCType, args.blockIDs)
    elif args.mode == "overwriteBC":
        curGrid.overwriteBCs(args.bcFile)
    elif args.mode == "writebcinfo":
        curGrid.writeBCs(args.bcOutFile)
        sys.exit(0)
    elif args.mode == "removebc":
        curGrid.removeBCs()
    elif args.mode == "rebunch":
        curGrid.rebunch(args.spacing, args.extraCells, args.nodes)
    elif args.mode == "randomize":
        curGrid.randomize(args.seed, args.keepRHS)
    elif args.mode == "reorder":
        curGrid.reorder(args.intDigits)
    elif args.mode == "symmZero":
        curGrid.symmZero(args.sym)
    elif args.mode == "symmZeroNoBC":
        curGrid.symmZeroNoBC(args.sym, args.tol)
    elif args.mode == "double2D":
        curGrid.double2D()
    elif args.mode == "removeBlocks":
        curGrid.removeBlocks(args.blockIDs)
    elif args.mode == "cartesian":
        # The cartesian mode only makes sense for overset grids; refuse
        # to run when no overset BC is present.
        found_overset = False
        for block in curGrid.blocks:
            for boco in block.bocos:
                if boco.type == BC["bcoverset"]:
                    found_overset = True
        if found_overset:
            curGrid.cartesian(args.cartFile, args.outFile)
        else:
            print("The CGNS file has no overset boundary conditions")
        sys.exit(0)
    elif args.mode == "simpleCart":
        curGrid.simpleCart(args.dh, args.hExtra, args.nExtra, args.sym, args.mgcycle, args.outFile)
        sys.exit(0)
    elif args.mode == "simpleOCart":
        curGrid.simpleOCart(args.dh, args.hExtra, args.nExtra, args.sym, args.mgcycle, args.outFile)
        sys.exit(0)
    elif args.mode == "translate":
        curGrid.translate(args.dx, args.dy, args.dz)
    elif args.mode == "rotate":
        curGrid.rotate(args.vx, args.vy, args.vz, args.theta)
    elif args.mode == "autoOversetBC":
        curGrid.autoOversetBC(args.sym, args.connectSelf, args.tol)
    elif args.mode == "autoNearfieldBC":
        curGrid.autoNearfieldBC(args.sym)
    elif args.mode == "autoFarfieldBC":
        curGrid.autoFarfieldBC(args.sym)
    elif args.mode == "fillOpenBCs":
        curGrid.fillOpenBCs(BC[args.bocoType], args.famName)
    elif args.mode == "include":
        # Parse a range spec like "1-3,7,9-10" into a sorted, unique list of
        # block indices to write out.
        toWrite = []
        for spec in args.rangeSpec.split(","):
            if "-" in spec:
                tmp = spec.split("-")
                start = int(tmp[0])
                end = int(tmp[1])
            else:
                start = int(spec)
                end = int(spec)
            for i in range(start, end + 1):
                toWrite.append(i)
        toWrite = numpy.unique(toWrite)
        toWrite.sort()
        curGrid.writeToCGNSSelected(args.outFile, toWrite)
        sys.exit(0)
    elif args.mode == "section":
        if len(curGrid.blocks) != 1:
            print("section command works only on grids with 1 block")
            sys.exit(0)
        curGrid.blocks[0].section(args.iStart, args.iEnd, args.jStart, args.jEnd, args.kStart, args.kEnd)
    elif args.mode == "explode":
        # Split original multiblock grid in a list of single-block grids
        gridList = explodeGrid(curGrid)
        # Check if the user gave a reference name. Otherwise, use the input name as reference
        if args.outFile is None:
            # Get the base name
            outFile = os.path.splitext(os.path.basename(args.gridFile))[0]
        else:
            outFile = args.outFile
        # Generate a list of names for the files by adding integers to the reference name
        fileNames = [outFile + "_%03d" % index + ".cgns" for index in range(1, len(gridList) + 1)]
        # Save each grid
        for index in range(len(gridList)):
            gridList[index].writeToCGNS(fileNames[index])
        # Finish execution
        sys.exit(0)
    elif args.mode == "explodeKmin":
        # Split original multiblock grid in a list of single-block grids
        # that contains just the K = 1 face
        gridList = explodeGrid(curGrid, kMin=True)
        # Check if the user gave a reference name. Otherwise, use the input name as reference
        if args.outFile is None:
            # Get the base name
            outFile = os.path.splitext(os.path.basename(args.gridFile))[0]
        else:
            outFile = args.outFile
        # Generate a list of names for the files by adding integers to the reference name
        fileNames = [outFile + "_%03d" % index + ".xyz" for index in range(1, len(gridList) + 1)]
        # Save each grid
        for index in range(len(gridList)):
            gridList[index].writePlot3d(fileNames[index])
        # Finish execution
        sys.exit(0)
    elif args.mode == "explodeByZoneName":
        # Split original multiblock grid into a list of multiblock grids
        # that correspond to each component based on zone names
        gridList, nameList = explodeByZoneName(curGrid)
        # Save each grid
        for grid in gridList:
            fileName = grid.name
            grid.writeToCGNS(fileName + ".cgns")
        # Finish execution
        sys.exit(0)
    elif args.mode == "info":
        curGrid.printInfo()
        sys.exit(0)
    elif args.mode == "extrude":
        curGrid.extrude(args.direction)
    elif args.mode == "revolve":
        if args.normalDirection == args.axis:
            print("ERROR: Normal direction and revolve axis cannot be the same. Exiting...")
            sys.exit(0)
        curGrid.revolve(args.normalDirection, args.axis, args.startAngle, args.endAngle, args.nThetas)
    elif args.mode == "extractConv":
        # extracts the convergence history contained in the CGNS file and saves it in a pickle file
        # Check if the user gave a reference name. Otherwise, use the input name as reference
        if args.outFile is None:
            # Get the base name
            outFile = os.path.splitext(os.path.basename(args.gridFile))[0]
            # Add extension based on output type
            if args.outType == "pickle":
                outFile = outFile + ".pickle"
            elif args.outType == "tecplot":
                outFile = outFile + ".dat"
        else:
            outFile = args.outFile
        # The function readGrid already read all the convergence history arrays.
        # Now we just need to save them in a file!
        if args.outType == "pickle":
            # Bug fix: pickle streams are binary, so the file must be opened
            # in "wb" mode (text mode raises TypeError on Python 3).
            with open(outFile, "wb") as fid:
                pickle.dump(curGrid.convArray, fid)
        elif args.outType == "tecplot":
            # Create a single 2D array that will contain all data
            data = []
            # Get the number of iterations. Dict views are not indexable in
            # Python 3, so take the first key via an iterator.
            numIter = len(curGrid.convArray[next(iter(curGrid.convArray))])
            # Append iteration counter
            data.append(list(range(1, numIter + 1)))
            for entry in curGrid.convArray.keys():
                data.append(curGrid.convArray[entry])
            # Convert data to array
            data = numpy.array(data).T
            # Write tecplot results (list() needed: dict views cannot be
            # concatenated to a list with "+" in Python 3)
            write_tecplot_file(outFile, "Convergence", ["Iteration"] + list(curGrid.convArray.keys()), data)
        # Print log
        print("Saved convergence history into:")
        print(outFile)
        # Finish execution
        sys.exit(0)
    # Write out the grid.
    curGrid.writeToCGNS(outFileName)
    # Possibly copy back to the original:
    if args.outFile is None:
        shutil.copyfile(outFileName, args.gridFile)
        shutil.rmtree(dirpath)
| StarcoderdataPython |
4979988 | <gh_stars>0
# Projeto similar ao descrito no Notebook, porém com mais volumes de dados e
# rodado em Spark, instalado localmente. Executado com pyspark.
# Tempo estimado: 29 segundos com 200.000 matches / 400.000 históricos
from pyspark.sql import SparkSession
from pyspark.sql import Row
from pyspark.sql import functions as f
from pyspark.sql.types import IntegerType
def converte_coluna(df, col):
    """Return *df* with column *col* cast to Spark's integer type."""
    # withColumn with an existing name replaces that column in place.
    return df.withColumn(col, df[col].cast(IntegerType()))
if __name__ == "__main__":
    # Create a SparkSession and load the match dataset.
    # NOTE(review): the CSV path is hard-coded to one machine — adjust before
    # running elsewhere.
    spark = SparkSession.builder.appName("Matches").getOrCreate()
    matches = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load("/Users/rafaelferri/Arquivos/Clash/PartidasClash200k.csv")
    # Cast the numeric columns (read as strings) to integers.
    matches = converte_coluna(matches, "Trophy")
    matches = converte_coluna(matches, "Crowns Won")
    matches = converte_coluna(matches, "Crowns Lost")
    matches = converte_coluna(matches, "Result")
    # Rank clans: total wins (sum of Result) and match counts per clan,
    # joined into one frame, then win percentage computed from the two.
    ClasVitorias = matches.groupBy("Clan").sum("Result").orderBy("sum(Result)", ascending=False)
    ClasContagem = matches.groupby("Clan").count().orderBy("count", ascending=False)
    MelhoresClas = ClasVitorias.join(ClasContagem, "Clan")
    MelhoresClas = converte_coluna(MelhoresClas, "sum(Result)")
    MelhoresClas = converte_coluna(MelhoresClas, "count")
    MelhoresClas = MelhoresClas.withColumn("PercVitoria", f.round(MelhoresClas["sum(Result)"]/MelhoresClas["count"]*100, 1))
    # Keep only clans with more than 450 wins, ordered by win percentage.
    TopMelhoresClas = MelhoresClas.filter(MelhoresClas["sum(Result)"] > 450)
    TopMelhoresClas = TopMelhoresClas.orderBy("PercVitoria", ascending=False)
    print(TopMelhoresClas.show())
    spark.stop()
11317269 | import unittest
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'jgtextrank'))
sys.path.append(os.path.join(os.path.dirname(__file__)))
import types
import warnings
from collections import Counter
import networkx as nx
import matplotlib as mpl
if os.environ.get('DISPLAY','') == '':
print('no display found. Using non-interactive Agg backend')
mpl.use('Agg')
import matplotlib.pyplot as plt
from jgtextrank.utility import sort_dict_by_value, flatten
from jgtextrank.core import preprocessing, preprocessing_tokenised_context, _syntactic_filter, \
_get_cooccurs_from_single_context, _get_cooccurs, build_cooccurrence_graph, \
_build_vertices_representations, keywords_extraction, _is_top_t_vertices_connection, \
_collapse_adjacent_keywords
from jgtextrank.core import GCValue
def ignore_warnings(test_func):
    """Decorator that silences ResourceWarning while a test method runs.

    The wrapped callable's metadata is preserved with ``functools.wraps`` so
    that unittest reporting shows the original test name, and the wrapped
    function's return value is propagated (the original wrapper dropped it).
    """
    import functools  # local import keeps the module's import block unchanged

    @functools.wraps(test_func)
    def do_test(self, *args, **kwargs):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", ResourceWarning)
            return test_func(self, *args, **kwargs)
    return do_test
class TestTextRank(unittest.TestCase):
    def test_syntactic_filtering(self):
        """_syntactic_filter keeps only noun/adjective tokens and drops punctuation.

        Checks both a plain PoS-tagged corpus and one containing bracket
        characters mis-tagged as 'NN', which must still be removed.
        """
        tagged_abstract_context_list = [[('Compatibility', 'NN'), ('of', 'IN'), ('systems', 'NNS'), ('of', 'IN'),
                                  ('linear', 'JJ'), ('constraints', 'NNS'), ('over', 'IN'), ('the', 'DT'),
                                  ('set', 'NN'), ('of', 'IN'), ('natural', 'JJ'), ('numbers', 'NNS'),
                                  ('.', '.')], [('Criteria', 'NNP'), ('of', 'IN'), ('compatibility', 'NN'),
                                  ('of', 'IN'), ('a', 'DT'), ('system', 'NN'), ('of', 'IN'), ('linear', 'JJ'),
                                  ('Diophantine', 'NNP'), ('equations', 'NNS'), (',', ','), ('strict', 'JJ'),
                                  ('inequations', 'NNS'), (',', ','), ('and', 'CC'), ('nonstrict', 'JJ'),
                                  ('inequations', 'NNS'), ('are', 'VBP'), ('considered', 'VBN'), ('.', '.')]]
        filtered_context_syntactic_units = _syntactic_filter(tagged_abstract_context_list)
        # The filter is lazy: it must come back as a generator of lists.
        assert isinstance(filtered_context_syntactic_units, types.GeneratorType)
        all_filtered_context = []
        for context_syntactic_units in filtered_context_syntactic_units:
            assert isinstance(context_syntactic_units, list)
            all_filtered_context.append(context_syntactic_units)
        flattened_all_filtered_context = flatten(all_filtered_context)
        assert len(flattened_all_filtered_context) == 17
        # Function words, determiners, punctuation and verbs are filtered out.
        assert ('of', 'IN') not in flattened_all_filtered_context
        assert ('.', '.') not in flattened_all_filtered_context
        assert ('a', 'DT') not in flattened_all_filtered_context
        assert ('and', 'CC') not in flattened_all_filtered_context
        assert ('Compatibility', 'NN') in flattened_all_filtered_context
        assert ('linear', 'JJ') in flattened_all_filtered_context
        assert ('considered', 'VBN') not in flattened_all_filtered_context
        # Second corpus: brackets tagged as nouns must still be discarded.
        tagged_abstract_context_list2 = [[('Compatibility', 'NN'), ('of', 'IN'), ('systems', 'NNS'), ('of', 'IN'),
                                         ('linear', 'JJ'), ('constraints', 'NNS'), ('over', 'IN'), ('the', 'DT'),
                                         ('set', 'NN'), ('of', 'IN'), ('natural', 'JJ'), ('numbers', 'NNS'),
                                         ('.', '.')], [('Criteria', 'NNP'), ('of', 'IN'), ('compatibility', 'NN'),
                                                       ('of', 'IN'), ('a', 'DT'), ('system', 'NN'), ('of', 'IN'), ('[', 'NN'), ('linear', 'JJ'),
                                                       ('Diophantine', 'NNP'), ('equations', 'NNS'), (']', 'NN'), (',', ','), ('strict', 'JJ'),
                                                       ('inequations', 'NNS'), (',', ','), ('and', 'CC'), ('nonstrict', 'JJ'),
                                                       ('inequations', 'NNS'), ('are', 'VBP'), ('considered', 'VBN'), ('.', '.')]]
        filtered_context_syntactic_units = _syntactic_filter(tagged_abstract_context_list2)
        assert isinstance(filtered_context_syntactic_units, types.GeneratorType)
        all_filtered_context = []
        for context_syntactic_units in filtered_context_syntactic_units:
            assert isinstance(context_syntactic_units, list)
            all_filtered_context.append(context_syntactic_units)
        flattened_all_filtered_context = flatten(all_filtered_context)
        assert len(flattened_all_filtered_context) == 17, "punctuations should be removed from filtered context"
        assert ('[', 'NN') not in flattened_all_filtered_context
        assert (']', 'NN') not in flattened_all_filtered_context
    def test_pre_processing(self):
        """preprocessing() yields (tokenised_sentence, PoS-filtered units) pairs.

        Uses the classic [Mihalcea04] abstract; verifies sentence-level context
        splitting, token counts, and that only NN/NNS/JJ/NNP tokens survive
        the default noun_adjective filter.
        """
        example_abstract = "Compatibility of systems of linear constraints over the set of natural numbers. " \
                           "Criteria of compatibility of a system of linear Diophantine equations, strict inequations, " \
                           "and nonstrict inequations are considered. Upper bounds for components of a minimal set of " \
                           "solutions and algorithms of construction of minimal generating sets of solutions for all " \
                           "types of systems are given. These criteria and the corresponding algorithms for " \
                           "constructing a minimal supporting set of solutions can be used in solving all the " \
                           "considered types systems and systems of mixed types."
        # original_tokenised_sentences, syntactic_units
        syntactic_filtered_context = preprocessing(example_abstract)
        assert isinstance(syntactic_filtered_context, types.GeneratorType)
        all_tokenised_context = []
        all_filtered_context = []
        for tokenised_context, context_syntactic_units in syntactic_filtered_context:
            assert isinstance(tokenised_context, list)
            assert isinstance(context_syntactic_units, list)
            assert len(tokenised_context) > 0
            assert len(context_syntactic_units) > 0
            assert isinstance(context_syntactic_units[0], tuple)
            all_tokenised_context.append(tokenised_context)
            all_filtered_context.append(context_syntactic_units)
        assert len(all_tokenised_context) == 4, "Context size should be 4. The default context is sentence level."
        assert len(all_filtered_context) == 4, "PoS filtered context should be 4. " \
                                               "The default context is sentence level."
        flatten_all_tokenised_context = flatten(all_tokenised_context)
        assert len(flatten_all_tokenised_context) == 91, "total tokens are 91"
        flatten_all_filtered_context = flatten(all_filtered_context)
        assert len(flatten_all_filtered_context) == 41, "total size of filtered context tokens are 41"
        # Every surviving token must carry one of the noun/adjective tags.
        check_filtered_context = [True if filtered_token[1] == 'NN' or filtered_token[1] == 'NNS'
                                  or filtered_token[1] == 'JJ' or filtered_token[1] == 'NNP'
                                  else False for filtered_token in flatten_all_filtered_context]
        assert len(set(check_filtered_context)) == 1, "the default 'noun_adjective_filter' should be applied."
        assert "." not in flatten_all_filtered_context
        assert ('solutions', 'NNS') in flatten_all_filtered_context
        assert ('minimal', 'JJ') in flatten_all_filtered_context
        assert ('equations', 'NNS') in flatten_all_filtered_context
    def test_get_cooccurs_from_single_context(self):
        """_get_cooccurs_from_single_context returns a word's neighbours within
        a sliding window (both sides), for window sizes 1 through 4.
        """
        filtered_context = ['Compatibility', 'systems', 'linear', 'constraints', 'set', 'natural', 'numbers']
        syntactic_unit_1 = 'systems'
        # Default window (2): one word to the left, two to the right.
        cooccur_context_1_1 = _get_cooccurs_from_single_context(syntactic_unit_1,filtered_context)
        assert len(cooccur_context_1_1) == 3, "the number of co-occur words of 'systems' in windows=2 context should be 3"
        assert 'Compatibility' in cooccur_context_1_1, "Left side context window contains 'Compatibility'"
        assert 'linear' in cooccur_context_1_1
        assert 'constraints' in cooccur_context_1_1
        cooccur_context_1_2 = _get_cooccurs_from_single_context(syntactic_unit_1,filtered_context, window_size=1)
        assert len(cooccur_context_1_2) == 2, "the number of co-occur words of 'systems' in windows=1 context should be 2"
        assert 'Compatibility' in cooccur_context_1_2, "Left side context window contains 'Compatibility'"
        assert 'linear' in cooccur_context_1_2
        # First word of the context: only right-hand neighbours exist.
        syntactic_unit_2 = 'Compatibility'
        cooccur_context_2_1 = _get_cooccurs_from_single_context(syntactic_unit_2, filtered_context, window_size=2)
        assert len(cooccur_context_2_1) == 2, "the number of co-occur words of 'Compatibility' in windows=2 context should be 2"
        assert 'systems' in cooccur_context_2_1
        assert 'linear' in cooccur_context_2_1
        # Middle word: the window grows until it covers the whole context.
        syntactic_unit_3 = 'constraints'
        cooccur_context_3_1 = _get_cooccurs_from_single_context(syntactic_unit_3, filtered_context)
        assert len(cooccur_context_3_1) == 4
        assert 'linear' in cooccur_context_3_1
        assert 'systems' in cooccur_context_3_1
        assert 'set' in cooccur_context_3_1
        assert 'natural' in cooccur_context_3_1
        cooccur_context_3_2 = _get_cooccurs_from_single_context(syntactic_unit_3, filtered_context, window_size=3)
        assert len(cooccur_context_3_2) == 6
        assert 'Compatibility' in cooccur_context_3_2
        assert 'systems' in cooccur_context_3_2
        assert 'linear' in cooccur_context_3_2
        assert 'set' in cooccur_context_3_2
        assert 'natural' in cooccur_context_3_2
        assert 'numbers' in cooccur_context_3_2
        # window_size=4 exceeds the context bounds: result is unchanged.
        cooccur_context_3_3 = _get_cooccurs_from_single_context(syntactic_unit_3, filtered_context, window_size=4)
        assert len(cooccur_context_3_3) == 6
        assert 'Compatibility' in cooccur_context_3_3
        assert 'systems' in cooccur_context_3_3
        assert 'linear' in cooccur_context_3_3
        assert 'set' in cooccur_context_3_3
        assert 'natural' in cooccur_context_3_3
        assert 'numbers' in cooccur_context_3_3
        # Last word of the context: only left-hand neighbours exist.
        syntactic_unit_4 = 'numbers'
        cooccur_context_4_1 = _get_cooccurs_from_single_context(syntactic_unit_4,filtered_context)
        assert len(cooccur_context_4_1) == 2
        assert 'set' in cooccur_context_4_1
        assert 'natural' in cooccur_context_4_1
    def test_get_cooccurs(self):
        """_get_cooccurs aggregates a word's co-occurrences across multiple
        contexts (here: two filtered sentences), honouring window_size.
        """
        filtered_context_corpus = [['Compatibility', 'systems', 'linear', 'constraints', 'set', 'natural', 'numbers'],
                                   ['criteria', 'corresponding', 'algorithms', 'minimal', 'supporting', 'set',
                                    'solutions', 'solving', 'types', 'systems', 'systems', 'mixed', 'types']]
        # 'systems' appears in both contexts; cooccurs are merged.
        syntactic_unit_1 = 'systems'
        all_cooccur_context_1_1 = _get_cooccurs(syntactic_unit_1, filtered_context_corpus)
        assert len(all_cooccur_context_1_1) == 7
        assert 'Compatibility' in all_cooccur_context_1_1
        assert 'linear' in all_cooccur_context_1_1
        assert 'constraints' in all_cooccur_context_1_1
        assert 'solving' in all_cooccur_context_1_1
        assert 'types' in all_cooccur_context_1_1
        assert 'mixed' in all_cooccur_context_1_1
        assert 'systems' in all_cooccur_context_1_1
        syntactic_unit_2 = 'numbers'
        all_cooccur_context_2_1 = _get_cooccurs(syntactic_unit_2, filtered_context_corpus)
        assert len(all_cooccur_context_2_1) == 2
        assert 'set' in all_cooccur_context_2_1
        assert 'natural' in all_cooccur_context_2_1
        # 'set' appears in both contexts; window_size narrows/widens the merge.
        syntactic_unit_3 = 'set'
        all_cooccur_context_3_1 = _get_cooccurs(syntactic_unit_3, filtered_context_corpus, window_size=1)
        assert len(all_cooccur_context_3_1) == 4
        assert 'constraints' in all_cooccur_context_3_1
        assert 'natural' in all_cooccur_context_3_1
        assert 'supporting' in all_cooccur_context_3_1
        assert 'solutions' in all_cooccur_context_3_1
        all_cooccur_context_3_2 = _get_cooccurs(syntactic_unit_3, filtered_context_corpus, window_size=2)
        assert len(all_cooccur_context_3_2) == 8
        assert 'linear' in all_cooccur_context_3_2
        assert 'constraints' in all_cooccur_context_3_2
        assert 'natural' in all_cooccur_context_3_2
        assert 'numbers' in all_cooccur_context_3_2
        assert 'minimal' in all_cooccur_context_3_2
        assert 'supporting' in all_cooccur_context_3_2
        assert 'solutions' in all_cooccur_context_3_2
        assert 'solving' in all_cooccur_context_3_2
        syntactic_unit_4 = 'criteria'
        all_cooccur_context_4_1 = _get_cooccurs(syntactic_unit_4, filtered_context_corpus)
        assert len(all_cooccur_context_4_1) == 2
        assert 'corresponding' in all_cooccur_context_4_1
        assert 'algorithms' in all_cooccur_context_4_1
    def test_get_cooccurs_with_raw_context(self):
        """_get_cooccurs with all_filtered_context_tokens: the window is taken
        over the ORIGINAL tokenised context, but only filtered tokens count as
        co-occurrences; a word occurring twice contributes from both places.
        """
        all_tokenised_context=[['Upper', 'bounds', 'for', 'components', 'of', 'a', 'minimal', 'set', 'of',
                                'solutions', 'and', 'algorithms', 'of', 'construction', 'of', 'minimal',
                                'generating', 'sets', 'of', 'solutions', 'for', 'all', 'types', 'of', 'systems',
                                'are', 'given', '.']]
        filtered_context_corpus = ['Upper', 'bounds', 'components', 'minimal', 'solutions', 'algorithms',
                                   'construction', 'minimal', 'generating', 'sets', 'solutions', 'types',
                                   'systems']
        # 'components' is surrounded by stopwords in the raw text; only
        # 'bounds' is both in-window and in the filtered token set.
        syntactic_unit_1 = 'components'
        all_cooccur_context_1_1 = _get_cooccurs(syntactic_unit_1, all_tokenised_context,
                                                all_filtered_context_tokens=filtered_context_corpus)
        #print("'", syntactic_unit_1, "' cooccurs: ", all_cooccur_context_1_1)
        assert len(all_cooccur_context_1_1) == 1
        assert 'bounds' in all_cooccur_context_1_1
        #example with two occurrences in one context
        syntactic_unit_2 = 'solutions'
        all_cooccur_context_2_1 = _get_cooccurs(syntactic_unit_2, all_tokenised_context,
                                                all_filtered_context_tokens=filtered_context_corpus)
        #print("'", syntactic_unit_2, "' cooccurs: ", all_cooccur_context_2_1)
        assert len(all_cooccur_context_2_1) == 2, "'solutions' has two occcurrences in current context. " \
                                                  "It should have two co-occurred words in two places."
        assert 'algorithms' in all_cooccur_context_2_1
        assert 'sets' in all_cooccur_context_2_1
    def test_build_vertices_representations(self):
        """_build_vertices_representations builds one vertex per filtered token,
        each carrying its co-occurring word types for the given window size.
        """
        #original_tokenised_text = ['Here', 'are', 'details', 'from', 'the', '13th', 'Rail', 'Steel',
        #                           'Campaign','.', 'I', 'have', 'checked', 'the', 'Hydrogen', 'values',
        #                           'reported', 'to', 'you', 'by', 'our', 'IBM', 'mainframe', 'messages', '.']
        filtered_context = ['details', 'rail', 'steel', 'campaign', 'hydrogen',
                            'values', 'ibm', 'mainframe']
        #cooccurrence window size
        window_size = 2
        vertices = _build_vertices_representations(filtered_context, conn_with_original_ctx=False, window_size=window_size)
        assert 8 == len(vertices)
        # Pick out the vertices we want to inspect by their word type.
        for i in range(0, len(vertices)):
            vertex = vertices[i]
            if 'rail' == vertex.word_type:
                rail_vertex = vertex
            if 'ibm' == vertex.word_type:
                ibm_vertex = vertex
            if 'mainframe' == vertex.word_type:
                mainframe_vertex = vertex
            if 'hydrogen' == vertex.word_type:
                hydrogen_vertex = vertex
        assert len(rail_vertex.co_occurs) == 3
        assert 'details' in rail_vertex.co_occurs
        assert 'steel' in rail_vertex.co_occurs
        assert 'campaign' in rail_vertex.co_occurs
        assert len(ibm_vertex.co_occurs) == 3
        assert 'mainframe' in ibm_vertex.co_occurs
        assert 'values' in ibm_vertex.co_occurs
        assert 'hydrogen' in ibm_vertex.co_occurs
        assert len(mainframe_vertex.co_occurs) == 2
        assert 'values' in mainframe_vertex.co_occurs
        assert 'ibm' in mainframe_vertex.co_occurs
        assert len(hydrogen_vertex.co_occurs) == 4
        assert 'steel' in hydrogen_vertex.co_occurs
        assert 'ibm' in hydrogen_vertex.co_occurs
        assert 'values' in hydrogen_vertex.co_occurs
        assert 'ibm' in hydrogen_vertex.co_occurs
    def test_build_cooccurrence_graph(self):
        """End-to-end: build the co-occurrence graph from the [Mihalcea04]
        abstract and verify the PageRank top-10 vertex ordering.
        """
        # example abstract taken from [Mihalcea04]
        example_abstract = "Compatibility of systems of linear constraints over the set of natural numbers. " \
                           "Criteria of compatibility of a system of linear Diophantine equations, strict inequations, " \
                           "and nonstrict inequations are considered. Upper bounds for components of a minimal set of " \
                           "solutions and algorithms of construction of minimal generating sets of solutions for all " \
                           "types of systems are given. These criteria and the corresponding algorithms for " \
                           "constructing a minimal supporting set of solutions can be used in solving all the " \
                           "considered types systems and systems of mixed types."
        syntactic_filtered_context = preprocessing(example_abstract)
        cooccurrence_graph, original_tokenised_context = build_cooccurrence_graph(syntactic_filtered_context, conn_with_original_ctx=False)
        #print("len(cooccurrence_graph.nodes()): ", len(cooccurrence_graph.nodes()))
        assert 25 == len(cooccurrence_graph.nodes())
        # Rank vertices with PageRank and check the expected top-10 order.
        pr = nx.pagerank(cooccurrence_graph, tol=0.0001)
        #import matplotlib.pyplot as plt
        #nx.draw_networkx(cooccurrence_graph, pos=None, arrows=True, with_labels=True)
        #plt.show()
        pr_counter = Counter(pr)
        top_t_vertices = pr_counter.most_common(10)
        print("top t vertices: ", top_t_vertices)
        assert 'set' == top_t_vertices[0][0]
        assert 'minimal' == top_t_vertices[1][0]
        assert 'solutions' == top_t_vertices[2][0]
        assert 'linear' == top_t_vertices[3][0]
        assert 'systems' == top_t_vertices[4][0]
        assert 'algorithms' == top_t_vertices[5][0]
        assert 'inequations' == top_t_vertices[6][0]
        assert 'strict' == top_t_vertices[7][0]
        assert 'types' == top_t_vertices[8][0]
        assert 'equations' == top_t_vertices[9][0]
    def test_syntactic_filtering_with_custom_filter(self):
        """_syntactic_filter honours a user-supplied pos_filter: with VBN added
        to the allowed tags, 'considered' is kept (unlike the default filter).
        """
        tagged_abstract_tokens = [[('Compatibility', 'NN'), ('of', 'IN'), ('systems', 'NNS'), ('of', 'IN'),
                                  ('linear', 'JJ'), ('constraints', 'NNS'), ('over', 'IN'), ('the', 'DT'),
                                  ('set', 'NN'), ('of', 'IN'), ('natural', 'JJ'), ('numbers', 'NNS'),
                                  ('.', '.'), ('Criteria', 'NNP'), ('of', 'IN'), ('compatibility', 'NN'),
                                  ('of', 'IN'), ('a', 'DT'), ('system', 'NN'), ('of', 'IN'), ('linear', 'JJ'),
                                  ('Diophantine', 'NNP'), ('equations', 'NNS'), (',', ','), ('strict', 'JJ'),
                                  ('inequations', 'NNS'), (',', ','), ('and', 'CC'), ('nonstrict', 'JJ'),
                                  ('inequations', 'NNS'), ('are', 'VBP'), ('considered', 'VBN'), ('.', '.')]]
        # Custom filter also admitting past participles (VBN).
        custom_filter = lambda t : filter(lambda a: a[1] == 'NNS' or a[1] == 'NNP' or a[1] == 'NN'
                                                    or a[1] == 'JJ' or a[1] == 'VBN', t)
        syntactic_units = _syntactic_filter(tagged_abstract_tokens, pos_filter=custom_filter)
        syntactic_units = list(syntactic_units)
        assert len(syntactic_units) == 1
        print("syntactic_units filtered with custom filter from pre-tagged text:")
        print(syntactic_units)
        print("len(syntactic_units): ", len(syntactic_units))
        assert len(syntactic_units[0]) == 18, "filtered context token size should be 18."
        assert ('of', 'IN') not in syntactic_units[0]
        assert ('.', '.') not in syntactic_units[0]
        assert ('a', 'DT') not in syntactic_units[0]
        assert ('the', 'DT') not in syntactic_units[0]
        assert ('and', 'CC') not in syntactic_units[0]
        assert ('Compatibility', 'NN') in syntactic_units[0]
        assert ('linear', 'JJ') in syntactic_units[0]
        assert ('considered', 'VBN') in syntactic_units[0]
    def test_term_betweeness_ranking_via_cooccur_graph(self):
        """Rank co-occurrence graph vertices by betweenness centrality instead
        of PageRank and check the expected top terms for the sample abstract.
        """
        example_abstract = "Compatibility of systems of linear constraints over the set of natural numbers. " \
                           "Criteria of compatibility of a system of linear Diophantine equations, strict inequations, " \
                           "and nonstrict inequations are considered. Upper bounds for components of a minimal set of " \
                           "solutions and algorithms of construction of minimal generating sets of solutions for all " \
                           "types of systems are given. These criteria and the corresponding algorithms for " \
                           "constructing a minimal supporting set of solutions can be used in solving all the " \
                           "considered types systems and systems of mixed types."
        preprocessed_corpus_context = preprocessing(example_abstract)
        cooccurrence_graph, original_tokenised_context = build_cooccurrence_graph(preprocessed_corpus_context)
        betweenness = nx.betweenness_centrality(cooccurrence_graph)
        #nx.draw_networkx(cooccurrence_graph, pos=None, arrows=True, with_labels=True)
        #plt.show()
        btweeness_ranked_terms = sort_dict_by_value(betweenness)
        print("ranked terms via betweenness: ", btweeness_ranked_terms)
        btweeness_ranked_terms = list(btweeness_ranked_terms)
        # Only the first five positions are pinned; lower ranks are less stable.
        assert "linear" == btweeness_ranked_terms[0]
        assert "systems" == btweeness_ranked_terms[1]
        assert "equations" == btweeness_ranked_terms[2]
        assert "strict" == btweeness_ranked_terms[3]
        assert "set" == btweeness_ranked_terms[4]
        #assert "inequations" == btweeness_ranked_terms[5]
        #assert "compatibility" == btweeness_ranked_terms[6]
def test_is_top_t_vertices_connection(self):
top_t_vertices = [('numbers', 1.46), ('inequations', 1.45), ('linear', 1.29),
('diophantine', 1.28), ('upper', 0.99), ('bounds', 0.99), ('strict', 0.77)]
term_candidate_1 = "linear constrains"
result_term_candidate_1 = _is_top_t_vertices_connection(term_candidate_1, top_t_vertices)
assert result_term_candidate_1 is True, "'"+result_term_candidate_1+"' is a top T vertex connection"
term_candidate_2 = "linear diophantine equations"
result_term_candidate_2 = _is_top_t_vertices_connection(term_candidate_2, top_t_vertices)
assert result_term_candidate_2 is True, "'"+result_term_candidate_2+"' is a top T vertex connection"
term_candidate_3 = "natural numbers"
result_term_candidate_3 = _is_top_t_vertices_connection(term_candidate_3, top_t_vertices)
assert result_term_candidate_3 is True, "'"+result_term_candidate_3+"' is a top T vertex connection"
term_candidate_4 = "nonstrict inequations"
result_term_candidate_4 = _is_top_t_vertices_connection(term_candidate_4, top_t_vertices)
assert result_term_candidate_4 is True, "'"+term_candidate_4+"' is a top T vertex connection"
term_candidate_5 = "strict inequations"
result_term_candidate_5 = _is_top_t_vertices_connection(term_candidate_5, top_t_vertices)
assert result_term_candidate_5 is True, "'"+term_candidate_5+"' is a top T vertex connection"
term_candidate_6 = "upper bounds"
result_term_candidate_6 = _is_top_t_vertices_connection(term_candidate_6, top_t_vertices)
assert result_term_candidate_6 is True, "'"+term_candidate_6+"' is a top T vertex connection"
term_candidate_7 = "minimal generating sets"
result_term_candidate_7 = _is_top_t_vertices_connection(term_candidate_7, top_t_vertices)
assert result_term_candidate_7 is False, "'"+term_candidate_7+"' is NOT a top T vertex connection"
term_candidate_8 = "solutions"
result_term_candidate_8 = _is_top_t_vertices_connection(term_candidate_8, top_t_vertices)
assert result_term_candidate_8 is False, "'"+term_candidate_8+"' is NOT a top T vertex connection"
term_candidate_9 = "types systems"
result_term_candidate_9 = _is_top_t_vertices_connection(term_candidate_9, top_t_vertices)
assert result_term_candidate_9 is False, "'"+term_candidate_9+"' is NOT a top T vertex connection"
term_candidate_10 = "algorithms"
result_term_candidate_10 = _is_top_t_vertices_connection(term_candidate_10, top_t_vertices)
assert result_term_candidate_10 is False, "'"+term_candidate_10+"' is NOT a top T vertex connection"
    def test_collapse_adjacent_keywords(self):
        """Verify _collapse_adjacent_keywords merges single-word keywords that occur
        adjacently in the original token stream into multi-word terms, preserving
        text order (e.g. 'linear' + 'constraints' -> ['linear', 'constraints'])."""
        # Node weights for single-word keywords extracted from the sample abstract.
        weighted_keywords = {'sets': 0.03472, 'supporting': 0.03448, 'compatibility': 0.04089,
                             'components': 0.00643, 'minimal': 0.06524, 'algorithms': 0.05472, 'inequations': 0.04641,
                             'corresponding': 0.02194, 'numbers': 0.02379, 'systems': 0.083597, 'constraints': 0.02148,
                             'linear': 0.08849, 'natural': 0.040847, 'diophantine': 0.0370565, 'mixed': 0.03591,
                             'equations': 0.054968, 'strict': 0.041742, 'set': 0.066734, 'construction': 0.03580,
                             'system': 0.02148, 'types': 0.03591, 'criteria': 0.02381, 'upper': 0.00643,
                             'nonstrict': 0.026167, 'solutions': 0.050879}
        # Lower-cased token stream of the abstract (punctuation kept as tokens).
        original_tokenised_text= ['compatibility', 'of', 'systems', 'of', 'linear', 'constraints', 'over',
                                  'the', 'set', 'of', 'natural', 'numbers', '.', 'criteria', 'of', 'compatibility',
                                  'of', 'a', 'system', 'of', 'linear', 'diophantine', 'equations', ',',
                                  'strict', 'inequations', ',', 'and', 'nonstrict', 'inequations', 'are',
                                  'considered', '.', 'upper', 'bounds', 'for', 'components', 'of', 'a',
                                  'minimal', 'set', 'of', 'solutions', 'and', 'algorithms', 'of',
                                  'construction', 'of', 'minimal', 'generating', 'sets', 'of', 'solutions',
                                  'for', 'all', 'types', 'of', 'systems', 'are', 'given', '.', 'these',
                                  'criteria', 'and', 'the', 'corresponding', 'algorithms', 'for', 'constructing',
                                  'a', 'minimal', 'supporting', 'set', 'of', 'solutions', 'can', 'be', 'used',
                                  'in', 'solving', 'all', 'the', 'considered', 'types', 'systems', 'and',
                                  'systems', 'of', 'mixed', 'types', '.']
        key_terms = _collapse_adjacent_keywords(weighted_keywords, original_tokenised_text)
        print("key terms collapsed from context: ", key_terms)
        # Collapsed terms follow the order of first appearance in the text.
        assert len(key_terms) == 29
        assert key_terms[0][0] == 'compatibility'
        assert key_terms[1][0] == 'systems'
        # adjacent keywords 'linear'+'constraints' collapse into one multi-word term
        assert key_terms[2][0] == 'linear'
        assert key_terms[2][1] == 'constraints'
        assert key_terms[3][0] == 'set'
        assert key_terms[4][0] == 'natural'
        assert key_terms[4][1] == 'numbers'
        assert key_terms[5][0] == 'criteria'
        # Second fixture: keyword weights from a journal-article excerpt
        # (id S0021999113005652); includes a formula token to exercise unicode handling.
        S0021999113005652_weighted_keywords = {'degradation': 0.03048, 'future': 0.004573, 'result': 0.004573,
                                               'exchange': 0.03367, 'progress': 0.004573, 'important': 0.03048,
                                               'modelling': 0.030487, 'extensive': 0.03048, 'reynolds': 0.02551,
                                               'figure': 0.004573170731707318, 'datum': 0.004573, 'impact': 0.03048,
                                               'study': 0.00457, 'function': 0.004573, 'environmental': 0.0304878,
                                               'effect': 0.030487, 'air': 0.03070, 'flow': 0.016393,
                                               'schmidt': 0.02551, 'fig': 0.030487, 'turbulent': 0.004573,
                                               'rate': 0.024854, 'chemical': 0.03582, 'number': 0.036786,
                                               'interface': 0.0045731, 'reaction': 0.047672, 'depict': 0.0304878,
                                               'practical': 0.03048, 'interesting': 0.004573,
                                               'investigation': 0.0304878, 'concentration': 0.0304878,
                                               'worth': 0.0045731, 'increase': 0.04951, 'bulk': 0.00457,
                                               'water': 0.055614, 'efficiency': 0.015095, 'equilibrium': 0.030487,
                                               'product': 0.030487, 'aquarium': 0.0248545,
                                               'by(24)cb⁎ =∫01〈cb⁎〉(z⁎)dz⁎': 0.030487, 'acidification': 0.016393,
                                               'gas': 0.018886, 'information': 0.03048}
        # Lemmatised token stream of the article excerpt.
        S0021999113005652_tokenised_text = ['it', 'be', 'interesting', 'to', 'quantify', 'the', 'effect',
                                            'of', 'the', 'schmidt', 'number', 'and', 'the', 'chemical',
                                            'reaction', 'rate', 'on', 'the', 'bulk', '-', 'mean', 'concentration',
                                            'of', 'b', 'in', 'water', '.', 'the', 'datum', 'could', 'present',
                                            'important', 'information', 'on', 'evaluate', 'the', 'environmental',
                                            'impact', 'of', 'the', 'degradation', 'product', 'of', 'b', ',',
                                            'as', 'well', 'as', 'acidification', 'of', 'water', 'by', 'the',
                                            'chemical', 'reaction', '.', 'here', ',', 'the', 'bulk', '-',
                                            'mean', 'concentration', 'of', 'b', 'be', 'define',
                                            'by(24)cb⁎ =∫01〈cb⁎〉(z⁎)dz⁎', 'fig', '.', '15', 'depict', 'the',
                                            'effect', 'of', 'the', 'schmidt', 'and', 'the', 'chemical',
                                            'reaction', 'rate', 'on', 'the', 'bulk', '-', 'mean',
                                            'concentration', 'cb⁎ .', 'it', 'be', 'worth', 'to', 'mention',
                                            'here', 'that', 'the', 'bulk', '-', 'mean', 'concentration', 'of',
                                            'b', 'reach', 'approximately', '0.6', 'as', 'the', 'chemical',
                                            'reaction', 'rate', 'and', 'the', 'schmidt', 'number', 'increase',
                                            'to', 'infinite', ',', 'and', 'the', 'concentration', 'be',
                                            'small', 'than', 'the', 'equilibrium', 'concentration', 'of', 'a',
                                            'at', 'the', 'interface', '.', 'this', 'figure', 'indicate',
                                            'that', 'progress', 'of', 'the', 'chemical', 'reaction', 'be',
                                            'somewhat', 'interfere', 'by', 'turbulent', 'mix', 'in', 'water',
                                            ',', 'and', 'the', 'efficiency', 'of', 'the', 'chemical',
                                            'reaction', 'be', 'up', 'to', 'approximately', '60', '%', '.',
                                            'the', 'efficiency', 'of', 'the', 'chemical', 'reaction', 'in',
                                            'water', 'will', 'be', 'a', 'function', 'of', 'the', 'reynolds',
                                            'number', 'of', 'the', 'water', 'flow', ',', 'and', 'the',
                                            'efficiency', 'could', 'increase', 'as', 'the', 'reynolds',
                                            'number', 'increase', '.', 'we', 'need', 'an', 'extensive',
                                            'investigation', 'on', 'the', 'efficiency', 'of', 'the', 'aquarium',
                                            'chemical', 'reaction', 'in', 'the', 'near', 'future', 'to', 'extend',
                                            'the', 'result', 'of', 'this', 'study', 'further', 'to', 'establish',
                                            'practical', 'modelling', 'for', 'the', 'gas', 'exchange',
                                            'between', 'air', 'and', 'water', '.']
        S0021999113005652_key_terms = _collapse_adjacent_keywords(S0021999113005652_weighted_keywords, S0021999113005652_tokenised_text)
        print("S0021999113005652_key_terms: ", S0021999113005652_key_terms)
        assert len(S0021999113005652_key_terms) == 57
        assert S0021999113005652_key_terms[0][0] == "interesting"
        assert S0021999113005652_key_terms[1][0] == "effect"
        assert S0021999113005652_key_terms[2][0] == "schmidt"
        assert S0021999113005652_key_terms[2][1] == "number"
        # three adjacent keywords collapse into a single 3-word term
        assert S0021999113005652_key_terms[3][0] == "chemical"
        assert S0021999113005652_key_terms[3][1] == "reaction"
        assert S0021999113005652_key_terms[3][2] == "rate"
        assert S0021999113005652_key_terms[4][0] == "bulk"
        assert S0021999113005652_key_terms[5][0] == "concentration"
        assert S0021999113005652_key_terms[6][0] == "water"
        assert S0021999113005652_key_terms[7][0] == "datum"
        assert S0021999113005652_key_terms[8][0] == "important"
        assert S0021999113005652_key_terms[8][1] == "information"
        assert S0021999113005652_key_terms[9][0] == "environmental"
        assert S0021999113005652_key_terms[9][1] == "impact"
        # the unicode formula token and the adjacent 'fig' keyword collapse together
        assert S0021999113005652_key_terms[16][0] == "by(24)cb⁎ =∫01〈cb⁎〉(z⁎)dz⁎"
        assert S0021999113005652_key_terms[16][1] == "fig"
def test_get_longer_terms(self):
candidate_term1 = ["real", "time"]
candidate_term2 = ["floating", "point"]
longer_terms = [["real", "time", "clock"],
["real", "time", "expert", "system"],
["real", "time", "image", "generation"],
["real", "time", "output"],
["real", "time", "system"],
["floating", "point", "arithmetic"],
["floating", "point", "constant"],
["floating", "point", "operation"],
["floating", "point", "routine"]]
candidate_term1_longer_terms = GCValue._get_longer_terms(candidate_term1, longer_terms)
assert len(candidate_term1_longer_terms) == 5
assert candidate_term1_longer_terms == [['real', 'time', 'clock'],
['real', 'time', 'expert', 'system'],
['real', 'time', 'image', 'generation'],
['real', 'time', 'output'],
['real', 'time', 'system']]
candidate_term2_longer_terms = GCValue._get_longer_terms(candidate_term2, longer_terms)
assert len(candidate_term2_longer_terms) == 4
assert candidate_term2_longer_terms == [["floating", "point", "arithmetic"],
["floating", "point", "constant"],
["floating", "point", "operation"],
["floating", "point", "routine"]]
#gc_value = GCValue()
#gc_value.weighing({"real": 1.0, "time":1.2, "clock":2.1, "expert":3.1, "system":4.1, "image":1.12,
# "generation":1.4, "output":2.1, "floating":0.3, "point": 0.8, "arithmetic": 0.3},
# longer_terms)
@ignore_warnings
def test_keywords_extraction(self):
example_abstract = "Compatibility of systems of linear constraints over the set of natural numbers. " \
"Criteria of compatibility of a system of linear Diophantine equations, strict inequations, " \
"and nonstrict inequations are considered. Upper bounds for components of a minimal set of " \
"solutions and algorithms of construction of minimal generating sets of solutions for all " \
"types of systems are given. These criteria and the corresponding algorithms for " \
"constructing a minimal supporting set of solutions can be used in solving all the " \
"considered types systems and systems of mixed types."
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="sum")
print("extracted keywords:"+ str(results))
print("top_vertices: ", top_vertices)
assert 13 == len(results)
term_list = [term[0] for term in results]
assert "linear diophantine equations" == term_list[0]
assert "minimal supporting set" == term_list[1]
assert "minimal set" == term_list[2]
assert "types systems" == term_list[3]
assert "linear constraints" == term_list[4]
assert "strict inequations" == term_list[5]
assert "systems" == term_list[6]
assert "corresponding algorithms" == term_list[7]
assert "nonstrict inequations" == term_list[8]
assert "set" in term_list
assert "minimal" in term_list
assert "algorithms" in term_list
assert "solutions" in term_list
assert "natural numbers" not in term_list
assert 'linear' == top_vertices[0][0]
assert 'systems' == top_vertices[1][0]
assert 'set' == top_vertices[2][0]
assert 'minimal' == top_vertices[3][0]
assert 'equations' == top_vertices[4][0]
assert 'algorithms' == top_vertices[5][0]
assert 'solutions' == top_vertices[6][0]
assert 'inequations' == top_vertices[7][0]
print("after enabling lemmatization....")
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, lemma=True, weight_comb="sum")
assert 12 == len(results)
print("extracted keywords after lemmatization: ", results)
print("top_vertices after lemmatization: ", top_vertices)
term_list = [term[0] for term in results]
assert "minimal supporting set" == term_list[0]
assert "linear diophantine equation" == term_list[1]
assert "minimal set" == term_list[2]
assert "type system" == term_list[3]
assert "linear constraint" == term_list[4]
assert "strict inequations" == term_list[5]
assert "system" == term_list[6]
assert "corresponding algorithm" == term_list[7]
assert "nonstrict inequations" == term_list[8]
assert 'system' == top_vertices[0][0]
assert 'set' == top_vertices[1][0]
assert 'linear' == top_vertices[2][0]
assert 'algorithm' == top_vertices[3][0]
assert 'equation' == top_vertices[4][0]
assert 'minimal' == top_vertices[5][0]
assert 'inequations' == top_vertices[6][0]
def test_keywords_extraction2(self):
"""
test keywords extraction with example nodes (with custom syntactic filters and step list) in the paper
"""
example_abstract = "Compatibility of systems of linear constraints over the set of natural numbers. " \
"Criteria of compatibility of a system of linear Diophantine equations, strict inequations, " \
"and nonstrict inequations are considered. Upper bounds for components of a minimal set of " \
"solutions and algorithms of construction of minimal generating sets of solutions for all " \
"types of systems are given. These criteria and the corresponding algorithms for " \
"constructing a minimal supporting set of solutions can be used in solving all the " \
"considered types systems and systems of mixed types."
custom_categories = {'NNS', 'NNP', 'NN', 'JJ', 'VBZ'}
# manually filter few nodes not appearing in the given example of original paper
stop_words={'set', 'mixed', 'corresponding', 'supporting'}
ranked_terms, top_vertices = keywords_extraction(example_abstract, top_p = 1, top_t=None, directed=False,
syntactic_categories=custom_categories, stop_words=stop_words, weight_comb="sum")
print("ranked terms with custom filters 1: ", ranked_terms)
print("top_vertices with custom filters 1: ", top_vertices)
top_vertices_names = [top_vertex[0] for top_vertex in top_vertices]
assert 'supporting' not in top_vertices_names
assert 'corresponding' not in top_vertices_names
assert 'mixed' not in top_vertices_names
assert 'set' not in top_vertices_names
assert 'linear diophantine equations' == ranked_terms[0][0]
assert 'linear constraints' == ranked_terms[1][0]
assert 'types systems' == ranked_terms[2][0]
assert 'upper bounds' == ranked_terms[3][0]
assert 'strict inequations' == ranked_terms[4][0]
assert 'natural numbers' == ranked_terms[5][0]
assert 'systems' == ranked_terms[6][0]
assert 'nonstrict inequations' == ranked_terms[7][0]
assert 'compatibility' == ranked_terms[8][0]
assert 'construction' == ranked_terms[9][0] or 'minimal' == ranked_terms[9][0] \
or 'algorithms' == ranked_terms[9][0] or 'solutions' == ranked_terms[9][0] \
or 'sets' == ranked_terms[9][0]
# >>> [('linear diophantine equations', 0.19805), ('linear constraints', 0.12147),
# ('types systems', 0.10493), ('upper bounds', 0.10114), ('strict inequations', 0.09432),
# ('natural numbers', 0.09091), ('systems', 0.08092), ('nonstrict inequations', 0.07741),
# ('compatibility', 0.04666), ('algorithms', 0.04545), ('minimal', 0.04545),
# ('construction', 0.04545), ('sets', 0.04545), ('solutions', 0.04545),
# ('components', 0.03522), ('criteria', 0.02665), ('types', 0.02401), ('system', 0.02348)]
stop_words={'set', 'mixed', 'corresponding', 'supporting', "minimal"}
ranked_terms, top_vertices = keywords_extraction(example_abstract, top_p = 1, top_t=None, directed=False,
syntactic_categories=custom_categories, stop_words=stop_words)
print("ranked terms with custom filters 2: ", ranked_terms)
print("top_vertices with custom filters 2: ", top_vertices)
top_vertices_names = [top_vertex[0] for top_vertex in top_vertices]
assert 'minimal' not in top_vertices_names
assert 'supporting' not in top_vertices_names
assert 'corresponding' not in top_vertices_names
assert 'mixed' not in top_vertices_names
assert 'set' not in top_vertices_names
# [('linear diophantine equations', 0.20748), ('linear constraints', 0.12726), ('types systems', 0.10992),
# ('upper bounds', 0.10596), ('strict inequations', 0.09881), ('natural numbers', 0.09524),
# ('systems', 0.08477), ('nonstrict inequations', 0.0811), ('solutions', 0.06182), ('algorithms', 0.06182),
# ('compatibility', 0.04889), ('components', 0.0369), ('sets', 0.03342), ('construction', 0.03342),
# ('criteria', 0.02792), ('types', 0.02516), ('system', 0.02459)]
def test_keywords_extraction3(self):
"""
test with different pagerank algorithms
"""
example_abstract = "Compatibility of systems of linear constraints over the set of natural numbers. " \
"Criteria of compatibility of a system of linear Diophantine equations, strict inequations, " \
"and nonstrict inequations are considered. Upper bounds for components of a minimal set of " \
"solutions and algorithms of construction of minimal generating sets of solutions for all " \
"types of systems are given. These criteria and the corresponding algorithms for " \
"constructing a minimal supporting set of solutions can be used in solving all the " \
"considered types systems and systems of mixed types."
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="pagerank_numpy", weight_comb="sum")
print("ranked terms computed with 'pagerank_numpy': ", results)
print("top_vertices computed with 'pagerank_numpy': ", top_vertices)
assert len(results) == 13
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="pagerank_scipy", weight_comb="sum")
print("ranked terms computed with 'pagerank_scipy': ", results)
print("top_vertices computed with 'pagerank_scipy': ", top_vertices)
assert len(results) == 13
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="betweenness_centrality", weight_comb="sum")
print("ranked terms computed with 'betweenness_centrality': ", results)
print("top_vertices computed with 'betweenness_centrality': ", top_vertices)
assert len(results) == 11
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="degree_centrality", weight_comb="sum")
print("ranked terms computed with 'degree_centrality': ", results)
print("top_vertices computed with 'degree_centrality': ", top_vertices)
assert top_vertices[0][0] == 'systems'
assert top_vertices[1][0] == 'linear'
assert top_vertices[2][0] == 'minimal' or top_vertices[2][0] == 'set'
# top 30% results is not stable for degree_centrality
# assert len(results) == 11 or len(results) == 12
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="hits", weight_comb="sum")
print("ranked terms computed with 'hits': ", results)
print("top_vertices computed with 'hits': ", top_vertices)
assert top_vertices[0][0] == 'systems'
assert top_vertices[1][0] == 'linear'
assert top_vertices[2][0] == 'mixed' or top_vertices[2][0] == 'types'
assert top_vertices[4][0] == 'equations'
assert len(results) == 7 or len(results) == 8
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="closeness_centrality", weight_comb="sum")
print("ranked terms computed with 'closeness_centrality': ", results)
print("top_vertices computed with 'closeness_centrality': ", top_vertices)
assert len(results) == 10 or len(results) == 11
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="edge_betweenness_centrality", weight_comb="sum")
print("ranked terms computed with 'edge_betweenness_centrality': ", results)
print("top_vertices computed with 'edge_betweenness_centrality': ", top_vertices)
assert len(results) == 8 or len(results) == 10
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="eigenvector_centrality", max_iter=1000, weight_comb="sum")
print("ranked terms computed with 'eigenvector_centrality': ", results)
print("top_vertices computed with 'eigenvector_centrality': ", top_vertices)
assert len(results) == 7 or len(results) == 8
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="katz_centrality", weight_comb="sum")
print("ranked terms computed with 'katz_centrality': ", results)
print("top_vertices computed with 'katz_centrality': ", top_vertices)
assert len(results) == 11
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="communicability_betweenness",
window=5, weighted=False, weight_comb="sum")
print("ranked terms computed with 'communicability_betweenness': ", results)
print("top_vertices computed with 'communicability_betweenness': ", top_vertices)
print(len(results))
assert results[0][0] == 'minimal supporting set'
assert results[1][0] == 'minimal set'
assert results[2][0] == 'linear diophantine equations'
assert len(results) == 12
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="current_flow_closeness",
weighted=False, weight_comb="sum")
print("ranked terms computed with 'current_flow_closeness': ", results)
print("top_vertices computed with 'current_flow_closeness': ", top_vertices)
print(len(results))
assert len(results) == 9
assert results[0][0] == 'minimal supporting set'
assert results[1][0] == 'minimal set'
assert top_vertices[0][0] == 'set'
assert top_vertices[1][0] == 'minimal'
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="current_flow_betweenness",
weighted=False, weight_comb="sum")
print("ranked terms computed with 'current_flow_betweenness': ", results)
print("top_vertices computed with 'current_flow_betweenness': ", top_vertices)
print(len(results))
assert len(results) == 11
assert top_vertices[0][0] == 'systems'
assert top_vertices[1][0] == 'linear'
assert top_vertices[2][0] == 'set'
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="edge_current_flow_betweenness",
weighted=False, weight_comb="sum")
print("ranked terms computed with 'edge_current_flow_betweenness': ", results)
print("top_vertices computed with 'edge_current_flow_betweenness': ", top_vertices)
print(len(results))
assert len(results) == 10 or len(results) == 11
assert top_vertices[0][0] == 'systems' or top_vertices[0][0] == 'linear'
assert top_vertices[1][0] == 'linear' or top_vertices[1][0] == 'systems'
assert top_vertices[2][0] == 'strict' or top_vertices[2][0] == 'equations'
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="load_centrality",
weighted=False, weight_comb="sum")
print("ranked terms computed with 'load_centrality': ", results)
print("top_vertices computed with 'load_centrality': ", top_vertices)
print(len(results))
assert len(results) == 11
assert results[0][0] == 'linear diophantine equations'
assert results[1][0] == 'linear constraints'
assert results[2][0] == 'systems' or results[2][0] == 'types systems'
assert top_vertices[0][0] == 'linear'
assert top_vertices[1][0] == 'systems'
assert top_vertices[2][0] == 'equations'
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="clustering_coefficient",
weighted=False, weight_comb="sum")
print("ranked terms computed with 'clustering_coefficient': ", results)
print("top_vertices computed with 'clustering_coefficient': ", top_vertices)
assert results[0][0] == 'mixed types'
assert results[1][0] == 'linear diophantine equations'
assert results[2][0] == 'minimal supporting set'
assert len(results) == 9
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="TeRGraph",
weighted=False, weight_comb="sum")
print("ranked terms computed with 'TeRGraph': ", results)
print("top_vertices computed with 'TeRGraph': ", top_vertices)
assert len(results) == 7
assert results[0][0] == 'nonstrict inequations'
assert results[1][0] == 'natural numbers'
assert results[2][0] == 'corresponding algorithms'
coreness_results, coreness_top_vertices = keywords_extraction(example_abstract, top_p = 1, solver="coreness",
weighted=False, weight_comb="sum")
print("ranked terms computed with 'coreness': ", coreness_results)
print("top_vertices computed with 'coreness': ", coreness_top_vertices)
coreness_results_dict = {k:v for k, v in coreness_results}
coreness_top_vertices_dict = {k:v for k, v in coreness_top_vertices}
assert len(coreness_results) == 23
assert coreness_results_dict['minimal supporting set'] == 6
assert coreness_results_dict['linear diophantine equations'] == 6
assert coreness_results_dict['types systems'] == 4
assert coreness_results_dict['minimal set'] == 4
assert coreness_top_vertices_dict['minimal'] == 2
assert coreness_top_vertices_dict['sets'] == 2
assert coreness_top_vertices_dict['diophantine'] == 2
assert coreness_top_vertices_dict['equations'] == 2
assert coreness_top_vertices_dict['criteria'] == 1
assert coreness_top_vertices_dict['upper'] == 0
assert coreness_top_vertices_dict['components'] == 0
mean_coreness_results, coreness_top_vertices = keywords_extraction(example_abstract, top_p = 1, solver="coreness",
weighted=False, weight_comb="avg")
print("ranked term phrases computed with Mean coreness: ", mean_coreness_results)
mean_coreness_results_dict = {k:v for k, v in mean_coreness_results}
assert mean_coreness_results_dict['types'] == 2
assert mean_coreness_results_dict['minimal supporting set'] == 2
assert mean_coreness_results_dict['components'] == 0
assert mean_coreness_results_dict['linear diophantine equations'] == 2
with self.assertRaises(ValueError) as context:
keywords_extraction(example_abstract, top_p = 0.3, solver="my_pagerank")
self.assertTrue("The node weighting solver supports only pagerank, "
"pagerank_numpy, pagerank_scipy, betweenness_centrality, "
"edge_betweenness_centrality, degree_centrality, closeness_centrality, hits, "
"eigenvector_centrality, katz_centrality, communicability_betweenness, "
"current_flow_closeness, current_flow_betweenness, edge_current_flow_betweenness, "
"load_centrality,clustering_coefficient,TeRGraph,coreness got 'my_pagerank'" in context.exception)
def test_neighborhood_size(self):
example_abstract = "Compatibility of systems of linear constraints over the set of natural numbers. " \
"Criteria of compatibility of a system of linear Diophantine equations, strict inequations, " \
"and nonstrict inequations are considered. Upper bounds for components of a minimal set of " \
"solutions and algorithms of construction of minimal generating sets of solutions for all " \
"types of systems are given. These criteria and the corresponding algorithms for " \
"constructing a minimal supporting set of solutions can be used in solving all the " \
"considered types systems and systems of mixed types."
mean_neighbors_results, mean_neighbors_vertices = keywords_extraction(example_abstract, top_p = 1, solver="neighborhood_size",
weighted=False, weight_comb="avg")
print("ranked term phrases computed with Mean neighborhood size: ", mean_neighbors_results)
mean_neighbors_results_dict = {k:v for k, v in mean_neighbors_results}
mean_neighbors_vertices_dict = {k:v for k, v in mean_neighbors_vertices}
print(len(mean_neighbors_results))
assert len(mean_neighbors_results) == 23
assert mean_neighbors_results_dict["set"] == 4.0
assert mean_neighbors_results_dict["minimal"] == 4.0
assert mean_neighbors_results_dict["minimal set"] == 4.0
assert mean_neighbors_results_dict["linear constraints"] == 3.0
assert mean_neighbors_results_dict["solutions"] == 3.0
assert mean_neighbors_results_dict["nonstrict inequations"] == 1.5
assert mean_neighbors_results_dict["linear diophantine equations"] == 3.33333
print(mean_neighbors_vertices_dict)
assert mean_neighbors_vertices_dict["linear"] == 5
assert mean_neighbors_vertices_dict["set"] == 4
assert mean_neighbors_vertices_dict["systems"] == 4
assert mean_neighbors_vertices_dict["minimal"] == 4
assert mean_neighbors_vertices_dict["algorithms"] == 3
assert mean_neighbors_vertices_dict["compatibility"] == 2
def test_keywords_extraction_with_mwt_scoring(self):
example_abstract = "Compatibility of systems of linear constraints over the set of natural numbers. " \
"Criteria of compatibility of a system of linear Diophantine equations, strict inequations, " \
"and nonstrict inequations are considered. Upper bounds for components of a minimal set of " \
"solutions and algorithms of construction of minimal generating sets of solutions for all " \
"types of systems are given. These criteria and the corresponding algorithms for " \
"constructing a minimal supporting set of solutions can be used in solving all the " \
"considered types systems and systems of mixed types."
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="avg")
print("extracted keywords with avg weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "systems" == term_list[0]
assert "set" == term_list[1]
assert "minimal set" == term_list[2]
assert "minimal" == term_list[3]
assert "linear diophantine equations" == term_list[4]
assert "types systems" == term_list[5]
assert "minimal supporting set" == term_list[6]
assert "linear constraints" == term_list[7]
assert "algorithms" == term_list[8]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="norm_avg")
print("extracted keywords with norm_avg weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "systems" == term_list[0]
assert "set" == term_list[1]
assert "minimal" == term_list[2]
assert "algorithms" == term_list[3]
assert "solutions" == term_list[4]
assert "minimal set" == term_list[5]
assert "types systems" == term_list[6]
assert "linear constraints" == term_list[7]
assert "strict inequations" == term_list[8]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="log_norm_avg")
print("extracted keywords with log_norm_avg weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "minimal set" == term_list[0]
assert "types systems" == term_list[1]
assert "linear constraints" == term_list[2]
assert "strict inequations" == term_list[3]
assert "corresponding algorithms" == term_list[4]
assert "linear diophantine equations" == term_list[5]
assert "nonstrict inequations" == term_list[6]
assert "systems" == term_list[7]
assert "minimal supporting set" == term_list[8]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="gaussian_norm_avg")
print("extracted keywords with gaussian_norm_avg weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "systems" == term_list[0]
assert "set" == term_list[1]
assert "minimal set" == term_list[2]
assert "minimal" == term_list[3]
assert "linear diophantine equations" == term_list[4]
assert "types systems" == term_list[5]
assert "minimal supporting set" == term_list[6]
assert "linear constraints" == term_list[7]
assert "algorithms" == term_list[8]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="sum")
print("extracted keywords with sum weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "linear diophantine equations" == term_list[0]
assert "minimal supporting set" == term_list[1]
assert "minimal set" == term_list[2]
assert "types systems" == term_list[3]
assert "linear constraints" == term_list[4]
assert "strict inequations" == term_list[5]
assert "systems" == term_list[6]
assert "corresponding algorithms" == term_list[7]
assert "nonstrict inequations" == term_list[8]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="norm_sum")
print("extracted keywords with norm_sum weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "systems" == term_list[0]
assert "set" == term_list[1]
assert "minimal set" == term_list[2]
assert "minimal" == term_list[3]
assert "linear diophantine equations" == term_list[4]
assert "types systems" == term_list[5]
assert "minimal supporting set" == term_list[6]
assert "linear constraints" == term_list[7]
assert "algorithms" == term_list[8]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="log_norm_sum")
print("extracted keywords with log_norm_sum weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "minimal set" == term_list[0]
assert "types systems" == term_list[1]
assert "linear diophantine equations" == term_list[2]
assert "linear constraints" == term_list[3]
assert "minimal supporting set" == term_list[4]
assert "strict inequations" == term_list[5]
assert "corresponding algorithms" == term_list[6]
assert "nonstrict inequations" == term_list[7]
assert "systems" == term_list[8]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="gaussian_norm_sum")
print("extracted keywords with gaussian_norm_sum weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "linear diophantine equations" == term_list[0]
assert "minimal supporting set" == term_list[1]
assert "minimal set" == term_list[2]
assert "types systems" == term_list[3]
assert "linear constraints" == term_list[4]
assert "strict inequations" == term_list[5]
assert "systems" == term_list[6]
assert "corresponding algorithms" == term_list[7]
assert "nonstrict inequations" == term_list[8]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="max")
print("extracted keywords with max weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "linear constraints" == term_list[0] or "linear diophantine equations" == term_list[0]
assert "linear diophantine equations" == term_list[1] or "linear constraints" == term_list[1]
assert "systems" == term_list[2] or "types systems" == term_list[2]
assert "systems" == term_list[3] or "types systems" == term_list[3]
assert "set" == term_list[4] or "minimal set" == term_list[4] or "minimal supporting set" == term_list[4]
assert "minimal set" == term_list[5] or "set" == term_list[5] or "minimal supporting set" == term_list[5]
assert "minimal supporting set" == term_list[6] or "minimal set" == term_list[6] or "set" == term_list[6]
assert "minimal" == term_list[7]
assert "algorithms" == term_list[8] or "corresponding algorithms" == term_list[8]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="norm_max")
print("extracted keywords with norm_max weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "systems" == term_list[0]
assert "set" == term_list[1]
assert "minimal" == term_list[2]
assert "algorithms" == term_list[3]
assert "solutions" == term_list[4]
assert "linear constraints" == term_list[5]
assert "types systems" == term_list[6]
assert "minimal set" == term_list[7]
assert "linear diophantine equations" == term_list[8]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="log_norm_max")
print("extracted keywords with log_norm_max weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "linear constraints" == term_list[0]
assert "types systems" == term_list[1]
assert "minimal set" == term_list[2]
assert "linear diophantine equations" == term_list[3]
assert "corresponding algorithms" == term_list[4]
assert "nonstrict inequations" == term_list[5] or "strict inequations" == term_list[5]
assert "strict inequations" == term_list[6] or "nonstrict inequations" == term_list[6]
assert "minimal supporting set" == term_list[7]
assert "systems" == term_list[8]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="gaussian_norm_max")
print("extracted keywords with gaussian_norm_max weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "linear constraints" == term_list[0]
assert "linear diophantine equations" == term_list[1]
assert "systems" == term_list[2] or "types systems" == term_list[2]
assert "types systems" == term_list[3] or "systems" == term_list[3]
assert "set" == term_list[4] or "minimal set" == term_list[4]
assert "minimal set" == term_list[5] or "set" == term_list[5]
assert "minimal supporting set" == term_list[6]
assert "minimal" == term_list[7]
assert "algorithms" == term_list[8] or "corresponding algorithms" == term_list[8]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="len_log_norm_max")
print("extracted keywords with len_log_norm_max weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "linear diophantine equations" == term_list[0]
assert "minimal supporting set" == term_list[1]
assert "linear constraints" == term_list[2]
assert "types systems" == term_list[3]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="len_log_norm_avg")
print("extracted keywords with len_log_norm_avg weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "linear diophantine equations" == term_list[0]
assert "minimal supporting set" == term_list[1]
assert "minimal set" == term_list[2]
assert "types systems" == term_list[3]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="len_log_norm_sum")
print("extracted keywords with len_log_norm_sum weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "linear diophantine equations" == term_list[0]
assert "minimal supporting set" == term_list[1]
assert "minimal set" == term_list[2]
assert "types systems" == term_list[3]
assert "linear constraints" == term_list[4]
with self.assertRaises(ValueError) as context:
keywords_extraction(example_abstract, top_p = 0.3, weight_comb="my_norm")
self.assertTrue("Unspported weight_comb 'my_norm'! "
"Options are 'avg', 'norm_avg', 'log_norm_avg', 'gaussian_norm_avg', 'sum', "
"'norm_sum', 'log_norm_sum', 'gaussian_norm_sum', 'max', 'norm_max',"
" 'log_norm_max', 'gaussian_norm_max', "
"'len_log_norm_max', 'len_log_norm_avg', 'len_log_norm_sum'. " in context.exception)
def test_keywords_extraction_with_gcvalue(self):
example_abstract = "Compatibility of systems of linear constraints over the set of natural numbers. " \
"Criteria of compatibility of a system of linear Diophantine equations, strict inequations, " \
"and nonstrict inequations are considered. Upper bounds for components of a minimal set of " \
"solutions and algorithms of construction of minimal generating sets of solutions for all " \
"types of systems are given. These criteria and the corresponding algorithms for " \
"constructing a minimal supporting set of solutions can be used in solving all the " \
"considered types systems and systems of mixed types."
gcvalue_results, gcvalue_top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="gcvalue", workers=2)
print("GCValue results: ", gcvalue_results)
def test_keywords_extraction_from_segmented_corpus(self):
example_user_defined_context_corpus = [["Compatibility", "of", "systems", "of", "linear", "constraints",
"over", "the", "set", "of", "natural", "numbers",".",
"Criteria", "of", "compatibility", "of", "a", "system", "of",
"linear", "Diophantine", "equations", ",", "strict", "inequations", ",",
"and", "nonstrict", "inequations", "are", "considered", "."],
["Upper", "bounds", "for", "components", "of", "a", "minimal", "set",
"of", "solutions", "and","algorithms","of", "construction", "of",
"minimal", "generating", "sets", "of", "solutions", "for", "all",
"types", "of", "systems", "are", "given", "."],
["These", "criteria", "and", "the", "corresponding", "algorithms",
"for", "constructing", "a", "minimal", "supporting", "set", "of",
"solutions", "can", "be", "used", "in", "solving", "all", "the",
"considered", "types", "systems", "and", "systems", "of", "mixed",
"types","."]]
from jgtextrank.core import keywords_extraction_from_segmented_corpus
results, top_vertices = keywords_extraction_from_segmented_corpus(example_user_defined_context_corpus, top_p=1, weight_comb="sum")
print("extracted keywords with user defined corpus context:"+ str(results))
print("top_vertices: ", top_vertices)
assert 23 == len(results)
term_list = [term[0] for term in results]
assert "linear diophantine equations" in term_list
assert "minimal supporting set" in term_list
assert "minimal set" in term_list
assert "types systems" in term_list
assert "linear constraints" in term_list
assert "strict inequations" in term_list
assert "systems" in term_list
assert "corresponding algorithms" in term_list
assert "natural numbers" in term_list, "'natural numbers' is given more " \
"weights than the weight with computed in default sentential context."
assert "nonstrict inequations" in term_list
assert "mixed types" in term_list
assert "minimal" in term_list
assert 'set' in term_list
# [('linear diophantine equations', 0.17848), ('minimal supporting set', 0.16067),
# ('minimal set', 0.12723), ('types systems', 0.1143), ('linear constraints', 0.10842),
# ('strict inequations', 0.08805), ('systems', 0.07958), ('corresponding algorithms', 0.07575),
# ('natural numbers', 0.07384), ('nonstrict inequations', 0.07262),
# ('mixed types', 0.06943), ('minimal', 0.06362), ('set', 0.06361),
# ('algorithms', 0.05406), ('solutions', 0.04964), ('criteria', 0.03779),
# ('compatibility', 0.03606), ('construction', 0.0352), ('types', 0.03472),
# ('sets', 0.03405), ('system', 0.02125), ('upper', 0.00644), ('components', 0.00644)]
@ignore_warnings
def test_keywords_extraction_from_tagged_corpus(self):
from jgtextrank.core import keywords_extraction_from_tagged_corpus
pos_tagged_corpus= [[('Compatibility', 'NN'), ('of', 'IN'), ('systems', 'NNS'), ('of', 'IN'),
('linear', 'JJ'), ('constraints', 'NNS'), ('over', 'IN'), ('the', 'DT'),
('set', 'NN'), ('of', 'IN'), ('natural', 'JJ'), ('numbers', 'NNS'), ('.', '.')],
[('Criteria', 'NNS'), ('of', 'IN'), ('compatibility', 'NN'), ('of', 'IN'),
('a', 'DT'), ('system', 'NN'), ('of', 'IN'), ('linear', 'JJ'),
('Diophantine', 'NNP'), ('equations', 'NNS'), (',', ','), ('strict', 'JJ'),
('inequations', 'NNS'), (',', ','), ('and', 'CC'), ('nonstrict', 'JJ'),
('inequations', 'NNS'), ('are', 'VBP'), ('considered', 'VBN'), ('.', '.')],
[('Upper', 'NNP'), ('bounds', 'VBZ'), ('for', 'IN'), ('components', 'NNS'),
('of', 'IN'), ('a', 'DT'), ('minimal', 'JJ'), ('set', 'NN'), ('of', 'IN'),
('solutions', 'NNS'), ('and', 'CC'), ('algorithms', 'NN'), ('of', 'IN'),
('construction', 'NN'), ('of', 'IN'), ('minimal', 'JJ'), ('generating', 'VBG'),
('sets', 'NNS'), ('of', 'IN'), ('solutions', 'NNS'), ('for', 'IN'), ('all', 'DT'),
('types', 'NNS'), ('of', 'IN'), ('systems', 'NNS'), ('are', 'VBP'),
('given', 'VBN'), ('.', '.')],
[('These', 'DT'), ('criteria', 'NNS'), ('and', 'CC'), ('the', 'DT'),
('corresponding', 'JJ'), ('algorithms', 'NN'), ('for', 'IN'),
('constructing', 'VBG'), ('a', 'DT'), ('minimal', 'JJ'), ('supporting', 'VBG'),
('set', 'NN'), ('of', 'IN'), ('solutions', 'NNS'), ('can', 'MD'), ('be', 'VB'),
('used', 'VBN'), ('in', 'IN'), ('solving', 'VBG'), ('all', 'PDT'), ('the', 'DT'),
('considered', 'VBN'), ('types', 'NNS'), ('systems', 'NNS'), ('and', 'CC'),
('systems', 'NNS'), ('of', 'IN'), ('mixed', 'JJ'), ('types', 'NNS'), ('.', '.')]]
results, top_vertices = keywords_extraction_from_tagged_corpus(pos_tagged_corpus, top_p = 0.3, weight_comb="sum")
print()
print("extracted keywords from pre-tagged content:"+ str(results))
print("top_vertices: ", top_vertices)
print("len(results): ", len(results))
assert 10 == len(results), "check possible changes/errors in solver and hyperparameter, e.g., num_iter, tol"
term_list = [term[0] for term in results]
assert "linear diophantine equations" in term_list
assert "types systems" in term_list
assert "linear constraints" in term_list
assert "minimal set" in term_list
assert "systems" in term_list
assert "corresponding algorithms" in term_list
assert "algorithms" in term_list
assert "set" in term_list
assert "solutions" in term_list
assert "minimal" in term_list
# after lemmatisation
results, top_vertices = keywords_extraction_from_tagged_corpus(pos_tagged_corpus, top_p = 0.3, lemma=True)
print("extracted keywords from pre-tagged content after lemmatisation: ", results)
print("top_vertices after lemmatisation: ", top_vertices)
assert len(results) == 11
term_list = [term[0] for term in results]
assert "linear diophantine equation" in term_list
assert "type system" in term_list
assert "minimal set" in term_list
assert "linear constraint" in term_list
assert "strict inequations" in term_list
assert "corresponding algorithm" in term_list
assert "system" in term_list
assert "nonstrict inequations" in term_list
assert "natural number" in term_list
assert "algorithm" in term_list
assert "set" in term_list
    def test_kea_with_text_formulate(self):
        """
        This is to test the content with formulate
        where simply splits the term units with space may have the conflicts with the original tokeniser

        The snippet contains the inline formula token
        'by(24)cb⁎ =∫01〈cb⁎〉(z⁎)dz⁎' which itself contains a space and would
        break a naive whitespace re-tokenisation.
        :return:
        """
        from jgtextrank.core import _keywords_extraction_from_preprocessed_context
        # Pre-processed context: a list of (tokenised sentence, filtered
        # POS-tagged tokens) pairs taken from paper S0021999113005652.
        S0021999113005652_textsnippet = [(['it', 'be', 'interesting', 'to', 'quantify', 'the', 'effect', 'of',
                                           'the', 'schmidt', 'number', 'and', 'the', 'chemical', 'reaction',
                                           'rate', 'on', 'the', 'bulk', '-', 'mean', 'concentration', 'of', 'b', 'in', 'water', '.'],
                                          [('interesting', 'JJ'), ('effect', 'NNS'), ('schmidt', 'NNP'), ('number', 'NN'),
                                           ('chemical', 'JJ'), ('reaction', 'NN'), ('rate', 'NN'), ('bulk', 'JJ'),
                                           ('concentration', 'NN'), ('water', 'NN')]),
                                         (['the', 'datum', 'could', 'present', 'important', 'information', 'on',
                                           'evaluate', 'the', 'environmental', 'impact', 'of', 'the', 'degradation',
                                           'product', 'of', 'b', ',', 'as', 'well', 'as', 'acidification', 'of',
                                           'water', 'by', 'the', 'chemical', 'reaction', '.'],
                                          [('datum', 'NNS'), ('important', 'JJ'), ('information', 'NN'),
                                           ('environmental', 'JJ'), ('impact', 'NNS'), ('degradation', 'NN'), ('product', 'NN'),
                                           ('acidification', 'NN'), ('water', 'NN'), ('chemical', 'JJ'), ('reaction', 'NN')]),
                                         (['here', ',', 'the', 'bulk', '-', 'mean', 'concentration', 'of', 'b',
                                           'be', 'define', 'by(24)cb⁎ =∫01〈cb⁎〉(z⁎)dz⁎', 'fig', '.'],
                                          [('bulk', 'JJ'), ('concentration', 'NN'), ('by(24)cb⁎ =∫01〈cb⁎〉(z⁎)dz⁎', 'NNP'), ('fig', 'NNP')]),
                                         (['15', 'depict', 'the', 'effect', 'of', 'the', 'schmidt', 'and', 'the',
                                           'chemical', 'reaction', 'rate', 'on', 'the', 'bulk', '-', 'mean', 'concentration', 'cb⁎ .'],
                                          [('depict', 'NNS'), ('effect', 'NN'), ('schmidt', 'NNP'), ('chemical', 'JJ'),
                                           ('reaction', 'NN'), ('rate', 'NN'), ('bulk', 'JJ'), ('concentration', 'NN')]),
                                         (['it', 'be', 'worth', 'to', 'mention', 'here', 'that', 'the', 'bulk', '-', 'mean',
                                           'concentration', 'of', 'b', 'reach', 'approximately', '0.6', 'as', 'the', 'chemical',
                                           'reaction', 'rate', 'and', 'the', 'schmidt', 'number', 'increase', 'to',
                                           'infinite', ',', 'and', 'the', 'concentration', 'be', 'small', 'than', 'the',
                                           'equilibrium', 'concentration', 'of', 'a', 'at', 'the', 'interface', '.'],
                                          [('worth', 'JJ'), ('bulk', 'JJ'), ('concentration', 'NN'), ('chemical', 'JJ'),
                                           ('reaction', 'NN'), ('rate', 'NN'), ('schmidt', 'NNP'), ('number', 'NN'),
                                           ('increase', 'NN'), ('concentration', 'NN'), ('equilibrium', 'NN'), ('concentration', 'NN'), ('interface', 'NN')]),
                                         (['this', 'figure', 'indicate', 'that', 'progress', 'of', 'the',
                                           'chemical', 'reaction', 'be', 'somewhat', 'interfere', 'by', 'turbulent',
                                           'mix', 'in', 'water', ',', 'and', 'the', 'efficiency', 'of', 'the',
                                           'chemical', 'reaction', 'be', 'up', 'to', 'approximately', '60', '%', '.'],
                                          [('figure', 'NN'), ('progress', 'NN'), ('chemical', 'JJ'), ('reaction', 'NN'),
                                           ('turbulent', 'JJ'), ('water', 'NN'), ('efficiency', 'NN'), ('chemical', 'JJ'), ('reaction', 'NN')]),
                                         (['the', 'efficiency', 'of', 'the', 'chemical', 'reaction', 'in', 'water',
                                           'will', 'be', 'a', 'function', 'of', 'the', 'reynolds', 'number', 'of',
                                           'the', 'water', 'flow', ',', 'and', 'the', 'efficiency', 'could', 'increase',
                                           'as', 'the', 'reynolds', 'number', 'increase', '.'],
                                          [('efficiency', 'NN'), ('chemical', 'JJ'), ('reaction', 'NN'), ('water', 'NN'),
                                           ('function', 'NN'), ('reynolds', 'NNP'), ('number', 'NN'), ('water', 'NN'),
                                           ('flow', 'NN'), ('efficiency', 'NN'), ('reynolds', 'NNP'), ('number', 'NN'), ('increase', 'NNS')]),
                                         (['we', 'need', 'an', 'extensive', 'investigation', 'on', 'the', 'efficiency',
                                           'of', 'the', 'aquarium', 'chemical', 'reaction', 'in', 'the', 'near',
                                           'future', 'to', 'extend', 'the', 'result', 'of', 'this', 'study',
                                           'further', 'to', 'establish', 'practical', 'modelling', 'for', 'the',
                                           'gas', 'exchange', 'between', 'air', 'and', 'water', '.'],
                                          [('extensive', 'JJ'), ('investigation', 'NN'), ('efficiency', 'NN'),
                                           ('aquarium', 'JJ'), ('chemical', 'NN'), ('reaction', 'NN'),
                                           ('future', 'NN'), ('result', 'NNS'), ('study', 'NN'), ('practical', 'JJ'),
                                           ('modelling', 'NN'), ('gas', 'NN'), ('exchange', 'NN'), ('air', 'NN'), ('water', 'NN')])]
        results, top_vertices = _keywords_extraction_from_preprocessed_context(S0021999113005652_textsnippet, top_p = 1, weight_comb="sum")
        print("extracted keywords from pre-tagged S0021999113005652 text snippet:"+ str(results))
        print("top_vertices: ", top_vertices)
        print("total key terms", len(results))
        # NOTE(review): `results` is indexed by key below, so this internal API
        # appears to return a mapping rather than the ranked tuple list the
        # public functions return — confirm against jgtextrank.core.
        assert len(results) == 37
        assert results["schmidt number"] == 0.06231
        assert results["chemical reaction rate"] == 0.10836
        assert results["water"] == 0.05561
        assert results["by(24)cb⁎ =∫01〈cb⁎〉(z⁎)dz⁎ fig"] == 0.06098
        assert results["water flow"] == 0.07201
        assert results["aquarium chemical reaction"] == 0.10836
def test_visualise_cooccurrence_graph(self):
"""
produce the co-occurrence graph close to the example picture in original paper
:return: None
"""
example_tokenised_corpus_context = [["Compatibility", "of", "systems", "of", "linear", "constraints",
"over", "the", "set", "of", "natural", "numbers", "." ,
"Criteria", "of", "compatibility", "of", "a", "system", "of",
"linear", "Diophantine", "equations", "strict", "inequations", ",",
"and", "nonstrict", "inequations", "are", "considered",".", "Upper",
"bounds", "for", "components","of", "a", "minimal", "set", "of",
"solutions", "and", "algorithms", "of", "construction", "of",
"minimal", "generating", "sets", "of", "solutions", "for", "all",
"types", "of", "systems", "are", "given", ".", "These", "criteria",
"and", "the", "corresponding", "algorithms", "for",
"constructing", "a", "minimal", "supporting", "set", "of",
"solutions", "can", "be", "used", "in", "solving", "all", "the",
"considered", "types", "systems", "and", "systems", "of", "mixed",
"types", "."]]
# try to include verbs into the graph
custom_categories = {'NNS', 'NNP', 'NN', 'JJ', 'VBZ'}
# manually filter few nodes not appearing in the given example of original paper
stop_words={'set', 'mixed', 'corresponding', 'supporting'}
preprocessed_context = preprocessing_tokenised_context(example_tokenised_corpus_context,
syntactic_categories=custom_categories,
stop_words=stop_words)
cooccurrence_graph, original_tokenised_context = build_cooccurrence_graph(preprocessed_context)
connected_components = list(nx.connected_components(cooccurrence_graph))
print("visualising connected components:", connected_components)
assert len(connected_components) == 3
pos = nx.spring_layout(cooccurrence_graph,k=0.20,iterations=20)
nx.draw_networkx(cooccurrence_graph, pos=pos, arrows=True, with_labels=True)
plt.show()
plt.savefig("test_sample_cooccurrence_graph.png") # save as png | StarcoderdataPython |
# Beginner exercises: console input, arithmetic, and type inspection.

# Greet the user by name.
user_name = input("Write your name: ")
print("Hello", user_name, "!")

# Gross pay from hours worked and hourly rate, printed to two decimals.
worked_hours = int(input("Enter Hours: "))
hourly_rate = float(input("Enter Rate: "))
print("Pay: {0:.2f}".format(worked_hours * hourly_rate))

# Division in Python 3 always yields a float; inspect the resulting types.
w = 17
h = 12.0
print(w / 2)
print(type(w / 2))
print(w / 2.0)
print(type(w / 2.0))
print(h / 3)
print(type(h / 3))
print(1 + 2 * 5)
print(type(1 + 2 * 5))

# Celsius -> Fahrenheit using an exact 9/5 fractional factor.
from fractions import Fraction
celsius = float(input("Enter degrees in Celsius: "))
fahrenheit = celsius * Fraction(9, 5) + 32
print("{0:.2f} C⁰ is {1:.2f} F⁰".format(celsius, fahrenheit))

# The same pay computation, twice: once inline, once via named variables.
print(35.0 * 12.50)
hours = 35.0
rate = 12.50
pay = hours * rate
print(pay)

# Echo a name read with a newline-terminated prompt.
name = input('What is your name?\n')
print(name)

# '+' adds numbers but concatenates strings.
print(10 + 15)
print('100' + '110')
print('Ansar' + 'Ahmad')

# '*' repeats a string.
print('test' * 3)

# Operator precedence: multiplication binds tighter than subtraction.
print(20 + 32 - (40 * 5) * (15 - 7))
import numpy as np
import pandas as pd
anime = pd.read_csv('./data/AnimeData.csv')
def PreProcessing(anime):
    """Clean the raw anime table and add helper columns.

    Parameters
    ----------
    anime : pandas.DataFrame
        Must contain a 'Genre' column holding stringified lists
        (e.g. "['action', 'comedy']") and a numeric 'Weighted_Score' column.

    Returns
    -------
    pandas.DataFrame
        The same frame, mutated in place and returned: rows with a zero
        score or empty genre dropped, a fresh 0..n-1 index plus an 'Index'
        column, 'Genre' parsed into real Python lists, and a 'Joined'
        column of "<genres> <score>" strings.
    """
    # Bug fix: `ast` was never imported, and the bare `except: pass` below
    # silently swallowed the resulting NameError, so genre parsing was a no-op.
    import ast

    # Treat zero scores and empty genre markers as missing, then drop them.
    anime['Weighted_Score'] = anime['Weighted_Score'].apply(
        lambda x: np.nan if x == 0 else x)
    anime['Genre'] = anime['Genre'].apply(
        lambda x: np.nan if x == 0 or x == '[]' else x)
    anime.dropna(inplace=True)

    # Re-index 0..n-1 and keep the position in an 'Index' column as well.
    anime['index'] = range(len(anime))
    anime['Index'] = range(len(anime))
    anime.set_index('index', inplace=True)

    # Parse the stringified genre lists into real Python lists.
    # Use .at (not chained indexing) so the write is not lost under pandas
    # copy-on-write, and catch only parse errors instead of everything.
    for i in range(len(anime)):
        try:
            anime.at[i, 'Genre'] = ast.literal_eval(anime.at[i, 'Genre'])
        except (ValueError, SyntaxError, TypeError):
            pass  # leave unparseable entries untouched (best effort)

    # Build a single text field combining genres and score.
    anime['Joined'] = ' '
    for i in range(len(anime)):
        anime.at[i, 'Joined'] = (' '.join(anime.at[i, 'Genre'])
                                 + ' ' + str(anime.at[i, 'Weighted_Score']))
    return anime
# -*- coding:utf-8 -*-
import rospy
import math
import time
import numpy as np
np.random.seed(1)
from nav_msgs.msg import Odometry
from tf_rl.envs import gazebo_env
from geometry_msgs.msg import Twist, Pose, Quaternion, Vector3, Point
from std_srvs.srv import Empty
from gazebo_msgs.srv import SetModelState, GetModelState
from gazebo_msgs.msg import ModelState
# 20181025 输入归一化,考虑局部感知域,比如10m,超过就取10m
# 20181113 考虑队形控制,修改碰撞重置机制
from sensor_msgs.msg import LaserScan
from gym.utils import seeding
class Listen_class():
    """Caches the latest laser-scan and odometry message for each robot."""

    def __init__(self, nor):
        """Subscribe to /robotN/scan and /robotN/odom for N in [0, nor)."""
        self.range_list = [[] for _ in range(nor)]
        self.odom_list = [[] for _ in range(nor)]
        for robot_id in range(nor):
            rospy.Subscriber('/robot%d/scan' % robot_id, LaserScan,
                             self.scan_callback, robot_id)
            rospy.Subscriber('/robot%d/odom' % robot_id, Odometry,
                             self.odom_callback, robot_id)

    def scan_callback(self, msg, args):
        # Store the most recent laser ranges for robot index `args`.
        self.range_list[args] = msg.ranges

    def odom_callback(self, msg, args):
        # Store the most recent odometry message for robot index `args`.
        self.odom_list[args] = msg
class Block:
    """Lightweight record pairing a model name with a relative entity name."""

    def __init__(self, name, relative_entity_name):
        # Stored with a leading underscore to mark them as internal.
        self._name = name
        self._relative_entity_name = relative_entity_name
class GazeboMultiJackalLidarEnv8(gazebo_env.GazeboEnv):
    """Gazebo environment for 8 Jackal robots with lidar-based observations.

    Each robot is commanded on /robotN/cmd_vel and observed through
    /robotN/scan and /robotN/odom (cached by ``Listen_class``).  Rewards
    combine a collision penalty, an angular-velocity penalty, a goal bonus
    and a shared goal-distance term; a robot is teleported back to its
    start pose after 20 consecutive colliding steps.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source; the per-robot reward logic is assumed to sit at the robot-loop
    level (one reward entry per robot) — confirm against upstream history.
    """

    def __init__(self):
        # Launch the simulation with the given launchfile name
        gazebo_env.GazeboEnv.__init__(self, "/home/pygmalionchen/PycharmProjects/treasure/tf_rl/envs/assets/launch/multi_goal.launch")
        self.vel_pub = []
        # alternative launchfile: /home/pygmalionchen/PycharmProjects/treasure/tf_rl/envs/assets/launch/one_robot_world.launch
        # Local sensing range in metres; lidar readings and goal distances
        # are clipped to this value before normalisation.
        self.max_range_dis = 10
        self.vel_pub.append(rospy.Publisher('/robot0/cmd_vel', Twist, queue_size=5))
        self.vel_pub.append(rospy.Publisher('/robot1/cmd_vel', Twist, queue_size=5))
        self.vel_pub.append(rospy.Publisher('/robot2/cmd_vel', Twist, queue_size=5))
        self.vel_pub.append(rospy.Publisher('/robot3/cmd_vel', Twist, queue_size=5))
        self.vel_pub.append(rospy.Publisher('/robot4/cmd_vel', Twist, queue_size=5))
        self.vel_pub.append(rospy.Publisher('/robot5/cmd_vel', Twist, queue_size=5))
        self.vel_pub.append(rospy.Publisher('/robot6/cmd_vel', Twist, queue_size=5))
        self.vel_pub.append(rospy.Publisher('/robot7/cmd_vel', Twist, queue_size=5))
        # Gazebo service proxies for moving/querying models and pausing physics.
        self.setState = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
        self.getState = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
        self.unpause = rospy.ServiceProxy('/gazebo/unpause_physics', Empty)
        self.pause = rospy.ServiceProxy('/gazebo/pause_physics', Empty)
        self.reset_proxy = rospy.ServiceProxy('/gazebo/reset_world', Empty)
        self.nor = 8  # number of robots
        self.n = self.nor
        self.action_space = [2 for _ in range(self.nor)]  # 2-dim action (linear x, angular z) per robot
        self.init_pose = []  # filled by get_all_init_states()
        self.modelState = ModelState()
        self.reward_range = (-np.inf, np.inf)
        self._seed()
        self.listen_class = Listen_class(self.nor)
        # Per-robot consecutive-collision counters; in real tests we may not
        # be able to reset immediately on a collision, so resets are deferred.
        self.pong_list = np.zeros(self.nor)

    def calculate_observation(self, data1):  # determine whether there is a collision
        """Clip/normalise one lidar scan; also report whether a beam is within collision range."""
        min_range = 0.5  # metres: any beam shorter than this counts as a collision
        pong = False
        where_are_inf = np.isinf(data1)
        data1[where_are_inf] = self.max_range_dis  # maximum distance
        for i, item in enumerate(data1):
            if min_range > data1[i] > 0:
                pong = True
            data1[i] = min(self.max_range_dis, data1[i])
        # (translated) 36-beam variant: min-pooling keeps safety guarantees and
        # would allow a simpler network:
        # ranges = np.min(data1.reshape(36, 20), 1) / self.max_range_dis
        # Mean-pool groups of 4 beams into 180 normalised bins — assumes a
        # 720-beam scan; TODO confirm against the lidar configuration.
        ranges = np.mean((np.array(data1)).reshape(180, 4), 1) / self.max_range_dis
        return ranges, pong

    def _seed(self, seed=None):
        """Seed the environment RNG (gym convention); returns the seed list."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def _step(self, action, goal, first=False):
        """Apply one velocity command per robot, then return
        (states, params, rewards, dones, pong_list).

        action : per-robot [linear_x, angular_z]
        goal   : list of nor (x, y) goal points
        first  : when True, also records the robots' initial poses
        """
        vcmds = []
        for i in range(self.nor):  # num of robots
            vcmds.append(Twist())
            vcmds[i].linear.x = action[i][0]
            vcmds[i].angular.z = action[i][1]
        rospy.wait_for_service('/gazebo/unpause_physics')
        try:
            self.unpause()
        except rospy.ServiceException as e:
            print("/gazebo/unpause_physics service call failed")
        if first:
            rospy.sleep(0.2)
            self.get_all_init_states()
        for i in range(self.nor):
            self.vel_pub[i].publish(vcmds[i])
        # (translated) the command must persist for a while; note this is
        # simulation time — real time depends on the simulation rate factor.
        rospy.sleep(0.1/10)
        rospy.wait_for_service('/gazebo/pause_physics')
        try:
            self.pause()
        except rospy.ServiceException as e:
            print("/gazebo/pause_physics service call failed")
        state_list = []
        done_list = []
        reward_list = []
        param_list = []
        # pong_list = []
        # dis_matrix[i][j]: normalised distance from robot i to goal j.
        dis_matrix = np.zeros([self.nor, self.nor])
        for i in range(self.nor):
            state, pong = self.calculate_observation(np.array(self.listen_class.range_list[i]))
            state_list.append(state)
            done_list.append(False)
            param_list.append([])
            for j in range(self.nor):  # (translated) the nor goal points
                # pong_list.append(pong)
                goal_dis = np.sqrt((self.listen_class.odom_list[i].pose.pose.position.x - goal[j][0]) ** 2
                                   + (self.listen_class.odom_list[i].pose.pose.position.y - goal[j][1]) ** 2)
                # Clip to the sensing range and normalise into (0, 1].
                dis_matrix[i][j] = min(goal_dis, self.max_range_dis) / self.max_range_dis
                # Heading error: robot yaw (from quaternion z/w) minus bearing
                # to goal j, i.e. theta = yaw - arctan(delta y / delta x).
                theta = math.atan2(2 * self.listen_class.odom_list[i].pose.pose.orientation.z *
                                   self.listen_class.odom_list[i].pose.pose.orientation.w,
                                   1 - 2 * (self.listen_class.odom_list[i].pose.pose.orientation.z ** 2)) - \
                        math.atan2(goal[j][1] - self.listen_class.odom_list[i].pose.pose.position.y,
                                   goal[j][0] - self.listen_class.odom_list[i].pose.pose.position.x)
                # Wrap to (-pi, pi] then normalise by pi (3.14 approximations).
                if theta > 3.14:
                    theta -= 6.28
                elif theta < -3.14:
                    theta += 6.28
                theta /= 3.14
                param_list[i].append(dis_matrix[i][j])
                param_list[i].append(theta)
            # Collision penalty; after 20 consecutive colliding steps the
            # robot is teleported back to its initial pose.
            if pong:
                reward_list.append(-2)
                self.pong_list[i] += 1
                if self.pong_list[i] >= 20:
                    self.resetState(i)
                    self.pong_list[i] = 0
            else:
                reward_list.append(0)
                self.pong_list[i] = 0
            # Penalise aggressive turning.
            if abs(vcmds[i].angular.z) > 0.6:
                reward_list[i] -= 0.1 * abs(vcmds[i].angular.z)
            # Goal reached (within 0.3 m of any goal): episode done, bonus.
            if dis_matrix[i,:].min() < 0.3 / self.max_range_dis:
                done_list[i] = True
                reward_list[i] += 5
        # (translated) column-wise minima: the smallest distance achieved to
        # each goal point, summed — a shared formation/goal-coverage penalty.
        goal_reward = sum(dis_matrix.min(axis=0)) / self.nor
        reward_array = np.array(reward_list) - goal_reward
        return np.array(state_list), np.array(param_list), reward_array, done_list, self.pong_list

    def get_all_init_states(self):
        """Record every robot's current world pose as its initial pose.

        (translated) Called once early on; the loop also serves to wait until
        all robot models have finished loading.
        """
        self.init_pose = []
        for i in range(self.nor):
            self.init_pose.append(self.getState('robot%d' % i, 'world'))
        return self.init_pose

    def resetState(self, number):
        """(translated) After a collision, place robot `number` back at its initial pose."""
        self.modelState.model_name = 'robot%d' % number
        self.modelState.pose = self.init_pose[number].pose
        self.setState(self.modelState)

    def _reset(self):
        """Reset the Gazebo world; bail out if the reset service call fails."""
        rospy.wait_for_service('/gazebo/reset_world')
        try:
            self.reset_proxy()
        except rospy.ServiceException as e:
            print("/gazebo/reset_simulation service call failed")
            return
| StarcoderdataPython |
#!/usr/bin/env python3
"""
Models that maps to Cloudformation functions.
"""
def replace_fn(node):
    """Recursively render every Ref/Fn node inside *node* into plain dicts.

    Lists and dict values are walked recursively; scalars pass through
    untouched; Ref instances and any class registered on the Fn container
    are rendered via their ``render()`` method.
    """
    if isinstance(node, list):
        return [replace_fn(entry) for entry in node]
    if isinstance(node, dict):
        return {key: replace_fn(val) for key, val in node.items()}
    if isinstance(node, (str, int, float)):
        # Plain scalar: nothing to substitute.
        return node
    if isinstance(node, Ref):
        return node.render()
    if hasattr(Fn, node.__class__.__name__):
        # One of the intrinsic-function wrapper classes aliased on Fn.
        return node.render()
    raise ValueError(f"Invalid value specified in the code: {node}")
class Ref:
    """Models the CloudFormation ``Ref`` intrinsic."""

    # Thin DSL wrapper around a dictionary; a single public method is fine.
    # pylint: disable=R0903

    def __init__(self, target):
        """Store the logical name *target* the Ref points at."""
        self.target = target

    def render(self):
        """Return the ``{"Ref": target}`` dictionary form."""
        return {"Ref": self.target}
class Base64:
    """Models the ``Fn::Base64`` intrinsic."""

    # pylint: disable=R0903

    def __init__(self, value):
        """Keep the value CloudFormation will base64-encode."""
        self.value = value

    def render(self):
        """Return the ``{"Fn::Base64": ...}`` dictionary form."""
        return {"Fn::Base64": replace_fn(self.value)}
class Cidr:
    """Models the ``Fn::Cidr`` intrinsic."""

    # pylint: disable=R0903

    def __init__(self, ipblock, count, cidr_bits):
        """Store the CIDR block, the address count and the subnet bits."""
        self.ipblock = ipblock
        self.count = count
        self.cidr_bits = cidr_bits

    def render(self):
        """Return the ``{"Fn::Cidr": [...]}`` dictionary form."""
        arguments = [self.ipblock, self.count, self.cidr_bits]
        return {"Fn::Cidr": [replace_fn(argument) for argument in arguments]}
class And:
    """Models the ``Fn::And`` intrinsic."""

    # pylint: disable=R0903

    def __init__(self, *args):
        """Collect the conditions to be AND-ed together."""
        self.conditions = [*args]

    def render(self):
        """Return the ``{"Fn::And": [...]}`` dictionary form."""
        return {"Fn::And": replace_fn(self.conditions)}
class Equals:
    """Models the ``Fn::Equals`` intrinsic."""

    # pylint: disable=R0903

    def __init__(self, lhs, rhs):
        """Store the two operands to compare."""
        self.lhs = lhs
        self.rhs = rhs

    def render(self):
        """Return the ``{"Fn::Equals": [lhs, rhs]}`` dictionary form."""
        operands = [replace_fn(self.lhs), replace_fn(self.rhs)]
        return {"Fn::Equals": operands}
class If:
    """Models the ``Fn::If`` intrinsic."""

    # pylint: disable=R0903

    def __init__(self, condition, true_value, false_value):
        """Store the condition name and the two branch values."""
        self.condition = condition
        self.true_value = true_value
        self.false_value = false_value

    def render(self):
        """Return the ``{"Fn::If": [...]}`` dictionary form."""
        branches = [self.condition, self.true_value, self.false_value]
        return {"Fn::If": [replace_fn(item) for item in branches]}
class Not:
    """Models the ``Fn::Not`` intrinsic."""

    # pylint: disable=R0903

    def __init__(self, condition):
        """Store the condition to negate."""
        self.condition = condition

    def render(self):
        """Return the ``{"Fn::Not": [...]}`` dictionary form."""
        return {"Fn::Not": [replace_fn(self.condition)]}
class Or:
    """Models the ``Fn::Or`` intrinsic."""

    # pylint: disable=R0903

    def __init__(self, *args):
        """Collect the conditions to be OR-ed together."""
        self.conditions = [*args]

    def render(self):
        """Return the ``{"Fn::Or": [...]}`` dictionary form."""
        return {"Fn::Or": replace_fn(self.conditions)}
class FindInMap:
    """Models the ``Fn::FindInMap`` intrinsic."""

    # pylint: disable=R0903

    def __init__(self, map_name, l1key, l2key):
        """Store the mapping name and the two lookup keys."""
        self.map_name = map_name
        self.l1key = l1key
        self.l2key = l2key

    def render(self):
        """Return the ``{"Fn::FindInMap": [...]}`` dictionary form."""
        lookup = [self.map_name, self.l1key, self.l2key]
        return {"Fn::FindInMap": [replace_fn(part) for part in lookup]}
class GetAtt:
    """Models the ``Fn::GetAtt`` intrinsic."""

    # pylint: disable=R0903

    def __init__(self, logical_name, attr):
        """Store the resource logical name and the attribute to read."""
        self.logical_name = logical_name
        self.attr = attr

    def render(self):
        """Return the ``{"Fn::GetAtt": [...]}`` dictionary form."""
        pair = [replace_fn(self.logical_name), replace_fn(self.attr)]
        return {"Fn::GetAtt": pair}
class GetAZs:
    """Models the ``Fn::GetAZs`` intrinsic."""

    # pylint: disable=R0903

    def __init__(self, region):
        """Store the region whose availability zones are listed."""
        self.region = region

    def render(self):
        """Return the ``{"Fn::GetAZs": ...}`` dictionary form."""
        return {"Fn::GetAZs": replace_fn(self.region)}
class ImportValue:
    """Models the ``Fn::ImportValue`` intrinsic."""

    # pylint: disable=R0903

    def __init__(self, export):
        """Store the name of the cross-stack export to import."""
        self.export = export

    def render(self):
        """Return the ``{"Fn::ImportValue": ...}`` dictionary form."""
        return {"Fn::ImportValue": replace_fn(self.export)}
class Join:
    """Models the ``Fn::Join`` intrinsic."""

    # pylint: disable=R0903

    def __init__(self, delimiter, elements):
        """Store the delimiter and the values to join."""
        self.delimiter = delimiter
        self.elements = elements

    def render(self):
        """Return the ``{"Fn::Join": [...]}`` dictionary form."""
        rendered = [replace_fn(self.delimiter), replace_fn(self.elements)]
        return {"Fn::Join": rendered}
class Select:
    """Models the ``Fn::Select`` intrinsic."""

    # pylint: disable=R0903

    def __init__(self, index, elements):
        """Store the index and the list to select from."""
        self.index = index
        self.elements = elements

    def render(self):
        """Return the ``{"Fn::Select": [...]}`` dictionary form."""
        rendered = [replace_fn(self.index), replace_fn(self.elements)]
        return {"Fn::Select": rendered}
class Split:
    """Models the ``Fn::Split`` intrinsic."""

    # pylint: disable=R0903

    def __init__(self, delimiter, target):
        """Store the delimiter and the string to split."""
        self.delimiter = delimiter
        self.target = target

    def render(self):
        """Return the ``{"Fn::Split": [...]}`` dictionary form."""
        rendered = [replace_fn(self.delimiter), replace_fn(self.target)]
        return {"Fn::Split": rendered}
class Sub:
    """Models the ``Fn::Sub`` intrinsic."""

    # pylint: disable=R0903

    def __init__(self, target, mapping=None):
        """Store the template string and an optional substitution mapping.

        :param target: the template string (must be a str).
        :param mapping: optional dict of substitution variables.
        :raises ValueError: if *target* is not a string.
        """
        if not isinstance(target, str):
            raise ValueError(
                f"The first argument of Fn::Sub must be string: `{target}`"
            )
        self.target = target
        # Bug fix: the original set `self.mapping = {}` for the None case and
        # then unconditionally overwrote it with `mapping`, leaving None stored.
        self.mapping = {} if mapping is None else mapping

    def render(self):
        """Render the node with Fn::Sub.

        Uses the short string form when no mapping was supplied, and the
        two-element list form otherwise.
        """
        if self.mapping:
            return {"Fn::Sub": [replace_fn(self.target), replace_fn(self.mapping)]}
        return {"Fn::Sub": replace_fn(self.target)}
class Transform:
    """Models the ``Fn::Transform`` intrinsic."""

    # pylint: disable=R0903

    def __init__(self, construct):
        """Validate and store a ``{"Name": ..., "Parameters": ...}`` construct.

        :raises ValueError: if *construct* is not a dict with exactly the keys
            ``Name`` and ``Parameters``.
        """
        # Bug fix: check that construct is a dict *before* calling .keys();
        # the original evaluated both checks eagerly, so a non-dict argument
        # raised AttributeError instead of the intended ValueError.
        if not isinstance(construct, dict):
            raise ValueError("Invalid Transform construct")
        if set(construct.keys()) != {"Name", "Parameters"}:
            raise ValueError("Invalid Transform construct")
        self.construct = construct

    def render(self):
        """Render the node with Fn::Transform."""
        return {
            "Fn::Transform": {
                "Name": replace_fn(self.construct["Name"]),
                "Parameters": replace_fn(self.construct["Parameters"]),
            }
        }
class Fn:
    """
    This is a container for all functions.
    Rationale is instead of having to import all the functions,
    we just import Fn and use any function as Fn.FuncName

    Note: replace_fn() also uses this class as a registry -- a node whose
    class name matches an attribute here is treated as an intrinsic and
    rendered via its .render() method.
    """

    # pylint: disable=R0903
    # Aliases to the intrinsic-function wrapper classes defined above.
    Base64 = Base64
    Cidr = Cidr
    And = And
    Equals = Equals
    If = If
    Not = Not
    Or = Or
    FindInMap = FindInMap
    GetAtt = GetAtt
    GetAZs = GetAZs
    ImportValue = ImportValue
    Join = Join
    Select = Select
    Split = Split
    Sub = Sub
    Transform = Transform
| StarcoderdataPython |
from collections import deque

# BFS over an n x m grid of 0/1 cells: dist[r][c] is the number of cells on
# the shortest path from (0, 0) to (r, c), counting both endpoints.
dx = [0, 0, 1, -1]
dy = [1, -1, 0, 0]

n, m = map(int, input().split())
a = [list(map(int, list(input()))) for _ in range(n)]

q = deque()
check = [[False] * m for _ in range(n)]
dist = [[0] * m for _ in range(n)]

q.append((0, 0))
check[0][0] = True
dist[0][0] = 1

while q:
    x, y = q.popleft()
    for step in range(4):
        nx = x + dx[step]
        ny = y + dy[step]
        if not (0 <= nx < n and 0 <= ny < m):
            continue
        if a[nx][ny] == 1 and not check[nx][ny]:
            check[nx][ny] = True
            dist[nx][ny] = dist[x][y] + 1
            q.append((nx, ny))

print(dist[n - 1][m - 1])
| StarcoderdataPython |
5099483 | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os.path
import re
import subprocess
from textwrap import dedent
import pytest
from pex.common import safe_open
from pex.pex import PEX
from pex.pex_bootstrapper import ensure_venv
from pex.pex_info import PexInfo
from pex.testing import make_env, run_pex_command
from pex.typing import TYPE_CHECKING
from pex.venv.pex import CollisionError
from pex.venv.virtualenv import Virtualenv
if TYPE_CHECKING:
from typing import Any, Set, Text
def test_ensure_venv_short_link(
    pex_bdist,  # type: str
    tmpdir,  # type: Any
):
    # type: (...) -> None
    """Check collision handling and on-disk layout of the short-link venv tree.

    Builds a --venv PEX bundling both the pex distribution and a local project
    whose console script also claims the name ``pex``.  Verifies that
    ensure_venv raises CollisionError when collisions are not allowed, that the
    failed attempt leaves only AtomicDirectory lock files behind, and that with
    collisions_ok=True the venv's ``pex`` script is the colliding project's
    entry point (exit code 42).
    """
    pex_root = os.path.join(str(tmpdir), "pex_root")
    collision_src = os.path.join(str(tmpdir), "src")
    # A local project providing a console script named `pex` that returns 42.
    with safe_open(os.path.join(collision_src, "will_not_collide_module.py"), "w") as fp:
        fp.write(
            dedent(
                """\
                def verb():
                    return 42
                """
            )
        )
    with safe_open(os.path.join(collision_src, "setup.cfg"), "w") as fp:
        fp.write(
            dedent(
                """\
                [metadata]
                name = collision
                version = 0.0.1
                [options]
                py_modules =
                    will_not_collide_module
                [options.entry_points]
                console_scripts =
                    pex = will_not_collide_module:verb
                """
            )
        )
    with safe_open(os.path.join(collision_src, "setup.py"), "w") as fp:
        fp.write("from setuptools import setup; setup()")
    collisions_pex = os.path.join(str(tmpdir), "collisions.pex")
    run_pex_command(
        args=[
            pex_bdist,
            collision_src,
            "-o",
            collisions_pex,
            "--runtime-pex-root",
            pex_root,
            "--venv",
        ]
    ).assert_success()
    # Both distributions install a `pex` script, so strict venv creation fails.
    with pytest.raises(CollisionError):
        ensure_venv(PEX(collisions_pex), collisions_ok=False)
    # The directory structure for successfully executed --venv PEXes is:
    #
    # PEX_ROOT/
    #   venvs/
    #     s/  # shortcuts dir
    #       <short hash>/
    #         venv -> <real venv parent dir (see below)>
    #     <full hash1>/
    #       <full hash2>/
    #         <real venv>
    #
    # AtomicDirectory locks are used to create both branches of the venvs/ tree; so if there is a
    # failure creating a venv we expect just:
    #
    # PEX_ROOT/
    #   venvs/
    #     s/
    #       .<short hash>.atomic_directory.lck
    #     <full hash1>/
    #       .<full hash2>.atomic_directory.lck
    expected_venv_dir = PexInfo.from_pex(collisions_pex).venv_dir(collisions_pex)
    assert expected_venv_dir is not None
    full_hash1_dir = os.path.basename(os.path.dirname(expected_venv_dir))
    full_hash2_dir = os.path.basename(expected_venv_dir)
    venvs_dir = os.path.join(pex_root, "venvs")
    # Only the lock files should remain from the failed attempt.
    assert {"s", full_hash1_dir} == set(os.listdir(venvs_dir))
    short_listing = os.listdir(os.path.join(venvs_dir, "s"))
    assert 1 == len(short_listing)
    assert re.match(r"^\.[0-9a-f]+\.atomic_directory.lck", short_listing[0])
    assert [".{full_hash2}.atomic_directory.lck".format(full_hash2=full_hash2_dir)] == os.listdir(
        os.path.join(venvs_dir, full_hash1_dir)
    )
    venv_pex = ensure_venv(PEX(collisions_pex), collisions_ok=True)
    # We happen to know built distributions are always ordered before downloaded wheels in PEXes
    # as a detail of how `pex/resolver.py` works.
    assert 42 == subprocess.Popen(args=[venv_pex], env=make_env(PEX_SCRIPT="pex")).wait()
def test_ensure_venv_namespace_packages(tmpdir):
    # type: (Any) -> None
    """Verify namespace-package handling for copied vs symlinked venvs.

    A venv built with --venv-site-packages-copies should merge the
    twitter.common namespace package into one directory, while a symlinked
    venv should expose a pex-ns-pkgs.pth scaffolding with one path entry per
    contributing wheel, each symlinked into the installed wheel cache.
    """
    pex_root = os.path.join(str(tmpdir), "pex_root")
    # We know the twitter.common.metrics distributions depends on 4 other distributions contributing
    # to the twitter.common namespace package:
    # + twitter.common.exceptions
    # + twitter.common.decorators
    # + twitter.common.lang
    # + twitter.common.quantity
    def create_ns_pkg_pex(copies):
        # type: (bool) -> Virtualenv
        """Build a --venv PEX of twitter.common.metrics and return its venv."""
        nspkgs_pex = os.path.join(
            str(tmpdir), "ns-pkgs-{style}.pex".format(style="copies" if copies else "symlinks")
        )
        run_pex_command(
            args=[
                "twitter.common.metrics==0.3.11",
                "-o",
                nspkgs_pex,
                "--runtime-pex-root",
                pex_root,
                "--venv",
                "--venv-site-packages-copies" if copies else "--no-venv-site-packages-copies",
            ]
        ).assert_success()
        nspkgs_venv_pex = ensure_venv(PEX(nspkgs_pex), collisions_ok=False)
        pex_info = PexInfo.from_pex(nspkgs_pex)
        venv_dir = pex_info.venv_dir(nspkgs_pex)
        assert venv_dir is not None
        venv = Virtualenv(venv_dir=venv_dir)
        assert os.path.realpath(nspkgs_venv_pex) == os.path.realpath(venv.join_path("pex"))
        return venv
    venv_copies = create_ns_pkg_pex(copies=True)
    # Copied site-packages need no .pth scaffolding for namespace packages.
    assert not os.path.exists(os.path.join(venv_copies.site_packages_dir, "pex-ns-pkgs.pth"))
    venv_symlinks = create_ns_pkg_pex(copies=False)
    pex_ns_pkgs_pth = os.path.join(venv_symlinks.site_packages_dir, "pex-ns-pkgs.pth")
    assert os.path.isfile(pex_ns_pkgs_pth)
    with open(pex_ns_pkgs_pth) as fp:
        assert 4 == len(fp.readlines())
    expected_path_entries = [
        os.path.join(venv_symlinks.site_packages_dir, d)
        for d in ("", "pex-ns-pkgs/1", "pex-ns-pkgs/2", "pex-ns-pkgs/3", "pex-ns-pkgs/4")
    ]
    for d in expected_path_entries:
        assert os.path.islink(os.path.join(venv_symlinks.site_packages_dir, d, "twitter"))
        assert os.path.isdir(os.path.join(venv_symlinks.site_packages_dir, d, "twitter", "common"))
    def find_package_paths(venv):
        # type: (Virtualenv) -> Set[Text]
        """Import the 5 namespace sub-packages in *venv* and collect their real dirs."""
        return set(
            subprocess.check_output(
                args=[
                    venv.join_path("pex"),
                    "-c",
                    dedent(
                        """\
                        from __future__ import print_function
                        import os
                        from twitter.common import decorators, exceptions, lang, metrics, quantity
                        for pkg in decorators, exceptions, lang, metrics, quantity:
                            # These are all packages; so __file__ looks like:
                            #   <sys.path entry>/twitter/common/<pkg>/__init__.pyc
                            print(os.path.realpath(os.path.dirname(os.path.dirname(pkg.__file__))))
                        """
                    ),
                ]
            )
            .decode("utf-8")
            .splitlines()
        )
    assert 1 == len(
        find_package_paths(venv_copies)
    ), "Expected 1 unique package path for a venv built from copies."
    symlink_package_paths = find_package_paths(venv_symlinks)
    assert 5 == len(symlink_package_paths), "Expected 5 unique package paths for symlinked venv."
    # We expect package paths like:
    #   .../twitter.common.foo-0.3.11.*.whl/twitter/common
    package_file_installed_wheel_dirs = {
        os.path.dirname(os.path.dirname(p)) for p in symlink_package_paths
    }
    assert os.path.realpath(os.path.join(pex_root, PexInfo.INSTALL_CACHE)) == os.path.realpath(
        os.path.commonprefix(list(package_file_installed_wheel_dirs))
    ), "Expected contributing wheel content to be symlinked from the installed wheel cache."
    assert {
        "twitter.common.{package}-0.3.11-py{py_major}-none-any.whl".format(
            package=p, py_major=venv_symlinks.interpreter.version[0]
        )
        for p in ("decorators", "exceptions", "lang", "metrics", "quantity")
    } == {
        os.path.basename(d) for d in package_file_installed_wheel_dirs
    }, "Expected 5 unique contributing wheels."
def test_ensure_venv_site_packages_copies(
    pex_bdist,  # type: str
    tmpdir,  # type: Any
):
    # type: (...) -> None
    """The --venv-site-packages-copies flag controls copy-vs-symlink layout.

    Builds the same --venv PEX twice and asserts the `pex` package inside the
    seeded venv's site-packages is a real directory copy in one case and a
    symlink in the other.
    """
    pex_root = os.path.join(str(tmpdir), "pex_root")
    pex_file = os.path.join(str(tmpdir), "pex")
    def assert_venv_site_packages_copies(copies):
        # type: (bool) -> None
        run_pex_command(
            args=[
                pex_bdist,
                "-o",
                pex_file,
                "--pex-root",
                pex_root,
                "--runtime-pex-root",
                pex_root,
                "--venv",
                "--venv-site-packages-copies" if copies else "--no-venv-site-packages-copies",
                "--seed",
            ]
        ).assert_success()
        venv_dir = PexInfo.from_pex(pex_file).venv_dir(pex_file)
        assert venv_dir is not None
        venv = Virtualenv(venv_dir=venv_dir)
        pex_package = os.path.join(venv.site_packages_dir, "pex")
        assert os.path.isdir(pex_package)
        # A copied site-packages must not be a symlink, and vice versa.
        assert copies != os.path.islink(pex_package)
    assert_venv_site_packages_copies(copies=True)
    assert_venv_site_packages_copies(copies=False)
| StarcoderdataPython |
11305409 | from __future__ import unicode_literals
from django.core.exceptions import PermissionDenied
from django.db.models import Count
from rest_framework import response, viewsets, permissions, status, decorators, mixins
from nodeconductor.core import serializers as core_serializers, filters as core_filters
from nodeconductor.core.views import BaseSummaryView
from nodeconductor.logging import elasticsearch_client, models, serializers, filters
class EventViewSet(viewsets.GenericViewSet):
    """Read-only endpoint over events stored in Elasticsearch.

    Unlike a model viewset, the "queryset" here is an Elasticsearch result
    list, so counting and aggregation are delegated to it.
    """

    filter_backends = (filters.EventFilterBackend,)

    def get_queryset(self):
        # A fresh (lazy) Elasticsearch query; filter backends narrow it later.
        return elasticsearch_client.ElasticsearchResultList()

    def list(self, request, *args, **kwargs):
        """Return a (possibly paginated) list of filtered events."""
        self.queryset = self.filter_queryset(self.get_queryset())
        page = self.paginate_queryset(self.queryset)
        if page is not None:
            return self.get_paginated_response(page)
        return response.Response(self.queryset)

    def get(self, request, *args, **kwargs):
        """Alias for list() so the view can serve plain GET dispatch."""
        return self.list(request, *args, **kwargs)

    @decorators.list_route()
    def count(self, request, *args, **kwargs):
        """Return the total number of events matching the current filters."""
        self.queryset = self.filter_queryset(self.get_queryset())
        return response.Response({'count': self.queryset.count()}, status=status.HTTP_200_OK)

    @decorators.list_route()
    def count_history(self, request, *args, **kwargs):
        """Return event counts aggregated at a series of points in time."""
        queryset = self.filter_queryset(self.get_queryset())
        mapped = {
            'start': request.query_params.get('start'),
            'end': request.query_params.get('end'),
            'points_count': request.query_params.get('points_count'),
            'point_list': request.query_params.getlist('point'),
        }
        # Drop query params that were not supplied before validating.
        serializer = core_serializers.HistorySerializer(data={k: v for k, v in mapped.items() if v})
        serializer.is_valid(raise_exception=True)
        # One open-ended range per requested point: everything up to `end`.
        timestamp_ranges = [{'end': point_date} for point_date in serializer.get_filter_data()]
        aggregated_count = queryset.aggregated_count(timestamp_ranges)
        return response.Response(
            [{'point': int(ac['end']), 'object': {'count': ac['count']}} for ac in aggregated_count],
            status=status.HTTP_200_OK)
class AlertViewSet(mixins.CreateModelMixin,
                   viewsets.ReadOnlyModelViewSet):
    """List/retrieve/create alerts, plus close/acknowledge workflow actions."""

    queryset = models.Alert.objects.all()
    serializer_class = serializers.AlertSerializer
    lookup_field = 'uuid'
    permission_classes = (permissions.IsAuthenticated,)
    filter_backends = (
        core_filters.DjangoMappingFilterBackend,
        filters.AdditionalAlertFilterBackend,
        filters.ExternalAlertFilterBackend,
        filters.AlertScopeFilterBackend,
    )
    filter_class = filters.AlertFilter

    def get_queryset(self):
        # Only alerts visible to the requesting user, newest first.
        return models.Alert.objects.filtered_for_user(self.request.user).order_by('-created')

    @decorators.detail_route(methods=['post'])
    def close(self, request, *args, **kwargs):
        """Close an alert. Staff only; 204 on success."""
        if not request.user.is_staff:
            raise PermissionDenied()
        alert = self.get_object()
        alert.close()
        return response.Response(status=status.HTTP_204_NO_CONTENT)

    @decorators.detail_route(methods=['post'])
    def acknowledge(self, request, *args, **kwargs):
        """Mark an alert acknowledged; 409 if it already is.

        Bug fix: removed an unreachable trailing ``return`` that followed the
        if/else (both branches already return).
        """
        alert = self.get_object()
        if not alert.acknowledged:
            alert.acknowledge()
            return response.Response(status=status.HTTP_200_OK)
        else:
            return response.Response({'detail': 'Alert is already acknowledged'}, status=status.HTTP_409_CONFLICT)

    @decorators.detail_route(methods=['post'])
    def cancel_acknowledgment(self, request, *args, **kwargs):
        """Clear the acknowledged flag; 409 if the alert is not acknowledged."""
        alert = self.get_object()
        if alert.acknowledged:
            alert.cancel_acknowledgment()
            return response.Response(status=status.HTTP_200_OK)
        else:
            return response.Response({'detail': 'Alert is not acknowledged'}, status=status.HTTP_409_CONFLICT)

    @decorators.list_route()
    def stats(self, request, *args, **kwargs):
        """Return a mapping of lower-cased severity name -> alert count."""
        queryset = self.filter_queryset(self.get_queryset())
        alerts_severities_count = queryset.values('severity').annotate(count=Count('severity'))
        severity_names = dict(models.Alert.SeverityChoices.CHOICES)
        # For consistency with all other endpoint we need to return severity names in lower case.
        alerts_severities_count = {
            severity_names[asc['severity']].lower(): asc['count'] for asc in alerts_severities_count}
        # Severities with no alerts are reported explicitly as zero.
        for severity_name in severity_names.values():
            if severity_name.lower() not in alerts_severities_count:
                alerts_severities_count[severity_name.lower()] = 0
        return response.Response(alerts_severities_count, status=status.HTTP_200_OK)
class BaseHookViewSet(viewsets.ModelViewSet):
    """Shared configuration for notification-hook endpoints (web/email)."""

    permission_classes = (permissions.IsAuthenticated,)
    # Non-staff users only see their own hooks.
    filter_backends = (core_filters.StaffOrUserFilter,)
    lookup_field = 'uuid'
class WebHookViewSet(BaseHookViewSet):
    """CRUD endpoint for web-hook notification subscriptions."""

    queryset = models.WebHook.objects.all()
    serializer_class = serializers.WebHookSerializer
class EmailHookViewSet(BaseHookViewSet):
    """CRUD endpoint for e-mail notification subscriptions."""

    queryset = models.EmailHook.objects.all()
    serializer_class = serializers.EmailHookSerializer
class HookSummary(BaseSummaryView):
    """Aggregated view that merges all per-type hook list endpoints."""

    def get_urls(self, request):
        # URL names of the individual hook list views to combine.
        return ('webhook-list', 'emailhook-list')
| StarcoderdataPython |
import git

# Resolve the SHA-1 of the current HEAD commit so the build/run can be traced
# back to an exact source revision.  search_parent_directories lets this work
# when executed from any directory inside the repository.
repo = git.Repo(search_parent_directories=True)
GIT_SHA = repo.head.object.hexsha
| StarcoderdataPython |
1823694 | <gh_stars>1-10
#!/usr/bin/env python
# coding: utf-8
__author__ = 'whoami'
"""
@version: 1.0
@author: whoami
@license: Apache Licence 2.0
@contact: <EMAIL>
@site: http://www.itweet.cn
@software: PyCharm Community Edition
@file: DataProcess.py
@time: 2015-12-29 下午4:25
"""
import ActionProcess
import Tools
import pickle
import threading,os
import datetime
import global_settings
from db import InfluxdbClient
class DataProcess(object):
    """Consumes pickled monitoring messages and writes them to InfluxDB.

    handle() decodes a batch, dispatches each entry through ActionProcess,
    then spawns one flush thread per configured host; process() writes that
    host's accumulated measurements and clears them.  (Python 2 code.)
    """

    def __init__(self):
        # Host configuration map; expected shape: {'hosts': {hostname: {...}}}.
        self.hosts = Tools.all_host_configs()
        self.db = InfluxdbClient.InfluxdbClient()

    def handle(self,msg):
        """Decode one pickled batch and dispatch each '<func>::<time>' entry.

        # NOTE(review): assumes the producer pickles a dict keyed by
        # '<function_name>::<timestamp>' -- confirm against the sender.
        """
        # print 'recv:',msg
        # print '>> process data:: %s' % pickle.loads(msg)
        data = pickle.loads(msg)
        for k,msg in data.items():
            fun_name = k.split('::')[0]
            time = k.split('::')[1]
            ActionProcess.action_process(self, fun_name, time, msg)
        print '---------waiting for new msg ---------'
        # Fan out: one flush thread per host that has accumulated data.
        for host,val in self.hosts['hosts'].items():
            if val:
                t = threading.Thread(target=self.process,args=[host,val])
                t.start()
            else:
                print '%s host monitor info is null...' % host

    def forward(self,msg):
        """Entry point: log and delegate to handle()."""
        print '-------starting Processing data---------'
        self.handle(msg)

    def process(self,host,val):
        """Write one host's buffered measurements to InfluxDB, then clear them."""
        print 'Task %s runs pid %s' % (host,os.getpid())
        tags = {
            "host": "%s" % host,
            "region": "us-west"
        }
        for v in val.values():
            timestamp = float(v['timestamp'])
            # Millisecond-precision timestamp string for the point.
            time = datetime.datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
            measurement = v['data'].keys()[0]
            data = v['data'].values()[0]
            # NOTE(review): 'wirte_points' looks like a typo for 'write_points';
            # it may match the db client's actual method name -- confirm before renaming.
            self.db.wirte_points(tags,measurement,time,data)
        # clear service_instance data object
        self.hosts['hosts'][host].clear()
| StarcoderdataPython |
11280332 | <gh_stars>0
import time
import client
class Actuator():
    """Class to represent greenhouse's actuators.

    Open two channels of communication, one with Manager server to read
    instructions to be turned on and off, the other with Environment server to
    act on it.

    Parameters
    ----------
    `ID: str`
        Its identification and purpose, check it out on `Report.pdf`;
    `strength: float`
        Its strength of variation in the greenhouse;
    """

    def mana(self, client):
        """Receive orders from Manager.

        Protocols
        ---------
        `PUT <- MANA`
            Turns this actuator on or off;
        `ACK -> MANA`
            Confirms setting;
        """
        while True:
            # Busy-poll the socket until a message arrives.
            data = None
            while not data:
                try:
                    data = client.sck.recv(1024)
                except Exception:
                    pass
            if client.v:
                print(client.ID + " <- " + client.target + ": " +
                      str(data, "utf-8"))
            # Messages are pipe-delimited: |PUT|<ID>|ON/OFF|
            data = str(data, "utf-8").split("|")
            if data[1] == "PUT" and data[2] == client.ID:
                if data[3] == "ON":
                    self.on[0] = True
                else:
                    self.on[0] = False
                # Acknowledgement
                string = "|ACK|PUT|"
                if client.v:
                    print(client.ID, "-> " + client.target + ": " + string)
                try:
                    client.sck.send(bytes(string, "utf-8"))
                except OSError:  # If connection error:
                    print(client.target + " disconnected")
                    print(client.ID + " disconnecting from " + client.target)
                    client.sck.close()
                    return

    def envi(self, client):
        """Interact with Environment.

        Protocols
        ---------
        `PUT -> ENVI`
            Acts on Environment;
        """
        while True:
            # Only push to the Environment while the Manager has us switched on.
            if self.on[0]:
                string = "|PUT|" + client.ID + "|" + str(self.strength) + "|"
                if client.v:
                    print(client.ID + " -> " + client.target + ": " + string)
                try:
                    client.sck.send(bytes(string, "utf-8"))
                except OSError:  # If connection error:
                    print(client.target + " disconnected")
                    print(client.ID + " disconnecting from " + client.target)
                    client.sck.close()
                    return
            time.sleep(1)

    def __init__(self, ID, strength):
        """Class constructor."""
        # Single-element list so both client threads share one mutable flag.
        self.on = [False]
        self.strength = strength
        HOST, v = client.inputs()
        # Port 7777 talks to the Manager, 8888 to the Environment.
        if HOST is not None:
            client.Client(7777, ID, "MANA", self.mana, v, HOST).start()
            client.Client(8888, ID, "ENVI", self.envi, v, HOST).start()
        else:
            client.Client(7777, ID, "MANA", self.mana, v).start()
            client.Client(8888, ID, "ENVI", self.envi, v).start()
| StarcoderdataPython |
19108 | """
Enum Assembler-Directives
"""
from enum import Enum, auto
class AssemblerDirectives(Enum):
    """Directives recognised by the assembler."""

    START = auto()
    END = auto()
    ORG = auto()
    DEFINE = auto()

    @classmethod
    def to_string(cls):
        """Return all directive names as a comma-separated string.

        Generalized from a hard-coded format string: iterating the enum keeps
        definition order and automatically includes any directive added later.
        """
        return ",".join(member.name for member in cls)
| StarcoderdataPython |
11399218 | # !/usr/bin/env python
# title :experiment_helper.py
# description : Experiment helper class along with subclasses for each experiment, with their respective
# characteristics and methods.
# author :<NAME>
# date :30092018
# version :1.0
# usage : -
# notes : -
# python_version :3.5.5
# ==============================================================================
import os
os.environ['KERAS_BACKEND']='tensorflow'
from data_utils import *
from base_model import BaseModel
from models import get_model
from data_generator import DataGenerator
from images_data_augmenter_seqaware import ImageDataAugmenter
import experiment_utils as exp_utils
class ExperimentHelper(object):
"""
Class that contains all experiments and their specific characteristics. It contains methods to register new
experiments (as subclasses) and to gather them based on their name. Characteristics and methods that are shared by
all experiments are defined in the parent class (this class) as DEFAULT. The Default mode is Face only networks,
without converting data to normalized space. Each experiment can have their own specific methods and characteristics,
which are overridden in their respective subclasses.
This class and subclasses also contain methods to be passed to DataGenerator. See DataGenerator for more info.
"""
experiments = {}
    def __init__(self,
                 name: str=None,
                 description: str=None,
                 weights: str=None,
                 train: bool=False,
                 base_model: BaseModel=None,
                 model=None,
                 fc_dimensions: int=4096,
                 label_pos: int=-1,
                 look_back: int=1,
                 n_output: int=2,
                 recurrent_type: str="lstm",
                 num_recurrent_layers: int=1,
                 num_recurrent_units: int=128,
                 train_data_generator: DataGenerator=None,
                 val_data_generator: DataGenerator=None):
        """
        Initialize ExperimentHelper class.
        All arguments default to a Face-only, non-temporal configuration;
        data generators may be attached later via init_data_gen_train/val.
        :param name: name of experiment
        :param description: description of experiment
        :param weights: weights of model (in case it has been already trained)
        :param train: True if training is activated
        :param base_model: base model used (for instance, VGGFace)
        :param model: model architecture type
        :param fc_dimensions: dimensions of FC layers
        :param label_pos: label position
        :param look_back: sequence length
        :param n_output: number of outputs of model
        :param recurrent_type: type of recurrent network (gru or lstm)
        :param num_recurrent_layers: number of recurrent layers
        :param num_recurrent_units: number of recurrent units
        :param train_data_generator: DataGenerator for training
        :param val_data_generator: DataGenerator for validation/test (in case there is any)
        """
        self.name = name
        self.description = description
        self.weights = weights
        self.train = train
        self.base_model = base_model
        self.model = model
        self.fc_dimensions = fc_dimensions
        self.label_pos = label_pos
        self.n_output = n_output
        # --- temporal options ---
        self.look_back = look_back
        self.recurrent_type = recurrent_type
        self.num_recurrent_layers = num_recurrent_layers
        self.num_recurrent_units = num_recurrent_units
        # --- other ---
        self.train_data_generator = train_data_generator
        self.val_data_generator = val_data_generator
@classmethod
def register_subclass(cls, name: str):
"""
Register experiment to list of available experiments
(there can be subclasses not registered that cannot be used)
:param name: experiment name
:return:
"""
def decorator(subclass):
cls.experiments[name] = subclass
return subclass
return decorator
@classmethod
def get_experiment(cls, name: str, *args, **kwargs):
"""
Gather experiment subclass by name
:param name: experiment name
:param args: arguments
:param kwargs:
:return: Retrieved experiment class (if it exists)
"""
name = name.upper()
if name not in cls.experiments:
raise ValueError('{} is not a valid experiment'.format(name))
return cls.experiments[name](*args, **kwargs)
def get_name(self):
"""
Get class name
:return: class name
"""
return self.__class__.__name__
    def init_data_gen_train(self,
                            data: DataTuple,
                            batch_size: int=64,
                            augmenter: ImageDataAugmenter=None,
                            shuffle: bool=True,
                            debug: bool=False):
        """
        Initialize data generator for training stage.
        The generator is stored in self.train_data_generator; shuffle defaults
        to True for training.
        :param data: DataTuple including x, y and feats
        :param batch_size: batch size
        :param augmenter: augmenter object (ImageDataAugmenter)
        :param shuffle: True to shuffle input data
        :param debug: True if debug mode is activated to show augmentation and normalization image results
        """
        self.train_data_generator = self.init_data_gen(data, batch_size, augmenter, shuffle, debug)
    def init_data_gen_val(self,
                          data: DataTuple,
                          batch_size: int=64,
                          augmenter: ImageDataAugmenter=None,
                          shuffle: bool=False,
                          debug: bool = False):
        """
        Initialize data generator for validation/test stage.
        The generator is stored in self.val_data_generator; shuffle defaults
        to False so evaluation order is deterministic.
        :param data: DataTuple including x, y and feats
        :param batch_size: batch size
        :param augmenter: augmenter object (ImageDataAugmenter)
        :param shuffle: True to shuffle input data
        :param debug: True if debug mode is activated to show augmentation and normalization image results
        """
        self.val_data_generator = self.init_data_gen(data, batch_size, augmenter, shuffle, debug)
    def init_data_gen(self,
                      data: DataTuple,
                      batch_size: int=64,
                      augmenter: ImageDataAugmenter=None,
                      shuffle: bool=False,
                      debug: bool=False):
        """
        Initialize new data generator object with custom methods that depend on the experiment used. The code assumes
        that the "default" mode is to convert to normalized space the input data, so "norm" methods are used as input
        for the data generator here. If that's not the case, this method is overridden in respective experiments.
        :param data: DataTuple including x, y and feats
        :param batch_size: batch size
        :param augmenter: augmenter object (ImageDataAugmenter)
        :param shuffle: True to shuffle input data
        :param debug: True if debug mode is activated to show augmentation and normalization image results
        :return: configured DataGenerator instance
        """
        datagen = DataGenerator(data.x, data.y, data.feats, batch_size, augmenter, shuffle, debug)
        # NOTE(review): the positional order of these callbacks must match
        # DataGenerator.set_methods -- confirm when changing either side.
        datagen.set_methods(self.arrange_arrays, self.arrange_label_array, self.look_back_range,
                            self.get_preprocess_info, self.load_image, self.preprocess_input_data_norm,
                            self.preprocess_input_label_norm, self.resize_input_data, self.prepare_tensor_dims,
                            self.normalize_input_data, self.arrange_final_data, self.decide_input_label)
        return datagen
    def arrange_arrays(self, batch_size: int):
        """
        Initialize data arrays for generator according to batch size and type of data.
        In this case FACE only (default).
        :param batch_size: batch size
        :return: empty data arrays
        """
        # Presumably overridden by experiments that feed more inputs than the
        # face crop -- see registered subclasses.
        return arrange_array(arrange_array_size(batch_size, self.base_model.input_size[input_type.FACE]))
    def arrange_label_array(self, batch_size: int):
        """
        Initialize data arrays for generator according to batch size and number of output labels.
        Shape is (batch_size, n_output).
        :param batch_size: batch size
        :return: empty label arrays
        """
        return arrange_array(arrange_array_size(batch_size, [self.n_output]))
def look_back_range(self):
"""
Returns the range of frames between 0 and the sequence length (i.e. for a 4-frame sequence, the method
would return range[0,1,2,3]
:return: look back range
"""
return range(0, self.look_back)
    def load_image(self, img: str):
        """
        Reads image from directory.
        Delegates to the module-level load_image helper; the method indirection
        presumably exists so subclasses can customise loading -- TODO confirm.
        :param img: Image directory
        :return: Image object (ndarray)
        """
        return load_image(img)
    def prepare_data(self,
                     train_data: DataTuple,
                     validation_data: DataTuple,
                     args: list,
                     train: bool=True):  # invoked by the training/eval pipeline
        """
        Perform all necessary actions to data before fitting model.
        In this case, add extra dimension to data after first dimension.
        :param train_data: training data DataTuple
        :param validation_data: validation data DataTuple (may be None)
        :param args: list of possible arguments (returned unchanged here)
        :param train: True if training, False if validating/testing
        :return: modified (or not) training and validation data, plus args
        """
        if train:
            train_data = add_dimension(train_data)
        if validation_data is not None:
            validation_data = add_dimension(validation_data)
        return train_data, validation_data, args
    def prepare_metadata(self, train_data: DataTuple, validation_data: DataTuple, args, train=True):
        """
        Perform necessary actions to metadata before fitting model.
        In case of training, minimum and maximum landmarks of training set are computed so as to normalize all landmarks
        later. Otherwise, previously computed min and max landmarks are added to set.
        :param train_data: DataTuple containing training data
        :param validation_data: DataTuple containing validation data (if there is none, it's None).
        :param args: possible variables needed to prepare metadata
        :param train: True if training is activated
        :return: training (and validation) DataTuples now containing min and max training landmarks, input arguments
        plus computed min and max landmarks
        """
        if train:
            min_lndmk, max_lndmk = compute_min_max_landmarks_fold(train_data.feats, True)
            train_data = train_data._replace(
                feats=add_minxmax_landmarks_values(train_data.feats, min_lndmk, max_lndmk))
            args['min_landmark'] = min_lndmk
            args['max_landmark'] = max_lndmk
        else:
            # NOTE(review): assumes self.min_lndmk / self.max_lndmk were set
            # outside this method (not visible in this block) -- confirm where
            # they are assigned before relying on the eval path.
            args['min_landmark'] = self.min_lndmk
            args['max_landmark'] = self.max_lndmk
        if validation_data is not None:
            validation_data = validation_data._replace(
                feats=add_minxmax_landmarks_values(validation_data.feats, args['min_landmark'], args['max_landmark']))
        return train_data, validation_data, args
def get_preprocess_info_ind(self, index: dict, feature: np.ndarray):
    """
    Collect the per-frame preprocessing information (from face_features, see
    data_utils.read_face_features_file) into the given dict.

    :param index: dict that will receive the preprocessing info for the frame
    :param feature: original preprocessing info saved in maps
    :return: index, mutated in place and returned for convenience
    """
    # Extract each field with its dedicated accessor, keeping the original
    # extraction order.
    extracted = {
        "face_conv": get_face_conv(feature),
        "gaze_conv": get_gaze_conv(feature),
        "face_roi_size": get_face_roi_size(feature),
        "eyes_roi_size": get_eyes_roi_size(feature),
        "face_warp": get_face_warp(feature),
        "leye_warp": get_leye_warp(feature),
        "reye_warp": get_reye_warp(feature),
        "bb": get_bb(feature),
        "landmarks": get_landmarks(feature),
    }
    extracted["min_landmark"], extracted["max_landmark"] = get_min_max_landmarks(feature)
    index.update(extracted)
    return index
def get_preprocess_info(self, features: np.ndarray):
    """
    Build the per-frame preprocessing dicts for (a series of) frames from the
    raw face_features maps.

    :param features: original preprocessing info saved in maps
    :return: list of dicts, one per frame, with preprocessing information
    """
    return [self.get_preprocess_info_ind({}, feature)
            for feature in copy_face_features(features)]
def preprocess_input_imgs(self, img: np.ndarray, info: dict):
    """
    Preprocess input images: crop the "original image" and warp the remaining
    images into the normalized space. All images are produced even if the
    chosen experiment does not use them all, to keep random-number generation
    (and thus results) reproducible.

    :param img: source image array
    :param info: preprocessing information for this specific frame
    :return: [original face crop, normalized face, left eye, right eye]
    """
    original_face = preprocess_oface(img, info["bb"])
    normalized_face = warp_image(img, info["face_warp"], info["face_roi_size"])
    left_eye = warp_image(img, info["leye_warp"], info["eyes_roi_size"])
    right_eye = warp_image(img, info["reye_warp"], info["eyes_roi_size"])
    return [original_face, normalized_face, left_eye, right_eye]
def preprocess_input_metadata(self, info: dict):
    """
    Preprocess input metadata (landmarks) by subtracting the mean face coordinate.

    :param info: preprocessing information for this specific frame
    :return: mean-centered landmarks
    """
    landmarks = info["landmarks"]
    return landmarks - np.mean(landmarks, axis=0)
def preprocess_input_metadata_norm(self, info: dict):
    """
    When landmarks must be converted to the normalized space: normalize them
    first, then mean center them.

    :param info: preprocessing information for this specific frame
    :return: normalized, mean-centered landmarks
    """
    raw_mean = np.mean(info["landmarks"], axis=0)
    norm_landmarks, norm_mean = transform_landmarks(info["landmarks"], info["face_conv"], raw_mean)
    return norm_landmarks - norm_mean
def preprocess_input_data(self, imgs: list, info: dict):
    """
    Preprocess all input data: the four images plus the (mean-centered)
    landmark metadata.

    :param imgs: list of input images
    :param info: preprocessing information for this specific frame
    :return: preprocessed/normalized data (images followed by metadata)
    """
    return self.preprocess_input_imgs(imgs, info) + [self.preprocess_input_metadata(info)]
def preprocess_input_data_norm(self, imgs: list, info: dict):
    """
    Preprocess all input data (images and metadata) when the metadata has to
    be converted to the normalized space.

    :param imgs: list of input images
    :param info: preprocessing information for this specific frame
    :return: preprocessed/normalized data (images followed by metadata)
    """
    return self.preprocess_input_imgs(imgs, info) + [self.preprocess_input_metadata_norm(info)]
def preprocess_input_label(self, label: np.ndarray, info: dict=None):
    """
    Preprocess the input label: convert a 3D unit gaze vector to 2D angles
    (yaw and pitch).

    :param label: 3D unit gaze vector (y)
    :param info: preprocessing info for this frame (unused; kept for API compatibility)
    :return: preprocessed label as 2D angles
    """
    return vector2angles(label)
def preprocess_input_label_norm(self, label: np.ndarray, info: dict):
    """
    Preprocess the input label: first convert it to the normalized space,
    then convert it to 2D angles.

    :param label: 3D unit gaze vector (y)
    :param info: preprocessing information for this specific frame
    :return: normalized, preprocessed label
    """
    gaze_in_norm_space = normalize_gaze(label, info["gaze_conv"])
    return self.preprocess_input_label(gaze_in_norm_space, info)
def resize_input_data(self, input_data: list, info: dict):
    """
    Resize input images to a size compatible with the model architecture.
    In this case metadata is not resized.

    :param input_data: list of input data (images and metadata); layout is
        [original face, normalized face, left eye, right eye, landmarks]
        as produced by preprocess_input_data.
    :param info: preprocessing information for this specific frame.
    :return: resized images; note the two eye crops are combined by
        resize_eyes into a single entry, so the result has 4 elements:
        [original face, normalized face, eyes, landmarks].
    """
    # Target sizes come from the base model's declared input sizes.
    # The landmarks entry (input_data[4]) is passed through untouched.
    return [resize_oface(input_data[0], info["bb"], self.base_model.input_size[input_type.FACE]),
            resize_nface(input_data[1], self.base_model.input_size[input_type.FACE]),
            resize_eyes(input_data[2], input_data[3], self.base_model.input_size[input_type.EYES]),
            input_data[4]]
def prepare_tensor_dims(self, input_data: list):
    """
    Modify tensor dimensions so they are compatible with the model
    architecture and Keras. All elements but the last are modified, since the
    code assumes the last element is the metadata.

    :param input_data: list of input data (images and metadata)
    :return: input data with ready-to-be-used dimensions
    """
    # NOTE: the unqualified prepare_tensor_dims below resolves to the
    # module-level helper, not to this method.
    images, metadata = input_data[:-1], input_data[-1]
    prepared = [prepare_tensor_dims(image.img_to_array(img)) for img in images]
    prepared.append(metadata)
    return prepared
def normalize_input_data(self, input_data: list, info: dict):
    """
    Mean-center each of the input images (not the metadata — the code assumes
    the last element of input_data is the metadata). Mean-centering values
    come from the base model.

    Note the metadata element itself is not included in the returned list;
    subclasses that feed metadata to the network append their own normalized
    version of it.

    :param input_data: list of input data (images and metadata)
    :param info: preprocessing information for this specific frame
    :return: list of mean-centered images
    """
    return [self.base_model.mean_center(img) for img in input_data[:-1]]
def normalize_metadata(self, landmarks: np.ndarray, info: dict):
    """
    Normalize/standardize the landmark metadata with a min/max normalization,
    then flatten to a single vector of 3 coordinates x 68 landmarks.

    :param landmarks: list of 3D landmarks
    :param info: preprocessing information for this specific frame
    :return: flattened, min/max-normalized landmarks of shape (204,)
    """
    scaled = min_max_normalization(landmarks, info["min_landmark"], info["max_landmark"], 20)
    return scaled.reshape((3 * 68,))
def arrange_final_data(self, input_data: list, batch_data: list, batch_pos: int, frame: int=0):
    """
    Select the data from input_data that will be fed to the network (all data
    is processed but only part of it is used, depending on the experiment and
    model architecture). The default experiment feeds only the original face
    image (input_data[0]).

    :param input_data: list of input data (images and metadata)
    :param batch_data: batch container to be fed to the network
    :param batch_pos: index within the batch (batch position)
    :param frame: frame number in sequence (only used with sequences)
    :return: batch_data
    """
    original_face = input_data[0]
    batch_data[batch_pos] = original_face
    return batch_data
def decide_input_label(self, input_labels: list, info: dict=None):
    """
    Decide the final label for this frame/sequence: by default, the label of
    the last frame.

    :param input_labels: list of input labels (one per frame in the sequence)
    :param info: preprocessing information for this frame (unused here)
    :return: the label to use in training
    """
    *_, last_label = input_labels
    return last_label
def compile_model(self, learning_rate: float):
    """
    Compile the experiment model.

    Delegates to the wrapped model object; subclasses may override to pass
    extra compile options (see NFEL5836GRU).

    :param learning_rate: learning rate
    """
    self.model.compile(learning_rate)
def define_model(self, dropout: float):
    """
    Define the model architecture.

    Base implementation is a no-op hook; each experiment subclass overrides
    it to build its own architecture.

    :param dropout: dropout value
    """
    pass
def load_model(self):
    """
    Define the architecture and load the trained weights for this experiment.
    """
    # A dropout value of 1 is passed when loading for inference — presumably
    # it neutralizes dropout; confirm against the model definitions.
    self.define_model(1)
    self.model.load_weights(exp_utils.get_file(self.weights))
@ExperimentHelper.register_subclass('OF4096')
class OF4096(ExperimentHelper):
    """
    OF4096 experiment: original face crop, 4096-D fully connected layer,
    finetuned VGGFace except the last fc. Works in the original (camera)
    space, so no normalization conversions are used.
    """
    def __init__(self):
        """
        Initialize exp.
        """
        super().__init__()
        self.name = "OF4096"
        self.description = "Original face, fc 4096D, finetuned VGGFace except last fc"
        self.weights = exp_utils.OF4096_VGG16
        self.base_model = BaseModel.get_base_model("VGGFace")
        self.model = get_model("face_finetune")
        print(self.name)
        print(self.description)
    def define_model(self, dropout: float):
        """
        Overridden. Defines model architecture based on experiment characteristics.
        :param dropout: dropout value
        """
        self.model.define(dropout=dropout, base_model=self.base_model)
    def get_preprocess_info(self, features: np.ndarray):
        """
        Overridden. Get necessary information from face_features to preprocess (a series of)
        frames and stores it in list of dicts.
        :param features: original preprocessing info saved in maps
        :return: list of dicts containing preprocessing information.
        """
        info = super().get_preprocess_info(features)
        # This experiment stays in the original space, so the gaze/face
        # conversion matrices are cleared — presumably to guarantee the
        # *_norm preprocessing paths are never applied; confirm with callers.
        for i in range(len(info)):
            info[i]["gaze_conv"] = None
            info[i]["face_conv"] = None
        return info
    def init_data_gen(self, data: DataTuple, batch_size: int=64, augmenter: ImageDataAugmenter=None,
                      shuffle: bool=False, debug: bool=False):
        """
        Overridden. Initialize new data generator. Overrides (and calls) super() method so that
        the non-"norm" preprocessing methods are used (data and labels remain in
        the original camera space).
        :param data: DataTuple including x, y and feats
        :param batch_size: batch size
        :param augmenter: augmenter object (ImageDataAugmenter)
        :param shuffle: True to shuffle input data
        :param debug: True if debug mode is activated to show augmentation and normalization image results
        """
        datagen = super().init_data_gen(data, batch_size, augmenter, shuffle, debug)
        # Rebind the generator's preprocessing hooks to the non-normalizing variants.
        datagen.set_methods(preprocess_input_data=self.preprocess_input_data,
                            preprocess_input_label=self.preprocess_input_label)
        return datagen
@ExperimentHelper.register_subclass('NF4096')
class NF4096(ExperimentHelper):
    """
    NF4096 experiment: normalized face image through a VGGFace backbone with
    4096-D fully connected layers trained from scratch.
    """

    def __init__(self):
        """Initialize experiment configuration."""
        super().__init__()
        self.name = "NF4096"
        self.description = "Normalized face, fc 4096D, fcs trained from scratch"
        self.weights = exp_utils.NF4096_VGG16
        self.base_model = BaseModel.get_base_model("VGGFace")
        self.model = get_model("face_fcscratch")
        print(self.name)
        print(self.description)

    def define_model(self, dropout: float):
        """
        Overridden. Build the face_fcscratch architecture for this experiment.
        :param dropout: dropout value
        """
        self.model.define(dropout=dropout, base_model=self.base_model)

    def arrange_final_data(self, input_data: list, batch_data: list, batch_pos: int, frame: int=0):
        """
        Overridden. Feed the normalized-face image to the network.
        :param input_data: list of input data (images and metadata);
            index 1 is the normalized face (see resize_input_data).
        :param batch_data: batch container to be fed to the network
        :param batch_pos: index within the batch (batch position)
        :param frame: frame number in sequence (only used with sequences)
        :return: batch_data
        """
        normalized_face = input_data[1]
        batch_data[batch_pos] = normalized_face
        return batch_data
@ExperimentHelper.register_subclass('NF5632')
class NF5632(ExperimentHelper):
    """
    NF5632 experiment: normalized face image through a VGGFace backbone with
    a 5632-D fully connected layer trained from scratch.
    """

    def __init__(self):
        """Initialize experiment configuration."""
        super().__init__()
        self.name = "NF5632"
        self.description = "Normalized face, fc 5632D, fcs trained from scratch"
        self.fc_dimensions = 5632
        self.weights = exp_utils.NF5632_VGG16
        self.base_model = BaseModel.get_base_model("VGGFace")
        self.model = get_model("face_fcscratch")
        print(self.name)
        print(self.description)

    def define_model(self, dropout: float):
        """
        Overridden. Build the face_fcscratch architecture with a wider hidden layer.
        :param dropout: dropout value
        """
        self.model.define(dropout=dropout, base_model=self.base_model, hidden_dim=self.fc_dimensions)

    def arrange_final_data(self, input_data: list, batch_data: list, batch_pos: int, frame: int=0):
        """
        Overridden. Feed the normalized-face image to the network.
        :param input_data: list of input data (images and metadata);
            index 1 is the normalized face (see resize_input_data).
        :param batch_data: batch container to be fed to the network
        :param batch_pos: index within the batch (batch position)
        :param frame: frame number in sequence (only used with sequences)
        :return: batch_data
        """
        normalized_face = input_data[1]
        batch_data[batch_pos] = normalized_face
        return batch_data
@ExperimentHelper.register_subclass('NE1536')
class NE1536(ExperimentHelper):
    """
    NE1536 experiment: normalized eyes only, 1536-D fully connected layer
    trained from scratch on a VGGFace backbone.
    """
    def __init__(self):
        """
        Initializes exp.
        """
        super().__init__()
        self.name = "NE1536"
        self.description = "Normalized eyes, fc 1536D, fcs trained from scratch"
        self.weights = exp_utils.NE1536_VGG16
        self.base_model = BaseModel.get_base_model("VGGFace")
        self.model = get_model("eyes_fcscratch")
        print(self.name)
        print(self.description)
    def define_model(self, dropout: float):
        """
        Overridden. Defines model architecture based on experiment characteristics.
        :param dropout: dropout value
        """
        self.model.define(dropout=dropout, base_model=self.base_model)
    def arrange_arrays(self, batch_size: int):
        """
        Overridden. Initialize data arrays for generator according to batch size and type of data.
        In this case EYES only (default).
        :param batch_size: batch size
        :return: empty data arrays with correct input size.
        """
        return [arrange_array(arrange_array_size(batch_size, self.base_model.input_size[input_type.EYES]))]
    def arrange_final_data(self, input_data: list, batch_data: list, batch_pos: int, frame: int=0):
        """
        Overridden. Select data from input_data that will be fed to the network (note that all data is processed but
        only some of it is fed to the network according to experiment and model architecture).
        :param input_data: list of input data (images and metadata).
        :param batch_data: list of data to be fed to network.
        :param batch_pos: index within the batch (batch position).
        :param frame: frame number in sequence (only used with sequences).
        :return: batch_data
        """
        # input_data[2] is the combined eyes image after resize_input_data.
        batch_data[batch_pos] = input_data[2]
        return batch_data
@ExperimentHelper.register_subclass('NFL4300')
class NFL4300(ExperimentHelper):
    """
    NFL4300 experiment: normalized face image plus landmark metadata,
    4300-D fully connected layer.
    """
    def __init__(self):
        """Initializes experiment"""
        super().__init__()
        self.name = "NFL4300"
        self.description = "Normalized face and landmarks, fc 4300D"
        self.fc_dimensions = 4300
        self.weights = exp_utils.NFL4300_VGG16
        # Landmark normalization bounds computed at training time; reused
        # when validating/testing (see prepare_metadata).
        self.min_lndmk = exp_utils.NFL4300_MIN_LNMDK
        self.max_lndmk = exp_utils.NFL4300_MAX_LNMDK
        self.base_model = BaseModel.get_base_model("VGGFace")
        self.model = get_model("face_fcscratch")
        print(self.name)
        print(self.description)
    def define_model(self, dropout: float):
        """
        Overridden. Defines model architecture based on experiment characteristics.
        :param dropout: dropout value
        """
        self.model.define(dropout=dropout, base_model=self.base_model, hidden_dim=self.fc_dimensions,
                          use_metadata=True)
    def arrange_arrays(self, batch_size: int):
        """
        Overridden. Initialize data arrays for generator according to batch size and type of data.
        In this case FACE and LANDMARKS.
        :param batch_size: batch size
        :return: empty data arrays with correct input size.
        """
        return [arrange_array(arrange_array_size(batch_size, self.base_model.input_size[input_type.FACE])),
                arrange_array(arrange_array_size(batch_size, self.base_model.input_size[input_type.LANDMARKS]))]
    def arrange_final_data(self, input_data: list, batch_data: list, batch_pos: int, frame: int=0):
        """
        Overridden. Select data from input_data that will be fed to the network (note that all data is processed but only
        some of it is fed to the network according to experiment and model architecture).
        :param input_data: list of input data (images and metadata).
        :param batch_data: list of data to be fed to network.
        :param batch_pos: index within the batch (batch position).
        :param frame: frame number in sequence (only used with sequences).
        :return: batch_data
        """
        # input_data indices after resize_input_data:
        # 1 = normalized face, 3 = landmarks.
        batch_data[0][batch_pos] = input_data[1]
        batch_data[1][batch_pos] = input_data[3]
        return batch_data
    def prepare_data(self, train_data: DataTuple, validation_data: DataTuple, args: list, train: bool=True): #Used
        """
        Perform all necessary actions to data before fitting model.
        In this case, metadata and regular data are processed.
        :param train_data: training data DataTuple
        :param validation_data: validation data DataTuple
        :param train: True if training, False if validating/testing
        :param args: list of possible arguments
        :return: modified (or not) training and validation data,
            and args containing the min/max training landmarks.
        """
        # Compute/attach landmark bounds first, then apply the base-class
        # dimension expansion.
        train_data, validation_data, args = self.prepare_metadata(train_data, validation_data, args, train)
        return super().prepare_data(train_data, validation_data, args, train)
    def normalize_input_data(self, input_data, info):
        """
        Overridden. Normalizes/standardizes input data (images and metadata) according to used functions.
        :param input_data: list of input data (images and metadata).
        :param info: preprocessing information for this specific frame.
        :return: normalized/standardized data
        """
        # Base class mean-centers the images; append the min/max-normalized
        # landmarks (input_data[3]) as the metadata element.
        return super().normalize_input_data(input_data, info) + [self.normalize_metadata(input_data[3], info)]
@ExperimentHelper.register_subclass('NFE5632')
class NFE5632(ExperimentHelper):
    """
    NFE5632 experiment: normalized face and eyes fed to a two-stream network
    with a 5632-D fully connected layer.
    """

    def __init__(self):
        """Initialize experiment configuration."""
        super().__init__()
        self.name = "NFE5632"
        self.description = "Normalized face and eyes, two-stream network, fc 5632D"
        self.fc_dimensions = 5632
        self.weights = exp_utils.NFE5632_VGG16
        self.base_model = BaseModel.get_base_model("VGGFace")
        self.model = get_model("two_stream")
        print(self.name)
        print(self.description)

    def define_model(self, dropout: float):
        """
        Overridden. Build the two-stream architecture for this experiment.
        :param dropout: dropout value
        """
        self.model.define(dropout=dropout, base_model=self.base_model, hidden_dim_last=self.fc_dimensions)

    def arrange_arrays(self, batch_size: int):
        """
        Overridden. Allocate empty FACE and EYES batch arrays sized for the base model.
        :param batch_size: batch size
        :return: empty data arrays with correct input size.
        """
        sizes = self.base_model.input_size
        return [arrange_array(arrange_array_size(batch_size, sizes[input_type.FACE])),
                arrange_array(arrange_array_size(batch_size, sizes[input_type.EYES]))]

    def arrange_final_data(self, input_data: list, batch_data: list, batch_pos: int, frame: int=0):
        """
        Overridden. Feed the normalized face and the combined eyes image to the
        two network streams.
        :param input_data: list of input data (images and metadata);
            indices after resize_input_data: 1 = normalized face, 2 = eyes.
        :param batch_data: list of data to be fed to network.
        :param batch_pos: index within the batch (batch position).
        :param frame: frame number in sequence (only used with sequences).
        :return: batch_data
        """
        face_batch, eyes_batch = batch_data[0], batch_data[1]
        face_batch[batch_pos] = input_data[1]
        eyes_batch[batch_pos] = input_data[2]
        return batch_data
@ExperimentHelper.register_subclass('NFEL5836')
class NFEL5836(ExperimentHelper):
    """
    NFEL5836 experiment: normalized face, eyes and landmarks fed to a
    two-stream + metadata network with a 5836-D fully connected layer.
    """
    def __init__(self):
        """Initializes exp."""
        super().__init__()
        self.name = "NFEL5836"
        self.description = "Normalized face, eyes and landmarks, two-stream + metadata network, fc 5836D"
        # NOTE(review): fc_dimensions is 2918 although the experiment name says
        # 5836 — presumably 5836 is the concatenated width before the last fc;
        # confirm against the two_stream model definition.
        self.fc_dimensions = 2918
        self.weights = exp_utils.NFEL5836_VGG16
        # Landmark normalization bounds fixed at training time.
        self.min_lndmk = exp_utils.NFEL5836_MIN_LNMDK
        self.max_lndmk = exp_utils.NFEL5836_MAX_LNMDK
        self.base_model = BaseModel.get_base_model("VGGFace")
        self.model = get_model("two_stream")
        print(self.name)
        print(self.description)
    def define_model(self, dropout: float):
        """
        Overridden. Defines model architecture based on experiment characteristics.
        :param dropout: dropout value
        """
        self.model.define(dropout=dropout, base_model=self.base_model, hidden_dim_last=self.fc_dimensions,
                          use_metadata=True)
    def arrange_arrays(self, batch_size: int):
        """
        Overridden. Initialize data arrays for generator according to batch size and type of data.
        In this case FACE, EYES and LANDMARKS.
        :param batch_size: batch size
        :return: empty data arrays with correct input size.
        """
        return [arrange_array(arrange_array_size(batch_size, self.base_model.input_size[input_type.FACE])),
                arrange_array(arrange_array_size(batch_size, self.base_model.input_size[input_type.EYES])),
                arrange_array(arrange_array_size(batch_size, self.base_model.input_size[input_type.LANDMARKS]))]
    def arrange_final_data(self, input_data: list, batch_data: list, batch_pos: int, frame: int=0):
        """
        Overridden. Select data from input_data that will be fed to the network (note that all data is processed but only
        some of it is fed to the network according to experiment and model architecture).
        :param input_data: list of input data (images and metadata).
        :param batch_data: list of data to be fed to network.
        :param batch_pos: index within the batch (batch position).
        :param frame: frame number in sequence (only used with sequences).
        :return: batch_data
        """
        # Indices after resize_input_data: 1 = normalized face, 2 = eyes, 3 = landmarks.
        batch_data[0][batch_pos] = input_data[1]
        batch_data[1][batch_pos] = input_data[2]
        batch_data[2][batch_pos] = input_data[3]
        return batch_data
    def prepare_data(self, train_data: DataTuple, validation_data: DataTuple, args: list, train: bool=True):
        """
        Overridden. Perform all necessary actions to data before fitting model.
        In this case, metadata and images are processed.
        :param train_data: training data DataTuple
        :param validation_data: validation data DataTuple
        :param train: True if training, False if validating/testing
        :param args: list of possible arguments
        :return: modified (or not) training and validation data,
            and args containing the min/max training landmarks.
        """
        train_data, validation_data, args = self.prepare_metadata(train_data, validation_data, args, train)
        return super().prepare_data(train_data, validation_data, args, train)
    def normalize_input_data(self, input_data: list, info: dict):
        """
        Overridden. Normalizes/standardizes input data (images and metadata) according to used functions.
        :param input_data: list of input data (images and metadata).
        :param info: preprocessing information for this specific frame.
        :return: normalized/standardized data
        """
        # Mean-centered images from the base class plus min/max-normalized landmarks.
        return super().normalize_input_data(input_data, info) + [self.normalize_metadata(input_data[3], info)]
@ExperimentHelper.register_subclass('NFEL5836_2918')
class NFEL5836_2918(NFEL5836):
    """
    NFEL5836_2918 experiment. Same as NFEL5836 but last FC has dimension of 2918D.
    Used only as the static feature extractor for the RNN models
    (see NFEL5836GRU).
    """
    def __init__(self):
        """ Initializes exp. """
        # NFEL5836.__init__ runs first (and prints its own name/description),
        # then the identity, weights and landmark bounds are overridden here.
        super().__init__()
        self.name = "NFEL5836_2918"
        self.description = "Normalized face, eyes and landmarks, two-stream + metadata network, fc 5836D-2918D"
        self.fc_dimensions = 2918
        self.weights = exp_utils.NFEL5836_2918_VGG16
        self.min_lndmk = exp_utils.NFEL5836_2918_MIN_LNMDK
        self.max_lndmk = exp_utils.NFEL5836_2918_MAX_LNMDK
        print(self.name)
        print(self.description)
@ExperimentHelper.register_subclass('NFEL5836GRU')
class NFEL5836GRU(ExperimentHelper):
    """
    NFEL5836GRU experiment: sequences of normalized face, eyes and landmarks
    through a frozen static feature model (NFEL5836_2918) followed by a
    GRU recurrent module.
    """
    def __init__(self):
        """Initializes exp."""
        super().__init__()
        self.name = "NFEL5836GRU"
        self.description = "Sequence of normalized face, eyes and landmarks. Frozen static model, fine-tune fusion " \
                           "layers and train RNN-GRU module from scratch"
        self.recurrent_type = "gru"
        self.num_recurrent_layers = 1
        self.num_recurrent_units = 128
        # Number of frames per input sequence.
        self.look_back = 4
        self.weights = exp_utils.NFEL5836GRU_VGG16
        self.min_lndmk = exp_utils.NFEL5836GRU_MIN_LNMDK
        self.max_lndmk = exp_utils.NFEL5836GRU_MAX_LNMDK
        # Use the last frame's label as the sequence label (see decide_input_label).
        self.label_pos = -1
        self.model = get_model("two_stream_rnn")
        print(self.name)
        print(self.description)
        # Static per-frame feature extractor; its base model is reused here.
        self.feature_arch = NFEL5836_2918()
        self.base_model = self.feature_arch.base_model
    def define_model(self, dropout: float):
        """
        Overridden. Defines model architecture based on experiment characteristics.
        :param dropout: dropout value
        """
        # Build and load the static feature model first, then wrap it with
        # the recurrent module.
        self.feature_arch.define_model(dropout)
        self.feature_arch.model.load_weights(exp_utils.get_file(self.feature_arch.weights))
        self.model.define(dropout=dropout, features_model=self.feature_arch.model.model,
                          base_model=self.base_model, n_units=self.num_recurrent_units,
                          lstm_layers=self.num_recurrent_layers, rec_type=self.recurrent_type)
    def compile_model(self, learning_rate: float):
        """
        Overridden. Compiles experiment model. Using ADAM optimizer that accumulates mini-batch updates.
        :param learning_rate: learning rate
        """
        self.model.compile(learning_rate, accum=True)
    def arrange_arrays(self, batch_size: int):
        """
        Overridden. Initialize data arrays for generator according to batch size and type of data.
        In this case FACE, EYES and LANDMARKS SEQUENCES of sequence length = self.look_back.
        :param batch_size: batch size
        :return: empty data arrays with correct input size.
        """
        return [arrange_array(
            arrange_sequence_array_size(batch_size, self.base_model.input_size[input_type.FACE], self.look_back)),
            arrange_array(
                arrange_sequence_array_size(batch_size, self.base_model.input_size[input_type.EYES], self.look_back)),
            arrange_array(
                arrange_sequence_array_size(batch_size, self.base_model.input_size[input_type.LANDMARKS], self.look_back))]
    def arrange_final_data(self, input_data: list, batch_data: list, batch_pos: int, frame: int=0):
        """
        Overridden. Select data from input_data that will be fed to the network (note that all data is processed but only
        some of it is fed to the network according to experiment and model architecture).
        :param input_data: list of input data (images and metadata).
        :param batch_data: list of data to be fed to network.
        :param batch_pos: index within the batch (batch position).
        :param frame: frame number in sequence (only used with sequences).
        :return: batch_data
        """
        # Fill sequence slot `frame` of batch entry `batch_pos` with
        # normalized face (1), eyes (2) and landmarks (3).
        batch_data[0][batch_pos][frame] = input_data[1]
        batch_data[1][batch_pos][frame] = input_data[2]
        batch_data[2][batch_pos][frame] = input_data[3]
        return batch_data
    def prepare_data(self, train_data: DataTuple, validation_data: DataTuple, args: list, train: bool=True):
        """
        Overridden. Perform all necessary actions to data before fitting model.
        In this case, metadata and images are processed.
        See arrange_sequences for more info.
        :param train_data: training data DataTuple
        :param validation_data: validation data DataTuple
        :param train: True if training, False if validating/testing
        :param args: list of possible arguments
        :return: modified (or not) training and validation data,
            and list of arguments containing max look back and max/min training landmarks
        """
        train_data, validation_data, args = self.prepare_metadata(train_data, validation_data, args, train)
        if train:
            train_data = arrange_sequences(train_data, self.look_back, self.look_back)
        if validation_data is not None:
            # NOTE(review): args['max_look_back'] is read here but never set
            # in this class — it appears to be supplied by the caller; confirm.
            validation_data = arrange_sequences(validation_data, self.look_back, args['max_look_back'])
        return train_data, validation_data, args
    def normalize_input_data(self, input_data: list, info: dict):
        """
        Overridden. Normalizes/standardizes input data (images and metadata) according to used functions.
        :param input_data: list of input data (images and metadata).
        :param info: preprocessing information for this specific frame.
        :return: normalized/standardized data
        """
        return super().normalize_input_data(input_data, info) + [self.normalize_metadata(input_data[3], info)]
    def decide_input_label(self, input_labels: list, info: dict=None):
        """
        Overridden. Decide final label for this frame/sequence.
        :param input_labels: list of input labels (there are as many labels as frames in sequence).
        :param info: preprocessing information for this specific frame.
        :return: value to be used as label in training (label of the frame at self.label_pos).
        """
        return input_labels[self.label_pos]
| StarcoderdataPython |
6572362 | <filename>examples/example_1.py<gh_stars>10-100
from causaleffect import createGraph, plotGraph, graph
'''Code in Figure 3.5 (a)'''
# Causal diagram edge list — '<->' presumably denotes a bidirected
# (latent-confounder) edge and '->' a directed edge; confirm against the
# causaleffect documentation.
edges = ['X<->Z', 'X<->W', 'X->Z', 'Z->W', 'W->Y', 'X->Y']
G = createGraph(edges)
# Print the edge list in the R-package notation used by causaleffect.
print(graph.to_R_notation(edges))
plotGraph(G)
| StarcoderdataPython |
6546422 | <reponame>kirichoi/tellurium
#!/usr/bin/env python
"""
Functions to work with event triggers and event handling.
Example demonstrates how to attach such functions.
"""
# TODO: test me, write example, update me
import roadrunner
# --------------------------------------------------
# Event handling functions
# --------------------------------------------------
def onEventTrigger(model, eventIndex, eventId):
    """Callback attached via Event.setOnTrigger (see testEvents); logs the
    event id and current model time when the event fires.

    :param model: executable model instance (provides getTime())
    :param eventIndex: index of the event (unused; required by callback signature)
    :param eventId: string id of the event
    """
    print("event {} was triggered at time {}".format(eventId, model.getTime()))
def onEventAssignment(model, eventIndex, eventId):
    """Callback attached via Event.setOnAssignment (see testEvents); logs the
    event id and current model time when the event's assignments execute.

    :param model: executable model instance (provides getTime())
    :param eventIndex: index of the event (unused; required by callback signature)
    :param eventId: string id of the event
    """
    # Fixed typo in the logged message: "assignend" -> "assigned".
    print("event {} was assigned at time {}".format(eventId, model.getTime()))
def testEvents(filePath):
    """ Attaches eventTrigger and eventAssignment functions to events.
    Runs simulation.

    :param filePath: path to the SBML model file loaded by roadrunner
    :type filePath: str
    """
    r = roadrunner.RoadRunner(filePath)
    # Register both callbacks on every event declared in the model.
    eventIds = r.model.getEventIds()
    for eid in eventIds:
        e = r.model.getEvent(eid)
        e.setOnTrigger(onEventTrigger)
        e.setOnAssignment(onEventAssignment)
    r.simulate()
# --------------------------------------------------
# Integration handling functions
# --------------------------------------------------
def onTimeStep(integrator, model, time):
    """ Is called after the internal integrator completes each internal time step.

    :param integrator: the integrator performing the step
    :param model: the executable model being integrated
    :param time: current simulation time
    """
    print("onTimeStep, time: {}".format(time))
def onEvent(integrator, model, time):
    """ Called whenever a model event occurs, after it is processed.

    :param integrator: the integrator that detected the event
    :param model: the executable model being integrated
    :param time: simulation time at which the event occurred
    """
    print("onEvent, time: {}".format(time))
def testMultiStepIntegrator(filePath, t0, tf, dt, minStep=-1, maxStep=-1):
    """Run a multi-step integration with a PyIntegratorListener attached.

    :param filePath: path to the SBML model file loaded by roadrunner
    :param t0: integration start time
    :param tf: integration end time
    :param dt: initial internal time step
    :param minStep: minimum internal time step (-1 leaves the integrator default)
    :param maxStep: maximum internal time step (-1 leaves the integrator default)
    """
    r = roadrunner.RoadRunner(filePath)
    # The listener receives onTimeStep/onEvent callbacks from the integrator.
    listener = roadrunner.PyIntegratorListener()
    listener.setOnTimeStep(onTimeStep)
    listener.setOnEvent(onEvent)
    r.integrator.setListener(listener)
    r.integrator.integratorFlags = roadrunner.SimulateOptions.MULTI_STEP
    r.integrator.initialTimeStep = dt
    r.integrator.maximumTimeStep = maxStep
    r.integrator.minimumTimeStep = minStep
    r.integrate(t0, tf)
| StarcoderdataPython |
4876754 | <reponame>SergeHall/Tony-Gaddis-Python-4th
class Pet:
    """Simple pet record with encapsulated name, animal type and age."""

    def __init__(self, name, animal_type, age):
        """
        Initialize a pet.
        :param name: the pet's name
        :param animal_type: kind of animal, e.g. "Dog"
        :param age: age in years
        """
        self.__name = name
        self.__animal_type = animal_type
        self.__age = age

    def get_name(self):
        """Return the pet's name."""
        return self.__name

    def get_animal_type(self):
        """Return the pet's animal type."""
        return self.__animal_type

    def get_age(self):
        """Return the pet's age."""
        return self.__age

    def set_name(self, name):
        """Set the pet's name."""
        self.__name = name

    def set_animal_type(self, animal_type):
        """Set the pet's animal type."""
        self.__animal_type = animal_type

    def set_age(self, age):
        """Set the pet's age.

        Added to fix a copy-paste defect: the age mutator was misnamed
        set_publisher_name, which made no sense for this class.
        """
        self.__age = age

    def set_publisher_name(self, age):
        """Deprecated, misnamed alias kept for backward compatibility;
        delegates to set_age."""
        self.set_age(age)

    def __str__(self):
        """Return a human-readable multi-line description of the pet."""
        return "Name: " + self.__name + "\n" \
               + "Animal type: " + self.__animal_type + "\n" \
               + "Age: " + str(self.__age)
def main():
    """Demonstrate the Pet class: create a pet, print it, then print each attribute."""
    # Create a pet with hard-coded values (translated from Russian: "entered manually").
    my_pet = Pet("Bobbik", "Dog", 7)
    print(my_pet)
    # Print the data by accessing the object through the class accessor
    # methods (translated from Russian; the original comment mentioned a
    # loop, but the calls are sequential).
    print(my_pet.get_name())
    print(my_pet.get_animal_type())
    print(my_pet.get_age())
main() | StarcoderdataPython |
3308000 | <gh_stars>10-100
from django.urls import include
from django.urls import path
from django.urls import register_converter
from common import views
from common.path_converters import NumericSIDConverter
# Make the custom "sid" path converter available for use in route patterns.
register_converter(NumericSIDConverter, "sid")

urlpatterns = [
    path("", views.index, name="index"),
    path("healthcheck", views.healthcheck, name="healthcheck"),
    path("login", views.LoginView.as_view(), name="login"),
    path("logout", views.LogoutView.as_view(), name="logout"),
    # Browsable-API login/logout routes provided by Django REST framework.
    path("api-auth/", include("rest_framework.urls")),
]
| StarcoderdataPython |
4971240 | <filename>images/producer/src/producer.py
# Copyright (c) 2021, Oracle and/or its affiliates.
# All rights reserved. The Universal Permissive License (UPL), Version 1.0 as shown at http://oss.oracle.com/licenses/upl
import json
import socket
import ssl
import sys
from datetime import datetime
from math import cos
from os import environ
from random import random
from time import sleep
from kafka import KafkaProducer
from log_util import get_logger
# Override kafka logger: control the kafka-python library's verbosity
# independently of this module's logger.
kafka_logger = get_logger('kafka', environ.get('KAFKA_LOG_LEVEL'))
# set local logger
logger = get_logger(__name__, environ.get('LOG_LEVEL'))

# Hostname is used as the message key in send_message below.
hostname = socket.gethostname()
# Delay between produced messages, in seconds (default 0.2).
sleep_time = float(environ.get('SLEEP_TIME', 0.2))
def get_producer():
    '''Initialize the connection to the streaming service (i.e. Kafka broker)
    and return a producer object.

    Connection settings (endpoint, stream pool id, user name, auth token)
    are read from environment variables. TLS 1.0/1.1 are disabled so only
    TLS 1.2+ is negotiated.
    '''
    # Create a new context using system defaults, then turn off TLS 1.0/1.1.
    # BUG FIX: the original used `&=`, which *clears* every option bit that
    # is not part of OP_NO_TLSv1*; `|=` is required to add the flags.
    context = ssl.create_default_context()
    context.options |= ssl.OP_NO_TLSv1
    context.options |= ssl.OP_NO_TLSv1_1

    message_endpoint = environ.get('messageEndpoint')
    kafka_brokers = f"{message_endpoint}:9092"
    username = environ.get('USERNAME')
    stream_pool_id = environ.get('streamPoolId')
    kafka_username = f"{username}/{stream_pool_id}"
    kafka_password = environ.get('KAFKA_PASSWORD')

    # the endpoint from the OCI Service broker includes the 'https://' scheme
    # and needs to be removed.
    if "https://" in kafka_brokers:
        kafka_brokers = kafka_brokers.replace("https://", "")

    # create a producer
    producer = KafkaProducer(
        bootstrap_servers=kafka_brokers,
        sasl_plain_username=kafka_username,   # tenancy/username/streampoolid
        sasl_plain_password=kafka_password,   # auth token (restored; was a redacted placeholder)
        security_protocol='SASL_SSL',
        ssl_context=context,
        sasl_mechanism='PLAIN',
        max_request_size=1024 * 1024,  # required to match the max write throughput of the service
        retries=5)
    return producer
def send_message(producer, key, message):
    """Send a message to the streaming service.

    producer: the producer connection to use
    key: a key to partition the data (str)
    message: the message payload (as a string)

    Returns the future returned by ``producer.send``; logs and re-raises
    any error.
    """
    try:
        logger.debug(message.encode('utf-8'))
        return producer.send(
            environ.get('TOPIC'),
            key=key.encode('utf-8'),
            value=message.encode('utf-8'))
    except Exception:
        # BUG FIX: the original called logger.error("Unexpected error:",
        # sys.exc_info()[0]), which misuses the logging API (an extra arg
        # with no %s placeholder). logger.exception logs the message plus
        # the full traceback.
        logger.exception("Unexpected error while sending message")
        # Bare `raise` preserves the original traceback (`raise e` would
        # re-raise from here).
        raise
if __name__ == '__main__':
    logger.info("connecting...")
    try:
        producer = get_producer()
    except Exception as e:
        logger.error(str(e))
        # NOTE(review): if get_producer() failed, `producer` is unbound and
        # the bootstrap_connected() call below raises NameError. In debug
        # mode the process first sleeps an hour, presumably to keep the
        # container alive for inspection -- confirm intent.
        if environ.get('KAFKA_LOG_LEVEL').lower() == 'debug':
            sleep(3600)
    logger.info(f"connected: {producer.bootstrap_connected()}")
    logger.info("ready to send")

    # Emit a noisy cosine wave forever, one JSON record per iteration,
    # keyed by this host's name.
    variance = 0
    i = 0
    while True:
        val = cos(i / 10.0) + variance
        result = send_message(producer, hostname, json.dumps({
            "value": val,
            "index": i,
            "ts": datetime.now().timestamp(),
            "hostname": hostname
        }))
        i += 1
        # Random-walk drift added on top of the wave.
        variance += (random() - 0.5) / 10.0
        sleep(sleep_time)
| StarcoderdataPython |
4924385 | from Climate import Climate, getDetermination
from math import sin, cos, radians, exp
class Location():
    """Clear-sky solar irradiance at a geographic location and time.

    Appears to implement the Hottel clear-sky model: a beam transmittance is
    estimated from altitude and climate type and combined with the
    extraterrestrial irradiance to give beam/diffuse components -- TODO
    confirm against the model reference.
    """

    # Class-level defaults; every field is overwritten in __init__.
    N = 0                          # day of the year (1..365)
    Latitude = 0                   # degrees
    Longitude = 0                  # degrees (stored but unused in the formulas below)
    Height = 0                     # altitude A used in the correlations (presumably km -- verify)
    ClimateType = Climate.NONE_TYPE
    Time = 0                       # local solar time, hours
    G_on = 0                       # extraterrestrial normal irradiance
    G_0 = 0                        # extraterrestrial horizontal irradiance
    G_cd = 0                       # clear-sky diffuse component
    G_cb = 0                       # clear-sky beam component
    CosOfZenithAngle = 0
    Permeance = 0                  # atmospheric beam transmittance

    def __init__(self, n, latitude, longitude, height, time, climateType):
        self.N = n
        self.Latitude = latitude
        self.Longitude = longitude
        self.Height = height
        self.ClimateType = climateType
        self.Time = time
        # All derived quantities are computed once at construction.
        self.CosOfZenithAngle = self.getCosOfZenithAngle()
        self.Permeance = self.getPermeance()
        self.G_on = self.getG_on()
        self.G_0 = self.getG_0()
        self.G_cd = self.getG_cd()
        self.G_cb = self.getG_cb()

    def getDeclination(self):  # solar declination angle, degrees
        return 23.45*sin(radians(360*(284+self.N)/365))

    def getSolarHourAngle(self):  # solar hour angle: 15 degrees per hour from solar noon
        return 15*(self.Time - 12)

    def getCosOfZenithAngle(self):  # cosine of the solar zenith angle
        declination = self.getDeclination()
        solarHourAngle = self.getSolarHourAngle()
        latitude = self.Latitude
        return cos(radians(latitude)) * cos(radians(declination)) * cos(radians(solarHourAngle)) \
            + sin(radians(latitude)) * sin(radians(declination))

    def getPermeance(self):
        """Beam transmittance, corrected by climate-type factors from getDetermination."""
        r = getDetermination(self.ClimateType)
        A = self.Height
        # Altitude-dependent standard-atmosphere coefficients.
        a0_ = 0.4237-0.00821*((6-A)**2)
        a1_ = 0.5055+0.00595*((6.5-A)**2)
        k_ = 0.2711+0.1858*((2.5-A)**2)
        # Apply the climate correction factors (r[0..2]).
        a0 = r[0]*a0_
        a1 = r[1]*a1_
        k = r[2]*k_
        # NOTE(review): divides by cos(zenith); undefined for the sun at or
        # below the horizon (cos <= 0) -- confirm callers only pass daytime hours.
        return a0 + a1 * exp(-k / self.CosOfZenithAngle)

    def getG_on(self):
        # Extraterrestrial normal irradiance with the earth-sun distance correction.
        G_SC = 1367  # solar constant
        return G_SC*(1 + 0.033 * cos(radians(360 / 365 * self.N)))

    def getG_0(self):
        # Project the normal irradiance onto the horizontal plane.
        return self.G_on * self.CosOfZenithAngle

    def getG_cb(self):
        # Clear-sky beam component.
        return self.G_0*self.Permeance

    def getG_cd(self):
        # Clear-sky diffuse component.
        return self.getPermeance2()*self.G_0

    def getPermeance2(self):
        # Empirical diffuse transmittance derived from the beam transmittance.
        return 0.271-0.294*self.Permeance

    def getH_instant(self):
        # Total instantaneous clear-sky irradiance: beam + diffuse.
        return self.G_cb + self.G_cd
| StarcoderdataPython |
4819421 | from setuptools import setup,find_packages
# Packaging metadata (entry point for `pip install .` / PyPI distribution).
setup(
    name='DDos',
    version='1.0.3',
    description='DDos any site',
    long_description=open('README.md').read(),
    long_description_content_type="text/markdown",
    url='https://github.com/donno2048/DDos',
    packages=find_packages(),
    include_package_data=True,
    license='MIT',
    author='<NAME>',  # NOTE(review): redacted placeholder left in the source
    install_requires=['fake-useragent'],
    classifiers=['Programming Language :: Python :: 3'],
    # Expose both lowercase and capitalized console commands for the same entry point.
    entry_points={ 'console_scripts': [ 'ddos=DDos.__main__:main', 'DDos=DDos.__main__:main' ] }
)
| StarcoderdataPython |
240984 | from rest_framework import serializers
from .models import Ranking, Track
class TrackSerializer(serializers.ModelSerializer):
    """Serializes a single Track row (only the listed fields)."""
    class Meta:
        model = Track
        fields = ("name", "duration", "track_number", "order")
class RankingSerializer(serializers.ModelSerializer):
    """Serializes a Ranking together with its nested track list."""
    # Nested, writable list of tracks; the create() override below persists it.
    tracks = TrackSerializer(many=True)

    class Meta:
        model = Ranking
        fields = ("handle", "artist", "album", "tracks")

    def create(self, validated_data):
        """Create the Ranking row, then one Track row per nested entry."""
        tracks_data = validated_data.pop("tracks")
        ranking = Ranking.objects.create(**validated_data)
        for track_data in tracks_data:
            Track.objects.create(ranking=ranking, **track_data)
        return ranking
| StarcoderdataPython |
3263237 | from django.apps import AppConfig
class Sql3Config(AppConfig):
    """Django application configuration for the SQL3 app."""
    name = 'SQL3'
| StarcoderdataPython |
1620843 | from ..utility import *
def handle(data, theme):
    """Respond to the incoming event by pinging back its zen message.

    The reply's ``zen`` flag is set only when arbitrary messages are
    disallowed by configuration.
    """
    zen_message = data['zen']
    zen_flag = not configValue('allowArbitraryMessages')
    theme.ping(message=zen_message, zen=zen_flag)
| StarcoderdataPython |
6403315 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 8 11:18:14 2018
SIFT特征点检测demo2
准备两张人脸图片,分别检测特征点并尝试匹配
若匹配成功,则可一定概率认为提供的两张人脸图片是同一个人
@author: zyb_as
"""
import cv2
import numpy as np
from PIL import Image
# 1) 以灰度图的形式读入图片
# 人脸模板
psd_img_1 = cv2.imread('jgz2.jpg', cv2.IMREAD_GRAYSCALE)
# 待匹配图片
psd_img_2 = cv2.imread('jgz1.jpg', cv2.IMREAD_GRAYSCALE)
image = Image.fromarray(psd_img_2) # 将图片缩放到统一尺寸便于显示
image = image.resize((480, 550))
psd_img_2 = np.array(image)
# 2) SIFT特征计算
sift = cv2.xfeatures2d.SIFT_create()
psd_kp1, psd_des1 = sift.detectAndCompute(psd_img_1, None)
psd_kp2, psd_des2 = sift.detectAndCompute(psd_img_2, None)
# 3) Flann特征匹配
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(psd_des1, psd_des2, k=2)
goodMatch = []
for m, n in matches:
# goodMatch是经过筛选的优质配对,如果2个配对中第一匹配的距离小于第二匹配的距离的1/2,基本可以说明这个第一配对是两幅图像中独特的,不重复的特征点,可以保留。
if m.distance < 0.75*n.distance:
goodMatch.append(m)
# 增加一个维度
goodMatch = np.expand_dims(goodMatch, 1)
print(goodMatch[:20])
#img_out = cv2.drawMatchesKnn(psd_img_1, psd_kp1, psd_img_2, psd_kp2, goodMatch[:15], None, flags=2)
img_out = cv2.drawMatchesKnn(psd_img_1, psd_kp1, psd_img_2, psd_kp2, goodMatch, None, flags=2)
cv2.imshow('image', img_out)#展示图片
cv2.waitKey(0)#等待按键按下
cv2.destroyAllWindows()#清除所有窗口
| StarcoderdataPython |
4801445 | #!/usr/bin/env python3
from numpy.random import randint
import pygame, sys, time
from detection import OpenCV
from objects import *
from layout import Button, Menu, printText
from handleScore import getScore, saveScore
pygame.init()
class Game:
    """Top-level game state: player, enemies, debris, menus and scoring.

    The player is steered by a webcam-based position detector (OpenCV);
    colliding enemies split into new ones, and the run ends when the player
    collides with an enemy or leaves the screen.
    """

    # Window size.
    w,h = 900,720
    screen = pygame.display.set_mode((w,h))
    # Webcam tracker shared across restarts (class attribute).
    detector = OpenCV()
    enemy_count = 20
    paused = True

    def __init__(self):
        self.player = Player(20, (255,0,0))
        self.debries = []   # debris particles spawned when enemies are destroyed
        self.enemies = []
        w,h = self.w,self.h
        # Best score persisted on disk (first entry of the saved ranking).
        self.high_score = getScore()[0]["score"]
        self.score = 0
        self.player_alive = True
        for i in range(self.enemy_count):
            self.enemies.append(Enemy())
        # Menus shown while paused and after the player dies, respectively.
        self.pause_menu = Menu((w//2-40,h//2-(20)*2),(100,60), 30,["PLAY",],[self.resume])
        self.restart_menu = Menu((w//2-40,h//2-(20)*2),(100,60), 30,["RESTART",],[self.restart])

    def update(self):
        # Advance one simulation tick; does nothing while paused.
        if not self.paused:
            self.score+=.2
            # Map the detector's 640x480 coordinates onto the window size
            # (assumes the detector reports in a 640x480 frame -- TODO confirm).
            pos = self.detector.get_pos()
            pos = (pos[0]*self.w/640,pos[1]*self.h/480)
            self.player.set_target(pos[0],pos[1])
            # self.player.set_target(200,300)
            self.player.update()
            ei = 0
            # NOTE(review): the loop bound is the fixed class constant
            # enemy_count (20) while the list can grow past it when enemies
            # split; extra enemies are only advanced by the update loop below.
            while ei!=self.enemy_count:
                enemy = self.enemies[ei]
                player = self.player
                damaged_enemy = enemy.hit(self.enemies)
                if enemy.exceeds_boundary(self.w,self.h):
                    # Off-screen enemies are recycled with fresh ones.
                    self.enemies.remove(enemy)
                    self.enemies.append(Enemy())
                elif damaged_enemy and enemy.inside_area(0,self.w,0,self.h):
                    # Two colliding enemies are destroyed and replaced by two
                    # new ones plus a burst of debris particles.
                    self.enemies.remove(damaged_enemy)
                    self.enemies.remove(enemy)
                    self.enemies.extend([Enemy(),Enemy()])
                    enemy.damage()
                    for i in range(20):
                        self.debries.extend([Rocks(enemy.get_pos(),8,color=randint(0,200,size=3)),Rocks(damaged_enemy.get_pos(),8,color=(200,100,50))])
                    continue
                    # i+=1
                else:
                    ei+=1
            # Drop debris particles whose lifetime expired.
            di = 0
            while di!=len(self.debries):
                deb = self.debries[di]
                if not deb.alive():
                    self.debries.remove(deb)
                else :
                    di+=1
            for e in self.enemies:
                e.update()
            for d in self.debries:
                d.update()
            # Check the player's collision with enemies and boundaries.
            if player.isCollide(self.enemies) or player.exceeds_boundary():
                self.player_alive = False
                self.stop()

    def draw(self,surf):
        # Aim line and target marker from the player to its tracked target.
        pygame.draw.line(surf,(200,200,0),self.player.get_pos(),self.player.get_target(),2)
        pygame.draw.circle(surf,(0,100,200),self.player.get_target(),4)
        self.player.draw(surf)
        for e in self.enemies:
            e.draw(surf)
        for d in self.debries:
            d.draw(surf)
        printText(surf,(20,20),"SCORE: {}".format(int(self.score)), 30)
        printText(surf,(self.w-200 ,20),"HIGHSCORE: {}".format(self.high_score), 30)
        # Overlay the appropriate menu when paused or after death.
        if self.paused and self.player_alive:
            self.pause_menu.draw(surf)
        elif not self.player_alive:
            self.restart_menu.draw(surf)

    def checkEvents(self,ev):
        if ev.type == pygame.QUIT:
            self.quit()
        if ev.type == pygame.KEYDOWN:
            if ev.key == pygame.K_ESCAPE:
                self.quit()
        # The menus handle their own input only in the matching state.
        if self.paused and self.player_alive:
            self.pause_menu.checkEvents()
        elif not self.player_alive:
            self.restart_menu.checkEvents()

    def quit(self):
        # Release the webcam before terminating the process.
        self.detector.stop()
        sys.exit()

    def resume(self):
        # Unpause the game and every animated object.
        self.paused = False
        self.player.resume()
        for e in self.enemies:
            e.resume()
        for deb in self.debries:
            deb.resume()

    def stop(self):
        # Pause the game and every animated object.
        self.paused = True
        self.player.stop()
        for e in self.enemies:
            e.stop()
        for deb in self.debries:
            deb.stop()

    def restart(self):
        self.paused = False
        # Persist the finished run's score, then reset all state in place.
        saveScore(int(self.score))
        self.__init__()
def main():
    """Run the game loop at ~20 FPS until the window is closed."""
    clock = pygame.time.Clock()
    game = Game()
    screen = game.screen
    while True:
        clock.tick(20)  # cap the loop at 20 frames per second
        for ev in pygame.event.get():
            game.checkEvents(ev)
        screen.fill((0,200,200))
        # update() is a no-op while the game is paused.
        game.update()
        game.draw(screen)
        pygame.display.update()
    # BUG FIX: the original called pygame.QUIT() here; pygame.QUIT is an
    # event-type *constant* (an int), not a callable, so that line would
    # raise TypeError. It is unreachable anyway (the loop only exits via
    # Game.quit() -> sys.exit()), but the correct shutdown call is
    # pygame.quit().
    pygame.quit()

if __name__ =="__main__":
    main()
11335271 | <reponame>vipshop/Tuplenet<gh_stars>0
from pyDatalog import pyDatalog
import physical_flow
import middle_table as mid
import lsp_ingress
import lsp_egress
import lrp_ingress
import lrp_egress
import pkt_trace
import action
import match
from reg import *
from logicalview import *
from flow_common import *
# Declare the pyDatalog predicate names used by the clause definitions below.
pyDatalog.create_terms('build_flows_phy')
pyDatalog.create_terms('build_flows_mid')
pyDatalog.create_terms('build_flows_lsp')
pyDatalog.create_terms('build_flows_lrp')
pyDatalog.create_terms('build_flows_drop')
pyDatalog.create_terms('build_flows')
pyDatalog.create_terms('build_const_flows')
def init_build_flows_clause(options):
    """Register every pyDatalog clause used to generate OVS flows.

    Called once at startup. The per-module clause initializers run first,
    then the aggregation rules tie the module-level predicates (physical,
    middle-table, lsp, lrp, drop) into two top-level predicates:

      * ``build_flows``       -- rules carrying a State argument
      * ``build_const_flows`` -- one-shot rules executed only once

    Each concrete rule follows the same shape: bind ``Table`` to a pipeline
    stage constant, query a module-level flow generator, append a ``note``
    action tagging the flow's origin, and combine the partial match/action
    lists.
    """
    action.init_action_clause()
    match.init_match_clause()
    init_entity_clause(options)
    physical_flow.init_physical_flow_clause(options)
    lsp_ingress.init_lsp_ingress_clause(options)
    lsp_egress.init_lsp_egress_clause(options)
    lrp_ingress.init_lrp_ingress_clause(options)
    lrp_egress.init_lrp_egress_clause(options)

    # Aggregate the per-module stateful predicates into build_flows.
    build_flows(Table, Priority, Match, Action, State) <= (
        build_flows_lrp(Table, Priority, Match, Action, State))
    build_flows(Table, Priority, Match, Action, State) <= (
        build_flows_lsp(Table, Priority, Match, Action, State))
    build_flows(Table, Priority, Match, Action, State) <= (
        build_flows_phy(Table, Priority, Match, Action, State))
    build_flows(Table, Priority, Match, Action, State) <= (
        build_flows_mid(Table, Priority, Match, Action, State))

    # build const flows which were executed only once
    build_const_flows(Table, Priority, Match, Action) <= (
        build_flows_drop(Table, Priority, Match, Action))
    build_const_flows(Table, Priority, Match, Action) <= (
        build_flows_phy(Table, Priority, Match, Action))
    build_const_flows(Table, Priority, Match, Action) <= (
        build_flows_lsp(Table, Priority, Match, Action))
    build_const_flows(Table, Priority, Match, Action) <= (
        build_flows_lrp(Table, Priority, Match, Action))
    build_const_flows(Table, Priority, Match, Action) <= (
        build_flows_mid(Table, Priority, Match, Action))

    # build physical flows
    build_flows_phy(Table, Priority, Match, Action, State) <= (
        (Table == TABLE_CONVERT_PHY_LOGICAL) &
        physical_flow.convert_phy_logical(Priority, Match, Action1, State) &
        action.note(flows_note2idx('convert_phy_logical'), Action2) &
        (Action == Action1 + Action2)
    )

    build_flows_phy(Table, Priority, Match, Action, State) <= (
        physical_flow.arp_feedback_construct(LS, Priority, Match2, Action, State) &
        # TODO adding note here introduce performance regression
        # should figure out the root cause
        match.datapath(LS[LS_ID], Match1) &
        (Match == Match1 + Match2) &
        (Table == TABLE_ARP_FEEDBACK_CONSTRUCT)
    )

    build_flows_phy(Table, Priority, Match, Action) <= (
        physical_flow.output_pkt_by_reg(Priority, Match, Action1) &
        action.note(flows_note2idx('output_pkt'), Action2) &
        (Action == Action1 + Action2) &
        (Table == TABLE_OUTPUT_PKT)
    )

    build_flows_phy(Table, Priority, Match, Action) <= (
        pkt_trace.trace_pipeline_module(Match1, Action1) &
        # NOTE: refresh TUN_METADATA0_IDX, may output to remote chassis
        action.move(NXM_Reg(REG_FLAG_IDX, 0, 31),
                    NXM_Reg(TUN_METADATA0_IDX, 32, 63), Action2) &
        physical_flow.output_pkt_by_reg(Priority1, Match2, Action3) &
        (Priority == Priority1 + 10) &
        action.note(flows_note2idx('pkt_trace_output_pkt'), Action4) &
        (Match == Match1 + Match2) &
        (Action == Action1 + Action2 + Action3 + Action4) &
        (Table == TABLE_OUTPUT_PKT)
    )

    # build middle table flows
    build_flows_mid(Table, Priority, Match, Action) <= (
        mid.embed_metadata(Priority, Match, Action1) &
        action.note(flows_note2idx('embed_metadata'), Action2) &
        (Action == Action1 + Action2) &
        (Table == TABLE_EMBED2_METADATA)
    )

    build_flows_mid(Table, Priority, Match, Action) <= (
        mid.extract_metadata(Priority, Match, Action1) &
        action.note(flows_note2idx('extract_metadata'), Action2) &
        (Action == Action1 + Action2) &
        (Table == TABLE_EXTRACT_METADATA)
    )

    build_flows_mid(Table, Priority, Match, Action) <= (
        mid.pipeline_forward(Priority, Match, Action1) &
        action.note(flows_note2idx('pipeline_forward'), Action2) &
        (Action == Action1 + Action2) &
        (Table == TABLE_PIPELINE_FORWARD)
    )

    build_flows_mid(Table, Priority, Match, Action, State) <= (
        mid.redirect_other_chassis(Priority, Match, Action1, State) &
        action.note(flows_note2idx('redirect_other_chassis'), Action2) &
        (Action == Action1 + Action2) &
        (Table == TABLE_REDIRECT_CHASSIS)
    )

    # const flow
    build_flows_mid(Table, Priority, Match, Action) <= (
        mid.redirect_other_chassis(Priority, Match, Action1) &
        action.note(flows_note2idx('redirect_other_chassis'), Action2) &
        (Action == Action1 + Action2) &
        (Table == TABLE_REDIRECT_CHASSIS)
    )

    # build flows for logical port ingress pipline
    build_flows_lsp(Table, Priority, Match, Action, State) <= (
        (Table == TABLE_LSP_INGRESS_UNTUNNEL) &
        lsp_ingress.lsp_untunnel_deliver(LS, Priority, Match2, Action1, State) &
        action.note(flows_note2idx('lsp_untunnel_deliver'), Action2) &
        (Action == Action1 + Action2) &
        (match.datapath(LS[LS_ID], Match1)) &
        (Match == Match1 + Match2)
    )

    build_flows_lsp(Table, Priority, Match, Action, State) <= (
        (Table == TABLE_LSP_INGRESS_OUTPUT_DST_PORT) &
        lsp_ingress.lsp_output_dst_port(LS, Priority, Match2, Action1, State) &
        action.note(flows_note2idx('lsp_output_dst_port'), Action2) &
        (Action == Action1 + Action2) &
        (match.datapath(LS[LS_ID], Match1)) &
        (Match == Match1 + Match2)
    )

    build_flows_lsp(Table, Priority, Match, Action, State) <= (
        (Table == TABLE_LSP_INGRESS_LOOKUP_DST_PORT) &
        lsp_ingress.lsp_lookup_dst_port(LS, Priority, Match2, Action1, State) &
        action.note(flows_note2idx('lsp_lookup_dst_port'), Action2) &
        (Action == Action1 + Action2) &
        (match.datapath(LS[LS_ID], Match1)) &
        (Match == Match1 + Match2)
    )

    build_flows_lsp(Table, Priority, Match, Action, State) <= (
        (Table == TABLE_LSP_INGRESS_ARP_CONTROLLER) &
        lsp_ingress.lsp_arp_controller(LS, Priority, Match2, Action1, State) &
        action.note(flows_note2idx('lsp_arp_controller'), Action2) &
        (Action == Action1 + Action2) &
        (match.datapath(LS[LS_ID], Match1)) &
        (Match == Match1 + Match2)
    )

    build_flows_lsp(Table, Priority, Match, Action, State) <= (
        (Table == TABLE_LSP_INGRESS_ARP_RESPONSE) &
        lsp_ingress.lsp_arp_response(LS, Priority, Match2, Action1, State) &
        action.note(flows_note2idx('lsp_arp_response'), Action2) &
        (Action == Action1 + Action2) &
        (match.datapath(LS[LS_ID], Match1)) &
        (Match == Match1 + Match2)
    )

    # build flows for logical port egress pipline
    build_flows_lsp(Table, Priority, Match, Action, State) <= (
        (Table == TABLE_LSP_EGRESS_JUDGE_LOOPBACK) &
        lsp_egress.lsp_judge_loopback(LS, Priority, Match2, Action1, State) &
        action.note(flows_note2idx('lsp_judge_loopback'), Action2) &
        (Action == Action1 + Action2) &
        (match.datapath(LS[LS_ID], Match1)) &
        (Match == Match1 + Match2)
    )

    build_flows_lsp(Table, Priority, Match, Action, State) <= (
        (Table == TABLE_LSP_EGRESS_FORWARD_PACKET) &
        lsp_egress.lsp_forward_packet(LS, Priority, Match2, Action1, State) &
        action.note(flows_note2idx('lsp_forward_packet'), Action2) &
        (Action == Action1 + Action2) &
        (match.datapath(LS[LS_ID], Match1)) &
        (Match == Match1 + Match2)
    )

    build_flows_lsp(Table, Priority, Match, Action, State) <= (
        (Table == TABLE_LSP_EGRESS_PUSHOUT) &
        lsp_egress.lsp_pushout_packet(LS, Priority, Match2, Action1, State) &
        action.note(flows_note2idx('lsp_pushout_packet'), Action2) &
        (Action == Action1 + Action2) &
        (match.datapath(LS[LS_ID], Match1)) &
        (Match == Match1 + Match2)
    )

    # build const trace flow in first stage of lsp ingress
    build_flows_lsp(Table, Priority, Match, Action) <= (
        (Table == TABLE_LSP_TRACE_INGRESS_IN) &
        action.load(0, NXM_Reg(REG_DST_IDX), Action1) &
        pkt_trace.trace_pipeline_start(Priority, Match, Action2) &
        action.note(flows_note2idx('pkt_trace_lsp_ingress_in'), Action3) &
        (Action == Action1 + Action2 + Action3)
    )

    # build trace flow in end stage of lsp ingress
    # because the end stage of lsp ingress has no uniq path, so
    # we have to add similar flows(simliar to regular flow) to trace
    build_flows_lsp(Table, Priority, Match, Action, State) <= (
        (Table == TABLE_LSP_TRACE_INGRESS_OUT) &
        pkt_trace.trace_pipeline_module(Match1, Action1) &
        lsp_ingress.lsp_output_dst_port(LS, Priority1, Match2, Action2, State) &
        (Priority == Priority1 + 10) &
        (match.datapath(LS[LS_ID], Match3)) &
        (Match == Match1 + Match2 + Match3) &
        action.note(flows_note2idx('pkt_trace_lsp_output_dst_port'), Action3) &
        (Action == Action1 + Action2 + Action3)
    )

    # build const trace flow in first stage of lsp egress
    build_flows_lsp(Table, Priority, Match, Action) <= (
        (Table == TABLE_LSP_TRACE_EGRESS_IN) &
        pkt_trace.trace_pipeline_start(Priority, Match, Action1) &
        action.note(flows_note2idx('pkt_trace_lsp_egress_in'), Action2) &
        (Action == Action1 + Action2)
    )

    # build const flows to forward packet to third party table
    build_flows_lsp(Table, Priority, Match, Action) <= (
        (Table == TABLE_LSP_INGRESS_PROCESS_EXT_LOGIC) &
        (Priority == 0) &
        match.match_none(Match) &
        action.resubmit_table(TABLE_THIRD_PARTY, Action1) &
        action.note(flows_note2idx('process_third_logic'), Action2) &
        (Action == Action1 + Action2)
    )

    build_flows_lsp(Table, Priority, Match, Action) <= (
        (Table == TABLE_THIRD_PARTY) &
        (Priority == 0) &
        match.match_none(Match) &
        action.resubmit_table(TABLE_LSP_INGRESS_PROCESS_EXT_LOGIC+1, Action1) &
        action.note(flows_note2idx('process_third_logic'), Action2) &
        (Action == Action1 + Action2)
    )

    # build trace flow in end stage of lsp egress
    # because the end stage of lsp egress has no uniq path, so
    # we have to add similar flows(simliar to regular flow) to trace
    build_flows_lsp(Table, Priority, Match, Action, State) <= (
        (Table == TABLE_LSP_TRACE_EGRESS_OUT) &
        pkt_trace.trace_pipeline_module(Match1, Action1) &
        lsp_egress.lsp_pushout_packet(LS, Priority1, Match2, Action2, State) &
        action.note(flows_note2idx('pkt_trace_lsp_pushout_packet'), Action3) &
        (Priority == Priority1 + 10) &
        (match.datapath(LS[LS_ID], Match3)) &
        (Match == Match1 + Match2 + Match3) &
        (Action == Action1 + Action2 + Action3)
    )

    #-----------------------------LRP---------------------------------------------
    # build flows for logical router port ingress pipline
    build_flows_lrp(Table, Priority, Match, Action, State) <= (
        (Table == TABLE_LRP_INGRESS_PKT_RESPONSE) &
        lrp_ingress.lrp_pkt_response(LR, Priority, Match2, Action1, State) &
        action.note(flows_note2idx('lrp_pkt_response'), Action2) &
        (Action == Action1 + Action2) &
        match.datapath(LR[LR_ID], Match1) &
        (Match == Match1 + Match2)
    )

    build_flows_lrp(Table, Priority, Match, Action, State) <= (
        (Table == TABLE_LRP_INGRESS_DROP_UNEXPECT) &
        lrp_ingress.lrp_drop_unexpect(LR, Priority, Match2, Action1, State) &
        action.note(flows_note2idx('lrp_drop_unexpect'), Action2) &
        (Action == Action1 + Action2) &
        match.datapath(LR[LR_ID], Match1) &
        (Match == Match1 + Match2)
    )

    build_flows_lrp(Table, Priority, Match, Action, State) <= (
        (Table == TABLE_LRP_INGRESS_UNSNAT_STAGE1) &
        lrp_ingress.lrp_ip_unsnat_stage1(LR, Priority, Match2, Action1, State) &
        action.note(flows_note2idx('lrp_ip_unsnat_stage1'), Action2) &
        (Action == Action1 + Action2) &
        match.datapath(LR[LR_ID], Match1) &
        (Match == Match1 + Match2)
    )

    build_flows_lrp(Table, Priority, Match, Action, State) <= (
        (Table == TABLE_LRP_INGRESS_UNSNAT_STAGE2) &
        lrp_ingress.lrp_ip_unsnat_stage2(LR, Priority, Match2, Action1, State) &
        action.note(flows_note2idx('lrp_ip_unsnat_stage2'), Action2) &
        (Action == Action1 + Action2) &
        match.datapath(LR[LR_ID], Match1) &
        (Match == Match1 + Match2)
    )

    build_flows_lrp(Table, Priority, Match, Action, State) <= (
        (Table == TABLE_LRP_INGRESS_DNAT_STAGE1) &
        lrp_ingress.lrp_ip_dnat_stage1(LR, Priority, Match2, Action1, State) &
        action.note(flows_note2idx('lrp_ip_dnat_stage1'), Action2) &
        (Action == Action1 + Action2) &
        match.datapath(LR[LR_ID], Match1) &
        (Match == Match1 + Match2)
    )

    build_flows_lrp(Table, Priority, Match, Action, State) <= (
        (Table == TABLE_LRP_INGRESS_DNAT_STAGE2) &
        lrp_ingress.lrp_ip_dnat_stage2(LR, Priority, Match2, Action1, State) &
        action.note(flows_note2idx('lrp_ip_dnat_stage2'), Action2) &
        (Action == Action1 + Action2) &
        match.datapath(LR[LR_ID], Match1) &
        (Match == Match1 + Match2)
    )

    build_flows_lrp(Table, Priority, Match, Action, State) <= (
        (Table == TABLE_LRP_INGRESS_IP_ROUTE) &
        lrp_ingress.lrp_ip_route(LR, Priority, Match2, Action1, State) &
        action.note(flows_note2idx('lrp_ip_route'), Action2) &
        (Action == Action1 + Action2) &
        match.datapath(LR[LR_ID], Match1) &
        (Match == Match1 + Match2)
    )

    build_flows_lrp(Table, Priority, Match, Action, State) <= (
        (Table == TABLE_LRP_INGRESS_ECMP) &
        lrp_ingress.lrp_ecmp_judge(LR, Priority, Match2, Action1, State) &
        action.note(flows_note2idx('lrp_ecmp_judge'), Action2) &
        (Action == Action1 + Action2) &
        match.datapath(LR[LR_ID], Match1) &
        (Match == Match1 + Match2)
    )

    # build flows for logical router port egress pipline
    build_flows_lrp(Table, Priority, Match, Action, State) <= (
        (Table == TABLE_LRP_EGRESS_UPDATE_ETH_DST) &
        lrp_egress.lrp_update_eth_dst(LR, Priority, Match2, Action1, State) &
        action.note(flows_note2idx('lrp_update_eth_dst'), Action2) &
        (Action == Action1 + Action2) &
        match.datapath(LR[LR_ID], Match1) &
        (Match == Match1 + Match2)
    )

    build_flows_lrp(Table, Priority, Match, Action, State) <= (
        (Table == TABLE_LRP_EGRESS_UNDNAT_STAGE1) &
        lrp_egress.lrp_ip_undnat_stage1(LR, Priority, Match2, Action1, State) &
        action.note(flows_note2idx('lrp_ip_undnat_stage1'), Action2) &
        (Action == Action1 + Action2) &
        match.datapath(LR[LR_ID], Match1) &
        (Match == Match1 + Match2)
    )

    build_flows_lrp(Table, Priority, Match, Action, State) <= (
        (Table == TABLE_LRP_EGRESS_UNDNAT_STAGE2) &
        lrp_egress.lrp_ip_undnat_stage2(LR, Priority, Match2, Action1, State) &
        action.note(flows_note2idx('lrp_ip_undnat_stage2'), Action2) &
        (Action == Action1 + Action2) &
        match.datapath(LR[LR_ID], Match1) &
        (Match == Match1 + Match2)
    )

    build_flows_lrp(Table, Priority, Match, Action, State) <= (
        (Table == TABLE_LRP_EGRESS_SNAT_STAGE1) &
        lrp_egress.lrp_ip_snat_stage1(LR, Priority, Match2, Action1, State) &
        action.note(flows_note2idx('lrp_ip_snat_stage1'), Action2) &
        (Action == Action1 + Action2) &
        match.datapath(LR[LR_ID], Match1) &
        (Match == Match1 + Match2)
    )

    build_flows_lrp(Table, Priority, Match, Action, State) <= (
        (Table == TABLE_LRP_EGRESS_SNAT_STAGE2) &
        lrp_egress.lrp_ip_snat_stage2(LR, Priority, Match2, Action1, State) &
        action.note(flows_note2idx('lrp_ip_snat_stage2'), Action2) &
        (Action == Action1 + Action2) &
        match.datapath(LR[LR_ID], Match1) &
        (Match == Match1 + Match2)
    )

    build_flows_lrp(Table, Priority, Match, Action, State) <= (
        (Table == TABLE_LRP_EGRESS_HANDLE_UNK_PKT) &
        lrp_egress.lrp_handle_unknow_dst_pkt(LR, Priority, Match2, Action1, State) &
        action.note(flows_note2idx('lrp_handle_unknow_dst_pkt'), Action2) &
        (Action == Action1 + Action2) &
        match.datapath(LR[LR_ID], Match1) &
        (Match == Match1 + Match2)
    )

    build_flows_lrp(Table, Priority, Match, Action, State) <= (
        (Table == TABLE_LRP_EGRESS_FORWARD_PACKET) &
        lrp_egress.lrp_forward_packet(LR, Priority, Match2, Action1, State) &
        action.note(flows_note2idx('lrp_forward_packet'), Action2) &
        (Action == Action1 + Action2) &
        match.datapath(LR[LR_ID], Match1) &
        (Match == Match1 + Match2)
    )

    # build const trace flow in first stage of lrp ingress
    build_flows_lrp(Table, Priority, Match, Action) <= (
        (Table == TABLE_LRP_TRACE_INGRESS_IN) &
        action.load(0, NXM_Reg(REG_DST_IDX), Action1) &
        pkt_trace.trace_pipeline_start(Priority, Match, Action2) &
        action.note(flows_note2idx('pkt_trace_lrp_ingress_in'), Action3) &
        (Action == Action1 + Action2 + Action3)
    )

    # build const trace flow in last stage of lrp ingress
    build_flows_lrp(Table, Priority, Match, Action) <= (
        (Table == TABLE_LRP_TRACE_INGRESS_OUT) &
        pkt_trace.trace_pipeline_end(Priority, Match, Action1) &
        action.resubmit_table(TABLE_LRP_EGRESS_FIRST, Action2) &
        action.note(flows_note2idx('pkt_trace_lrp_ingress_out'), Action3) &
        (Action == Action1 + Action2 + Action3)
    )

    # build const trace flow in first stage of lrp egress
    build_flows_lrp(Table, Priority, Match, Action) <= (
        (Table == TABLE_LRP_TRACE_EGRESS_IN) &
        pkt_trace.trace_pipeline_start(Priority, Match, Action1) &
        action.note(flows_note2idx('pkt_trace_lrp_egress_in'), Action2) &
        (Action == Action1 + Action2)
    )

    # build const trace flow in last stage of lrp egress
    build_flows_lrp(Table, Priority, Match, Action) <= (
        (Table == TABLE_LRP_TRACE_EGRESS_OUT) &
        pkt_trace.trace_pipeline_end(Priority, Match, Action1) &
        action.resubmit_table(TABLE_LSP_INGRESS_FIRST, Action2) &
        action.note(flows_note2idx('pkt_trace_lrp_egress_out'), Action3) &
        (Action == Action1 + Action2 + Action3)
    )

    #---------------------const drop table--------------------------------
    build_flows_drop(Table, Priority, Match, Action) <= (
        (Priority == 0) &
        (Table == TABLE_DROP_PACKET) &
        match.match_none(Match) &
        action.drop(Action)
    )

    build_flows_drop(Table, Priority, Match, Action) <= (
        (Priority == 1) &
        (Table == TABLE_DROP_PACKET) &
        # we do not add drop action, because drop action
        # must not be accompanied by any other action or instruction
        # so we just add packet tracing action.
        pkt_trace.trace_pipeline_module(Match, Action1) &
        action.note(flows_note2idx('pkt_trace_drop_packet'), Action2) &
        (Action == Action1 + Action2)
    )
| StarcoderdataPython |
6401450 | <gh_stars>1-10
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server.models.text_contact_annotation import TextContactAnnotation
from openapi_server.models.text_date_annotation import TextDateAnnotation
from openapi_server.models.text_id_annotation import TextIdAnnotation
from openapi_server.models.text_location_annotation import TextLocationAnnotation
from openapi_server.models.text_person_name_annotation import TextPersonNameAnnotation
from openapi_server import util
from openapi_server.models.text_contact_annotation import TextContactAnnotation # noqa: E501
from openapi_server.models.text_date_annotation import TextDateAnnotation # noqa: E501
from openapi_server.models.text_id_annotation import TextIdAnnotation # noqa: E501
from openapi_server.models.text_location_annotation import TextLocationAnnotation # noqa: E501
from openapi_server.models.text_person_name_annotation import TextPersonNameAnnotation # noqa: E501
class AnnotationSet(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, text_date_annotations=None, text_person_name_annotations=None, text_location_annotations=None, text_id_annotations=None, text_contact_annotations=None):  # noqa: E501
    """AnnotationSet - a model defined in OpenAPI

    :param text_date_annotations: The text_date_annotations of this AnnotationSet.  # noqa: E501
    :type text_date_annotations: List[TextDateAnnotation]
    :param text_person_name_annotations: The text_person_name_annotations of this AnnotationSet.  # noqa: E501
    :type text_person_name_annotations: List[TextPersonNameAnnotation]
    :param text_location_annotations: The text_location_annotations of this AnnotationSet.  # noqa: E501
    :type text_location_annotations: List[TextLocationAnnotation]
    :param text_id_annotations: The text_id_annotations of this AnnotationSet.  # noqa: E501
    :type text_id_annotations: List[TextIdAnnotation]
    :param text_contact_annotations: The text_contact_annotations of this AnnotationSet.  # noqa: E501
    :type text_contact_annotations: List[TextContactAnnotation]
    """
    # Declared OpenAPI type of each attribute (generated code).
    self.openapi_types = {
        'text_date_annotations': List[TextDateAnnotation],
        'text_person_name_annotations': List[TextPersonNameAnnotation],
        'text_location_annotations': List[TextLocationAnnotation],
        'text_id_annotations': List[TextIdAnnotation],
        'text_contact_annotations': List[TextContactAnnotation]
    }
    # Python attribute name -> JSON property name.
    self.attribute_map = {
        'text_date_annotations': 'textDateAnnotations',
        'text_person_name_annotations': 'textPersonNameAnnotations',
        'text_location_annotations': 'textLocationAnnotations',
        'text_id_annotations': 'textIdAnnotations',
        'text_contact_annotations': 'textContactAnnotations'
    }
    self._text_date_annotations = text_date_annotations
    self._text_person_name_annotations = text_person_name_annotations
    self._text_location_annotations = text_location_annotations
    self._text_id_annotations = text_id_annotations
    self._text_contact_annotations = text_contact_annotations
@classmethod
def from_dict(cls, dikt) -> 'AnnotationSet':
    """Returns the dict as a model

    :param dikt: A dict.
    :type: dict
    :return: The AnnotationSet of this AnnotationSet.  # noqa: E501
    :rtype: AnnotationSet
    """
    # Deserialization is delegated to the shared util helper.
    return util.deserialize_model(dikt, cls)
@property
def text_date_annotations(self):
    """Gets the text_date_annotations of this AnnotationSet.

    Date annotations in a text  # noqa: E501

    :return: The text_date_annotations of this AnnotationSet.
    :rtype: List[TextDateAnnotation]
    """
    return self._text_date_annotations
@text_date_annotations.setter
def text_date_annotations(self, text_date_annotations):
    """Sets the text_date_annotations of this AnnotationSet.

    Date annotations in a text  # noqa: E501

    :param text_date_annotations: The text_date_annotations of this AnnotationSet.
    :type text_date_annotations: List[TextDateAnnotation]
    """
    # Required field: reject None assignments.
    if text_date_annotations is None:
        raise ValueError("Invalid value for `text_date_annotations`, must not be `None`")  # noqa: E501

    self._text_date_annotations = text_date_annotations
@property
def text_person_name_annotations(self):
    """Gets the text_person_name_annotations of this AnnotationSet.

    Person name annotations in a text  # noqa: E501

    :return: The text_person_name_annotations of this AnnotationSet.
    :rtype: List[TextPersonNameAnnotation]
    """
    return self._text_person_name_annotations
@text_person_name_annotations.setter
def text_person_name_annotations(self, text_person_name_annotations):
    """Sets the text_person_name_annotations of this AnnotationSet.

    Person name annotations in a text  # noqa: E501

    :param text_person_name_annotations: The text_person_name_annotations of this AnnotationSet.
    :type text_person_name_annotations: List[TextPersonNameAnnotation]
    """
    # Required field: reject None assignments.
    if text_person_name_annotations is None:
        raise ValueError("Invalid value for `text_person_name_annotations`, must not be `None`")  # noqa: E501

    self._text_person_name_annotations = text_person_name_annotations
    @property
    def text_location_annotations(self) -> List[TextLocationAnnotation]:
        """Gets the text_location_annotations of this AnnotationSet.
        Location annotations in a text  # noqa: E501
        :return: The text_location_annotations of this AnnotationSet.
        :rtype: List[TextLocationAnnotation]
        """
        return self._text_location_annotations
    @text_location_annotations.setter
    def text_location_annotations(self, text_location_annotations: List[TextLocationAnnotation]) -> None:
        """Sets the text_location_annotations of this AnnotationSet.
        Location annotations in a text  # noqa: E501
        :param text_location_annotations: The text_location_annotations of this AnnotationSet.
        :type text_location_annotations: List[TextLocationAnnotation]
        """
        if text_location_annotations is None:
            raise ValueError("Invalid value for `text_location_annotations`, must not be `None`")  # noqa: E501
        self._text_location_annotations = text_location_annotations
    @property
    def text_id_annotations(self) -> List[TextIdAnnotation]:
        """Gets the text_id_annotations of this AnnotationSet.
        ID annotations in a text  # noqa: E501
        :return: The text_id_annotations of this AnnotationSet.
        :rtype: List[TextIdAnnotation]
        """
        return self._text_id_annotations
    @text_id_annotations.setter
    def text_id_annotations(self, text_id_annotations: List[TextIdAnnotation]) -> None:
        """Sets the text_id_annotations of this AnnotationSet.
        ID annotations in a text  # noqa: E501
        :param text_id_annotations: The text_id_annotations of this AnnotationSet.
        :type text_id_annotations: List[TextIdAnnotation]
        """
        if text_id_annotations is None:
            raise ValueError("Invalid value for `text_id_annotations`, must not be `None`")  # noqa: E501
        self._text_id_annotations = text_id_annotations
    @property
    def text_contact_annotations(self) -> List[TextContactAnnotation]:
        """Gets the text_contact_annotations of this AnnotationSet.
        Contact annotations in a text  # noqa: E501
        :return: The text_contact_annotations of this AnnotationSet.
        :rtype: List[TextContactAnnotation]
        """
        return self._text_contact_annotations
    @text_contact_annotations.setter
    def text_contact_annotations(self, text_contact_annotations: List[TextContactAnnotation]) -> None:
        """Sets the text_contact_annotations of this AnnotationSet.
        Contact annotations in a text  # noqa: E501
        :param text_contact_annotations: The text_contact_annotations of this AnnotationSet.
        :type text_contact_annotations: List[TextContactAnnotation]
        """
        if text_contact_annotations is None:
            raise ValueError("Invalid value for `text_contact_annotations`, must not be `None`")  # noqa: E501
        self._text_contact_annotations = text_contact_annotations
| StarcoderdataPython |
1964474 | <gh_stars>0
from ..utils.jl import jlprepare
from ._helper import init, params
def prepare(name):
    """Build a zero-argument runner for the named rnn.vec simulator.

    The returned callable invokes the prepared entry point and transposes
    the "m" matrix in the result before handing it back.
    """
    kwargs = init(**params[name])
    backend_run = jlprepare("Comparatist.Simulators.rnn.vec", name, m=kwargs["m"])
    def run():
        result = backend_run()
        result["m"] = result["m"].T
        return result
    return run
| StarcoderdataPython |
3423599 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2014 Netheos (http://www.netheos.net)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# OAuth2 tokens bootstrapper: retrieving tokens manually
# (we usually fetch refresh_tokens, if provider supports these ;
# if not, access_token are returned but with a long lifetime)
#
# This small utility program is an example that shows
# how to populate a UserCredentialsRepository
from __future__ import absolute_import, unicode_literals, print_function
import argparse
from pcs_api.credentials.app_info_file_repo import AppInfoFileRepository
from pcs_api.credentials.user_creds_file_repo import UserCredentialsFileRepository
from pcs_api.credentials.user_credentials import UserCredentials
from pcs_api.oauth.oauth2_bootstrap import OAuth2BootStrapper
from pcs_api.storage import StorageFacade
# Required for registering providers :
from pcs_api.providers import *
# Command-line interface: provider is mandatory, app name optional.
parser = argparse.ArgumentParser(description='Manual OAuth authorization, to get refresh tokens.',
                                 epilog='AppInfoRepository must be ready; '
                                        'UserCredentialsRepository will be populated')
parser.add_argument('provider_name', help='provider name')
parser.add_argument('-a', '--app_name',
                    help='application name, as registered provider-side. '
                         'If not supplied, app repos should contain a single application for given provider')
cli_args = parser.parse_args()
# Here we use basic file repositories :
apps_repo = AppInfoFileRepository("../../repositories/app_info_data.txt")
user_credentials_repo = UserCredentialsFileRepository("../../repositories/user_credentials_data.txt")
import logging
import httplib  # NOTE(review): httplib is Python 2 only (http.client in Py3) — confirm target runtime
# Debug tracing is hard-wired on; flip to False to silence HTTP/urllib3 logs.
debug = True
if debug:
    httplib.HTTPConnection.debuglevel = 1
    logging.basicConfig(level=logging.DEBUG)
    requests_log = logging.getLogger("requests.packages.urllib3")
    requests_log.setLevel(logging.DEBUG)
    requests_log.propagate = True
# Check it is really an oauth app :
app_info = apps_repo.get(cli_args.provider_name, cli_args.app_name)
if not app_info.is_oauth():
    raise ValueError('This application does not use OAuth : %s' % app_info)
# Instantiate storage :
storage = StorageFacade.for_provider(cli_args.provider_name) \
    .app_info_repository(apps_repo, cli_args.app_name) \
    .user_credentials_repository(user_credentials_repo) \
    .for_bootstrap() \
    .build()
# Run the interactive authorization-code workflow; on success the user
# credentials repository file is populated with the obtained tokens.
bootstrapper = OAuth2BootStrapper(storage)
bootstrapper.do_code_workflow()
8071228 | <filename>tests/test_SimTimestampDiv.py
#
# ------------------------------------------------------------
# Copyright (c) All rights reserved
# SiLab, Institute of Physics, University of Bonn
# ------------------------------------------------------------
#
import unittest
import os
import time
from basil.dut import Dut
from basil.utils.sim.utils import cocotb_compile_and_run, cocotb_compile_clean
cnfg_yaml = """
transfer_layer:
- name : intf
type : SiSim
init:
host : localhost
port : 12345
hw_drivers:
- name : gpio
type : gpio
interface : intf
base_addr : 0x0000
size : 64
- name : timestamp_div
type : timestamp_div
interface : intf
base_addr : 0x1000
- name : PULSE_GEN
type : pulse_gen
interface : intf
base_addr : 0x3000
- name : fifo
type : sram_fifo
interface : intf
base_addr : 0x8000
base_data_addr: 0x80000000
registers:
- name : timestamp_value
type : StdRegister
hw_driver : gpio
size : 64
fields:
- name : OUT3
size : 16
offset : 63
- name : OUT2
size : 24
offset : 47
- name : OUT1
size : 24
offset : 23
"""
class TestSimTimestampDiv(unittest.TestCase):
    """Cocotb/SiSim integration test for the basil ``timestamp_div`` module.

    Each timestamp event produces three 32-bit FIFO words (headers 0x5300_,
    0x5200_, 0x5100_); with ENABLE_TOT the first word also carries the
    time-over-threshold of the pulse.
    """
    def setUp(self):
        # Compile the Verilog testbench and attach the Dut to the simulator.
        cocotb_compile_and_run([os.path.join(os.path.dirname(__file__), 'test_SimTimestampDiv.v')])
        self.chip = Dut(cnfg_yaml)
        self.chip.init()
    def test_io(self):
        """Trigger pulses and check the FIFO word count, headers and ToT."""
        self.chip['timestamp_div'].reset()
        self.chip['timestamp_div']["ENABLE"] = 1
        self.chip['gpio'].reset()
        self.chip['fifo'].reset()
        ret = self.chip['fifo'].get_fifo_size()
        self.assertEqual(ret, 0)
        # trigger timestamp
        repeat = 16
        width = 0x18
        self.chip['PULSE_GEN'].set_delay(0x20 + 0x7)
        self.chip['PULSE_GEN'].set_width(width)
        self.chip['PULSE_GEN'].set_repeat(repeat)
        self.chip['PULSE_GEN'].start()
        while(not self.chip['PULSE_GEN'].is_done()):
            pass
        # get data from fifo
        ret = self.chip['fifo'].get_fifo_size()
        self.assertEqual(ret, 3 * 4 * repeat)  # 3 words of 4 bytes per pulse
        ret = self.chip['fifo'].get_data()
        self.assertEqual(len(ret), 3 * repeat)
        for i, r in enumerate(ret):
            self.assertEqual(r & 0xF0000000, 0x50000000)
            self.assertEqual(r & 0xF000000, 0x1000000 * (3 - i % 3))
        # Repeat with time-over-threshold recording enabled.
        self.chip['timestamp_div']["ENABLE_TOT"] = 1
        self.chip['PULSE_GEN'].start()
        while(not self.chip['PULSE_GEN'].is_done()):
            pass
        ret = self.chip['fifo'].get_fifo_size()
        self.assertEqual(ret, 3 * 4 * repeat)
        ret = self.chip['fifo'].get_data()
        self.assertEqual(len(ret), 3 * repeat)
        for i, r in enumerate(ret):
            self.assertEqual(r & 0xF0000000, 0x50000000)
            self.assertEqual(r & 0xF000000, 0x1000000 * (3 - i % 3))
            if i % 3 == 0:
                self.assertEqual(r & 0xFFFF00, 0x100 * width) # ToT value
    def tearDown(self):
        # Sleeps give the simulator time to flush and shut down cleanly.
        time.sleep(2)
        self.chip.close()  # let it close connection and stop simulator
        time.sleep(2)
        cocotb_compile_clean()
        time.sleep(2)
# Allow running this simulation test directly with python.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
2468 | <filename>tests/core/test_headerupdater.py
# -*- coding: utf-8 -*-
'''
HeaderUpdater class test
========================
'''
import unittest
from tests.testutils import print_testtitle, validate_with_fail
from builder.commands.scode import SCode, SCmd
from builder.containers.chapter import Chapter
from builder.containers.episode import Episode
from builder.containers.scene import Scene
from builder.containers.story import Story
from builder.core import headerupdater as hd
class HeaderUpdaterTest(unittest.TestCase):
    """Unit tests for builder.core.headerupdater.HeaderUpdater.

    Each test table row is (enabled-flag, input, expected...) and is driven
    through the shared ``validate_with_fail`` helper.
    """
    @classmethod
    def setUpClass(cls):
        print_testtitle(hd.__name__, 'HeaderUpdater class')
    def test_instance(self):
        # Constructor sanity check.
        tmp = hd.HeaderUpdater()
        self.assertIsInstance(tmp, hd.HeaderUpdater)
    def test_title_of(self):
        # _title_of must wrap the story title in a TAG_TITLE SCode.
        data = [
                # (src, expect, exp_opt)
                (True, Story('test',), ('test',), 1),
                ]
        def checker(src, expect, exp_opt):
            tmp = hd.HeaderUpdater()._title_of(src)
            self.assertIsInstance(tmp, SCode)
            self.assertEqual(tmp.cmd, SCmd.TAG_TITLE)
            self.assertEqual(tmp.script, expect)
            self.assertEqual(tmp.option, exp_opt)
        validate_with_fail(self, 'title_of', checker, data)
    def test_outline_of(self):
        # _outline_of must emit the outline as a TAG_COMMENT SCode.
        data = [
                # (src, expect)
                (True, Story('test',outline='apple'), ('apple',)),
                ]
        def checker(src, expect):
            tmp = hd.HeaderUpdater()._outline_of(src)
            self.assertIsInstance(tmp, SCode)
            self.assertEqual(tmp.cmd, SCmd.TAG_COMMENT)
            self.assertEqual(tmp.script, expect)
        validate_with_fail(self, 'outline_of', checker, data)
    def test_end_of(self):
        # _end_of must map a Chapter container to END_CHAPTER.
        data = [
                # (src, expect)
                (True, Chapter('test',), SCmd.END_CHAPTER),
                ]
        validate_with_fail(self, 'end_of',
                lambda src, expect: self.assertEqual(
                    hd.HeaderUpdater()._end_of(src).cmd, expect),
                data)
| StarcoderdataPython |
1899514 | import sys
import os
import time
from torchvision import transforms
import torch, torchaudio
import yarp
import numpy as np
from speechbrain.pretrained import EncoderClassifier
from project.voiceRecognition.speaker_embeddings import EmbeddingsHandler
from project.faceRecognition.utils import format_face_coord, face_alignement, format_names_to_bottle, \
fixed_image_standardization, get_center_face
from project.AVRecognition.lit_AVperson_classifier import LitSpeakerClassifier, Backbone
from project.yarpModules.DatabaseHandler import DatabaseHandler
import scipy.io.wavfile as wavfile
import scipy
import dlib
import cv2 as cv
def info(msg):
    """Print *msg* to stdout with a uniform "[INFO]" prefix."""
    print(f"[INFO] {msg}")
class PersonsRecognition(yarp.RFModule):
    """
    Description:
        YARP RFModule that identifies people from voice (speaker embeddings),
        face (face embeddings), or both, and writes the resolved identity
        back to the OPC memory via RPC.
    Args:
        input_port : Audio from remoteInterface, raw image from iCub cameras
    """
    def __init__(self):
        """Declare all module state; real initialisation happens in configure()."""
        yarp.RFModule.__init__(self)
        # handle port for the RFModule
        self.module_name = None
        self.handle_port = None
        self.process = False
        # Define vars to receive audio
        self.audio_in_port = None
        self.eventPort = None
        self.is_voice = False
        # Predictions parameters
        self.label_outputPort = None
        self.predictions = []
        self.database = None
        # Speaker module parameters
        self.model_audio = None
        self.dataset_path = None
        self.db_embeddings_audio = None
        self.threshold_audio = None
        self.length_input = None
        self.resample_trans = None
        self.speaker_emb = []
        # Parameters for the audio
        self.sound = None
        self.audio = []
        self.np_audio = None
        self.nb_samples_received = 0
        self.sampling_rate = None
        # Define port to receive an Image
        self.image_in_port = yarp.BufferedPortImageRgb()
        self.face_coord_port = yarp.BufferedPortBottle()
        # Port to query and update the memory (OPC)
        self.opc_port = yarp.RpcClient()
        # Image parameters
        self.width_img = None
        self.height_img = None
        self.input_img_array = None
        self.frame = None
        self.coord_face = None
        self.threshold_face = None
        self.face_emb = []
        # Model face recognition modele
        self.modele_face = None
        self.db_embeddings_face = None
        self.trans = None
        self.faces_img = []
        self.face_coord_request = None
        self.face_model_path = None
        # Model for cross-modale recognition
        self.model_av = None
        self.sm = torch.nn.Softmax(dim=1)
        self.threshold_multimodal = None
        self.device = None
        self.save_face = False
        self.name = ""
        self.predict = False
    def configure(self, rf):
        """Read resource-finder parameters, load models and open all ports.

        :param rf: yarp.ResourceFinder with the module configuration
        :return: True when both embedding models loaded successfully
        """
        success = True
        # handle port for the RFModule
        self.handle_port = yarp.Port()
        self.attach(self.handle_port)
        # Define vars to receive audio
        self.audio_in_port = yarp.BufferedPortSound()
        self.label_outputPort = yarp.Port()
        self.eventPort = yarp.BufferedPortBottle()
        # Module parameters
        self.module_name = rf.check("name",
                                    yarp.Value("PersonRecognition"),
                                    "module name (string)").asString()
        self.handle_port.open('/' + self.module_name)
        self.dataset_path = rf.check("dataset_path",
                                     yarp.Value(
                                         ""),
                                     "Root path of the embeddings database (voice & face) (string)").asString()
        self.database = DatabaseHandler(self.dataset_path)
        self.length_input = rf.check("length_input",
                                     yarp.Value(1),
                                     "length audio input in seconds (int)").asInt()
        self.threshold_audio = rf.check("threshold_audio",
                                        yarp.Value(0.32),
                                        "threshold_audio for detection (double)").asDouble()
        self.threshold_face = rf.check("threshold_face",
                                       yarp.Value(0.55),
                                       "threshold_face for detection (double)").asDouble()
        self.face_model_path = rf.check("face_model_path",
                                        yarp.Value(""),
                                        "Path of the model for face embeddings (string)").asString()
        # Set the device for inference for the models
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        print('Running on device: {}'.format(self.device))
        success &= self.load_model_face()
        self.sampling_rate = rf.check("fs",
                                      yarp.Value(48000),
                                      " Sampling rate of the incoming audio signal (int)").asInt()
        success &= self.load_model_audio()
        # Audio and voice events
        self.audio_in_port.open('/' + self.module_name + '/audio:i')
        self.eventPort.open('/' + self.module_name + '/events:i')
        # Label
        self.label_outputPort.open('/' + self.module_name + '/label:o')
        # Image and face
        self.width_img = rf.check('width', yarp.Value(320),
                                  'Width of the input image').asInt()
        self.height_img = rf.check('height', yarp.Value(244),
                                   'Height of the input image').asInt()
        self.face_coord_port.open('/' + self.module_name + '/coord:i')
        self.face_coord_port.setStrict(False)
        self.image_in_port.open('/' + self.module_name + '/image:i')
        # Pre-allocated byte buffer the yarp image is mapped onto in read_image().
        self.input_img_array = np.zeros((self.height_img, self.width_img, 3), dtype=np.uint8).tobytes()
        self.opc_port.open('/' + self.module_name + '/OPC:rpc')
        self.threshold_multimodal = 0.8
        info("Initialization complete")
        return success
    def load_model_audio(self):
        """Load the speaker-embedding model and the voice embedding database.

        :return: True on success, False when the embeddings folder is missing.
        """
        self.resample_trans = torchaudio.transforms.Resample(self.sampling_rate, 16000)
        # Load Database for audio embeddings
        try:
            self.db_embeddings_audio = EmbeddingsHandler(os.path.join(self.dataset_path, "audio"), n_neighbors=4)
            self.model_audio = EncoderClassifier.from_hparams(source="speechbrain/spkrec-ecapa-voxceleb")
        except FileNotFoundError:
            # NOTE(review): the message re-instantiates EmbeddingsHandler, which
            # may raise again on the same missing path — confirm intended.
            info(f"Unable to find dataset {EmbeddingsHandler(os.path.join(self.dataset_path, 'audio'))}")
            return False
        return True
    def load_model_face(self):
        """Load the face-embedding model, its database and the input transform.

        :return: True on success, False when model or dataset files are missing.
        """
        try:
            self.modele_face = torch.load(self.face_model_path)
            self.modele_face.eval()
            self.db_embeddings_face = EmbeddingsHandler(os.path.join(self.dataset_path, "face"), threshold=self.threshold_face)
            # Transform for face embeddings
            self.trans = transforms.Compose([
                np.float32,
                transforms.ToTensor(),
                fixed_image_standardization,
                transforms.Resize((180, 180))
            ])
        except FileNotFoundError:
            info(f"Unable to find dataset {EmbeddingsHandler(os.path.join(self.dataset_path, 'face'))} \
    or model {self.face_model_path}")
            return False
        return True
def interruptModule(self):
print("[INFO] Stopping the module")
self.audio_in_port.interrupt()
self.label_outputPort.interrupt()
self.eventPort.interrupt()
self.handle_port.interrupt()
self.image_in_port.interrupt()
self.face_coord_port.interrupt()
return True
def close(self):
self.audio_in_port.close()
self.handle_port.close()
self.label_outputPort.close()
self.image_in_port.close()
self.eventPort.close()
self.face_coord_port.close()
return True
def respond(self, command, reply):
ok = False
# Is the command recognized
rec = False
reply.clear()
if command.get(0).asString() == "quit":
reply.addString("quitting")
return False
elif command.get(0).asString() == "start":
reply.addString("ok")
self.process = True
elif command.get(0).asString() == "predict":
self.predict = True
reply.addString("ok")
elif command.get(0).asString() == "stop":
self.process = False
reply.addString("ok")
elif command.get(0).asString() == "predict":
if command.get(1).asString() == "stop":
self.predict = False
reply.addString("ok")
elif command.get(0).asString() == "check":
if command.get(1).asString() == "tracker":
new_detection = []
new_detection.append(command.get(2).asList().get(0).asDouble())
new_detection.append(command.get(2).asList().get(1).asDouble())
new_detection.append(command.get(2).asList().get(2).asDouble())
new_detection.append(command.get(2).asList().get(3).asDouble())
name_to_assign, id_to_assign = self.check_existing_face(new_detection)
if name_to_assign:
reply.addString(name_to_assign)
reply.addString(id_to_assign)
else:
reply.addString("nack")
elif command.get(0).asString() == "save":
if command.get(1).asString() == "face":
if command.get(2).asString() == "start":
self.save_face = True
else:
name = command.get(2).asString().lower()
if name in self.db_embeddings_face.data_dict.keys():
self.db_embeddings_face.data_dict[name] = self.db_embeddings_face.data_dict[name] + self.face_emb
else:
self.db_embeddings_face.data_dict[name] = self.face_emb
self.database.save_faces(self.faces_img, self.face_emb, name)
self.save_face = False
self.faces_img = []
self.face_emb = []
reply.addString("ok")
elif command.get(0).asString() == "reset":
self.db_embeddings_face.excluded_entities = []
elif command.get(0).asString() == "set":
if command.get(1).asString() == "thr":
if command.get(2).asString() == "audio":
self.threshold_audio = command.get(3).asDouble()
self.db_embeddings_audio.threshold = self.threshold_audio
reply.addString("ok")
elif command.get(2).asString() == "face":
self.threshold_face = command.get(3).asDouble() if command.get(3).asDouble() > 0 else self.threshold_face
self.db_embeddings_face.threshold = self.threshold_face
reply.addString("ok")
else:
reply.addString("nack")
else:
reply.addString("nack")
elif command.get(0).asString() == "get":
if command.get(1).asString() == "thr":
if command.get(2).asString() == "audio":
reply.addDouble(self.threshold_audio)
elif command.get(2).asString() == "face":
reply.addDouble(self.threshold_face)
else:
reply.addString("nack")
elif command.get(1).asString() == "face":
self.face_coord_request = [command.get(2).asDouble(), command.get(3).asDouble(), command.get(4).asDouble(),
command.get(5).asDouble()]
reply.addString("ok")
else:
reply.addString("nack")
else:
reply.addString("nack")
return True
    def getPeriod(self):
        """
        Module refresh rate.
        Returns : The period of the module in seconds.
        """
        # 20 Hz update loop
        return 0.05
    def record_audio(self):
        """Read one audio chunk (non-blocking) and buffer it while voice is active.

        :return: True when a chunk was buffered, False otherwise
        """
        self.sound = self.audio_in_port.read(False)
        if self.sound and self.is_voice:
            chunk = np.zeros((self.sound.getChannels(), self.sound.getSamples()), dtype=np.float32)
            self.nb_samples_received += self.sound.getSamples()
            # int16 PCM -> float in [-1, 1); per-sample Python loop is slow but
            # the yarp Sound API only exposes element access.
            for c in range(self.sound.getChannels()):
                for i in range(self.sound.getSamples()):
                    chunk[c][i] = self.sound.get(i, c) / 32768.0
            self.audio.append(chunk)
            return True
        return False
    def read_image(self):
        """Read the newest camera frame (non-blocking) into ``self.frame``.

        :return: True when a frame was read, False otherwise
        """
        input_yarp_image = self.image_in_port.read(False)
        if input_yarp_image:
            # Map the yarp image onto the pre-allocated byte buffer, then copy
            # into an independent numpy array (H, W, 3, uint8).
            input_yarp_image.setExternal(self.input_img_array, self.width_img, self.height_img)
            self.frame = np.frombuffer(self.input_img_array, dtype=np.uint8).reshape(
                (self.height_img, self.width_img, 3)).copy()
            return True
        return False
def check_voice(self):
if self.eventPort.getInputCount():
event_name = self.eventPort.read(False)
if event_name:
event_name = event_name.get(0).asString()
if event_name == "start_voice":
self.is_voice = True
elif event_name == "stop_voice":
self.audio = []
self.nb_samples_received = 0
self.is_voice = False
else:
pass
def get_face_coordinate(self):
if self.face_coord_port.getInputCount():
self.coord_face = self.face_coord_port.read(False)
return self.coord_face is not None
self.coord_face = None
return False
    def set_name_memory(self, face_id, face_name):
        """Write a recognised name into the OPC entry matching a tracker id.

        Protocol: ask for the entity whose id_tracker equals ``face_id``;
        if found and not yet verified, set its label_tracker to ``face_name``.

        :param face_id: tracker id string used as the OPC lookup key
        :param face_name: name to store (stripped before writing)
        :return: "ack"+reply string on a successful set, otherwise False/None
        """
        if self.opc_port.getOutputCount():
            reply = yarp.Bottle()
            cmd = yarp.Bottle("ask")
            list_condition = cmd.addList()
            cond1 = list_condition.addList()
            cond1.addString("id_tracker")
            cond1.addString("==")
            cond1.addString(face_id)
            self.opc_port.write(cmd, reply)
            list_id = reply.get(1).asList().get(1).asList()
            if list_id.size():
                # Query the "verified" property of the first matching entity.
                cmd = yarp.Bottle()
                cmd.addString("get")
                list_all = cmd.addList()
                list_1 = list_all.addList()
                list_1.addString("id")
                list_1.addInt(list_id.get(0).asInt())
                list_2 = list_all.addList()
                list_2.addString("propSet")
                list_3 = list_2.addList()
                list_3.addString("verified")
                reply_ver = yarp.Bottle()
                self.opc_port.write(cmd, reply_ver)
                print("Sent cmd to OPC {}, and received response {}".format(cmd.toString(), reply_ver.toString()))
                verified = reply_ver.get(1).asList().get(0).asList().get(1).asInt()
                if verified == 0:
                    # Not verified yet: write the new label.
                    reply2 = yarp.Bottle()
                    cmd = yarp.Bottle()
                    cmd.addString("set")
                    list_cmd = cmd.addList()
                    id_cmd = list_cmd.addList()
                    id_cmd.addString("id")
                    id_cmd.addInt(list_id.get(0).asInt())
                    label_cmd = list_cmd.addList()
                    label_cmd.addString("label_tracker")
                    label_cmd.addString(face_name.strip())
                    # cmd_str = "set ((id " + str(list_id.get(0).asInt()) + ") (label_tracker" + face_name + "))"
                    self.opc_port.write(cmd, reply2)
                    print("Sent cmd to OPC {} and received reply {}".format(cmd.toString(), reply2.toString()))
                    return "ack" + reply2.get(0).asString()
        return False
    def get_name_in_memory(self):
        """Exclude already-verified identities from future DB matches.

        Asks the OPC for all verified entities and appends their
        label_tracker names to the exclusion lists of both embedding
        databases (side effect only; no return value).
        """
        if self.opc_port.getOutputCount():
            reply = yarp.Bottle()
            cmd = yarp.Bottle("ask")
            list_condition = cmd.addList()
            cond1 = list_condition.addList()
            cond1.addString("verified")
            cond1.addString("==")
            cond1.addInt(1)
            self.opc_port.write(cmd, reply)
            list_id = reply.get(1).asList().get(1).asList()
            for i in range(list_id.size()):
                cmd_str = "get ((id " + str(list_id.get(i).asInt()) + ") (propSet (label_tracker)))"
                cmd = yarp.Bottle(cmd_str)
                reply_id = yarp.Bottle()
                self.opc_port.write(cmd, reply_id)
                if reply_id.size() > 0:
                    name = reply_id.get(1).asList().get(0).asList().get(1).asString()
                    self.db_embeddings_face.excluded_entities.append(name)
                    self.db_embeddings_audio.excluded_entities.append(name)
def get_name_to_verify(self):
if self.opc_port.getOutputCount():
reply = yarp.Bottle()
cmd = yarp.Bottle("ask")
list_condition = cmd.addList()
cond1 = list_condition.addList()
cond1.addString("verified")
cond1.addString("==")
cond1.addInt(1)
list_condition.addString("&&")
cond2 = list_condition.addList()
cond2.addString("active")
cond2.addString("==")
cond2.addInt(0)
self.opc_port.write(cmd, reply)
list_id = reply.get(1).asList().get(1).asList()
name_to_verify = []
id_to_verify = []
if list_id.size() > 0:
reply_id = yarp.Bottle()
for i in range(list_id.size()):
cmd_str = "get ((id " + str(list_id.get(i).asInt()) + ") (propSet (label_tracker id_tracker)))"
cmd = yarp.Bottle(cmd_str)
self.opc_port.write(cmd, reply_id)
name = reply_id.get(1).asList().get(1).asList().get(1).asString()
id = reply_id.get(1).asList().get(0).asList().get(1).asString()
name_to_verify.append(name)
id_to_verify.append(id)
return name_to_verify, id_to_verify
return False
    def updateModule(self):
        """Main periodic loop: gather audio/image, compute embeddings, predict.

        When both a speaker and faces are available, the unknown face most
        similar to the speaker's voice identity inherits the speaker name;
        face-only frames fall back to pure face recognition.

        :return: True to keep the module running
        """
        current_face_emb = []
        current_id_faces = []
        speaker_name, audio_score = "unknown", 0
        self.check_voice()
        record_image = self.read_image()
        record_audio = self.record_audio()
        self.get_name_in_memory()
        self.get_face_coordinate()
        if self.process:
            # Enough buffered audio for one analysis window?
            if record_audio and self.nb_samples_received >= self.length_input * self.sound.getFrequency():
                print("Computing Speaker Embedding")
                audio_signal = self.format_signal(self.audio)
                # Compute speaker embeddings and do speaker prediction only if the audio database is updated with
                # the same people folders as the face embedding folders (make empty folders?)
                self.speaker_emb = self.get_audio_embeddings(audio_signal)
                self.audio = []
                self.nb_samples_received = 0
                speaker_name, audio_score = self.predict_speaker(self.speaker_emb)
            if record_image and self.frame.size != 0 and self.coord_face:
                try:
                    current_id_faces, self.coord_face = format_face_coord(self.coord_face)
                    face_img = [face_alignement(f, self.frame) for f in self.coord_face]
                    current_face_emb = self.get_face_embeddings(face_img)
                    if self.save_face and len(current_face_emb) > 0:
                        # Enrollment mode: buffer the first face + embedding.
                        self.faces_img = self.faces_img + face_img
                        self.face_emb.append(current_face_emb[0].numpy())
                except Exception as e:
                    info("Exception while computing face embeddings" + str(e))
            if self.predict:
                if speaker_name != 'unknown' and len(current_face_emb):
                    info("Got Audio and Face embeddings")
                    faces_name, face_scores = self.predict_face(current_face_emb)
                    unknown_faces = []
                    distances = []
                    for face_id, emb, name, score in zip(current_id_faces, current_face_emb, faces_name, face_scores):
                        if name != "unknown":
                            name = self.format_name(name)
                            self.set_name_memory(face_id, name)
                            print("Predicted for face_id {} : {} with score {}".format(face_id, name, score))
                        else:
                            distances.append(self.db_embeddings_face.get_distance_from_user(emb, speaker_name))
                            unknown_faces.append(face_id)
                    if len(unknown_faces):
                        # The unknown face closest to the speaker gets their name.
                        min_distance_index = np.argmax(distances)
                        min_face_id = unknown_faces.pop(min_distance_index)
                        self.set_name_memory(min_face_id, speaker_name)
                        # print("Speaker name closest to unknown face is {} ".format(speaker_name))
                        for face_id in unknown_faces:
                            self.set_name_memory(face_id, "unknown")
                elif len(current_face_emb):
                    faces_name, scores = self.predict_face(current_face_emb)
                    for face_id, name, score in zip(current_id_faces, faces_name, scores):
                        self.set_name_memory(face_id, name)
                        print("Predicted for face_id {} : {} with score {}".format(face_id, name, score))
                else:
                    pass
        return True
    def check_existing_face(self, detection):
        """Match a new detection against identities awaiting verification.

        :param detection: face bounding box [x1, y1, x2, y2]
        :return: (name, tracker_id) of the best match, or ("", "") when no
                 candidates exist or no embedding could be computed
        """
        users_to_verify, id_to_verify = self.get_name_to_verify()
        face_name = ""
        face_id = ""
        if len(users_to_verify) > 0:
            face_img_list = []
            face_img = face_alignement(detection, self.frame)
            face_img_list.append(face_img)
            current_face_emb = self.get_face_embeddings(face_img_list)
            if len(current_face_emb):
                distances = []
                names = []
                ids = []
                current_face_emb = current_face_emb[0]
                for (user, id) in zip(users_to_verify, id_to_verify):
                    # if user exist in db_embedding folder
                    distances.append(self.db_embeddings_face.get_distance_from_user(current_face_emb, user))
                    names.append(user)
                    ids.append(id)
                # max similarity is min distance (cosine similarity output [-1,1]
                # NOTE(review): np.argmax on a list named "distances" — values are
                # presumably similarities (larger = closer); confirm in EmbeddingsHandler.
                min_distance_index = np.argmax(distances)
                face_name = names[min_distance_index]
                face_id = ids[min_distance_index]
        return face_name, face_id
def format_signal(self, audio_list_samples):
"""
Format an audio given a list of samples
:param audio_list_samples:
:return: numpy array
"""
np_audio = np.concatenate(audio_list_samples, axis=1)
np_audio = np.squeeze(np_audio)
signal = np.transpose(np_audio, (1, 0))
signal = signal.mean(axis=1)
return signal
    def get_audio_embeddings(self, audio):
        """
        Generate voice embedding from audio sample
        :param audio: 1-D numpy signal sampled at self.sampling_rate
        :return: speaker embedding tensor (batch dim squeezed out)
        """
        # The speaker model expects 16 kHz input; resample_trans was built
        # from self.sampling_rate in load_model_audio().
        resample_audio = self.resample_trans(torch.from_numpy(audio.transpose()))
        embedding = self.model_audio.encode_batch(resample_audio)
        embedding = embedding.squeeze(axis=0)
        return embedding
    def get_face_embeddings(self, images):
        """
        Generate faces embedding from images of faces
        :param images: list of cropped faces (list->np.array)
        :return: list of embedding tensors, one per face (on CPU)
        """
        face_embeddings = []
        with torch.no_grad():
            for np_img in images:
                # In-place colour conversion: third argument is the destination.
                cv.cvtColor(np_img, cv.COLOR_RGB2BGR, np_img)
                input_img = self.trans(np_img)
                input_img = input_img.unsqueeze_(0)
                input = input_img.to(self.device)
                emb = self.modele_face(input)
                face_embeddings.append(emb.cpu())
        return face_embeddings
def predict_speaker(self, embedding):
score, speaker_name = self.db_embeddings_audio.get_speaker_db_scan(embedding)
if score == -1:
speaker_name = "unknown"
self.db_embeddings_audio.excluded_entities = []
print("Predicted speaker name is {} with score {}".format(speaker_name, score))
return speaker_name, float(score)
def predict_face(self, embeddings):
predicted_faces = []
score_faces = []
for emb in embeddings:
score, face_name = self.db_embeddings_face.get_speaker_db_scan(emb)
if score == -1:
face_name = "unknown"
predicted_faces.append(face_name)
score_faces.append(score)
self.db_embeddings_face.excluded_entities = []
return predicted_faces, score_faces
    def predict_multimodal(self, audio_emb, face_emb):
        """Classify a person from concatenated voice + face embeddings.

        :param audio_emb: speaker embedding (only the first row is used when
            more than one is present)
        :param face_emb: list of face embeddings (first entry used)
        :return: (name, softmax score) of the predicted identity
        """
        if audio_emb.shape[0] > 1:
            audio_emb = audio_emb[0]
        input_emb = np.hstack((audio_emb, face_emb[0]))
        with torch.no_grad():
            # NOTE(review): .cuda() assumes a GPU even though configure()
            # falls back to CPU — confirm, or use self.device here.
            input_emb = torch.from_numpy(input_emb).cuda()
            outputs = self.sm(self.model_av(input_emb))
            proba, p_id = torch.max(outputs, 1)
            prediction_id = int(p_id.cpu().numpy()[0])
            score = float(proba.cpu().numpy()[0])
        recognized_name = self.db_embeddings_face.get_name_speaker(prediction_id)
        return recognized_name, score
    def write_label(self, name_speaker, score, mode):
        """Publish (name, score, mode) on the label output port.

        :param name_speaker: predicted identity
        :param score: confidence value
        :param mode: integer tag for the recognition modality
        """
        if self.label_outputPort.getOutputCount():
            name_bottle = yarp.Bottle()
            name_bottle.clear()
            name_bottle.addString(name_speaker)
            name_bottle.addFloat32(score)
            name_bottle.addInt(mode)
            self.label_outputPort.write(name_bottle)
def format_name(self, name):
name.strip()
return name
if __name__ == '__main__':
    # Initialise YARP
    if not yarp.Network.checkNetwork():
        info("Unable to find a yarp server exiting ...")
        sys.exit(1)
    yarp.Network.init()
    speaker_recognition = PersonsRecognition()
    # Resource finder supplies module parameters from the peopleRecognition context.
    rf = yarp.ResourceFinder()
    rf.setVerbose(True)
    rf.setDefaultContext('peopleRecognition')
    rf.setDefaultConfigFile('peopleRecognition.ini')
    if rf.configure(sys.argv):
        # Blocks until the module is stopped (quit command or interrupt).
        speaker_recognition.runModule(rf)
    speaker_recognition.close()
    sys.exit()
| StarcoderdataPython |
5128002 | <reponame>tlee911/aoc2021<gh_stars>0
# Read the puzzle input as a list of stripped lines.
# NOTE: `input` shadows the builtin of the same name for the rest of the module.
with open('input.txt', 'r') as file:
    input = file.readlines()
input = [ line.strip() for line in input ]
def get_values_at_index(i, l):
    """Return the i-th character of every line in l, as a list."""
    return [line[i] for line in l]
def get_counts_at_index(i, l):
    """Count '0' and '1' characters in column i of l.

    Returns a (zeros, ones) tuple.
    """
    column = [line[i] for line in l]
    return (column.count('0'), column.count('1'))
def get_most_common_at_index(i, l):
    """Return the most common bit ('0' or '1') in column i; ties yield '1'."""
    column = [line[i] for line in l]
    # ties resolve to '1', matching the original tie-break rule
    return '1' if column.count('1') >= column.count('0') else '0'
def part1():
    """Day 3 part 1: gamma (most-common bits) times epsilon (its complement)."""
    bit_count = len(input[0])
    gamma_bits = [get_most_common_at_index(i, input) for i in range(bit_count)]
    epsilon_bits = ['1' if b == '0' else '0' for b in gamma_bits]
    gamma_rate = int(''.join(gamma_bits), 2)
    epsilon_rate = int(''.join(epsilon_bits), 2)
    return gamma_rate * epsilon_rate
def get_least_common_at_index(i, l):
    """Return the least common bit in column i (complement of the most common)."""
    column = [line[i] for line in l]
    # most-common ties resolve to '1', so the least-common tie-break is '0'
    return '0' if column.count('1') >= column.count('0') else '1'
def get_rating(f):
    """Filter the module-level input by the bit criterion f until one value remains.

    :param f: callable(i, lines) -> '0' or '1', the bit to keep at column i
    :return: the surviving binary string, as an int
    """
    candidates = list(input)
    bit = 0
    while len(candidates) > 1:
        target = f(bit, candidates)
        candidates = [c for c in candidates if c[bit] == target]
        bit += 1
    return int(candidates[0], 2)
def get_o2():
    # Oxygen generator rating: keep the most common bit at each position.
    return get_rating(get_most_common_at_index)
def get_co2():
    # CO2 scrubber rating: keep the least common bit at each position.
    return get_rating(get_least_common_at_index)
def part2():
    """Day 3 part 2: product of the O2 generator and CO2 scrubber ratings."""
    oxygen = get_o2()
    scrubber = get_co2()
    print(oxygen, scrubber)
    return oxygen * scrubber
# Print the answers for both puzzle parts.
print(part1())
print(part2())
3208466 | <filename>src/testSylvSequence.py
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
import matplotlib.patches as patches
from LucasKanadeBasis import *
from LucasKanade import *
from TemplateCorrection import *
import time
def copyRect(rect):
    """Return a shallow copy of *rect* (an [x1, y1, x2, y2] bounding box).

    The manual element-by-element `+= [ele]` loop is replaced by the
    built-in list() constructor, which produces the same shallow copy.
    """
    return list(rect)
# write your script here, we recommend the above libraries for making your animation
# Load the appearance bases and the grayscale video frames (H x W x num_frames).
bases = np.load('../data/sylvbases.npy')
frames = np.load('../data/sylvseq.npy')
seq_len = frames.shape[2]
frame0 = frames[:,:,0]
# Initial bounding boxes [x1, y1, x2, y2]: one for the bases tracker, one for
# the template-correction baseline.
rect = [101, 61, 155, 107]
rect_baseline = [101, 61, 155, 107]
width = rect[3] - rect[1]
length = rect[2] - rect[0]
# NOTE(review): copyRect(frame0) copies the whole first FRAME, not a rect —
# this looks like it was meant to be copyRect(rect) / copyRect(rect_baseline);
# confirm before relying on rectList[0].
rectList = [copyRect(frame0)]
rectList_baseline = [copyRect(frame0)]
time_total = 0
# since template driftingb uses only the first ever frame
# lots of things can be pre-computed here
rows_img, cols_img = frame0.shape
x1, y1, x2, y2 = rect[0], rect[1], rect[2], rect[3]
rows_rect, cols_rect = x2 - x1, y2 - y1
y = np.arange(0, rows_img, 1)
x = np.arange(0, cols_img, 1)
c = np.linspace(x1, x2, cols_rect)
r = np.linspace(y1, y2, rows_rect)
cc, rr = np.meshgrid(c, r)
# Bivariate spline over frame 0; T is the interpolated template patch.
spline = RectBivariateSpline(y, x, frame0)
T = spline.ev(rr, cc)
#Apply LucasKanadeWithTemplateCorrection Algorithm
for i in range(seq_len):
    if i == 0:
        continue
    It = frames[:,:,i-1]
    It1 = frames[:,:,i]
    # Track frame-to-frame, shift the baseline box, then correct drift
    # against the frame-0 template T.
    p_baseline = LucasKanade(It, It1, rect_baseline)
    rect_baseline[0] += p_baseline[0]
    rect_baseline[1] += p_baseline[1]
    rect_baseline[2] += p_baseline[0]
    rect_baseline[3] += p_baseline[1]
    TemplateCorrection(T, It1, rect_baseline)
    rectList_baseline.append(copyRect(rect_baseline))
#Apply LucasKanadeBasis Algorithm
for i in range(seq_len):
    if i == 0:
        continue
    print("Processing frame %d" % i)
    start = time.time()
    It = frames[:,:,i-1]
    It1 = frames[:,:,i]
    # Appearance-bases variant of Lucas-Kanade; p is the (dx, dy) shift.
    p = LucasKanadeBasis(It, It1, rect, bases)
    rect[0] += p[0]
    rect[1] += p[1]
    rect[2] += p[0]
    rect[3] += p[1]
    end = time.time()
    time_total += end - start
    rectList.append(copyRect(rect))
    # Show both trackers (blue = bases, red = baseline) at selected frames.
    if i == 1 or i == 100 or i == 200 or i == 300 or i == 350 or i == 400:
        plt.figure()
        plt.imshow(frames[:,:,i],cmap='gray')
        bbox1 = patches.Rectangle((int(rectList[i][0]), int(rectList[i][1])), length, width,
                                  fill=False, edgecolor='blue', linewidth=2)
        plt.gca().add_patch(bbox1)
        bbox0 = patches.Rectangle((int(rectList_baseline[i][0]), int(rectList_baseline[i][1])), length, width,
                                  fill=False, edgecolor='red', linewidth=2)
        plt.gca().add_patch(bbox0)
        plt.title('frame %d' % i)
        plt.show()
np.save('Sylvseqrects.npy',rectList)
print('Finished, the tracking frequency is %.4f' % (seq_len / time_total))
| StarcoderdataPython |
3594387 | <reponame>a1b2c3d4e5x/spider_iyp<filename>const/sub_categories/base_category.py<gh_stars>0
from typing import List
class BaseCategory(object):
    """Base class for sub-category holders.

    Subclasses expose one public attribute or method per sub-category;
    ``list()`` enumerates those names.
    """

    # Bookkeeping names that are part of the class API rather than
    # actual category entries (the original skipped these with an
    # if/elif/continue chain inside the loop).
    _EXCLUDED = ('category_name', 'category_id', 'list')

    def list(self) -> List[str]:
        """Return every public attribute name except the bookkeeping ones,
        in ``dir()`` (alphabetical) order."""
        return [name for name in dir(self)
                if not name.startswith('_') and name not in self._EXCLUDED]
import discord
# Make sure we're using the correct version of Discord.py
if discord.__author__ != "Rapptz":
    print("Error: Please use the rewrite version of Discord.py.")
    print("Check the README for instructions.")
    exit()
import requests, asyncio, platform
from discord.ext.commands import Bot
# End imports
bot = Bot(command_prefix='!', case_insensitive=True, description='A bot for comparing cryptocurrency values.')
# One-shot fetch of the CryptoCompare coin catalog at startup; commands below
# read from this module-level dict for the bot's whole lifetime.
coin_data = requests.get("https://www.cryptocompare.com/api/data/coinlist/").json()['Data']
# User typing is checked against this list of valid symbols
coin_list = list(coin_data.keys())
coin_list.append("USD")
invalid_coin = "Error: {} is not a valid symbol. Please check your typing."
# Place the token in a file called "token.txt"
# IMPORTANT: Make sure there is no newline at the end!
# You will get cryptic errors.
# Try "tr -d '\n' < token.txt" (*NIX only)
# NOTE(review): file handle is never closed — harmless for a one-shot read at
# startup, but a `with open(...)` would be cleaner.
TOKEN = open("token.txt", "r").read()
# So we can have our neat little custom help function
bot.remove_command("help")
# Help strings; sent when a user types "!help {command_here}"
# NOTE(review): the "algo" entry looks unfinished ("\nExample: " only).
help_strings = {
    "price": "\nExample: Getting the price of a Bitcoin in USD:\n` !price USD BTC`",
    "image": "\nExample: Getting the Bitcoin logo:\n` !image BTC`",
    "name": "\nExample: Finding out what \"ETC\" means:\n` !name ETC`",
    "algo": "\nExample: "
}
# Commands / events below
@bot.event
async def on_ready():
    """Log the bot's name and id to stdout once the connection is up."""
    print(
        "Logged in\n"
        + str(bot.user.name) + "\n"
        + str(bot.user.id)
    )
# NOTE: The whole point of the help strings is so that this command is
# short and readable. Do not change.
@bot.command()
async def help(ctx, cmd=None):
    """Custom !help: with no argument, list all commands; with an argument,
    send that command's usage example from help_strings."""
    # Print a list of commands if one is not supplied
    if cmd is None:
        await ctx.send("List of commands:\n")
        for i in help_strings.keys():
            await ctx.send("`!" + i + "`")
    # Make sure the command typed actually exists
    # (when cmd is None this membership test is False, so we fall through
    # to the else branch below, which then does nothing)
    if cmd in help_strings.keys():
        await ctx.send(help_strings[cmd])
    else:
        # Get rid of annoying errors when no command is passed
        if cmd is not None:
            await ctx.send(cmd + ": Command not found")
# TODO: Clean this up at some point, it's a mess.
# TODO: Clean this up at some point, it's a mess.
@bot.command()
async def price(ctx, to_sym, from_sym):
    """Reply with the price of 1 `from_sym` expressed in `to_sym`.

    Both symbols must be in coin_list; USD is the only accepted fiat.
    """
    if to_sym in coin_list and from_sym in coin_list:
        # Live quote from CryptoCompare's price API.
        res = requests.get("https://min-api.cryptocompare.com/data/pricemulti?fsyms="+from_sym+"&tsyms="+to_sym)
        await ctx.send("Price of 1 " + from_sym + ": " + str(res.json()[from_sym][to_sym]) + " " + to_sym)
        await ctx.send("Data from <https://www.cryptocompare.com>")
    else:
        # Report each invalid symbol individually.
        if to_sym not in coin_list:
            await ctx.send("Error: " + to_sym + " is not a valid symbol. Please check your typing.")
        if from_sym not in coin_list:
            await ctx.send("Error: " + from_sym + " is not a valid symbol. Please check your typing.")
        await ctx.send("Please note that USD is the only non-cryptocurrency accepted.")
@bot.command()
async def image(ctx, coin):
    """Reply with the CryptoCompare logo URL for *coin*.

    NOTE(review): unlike name/algo below, this does not exclude "USD",
    which has no entry in coin_data — a "USD" argument would raise here.
    """
    if coin in coin_list:
        await ctx.send("https://www.cryptocompare.com" + coin_data[coin]["ImageUrl"])
    else:
        await ctx.send(invalid_coin.format(coin))
@bot.command()
async def name(ctx, coin):
    """Reply with the full human-readable name of *coin* (e.g. ETC).

    "USD" is rejected because it has no entry in coin_data.
    """
    # Bug fix: the original used `coin is not "USD"`, which compares object
    # identity rather than string equality and is unreliable for strings.
    if coin in coin_list and coin != "USD":
        await ctx.send(coin_data[coin]["FullName"])
    else:
        await ctx.send(invalid_coin.format(coin))
@bot.command()
async def algo(ctx, coin):
    """Reply with the hashing algorithm of *coin* from coin_data.

    "USD" is rejected because it has no entry in coin_data.
    """
    # Bug fix: `coin is not "USD"` (identity test on a string literal)
    # replaced with a proper equality comparison.
    if coin in coin_list and coin != "USD":
        await ctx.send(coin_data[coin]["Algorithm"])
    else:
        await ctx.send(invalid_coin.format(coin))
@bot.command()
async def dedede(ctx):
    """Easter-egg command: reply with a fixed image attachment URL."""
    await ctx.send("https://cdn.discordapp.com/attachments/477252163899228160/477273917795467284/image.jpg")
# End commands / events
# Blocks until the bot disconnects.
bot.run(TOKEN)
8059483 | import sys, json
# Size of one map tile in pixels; objects use tile coordinates while
# triggers use pixel coordinates (see move_to_visible_area).
TILE_SIZE = 32
def print_usage():
    """Print the command-line usage string."""
    print("""USAGE: python tiled2json.py <input json tiled file> <output json file>""")
def move_to_visible_area(out_map):
    """Shift all sprites in *out_map* in place so they begin at (0, 0).

    Objects are stored in tile coordinates while triggers are stored in
    pixel coordinates, hence the TILE_SIZE scaling on the trigger shift.
    Raises ValueError when out_map["objects"] is empty (min() of an
    empty sequence).
    """
    # Generator expressions replace map(lambda ...) for readability.
    min_x = min(node["x"] for node in out_map["objects"])
    min_y = min(node["y"] for node in out_map["objects"])
    for node in out_map["objects"]:
        node["x"] -= min_x
        node["y"] -= min_y
    for trigger in out_map["triggers"]:
        trigger["x"] -= min_x * TILE_SIZE
        trigger["y"] -= min_y * TILE_SIZE
def parse_data(objects, out_map, width):
    """Parse a Tiled tile-layer into sprite entries on out_map["objects"].

    *objects* is the flat tile-id list of a layer, *width* the map width
    in tiles.  Positions are emitted in tile coordinates.  Unrecognised
    tile ids are ignored.
    """
    # Lookup tables replace the original if/elif ladder.
    simple = {1: "WALL", 2: "APPLE"}
    doors = {3: "up", 4: "right", 5: "down", 6: "left"}
    for count, tile in enumerate(objects):
        # Flat index -> (row, col) in the tile grid.
        row, col = divmod(count, width)
        if tile in simple:
            out_map["objects"].append({"x": col, "y": row, "name": simple[tile]})
        elif tile in doors:
            out_map["objects"].append({"x": col, "y": row, "name": "DOOR", "dir": doors[tile]})
        elif 9 <= tile <= 16 or 19 <= tile <= 24:
            # Player sprite frames occupy two id ranges in the tileset.
            out_map["objects"].append({"x": col, "y": row, "name": "PLAYER"})
def parse_triggers(triggers, out_map):
    """Copy Tiled trigger objects into out_map["triggers"].

    Each entry keeps its pixel position and size and pulls the display
    text out of the Tiled "properties" sub-dictionary.
    """
    for src in triggers:
        entry = {key: src[key] for key in ("x", "y", "width", "height")}
        entry["text"] = src["properties"]["text"]
        out_map["triggers"].append(entry)
def main():
    """Convert a Tiled JSON map (argv[1]) into the game's JSON format (argv[2])."""
    if len(sys.argv) < 3:
        print_usage()
        return
    in_file = sys.argv[1]
    out_file = sys.argv[2]
    with open(in_file, 'rt') as f:
        data = json.loads(f.read())
    out_map = {"objects": [], "triggers": []}
    width = data["width"]
    # A Tiled layer carries either tile data ("data") or objects ("objects").
    for layer in data["layers"]:
        if "data" in layer:
            parse_data(layer["data"], out_map, width)
        if "objects" in layer:
            parse_triggers(layer["objects"], out_map)
    # Normalise everything so the top-left sprite sits at (0, 0).
    move_to_visible_area(out_map)
    with open(out_file, 'wt') as f:
        f.write(json.dumps(out_map, sort_keys=True, indent=4))
if __name__ == '__main__':
    main()
110168 | import json
from django import template
from django.utils.html import format_html
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter(is_safe=True)
def json_script_with_non_ascii(value, element_id):
    """Serialize *value* into a <script type="application/json"> tag.

    Variant of Django's built-in ``json_script`` that keeps non-ASCII
    characters literal (``ensure_ascii=False``) instead of \\uXXXX
    escapes, while still escaping <, > and & so the payload cannot
    break out of the script element.
    """
    from django.core.serializers.json import DjangoJSONEncoder
    # Escape table applied via str.translate on the serialized JSON.
    _json_script_escapes = {
        ord('>'): '\\u003E',
        ord('<'): '\\u003C',
        ord('&'): '\\u0026',
    }
    value_json = json.dumps(
        value,
        cls=DjangoJSONEncoder, ensure_ascii=False
    ).translate(_json_script_escapes)
    return format_html(
        '<script id="{}" type="application/json">{}</script>',
        element_id, mark_safe(value_json)
    )
| StarcoderdataPython |
12809448 | #!/usr/bin/env python
from datetime import datetime
from collections import OrderedDict
import argparse
import csv
import os
import re
import subprocess
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# Saved Measurements:
# Column names of the rally "total" summary row that get compiled and graphed.
measurements = ['Min', 'Median', '90%ile', '95%ile', 'Max', 'Avg', 'Success%', 'Count']
"""
Results directory structure:
".../browbeat/results/full-apache-fernet-keystone-36/keystone/keystone-cc/run-1/
full-apache-fernet-keystone-36-iteration_1-keystone-cc-0256.log"
Structure of compiled results dictionary:
results[service][test][iteration][#workers][concurrency][measurement] = value
"""
def list_only_directories(the_directory):
    """Return the names (not full paths) of the immediate subdirectories
    of *the_directory*, in os.listdir() order."""
    subdirs = []
    for entry in os.listdir(the_directory):
        if os.path.isdir(os.path.join(the_directory, entry)):
            subdirs.append(entry)
    return subdirs
def main():
    """Compile rally task log results for one test prefix into per-measurement
    PNG graphs (Python 2 script: uses print statements).

    Walks browbeat's results directories, greps the 'total' summary row out
    of every rally log, builds the nested compiled_results dict described in
    the module docstring, then plots one graph per
    (service, test, iteration, measurement) with one series per concurrency.
    """
    parser = argparse.ArgumentParser(
        description='Processes multiple rally log files from brwowbeat into compiled graphs.')
    parser.add_argument('test_prefix', help='Use the resulting prefixed directories/files in '
                        'browbeat results directory.')
    args = parser.parse_args()
    compiled_results = OrderedDict()
    compiled_issues = []
    # Should be /home/<user>/browbeat/graphing:
    rallyplot_path = os.path.dirname(os.path.realpath(__file__))
    browbeat_path = rallyplot_path.replace('/graphing', '')
    # Only result directories whose name matches "<prefix>-<service>-<workers>".
    test_runs = [a_dir for a_dir in list_only_directories('{}/results/'.format(browbeat_path))
                 if re.match('^{}-[A-Za-z]+-[0-9]+'.format(args.test_prefix), a_dir)]
    for test_run in test_runs:
        extract = re.search('{}-([a-zA-Z]*)-([0-9]*)'.format(args.test_prefix), test_run)
        skip = True
        if extract:
            service = extract.group(1)
            w_count = extract.group(2)
            skip = False
        else:
            print 'Potentially incorrect directory: {}'.format(test_run)
        if not skip:
            for service in os.listdir('{}/results/{}/'.format(browbeat_path, test_run)):
                if service not in compiled_results:
                    compiled_results[service] = OrderedDict()
                for test in os.listdir('{}/results/{}/{}/'.format(browbeat_path, test_run, service)):
                    if test not in compiled_results[service]:
                        compiled_results[service][test] = OrderedDict()
                    for iteration in os.listdir('{}/results/{}/{}/{}/'.format(browbeat_path, test_run, service, test)):
                        iter_num = int(iteration.replace('run-', ''))
                        if iter_num not in compiled_results[service][test]:
                            compiled_results[service][test][iter_num] = OrderedDict()
                        if w_count not in compiled_results[service][test][iter_num]:
                            compiled_results[service][test][iter_num][w_count] = OrderedDict()
                        result_files = os.listdir('{}/results/{}/{}/{}/{}/'.format(browbeat_path, test_run, service, test, iteration))
                        result_files = [a_file for a_file in result_files if re.match('.*log', a_file)]
                        for r_file in result_files:
                            # Extract concurrency of test
                            # NOTE(review): if this regex does NOT match,
                            # `concurrency` keeps its value from the previous
                            # file (or is unbound on the first one) — confirm
                            # every .log file name matches the pattern.
                            extract = re.search('{}-{}-{}-iteration_{}-{}-([0-9]*)\.log'.format(args.test_prefix, service, w_count, iter_num, test), r_file)
                            if extract:
                                concurrency = extract.group(1)
                            if concurrency not in compiled_results[service][test][iter_num][w_count]:
                                compiled_results[service][test][iter_num][w_count][concurrency] = OrderedDict()
                            result_file_full_path = '{}/results/{}/{}/{}/{}/{}'.format(browbeat_path, test_run, service, test, iteration, r_file)
                            # print 'Test_run: {}, Service: {}, Test: {}, iteration: {}, Concurrency: {}, Result_file: {}'.format(test_run, service, test, iteration, concurrency, r_file)
                            # print 'Full Path: {}'.format(result_file_full_path)
                            # The rally log's pipe-delimited 'total' row holds
                            # all the summary statistics we need.
                            grep_cmd = subprocess.Popen(['grep', 'total', result_file_full_path],
                                                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                            out, err = grep_cmd.communicate()
                            if len(out) == 0:
                                # No 'total' row: record the file and use
                                # sentinel values so graphing still works.
                                print 'Could not find results. Setting to -1'
                                compiled_issues.append(r_file)
                                compiled_results[service][test][iter_num][w_count][concurrency]['Min'] = '-1'
                                compiled_results[service][test][iter_num][w_count][concurrency]['Median'] = '-1'
                                compiled_results[service][test][iter_num][w_count][concurrency]['90%ile'] = '-1'
                                compiled_results[service][test][iter_num][w_count][concurrency]['95%ile'] = '-1'
                                compiled_results[service][test][iter_num][w_count][concurrency]['Max'] = '-1'
                                compiled_results[service][test][iter_num][w_count][concurrency]['Avg'] = '-1'
                                compiled_results[service][test][iter_num][w_count][concurrency]['Success%'] = '0'
                                compiled_results[service][test][iter_num][w_count][concurrency]['Count'] = '-1'
                            else:
                                output = [s.strip() for s in out.strip().split('|') if s]
                                compiled_results[service][test][iter_num][w_count][concurrency]['Min'] = output[1]
                                compiled_results[service][test][iter_num][w_count][concurrency]['Median'] = output[2]
                                compiled_results[service][test][iter_num][w_count][concurrency]['90%ile'] = output[3]
                                compiled_results[service][test][iter_num][w_count][concurrency]['95%ile'] = output[4]
                                compiled_results[service][test][iter_num][w_count][concurrency]['Max'] = output[5]
                                compiled_results[service][test][iter_num][w_count][concurrency]['Avg'] = output[6]
                                compiled_results[service][test][iter_num][w_count][concurrency]['Success%'] = output[7].replace('%', '')
                                compiled_results[service][test][iter_num][w_count][concurrency]['Count'] = output[8]
    rally_graph_dir = '{}/results/{}-rally-compiled-graphs/'.format(browbeat_path, args.test_prefix)
    if not os.path.exists(rally_graph_dir):
        os.mkdir(rally_graph_dir)
    # Now graph results based on measurements list:
    for service in compiled_results:
        for test in compiled_results[service]:
            # Assumption is all tests have same number of iterations!!!
            for iteration in compiled_results[service][test]:
                for measurement in measurements:
                    # One series per concurrency, x-axis = worker counts.
                    concurrency_dict = {}
                    for worker_count in sorted(compiled_results[service][test][iteration].keys()):
                        for concurrency in compiled_results[service][test][iteration][worker_count]:
                            if concurrency not in concurrency_dict:
                                concurrency_dict[concurrency] = []
                            if str(compiled_results[service][test][iteration][worker_count][concurrency][measurement]) == "n/a":
                                # Rally will place n/a in place of an actual result when it fails
                                # completely, we can't graph n/a, so replace with -1
                                concurrency_dict[concurrency].append(-1)
                            else:
                                concurrency_dict[concurrency].append(float(compiled_results[service][test][iteration][worker_count][concurrency][measurement]))
                    graph_file_name = '{}{}-{}-{}-{}.png'.format(rally_graph_dir, service, test, iteration, measurement)
                    print '----------------------------------------------------------'
                    print 'Test Prefix: {}'.format(args.test_prefix)
                    print 'Service: {}'.format(service)
                    print 'Test: {}'.format(test)
                    print 'Iteration: {}'.format(iteration)
                    print 'Measurement: {}'.format(measurement)
                    print 'File Name: {}'.format(graph_file_name)
                    print 'X-Axis (Worker Counts): {}'.format(sorted(compiled_results[service][test][iteration].keys()))
                    print 'X-Axis (# of values per series): {}'.format(len(compiled_results[service][test][iteration].keys()))
                    print '# of Series (# of Concurrencies tested): {}'.format(len(compiled_results[service][test][iteration][worker_count].keys()))
                    for series in sorted(concurrency_dict):
                        print 'Series: {}, Values: {}'.format(series, concurrency_dict[series])
                    print 'Legend: {}'.format(sorted(concurrency_dict.keys()))
                    print '----------------------------------------------------------'
                    fig = plt.figure()
                    plt.title(
                        'Test Name: {}\n'
                        'Service: {}, Test: {}, Iteration: {}, Measurement: {}\n'
                        'Graphed from rally task log output'.format(args.test_prefix, service, test,
                                                                    iteration, measurement))
                    plt.xlabel('Workers')
                    plt.ylabel('{} Time (s)'.format(measurement))
                    ax = fig.add_subplot(111)
                    for series in sorted(concurrency_dict.keys()):
                        # Thicker line marks a series containing sentinel -1s.
                        plt_linewidth = 1
                        if '-1' in concurrency_dict[series]:
                            plt_linewidth = 2
                        plt.plot(sorted(compiled_results[service][test][iteration].keys()),
                                 concurrency_dict[series], linewidth=plt_linewidth, label=series, marker='o')
                        for x, y in zip(sorted(compiled_results[service][test][iteration].keys()),
                                        concurrency_dict[series]):
                            ax.annotate('%s' % y, xy=(x,y), xytext=(4,4), textcoords='offset points')
                    plt.legend(loc='upper center', bbox_to_anchor=(1.12, 0.5), fancybox=True)
                    ax.grid(True)
                    plt.savefig(graph_file_name, bbox_inches='tight')
                    plt.close()
    # Print files that had an issue:
    print '----------------------------------------------------------'
    print 'Files missing results:'
    print '----------------------------------------------------------'
    for issue in compiled_issues:
        print 'File: {}'.format(issue)
if __name__ == "__main__":
    sys.exit(main())
| StarcoderdataPython |
9664387 | # Authors: <NAME> <<EMAIL>>
# License: BSD 3-Clause
import os
from nose.plugins.attrib import attr
from numpy import array, sqrt, zeros
from numpy.random import randn
from numpy.testing import assert_allclose
from commpy.channelcoding.ldpc import get_ldpc_code_params, ldpc_bp_decode
from commpy.utilities import hamming_dist
@attr('slow')
class TestLDPCCode(object):
    """Monte-Carlo frame-error-rate test of belief-propagation LDPC decoding
    against reference FER values (nose test class, marked slow)."""
    @classmethod
    def setup_class(cls):
        # Load the (96, 48) Gallager code design shipped with the package.
        dir = os.path.dirname(__file__)
        ldpc_design_file_1 = os.path.join(dir, '../designs/ldpc/gallager/96.33.964.txt')
        #ldpc_design_file_1 = "../designs/ldpc/gallager/96.33.964.txt"
        cls.ldpc_code_params = get_ldpc_code_params(ldpc_design_file_1)
    @classmethod
    def teardown_class(cls):
        pass
    def test_ldpc_bp_decode(self):
        # Code parameters: length N, dimension k, rate 1/2, unit symbol energy.
        N = 96
        k = 48
        rate = 0.5
        Es = 1.0
        snr_list = array([2.0, 2.5])
        niters = 10000000
        # All-zero codeword transmitted (valid for any linear code).
        tx_codeword = zeros(N, int)
        ldpcbp_iters = 100
        fer_array_ref = array([200.0/1000, 200.0/2000])
        fer_array_test = zeros(len(snr_list))
        for idx, ebno in enumerate(snr_list):
            noise_std = 1/sqrt((10**(ebno/10.0))*rate*2/Es)
            fer_cnt_bp = 0
            # Keep simulating frames until 200 frame errors are collected.
            for iter_cnt in range(niters):
                awgn_array = noise_std * randn(N)
                # BPSK modulation (0 -> +1) plus AWGN, then LLR computation.
                rx_word = 1-(2*tx_codeword) + awgn_array
                rx_llrs = 2.0*rx_word/(noise_std**2)
                [dec_word, out_llrs] = ldpc_bp_decode(rx_llrs, self.ldpc_code_params, 'SPA',
                                                      ldpcbp_iters)
                num_bit_errors = hamming_dist(tx_codeword, dec_word)
                if num_bit_errors > 0:
                    fer_cnt_bp += 1
                if fer_cnt_bp >= 200:
                    fer_array_test[idx] = float(fer_cnt_bp)/(iter_cnt+1)
                    break
        # Loose tolerance (rtol=.5) because the FER estimate is stochastic.
        assert_allclose(fer_array_test, fer_array_ref, rtol=.5, atol=0)
| StarcoderdataPython |
8060681 | import dataclasses
import enum
from typing import Any, Dict, List, Optional
class Form(enum.Enum):
    """
    Represents the "forms" a JSON Typedef schema can take on. The JSON Typedef
    spec restricts valid schemas to only using certain combinations of keywords.
    This enum represents which of those valid combinations a schema is using.
    """

    # Each member corresponds to the dominant keyword detected by
    # Schema.form(): {} / ref / type / enum / elements /
    # properties+optionalProperties / values / discriminator+mapping.
    EMPTY = enum.auto()
    REF = enum.auto()
    TYPE = enum.auto()
    ENUM = enum.auto()
    ELEMENTS = enum.auto()
    PROPERTIES = enum.auto()
    VALUES = enum.auto()
    DISCRIMINATOR = enum.auto()
@dataclasses.dataclass
class Schema:
"""
Represents a JSON Typedef schema. To construct an instance of Schema, it's
recommended you use :func:`from_dict`.
>>> import jtd
>>> schema = jtd.Schema.from_dict({ 'elements': { 'type': 'string' }})
>>> schema.form()
<Form.ELEMENTS: 5>
>>> schema.elements.form()
<Form.TYPE: 3>
"""
metadata: Optional[Dict[str, Any]]
"""Additional metadata. Does not affect validation."""
nullable: Optional[bool]
"""Describes data that can be JSON ``null`` (Python ``None``)."""
definitions: Optional[Dict[str, 'Schema']]
"""A set of definitions that ``ref`` can refer to. Can only appear on root schemas."""
ref: Optional[str]
"""A reference to a definition."""
type: Optional[str]
"""Describes data that is a boolean, number, string, or timestamp."""
enum: Optional[List[str]]
"""Describes data that must be in a predefined list of strings."""
elements: Optional['Schema']
"""Describes arrays."""
properties: Optional[Dict[str, 'Schema']]
"""Describes required properties of an object."""
optional_properties: Optional[Dict[str, 'Schema']]
"""Describes optional properties of an object."""
additional_properties: Optional[bool]
"""Describes whether there may be properties not in ``properties`` or ``optional_properties``."""
values: Optional['Schema']
"""Describes the values of an object."""
discriminator: Optional[str]
"""Specifies the "tag" property of an object, indicating what kind of data it contains."""
mapping: Optional[Dict[str, 'Schema']]
"""Describes the data, depending on the value of the "tag" property of an object."""
_KEYWORDS = [
"metadata",
"nullable",
"definitions",
"ref",
"type",
"enum",
"elements",
"properties",
"optionalProperties",
"additionalProperties",
"values",
"discriminator",
"mapping",
]
_TYPE_VALUES = [
'boolean',
'int8',
'uint8',
'int16',
'uint16',
'int32',
'uint32',
'float32',
'float64',
'string',
'timestamp',
]
_VALID_FORMS = [
# Empty form
[False, False, False, False, False, False, False, False, False, False],
# Ref form
[True, False, False, False, False, False, False, False, False, False],
# Type form
[False, True, False, False, False, False, False, False, False, False],
# Enum form
[False, False, True, False, False, False, False, False, False, False],
# Elements form
[False, False, False, True, False, False, False, False, False, False],
# Properties form -- properties or optional properties or both, and
# never additional properties on its own
[False, False, False, False, True, False, False, False, False, False],
[False, False, False, False, False, True, False, False, False, False],
[False, False, False, False, True, True, False, False, False, False],
[False, False, False, False, True, False, True, False, False, False],
[False, False, False, False, False, True, True, False, False, False],
[False, False, False, False, True, True, True, False, False, False],
# Values form
[False, False, False, False, False, False, False, True, False, False],
# Discriminator form
[False, False, False, False, False, False, False, False, True, True],
]
@classmethod
def from_dict(cls, dict: Dict[str, Any]) -> 'Schema':
"""
Instantiate a Schema from a dictionary. The dictionary should only
contain types produced by ``json.loads``; otherwise, the output is not
meaningful.
>>> import jtd
>>> jtd.Schema.from_dict({ 'elements': { 'type': 'string' }})
Schema(metadata=None, nullable=None, definitions=None, ref=None, type=None, enum=None, elements=Schema(metadata=None, nullable=None, definitions=None, ref=None, type='string', enum=None, elements=None, properties=None, optional_properties=None, additional_properties=None, values=None, discriminator=None, mapping=None), properties=None, optional_properties=None, additional_properties=None, values=None, discriminator=None, mapping=None)
"""
definitions = None
if "definitions" in dict:
definitions = { k: cls.from_dict(v) for k, v in dict["definitions"].items() }
elements = None
if "elements" in dict:
elements = cls.from_dict(dict["elements"])
properties = None
if "properties" in dict:
properties = { k: cls.from_dict(v) for k, v in dict["properties"].items() }
optional_properties = None
if "optionalProperties" in dict:
optional_properties = { k: cls.from_dict(v) for k, v in dict["optionalProperties"].items() }
values = None
if "values" in dict:
values = cls.from_dict(dict["values"])
mapping = None
if "mapping" in dict:
mapping = { k: cls.from_dict(v) for k, v in dict["mapping"].items() }
for k in dict.keys():
if k not in cls._KEYWORDS:
raise AttributeError("illegal keyword")
return Schema(
metadata=dict.get("metadata"),
nullable=dict.get("nullable"),
definitions=definitions,
ref=dict.get("ref"),
type=dict.get("type"),
enum=dict.get("enum"),
elements=elements,
properties=properties,
optional_properties=optional_properties,
additional_properties=dict.get("additionalProperties"),
values=values,
discriminator=dict.get("discriminator"),
mapping=mapping,
)
def validate(self, root=None):
"""
Checks whether a schema satisfies the semantic rules of JSON Typedef,
such as ensuring that all refs have a corresponding definition.
>>> import jtd
>>> schema = jtd.Schema.from_dict({ 'ref': 'xxx' })
>>> schema.validate()
Traceback (most recent call last):
...
TypeError: ref but no definitions
"""
if root is None:
root = self
if self.definitions is not None:
if self is not root:
raise TypeError("non-root definitions")
for v in self.definitions.values():
v.validate(root)
if self.nullable is not None and type(self.nullable) is not bool:
raise TypeError("nullable not bool")
if self.ref is not None:
if type(self.ref) is not str:
raise TypeError("ref not string")
if type(root.definitions) is not dict:
raise TypeError("ref but no definitions")
if self.ref not in root.definitions:
raise TypeError("ref to non-existent definition")
if self.type is not None and self.type not in self._TYPE_VALUES:
raise TypeError("type not valid string value")
if self.enum is not None:
if type(self.enum) is not list:
raise TypeError("enum not list")
if len(self.enum) == 0:
raise TypeError("enum is empty")
for v in self.enum:
if type(v) is not str:
raise TypeError("enum not list of strings")
if len(self.enum) != len(set(self.enum)):
raise TypeError("enum contains duplicates")
if self.elements is not None:
self.elements.validate(root)
if self.properties is not None:
for v in self.properties.values():
v.validate(root)
if self.optional_properties is not None:
for v in self.optional_properties.values():
v.validate(root)
if self.properties is not None and self.optional_properties is not None:
if set(self.properties).intersection(self.optional_properties):
raise TypeError("properties shares keys with optional_properties")
if self.additional_properties is not None:
if type(self.additional_properties) is not str:
raise TypeError("additional_properties not string")
if self.values is not None:
self.values.validate(root)
if self.discriminator is not None:
if type(self.discriminator) is not str:
raise TypeError("discriminator not string")
if self.mapping is not None:
for v in self.mapping.values():
v.validate(root)
if v.nullable:
raise TypeError("mapping value is nullable")
if v.form() != Form.PROPERTIES:
raise TypeError("mapping value not of properties form")
if self.discriminator in (v.properties or {}):
raise TypeError("mapping properties redefines discriminator")
if self.discriminator in (v.optional_properties or {}):
raise TypeError("mapping optional_properties redefines discriminator")
form_signature = [
self.ref is not None,
self.type is not None,
self.enum is not None,
self.elements is not None,
self.properties is not None,
self.optional_properties is not None,
self.additional_properties is not None,
self.values is not None,
self.discriminator is not None,
self.mapping is not None,
]
if form_signature not in self._VALID_FORMS:
raise TypeError("invalid form")
def form(self) -> Form:
"""
Determine the form of the schema. Meaningful only if :func:`validate`
did not throw any exceptions.
>>> import jtd
>>> jtd.Schema.from_dict({}).form()
<Form.EMPTY: 1>
>>> jtd.Schema.from_dict({ 'enum': ['foo', 'bar' ]}).form()
<Form.ENUM: 4>
>>> jtd.Schema.from_dict({ 'elements': {} }).form()
<Form.ELEMENTS: 5>
"""
if self.ref is not None:
return Form.REF
if self.type is not None:
return Form.TYPE
if self.enum is not None:
return Form.ENUM
if self.elements is not None:
return Form.ELEMENTS
if self.properties is not None or self.optional_properties is not None:
return Form.PROPERTIES
if self.values is not None:
return Form.VALUES
if self.discriminator is not None:
return Form.DISCRIMINATOR
return Form.EMPTY
| StarcoderdataPython |
8118491 | <reponame>ericu-u/GirlHacks
from cProfile import label
from email.policy import default
from django import forms
# (value, display) pairs for the two ChoiceFields below.
diet_choices = (
    ("Vegetarian", "Vegetarian"),
    ("Vegan", "Vegan"),
    ("Pescatarians", "Pescatarians"),
    ("Normal (Hybrid)", "Normal (Hybrid)"),
    ("Keto", "Keto"),
)
sex_choices = (
    ('Male', 'Male'),
    ('Female', 'Female')
)
class UserCreation(forms.Form):
    """Sign-up form collecting credentials plus basic health-profile fields.

    NOTE(review): label=False is unusual for Django forms — the documented
    way to suppress a label is label='' — confirm it renders as intended.
    """
    email = forms.EmailField(label=False, widget=forms.TextInput(attrs={'class':'su-field', 'placeholder':'Email'}))
    username = forms.CharField(label=False, min_length=3, max_length=20, widget=forms.TextInput(attrs={'class':'su-field', 'placeholder':'Username'}))
    password = forms.CharField(label=False, min_length=6, max_length=20, widget=forms.TextInput(attrs={'class':'su-field', 'placeholder':'Password (6 Characters Minimum)', 'type':'password'}))
    height = forms.IntegerField(label=False)
    weight = forms.IntegerField(label=False)
    age = forms.IntegerField(label=False)
    sex = forms.ChoiceField(choices=sex_choices)
    diet = forms.ChoiceField(choices=diet_choices)
    # NOTE(review): field name is a typo for "exercise", but renaming would
    # break any templates/views referencing it — left as-is.
    excercise = forms.IntegerField(label=False) # Times per week
| StarcoderdataPython |
3405405 | <reponame>ZhongXinWang/python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#Author:Winston.Wang
import logging
def div(s):
    """Return 10 divided by *s* interpreted as an integer (true division).

    Raises ZeroDivisionError when int(s) == 0 and ValueError when *s*
    cannot be converted to an integer.
    """
    return 10 / int(s)
def foo(s):
    """Print the result of div(s); lets div's exceptions propagate."""
    print(div(s));
if __name__ == '__main__':
    # Demo: foo(0) deliberately triggers division by zero; the handler logs
    # the error and the finally clause always logs a completion message
    # (the Chinese string means "program execution finished").
    try:
        foo(0)
    except ZeroDivisionError as e:
        logging.error(e)
    finally:
        logging.debug('程序执行结束')
| StarcoderdataPython |
5189825 | <reponame>jippo015/Sub-Zero.bundle<filename>Contents/Code/support/i18n.py
# coding=utf-8
import inspect
from support.config import config
# Plex plugin framework internals: `Data` is a framework global; its private
# _core object exposes the localization machinery we hook below.
core = getattr(Data, "_core")
# get original localization module in order to access its base classes later on
def get_localization_module():
    """Return the module that defines the framework's localization class
    (so its LocalString/LocalStringFormatter base classes can be reused)."""
    cls = getattr(core.localization, "__class__")
    return inspect.getmodule(cls)
plex_i18n_module = get_localization_module()
def old_style_placeholders_count(s):
    """Count the old-style %-placeholders occurring in *s*.

    Only the bare conversions %s, %d, %r, %f and %i are recognised
    (fixme carried over from the original: width/precision forms such
    as %.2f are not matched; a regex would be more complete).
    """
    total = 0
    for marker in ("%s", "%d", "%r", "%f", "%i"):
        total += s.count(marker)
    return total
def check_old_style_placeholders(k, args):
    """Sanity-check positional %-formatting of translation key *k* against
    *args*; return an error marker string on mismatch, None when OK.

    Used only when config.debug_i18n is on; errors also go to the Plex Log.
    """
    # replace escaped %'s?
    k = k.__str__().replace("%%", "")
    # "%(" means the string wants a dictionary, not positional args.
    if "%(" in k:
        Log.Error(u"%r defines named placeholders for formatting" % k)
        return "NEEDS NAMED ARGUMENTS"

    placeholders_found = old_style_placeholders_count(k)
    if placeholders_found and not args:
        Log.Error(u"%r requires a arguments for formatting" % k)
        return "NEEDS FORMAT ARGUMENTS"

    elif not placeholders_found and args:
        Log.Error(u"%r doesn't define placeholders for formatting" % k)
        return "HAS NO FORMAT ARGUMENTS"

    elif placeholders_found and placeholders_found != len(args):
        Log.Error(u"%r wrong amount of arguments supplied for formatting" % k)
        return "WRONG FORMAT ARGUMENT COUNT"
class SmartLocalStringFormatter(plex_i18n_module.LocalStringFormatter):
    """
    this allows the use of dictionaries for string formatting, also does some sanity checking on the keys and values
    """
    def __init__(self, string1, string2, locale=None):
        # string2 arrives as the args tuple built by
        # local_string_with_optional_format below.
        if isinstance(string2, tuple):
            # dictionary passed
            # (a 1-tuple holding a dict-like object means named formatting;
            # "iteritems" implies a Python-2 dict — the Plex runtime)
            if len(string2) == 1 and hasattr(string2[0], "iteritems"):
                string2 = string2[0]

                if config.debug_i18n:
                    # A dict only makes sense with %(name)s placeholders.
                    if "%(" not in string1.__str__().replace("%%", ""):
                        Log.Error(u"%r: dictionary for non-named format string supplied" % string1.__str__())
                        string1 = "%s"
                        string2 = "NO NAMED ARGUMENTS"

            # arguments
            elif len(string2) >= 1 and config.debug_i18n:
                msg = check_old_style_placeholders(string1, string2)
                if msg:
                    # Replace the broken pair with a visible error marker.
                    string1 = "%s"
                    string2 = msg

        # Mirror the base class's internal attributes.
        setattr(self, "_string1", string1)
        setattr(self, "_string2", string2)
        setattr(self, "_locale", locale)
def local_string_with_optional_format(key, *args, **kwargs):
    """Look up the translation for *key* and optionally %-format it.

    Keyword arguments take precedence and are packed into a single dict for
    named formatting.  On a broken translation, falls back to the English
    string (logging the failure first).
    """
    if kwargs:
        # named formatting: the kwargs dict becomes the only format argument
        args = (kwargs,)
    else:
        args = tuple(args)

    if args:
        # fixme: may not be the best idea as this evaluates the string early
        try:
            return unicode(SmartLocalStringFormatter(plex_i18n_module.LocalString(core, key, Locale.CurrentLocale), args))
        except (TypeError, ValueError):
            # localized string is broken; log both variants, fall back to English
            Log.Exception("Broken translation!")
            Log.Debug("EN string: %s", plex_i18n_module.LocalString(core, key, "en"))
            Log.Debug("%s string: %r", Locale.CurrentLocale,
                      unicode(plex_i18n_module.LocalString(core, key, Locale.CurrentLocale)))
            return unicode(SmartLocalStringFormatter(plex_i18n_module.LocalString(core, key, "en"), args))

    # check string instances for arguments
    if config.debug_i18n:
        msg = check_old_style_placeholders(key, args)
        if msg:
            return msg

    try:
        return unicode(plex_i18n_module.LocalString(core, key, Locale.CurrentLocale))
    except TypeError:
        # broken translation without arguments: fall back to English
        Log.Exception("Broken translation!")
        return unicode(plex_i18n_module.LocalString(core, key, "en"))
_ = local_string_with_optional_format
def is_localized_string(s):
    """Return True when *s* is a framework localized string.

    Duck-typed: localized strings expose a ``localize`` member, plain
    str/unicode objects do not.
    """
    return hasattr(s, "localize")
| StarcoderdataPython |
6636618 | import os
import pytest
from testing_utils import *
try:
import pandas
from matmodlab2.fitting.mcgen import *
except ImportError:
pandas = None
this_d = os.path.dirname(os.path.realpath(__file__))
@pytest.mark.mcgen
@pytest.mark.skipif(pandas is None, reason='pandas not imported')
def test_mcgen():
    """example test case """
    # Baseline solution
    # c: reference WLF coefficients; p: reference (time, modulus) Prony pairs
    c = np.array([3.292, 181.82])
    p = np.array([[.0001, 2489],
                  [.001, 1482],
                  [.01, 803],
                  [.1, 402],
                  [1, 207],
                  [10, 124],
                  [100, 101],
                  [0, 222]], dtype=np.float64)
    f = os.path.join(this_d, 'data/mcgen.csv')
    # Fit the master curve with a Prony-series fitter; optwlf=False keeps the
    # WLF coefficients fixed rather than optimizing them.
    mc = MasterCurve.Import(f, ref_temp=75., apply_log=True,
                            fitter=PRONY, optimizer=FMIN, optwlf=False)
    mc.fit()
    s1 = 'WLF coefficients not within tolerance'
    assert np.allclose(mc.wlf_opt, c, rtol=1.e-3, atol=1.e-3), s1
    s2 = 'Prony series not within tolerance'
    # compare only the modulus column of the fit against the baseline
    assert np.allclose(mc.mc_fit[:, 1], p[:, 1], rtol=1.e-2, atol=1.e-2), s2
if __name__ == '__main__':
test_mcgen()
| StarcoderdataPython |
4819496 | from rest_framework.generics import CreateAPIView, ListAPIView
from .serializers import BookmarkSerializer
from authors.apps.articles.models import Article
from authors.apps.articles.views import article_not_found
from .models import BookmarkArticle
from rest_framework.response import Response
from rest_framework import status
class BookmarkAPIView(CreateAPIView):
    """Toggle endpoint: bookmark an article, or remove an existing bookmark."""
    serializer_class = BookmarkSerializer

    def post(self, request, slug):
        """ method to verify bookmark status """
        user = request.user
        try:
            article = Article.objects.get(slug=slug)
        except Article.DoesNotExist:
            raise article_not_found()

        bookmark, created = BookmarkArticle.objects.get_or_create(
            user=user,
            article=article
        )

        if created:
            return Response(
                {'message': 'Article succesfully BOOKMARKED'},
                status=status.HTTP_201_CREATED,
            )

        # the bookmark already existed: this request un-bookmarks it
        bookmark.delete()
        return Response(
            {'message': 'Bookmark succesfully DELETED'},
            status.HTTP_200_OK,
        )
class BookmarkListAPIView(ListAPIView):
    """Return every article the requesting user has bookmarked."""

    def list(self, request):
        """ method to verify bookmark status """
        bookmarks = BookmarkArticle.objects.select_related(
            'article', 'user'
        ).filter(user=request.user)
        data = BookmarkSerializer(bookmarks, many=True).data

        # NOTE(review): 201 Created for a plain list endpoint is unusual
        # (200 would be conventional) -- kept to preserve behavior.
        if data:
            return Response(
                {
                    'bookmarked articles': data,
                    'count': len(data),
                },
                status=status.HTTP_201_CREATED,
            )
        return Response(
            {'message': 'NO article Bookmarked'},
            status.HTTP_200_OK,
        )
| StarcoderdataPython |
3579554 | <reponame>Jiezhi/myleetcode
#!/usr/bin/env python
"""
CREATED AT: 2021/8/11
Des:
https://leetcode.com/problems/array-of-doubled-pairs/
https://leetcode.com/explore/challenge/card/august-leetcoding-challenge-2021/614/week-2-august-8th-august-14th/3877/
GITHUB: https://github.com/Jiezhi/myleetcode
Reference: https://leetcode.com/problems/array-of-doubled-pairs/solution/
"""
from collections import Counter
from typing import List
class Solution:
    def canReorderDoubled(self, arr: List[int]) -> bool:
        """Return True if ``arr`` can be reordered so every even-indexed
        element is doubled by its successor: arr[2i+1] == 2 * arr[2i].

        Greedy counting argument (the approach suggested in the original
        docstring, replacing the O(n^2) ``list.pop(0)``/``remove`` version):
        process distinct values by increasing absolute value; every remaining
        occurrence of ``x`` must be matched by an unused occurrence of
        ``2*x``.  For negative ``x`` the partner ``2*x`` has the *larger*
        absolute value, so ordering by ``abs`` works for both signs.
        Zeros can only pair with other zeros, so their count must be even.

        Runs in O(n log n) time, O(n) space.

        :param arr: list of integers (even length per the problem statement)
        :return: True when such a reordering exists
        """
        count = Counter(arr)
        for x in sorted(count, key=abs):
            if x == 0:
                # zeros pair among themselves
                if count[0] % 2:
                    return False
                continue
            if count[2 * x] < count[x]:
                # not enough partners left for the remaining x's
                return False
            count[2 * x] -= count[x]
        return True
def test():
    """Smoke-test canReorderDoubled against the known example cases."""
    cases = [
        ([3, 1, 3, 6], False),
        ([2, 1, 2, 6], False),
        ([4, -2, 2, -4], True),
        ([1, 2, 4, 16, 8, 4], False),
    ]
    solver = Solution()
    for arr, expected in cases:
        assert solver.canReorderDoubled(arr=arr) == expected
if __name__ == '__main__':
test()
| StarcoderdataPython |
9745804 | <gh_stars>1-10
#!/usr/bin/env python
#############################################################################
##
# This file is part of Taurus
##
# http://taurus-scada.org
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Taurus is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Taurus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Taurus. If not, see <http://www.gnu.org/licenses/>.
##
#############################################################################
"""This module contains the base TaurusModel class"""
__all__ = ["TaurusModel"]
__docformat__ = "restructuredtext"
import weakref
import operator
import threading
from .util.log import Logger
from .util.event import CallableRef, BoundMethodWeakref
from .taurusbasetypes import TaurusEventType, MatchLevel
from .taurushelper import Factory
class TaurusModel(Logger):
    """Base class for all Taurus model objects.

    A model carries three flavours of its name (full/normal/simple) produced
    by its scheme's name validator, an optional parent object, a
    serialization mode, and a list of weakly-referenced event listeners.

    Modernization: the python2-only constructs (``except Exception, e``,
    ``operator.isCallable``, ``operator.isSequenceType``) were replaced with
    forms valid on both python 2.6+ and python 3.
    """

    _factory = None

    RegularEvent = (TaurusEventType.Change,
                    TaurusEventType.Config, TaurusEventType.Periodic)

    def __init__(self, full_name, parent, serializationMode=None):
        v = self.getNameValidator()
        self._full_name, self._norm_name, self._simp_name = v.getNames(
            full_name, self.factory())

        # Bug fix: the original condition read
        # ``self._full_name is None and self._norm_name and self._simp_name is None``
        # which fired when a *normal* name WAS available; all three names
        # being missing is what actually denotes an invalid name.
        if self._full_name is None and self._norm_name is None and self._simp_name is None:
            self.trace("invalid name")

        # Use the most compact available name as the logger name.
        name = self._simp_name or self._norm_name or self._full_name or 'TaurusModel'

        self.call__init__(Logger, name, parent)

        if serializationMode is None:
            # Inherit the serialization mode from the parent, or from the
            # factory when the model has no parent.
            s_obj = parent
            if s_obj is None:
                s_obj = self.factory()
            serializationMode = s_obj.getSerializationMode()
        self._serialization_mode = serializationMode

        self._parentObj = parent
        self._listeners = []

    def __str__name__(self, name):
        """Helper used by __str__/__repr__: 'ClassName(name)'."""
        return '{0}({1})'.format(self.__class__.__name__, name)

    def __str__(self):
        return self.__str__name__(self.getNormalName())

    def __repr__(self):
        return self.__str__name__(self.getFullName())

    def cleanUp(self):
        """Release references so the object can be garbage collected."""
        self.trace("[TaurusModel] cleanUp")
        self._parentObj = None
        self._listeners = None
        Logger.cleanUp(self)

    #-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
    # API for Factory access
    #-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-

    @classmethod
    def factory(cls):
        """Return (lazily creating) the factory for this model's scheme."""
        if cls._factory is None:
            cls._factory = Factory(scheme=cls._scheme)
        return cls._factory

    #-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
    # API for naming
    #-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-

    @classmethod
    def getTaurusElementType(cls):
        raise NotImplementedError("TaurusModel.getTaurusElementType cannot"
                                  " be called")

    def getFullName(self):
        return self._full_name

    def getNormalName(self):
        return self._norm_name

    def getSimpleName(self):
        return self._simp_name

    @classmethod
    def isValid(cls, *args, **kwargs):
        return cls.getNameValidator().isValid(*args, **kwargs)

    @classmethod
    def buildModelName(cls, parent_model, relative_name):
        raise NotImplementedError(
            "TaurusModel.buildModelName cannot be called")

    @classmethod
    def getNameValidator(cls):
        raise NotImplementedError("TaurusModel.getNameValidator cannot be"
                                  "called")

    #-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
    # API for hierarchy access
    #-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-

    def getParentObj(self):
        return self._parentObj

    def getChildObj(self, child_name):
        return None  # TODO: consider raising NotImplementedError instead

    #-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
    # API for serialization
    #-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-

    def setSerializationMode(self, mode):
        """Sets the serialization mode for the system.

        :param mode: (TaurusSerializationMode) the new serialization mode"""
        self._serialization_mode = mode

    def getSerializationMode(self):
        """Gives the serialization operation mode.

        :return: (TaurusSerializationMode) the current serialization mode"""
        return self._serialization_mode

    #-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
    # API for value access
    #-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-

    def getDisplayDescrObj(self, cache=True):
        """A brief description of the model. Can be used as tooltip, for example"""
        raise NotImplementedError("TaurusModel.getDisplayDescrObj cannot be"
                                  " called")

    def getDisplayName(self, cache=True, complete=True):
        """Return a human-readable name: the simple name (optionally followed
        by the uppercased normal name in parentheses), falling back to the
        normal or full name when a simpler one is not available."""
        full_name = self.getFullName()
        normal_name = self.getNormalName()
        simple_name = self.getSimpleName()
        if simple_name:
            ret = simple_name
            if complete:
                ret += " (" + normal_name.upper() + ")"
        elif normal_name:
            ret = normal_name.upper()
        else:
            ret = full_name.upper()
        return ret

    def getFragmentObj(self, fragmentName=None):
        """Returns a fragment object of the model. A fragment of a model is a
        python attribute of the model object.

        Fragment names including dots will be used to recursively get fragments
        of fragments.

        For a simple fragmentName (no dots), this is roughly equivalent to
        getattr(self, fragmentName)

        If the model does not have the fragment, :class:`AttributeError` is
        raised

        :param fragmentName: (str or None) the returned value will correspond to
                             the given fragmentName. If None is passed the
                             defaultFragmentName will be used instead.

        :return: (obj) the member of the modelObj referred by the fragment.
        """
        if fragmentName is None:
            fragmentName = self.defaultFragmentName
        obj = self
        for fn in fragmentName.split('.'):
            if fn == '':
                # avoid a generic Exception, make it AttributeError instead
                raise AttributeError('Cannot get empty fragment')
            obj = getattr(obj, fn)
        return obj

    #-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
    # API for listeners
    #-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-

    def _listenerDied(self, weak_listener):
        """Callback invoked when a weakly-referenced listener is collected."""
        if self._listeners is None:
            return
        try:
            self._listeners.remove(weak_listener)
        except Exception:  # was py2-only ``except Exception, e``
            pass

    def _getCallableRef(self, listener, cb=None):
        """Return a weak reference appropriate for *listener* (plain weakref
        for objects with an ``eventReceived`` method, CallableRef for bare
        callables / bound methods)."""
        # return weakref.ref(listener, self._listenerDied)
        meth = getattr(listener, 'eventReceived', None)
        # ``callable`` replaces the removed ``operator.isCallable``
        if meth is not None and callable(meth):
            return weakref.ref(listener, cb)
        else:
            return CallableRef(listener, cb)

    def addListener(self, listener):
        """Register *listener*; returns True when it was newly added."""
        if self._listeners is None or listener is None:
            return False
        weak_listener = self._getCallableRef(listener, self._listenerDied)
        if weak_listener in self._listeners:
            return False
        self._listeners.append(weak_listener)
        return True

    def removeListener(self, listener):
        """Unregister *listener*; returns False when it was not registered."""
        if self._listeners is None:
            return
        weak_listener = self._getCallableRef(listener)
        try:
            self._listeners.remove(weak_listener)
        except Exception:  # was py2-only ``except Exception, e``
            return False
        return True

    def forceListening(self):
        """Attach an internal dummy listener to keep event delivery active."""
        class __DummyListener:
            def eventReceived(self, *args):
                pass
        # NOTE(review): hasattr(self, '__dummyListener') is NOT subject to
        # name mangling (the attribute is stored as
        # _TaurusModel__dummyListener), so this check always fails and a new
        # dummy is created on every call -- kept as-is to preserve behavior.
        if not hasattr(self, '__dummyListener') or self.__dummyListener is None:
            self.__dummyListener = __DummyListener()
            self.addListener(self.__dummyListener)

    def unforceListening(self):
        """Detach the dummy listener installed by :meth:`forceListening`."""
        # NOTE(review): same hasattr/name-mangling caveat as forceListening.
        if hasattr(self, '__dummyListener') and self.__dummyListener is not None:
            self.removeListener(self.__dummyListener)
            self.__dummyListener = None

    def deleteListener(self, listener):
        self.deprecated("Use removeListener(listener) instead")
        self.removeListener(listener)

    def hasListeners(self):
        """ returns True if anybody is listening to events from this attribute """
        if self._listeners is None:
            return False
        return len(self._listeners) > 0

    def fireEvent(self, event_type, event_value, listeners=None):
        """sends an event to all listeners or a specific one"""
        if listeners is None:
            listeners = self._listeners

        if listeners is None:
            return

        # py3 compatibility: ``operator.isSequenceType`` is gone; in practice
        # ``listeners`` is either the internal list or a single listener, so
        # an explicit list/tuple check preserves the original behavior.
        if not isinstance(listeners, (list, tuple)):
            listeners = listeners,

        for listener in listeners:
            if isinstance(listener, weakref.ref) or isinstance(listener, BoundMethodWeakref):
                l = listener()
            else:
                l = listener
            if l is None:
                continue
            meth = getattr(l, 'eventReceived', None)
            if meth is not None and callable(meth):
                l.eventReceived(self, event_type, event_value)
            elif callable(l):
                l(self, event_type, event_value)

    def isWritable(self):
        return False

    @property
    def name(self):
        return self._simp_name

    @property
    def fullname(self):
        return self._full_name

    parentObj = property(fget=getParentObj)
| StarcoderdataPython |
6663466 | import openseespy.opensees as ops
from math import pi,cos,cosh,ceil
from PyPonding.PondingLoadCell import PondingLoadCell2d
import numpy as np
import matplotlib.pyplot as plt
class PondingLoadCell2d_OPS(PondingLoadCell2d):
    """2d ponding load cell bound to a pair of OpenSees nodes.

    Undeformed geometry is pulled from the OpenSees domain at construction
    time; update() refreshes the nodal deflections from the current analysis
    state.
    """

    def __init__(self,id,nodeI,nodeJ,gamma,tw):
        self.id = id          # cell identifier
        self.nodeI = nodeI    # OpenSees tag of the left node
        self.nodeJ = nodeJ    # OpenSees tag of the right node
        self.gamma = gamma    # fluid unit weight
        self.tw = tw          # tributary width

        # Retreive Node Coordinates
        self.xI = ops.nodeCoord(self.nodeI,1)
        self.yI = ops.nodeCoord(self.nodeI,2)
        self.xJ = ops.nodeCoord(self.nodeJ,1)
        self.yJ = ops.nodeCoord(self.nodeJ,2)

    def update(self):
        """Refresh nodal y-deflections from the current OpenSees state."""
        # Code currently only updates y postion of nodes - @todo maybe update x position also
        # self.dxI = ops.nodeDisp(self.nodeI,1)
        self.dyI = ops.nodeDisp(self.nodeI,2)
        # self.dxJ = ops.nodeDisp(self.nodeJ,1)
        self.dyJ = ops.nodeDisp(self.nodeJ,2)
class wf:
    """Doubly-symmetric wide-flange (I) section.

    Defined by total depth d, web thickness tw, flange width bf, flange
    thickness tf and yield stress Fy; elastic modulus defaults to steel.
    """

    def __init__(self, d, tw, bf, tf, Fy):
        self.d = d      # total depth
        self.tw = tw    # web thickness
        self.bf = bf    # flange width
        self.tf = tf    # flange thickness
        self.Fy = Fy    # yield stress
        self.E = 29000.0
        self.material_type = 'ElasticPP'
        self.num_fiber = 20  # target fiber count through the depth

    def dw(self):
        """Clear web depth (between the inside faces of the flanges)."""
        return self.d - 2*self.tf

    def A(self):
        """Gross cross-sectional area: two flanges plus the web."""
        flanges = 2*self.bf*self.tf
        web = self.dw()*self.tw
        return flanges + web

    def Iz(self):
        """Strong-axis moment of inertia (bounding rectangle minus voids)."""
        outer = (1/12)*self.bf*self.d**3
        voids = (1/12)*(self.bf-self.tw)*self.dw()**3
        return outer - voids

    def define_fiber_section(self, secTag, matTag):
        """Create the OpenSees uniaxial material and fiber section for this
        shape (three rectangular patches: top flange, web, bottom flange)."""
        if self.material_type == 'Elastic':
            ops.uniaxialMaterial('Elastic', matTag, self.E)
        elif self.material_type == 'ElasticPP':
            # ops.uniaxialMaterial('ElasticPP', matTag, self.E, self.Fy/self.E)
            ops.uniaxialMaterial('Steel01', matTag, self.Fy, self.E, 0.001)
        else:
            raise Exception('Input Error - unknown material type (%s)' % self.material_type)

        ops.section('Fiber', secTag)
        fibers_per_unit_depth = self.num_fiber/self.d
        half_web = self.dw()/2
        n_flange = ceil(self.tf*fibers_per_unit_depth)
        n_web = ceil(self.dw()*fibers_per_unit_depth)
        # y runs from +d/2 (top) down to -d/2 (bottom)
        ops.patch('rect', matTag, n_flange, 1, half_web, -self.bf/2, self.d/2, self.bf/2)
        ops.patch('rect', matTag, n_web, 1, -half_web, -self.tw/2, half_web, self.tw/2)
        ops.patch('rect', matTag, n_flange, 1, -self.d/2, -self.bf/2, -half_web, self.bf/2)
        return
# ---------------------------------------------------------------------------
# Ponding analysis of a simply-supported W6x15 beam: the same ramped-volume
# analysis is run twice (elastic, then elastic-perfectly-plastic material)
# and the water-volume vs. water-height responses are plotted together.
# ---------------------------------------------------------------------------
nsteps = 100                          # volume ramp increments per analysis
data_volume = np.zeros((nsteps+1,2))  # column per analysis
data_height = np.zeros((nsteps+1,2))
end_step = [nsteps, nsteps]           # last completed step of each analysis
material_types = ['Elastic','ElasticPP']

# input parameters
for iAnalysis in range(2):
    wf_section = wf(5.99,0.230,5.99,0.260,50.0) # W6x15
    wf_section.material_type = material_types[iAnalysis]
    # print(wf_section.A()) # 4.43 from the Steel Manual
    # print(wf_section.Iz()) # 29.1 from the Steel Manual

    L = 480.0      # span (in)
    E = 29000.0
    A = wf_section.A()
    Iz = wf_section.Iz()
    gamma = 62.4/1000/12**3   # 62.4 pcf converted to kip/in^3
    tw = 60.0                 # tributary width passed to the load cells
    zi = 0.0                  # y-elevation of the left support
    zj = 10.0                 # y-elevation of the right support
    max_volume = 10*L*tw
    nsteps_vol = 30           # max Newton iterations for the water level
    nele = 20
    vol_tol = max_volume/nsteps/100
    mid_node = int(nele/2)

    # set modelbuilder
    ops.model('basic', '-ndm', 2, '-ndf', 3)

    # create nodes (linearly interpolated elevation from zi to zj)
    for i in range(nele+1):
        ops.node(i,L*i/(nele),zi+(zj-zi)*i/(nele))

    # set boundary condition (pin at left, vertical roller at right)
    ops.fix( 0, 1, 1, 0)
    ops.fix(nele, 0, 1, 0)

    # define coordinate transformation
    ops.geomTransf('Linear',1)

    # define cross section
    wf_section.define_fiber_section(1,1)
    ops.beamIntegration('Lobatto', 1, 1, 3)

    # define elements
    for i in range(0,nele):
        # ops.element("elasticBeamColumn",i,i,i+1,A,E,Iz,1)
        ops.element("forceBeamColumn",i,i,i+1,1,1)

    # define ponding load cells
    # NOTE(review): the first constructor argument here is the *builtin*
    # ``id`` function, not a numeric cell id -- looks unintended; verify
    # against PondingLoadCell2d_OPS.__init__ which stores it as ``self.id``.
    PondingLoadCells = dict()
    for i in range(0,nele):
        PondingLoadCells[i] = PondingLoadCell2d_OPS(id,i,i+1,gamma,tw)

    # ------------------------------
    # Start of analysis generation
    # ------------------------------

    # create SOE
    ops.system("BandSPD")

    # create DOF number
    ops.numberer("RCM")

    # create constraint handler
    ops.constraints("Plain")

    # create integrator
    ops.integrator("LoadControl", 1.0)

    # create algorithm
    ops.algorithm("Linear")

    # create analysis object
    ops.analysis("Static")

    # ------------------------------
    # Finally perform the analysis
    # ------------------------------

    # Create dict of each node that can have ponding load applied and initilize load to zero
    EmptyPondingLoad = dict()
    for iCell in PondingLoadCells:
        if not PondingLoadCells[iCell].nodeI in EmptyPondingLoad:
            EmptyPondingLoad[PondingLoadCells[iCell].nodeI] = 0.0
        if not PondingLoadCells[iCell].nodeJ in EmptyPondingLoad:
            EmptyPondingLoad[PondingLoadCells[iCell].nodeJ] = 0.0

    # Perform analysis, ramping up volume
    zw = 0.1   # initial guess of the water surface elevation
    CurrentPondingLoad = EmptyPondingLoad.copy()
    for iStep in range(0,nsteps):

        target_volume = (iStep+1)/nsteps*max_volume

        # Update ponding load cells
        for iCell in PondingLoadCells:
            PondingLoadCells[iCell].update()

        # Estimate water height
        # (Newton iteration: zw such that total ponded volume == target)
        for i in range(nsteps_vol):
            V = 0
            dVdz = 0
            for iCell in PondingLoadCells:
                (iV,idVdz) = PondingLoadCells[iCell].get_volume(zw)
                V += iV
                dVdz += idVdz
            zw = zw - (V-target_volume)/dVdz
            if abs(target_volume-V) <= vol_tol:
                break

        # Compute load vector
        UpdatedPondingLoad = EmptyPondingLoad.copy()
        for iCell in PondingLoadCells:
            f = PondingLoadCells[iCell].get_load_vector(zw)
            UpdatedPondingLoad[PondingLoadCells[iCell].nodeI] += f.item(0)
            UpdatedPondingLoad[PondingLoadCells[iCell].nodeJ] += f.item(1)

        # Apply difference to model (incremental load in a fresh pattern)
        ops.timeSeries("Linear", iStep)
        ops.pattern("Plain", iStep, iStep)
        for iNode in UpdatedPondingLoad:
            fy = UpdatedPondingLoad[iNode] - CurrentPondingLoad[iNode]
            ops.load(iNode, 0.0, fy, 0.0)
        CurrentPondingLoad = UpdatedPondingLoad

        # Run analysis
        ops.analyze(1)
        ops.loadConst('-time',0.0)

        # Store Data
        data_volume[iStep+1,iAnalysis] = target_volume
        data_height[iStep+1,iAnalysis] = zw

        # Stop analysis if water level too low
        if zw <= -1:
            end_step[iAnalysis] = iStep+1
            break

    # Wipe Analysis
    ops.wipe()

#wi = gamma*zw*tw
#deltai = -5*wi*L**4/(384*E*Iz)
#C = gamma*tw*L**4/(pi**4*E*Iz)
#delta = deltai/(5*pi**4*C/192/(1/(cos(pi/2*C**0.25))+1/(cosh(pi/2*C**0.25))-2))
#
#uy = nodeDisp(mid_node,2)
#print('Ponding Amplification: %.3f' % (delta/deltai))
#print('Closed-form: %.5f' % delta)
#print('OpenSees: %.5f' % uy)
#print('Percent Diff %.2f%%' % (100*(uy-delta)/delta))

# Show plot
plt.plot(data_volume[:end_step[0]+1,0], data_height[:end_step[0]+1,0])
plt.plot(data_volume[:end_step[1]+1,1], data_height[:end_step[1]+1,1])
plt.xlabel('Water Volume')
plt.ylabel('Water Height')
plt.show()
| StarcoderdataPython |
1910646 | import json
import argparse
class bcolors:
    """ANSI escape sequences used to colorize terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'    # reset to the default style
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def print_err(msg):
    """Print *msg* in red (error severity)."""
    print(f"{bcolors.FAIL}{msg}{bcolors.ENDC}")
def print_info(msg):
    """Print *msg* in blue (informational)."""
    print(f"{bcolors.OKBLUE}{msg}{bcolors.ENDC}")
def print_warn(msg):
    """Print *msg* in yellow (warning)."""
    print(f"{bcolors.WARNING}{msg}{bcolors.ENDC}")
def print_ok(msg):
    """Print *msg* in green (success)."""
    print(f"{bcolors.OKGREEN}{msg}{bcolors.ENDC}")
def load_json(file_dir):
    """Parse the JSON file at *file_dir* and return the decoded object."""
    with open(file_dir, "r") as fin:
        return json.load(fin)
def arg_parser():
    """Argument Parser

    Parse arguments from command line, and perform error checking

    Returns:
        An argument object which contains arguments from cmd line
    """
    parser = argparse.ArgumentParser(prog='Scene Builder')

    # BUG FIX: each option string must be passed as a *separate* positional
    # argument to add_argument(); the original passed the single string
    # "-c, --src" (and "-o, --out"), which registers one literal, unusable
    # option name instead of the short/long pair.
    parser.add_argument(
        "-c", "--src",
        dest="src",
        type=str,
        required=True,
        help="Input source"
    )
    parser.add_argument(
        "-o", "--out",
        dest="out",
        type=str,
        default="./output",
        help="Output directory"
    )
    parser.add_argument(
        "--name",
        dest="name",
        type=str,
        default=None,
        help="Scene name"
    )
    parser.add_argument(
        "--vrgym",
        dest="vrgym",
        action="store_true",
        help="Enable VRGym"
    )
    parser.add_argument(
        "--physics",
        dest="physics",
        action="store_true",
        help="Enable Physical Properties"
    )
    parser.add_argument(
        "--gazebo",
        dest="gazebo",
        action="store_true",
        help="Enable Gazebo Output"
    )
    args = parser.parse_args()

    # if gazebo output is enabled, the physical properties
    # must be enabled as well
    if args.gazebo:
        args.physics = True

    return args
5137038 | <reponame>rsmekala/junosautomation
#!/usr/bin/env python
#
# Copyright 2017 Juniper Networks, Inc. All rights reserved.
# Licensed under the Juniper Networks Script Software License (the "License").
# You may not use this script file except in compliance with the License, which is located at
# http://www.juniper.net/support/legal/scriptlicense/
# Unless required by applicable law or otherwise agreed to in writing by the parties, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied.
#
# Author.........: <NAME> <<EMAIL>>
# Created on.....: 15/Dec/2017
# Version........: 1.0
# Platform.......: agnostic
# Description....: Simple example of utilising PyEZ tables (part of bb3 example)
#
from jnpr.junos.factory import loadyaml
from os.path import splitext
# Derive the companion YAML file path from this module's own filename
# (foo.py -> foo.yml); it holds the PyEZ Table/View definitions.
_YAML_ = splitext(__file__)[0] + '.yml'
# loadyaml() returns the generated Table/View classes as a dict; merging it
# into globals() exposes them as importable names of this module.
globals().update(loadyaml(_YAML_))
| StarcoderdataPython |
4898313 | import numpy as np
import av
# Render a 4-second test clip whose R/G/B channels cycle through phase-
# shifted sine waves over the clip duration, encoded to test.mp4 (mpeg4).
duration = 4
fps = 24
total_frames = duration * fps

container = av.open("test.mp4", mode="w")
stream = container.add_stream("mpeg4", rate=fps)
stream.width = 480
stream.height = 320
stream.pix_fmt = "yuv420p"

for frame_i in range(total_frames):
    # NOTE(review): from_ndarray treats the array as (height, width, 3), so
    # a (480, 320, 3) image is 480 tall x 320 wide while the stream declares
    # width=480/height=320 -- looks transposed; confirm the intended size.
    img = np.empty((480, 320, 3))
    # each channel: 0..1 sine, phase-shifted by a third of a cycle
    img[:, :, 0] = 0.5 + 0.5 * np.sin(2 * np.pi * (0 / 3 + frame_i / total_frames))
    img[:, :, 1] = 0.5 + 0.5 * np.sin(2 * np.pi * (1 / 3 + frame_i / total_frames))
    img[:, :, 2] = 0.5 + 0.5 * np.sin(2 * np.pi * (2 / 3 + frame_i / total_frames))

    # quantize to 8-bit
    img = np.round(255 * img).astype(np.uint8)
    img = np.clip(img, 0, 255)

    frame = av.VideoFrame.from_ndarray(img, format="rgb24")
    for packet in stream.encode(frame):
        container.mux(packet)

# Flush stream
for packet in stream.encode():
    container.mux(packet)

# Close the file
container.close()
| StarcoderdataPython |
389864 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from setuptools import setup
import os
# Build the package list dynamically: every API-version folder under ./vspk
# (anything that is a directory and not "cli") contributes a
# "vspk.<version>" package plus its fetchers subpackage; per-version
# resource files are shipped via data_files.
packages = ['vspk', 'vspk.cli']
resources = []

api_version_path = "./vspk"

for version_folder in os.listdir(api_version_path):

    if os.path.isfile("%s/%s" % (api_version_path, version_folder)):
        continue

    if version_folder == "cli":
        continue

    packages.append("vspk.%s" % version_folder)
    packages.append("vspk.%s.fetchers" % version_folder)

    if os.path.exists('vspk/%s/resources' % version_folder):
        resources.append(('vspk/%s/resources' % version_folder, ['vspk/%s/resources/attrs_defaults.ini' % version_folder]))

setup(
    name='vspk',
    version="20.10.1",
    url='http://nuagenetworks.net/',
    author='nuage networks',
    author_email='<EMAIL>',
    packages=packages,
    description='SDK for the VSD API',
    long_description=open('README.md').read(),
    long_description_content_type='text/markdown',
    license='BSD-3',
    include_package_data=True,
    # runtime dependencies are read straight from requirements.txt
    install_requires=[line for line in open('requirements.txt')],
    data_files=resources,
    # expose the "vsd" command-line entry point
    entry_points={
        'console_scripts': [
            'vsd = vspk.cli.cli:main']
    }
)
3394387 | import scrapy, re
from alleco.objects.official import Official
class mccandless_t(scrapy.Spider):
    """Scrapes Town of McCandless council members from the town website."""
    name = "mccandless_t"
    muniName = "MCCANDLESS"
    muniType = "TOWNSHIP"
    complete = True

    def start_requests(self):
        """Request the council page with a browser-like User-Agent."""
        urls = ['https://www.townofmccandless.org/town-council']
        for url in urls:
            yield scrapy.Request(url=url,
                callback=self.parse,
                headers={
                    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36'
                })

    def parse(self, response):
        """Yield one Official per council-member table on the page."""
        for quote in response.xpath('//section[@class="field field-name-field-description field-type-text-with-summary field-label-hidden"]/table'):
            yield Official(
                muniName=self.muniName,
                muniType=self.muniType,
                office="MEMBER OF COUNCIL",
                # assumes the 2nd header cell ends with ", <district>" --
                # verify against current page markup
                district=quote.xpath('thead/tr/th[2]/text()').get().split(",")[-1].strip().upper(),
                name=quote.xpath('thead/tr/th[1]/text()').get(),
                phone=quote.xpath('tr/td[2]/p[5]/text()').get(),
                termEnd=self._termEnd(quote.xpath('tr/td[2]/p[7]/text()').get()),
                email=quote.xpath('tr//a/@href').get(),
                address=", ".join([i.strip() for i in quote.xpath('tr/td[2]/p[1]/text()').getall()]),
                url=response.url)

    def _termEnd(self, string):
        """Map a "... 1st Monday <year>" phrase to a concrete date string.

        Returns the first Monday of January for the two known years
        (2022/2024), or None for anything unrecognized.
        """
        # normalize non-breaking spaces, then keep the last three words
        parts = string.replace("\xa0",' ').strip().split(" ")[-3:]
        if parts[0]=="1st" and parts[1]=="Monday":
            if parts[2] == "2022": return "January 3 2022"
            elif parts[2] == "2024": return "January 1 2024"
            else: return None
        else: return None
else: return None | StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.