max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
listener/normal/receiving/urls.py | andymckay/arecibo | 6 | 6623251 | from django.conf.urls.defaults import *
urlpatterns = patterns('',
url(r'^v/1/$', 'receiving.http.post', name="error-post"),
)
| from django.conf.urls.defaults import *
urlpatterns = patterns('',
url(r'^v/1/$', 'receiving.http.post', name="error-post"),
)
| none | 1 | 1.336103 | 1 | |
bot/migrations/0002_auto_20191118_0825.py | azimjohn/covid-19-self-diagnosis-test-bot | 8 | 6623252 | <gh_stars>1-10
# Generated by Django 2.2 on 2019-11-18 08:25
from django.db import migrations, models
import django.db.models.deletion
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('bot', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='respondent',
name='details',
),
migrations.RemoveField(
model_name='respondent',
name='step',
),
migrations.CreateModel(
name='Response',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('step', models.SmallIntegerField(default=0)),
('details', jsonfield.fields.JSONField(default=dict, max_length=8192)),
('completed', models.BooleanField(default=False)),
('respondent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.Respondent')),
],
),
]
| # Generated by Django 2.2 on 2019-11-18 08:25
from django.db import migrations, models
import django.db.models.deletion
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('bot', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='respondent',
name='details',
),
migrations.RemoveField(
model_name='respondent',
name='step',
),
migrations.CreateModel(
name='Response',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('step', models.SmallIntegerField(default=0)),
('details', jsonfield.fields.JSONField(default=dict, max_length=8192)),
('completed', models.BooleanField(default=False)),
('respondent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.Respondent')),
],
),
] | en | 0.763234 | # Generated by Django 2.2 on 2019-11-18 08:25 | 1.659079 | 2 |
nuplan/database/maps_db/gpkg_mapsdb.py | motional/nuplan-devkit | 128 | 6623253 | import fcntl
import glob
import json
import logging
import os
import time
import warnings
from functools import lru_cache
from typing import List, Sequence
import fiona
import geopandas as gpd
import numpy as np
import numpy.typing as npt
import rasterio
from nuplan.database.common.blob_store.creator import BlobStoreCreator
from nuplan.database.common.blob_store.local_store import LocalStore
from nuplan.database.maps_db import layer_dataset_ops
from nuplan.database.maps_db.imapsdb import IMapsDB
from nuplan.database.maps_db.layer import MapLayer
from nuplan.database.maps_db.metadata import MapLayerMeta
logger = logging.getLogger(__name__)
# To silence NotGeoreferencedWarning
warnings.filterwarnings("ignore", category=rasterio.errors.NotGeoreferencedWarning)
# Available map locations
MAP_LOCATIONS = {'sg-one-north', 'us-ma-boston', 'us-nv-las-vegas-strip', 'us-pa-pittsburgh-hazelwood'}
# Dimensions of raster layers for each location
MAP_DIMENSIONS = {
'sg-one-north': (21070, 28060),
'us-ma-boston': (20380, 28730),
'us-nv-las-vegas-strip': (69820, 30120),
'us-pa-pittsburgh-hazelwood': (22760, 23090),
}
# S3 download params.
MAX_ATTEMPTS = 360
SECONDS_BETWEEN_ATTEMPTS = 5
# Dummy layer to use for downloading the map package for the first time
DUMMY_LOAD_LAYER = 'lane_connectors'
class GPKGMapsDBException(Exception):
    """GPKGMapsDB Exception Class, raised for map-download and map-size errors."""

    def __init__(self, message: str) -> None:
        """
        Constructor.
        :param message: Exception message.
        """
        super().__init__(message)
class GPKGMapsDB(IMapsDB):
"""GPKG MapsDB implementation."""
def __init__(self, map_version: str, map_root: str) -> None:
    """
    Constructor.
    :param map_version: Version of map.
    :param map_root: Root folder of the maps.
    """
    self._map_version = map_version
    self._map_root = map_root
    self._blob_store = BlobStoreCreator.create_mapsdb(map_root=self._map_root)
    # Fetch the small JSON version manifest; it maps each location to its
    # map version and per-layer metadata (shapes, transform matrices).
    version_file = self._blob_store.get(f"{self._map_version}.json")  # get blob and save to disk
    self._metadata = json.load(version_file)
    # The dimensions of the maps are hard-coded for the 4 locations.
    self._map_dimensions = MAP_DIMENSIONS
    # S3 file download parameters.
    self._max_attempts = MAX_ATTEMPTS
    self._seconds_between_attempts = SECONDS_BETWEEN_ATTEMPTS
    # Directory holding per-(location, layer) lock files used to serialize
    # concurrent downloads across processes (see _safe_save_layer).
    self._map_lock_dir = os.path.join(self._map_root, '.maplocks')
    os.makedirs(self._map_lock_dir, exist_ok=True)
    # Load map data to trigger automatic downloading on first use.
    self._load_map_data()
def _load_map_data(self) -> None:
    """Load all available maps once to trigger automatic downloading if the maps are loaded for the first time."""
    # TODO: Spawn multiple threads for parallel downloading
    for location in MAP_LOCATIONS:
        # Loading any single vector layer forces the whole GPKG package
        # for that location to be downloaded.
        self.load_vector_layer(location, DUMMY_LOAD_LAYER)
@property
def version_names(self) -> List[str]:
    """
    Lists the map version names for all valid map locations, e.g.
    ['9.17.1964', '9.12.1817', '9.15.1915', '9.17.1937']
    :return: One version string per location, in `self.get_locations()` order.
    """
    return [self._metadata[location]["version"] for location in self.get_locations()]
def get_map_version(self) -> str:
    """Inherited, see superclass. Returns the version string passed at construction."""
    return self._map_version

def get_version(self, location: str) -> str:
    """
    Inherited, see superclass.
    :param location: Name of map location, e.g. "sg-one-north".
    :return: The map version for that location, from the version manifest.
    """
    return str(self._metadata[location]["version"])
def _get_shape(self, location: str, layer_name: str) -> List[int]:
    """
    Gets the shape of a layer given the map location and layer name.
    :param location: Name of map location, e.g. "sg-one-north". See `self.get_locations()`.
    :param layer_name: Name of layer, e.g. `drivable_area`. Use self.layer_names(location) for complete list.
    :return: [height, width] of the raster layer in pixels.
    """
    if layer_name == 'intensity':
        # The intensity layer's shape is recorded in the manifest under the
        # capitalized key "Intensity".
        return self._metadata[location]["layers"]["Intensity"]["shape"]  # type: ignore
    # The dimensions of other map layers use the hard-coded per-location values.
    return list(self._map_dimensions[location])
def _get_transform_matrix(self, location: str, layer_name: str) -> npt.NDArray[np.float64]:
    """
    Get the transformation matrix of a layer given location and layer name.
    :param location: Name of map location, e.g. "sg-one-north". See `self.get_locations()`.
    :param layer_name: Name of layer, e.g. `drivable_area`. Use self.layer_names(location) for complete list.
    :return: The layer's transform matrix as stored in the version manifest.
    """
    return np.array(self._metadata[location]["layers"][layer_name]["transform_matrix"])
@staticmethod
def is_binary(layer_name: str) -> bool:
    """
    Checks if the layer is binary (mask-valued rather than continuous).
    :param layer_name: Name of layer, e.g. `drivable_area`. Use self.layer_names(location) for complete list.
    :return: True if the layer holds a binary mask.
    """
    return layer_name in ["drivable_area", "intersection", "pedestrian_crossing", "walkway", "walk_way"]

@staticmethod
def _can_dilate(layer_name: str) -> bool:
    """
    Whether the layer can be dilated.
    :param layer_name: Name of layer, e.g. `drivable_area`. Use self.layer_names(location) for complete list.
    :return: True only for `drivable_area`.
    """
    return layer_name in ["drivable_area"]
def get_locations(self) -> Sequence[str]:
    """
    Gets the list of available locations in this GPKGMapsDB version.
    :return: Location names, e.g. 'sg-one-north'.
    """
    # Materialize the dict keys view into a list so the return value actually
    # satisfies the declared Sequence[str] contract (keys views do not support
    # indexing); this also removes the need for a `# type: ignore`.
    return list(self._metadata.keys())
def layer_names(self, location: str) -> Sequence[str]:
    """
    Inherited, see superclass.
    :param location: Name of map location, e.g. "sg-one-north".
    :return: User-facing layer names for the location.
    """
    gpkg_layers = self._metadata[location]["layers"].keys()
    # '_distance_px' layers are derived distance rasters, not user-facing layers.
    return [layer for layer in gpkg_layers if '_distance_px' not in layer]
def load_layer(self, location: str, layer_name: str) -> MapLayer:
    """
    Inherited, see superclass.
    :param location: Name of map location, e.g. "sg-one-north".
    :param layer_name: Name of layer, e.g. `drivable_area`.
    :return: The requested layer wrapped in a MapLayer.
    """
    # The manifest stores the intensity layer under the capitalized key.
    if layer_name == "intensity":
        layer_name = "Intensity"
    is_bin = self.is_binary(layer_name)
    can_dilate = self._can_dilate(layer_name)
    layer_data = self._get_layer_matrix(location, layer_name)
    transform_matrix = self._get_transform_matrix(location, layer_name)
    # We assume that the map's pixel-per-meter ratio is the same in the x and y
    # directions, since the MapLayer class requires that.
    precision = 1 / transform_matrix[0, 0]
    layer_meta = MapLayerMeta(
        name=layer_name,
        md5_hash="not_used_for_gpkg_mapsdb",
        can_dilate=can_dilate,
        is_binary=is_bin,
        precision=precision,
    )
    # No precomputed joint-distance matrix is available for GPKG maps.
    distance_matrix = None
    return MapLayer(
        data=layer_data, metadata=layer_meta, joint_distance=distance_matrix, transform_matrix=transform_matrix
    )
def _wait_for_expected_filesize(self, path_on_disk: str, location: str) -> None:
    """
    Waits until the file at `path_on_disk` has reached its full size on disk
    (another process may still be downloading it).
    :param path_on_disk: Path of the file being downloaded.
    :param location: Location to which the file belongs.
    :raises GPKGMapsDBException: If the file does not reach the expected size in time.
    """
    # Local stores have no concurrent remote download to wait for.
    if isinstance(self._blob_store, LocalStore):
        return
    # NOTE(review): this reaches into private attributes of the blob store
    # (_remote, _bucket, _prefix, _client) to issue an S3 HEAD request —
    # brittle against BlobStore refactors; confirm no public accessor exists.
    s3_bucket = self._blob_store._remote._bucket
    s3_key = os.path.join(self._blob_store._remote._prefix, self._get_gpkg_file_path(location))
    map_file_size = self._blob_store._remote._client.head_object(Bucket=s3_bucket, Key=s3_key).get(
        'ContentLength', 0
    )
    # Poll until the on-disk size matches the remote object size.
    for _ in range(self._max_attempts):
        if os.path.getsize(path_on_disk) == map_file_size:
            break
        time.sleep(self._seconds_between_attempts)
    if os.path.getsize(path_on_disk) != map_file_size:
        raise GPKGMapsDBException(
            f"Waited {self._max_attempts * self._seconds_between_attempts} seconds for "
            f"file {path_on_disk} to reach {map_file_size}, "
            f"but size is now {os.path.getsize(path_on_disk)}"
        )
def _safe_save_layer(self, layer_lock_file: str, file_path: str) -> None:
    """
    Safely download the file, serializing concurrent downloads across processes
    with an exclusive advisory lock on a dedicated lock file.
    :param layer_lock_file: Path to lock file.
    :param file_path: Path of the file being downloaded.
    """
    # Use a context manager so the lock-file descriptor is always closed,
    # even if the download raises.
    with open(layer_lock_file, 'w') as fd:
        try:
            fcntl.flock(fd, fcntl.LOCK_EX)
            self._blob_store.save_to_disk(file_path, check_for_compressed=True)
        finally:
            # Release the lock explicitly before the descriptor is closed.
            fcntl.flock(fd, fcntl.LOCK_UN)
@lru_cache(maxsize=None)
def load_vector_layer(self, location: str, layer_name: str) -> gpd.geodataframe:
    """
    Inherited, see superclass.
    NOTE(review): lru_cache on an instance method keys on `self` and keeps the
    instance alive for the cache's lifetime (ruff B019) — acceptable here only
    because GPKGMapsDB instances are long-lived; confirm before reuse elsewhere.
    :param location: Name of map location, e.g. "sg-one-north".
    :param layer_name: Name of the vector layer to load.
    :return: The layer as a GeoDataFrame in UTM coordinates, with a "fid" column.
    """
    # TODO: Remove temporary workaround once map_version is cleaned
    location = location.replace('.gpkg', '')
    rel_path = self._get_gpkg_file_path(location)
    path_on_disk = os.path.join(self._map_root, rel_path)
    if not os.path.exists(path_on_disk):
        # Download under a per-(location, layer) lock, then wait until any
        # concurrent downloader has produced the complete file.
        layer_lock_file = f'{self._map_lock_dir}/{location}_{layer_name}.lock'
        self._safe_save_layer(layer_lock_file, rel_path)
    self._wait_for_expected_filesize(path_on_disk, location)
    with warnings.catch_warnings():
        # Suppress the warnings from the GPKG operations below so that they don't spam the training logs.
        warnings.filterwarnings("ignore")
        # The projected coordinate system depends on which UTM zone the mapped location is in.
        map_meta = gpd.read_file(path_on_disk, layer="meta")
        projection_system = map_meta[map_meta["key"] == "projectedCoordSystem"]["value"].iloc[0]
        gdf_in_pixel_coords = gpd.read_file(path_on_disk, layer=layer_name)
        gdf_in_utm_coords = gdf_in_pixel_coords.to_crs(projection_system)
        # Restore "fid" column, which isn't included by default due to a quirk.
        # See http://kuanbutts.com/2019/07/02/gpkg-write-from-geopandas/
        with fiona.open(path_on_disk, layer=layer_name) as fiona_collection:
            gdf_in_utm_coords["fid"] = [f["id"] for f in fiona_collection]
    return gdf_in_utm_coords
def vector_layer_names(self, location: str) -> Sequence[str]:
    """
    Inherited, see superclass.
    :param location: Name of map location, e.g. "sg-one-north".
    :return: The vector layer names present in the location's GPKG file.
    """
    # TODO: Remove temporary workaround once map_version is cleaned
    location = location.replace('.gpkg', '')
    rel_path = self._get_gpkg_file_path(location)
    path_on_disk = os.path.join(self._map_root, rel_path)
    # Ensure the GPKG file is present on disk before listing its layers.
    self._blob_store.save_to_disk(rel_path)
    return fiona.listlayers(path_on_disk)  # type: ignore
def purge_cache(self) -> None:
    """Inherited, see superclass. Removes all cached gpkg files under the map root."""
    logger.debug("Purging cache...")
    for cached_file in glob.glob(os.path.join(self._map_root, "gpkg", "*")):
        os.remove(cached_file)
    logger.debug("Done purging cache.")
def _get_map_dataset(self, location: str) -> rasterio.DatasetReader:
    """
    Returns a *context manager* for the map dataset (includes all the layers).
    Extract the result in a "with ... as ...:" line.
    :param location: Name of map location, e.g. "sg-one-north". See `self.get_locations()`.
    :return: A *context manager* for the map dataset (includes all the layers).
    """
    rel_path = self._get_gpkg_file_path(location)
    path_on_disk = os.path.join(self._map_root, rel_path)
    # Save the gpkg file to disk (no-op if already present).
    self._blob_store.save_to_disk(rel_path)
    return rasterio.open(path_on_disk)
def get_layer_dataset(self, location: str, layer_name: str) -> rasterio.DatasetReader:
    """
    Returns a *context manager* for the layer dataset.
    Extract the result in a "with ... as ...:" line.
    :param location: Name of map location, e.g. "sg-one-north". See `self.get_locations()`.
    :param layer_name: Name of layer, e.g. `drivable_area`. Use self.layer_names(location) for complete list.
    :return: A *context manager* for the layer dataset.
    :raises ValueError: If the layer is not present in the map.
    """
    with self._get_map_dataset(location) as map_dataset:
        # Subdataset paths end in ":<layer_name>"; pick the matching one.
        layer_dataset_path = next(
            (path for path in map_dataset.subdatasets if path.endswith(":" + layer_name)), None
        )
        if layer_dataset_path is None:
            raise ValueError(
                f"Layer '{layer_name}' not found in map '{location}', " f"version '{self.get_version(location)}'"
            )
        return rasterio.open(layer_dataset_path)
def get_raster_layer_names(self, location: str) -> Sequence[str]:
    """
    Gets the list of available raster layers for a given map location.
    :param location: The layer names for this map location will be returned.
    :return: List of available raster layers.
    """
    # Fix: the original leaked the rasterio dataset handle; use the context
    # manager returned by _get_map_dataset so it is closed deterministically.
    with self._get_map_dataset(location) as all_layers_dataset:
        fully_qualified_layer_names = all_layers_dataset.subdatasets
    # fully_qualified_layer_names is a list of strings like:
    # ["GPKG:/file/path/map.gpkg:drivable_area", "GPKG:/file/path/map.gpkg:intersection", ...]
    # The layer name is everything after the last colon.
    return [name.split(":")[-1] for name in fully_qualified_layer_names]
def get_gpkg_path_and_store_on_disk(self, location: str) -> str:
    """
    Saves the gpkg map for a location to disk and returns its local path.
    :param location: Name of map location, e.g. "sg-one-north".
    :return: Path on disk of the saved gpkg file.
    """
    rel_path = self._get_gpkg_file_path(location)
    path_on_disk = os.path.join(self._map_root, rel_path)
    self._blob_store.save_to_disk(rel_path)
    return path_on_disk

def get_metadata_json_path_and_store_on_disk(self, location: str) -> str:
    """
    Saves the metadata.json for a location to disk and returns its local path.
    :param location: Name of map location, e.g. "sg-one-north".
    :return: Path on disk of the saved metadata.json.
    """
    rel_path = self._get_metadata_json_path(location)
    path_on_disk = os.path.join(self._map_root, rel_path)
    self._blob_store.save_to_disk(rel_path)
    return path_on_disk
def _get_gpkg_file_path(self, location: str) -> str:
    """
    Gets the blob-store-relative path to the gpkg map file.
    :param location: Location for which gpkg needs to be loaded.
    :return: Path to the gpkg file.
    """
    version = self.get_version(location)
    return f"{location}/{version}/map.gpkg"

def _get_metadata_json_path(self, location: str) -> str:
    """
    Gets the blob-store-relative path to the metadata json file.
    :param location: Location for which json needs to be loaded.
    :return: Path to the meta json file.
    """
    version = self.get_version(location)
    return f"{location}/{version}/metadata.json"

def _get_layer_matrix_npy_path(self, location: str, layer_name: str) -> str:
    """
    Gets the blob-store-relative path to the compressed numpy file for the layer.
    :param location: Location for which layer needs to be loaded.
    :param layer_name: Which layer to load.
    :return: Path to the numpy file (np.savez_compressed appends '.npz').
    """
    version = self.get_version(location)
    return f"{location}/{version}/{layer_name}.npy.npz"
@staticmethod
def _get_np_array(path_on_disk: str) -> np.ndarray:  # type: ignore
    """
    Loads the layer array from a compressed .npz file.
    :param path_on_disk: Path to numpy file.
    :return: Numpy array containing the layer (stored under the 'data' key).
    """
    np_data = np.load(path_on_disk)
    return np_data['data']  # type: ignore
def _get_expected_file_size(self, path: str, shape: List[int]) -> int:
    """
    Gets the expected file size in bytes for a raw layer dump.
    :param path: Path to the file.
    :param shape: The [height, width] shape of the map file.
    :return: The expected file size.
    """
    if path.endswith('_dist.npy'):
        return shape[0] * shape[1] * 4  # float32 values take 4 bytes per pixel.
    # All other layers are stored as one byte per pixel.
    return shape[0] * shape[1]
def _get_layer_matrix(self, location: str, layer_name: str) -> npt.NDArray[np.uint8]:
    """
    Returns the map layer for `location` and `layer_name` as a numpy array.
    :param location: Name of map location, e.g. "sg-one-north". See `self.get_locations()`.
    :param layer_name: Name of layer, e.g. `drivable_area`. Use self.layer_names(location) for complete list.
    :return: Numpy representation of layer.
    """
    rel_path = self._get_layer_matrix_npy_path(location, layer_name)
    path_on_disk = os.path.join(self._map_root, rel_path)
    # Lazily extract the layer from the GPKG on first access.
    if not os.path.exists(path_on_disk):
        self._save_layer_matrix(location=location, layer_name=layer_name)
    return self._get_np_array(path_on_disk)
def _save_layer_matrix(self, location: str, layer_name: str) -> None:
    """
    Extracts the data for `layer_name` from the GPKG file for `location`,
    and saves it on disk so it can be retrieved with `_get_layer_matrix`.
    :param location: Name of map location, e.g. "sg-one-north". See `self.get_locations()`.
    :param layer_name: Name of layer, e.g. `drivable_area`. Use self.layer_names(location) for complete list.
    """
    is_bin = self.is_binary(layer_name)
    with self.get_layer_dataset(location, layer_name) as layer_dataset:
        layer_data = layer_dataset_ops.load_layer_as_numpy(layer_dataset, is_bin)
    # Convert distance_px layers (pixel distances) to metric distance matrices.
    if '_distance_px' in layer_name:
        transform_matrix = self._get_transform_matrix(location, layer_name)
        precision = 1 / transform_matrix[0, 0]
        layer_data = np.negative(layer_data / precision).astype('float32')
    # np.savez_compressed appends '.npz', producing '<layer_name>.npy.npz'
    # to match _get_layer_matrix_npy_path.
    npy_file_path = os.path.join(self._map_root, f"{location}/{self.get_version(location)}/{layer_name}.npy")
    np.savez_compressed(npy_file_path, data=layer_data)
def _save_all_layers(self, location: str) -> None:
    """
    Saves data on disk for all layers in the GPKG file for `location`.
    :param location: Name of map location, e.g. "sg-one-north". See `self.get_locations()`.
    """
    rasterio_layers = self.get_raster_layer_names(location)
    for layer_name in rasterio_layers:
        # Bug fix: the original called logger.debug("Working on layer: ", layer_name),
        # passing a %-style argument with no placeholder in the format string,
        # which raises a logging formatting error; use lazy %-formatting.
        logger.debug("Working on layer: %s", layer_name)
        self._save_layer_matrix(location, layer_name)
| import fcntl
import glob
import json
import logging
import os
import time
import warnings
from functools import lru_cache
from typing import List, Sequence
import fiona
import geopandas as gpd
import numpy as np
import numpy.typing as npt
import rasterio
from nuplan.database.common.blob_store.creator import BlobStoreCreator
from nuplan.database.common.blob_store.local_store import LocalStore
from nuplan.database.maps_db import layer_dataset_ops
from nuplan.database.maps_db.imapsdb import IMapsDB
from nuplan.database.maps_db.layer import MapLayer
from nuplan.database.maps_db.metadata import MapLayerMeta
logger = logging.getLogger(__name__)
# To silence NotGeoreferencedWarning
warnings.filterwarnings("ignore", category=rasterio.errors.NotGeoreferencedWarning)
# Available map locations
MAP_LOCATIONS = {'sg-one-north', 'us-ma-boston', 'us-nv-las-vegas-strip', 'us-pa-pittsburgh-hazelwood'}
# Dimensions of raster layers for each location
MAP_DIMENSIONS = {
'sg-one-north': (21070, 28060),
'us-ma-boston': (20380, 28730),
'us-nv-las-vegas-strip': (69820, 30120),
'us-pa-pittsburgh-hazelwood': (22760, 23090),
}
# S3 download params.
MAX_ATTEMPTS = 360
SECONDS_BETWEEN_ATTEMPTS = 5
# Dummy layer to use for downloading the map package for the first time
DUMMY_LOAD_LAYER = 'lane_connectors'
class GPKGMapsDBException(Exception):
"""GPKGMapsDB Exception Class."""
def __init__(self, message: str) -> None:
"""
Constructor.
:param message: Exception message.
"""
super().__init__(message)
class GPKGMapsDB(IMapsDB):
"""GPKG MapsDB implementation."""
def __init__(self, map_version: str, map_root: str) -> None:
"""
Constructor.
:param map_version: Version of map.
:param map_root: Root folder of the maps.
"""
self._map_version = map_version
self._map_root = map_root
self._blob_store = BlobStoreCreator.create_mapsdb(map_root=self._map_root)
version_file = self._blob_store.get(f"{self._map_version}.json") # get blob and save to disk
self._metadata = json.load(version_file)
# The dimensions of the maps are hard-coded for the 4 locations.
self._map_dimensions = MAP_DIMENSIONS
# S3 file download parameters.
self._max_attempts = MAX_ATTEMPTS
self._seconds_between_attempts = SECONDS_BETWEEN_ATTEMPTS
self._map_lock_dir = os.path.join(self._map_root, '.maplocks')
os.makedirs(self._map_lock_dir, exist_ok=True)
# Load map data to trigger automatic downloading.
self._load_map_data()
def _load_map_data(self) -> None:
"""Load all available maps once to trigger automatic downloading if the maps are loaded for the first time."""
# TODO: Spawn multiple threads for parallel downloading
for location in MAP_LOCATIONS:
self.load_vector_layer(location, DUMMY_LOAD_LAYER)
@property
def version_names(self) -> List[str]:
"""
Lists the map version names for all valid map locations, e.g.
['9.17.1964', '9.12.1817', '9.15.1915', '9.17.1937']
"""
return [self._metadata[location]["version"] for location in self.get_locations()]
def get_map_version(self) -> str:
"""Inherited, see superclass."""
return self._map_version
def get_version(self, location: str) -> str:
"""Inherited, see superclass."""
return str(self._metadata[location]["version"])
def _get_shape(self, location: str, layer_name: str) -> List[int]:
"""
Gets the shape of a layer given the map location and layer name.
:param location: Name of map location, e.g. "sg-one-north". See `self.get_locations()`.
:param layer_name: Name of layer, e.g. `drivable_area`. Use self.layer_names(location) for complete list.
"""
if layer_name == 'intensity':
return self._metadata[location]["layers"]["Intensity"]["shape"] # type: ignore
else:
# The dimensions of other map layers are using the hard-coded values.
return list(self._map_dimensions[location])
def _get_transform_matrix(self, location: str, layer_name: str) -> npt.NDArray[np.float64]:
"""
Get transformation matrix of a layer given location and layer name.
:param location: Name of map location, e.g. "sg-one-north`. See `self.get_locations()`.
:param layer_name: Name of layer, e.g. `drivable_area`. Use self.layer_names(location) for complete list.
"""
return np.array(self._metadata[location]["layers"][layer_name]["transform_matrix"])
@staticmethod
def is_binary(layer_name: str) -> bool:
"""
Checks if the layer is binary.
:param layer_name: Name of layer, e.g. `drivable_area`. Use self.layer_names(location) for complete list.
"""
return layer_name in ["drivable_area", "intersection", "pedestrian_crossing", "walkway", "walk_way"]
@staticmethod
def _can_dilate(layer_name: str) -> bool:
"""
If the layer can be dilated.
:param layer_name: Name of layer, e.g. `drivable_area`. Use self.layer_names(location) for complete list.
"""
return layer_name in ["drivable_area"]
def get_locations(self) -> Sequence[str]:
"""
Gets the list of available location in this GPKGMapsDB version.
"""
return self._metadata.keys() # type: ignore
def layer_names(self, location: str) -> Sequence[str]:
"""Inherited, see superclass."""
gpkg_layers = self._metadata[location]["layers"].keys()
return list(filter(lambda x: '_distance_px' not in x, gpkg_layers))
def load_layer(self, location: str, layer_name: str) -> MapLayer:
"""Inherited, see superclass."""
if layer_name == "intensity":
layer_name = "Intensity"
is_bin = self.is_binary(layer_name)
can_dilate = self._can_dilate(layer_name)
layer_data = self._get_layer_matrix(location, layer_name)
transform_matrix = self._get_transform_matrix(location, layer_name)
# We assume that the map's pixel-per-meter ratio is the same in the x and y directions,
# since the MapLayer class requires that.
precision = 1 / transform_matrix[0, 0]
layer_meta = MapLayerMeta(
name=layer_name,
md5_hash="not_used_for_gpkg_mapsdb",
can_dilate=can_dilate,
is_binary=is_bin,
precision=precision,
)
distance_matrix = None
return MapLayer(
data=layer_data, metadata=layer_meta, joint_distance=distance_matrix, transform_matrix=transform_matrix
)
def _wait_for_expected_filesize(self, path_on_disk: str, location: str) -> None:
"""
Waits until the file at `path_on_disk` is exactly `expected_size` bytes.
:param path_on_disk: Path of the file being downloaded.
:param location: Location to which the file belongs.
"""
if isinstance(self._blob_store, LocalStore):
return
s3_bucket = self._blob_store._remote._bucket
s3_key = os.path.join(self._blob_store._remote._prefix, self._get_gpkg_file_path(location))
map_file_size = self._blob_store._remote._client.head_object(Bucket=s3_bucket, Key=s3_key).get(
'ContentLength', 0
)
# Wait if file not downloaded.
for _ in range(self._max_attempts):
if os.path.getsize(path_on_disk) == map_file_size:
break
time.sleep(self._seconds_between_attempts)
if os.path.getsize(path_on_disk) != map_file_size:
raise GPKGMapsDBException(
f"Waited {self._max_attempts * self._seconds_between_attempts} seconds for "
f"file {path_on_disk} to reach {map_file_size}, "
f"but size is now {os.path.getsize(path_on_disk)}"
)
def _safe_save_layer(self, layer_lock_file: str, file_path: str) -> None:
"""
Safely download the file.
:param layer_lock_file: Path to lock file.
:param file_path: Path of the file being downloaded.
"""
fd = open(layer_lock_file, 'w')
try:
fcntl.flock(fd, fcntl.LOCK_EX)
_ = self._blob_store.save_to_disk(file_path, check_for_compressed=True)
finally:
fcntl.flock(fd, fcntl.LOCK_UN)
fd.close()
@lru_cache(maxsize=None)
def load_vector_layer(self, location: str, layer_name: str) -> gpd.geodataframe:
"""Inherited, see superclass."""
# TODO: Remove temporary workaround once map_version is cleaned
location = location.replace('.gpkg', '')
rel_path = self._get_gpkg_file_path(location)
path_on_disk = os.path.join(self._map_root, rel_path)
if not os.path.exists(path_on_disk):
layer_lock_file = f'{self._map_lock_dir}/{location}_{layer_name}.lock'
self._safe_save_layer(layer_lock_file, rel_path)
self._wait_for_expected_filesize(path_on_disk, location)
with warnings.catch_warnings():
# Suppress the warnings from the GPKG operations below so that they don't spam the training logs.
warnings.filterwarnings("ignore")
# The projected coordinate system depends on which UTM zone the mapped location is in.
map_meta = gpd.read_file(path_on_disk, layer="meta")
projection_system = map_meta[map_meta["key"] == "projectedCoordSystem"]["value"].iloc[0]
gdf_in_pixel_coords = gpd.read_file(path_on_disk, layer=layer_name)
gdf_in_utm_coords = gdf_in_pixel_coords.to_crs(projection_system)
# Restore "fid" column, which isn't included by default due to a quirk.
# See http://kuanbutts.com/2019/07/02/gpkg-write-from-geopandas/
with fiona.open(path_on_disk, layer=layer_name) as fiona_collection:
gdf_in_utm_coords["fid"] = [f["id"] for f in fiona_collection]
return gdf_in_utm_coords
def vector_layer_names(self, location: str) -> Sequence[str]:
"""Inherited, see superclass."""
# TODO: Remove temporary workaround once map_version is cleaned
location = location.replace('.gpkg', '')
rel_path = self._get_gpkg_file_path(location)
path_on_disk = os.path.join(self._map_root, rel_path)
self._blob_store.save_to_disk(rel_path)
return fiona.listlayers(path_on_disk) # type: ignore
def purge_cache(self) -> None:
"""Inherited, see superclass."""
logger.debug("Purging cache...")
for f in glob.glob(os.path.join(self._map_root, "gpkg", "*")):
os.remove(f)
logger.debug("Done purging cache.")
def _get_map_dataset(self, location: str) -> rasterio.DatasetReader:
"""
Returns a *context manager* for the map dataset (includes all the layers).
Extract the result in a "with ... as ...:" line.
:param location: Name of map location, e.g. "sg-one-north`. See `self.get_locations()`.
:return: A *context manager* for the map dataset (includes all the layers).
"""
rel_path = self._get_gpkg_file_path(location)
path_on_disk = os.path.join(self._map_root, rel_path)
# Save the gpkg file to disk.
self._blob_store.save_to_disk(rel_path)
return rasterio.open(path_on_disk)
def get_layer_dataset(self, location: str, layer_name: str) -> rasterio.DatasetReader:
"""
Returns a *context manager* for the layer dataset.
Extract the result in a "with ... as ...:" line.
:param location: Name of map location, e.g. "sg-one-north`. See `self.get_locations()`.
:param layer_name: Name of layer, e.g. `drivable_area`. Use self.layer_names(location) for complete list.
:return: A *context manager* for the layer dataset.
"""
with self._get_map_dataset(location) as map_dataset:
layer_dataset_path = next(
(path for path in map_dataset.subdatasets if path.endswith(":" + layer_name)), None
)
if layer_dataset_path is None:
raise ValueError(
f"Layer '{layer_name}' not found in map '{location}', " f"version '{self.get_version(location)}'"
)
return rasterio.open(layer_dataset_path)
def get_raster_layer_names(self, location: str) -> Sequence[str]:
"""
Gets the list of available layers for a given map location.
:param location: The layers name for this map location will be returned.
:return: List of available raster layers.
"""
all_layers_dataset = self._get_map_dataset(location)
fully_qualified_layer_names = all_layers_dataset.subdatasets
# fully_qualified_layer_names is a list of strings like:
# ["GPKG:/file/path/map.gpkg:drivable_area", "GPKG:/file/path/map.gpkg:intersection", ...]
# The layer name is everything after the last colon.
return [name.split(":")[-1] for name in fully_qualified_layer_names]
def get_gpkg_path_and_store_on_disk(self, location: str) -> str:
"""
Saves a gpkg map from a location to disk.
:param location: The layers name for this map location will be returned.
:return: Path on disk to save a gpkg file.
"""
rel_path = self._get_gpkg_file_path(location)
path_on_disk = os.path.join(self._map_root, rel_path)
self._blob_store.save_to_disk(rel_path)
return path_on_disk
def get_metadata_json_path_and_store_on_disk(self, location: str) -> str:
"""
Saves a metadata.json for a location to disk.
:param location: The layers name for this map location will be returned.
:return: Path on disk to save metadata.json.
"""
rel_path = self._get_metadata_json_path(location)
path_on_disk = os.path.join(self._map_root, rel_path)
self._blob_store.save_to_disk(rel_path)
return path_on_disk
def _get_gpkg_file_path(self, location: str) -> str:
"""
Gets path to the gpkg map file.
:param location: Location for which gpkg needs to be loaded.
:return: Path to the gpkg file.
"""
version = self.get_version(location)
return f"{location}/{version}/map.gpkg"
def _get_metadata_json_path(self, location: str) -> str:
"""
Gets path to the metadata json file.
:param location: Location for which json needs to be loaded.
:return: Path to the meta json file.
"""
version = self.get_version(location)
return f"{location}/{version}/metadata.json"
def _get_layer_matrix_npy_path(self, location: str, layer_name: str) -> str:
"""
Gets path to the numpy file for the layer.
:param location: Location for which layer needs to be loaded.
:param layer_name: Which layer to load.
:return: Path to the numpy file.
"""
version = self.get_version(location)
return f"{location}/{version}/{layer_name}.npy.npz"
@staticmethod
def _get_np_array(path_on_disk: str) -> np.ndarray: # type: ignore
"""
Gets numpy array from file.
:param path_on_disk: Path to numpy file.
:return: Numpy array containing the layer.
"""
np_data = np.load(path_on_disk)
return np_data['data'] # type: ignore
def _get_expected_file_size(self, path: str, shape: List[int]) -> int:
"""
Gets the expected file size.
:param path: Path to the file.
:param shape: The shape of the map file.
:return: The expected file size.
"""
if path.endswith('_dist.npy'):
return shape[0] * shape[1] * 4 # float32 values take 4 bytes per pixel.
return shape[0] * shape[1]
def _get_layer_matrix(self, location: str, layer_name: str) -> npt.NDArray[np.uint8]:
"""
Returns the map layer for `location` and `layer_name` as a numpy array.
:param location: Name of map location, e.g. "sg-one-north`. See `self.get_locations()`.
:param layer_name: Name of layer, e.g. `drivable_area`. Use self.layer_names(location) for complete list.
:return: Numpy representation of layer.
"""
rel_path = self._get_layer_matrix_npy_path(location, layer_name)
path_on_disk = os.path.join(self._map_root, rel_path)
if not os.path.exists(path_on_disk):
self._save_layer_matrix(location=location, layer_name=layer_name)
return self._get_np_array(path_on_disk)
def _save_layer_matrix(self, location: str, layer_name: str) -> None:
"""
Extracts the data for `layer_name` from the GPKG file for `location`,
and saves it on disk so it can be retrieved with `_get_layer_matrix`.
:param location: Name of map location, e.g. "sg-one-north`. See `self.get_locations()`.
:param layer_name: Name of layer, e.g. `drivable_area`. Use self.layer_names(location) for complete list.
"""
is_bin = self.is_binary(layer_name)
with self.get_layer_dataset(location, layer_name) as layer_dataset:
layer_data = layer_dataset_ops.load_layer_as_numpy(layer_dataset, is_bin)
# Convert distance_px to dist matrix.
if '_distance_px' in layer_name:
transform_matrix = self._get_transform_matrix(location, layer_name)
precision = 1 / transform_matrix[0, 0]
layer_data = np.negative(layer_data / precision).astype('float32')
npy_file_path = os.path.join(self._map_root, f"{location}/{self.get_version(location)}/{layer_name}.npy")
np.savez_compressed(npy_file_path, data=layer_data)
def _save_all_layers(self, location: str) -> None:
"""
Saves data on disk for all layers in the GPKG file for `location`.
:param location: Name of map location, e.g. "sg-one-north`. See `self.get_locations()`.
"""
rasterio_layers = self.get_raster_layer_names(location)
for layer_name in rasterio_layers:
logger.debug("Working on layer: ", layer_name)
self._save_layer_matrix(location, layer_name)
| en | 0.757979 | # To silence NotGeoreferencedWarning # Available map locations # Dimensions of raster layers for each location # S3 download params. # Dummy layer to use for downloading the map package for the first time GPKGMapsDB Exception Class. Constructor. :param message: Exception message. GPKG MapsDB implementation. Constructor. :param map_version: Version of map. :param map_root: Root folder of the maps. # get blob and save to disk # The dimensions of the maps are hard-coded for the 4 locations. # S3 file download parameters. # Load map data to trigger automatic downloading. Load all available maps once to trigger automatic downloading if the maps are loaded for the first time. # TODO: Spawn multiple threads for parallel downloading Lists the map version names for all valid map locations, e.g. ['9.17.1964', '9.12.1817', '9.15.1915', '9.17.1937'] Inherited, see superclass. Inherited, see superclass. Gets the shape of a layer given the map location and layer name. :param location: Name of map location, e.g. "sg-one-north". See `self.get_locations()`. :param layer_name: Name of layer, e.g. `drivable_area`. Use self.layer_names(location) for complete list. # type: ignore # The dimensions of other map layers are using the hard-coded values. Get transformation matrix of a layer given location and layer name. :param location: Name of map location, e.g. "sg-one-north`. See `self.get_locations()`. :param layer_name: Name of layer, e.g. `drivable_area`. Use self.layer_names(location) for complete list. Checks if the layer is binary. :param layer_name: Name of layer, e.g. `drivable_area`. Use self.layer_names(location) for complete list. If the layer can be dilated. :param layer_name: Name of layer, e.g. `drivable_area`. Use self.layer_names(location) for complete list. Gets the list of available location in this GPKGMapsDB version. # type: ignore Inherited, see superclass. Inherited, see superclass. 
# We assume that the map's pixel-per-meter ratio is the same in the x and y directions, # since the MapLayer class requires that. Waits until the file at `path_on_disk` is exactly `expected_size` bytes. :param path_on_disk: Path of the file being downloaded. :param location: Location to which the file belongs. # Wait if file not downloaded. Safely download the file. :param layer_lock_file: Path to lock file. :param file_path: Path of the file being downloaded. Inherited, see superclass. # TODO: Remove temporary workaround once map_version is cleaned # Suppress the warnings from the GPKG operations below so that they don't spam the training logs. # The projected coordinate system depends on which UTM zone the mapped location is in. # Restore "fid" column, which isn't included by default due to a quirk. # See http://kuanbutts.com/2019/07/02/gpkg-write-from-geopandas/ Inherited, see superclass. # TODO: Remove temporary workaround once map_version is cleaned # type: ignore Inherited, see superclass. Returns a *context manager* for the map dataset (includes all the layers). Extract the result in a "with ... as ...:" line. :param location: Name of map location, e.g. "sg-one-north`. See `self.get_locations()`. :return: A *context manager* for the map dataset (includes all the layers). # Save the gpkg file to disk. Returns a *context manager* for the layer dataset. Extract the result in a "with ... as ...:" line. :param location: Name of map location, e.g. "sg-one-north`. See `self.get_locations()`. :param layer_name: Name of layer, e.g. `drivable_area`. Use self.layer_names(location) for complete list. :return: A *context manager* for the layer dataset. Gets the list of available layers for a given map location. :param location: The layers name for this map location will be returned. :return: List of available raster layers. # fully_qualified_layer_names is a list of strings like: # ["GPKG:/file/path/map.gpkg:drivable_area", "GPKG:/file/path/map.gpkg:intersection", ...] 
# The layer name is everything after the last colon. Saves a gpkg map from a location to disk. :param location: The layers name for this map location will be returned. :return: Path on disk to save a gpkg file. Saves a metadata.json for a location to disk. :param location: The layers name for this map location will be returned. :return: Path on disk to save metadata.json. Gets path to the gpkg map file. :param location: Location for which gpkg needs to be loaded. :return: Path to the gpkg file. Gets path to the metadata json file. :param location: Location for which json needs to be loaded. :return: Path to the meta json file. Gets path to the numpy file for the layer. :param location: Location for which layer needs to be loaded. :param layer_name: Which layer to load. :return: Path to the numpy file. # type: ignore Gets numpy array from file. :param path_on_disk: Path to numpy file. :return: Numpy array containing the layer. # type: ignore Gets the expected file size. :param path: Path to the file. :param shape: The shape of the map file. :return: The expected file size. # float32 values take 4 bytes per pixel. Returns the map layer for `location` and `layer_name` as a numpy array. :param location: Name of map location, e.g. "sg-one-north`. See `self.get_locations()`. :param layer_name: Name of layer, e.g. `drivable_area`. Use self.layer_names(location) for complete list. :return: Numpy representation of layer. Extracts the data for `layer_name` from the GPKG file for `location`, and saves it on disk so it can be retrieved with `_get_layer_matrix`. :param location: Name of map location, e.g. "sg-one-north`. See `self.get_locations()`. :param layer_name: Name of layer, e.g. `drivable_area`. Use self.layer_names(location) for complete list. # Convert distance_px to dist matrix. Saves data on disk for all layers in the GPKG file for `location`. :param location: Name of map location, e.g. "sg-one-north`. See `self.get_locations()`. | 1.853296 | 2 |
playbook_init/__init__.py | rowancarr/ansible-playbook-init | 0 | 6623254 | __author__ = 'rc'
| __author__ = 'rc'
| none | 1 | 1.04578 | 1 | |
coderbyte/Longest Word.py | andraantariksa/code-exercise-answer | 1 | 6623255 | <filename>coderbyte/Longest Word.py
import re
def LongestWord(sen):
d = {}
str_l = re.sub(r"[^a-zA-Z0-9 ]", r"", sen).split()
max_str = ""
for i in str_l:
if len(i) > len(max_str):
max_str = i
return max_str
print LongestWord(raw_input())
| <filename>coderbyte/Longest Word.py
import re
def LongestWord(sen):
d = {}
str_l = re.sub(r"[^a-zA-Z0-9 ]", r"", sen).split()
max_str = ""
for i in str_l:
if len(i) > len(max_str):
max_str = i
return max_str
print LongestWord(raw_input())
| none | 1 | 3.845679 | 4 | |
tests/integration/python_await/multiple_outputs/__main__.py | sticha/pulumi | 12,004 | 6623256 | <reponame>sticha/pulumi<gh_stars>1000+
import asyncio
import pulumi
output = pulumi.Output.from_input(asyncio.sleep(3, "magic string"))
output.apply(print)
exported = pulumi.Output.from_input(asyncio.sleep(2, "foo"))
pulumi.export("exported", exported)
exported.apply(print)
another = pulumi.Output.from_input(asyncio.sleep(5, "bar"))
another.apply(print)
| import asyncio
import pulumi
output = pulumi.Output.from_input(asyncio.sleep(3, "magic string"))
output.apply(print)
exported = pulumi.Output.from_input(asyncio.sleep(2, "foo"))
pulumi.export("exported", exported)
exported.apply(print)
another = pulumi.Output.from_input(asyncio.sleep(5, "bar"))
another.apply(print) | none | 1 | 2.214173 | 2 | |
farnsworth/pre_fill.py | naderm/farnsworth | 0 | 6623257 | <gh_stars>0
#!/usr/bin/env python
from __future__ import absolute_import, print_function
import logging
import os
import sys
from django.conf import settings
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "farnsworth.settings")
this_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
if this_dir not in sys.path:
sys.path.insert(0, this_dir)
import django
if hasattr(django, "setup"):
django.setup()
def _parse_args(args):
import argparse
parser = argparse.ArgumentParser(
description="Fill the database with basic information, such as the "
"manager position and workshifts",
)
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--managers', action='store_true')
parser.add_argument('--requests', action='store_true')
parser.add_argument('--workshift', action='store_true')
return parser.parse_args(args=args)
def main(args):
args = _parse_args(args)
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
# Add Managers
if args.managers:
from managers.fill import fill_managers
fill_managers(verbose=args.verbose)
# Add Requests
if args.requests:
from managers.fill import fill_requests
fill_requests(verbose=args.verbose)
if args.workshift and "workshift" in settings.INSTALLED_APPS:
from workshift.fill import fill_regular_shifts, fill_bathroom_shifts, \
fill_hi_shifts, fill_social_shifts, fill_humor_shifts
fill_regular_shifts()
fill_bathroom_shifts()
fill_hi_shifts()
fill_social_shifts()
fill_humor_shifts()
if __name__ == "__main__":
main(sys.argv[1:])
| #!/usr/bin/env python
from __future__ import absolute_import, print_function
import logging
import os
import sys
from django.conf import settings
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "farnsworth.settings")
this_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
if this_dir not in sys.path:
sys.path.insert(0, this_dir)
import django
if hasattr(django, "setup"):
django.setup()
def _parse_args(args):
import argparse
parser = argparse.ArgumentParser(
description="Fill the database with basic information, such as the "
"manager position and workshifts",
)
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--managers', action='store_true')
parser.add_argument('--requests', action='store_true')
parser.add_argument('--workshift', action='store_true')
return parser.parse_args(args=args)
def main(args):
args = _parse_args(args)
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
# Add Managers
if args.managers:
from managers.fill import fill_managers
fill_managers(verbose=args.verbose)
# Add Requests
if args.requests:
from managers.fill import fill_requests
fill_requests(verbose=args.verbose)
if args.workshift and "workshift" in settings.INSTALLED_APPS:
from workshift.fill import fill_regular_shifts, fill_bathroom_shifts, \
fill_hi_shifts, fill_social_shifts, fill_humor_shifts
fill_regular_shifts()
fill_bathroom_shifts()
fill_hi_shifts()
fill_social_shifts()
fill_humor_shifts()
if __name__ == "__main__":
main(sys.argv[1:]) | en | 0.352612 | #!/usr/bin/env python # Add Managers # Add Requests | 2.132356 | 2 |
python/1579.A.py | arechesk/cf | 0 | 6623258 | n=int(input())
for i in range(n):
s=input()
if 0==sum([int(k=="B" or -1) for k in s]):
print("YES")
else:
print("NO")
| n=int(input())
for i in range(n):
s=input()
if 0==sum([int(k=="B" or -1) for k in s]):
print("YES")
else:
print("NO")
| none | 1 | 3.662426 | 4 | |
electrum/networks/abosom_mainnet.py | SOME-BODY84/electrum-1 | 4 | 6623259 | from electrum.util import inv_dict, read_json, bfh
from .abstract_network import AbstractNet
from .stake_mixin import StakeMixin
from ..bitcoin import hash_encode
from ..exceptions import MissingHeader
class AbosomMainnet(AbstractNet, StakeMixin):
NAME = 'Abosom'
NAME_LOWER = 'abosom'
SHORT_CODE = 'ABOSOM'
DATA_DIR = 'abosom'
OPEN_ALIAS_PREFIX = 'abosom'
PAYMENT_URI_SCHEME = 'abosom'
PAYMENT_REQUEST_PKI_TYPE = "dnssec+abosom"
APPLICATION_PAYMENT_REQUEST_TYPE = 'application/abosom-paymentrequest'
APPLICATION_PAYMENT_TYPE = 'application/abosom-payment'
APPLICATION_PAYMENT_ACK_TYPE = 'application/abosom-paymentack'
BASE_UNITS = {'ABOSOM': 8, 'mABOSOM': 5, 'uABOSOM': 2, 'satoshi': 0}
BASE_UNITS_INVERSE = inv_dict(BASE_UNITS)
BASE_UNITS_LIST = ['ABOSOM', 'mABOSOM', 'uABOSOM', 'satoshi']
TESTNET = False
WIF_PREFIX = 0x80
ADDRTYPE_P2PKH = 75
ADDRTYPE_P2SH = 78
XPRV_HEADERS = {
'standard': 0x800001c8,
}
XPRV_HEADERS_INV = inv_dict(XPRV_HEADERS)
XPUB_HEADERS = {
'standard': 0x800101c8,
}
XPUB_HEADERS_INV = inv_dict(XPUB_HEADERS)
BIP44_COIN_TYPE = 704
GENESIS = "00000e8048ffa0a80549ed405640e95e01590e70baf4888ef594d87402635697"
DEFAULT_PORTS = {'t': '50041', 's': '50042'}
DEFAULT_SERVERS = read_json('servers/Abosom-Mainnet.json', {})
CHECKPOINTS = read_json('checkpoints/Abosom-Mainnet.json', [])
LN_REALM_BYTE = 0
LN_DNS_SEEDS = []
COINBASE_MATURITY = 8
COIN = 1000000
TOTAL_COIN_SUPPLY_LIMIT = 110000000
SIGNED_MESSAGE_PREFIX = b"\x18Abosom Signed Message:\n"
DECIMAL_POINT_DEFAULT = 8 # CRW
TARGET_TIMESPAN = int(24 * 60 * 60)
TARGET_SPACING = int(60)
INTERVAL = int(TARGET_TIMESPAN / TARGET_SPACING)
POS_START_HEIGHT = 1
BLOCK_EXPLORERS = {
'system default': ('blockchain:/', {'tx': 'tx/', 'addr': 'address/'}),
}
@classmethod
def hash_raw_header(cls, header):
import algomodule
return hash_encode(algomodule._x11_hash(bfh(header)))
@classmethod
def is_pos_active(cls, header) -> bool:
return True
@classmethod
def get_target(cls, height: int, blockchain) -> int:
index = height // 2016 - 1
if index == -1:
return cls.MAX_TARGET
if index < len(blockchain.checkpoints):
h, t = blockchain.checkpoints[index]
return t
return cls.get_target_btc(height, blockchain)
@classmethod
def get_target_btc(cls, height: int, blockchain) -> int:
if not height % cls.INTERVAL == 0:
# Get the first block of this retarget period
last = blockchain.read_header(height - 1)
if not last:
raise MissingHeader()
return blockchain.bits_to_target(last['bits'])
# new target
first = blockchain.read_header(height - cls.INTERVAL)
last = blockchain.read_header(height - 1)
if not first or not last:
raise MissingHeader()
bits = last.get('bits')
target = blockchain.bits_to_target(bits)
nActualTimespan = last.get('timestamp') - first.get('timestamp')
nActualTimespan = max(nActualTimespan, cls.TARGET_TIMESPAN // 4)
nActualTimespan = min(nActualTimespan, cls.TARGET_TIMESPAN * 4)
new_target = min(cls.MAX_TARGET, (target * nActualTimespan) // cls.TARGET_TIMESPAN)
# not any target can be represented in 32 bits:
new_target = blockchain.bits_to_target(blockchain.target_to_bits(new_target))
return new_target | from electrum.util import inv_dict, read_json, bfh
from .abstract_network import AbstractNet
from .stake_mixin import StakeMixin
from ..bitcoin import hash_encode
from ..exceptions import MissingHeader
class AbosomMainnet(AbstractNet, StakeMixin):
NAME = 'Abosom'
NAME_LOWER = 'abosom'
SHORT_CODE = 'ABOSOM'
DATA_DIR = 'abosom'
OPEN_ALIAS_PREFIX = 'abosom'
PAYMENT_URI_SCHEME = 'abosom'
PAYMENT_REQUEST_PKI_TYPE = "dnssec+abosom"
APPLICATION_PAYMENT_REQUEST_TYPE = 'application/abosom-paymentrequest'
APPLICATION_PAYMENT_TYPE = 'application/abosom-payment'
APPLICATION_PAYMENT_ACK_TYPE = 'application/abosom-paymentack'
BASE_UNITS = {'ABOSOM': 8, 'mABOSOM': 5, 'uABOSOM': 2, 'satoshi': 0}
BASE_UNITS_INVERSE = inv_dict(BASE_UNITS)
BASE_UNITS_LIST = ['ABOSOM', 'mABOSOM', 'uABOSOM', 'satoshi']
TESTNET = False
WIF_PREFIX = 0x80
ADDRTYPE_P2PKH = 75
ADDRTYPE_P2SH = 78
XPRV_HEADERS = {
'standard': 0x800001c8,
}
XPRV_HEADERS_INV = inv_dict(XPRV_HEADERS)
XPUB_HEADERS = {
'standard': 0x800101c8,
}
XPUB_HEADERS_INV = inv_dict(XPUB_HEADERS)
BIP44_COIN_TYPE = 704
GENESIS = "00000e8048ffa0a80549ed405640e95e01590e70baf4888ef594d87402635697"
DEFAULT_PORTS = {'t': '50041', 's': '50042'}
DEFAULT_SERVERS = read_json('servers/Abosom-Mainnet.json', {})
CHECKPOINTS = read_json('checkpoints/Abosom-Mainnet.json', [])
LN_REALM_BYTE = 0
LN_DNS_SEEDS = []
COINBASE_MATURITY = 8
COIN = 1000000
TOTAL_COIN_SUPPLY_LIMIT = 110000000
SIGNED_MESSAGE_PREFIX = b"\x18Abosom Signed Message:\n"
DECIMAL_POINT_DEFAULT = 8 # CRW
TARGET_TIMESPAN = int(24 * 60 * 60)
TARGET_SPACING = int(60)
INTERVAL = int(TARGET_TIMESPAN / TARGET_SPACING)
POS_START_HEIGHT = 1
BLOCK_EXPLORERS = {
'system default': ('blockchain:/', {'tx': 'tx/', 'addr': 'address/'}),
}
@classmethod
def hash_raw_header(cls, header):
import algomodule
return hash_encode(algomodule._x11_hash(bfh(header)))
@classmethod
def is_pos_active(cls, header) -> bool:
return True
@classmethod
def get_target(cls, height: int, blockchain) -> int:
index = height // 2016 - 1
if index == -1:
return cls.MAX_TARGET
if index < len(blockchain.checkpoints):
h, t = blockchain.checkpoints[index]
return t
return cls.get_target_btc(height, blockchain)
@classmethod
def get_target_btc(cls, height: int, blockchain) -> int:
if not height % cls.INTERVAL == 0:
# Get the first block of this retarget period
last = blockchain.read_header(height - 1)
if not last:
raise MissingHeader()
return blockchain.bits_to_target(last['bits'])
# new target
first = blockchain.read_header(height - cls.INTERVAL)
last = blockchain.read_header(height - 1)
if not first or not last:
raise MissingHeader()
bits = last.get('bits')
target = blockchain.bits_to_target(bits)
nActualTimespan = last.get('timestamp') - first.get('timestamp')
nActualTimespan = max(nActualTimespan, cls.TARGET_TIMESPAN // 4)
nActualTimespan = min(nActualTimespan, cls.TARGET_TIMESPAN * 4)
new_target = min(cls.MAX_TARGET, (target * nActualTimespan) // cls.TARGET_TIMESPAN)
# not any target can be represented in 32 bits:
new_target = blockchain.bits_to_target(blockchain.target_to_bits(new_target))
return new_target | en | 0.834208 | # CRW # Get the first block of this retarget period # new target # not any target can be represented in 32 bits: | 1.900048 | 2 |
gpenkf/core/classic_gp.py | danilkuzin/GP-EnKF | 12 | 6623260 | <filename>gpenkf/core/classic_gp.py
"""
Classic GP
"""
import GPy
import numpy as np
from gpenkf.gp_util.squared_exponential import SquaredExponential
class NormalGP:
"""
Classic GP model. Wrapper of GPy functions without inducing points
:param parameters: The :class:`~gpenkf.core.parameters.parameters` parameters
"""
def __init__(self, parameters):
self.inducing_points_locations = parameters.inducing_points_locations
self.initial_log_gp_params = parameters.initial_log_gp_params
self.initial_log_sigma = parameters.initial_log_sigma
self.x_history = np.empty((0, self.inducing_points_locations.shape[1]))
self.f_history = np.empty((0,))
self.m = None
self.g = None
self.params = np.zeros((3,))
def compute_nmse(self, x_sample, f_true_sample):
"""
:param x_sample: location of the points to predict
:param f_true_sample: true value at sample points
:return: NMSE between predicted and true values at sample points
"""
mean, _ = np.squeeze(self.m.predict(x_sample))
return np.mean(np.sqrt((mean-f_true_sample)**2) / np.sqrt(f_true_sample**2))
def compute_log_likelihood(self, x_sample, f_true_sample):
"""
:param x_sample: location of the points to predict
:param f_true_sample: true value at sample points
:return: log likelihood of true values at sample points given the estimated model
"""
cov_func = SquaredExponential(variance=np.exp(self.params[0]), lengthscale=np.exp(self.params[1]))
return cov_func.log_likelihood(f_true_sample, x_sample, np.exp(self.params[2]))
def run_iteration(self, x_new, f_new_noisy):
"""
Perform one iteration learning parameters with sample data.
:param x_new: locations of new observations
:param f_new_noisy: values of new observations
"""
self.x_history = np.append(self.x_history, x_new, axis=0)
self.f_history = np.append(self.f_history, f_new_noisy, axis=0)
kernel = GPy.kern.RBF(input_dim=self.inducing_points_locations.shape[1], variance=np.exp(self.initial_log_gp_params[0]),
lengthscale=np.exp(self.initial_log_gp_params[1]))
self.m = GPy.models.GPRegression(self.x_history, np.expand_dims(self.f_history, axis=1), kernel)
self.m.Gaussian_noise.variance = np.exp(self.initial_log_sigma)
self.m.constrain_positive()
self.m.optimize()
self.m.Gaussian_noise.variance = np.exp(self.initial_log_sigma)
self.params[0] = np.log(self.m.kern.param_array[0])
self.params[1] = np.log(self.m.kern.param_array[1])
self.params[2] = np.log(self.m.likelihood.variance[0])
self.g, _ = np.squeeze(self.m.predict(self.inducing_points_locations))
def get_g_mean(self):
"""
:return: mean of state ensemble
"""
return self.g
def get_eta_ensemble(self):
"""
For compatibility with other models.
:return: nans
"""
return np.nan
def get_log_mean_params(self):
"""
:return: logarithm of mean GP hyperparameters, logarithm of mean noise variance
"""
return self.params[:-1], self.params[-1]
| <filename>gpenkf/core/classic_gp.py
"""
Classic GP
"""
import GPy
import numpy as np
from gpenkf.gp_util.squared_exponential import SquaredExponential
class NormalGP:
"""
Classic GP model. Wrapper of GPy functions without inducing points
:param parameters: The :class:`~gpenkf.core.parameters.parameters` parameters
"""
def __init__(self, parameters):
self.inducing_points_locations = parameters.inducing_points_locations
self.initial_log_gp_params = parameters.initial_log_gp_params
self.initial_log_sigma = parameters.initial_log_sigma
self.x_history = np.empty((0, self.inducing_points_locations.shape[1]))
self.f_history = np.empty((0,))
self.m = None
self.g = None
self.params = np.zeros((3,))
def compute_nmse(self, x_sample, f_true_sample):
"""
:param x_sample: location of the points to predict
:param f_true_sample: true value at sample points
:return: NMSE between predicted and true values at sample points
"""
mean, _ = np.squeeze(self.m.predict(x_sample))
return np.mean(np.sqrt((mean-f_true_sample)**2) / np.sqrt(f_true_sample**2))
def compute_log_likelihood(self, x_sample, f_true_sample):
"""
:param x_sample: location of the points to predict
:param f_true_sample: true value at sample points
:return: log likelihood of true values at sample points given the estimated model
"""
cov_func = SquaredExponential(variance=np.exp(self.params[0]), lengthscale=np.exp(self.params[1]))
return cov_func.log_likelihood(f_true_sample, x_sample, np.exp(self.params[2]))
def run_iteration(self, x_new, f_new_noisy):
"""
Perform one iteration learning parameters with sample data.
:param x_new: locations of new observations
:param f_new_noisy: values of new observations
"""
self.x_history = np.append(self.x_history, x_new, axis=0)
self.f_history = np.append(self.f_history, f_new_noisy, axis=0)
kernel = GPy.kern.RBF(input_dim=self.inducing_points_locations.shape[1], variance=np.exp(self.initial_log_gp_params[0]),
lengthscale=np.exp(self.initial_log_gp_params[1]))
self.m = GPy.models.GPRegression(self.x_history, np.expand_dims(self.f_history, axis=1), kernel)
self.m.Gaussian_noise.variance = np.exp(self.initial_log_sigma)
self.m.constrain_positive()
self.m.optimize()
self.m.Gaussian_noise.variance = np.exp(self.initial_log_sigma)
self.params[0] = np.log(self.m.kern.param_array[0])
self.params[1] = np.log(self.m.kern.param_array[1])
self.params[2] = np.log(self.m.likelihood.variance[0])
self.g, _ = np.squeeze(self.m.predict(self.inducing_points_locations))
def get_g_mean(self):
"""
:return: mean of state ensemble
"""
return self.g
def get_eta_ensemble(self):
"""
For compatibility with other models.
:return: nans
"""
return np.nan
def get_log_mean_params(self):
"""
:return: logarithm of mean GP hyperparameters, logarithm of mean noise variance
"""
return self.params[:-1], self.params[-1]
| en | 0.608042 | Classic GP Classic GP model. Wrapper of GPy functions without inducing points :param parameters: The :class:`~gpenkf.core.parameters.parameters` parameters :param x_sample: location of the points to predict :param f_true_sample: true value at sample points :return: NMSE between predicted and true values at sample points :param x_sample: location of the points to predict :param f_true_sample: true value at sample points :return: log likelihood of true values at sample points given the estimated model Perform one iteration learning parameters with sample data. :param x_new: locations of new observations :param f_new_noisy: values of new observations :return: mean of state ensemble For compatibility with other models. :return: nans :return: logarithm of mean GP hyperparameters, logarithm of mean noise variance | 2.618545 | 3 |
nameko_couchbase.py | geoffjukes/nameko-couchbase | 0 | 6623261 | <reponame>geoffjukes/nameko-couchbase
from urllib.parse import urlparse, urlencode
from nameko.extensions import DependencyProvider
from couchbase.cluster import Cluster
from couchbase.cluster import PasswordAuthenticator
COUCHBASE_KEY = 'COUCHBASE'
class Couchbase(DependencyProvider):
def __init__(self, bucket):
self.cluster = None
self.authenticator = None
self.bucket = bucket
def setup(self):
config = self.container.config[COUCHBASE_KEY]
uri = urlparse(config['URI'])
params = urlencode(config.get('CLIENT_CONFIG'), {})
self.authenticator = PasswordAuthenticator(uri.username, uri.password)
self.cluster = Cluster('{}://{}?{}'.format(uri.scheme, uri.hostname, params))
def start(self):
self.cluster.authenticate(self.authenticator)
def stop(self):
self.cluster = None
def kill(self):
self.cluster = None
def get_dependency(self, worker_ctx):
return self.cluster.open_bucket(self.bucket)
| from urllib.parse import urlparse, urlencode
from nameko.extensions import DependencyProvider
from couchbase.cluster import Cluster
from couchbase.cluster import PasswordAuthenticator
COUCHBASE_KEY = 'COUCHBASE'
class Couchbase(DependencyProvider):
def __init__(self, bucket):
self.cluster = None
self.authenticator = None
self.bucket = bucket
def setup(self):
config = self.container.config[COUCHBASE_KEY]
uri = urlparse(config['URI'])
params = urlencode(config.get('CLIENT_CONFIG'), {})
self.authenticator = PasswordAuthenticator(uri.username, uri.password)
self.cluster = Cluster('{}://{}?{}'.format(uri.scheme, uri.hostname, params))
def start(self):
self.cluster.authenticate(self.authenticator)
def stop(self):
self.cluster = None
def kill(self):
self.cluster = None
def get_dependency(self, worker_ctx):
return self.cluster.open_bucket(self.bucket) | none | 1 | 2.173152 | 2 | |
server/src/nmea.py | mikepfrank/COSMICi | 0 | 6623262 | #|******************************************************************************
#|
#| FILE NAME: nmea.py [python module source file]
#|
#| DESCRIPTION:
#| This file defines functions associated with processing
#| of NMEA-formatted 'sentences' or message lines.
#|
#|vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
__all__ = ['BadChecksum', # Exceptions
'isNMEA', # Functions.
'getCRC',
'calcCRC',
'stripNMEA',
'makeNMEA',
]
class BadChecksum(Exception): pass # NMEA checksum doesn't match.
# Given a string, is it potentially an NMEA-formatted sentence?
def isNMEA(line:str):
return line != "" and line[0] == '$'
# Given an NMEA-formatted sentence, return the two-nibble hex
# checksum at the end of it, if present. The value is returned
# as an integer in the range 0-255.
def getCRC(sent:str):
length = len(sent) # Get the length of the string.
if length < 3: # Less than 3 characters? Can't have a checksum.
return None
if sent[-3] != '*': # If 3rd character from end of string isn't an asterisk,
return None # then there's no checksum and we're done.
last2 = sent[-2:] # Get the last two characters of the string.
return int(last2, 16) # Convert to integer as base-16.
# Calculate and return the NMEA CRC/checksum (xor of byte values)
# of the given string (not already decorated with $*).
def calcCRC(bareStr:str):
codes = bareStr.encode() # Convert string to byte array.
crc = 0 # Initialize checksum to 0.
for byte in codes: # Go through array,
crc ^= byte # XOR'ing each byte into checksum.
return crc # Return result.
# Given a possible NMEA sentence, with or without a checksum
# present, if the checksum is present then verify it (and
# throw a BadChecksum exception if it doesn't match), and
# return the "bare" string (without the '$', the '*', or the
# checksum).
# The input is assumed to be a single line (containing no
# line-end characters) with any initial/final whitespace
# already stripped off.
def stripNMEA(line:str):
# First, if the line doesn't begin with a dollar sign '$'
# then it's not an NMEA sentence at all; just return it.
if not isNMEA(line):
return line
# At this point we know that there's a dollar sign at the
# start. Let's go ahead and strip it off (we don't need
# it any more.
line = line[1:] # All chars from 2nd to last.
# OK, so at this point we know we have an NMEA-type
# sentence, but with the '$' already stripped off.
# Let's see if it has a CRC code ("*XX") at the end.
crc = getCRC(line)
# If it has no CRC code, then all we have to do is
# return the line, which already has the '$' stripped.
if crc == None:
return line
# OK, so at this point we have a CRC code, and a line
# with the '$' stripped off the front. We know the
# last 3 characters are "*XX", so strip those off too.
line = line[:-3] # All but last 3 characters of string.
# Now we have a "bare" line (with $* stuff stripped away).
# Calculate its CRC value and compare it to the one given.
# If they don't match, raise an exception.
if calcCRC(line) != crc:
raise BadChecksum # Raise a "bad checksum" exception.
# At this point the line is bare and we've verified that
# the checksum matches. Just return the bare line.
return line
# Given a string, return the NMEA sentence equivalent.
# Include a checksum if and only if makeChecksum is true (default False).
# Does not add any line-end character(s).
def makeNMEA(line:str, makeChecksum:bool=False):
if makeChecksum:
crcNum = calcCRC(line)
crcStr = "*%02x" % crcNum
line = line + crcStr
return ("$%s" % line)
| #|******************************************************************************
#|
#| FILE NAME: nmea.py [python module source file]
#|
#| DESCRIPTION:
#| This file defines functions associated with processing
#| of NMEA-formatted 'sentences' or message lines.
#|
#|vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
__all__ = ['BadChecksum', # Exceptions
'isNMEA', # Functions.
'getCRC',
'calcCRC',
'stripNMEA',
'makeNMEA',
]
class BadChecksum(Exception): pass # NMEA checksum doesn't match.
# Given a string, is it potentially an NMEA-formatted sentence?
def isNMEA(line:str):
return line != "" and line[0] == '$'
# Given an NMEA-formatted sentence, return the two-nibble hex
# checksum at the end of it, if present. The value is returned
# as an integer in the range 0-255.
def getCRC(sent:str):
    # A trailing checksum looks like "*XX"; anything shorter than three
    # characters, or lacking the asterisk, cannot carry one.
    if len(sent) < 3 or sent[-3] != '*':
        return None
    # Parse the final two hex digits as the checksum value.
    return int(sent[-2:], 16)
# Calculate and return the NMEA CRC/checksum (xor of byte values)
# of the given string (not already decorated with $*).
def calcCRC(bareStr:str):
    checksum = 0
    # XOR together the byte values of the encoded string; the empty
    # string yields 0.
    for value in bareStr.encode():
        checksum ^= value
    return checksum
# Given a possible NMEA sentence, with or without a checksum
# present, if the checksum is present then verify it (and
# throw a BadChecksum exception if it doesn't match), and
# return the "bare" string (without the '$', the '*', or the
# checksum).
# The input is assumed to be a single line (containing no
# line-end characters) with any initial/final whitespace
# already stripped off.
def stripNMEA(line:str):
    """Undecorate an NMEA sentence, verifying its checksum when present.

    Returns *line* unchanged when it is not an NMEA sentence at all.
    Otherwise strips the leading '$' and, when a trailing "*XX"
    checksum is present, verifies it (raising BadChecksum on a
    mismatch) and strips it too.
    """
    # Not an NMEA sentence (no leading '$')? Return it untouched.
    if not isNMEA(line):
        return line
    # Drop the leading dollar sign; it carries no further information.
    line = line[1:]
    # Look for a trailing "*XX" checksum.
    crc = getCRC(line)
    if crc is None:  # was '== None'; identity comparison is the correct idiom
        return line
    # Strip the 3-character "*XX" suffix, then verify the checksum of
    # what remains against the value that suffix declared.
    line = line[:-3]
    if calcCRC(line) != crc:
        raise BadChecksum # Raise a "bad checksum" exception.
    # Bare line with a verified checksum.
    return line
# Given a string, return the NMEA sentence equivalent.
# Include a checksum if and only if makeChecksum is true (default False).
# Does not add any line-end character(s).
def makeNMEA(line:str, makeChecksum:bool=False):
    # Optionally append the "*XX" checksum suffix, then decorate the
    # sentence with its leading '$'.
    payload = line
    if makeChecksum:
        payload += "*%02x" % calcCRC(payload)
    return "$" + payload
| en | 0.862927 | #|****************************************************************************** #| #| FILE NAME: nmea.py [python module source file] #| #| DESCRIPTION: #| This file defines functions associated with processing #| of NMEA-formatted 'sentences' or message lines. #| #|vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv # Exceptions # Functions. # NMEA checksum doesn't match. # Given a string, is it potentially an NMEA-formatted sentence? # Given an NMEA-formatted sentence, return the two-nibble hex # checksum at the end of it, if present. The value is returned # as an integer in the range 0-255. # Get the length of the string. # Less than 3 characters? Can't have a checksum. # If 3rd character from end of string isn't an asterisk, # then there's no checksum and we're done. # Get the last two characters of the string. # Convert to integer as base-16. # Calculate and return the NMEA CRC/checksum (xor of byte values) # of the given string (not already decorated with $*). # Convert string to byte array. # Initialize checksum to 0. # Go through array, # XOR'ing each byte into checksum. # Return result. # Given a possible NMEA sentence, with or without a checksum # present, if the checksum is present then verify it (and # throw a BadChecksum exception if it doesn't match), and # return the "bare" string (without the '$', the '*', or the # checksum). # The input is assumed to be a single line (containing no # line-end characters) with any initial/final whitespace # already stripped off. # First, if the line doesn't begin with a dollar sign '$' # then it's not an NMEA sentence at all; just return it. # At this point we know that there's a dollar sign at the # start. Let's go ahead and strip it off (we don't need # it any more. # All chars from 2nd to last. # OK, so at this point we know we have an NMEA-type # sentence, but with the '$' already stripped off. # Let's see if it has a CRC code ("*XX") at the end. 
# If it has no CRC code, then all we have to do is # return the line, which already has the '$' stripped. # OK, so at this point we have a CRC code, and a line # with the '$' stripped off the front. We know the # last 3 characters are "*XX", so strip those off too. # All but last 3 characters of string. # Now we have a "bare" line (with $* stuff stripped away). # Calculate its CRC value and compare it to the one given. # If they don't match, raise an exception. # Raise a "bad checksum" exception. # At this point the line is bare and we've verified that # the checksum matches. Just return the bare line. # Given a string, return the NMEA sentence equivalent. # Include a checksum if and only if makeChecksum is true (default False). # Does not add any line-end character(s). | 3.175312 | 3 |
Scripts/preprocess_and_tokenize.py | Jabalov/Arabic-Dialects-Identification | 2 | 6623263 | import pandas as pd
import numpy as np
from sklearn import preprocessing
import nltk
# nltk.download('all')
from nltk.corpus import stopwords, wordnet
from nltk.tokenize import word_tokenize
from nltk.tokenize import RegexpTokenizer
import regex as re
class PreprocessTweets:
    """Normalization pipeline for Arabic tweet text.

    Each method rewrites ``self.text`` in place; call
    :meth:`preprocessing_pipeline` to run every pass in order.
    """
    def __init__ (self, text):
        # text: raw tweet string to be cleaned (str)
        self.text = text
    def normalize_letters(self):
        """Collapse Arabic letter variants to canonical forms."""
        # Unify alef variants, then common letter substitutions.
        self.text = re.sub("[إأآا]", "ا", self.text)
        self.text = re.sub("ى", "ي", self.text)
        self.text = re.sub("ؤ", "ء", self.text)
        self.text = re.sub("ئ", "ء", self.text)
        self.text = re.sub("ة", "ه", self.text)
        self.text = re.sub("گ", "ك", self.text)
        # Collapse any consecutively repeated character to a single one.
        self.text = re.sub(r'(.)\1+', r'\1', self.text)
    def remove_tashkeel(self):
        """Strip Arabic diacritics (tashkeel) and the tatwil filler."""
        tashkeel = re.compile("""
                             ّ    | # Tashdid
                             َ    | # Fatha
                             ً    | # Tanwin Fath
                             ُ    | # Damma
                             ٌ    | # Tanwin Damm
                             ِ    | # Kasra
                             ٍ    | # Tanwin Kasr
                             ْ    | # Sukun
                             ـ     # Tatwil/Kashida
                         """, re.VERBOSE)
        self.text = re.sub(tashkeel, '', self.text)
    def sub_chars(self):
        """Keep only Arabic-range characters and multi-letter words."""
        # Arabic code points: 1569-1610 plus 1646-1748.
        chlist = [chr(i) for i in range(1569, 1611)] + [chr(i) for i in range(1646, 1749)]
        self.text = ''.join([i if i in chlist else ' ' for i in self.text])
        # Drop one-character tokens.
        self.text = ' '.join([u'{}'.format(i) for i in self.text.split() if len(i) > 1])
        # Replace digit runs with a space.
        self.text = re.sub(r'\d+', ' ', self.text)
    def remove_symbols_spaces_and_english(self):
        """Erase emojis, punctuation, Latin words and redundant spaces."""
        symbols1 = re.compile('[/(){}\[\]\|,;]')
        symbols2 = re.compile("[@|(|)|\||:|>|<|_|#|\.|+|÷|×|'|!|\?|٪|؟\|&|;|\*|[|]|{|}|-|،|_|’|;|!|:|^|&|%|/]")
        arabic_punctuations = '''`÷×؛<>_()*&^%][ـ،/:"؟.,'{}~¦+|!”…“–ـ'''
        emojies = re.compile("["
                             u"\U0001F600-\U0001F64F"
                             u"\U0001F300-\U0001F5FF"
                             u"\U0001F680-\U0001F6FF"
                             u"\U0001F1E0-\U0001F1FF"
                             u"\U00002500-\U00002BEF"
                             u"\U00002702-\U000027B0"
                             u"\U00002702-\U000027B0"
                             u"\U000024C2-\U0001F251"
                             u"\U0001f926-\U0001f937"
                             u"\U00010000-\U0010ffff"
                             u"\u2640-\u2642"
                             u"\u0640"
                             u"\u2600-\u2B55"
                             u"\u200d"
                             u"\u23cf"
                             u"\u23e9"
                             u"\u231a"
                             u"\ufe0f" # dingbats
                             u"\u3030"
                             "]+", re.UNICODE)
        self.text = re.sub(emojies, ' ', self.text)
        self.text = re.sub(symbols1, ' ', self.text)
        self.text = re.sub(symbols2, ' ', self.text)
        translator = str.maketrans('', '', arabic_punctuations)
        self.text = self.text.translate(translator)
        self.text = self.text.replace('"', " ")
        self.text = self.text.replace('…', " ")
        # Delete whole Latin-letter words.
        self.text = re.sub(r'\s*[A-Za-z]+\b', ' ' , self.text)
        # Squeeze repeated blanks down to single spaces.
        while '  ' in self.text:
            self.text = self.text.replace('  ', ' ')
    def preprocessing_pipeline(self):
        """Run every cleanup pass in order and return the cleaned text."""
        self.normalize_letters()
        self.remove_tashkeel()
        self.sub_chars()
        self.remove_symbols_spaces_and_english()
        return self.text
class TweetsTokenizing:
    """Tokenization pipeline: split, drop stopwords, collapse repeats."""
    def __init__(self, text):
        # text: preprocessed tweet string; tokens: working token list
        self.text = text
        self.tokens = []
    def tokenize_text(self):
        """Split ``self.text`` into whitespace-trimmed word tokens."""
        tokens = word_tokenize(self.text)
        self.tokens = [token.strip() for token in tokens]
    def removeStopWords(self):
        """Drop NLTK's Arabic stopwords from the token list."""
        stopwords_list = stopwords.words('arabic')
        listStopWords = stopwords_list
        self.tokens = [i for i in self.tokens if not i in listStopWords]
    def remove_repeated_characters(self):
        """Iteratively collapse doubled letters unless WordNet knows the word."""
        repeat_pattern = re.compile(r'(\w*)(\w)\2(\w*)')
        match_substitution = r'\1\2\3'
        def replace(old_word):
            # Keep words WordNet recognizes; otherwise keep collapsing one
            # doubled letter at a time until the word stops changing.
            if wordnet.synsets(old_word):
                return old_word
            new_word = repeat_pattern.sub(match_substitution, old_word)
            return replace(new_word) if new_word != old_word else new_word
        self.tokens = [replace(word) for word in self.tokens]
    def tokenize_pipeline(self):
        """Run tokenize -> stopword removal -> repeat collapse; return tokens."""
        self.tokenize_text()
        self.removeStopWords()
        self.remove_repeated_characters()
        return self.tokens
| import pandas as pd
import numpy as np
from sklearn import preprocessing
import nltk
# nltk.download('all')
from nltk.corpus import stopwords, wordnet
from nltk.tokenize import word_tokenize
from nltk.tokenize import RegexpTokenizer
import regex as re
class PreprocessTweets:
    """Normalization pipeline for Arabic tweet text (mutates ``self.text``)."""
    def __init__ (self, text):
        # text: raw tweet string to be cleaned (str)
        self.text = text
    def normalize_letters(self):
        """Collapse Arabic letter variants to canonical forms."""
        self.text = re.sub("[إأآا]", "ا", self.text)
        self.text = re.sub("ى", "ي", self.text)
        self.text = re.sub("ؤ", "ء", self.text)
        self.text = re.sub("ئ", "ء", self.text)
        self.text = re.sub("ة", "ه", self.text)
        self.text = re.sub("گ", "ك", self.text)
        # Collapse any consecutively repeated character to a single one.
        self.text = re.sub(r'(.)\1+', r'\1', self.text)
    def remove_tashkeel(self):
        """Strip Arabic diacritics (tashkeel) and the tatwil filler."""
        tashkeel = re.compile("""
                             ّ    | # Tashdid
                             َ    | # Fatha
                             ً    | # Tanwin Fath
                             ُ    | # Damma
                             ٌ    | # Tanwin Damm
                             ِ    | # Kasra
                             ٍ    | # Tanwin Kasr
                             ْ    | # Sukun
                             ـ     # Tatwil/Kashida
                         """, re.VERBOSE)
        self.text = re.sub(tashkeel, '', self.text)
    def sub_chars(self):
        """Keep only Arabic-range characters and multi-letter words."""
        # Arabic code points: 1569-1610 plus 1646-1748.
        chlist = [chr(i) for i in range(1569, 1611)] + [chr(i) for i in range(1646, 1749)]
        self.text = ''.join([i if i in chlist else ' ' for i in self.text])
        # Drop one-character tokens.
        self.text = ' '.join([u'{}'.format(i) for i in self.text.split() if len(i) > 1])
        # Replace digit runs with a space.
        self.text = re.sub(r'\d+', ' ', self.text)
    def remove_symbols_spaces_and_english(self):
        """Erase emojis, punctuation, Latin words and redundant spaces."""
        symbols1 = re.compile('[/(){}\[\]\|,;]')
        symbols2 = re.compile("[@|(|)|\||:|>|<|_|#|\.|+|÷|×|'|!|\?|٪|؟\|&|;|\*|[|]|{|}|-|،|_|’|;|!|:|^|&|%|/]")
        arabic_punctuations = '''`÷×؛<>_()*&^%][ـ،/:"؟.,'{}~¦+|!”…“–ـ'''
        emojies = re.compile("["
                             u"\U0001F600-\U0001F64F"
                             u"\U0001F300-\U0001F5FF"
                             u"\U0001F680-\U0001F6FF"
                             u"\U0001F1E0-\U0001F1FF"
                             u"\U00002500-\U00002BEF"
                             u"\U00002702-\U000027B0"
                             u"\U00002702-\U000027B0"
                             u"\U000024C2-\U0001F251"
                             u"\U0001f926-\U0001f937"
                             u"\U00010000-\U0010ffff"
                             u"\u2640-\u2642"
                             u"\u0640"
                             u"\u2600-\u2B55"
                             u"\u200d"
                             u"\u23cf"
                             u"\u23e9"
                             u"\u231a"
                             u"\ufe0f" # dingbats
                             u"\u3030"
                             "]+", re.UNICODE)
        self.text = re.sub(emojies, ' ', self.text)
        self.text = re.sub(symbols1, ' ', self.text)
        self.text = re.sub(symbols2, ' ', self.text)
        translator = str.maketrans('', '', arabic_punctuations)
        self.text = self.text.translate(translator)
        self.text = self.text.replace('"', " ")
        self.text = self.text.replace('…', " ")
        # Delete whole Latin-letter words.
        self.text = re.sub(r'\s*[A-Za-z]+\b', ' ' , self.text)
        # Squeeze repeated blanks down to single spaces.
        while '  ' in self.text:
            self.text = self.text.replace('  ', ' ')
    def preprocessing_pipeline(self):
        """Run every cleanup pass in order and return the cleaned text."""
        self.normalize_letters()
        self.remove_tashkeel()
        self.sub_chars()
        self.remove_symbols_spaces_and_english()
        return self.text
class TweetsTokenizing:
    """Tokenization pipeline: split, drop stopwords, collapse repeats."""
    def __init__(self, text):
        # text: preprocessed tweet string; tokens: working token list
        self.text = text
        self.tokens = []
    def tokenize_text(self):
        """Split ``self.text`` into whitespace-trimmed word tokens."""
        tokens = word_tokenize(self.text)
        self.tokens = [token.strip() for token in tokens]
    def removeStopWords(self):
        """Drop NLTK's Arabic stopwords from the token list."""
        stopwords_list = stopwords.words('arabic')
        listStopWords = stopwords_list
        self.tokens = [i for i in self.tokens if not i in listStopWords]
    def remove_repeated_characters(self):
        """Iteratively collapse doubled letters unless WordNet knows the word."""
        repeat_pattern = re.compile(r'(\w*)(\w)\2(\w*)')
        match_substitution = r'\1\2\3'
        def replace(old_word):
            # Keep words WordNet recognizes; otherwise keep collapsing one
            # doubled letter at a time until the word stops changing.
            if wordnet.synsets(old_word):
                return old_word
            new_word = repeat_pattern.sub(match_substitution, old_word)
            return replace(new_word) if new_word != old_word else new_word
        self.tokens = [replace(word) for word in self.tokens]
    def tokenize_pipeline(self):
        """Run tokenize -> stopword removal -> repeat collapse; return tokens."""
        self.tokenize_text()
        self.removeStopWords()
        self.remove_repeated_characters()
        return self.tokens
| en | 0.232098 | # nltk.download('all') ّ | # Tashdid
َ | # Fatha
ً | # Tanwin Fath
ُ | # Damma
ٌ | # Tanwin Damm
ِ | # Kasra
ٍ | # Tanwin Kasr
ْ | # Sukun
ـ # Tatwil/Kashida #|\.|+|÷|×|'|!|\?|٪|؟\|&|;|\*|[|]|{|}|-|،|_|’|;|!|:|^|&|%|/]") `÷×؛<>_()*&^%][ـ،/:"؟.,'{}~¦+|!”…“–ـ # dingbats | 3.093982 | 3 |
unittests/test_creation_lib_cWDictFile_SHADicts.py | ddbox/glideinwms | 0 | 6623264 | <reponame>ddbox/glideinwms
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2009 Fermi Research Alliance, LLC
# SPDX-License-Identifier: Apache-2.0
"""
Project:
glideinWMS
Description:
unit test for SHA1DictFile and SummarySHA1DictFile classes in
glideinwms/creation/lib/cWDictFile.py
Author:
<NAME> <EMAIL>
"""
import copy
import unittest
import xmlrunner
from glideinwms.unittests.unittest_utils import TestImportError
# Import the classes under test; re-raise any import failure as the
# suite's TestImportError so the runner reports a setup problem clearly.
try:
    from glideinwms.creation.lib.cWDictFile import SHA1DictFile, SummarySHA1DictFile
except ImportError as err:
    raise TestImportError(str(err))
class TestSHA1DictFile(unittest.TestCase):
    """Unit tests for cWDictFile.SHA1DictFile (loads the signatures fixture)."""

    def setUp(self):
        # Fresh dictionary loaded from the fixture before every test.
        self.dic = SHA1DictFile("fixtures/frontend", "signatures.sha1")
        self.dic.load()

    def test_init(self):
        # assertIsInstance/assertIn give clearer failure messages than
        # assertTrue(...) wrappers.
        self.assertIsInstance(self.dic, SHA1DictFile)
        self.assertIn("description.e98f4o.cfg group_group1", self.dic.keys)
        self.assertIn("description.e98f4o.cfg group_group1", self.dic)

    def test_add_from_file(self):
        self.dic.add_from_file("fixtures/frontend/group_group1/params.cfg", "params.cfg")
        self.assertIn("params.cfg", self.dic)

    def test_format_val(self):
        # format_val renders "<sha1> <key>".
        expected = "ad0f57615c3df8bbb2130d96cfdf09363f4bd3ed " + "description.e98f4o.cfg group_group1"
        mykey = "description.e98f4o.cfg group_group1"
        self.assertEqual(expected, self.dic.format_val(mykey, None))

    def test_parse_val(self):
        cpy = copy.deepcopy(self.dic)
        self.assertEqual(cpy.keys, self.dic.keys)
        # Comment lines and empty lines must be ignored.
        self.dic.parse_val("# ignore this line")
        self.assertEqual(cpy.keys, self.dic.keys)
        self.dic.parse_val("")
        self.assertEqual(cpy.keys, self.dic.keys)
        # Malformed lines must raise; assertRaises fails the test when
        # nothing is raised (the old try/except silently passed).
        with self.assertRaises(RuntimeError):
            self.dic.parse_val("this should throw RuntimeError")
        # A well-formed "<val> <key>" line adds the key.
        self.dic.parse_val("foo bar")
        self.assertIn("bar", self.dic.keys)
        self.assertNotEqual(cpy.keys, self.dic.keys)
class TestSummarySHA1DictFile(unittest.TestCase):
    """Unit tests for cWDictFile.SummarySHA1DictFile."""

    def setUp(self):
        self.dic = SummarySHA1DictFile("fixtures/frontend", "signatures.sha1")
        self.dic.load()

    def test_init(self):
        # Summary files key on the group name alone.
        self.assertIsInstance(self.dic, SummarySHA1DictFile)
        self.assertIn("group_group1", self.dic.keys)
        self.assertIn("group_group1", self.dic)

    def test_add_from_file(self):
        self.dic.add_from_file("fixtures/frontend/group_group1/params.cfg", "params.cfg")
        self.assertIn("params.cfg", self.dic)

    def test_format_val(self):
        # format_val renders "<sha1> <descriptor> <key>".
        expected = "ad0f57615c3df8bbb2130d96cfdf09363f4bd3ed " + "description.e98f4o.cfg group_group1"
        self.assertEqual(expected, self.dic.format_val("group_group1", None))

    def test_parse_val(self):
        cpy = copy.deepcopy(self.dic)
        self.assertEqual(cpy.keys, self.dic.keys)
        self.dic.parse_val("# ignore this line")
        self.assertEqual(cpy.keys, self.dic.keys)
        self.dic.parse_val("")
        self.assertEqual(cpy.keys, self.dic.keys)
        # Summary lines need three fields; two is malformed and must raise
        # (the old try/except silently passed when nothing was raised).
        with self.assertRaises(RuntimeError):
            self.dic.parse_val("too short")
        self.dic.parse_val("foo bar baz")
        self.assertIn("baz", self.dic)
        self.assertNotEqual(cpy.keys, self.dic.keys)

    def test_add(self):
        self.dic.add("foo", ["7cea6e20d5a4e65e94689377771e3e44c72735", "foo.e98f4o.cfg"])
        self.assertIn("foo", self.dic.keys)
# When run directly, emit JUnit-style XML reports for CI consumption.
if __name__ == "__main__":
    OFL = "unittests-reports"
    unittest.main(testRunner=xmlrunner.XMLTestRunner(output=OFL))
| #!/usr/bin/env python3
# SPDX-FileCopyrightText: 2009 Fermi Research Alliance, LLC
# SPDX-License-Identifier: Apache-2.0
"""
Project:
glideinWMS
Description:
unit test for SHA1DictFile and SummarySHA1DictFile classes in
glideinwms/creation/lib/cWDictFile.py
Author:
<NAME> <EMAIL>
"""
import copy
import unittest
import xmlrunner
from glideinwms.unittests.unittest_utils import TestImportError
# Import the classes under test; re-raise any import failure as the
# suite's TestImportError so the runner reports a setup problem clearly.
try:
    from glideinwms.creation.lib.cWDictFile import SHA1DictFile, SummarySHA1DictFile
except ImportError as err:
    raise TestImportError(str(err))
class TestSHA1DictFile(unittest.TestCase):
    """Unit tests for cWDictFile.SHA1DictFile (loads the signatures fixture)."""
    def setUp(self):
        self.dic = SHA1DictFile("fixtures/frontend", "signatures.sha1")
        self.dic.load()
    def test_init(self):
        self.assertTrue(isinstance(self.dic, SHA1DictFile))
        self.assertTrue("description.e98f4o.cfg group_group1" in self.dic.keys)
        self.assertTrue("description.e98f4o.cfg group_group1" in self.dic)
    def test_add_from_file(self):
        self.dic.add_from_file("fixtures/frontend/group_group1/params.cfg", "params.cfg")
        self.assertTrue("params.cfg" in self.dic)
    def test_format_val(self):
        # format_val renders "<sha1> <key>".
        expected = "ad0f57615c3df8bbb2130d96cfdf09363f4bd3ed " + "description.e98f4o.cfg group_group1"
        mykey = "description.e98f4o.cfg group_group1"
        self.assertEqual(expected, self.dic.format_val(mykey, None))
    def test_parse_val(self):
        cpy = copy.deepcopy(self.dic)
        self.assertEqual(cpy.keys, self.dic.keys)
        # Comment lines and empty lines are ignored by parse_val.
        self.dic.parse_val("# ignore this line")
        self.assertEqual(cpy.keys, self.dic.keys)
        self.dic.parse_val("")
        self.assertEqual(cpy.keys, self.dic.keys)
        # NOTE(review): this silently passes when no exception is raised;
        # assertRaises would be the stricter idiom.
        try:
            self.dic.parse_val("this should throw RuntimeError")
        except RuntimeError:
            pass
        self.dic.parse_val("foo bar")
        self.assertTrue("bar" in self.dic.keys)
        self.assertNotEqual(cpy.keys, self.dic.keys)
class TestSummarySHA1DictFile(unittest.TestCase):
    """Unit tests for cWDictFile.SummarySHA1DictFile."""
    def setUp(self):
        self.dic = SummarySHA1DictFile("fixtures/frontend", "signatures.sha1")
        self.dic.load()
    def test_init(self):
        # Summary files key on the group name alone.
        self.assertTrue(isinstance(self.dic, SummarySHA1DictFile))
        self.assertTrue("group_group1" in self.dic.keys)
        self.assertTrue("group_group1" in self.dic)
    def test_add_from_file(self):
        self.dic.add_from_file("fixtures/frontend/group_group1/params.cfg", "params.cfg")
        self.assertTrue("params.cfg" in self.dic)
    def test_format_val(self):
        # format_val renders "<sha1> <descriptor> <key>".
        expected = "ad0f57615c3df8bbb2130d96cfdf09363f4bd3ed " + "description.e98f4o.cfg group_group1"
        self.assertEqual(expected, self.dic.format_val("group_group1", None))
    def test_parse_val(self):
        cpy = copy.deepcopy(self.dic)
        self.assertEqual(cpy.keys, self.dic.keys)
        self.dic.parse_val("# ignore this line")
        self.assertEqual(cpy.keys, self.dic.keys)
        self.dic.parse_val("")
        self.assertEqual(cpy.keys, self.dic.keys)
        # NOTE(review): silently passes when no exception is raised;
        # assertRaises would be the stricter idiom.
        try:
            self.dic.parse_val("too short")
        except RuntimeError:
            pass
        self.dic.parse_val("foo bar baz")
        self.assertTrue("baz" in self.dic)
        self.assertNotEqual(cpy.keys, self.dic.keys)
    def test_add(self):
        self.dic.add("foo", ["7cea6e20d5a4e65e94689377771e3e44c72735", "foo.e98f4o.cfg"])
        self.assertTrue("foo" in self.dic.keys)
# When run directly, emit JUnit-style XML reports into the directory below.
if __name__ == "__main__":
    OFL = "unittests-reports"
unittest.main(testRunner=xmlrunner.XMLTestRunner(output=OFL)) | en | 0.377445 | #!/usr/bin/env python3 # SPDX-FileCopyrightText: 2009 Fermi Research Alliance, LLC # SPDX-License-Identifier: Apache-2.0 Project: glideinWMS Description: unit test for SHA1DictFile and SummarySHA1DictFile classes in glideinwms/creation/lib/cWDictFile.py Author: <NAME> <EMAIL> | 2.270693 | 2 |
webservice/utils/check.py | iducn/Paddle-bot | 0 | 6623265 | import requests
import re
def checkPRCI(commit_url, sha, CHECK_CI):
    """
    Check if PR's commit message can trigger CI.
    Args:
        commit_url(url): PR's commit url.
        sha(str): PR's commit code. (The only code provided by GitHub)
        CHECK_CI(str): PR's commit message checker.
    Returns:
        res: True or False
    """
    # Fetch the PR's commit list from the GitHub API.
    response = requests.get(commit_url).json()
    for commit in response:  # direct iteration instead of range(len(...))
        if commit['sha'] != sha:
            continue
        # An empty checker string means any commit message may trigger CI.
        if CHECK_CI in commit['commit']['message'] or len(CHECK_CI) == 0:
            return True
    return False
def re_rule(body, CHECK_TEMPLATE):
    # Compile the template with DOTALL so '.' also spans newlines, then
    # return the first match found in the body (or None).
    pattern = re.compile(CHECK_TEMPLATE, re.DOTALL)
    return pattern.search(body)
def checkPRTemplate(body, CHECK_TEMPLATE, CHECK_TEMPLATE_doc=None):
    """
    Check if PR's description meet the standard of template
    Args:
        body: PR's Body.
        CHECK_TEMPLATE: check template str.
        CHECK_TEMPLATE_doc: optional docs-PR template; when given and the
            body declares a docs change ("PR changes ... C"), the stripped
            body is checked against this template instead.
    Returns:
        res: True or False
    """
    if CHECK_TEMPLATE_doc is not None:  # was '!= None'
        print(CHECK_TEMPLATE_doc)
        # Strip the boilerplate placeholder comments before matching.
        note1 = '<!-- ADD SCREENSHOT HERE IF APPLICABLE. -->'
        note2 = '<!-- DESCRIBE THE BUG OR REQUIREMENT HERE. eg. #2020(格式为 #Issue编号)-->'
        body_no_note = re.sub(note2, "", re.sub(note1, "", body))
        # Raw string (same bytes as before) silences invalid-escape warnings.
        doc_check = r"- PR changes:(改动点)is \(\s*[A-D]*[C][A-D]*\s*\):"
        match_doc = re.search(doc_check, body, re.M|re.I)
        print(match_doc)
        res = False
        if match_doc is not None:
            result_doc = re_rule(body_no_note, CHECK_TEMPLATE_doc)
            if result_doc is not None:
                res = True
        return res
    result = re_rule(body, CHECK_TEMPLATE)
    # Both template and body empty counts as a failed check.
    if len(CHECK_TEMPLATE) == 0 and len(body) == 0:
        res = False
    elif result is not None:
        res = True
    else:
        res = False
    return res
import re
def checkPRCI(commit_url, sha, CHECK_CI):
    """
    Check if PR's commit message can trigger CI.
    Args:
        commit_url(url): PR's commit url.
        sha(str): PR's commit code. (The only code provided by GitHub)
        CHECK_CI(str): PR's commit message checker.
    Returns:
        res: True or False
    """
    res = False
    # Fetch the PR's commit list from the GitHub API.
    reponse = requests.get(commit_url).json()
    for i in range(0, len(reponse)):
        if reponse[i]['sha'] == sha:
            # An empty checker string means any commit message triggers CI.
            if CHECK_CI in reponse[i]['commit']['message'] or len(CHECK_CI) == 0:
                res = True
    return res
def re_rule(body, CHECK_TEMPLATE):
    # DOTALL lets '.' match newlines so the template can span lines;
    # returns the first match object, or None.
    PR_RE = re.compile(CHECK_TEMPLATE, re.DOTALL)
    result = PR_RE.search(body)
    return result
def checkPRTemplate(body, CHECK_TEMPLATE, CHECK_TEMPLATE_doc=None):
    """
    Check if PR's description meet the standard of template
    Args:
        body: PR's Body.
        CHECK_TEMPLATE: check template str.
    Returns:
        res: True or False
    """
    res = False
    if CHECK_TEMPLATE_doc != None:
        # NOTE(review): '!= None' works, but 'is not None' is the idiom.
        print(CHECK_TEMPLATE_doc)
        # Strip the boilerplate placeholder comments before matching.
        note1 = '<!-- ADD SCREENSHOT HERE IF APPLICABLE. -->'
        note2 = '<!-- DESCRIBE THE BUG OR REQUIREMENT HERE. eg. #2020(格式为 #Issue编号)-->'
        body_no_note = re.sub(note2, "", re.sub(note1, "", body))
        # Does the body declare a docs-only change ("PR changes ... C")?
        doc_check = "- PR changes:(改动点)is \(\s*[A-D]*[C][A-D]*\s*\):"
        match_doc = re.search(doc_check, body, re.M|re.I)
        print(match_doc)
        if match_doc != None:
            result_doc = re_rule(body_no_note, CHECK_TEMPLATE_doc)
            if result_doc != None:
                res = True
        return res
    result = re_rule(body, CHECK_TEMPLATE)
    # Both template and body empty counts as a failed check.
    if len(CHECK_TEMPLATE) == 0 and len(body) == 0:
        res = False
    elif result != None:
        res = True
return res | en | 0.408126 | Check if PR's commit message can trigger CI. Args: commit_url(url): PR's commit url. sha(str): PR's commit code. (The only code provided by GitHub) CHECK_CI(str): PR's commit message checker. Returns: res: True or False Check if PR's description meet the standard of template Args: body: PR's Body. CHECK_TEMPLATE: check template str. Returns: res: True or False #2020(格式为 #Issue编号)-->' | 2.885983 | 3 |
utils.py | wuaalb/pytorch_template_audio | 18 | 6623266 | <reponame>wuaalb/pytorch_template_audio<filename>utils.py
from pathlib import Path
import warnings
import torch
from torch.optim.lr_scheduler import _LRScheduler
import numpy as np
# find checkpoint with latest creation time (meta data modification on unix)
def find_latest_checkpoint(path, pattern='*.pt'):
    """Return the newest checkpoint file under *path* matching *pattern*,
    judged by st_ctime, or None when nothing matches."""
    candidates = list(Path(path).glob(pattern))
    if not candidates:
        return None
    return max(candidates, key=lambda fn: fn.stat().st_ctime)
# raise if inconsistent
def check_step_consistency(optimizer, lr_scheduler, step):
    """Verify that the optimizer's (and optional lr scheduler's) internal
    step counters agree with the training loop's global *step*.

    Parameters that never received a gradient carry no optimizer state
    and are skipped. Raises IOError on any mismatch.
    """
    for group in optimizer.param_groups:
        for p in group['params']:
            if p.grad is None:  # was '== None'; identity test is the idiom
                continue
            if optimizer.state[p]['step'] != step:
                raise IOError('optimizer step and loop step inconsistent (opt step = {:d}, global step = {:d})'.format(optimizer.state[p]['step'], step))
    if lr_scheduler:
        if lr_scheduler.last_epoch != step:
            raise IOError('lr scheduler step and loop step inconsistent')
def save_checkpoint(fn_checkpoint, model, model_ema, optimizer, lr_scheduler, step):
    """Serialize the full training state (model, optional EMA model,
    optimizer, optional lr scheduler, global step) to *fn_checkpoint*."""
    # internal counters must agree with the loop counter before saving
    check_step_consistency(optimizer, lr_scheduler, step)
    # gather state dicts from every component that is present
    state = {'model': model.state_dict()}
    if model_ema is not None:
        state['model_ema'] = model_ema.state_dict()
    state['optimizer'] = optimizer.state_dict()
    if lr_scheduler is not None:
        state['lr_scheduler'] = lr_scheduler.state_dict()
    state['step'] = step
    # write a single file holding everything
    torch.save(state, fn_checkpoint)
def load_checkpoint(fn_checkpoint, model, model_ema, optimizer, lr_scheduler):
    """Restore the training state written by save_checkpoint();
    returns the global step stored in the checkpoint."""
    # XXX: torch.load() did not accept pathlib.Path here, hence str()
    state = torch.load(str(fn_checkpoint))
    # push the stored state into every component that is present
    model.load_state_dict(state['model'])
    if model_ema is not None:
        model_ema.load_state_dict(state['model_ema'])
    optimizer.load_state_dict(state['optimizer'])
    if lr_scheduler is not None:
        lr_scheduler.load_state_dict(state['lr_scheduler'])
    step = state['step']
    # restored counters must agree with the restored step
    check_step_consistency(optimizer, lr_scheduler, step)
    return step
def load_checkpoint_inference(fn_checkpoint, model, use_ema):
    """Load only the model weights for inference (the EMA weights when
    *use_ema* is true); returns the step stored in the checkpoint."""
    # XXX: torch.load() did not accept pathlib.Path here, hence str()
    state = torch.load(str(fn_checkpoint))
    key = 'model_ema' if use_ema else 'model'
    model.load_state_dict(state[key])
    return state['step']
# e.g. backup_source(out_dir / 'source.zip', ['./*.py', './models/*.py'])
def backup_source(fn_out_zip, path_pattern_list):
    """Zip every file matching the given glob patterns into *fn_out_zip*.

    Raises IOError when a pattern matches nothing, so a typo cannot
    silently produce an incomplete backup.
    """
    from pathlib import Path
    from zipfile import ZipFile
    with ZipFile(fn_out_zip, 'w') as archive:
        for raw_pattern in path_pattern_list:
            glob_path = Path(raw_pattern)
            # split "<dir>/<pattern>" and glob inside the directory
            matches = list(glob_path.parent.glob(glob_path.name))
            if not matches:
                raise IOError('No files found to backup!')
            for src_file in matches:
                archive.write(src_file)
def worker_init_fn(wid):
    """DataLoader worker init: reseed numpy from the worker's torch seed.

    torch already gives every worker a distinct initial seed; folding it
    into numpy avoids all workers sharing one numpy RNG stream (a classic
    fork-on-linux pitfall). The seed is wrapped to uint32 for numpy.
    """
    worker_seed = torch.initial_seed() % (2 ** 32)
    np.random.seed(worker_seed)
def count_parameters(model):
    """Return the number of trainable (requires_grad) scalar parameters."""
    trainable = (p for p in model.parameters() if p.requires_grad)
    return sum(p.numel() for p in trainable)
class ExpDecayWarmupScheduler(_LRScheduler):
    """Constant lr for *warmup_steps* updates, then smooth exponential
    decay by factor *decay* every *step_size* steps, clamped below at
    *lr_floor*.

    At step == warmup_steps the lr drops to base_lr * decay and keeps
    decaying from there; once lr_floor is reached it stays constant.
    """
    def __init__(self, optimizer, warmup_steps=200_000, step_size=200_000, decay=0.5, lr_floor=1e-6):
        self.warmup_steps = warmup_steps
        self.step_size = step_size
        self.decay = decay
        self.lr_floor = lr_floor
        # the base class calls self.get_lr() during __init__, so every
        # attribute above must already exist
        super().__init__(optimizer)
    def get_lr(self):
        if not self._get_lr_called_within_step:
            warnings.warn("To get the last learning rate computed by the scheduler, "
                          "please use `get_last_lr()`.", UserWarning)
        # _LRScheduler counts "epochs"; here one epoch == one update step
        step = self.last_epoch
        if step < self.warmup_steps:
            scale = 1.0
        else:
            scale = self.decay ** (1 + (step - self.warmup_steps) / self.step_size)
        # always scale from base_lrs (not param_group['lr']) so repeated
        # calls never compound the decay
        return [max(base * scale, self.lr_floor) for base in self.base_lrs]
# XXX:
# had a more advanced version, but didn't seem to work properly (didn't debug)
# improvements/differences
# - just pass source model, create internal target model via deep_copy()
# - internal step counter, instead of passing step counter on step() for warmup
# - state_dict(), load_state_dict() to store/load internal target model and step counter
# - optional device argument, so EMA (target) model can be kept on CPU model rather than using up GPU model
# - optional list of excluded parameter names (e.g. don't use up compute decaying, some big constant tensors stored in state_dict using register_buffer())
# - switching target model to eval mode (assuming it will only ever be used for inference or keeping track of EMA weights, not training directly)
# - set requires_grad_(False) on all parameters of internal target model (not just make operations have no gradients, but the actual parameters themselves)
class EMA(object):
    """Exponential moving average of a source model's parameters,
    maintained in-place inside a separate target model.

    The state dicts are grabbed once; their tensors alias the live
    parameters, so the in-place copy_ calls keep the target current.
    """
    def __init__(self, source, target, decay, warmup_steps=0):
        # source: model being trained; target: model holding the EMA weights
        # decay: smoothing factor; warmup_steps: steps during which the
        # target simply mirrors the source (no averaging)
        self.source = source
        self.target = target
        self.decay = decay
        self.warmup_steps = warmup_steps
        self.source_dict = self.source.state_dict()
        self.target_dict = self.target.state_dict()
        # start the target as an exact copy of the source
        with torch.no_grad():
            for key in self.source_dict:
                self.target_dict[key].data.copy_(self.source_dict[key].data)
    # pass step_idx if warmup_steps > 0
    # XXX: need to pass it explicitly, because EMA is state-less and thus cannot have internal step counter which
    # is properly stored/loaded from checkpoint
    def step(self, step_idx=None):
        # NOTE(review): 'if step_idx and ...' treats step_idx == 0 as "not
        # in warmup" because 0 is falsy; 'if step_idx is not None and
        # step_idx < self.warmup_steps' is probably intended -- TODO confirm.
        if step_idx and step_idx < self.warmup_steps:
            decay = 0.0 # no EMA, use source parameters directly
        else:
            decay = self.decay
        # blend: target <- target*decay + source*(1-decay)
        with torch.no_grad():
            for key in self.source_dict:
self.target_dict[key].data.copy_(self.target_dict[key].data*decay + self.source_dict[key].data*(1.0 - decay)) | from pathlib import Path
import warnings
import torch
from torch.optim.lr_scheduler import _LRScheduler
import numpy as np
# find checkpoint with latest creation time (meta data modification on unix)
def find_latest_checkpoint(path, pattern='*.pt'):
    # Glob candidate checkpoint files; return None when none exist,
    # otherwise the one with the newest st_ctime.
    path = Path(path)
    fns = path.glob(pattern)
    fns = list(fns)
    if len(fns) == 0:
        return None
    fn_latest = max(fns, key=lambda fn: fn.stat().st_ctime)
    return fn_latest
# raise if inconsistent
def check_step_consistency(optimizer, lr_scheduler, step):
    # Confirm optimizer (and optional scheduler) internal counters match
    # the loop's global step; params without gradients carry no state.
    for group in optimizer.param_groups:
        for p in group['params']:
            if p.grad == None:
                # NOTE(review): 'is None' is the preferred identity test.
                continue
            if optimizer.state[p]['step'] != step:
                raise IOError('optimizer step and loop step inconsistent (opt step = {:d}, global step = {:d})'.format(optimizer.state[p]['step'], step))
    if lr_scheduler:
        if lr_scheduler.last_epoch != step:
            raise IOError('lr scheduler step and loop step inconsistent')
def save_checkpoint(fn_checkpoint, model, model_ema, optimizer, lr_scheduler, step):
    """Serialize model/EMA/optimizer/scheduler state plus the global step."""
    # check step consistency
    check_step_consistency(optimizer, lr_scheduler, step)
    # get data from components
    checkpoint_dict = {}
    checkpoint_dict['model'] = model.state_dict()
    if model_ema is not None:
        checkpoint_dict['model_ema'] = model_ema.state_dict()
    checkpoint_dict['optimizer'] = optimizer.state_dict()
    if lr_scheduler is not None:
        checkpoint_dict['lr_scheduler'] = lr_scheduler.state_dict()
    checkpoint_dict['step'] = step
    # save file
    torch.save(checkpoint_dict, fn_checkpoint)
def load_checkpoint(fn_checkpoint, model, model_ema, optimizer, lr_scheduler):
    """Restore the state written by save_checkpoint(); returns the step."""
    # load file
    fn_checkpoint = str(fn_checkpoint) # XXX: issue with torch.load() not working with pathlib.Path
    checkpoint_dict = torch.load(fn_checkpoint)
    # load data into components
    model.load_state_dict(checkpoint_dict['model'])
    if model_ema is not None:
        model_ema.load_state_dict(checkpoint_dict['model_ema'])
    optimizer.load_state_dict(checkpoint_dict['optimizer'])
    if lr_scheduler is not None:
        lr_scheduler.load_state_dict(checkpoint_dict['lr_scheduler'])
    step = checkpoint_dict['step']
    # check step consistency
    check_step_consistency(optimizer, lr_scheduler, step)
    return step
def load_checkpoint_inference(fn_checkpoint, model, use_ema):
    """Load only model weights (EMA weights when use_ema); returns the step."""
    # load file
    fn_checkpoint = str(fn_checkpoint) # XXX: issue with torch.load() not working with pathlib.Path
    checkpoint_dict = torch.load(fn_checkpoint)
    # load data into components
    if use_ema:
        model.load_state_dict(checkpoint_dict['model_ema'])
    else:
        model.load_state_dict(checkpoint_dict['model'])
    step = checkpoint_dict['step']
    return step
# e.g. backup_source(out_dir / 'source.zip', ['./*.py', './models/*.py'])
def backup_source(fn_out_zip, path_pattern_list):
    """Zip all files matching the glob patterns into *fn_out_zip*;
    raises IOError when a pattern matches nothing."""
    from pathlib import Path
    from zipfile import ZipFile
    with ZipFile(fn_out_zip, 'w') as backup:
        for path_pattern in path_pattern_list:
            # split "<dir>/<pattern>" and glob inside the directory
            path_pattern = Path(path_pattern)
            dir = path_pattern.parent
            pattern = path_pattern.name
            fns_py = list(dir.glob(pattern))
            if len(fns_py) == 0:
                raise IOError('No files found to backup!')
            for fn_py in fns_py:
                backup.write(fn_py)
def worker_init_fn(wid):
np.random.seed(torch.initial_seed() % (2 ** 32)) # seed numpy with worker-specific seed from pytorch, to avoid all workers using same random seed (afaik only problem on linux, not windows); note no need to add worker id to torch.initial_seed() (already worker specific), but have to wrap to uint32 for numpy
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
class ExpDecayWarmupScheduler(_LRScheduler):
# initial lr for n_steps_warmup steps (updates),
# then drops to initial lr * decay, and continues (smoothly) exponentially decaying by factor decay every step_size steps
# continue decaying until lr_floor is reached, then keep constant
def __init__(self, optimizer, warmup_steps=200_000, step_size=200_000, decay=0.5, lr_floor=1e-6):
self.warmup_steps = warmup_steps
self.step_size = step_size
self.decay = decay
self.lr_floor = lr_floor
super().__init__(optimizer) # calls self.get_lr(), thus do last
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
## boilerplate from torch.optim (1.5)
last_step = self.last_epoch # _LRScheduler (originally) designed for epoch-wise scheduling; incremented BEFORE get_lr() is called and initialized to -1
if last_step >= self.warmup_steps:
lr_scale = self.decay**(1 + (last_step - self.warmup_steps)/self.step_size)
else:
lr_scale = 1.0
# XXX: get_lr() in torch.optim (1.5) tend to continuously update param_group['lr'] instead of using base_lrs
# possibly related to chainable lr schedulers
return [max(lr_scale*base_lr, self.lr_floor) for base_lr in self.base_lrs]
# XXX:
# had a more advanced version, but didn't seem to work properly (didn't debug)
# improvements/differences
# - just pass source model, create internal target model via deep_copy()
# - internal step counter, instead of passing step counter on step() for warmup
# - state_dict(), load_state_dict() to store/load internal target model and step counter
# - optional device argument, so EMA (target) model can be kept on CPU model rather than using up GPU model
# - optional list of excluded parameter names (e.g. don't use up compute decaying, some big constant tensors stored in state_dict using register_buffer())
# - switching target model to eval mode (assuming it will only ever be used for inference or keeping track of EMA weights, not training directly)
# - set requires_grad_(False) on all parameters of internal target model (not just make operations have no gradients, but the actual parameters themselves)
class EMA(object):
def __init__(self, source, target, decay, warmup_steps=0):
self.source = source
self.target = target
self.decay = decay
self.warmup_steps = warmup_steps
self.source_dict = self.source.state_dict()
self.target_dict = self.target.state_dict()
with torch.no_grad():
for key in self.source_dict:
self.target_dict[key].data.copy_(self.source_dict[key].data)
# pass step_idx if warup_steps > 0
# XXX: need to pass it explicitly, because EMA is state-less and thus cannot have internal step counter which
# is properly stored/loaded from checkpoint
def step(self, step_idx=None):
if step_idx and step_idx < self.warmup_steps:
decay = 0.0 # no EMA, use source parameters directly
else:
decay = self.decay
with torch.no_grad():
for key in self.source_dict:
self.target_dict[key].data.copy_(self.target_dict[key].data*decay + self.source_dict[key].data*(1.0 - decay)) | en | 0.802657 | # find checkpoint with latest creation time (meta data modification on unix) # raise if inconsistent # check step consistency # get data from components # save file # load file # XXX: issue with torch.load() not working with pathlib.Path # load data into components # check step consistency # load file # XXX: issue with torch.load() not working with pathlib.Path # load data into components # e.g. backup_source(out_dir / 'source.zip', ['./*.py', './models/*.py']) # seed numpy with worker-specific seed from pytorch, to avoid all workers using same random seed (afaik only problem on linux, not windows); note no need to add worker id to torch.initial_seed() (already worker specific), but have to wrap to uint32 for numpy # initial lr for n_steps_warmup steps (updates), # then drops to initial lr * decay, and continues (smoothly) exponentially decaying by factor decay every step_size steps # continue decaying until lr_floor is reached, then keep constant # calls self.get_lr(), thus do last ## boilerplate from torch.optim (1.5) # _LRScheduler (originally) designed for epoch-wise scheduling; incremented BEFORE get_lr() is called and initialized to -1 # XXX: get_lr() in torch.optim (1.5) tend to continuously update param_group['lr'] instead of using base_lrs # possibly related to chainable lr schedulers # XXX: # had a more advanced version, but didn't seem to work properly (didn't debug) # improvements/differences # - just pass source model, create internal target model via deep_copy() # - internal step counter, instead of passing step counter on step() for warmup # - state_dict(), load_state_dict() to store/load internal target model and step counter # - optional device argument, so EMA (target) model can be kept on CPU model rather than using up GPU model # - optional list of excluded parameter names (e.g. 
don't use up compute decaying, some big constant tensors stored in state_dict using register_buffer()) # - switching target model to eval mode (assuming it will only ever be used for inference or keeping track of EMA weights, not training directly) # - set requires_grad_(False) on all parameters of internal target model (not just make operations have no gradients, but the actual parameters themselves) # pass step_idx if warup_steps > 0 # XXX: need to pass it explicitly, because EMA is state-less and thus cannot have internal step counter which # is properly stored/loaded from checkpoint # no EMA, use source parameters directly | 2.092524 | 2 |
notebooks/final_recommendationdata_creation.py | TanveerSingh09/bhavitus-main | 0 | 6623267 | # -*- coding: utf-8 -*-
"""Final_recommendationdata_creation.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1jFSjmRyZAPq8YDYfO4vLnzoG9QUVQZdB
# Creating final data for crop and fertilizer recommendation system
"""
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
fertilizer_data_path = '../Data-raw/FertilizerData.csv'
merge_fert = pd.read_csv(fertilizer_data_path)
merge_fert.head()
del merge_fert['Unnamed: 0']
merge_fert.describe()
merge_fert['Crop'].unique()
plt.plot(merge_fert["N"])
plt.plot(merge_fert["P"])
plt.plot(merge_fert["K"])
sns.heatmap(merge_fert.corr(),annot=True)
merge_crop = pd.read_csv('../Data-raw/MergeFileCrop.csv')
reco_fert = merge_fert
#Add +/-3 for every NPK value
import random
temp = pd.DataFrame(columns = ['N','P','K'])
for i in range(0,merge_crop.shape[0]):
crop = merge_crop.label.iloc[i]
#print(crop)
N = reco_fert[reco_fert['Crop'] == crop]["N"].iloc[0] + random.randint(-20,20)
P = reco_fert[reco_fert['Crop'] == crop]["P"].iloc[0] + random.randint(-5,20)
K = reco_fert[reco_fert['Crop'] == crop]["K"].iloc[0] + random.randint(-5,5)
d = {"N":N,"P":P,"K":K}
#print(d)
temp = temp.append(d,ignore_index = True)
temp
merge_crop['N'] = temp['N']
merge_crop['P'] = temp['P']
merge_crop['K'] = temp['K']
merge_crop
del merge_crop['Unnamed: 0']
merge_crop
merge_crop = merge_crop[[ 'N', 'P', 'K','temperature', 'humidity', 'ph', 'rainfall', 'label']]
merge_crop.to_csv("../Data-processed/crop_recommendation.csv",index=False)
# Checking if everything went fine
df = pd.read_csv('../Data-processed/crop_recommendation.csv')
df.head()
df.shape | # -*- coding: utf-8 -*-
"""Final_recommendationdata_creation.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1jFSjmRyZAPq8YDYfO4vLnzoG9QUVQZdB
# Creating final data for crop and fertilizer recommendation system
"""
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
fertilizer_data_path = '../Data-raw/FertilizerData.csv'
merge_fert = pd.read_csv(fertilizer_data_path)
merge_fert.head()
del merge_fert['Unnamed: 0']
merge_fert.describe()
merge_fert['Crop'].unique()
plt.plot(merge_fert["N"])
plt.plot(merge_fert["P"])
plt.plot(merge_fert["K"])
sns.heatmap(merge_fert.corr(),annot=True)
merge_crop = pd.read_csv('../Data-raw/MergeFileCrop.csv')
reco_fert = merge_fert
#Add +/-3 for every NPK value
import random
temp = pd.DataFrame(columns = ['N','P','K'])
for i in range(0,merge_crop.shape[0]):
crop = merge_crop.label.iloc[i]
#print(crop)
N = reco_fert[reco_fert['Crop'] == crop]["N"].iloc[0] + random.randint(-20,20)
P = reco_fert[reco_fert['Crop'] == crop]["P"].iloc[0] + random.randint(-5,20)
K = reco_fert[reco_fert['Crop'] == crop]["K"].iloc[0] + random.randint(-5,5)
d = {"N":N,"P":P,"K":K}
#print(d)
temp = temp.append(d,ignore_index = True)
temp
merge_crop['N'] = temp['N']
merge_crop['P'] = temp['P']
merge_crop['K'] = temp['K']
merge_crop
del merge_crop['Unnamed: 0']
merge_crop
merge_crop = merge_crop[[ 'N', 'P', 'K','temperature', 'humidity', 'ph', 'rainfall', 'label']]
merge_crop.to_csv("../Data-processed/crop_recommendation.csv",index=False)
# Checking if everything went fine
df = pd.read_csv('../Data-processed/crop_recommendation.csv')
df.head()
df.shape | en | 0.781328 | # -*- coding: utf-8 -*- Final_recommendationdata_creation.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1jFSjmRyZAPq8YDYfO4vLnzoG9QUVQZdB # Creating final data for crop and fertilizer recommendation system #Add +/-3 for every NPK value #print(crop) #print(d) # Checking if everything went fine | 2.652254 | 3 |
zwog/__init__.py | tare/zwog | 2 | 6623268 | <filename>zwog/__init__.py
"""zwog."""
from .utils import ZWOG
__all__ = ['ZWOG', ]
__author__ = '<NAME>'
__version__ = '0.0.1'
__license__ = 'BSD 3-Clause "New" or "Revised" License'
| <filename>zwog/__init__.py
"""zwog."""
from .utils import ZWOG
__all__ = ['ZWOG', ]
__author__ = '<NAME>'
__version__ = '0.0.1'
__license__ = 'BSD 3-Clause "New" or "Revised" License'
| none | 1 | 1.089386 | 1 | |
AoC/2020/05/pt2.py | N-l1/dmoj | 0 | 6623269 | <reponame>N-l1/dmoj<filename>AoC/2020/05/pt2.py
# Find this puzzle at:
# https://adventofcode.com/2020/day/5
with open('input.txt', 'r') as file:
puzzle_input = file.read().splitlines()
seat_ids = []
for line in puzzle_input:
col, row = list(range(8)), list(range(128))
# Slices the existing row or column in half depending on character
for character in line:
if character == 'F':
row = row[:len(row)//2]
elif character == 'B':
row = row[len(row)//2:]
if character == 'L':
col = col[:len(col)//2]
elif character == 'R':
col = col[len(col)//2:]
seat_id = row[0] * 8 + col[0]
seat_ids.append(seat_id)
# Find the missing ID in the list of IDs
for idx, seat in enumerate(sorted(seat_ids)):
if seat + 2 == sorted(seat_ids)[idx + 1]:
print(seat + 1)
break
| # Find this puzzle at:
# https://adventofcode.com/2020/day/5
with open('input.txt', 'r') as file:
puzzle_input = file.read().splitlines()
seat_ids = []
for line in puzzle_input:
col, row = list(range(8)), list(range(128))
# Slices the existing row or column in half depending on character
for character in line:
if character == 'F':
row = row[:len(row)//2]
elif character == 'B':
row = row[len(row)//2:]
if character == 'L':
col = col[:len(col)//2]
elif character == 'R':
col = col[len(col)//2:]
seat_id = row[0] * 8 + col[0]
seat_ids.append(seat_id)
# Find the missing ID in the list of IDs
for idx, seat in enumerate(sorted(seat_ids)):
if seat + 2 == sorted(seat_ids)[idx + 1]:
print(seat + 1)
break | en | 0.753325 | # Find this puzzle at: # https://adventofcode.com/2020/day/5 # Slices the existing row or column in half depending on character # Find the missing ID in the list of IDs | 3.639366 | 4 |
freemiumSpotify/FileHandler.py | alkislardeniz/freemium-spotify | 2 | 6623270 | import pickle
from pathlib import Path
class FileHandler:
@staticmethod
def save_obj(obj, file_name):
with open(file_name, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
@staticmethod
def load_obj(file_name):
with open(file_name, 'rb') as f:
return pickle.load(f)
@staticmethod
def create_download_directory(path):
p = Path(path)
p.mkdir(exist_ok=True)
| import pickle
from pathlib import Path
class FileHandler:
@staticmethod
def save_obj(obj, file_name):
with open(file_name, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
@staticmethod
def load_obj(file_name):
with open(file_name, 'rb') as f:
return pickle.load(f)
@staticmethod
def create_download_directory(path):
p = Path(path)
p.mkdir(exist_ok=True)
| none | 1 | 3.152429 | 3 | |
engine.py | MattJTrueblood/Allies_RL_Prototype | 1 | 6623271 | import tcod
import constants
import core
from view import renderer
from view.frame import Frame
from view.panel import Panel
from controllers.game_panel_controller import GamePanelController
def main():
key_event = tcod.Key()
mouse_event = tcod.Mouse()
#Build start menu
start_frame = Frame("start frame")
start_panel = Panel(0, 0, constants.CONSOLE_WIDTH, constants.CONSOLE_HEIGHT)
start_panel.set_controller(GamePanelController())
start_frame.add_panel(start_panel)
core.add_frame(start_frame)
core.set_current_frame("start frame")
#Main Loop
while not tcod.console_is_window_closed():
#Get input
tcod.sys_check_for_event(tcod.EVENT_KEY_PRESS, key_event, mouse_event)
if key_event.vk == tcod.KEY_ESCAPE:
return True;
else:
core.current_frame.receive_events(key_event, mouse_event)
core.current_frame.update()
renderer.render()
if __name__ == '__main__':
main()
| import tcod
import constants
import core
from view import renderer
from view.frame import Frame
from view.panel import Panel
from controllers.game_panel_controller import GamePanelController
def main():
key_event = tcod.Key()
mouse_event = tcod.Mouse()
#Build start menu
start_frame = Frame("start frame")
start_panel = Panel(0, 0, constants.CONSOLE_WIDTH, constants.CONSOLE_HEIGHT)
start_panel.set_controller(GamePanelController())
start_frame.add_panel(start_panel)
core.add_frame(start_frame)
core.set_current_frame("start frame")
#Main Loop
while not tcod.console_is_window_closed():
#Get input
tcod.sys_check_for_event(tcod.EVENT_KEY_PRESS, key_event, mouse_event)
if key_event.vk == tcod.KEY_ESCAPE:
return True;
else:
core.current_frame.receive_events(key_event, mouse_event)
core.current_frame.update()
renderer.render()
if __name__ == '__main__':
main()
| en | 0.405687 | #Build start menu #Main Loop #Get input | 2.440738 | 2 |
code/rosws/src/ros_demonstration/src/utility/config.py | Ohara124c41/PlanEx | 0 | 6623272 | #!/usr/bin/python
# Configuration file to tweak parameters
group_name = "Group_D"
# namespaces for different system units
lidar_driver_ns = '/lidar'
motor_driver_ns = '/motor'
imu_driver_ns = '/imu'
mag_driver_ns = '/mag'
movement_controller_ns = '/movement'
# --- movement ---
# once we are this distance in the vicinity of the desired position (x,y,z), we declare us arrived at the position (but not pose)
arrived_at_position_threshold = 0.005 # [m]
# once the difference of our actual and the desired orientation is less than the threshold, we declare us arrived at the orientation
arrived_at_orientation_threshold = 5 # [degrees]
# if our heading to drive straight to the current heading is wrong by more than the threshold we will stop and only turn without linear motion
correct_heading_with_turn_only_threshold = 10 # [degrees] | #!/usr/bin/python
# Configuration file to tweak parameters
group_name = "Group_D"
# namespaces for different system units
lidar_driver_ns = '/lidar'
motor_driver_ns = '/motor'
imu_driver_ns = '/imu'
mag_driver_ns = '/mag'
movement_controller_ns = '/movement'
# --- movement ---
# once we are this distance in the vicinity of the desired position (x,y,z), we declare us arrived at the position (but not pose)
arrived_at_position_threshold = 0.005 # [m]
# once the difference of our actual and the desired orientation is less than the threshold, we declare us arrived at the orientation
arrived_at_orientation_threshold = 5 # [degrees]
# if our heading to drive straight to the current heading is wrong by more than the threshold we will stop and only turn without linear motion
correct_heading_with_turn_only_threshold = 10 # [degrees] | en | 0.857544 | #!/usr/bin/python # Configuration file to tweak parameters # namespaces for different system units # --- movement --- # once we are this distance in the vicinity of the desired position (x,y,z), we declare us arrived at the position (but not pose) # [m] # once the difference of our actual and the desired orientation is less than the threshold, we declare us arrived at the orientation # [degrees] # if our heading to drive straight to the current heading is wrong by more than the threshold we will stop and only turn without linear motion # [degrees] | 2.414915 | 2 |
intro/try_and_exception.py | RobertoRosa7/python | 0 | 6623273 | <reponame>RobertoRosa7/python<filename>intro/try_and_exception.py<gh_stars>0
# -*- coding: utf-8 -*-
# def dividePorDois(dois):
# # usando try and execpt trata do error mas não interrompe a execução do script
# try:
# return 32 / dois
# except ZeroDivisionError:
# print('Error: you tried to divide by zero')
# print(dividePorDois(2))
# print(dividePorDois(12))
# print(dividePorDois(0))
# print(dividePorDois(0))
# print(dividePorDois(3))
# Quanto gatos vc tem?
print('How many cats do you have?')
numCats = input()
try:
if int(numCats) >= 4:
print('That is a lot cats')
else:
print('That is not a lot cats')
except ValueError:
print('Not a number')
| # -*- coding: utf-8 -*-
# def dividePorDois(dois):
# # usando try and execpt trata do error mas não interrompe a execução do script
# try:
# return 32 / dois
# except ZeroDivisionError:
# print('Error: you tried to divide by zero')
# print(dividePorDois(2))
# print(dividePorDois(12))
# print(dividePorDois(0))
# print(dividePorDois(0))
# print(dividePorDois(3))
# Quanto gatos vc tem?
print('How many cats do you have?')
numCats = input()
try:
if int(numCats) >= 4:
print('That is a lot cats')
else:
print('That is not a lot cats')
except ValueError:
print('Not a number') | pt | 0.428414 | # -*- coding: utf-8 -*- # def dividePorDois(dois): # # usando try and execpt trata do error mas não interrompe a execução do script # try: # return 32 / dois # except ZeroDivisionError: # print('Error: you tried to divide by zero') # print(dividePorDois(2)) # print(dividePorDois(12)) # print(dividePorDois(0)) # print(dividePorDois(0)) # print(dividePorDois(3)) # Quanto gatos vc tem? | 3.776446 | 4 |
gs/group/messages/add/smtp2gs/xverp.py | groupserver/gs.group.messages.add.smtp2gs | 0 | 6623274 | # -*- coding: utf-8 -*-
'''XVERP handling.'''
############################################################################
#
# Copyright © 2014, 2015 OnlineGroups.net and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
############################################################################
from __future__ import absolute_import, unicode_literals
import re
from .servercomms import add_bounce
#: The regular expression for matching an XVERP address. They look like
#: listId+userMailbox=user.domain@this.server
XVERP_RE = re.compile('(.*?)\+(.*?)\=(.*?)\@(.*)')
def is_an_xverp_bounce(toAddress):
'''Test if an address in an XVERP bounce.
:param str toAddress: The address to check.
:return: ``True`` if the address is an XVERP bounce.
:rtype: bool'''
result = XVERP_RE.search(toAddress) if toAddress else None
retval = bool(result) and (len(result.groups()) == 4)
assert type(retval) == bool
return retval
def handle_bounce(netloc, usessl, toAddress, token):
'''Record that an XVERP bounce has occurred.
:param str netloc: The host-name of the GroupServer site (can have a
``:port``).
:param bool usessl: ``True`` if TLS should be used with communicating with
GroupServer.
:param str toAddress: The address that is bouncing.
:param str token: The token used to authenticate with GroupServer.
:return: Nothing.
The ``toAddress`` is decomposed to the email address of the person whose
inbox is bouncing, and this addresses is used to record the bounce.
'''
groups = XVERP_RE.search(toAddress).groups()
# <EMAIL>
listAddress = '@'.join((groups[0], groups[3]))
# <EMAIL>
userAddress = '@'.join((groups[1], groups[2]))
add_bounce(netloc, usessl, userAddress, listAddress, token)
| # -*- coding: utf-8 -*-
'''XVERP handling.'''
############################################################################
#
# Copyright © 2014, 2015 OnlineGroups.net and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
############################################################################
from __future__ import absolute_import, unicode_literals
import re
from .servercomms import add_bounce
#: The regular expression for matching an XVERP address. They look like
#: listId+userMailbox=user.domain@this.server
XVERP_RE = re.compile('(.*?)\+(.*?)\=(.*?)\@(.*)')
def is_an_xverp_bounce(toAddress):
'''Test if an address in an XVERP bounce.
:param str toAddress: The address to check.
:return: ``True`` if the address is an XVERP bounce.
:rtype: bool'''
result = XVERP_RE.search(toAddress) if toAddress else None
retval = bool(result) and (len(result.groups()) == 4)
assert type(retval) == bool
return retval
def handle_bounce(netloc, usessl, toAddress, token):
'''Record that an XVERP bounce has occurred.
:param str netloc: The host-name of the GroupServer site (can have a
``:port``).
:param bool usessl: ``True`` if TLS should be used with communicating with
GroupServer.
:param str toAddress: The address that is bouncing.
:param str token: The token used to authenticate with GroupServer.
:return: Nothing.
The ``toAddress`` is decomposed to the email address of the person whose
inbox is bouncing, and this addresses is used to record the bounce.
'''
groups = XVERP_RE.search(toAddress).groups()
# <EMAIL>
listAddress = '@'.join((groups[0], groups[3]))
# <EMAIL>
userAddress = '@'.join((groups[1], groups[2]))
add_bounce(netloc, usessl, userAddress, listAddress, token)
| en | 0.676678 | # -*- coding: utf-8 -*- XVERP handling. ############################################################################ # # Copyright © 2014, 2015 OnlineGroups.net and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################ #: The regular expression for matching an XVERP address. They look like #: listId+userMailbox=user.domain@this.server Test if an address in an XVERP bounce. :param str toAddress: The address to check. :return: ``True`` if the address is an XVERP bounce. :rtype: bool Record that an XVERP bounce has occurred. :param str netloc: The host-name of the GroupServer site (can have a ``:port``). :param bool usessl: ``True`` if TLS should be used with communicating with GroupServer. :param str toAddress: The address that is bouncing. :param str token: The token used to authenticate with GroupServer. :return: Nothing. The ``toAddress`` is decomposed to the email address of the person whose inbox is bouncing, and this addresses is used to record the bounce. # <EMAIL> # <EMAIL> | 2.11345 | 2 |
InvenTree/company/migrations/0004_company_url.py | morelale/Inventory-G52 | 1 | 6623275 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-04-24 08:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('company', '0003_auto_20180423_1117'),
]
operations = [
migrations.AddField(
model_name='company',
name='URL',
field=models.URLField(blank=True, help_text='Link to external company information'),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-04-24 08:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('company', '0003_auto_20180423_1117'),
]
operations = [
migrations.AddField(
model_name='company',
name='URL',
field=models.URLField(blank=True, help_text='Link to external company information'),
),
]
| en | 0.758063 | # -*- coding: utf-8 -*- # Generated by Django 1.11.12 on 2018-04-24 08:01 | 1.47785 | 1 |
prob.py | elkd/deriv | 4 | 6623276 | import asyncio
async def check_stop(session, start_balance, stake):
'''
Check wether to continue playing or not
The rule for this function is that it shouldn't be run often
Only when the Balance is approaching the stop loss/profit,
Then this function should be called more often
'''
bl = await session.page.locator("#header__acc-balance").inner_text()
if bl:
cur_balance = float(bl.split()[0].replace(',',''))
stop_est = cur_balance - session.start_balance
if stop_est > float(session.stop_profit) or abs(stop_est) > float(session.stop_loss):
session.loop = False
return 'STOP'
| import asyncio
async def check_stop(session, start_balance, stake):
'''
Check wether to continue playing or not
The rule for this function is that it shouldn't be run often
Only when the Balance is approaching the stop loss/profit,
Then this function should be called more often
'''
bl = await session.page.locator("#header__acc-balance").inner_text()
if bl:
cur_balance = float(bl.split()[0].replace(',',''))
stop_est = cur_balance - session.start_balance
if stop_est > float(session.stop_profit) or abs(stop_est) > float(session.stop_loss):
session.loop = False
return 'STOP'
| en | 0.96736 | Check wether to continue playing or not The rule for this function is that it shouldn't be run often Only when the Balance is approaching the stop loss/profit, Then this function should be called more often | 2.797022 | 3 |
distributed/mondrian/evaluation.py | stegianna/mondrian | 10 | 6623277 | <reponame>stegianna/mondrian<filename>distributed/mondrian/evaluation.py
# Copyright 2020 <NAME> (https://seclab.unibg.it)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Functions to evaluate the information loss
def evaluate_information_loss(adf, udf):
"""Run the PandasUDF on fragments and aggregate the output."""
penalties = adf.groupby('fragment').applyInPandas(udf.func, udf.returnType)
penalty = penalties.toPandas().sum()
return penalty['information_loss']
def extract_span(aggregation, column_name=None, quasiid_gnrlz=None):
if column_name and quasiid_gnrlz[column_name][
'generalization_type'] == 'categorical':
# count the leaves of the subtree originated by the categorical values
subtree = quasiid_gnrlz[column_name]['taxonomy_tree'].subtree(
aggregation)
leaves = len(subtree.leaves())
return leaves if leaves > 1 else 0
if column_name and quasiid_gnrlz[column_name][
'generalization_type'] == 'common_prefix':
# if the string was generalized return 1 else 0
hm = quasiid_gnrlz[column_name]['params']['hide-mark']
if hm in aggregation:
return int(aggregation[aggregation.index("[") + 1:-1])
else:
return 0
return 1 if hm in aggregation else 0
if aggregation.startswith('[') and (aggregation.endswith(']')
or aggregation.endswith(')')):
low, high = map(float, aggregation[1:-1].split('-'))
return high - low
if aggregation.startswith('{') and aggregation.endswith('}'):
return aggregation[1:-1].count(',') + 1
return 0
def normalized_certainty_penalty(adf,
quasiid_columns,
quasiid_range,
quasiid_gnrlz=None):
# compute dataset-level range on the quasi-identifiers columns
partitions = adf.groupby(quasiid_columns)
ncp = 0
for _, partition in partitions:
# work on a single row, each row has the same value of the
# quasi-identifiers
row = partition.iloc[0]
rncp = 0
for column_idx, column in enumerate(quasiid_columns):
if quasiid_gnrlz and column in quasiid_gnrlz:
rncp += extract_span(row[column], column,
quasiid_gnrlz) / quasiid_range[column_idx]
else:
rncp += extract_span(row[column]) / quasiid_range[column_idx]
rncp *= len(partition)
ncp += rncp
return ncp
def discernability_penalty(adf, quasiid_columns):
"""Compute Discernability Penalty (DP)."""
sizes = adf.groupby(quasiid_columns).size()
dp = 0
for size in sizes:
dp += size**2
return dp
| # Copyright 2020 <NAME> (https://seclab.unibg.it)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Functions to evaluate the information loss
def evaluate_information_loss(adf, udf):
"""Run the PandasUDF on fragments and aggregate the output."""
penalties = adf.groupby('fragment').applyInPandas(udf.func, udf.returnType)
penalty = penalties.toPandas().sum()
return penalty['information_loss']
def extract_span(aggregation, column_name=None, quasiid_gnrlz=None):
if column_name and quasiid_gnrlz[column_name][
'generalization_type'] == 'categorical':
# count the leaves of the subtree originated by the categorical values
subtree = quasiid_gnrlz[column_name]['taxonomy_tree'].subtree(
aggregation)
leaves = len(subtree.leaves())
return leaves if leaves > 1 else 0
if column_name and quasiid_gnrlz[column_name][
'generalization_type'] == 'common_prefix':
# if the string was generalized return 1 else 0
hm = quasiid_gnrlz[column_name]['params']['hide-mark']
if hm in aggregation:
return int(aggregation[aggregation.index("[") + 1:-1])
else:
return 0
return 1 if hm in aggregation else 0
if aggregation.startswith('[') and (aggregation.endswith(']')
or aggregation.endswith(')')):
low, high = map(float, aggregation[1:-1].split('-'))
return high - low
if aggregation.startswith('{') and aggregation.endswith('}'):
return aggregation[1:-1].count(',') + 1
return 0
def normalized_certainty_penalty(adf,
quasiid_columns,
quasiid_range,
quasiid_gnrlz=None):
# compute dataset-level range on the quasi-identifiers columns
partitions = adf.groupby(quasiid_columns)
ncp = 0
for _, partition in partitions:
# work on a single row, each row has the same value of the
# quasi-identifiers
row = partition.iloc[0]
rncp = 0
for column_idx, column in enumerate(quasiid_columns):
if quasiid_gnrlz and column in quasiid_gnrlz:
rncp += extract_span(row[column], column,
quasiid_gnrlz) / quasiid_range[column_idx]
else:
rncp += extract_span(row[column]) / quasiid_range[column_idx]
rncp *= len(partition)
ncp += rncp
return ncp
def discernability_penalty(adf, quasiid_columns):
    """Compute the Discernability Penalty (DP).

    DP is the sum of the squared sizes of the equivalence classes
    induced by the quasi-identifier columns.
    """
    class_sizes = adf.groupby(quasiid_columns).size()
    return sum(size ** 2 for size in class_sizes)
src/clims/legacy/integration.py | withrocks/commonlims | 0 | 6623278 | <filename>src/clims/legacy/integration.py<gh_stars>0
from __future__ import absolute_import, print_function
import os
import shutil
import logging
import importlib
import pkgutil
from driverfile import DriverFileIntegrationTests
from clims.legacy.extensions import NoTestsFoundException
logger = logging.getLogger(__name__)
# Creates an integration test config file based on convention
# i.e. position and contents of the script classes themselves.
class ConfigFromConventionProvider(object):
    """Builds integration-test config entries by convention, i.e. from the
    location and contents of the extension script classes themselves."""

    @classmethod
    def _enumerate_modules(cls, root_name):
        """Import and yield every module found under the package ``root_name``.

        Modules that fail to import are logged and skipped.
        """
        root = importlib.import_module(root_name)
        for loader, module_name, is_pkg in pkgutil.walk_packages(root.__path__):
            try:
                module = loader.find_module(module_name).load_module(module_name)
            except SyntaxError:
                logger.warning("Syntax error in module {}".format(module_name))
                # BUGFIX: without this `continue`, the yield below executed
                # anyway and re-yielded the previous iteration's module (or
                # raised NameError on the first iteration).
                continue
            except ImportError:
                logger.warning("ImportError in module {}".format(module_name))
                continue
            yield module

    @classmethod
    def _enumerate_extensions(cls, root_pkg):
        """Yield the modules under ``root_pkg`` that define an ``Extension``."""
        for module in cls._enumerate_modules(root_pkg):
            if hasattr(module, "Extension"):
                yield module

    @classmethod
    def get_extension_config(cls, root_pkg):
        """Yield one config entry (dotted module path) per extension module."""
        for extension in cls._enumerate_extensions(root_pkg):
            # NOTE: For some reason, the root does not get added to the enumerated modules
            entry = dict()
            entry["module"] = "{}.{}".format(root_pkg, extension.__name__)
            yield entry
class IntegrationTestService(object):
    """Runs extension integration tests and manages their run/frozen
    directories on disk."""

    CACHE_NAME = "test_run_cache"

    def __init__(self, logger=None):
        self.logger = logger or logging.getLogger(__name__)
        self.CACHE_FULL_NAME = "{}.sqlite".format(self.CACHE_NAME)

    @staticmethod
    def _test_run_directory(config_entry, pid):
        """Directory where a fresh test run writes its output."""
        return os.path.join(".", "runs", config_entry["name"], pid, "test-run")

    @staticmethod
    def _test_frozen_directory(config_entry, pid):
        """Directory holding the frozen (reference) output for a test."""
        return os.path.join(".", "runs", config_entry["name"], pid, "test-frozen")

    def _validate_run(self, entry):
        # Only 'driverfile' entries currently have an automated validator.
        if entry["cmd"] == "driverfile":
            test_provider = DriverFileIntegrationTests()
            for test in entry["tests"]:
                run_path = self._test_run_directory(entry, test["pid"])
                frozen_path = self._test_frozen_directory(entry, test["pid"])
                test_provider.validate(run_path, frozen_path, test)

    def _freeze_test(self, entry, test):
        """Copy a test's latest run output into its frozen directory."""
        source = self._test_run_directory(entry, test["pid"])
        if not os.path.exists(source):
            raise FreezingBeforeRunning()
        target = self._test_frozen_directory(entry, test["pid"])
        print("Freezing test {} => {}".format(source, target))  # noqa: B314
        if os.path.exists(target):
            print("Target already exists, removing it")  # noqa: B314
            # BUGFIX: this used to call shutil.rmtree(False), which never
            # removed the directory and made the copytree below fail.
            shutil.rmtree(target)
        shutil.copytree(source, target)

    def validate(self, module, config):
        """
        Runs the tests on the frozen tests. The idea is that this should run (at least) on every official build,
        thus validating every script against a known state

        :param module: root package whose extensions should be tested
        :param config: configuration forwarded to the extension service
        :return: number of extensions whose tests raised an error
        """
        from clims.legacy.extensions import ExtensionService
        extension_svc = ExtensionService(lambda _: None)
        config_obj = ConfigFromConventionProvider.get_extension_config(module)
        exception_count = 0
        # NOTE(review): the `module` parameter is shadowed by the loop below;
        # kept as-is since callers pass it positionally.
        for entry in config_obj:
            module = entry["module"]
            try:
                extension_svc.run_test(config, None, module, False, True, True)
                print("- {}: SUCCESS".format(module))  # noqa: B314
            except NoTestsFoundException:
                print("- {}: WARNING - No tests were found".format(module))  # noqa: B314
            except Exception as e:
                # It's OK to use a catch-all exception handler here since this is only used while
                # running tests, so we want to be optimistic and try to run all tests.
                # BUGFIX: use the exception itself rather than e.message, which
                # does not exist on Python 3 exceptions.
                print("- {}: ERROR - {}".format(module, e))  # noqa: B314
                print("    Fresh run: legacy-ext extension {} test-fresh".format(module))  # noqa: B314
                print("    Review, then: legacy-ext extension {} freeze".format(module))  # noqa: B314
                exception_count += 1
        return exception_count
class FreezingBeforeRunning(Exception):
    """Raised when a freeze is requested before any initial test run exists."""
| <filename>src/clims/legacy/integration.py<gh_stars>0
from __future__ import absolute_import, print_function
import os
import shutil
import logging
import importlib
import pkgutil
from driverfile import DriverFileIntegrationTests
from clims.legacy.extensions import NoTestsFoundException
logger = logging.getLogger(__name__)
# Creates an integration test config file based on convention
# i.e. position and contents of the script classes themselves.
class ConfigFromConventionProvider(object):
    """Builds integration-test config entries by convention, i.e. from the
    location and contents of the extension script classes themselves."""

    @classmethod
    def _enumerate_modules(cls, root_name):
        """Import and yield every module found under the package ``root_name``.

        Modules that fail to import are logged and skipped.
        """
        root = importlib.import_module(root_name)
        for loader, module_name, is_pkg in pkgutil.walk_packages(root.__path__):
            try:
                module = loader.find_module(module_name).load_module(module_name)
            except SyntaxError:
                logger.warning("Syntax error in module {}".format(module_name))
                # BUGFIX: without this `continue`, the yield below executed
                # anyway and re-yielded the previous iteration's module (or
                # raised NameError on the first iteration).
                continue
            except ImportError:
                logger.warning("ImportError in module {}".format(module_name))
                continue
            yield module

    @classmethod
    def _enumerate_extensions(cls, root_pkg):
        """Yield the modules under ``root_pkg`` that define an ``Extension``."""
        for module in cls._enumerate_modules(root_pkg):
            if hasattr(module, "Extension"):
                yield module

    @classmethod
    def get_extension_config(cls, root_pkg):
        """Yield one config entry (dotted module path) per extension module."""
        for extension in cls._enumerate_extensions(root_pkg):
            # NOTE: For some reason, the root does not get added to the enumerated modules
            entry = dict()
            entry["module"] = "{}.{}".format(root_pkg, extension.__name__)
            yield entry
class IntegrationTestService(object):
    """Runs extension integration tests and manages their run/frozen
    directories on disk."""

    CACHE_NAME = "test_run_cache"

    def __init__(self, logger=None):
        self.logger = logger or logging.getLogger(__name__)
        self.CACHE_FULL_NAME = "{}.sqlite".format(self.CACHE_NAME)

    @staticmethod
    def _test_run_directory(config_entry, pid):
        """Directory where a fresh test run writes its output."""
        return os.path.join(".", "runs", config_entry["name"], pid, "test-run")

    @staticmethod
    def _test_frozen_directory(config_entry, pid):
        """Directory holding the frozen (reference) output for a test."""
        return os.path.join(".", "runs", config_entry["name"], pid, "test-frozen")

    def _validate_run(self, entry):
        # Only 'driverfile' entries currently have an automated validator.
        if entry["cmd"] == "driverfile":
            test_provider = DriverFileIntegrationTests()
            for test in entry["tests"]:
                run_path = self._test_run_directory(entry, test["pid"])
                frozen_path = self._test_frozen_directory(entry, test["pid"])
                test_provider.validate(run_path, frozen_path, test)

    def _freeze_test(self, entry, test):
        """Copy a test's latest run output into its frozen directory."""
        source = self._test_run_directory(entry, test["pid"])
        if not os.path.exists(source):
            raise FreezingBeforeRunning()
        target = self._test_frozen_directory(entry, test["pid"])
        print("Freezing test {} => {}".format(source, target))  # noqa: B314
        if os.path.exists(target):
            print("Target already exists, removing it")  # noqa: B314
            # BUGFIX: this used to call shutil.rmtree(False), which never
            # removed the directory and made the copytree below fail.
            shutil.rmtree(target)
        shutil.copytree(source, target)

    def validate(self, module, config):
        """
        Runs the tests on the frozen tests. The idea is that this should run (at least) on every official build,
        thus validating every script against a known state

        :param module: root package whose extensions should be tested
        :param config: configuration forwarded to the extension service
        :return: number of extensions whose tests raised an error
        """
        from clims.legacy.extensions import ExtensionService
        extension_svc = ExtensionService(lambda _: None)
        config_obj = ConfigFromConventionProvider.get_extension_config(module)
        exception_count = 0
        # NOTE(review): the `module` parameter is shadowed by the loop below;
        # kept as-is since callers pass it positionally.
        for entry in config_obj:
            module = entry["module"]
            try:
                extension_svc.run_test(config, None, module, False, True, True)
                print("- {}: SUCCESS".format(module))  # noqa: B314
            except NoTestsFoundException:
                print("- {}: WARNING - No tests were found".format(module))  # noqa: B314
            except Exception as e:
                # It's OK to use a catch-all exception handler here since this is only used while
                # running tests, so we want to be optimistic and try to run all tests.
                # BUGFIX: use the exception itself rather than e.message, which
                # does not exist on Python 3 exceptions.
                print("- {}: ERROR - {}".format(module, e))  # noqa: B314
                print("    Fresh run: legacy-ext extension {} test-fresh".format(module))  # noqa: B314
                print("    Review, then: legacy-ext extension {} freeze".format(module))  # noqa: B314
                exception_count += 1
        return exception_count
class FreezingBeforeRunning(Exception):
"""Thrown when the user tries to freeze a state before doing an initial run"""
pass
| en | 0.893147 | # Creates an integration test config file based on convention # i.e. position and contents of the script classes themselves. # NOTE: For some reason, the root does not get added to the enumerated modules # noqa: B314 # noqa: B314 Runs the tests on the frozen tests. The idea is that this should run (at least) on every official build, thus validating every script against a known state :param config: :return: # noqa: B314 # noqa: B314 # It's OK to use a catch-all exception handler here since this is only used while # running tests, so we want to be optimistic and try to run all tests: # noqa: B314 # noqa: B314 # noqa: B314 Thrown when the user tries to freeze a state before doing an initial run | 1.99053 | 2 |
lab2/T5/main.py | bronemos/design-patterns | 0 | 6623279 | from copy import deepcopy
from typing import Callable
import datetime
class Izvor:
    """Abstract source of integers ("izvor" = source in Croatian).

    Concrete subclasses must implement ``next``; -1 signals exhaustion
    (see the ``kreni`` loop of the consumer below and the sentinel
    appended by the file-backed source).
    """

    def next(self):
        # Return the next integer; -1 means no more values.
        raise NotImplementedError

    def attach_observer(self):
        raise NotImplementedError

    def detach_observer(self):
        raise NotImplementedError

    def update(self):
        raise NotImplementedError
class TargetClass:
    """Abstract observable whose observers read its state via ``get_state``."""

    def get_state(self):
        raise NotImplementedError
class Promatrac:
    """Observer ("promatrac") applying a callable to the subject's state.

    :param source: observed subject exposing ``get_state``
    :param action: callable invoked with the subject's current state
    """

    def __init__(self, source: TargetClass, action: Callable):
        self.__subject = source
        self.__callback = action

    def update(self):
        current_state = self.__subject.get_state()
        return self.__callback(current_state)
def write_to_file(path, data):
    """Overwrite *path* with '<current timestamp>,<data>'."""
    line = '{},{}'.format(datetime.datetime.now(), data)
    with open(path, 'w') as out:
        out.write(line)
def calculate_sum(sequence):
    """Print the sum of *sequence*."""
    total = sum(sequence)
    print('Sum: {}'.format(total))
def calcualte_mean(sequence):
    """Print the arithmetic mean of *sequence*.

    NOTE(review): the name is misspelled ("calcualte") but kept for
    compatibility with existing callers.
    """
    mean = sum(sequence) / len(sequence)
    print('Mean: {}'.format(mean))
def calculate_median(sequence):
    """Print the median of *sequence*.

    BUGFIX: the original picked index len//2 and unconditionally averaged
    it with its successor, which is wrong for odd-length input (it ignored
    the true middle element), wrong for even-length input (it averaged the
    upper two values instead of the two middle ones), crashed with
    IndexError on a single-element list, and never sorted the input.
    """
    ordered = sorted(sequence)
    mid = len(ordered) // 2
    if len(ordered) % 2:
        # odd length: the middle element (as float, matching the
        # original's float output format)
        median = float(ordered[mid])
    else:
        # even length: average of the two middle elements
        median = (ordered[mid - 1] + ordered[mid]) / 2
    print(f'Median: {median}')
class TipkovnickiIzvor(Izvor):
    """Keyboard source: reads one integer per call from standard input."""

    def next(self):
        # blocks for a line of input; non-numeric input raises ValueError
        return int(input())
class DatotecniIzvor(Izvor):
    """File source: serves the integers found in *path*, one per call.

    A -1 sentinel is appended so consumers terminate even when the file
    itself contains no -1.
    """

    def __init__(self, path):
        self.__cursor = -1
        with open(path) as source_file:
            self.__lines = [int(line) for line in source_file.readlines()]
        self.__lines.append(-1)

    def next(self):
        self.__cursor += 1
        return self.__lines[self.__cursor]
class SlijedBrojeva(TargetClass):
    """Observable sequence of numbers fed by an ``Izvor``.

    ``kreni`` pulls numbers from the source until the -1 sentinel and
    notifies the attached observers after each one.
    """

    def __init__(self, source: Izvor):
        self.__sequence = list()
        self.__source = source
        self.__observers = list()

    def get_state(self):
        # hand out a copy so observers cannot mutate the internal state
        return deepcopy(self.__sequence)

    def kreni(self):
        while (number := self.__source.next()) != -1:
            print(number)
            # BUGFIX: the number was never recorded, so observers always
            # received an empty sequence (and the mean/median observers
            # defined above would fail on it). Record it before notifying.
            self.__sequence.append(number)
            self.update()
        print('Terminating')

    def attach_observer(self, observer: Promatrac):
        self.__observers.append(observer)

    def detach_observer(self, observer: Promatrac):
        self.__observers.remove(observer)

    def update(self):
        # notify every attached observer of the new state
        for observer in self.__observers:
            observer.update()
def main():
    # Wire a keyboard-driven number sequence; no observers are attached,
    # so this just echoes typed numbers until -1 is entered.
    sequence = SlijedBrojeva(TipkovnickiIzvor())
    sequence.kreni()
if __name__ == '__main__':
main()
| from copy import deepcopy
from typing import Callable
import datetime
class Izvor:
def next(self):
raise NotImplementedError
def attach_observer(self):
raise NotImplementedError
def detach_observer(self):
raise NotImplementedError
def update(self):
raise NotImplementedError
class TargetClass:
def get_state(self):
raise NotImplementedError
class Promatrac:
def __init__(self, source: TargetClass, action: Callable):
self.__source = source
self.__action = action
def update(self):
return self.__action(self.__source.get_state())
def write_to_file(path, data):
with open(path, 'w') as f:
f.write(f'{datetime.datetime.now()},{data}')
def calculate_sum(sequence):
print(f'Sum: {sum(sequence)}')
def calcualte_mean(sequence):
print(f'Mean: {sum(sequence) / len(sequence)}')
def calculate_median(sequence):
    """Print the median of *sequence*.

    BUGFIX: the original picked index len//2 and unconditionally averaged
    it with its successor, which is wrong for odd-length input (it ignored
    the true middle element), wrong for even-length input (it averaged the
    upper two values instead of the two middle ones), crashed with
    IndexError on a single-element list, and never sorted the input.
    """
    ordered = sorted(sequence)
    mid = len(ordered) // 2
    if len(ordered) % 2:
        # odd length: the middle element (as float, matching the
        # original's float output format)
        median = float(ordered[mid])
    else:
        # even length: average of the two middle elements
        median = (ordered[mid - 1] + ordered[mid]) / 2
    print(f'Median: {median}')
class TipkovnickiIzvor(Izvor):
def next(self):
return int(input())
class DatotecniIzvor(Izvor):
def __init__(self, path):
self.__current_index = -1
with open(path) as f:
self.__lines = [int(x) for x in f.readlines()]
self.__lines.append(-1)
def next(self):
self.__current_index += 1
return self.__lines[self.__current_index]
class SlijedBrojeva(TargetClass):
    """Observable sequence of numbers fed by an ``Izvor``.

    ``kreni`` pulls numbers from the source until the -1 sentinel and
    notifies the attached observers after each one.
    """

    def __init__(self, source: Izvor):
        self.__sequence = list()
        self.__source = source
        self.__observers = list()

    def get_state(self):
        # hand out a copy so observers cannot mutate the internal state
        return deepcopy(self.__sequence)

    def kreni(self):
        while (number := self.__source.next()) != -1:
            print(number)
            # BUGFIX: the number was never recorded, so observers always
            # received an empty sequence (and the mean/median observers
            # defined above would fail on it). Record it before notifying.
            self.__sequence.append(number)
            self.update()
        print('Terminating')

    def attach_observer(self, observer: Promatrac):
        self.__observers.append(observer)

    def detach_observer(self, observer: Promatrac):
        self.__observers.remove(observer)

    def update(self):
        # notify every attached observer of the new state
        for observer in self.__observers:
            observer.update()
def main():
sequence = SlijedBrojeva(TipkovnickiIzvor())
sequence.kreni()
if __name__ == '__main__':
main()
| none | 1 | 2.869272 | 3 | |
basecrm/test/test_contacts_service.py | seedinvest/basecrm-python | 19 | 6623280 | <gh_stars>10-100
import unittest
import munch
import basecrm
from basecrm.test.testutils import BaseTestCase
class ContactsServiceTests(BaseTestCase):
    """Smoke tests for the contacts service: interface presence plus basic
    CRUD round-trips against the fixtures provided by ``BaseTestCase``."""

    def test_service_property_exists(self):
        self.assertTrue(hasattr(self.client, 'contacts'))

    def test_method_list_exists(self):
        self.assertTrue(callable(getattr(self.client.contacts, 'list', None)))

    def test_method_create_exists(self):
        self.assertTrue(callable(getattr(self.client.contacts, 'create', None)))

    def test_method_retrieve_exists(self):
        self.assertTrue(callable(getattr(self.client.contacts, 'retrieve', None)))

    def test_method_update_exists(self):
        self.assertTrue(callable(getattr(self.client.contacts, 'update', None)))

    def test_method_destroy_exists(self):
        self.assertTrue(callable(getattr(self.client.contacts, 'destroy', None)))

    def test_list(self):
        page = self.client.contacts.list(page=1)
        self.assertIsInstance(page, list)
        for item in page:
            self.assertIsInstance(item, munch.Munch)

    def test_create(self):
        self.assertIsInstance(self.contact, munch.Munch)
        self.assertGreaterEqual(len(self.contact), 1)

    def test_retrieve(self):
        fetched = self.client.contacts.retrieve(self.contact.id)
        self.assertIsInstance(fetched, munch.Munch)
        self.assertEqual(fetched.id, self.contact.id)

    def test_update(self):
        updated = self.client.contacts.update(self.contact.id, self.contact)
        self.assertIsInstance(updated, munch.Munch)
        self.assertGreaterEqual(len(updated), 1)

    def test_destroy(self):
        created = self.create_contact()
        self.assertTrue(self.client.contacts.destroy(created.id))
| import unittest
import munch
import basecrm
from basecrm.test.testutils import BaseTestCase
class ContactsServiceTests(BaseTestCase):
def test_service_property_exists(self):
self.assertTrue(hasattr(self.client, 'contacts'))
def test_method_list_exists(self):
self.assertTrue(hasattr(self.client.contacts, 'list') and callable(getattr(self.client.contacts, 'list')))
def test_method_create_exists(self):
self.assertTrue(hasattr(self.client.contacts, 'create') and callable(getattr(self.client.contacts, 'create')))
def test_method_retrieve_exists(self):
self.assertTrue(hasattr(self.client.contacts, 'retrieve') and callable(getattr(self.client.contacts, 'retrieve')))
def test_method_update_exists(self):
self.assertTrue(hasattr(self.client.contacts, 'update') and callable(getattr(self.client.contacts, 'update')))
def test_method_destroy_exists(self):
self.assertTrue(hasattr(self.client.contacts, 'destroy') and callable(getattr(self.client.contacts, 'destroy')))
def test_list(self):
contacts = self.client.contacts.list(page=1)
self.assertIsInstance(contacts, list)
for contact in contacts:
self.assertIsInstance(contact, munch.Munch)
def test_create(self):
self.assertIsInstance(self.contact, munch.Munch)
self.assertGreaterEqual(len(self.contact), 1)
def test_retrieve(self):
found_contact = self.client.contacts.retrieve(self.contact.id);
self.assertIsInstance(found_contact, munch.Munch);
self.assertEqual(found_contact.id, self.contact.id);
def test_update(self):
updated_contact = self.client.contacts.update(self.contact.id, self.contact)
self.assertIsInstance(updated_contact, munch.Munch)
self.assertGreaterEqual(len(updated_contact), 1)
def test_destroy(self):
new_contact = self.create_contact()
self.assertTrue(self.client.contacts.destroy(new_contact.id)) | none | 1 | 2.440259 | 2 | |
Code/Data_Cleaning/pipelines.py | gilnribeiro/Work-Project | 1 | 6623281 | <reponame>gilnribeiro/Work-Project
from pathlib import Path, PureWindowsPath
import pandas as pd
from data_cleaning_functions import *
def main():
    """Load the raw job-posting scrapes, run one cleaning pipeline per
    source site, merge everything into a single frame and persist it as
    ``full_data_clean.json``.

    NOTE(review): paths are hardcoded to a user-specific Windows folder;
    the pipeline steps (copy_df, replacenan, toDatetime, ...) come from
    the star import of ``data_cleaning_functions`` — their exact
    semantics are not visible here.
    """
    # Resolve the (hardcoded, Windows-specific) data folder.
    main_folder = PureWindowsPath("c:\\Users\\gilnr\\OneDrive - NOVASBE\\Work Project\\Code")
    MAIN_FOLDER = Path(main_folder)
    DATA_FOLDER = MAIN_FOLDER / "Data"
    # Raw scrapes, one JSON file per job board (most are JSON-lines).
    bons_empregos = pd.read_json(DATA_FOLDER / 'bons_empregos_jobs.json')
    career_jet = pd.read_json(DATA_FOLDER / 'career_jet_api.json', lines=True)
    carga_de_trabalhos = pd.read_json(DATA_FOLDER / 'CargaDeTrabalhos.json', lines=True)
    emprego_xl = pd.read_json(DATA_FOLDER / 'EmpregoXl.json', lines=True)
    emprego_org = pd.read_json(DATA_FOLDER / 'EmpregoOrg.json', lines=True)
    itjobs = pd.read_json(DATA_FOLDER / 'itjobs_api.json', lines=True)
    jooble = pd.read_json(DATA_FOLDER / 'jooble_api.json', lines=True)
    landing_jobs = pd.read_json(DATA_FOLDER / 'landingjobs_api.json', lines=True)
    net_empregos = pd.read_json(DATA_FOLDER / 'NetEmpregos.json', lines=True)
    # Bons Empregos
    def getPortugalLocation(dataframe):
        # Get only job offers in Portugal
        dataframe = dataframe.loc[dataframe['job_location'] != 'Estrangeiro'].copy()
        return dataframe
    bons_empregos_clean = (bons_empregos.
                           pipe(copy_df).
                           pipe(replacenan).
                           pipe(postDateFillNa).
                           pipe(dropNullJobs).
                           # pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']).
                           pipe(toDatetime, columns_list=['scrape_date'], dayfirst=True).
                           pipe(getPortugalLocation).
                           pipe(convertToDatetime, longToShortDate).
                           pipe(removeDupes)
                           )
    print(f'bons_empregos:\n Previous shape: {bons_empregos.shape}\nCurrent shape:{bons_empregos_clean.shape}\n Removed Duplicates: {len(bons_empregos)-len(bons_empregos_clean)}\n')
    # Career Jet
    # convert job location to list
    career_jet['job_location'] = career_jet['job_location'].apply(lambda x: x.split(','))
    career_jet_clean = (career_jet.
                        pipe(copy_df).
                        pipe(replacenan).
                        pipe(postDateFillNa).
                        pipe(dropNullJobs).
                        # pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']).
                        pipe(toDatetime, columns_list=['scrape_date', 'post_date'], dayfirst=True).
                        pipe(listToRows, 'job_location').
                        pipe(removeDupes)
                        )
    print(f'career_jet:\nPrevious shape: {career_jet.shape}\nCurrent shape:{career_jet_clean.shape}\n Removed Duplicates: {len(career_jet)-len(career_jet_clean)}\n')
    # Carga de Trabalhos
    carga_de_trabalhos_clean = (carga_de_trabalhos.
                                pipe(copy_df).
                                pipe(replacenan).
                                pipe(postDateFillNa).
                                pipe(dropNullJobs).
                                # pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']).
                                pipe(toDatetime, columns_list=['scrape_date'], dayfirst=True).
                                pipe(convertToDatetime, longToShortDate, '/').
                                pipe(removeDupes)
                                )
    print(f'carga_de_trabalhos:\nPrevious shape: {carga_de_trabalhos.shape}\nCurrent shape:{carga_de_trabalhos_clean.shape}\n Removed Duplicates: {len(carga_de_trabalhos)-len(carga_de_trabalhos_clean)}\n')
    # Emprego XL
    # NOTE(review): the bare pipe(applyFuncToColumn) below passes no function
    # or column list — confirm applyFuncToColumn has usable defaults.
    emprego_xl_clean = (emprego_xl.
                        pipe(copy_df).
                        pipe(replacenan).
                        pipe(postDateFillNa).
                        pipe(applyFuncToColumn).
                        pipe(pipeInvertDate).
                        pipe(dropNullJobs).
                        # pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']).
                        pipe(toDatetime, columns_list=['scrape_date', 'post_date'], dayfirst=True).
                        # # pipe(convertToDatetime, longToShortDate, '/').
                        pipe(removeDupes)
                        )
    print(f'emprego_xl:\nPrevious shape: {emprego_xl.shape}\nCurrent shape:{emprego_xl_clean.shape}\n Removed Duplicates: {len(emprego_xl)-len(emprego_xl_clean)}\n')
    # Emprego Org
    emprego_org_clean = (emprego_org.
                         pipe(copy_df).
                         pipe(replacenan).
                         pipe(postDateFillNa).
                         pipe(dropNullJobs).
                         # pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']).
                         pipe(postDatePreprocess, '/').
                         pipe(toDatetime, columns_list=['scrape_date'], dayfirst=True).
                         pipe(toDatetime, ['post_date']).
                         pipe(removeDupes)
                         )
    print(f'emprego_org:\nPrevious shape: {emprego_org.shape}\nCurrent shape:{emprego_org_clean.shape}\n Removed Duplicates: {len(emprego_org)-len(emprego_org_clean)}\n')
    # ITJobs
    def simplifyDate(x):
        # keep only the date part of a 'YYYY-MM-DD hh:mm:ss' string
        return dt.datetime.strptime(x.split(' ')[0], '%Y-%m-%d')
    itjobs_clean = (itjobs.
                    pipe(copy_df).
                    pipe(listToRows, 'job_location').
                    pipe(replacenan).
                    pipe(postDateFillNa).
                    pipe(dropNullJobs).
                    # pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']).
                    pipe(applyFuncToColumn, function=simplifyDate, columns_list=['post_date']).
                    pipe(toDatetime, columns_list=['scrape_date'], dayfirst=True).
                    pipe(toDatetime, ['post_date']).
                    # pipe(.apply(lambda x: dt.datetime.strftime('%Y-%m-%d'))).
                    pipe(removeDupes)
                    )
    print(f'itjobs:\nPrevious shape: {itjobs.shape}\nCurrent shape:{itjobs_clean.shape}\n Removed Duplicates: {len(itjobs)-len(itjobs_clean)}\n')
    # Jooble
    jooble_clean = (jooble.
                    pipe(copy_df).
                    pipe(replacenan).
                    pipe(postDateFillNa).
                    pipe(dropNullJobs).
                    # pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']).
                    pipe(toDatetime, columns_list=['scrape_date', 'post_date'], dayfirst=True).
                    pipe(removeTags, ['job_title']).
                    pipe(removeDupes)
                    )
    print(f'jooble:\nPrevious shape: {jooble.shape}\nCurrent shape:{jooble_clean.shape}\n Removed Duplicates: {len(jooble)-len(jooble_clean)}\n')
    # Landing Jobs IT
    landing_jobs_clean = (landing_jobs.
                          pipe(copy_df).
                          pipe(replacenan).
                          pipe(postDateFillNa).
                          pipe(dropNullJobs).
                          # pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']).
                          pipe(postDatePreprocess, 'T').
                          pipe(toDatetime, columns_list=['scrape_date'], dayfirst=True).
                          pipe(toDatetime, ['post_date']).
                          # pipe(removeTags, 'job_title').
                          pipe(removeDupes)
                          )
    print(f'landing_jobs:\nPrevious shape: {landing_jobs.shape}\nCurrent shape:{landing_jobs_clean.shape}\n Removed Duplicates: {len(landing_jobs)-len(landing_jobs_clean)}\n')
    # Net Empregos
    net_empregos_clean = (net_empregos.
                          pipe(copy_df).
                          pipe(replacenan).
                          pipe(pipeInvertDate).
                          pipe(postDateFillNa).
                          pipe(dropNullJobs).
                          # two pipes are needed beacause - for some reason, the function was not replacing some words it should
                          # pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']).
                          # pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']).
                          # pipe(cleanDescription, ['job_title']).
                          pipe(toDatetime, columns_list=['scrape_date', 'post_date'], dayfirst=True).
                          pipe(removeDupes)
                          )
    print(f'net_empregos:\nPrevious shape: {net_empregos.shape}\nCurrent shape:{net_empregos_clean.shape}\n Removed Duplicates: {len(net_empregos)-len(net_empregos_clean)}\n')
    # Add Website Identifier before concating all dataframes into a single one
    jobs_dfs = [bons_empregos_clean, career_jet_clean, carga_de_trabalhos_clean, emprego_xl_clean, emprego_org_clean, itjobs_clean, jooble_clean, landing_jobs_clean, net_empregos_clean]
    websites = ['Bons empregos', 'Career Jet', 'Carga de Trabalhos', 'Emprego XL', 'Emprego.org','ITjobs','Jooble','Landing Jobs','Net-empregos']
    # Add column with website name
    for idx, value in enumerate(jobs_dfs):
        value['website'] = websites[idx]
    # CONCAT DATAFRAMES AND DEFINE COLUMN ORDER
    neworder = ['job_title','job_description','company','job_location','job_category','salary', 'post_date', 'scrape_date','job_href', 'website']
    df = pd.concat([i.reindex(columns=neworder) for i in jobs_dfs])
    # Validate that the concatenation is happening properly
    assert len(df) == sum(len(i) for i in jobs_dfs)
    #######################################################
    ############# Clean the Main Dataframe ################
    #######################################################
    df_clean = (df.
                pipe(copy_df).
                pipe(replacenan).sort_values(by='post_date').
                pipe(postDateFillNa).
                pipe(dropNullJobs).
                # pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']).
                # pipe(cleanCompany).
                pipe(cleanDescription, ['job_title', 'job_description']).
                pipe(removeDupes, ['job_title', 'job_description','company', 'job_location'])
                )
    df_clean.reset_index(drop=True, inplace=True)
    print(f'Full_dataset:\nPrevious shape: {df.shape}\nCurrent shape:{df_clean.shape}\n Removed Duplicates: {len(df)-len(df_clean)}\n')
    #######################################################
    ############# Pass the Data Into JSON #################
    #######################################################
    with open(DATA_FOLDER / 'full_data_clean.json', 'w', encoding='utf-8') as file:
        df_clean.to_json(file, force_ascii=False, orient='records', date_format='iso', date_unit='s')
if __name__ == '__main__':
main() | from pathlib import Path, PureWindowsPath
import pandas as pd
from data_cleaning_functions import *
def main():
main_folder = PureWindowsPath("c:\\Users\\gilnr\\OneDrive - NOVASBE\\Work Project\\Code")
MAIN_FOLDER = Path(main_folder)
DATA_FOLDER = MAIN_FOLDER / "Data"
bons_empregos = pd.read_json(DATA_FOLDER / 'bons_empregos_jobs.json')
career_jet = pd.read_json(DATA_FOLDER / 'career_jet_api.json', lines=True)
carga_de_trabalhos = pd.read_json(DATA_FOLDER / 'CargaDeTrabalhos.json', lines=True)
emprego_xl = pd.read_json(DATA_FOLDER / 'EmpregoXl.json', lines=True)
emprego_org = pd.read_json(DATA_FOLDER / 'EmpregoOrg.json', lines=True)
itjobs = pd.read_json(DATA_FOLDER / 'itjobs_api.json', lines=True)
jooble = pd.read_json(DATA_FOLDER / 'jooble_api.json', lines=True)
landing_jobs = pd.read_json(DATA_FOLDER / 'landingjobs_api.json', lines=True)
net_empregos = pd.read_json(DATA_FOLDER / 'NetEmpregos.json', lines=True)
# Bons Empregos
def getPortugalLocation(dataframe):
# Get only job offers in Portugal
dataframe = dataframe.loc[dataframe['job_location'] != 'Estrangeiro'].copy()
return dataframe
bons_empregos_clean = (bons_empregos.
pipe(copy_df).
pipe(replacenan).
pipe(postDateFillNa).
pipe(dropNullJobs).
# pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']).
pipe(toDatetime, columns_list=['scrape_date'], dayfirst=True).
pipe(getPortugalLocation).
pipe(convertToDatetime, longToShortDate).
pipe(removeDupes)
)
print(f'bons_empregos:\n Previous shape: {bons_empregos.shape}\nCurrent shape:{bons_empregos_clean.shape}\n Removed Duplicates: {len(bons_empregos)-len(bons_empregos_clean)}\n')
# Career Jet
# convert job location to list
career_jet['job_location'] = career_jet['job_location'].apply(lambda x: x.split(','))
career_jet_clean = (career_jet.
pipe(copy_df).
pipe(replacenan).
pipe(postDateFillNa).
pipe(dropNullJobs).
# pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']).
pipe(toDatetime, columns_list=['scrape_date', 'post_date'], dayfirst=True).
pipe(listToRows, 'job_location').
pipe(removeDupes)
)
print(f'career_jet:\nPrevious shape: {career_jet.shape}\nCurrent shape:{career_jet_clean.shape}\n Removed Duplicates: {len(career_jet)-len(career_jet_clean)}\n')
# Carga de Trabalhos
carga_de_trabalhos_clean = (carga_de_trabalhos.
pipe(copy_df).
pipe(replacenan).
pipe(postDateFillNa).
pipe(dropNullJobs).
# pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']).
pipe(toDatetime, columns_list=['scrape_date'], dayfirst=True).
pipe(convertToDatetime, longToShortDate, '/').
pipe(removeDupes)
)
print(f'carga_de_trabalhos:\nPrevious shape: {carga_de_trabalhos.shape}\nCurrent shape:{carga_de_trabalhos_clean.shape}\n Removed Duplicates: {len(carga_de_trabalhos)-len(carga_de_trabalhos_clean)}\n')
# Emprego XL
emprego_xl_clean = (emprego_xl.
pipe(copy_df).
pipe(replacenan).
pipe(postDateFillNa).
pipe(applyFuncToColumn).
pipe(pipeInvertDate).
pipe(dropNullJobs).
# pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']).
pipe(toDatetime, columns_list=['scrape_date', 'post_date'], dayfirst=True).
# # pipe(convertToDatetime, longToShortDate, '/').
pipe(removeDupes)
)
print(f'emprego_xl:\nPrevious shape: {emprego_xl.shape}\nCurrent shape:{emprego_xl_clean.shape}\n Removed Duplicates: {len(emprego_xl)-len(emprego_xl_clean)}\n')
# Emprego Org
emprego_org_clean = (emprego_org.
pipe(copy_df).
pipe(replacenan).
pipe(postDateFillNa).
pipe(dropNullJobs).
# pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']).
pipe(postDatePreprocess, '/').
pipe(toDatetime, columns_list=['scrape_date'], dayfirst=True).
pipe(toDatetime, ['post_date']).
pipe(removeDupes)
)
print(f'emprego_org:\nPrevious shape: {emprego_org.shape}\nCurrent shape:{emprego_org_clean.shape}\n Removed Duplicates: {len(emprego_org)-len(emprego_org_clean)}\n')
# ITJobs
def simplifyDate(x):
return dt.datetime.strptime(x.split(' ')[0], '%Y-%m-%d')
itjobs_clean = (itjobs.
pipe(copy_df).
pipe(listToRows, 'job_location').
pipe(replacenan).
pipe(postDateFillNa).
pipe(dropNullJobs).
# pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']).
pipe(applyFuncToColumn, function=simplifyDate, columns_list=['post_date']).
pipe(toDatetime, columns_list=['scrape_date'], dayfirst=True).
pipe(toDatetime, ['post_date']).
# pipe(.apply(lambda x: dt.datetime.strftime('%Y-%m-%d'))).
pipe(removeDupes)
)
print(f'itjobs:\nPrevious shape: {itjobs.shape}\nCurrent shape:{itjobs_clean.shape}\n Removed Duplicates: {len(itjobs)-len(itjobs_clean)}\n')
# Jooble
jooble_clean = (jooble.
pipe(copy_df).
pipe(replacenan).
pipe(postDateFillNa).
pipe(dropNullJobs).
# pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']).
pipe(toDatetime, columns_list=['scrape_date', 'post_date'], dayfirst=True).
pipe(removeTags, ['job_title']).
pipe(removeDupes)
)
print(f'jooble:\nPrevious shape: {jooble.shape}\nCurrent shape:{jooble_clean.shape}\n Removed Duplicates: {len(jooble)-len(jooble_clean)}\n')
# Landing Jobs IT
landing_jobs_clean = (landing_jobs.
pipe(copy_df).
pipe(replacenan).
pipe(postDateFillNa).
pipe(dropNullJobs).
# pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']).
pipe(postDatePreprocess, 'T').
pipe(toDatetime, columns_list=['scrape_date'], dayfirst=True).
pipe(toDatetime, ['post_date']).
# pipe(removeTags, 'job_title').
pipe(removeDupes)
)
print(f'landing_jobs:\nPrevious shape: {landing_jobs.shape}\nCurrent shape:{landing_jobs_clean.shape}\n Removed Duplicates: {len(landing_jobs)-len(landing_jobs_clean)}\n')
# Net Empregos
net_empregos_clean = (net_empregos.
pipe(copy_df).
pipe(replacenan).
pipe(pipeInvertDate).
pipe(postDateFillNa).
pipe(dropNullJobs).
# two pipes are needed beacause - for some reason, the function was not replacing some words it should
# pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']).
# pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']).
# pipe(cleanDescription, ['job_title']).
pipe(toDatetime, columns_list=['scrape_date', 'post_date'], dayfirst=True).
pipe(removeDupes)
)
print(f'net_empregos:\nPrevious shape: {net_empregos.shape}\nCurrent shape:{net_empregos_clean.shape}\n Removed Duplicates: {len(net_empregos)-len(net_empregos_clean)}\n')
# Add Website Identifier before concating all dataframes into a single one
jobs_dfs = [bons_empregos_clean, career_jet_clean, carga_de_trabalhos_clean, emprego_xl_clean, emprego_org_clean, itjobs_clean, jooble_clean, landing_jobs_clean, net_empregos_clean]
websites = ['Bons empregos', 'Career Jet', 'Carga de Trabalhos', 'Emprego XL', 'Emprego.org','ITjobs','Jooble','Landing Jobs','Net-empregos']
# Add column with website name
for idx, value in enumerate(jobs_dfs):
value['website'] = websites[idx]
# CONCAT DATAFRAMES AND DEFINE COLUMN ORDER
neworder = ['job_title','job_description','company','job_location','job_category','salary', 'post_date', 'scrape_date','job_href', 'website']
df = pd.concat([i.reindex(columns=neworder) for i in jobs_dfs])
# Validate that the concatenation is happening properly
assert len(df) == sum(len(i) for i in jobs_dfs)
#######################################################
############# Clean the Main Dataframe ################
#######################################################
df_clean = (df.
pipe(copy_df).
pipe(replacenan).sort_values(by='post_date').
pipe(postDateFillNa).
pipe(dropNullJobs).
# pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']).
# pipe(cleanCompany).
pipe(cleanDescription, ['job_title', 'job_description']).
pipe(removeDupes, ['job_title', 'job_description','company', 'job_location'])
)
df_clean.reset_index(drop=True, inplace=True)
print(f'Full_dataset:\nPrevious shape: {df.shape}\nCurrent shape:{df_clean.shape}\n Removed Duplicates: {len(df)-len(df_clean)}\n')
#######################################################
############# Pass the Data Into JSON #################
#######################################################
with open(DATA_FOLDER / 'full_data_clean.json', 'w', encoding='utf-8') as file:
df_clean.to_json(file, force_ascii=False, orient='records', date_format='iso', date_unit='s')
if __name__ == '__main__':
main() | en | 0.32675 | # Bons Empregos # Get only job offers in Portugal # pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']). # Career Jet # convert job location to list # pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']). # Carga de Trabalhos # pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']). # Emprego XL # pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']). # # pipe(convertToDatetime, longToShortDate, '/'). # Emprego Org # pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']). # ITJobs # pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']). # pipe(.apply(lambda x: dt.datetime.strftime('%Y-%m-%d'))). # Jooble # pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']). # Landing Jobs IT # pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']). # pipe(removeTags, 'job_title'). # Net Empregos # two pipes are needed beacause - for some reason, the function was not replacing some words it should # pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']). # pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']). # pipe(cleanDescription, ['job_title']). # Add Website Identifier before concating all dataframes into a single one # Add column with website name # CONCAT DATAFRAMES AND DEFINE COLUMN ORDER # Validate that the concatenation is happening properly ####################################################### ############# Clean the Main Dataframe ################ ####################################################### # pipe(applyFuncToColumn, function=cleanJobTitle, columns_list=['job_title']). # pipe(cleanCompany). ####################################################### ############# Pass the Data Into JSON ################# ####################################################### | 2.516128 | 3 |
src/normality_indexes.py | gmum/MoW | 0 | 6623282 | <reponame>gmum/MoW<gh_stars>0
import tensorflow as tf
import numpy as np
from rec_errors import euclidean_norm_squared
# CWAE
def silverman_rule_of_thumb(N: int):
return tf.pow(4/(3*N), 0.4)
def cw(X):
D = tf.cast(tf.shape(X)[1], tf.float32)
N = tf.cast(tf.shape(X)[0], tf.float32)
y = silverman_rule_of_thumb(N)
K = 1/(2*D-3)
A1 = euclidean_norm_squared(tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(X, 1)), axis=2)
A = (1/(N**2)) * tf.reduce_sum((1/tf.sqrt(y + K*A1)))
B1 = euclidean_norm_squared(X, axis=1)
B = (2/N)*tf.reduce_sum((1/tf.sqrt(y + 0.5 + K*B1)))
return (1/tf.sqrt(1+y)) + A - B
def log_cw(X):
return tf.log(cw(X))
# WAE-MMD
def mmd_penalty(sample_qz, sample_pz):
n = tf.cast(tf.shape(sample_qz)[0], tf.float32)
d = tf.cast(tf.shape(sample_qz)[1], tf.float32)
n = tf.cast(n, tf.int32)
nf = tf.cast(n, tf.float32)
sigma2_p = 1. ** 2
norms_pz = tf.reduce_sum(tf.square(sample_pz), axis=1, keepdims=True)
distances_pz = norms_pz + tf.transpose(norms_pz) - 2. * tf.matmul(sample_pz, sample_pz, transpose_b=True)
norms_qz = tf.reduce_sum(tf.square(sample_qz), axis=1, keepdims=True)
distances_qz = norms_qz + tf.transpose(norms_qz) - 2. * tf.matmul(sample_qz, sample_qz, transpose_b=True)
dotprods = tf.matmul(sample_qz, sample_pz, transpose_b=True)
distances = norms_qz + tf.transpose(norms_pz) - 2. * dotprods
Cbase = 2. * d * sigma2_p
stat = 0.
TempSubtract = 1. - tf.eye(n)
for scale in [.1, .2, .5, 1., 2., 5., 10.]:
C = Cbase * scale
res1 = C / (C + distances_qz) + C / (C + distances_pz)
res1 = tf.multiply(res1, TempSubtract)
res1 = tf.reduce_sum(res1) / (nf * nf - nf)
res2 = C / (C + distances)
res2 = tf.reduce_sum(res2) * 2. / (nf * nf)
stat += res1 - res2
return stat
def wae_normality_index(Z: tf.Tensor, z_dim: int):
dist = tf.distributions.Normal(np.zeros(z_dim, dtype=np.float32), np.ones(z_dim, dtype=np.float32))
tensor_input_latent_sample = dist.sample(tf.shape(Z)[0])
return mmd_penalty(Z, tensor_input_latent_sample)
# SWAE
def swae_normality_index_inner(projected_latent, theta, z_dim):
n = tf.cast(tf.shape(projected_latent)[0], tf.int32)
dist = tf.distributions.Normal(np.zeros(z_dim, dtype=np.float32), np.ones(z_dim, dtype=np.float32))
sample = dist.sample(n)
projz = tf.keras.backend.dot(sample, tf.transpose(theta))
transposed_projected_latent = tf.transpose(projected_latent)
transpose_projected_sample = tf.transpose(projz)
W2 = (tf.nn.top_k(transposed_projected_latent, k=n).values -
tf.nn.top_k(transpose_projected_sample, k=n).values)**2
return W2
def swae_normality_index(Z: tf.Tensor, z_dim: int):
randomed_normal = tf.random_normal(shape=(50, z_dim))
theta = randomed_normal / tf.reshape(tf.sqrt(tf.reduce_sum(tf.square(randomed_normal), axis=1)), (-1, 1))
projae = tf.keras.backend.dot(Z, tf.transpose(theta))
normality_test_result = swae_normality_index_inner(projae, theta, z_dim)
return tf.reduce_mean(normality_test_result)
| import tensorflow as tf
import numpy as np
from rec_errors import euclidean_norm_squared
# CWAE
def silverman_rule_of_thumb(N: int):
return tf.pow(4/(3*N), 0.4)
def cw(X):
D = tf.cast(tf.shape(X)[1], tf.float32)
N = tf.cast(tf.shape(X)[0], tf.float32)
y = silverman_rule_of_thumb(N)
K = 1/(2*D-3)
A1 = euclidean_norm_squared(tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(X, 1)), axis=2)
A = (1/(N**2)) * tf.reduce_sum((1/tf.sqrt(y + K*A1)))
B1 = euclidean_norm_squared(X, axis=1)
B = (2/N)*tf.reduce_sum((1/tf.sqrt(y + 0.5 + K*B1)))
return (1/tf.sqrt(1+y)) + A - B
def log_cw(X):
return tf.log(cw(X))
# WAE-MMD
def mmd_penalty(sample_qz, sample_pz):
n = tf.cast(tf.shape(sample_qz)[0], tf.float32)
d = tf.cast(tf.shape(sample_qz)[1], tf.float32)
n = tf.cast(n, tf.int32)
nf = tf.cast(n, tf.float32)
sigma2_p = 1. ** 2
norms_pz = tf.reduce_sum(tf.square(sample_pz), axis=1, keepdims=True)
distances_pz = norms_pz + tf.transpose(norms_pz) - 2. * tf.matmul(sample_pz, sample_pz, transpose_b=True)
norms_qz = tf.reduce_sum(tf.square(sample_qz), axis=1, keepdims=True)
distances_qz = norms_qz + tf.transpose(norms_qz) - 2. * tf.matmul(sample_qz, sample_qz, transpose_b=True)
dotprods = tf.matmul(sample_qz, sample_pz, transpose_b=True)
distances = norms_qz + tf.transpose(norms_pz) - 2. * dotprods
Cbase = 2. * d * sigma2_p
stat = 0.
TempSubtract = 1. - tf.eye(n)
for scale in [.1, .2, .5, 1., 2., 5., 10.]:
C = Cbase * scale
res1 = C / (C + distances_qz) + C / (C + distances_pz)
res1 = tf.multiply(res1, TempSubtract)
res1 = tf.reduce_sum(res1) / (nf * nf - nf)
res2 = C / (C + distances)
res2 = tf.reduce_sum(res2) * 2. / (nf * nf)
stat += res1 - res2
return stat
def wae_normality_index(Z: tf.Tensor, z_dim: int):
dist = tf.distributions.Normal(np.zeros(z_dim, dtype=np.float32), np.ones(z_dim, dtype=np.float32))
tensor_input_latent_sample = dist.sample(tf.shape(Z)[0])
return mmd_penalty(Z, tensor_input_latent_sample)
# SWAE
def swae_normality_index_inner(projected_latent, theta, z_dim):
n = tf.cast(tf.shape(projected_latent)[0], tf.int32)
dist = tf.distributions.Normal(np.zeros(z_dim, dtype=np.float32), np.ones(z_dim, dtype=np.float32))
sample = dist.sample(n)
projz = tf.keras.backend.dot(sample, tf.transpose(theta))
transposed_projected_latent = tf.transpose(projected_latent)
transpose_projected_sample = tf.transpose(projz)
W2 = (tf.nn.top_k(transposed_projected_latent, k=n).values -
tf.nn.top_k(transpose_projected_sample, k=n).values)**2
return W2
def swae_normality_index(Z: tf.Tensor, z_dim: int):
randomed_normal = tf.random_normal(shape=(50, z_dim))
theta = randomed_normal / tf.reshape(tf.sqrt(tf.reduce_sum(tf.square(randomed_normal), axis=1)), (-1, 1))
projae = tf.keras.backend.dot(Z, tf.transpose(theta))
normality_test_result = swae_normality_index_inner(projae, theta, z_dim)
return tf.reduce_mean(normality_test_result) | en | 0.5171 | # CWAE # WAE-MMD # SWAE | 2.008613 | 2 |
python/logger.py | bdastur/utils | 1 | 6623283 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Logging utility.
'''
import logging
import common
from ConfigParser import SafeConfigParser
class LoggerConfig(object):
'''
Class to handle logger config.
'''
def __init__(self, logconfig):
'''
Initialize Logger config.
'''
self.logfile = common.get_absolute_path_for_file(logconfig)
self.cfgparser = SafeConfigParser()
reslist = self.cfgparser.read(self.logfile)
if len(reslist) == 0:
print "No log file provided"
class Logger(object):
'''
Logger class.
'''
def __init__(self, name=__name__, level="debug",
filename="/dev/null", logconfig=None,
verbose=True, logformat=None):
'''
Initialize logger class.
'''
if logformat is None:
logformat = "[%(levelname)s %(asctime)s]"\
"[%(process)d " + name + "]" \
"%(message)s"
datefmt = '%I:%M:%S %p'
if not logging.getLogger(name):
logging.basicConfig(filename=filename,
format=logformat,
datefmt=datefmt)
self.logger = logging.getLogger(name)
if not self.logger.handlers:
# Format.
formatter = logging.Formatter(logformat,
datefmt=datefmt)
filehdl = logging.FileHandler(filename)
filehdl.setFormatter(formatter)
self.logger.addHandler(filehdl)
if verbose is True:
console = logging.StreamHandler()
console.setFormatter(formatter)
self.logger.addHandler(console)
def get_logger(self):
'''
Return the logger
'''
return self.logger
# --- begin "pretty"
#
# pretty - A miniature library that provides a Python print and stdout
# wrapper that makes colored terminal text easier to use (e.g. without
# having to mess around with ANSI escape sequences). This code is public
# domain - there is no license except that you must leave this header.
#
# Copyright (C) 2008 <NAME> <thedude at bri1 dot com>
#
# http://nezzen.net/2008/06/23/colored-text-in-python-using-ansi-escape-sequences/
codeCodes = {
'black': '0;30', 'bright gray': '0;37',
'blue': '0;34', 'white': '1;37',
'green': '0;32', 'bright blue': '1;34',
'cyan': '0;36', 'bright green': '1;32',
'red': '0;31', 'bright cyan': '1;36',
'purple': '0;35', 'bright red': '1;31',
'yellow': '0;33', 'bright purple': '1;35',
'dark gray': '1;30', 'bright yellow': '1;33',
'normal': '0'
}
def stringc(text, color):
"""String in color."""
return "\033["+codeCodes[color]+"m"+text+"\033[0m"
# --- end "pretty"
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Logging utility.
'''
import logging
import common
from ConfigParser import SafeConfigParser
class LoggerConfig(object):
'''
Class to handle logger config.
'''
def __init__(self, logconfig):
'''
Initialize Logger config.
'''
self.logfile = common.get_absolute_path_for_file(logconfig)
self.cfgparser = SafeConfigParser()
reslist = self.cfgparser.read(self.logfile)
if len(reslist) == 0:
print "No log file provided"
class Logger(object):
'''
Logger class.
'''
def __init__(self, name=__name__, level="debug",
filename="/dev/null", logconfig=None,
verbose=True, logformat=None):
'''
Initialize logger class.
'''
if logformat is None:
logformat = "[%(levelname)s %(asctime)s]"\
"[%(process)d " + name + "]" \
"%(message)s"
datefmt = '%I:%M:%S %p'
if not logging.getLogger(name):
logging.basicConfig(filename=filename,
format=logformat,
datefmt=datefmt)
self.logger = logging.getLogger(name)
if not self.logger.handlers:
# Format.
formatter = logging.Formatter(logformat,
datefmt=datefmt)
filehdl = logging.FileHandler(filename)
filehdl.setFormatter(formatter)
self.logger.addHandler(filehdl)
if verbose is True:
console = logging.StreamHandler()
console.setFormatter(formatter)
self.logger.addHandler(console)
def get_logger(self):
'''
Return the logger
'''
return self.logger
# --- begin "pretty"
#
# pretty - A miniature library that provides a Python print and stdout
# wrapper that makes colored terminal text easier to use (e.g. without
# having to mess around with ANSI escape sequences). This code is public
# domain - there is no license except that you must leave this header.
#
# Copyright (C) 2008 <NAME> <thedude at bri1 dot com>
#
# http://nezzen.net/2008/06/23/colored-text-in-python-using-ansi-escape-sequences/
codeCodes = {
'black': '0;30', 'bright gray': '0;37',
'blue': '0;34', 'white': '1;37',
'green': '0;32', 'bright blue': '1;34',
'cyan': '0;36', 'bright green': '1;32',
'red': '0;31', 'bright cyan': '1;36',
'purple': '0;35', 'bright red': '1;31',
'yellow': '0;33', 'bright purple': '1;35',
'dark gray': '1;30', 'bright yellow': '1;33',
'normal': '0'
}
def stringc(text, color):
"""String in color."""
return "\033["+codeCodes[color]+"m"+text+"\033[0m"
# --- end "pretty" | en | 0.754949 | #!/usr/bin/env python # -*- coding: utf-8 -*- Logging utility. Class to handle logger config. Initialize Logger config. Logger class. Initialize logger class. # Format. Return the logger # --- begin "pretty" # # pretty - A miniature library that provides a Python print and stdout # wrapper that makes colored terminal text easier to use (e.g. without # having to mess around with ANSI escape sequences). This code is public # domain - there is no license except that you must leave this header. # # Copyright (C) 2008 <NAME> <thedude at bri1 dot com> # # http://nezzen.net/2008/06/23/colored-text-in-python-using-ansi-escape-sequences/ String in color. # --- end "pretty" | 2.489188 | 2 |
manage_single.py | cponecp/iHone | 1 | 6623284 | <filename>manage_single.py
# coding:utf-8
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_session import Session
from flask_wtf import CSRFProtect
import redis
# 创建flask的应用对象
app = Flask(__name__)
class Config(object):
"""配置信息"""
SECRET_KEY = "<KEY>"
# 数据库
SQLALCHEMY_DATABASE_URI = "mysql://root:mysql@127.0.0.1:3306/ihome_python04"
SQLALCHEMY_TRACK_MODIFICATIONS = True
# redis
REDIS_HOST = "127.0.0.1"
REDIS_PORT = 6379
# flask-session配置
SESSION_TYPE = "redis"
SESSION_REDIS = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT)
SESSION_USE_SIGNER = True # 对cookie中session_id进行隐藏处理
PERMANENT_SESSION_LIFETIME = 86400 # session数据的有效期,单位秒
app.config.from_object(Config)
db = SQLAlchemy(app)
# 创建redis连接对象
redis_store = redis.StrictRedis(host=Config.REDIS_HOST, port=Config.REDIS_PORT)
# 利用flask-session,将session数据保存到redis中
Session(app)
# 为flask补充csrf防护
CSRFProtect(app)
@app.route("/index")
def index():
return "index page"
if __name__ == '__main__':
app.run()
| <filename>manage_single.py
# coding:utf-8
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_session import Session
from flask_wtf import CSRFProtect
import redis
# 创建flask的应用对象
app = Flask(__name__)
class Config(object):
"""配置信息"""
SECRET_KEY = "<KEY>"
# 数据库
SQLALCHEMY_DATABASE_URI = "mysql://root:mysql@127.0.0.1:3306/ihome_python04"
SQLALCHEMY_TRACK_MODIFICATIONS = True
# redis
REDIS_HOST = "127.0.0.1"
REDIS_PORT = 6379
# flask-session配置
SESSION_TYPE = "redis"
SESSION_REDIS = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT)
SESSION_USE_SIGNER = True # 对cookie中session_id进行隐藏处理
PERMANENT_SESSION_LIFETIME = 86400 # session数据的有效期,单位秒
app.config.from_object(Config)
db = SQLAlchemy(app)
# 创建redis连接对象
redis_store = redis.StrictRedis(host=Config.REDIS_HOST, port=Config.REDIS_PORT)
# 利用flask-session,将session数据保存到redis中
Session(app)
# 为flask补充csrf防护
CSRFProtect(app)
@app.route("/index")
def index():
return "index page"
if __name__ == '__main__':
app.run()
| zh | 0.823182 | # coding:utf-8 # 创建flask的应用对象 配置信息 # 数据库 # redis # flask-session配置 # 对cookie中session_id进行隐藏处理 # session数据的有效期,单位秒 # 创建redis连接对象 # 利用flask-session,将session数据保存到redis中 # 为flask补充csrf防护 | 2.339221 | 2 |
library/calelib/migrations/0033_auto_20180620_2346.py | IAmDrozdov/task-tracker | 0 | 6623285 | <gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-20 20:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('calelib', '0032_customer_shared_tasks'),
]
operations = [
migrations.RemoveField(
model_name='task',
name='performers',
),
migrations.AlterField(
model_name='customer',
name='shared_tasks',
field=models.ManyToManyField(related_name='performers', to='calelib.Task'),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-20 20:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('calelib', '0032_customer_shared_tasks'),
]
operations = [
migrations.RemoveField(
model_name='task',
name='performers',
),
migrations.AlterField(
model_name='customer',
name='shared_tasks',
field=models.ManyToManyField(related_name='performers', to='calelib.Task'),
),
] | en | 0.639901 | # -*- coding: utf-8 -*- # Generated by Django 1.11.13 on 2018-06-20 20:46 | 1.46278 | 1 |
core/pages/video_player_page.py | artembashlak/share-youtube-to-mail | 0 | 6623286 | <filename>core/pages/video_player_page.py<gh_stars>0
from selenium.webdriver.common.by import By
from core.pages.base_page import BasePage
SHARE_BUTTON = (By.CSS_SELECTOR, "#menu-container .size-default:nth-child(3)")
COPY_BUTTON = (By.CSS_SELECTOR, "#copy-button #text")
class VideoPlayerPage(BasePage):
def click_button(self):
pass
def copy_to_clipboard(self):
pass
| <filename>core/pages/video_player_page.py<gh_stars>0
from selenium.webdriver.common.by import By
from core.pages.base_page import BasePage
SHARE_BUTTON = (By.CSS_SELECTOR, "#menu-container .size-default:nth-child(3)")
COPY_BUTTON = (By.CSS_SELECTOR, "#copy-button #text")
class VideoPlayerPage(BasePage):
def click_button(self):
pass
def copy_to_clipboard(self):
pass
| none | 1 | 2.153937 | 2 | |
tests/unit/auto_ml/test_modelling.py | Amplo-GmbH/AutoML | 5 | 6623287 | <filename>tests/unit/auto_ml/test_modelling.py
import pytest
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.datasets import make_regression
from Amplo.AutoML import Modeller
from tests import rmtree
@pytest.fixture(scope='class', params=['classification', 'regression'])
def make_mode(request):
mode = request.param
if mode == 'classification':
x, y = make_classification()
objective = 'neg_log_loss'
elif mode == 'regression':
x, y = make_regression()
objective = 'r2'
else:
raise ValueError('Invalid mode')
request.cls.mode = mode
request.cls.objective = objective
request.cls.x = pd.DataFrame(x)
request.cls.y = pd.Series(y)
yield
@pytest.mark.usefixtures('make_mode')
class TestModelling:
folder = 'tmp/'
@pytest.fixture(autouse=True)
def teardown(self):
yield
rmtree(self.folder)
def test_modeller(self):
mod = Modeller(mode=self.mode, objective=self.objective, folder=self.folder)
mod.fit(self.x, self.y)
# Tests
assert isinstance(mod.results, pd.DataFrame), 'Results should be type pd.DataFrame'
assert len(mod.results) != 0, 'Results empty'
assert mod.results['mean_objective'].max() < 1, 'R2 needs to be smaller than 1'
assert not mod.results['mean_objective'].isna().any(), "Mean Objective shouldn't contain NaN"
assert not mod.results['std_objective'].isna().any(), "Std Objective shouldn't contain NaN"
assert not mod.results['mean_time'].isna().any(), "Mean time shouldn't contain NaN"
assert not mod.results['std_time'].isna().any(), "Std time shouldn't contain NaN"
assert 'date' in mod.results.keys()
assert 'model' in mod.results.keys()
assert 'dataset' in mod.results.keys()
assert 'params' in mod.results.keys()
@pytest.mark.parametrize('n_samples', [100, 100_000])
def test_return(self, n_samples):
Modeller(mode=self.mode, samples=n_samples).return_models()
| <filename>tests/unit/auto_ml/test_modelling.py
import pytest
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.datasets import make_regression
from Amplo.AutoML import Modeller
from tests import rmtree
@pytest.fixture(scope='class', params=['classification', 'regression'])
def make_mode(request):
mode = request.param
if mode == 'classification':
x, y = make_classification()
objective = 'neg_log_loss'
elif mode == 'regression':
x, y = make_regression()
objective = 'r2'
else:
raise ValueError('Invalid mode')
request.cls.mode = mode
request.cls.objective = objective
request.cls.x = pd.DataFrame(x)
request.cls.y = pd.Series(y)
yield
@pytest.mark.usefixtures('make_mode')
class TestModelling:
folder = 'tmp/'
@pytest.fixture(autouse=True)
def teardown(self):
yield
rmtree(self.folder)
def test_modeller(self):
mod = Modeller(mode=self.mode, objective=self.objective, folder=self.folder)
mod.fit(self.x, self.y)
# Tests
assert isinstance(mod.results, pd.DataFrame), 'Results should be type pd.DataFrame'
assert len(mod.results) != 0, 'Results empty'
assert mod.results['mean_objective'].max() < 1, 'R2 needs to be smaller than 1'
assert not mod.results['mean_objective'].isna().any(), "Mean Objective shouldn't contain NaN"
assert not mod.results['std_objective'].isna().any(), "Std Objective shouldn't contain NaN"
assert not mod.results['mean_time'].isna().any(), "Mean time shouldn't contain NaN"
assert not mod.results['std_time'].isna().any(), "Std time shouldn't contain NaN"
assert 'date' in mod.results.keys()
assert 'model' in mod.results.keys()
assert 'dataset' in mod.results.keys()
assert 'params' in mod.results.keys()
@pytest.mark.parametrize('n_samples', [100, 100_000])
def test_return(self, n_samples):
Modeller(mode=self.mode, samples=n_samples).return_models()
| none | 1 | 2.549843 | 3 | |
python/test-integration/helper.py | mbloch1986/swagger-aem | 39 | 6623288 | <gh_stars>10-100
import swaggeraem
import swaggeraem.configuration
def init_client():
swaggeraem.configuration.username = 'admin'
swaggeraem.configuration.password = '<PASSWORD>'
return swaggeraem.ApiClient('http://localhost:4502')
| import swaggeraem
import swaggeraem.configuration
def init_client():
swaggeraem.configuration.username = 'admin'
swaggeraem.configuration.password = '<PASSWORD>'
return swaggeraem.ApiClient('http://localhost:4502') | none | 1 | 1.886841 | 2 | |
KillSmartDoor.py | AdamCaviness/SmartDoor | 0 | 6623289 | <gh_stars>0
import os
import sys
import signal
import MonitorSmartDoor
def kill():
""" Gracefully stops all SmartDoor processes.
"""
process_count = 0
if MonitorSmartDoor.is_running(False):
processes = MonitorSmartDoor.get_processes()
for line in processes.splitlines():
process_count += 1
line = bytes.decode(line)
print("Killing SmartDoor process: {0}".format(line))
pid = int(line.split(None, 1)[0])
os.kill(pid, signal.SIGTERM)
if process_count > 0:
print("{0} SmartDoor processes killed".format(process_count))
else:
print('There were no SmartDoor processes running')
sys.exit()
if __name__ == '__main__':
# Being executed as a script
kill() | import os
import sys
import signal
import MonitorSmartDoor
def kill():
""" Gracefully stops all SmartDoor processes.
"""
process_count = 0
if MonitorSmartDoor.is_running(False):
processes = MonitorSmartDoor.get_processes()
for line in processes.splitlines():
process_count += 1
line = bytes.decode(line)
print("Killing SmartDoor process: {0}".format(line))
pid = int(line.split(None, 1)[0])
os.kill(pid, signal.SIGTERM)
if process_count > 0:
print("{0} SmartDoor processes killed".format(process_count))
else:
print('There were no SmartDoor processes running')
sys.exit()
if __name__ == '__main__':
# Being executed as a script
kill() | en | 0.931539 | Gracefully stops all SmartDoor processes. # Being executed as a script | 2.938209 | 3 |
botnet/modules/lib/network.py | admdev8/botnet-2 | 69 | 6623290 | <filename>botnet/modules/lib/network.py
"""
Contains network related utilities which can be used by the modules, for
example to query various online APIs.
"""
import requests
# User agent used while performing requests (429 status codes can be encountered
# when using the default user agent)
_uagent = 'Mozilla/5.0 (X11; Linux x86_64; rv:34.0) Gecko/20100101 Firefox/34.0'
def get_url(*args, **kwargs):
"""Performs a request. Thin wrapper over requests.request.
method: request method, defaults to 'GET'.
"""
method = kwargs.pop('method', None)
if method is None:
method = 'GET'
method = method.upper()
if not 'headers' in kwargs:
kwargs['headers'] = {}
kwargs['headers']['User-Agent'] = _uagent
return requests.request(method, *args, **kwargs)
| <filename>botnet/modules/lib/network.py
"""
Contains network related utilities which can be used by the modules, for
example to query various online APIs.
"""
import requests
# User agent used while performing requests (429 status codes can be encountered
# when using the default user agent)
_uagent = 'Mozilla/5.0 (X11; Linux x86_64; rv:34.0) Gecko/20100101 Firefox/34.0'
def get_url(*args, **kwargs):
"""Performs a request. Thin wrapper over requests.request.
method: request method, defaults to 'GET'.
"""
method = kwargs.pop('method', None)
if method is None:
method = 'GET'
method = method.upper()
if not 'headers' in kwargs:
kwargs['headers'] = {}
kwargs['headers']['User-Agent'] = _uagent
return requests.request(method, *args, **kwargs)
| en | 0.73816 | Contains network related utilities which can be used by the modules, for example to query various online APIs. # User agent used while performing requests (429 status codes can be encountered # when using the default user agent) Performs a request. Thin wrapper over requests.request. method: request method, defaults to 'GET'. | 2.862164 | 3 |
objetto/_data/set.py | brunonicko/objetto | 8 | 6623291 | <reponame>brunonicko/objetto
# -*- coding: utf-8 -*-
"""Set data structures."""
from typing import TYPE_CHECKING, TypeVar, cast
try:
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc # type: ignore
from six import with_metaclass
from .._bases import final
from .._states import BaseState, SetState
from .._structures import (
BaseInteractiveSetStructure,
BaseSetStructure,
BaseSetStructureMeta,
SerializationError,
)
from .bases import (
BaseAuxiliaryData,
BaseAuxiliaryDataMeta,
BaseInteractiveAuxiliaryData,
)
if TYPE_CHECKING:
from typing import Any, Iterable, List, Type
__all__ = ["SetDataMeta", "SetData", "InteractiveSetData"]
T = TypeVar("T") # Any type.
class SetDataMeta(BaseAuxiliaryDataMeta, BaseSetStructureMeta):
"""
Metaclass for :class:`objetto.data.SetData`.
Inherits from:
- :class:`objetto.bases.BaseAuxiliaryDataMeta`
- :class:`objetto.bases.BaseSetStructureMeta`
Features:
- Defines a base auxiliary type.
"""
@property
@final
def _base_auxiliary_type(cls):
# type: () -> Type[SetData]
"""
Base auxiliary container type.
:rtype: type[objetto.data.SetData]
"""
return SetData
# noinspection PyTypeChecker
_SD = TypeVar("_SD", bound="SetData")
class SetData(
with_metaclass(
SetDataMeta,
BaseSetStructure[T],
BaseAuxiliaryData[T],
)
):
"""
Set data.
Metaclass:
- :class:`objetto.data.SetDataMeta`
Inherits from:
- :class:`objetto.bases.BaseSetStructure`
- :class:`objetto.bases.BaseAuxiliaryData`
Inherited by:
- :class:`objetto.data.InteractiveSetData`
:param initial: Initial values.
:type initial: collections.abc.Iterable
"""
__slots__ = ()
@classmethod
@final
def __make__(cls, state=SetState()):
# type: (Type[_SD], BaseState) -> _SD
"""
Make a new set data.
:param state: Internal state.
:return: New set data.
"""
return super(SetData, cls).__make__(state)
@classmethod
@final
def _from_iterable(cls, iterable):
# type: (Iterable) -> SetState
"""
Make set state from iterable.
:param iterable: Iterable.
:type iterable: collections.abc.Iterable
:return: Set state.
:rtype: objetto.states.SetState
"""
return SetState(iterable)
@final
def __init__(self, initial=()):
# type: (Iterable[T]) -> None
self._init_state(self.__get_initial_state(initial))
@classmethod
@final
def __get_initial_state(cls, input_values, factory=True):
# type: (Iterable[T], bool) -> SetState[T]
"""
Get initial state.
:param input_values: Input values.
:param factory: Whether to run values through factory.
:return: Initial state.
"""
if not cls._relationship.passthrough:
state = SetState(
cls._relationship.fabricate_value(v, factory=factory)
for v in input_values
)
else:
state = SetState(input_values)
return state
@final
def _clear(self):
# type: (_SD) -> _SD
"""
Clear all values.
:return: Transformed.
:rtype: objetto.data.SetData
"""
return type(self).__make__()
@final
def _add(self, value):
# type: (_SD, T) -> _SD
"""
Add value.
:param value: Value.
:type value: collections.abc.Hashable
:return: Transformed.
:rtype: objetto.data.SetData
"""
cls = type(self)
fabricated_value = cls._relationship.fabricate_value(value)
return type(self).__make__(self._state.add(fabricated_value))
@final
def _discard(self, *values):
# type: (_SD, T) -> _SD
"""
Discard value(s).
:param values: Value(s).
:type values: collections.abc.Hashable
:return: Transformed.
:rtype: objetto.data.SetData
:raises ValueError: No values provided.
"""
return type(self).__make__(self._state.discard(*values))
@final
def _remove(self, *values):
# type: (_SD, T) -> _SD
"""
Remove existing value(s).
:param values: Value(s).
:type value: collections.abc.Hashable
:return: Transformed.
:rtype: objetto.data.SetData
:raises ValueError: No values provided.
:raises KeyError: Value is not present.
"""
return type(self).__make__(self._state.remove(*values))
@final
def _replace(self, value, new_value):
# type: (_SD, T, T) -> _SD
"""
Replace existing value with a new one.
:param value: Existing value.
:type value: collections.abc.Hashable
:param new_value: New value.
:type value: collections.abc.Hashable
:return: Transformed.
:rtype: objetto.data.SetData
:raises KeyError: Value is not present.
"""
cls = type(self)
fabricated_new_value = cls._relationship.fabricate_value(new_value)
return type(self).__make__(self._state.remove(value).add(fabricated_new_value))
@final
def _update(self, iterable):
# type: (_SD, Iterable[T]) -> _SD
"""
Update with iterable.
:param iterable: Iterable.
:type iterable: collections.abc.Iterable[collections.abc.Hashable]
:return: Transformed.
:rtype: objetto.data.SetData
"""
cls = type(self)
if not cls._relationship.passthrough:
fabricated_iterable = (
cls._relationship.fabricate_value(v) for v in iterable
)
return type(self).__make__(self._state.update(fabricated_iterable))
else:
return type(self).__make__(self._state.update(iterable))
@classmethod
@final
def deserialize(cls, serialized, **kwargs):
# type: (Type[_SD], List, Any) -> _SD
"""
Deserialize.
:param serialized: Serialized.
:type serialized: list
:param kwargs: Keyword arguments to be passed to the deserializers.
:return: Deserialized.
:rtype: objetto.data.SetData
:raises objetto.exceptions.SerializationError: Can't deserialize.
"""
if not cls._relationship.serialized:
error = "'{}' is not deserializable".format(cls.__name__)
raise SerializationError(error)
state = SetState(
cls.deserialize_value(v, location=None, **kwargs) for v in serialized
)
return cls.__make__(state)
@final
def serialize(self, **kwargs):
# type: (Any) -> List
"""
Serialize.
:param kwargs: Keyword arguments to be passed to the serializers.
:return: Serialized.
:rtype: list
:raises objetto.exceptions.SerializationError: Can't serialize.
"""
if not type(self)._relationship.serialized:
error = "'{}' is not serializable".format(type(self).__fullname__)
raise SerializationError(error)
return list(
self.serialize_value(v, location=None, **kwargs)
for v in sorted(self._state, key=lambda v: hash(v))
)
@property
@final
def _state(self):
# type: () -> SetState[T]
"""
Internal state.
:rtype: objetto.states.SetState
"""
return cast("SetState", super(BaseSetStructure, self)._state)
class InteractiveSetData(
SetData[T],
BaseInteractiveSetStructure[T],
BaseInteractiveAuxiliaryData[T],
):
"""
Interactive set data.
Inherits from:
- :class:`objetto.data.SetData`
- :class:`objetto.bases.BaseInteractiveSetStructure`
- :class:`objetto.bases.BaseInteractiveAuxiliaryData`
"""
__slots__ = ()
| # -*- coding: utf-8 -*-
"""Set data structures."""
from typing import TYPE_CHECKING, TypeVar, cast
try:
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc # type: ignore
from six import with_metaclass
from .._bases import final
from .._states import BaseState, SetState
from .._structures import (
BaseInteractiveSetStructure,
BaseSetStructure,
BaseSetStructureMeta,
SerializationError,
)
from .bases import (
BaseAuxiliaryData,
BaseAuxiliaryDataMeta,
BaseInteractiveAuxiliaryData,
)
if TYPE_CHECKING:
from typing import Any, Iterable, List, Type
__all__ = ["SetDataMeta", "SetData", "InteractiveSetData"]
T = TypeVar("T")  # Element type of the set; unconstrained.
class SetDataMeta(BaseAuxiliaryDataMeta, BaseSetStructureMeta):
    """
    Metaclass for :class:`objetto.data.SetData`.

    Inherits from:
      - :class:`objetto.bases.BaseAuxiliaryDataMeta`
      - :class:`objetto.bases.BaseSetStructureMeta`

    Features:
      - Defines a base auxiliary type.
    """

    @property
    @final
    def _base_auxiliary_type(cls):
        # type: () -> Type[SetData]
        """
        Base auxiliary container type.

        :rtype: type[objetto.data.SetData]
        """
        # Resolved lazily at property-access time, so the forward reference
        # to ``SetData`` (defined further down in this module) is safe.
        return SetData
# noinspection PyTypeChecker
_SD = TypeVar("_SD", bound="SetData")  # Self-type used by fluent/factory returns.
class SetData(
with_metaclass(
SetDataMeta,
BaseSetStructure[T],
BaseAuxiliaryData[T],
)
):
"""
Set data.
Metaclass:
- :class:`objetto.data.SetDataMeta`
Inherits from:
- :class:`objetto.bases.BaseSetStructure`
- :class:`objetto.bases.BaseAuxiliaryData`
Inherited by:
- :class:`objetto.data.InteractiveSetData`
:param initial: Initial values.
:type initial: collections.abc.Iterable
"""
__slots__ = ()
@classmethod
@final
def __make__(cls, state=SetState()):
# type: (Type[_SD], BaseState) -> _SD
"""
Make a new set data.
:param state: Internal state.
:return: New set data.
"""
return super(SetData, cls).__make__(state)
@classmethod
@final
def _from_iterable(cls, iterable):
# type: (Iterable) -> SetState
"""
Make set state from iterable.
:param iterable: Iterable.
:type iterable: collections.abc.Iterable
:return: Set state.
:rtype: objetto.states.SetState
"""
return SetState(iterable)
@final
def __init__(self, initial=()):
# type: (Iterable[T]) -> None
self._init_state(self.__get_initial_state(initial))
@classmethod
@final
def __get_initial_state(cls, input_values, factory=True):
# type: (Iterable[T], bool) -> SetState[T]
"""
Get initial state.
:param input_values: Input values.
:param factory: Whether to run values through factory.
:return: Initial state.
"""
if not cls._relationship.passthrough:
state = SetState(
cls._relationship.fabricate_value(v, factory=factory)
for v in input_values
)
else:
state = SetState(input_values)
return state
@final
def _clear(self):
# type: (_SD) -> _SD
"""
Clear all values.
:return: Transformed.
:rtype: objetto.data.SetData
"""
return type(self).__make__()
@final
def _add(self, value):
# type: (_SD, T) -> _SD
"""
Add value.
:param value: Value.
:type value: collections.abc.Hashable
:return: Transformed.
:rtype: objetto.data.SetData
"""
cls = type(self)
fabricated_value = cls._relationship.fabricate_value(value)
return type(self).__make__(self._state.add(fabricated_value))
@final
def _discard(self, *values):
# type: (_SD, T) -> _SD
"""
Discard value(s).
:param values: Value(s).
:type values: collections.abc.Hashable
:return: Transformed.
:rtype: objetto.data.SetData
:raises ValueError: No values provided.
"""
return type(self).__make__(self._state.discard(*values))
@final
def _remove(self, *values):
# type: (_SD, T) -> _SD
"""
Remove existing value(s).
:param values: Value(s).
:type value: collections.abc.Hashable
:return: Transformed.
:rtype: objetto.data.SetData
:raises ValueError: No values provided.
:raises KeyError: Value is not present.
"""
return type(self).__make__(self._state.remove(*values))
@final
def _replace(self, value, new_value):
# type: (_SD, T, T) -> _SD
"""
Replace existing value with a new one.
:param value: Existing value.
:type value: collections.abc.Hashable
:param new_value: New value.
:type value: collections.abc.Hashable
:return: Transformed.
:rtype: objetto.data.SetData
:raises KeyError: Value is not present.
"""
cls = type(self)
fabricated_new_value = cls._relationship.fabricate_value(new_value)
return type(self).__make__(self._state.remove(value).add(fabricated_new_value))
@final
def _update(self, iterable):
# type: (_SD, Iterable[T]) -> _SD
"""
Update with iterable.
:param iterable: Iterable.
:type iterable: collections.abc.Iterable[collections.abc.Hashable]
:return: Transformed.
:rtype: objetto.data.SetData
"""
cls = type(self)
if not cls._relationship.passthrough:
fabricated_iterable = (
cls._relationship.fabricate_value(v) for v in iterable
)
return type(self).__make__(self._state.update(fabricated_iterable))
else:
return type(self).__make__(self._state.update(iterable))
@classmethod
@final
def deserialize(cls, serialized, **kwargs):
# type: (Type[_SD], List, Any) -> _SD
"""
Deserialize.
:param serialized: Serialized.
:type serialized: list
:param kwargs: Keyword arguments to be passed to the deserializers.
:return: Deserialized.
:rtype: objetto.data.SetData
:raises objetto.exceptions.SerializationError: Can't deserialize.
"""
if not cls._relationship.serialized:
error = "'{}' is not deserializable".format(cls.__name__)
raise SerializationError(error)
state = SetState(
cls.deserialize_value(v, location=None, **kwargs) for v in serialized
)
return cls.__make__(state)
@final
def serialize(self, **kwargs):
# type: (Any) -> List
"""
Serialize.
:param kwargs: Keyword arguments to be passed to the serializers.
:return: Serialized.
:rtype: list
:raises objetto.exceptions.SerializationError: Can't serialize.
"""
if not type(self)._relationship.serialized:
error = "'{}' is not serializable".format(type(self).__fullname__)
raise SerializationError(error)
return list(
self.serialize_value(v, location=None, **kwargs)
for v in sorted(self._state, key=lambda v: hash(v))
)
@property
@final
def _state(self):
# type: () -> SetState[T]
"""
Internal state.
:rtype: objetto.states.SetState
"""
return cast("SetState", super(BaseSetStructure, self)._state)
class InteractiveSetData(
    SetData[T],
    BaseInteractiveSetStructure[T],
    BaseInteractiveAuxiliaryData[T],
):
    """
    Interactive set data.

    Inherits from:
      - :class:`objetto.data.SetData`
      - :class:`objetto.bases.BaseInteractiveSetStructure`
      - :class:`objetto.bases.BaseInteractiveAuxiliaryData`
    """

    # No additional attributes; keep instances dict-less like the bases.
    __slots__ = ()
:type value: collections.abc.Hashable :param new_value: New value. :type value: collections.abc.Hashable :return: Transformed. :rtype: objetto.data.SetData :raises KeyError: Value is not present. # type: (_SD, Iterable[T]) -> _SD Update with iterable. :param iterable: Iterable. :type iterable: collections.abc.Iterable[collections.abc.Hashable] :return: Transformed. :rtype: objetto.data.SetData # type: (Type[_SD], List, Any) -> _SD Deserialize. :param serialized: Serialized. :type serialized: list :param kwargs: Keyword arguments to be passed to the deserializers. :return: Deserialized. :rtype: objetto.data.SetData :raises objetto.exceptions.SerializationError: Can't deserialize. # type: (Any) -> List Serialize. :param kwargs: Keyword arguments to be passed to the serializers. :return: Serialized. :rtype: list :raises objetto.exceptions.SerializationError: Can't serialize. # type: () -> SetState[T] Internal state. :rtype: objetto.states.SetState Interactive set data. Inherits from: - :class:`objetto.data.SetData` - :class:`objetto.bases.BaseInteractiveSetStructure` - :class:`objetto.bases.BaseInteractiveAuxiliaryData` | 2.14699 | 2 |
bianca/config.py | bumper-app/bumper-bianca | 0 | 6623292 | <reponame>bumper-app/bumper-bianca
"""
file: config.py
author: <NAME> <<EMAIL>>
date: November 2013
description: Reads the config.json info into a varible
"""
import json
#from StringIO import StringIO
import os
# Load the project configuration once at import time.  A context manager is
# used so the file handle is closed deterministically; json.load(open(...))
# leaves the handle open until garbage collection.
with open('./bianca/config.json') as _config_file:
    config = json.load(_config_file)

# Defining default repository path if not specified.
# A non-empty string is truthy, so the extra != "" comparison was redundant.
if config['repo_location']['location']:
    REPO_DIRECTORY = config['repo_location']['location'] + "/"
else:
    REPO_DIRECTORY = os.path.join(os.path.dirname(__file__), "ingester/CASRepos/git/")
| """
file: config.py
author: <NAME> <<EMAIL>>
date: November 2013
description: Reads the config.json info into a varible
"""
import json
#from StringIO import StringIO
import os
# Load the project configuration once at import time.  A context manager is
# used so the file handle is closed deterministically; json.load(open(...))
# leaves the handle open until garbage collection.
with open('./bianca/config.json') as _config_file:
    config = json.load(_config_file)

# Defining default repository path if not specified.
# A non-empty string is truthy, so the extra != "" comparison was redundant.
if config['repo_location']['location']:
    REPO_DIRECTORY = config['repo_location']['location'] + "/"
else:
    REPO_DIRECTORY = os.path.join(os.path.dirname(__file__), "ingester/CASRepos/git/")
backend/migrations/versions/dd72f4edfb71_.py | sartography/star-drive | 0 | 6623293 | <gh_stars>0
"""empty message
Revision ID: dd<PASSWORD>
Revises: <PASSWORD>
Create Date: 2021-02-18 15:25:07.218891
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'dd<PASSWORD>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Drop the direct user/participant FK columns from ``chain_session_step``."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Foreign-key constraints must be dropped before the columns they cover.
    op.drop_constraint('chain_session_step_participant_id_fkey', 'chain_session_step', type_='foreignkey')
    op.drop_constraint('chain_session_step_user_id_fkey', 'chain_session_step', type_='foreignkey')
    op.drop_column('chain_session_step', 'user_id')
    op.drop_column('chain_session_step', 'participant_id')
    # ### end Alembic commands ###
def downgrade():
    """Restore the user/participant FK columns on ``chain_session_step``.

    Note: the columns come back nullable and empty; the original data is
    not recoverable by this migration.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('chain_session_step', sa.Column('participant_id', sa.INTEGER(), autoincrement=False, nullable=True))
    op.add_column('chain_session_step', sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True))
    op.create_foreign_key('chain_session_step_user_id_fkey', 'chain_session_step', 'stardrive_user', ['user_id'], ['id'])
    op.create_foreign_key('chain_session_step_participant_id_fkey', 'chain_session_step', 'stardrive_participant', ['participant_id'], ['id'])
    # ### end Alembic commands ###
| """empty message
Revision ID: dd<PASSWORD>
Revises: <PASSWORD>
Create Date: 2021-02-18 15:25:07.218891
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'dd<PASSWORD>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Drop the direct user/participant FK columns from ``chain_session_step``."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Foreign-key constraints must be dropped before the columns they cover.
    op.drop_constraint('chain_session_step_participant_id_fkey', 'chain_session_step', type_='foreignkey')
    op.drop_constraint('chain_session_step_user_id_fkey', 'chain_session_step', type_='foreignkey')
    op.drop_column('chain_session_step', 'user_id')
    op.drop_column('chain_session_step', 'participant_id')
    # ### end Alembic commands ###
def downgrade():
    """Restore the user/participant FK columns on ``chain_session_step``.

    Note: the columns come back nullable and empty; the original data is
    not recoverable by this migration.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('chain_session_step', sa.Column('participant_id', sa.INTEGER(), autoincrement=False, nullable=True))
    op.add_column('chain_session_step', sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True))
    op.create_foreign_key('chain_session_step_user_id_fkey', 'chain_session_step', 'stardrive_user', ['user_id'], ['id'])
    op.create_foreign_key('chain_session_step_participant_id_fkey', 'chain_session_step', 'stardrive_participant', ['participant_id'], ['id'])
    # ### end Alembic commands ###
src/utils.py | kodaim1115/test | 4 | 6623294 | import math
import os
import shutil
import sys
import time
import torch
import torch.distributions as dist
import torch.nn.functional as F
# Classes
class Constants(object):
    """Numerical constants shared across the package."""
    eta = 1e-6
    eps = 1e-8
    log2 = math.log(2)
    log2pi = math.log(2 * math.pi)
    logceilc = 88     # largest cuda v s.t. exp(v) < inf
    logfloorc = -104  # smallest cuda v s.t. exp(v) > 0
# https://stackoverflow.com/questions/14906764/how-to-redirect-stdout-to-both-file-and-console-with-scripting
class Logger(object):
    """Tee-style writer: mirrors everything written to it into both the
    original stdout and a log file."""

    def __init__(self, filename, mode="a"):
        self.terminal = sys.stdout
        self.log = open(filename, mode)

    def write(self, message):
        for stream in (self.terminal, self.log):
            stream.write(message)

    def flush(self):
        # Needed so this object satisfies the Python 3 file-like protocol;
        # intentionally a no-op.
        pass
class Timer:
    """Context manager that measures and prints the wall-clock time of its block."""

    def __init__(self, name):
        self.name = name

    def __enter__(self):
        self.begin = time.time()
        return self

    def __exit__(self, *args):
        self.end = time.time()
        self.elapsed = self.end - self.begin
        self.elapsedH = time.gmtime(self.elapsed)
        template = '====> [{}] Time: {:7.3f}s or {}'
        print(template.format(self.name,
                              self.elapsed,
                              time.strftime("%H:%M:%S", self.elapsedH)))
# Functions
def save_vars(vs, filepath):
    """
    Saves variables to the given filepath in a safe manner.

    If a file already exists at ``filepath`` it is first preserved as
    ``<filepath>.old`` before the new contents are written.
    """
    backup = '{}.old'.format(filepath)
    if os.path.exists(filepath):
        shutil.copyfile(filepath, backup)
    torch.save(vs, filepath)
def save_model(model, filepath):
    """
    To load a saved model, simply use
    `model.load_state_dict(torch.load('path-to-saved-model'))`.

    Only the ``state_dict`` is persisted (not the full module), using the
    safe-overwrite behaviour of :func:`save_vars`.
    """
    save_vars(model.state_dict(), filepath)
def is_multidata(dataB):
    """Return True when the batch object is a list or tuple of modalities."""
    return isinstance(dataB, (list, tuple))


def unpack_data(dataB, device='cuda'):
    # dataB :: (Tensor, Idx) | [(Tensor, Idx)]
    """ Unpacks the data batch object in an appropriate manner to extract data """
    if torch.is_tensor(dataB):
        return dataB.to(device)
    if is_multidata(dataB):
        head = dataB[0]
        if torch.is_tensor(head):
            second = dataB[1]
            if torch.is_tensor(second):
                return head.to(device)  # mnist, svhn, cubI
            if is_multidata(second):
                return head.to(device), second[0].to(device)  # cubISft
            raise RuntimeError('Invalid data format {} -- check your dataloader!'.format(type(second)))
        if is_multidata(head):
            # List of (tensor, idx) pairs: keep only the tensors.
            return [d.to(device) for d in list(zip(*dataB))[0]]  # mnist-svhn, cubIS
        raise RuntimeError('Invalid data format {} -- check your dataloader!'.format(type(head)))
    raise RuntimeError('Invalid data format {} -- check your dataloader!'.format(type(dataB)))
def get_mean(d, K=100):
    """
    Extract the `mean` parameter for given distribution.
    If attribute not available, estimate from samples.
    """
    try:
        return d.mean
    except NotImplementedError:
        # Monte-Carlo estimate from K reparameterized samples.
        return d.rsample(torch.Size([K])).mean(0)
def log_mean_exp(value, dim=0, keepdim=False):
    """Numerically stable log of the mean of exponentials along ``dim``."""
    count = value.size(dim)
    return torch.logsumexp(value, dim, keepdim=keepdim) - math.log(count)
def kl_divergence(d1, d2, K=100):
    """Computes closed-form KL if available, else computes a MC estimate."""
    registry = torch.distributions.kl._KL_REGISTRY
    if (type(d1), type(d2)) in registry:
        return torch.distributions.kl_divergence(d1, d2)
    # No closed form registered: Monte-Carlo estimate with K samples from d1.
    samples = d1.rsample(torch.Size([K]))
    return (d1.log_prob(samples) - d2.log_prob(samples)).mean(0)
def vade_kld_uni(model, zs):
    """VaDE KL term (single-modality model).

    Computes E_q[log p(z|c) + log p(c) - log q(z|x) - log q(c|x)] against the
    Gaussian-mixture prior whose parameters come from ``model.get_gamma``.
    """
    n_centroids = model.params.n_centroids
    # pi and var_c already have Constants.eta added inside get_gamma.
    gamma, lgamma, mu_c, var_c, pi = model.get_gamma(zs)
    mu, var = model._qz_x_params
    # Broadcast per-sample posterior parameters against every centroid.
    mu_expand = mu.unsqueeze(2).expand(mu.size(0), mu.size(1), n_centroids)
    var_expand = var.unsqueeze(2).expand(var.size(0), var.size(1), n_centroids)
    lpz_c = -0.5*torch.sum(gamma*torch.sum(math.log(2*math.pi) + \
                                           torch.log(var_c) + \
                                           var_expand/var_c + \
                                           (mu_expand-mu_c)**2/var_c, dim=1), dim=1) # log p(z|c)
    # NOTE(review): log(pi) can still be -inf if a mixture weight collapses.
    lpc = torch.sum(gamma*torch.log(pi), dim=1) # log p(c)
    lqz_x = -0.5*torch.sum(1+torch.log(var)+math.log(2*math.pi), dim=1) #see VaDE paper # log q(z|x)
    lqc_x = torch.sum(gamma*(lgamma), dim=1) # log q(c|x)
    kld = -lpz_c - lpc + lqz_x + lqc_x
    return kld
def vade_kld(model, zs, r):
    """VaDE KL term for modality ``r`` of a multimodal model.

    Same expression as :func:`vade_kld_uni`, but the variational posterior
    parameters are read from ``model.vaes[r]``.
    """
    n_centroids = model.params.n_centroids
    # pi and var_c already have Constants.eta added inside get_gamma.
    gamma, lgamma, mu_c, var_c, pi = model.get_gamma(zs)
    mu, var = model.vaes[r]._qz_x_params
    # Broadcast per-sample posterior parameters against every centroid.
    mu_expand = mu.unsqueeze(2).expand(mu.size(0), mu.size(1), n_centroids)
    var_expand = var.unsqueeze(2).expand(var.size(0), var.size(1), n_centroids)
    lpz_c = -0.5*torch.sum(gamma*torch.sum(math.log(2*math.pi) + \
                                           torch.log(var_c) + \
                                           var_expand/var_c + \
                                           (mu_expand-mu_c)**2/var_c, dim=1), dim=1) # log p(z|c)
    # NOTE(review): log(pi) can still be -inf if a mixture weight collapses.
    lpc = torch.sum(gamma*torch.log(pi), dim=1) # log p(c)
    lqz_x = -0.5*torch.sum(1+torch.log(var)+math.log(2*math.pi), dim=1) #see VaDE paper # log q(z|x)
    lqc_x = torch.sum(gamma*(lgamma), dim=1) # log q(c|x)
    kld = -lpz_c - lpc + lqz_x + lqc_x
    return kld
def pdist(sample_1, sample_2, eps=1e-5):
    """Compute the matrix of all squared pairwise distances. Code
    adapted from the torch-two-sample library (added batching).
    You can find the original implementation of this function here:
    https://github.com/josipd/torch-two-sample/blob/master/torch_two_sample/util.py

    Arguments
    ---------
    sample_1 : torch.Tensor or Variable
        The first sample, should be of shape ``(batch_size, n_1, d)``.
    sample_2 : torch.Tensor or Variable
        The second sample, should be of shape ``(batch_size, n_2, d)``.
    norm : float
        The l_p norm to be used.
    batched : bool
        whether data is batched
    Returns
    -------
    torch.Tensor or Variable
        Matrix of shape (batch_size, n_1, n_2). The [i, j]-th entry is equal to
        ``|| sample_1[i, :] - sample_2[j, :] ||_p``."""
    # Promote unbatched (n, d) inputs to a batch of one.
    if sample_1.dim() == 2:
        sample_1 = sample_1.unsqueeze(0)
        sample_2 = sample_2.unsqueeze(0)
    B = sample_1.size(0)
    n_1, n_2 = sample_1.size(1), sample_2.size(1)
    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, evaluated batch-wise.
    sq_1 = torch.sum(sample_1 ** 2, dim=-1, keepdim=True)
    sq_2 = torch.sum(sample_2 ** 2, dim=-1, keepdim=True)
    cross = sample_1.matmul(sample_2.transpose(1, 2))
    dist_sq = (sq_1.expand(B, n_1, n_2)
               + sq_2.transpose(1, 2).expand(B, n_1, n_2)
               - 2 * cross)
    # eps keeps sqrt differentiable at zero; abs guards tiny negatives.
    return torch.sqrt(eps + torch.abs(dist_sq)).squeeze()  # batch x K x latent
def NN_lookup(emb_h, emb, data):
    """Return the rows of ``data`` whose embedding in ``emb`` is the nearest
    (Euclidean, via :func:`pdist`) reference for each query in ``emb_h``."""
    # argmin over dim 0: for every query column pick the closest reference row.
    indices = pdist(emb.to(emb_h.device), emb_h).argmin(dim=0)
    return data[indices]
class FakeCategorical(dist.Distribution):
    """Distribution-shaped wrapper whose "samples" are the raw logits.

    Lets a sequence decoder plug into code expecting a torch Distribution
    while the reconstruction term is computed as a cross-entropy on logits.
    """
    support = dist.constraints.real
    has_rsample = True

    def __init__(self, locs):
        # NOTE(review): super().__init__() is deliberately not called; only
        # the attribute the base machinery reads (_batch_shape) is set by hand.
        self.logits = locs
        self._batch_shape = self.logits.shape

    @property
    def mean(self):
        return self.logits

    def sample(self, sample_shape=torch.Size()):
        with torch.no_grad():
            return self.rsample(sample_shape)

    def rsample(self, sample_shape=torch.Size()):
        # "Sampling" just broadcasts the logits to the requested shape.
        return self.logits.expand([*sample_shape, *self.logits.shape]).contiguous()

    def log_prob(self, value):
        # value of shape (K, B, D)
        # ignore_index=0: padding tokens contribute zero loss.
        lpx_z = -F.cross_entropy(input=self.logits.view(-1, self.logits.size(-1)),
                                 target=value.expand(self.logits.size()[:-1]).long().view(-1),
                                 reduction='none',
                                 ignore_index=0)
        return lpx_z.view(*self.logits.shape[:-1])

# it is inevitable to have the word embedding dimension summed up in
# cross-entropy loss ($\sum -gt_i \log(p_i)$ with most gt_i = 0, We adopt the
# operationally equivalence here, which is summing up the sentence dimension
# in objective.
#from github Bjarten/early-stopping-pytorch
import numpy as np
import torch
class EarlyStopping:
    """Early stops the training if validation loss doesn't improve after a given patience."""

    def __init__(self, patience=7, verbose=False, delta=0):
        """
        Args:
            patience (int): How long to wait after last time validation loss improved.
                            Default: 7
            verbose (bool): If True, prints a message for each validation loss improvement.
                            Default: False
            delta (float): Minimum change in the monitored quantity to qualify as an improvement.
                            Default: 0
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        # np.Inf was removed in NumPy 2.0; np.inf is the supported spelling.
        self.val_loss_min = np.inf
        self.delta = delta

    def __call__(self, val_loss, model, runPath):
        """Record one validation result; checkpoint on improvement,
        bump the patience counter otherwise."""
        # Higher score is better (scores are negated losses).
        score = -val_loss
        if self.best_score is None:
            self.best_score = score
            self.save_checkpoint(val_loss, model, runPath)
        elif score < self.best_score + self.delta:
            self.counter += 1
            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.save_checkpoint(val_loss, model, runPath)  # pass runPath along (was: runPath追加)
            self.counter = 0

    def save_checkpoint(self, val_loss, model, runPath):
        '''Saves model when validation loss decrease.'''
        if self.verbose:
            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
        save_model(model, runPath + '/model.rar')  # ported from the mmvae reference code (was: mmvaeより移植)
        self.val_loss_min = val_loss
class EarlyStopping_nosave:
    """Early stops the training if validation loss doesn't improve after a given patience.

    Identical to :class:`EarlyStopping` but never writes a checkpoint.
    """

    def __init__(self, patience=7, verbose=False, delta=0):
        """
        Args:
            patience (int): How long to wait after last time validation loss improved.
                            Default: 7
            verbose (bool): If True, prints a message for each validation loss improvement.
                            Default: False
            delta (float): Minimum change in the monitored quantity to qualify as an improvement.
                            Default: 0
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        # Was -1e9, which made the `is None` branch below unreachable;
        # None matches EarlyStopping and makes the first call an improvement.
        self.best_score = None
        self.early_stop = False
        # np.Inf was removed in NumPy 2.0; np.inf is the supported spelling.
        self.val_loss_min = np.inf
        self.delta = delta

    def __call__(self, val_loss, model, runPath):
        """Record one validation result; bump the patience counter when the
        loss fails to improve by more than ``delta``."""
        score = -val_loss
        if self.best_score is None:
            self.best_score = score
        elif score < self.best_score + self.delta:
            self.counter += 1
            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.counter = 0
| import math
import os
import shutil
import sys
import time
import torch
import torch.distributions as dist
import torch.nn.functional as F
# Classes
class Constants(object):
    """Numerical constants shared across the package."""
    eta = 1e-6
    eps = 1e-8
    log2 = math.log(2)
    log2pi = math.log(2 * math.pi)
    logceilc = 88     # largest cuda v s.t. exp(v) < inf
    logfloorc = -104  # smallest cuda v s.t. exp(v) > 0
# https://stackoverflow.com/questions/14906764/how-to-redirect-stdout-to-both-file-and-console-with-scripting
class Logger(object):
    """Tee-style writer: mirrors everything written to it into both the
    original stdout and a log file."""

    def __init__(self, filename, mode="a"):
        self.terminal = sys.stdout
        self.log = open(filename, mode)

    def write(self, message):
        for stream in (self.terminal, self.log):
            stream.write(message)

    def flush(self):
        # Needed so this object satisfies the Python 3 file-like protocol;
        # intentionally a no-op.
        pass
class Timer:
    """Context manager that measures and prints the wall-clock time of its block."""

    def __init__(self, name):
        self.name = name

    def __enter__(self):
        self.begin = time.time()
        return self

    def __exit__(self, *args):
        self.end = time.time()
        self.elapsed = self.end - self.begin
        self.elapsedH = time.gmtime(self.elapsed)
        template = '====> [{}] Time: {:7.3f}s or {}'
        print(template.format(self.name,
                              self.elapsed,
                              time.strftime("%H:%M:%S", self.elapsedH)))
# Functions
def save_vars(vs, filepath):
    """
    Saves variables to the given filepath in a safe manner.

    If a file already exists at ``filepath`` it is first preserved as
    ``<filepath>.old`` before the new contents are written.
    """
    backup = '{}.old'.format(filepath)
    if os.path.exists(filepath):
        shutil.copyfile(filepath, backup)
    torch.save(vs, filepath)
def save_model(model, filepath):
    """
    To load a saved model, simply use
    `model.load_state_dict(torch.load('path-to-saved-model'))`.

    Only the ``state_dict`` is persisted (not the full module), using the
    safe-overwrite behaviour of :func:`save_vars`.
    """
    save_vars(model.state_dict(), filepath)
def is_multidata(dataB):
    """Return True when the batch object is a list or tuple of modalities."""
    return isinstance(dataB, (list, tuple))


def unpack_data(dataB, device='cuda'):
    # dataB :: (Tensor, Idx) | [(Tensor, Idx)]
    """ Unpacks the data batch object in an appropriate manner to extract data """
    if torch.is_tensor(dataB):
        return dataB.to(device)
    if is_multidata(dataB):
        head = dataB[0]
        if torch.is_tensor(head):
            second = dataB[1]
            if torch.is_tensor(second):
                return head.to(device)  # mnist, svhn, cubI
            if is_multidata(second):
                return head.to(device), second[0].to(device)  # cubISft
            raise RuntimeError('Invalid data format {} -- check your dataloader!'.format(type(second)))
        if is_multidata(head):
            # List of (tensor, idx) pairs: keep only the tensors.
            return [d.to(device) for d in list(zip(*dataB))[0]]  # mnist-svhn, cubIS
        raise RuntimeError('Invalid data format {} -- check your dataloader!'.format(type(head)))
    raise RuntimeError('Invalid data format {} -- check your dataloader!'.format(type(dataB)))
def get_mean(d, K=100):
    """
    Extract the `mean` parameter for given distribution.
    If attribute not available, estimate from samples.
    """
    try:
        return d.mean
    except NotImplementedError:
        # Monte-Carlo estimate from K reparameterized samples.
        return d.rsample(torch.Size([K])).mean(0)
def log_mean_exp(value, dim=0, keepdim=False):
    """Numerically stable log of the mean of exponentials along ``dim``."""
    count = value.size(dim)
    return torch.logsumexp(value, dim, keepdim=keepdim) - math.log(count)
def kl_divergence(d1, d2, K=100):
    """Computes closed-form KL if available, else computes a MC estimate."""
    registry = torch.distributions.kl._KL_REGISTRY
    if (type(d1), type(d2)) in registry:
        return torch.distributions.kl_divergence(d1, d2)
    # No closed form registered: Monte-Carlo estimate with K samples from d1.
    samples = d1.rsample(torch.Size([K]))
    return (d1.log_prob(samples) - d2.log_prob(samples)).mean(0)
def vade_kld_uni(model, zs):
    """VaDE KL term (single-modality model).

    Computes E_q[log p(z|c) + log p(c) - log q(z|x) - log q(c|x)] against the
    Gaussian-mixture prior whose parameters come from ``model.get_gamma``.
    """
    n_centroids = model.params.n_centroids
    # pi and var_c already have Constants.eta added inside get_gamma.
    gamma, lgamma, mu_c, var_c, pi = model.get_gamma(zs)
    mu, var = model._qz_x_params
    # Broadcast per-sample posterior parameters against every centroid.
    mu_expand = mu.unsqueeze(2).expand(mu.size(0), mu.size(1), n_centroids)
    var_expand = var.unsqueeze(2).expand(var.size(0), var.size(1), n_centroids)
    lpz_c = -0.5*torch.sum(gamma*torch.sum(math.log(2*math.pi) + \
                                           torch.log(var_c) + \
                                           var_expand/var_c + \
                                           (mu_expand-mu_c)**2/var_c, dim=1), dim=1) # log p(z|c)
    # NOTE(review): log(pi) can still be -inf if a mixture weight collapses.
    lpc = torch.sum(gamma*torch.log(pi), dim=1) # log p(c)
    lqz_x = -0.5*torch.sum(1+torch.log(var)+math.log(2*math.pi), dim=1) #see VaDE paper # log q(z|x)
    lqc_x = torch.sum(gamma*(lgamma), dim=1) # log q(c|x)
    kld = -lpz_c - lpc + lqz_x + lqc_x
    return kld
def vade_kld(model, zs, r):
    """VaDE KL term for modality ``r`` of a multimodal model.

    Same expression as :func:`vade_kld_uni`, but the variational posterior
    parameters are read from ``model.vaes[r]``.
    """
    n_centroids = model.params.n_centroids
    # pi and var_c already have Constants.eta added inside get_gamma.
    gamma, lgamma, mu_c, var_c, pi = model.get_gamma(zs)
    mu, var = model.vaes[r]._qz_x_params
    # Broadcast per-sample posterior parameters against every centroid.
    mu_expand = mu.unsqueeze(2).expand(mu.size(0), mu.size(1), n_centroids)
    var_expand = var.unsqueeze(2).expand(var.size(0), var.size(1), n_centroids)
    lpz_c = -0.5*torch.sum(gamma*torch.sum(math.log(2*math.pi) + \
                                           torch.log(var_c) + \
                                           var_expand/var_c + \
                                           (mu_expand-mu_c)**2/var_c, dim=1), dim=1) # log p(z|c)
    # NOTE(review): log(pi) can still be -inf if a mixture weight collapses.
    lpc = torch.sum(gamma*torch.log(pi), dim=1) # log p(c)
    lqz_x = -0.5*torch.sum(1+torch.log(var)+math.log(2*math.pi), dim=1) #see VaDE paper # log q(z|x)
    lqc_x = torch.sum(gamma*(lgamma), dim=1) # log q(c|x)
    kld = -lpz_c - lpc + lqz_x + lqc_x
    return kld
def pdist(sample_1, sample_2, eps=1e-5):
    """Compute the matrix of all squared pairwise distances. Code
    adapted from the torch-two-sample library (added batching).
    You can find the original implementation of this function here:
    https://github.com/josipd/torch-two-sample/blob/master/torch_two_sample/util.py

    Arguments
    ---------
    sample_1 : torch.Tensor or Variable
        The first sample, should be of shape ``(batch_size, n_1, d)``.
    sample_2 : torch.Tensor or Variable
        The second sample, should be of shape ``(batch_size, n_2, d)``.
    norm : float
        The l_p norm to be used.
    batched : bool
        whether data is batched
    Returns
    -------
    torch.Tensor or Variable
        Matrix of shape (batch_size, n_1, n_2). The [i, j]-th entry is equal to
        ``|| sample_1[i, :] - sample_2[j, :] ||_p``."""
    # Promote unbatched (n, d) inputs to a batch of one.
    if sample_1.dim() == 2:
        sample_1 = sample_1.unsqueeze(0)
        sample_2 = sample_2.unsqueeze(0)
    B = sample_1.size(0)
    n_1, n_2 = sample_1.size(1), sample_2.size(1)
    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, evaluated batch-wise.
    sq_1 = torch.sum(sample_1 ** 2, dim=-1, keepdim=True)
    sq_2 = torch.sum(sample_2 ** 2, dim=-1, keepdim=True)
    cross = sample_1.matmul(sample_2.transpose(1, 2))
    dist_sq = (sq_1.expand(B, n_1, n_2)
               + sq_2.transpose(1, 2).expand(B, n_1, n_2)
               - 2 * cross)
    # eps keeps sqrt differentiable at zero; abs guards tiny negatives.
    return torch.sqrt(eps + torch.abs(dist_sq)).squeeze()  # batch x K x latent
def NN_lookup(emb_h, emb, data):
    """Return the rows of ``data`` whose embedding in ``emb`` is the nearest
    (Euclidean, via :func:`pdist`) reference for each query in ``emb_h``."""
    # argmin over dim 0: for every query column pick the closest reference row.
    indices = pdist(emb.to(emb_h.device), emb_h).argmin(dim=0)
    return data[indices]
class FakeCategorical(dist.Distribution):
    """Distribution-shaped wrapper whose "samples" are the raw logits.

    Lets a sequence decoder plug into code expecting a torch Distribution
    while the reconstruction term is computed as a cross-entropy on logits.
    """
    support = dist.constraints.real
    has_rsample = True

    def __init__(self, locs):
        # NOTE(review): super().__init__() is deliberately not called; only
        # the attribute the base machinery reads (_batch_shape) is set by hand.
        self.logits = locs
        self._batch_shape = self.logits.shape

    @property
    def mean(self):
        return self.logits

    def sample(self, sample_shape=torch.Size()):
        with torch.no_grad():
            return self.rsample(sample_shape)

    def rsample(self, sample_shape=torch.Size()):
        # "Sampling" just broadcasts the logits to the requested shape.
        return self.logits.expand([*sample_shape, *self.logits.shape]).contiguous()

    def log_prob(self, value):
        # value of shape (K, B, D)
        # ignore_index=0: padding tokens contribute zero loss.
        lpx_z = -F.cross_entropy(input=self.logits.view(-1, self.logits.size(-1)),
                                 target=value.expand(self.logits.size()[:-1]).long().view(-1),
                                 reduction='none',
                                 ignore_index=0)
        return lpx_z.view(*self.logits.shape[:-1])

# it is inevitable to have the word embedding dimension summed up in
# cross-entropy loss ($\sum -gt_i \log(p_i)$ with most gt_i = 0, We adopt the
# operationally equivalence here, which is summing up the sentence dimension
# in objective.
#from github Bjarten/early-stopping-pytorch
import numpy as np
import torch
class EarlyStopping:
    """Early stops the training if validation loss doesn't improve after a given patience."""

    def __init__(self, patience=7, verbose=False, delta=0):
        """
        Args:
            patience (int): How long to wait after last time validation loss improved.
                            Default: 7
            verbose (bool): If True, prints a message for each validation loss improvement.
                            Default: False
            delta (float): Minimum change in the monitored quantity to qualify as an improvement.
                            Default: 0
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        # np.Inf was removed in NumPy 2.0; np.inf is the supported spelling.
        self.val_loss_min = np.inf
        self.delta = delta

    def __call__(self, val_loss, model, runPath):
        """Record one validation result; checkpoint on improvement,
        bump the patience counter otherwise."""
        # Higher score is better (scores are negated losses).
        score = -val_loss
        if self.best_score is None:
            self.best_score = score
            self.save_checkpoint(val_loss, model, runPath)
        elif score < self.best_score + self.delta:
            self.counter += 1
            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.save_checkpoint(val_loss, model, runPath)  # pass runPath along (was: runPath追加)
            self.counter = 0

    def save_checkpoint(self, val_loss, model, runPath):
        '''Saves model when validation loss decrease.'''
        if self.verbose:
            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
        save_model(model, runPath + '/model.rar')  # ported from the mmvae reference code (was: mmvaeより移植)
        self.val_loss_min = val_loss
class EarlyStopping_nosave:
    """Early stops the training if validation loss doesn't improve after a given patience.

    Identical to :class:`EarlyStopping` but never writes a checkpoint.
    """

    def __init__(self, patience=7, verbose=False, delta=0):
        """
        Args:
            patience (int): How long to wait after last time validation loss improved.
                            Default: 7
            verbose (bool): If True, prints a message for each validation loss improvement.
                            Default: False
            delta (float): Minimum change in the monitored quantity to qualify as an improvement.
                            Default: 0
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        # Was -1e9, which made the `is None` branch below unreachable;
        # None matches EarlyStopping and makes the first call an improvement.
        self.best_score = None
        self.early_stop = False
        # np.Inf was removed in NumPy 2.0; np.inf is the supported spelling.
        self.val_loss_min = np.inf
        self.delta = delta

    def __call__(self, val_loss, model, runPath):
        """Record one validation result; bump the patience counter when the
        loss fails to improve by more than ``delta``."""
        score = -val_loss
        if self.best_score is None:
            self.best_score = score
        elif score < self.best_score + self.delta:
            self.counter += 1
            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.counter = 0
| en | 0.646944 | # Classes # largest cuda v s.t. exp(v) < inf # smallest cuda v s.t. exp(v) > 0 # https://stackoverflow.com/questions/14906764/how-to-redirect-stdout-to-both-file-and-console-with-scripting # this flush method is needed for python 3 compatibility. # this handles the flush command by doing nothing. # you might want to specify some extra behavior here. # Functions Saves variables to the given filepath in a safe manner. To load a saved model, simply use `model.load_state_dict(torch.load('path-to-saved-model'))`. #if hasattr(model, 'vaes'): # for vae in model.vaes: # fdir, fext = os.path.splitext(filepath) # save_vars(vae.state_dict(), fdir + '_' + vae.modelName + fext) # dataB :: (Tensor, Idx) | [(Tensor, Idx)] Unpacks the data batch object in an appropriate manner to extract data # mnist, svhn, cubI # cubISft # mnist-svhn, cubIS Extract the `mean` parameter for given distribution. If attribute not available, estimate from samples. Computes closed-form KL if available, else computes a MC estimate. #pi, var_cは get_gammaでConstants.eta足してる # log p(z|c) # log p(c) #log(pi)が-inf怪しい #see VaDE paper # log q(z|x) # log q(c|x) #pi, var_cは get_gammaでConstants.eta足してる # log p(z|c) # log p(c) #log(pi)が-inf怪しい #see VaDE paper # log q(z|x) # log q(c|x) Compute the matrix of all squared pairwise distances. Code adapted from the torch-two-sample library (added batching). You can find the original implementation of this function here: https://github.com/josipd/torch-two-sample/blob/master/torch_two_sample/util.py Arguments --------- sample_1 : torch.Tensor or Variable The first sample, should be of shape ``(batch_size, n_1, d)``. sample_2 : torch.Tensor or Variable The second sample, should be of shape ``(batch_size, n_2, d)``. norm : float The l_p norm to be used. batched : bool whether data is batched Returns ------- torch.Tensor or Variable Matrix of shape (batch_size, n_1, n_2). The [i, j]-th entry is equal to ``|| sample_1[i, :] - sample_2[j, :] ||_p``. 
# batch x K x latent # indices = torch.tensor(cosine_similarity(emb, emb_h.cpu().numpy()).argmax(0)).to(emb_h.device).squeeze() # value of shape (K, B, D) # it is inevitable to have the word embedding dimension summed up in # cross-entropy loss ($\sum -gt_i \log(p_i)$ with most gt_i = 0, We adopt the # operationally equivalence here, which is summing up the sentence dimension # in objective. #from github Bjarten/early-stopping-pytorch Early stops the training if validation loss doesn't improve after a given patience. Args: patience (int): How long to wait after last time validation loss improved. Default: 7 verbose (bool): If True, prints a message for each validation loss improvement. Default: False delta (float): Minimum change in the monitored quantity to qualify as an improvement. Default: 0 #runPath追加 Saves model when validation loss decrease. #torch.save(model.state_dict(), 'checkpoint.pt') #mmvaeより移植 Early stops the training if validation loss doesn't improve after a given patience. Args: patience (int): How long to wait after last time validation loss improved. Default: 7 verbose (bool): If True, prints a message for each validation loss improvement. Default: False delta (float): Minimum change in the monitored quantity to qualify as an improvement. Default: 0 | 2.526357 | 3 |
tof/tests/tests.py | pilikp/django-tof | 22 | 6623295 | <reponame>pilikp/django-tof
# -*- coding: utf-8 -*-
# @Author: MaxST
# @Date: 2019-11-15 19:17:59
# @Last Modified by: MaxST
# @Last Modified time: 2019-12-15 14:23:41
from django.contrib import admin
from django.contrib.admin.models import LogEntry
from django.contrib.admin.options import IS_POPUP_VAR
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.views.main import SEARCH_VAR
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.management import call_command
from django.db.models import Q
from django.test import TestCase
from django.test.client import RequestFactory
from django.urls import reverse
from django.utils.translation import activate, override
from main.models import Vintage, Wine, Winery
from mixer.backend.django import mixer
from tof.admin import (
ContentTypeAdmin, LanguageAdmin, TranslatableFieldAdmin, TranslationAdmin,
)
from tof.fields import TranslatableFieldFormField
from tof.models import (
Language, TranslatableField, Translation, TranslationFieldMixin,
)
from tof.settings import FALLBACK_LANGUAGES
from tof.utils import TranslatableText
site = admin.AdminSite(name='admin')
site.register(User, UserAdmin)
site.register(ContentType, ContentTypeAdmin)
site.register(Language, LanguageAdmin)
site.register(TranslatableField, TranslatableFieldAdmin)
site.register(Translation, TranslationAdmin)
def create_field(name='title', cls=None):
ct = ContentType.objects.get_for_model(cls or Wine)
fld = TranslatableField.objects.filter(content_type=ct).first()
if not fld:
mixer.blend(TranslatableField, name=name, title=name.title(), content_type=ct)
def clean_model(cls, attr='title'):
if issubclass(cls, TranslationFieldMixin):
for fld in {**cls._meta._field_tof['by_id']}.values():
fld.remove_translation_from_class()
class TranslatableFieldTestCase(TestCase):
@classmethod
def setUpTestData(cls):
clean_model(Wine)
clean_model(LogEntry)
mixer.blend(Wine, title='Wine 1')
def test_save(self):
wine1 = Wine.objects.first()
log = LogEntry.objects.first()
self.assertNotIsInstance(wine1, TranslationFieldMixin)
self.assertNotIsInstance(log, TranslationFieldMixin)
self.assertIsNone(vars(LogEntry._meta).get('_field_tof'))
create_field()
self.assertIsInstance(wine1, TranslationFieldMixin)
self.assertIsNotNone(vars(Wine._meta).get('_field_tof'))
self.assertIsNone(vars(LogEntry._meta).get('_field_tof'))
create_field('change_message', LogEntry)
self.assertIsNotNone(vars(LogEntry._meta).get('_field_tof'))
def test_delete(self):
create_field()
wine1 = Wine.objects.first()
self.assertIsInstance(wine1, TranslationFieldMixin)
fld = TranslatableField.objects.first()
fld.delete()
wine1 = Wine.objects.first()
self.assertNotIsInstance(wine1, TranslationFieldMixin)
self.assertEqual(wine1.title, 'Wine 1')
wine2 = mixer.blend(Wine, title='Wine 2')
self.assertEqual(wine2.title, 'Wine 2')
def test_str(self):
create_field()
fld = TranslatableField.objects.first()
self.assertEqual(str(fld), 'wine|Title')
class TranslationTestCase(TestCase):
@classmethod
def setUpTestData(cls):
clean_model(Wine)
mixer.blend(Wine, title='Wine 1')
create_field()
def test_str(self):
fld = TranslatableField.objects.first()
lang_en = Language.objects.get(iso='en')
new_title = 'Wine 1 en'
wine1 = Wine.objects.first()
self.assertEqual(wine1.title, 'Wine 1')
trans = mixer.blend(Translation, content_object=wine1, field=fld, lang=lang_en, value=new_title)
str_make = f'{wine1}.{fld.name}.{lang_en} = "{new_title}"'
self.assertEqual(str(trans), str_make)
class TranslationFieldMixinTestCase(TestCase):
@classmethod
def setUpTestData(cls):
clean_model(Wine)
mixer.blend(Wine, title='Wine 1')
create_field()
def test_save(self):
wine1 = Wine.objects.first()
title_de = 'Wine 1 de'
title_en = 'Wine 1 en'
title_nl = 'Wine 1 en'
with override('de'):
wine1.title = title_de
wine1.save()
wine1 = Wine.objects.first()
self.assertEqual(wine1.title.de, title_de)
value = TranslatableText()
vars(value).update({'en': title_en, 'nl': title_nl})
wine1.title = value
wine1.save()
wine1 = Wine.objects.first()
self.assertEqual(wine1.title.en, title_en)
self.assertEqual(wine1.title.nl, title_nl)
def test_get(self):
self.assertIsInstance(Wine.title, TranslatableField)
def test_prefetch(self):
wine1 = Wine.objects.first()
wine1.title = f'{wine1.title}'
wine1.save()
with self.assertNumQueries(2):
for wine in Wine.objects.all():
self.assertIsNotNone(wine.title)
mixer.cycle(5).blend(Wine, title=mixer.sequence('Wine {0}'))
with override('en'):
for wine in Wine.objects.all():
wine.title = f'{wine.title} en'
wine.save()
with self.assertNumQueries(2):
for wine in Wine.objects.all():
self.assertIsNotNone(wine.title.en)
class FilterTestCase(TestCase):
@classmethod
def setUpTestData(cls):
clean_model(Wine)
mixer.blend(Wine, title='Wine 1')
create_field()
def test_behavior(self):
wine1 = Wine.objects.first()
title_de = 'Wine 1 de'
title_nl = 'Wine 1 nl'
for title in (title_de, title_nl):
with override(title.split()[-1]):
wine1.title = title
wine1.save()
wine1 = Wine.objects.first()
with override('nl'):
self.assertEqual(str(wine1.title), title_nl)
with override('fr'):
self.assertEqual(str(wine1.title), title_nl)
with override('de'):
serch_wine = Wine.objects.filter(title=title_de).first()
self.assertEqual(title_de, str(serch_wine.title))
serch_wine = Wine.objects.exclude(title=title_de).first()
self.assertIsNone(serch_wine)
serch_wine = Wine.objects.get(title=title_de)
self.assertEqual(title_de, str(serch_wine.title))
serch_wine = Wine.objects.filter(Q(title=title_de)).first()
self.assertEqual(title_de, str(serch_wine.title))
serch_wine = Wine.objects.filter(title=title_nl).first()
self.assertIsNone(serch_wine)
from tof import decorators
decorators.DEFAULT_FILTER_LANGUAGE = '__all__'
serch_wine = Wine.objects.filter(title=title_nl).first()
self.assertEqual(wine1, serch_wine)
decorators.DEFAULT_FILTER_LANGUAGE = 'nl'
serch_wine = Wine.objects.filter(title=title_nl).first()
self.assertEqual(wine1, serch_wine)
decorators.DEFAULT_FILTER_LANGUAGE = ('nl', )
serch_wine = Wine.objects.filter(title=title_nl).first()
self.assertEqual(wine1, serch_wine)
decorators.DEFAULT_FILTER_LANGUAGE = {'de': ('nl', )}
serch_wine = Wine.objects.filter(title=title_nl).first()
self.assertEqual(wine1, serch_wine)
decorators.DEFAULT_FILTER_LANGUAGE = set()
serch_wine = Wine.objects.filter(title=title_de).first()
self.assertEqual(wine1, serch_wine)
class TranslatableTextTestCase(TestCase):
@classmethod
def setUpTestData(cls):
clean_model(Wine)
mixer.blend(Wine, title='Wine 1')
create_field()
def test_common(self):
wine1 = Wine.objects.first()
title_nl = 'Wine 1 nl'
for title in (title_nl, ):
with override(title.split()[-1]):
wine1.title = title
wine1.save()
val = wine1.title
self.assertIsInstance(val, TranslatableText)
self.assertEqual(val, 'Wine 1')
self.assertEqual(val[0], 'W')
self.assertEqual(val + '1', 'Wine 11')
self.assertEqual('1' + val, '1Wine 1')
self.assertEqual(repr(val), f"'{val}'")
self.assertEqual(str(val), val.__html__())
self.assertFalse(hasattr(val, 'resolve_expression'))
self.assertFalse(hasattr(val, 'prepare_database_save'))
FALLBACK_LANGUAGES['aa'] = ('nl', )
with override('aa'):
self.assertEqual(str(val), title_nl)
del wine1.title
self.assertEqual(wine1.title, 'Wine 1')
class Benchmark(TestCase):
def test_benchmark(self):
call_command('benchmark')
class ModelAdminTests(TestCase):
factory = RequestFactory()
@classmethod
def setUpTestData(cls):
activate('en')
cls.superuser = User.objects.create_superuser(username='super', email='<EMAIL>', password='<PASSWORD>')
def setUp(self):
clean_model(Wine)
mixer.blend(Wine, title='Wine 1')
create_field()
self.site = AdminSite()
def test_search_result(self):
wine = ContentType.objects.get_for_model(Wine)
vintage = ContentType.objects.get_for_model(Vintage)
winery = ContentType.objects.get_for_model(Winery)
m = ContentTypeAdmin(ContentType, site)
request = self.factory.get('/', data={SEARCH_VAR: 'tof'})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertCountEqual(cl.queryset, [])
request = self.factory.get('/', data={SEARCH_VAR: 'main'})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertCountEqual(cl.queryset, [vintage, wine, winery])
m = LanguageAdmin(Language, site)
lang_aa = Language.objects.get(iso='aa')
request = self.factory.get('/', data={SEARCH_VAR: 'aa', IS_POPUP_VAR: '1'})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertCountEqual(cl.queryset, [lang_aa])
lang_aa.is_active = False
lang_aa.save()
request = self.factory.get('/', data={SEARCH_VAR: 'aa', IS_POPUP_VAR: '1'})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertCountEqual(cl.queryset, [])
request = self.factory.get('/autocomplete/', data={SEARCH_VAR: 'aa'})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertCountEqual(cl.queryset, [])
def test_delete_qs(self):
request = self.factory.get('/')
request.user = self.superuser
m = TranslatableFieldAdmin(TranslatableField, site)
m.delete_queryset(request, TranslatableField.objects.all())
wine1 = Wine.objects.first()
self.assertNotIsInstance(wine1, TranslationFieldMixin)
def test_response(self):
# TranslatableFieldAdmin
ct = ContentType.objects.get_for_model(Wine)
field = TranslatableField.objects.first()
self.client.force_login(self.superuser)
url = reverse('admin:tof_translatablefield_change', args=(field.pk, ))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.get(url, data={'id_ct': ct.pk})
self.assertEqual(response.json(), {'pk': 7, 'fields': ['title', 'title', 'title', 'title', 'description']}) # WTF?
response = self.client.get(url, data={'id_ct': 999})
self.assertTrue('errors' in response.json())
# TranslationAdmin
wine1 = Wine.objects.first()
wine1.title = 'Wine 1 en'
wine1.save()
trans = Translation.objects.first()
url = reverse('admin:tof_translation_change', args=(trans.pk, ))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.get(url, data={'field_id': field.pk})
url_auto = reverse('admin:main_wine_autocomplete')
self.assertEqual(response.json(), {
'pk': field.content_type.pk,
'url': url_auto,
'text': '',
})
response = self.client.get(url, data={'field_id': field.pk, 'id_obj': wine1.pk})
self.assertEqual(response.json(), {'pk': field.content_type.pk, 'text': str(wine1), 'url': url_auto})
response = self.client.get(url, data={'field_id': 999, 'id_obj': wine1.pk})
self.assertTrue('errors' in response.json())
#
wine = Wine.objects.first()
url = reverse('admin:main_wine_change', args=(wine.pk, ))
response = self.client.get(url)
self.assertContains(response, 'translatable_fields_widget.js')
self.assertContains(response, 'en_id_title_en')
class FieldTests(TestCase):
def test_field(self):
activate('en')
fld = TranslatableFieldFormField()
data = [['en', 'Wine en'], ['de', 'Wine de']]
cmps = fld.clean(data)
self.assertEqual(dict(data), vars(cmps))
with self.assertRaises(ValidationError):
fail_data = data.copy()
fail_data[0] = ['en', '']
cmps = fld.clean(fail_data)
fld.required = False
self.assertEqual('', fld.clean(None))
fld.required = True
fld.require_all_fields = False
cmps = fld.clean(fail_data)
self.assertEqual(dict(fail_data), vars(cmps))
fld.fields[0].required = True
with self.assertRaises(ValidationError):
fld.clean(fail_data)
fld.fields[0].required = False
fld.require_all_fields = True
with self.assertRaises(ValidationError):
cmps = fld.clean(None)
val = TranslatableText()
vars(val).update(dict(data))
with self.assertRaises(ValidationError):
fld.clean(val)
fld.disabled = True
fld.required = False
self.assertEqual('Wine en', fld.clean(val))
fld.required = True
fld.disabled = False
with self.assertRaises(ValidationError):
fail_data[0][1] += '\x00'
fld.clean(fail_data)
| # -*- coding: utf-8 -*-
# @Author: MaxST
# @Date: 2019-11-15 19:17:59
# @Last Modified by: MaxST
# @Last Modified time: 2019-12-15 14:23:41
from django.contrib import admin
from django.contrib.admin.models import LogEntry
from django.contrib.admin.options import IS_POPUP_VAR
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.views.main import SEARCH_VAR
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.management import call_command
from django.db.models import Q
from django.test import TestCase
from django.test.client import RequestFactory
from django.urls import reverse
from django.utils.translation import activate, override
from main.models import Vintage, Wine, Winery
from mixer.backend.django import mixer
from tof.admin import (
ContentTypeAdmin, LanguageAdmin, TranslatableFieldAdmin, TranslationAdmin,
)
from tof.fields import TranslatableFieldFormField
from tof.models import (
Language, TranslatableField, Translation, TranslationFieldMixin,
)
from tof.settings import FALLBACK_LANGUAGES
from tof.utils import TranslatableText
site = admin.AdminSite(name='admin')
site.register(User, UserAdmin)
site.register(ContentType, ContentTypeAdmin)
site.register(Language, LanguageAdmin)
site.register(TranslatableField, TranslatableFieldAdmin)
site.register(Translation, TranslationAdmin)
def create_field(name='title', cls=None):
ct = ContentType.objects.get_for_model(cls or Wine)
fld = TranslatableField.objects.filter(content_type=ct).first()
if not fld:
mixer.blend(TranslatableField, name=name, title=name.title(), content_type=ct)
def clean_model(cls, attr='title'):
if issubclass(cls, TranslationFieldMixin):
for fld in {**cls._meta._field_tof['by_id']}.values():
fld.remove_translation_from_class()
class TranslatableFieldTestCase(TestCase):
@classmethod
def setUpTestData(cls):
clean_model(Wine)
clean_model(LogEntry)
mixer.blend(Wine, title='Wine 1')
def test_save(self):
wine1 = Wine.objects.first()
log = LogEntry.objects.first()
self.assertNotIsInstance(wine1, TranslationFieldMixin)
self.assertNotIsInstance(log, TranslationFieldMixin)
self.assertIsNone(vars(LogEntry._meta).get('_field_tof'))
create_field()
self.assertIsInstance(wine1, TranslationFieldMixin)
self.assertIsNotNone(vars(Wine._meta).get('_field_tof'))
self.assertIsNone(vars(LogEntry._meta).get('_field_tof'))
create_field('change_message', LogEntry)
self.assertIsNotNone(vars(LogEntry._meta).get('_field_tof'))
def test_delete(self):
create_field()
wine1 = Wine.objects.first()
self.assertIsInstance(wine1, TranslationFieldMixin)
fld = TranslatableField.objects.first()
fld.delete()
wine1 = Wine.objects.first()
self.assertNotIsInstance(wine1, TranslationFieldMixin)
self.assertEqual(wine1.title, 'Wine 1')
wine2 = mixer.blend(Wine, title='Wine 2')
self.assertEqual(wine2.title, 'Wine 2')
def test_str(self):
create_field()
fld = TranslatableField.objects.first()
self.assertEqual(str(fld), 'wine|Title')
class TranslationTestCase(TestCase):
@classmethod
def setUpTestData(cls):
clean_model(Wine)
mixer.blend(Wine, title='Wine 1')
create_field()
def test_str(self):
fld = TranslatableField.objects.first()
lang_en = Language.objects.get(iso='en')
new_title = 'Wine 1 en'
wine1 = Wine.objects.first()
self.assertEqual(wine1.title, 'Wine 1')
trans = mixer.blend(Translation, content_object=wine1, field=fld, lang=lang_en, value=new_title)
str_make = f'{wine1}.{fld.name}.{lang_en} = "{new_title}"'
self.assertEqual(str(trans), str_make)
class TranslationFieldMixinTestCase(TestCase):
@classmethod
def setUpTestData(cls):
clean_model(Wine)
mixer.blend(Wine, title='Wine 1')
create_field()
def test_save(self):
wine1 = Wine.objects.first()
title_de = 'Wine 1 de'
title_en = 'Wine 1 en'
title_nl = 'Wine 1 en'
with override('de'):
wine1.title = title_de
wine1.save()
wine1 = Wine.objects.first()
self.assertEqual(wine1.title.de, title_de)
value = TranslatableText()
vars(value).update({'en': title_en, 'nl': title_nl})
wine1.title = value
wine1.save()
wine1 = Wine.objects.first()
self.assertEqual(wine1.title.en, title_en)
self.assertEqual(wine1.title.nl, title_nl)
def test_get(self):
self.assertIsInstance(Wine.title, TranslatableField)
def test_prefetch(self):
wine1 = Wine.objects.first()
wine1.title = f'{wine1.title}'
wine1.save()
with self.assertNumQueries(2):
for wine in Wine.objects.all():
self.assertIsNotNone(wine.title)
mixer.cycle(5).blend(Wine, title=mixer.sequence('Wine {0}'))
with override('en'):
for wine in Wine.objects.all():
wine.title = f'{wine.title} en'
wine.save()
with self.assertNumQueries(2):
for wine in Wine.objects.all():
self.assertIsNotNone(wine.title.en)
class FilterTestCase(TestCase):
@classmethod
def setUpTestData(cls):
clean_model(Wine)
mixer.blend(Wine, title='Wine 1')
create_field()
def test_behavior(self):
wine1 = Wine.objects.first()
title_de = 'Wine 1 de'
title_nl = 'Wine 1 nl'
for title in (title_de, title_nl):
with override(title.split()[-1]):
wine1.title = title
wine1.save()
wine1 = Wine.objects.first()
with override('nl'):
self.assertEqual(str(wine1.title), title_nl)
with override('fr'):
self.assertEqual(str(wine1.title), title_nl)
with override('de'):
serch_wine = Wine.objects.filter(title=title_de).first()
self.assertEqual(title_de, str(serch_wine.title))
serch_wine = Wine.objects.exclude(title=title_de).first()
self.assertIsNone(serch_wine)
serch_wine = Wine.objects.get(title=title_de)
self.assertEqual(title_de, str(serch_wine.title))
serch_wine = Wine.objects.filter(Q(title=title_de)).first()
self.assertEqual(title_de, str(serch_wine.title))
serch_wine = Wine.objects.filter(title=title_nl).first()
self.assertIsNone(serch_wine)
from tof import decorators
decorators.DEFAULT_FILTER_LANGUAGE = '__all__'
serch_wine = Wine.objects.filter(title=title_nl).first()
self.assertEqual(wine1, serch_wine)
decorators.DEFAULT_FILTER_LANGUAGE = 'nl'
serch_wine = Wine.objects.filter(title=title_nl).first()
self.assertEqual(wine1, serch_wine)
decorators.DEFAULT_FILTER_LANGUAGE = ('nl', )
serch_wine = Wine.objects.filter(title=title_nl).first()
self.assertEqual(wine1, serch_wine)
decorators.DEFAULT_FILTER_LANGUAGE = {'de': ('nl', )}
serch_wine = Wine.objects.filter(title=title_nl).first()
self.assertEqual(wine1, serch_wine)
decorators.DEFAULT_FILTER_LANGUAGE = set()
serch_wine = Wine.objects.filter(title=title_de).first()
self.assertEqual(wine1, serch_wine)
class TranslatableTextTestCase(TestCase):
@classmethod
def setUpTestData(cls):
clean_model(Wine)
mixer.blend(Wine, title='Wine 1')
create_field()
def test_common(self):
wine1 = Wine.objects.first()
title_nl = 'Wine 1 nl'
for title in (title_nl, ):
with override(title.split()[-1]):
wine1.title = title
wine1.save()
val = wine1.title
self.assertIsInstance(val, TranslatableText)
self.assertEqual(val, 'Wine 1')
self.assertEqual(val[0], 'W')
self.assertEqual(val + '1', 'Wine 11')
self.assertEqual('1' + val, '1Wine 1')
self.assertEqual(repr(val), f"'{val}'")
self.assertEqual(str(val), val.__html__())
self.assertFalse(hasattr(val, 'resolve_expression'))
self.assertFalse(hasattr(val, 'prepare_database_save'))
FALLBACK_LANGUAGES['aa'] = ('nl', )
with override('aa'):
self.assertEqual(str(val), title_nl)
del wine1.title
self.assertEqual(wine1.title, 'Wine 1')
class Benchmark(TestCase):
def test_benchmark(self):
call_command('benchmark')
class ModelAdminTests(TestCase):
factory = RequestFactory()
@classmethod
def setUpTestData(cls):
activate('en')
cls.superuser = User.objects.create_superuser(username='super', email='<EMAIL>', password='<PASSWORD>')
def setUp(self):
clean_model(Wine)
mixer.blend(Wine, title='Wine 1')
create_field()
self.site = AdminSite()
def test_search_result(self):
wine = ContentType.objects.get_for_model(Wine)
vintage = ContentType.objects.get_for_model(Vintage)
winery = ContentType.objects.get_for_model(Winery)
m = ContentTypeAdmin(ContentType, site)
request = self.factory.get('/', data={SEARCH_VAR: 'tof'})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertCountEqual(cl.queryset, [])
request = self.factory.get('/', data={SEARCH_VAR: 'main'})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertCountEqual(cl.queryset, [vintage, wine, winery])
m = LanguageAdmin(Language, site)
lang_aa = Language.objects.get(iso='aa')
request = self.factory.get('/', data={SEARCH_VAR: 'aa', IS_POPUP_VAR: '1'})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertCountEqual(cl.queryset, [lang_aa])
lang_aa.is_active = False
lang_aa.save()
request = self.factory.get('/', data={SEARCH_VAR: 'aa', IS_POPUP_VAR: '1'})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertCountEqual(cl.queryset, [])
request = self.factory.get('/autocomplete/', data={SEARCH_VAR: 'aa'})
request.user = self.superuser
cl = m.get_changelist_instance(request)
self.assertCountEqual(cl.queryset, [])
def test_delete_qs(self):
request = self.factory.get('/')
request.user = self.superuser
m = TranslatableFieldAdmin(TranslatableField, site)
m.delete_queryset(request, TranslatableField.objects.all())
wine1 = Wine.objects.first()
self.assertNotIsInstance(wine1, TranslationFieldMixin)
def test_response(self):
# TranslatableFieldAdmin
ct = ContentType.objects.get_for_model(Wine)
field = TranslatableField.objects.first()
self.client.force_login(self.superuser)
url = reverse('admin:tof_translatablefield_change', args=(field.pk, ))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.get(url, data={'id_ct': ct.pk})
self.assertEqual(response.json(), {'pk': 7, 'fields': ['title', 'title', 'title', 'title', 'description']}) # WTF?
response = self.client.get(url, data={'id_ct': 999})
self.assertTrue('errors' in response.json())
# TranslationAdmin
wine1 = Wine.objects.first()
wine1.title = 'Wine 1 en'
wine1.save()
trans = Translation.objects.first()
url = reverse('admin:tof_translation_change', args=(trans.pk, ))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.get(url, data={'field_id': field.pk})
url_auto = reverse('admin:main_wine_autocomplete')
self.assertEqual(response.json(), {
'pk': field.content_type.pk,
'url': url_auto,
'text': '',
})
response = self.client.get(url, data={'field_id': field.pk, 'id_obj': wine1.pk})
self.assertEqual(response.json(), {'pk': field.content_type.pk, 'text': str(wine1), 'url': url_auto})
response = self.client.get(url, data={'field_id': 999, 'id_obj': wine1.pk})
self.assertTrue('errors' in response.json())
#
wine = Wine.objects.first()
url = reverse('admin:main_wine_change', args=(wine.pk, ))
response = self.client.get(url)
self.assertContains(response, 'translatable_fields_widget.js')
self.assertContains(response, 'en_id_title_en')
class FieldTests(TestCase):
def test_field(self):
activate('en')
fld = TranslatableFieldFormField()
data = [['en', 'Wine en'], ['de', 'Wine de']]
cmps = fld.clean(data)
self.assertEqual(dict(data), vars(cmps))
with self.assertRaises(ValidationError):
fail_data = data.copy()
fail_data[0] = ['en', '']
cmps = fld.clean(fail_data)
fld.required = False
self.assertEqual('', fld.clean(None))
fld.required = True
fld.require_all_fields = False
cmps = fld.clean(fail_data)
self.assertEqual(dict(fail_data), vars(cmps))
fld.fields[0].required = True
with self.assertRaises(ValidationError):
fld.clean(fail_data)
fld.fields[0].required = False
fld.require_all_fields = True
with self.assertRaises(ValidationError):
cmps = fld.clean(None)
val = TranslatableText()
vars(val).update(dict(data))
with self.assertRaises(ValidationError):
fld.clean(val)
fld.disabled = True
fld.required = False
self.assertEqual('Wine en', fld.clean(val))
fld.required = True
fld.disabled = False
with self.assertRaises(ValidationError):
fail_data[0][1] += '\x00'
fld.clean(fail_data) | en | 0.544509 | # -*- coding: utf-8 -*- # @Author: MaxST # @Date: 2019-11-15 19:17:59 # @Last Modified by: MaxST # @Last Modified time: 2019-12-15 14:23:41 # TranslatableFieldAdmin # WTF? # TranslationAdmin # | 1.746256 | 2 |
Introducao-Python/ex055.py | reglabel/Estudos | 0 | 6623296 | <gh_stars>0
peso_maior = 0.0
peso_menor = 99999.0
for i in range(1, 6):
peso = float(input("Qual é o seu peso (em kg)? "))
if peso > peso_maior:
peso_maior = peso
if peso < peso_menor:
peso_menor = peso
print(f"O maior peso foi {peso_maior}kg e o menor foi {peso_menor}kg.")
| peso_maior = 0.0
peso_menor = 99999.0
for i in range(1, 6):
peso = float(input("Qual é o seu peso (em kg)? "))
if peso > peso_maior:
peso_maior = peso
if peso < peso_menor:
peso_menor = peso
print(f"O maior peso foi {peso_maior}kg e o menor foi {peso_menor}kg.") | none | 1 | 3.662879 | 4 | |
Damerau–Levenshtein_distance__misprints__опечатки/use__pyxdameraulevenshtein/match_two_words.py | gil9red/SimplePyScripts | 117 | 6623297 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# pip install pyxDamerauLevenshtein
# SOURCE: https://github.com/gfairchild/pyxDamerauLevenshtein
def match_two_words(word_1, word_2):
from pyxdameraulevenshtein import damerau_levenshtein_distance
number = damerau_levenshtein_distance(word_1, word_2)
# Считаем что разница в 2 символа и меньше еще нормальная
return number < 3
if __name__ == '__main__':
need_word = 'Привет'
print(match_two_words('Привет', need_word)) # True
print(match_two_words('Првет', need_word)) # True
print(match_two_words('Прывет', need_word)) # True
print(match_two_words('Привед', need_word)) # True
print(match_two_words('Превед', need_word)) # True
print(match_two_words('Преед', need_word)) # False
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# pip install pyxDamerauLevenshtein
# SOURCE: https://github.com/gfairchild/pyxDamerauLevenshtein
def match_two_words(word_1, word_2):
from pyxdameraulevenshtein import damerau_levenshtein_distance
number = damerau_levenshtein_distance(word_1, word_2)
# Считаем что разница в 2 символа и меньше еще нормальная
return number < 3
if __name__ == '__main__':
need_word = 'Привет'
print(match_two_words('Привет', need_word)) # True
print(match_two_words('Првет', need_word)) # True
print(match_two_words('Прывет', need_word)) # True
print(match_two_words('Привед', need_word)) # True
print(match_two_words('Превед', need_word)) # True
print(match_two_words('Преед', need_word)) # False
| ru | 0.537976 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # pip install pyxDamerauLevenshtein # SOURCE: https://github.com/gfairchild/pyxDamerauLevenshtein # Считаем что разница в 2 символа и меньше еще нормальная # True # True # True # True # True # False | 3.152475 | 3 |
negative_cache/util.py | ParikhKadam/google-research | 2 | 6623298 | <reponame>ParikhKadam/google-research
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for negative cache training."""
import tensorflow.compat.v2 as tf
def approximate_top_k_with_indices(negative_scores, k):
"""Approximately mines the top k highest scoreing negatives with indices.
This function groups the negative scores into num_negatives / k groupings and
returns the highest scoring element from each group. It also returns the index
where the selected elements were found in the score matrix.
Args:
negative_scores: A matrix with the scores of the negative elements.
k: The number of negatives to mine.
Returns:
The tuple (top_k_scores, top_k_indices), where top_k_indices describes the
index of the mined elements in the given score matrix.
"""
bs = tf.shape(negative_scores)[0]
num_elem = tf.shape(negative_scores)[1]
batch_indices = tf.range(num_elem)
indices = tf.tile(tf.expand_dims(batch_indices, axis=0), multiples=[bs, 1])
grouped_negative_scores = tf.reshape(negative_scores, [bs * k, -1])
grouped_batch_indices = tf.range(tf.shape(grouped_negative_scores)[0])
grouped_top_k_scores, grouped_top_k_indices = tf.math.top_k(
grouped_negative_scores)
grouped_top_k_indices = tf.squeeze(grouped_top_k_indices, axis=1)
gather_indices = tf.stack([grouped_batch_indices, grouped_top_k_indices],
axis=1)
grouped_indices = tf.reshape(indices, [bs * k, -1])
grouped_top_k_indices = tf.gather_nd(grouped_indices, gather_indices)
top_k_indices = tf.reshape(grouped_top_k_indices, [bs, k])
top_k_scores = tf.reshape(grouped_top_k_scores, [bs, k])
return top_k_scores, top_k_indices
def cross_replica_concat(tensor, axis):
replica_context = tf.distribute.get_replica_context()
return replica_context.all_gather(tensor, axis=axis)
| # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for negative cache training."""
import tensorflow.compat.v2 as tf
def approximate_top_k_with_indices(negative_scores, k):
"""Approximately mines the top k highest scoreing negatives with indices.
This function groups the negative scores into num_negatives / k groupings and
returns the highest scoring element from each group. It also returns the index
where the selected elements were found in the score matrix.
Args:
negative_scores: A matrix with the scores of the negative elements.
k: The number of negatives to mine.
Returns:
The tuple (top_k_scores, top_k_indices), where top_k_indices describes the
index of the mined elements in the given score matrix.
"""
bs = tf.shape(negative_scores)[0]
num_elem = tf.shape(negative_scores)[1]
batch_indices = tf.range(num_elem)
indices = tf.tile(tf.expand_dims(batch_indices, axis=0), multiples=[bs, 1])
grouped_negative_scores = tf.reshape(negative_scores, [bs * k, -1])
grouped_batch_indices = tf.range(tf.shape(grouped_negative_scores)[0])
grouped_top_k_scores, grouped_top_k_indices = tf.math.top_k(
grouped_negative_scores)
grouped_top_k_indices = tf.squeeze(grouped_top_k_indices, axis=1)
gather_indices = tf.stack([grouped_batch_indices, grouped_top_k_indices],
axis=1)
grouped_indices = tf.reshape(indices, [bs * k, -1])
grouped_top_k_indices = tf.gather_nd(grouped_indices, gather_indices)
top_k_indices = tf.reshape(grouped_top_k_indices, [bs, k])
top_k_scores = tf.reshape(grouped_top_k_scores, [bs, k])
return top_k_scores, top_k_indices
def cross_replica_concat(tensor, axis):
replica_context = tf.distribute.get_replica_context()
return replica_context.all_gather(tensor, axis=axis) | en | 0.852787 | # coding=utf-8 # Copyright 2021 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Utilities for negative cache training. Approximately mines the top k highest scoreing negatives with indices. This function groups the negative scores into num_negatives / k groupings and returns the highest scoring element from each group. It also returns the index where the selected elements were found in the score matrix. Args: negative_scores: A matrix with the scores of the negative elements. k: The number of negatives to mine. Returns: The tuple (top_k_scores, top_k_indices), where top_k_indices describes the index of the mined elements in the given score matrix. | 2.817252 | 3 |
20180728/Excel_01.py | bijitchakraborty12/MyProjects01 | 0 | 6623299 | # this is how we write to a file
import csv

# Column headings followed by the student records to persist.
fields = ['Name', 'Branch', 'Year', 'CGPA']
rows = [['Nikhil', 'COE', '2', '9.0'], ['Aditya', 'IT', '2', '9.3']]
filename = 'University_Records.csv'

# 'w' (not the original 'w+') is the correct mode: the file is only written
# here, never read back.  newline='' lets the csv module control line endings
# portably, as the csv docs require.
with open(filename, 'w', newline='') as fn:
    csv_writer = csv.writer(fn)
    csv_writer.writerow(fields)   # header row
    csv_writer.writerows(rows)    # one row per student
| # this is how we write to a file
import csv
fields=['Name','Branch','Year','CGPA']
rows=[['Nikhil','COE','2','9.0'],['Aditya','IT','2','9.3']]
filename='University_Records.csv'
with open(filename,'w+', newline='')as fn:
csvWriter=csv.writer(fn)
csvWriter.writerow(fields)
csvWriter.writerows(rows)
| en | 0.979443 | # this is how we write to a file | 3.647976 | 4 |
dae/dae/variant_annotation/tests/conftest.py | iossifovlab/gpf | 0 | 6623300 | import pytest
from .mocks import ExonMock
from .mocks import ReferenceGenomeMock
from .mocks import AnnotatorMock
@pytest.fixture(scope="session")
def annotator():
    """Session-scoped annotator wired to a mocked reference genome."""
    genome = ReferenceGenomeMock()
    return AnnotatorMock(genome)
@pytest.fixture(scope="session")
def exons():
    """Three mock exons spanning [60,70], [80,90], [100,110] with frames 0-2."""
    regions = [(60, 70), (80, 90), (100, 110)]
    return [ExonMock(start, stop, frame)
            for frame, (start, stop) in enumerate(regions)]
@pytest.fixture(scope="session")
def coding():
    """Coding regions: like the exon fixture but the first region starts at 65."""
    regions = [(65, 70), (80, 90), (100, 110)]
    return [ExonMock(start, stop, frame)
            for frame, (start, stop) in enumerate(regions)]
| import pytest
from .mocks import ExonMock
from .mocks import ReferenceGenomeMock
from .mocks import AnnotatorMock
@pytest.fixture(scope="session")
def annotator():
return AnnotatorMock(ReferenceGenomeMock())
@pytest.fixture(scope="session")
def exons():
return [ExonMock(60, 70, 0), ExonMock(80, 90, 1), ExonMock(100, 110, 2)]
@pytest.fixture(scope="session")
def coding():
return [ExonMock(65, 70, 0), ExonMock(80, 90, 1), ExonMock(100, 110, 2)]
| none | 1 | 2.232456 | 2 | |
src/app.py | abhinavarora/BotServer | 0 | 6623301 | <filename>src/app.py
import tornado.ioloop
import tornado.web
from tornado.web import RequestHandler
import tornado.httpserver
import os
class MainHandler(RequestHandler):
    """Catch-all handler: answers GET with a plain-text greeting."""

    def get(self):
        greeting = "Hello, World"
        self.write(greeting)
class WebHookHandler(RequestHandler):
    """Handles the Facebook-style webhook verification handshake."""

    def get(self):
        """Echo back ``hub.challenge`` when ``hub.verify_token`` matches."""
        # Fetch the token once instead of querying the argument twice.
        token = self.get_query_argument('hub.verify_token')
        # Parenthesised print works under both Python 2 and 3; the original
        # ``print "..."`` statement form is Python-2-only.
        print("Chor" + str(token))
        if token == 'TERA_BAAP_KAUN_HAI_BC':
            self.write(self.get_query_argument('hub.challenge'))
        else:
            self.write("")
def make_app():
    """Build the Tornado application with its URL routing table."""
    routes = [
        (r"/webhook", WebHookHandler),
        (r'/(favicon.ico)', tornado.web.StaticFileHandler, {"path": ""}),
        (r"/*", MainHandler),
    ]
    return tornado.web.Application(routes)
if __name__ == '__main__':
    # Script entry point: build the app and serve it over plain HTTP.
    app = make_app()
    # Disabled TLS setup kept for reference (self-signed certs on port 443).
    '''
    http_server = tornado.httpserver.HTTPServer(app, ssl_options={
        "certfile": "/etc/ssl/certs/apache-selfsigned.crt",
        "keyfile": "/etc/ssl/private/apache-selfsigned.key"
    })
    http_server.listen(443)
    '''
    #app.listen(8888)
    http_server = tornado.httpserver.HTTPServer(app)
    # Port comes from the environment (e.g. Heroku's $PORT), defaulting to 5000.
    port = int(os.environ.get("PORT", 5000))
    http_server.listen(port)
    # Blocks forever servicing requests.
    tornado.ioloop.IOLoop.current().start()
| <filename>src/app.py
import tornado.ioloop
import tornado.web
from tornado.web import RequestHandler
import tornado.httpserver
import os
class MainHandler(RequestHandler):
def get(self):
self.write("Hello, World")
class WebHookHandler(RequestHandler):
def get(self):
print "Chor" + str(self.get_query_argument('hub.verify_token'))
if self.get_query_argument('hub.verify_token') == 'TERA_BAAP_KAUN_HAI_BC':
self.write(self.get_query_argument('hub.challenge'))
else:
self.write("")
def make_app():
return tornado.web.Application([(r"/webhook",WebHookHandler)
,(r'/(favicon.ico)', tornado.web.StaticFileHandler, {"path": ""}),(r"/*",MainHandler)])
if __name__ == '__main__':
app = make_app()
'''
http_server = tornado.httpserver.HTTPServer(app, ssl_options={
"certfile": "/etc/ssl/certs/apache-selfsigned.crt",
"keyfile": "/etc/ssl/private/apache-selfsigned.key"
})
http_server.listen(443)
'''
#app.listen(8888)
http_server = tornado.httpserver.HTTPServer(app)
port = int(os.environ.get("PORT", 5000))
http_server.listen(port)
tornado.ioloop.IOLoop.current().start()
| en | 0.485869 | http_server = tornado.httpserver.HTTPServer(app, ssl_options={ "certfile": "/etc/ssl/certs/apache-selfsigned.crt", "keyfile": "/etc/ssl/private/apache-selfsigned.key" }) http_server.listen(443) #app.listen(8888) | 2.583603 | 3 |
python/src/hackerrank/2020/nested_logic_book_fines.py | ccampo133/coding-challenges | 0 | 6623302 | <reponame>ccampo133/coding-challenges
# https://www.hackerrank.com/challenges/30-nested-logic/problem
# Can use the date class for this, but it somewhat defeats the spirit of the problem.
def calc_fine(d_ex: int, m_ex: int, y_ex: int, d_act: int, m_act: int, y_act: int) -> int:
    """Return the library fine for a book due (d_ex, m_ex, y_ex) and
    returned (d_act, m_act, y_act).

    Rules: returned in a later year -> flat 10000; same year but later
    month -> 500 per month late; same month but later day -> 15 per day
    late; on time or early -> 0.
    """
    if y_act > y_ex:
        return 10000
    if y_act < y_ex:
        return 0
    # Same year from here on.
    if m_act > m_ex:
        return 500 * (m_act - m_ex)
    if m_act < m_ex:
        return 0
    # Same month: fine only if returned after the due day.
    if d_act > d_ex:
        return 15 * (d_act - d_ex)
    return 0
if __name__ == '__main__':
    # First stdin line: actual return date; second line: expected (due) date.
    # Each line is "day month year" as space-separated integers.
    d_act, m_act, y_act = map(int, input().split(' '))
    d_ex, m_ex, y_ex = map(int, input().split(' '))
    # d_act, m_act, y_act = 9, 6, 2015
    # d_ex, m_ex, y_ex = 6, 6, 2015
    print(calc_fine(d_ex, m_ex, y_ex, d_act, m_act, y_act))
| # https://www.hackerrank.com/challenges/30-nested-logic/problem
# Can use the date class for this, but it somewhat defeats the spirit of the problem.
def calc_fine(d_ex: int, m_ex: int, y_ex: int, d_act: int, m_act: int, y_act: int) -> int:
if y_act < y_ex:
return 0
if y_act == y_ex:
if m_act < m_ex:
return 0
if m_act == m_ex:
if d_act <= d_ex:
return 0
return 15 * (d_act - d_ex)
return 500 * (m_act - m_ex)
return 10000
if __name__ == '__main__':
d_act, m_act, y_act = map(int, input().split(' '))
d_ex, m_ex, y_ex = map(int, input().split(' '))
# d_act, m_act, y_act = 9, 6, 2015
# d_ex, m_ex, y_ex = 6, 6, 2015
print(calc_fine(d_ex, m_ex, y_ex, d_act, m_act, y_act)) | en | 0.878845 | # https://www.hackerrank.com/challenges/30-nested-logic/problem # Can use the date class for this, but it somewhat defeats the spirit of the problem. # d_act, m_act, y_act = 9, 6, 2015 # d_ex, m_ex, y_ex = 6, 6, 2015 | 3.213183 | 3 |
tinystomp_test.py | dw/tinystomp | 2 | 6623303 | # The MIT License (MIT)
#
# Copyright (c) 2016, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import collections
import unittest
import mock
import tinystomp
class ErrorTest(unittest.TestCase):
    """The library's base Error exception is constructible."""

    def test_constructor(self):
        # Constructing the exception must not raise.
        tinystomp.Error()
class ProtocolErrorTest(unittest.TestCase):
    """ProtocolError is constructible."""

    def test_constructor(self):
        # Constructing the exception must not raise.
        tinystomp.ProtocolError()
class FrameTest(unittest.TestCase):
    """Frame construction defaults and repr formatting."""

    def test_constructor(self):
        f = tinystomp.Frame('cmd')
        assert 'cmd' == f.command
        assert {} == f.headers

    def test_repr(self):
        # No body, no headers.
        f = tinystomp.Frame('cmd')
        assert "<cmd None {\n \n}>" == repr(f)

    def test_repr_headers(self):
        # Headers are rendered one per line inside the braces.
        f = tinystomp.Frame('cmd')
        f.headers['a'] = 'b'
        assert "<cmd None {\n a b\n}>" == repr(f)
class ParseUrlTest(unittest.TestCase):
    """parse_url() splits a tcp:// URL into a (host, port) pair."""

    def test_parse_url(self):
        h, p = tinystomp.parse_url('tcp://host:1234/')
        assert h == 'host'
        assert p == 1234
class SplitFrameTest(unittest.TestCase):
    """split_frame() line splitting: returns (end offset, iterator of lines),
    skipping paired leading EOLs before the frame proper."""

    def func(self, *args, **kwargs):
        # Helper: materialise the returned iterator so tests can compare lists.
        end, it = tinystomp.split_frame(*args, **kwargs)
        return end, list(it)

    def test_empty(self):
        assert (0, []) == self.func('', 0, 0)

    def test_oob(self):
        # Start offset at/after the limit yields nothing.
        assert (0, []) == self.func('dave\n', 5, 0)

    def test_offset(self):
        # A non-zero start offset trims the first line.
        assert (6, ['ave']) == self.func('dave\n\n', 1, 6)

    def test_several(self):
        s = '\ndave\ndave\n\n'
        assert self.func(s, 0, len(s)) == (12, [
            '',
            'dave',
            'dave',
        ])

    def test_prefix_eol_pairs_odd(self):
        s = '\n\r\n\r\n\n\r\ndave\n\n'
        assert self.func(s, 0, len(s)) == (14, ['', 'dave'])

    def test_prefix_eol_pairs_even(self):
        s = '\n\n\r\n\r\n\n\r\ndave\n\n'
        assert self.func(s, 0, len(s)) == (15, ['dave'])
class FormatTest(unittest.TestCase):
    """_format() wire encoding, plus parse round-trips of every frame
    builder (connect/send/subscribe/ack/... helpers)."""

    def test_nobody_noheaders(self):
        s = tinystomp._format('cmd', None, {})
        assert 'cmd\n\n\x00' == s

    def test_nobody_headers(self):
        s = tinystomp._format('cmd', None, {
            'a': 'b',
        })
        assert 'cmd\na:b\n\n\x00' == s

    def test_body_headers(self):
        # A body implies an automatic content-length header.
        s = tinystomp._format('cmd', 'dave', {
            'a': 'b',
        })
        assert s == (
            'cmd\n'
            'content-length:4\n'
            'a:b\n'
            '\n'
            'dave'
            '\x00'
        )

    def assertParse(self, s, cmd, body, headers):
        # Helper: feed the wire string through a Parser and compare the frame.
        p = tinystomp.Parser()
        p.receive(s)
        f = p.next()
        assert f.command == cmd
        assert f.body == body
        assert sorted(f.headers.items()) == sorted(headers.items())

    def test_connect(self):
        s = tinystomp.connect('localhost', a='b')
        self.assertParse(s, 'CONNECT', '', {
            'a': 'b',
            'accept-version': '1.0,1.1,1.2',
            'host': 'localhost'
        })

    def test_send_nobody(self):
        s = tinystomp.send('/foo/bar', a='b')
        self.assertParse(s, 'SEND', '', {
            'a': 'b',
            'destination': '/foo/bar'
        })

    def test_send_body(self):
        s = tinystomp.send('/foo/bar', 'dave', a='b')
        self.assertParse(s, 'SEND', 'dave', {
            'a': 'b',
            'content-length': '4',
            'destination': '/foo/bar'
        })

    def test_subscribe(self):
        s = tinystomp.subscribe('/foo/bar', id=123, a='b')
        self.assertParse(s, 'SUBSCRIBE', '', {
            'a': 'b',
            'destination': '/foo/bar',
            'id': '123',
        })

    def test_unsubscribe(self):
        s = tinystomp.unsubscribe('/foo/bar', 123, a='b')
        self.assertParse(s, 'UNSUBSCRIBE', '', {
            'a': 'b',
            'destination': '/foo/bar',
            'id': '123',
        })

    def test_ack(self):
        s = tinystomp.ack('123', a='b')
        self.assertParse(s, 'ACK', '', {
            'a': 'b',
            'id': '123',
        })

    def test_nack(self):
        s = tinystomp.nack('123', a='b')
        self.assertParse(s, 'NACK', '', {
            'a': 'b',
            'id': '123',
        })

    def test_begin(self):
        s = tinystomp.begin('123', a='b')
        self.assertParse(s, 'BEGIN', '', {
            'a': 'b',
            'transaction': '123',
        })

    def test_commit(self):
        s = tinystomp.commit('123', a='b')
        self.assertParse(s, 'COMMIT', '', {
            'a': 'b',
            'transaction': '123',
        })

    def test_abort(self):
        s = tinystomp.abort('123', a='b')
        self.assertParse(s, 'ABORT', '', {
            'a': 'b',
            'transaction': '123',
        })

    def test_disconnect(self):
        s = tinystomp.disconnect('123', a='b')
        self.assertParse(s, 'DISCONNECT', '', {
            'a': 'b',
            'receipt': '123',
        })
class ParserTest(unittest.TestCase):
    """A fresh Parser starts with an empty buffer and no queued frames."""

    def test_constructor(self):
        p = tinystomp.Parser()
        assert p.s == ''
        assert p.frames == collections.deque()
        assert p.frame_eof is None
class ParserComplianceTest(unittest.TestCase):
    """STOMP specification compliance details of the parser."""

    def test_dup_headers_preserve_first(self):
        # STOMP 1.2 "Repeated Header Entries" requires only the first header is
        # preserved.
        p = tinystomp.Parser()
        p.receive('SEND\r\n'
                  'key:value1\r\n'
                  'key:value2\r\n'
                  '\r\n'
                  '\x00')
        f = p.next()
        assert f.headers['key'] == 'value1'
class ParserReceiveTest(unittest.TestCase):
    """Incremental feeding of wire data into Parser.receive(): single and
    repeated frames, tolerated leading EOL runs, and partial delivery split
    at every interesting boundary (verb, EOLs, header, body)."""

    def test_empty_str(self):
        p = tinystomp.Parser()
        p.receive('')
        assert not p.can_read()

    def test_one_nobody(self):
        p = tinystomp.Parser()
        p.receive(tinystomp.connect('host'))
        assert p.can_read()
        f = p.next()
        assert f.command == 'CONNECT'
        assert f.body == ''
        assert f.headers == {
            'accept-version': '1.0,1.1,1.2',
            'host': 'host'
        }

    def test_one_body(self):
        p = tinystomp.Parser()
        p.receive(tinystomp.send('/foo/bar', 'dave', a='b'))
        assert p.can_read()
        f = p.next()
        assert f.command == 'SEND'
        assert f.body == 'dave'
        assert f.headers == {
            'a': 'b',
            'content-length': '4',
            'destination': '/foo/bar',
        }

    def test_one_ignore_eol(self):
        # EOL runs around a frame are tolerated and consumed.
        p = tinystomp.Parser()
        eols = '\n\r\n\n'
        p.receive(eols + tinystomp.send('/foo/bar', 'dave', a='b') + eols)
        assert p.can_read()
        f = p.next()
        assert f.command == 'SEND'
        assert f.body == 'dave'
        assert f.headers == {
            'a': 'b',
            'content-length': '4',
            'destination': '/foo/bar',
        }
        assert not p.can_read()

    def test_two_nobody(self):
        p = tinystomp.Parser()
        p.receive(tinystomp.connect('host') * 2)
        for x in range(2):
            assert p.can_read()
            f = p.next()
            assert f.command == 'CONNECT'
            assert f.body == ''
            assert f.headers == {
                'accept-version': '1.0,1.1,1.2',
                'host': 'host'
            }
        assert not p.can_read()

    def test_two_body(self):
        p = tinystomp.Parser()
        p.receive(tinystomp.send('/foo/bar', 'dave', a='b') * 2)
        for x in range(2):
            assert p.can_read()
            f = p.next()
            assert f.command == 'SEND'
            assert f.body == 'dave'
            assert f.headers == {
                'a': 'b',
                'content-length': '4',
                'destination': '/foo/bar',
            }

    def test_two_ignore_eol(self):
        p = tinystomp.Parser()
        eols = '\n\r\n\n'
        p.receive((eols+tinystomp.send('/foo/bar', 'dave', a='b')+eols) * 2)
        for x in range(2):
            assert p.can_read()
            f = p.next()
            assert f.command == 'SEND'
            assert f.body == 'dave'
            assert f.headers == {
                'a': 'b',
                'content-length': '4',
                'destination': '/foo/bar',
            }
        assert not p.can_read()

    def test_one_partial_inverb(self):
        # Delivery split inside the command verb.
        p = tinystomp.Parser()
        eols = '\n\r\n\n'
        s = eols + tinystomp.send('/foo/bar', 'dave', a='b') + eols
        p.receive(s[:6])
        assert not p.can_read()
        p.receive(s[6:])
        assert p.can_read()
        f = p.next()
        assert f.command == 'SEND'
        assert f.body == 'dave'
        assert f.headers == {
            'a': 'b',
            'content-length': '4',
            'destination': '/foo/bar',
        }

    def test_one_partial_ineols(self):
        # Delivery split inside the leading EOL run.
        p = tinystomp.Parser()
        eols = '\n\r\n\n'
        s = eols + tinystomp.send('/foo/bar', 'dave', a='b') + eols
        p.receive(s[:3])
        assert not p.can_read()
        p.receive(s[3:])
        assert p.can_read()
        f = p.next()
        assert f.command == 'SEND'
        assert f.body == 'dave'
        assert f.headers == {
            'a': 'b',
            'content-length': '4',
            'destination': '/foo/bar',
        }

    def test_one_partial_inheader(self):
        # Delivery split inside a header line.
        p = tinystomp.Parser()
        eols = '\n\r\n\n'
        s = eols + tinystomp.send('/foo/bar', 'dave', a='b') + eols
        p.receive(s[:12])
        assert not p.can_read()
        p.receive(s[12:])
        assert p.can_read()
        f = p.next()
        assert f.command == 'SEND'
        assert f.body == 'dave'
        assert f.headers == {
            'a': 'b',
            'content-length': '4',
            'destination': '/foo/bar',
        }

    def test_one_partial_inbody(self):
        # Delivery split inside a large body.
        p = tinystomp.Parser()
        eols = '\n\r\n\n'
        s = eols + tinystomp.send('/foo/bar', 'dave'*2000, a='b') + eols
        p.receive(s[:150])
        assert not p.can_read()
        p.receive(s[150:])
        assert p.can_read()
        f = p.next()
        assert f.command == 'SEND'
        assert f.body == 'dave'*2000
        assert f.headers == {
            'a': 'b',
            'content-length': '8000',
            'destination': '/foo/bar',
        }
class ClientTest(unittest.TestCase):
    """Client construction, the CONNECT handshake, and attribute dispatch
    of frame-builder helpers through __getattr__."""

    def test_constructor(self):
        c = tinystomp.Client('host', 1234, 'login', 'passcode')
        assert c.host == 'host'
        assert c.port == 1234
        assert c.login == 'login'
        # Fixed: must match the 'passcode' argument passed above; the literal
        # had been mangled into a '<PASSWORD>' placeholder.
        assert c.passcode == 'passcode'
        assert isinstance(c.parser, tinystomp.Parser)

    def test_from_url(self):
        c = tinystomp.Client.from_url('tcp://host:1234/')
        assert isinstance(c, tinystomp.Client)
        assert c.host == 'host'
        assert c.port == 1234

    @mock.patch('socket.socket')
    def test_connect(self, sock):
        # connect() must open the TCP connection then send a CONNECT frame.
        c = tinystomp.Client.from_url('tcp://host:1234/')
        c.connect()
        assert sock.mock_calls == [
            mock.call(),
            mock.call().connect(('host', 1234)),
            mock.call().send(tinystomp.connect('host')),
        ]

    @mock.patch('socket.socket')
    def test_next(self, sock):
        # First recv yields a frame; the empty recv (peer close) must raise.
        sock.return_value = mock.Mock(
            recv=mock.Mock(side_effect=[tinystomp.connect('host'), '']))
        c = tinystomp.Client.from_url('tcp://host:1234/')
        c.connect()
        f = c.next()
        assert f.command == 'CONNECT'
        self.assertRaises(tinystomp.ProtocolError, c.next)

    def test_getattr_absent(self):
        c = tinystomp.Client.from_url('tcp://host:1234/')
        self.assertRaises(AttributeError, lambda: c.pants)

    @mock.patch('socket.socket')
    def test_getattr_present(self, sock):
        # Unknown attributes matching frame builders are sent over the socket.
        c = tinystomp.Client.from_url('tcp://host:1234/')
        c.connect()
        c.send('/foo/bar/', 'a', a='b')
        assert sock.mock_calls == [
            mock.call(),
            mock.call().connect(('host', 1234)),
            mock.call().send(tinystomp.connect('host')),
            mock.call().send(tinystomp.send('/foo/bar/', 'a', a='b')),
        ]
| # The MIT License (MIT)
#
# Copyright (c) 2016, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import collections
import unittest
import mock
import tinystomp
class ErrorTest(unittest.TestCase):
def test_constructor(self):
tinystomp.Error()
class ProtocolErrorTest(unittest.TestCase):
def test_constructor(self):
tinystomp.ProtocolError()
class FrameTest(unittest.TestCase):
def test_constructor(self):
f = tinystomp.Frame('cmd')
assert 'cmd' == f.command
assert {} == f.headers
def test_repr(self):
f = tinystomp.Frame('cmd')
assert "<cmd None {\n \n}>" == repr(f)
def test_repr_headers(self):
f = tinystomp.Frame('cmd')
f.headers['a'] = 'b'
assert "<cmd None {\n a b\n}>" == repr(f)
class ParseUrlTest(unittest.TestCase):
def test_parse_url(self):
h, p = tinystomp.parse_url('tcp://host:1234/')
assert h == 'host'
assert p == 1234
class SplitFrameTest(unittest.TestCase):
def func(self, *args, **kwargs):
end, it = tinystomp.split_frame(*args, **kwargs)
return end, list(it)
def test_empty(self):
assert (0, []) == self.func('', 0, 0)
def test_oob(self):
assert (0, []) == self.func('dave\n', 5, 0)
def test_offset(self):
assert (6, ['ave']) == self.func('dave\n\n', 1, 6)
def test_several(self):
s = '\ndave\ndave\n\n'
assert self.func(s, 0, len(s)) == (12, [
'',
'dave',
'dave',
])
def test_prefix_eol_pairs_odd(self):
s = '\n\r\n\r\n\n\r\ndave\n\n'
assert self.func(s, 0, len(s)) == (14, ['', 'dave'])
def test_prefix_eol_pairs_even(self):
s = '\n\n\r\n\r\n\n\r\ndave\n\n'
assert self.func(s, 0, len(s)) == (15, ['dave'])
class FormatTest(unittest.TestCase):
def test_nobody_noheaders(self):
s = tinystomp._format('cmd', None, {})
assert 'cmd\n\n\x00' == s
def test_nobody_headers(self):
s = tinystomp._format('cmd', None, {
'a': 'b',
})
assert 'cmd\na:b\n\n\x00' == s
def test_body_headers(self):
s = tinystomp._format('cmd', 'dave', {
'a': 'b',
})
assert s == (
'cmd\n'
'content-length:4\n'
'a:b\n'
'\n'
'dave'
'\x00'
)
def assertParse(self, s, cmd, body, headers):
p = tinystomp.Parser()
p.receive(s)
f = p.next()
assert f.command == cmd
assert f.body == body
assert sorted(f.headers.items()) == sorted(headers.items())
def test_connect(self):
s = tinystomp.connect('localhost', a='b')
self.assertParse(s, 'CONNECT', '', {
'a': 'b',
'accept-version': '1.0,1.1,1.2',
'host': 'localhost'
})
def test_send_nobody(self):
s = tinystomp.send('/foo/bar', a='b')
self.assertParse(s, 'SEND', '', {
'a': 'b',
'destination': '/foo/bar'
})
def test_send_body(self):
s = tinystomp.send('/foo/bar', 'dave', a='b')
self.assertParse(s, 'SEND', 'dave', {
'a': 'b',
'content-length': '4',
'destination': '/foo/bar'
})
def test_subscribe(self):
s = tinystomp.subscribe('/foo/bar', id=123, a='b')
self.assertParse(s, 'SUBSCRIBE', '', {
'a': 'b',
'destination': '/foo/bar',
'id': '123',
})
def test_unsubscribe(self):
s = tinystomp.unsubscribe('/foo/bar', 123, a='b')
self.assertParse(s, 'UNSUBSCRIBE', '', {
'a': 'b',
'destination': '/foo/bar',
'id': '123',
})
def test_ack(self):
s = tinystomp.ack('123', a='b')
self.assertParse(s, 'ACK', '', {
'a': 'b',
'id': '123',
})
def test_nack(self):
s = tinystomp.nack('123', a='b')
self.assertParse(s, 'NACK', '', {
'a': 'b',
'id': '123',
})
def test_begin(self):
s = tinystomp.begin('123', a='b')
self.assertParse(s, 'BEGIN', '', {
'a': 'b',
'transaction': '123',
})
def test_commit(self):
s = tinystomp.commit('123', a='b')
self.assertParse(s, 'COMMIT', '', {
'a': 'b',
'transaction': '123',
})
def test_abort(self):
s = tinystomp.abort('123', a='b')
self.assertParse(s, 'ABORT', '', {
'a': 'b',
'transaction': '123',
})
def test_disconnect(self):
s = tinystomp.disconnect('123', a='b')
self.assertParse(s, 'DISCONNECT', '', {
'a': 'b',
'receipt': '123',
})
class ParserTest(unittest.TestCase):
def test_constructor(self):
p = tinystomp.Parser()
assert p.s == ''
assert p.frames == collections.deque()
assert p.frame_eof is None
class ParserComplianceTest(unittest.TestCase):
def test_dup_headers_preserve_first(self):
# STOMP 1.2 "Repeated Header Entries" requires only the first header is
# preserved.
p = tinystomp.Parser()
p.receive('SEND\r\n'
'key:value1\r\n'
'key:value2\r\n'
'\r\n'
'\x00')
f = p.next()
assert f.headers['key'] == 'value1'
class ParserReceiveTest(unittest.TestCase):
def test_empty_str(self):
p = tinystomp.Parser()
p.receive('')
assert not p.can_read()
def test_one_nobody(self):
p = tinystomp.Parser()
p.receive(tinystomp.connect('host'))
assert p.can_read()
f = p.next()
assert f.command == 'CONNECT'
assert f.body == ''
assert f.headers == {
'accept-version': '1.0,1.1,1.2',
'host': 'host'
}
def test_one_body(self):
p = tinystomp.Parser()
p.receive(tinystomp.send('/foo/bar', 'dave', a='b'))
assert p.can_read()
f = p.next()
assert f.command == 'SEND'
assert f.body == 'dave'
assert f.headers == {
'a': 'b',
'content-length': '4',
'destination': '/foo/bar',
}
def test_one_ignore_eol(self):
p = tinystomp.Parser()
eols = '\n\r\n\n'
p.receive(eols + tinystomp.send('/foo/bar', 'dave', a='b') + eols)
assert p.can_read()
f = p.next()
assert f.command == 'SEND'
assert f.body == 'dave'
assert f.headers == {
'a': 'b',
'content-length': '4',
'destination': '/foo/bar',
}
assert not p.can_read()
def test_two_nobody(self):
p = tinystomp.Parser()
p.receive(tinystomp.connect('host') * 2)
for x in range(2):
assert p.can_read()
f = p.next()
assert f.command == 'CONNECT'
assert f.body == ''
assert f.headers == {
'accept-version': '1.0,1.1,1.2',
'host': 'host'
}
assert not p.can_read()
def test_two_body(self):
p = tinystomp.Parser()
p.receive(tinystomp.send('/foo/bar', 'dave', a='b') * 2)
for x in range(2):
assert p.can_read()
f = p.next()
assert f.command == 'SEND'
assert f.body == 'dave'
assert f.headers == {
'a': 'b',
'content-length': '4',
'destination': '/foo/bar',
}
def test_two_ignore_eol(self):
p = tinystomp.Parser()
eols = '\n\r\n\n'
p.receive((eols+tinystomp.send('/foo/bar', 'dave', a='b')+eols) * 2)
for x in range(2):
assert p.can_read()
f = p.next()
assert f.command == 'SEND'
assert f.body == 'dave'
assert f.headers == {
'a': 'b',
'content-length': '4',
'destination': '/foo/bar',
}
assert not p.can_read()
def test_one_partial_inverb(self):
p = tinystomp.Parser()
eols = '\n\r\n\n'
s = eols + tinystomp.send('/foo/bar', 'dave', a='b') + eols
p.receive(s[:6])
assert not p.can_read()
p.receive(s[6:])
assert p.can_read()
f = p.next()
assert f.command == 'SEND'
assert f.body == 'dave'
assert f.headers == {
'a': 'b',
'content-length': '4',
'destination': '/foo/bar',
}
def test_one_partial_ineols(self):
p = tinystomp.Parser()
eols = '\n\r\n\n'
s = eols + tinystomp.send('/foo/bar', 'dave', a='b') + eols
p.receive(s[:3])
assert not p.can_read()
p.receive(s[3:])
assert p.can_read()
f = p.next()
assert f.command == 'SEND'
assert f.body == 'dave'
assert f.headers == {
'a': 'b',
'content-length': '4',
'destination': '/foo/bar',
}
def test_one_partial_inheader(self):
p = tinystomp.Parser()
eols = '\n\r\n\n'
s = eols + tinystomp.send('/foo/bar', 'dave', a='b') + eols
p.receive(s[:12])
assert not p.can_read()
p.receive(s[12:])
assert p.can_read()
f = p.next()
assert f.command == 'SEND'
assert f.body == 'dave'
assert f.headers == {
'a': 'b',
'content-length': '4',
'destination': '/foo/bar',
}
def test_one_partial_inbody(self):
p = tinystomp.Parser()
eols = '\n\r\n\n'
s = eols + tinystomp.send('/foo/bar', 'dave'*2000, a='b') + eols
p.receive(s[:150])
assert not p.can_read()
p.receive(s[150:])
assert p.can_read()
f = p.next()
assert f.command == 'SEND'
assert f.body == 'dave'*2000
assert f.headers == {
'a': 'b',
'content-length': '8000',
'destination': '/foo/bar',
}
class ClientTest(unittest.TestCase):
def test_constructor(self):
c = tinystomp.Client('host', 1234, 'login', 'passcode')
assert c.host == 'host'
assert c.port == 1234
assert c.login == 'login'
assert c.passcode == '<PASSWORD>'
assert isinstance(c.parser, tinystomp.Parser)
def test_from_url(self):
c = tinystomp.Client.from_url('tcp://host:1234/')
assert isinstance(c, tinystomp.Client)
assert c.host == 'host'
assert c.port == 1234
@mock.patch('socket.socket')
def test_connect(self, sock):
c = tinystomp.Client.from_url('tcp://host:1234/')
c.connect()
assert sock.mock_calls == [
mock.call(),
mock.call().connect(('host', 1234)),
mock.call().send(tinystomp.connect('host')),
]
@mock.patch('socket.socket')
def test_next(self, sock):
sock.return_value = mock.Mock(
recv=mock.Mock(side_effect=[tinystomp.connect('host'), '']))
c = tinystomp.Client.from_url('tcp://host:1234/')
c.connect()
f = c.next()
assert f.command == 'CONNECT'
self.assertRaises(tinystomp.ProtocolError, c.next)
def test_getattr_absent(self):
c = tinystomp.Client.from_url('tcp://host:1234/')
self.assertRaises(AttributeError, lambda: c.pants)
@mock.patch('socket.socket')
def test_getattr_present(self, sock):
c = tinystomp.Client.from_url('tcp://host:1234/')
c.connect()
c.send('/foo/bar/', 'a', a='b')
assert sock.mock_calls == [
mock.call(),
mock.call().connect(('host', 1234)),
mock.call().send(tinystomp.connect('host')),
mock.call().send(tinystomp.send('/foo/bar/', 'a', a='b')),
]
| en | 0.781436 | # The MIT License (MIT) # # Copyright (c) 2016, <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # STOMP 1.2 "Repeated Header Entries" requires only the first header is # preserved. | 2.190526 | 2 |
procedural_world/perlin_noise_wrapper/perlin_noise_wrapper.py | mikhaildruzhinin/procedural-world | 0 | 6623304 | <gh_stars>0
from perlin_noise import PerlinNoise
from procedural_world.config import (
PERLIN_NOISE_AMPLITUDE,
PERLIN_NOISE_FREQUENCY,
PERLIN_NOISE_OCTAVES,
PERLIN_NOISE_SEED,
)
class PerlinNoiseWrapper:
    """Thin adapter that samples terrain heights from a Perlin noise field."""

    def __init__(self):
        # One generator instance is reused for every height query.
        self.perlin_noise = PerlinNoise(
            octaves=PERLIN_NOISE_OCTAVES,
            seed=PERLIN_NOISE_SEED,
        )

    def get_height(
        self,
        x: int,
        z: int,
    ) -> int:
        """Sample the noise field at world coordinates (x, z), scaled by the
        configured amplitude.

        NOTE(review): PerlinNoise typically yields floats, so the declared
        ``int`` return type may be inaccurate -- confirm against callers.
        """
        sample_point = [x / PERLIN_NOISE_FREQUENCY, z / PERLIN_NOISE_FREQUENCY]
        return self.perlin_noise(sample_point) * PERLIN_NOISE_AMPLITUDE
| from perlin_noise import PerlinNoise
from procedural_world.config import (
PERLIN_NOISE_AMPLITUDE,
PERLIN_NOISE_FREQUENCY,
PERLIN_NOISE_OCTAVES,
PERLIN_NOISE_SEED,
)
class PerlinNoiseWrapper:
def __init__(self):
self.perlin_noise = PerlinNoise(
octaves=PERLIN_NOISE_OCTAVES,
seed=PERLIN_NOISE_SEED,
)
def get_height(
self,
x: int,
z: int,
) -> int:
return self.perlin_noise(
[x / PERLIN_NOISE_FREQUENCY, z / PERLIN_NOISE_FREQUENCY]
) * PERLIN_NOISE_AMPLITUDE | none | 1 | 2.640416 | 3 | |
2017/Q1.py | s-cork/BIO | 0 | 6623305 | <filename>2017/Q1.py<gh_stars>0
def decide_letter(pair):
    """Return the colour that sits below *pair* (two of 'R', 'G', 'B').

    Identical letters propagate themselves; two different letters produce
    the third colour.
    """
    if pair[0] == pair[1]:
        return pair[0]
    return next(colour for colour in 'RGB' if colour not in pair)


def Triangle(row):
    """Collapse *row* pair-by-pair until one colour remains.

    Expects a string of fewer than 10 uppercase Rs, Gs and Bs and returns
    the final (single-character) line of the triangle.
    """
    current = row
    while len(current) > 1:
        current = ''.join(
            decide_letter(left + right)
            for left, right in zip(current, current[1:])
        )
    return current
# Read the starting row from stdin (case-insensitive) and print the
# single letter the triangle collapses to.
start_row = input().upper()
print(Triangle(start_row))
def decide_letter(pair):
'''expects a pair of letters and decides which letter will go below the pair'''
if len(set(pair)) == 1:
return next(let for let in 'RGB' if let in pair)
else:
return next(let for let in 'RGB' if let not in pair)
def Triangle(row):
'''expects a str of length less than 10
consisting of uppercase Rs Gs and Bs
returns the final line of the triangle
'''
if len(row) == 1:
return row
next_row = ''
for i in range(len(row)-1):
next_row += decide_letter(row[i:i+2])
return Triangle(next_row)
start_row = input().upper()
print(Triangle(start_row)) | en | 0.853329 | expects a pair of letters and decides which letter will go below the pair expects a str of length less than 10 consisting of uppercase Rs Gs and Bs returns the final line of the triangle | 3.892538 | 4 |
migrations/postgres_versions/2021-03-01_0f8d24a830d0_rename_new_value_to_previous_value.py | debrief/pepys-import | 4 | 6623306 | """Rename new_value to previous_value
Revision ID: 0f8d24a830d0
Revises: <PASSWORD>
Create Date: 2021-03-01 10:09:44.140941+00:00
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "0f8d24a830d0"
down_revision = "80bc8b0d199b"
branch_labels = None
depends_on = None
def upgrade():
    """Add nullable Logs.previous_value and drop Logs.new_value (pepys schema)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column(
        "Logs", sa.Column("previous_value", sa.String(length=150), nullable=True), schema="pepys"
    )
    # NOTE: dropping the column discards any data stored in new_value.
    op.drop_column("Logs", "new_value", schema="pepys")
    # ### end Alembic commands ###
def downgrade():
    """Reverse of upgrade(): restore Logs.new_value and drop previous_value."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column(
        "Logs",
        sa.Column("new_value", sa.VARCHAR(length=150), autoincrement=False, nullable=True),
        schema="pepys",
    )
    # NOTE: data written to previous_value is lost on downgrade.
    op.drop_column("Logs", "previous_value", schema="pepys")
    # ### end Alembic commands ###
| """Rename new_value to previous_value
Revision ID: 0f8d24a830d0
Revises: <PASSWORD>
Create Date: 2021-03-01 10:09:44.140941+00:00
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "0f8d24a830d0"
down_revision = "80bc8b0d199b"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"Logs", sa.Column("previous_value", sa.String(length=150), nullable=True), schema="pepys"
)
op.drop_column("Logs", "new_value", schema="pepys")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"Logs",
sa.Column("new_value", sa.VARCHAR(length=150), autoincrement=False, nullable=True),
schema="pepys",
)
op.drop_column("Logs", "previous_value", schema="pepys")
# ### end Alembic commands ###
| en | 0.523408 | Rename new_value to previous_value Revision ID: 0f8d24a830d0 Revises: <PASSWORD> Create Date: 2021-03-01 10:09:44.140941+00:00 # revision identifiers, used by Alembic. # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### | 1.838952 | 2 |
migrations/versions/4cdcf068ef59_additional_indexing.py | hamhands/pittsburgh-purchasing-suite | 22 | 6623307 | """additional indexing
Revision ID: 4cdcf068ef59
Revises: 54373a7de<PASSWORD>
Create Date: 2015-10-22 19:58:41.145656
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '54373a<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Index contract.current_stage_id and contract.department_id for faster lookups."""
    for column in ("current_stage_id", "department_id"):
        op.create_index(
            op.f("ix_contract_%s" % column), "contract", [column], unique=False
        )
def downgrade():
    """Drop the two contract indexes created by upgrade() (reverse order)."""
    for column in ("department_id", "current_stage_id"):
        op.drop_index(op.f("ix_contract_%s" % column), table_name="contract")
| """additional indexing
Revision ID: 4cdcf068ef59
Revises: 54373a7de<PASSWORD>
Create Date: 2015-10-22 19:58:41.145656
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '54373a<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_index(op.f('ix_contract_current_stage_id'), 'contract', ['current_stage_id'], unique=False)
op.create_index(op.f('ix_contract_department_id'), 'contract', ['department_id'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_contract_department_id'), table_name='contract')
op.drop_index(op.f('ix_contract_current_stage_id'), table_name='contract')
### end Alembic commands ###
| en | 0.552192 | additional indexing Revision ID: 4cdcf068ef59 Revises: 54373a7de<PASSWORD> Create Date: 2015-10-22 19:58:41.145656 # revision identifiers, used by Alembic. ### commands auto generated by Alembic - please adjust! ### ### end Alembic commands ### ### commands auto generated by Alembic - please adjust! ### ### end Alembic commands ### | 1.171599 | 1 |
nistchempy/__init__.py | EPiCs-group/NistChemPy | 0 | 6623308 | # imports
from .nistchempy import __version__, \
get_all_data, Compound, Spectrum, \
print_search_parameters, \
SearchParameters, Search
# module functions
# Public names re-exported by the package.
__all__ = [
    '__version__',
    'get_all_data',
    # BUG FIX: the original list was missing the comma after 'Spectrum', so
    # implicit string concatenation fused it with the next entry into the
    # bogus name 'Spectrumprint_search_parameters', breaking
    # `from nistchempy import *` for both Spectrum and print_search_parameters.
    'Compound', 'Spectrum',
    'print_search_parameters', 'SearchParameters', 'Search'
]
| # imports
from .nistchempy import __version__, \
get_all_data, Compound, Spectrum, \
print_search_parameters, \
SearchParameters, Search
# module functions
# Public names re-exported by the package.
__all__ = [
    '__version__',
    'get_all_data',
    # BUG FIX: a comma was missing after 'Spectrum'; implicit string
    # concatenation produced the bogus entry 'Spectrumprint_search_parameters'
    # and silently dropped two names from the star-import API.
    'Compound', 'Spectrum',
    'print_search_parameters', 'SearchParameters', 'Search'
]
| en | 0.123245 | # imports # module functions | 0.994247 | 1 |
userbot/modules/flicks.py | BeroyStwn/Flicks-Userbot | 0 | 6623309 | from time import sleep
from userbot import ALIVE_NAME, CMD_HELP, WEATHER_DEFCITY
from userbot import CMD_HANDLER as cmd
from userbot.utils import flicks_cmd
@flicks_cmd(pattern="intro")
async def typewriter(typew):
typew.pattern_match.group(1)
await typew.edit(f"**Hai Perkenalkan Namaku {ALIVE_NAME}**")
sleep(3)
await typew.edit(f"**Umurku Rahasia :D**")
sleep(1)
await typew.edit(f"**Tinggal Di {WEATHER_DEFCITY}, Salam Kenal :)**")
# Create by myself @localheart
@flicks_cmd(pattern="lopyu")
async def typewriter(typew):
typew.pattern_match.group(1)
sleep(3)
await typew.edit("`Cuma Mau Bilang`")
sleep(3)
await typew.edit("`Aku Sayang Kamu`")
sleep(1)
await typew.edit("`I LOVE YOU 💞`")
sleep(1)
await typew.edit("`I LOVE YOU SO MUCH 🥰`")
# Create by myself @localheart
@flicks_cmd(pattern="semangat")
async def typewriter(typew):
typew.pattern_match.group(1)
await typew.edit("`Apapun Yang Terjadi`")
sleep(3)
await typew.edit("`Tetaplah Bernapas`")
sleep(1)
await typew.edit("`Selalu Bersyukur`")
sleep(1)
await typew.edit("`Dan Jangan Lupa Tertawa:)`")
# Create by myself @localheart
@flicks_cmd(pattern="aku")
async def typewriter(typew):
typew.pattern_match.group(1)
await typew.edit("`Aku Userbot`")
sleep(3)
await typew.edit("`Jangan Main Main`")
sleep(2)
await typew.edit("`<NAME> 🥺`")
# Create by myself @localheart
CMD_HELP.update({
"flicks": f"\
**Perintah:** `{cmd}intro`\
\n**Penjelasan:** Memperkenalkan diri anda\
\n\n**Perintah:** `{cmd}semangat`\
\n**Penjelasan:** Sedikit Motifasi\
\n\n**Perintah:** `{cmd}aku`\
\n**Penjelasan:** Lihat sendiri 🏃\
\n\n**Perintah:** `{cmd}lopyu`\
\n**Penjelasan:** Lihat Sendiri 🏃"})
| from time import sleep
from userbot import ALIVE_NAME, CMD_HELP, WEATHER_DEFCITY
from userbot import CMD_HANDLER as cmd
from userbot.utils import flicks_cmd
@flicks_cmd(pattern="intro")
async def typewriter(typew):
typew.pattern_match.group(1)
await typew.edit(f"**Hai Perkenalkan Namaku {ALIVE_NAME}**")
sleep(3)
await typew.edit(f"**Umurku Rahasia :D**")
sleep(1)
await typew.edit(f"**Tinggal Di {WEATHER_DEFCITY}, Salam Kenal :)**")
# Create by myself @localheart
@flicks_cmd(pattern="lopyu")
async def typewriter(typew):
typew.pattern_match.group(1)
sleep(3)
await typew.edit("`Cuma Mau Bilang`")
sleep(3)
await typew.edit("`Aku Sayang Kamu`")
sleep(1)
await typew.edit("`I LOVE YOU 💞`")
sleep(1)
await typew.edit("`I LOVE YOU SO MUCH 🥰`")
# Create by myself @localheart
@flicks_cmd(pattern="semangat")
async def typewriter(typew):
typew.pattern_match.group(1)
await typew.edit("`Apapun Yang Terjadi`")
sleep(3)
await typew.edit("`Tetaplah Bernapas`")
sleep(1)
await typew.edit("`Selalu Bersyukur`")
sleep(1)
await typew.edit("`Dan Jangan Lupa Tertawa:)`")
# Create by myself @localheart
@flicks_cmd(pattern="aku")
async def typewriter(typew):
typew.pattern_match.group(1)
await typew.edit("`Aku Userbot`")
sleep(3)
await typew.edit("`Jangan Main Main`")
sleep(2)
await typew.edit("`<NAME> 🥺`")
# Create by myself @localheart
CMD_HELP.update({
"flicks": f"\
**Perintah:** `{cmd}intro`\
\n**Penjelasan:** Memperkenalkan diri anda\
\n\n**Perintah:** `{cmd}semangat`\
\n**Penjelasan:** Sedikit Motifasi\
\n\n**Perintah:** `{cmd}aku`\
\n**Penjelasan:** Lihat sendiri 🏃\
\n\n**Perintah:** `{cmd}lopyu`\
\n**Penjelasan:** Lihat Sendiri 🏃"})
| en | 0.892973 | # Create by myself @localheart # Create by myself @localheart # Create by myself @localheart # Create by myself @localheart | 2.669872 | 3 |
setup.py | hallazzang/pknulms-py | 0 | 6623310 | <filename>setup.py
import os
from setuptools import setup
def get_content(path):
    """Return the text of *path*, resolved relative to this file's directory.

    The file is decoded as UTF-8 explicitly: the original relied on the
    platform's locale default, which breaks (e.g. cp1252 on Windows) if the
    README ever contains non-ASCII text.  io.open is used because this
    setup.py still advertises Python 2.6/2.7 support, and it accepts an
    ``encoding`` argument on both major versions.
    """
    import io  # local import keeps the module's import block untouched
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), path)
    with io.open(path, encoding='utf-8') as f:
        return f.read()
# Packaging metadata for the pknulms client.
# NOTE(review): '<NAME>' / '<EMAIL>' are dataset-redaction placeholders,
# not real values -- restore the actual author info before publishing.
setup(
    name='pknulms',
    version='1.0.2',
    url='https://github.com/hallazzang/pknulms-py',
    license='MIT',
    author='<NAME>',
    author_email='<EMAIL>',
    description='Pukyong National University Smart-LMS Python client',
    # Long description is read from README.rst next to this file.
    long_description=get_content('README.rst'),
    py_modules=['pknulms'],
    install_requires=[
        'requests',
    ],
    zip_safe=False,
    platforms='any',
    classifiers=[
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
| <filename>setup.py
import os
from setuptools import setup
def get_content(path):
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), path)
with open(path) as f:
return f.read()
setup(
name='pknulms',
version='1.0.2',
url='https://github.com/hallazzang/pknulms-py',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
description='Pukyong National University Smart-LMS Python client',
long_description=get_content('README.rst'),
py_modules=['pknulms'],
install_requires=[
'requests',
],
zip_safe=False,
platforms='any',
classifiers=[
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| none | 1 | 1.670179 | 2 | |
src/day10.py | Trattpingvin/advent-of-code-2018 | 0 | 6623311 | import numpy as np
def update_positions(v, steps=1):
    """Advance every point in-place by `steps` ticks of its velocity.

    `v` is an (n, 4) integer array of rows [x, y, vx, vy]; columns 0-1 are
    mutated through a view, so the caller's array changes.
    """
    positions, velocities = v[:, 0:2], v[:, 2:4]
    positions += steps * velocities
def solve(vectors, stop=9):
    """Render the message the moving points spell out (AoC 2018 day 10).

    The points' vertical spread shrinks linearly until the message appears,
    so measure the shrink rate over one tick and jump straight to the moment
    the spread reaches `stop` rows.  `stop` (previously a hard-coded 9) is
    now a defaulted parameter, since puzzle letters are 8-10 rows tall.

    Returns a tuple ``(rendered_message, seconds_elapsed)``.

    Changes from the original: integer division is written as ``//`` -- the
    file is Python 2 (see the ``print`` statement in ``__main__``) where
    ``/`` on ints already floors, so behaviour is unchanged there, and the
    function now also works on Python 3, where ``/`` would yield a float and
    break the integer ``+=`` below.  The one-line ``update_positions``
    helper is inlined so this function is self-contained.
    """
    v = np.array(vectors)
    # Vertical spread now and after one tick gives the (linear) shrink rate.
    alignment_error1 = max(v[:, 1]) - min(v[:, 1])
    v[:, 0:2] += v[:, 2:4]  # advance one tick (inlined update_positions)
    alignment_error2 = max(v[:, 1]) - min(v[:, 1])
    rate_of_change = alignment_error1 - alignment_error2
    # Jump straight to the step where the spread reaches `stop`.
    v[:, 0:2] += ((alignment_error2 - stop) // rate_of_change) * v[:, 2:4]
    msg_start_y = min(v[:, 1])
    msg_start_X = min(v[:, 0])
    msg_width = max(v[:, 0]) - msg_start_X + 1
    msg_height = max(v[:, 1]) - msg_start_y + 1
    # Rasterise the points onto a grid, then render row by row.
    grid = np.zeros(shape=(msg_width + 1, msg_height + 1))
    for vector in v:
        grid[vector[0] - msg_start_X, vector[1] - msg_start_y] = 1
    ans = ""
    for y in range(msg_height):
        ans += ''.join("#" if 1 == grid[x, y] else "." for x in range(msg_width))
        ans += "\n"
    return ans, (alignment_error1 - stop) // rate_of_change
if __name__ == '__main__':
    # Parse lines like: position=< x, y> velocity=< vx, vy>
    vectors = []
    with open('inputs/day10.txt') as f:
        for line in f:
            pos_start = line.find('<')
            pos_end = line.find('>')
            pos = line[pos_start + 1:pos_end]
            line = line[pos_end + 1:]
            vel_start = line.find('<')
            vel_end = line.find('>')
            vel = line[vel_start + 1:vel_end]
            vectors.append([int(i) for i in (pos + "," + vel).split(',')])
    # FIX: parenthesised print -- identical output under Python 2 (prints the
    # same tuple repr) and valid syntax under Python 3, where the original
    # `print solve(vectors)` statement was a SyntaxError.
    print(solve(vectors))
| import numpy as np
def update_positions(v, steps = 1):
v[:,0:2] += steps*v[:,2:4]
def solve(vectors):
"""message should be readable when data is mostly aligned on y-axis.
let's find the rate of change of alignment, and print the message when alignment is at its minimum (looks like 8 or 9 from problem description)"""
v = np.array(vectors)
stop = 9
alignment_error1 = max(v[:,1])-min(v[:,1])
update_positions(v)
alignment_error2 = max(v[:,1])-min(v[:,1])
rate_of_change = alignment_error1-alignment_error2
update_positions(v, (alignment_error2-stop)/rate_of_change)
msg_start_y = min(v[:,1])
msg_start_X = min(v[:,0])
msg_width = max(v[:,0]) - msg_start_X + 1
msg_height = max(v[:,1]) - msg_start_y + 1
ans = ""
grid = np.zeros(shape = (msg_width+1, msg_height+1))
for vector in v:
grid[vector[0]-msg_start_X, vector[1]-msg_start_y] = 1
for y in range(msg_height):
ans +=''.join("#" if 1==grid[x,y] else "." for x in range(msg_width))
ans += "\n"
return ans, (alignment_error1-stop)/rate_of_change
if __name__=='__main__':
vectors = []
with open('inputs/day10.txt') as f:
for line in f:
pos_start = line.find('<')
pos_end = line.find('>')
pos = line[pos_start+1:pos_end]
line = line[pos_end+1:]
vel_start = line.find('<')
vel_end = line.find('>')
vel = line[vel_start+1:vel_end]
vectors.append([int(i) for i in (pos+","+vel).split(',')])
print solve(vectors)
| en | 0.921585 | message should be readable when data is mostly aligned on y-axis. let's find the rate of change of alignment, and print the message when alignment is at its minimum (looks like 8 or 9 from problem description) | 3.37214 | 3 |
webapp/decorators.py | NighttWatch/hetes | 0 | 6623312 | <filename>webapp/decorators.py
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import user_passes_test
def partner_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url='loginApp'):
    '''
    Decorator for views that checks that the logged-in user is an active
    partner, redirecting to the log-in page otherwise.  Usable both bare
    (@partner_required) and parameterised (@partner_required(...)).
    '''
    decorator = user_passes_test(
        lambda u: u.is_active and u.is_partner,
        login_url=login_url,
        redirect_field_name=redirect_field_name,
    )
    return decorator(function) if function else decorator
def employee_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url='loginApp'):
    '''
    Decorator for views that checks that the logged-in user is an employee;
    redirects to the log-in page if necessary.
    '''
    actual_decorator = user_passes_test(
        lambda u: u.is_active and u.is_employee,
        login_url=login_url,
        redirect_field_name=redirect_field_name
    )
    if function:
        return actual_decorator(function)
    return actual_decorator
def executive_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url='loginApp'):
    '''
    Decorator for views that checks that the logged-in user is an executive;
    redirects to the log-in page if necessary.
    '''
    actual_decorator = user_passes_test(
        lambda u: u.is_active and u.is_executive,
        login_url=login_url,
        redirect_field_name=redirect_field_name
    )
    if function:
        return actual_decorator(function)
    return actual_decorator
def partner_admin_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url='loginApp'):
    '''
    Decorator for views that checks that the logged-in user is a partner admin;
    redirects to the log-in page if necessary.
    '''
    actual_decorator = user_passes_test(
        lambda u: u.is_active and u.is_partner_admin,
        login_url=login_url,
        redirect_field_name=redirect_field_name
    )
    if function:
        return actual_decorator(function)
    return actual_decorator
def system_customer_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url='loginApp'):
    '''
    Decorator for views that checks that the logged-in user is a system
    customer; redirects to the log-in page if necessary.
    '''
    actual_decorator = user_passes_test(
        # NOTE(review): the attribute name looks copy-pasted from the function
        # name -- the sibling decorators all test `is_<role>`, so this should
        # probably be `u.is_system_customer`; confirm against the user model.
        lambda u: u.is_active and u.is_system_customer_required,
        login_url=login_url,
        redirect_field_name=redirect_field_name
    )
    if function:
        return actual_decorator(function)
    return actual_decorator
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import user_passes_test
def partner_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url='loginApp'):
'''
Decorator for views that checks that the logged in user is a partner,
redirects to the log-in page if necessary.
'''
actual_decorator = user_passes_test(
lambda u: u.is_active and u.is_partner,
login_url=login_url,
redirect_field_name=redirect_field_name
)
if function:
return actual_decorator(function)
return actual_decorator
def employee_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url='loginApp'):
'''
Decorator for views that checks that the logged in user is a employee,
redirects to the log-in page if necessary.
'''
actual_decorator = user_passes_test(
lambda u: u.is_active and u.is_employee,
login_url=login_url,
redirect_field_name=redirect_field_name
)
if function:
return actual_decorator(function)
return actual_decorator
def executive_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url='loginApp'):
'''
Decorator for views that checks that the logged in user is a executive,
redirects to the log-in page if necessary.
'''
actual_decorator = user_passes_test(
lambda u: u.is_active and u.is_executive,
login_url=login_url,
redirect_field_name=redirect_field_name
)
if function:
return actual_decorator(function)
return actual_decorator
def partner_admin_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url='loginApp'):
'''
Decorator for views that checks that the logged in user is a partner_admin
redirects to the log-in page if necessary.
'''
actual_decorator = user_passes_test(
lambda u: u.is_active and u.is_partner_admin,
login_url=login_url,
redirect_field_name=redirect_field_name
)
if function:
return actual_decorator(function)
return actual_decorator
def system_customer_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url='loginApp'):
'''
Decorator for views that checks that the logged in user is a system_customer_required,
redirects to the log-in page if necessary.
'''
actual_decorator = user_passes_test(
lambda u: u.is_active and u.is_system_customer_required,
login_url=login_url,
redirect_field_name=redirect_field_name
)
if function:
return actual_decorator(function)
return actual_decorator | en | 0.919173 | Decorator for views that checks that the logged in user is a partner, redirects to the log-in page if necessary. Decorator for views that checks that the logged in user is a employee, redirects to the log-in page if necessary. Decorator for views that checks that the logged in user is a executive, redirects to the log-in page if necessary. Decorator for views that checks that the logged in user is a partner_admin redirects to the log-in page if necessary. Decorator for views that checks that the logged in user is a system_customer_required, redirects to the log-in page if necessary. | 2.607162 | 3 |
21922/solution.py | bossm0n5t3r/BOJ | 2 | 6623313 | import sys
def sol():
    """Read the grid from stdin, trace every air-conditioner stream, and
    print how many cells (including the conditioners themselves) get air.

    Cell value 9 marks an air conditioner; other non-zero values deflect or
    block the stream (exact semantics live in ``next_dir`` -- presumably the
    BOJ 21922 mirror/wall objects, confirm against the problem statement).
    """
    # sys.stdin = open("./21922/input.txt")
    input = sys.stdin.readline
    N, M = map(int, input().split())
    visited = [[False] * M for _ in range(N)]
    air_conditioners = {}
    lab = []
    for r in range(N):
        tmp = list(map(int, input().split()))
        lab.append(tmp)
        for c in range(M):
            if tmp[c] == 9:
                # One outlet flag per direction; visit_all clears the opposing
                # flag when two conditioners face each other so the stream
                # between them is traced only once.
                air_conditioners[(r, c)] = [True, True, True, True]
    for r, c in air_conditioners:
        visited[r][c] = True
        for i in range(4):
            if air_conditioners[(r, c)][i]:
                visit_all(N, M, lab, air_conditioners, visited, r, c, i)
    print(sum([sum(row) for row in visited]))
def visit_all(N, M, lab, air_conditioners, visited, r, c, dir):
    """Trace one air stream from conditioner (r, c) in direction ``dir``.

    Directions: 0 right, 1 left, 2 down, 3 up.  Index 4 / -1 maps to a huge
    step (2000) that pushes the stream off-grid, which is how a blocked
    heading (next_dir returning -1) terminates the walk.
    """
    STEP_R = [0, 0, 1, -1, 2000]
    STEP_C = [1, -1, 0, 0, 2000]
    heading = dir
    row, col = r + STEP_R[heading], c + STEP_C[heading]
    while 0 <= row < N and 0 <= col < M:
        cell = lab[row][col]
        if cell == 9:
            # Hit another conditioner: disable its outlet facing back at us
            # so the shared stream is not traced a second time.
            opposite = 1 - heading if heading in (0, 1) else 5 - heading
            air_conditioners[(row, col)][opposite] = False
            return
        visited[row][col] = True
        heading = next_dir(heading, cell)
        row += STEP_R[heading]
        col += STEP_C[heading]
def next_dir(cur_dir, cur_pos):
    """Return the heading after entering a cell of type ``cur_pos``.

    Headings: 0 right, 1 left, 2 down, 3 up.  Returns -1 when the cell
    blocks this heading (type 1 blocks horizontal, type 2 blocks vertical);
    type 3 mirrors the heading (0<->3, 1<->2), type 4 reverses it, and any
    other cell leaves it unchanged.
    """
    if cur_pos == 1:
        return -1 if cur_dir in (0, 1) else cur_dir
    if cur_pos == 2:
        return -1 if cur_dir in (2, 3) else cur_dir
    if cur_pos == 3:
        return 3 - cur_dir
    if cur_pos == 4:
        return (cur_dir + 2) % 4
    return cur_dir
if __name__ == "__main__":
sol()
| import sys
def sol():
# sys.stdin = open("./21922/input.txt")
input = sys.stdin.readline
N, M = map(int, input().split())
visited = [[False] * M for _ in range(N)]
air_conditioners = {}
lab = []
for r in range(N):
tmp = list(map(int, input().split()))
lab.append(tmp)
for c in range(M):
if tmp[c] == 9:
air_conditioners[(r, c)] = [True, True, True, True]
for r, c in air_conditioners:
visited[r][c] = True
for i in range(4):
if air_conditioners[(r, c)][i]:
visit_all(N, M, lab, air_conditioners, visited, r, c, i)
print(sum([sum(row) for row in visited]))
def visit_all(N, M, lab, air_conditioners, visited, r, c, dir):
dr = [0, 0, 1, -1, 2000]
dc = [1, -1, 0, 0, 2000]
cur_dir = dir
nr = r + dr[cur_dir]
nc = c + dc[cur_dir]
while 0 <= nr < N and 0 <= nc < M:
if lab[nr][nc] == 9:
if cur_dir == 0 or cur_dir == 1:
air_conditioners[(nr, nc)][1 - cur_dir] = False
else:
air_conditioners[(nr, nc)][5 - cur_dir] = False
return
visited[nr][nc] = True
cur_dir = next_dir(cur_dir, lab[nr][nc])
nr += dr[cur_dir]
nc += dc[cur_dir]
def next_dir(cur_dir, cur_pos):
if cur_pos == 0:
return cur_dir
if cur_pos == 1 and (cur_dir == 0 or cur_dir == 1):
return -1
if cur_pos == 2 and (cur_dir == 2 or cur_dir == 3):
return -1
nd = cur_dir
if cur_pos == 3:
nd = 3 - cur_dir
elif cur_pos == 4:
if cur_dir == 0 or cur_dir == 1:
nd = cur_dir + 2
else:
nd = cur_dir - 2
return nd
if __name__ == "__main__":
sol()
| en | 0.293173 | # sys.stdin = open("./21922/input.txt") | 2.817881 | 3 |
pfxbrick/scripts/pfxrestart.py | aholzel/pfx-brick-py | 0 | 6623314 | #! /usr/bin/env python3
"""
pfxrestart - restarts the PFx Brick
"""
import argparse
from pfxbrick import *
def main():
    """Command-line entry point: restart (or halt) one connected PFx Brick."""
    parser = argparse.ArgumentParser(
        description="Restarts the PFx Brick",
        prefix_chars="-+",
    )
    parser.add_argument(
        "-s",
        "--serialno",
        default=None,
        help="Specify PFx Brick with serial number (if more than one connected)",
    )
    parser.add_argument(
        "-x",
        "--halt",
        action="store_true",
        default=False,
        help="Halt all activity on PFx Brick without restarting",
    )
    opts = vars(parser.parse_args())
    brick = get_one_pfxbrick(opts["serialno"])
    if not brick.open():
        exit()
    if opts["halt"]:
        # Stop all lights/motors/sound and release the device without rebooting.
        brick.test_action(PFxAction().all_off())
        brick.close()
        return
    # Raw ICD reboot command: opcode followed by its fixed magic byte sequence.
    reboot_cmd = [
        PFX_USB_CMD_REBOOT,
        PFX_REBOOT_BYTE0,
        PFX_REBOOT_BYTE1,
        PFX_REBOOT_BYTE2,
        PFX_REBOOT_BYTE3,
        PFX_REBOOT_BYTE4,
        PFX_REBOOT_BYTE5,
        PFX_REBOOT_BYTE6,
    ]
    brick.send_raw_icd_command(reboot_cmd)
    print("PFx Brick restarted")
if __name__ == "__main__":
    main()
| #! /usr/bin/env python3
"""
pfxrestart - restarts the PFx Brick
"""
import argparse
from pfxbrick import *
def main():
parser = argparse.ArgumentParser(
description="Restarts the PFx Brick",
prefix_chars="-+",
)
parser.add_argument(
"-s",
"--serialno",
default=None,
help="Specify PFx Brick with serial number (if more than one connected)",
)
parser.add_argument(
"-x",
"--halt",
action="store_true",
default=False,
help="Halt all activity on PFx Brick without restarting",
)
args = parser.parse_args()
argsd = vars(args)
b = get_one_pfxbrick(argsd["serialno"])
r = b.open()
if not r:
exit()
if argsd["halt"]:
b.test_action(PFxAction().all_off())
b.close()
else:
b.send_raw_icd_command(
[
PFX_USB_CMD_REBOOT,
PFX_REBOOT_BYTE0,
PFX_REBOOT_BYTE1,
PFX_REBOOT_BYTE2,
PFX_REBOOT_BYTE3,
PFX_REBOOT_BYTE4,
PFX_REBOOT_BYTE5,
PFX_REBOOT_BYTE6,
]
)
print("PFx Brick restarted")
if __name__ == "__main__":
main()
| en | 0.211881 | #! /usr/bin/env python3 pfxrestart - restarts the PFx Brick | 2.956023 | 3 |
placement/migrations/0003_auto_20201006_2339.py | SauravDharwadkar/erp_project | 2 | 6623315 | <gh_stars>1-10
# Generated by Django 3.0 on 2020-10-06 18:09
from django.db import migrations, models
class Migration(migrations.Migration):
    # Applies on top of the previous placement migration.
    dependencies = [
        ('placement', '0002_auto_20201006_2251'),
    ]
    # Re-declare Student.department as a fixed-choice CharField
    # (stored values are the string codes '1'-'4').
    operations = [
        migrations.AlterField(
            model_name='student',
            name='department',
            field=models.CharField(choices=[('1', 'Computer Science'), ('2', 'Electronics and Telecommunication'), ('3', 'Mechanical'), ('4', 'Civil')], default='1', max_length=150),
        ),
    ]
| # Generated by Django 3.0 on 2020-10-06 18:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('placement', '0002_auto_20201006_2251'),
]
operations = [
migrations.AlterField(
model_name='student',
name='department',
field=models.CharField(choices=[('1', 'Computer Science'), ('2', 'Electronics and Telecommunication'), ('3', 'Mechanical'), ('4', 'Civil')], default='1', max_length=150),
),
] | en | 0.848883 | # Generated by Django 3.0 on 2020-10-06 18:09 | 1.893116 | 2 |
jiraannouncer/views/client.py | FuelRats/JIRAAnnouncer | 0 | 6623316 | import hmac
import requests
from pyramid.view import view_config
from sys import hexversion
from ..utils import send, devsay
import logging
log = logging.getLogger(__name__)
@view_config(route_name='client', renderer="json")
def client(request):
    """Handle Client arrival announcements.

    Validates that the call comes from the official rescue page (via an
    HMAC signature or a known referer), warns dispatch about clients on
    broken/unreliable browsers, and announces the case in #fuelrats unless
    the client already has an open rescue.

    BUG FIX: missing request parameters raise KeyError, not NameError, so
    the original `except NameError` never fired and execution then crashed
    on the undefined names -- the handler now catches KeyError and returns
    early.  The bare `except:` around rescue parsing was narrowed to
    Exception, and the dead `sys.hexversion` fallback was removed
    (hmac.compare_digest exists on every Python able to parse this file's
    f-strings).
    """
    # WebOb header lookup is case-insensitive, so 'referer' matches 'Referer'.
    referer = request.headers['Referer'] if 'referer' in request.headers else None
    possiblefake = False
    settings = request.registry.settings
    fr_token = settings['fr_token'] if 'fr_token' in settings else None
    api_url = settings['fr_url'] if 'fr_url' in settings else None
    try:
        cmdrname = request.params['cmdrname']
        system = request.params['system']
        platform = request.params['platform']
        o2status = request.params['EO2']
        if 'odyssey' in request.params:
            # Shit gonna get wild!
            odyssey = request.params['odyssey'].lower() == "true"
        else:
            odyssey = False
    except KeyError:
        log.critical("Missing parameters to Client announcement call.")
        devsay("Parameters were missing in a Client announcement call!", request)
        return
    if 'X-Client-Signature' in request.headers:
        client_secret = settings['client_secret'] if 'client_secret' in settings else None
        header_signature = request.headers['X-Client-Signature']
        log.debug("HMAC signature was passed by referrer.")
        # NOTE(review): assumes the header has the form "sha1=<hexdigest>";
        # a header without '=' raises ValueError here -- confirm upstream.
        sha_name, signature = header_signature.split('=')
        if sha_name != 'sha1':
            log.error("Signature not in SHA1 format, aborting.")
            possiblefake = True
        mac = hmac.new(bytes(client_secret, 'utf8'), msg=request.body, digestmod='sha1')
        if not hmac.compare_digest(str(mac.hexdigest()), str(signature)):
            log.error("Signature mismatch, possible fake call!")
            possiblefake = True
    elif referer not in ["https://clients.fuelrats.com:7777/", "https://clients.fuelrats.com/"]:
        log.error(f"Client announcer called with invalid referer: {referer}")
        browser = request.user_agent
        if 'iPhone' in browser or 'Android' in browser:
            # Client is using an iPhone/iPad that does not forward referrer.
            send('#ratchat', f"[Client Announcer] Warning! Client {cmdrname} is using a phone or tablet device that"
                             f" does not preserve connections if switching apps/sleeping!", "", request)
        elif 'PlayStation 4' in browser or 'PLAYSTATION' in browser:
            send('#ratchat', f"[Client Announcer] Warning! Client {cmdrname} is using a known BROKEN browser that"
                             f" will not let them send to chat channels!", "", request)
        else:
            possiblefake = True
            devsay(f"Someone tried to call the client announcer with an invalid referer '{referer}'! Absolver!",
                   request)
    else:
        log.warning("Non-signed request from valid referer.")
    if system == "" or platform == "" or o2status == "":
        # NOTE(review): unlike the other call sites, these two send() calls
        # pass only two arguments -- confirm send()'s defaults cover this.
        send("#ratchat", f"[Client Announcer] Client {cmdrname} has connected through the rescue page,"
                         f" but has not submitted system information! No automated r@tsignal sent!")
        log.warning(f"Client {cmdrname} connected with an empty required field. System: {system}"
                    f" Platform: {platform} o2status: {o2status}")
        return
    if system.lower() in ["sabiyhan"]:
        send("#ratchat", f"[Client Announcer] ALERT! Arriving client {cmdrname} submitted a system name known"
                         f" to cause a game client crash. Proceed with caution!")
        log.warning(f"Client {cmdrname} used blocked system name {system} in an attempt to crash game clients.")
    message = f"Incoming Client: {cmdrname} - System: {system} - Platform: {platform} " \
              f"{'(Odyssey)' if odyssey else ''} - O2: {o2status}"
    if 'extradata' in request.params:
        message += f" - {request.params['extradata']}"
    # NOTE(review): a network/API failure here still propagates -- intentional?
    rescues = requests.get(f'{api_url}/rescues?filter[status]=open',
                           headers={'Accept': 'application/json',
                                    'Authorization': f'Bearer {fr_token}'}).json()
    try:
        active_cases = [rescue['attributes']['clientNick'] for rescue in rescues['data']]
        if cmdrname in active_cases:
            # Client already has an open case; don't announce a duplicate.
            log.warning(f"Suppressing active case announcement for client {cmdrname}")
        else:
            send("#fuelrats", message, "No Short for you!", request)
            if possiblefake:
                send("#ratchat",
                     f"[Client Announcer] Warning! The arriving case is not passing validation information!",
                     "", request)
    except Exception:
        # Malformed API payload: announce anyway rather than lose the case.
        print("Failed to parse rescue data, not attempting to suppress repeat cases.")
        send("#fuelrats", message, "No Short for you!", request)
        if possiblefake:
            send("#ratchat",
                 f"[Client Announcer] Warning! The arriving case is not passing validation information!",
                 "", request)
    return
| import hmac
import requests
from pyramid.view import view_config
from sys import hexversion
from ..utils import send, devsay
import logging
log = logging.getLogger(__name__)
@view_config(route_name='client', renderer="json")
def client(request):
"""Handle Client arrival announcements."""
referer = request.headers['Referer'] if 'referer' in request.headers else None
possiblefake = False
settings = request.registry.settings
fr_token = settings['fr_token'] if 'fr_token' in settings else None
api_url = settings['fr_url'] if 'fr_url' in settings else None
try:
cmdrname = request.params['cmdrname']
system = request.params['system']
platform = request.params['platform']
o2status = request.params['EO2']
if 'odyssey' in request.params:
# Shit gonna get wild!
odyssey = True if request.params['odyssey'].lower() == "true" else False
else:
odyssey = False
except NameError:
log.critical("Missing parameters to Client announcement call.")
devsay("Parameters were missing in a Client announcement call!", request)
if 'X-Client-Signature' in request.headers:
client_secret = settings['client_secret'] if 'client_secret' in settings else None
header_signature = request.headers['X-Client-Signature']
log.debug("HMAC signature was passed by referrer.")
sha_name, signature = header_signature.split('=')
if sha_name != 'sha1':
log.error("Signature not in SHA1 format, aborting.")
possiblefake = True
mac = hmac.new(bytes(client_secret, 'utf8'), msg=request.body, digestmod='sha1')
if hexversion >= 0x020707F0:
if not hmac.compare_digest(str(mac.hexdigest()), str(signature)):
log.error("Signature mismatch, possible fake call!")
possiblefake = True
else:
if not str(mac.hexdigest()) == str(signature):
log.error("Signature mismatch! GitHub event not parsed.")
log.error(f"{mac.hexdigest()} vs {str(signature)}")
devsay(f"Invalid MAC in Client message: {str(signature)}", request)
possiblefake = True
elif referer not in ["https://clients.fuelrats.com:7777/", "https://clients.fuelrats.com/"]:
log.error(f"Client announcer called with invalid referer: {referer}")
browser = request.user_agent
if 'iPhone' in browser or 'Android' in browser:
# Client is using an iPhone/iPad that does not forward referrer.
send('#ratchat', f"[Client Announcer] Warning! Client {cmdrname} is using a phone or tablet device that"
f" does not preserve connections if switching apps/sleeping!", "", request)
elif 'PlayStation 4' in browser or 'PLAYSTATION' in browser:
send('#ratchat', f"[Client Announcer] Warning! Client {cmdrname} is using a known BROKEN browser that"
f" will not let them send to chat channels!", "", request)
else:
possiblefake = True
devsay(f"Someone tried to call the client announcer with an invalid referer '{referer}'! Absolver!",
request)
else:
log.warning("Non-signed request from valid referer.")
if system == "" or platform == "" or o2status == "":
send("#ratchat", f"[Client Announcer] Client {cmdrname} has connected through the rescue page,"
f" but has not submitted system information! No automated r@tsignal sent!")
log.warning(f"Client {cmdrname} connected with an empty required field. System: {system}"
f" Platform: {platform} o2status: {o2status}")
return
if system.lower() in ["sabiyhan"]:
send("#ratchat", f"[Client Announcer] ALERT! Arriving client {cmdrname} submitted a system name known"
f" to cause a game client crash. Proceed with caution!")
log.warning(f"Client {cmdrname} used blocked system name {system} in an attempt to crash game clients.")
if 'extradata' not in request.params:
message = f"Incoming Client: {cmdrname} - System: {system} - Platform: {platform} " \
f"{'(Odyssey)' if odyssey else ''} - O2: {o2status}"
else:
extradata = request.params['extradata']
message = f"Incoming Client: {cmdrname} - System: {system} - Platform: {platform} " \
f"{'(Odyssey)' if odyssey else ''} - O2: {o2status} - {extradata}"
rescues = requests.get(f'{api_url}/rescues?filter[status]=open', headers={'Accept': 'application/json',
'Authorization':
f'Bearer {fr_token}'}).json()
active_cases = []
try:
for rescue in rescues['data']:
active_cases.append(rescue['attributes']['clientNick'])
if cmdrname in active_cases:
log.warning(f"Suppressing active case announcement for client {cmdrname}")
else:
send("#fuelrats", message, "No Short for you!", request)
if possiblefake:
send("#ratchat",
f"[Client Announcer] Warning! The arriving case is not passing validation information!",
"", request)
except:
print("Failed to parse rescue data, not attempting to suppress repeat cases.")
send("#fuelrats", message, "No Short for you!", request)
if possiblefake:
send("#ratchat",
f"[Client Announcer] Warning! The arriving case is not passing validation information!",
"", request)
return
| en | 0.834307 | Handle Client arrival announcements. # Shit gonna get wild! # Client is using an iPhone/iPad that does not forward referrer. | 2.247527 | 2 |
tweetbots/clevergirl_bot.py | plaidfluff/fluff_ebooks | 0 | 6623317 | <gh_stars>0
from twython import TwythonStreamer, Twython, TwythonError
import logging
import random
import datetime
import Queue
import time
import thread
from collections import namedtuple
# Delay-queue entry: `when` is the earliest datetime at which to post `text`;
# `reply_id` is the tweet id this post replies to.
QueueItem = namedtuple("QueueItem", "when text reply_id")
class CleverGirlBot(TwythonStreamer):
def __init__(self, site_config, bot_config, timeout=30):
super(CleverGirlBot,self).__init__(
site_config.app_key,
site_config.app_secret,
bot_config['oauth_token'],
bot_config['oauth_token_secret'],
timeout=timeout)
self.post_client = Twython(
site_config.app_key,
site_config.app_secret,
bot_config['oauth_token'],
bot_config['oauth_token_secret'])
self.my_user_id = bot_config['user_id']
self.respond_to = bot_config.get('respond_to', [])
self.post_queue = Queue.Queue()
self.thread_id = thread.start_new_thread(self.worker_task, ())
logging.info("Worker thread: %d", self.thread_id)
def on_success(self, tweet):
logging.debug(repr(tweet))
if 'text' in tweet and not 'retweeted_status' in tweet:
screen_name = tweet['user']['screen_name']
respond = False
for mention in tweet['entities']['user_mentions']:
if mention['id'] == self.my_user_id:
logging.info("Mentioned")
respond = True
for term in self.respond_to:
if term.lower() in tweet['text'].lower():
logging.info("Matched phrase %s", term)
respond = True
if respond:
response = '@%s ' % screen_name
logging.info("Replying: <%s> %s", screen_name,tweet['text'])
response += self.generate_tweet()
logging.info("Response: %s", response)
self.post(response, tweet['id'])
def post(self, response, reply_id):
self.post_queue.put(QueueItem(datetime.datetime.now() + datetime.timedelta(seconds=10), response, reply_id))
def worker_task(self):
while True:
tweet = self.post_queue.get()
wait_time = (tweet.when - datetime.datetime.now()).seconds
if wait_time > 0:
time.sleep(wait_time)
logging.info("Worker posting tweet [%s](in reply to %s)", tweet.text, tweet.reply_id)
try:
self.post_client.update_status(status=tweet.text, in_reply_to_status_id=tweet.reply_id)
except TwythonError, e:
logging.info("Post failed: %s", e)
# self.post_queue.put(tweet)
def generate_tweet(self):
rawr = 'R' + 'a'*random.randint(0,5) + 'w'*random.randint(0,4) + 'r'*random.randint(1,3)
rawr += random.choice(['', '...', '.', '...?', '!'])
rawr = random.choice([
(lambda x:x.upper()),
(lambda x:x.lower()),
(lambda x:x),
(lambda x:x.upper() + '!')
])(rawr)
rawr += random.choice(['', '', '', '', ' :>', ' *nuzzles*', ' *playfully nips*'])
return rawr
def run(self):
self.user(track=','.join(self.respond_to))
| from twython import TwythonStreamer, Twython, TwythonError
import logging
import random
import datetime
import Queue
import time
import thread
from collections import namedtuple
QueueItem = namedtuple("QueueItem", "when text reply_id")
class CleverGirlBot(TwythonStreamer):
def __init__(self, site_config, bot_config, timeout=30):
super(CleverGirlBot,self).__init__(
site_config.app_key,
site_config.app_secret,
bot_config['oauth_token'],
bot_config['oauth_token_secret'],
timeout=timeout)
self.post_client = Twython(
site_config.app_key,
site_config.app_secret,
bot_config['oauth_token'],
bot_config['oauth_token_secret'])
self.my_user_id = bot_config['user_id']
self.respond_to = bot_config.get('respond_to', [])
self.post_queue = Queue.Queue()
self.thread_id = thread.start_new_thread(self.worker_task, ())
logging.info("Worker thread: %d", self.thread_id)
def on_success(self, tweet):
logging.debug(repr(tweet))
if 'text' in tweet and not 'retweeted_status' in tweet:
screen_name = tweet['user']['screen_name']
respond = False
for mention in tweet['entities']['user_mentions']:
if mention['id'] == self.my_user_id:
logging.info("Mentioned")
respond = True
for term in self.respond_to:
if term.lower() in tweet['text'].lower():
logging.info("Matched phrase %s", term)
respond = True
if respond:
response = '@%s ' % screen_name
logging.info("Replying: <%s> %s", screen_name,tweet['text'])
response += self.generate_tweet()
logging.info("Response: %s", response)
self.post(response, tweet['id'])
def post(self, response, reply_id):
self.post_queue.put(QueueItem(datetime.datetime.now() + datetime.timedelta(seconds=10), response, reply_id))
def worker_task(self):
while True:
tweet = self.post_queue.get()
wait_time = (tweet.when - datetime.datetime.now()).seconds
if wait_time > 0:
time.sleep(wait_time)
logging.info("Worker posting tweet [%s](in reply to %s)", tweet.text, tweet.reply_id)
try:
self.post_client.update_status(status=tweet.text, in_reply_to_status_id=tweet.reply_id)
except TwythonError, e:
logging.info("Post failed: %s", e)
# self.post_queue.put(tweet)
def generate_tweet(self):
rawr = 'R' + 'a'*random.randint(0,5) + 'w'*random.randint(0,4) + 'r'*random.randint(1,3)
rawr += random.choice(['', '...', '.', '...?', '!'])
rawr = random.choice([
(lambda x:x.upper()),
(lambda x:x.lower()),
(lambda x:x),
(lambda x:x.upper() + '!')
])(rawr)
rawr += random.choice(['', '', '', '', ' :>', ' *nuzzles*', ' *playfully nips*'])
return rawr
def run(self):
self.user(track=','.join(self.respond_to)) | en | 0.510674 | # self.post_queue.put(tweet) | 2.510884 | 3 |
httputility.py | yuqian5/CMPUT404-assignment-web-client | 0 | 6623318 | from urllib.parse import quote_plus
def parse_response(resp):
    """Parse a raw HTTP response string into a dict.

    Returned keys: "code" (int status), "code message" (reason phrase),
    "body" (everything after the header block), plus one entry per header.
    """
    # BUGFIX: split only on the FIRST blank line -- a body that itself
    # contains "\r\n\r\n" used to raise "too many values to unpack".
    header, body = resp.split("\r\n\r\n", 1)
    parts = header.split("\r\n")
    result = {}
    # status line -> numeric code + reason phrase
    code = get_response_code(parts[0])
    result["code"] = int(code[0])
    result["code message"] = code[1]
    parts.pop(0)
    result["body"] = body
    # remaining header lines: "Name: value" (value may contain ": ")
    for i in parts:
        (name, value) = i.split(": ", 1)
        result[name] = value
    return result
def get_response_code(part):
    """Split an HTTP status line into a (code, reason-phrase) tuple of strings."""
    parts = part.split(" ")
    parts.pop(0)                    # drop the "HTTP/1.x" token
    code = parts.pop(0)
    code_message = " ".join(parts)  # reason phrase may contain spaces
    return code, code_message
def args_2_url_encode(args):
    """URL-encode a mapping into ``k=v&k2=v2`` form; returns "" for None."""
    if args is None:
        return ""
    pairs = [f"{quote_plus(key)}={quote_plus(args[key])}" for key in args]
    return "&".join(pairs)
| from urllib.parse import quote_plus
def parse_response(resp):
header, body = resp.split("\r\n\r\n")
parts = header.split("\r\n")
result = {}
# get code
code = get_response_code(parts[0])
result["code"] = int(code[0])
result["code message"] = code[1]
parts.pop(0)
# get body
result["body"] = body
# parse the rest of the headers
for i in parts:
(name, value) = i.split(": ", 1)
result[name] = value
return result
def get_response_code(part):
parts = part.split(" ")
parts.pop(0)
code = parts.pop(0)
code_message = " ".join(parts)
return code, code_message
def args_2_url_encode(args):
if args is None:
return ""
result = ""
for i in args:
result += f"{quote_plus(i)}={quote_plus(args[i])}&"
return result[:-1]
| en | 0.576717 | # get code # get body # parse the rest of the headers | 3.0877 | 3 |
scripts/download_and_setup.py | frazer-lab/cardips-data-software | 0 | 6623319 | # This script downloads and installs necessary software and annotations. At some
# points the script may stop and ask for user input, and you may have to stop
# the script and restart at some points (this weirdness is due to the fun of
# trying to compile and install software automatically).
import os
import cdpybio as cpb
import pipelines as ps
import cardipspy as cpy
# --- Stand-alone bioinformatics tools: each is downloaded into cpy.software
# --- only if its install path does not already exist (idempotent re-runs).
if not os.path.exists(cpy.epacts):
    ps.prepare.download_epacts(cpy.software)
if not os.path.exists(cpy.snpeff):
    ps.prepare.download_snpeff(cpy.software)
if not os.path.exists(cpy.igvtools):
    ps.prepare.download_igvtools(cpy.software)
if not os.path.exists(cpy.gtfToGenePred):
    ps.prepare.download_gtfToGenePred(cpy.software)
if not os.path.exists(cpy.bedGraphToBigWig):
    ps.prepare.download_bedGraphToBigWig(cpy.software)
if not os.path.exists(cpy.liftOver):
    ps.prepare.download_liftOver(cpy.software)
if not os.path.exists(cpy.bedtools):
    ps.prepare.download_bedtools(cpy.software)
if not os.path.exists(cpy.bcftools):
    ps.prepare.download_bcftools(cpy.software)
if not os.path.exists(cpy.samtools):
    ps.prepare.download_samtools(cpy.software, lncurses=True)
if not os.path.exists(cpy.htslib):
    ps.prepare.download_htslib(cpy.software)
if not os.path.exists(cpy.picard):
    ps.prepare.download_picard(cpy.software)
if not os.path.exists(cpy.R):
    ps.prepare.download_r(cpy.software)
if not os.path.exists(cpy.star):
    ps.prepare.download_star(cpy.software)
if not os.path.exists(cpy.fastqc):
    ps.prepare.download_fastqc(cpy.software)
if not os.path.exists(cpy.featureCounts):
    ps.prepare.download_subread(cpy.software)
if not os.path.exists(cpy.fastx):
    ps.prepare.download_fastx_toolkit(cpy.software)
if not os.path.exists(cpy.vcftools):
    ps.prepare.download_vcftools(cpy.software)
# --- Reference genome and GENCODE v19 annotation ---
if not os.path.exists(cpy.hg19):
    ps.prepare.download_hg19(cpy.public_data, cpy.samtools)
if not os.path.exists(cpy.gencode_gtf):
    ps.prepare.download_gencode_gtf(cpy.public_data)
# --- Derived GENCODE tables and BED files (built once, cached on disk) ---
if not os.path.exists(cpy.gencode_splice_jxn_info):
    df = cpb.gencode.make_splice_junction_df(cpy.gencode_gtf)
    df.to_csv(cpy.gencode_splice_jxn_info, sep='\t')
if not os.path.exists(cpy.gencode_gene_info):
    df = cpb.gencode.make_gene_info_df(cpy.gencode_gtf)
    df.to_csv(cpy.gencode_gene_info, sep='\t')
if not os.path.exists(cpy.gencode_gene_bed):
    bt = cpb.gencode.make_feature_bed(cpy.gencode_gtf, 'gene',
                                      out=cpy.gencode_gene_bed)
if not os.path.exists(cpy.gencode_transcript_bed):
    bt = cpb.gencode.make_feature_bed(cpy.gencode_gtf, 'transcript',
                                      out=cpy.gencode_transcript_bed)
if not os.path.exists(cpy.gencode_exon_bed):
    bt = cpb.gencode.make_feature_bed(cpy.gencode_gtf, 'exon',
                                      out=cpy.gencode_exon_bed)
if not os.path.exists(cpy.gencode_utr_bed):
    bt = cpb.gencode.make_feature_bed(cpy.gencode_gtf, 'utr',
                                      out=cpy.gencode_utr_bed)
if not os.path.exists(cpy.gencode_transcript_gene):
    tg = cpb.gencode.make_transcript_gene_se(cpy.gencode_gtf)
    tg.to_csv(cpy.gencode_transcript_gene, sep='\t')
if not os.path.exists(cpy.gencode_promoter_bed):
    bt = cpb.gencode.make_promoter_bed(cpy.gencode_gtf,
                                       out=cpy.gencode_promoter_bed)
if not os.path.exists(cpy.gencode_tss_bed):
    bt = cpb.gencode.make_promoter_bed(cpy.gencode_gtf,
                                       out=cpy.gencode_tss_bed,
                                       tss=True)
# --- Roadmap Epigenomics chromatin-state models ---
if not os.path.exists(cpy.roadmap_15_state):
    ps.prepare.download_roadmap_15_state_chromatin_model(cpy.roadmap_15_state)
if not os.path.exists(cpy.roadmap_25_state):
    ps.prepare.download_roadmap_25_state_chromatin_model(cpy.roadmap_25_state)
# (A second, redundant featureCounts/subread stanza used to live here; it
# exactly duplicated the check above and has been removed.)
if not os.path.exists(cpy.weblogo):
    ps.prepare.download_weblogo(cpy.software)
if not os.path.exists(cpy.blat):
    ps.prepare.download_blat(cpy.software)
# --- Aligner indexes / RSEM reference ---
if not os.path.exists(cpy.star_index):
    ps.prepare.make_star_index(cpy.public_data, 30, cpy.hg19, cpy.gencode_gtf,
                               star_path=cpy.star)
if not os.path.exists(cpy.rsem):
    ps.prepare.download_rsem(cpy.software, lncurses=True)
dy = os.path.split(cpy.rsem_reference)[0]
if not os.path.exists(dy):
    # NOTE(review): the reference is only built when its parent directory is
    # missing; if the directory exists but the reference files do not, this
    # step is silently skipped -- confirm that is intended.
    cpy.makedir(dy)
    ps.prepare.rsem_prepare_reference(cpy.hg19, cpy.rsem_reference, cpy.rsem,
                                      gtf=cpy.gencode_gtf)
# try:
#     import rpy2
# except ImportError:
#     ps.prepare.download_install_rpy2(cpy.R, os.path.join(cpy.software))
#
# # I need to update this so the bioconductor dependencies aren't installed
# # every time the script is run.
# try:
#     import rpy2
#     ps.prepare.install_bioconductor_dependencies()
# except ImportError:
#     print('rpy2 not installed\n')
#
# --- Misc annotation resources ---
if not os.path.exists(cpy.gencode_dexseq_annotation):
    ps.prepare.make_dexseq_annotation(cpy.gencode_gtf,
                                      cpy.gencode_dexseq_annotation)
if not os.path.exists(cpy.gwas_catalog):
    ps.prepare.download_gwas_catalog(cpy.public_data)
if not os.path.exists(cpy.encode_blacklist):
    ps.prepare.download_encode_blacklist(cpy.public_data)
if not os.path.exists(cpy.picard_ref_flat):
    ps.prepare.make_rna_seq_metrics_files(
        os.path.join(cpy.public_data, 'gencode_v19'), cpy.gencode_gtf, cpy.hg19,
        cpy.picard, cpy.gtfToGenePred)
if not os.path.exists(cpy.kheradpour_motifs):
    dy = os.path.split(cpy.kheradpour_motifs)[0]
    cpy.makedir(dy)
    ps.prepare.download_kheradpour_motifs(dy)
| # This script downloads and installs necessary software and annotations. At some
# points the script may stop and ask for user input, and you may have to stop
# the script and restart at some points (this weirdness is due to the fun of
# trying to compile and install software automatically).
import os
import cdpybio as cpb
import pipelines as ps
import cardipspy as cpy
if not os.path.exists(cpy.epacts):
ps.prepare.download_epacts(cpy.software)
if not os.path.exists(cpy.snpeff):
ps.prepare.download_snpeff(cpy.software)
if not os.path.exists(cpy.igvtools):
ps.prepare.download_igvtools(cpy.software)
if not os.path.exists(cpy.gtfToGenePred):
ps.prepare.download_gtfToGenePred(cpy.software)
if not os.path.exists(cpy.bedGraphToBigWig):
ps.prepare.download_bedGraphToBigWig(cpy.software)
if not os.path.exists(cpy.liftOver):
ps.prepare.download_liftOver(cpy.software)
if not os.path.exists(cpy.bedtools):
ps.prepare.download_bedtools(cpy.software)
if not os.path.exists(cpy.bcftools):
ps.prepare.download_bcftools(cpy.software)
if not os.path.exists(cpy.samtools):
ps.prepare.download_samtools(cpy.software, lncurses=True)
if not os.path.exists(cpy.htslib):
ps.prepare.download_htslib(cpy.software)
if not os.path.exists(cpy.picard):
ps.prepare.download_picard(cpy.software)
if not os.path.exists(cpy.R):
ps.prepare.download_r(cpy.software)
if not os.path.exists(cpy.star):
ps.prepare.download_star(cpy.software)
if not os.path.exists(cpy.fastqc):
ps.prepare.download_fastqc(cpy.software)
if not os.path.exists(cpy.featureCounts):
ps.prepare.download_subread(cpy.software)
if not os.path.exists(cpy.fastx):
ps.prepare.download_fastx_toolkit(cpy.software)
if not os.path.exists(cpy.vcftools):
ps.prepare.download_vcftools(cpy.software)
if not os.path.exists(cpy.hg19):
ps.prepare.download_hg19(cpy.public_data, cpy.samtools)
if not os.path.exists(cpy.gencode_gtf):
ps.prepare.download_gencode_gtf(cpy.public_data)
if not os.path.exists(cpy.gencode_splice_jxn_info):
df = cpb.gencode.make_splice_junction_df(cpy.gencode_gtf)
df.to_csv(cpy.gencode_splice_jxn_info, sep='\t')
if not os.path.exists(cpy.gencode_gene_info):
df = cpb.gencode.make_gene_info_df(cpy.gencode_gtf)
df.to_csv(cpy.gencode_gene_info, sep='\t')
if not os.path.exists(cpy.gencode_gene_bed):
bt = cpb.gencode.make_feature_bed(cpy.gencode_gtf, 'gene',
out=cpy.gencode_gene_bed)
if not os.path.exists(cpy.gencode_transcript_bed):
bt = cpb.gencode.make_feature_bed(cpy.gencode_gtf, 'transcript',
out=cpy.gencode_transcript_bed)
if not os.path.exists(cpy.gencode_exon_bed):
bt = cpb.gencode.make_feature_bed(cpy.gencode_gtf, 'exon',
out=cpy.gencode_exon_bed)
if not os.path.exists(cpy.gencode_utr_bed):
bt = cpb.gencode.make_feature_bed(cpy.gencode_gtf, 'utr',
out=cpy.gencode_utr_bed)
if not os.path.exists(cpy.gencode_transcript_gene):
tg = cpb.gencode.make_transcript_gene_se(cpy.gencode_gtf)
tg.to_csv(cpy.gencode_transcript_gene, sep='\t')
if not os.path.exists(cpy.gencode_promoter_bed):
bt = cpb.gencode.make_promoter_bed(cpy.gencode_gtf,
out=cpy.gencode_promoter_bed)
if not os.path.exists(cpy.gencode_tss_bed):
bt = cpb.gencode.make_promoter_bed(cpy.gencode_gtf,
out=cpy.gencode_tss_bed,
tss=True)
if not os.path.exists(cpy.roadmap_15_state):
ps.prepare.download_roadmap_15_state_chromatin_model(cpy.roadmap_15_state)
if not os.path.exists(cpy.roadmap_25_state):
ps.prepare.download_roadmap_25_state_chromatin_model(cpy.roadmap_25_state)
if not os.path.exists(cpy.featureCounts):
ps.prepare.download_subread(cpy.software)
if not os.path.exists(cpy.weblogo):
ps.prepare.download_weblogo(cpy.software)
if not os.path.exists(cpy.blat):
ps.prepare.download_blat(cpy.software)
if not os.path.exists(cpy.star_index):
ps.prepare.make_star_index(cpy.public_data, 30, cpy.hg19, cpy.gencode_gtf,
star_path=cpy.star)
if not os.path.exists(cpy.rsem):
ps.prepare.download_rsem(cpy.software, lncurses=True)
dy = os.path.split(cpy.rsem_reference)[0]
if not os.path.exists(dy):
cpy.makedir(dy)
ps.prepare.rsem_prepare_reference(cpy.hg19, cpy.rsem_reference, cpy.rsem,
gtf=cpy.gencode_gtf)
# try:
# import rpy2
# except ImportError:
# ps.prepare.download_install_rpy2(cpy.R, os.path.join(cpy.software))
#
# # I need to update this so the bioconductor dependencies aren't installed
# # every time the script is run.
# try:
# import rpy2
# ps.prepare.install_bioconductor_dependencies()
# except ImportError:
# print('rpy2 not installed\n')
#
if not os.path.exists(cpy.gencode_dexseq_annotation):
ps.prepare.make_dexseq_annotation(cpy.gencode_gtf,
cpy.gencode_dexseq_annotation)
if not os.path.exists(cpy.gwas_catalog):
ps.prepare.download_gwas_catalog(cpy.public_data)
if not os.path.exists(cpy.encode_blacklist):
ps.prepare.download_encode_blacklist(cpy.public_data)
if not os.path.exists(cpy.picard_ref_flat):
ps.prepare.make_rna_seq_metrics_files(
os.path.join(cpy.public_data, 'gencode_v19'), cpy.gencode_gtf, cpy.hg19,
cpy.picard, cpy.gtfToGenePred)
if not os.path.exists(cpy.kheradpour_motifs):
dy = os.path.split(cpy.kheradpour_motifs)[0]
cpy.makedir(dy)
ps.prepare.download_kheradpour_motifs(dy)
| en | 0.784562 | # This script downloads and installs necessary software and annotations. At some # points the script may stop and ask for user input, and you may have to stop # the script and restart at some points (this weirdness is due to the fun of # trying to compile and install software automatically). # try: # import rpy2 # except ImportError: # ps.prepare.download_install_rpy2(cpy.R, os.path.join(cpy.software)) # # # I need to update this so the bioconductor dependencies aren't installed # # every time the script is run. # try: # import rpy2 # ps.prepare.install_bioconductor_dependencies() # except ImportError: # print('rpy2 not installed\n') # | 2.110546 | 2 |
autoflow/scripts/cli/jump.py | sh-biswas/autoflow | 7 | 6623320 | import os
import click
import subprocess
from sys import platform
from autoflow.scripts.mactab import openTab
from autoflow.env import projectsDir, slash
@click.command()
@click.argument('dir',type=click.STRING)
def jump(dir):
    """
    Jumps to your specified project directory
    DIR is the name of the project directory
    """
    # Build the project path from the configured projects root.
    project = projectsDir + slash + dir
    try:
        os.chdir(project)
        # Open a fresh terminal tab in the new working directory.
        if platform == "linux" or platform == "linux2":
            subprocess.run(['gnome-terminal --tab'],shell=True)
        elif platform == "darwin":
            openTab()
    except OSError:
        # BUGFIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and hid unrelated bugs.  A missing or
        # invalid project makes os.chdir raise an OSError subclass.
        click.echo('😅 Project doesn\'t exist')
import click
import subprocess
from sys import platform
from autoflow.scripts.mactab import openTab
from autoflow.env import projectsDir, slash
@click.command()
@click.argument('dir',type=click.STRING)
def jump(dir):
"""
Jumps to your specified project directory
DIR is the name of the project directory
"""
project = projectsDir + slash + dir
try:
os.chdir(project)
if platform == "linux" or platform == "linux2":
subprocess.run([f'gnome-terminal --tab'],shell=True)
elif platform == "darwin":
openTab()
except:
click.echo('😅 Project doesn\'t exist') | en | 0.69013 | Jumps to your specified project directory DIR is the name of the project directory | 2.739815 | 3 |
blog/admin.py | mamad-azimi-jozani/charity_django_blog | 0 | 6623321 | from django_summernote.admin import SummernoteModelAdmin
from django.contrib import admin
from .models import Post, Category
# Register your models here.
@admin.register(Post)
class AdminPost(SummernoteModelAdmin):
    # Blog-post admin with a Summernote WYSIWYG editor on `content`.
    date_hierarchy = "published_date"
    # exclude = ['title', 'content']
    list_display = ['title', 'status', 'author', 'published_date']
    list_filter = ['status']
    ordering = ['-created_date']  # newest posts first
    search_fields = ['title', 'content']
    summernote_fields = ('content',)
@admin.register(Category)
class AdminCategory(admin.ModelAdmin):
    # Default ModelAdmin; no customisation needed for categories.
    pass
from django.contrib import admin
from .models import Post, Category
# Register your models here.
@admin.register(Post)
class AdminPost(SummernoteModelAdmin):
date_hierarchy = "published_date"
# exclude = ['title', 'content']
list_display = ['title', 'status', 'author', 'published_date']
list_filter = ['status']
ordering = ['-created_date']
search_fields = ['title', 'content']
summernote_fields = ('content',)
@admin.register(Category)
class AdminCategory(admin.ModelAdmin):
pass | en | 0.626947 | # Register your models here. # exclude = ['title', 'content'] | 1.594372 | 2 |
a.py | anishmax/python_basics | 0 | 6623322 | <filename>a.py
# Minimal demo script: print two greeting lines.
# (Trailing extraction-metadata garbage fused onto the second line removed.)
print("hello 1")
print("hello 2")
print("hello 1")
print("hello 2") | none | 1 | 1.461247 | 1 | |
Chapter07/Unit Tests/testExercise7_02.py | nijinjose/The-Supervised-Learning-Workshop | 19 | 6623323 | <reponame>nijinjose/The-Supervised-Learning-Workshop<gh_stars>10-100
import unittest
import pandas as pd
import pickle
from sklearn.metrics import (accuracy_score, confusion_matrix, precision_score,
recall_score, f1_score)
import os
class TestingActivity7_02(unittest.TestCase):
    """Checks for Exercise 7.02: bundled dataset shapes plus the saved
    random-forest classifier's metrics on the Titanic data.

    NOTE(review): depends on the workshop's CSV fixtures and the pickled
    models saved in Chapter 6 being present on disk -- not self-contained.
    """
    def setUp(self) -> None:
        # Resolve all fixture/model paths relative to this test file.
        ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
        self.house_prices_reg = pd.read_csv(os.path.join(ROOT_DIR, '..', 'Datasets', 'boston_house_prices_regression.csv'))
        self.titanic_clf = pd.read_csv(os.path.join(ROOT_DIR, '..', 'Datasets', 'titanic_classification.csv'))
        # Pre-trained models pickled by the Chapter 6 exercises.
        with open(os.path.join(ROOT_DIR, '../..', 'Chapter06', 'Saved Models/stacked_linear_regression.pkl'),
                  'rb') as f:
            self.reg = pickle.load(f)
        with open(os.path.join(ROOT_DIR, '../..', 'Chapter06', 'Saved Models/random_forest_clf.pkl'), 'rb') as f:
            self.rf = pickle.load(f)
    def test_dataset_shape(self):
        # Expected (rows, columns) of the bundled CSVs.
        self.assertEqual(self.house_prices_reg.shape, (102, 18))
        self.assertEqual(self.titanic_clf.shape, (891, 10))
    def test_accuracy_precision_recall(self):
        # Last column is the survival label; the rest are features.
        X = self.titanic_clf.iloc[:, :-1].values
        y = self.titanic_clf.iloc[:, -1].values
        y_pred = self.rf.predict(X)
        # Regression-pinned metric values for the saved model.
        self.assertAlmostEqual(accuracy_score(y, y_pred), 0.64646465, places=4)
        self.assertEqual(confusion_matrix(y_pred=y_pred, y_true=y)[0][0], 547)
        self.assertAlmostEqual(precision_score(y, y_pred), 0.93548387, places=4)
        self.assertAlmostEqual(recall_score(y, y_pred), 0.08479532, places=4)
        self.assertAlmostEqual(f1_score(y, y_pred), 0.15549598, places=4)
# Allow running this test module directly: `python testExercise7_02.py`.
if __name__ == '__main__':
    unittest.main()
| import unittest
import pandas as pd
import pickle
from sklearn.metrics import (accuracy_score, confusion_matrix, precision_score,
recall_score, f1_score)
import os
class TestingActivity7_02(unittest.TestCase):
def setUp(self) -> None:
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
self.house_prices_reg = pd.read_csv(os.path.join(ROOT_DIR, '..', 'Datasets', 'boston_house_prices_regression.csv'))
self.titanic_clf = pd.read_csv(os.path.join(ROOT_DIR, '..', 'Datasets', 'titanic_classification.csv'))
with open(os.path.join(ROOT_DIR, '../..', 'Chapter06', 'Saved Models/stacked_linear_regression.pkl'),
'rb') as f:
self.reg = pickle.load(f)
with open(os.path.join(ROOT_DIR, '../..', 'Chapter06', 'Saved Models/random_forest_clf.pkl'), 'rb') as f:
self.rf = pickle.load(f)
def test_dataset_shape(self):
self.assertEqual(self.house_prices_reg.shape, (102, 18))
self.assertEqual(self.titanic_clf.shape, (891, 10))
def test_accuracy_precision_recall(self):
X = self.titanic_clf.iloc[:, :-1].values
y = self.titanic_clf.iloc[:, -1].values
y_pred = self.rf.predict(X)
self.assertAlmostEqual(accuracy_score(y, y_pred), 0.64646465, places=4)
self.assertEqual(confusion_matrix(y_pred=y_pred, y_true=y)[0][0], 547)
self.assertAlmostEqual(precision_score(y, y_pred), 0.93548387, places=4)
self.assertAlmostEqual(recall_score(y, y_pred), 0.08479532, places=4)
self.assertAlmostEqual(f1_score(y, y_pred), 0.15549598, places=4)
if __name__ == '__main__':
unittest.main() | none | 1 | 2.720695 | 3 | |
int_compute/int_compute.py | IratePirates/AoC2019 | 0 | 6623324 | from copy import deepcopy
def load_input(filename):
    """Read a comma-separated intcode program file into a list of ints."""
    with open(filename, 'r') as handle:
        return [int(token)
                for line in handle
                for token in line.split(',')]
class int_computer(object):
    """Intcode virtual machine (Advent of Code 2019).

    Memory is the program plus a large zero-filled padding region so reads
    and writes beyond the program (day 9) do not index-error.  Interpreter
    state: ``pc`` (program counter), ``base_address`` (relative base for
    mode-2 parameters), ``running`` (cleared by opcode 99) and ``stalled``
    (set when input is required but the input queue is empty).
    """
    def __init__(self, input_inst, input_values=None, tag=""):
        # Copy the program and pad with 200x its length of zeroed memory.
        self._prog = deepcopy(input_inst) + [0]*len(input_inst)*200
        self.prog_len = len(input_inst)
        self._output = []
        self.input = []
        if input_values != None:
            self.input += input_values
        self.pc = 0               # program counter
        self.running = True       # cleared by opcode 99
        self.stalled = False      # set when input is needed but unavailable
        self.tag = tag            # label used in debug prints
        self.base_address = 0     # relative base for mode-2 parameters
    @property
    def output(self):
        # Draining read: returns buffered outputs and clears the buffer.
        res = self._output
        self._output = []
        return res
    @property
    def prog(self):
        # Memory up to the highest tracked address (zero padding excluded).
        return self._prog[:self.prog_len]
    def run(self):
        """Execute instructions until opcode 99 halts the machine."""
        while self.running:
            self.step()
    def step(self):
        """Execute a single instruction; any produced output is buffered."""
        _, tmp_out = self.interpret_line(self._prog[self.pc:self.pc+4])
        if tmp_out != []:
            self._output += tmp_out
    def run_until_input(self, additional_input=None):
        """Run until the machine halts or blocks on an empty input queue.

        NOTE(review): ``res`` and ``additional_input`` are unused, and
        ``append(*tmp_out)`` relies on interpret_line emitting at most one
        output value per instruction.
        """
        res = 1
        self.stalled = False
        while self.running and not self.stalled:
            res, tmp_out = self.interpret_line(self._prog[self.pc:self.pc+4])
            if tmp_out != []:
                self._output.append(*tmp_out)
        # print("Computer {} - Is Stalled? {}".format(self.tag, self.stalled),
        #       "  Is Running? {}". format(self.running))
    def get_par(self, line, par=1):
        """Fetch parameter *par* (1-based) honouring its mode digit.

        Mode 0 = position, 1 = immediate, 2 = relative to ``base_address``.
        """
        mode = line[0] // (10 * (10 ** int(par))) % 10
        if mode == 0:
            par1 = self._prog[line[par]]
        elif mode == 1:
            par1 = line[par]
        elif mode == 2:
            par1 = self._prog[self.base_address + line[par]]
        else:
            raise IOError
        return par1
    def set_par(self, pos, value):
        # Write *value* to absolute address *pos*, growing the tracked
        # program length so the ``prog`` property includes the written cell.
        # NOTE(review): write destinations below use raw ``line[3]`` (or a
        # get_par() result for opcode 3), so mode-2 relative writes are not
        # handled the standard intcode way -- verify against the inputs used.
        if pos > self.prog_len:
            self.prog_len = pos
        self._prog[pos] = value
    def interpret_line(self, line):
        """Decode and execute one instruction.

        Returns ``(instruction_length, outputs)`` where outputs is a list
        with at most one element (from opcode 4).
        """
        ins_len = -1
        res = []
        if not self.running:
            return ins_len, res
        try:
            op = line[0] % 100
            if op == 99: # halt
                ins_len = 0
                self.running = False
            elif op == 3: # input
                # NOTE(review): the destination goes through get_par(), which
                # dereferences in mode 0; standard intcode writes to the
                # literal address operand -- confirm against puzzle inputs.
                try:
                    self.set_par(self.get_par(line, 1),
                                 self.input.pop(0))
                    ins_len = 2
                    self.pc += ins_len
                except IndexError:
                    # No input available: stall WITHOUT advancing pc so the
                    # same instruction retries once input arrives.
                    ins_len = 0
                    self.stalled = True
            elif op == 4: # output
                res = [self.get_par(line,1)]
                ins_len = 2
                self.pc += ins_len
            elif op == 9: # adjust the relative base
                self.base_address += self.get_par(line,1)
                ins_len = 2
                self.pc += ins_len
            elif op == 5: # jump-if-true
                ins_len = 3
                if self.get_par(line,1) != 0:
                    self.pc = self.get_par(line,2)
                else:
                    self.pc += ins_len
            elif op == 6: # jump-if-false
                ins_len = 3
                if self.get_par(line,1) == 0:
                    self.pc = self.get_par(line,2)
                else:
                    self.pc += ins_len
            elif op == 1: # add
                self.set_par(line[3],
                             self.get_par(line,1) + self.get_par(line,2))
                ins_len = 4
                self.pc += ins_len
            elif op == 2: # multiply
                self.set_par(line[3],
                             self.get_par(line,1) * self.get_par(line,2))
                ins_len = 4
                self.pc += ins_len
            elif op == 7: # less-than (stores a bool; arithmetically 0/1)
                self.set_par(line[3],
                             self.get_par(line,1) < self.get_par(line,2))
                ins_len = 4
                self.pc += ins_len
            elif op == 8: # equals (comment previously said "less than")
                self.set_par(line[3],
                             self.get_par(line,1) == self.get_par(line,2))
                ins_len = 4
                self.pc += ins_len
        except IndexError:
            print("Bad instruction? [{}] - {}".format(self.pc,
                                                      line))
            raise
        return ins_len, res
def run_prog(instructions, in_data=None):
    """Run *instructions* to completion on a fresh machine; return its outputs."""
    machine = int_computer(input_inst=instructions,
                           input_values=in_data)
    machine.run()
    return machine._output
| from copy import deepcopy
def load_input(filename):
input = []
with open(filename, 'r') as f:
for l in f.readlines():
for tok in l.split(','):
input.append(int(tok))
return input
class int_computer(object):
def __init__(self, input_inst, input_values=None, tag=""):
self._prog = deepcopy(input_inst) + [0]*len(input_inst)*200
self.prog_len = len(input_inst)
self._output = []
self.input = []
if input_values != None:
self.input += input_values
self.pc = 0
self.running = True
self.stalled = False
self.tag = tag
self.base_address = 0
@property
def output(self):
res = self._output
self._output = []
return res
@property
def prog(self):
return self._prog[:self.prog_len]
def run(self):
while self.running:
self.step()
def step(self):
_, tmp_out = self.interpret_line(self._prog[self.pc:self.pc+4])
if tmp_out != []:
self._output += tmp_out
def run_until_input(self, additional_input=None):
res = 1
self.stalled = False
while self.running and not self.stalled:
res, tmp_out = self.interpret_line(self._prog[self.pc:self.pc+4])
if tmp_out != []:
self._output.append(*tmp_out)
# print("Computer {} - Is Stalled? {}".format(self.tag, self.stalled),
# " Is Running? {}". format(self.running))
def get_par(self, line, par=1):
mode = line[0] // (10 * (10 ** int(par))) % 10
if mode == 0:
par1 = self._prog[line[par]]
elif mode == 1:
par1 = line[par]
elif mode == 2:
par1 = self._prog[self.base_address + line[par]]
else:
raise IOError
return par1
def set_par(self, pos, value):
if pos > self.prog_len:
self.prog_len = pos
self._prog[pos] = value
def interpret_line(self, line):
ins_len = -1
res = []
if not self.running:
return ins_len, res
try:
op = line[0] % 100
if op == 99: # end
ins_len = 0
self.running = False
elif op == 3: # load
try:
self.set_par(self.get_par(line, 1),
self.input.pop(0))
ins_len = 2
self.pc += ins_len
except IndexError:
ins_len = 0
self.stalled = True
elif op == 4: # _output
res = [self.get_par(line,1)]
ins_len = 2
self.pc += ins_len
elif op == 9: # adjust the relative base
self.base_address += self.get_par(line,1)
ins_len = 2
self.pc += ins_len
elif op == 5: # Jump-if-true
ins_len = 3
if self.get_par(line,1) != 0:
self.pc = self.get_par(line,2)
else:
self.pc += ins_len
elif op == 6: # Jump-if-false
ins_len = 3
if self.get_par(line,1) == 0:
self.pc = self.get_par(line,2)
else:
self.pc += ins_len
elif op == 1: # Add
self.set_par(line[3],
self.get_par(line,1) + self.get_par(line,2))
ins_len = 4
self.pc += ins_len
elif op == 2: # Mult
self.set_par(line[3],
self.get_par(line,1) * self.get_par(line,2))
ins_len = 4
self.pc += ins_len
elif op == 7: # less than
self.set_par(line[3],
self.get_par(line,1) < self.get_par(line,2))
ins_len = 4
self.pc += ins_len
elif op == 8: # less than
self.set_par(line[3],
self.get_par(line,1) == self.get_par(line,2))
ins_len = 4
self.pc += ins_len
except IndexError:
print("Bad instruction? [{}] - {}".format(self.pc,
line))
raise
return ins_len, res
def run_prog(instructions, in_data=None):
computer = int_computer(input_inst=instructions,
input_values=in_data)
computer.run()
return computer._output
| en | 0.464086 | # print("Computer {} - Is Stalled? {}".format(self.tag, self.stalled), # " Is Running? {}". format(self.running)) # end # load # _output # adjust the relative base # Jump-if-true # Jump-if-false # Add # Mult # less than # less than | 3.335441 | 3 |
tests/test_tests.py | sii/siptrackd | 0 | 6623325 | <reponame>sii/siptrackd<filename>tests/test_tests.py
from utils import BasicTestCase
def raise_exception(a):
    # Test helper: unconditionally raises Exception(a) (assertRaises fodder).
    raise Exception(a)
class TestTests(BasicTestCase):
    """Sanity checks that the unittest assertion helpers behave as expected."""
    def testAssert(self):
        # was self.assert_(...): a long-deprecated alias; assertIn is the
        # idiomatic (and better-reporting) membership check.
        self.assertIn(1, [1, 2, 3])
    def testAssertEqual(self):
        self.assertEqual(True, True)
    def testAssertRaises(self):
        self.assertRaises(Exception, raise_exception, 1)
| from utils import BasicTestCase
def raise_exception(a):
raise Exception(a)
class TestTests(BasicTestCase):
def testAssert(self):
self.assert_(1 in [1, 2, 3])
def testAssertEqual(self):
self.assertEqual(True, True)
def testAssertRaises(self):
self.assertRaises(Exception, raise_exception, 1) | none | 1 | 2.948635 | 3 | |
src/a_detect_pictogram/course_detect_pictogram.py | wenksi/pren-robo-cube-ipcv | 0 | 6623326 | <reponame>wenksi/pren-robo-cube-ipcv
#!/bin/python3
import cv2
import pyttsx3
import logging
import time
from src.common.camera.camera import Camera
# Directory containing the trained Haar-cascade XML files, one per pictogram.
path_to_cascades = "resources/cascades/pictogram/"
paths = ['hammer.xml', 'sandwich.xml', 'rule.xml', 'paint.xml', 'pencil.xml'] # cascade filenames
objectNames = ['hammer', 'sandwich', 'rule', 'paint', 'pencil'] # display names, index-aligned with `paths`
objectCount = 7 # total detections (not distinct objects) required before stopping
class PictogramDetector:
    """
    Loads the pictogram Haar cascades, then samples camera frames and counts
    cascade hits until enough detections have been collected.
    """
    def __init__(self, camera: Camera):
        # self.vs = cv2.VideoCapture(0)
        # self.vs.set(3, 640)
        # self.vs.set(4, 480)
        self.camera = camera
        time.sleep(1)  # brief pause before first capture (camera warm-up?)
        self.cascades = []
        for c in paths:  # load one classifier per cascade file
            self.cascades.append(cv2.CascadeClassifier(path_to_cascades + c))
        logging.info("Ready for detection")
    def detect(self):
        """
        Snapshot frames and run every cascade on each one until `objectCount`
        detections have accumulated across all pictogram types.
        :return: dict mapping pictogram name -> number of detections
        """
        counter = 0
        stats = {'hammer': 0, 'sandwich': 0, 'rule': 0, 'paint': 0, 'pencil': 0}
        while counter < objectCount:
            img = self.camera.snapshot()
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            # Run every cascade against the captured frame.
            for c in self.cascades:
                objects = c.detectMultiScale(gray, 1.15, 3)
                # Check the pixel area of every found object.
                for (x, y, w, h) in objects:
                    area = w * h
                    if area > 400:  # ignore tiny (likely spurious) matches
                        o = objectNames[self.cascades.index(c)]
                        counter += 1
                        stats[o] += 1
        return stats
def run(camera: Camera):
"""
Runs the PictogramDetector.
:return: the pictogram which had the most hits.
"""
try:
detector = PictogramDetector(camera)
stats = detector.detect()
logging.debug(stats)
result = max(stats, key=stats.get)
t2s = pyttsx3.init()
t2s.setProperty('voice', t2s.getProperty('voices'))
t2s.setProperty('volume', 1)
comb = 'some' if result is 'paint' else 'a'
t2s.say("I am looking for %s %s" % (comb, result))
logging.info("detected: %s", result)
t2s.runAndWait()
return result
except RuntimeError as e:
logging.error("Error in a_detect_pictogram:\n", e)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
run(Camera())
| #!/bin/python3
import cv2
import pyttsx3
import logging
import time
from src.common.camera.camera import Camera
path_to_cascades = "resources/cascades/pictogram/"
paths = ['hammer.xml', 'sandwich.xml', 'rule.xml', 'paint.xml', 'pencil.xml'] # PATH OF THE CASCADE
objectNames = ['hammer', 'sandwich', 'rule', 'paint', 'pencil'] # OBJECT NAMES TO DISPLAY
objectCount = 7 # how many objects to count for recognition
class PictogramDetector:
"""
Class loads cascade files, analyzes the video stream and detects pictograms in front of the camera.
"""
def __init__(self, camera: Camera):
# self.vs = cv2.VideoCapture(0)
# self.vs.set(3, 640)
# self.vs.set(4, 480)
self.camera = camera
time.sleep(1)
self.cascades = []
for c in paths: # LOAD THE CLASSIFIERS
self.cascades.append(cv2.CascadeClassifier(path_to_cascades + c))
logging.info("Ready for detection")
def detect(self):
"""
Used to detect and count pictograms.
:return: statistics of detected pictograms
"""
counter = 0
stats = {'hammer': 0, 'sandwich': 0, 'rule': 0, 'paint': 0, 'pencil': 0}
while counter < objectCount:
img = self.camera.snapshot()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# With all cascades check captured frame
for c in self.cascades:
objects = c.detectMultiScale(gray, 1.15, 3)
# Calculate and check the size of every found object
for (x, y, w, h) in objects:
area = w * h
if area > 400:
o = objectNames[self.cascades.index(c)]
counter += 1
stats[o] += 1
return stats
def run(camera: Camera):
"""
Runs the PictogramDetector.
:return: the pictogram which had the most hits.
"""
try:
detector = PictogramDetector(camera)
stats = detector.detect()
logging.debug(stats)
result = max(stats, key=stats.get)
t2s = pyttsx3.init()
t2s.setProperty('voice', t2s.getProperty('voices'))
t2s.setProperty('volume', 1)
comb = 'some' if result is 'paint' else 'a'
t2s.say("I am looking for %s %s" % (comb, result))
logging.info("detected: %s", result)
t2s.runAndWait()
return result
except RuntimeError as e:
logging.error("Error in a_detect_pictogram:\n", e)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
run(Camera()) | en | 0.742054 | #!/bin/python3 # PATH OF THE CASCADE # OBJECT NAMES TO DISPLAY # how many objects to count for recognition Class loads cascade files, analyzes the video stream and detects pictograms in front of the camera. # self.vs = cv2.VideoCapture(0) # self.vs.set(3, 640) # self.vs.set(4, 480) # LOAD THE CLASSIFIERS Used to detect and count pictograms. :return: statistics of detected pictograms # With all cascades check captured frame # Calculate and check the size of every found object Runs the PictogramDetector. :return: the pictogram which had the most hits. | 2.816544 | 3 |
nlreg1d/plot.py | 0todd0000/nlreg1d | 1 | 6623327 | <reponame>0todd0000/nlreg1d
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import patches
import spm1d
def axes2data(ax, points):
ax.get_xlim()
ax.get_ylim()
t = ax.transAxes + ax.transData.inverted()
return t.transform( points )
def data2axes(ax, points):
ax.get_xlim()
ax.get_ylim()
t = (ax.transAxes + ax.transData.inverted()).inverted()
return t.transform( points )
def plot_multipanel(y, yr, d, n0, colors, parametric=True, ylim=None, alpha_x=None, paired=False, permutations=1000, dvlabel='Dependent variable', xlabel='Domain position (%)', group_labels=None, leg_loc=[(0.99, 0.92), (0.99, 0.92), (0.99, 0.99)]):
d = d[:,1:-1]
Y = np.dstack( [yr[:,1:-1],d] )
J = n0
fontname = 'Helvetica'
glabels = ['Group 1 mean', 'Group 2 mean'] if (group_labels is None) else group_labels
# stats:
if parametric:
if paired:
ti = spm1d.stats.ttest_paired( y[J:], y[:J] ).inference(0.05)
T2i = spm1d.stats.hotellings_paired( Y[J:], Y[:J] ).inference(0.05)
tri = spm1d.stats.ttest_paired( yr[J:], yr[:J] ).inference(0.05/2)
twi = spm1d.stats.ttest_paired( d[J:], d[:J] ).inference(0.05/2)
else:
ti = spm1d.stats.ttest2( y[J:], y[:J] ).inference(0.05)
T2i = spm1d.stats.hotellings2( Y[J:], Y[:J] ).inference(0.05)
tri = spm1d.stats.ttest2( yr[J:], yr[:J] ).inference(0.05/2)
twi = spm1d.stats.ttest2( d[J:], d[:J] ).inference(0.05/2)
else:
if paired:
t = spm1d.stats.nonparam.ttest_paired( y[J:], y[:J] )
T2 = spm1d.stats.nonparam.hotellings_paired( Y[J:], Y[:J] )
tr = spm1d.stats.nonparam.ttest_paired( yr[J:], yr[:J] )
tw = spm1d.stats.nonparam.ttest_paired( d[J:], d[:J] )
else:
t = spm1d.stats.nonparam.ttest2( y[J:], y[:J] )
T2 = spm1d.stats.nonparam.hotellings2( Y[J:], Y[:J] )
tr = spm1d.stats.nonparam.ttest2( yr[J:], yr[:J] )
tw = spm1d.stats.nonparam.ttest2( d[J:], d[:J] )
nperm = -1 if (permutations > t.nPermUnique) else permutations
ti = t.inference(0.05, iterations=nperm, two_tailed=True)
T2i = T2.inference(0.05, iterations=nperm)
tri = tr.inference(0.05, iterations=nperm, two_tailed=True)
twi = tw.inference(0.05, iterations=nperm, two_tailed=True)
# nperm0 = -1 if (permutations > t.nPermUnique) else permutations
# nperm1 = -1 if (permutations > T2.nPermUnique) else permutations
# nperm2 = -1 if (permutations > tr.nPermUnique) else permutations
# nperm3 = -1 if (permutations > tw.nPermUnique) else permutations
# create figure and axes:
fig = plt.figure( figsize=(14,10) )
axw,axh = 0.26, 0.27
# axx = np.linspace( 0.06, 0.42, 0.71)
axx = [0.085, 0.415, 0.730]
axy = np.linspace(0.66, 0.06, 3)
ax0,ax1,ax2 = [plt.axes( [x,axy[0],axw,axh] ) for x in axx]
ax3 = plt.axes( [axx[0],axy[2],axw,axh] )
ax4 = plt.axes( [axx[1]+0.15,axy[1],axw,axh] )
ax5,ax6 = [plt.axes( [x,axy[2],axw,axh] ) for x in axx[1:]]
AX = [ax0,ax1,ax2, ax3, ax4, ax5,ax6]
h0 = ax0.plot( y[:J].T, color=colors[0], lw=0.3 )[0]
h1 = ax0.plot( y[J:].T, color=colors[1], lw=0.3 )[0]
h0 = ax0.plot( y[:J].mean(axis=0), color=colors[0], lw=5 )[0]
h1 = ax0.plot( y[J:].mean(axis=0), color=colors[1], lw=5 )[0]
leg = ax0.legend( [h0,h1], glabels, loc='upper right', bbox_to_anchor=leg_loc[0] )
plt.setp( leg.get_texts(), name=fontname)
ax1.plot( yr[:J].T, color=colors[0], lw=0.3 )
ax1.plot( yr[J:].T, color=colors[1], lw=0.3 )
h0 = ax1.plot( yr[:J].mean(axis=0), color=colors[0], lw=5 )[0]
h1 = ax1.plot( yr[J:].mean(axis=0), color=colors[1], lw=5 )[0]
leg = ax1.legend( [h0,h1], glabels, loc='upper right', bbox_to_anchor=leg_loc[1] )
plt.setp( leg.get_texts(), name=fontname)
h0 = ax2.plot( d[:J].T, color=colors[0], lw=0.3 )[0]
h1 = ax2.plot( d[J:].T, color=colors[1], lw=0.3 )[0]
h2 = ax2.axhline(0, color='k', ls='--')
h0 = ax2.plot( d[:J].mean(axis=0), color=colors[0], lw=5 )[0]
h1 = ax2.plot( d[J:].mean(axis=0), color=colors[1], lw=5 )[0]
leg = ax2.legend( [h0,h1,h2], glabels + ['Null displacement'], loc='upper right', bbox_to_anchor=leg_loc[2] )
plt.setp( leg.get_texts(), name=fontname)
# SPM results:
ti.plot( ax=ax3 )
T2i.plot( ax=ax4 )
tri.plot( ax=ax5 )
twi.plot( ax=ax6 )
# init axes decorations:
for ax in AX:
plt.setp( ax.get_xticklabels() + ax.get_yticklabels(), name=fontname, size=10 )
ax.set_xlim(0, 100)
ax.set_ylabel(None)
[ax.set_xticklabels([]) for ax in [ax1,ax2,ax4]]
# axis labels:
sz = 16
ax0.set_ylabel(dvlabel, name=fontname, size=sz)
ax1.set_ylabel(dvlabel, name=fontname, size=sz)
ax2.set_ylabel('Warp magnitude', name=fontname, size=sz)
ax3.set_ylabel('SPM{t}', name=fontname, size=sz)
ax4.set_ylabel(r'SPM{ $T^2$ }', name=fontname, size=sz)
ax5.set_ylabel('SPM{t}', name=fontname, size=sz)
[ax.set_xlabel(xlabel, name=fontname, size=sz) for ax in [ax3,ax5,ax6]]
# panel labels:
labels = ['A.1', 'B.1', 'B.2', 'A.2', 'B.3', 'B.4', 'B.5']
slabels = ['Linearly registered', 'Nonlinearly registered', 'Displacement fields', 'Statistical analysis', 'Main test (amplitude + timing)', 'Post hoc (amplitude)', 'Post hoc (timing)']
[ax.text(0.03, 0.92, f'({s}) {ss}', name=fontname, size=14, transform=ax.transAxes) for ax,s,ss in zip( AX, labels, slabels ) ]
tx0 = ax1.text(0.01, 1.05, 'Amplitude effects', ha='left', transform=ax1.transAxes)
tx1 = ax2.text(0.99, 1.05, 'Timing effects', ha='right', transform=ax2.transAxes)
plt.setp( [tx0,tx1] , name=fontname, size=16 )
# background panels:
c0,c1 = '0.6', '0.9'
patch0 = patches.Rectangle([0.035,0.005], 0.328, 0.99, facecolor=c0, edgecolor=c0, alpha=0.9, zorder=-1)
patch1 = patches.Rectangle([0.370,0.005], 0.628, 0.99, facecolor=c1, edgecolor=c1, alpha=0.9, zorder=-1)
tx0 = fig.text(0.20, 0.97, '(A) Common univariate approach', ha='center')
# tx1 = fig.text(0.20, 0.48, '( No explicit temporal\neffect consideration )', ha='center')
tx1 = fig.text(0.20, 0.48, '( None )', ha='center')
tx2 = fig.text(0.55, 0.97, '(B) Proposed multivariate approach')
fig.add_artist(patch0)
fig.add_artist(patch1)
plt.setp( [tx0, tx1, tx2], name=fontname, size=20)
x = 0.01
y = np.array(axy) + 0.5*axh
tx0 = fig.text(x, y[0], 'Dependent variables')
tx1 = fig.text(x, y[1], 'Multivariate analysis')
tx2 = fig.text(x, y[2], 'Univariate analysis')
plt.setp( [tx0, tx1, tx2], name=fontname, size=20, rotation=90, va='center')
# tx1.set_size=14
# axis limits:
if ylim is not None:
[ax.set_ylim(*yy) for ax,yy in zip(AX, ylim)]
def add_threshold_label(ax, x0, ti):
s0,s1 = r'$\alpha$ < 0.05', r'$\alpha$ > 0.05'
hax = 0.02
x,y0 = data2axes( ax, [x0, ti.zstar] )
tx0 = ax.text(x, y0+hax, s0, va='bottom')
tx1 = ax.text(x, y0-hax, s1, va='top')
tx = [tx0,tx1]
plt.setp( tx, size=11, name=fontname, transform=ax.transAxes)
return tx
# add threshold labels:
if alpha_x is not None:
add_threshold_label( ax3, alpha_x[0], ti )
add_threshold_label( ax4, alpha_x[1], T2i )
add_threshold_label( ax5, alpha_x[2], tri )
add_threshold_label( ax6, alpha_x[3], twi )
return fig,AX
| import numpy as np
from matplotlib import pyplot as plt
from matplotlib import patches
import spm1d
def axes2data(ax, points):
ax.get_xlim()
ax.get_ylim()
t = ax.transAxes + ax.transData.inverted()
return t.transform( points )
def data2axes(ax, points):
ax.get_xlim()
ax.get_ylim()
t = (ax.transAxes + ax.transData.inverted()).inverted()
return t.transform( points )
def plot_multipanel(y, yr, d, n0, colors, parametric=True, ylim=None, alpha_x=None, paired=False, permutations=1000, dvlabel='Dependent variable', xlabel='Domain position (%)', group_labels=None, leg_loc=[(0.99, 0.92), (0.99, 0.92), (0.99, 0.99)]):
d = d[:,1:-1]
Y = np.dstack( [yr[:,1:-1],d] )
J = n0
fontname = 'Helvetica'
glabels = ['Group 1 mean', 'Group 2 mean'] if (group_labels is None) else group_labels
# stats:
if parametric:
if paired:
ti = spm1d.stats.ttest_paired( y[J:], y[:J] ).inference(0.05)
T2i = spm1d.stats.hotellings_paired( Y[J:], Y[:J] ).inference(0.05)
tri = spm1d.stats.ttest_paired( yr[J:], yr[:J] ).inference(0.05/2)
twi = spm1d.stats.ttest_paired( d[J:], d[:J] ).inference(0.05/2)
else:
ti = spm1d.stats.ttest2( y[J:], y[:J] ).inference(0.05)
T2i = spm1d.stats.hotellings2( Y[J:], Y[:J] ).inference(0.05)
tri = spm1d.stats.ttest2( yr[J:], yr[:J] ).inference(0.05/2)
twi = spm1d.stats.ttest2( d[J:], d[:J] ).inference(0.05/2)
else:
if paired:
t = spm1d.stats.nonparam.ttest_paired( y[J:], y[:J] )
T2 = spm1d.stats.nonparam.hotellings_paired( Y[J:], Y[:J] )
tr = spm1d.stats.nonparam.ttest_paired( yr[J:], yr[:J] )
tw = spm1d.stats.nonparam.ttest_paired( d[J:], d[:J] )
else:
t = spm1d.stats.nonparam.ttest2( y[J:], y[:J] )
T2 = spm1d.stats.nonparam.hotellings2( Y[J:], Y[:J] )
tr = spm1d.stats.nonparam.ttest2( yr[J:], yr[:J] )
tw = spm1d.stats.nonparam.ttest2( d[J:], d[:J] )
nperm = -1 if (permutations > t.nPermUnique) else permutations
ti = t.inference(0.05, iterations=nperm, two_tailed=True)
T2i = T2.inference(0.05, iterations=nperm)
tri = tr.inference(0.05, iterations=nperm, two_tailed=True)
twi = tw.inference(0.05, iterations=nperm, two_tailed=True)
# nperm0 = -1 if (permutations > t.nPermUnique) else permutations
# nperm1 = -1 if (permutations > T2.nPermUnique) else permutations
# nperm2 = -1 if (permutations > tr.nPermUnique) else permutations
# nperm3 = -1 if (permutations > tw.nPermUnique) else permutations
# create figure and axes:
fig = plt.figure( figsize=(14,10) )
axw,axh = 0.26, 0.27
# axx = np.linspace( 0.06, 0.42, 0.71)
axx = [0.085, 0.415, 0.730]
axy = np.linspace(0.66, 0.06, 3)
ax0,ax1,ax2 = [plt.axes( [x,axy[0],axw,axh] ) for x in axx]
ax3 = plt.axes( [axx[0],axy[2],axw,axh] )
ax4 = plt.axes( [axx[1]+0.15,axy[1],axw,axh] )
ax5,ax6 = [plt.axes( [x,axy[2],axw,axh] ) for x in axx[1:]]
AX = [ax0,ax1,ax2, ax3, ax4, ax5,ax6]
h0 = ax0.plot( y[:J].T, color=colors[0], lw=0.3 )[0]
h1 = ax0.plot( y[J:].T, color=colors[1], lw=0.3 )[0]
h0 = ax0.plot( y[:J].mean(axis=0), color=colors[0], lw=5 )[0]
h1 = ax0.plot( y[J:].mean(axis=0), color=colors[1], lw=5 )[0]
leg = ax0.legend( [h0,h1], glabels, loc='upper right', bbox_to_anchor=leg_loc[0] )
plt.setp( leg.get_texts(), name=fontname)
ax1.plot( yr[:J].T, color=colors[0], lw=0.3 )
ax1.plot( yr[J:].T, color=colors[1], lw=0.3 )
h0 = ax1.plot( yr[:J].mean(axis=0), color=colors[0], lw=5 )[0]
h1 = ax1.plot( yr[J:].mean(axis=0), color=colors[1], lw=5 )[0]
leg = ax1.legend( [h0,h1], glabels, loc='upper right', bbox_to_anchor=leg_loc[1] )
plt.setp( leg.get_texts(), name=fontname)
h0 = ax2.plot( d[:J].T, color=colors[0], lw=0.3 )[0]
h1 = ax2.plot( d[J:].T, color=colors[1], lw=0.3 )[0]
h2 = ax2.axhline(0, color='k', ls='--')
h0 = ax2.plot( d[:J].mean(axis=0), color=colors[0], lw=5 )[0]
h1 = ax2.plot( d[J:].mean(axis=0), color=colors[1], lw=5 )[0]
leg = ax2.legend( [h0,h1,h2], glabels + ['Null displacement'], loc='upper right', bbox_to_anchor=leg_loc[2] )
plt.setp( leg.get_texts(), name=fontname)
# SPM results:
ti.plot( ax=ax3 )
T2i.plot( ax=ax4 )
tri.plot( ax=ax5 )
twi.plot( ax=ax6 )
# init axes decorations:
for ax in AX:
plt.setp( ax.get_xticklabels() + ax.get_yticklabels(), name=fontname, size=10 )
ax.set_xlim(0, 100)
ax.set_ylabel(None)
[ax.set_xticklabels([]) for ax in [ax1,ax2,ax4]]
# axis labels:
sz = 16
ax0.set_ylabel(dvlabel, name=fontname, size=sz)
ax1.set_ylabel(dvlabel, name=fontname, size=sz)
ax2.set_ylabel('Warp magnitude', name=fontname, size=sz)
ax3.set_ylabel('SPM{t}', name=fontname, size=sz)
ax4.set_ylabel(r'SPM{ $T^2$ }', name=fontname, size=sz)
ax5.set_ylabel('SPM{t}', name=fontname, size=sz)
[ax.set_xlabel(xlabel, name=fontname, size=sz) for ax in [ax3,ax5,ax6]]
# panel labels:
labels = ['A.1', 'B.1', 'B.2', 'A.2', 'B.3', 'B.4', 'B.5']
slabels = ['Linearly registered', 'Nonlinearly registered', 'Displacement fields', 'Statistical analysis', 'Main test (amplitude + timing)', 'Post hoc (amplitude)', 'Post hoc (timing)']
[ax.text(0.03, 0.92, f'({s}) {ss}', name=fontname, size=14, transform=ax.transAxes) for ax,s,ss in zip( AX, labels, slabels ) ]
tx0 = ax1.text(0.01, 1.05, 'Amplitude effects', ha='left', transform=ax1.transAxes)
tx1 = ax2.text(0.99, 1.05, 'Timing effects', ha='right', transform=ax2.transAxes)
plt.setp( [tx0,tx1] , name=fontname, size=16 )
# background panels:
c0,c1 = '0.6', '0.9'
patch0 = patches.Rectangle([0.035,0.005], 0.328, 0.99, facecolor=c0, edgecolor=c0, alpha=0.9, zorder=-1)
patch1 = patches.Rectangle([0.370,0.005], 0.628, 0.99, facecolor=c1, edgecolor=c1, alpha=0.9, zorder=-1)
tx0 = fig.text(0.20, 0.97, '(A) Common univariate approach', ha='center')
# tx1 = fig.text(0.20, 0.48, '( No explicit temporal\neffect consideration )', ha='center')
tx1 = fig.text(0.20, 0.48, '( None )', ha='center')
tx2 = fig.text(0.55, 0.97, '(B) Proposed multivariate approach')
fig.add_artist(patch0)
fig.add_artist(patch1)
plt.setp( [tx0, tx1, tx2], name=fontname, size=20)
x = 0.01
y = np.array(axy) + 0.5*axh
tx0 = fig.text(x, y[0], 'Dependent variables')
tx1 = fig.text(x, y[1], 'Multivariate analysis')
tx2 = fig.text(x, y[2], 'Univariate analysis')
plt.setp( [tx0, tx1, tx2], name=fontname, size=20, rotation=90, va='center')
# tx1.set_size=14
# axis limits:
if ylim is not None:
[ax.set_ylim(*yy) for ax,yy in zip(AX, ylim)]
def add_threshold_label(ax, x0, ti):
s0,s1 = r'$\alpha$ < 0.05', r'$\alpha$ > 0.05'
hax = 0.02
x,y0 = data2axes( ax, [x0, ti.zstar] )
tx0 = ax.text(x, y0+hax, s0, va='bottom')
tx1 = ax.text(x, y0-hax, s1, va='top')
tx = [tx0,tx1]
plt.setp( tx, size=11, name=fontname, transform=ax.transAxes)
return tx
# add threshold labels:
if alpha_x is not None:
add_threshold_label( ax3, alpha_x[0], ti )
add_threshold_label( ax4, alpha_x[1], T2i )
add_threshold_label( ax5, alpha_x[2], tri )
add_threshold_label( ax6, alpha_x[3], twi )
return fig,AX | en | 0.368282 | # stats: # nperm0 = -1 if (permutations > t.nPermUnique) else permutations # nperm1 = -1 if (permutations > T2.nPermUnique) else permutations # nperm2 = -1 if (permutations > tr.nPermUnique) else permutations # nperm3 = -1 if (permutations > tw.nPermUnique) else permutations # create figure and axes: # axx = np.linspace( 0.06, 0.42, 0.71) # SPM results: # init axes decorations: # axis labels: # panel labels: # background panels: # tx1 = fig.text(0.20, 0.48, '( No explicit temporal\neffect consideration )', ha='center') # tx1.set_size=14 # axis limits: # add threshold labels: | 2.55069 | 3 |
odoo-13.0/addons/mass_mailing_crm/models/mailing_mailing.py | VaibhavBhujade/Blockchain-ERP-interoperability | 0 | 6623328 | <reponame>VaibhavBhujade/Blockchain-ERP-interoperability
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
from odoo.osv import expression
class MassMailing(models.Model):
_name = 'mailing.mailing'
_inherit = 'mailing.mailing'
crm_lead_activated = fields.Boolean('Use Leads', compute='_compute_crm_lead_activated')
crm_lead_count = fields.Integer('Lead Count', groups='sales_team.group_sale_salesman', compute='_compute_crm_lead_and_opportunities_count')
crm_opportunities_count = fields.Integer('Opportunities Count', groups='sales_team.group_sale_salesman', compute='_compute_crm_lead_and_opportunities_count')
def _compute_crm_lead_activated(self):
for mass_mailing in self:
mass_mailing.crm_lead_activated = self.env.user.has_group('crm.group_use_lead')
@api.depends('crm_lead_activated')
def _compute_crm_lead_and_opportunities_count(self):
for mass_mailing in self:
lead_and_opportunities_count = mass_mailing.crm_lead_count = self.env['crm.lead'] \
.with_context(active_test=False) \
.search_count(self._get_crm_utm_domain())
if mass_mailing.crm_lead_activated:
mass_mailing.crm_lead_count = lead_and_opportunities_count
mass_mailing.crm_opportunities_count = 0
else:
mass_mailing.crm_lead_count = 0
mass_mailing.crm_opportunities_count = lead_and_opportunities_count
def action_redirect_to_leads(self):
action = self.env.ref('crm.crm_lead_all_leads').read()[0]
action['domain'] = self._get_crm_utm_domain()
action['context'] = {'default_type': 'lead', 'active_test': False, 'create': False}
return action
def action_redirect_to_opportunities(self):
action = self.env.ref('crm.crm_lead_opportunities').read()[0]
action['view_mode'] = 'tree,kanban,graph,pivot,form,calendar'
action['domain'] = self._get_crm_utm_domain()
action['context'] = {'active_test': False, 'create': False}
return action
def _get_crm_utm_domain(self):
""" We want all records that match the UTMs """
domain = []
if self.campaign_id:
domain = expression.AND([domain, [('campaign_id', '=', self.campaign_id.id)]])
if self.source_id:
domain = expression.AND([domain, [('source_id', '=', self.source_id.id)]])
if self.medium_id:
domain = expression.AND([domain, [('medium_id', '=', self.medium_id.id)]])
if not domain:
domain = expression.AND([domain, [(0, '=', 1)]])
return domain
| # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
from odoo.osv import expression
class MassMailing(models.Model):
_name = 'mailing.mailing'
_inherit = 'mailing.mailing'
crm_lead_activated = fields.Boolean('Use Leads', compute='_compute_crm_lead_activated')
crm_lead_count = fields.Integer('Lead Count', groups='sales_team.group_sale_salesman', compute='_compute_crm_lead_and_opportunities_count')
crm_opportunities_count = fields.Integer('Opportunities Count', groups='sales_team.group_sale_salesman', compute='_compute_crm_lead_and_opportunities_count')
def _compute_crm_lead_activated(self):
for mass_mailing in self:
mass_mailing.crm_lead_activated = self.env.user.has_group('crm.group_use_lead')
@api.depends('crm_lead_activated')
def _compute_crm_lead_and_opportunities_count(self):
for mass_mailing in self:
lead_and_opportunities_count = mass_mailing.crm_lead_count = self.env['crm.lead'] \
.with_context(active_test=False) \
.search_count(self._get_crm_utm_domain())
if mass_mailing.crm_lead_activated:
mass_mailing.crm_lead_count = lead_and_opportunities_count
mass_mailing.crm_opportunities_count = 0
else:
mass_mailing.crm_lead_count = 0
mass_mailing.crm_opportunities_count = lead_and_opportunities_count
def action_redirect_to_leads(self):
action = self.env.ref('crm.crm_lead_all_leads').read()[0]
action['domain'] = self._get_crm_utm_domain()
action['context'] = {'default_type': 'lead', 'active_test': False, 'create': False}
return action
def action_redirect_to_opportunities(self):
action = self.env.ref('crm.crm_lead_opportunities').read()[0]
action['view_mode'] = 'tree,kanban,graph,pivot,form,calendar'
action['domain'] = self._get_crm_utm_domain()
action['context'] = {'active_test': False, 'create': False}
return action
def _get_crm_utm_domain(self):
""" We want all records that match the UTMs """
domain = []
if self.campaign_id:
domain = expression.AND([domain, [('campaign_id', '=', self.campaign_id.id)]])
if self.source_id:
domain = expression.AND([domain, [('source_id', '=', self.source_id.id)]])
if self.medium_id:
domain = expression.AND([domain, [('medium_id', '=', self.medium_id.id)]])
if not domain:
domain = expression.AND([domain, [(0, '=', 1)]])
return domain | en | 0.875733 | # -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. We want all records that match the UTMs | 1.939246 | 2 |
venv/lib/python3.9/site-packages/Geometry/constants.py | atharva21-stack/Python-Projects | 0 | 6623329 | '''
'''
from sys import float_info
epsilon = float_info.epsilon
pi_half = 1.5707963267948966
two_pi = 6.283185307179586
del(float_info)
| '''
'''
from sys import float_info
epsilon = float_info.epsilon
pi_half = 1.5707963267948966
two_pi = 6.283185307179586
del(float_info)
| none | 1 | 1.807311 | 2 | |
custom_components/youless/__init__.py | elsingaa/Home-Assistant-Config | 1 | 6623330 | <filename>custom_components/youless/__init__.py
"""Youless component.""" | <filename>custom_components/youless/__init__.py
"""Youless component.""" | en | 0.486988 | Youless component. | 1.151303 | 1 |
examples/AR_model.py | liusf15/blackbox_selectinf | 0 | 6623331 | import torch
import numpy as np
from scipy.stats import f
from scipy.stats import norm
from blackbox_selectinf.usecase.AR_model import AR_model
from importlib import reload
import blackbox_selectinf.usecase.AR_model
reload(blackbox_selectinf.usecase.AR_model)
from blackbox_selectinf.learning.learning import (learn_select_prob, get_weight, get_CI)
import argparse
import pickle
from statsmodels.stats.stattools import durbin_watson
parser = argparse.ArgumentParser(description='AR model inference for rho')
parser.add_argument('--idx', type=int, default=0)
parser.add_argument('--n', type=int, default=100)
parser.add_argument('--p', type=int, default=10)
parser.add_argument('--n_b', type=int, default=100)
parser.add_argument('--rho', type=float, default=0.0)
parser.add_argument('--Q_L', type=float, default=1.9)
parser.add_argument('--Q_U', type=float, default=2.2)
parser.add_argument('--upper', action='store_false', default=True)
parser.add_argument('--nrep', type=int, default=1)
parser.add_argument('--max_it', type=int, default=1)
parser.add_argument('--savemodel', action='store_true', default=False)
parser.add_argument('--modelname', type=str, default='model_')
parser.add_argument('--epochs', type=int, default=1000)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--ntrain', type=int, default=1000)
parser.add_argument('--logname', type=str, default='log')
parser.add_argument('--loadmodel', action='store_true', default=False)
parser.add_argument('--verbose', action='store_true', default=False)
parser.add_argument('--thre', type=float, default=0.99)
parser.add_argument('--consec_epochs', type=int, default=5)
args = parser.parse_args()
def main():
Q_L = args.Q_L
Q_U = args.Q_U
n = args.n
p = args.p
rho = args.rho
n_b = args.n_b
ntrain = args.ntrain
max_it = args.max_it
for j in range(args.idx, args.idx + args.nrep):
logs = {}
print("Start simulation {}".format(j))
# generate data
seed = j
logs['seed'] = seed
np.random.seed(seed)
X = np.random.randn(n, p)
beta = np.random.randn(p)
sigma = 1
C = np.tile(np.arange(1, n + 1), (n, 1))
C_cov = np.power(rho, abs(C - C.T)) / (1 - rho ** 2) * sigma**2
C_inv = np.linalg.inv(C_cov)
epsilon = np.random.multivariate_normal(np.zeros(n), C_cov)
Y = X @ beta + epsilon
hat = X @ np.linalg.inv(X.T @ X) @ X.T
resids = Y - hat @ Y
dw_stat = durbin_watson(resids)
if args.upper and dw_stat >= Q_U:
print("reject")
print("DW ", dw_stat, 'Q_L', Q_L, 'Q_U', Q_U)
elif not args.upper and dw_stat <= Q_L:
print("reject")
print("DW ", dw_stat)
else:
continue
logs['dw'] = dw_stat
AR_class = AR_model(X, Y, Q_L=Q_L, Q_U=Q_U, upper=args.upper)
rho_hat = (np.mean(resids[1:] * resids[:-1]) - np.mean(resids[1:]) * np.mean(resids[:-1])) / \
(np.mean(resids[:-1]**2) - np.mean(resids[:-1])**2)
beta_hat = np.linalg.inv(X.T @ C_inv @ X) @ X.T @ C_inv @ Y
Z_data = AR_class.basis(resids)
theta_data = rho_hat
logs['rho_hat'] = rho_hat
# generate training data
training_data = AR_class.gen_train_data(100, n, beta_hat, rho_hat)
Z_train = training_data[0]
W_train = training_data[1]
Gamma = training_data[2]
target_var = training_data[3]
target_sd = np.sqrt(target_var)
logs['target_sd'] = target_sd
print("ones:", np.mean(W_train))
logs['ones'] = np.mean(W_train)
print("Start learning selection probability")
net = None
for it in range(max_it):
print("recursion", it)
net, flag, pr_data = learn_select_prob(Z_train, W_train, Z_data=torch.tensor(Z_data, dtype=torch.float),
net=net, thre=args.thre,
consec_epochs=args.consec_epochs, num_epochs=args.epochs,
batch_size=args.batch_size, verbose=args.verbose, print_every=100)
if flag == 1:
print("Succeeded learning!")
break
if it == max_it - 1:
break
else: # generate more data
print("generate more data")
training_data = AR_class.gen_train_data(ntrain=ntrain, n_b=n_b, beta_hat=beta_hat, rho_hat=rho_hat)
Z_train_new = training_data[0]
W_train_new = training_data[1]
Z_train = np.concatenate([Z_train, Z_train_new])
W_train = np.concatenate([W_train, W_train_new])
print("fraction of positive data:", np.mean(W_train))
print('pr_data', pr_data.item())
logs['pr_data'] = pr_data.item()
N_0 = Z_data - Gamma * theta_data
gamma_list = np.linspace(-10 * target_sd, 10 * target_sd, 201)
target_theta = theta_data + gamma_list
target_theta = target_theta.reshape(1, len(gamma_list))
weight_val = get_weight(net, target_theta, N_0, Gamma.reshape(-1, 1))
interval_nn, pvalue_nn = get_CI(target_theta, weight_val, target_var, theta_data, return_pvalue=True)
print("interval_nn", interval_nn)
logs['covered_nn'] = 0
if interval_nn[0] <= rho <= interval_nn[1]:
logs['covered_nn'] = 1
print("covered_nn", logs['covered_nn'])
logs['interval_nn'] = interval_nn
logs['width_nn'] = interval_nn[1] - interval_nn[0]
logs['pvalue_nn'] = pvalue_nn
interval_naive = tuple((norm.ppf(0.025) * target_sd, -norm.ppf(0.025) * target_sd)) + rho_hat
print("interval_naive", interval_naive)
logs['covered_naive'] = 0
if interval_naive[0] <= rho <= interval_naive[1]:
logs['covered_naive'] = 1
print("covered_naive", logs['covered_naive'])
logs['interval_naive'] = interval_naive
logs['width_naive'] = interval_naive[1] - interval_naive[0]
# logs['pvalue_naive'] = pvalue_naive
path = open('{}_n_{}_p_{}_nb_{}_rho_{}_{}.pickle'.format(args.logname, n, p, n_b, rho, j), 'wb')
pickle.dump(logs, path)
path.close()
if __name__ == "__main__":
main()
| import torch
import numpy as np
from scipy.stats import f
from scipy.stats import norm
from blackbox_selectinf.usecase.AR_model import AR_model
from importlib import reload
import blackbox_selectinf.usecase.AR_model
reload(blackbox_selectinf.usecase.AR_model)
from blackbox_selectinf.learning.learning import (learn_select_prob, get_weight, get_CI)
import argparse
import pickle
from statsmodels.stats.stattools import durbin_watson
parser = argparse.ArgumentParser(description='AR model inference for rho')
parser.add_argument('--idx', type=int, default=0)
parser.add_argument('--n', type=int, default=100)
parser.add_argument('--p', type=int, default=10)
parser.add_argument('--n_b', type=int, default=100)
parser.add_argument('--rho', type=float, default=0.0)
parser.add_argument('--Q_L', type=float, default=1.9)
parser.add_argument('--Q_U', type=float, default=2.2)
parser.add_argument('--upper', action='store_false', default=True)
parser.add_argument('--nrep', type=int, default=1)
parser.add_argument('--max_it', type=int, default=1)
parser.add_argument('--savemodel', action='store_true', default=False)
parser.add_argument('--modelname', type=str, default='model_')
parser.add_argument('--epochs', type=int, default=1000)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--ntrain', type=int, default=1000)
parser.add_argument('--logname', type=str, default='log')
parser.add_argument('--loadmodel', action='store_true', default=False)
parser.add_argument('--verbose', action='store_true', default=False)
parser.add_argument('--thre', type=float, default=0.99)
parser.add_argument('--consec_epochs', type=int, default=5)
args = parser.parse_args()
def main():
Q_L = args.Q_L
Q_U = args.Q_U
n = args.n
p = args.p
rho = args.rho
n_b = args.n_b
ntrain = args.ntrain
max_it = args.max_it
for j in range(args.idx, args.idx + args.nrep):
logs = {}
print("Start simulation {}".format(j))
# generate data
seed = j
logs['seed'] = seed
np.random.seed(seed)
X = np.random.randn(n, p)
beta = np.random.randn(p)
sigma = 1
C = np.tile(np.arange(1, n + 1), (n, 1))
C_cov = np.power(rho, abs(C - C.T)) / (1 - rho ** 2) * sigma**2
C_inv = np.linalg.inv(C_cov)
epsilon = np.random.multivariate_normal(np.zeros(n), C_cov)
Y = X @ beta + epsilon
hat = X @ np.linalg.inv(X.T @ X) @ X.T
resids = Y - hat @ Y
dw_stat = durbin_watson(resids)
if args.upper and dw_stat >= Q_U:
print("reject")
print("DW ", dw_stat, 'Q_L', Q_L, 'Q_U', Q_U)
elif not args.upper and dw_stat <= Q_L:
print("reject")
print("DW ", dw_stat)
else:
continue
logs['dw'] = dw_stat
AR_class = AR_model(X, Y, Q_L=Q_L, Q_U=Q_U, upper=args.upper)
rho_hat = (np.mean(resids[1:] * resids[:-1]) - np.mean(resids[1:]) * np.mean(resids[:-1])) / \
(np.mean(resids[:-1]**2) - np.mean(resids[:-1])**2)
beta_hat = np.linalg.inv(X.T @ C_inv @ X) @ X.T @ C_inv @ Y
Z_data = AR_class.basis(resids)
theta_data = rho_hat
logs['rho_hat'] = rho_hat
# generate training data
training_data = AR_class.gen_train_data(100, n, beta_hat, rho_hat)
Z_train = training_data[0]
W_train = training_data[1]
Gamma = training_data[2]
target_var = training_data[3]
target_sd = np.sqrt(target_var)
logs['target_sd'] = target_sd
print("ones:", np.mean(W_train))
logs['ones'] = np.mean(W_train)
print("Start learning selection probability")
net = None
for it in range(max_it):
print("recursion", it)
net, flag, pr_data = learn_select_prob(Z_train, W_train, Z_data=torch.tensor(Z_data, dtype=torch.float),
net=net, thre=args.thre,
consec_epochs=args.consec_epochs, num_epochs=args.epochs,
batch_size=args.batch_size, verbose=args.verbose, print_every=100)
if flag == 1:
print("Succeeded learning!")
break
if it == max_it - 1:
break
else: # generate more data
print("generate more data")
training_data = AR_class.gen_train_data(ntrain=ntrain, n_b=n_b, beta_hat=beta_hat, rho_hat=rho_hat)
Z_train_new = training_data[0]
W_train_new = training_data[1]
Z_train = np.concatenate([Z_train, Z_train_new])
W_train = np.concatenate([W_train, W_train_new])
print("fraction of positive data:", np.mean(W_train))
print('pr_data', pr_data.item())
logs['pr_data'] = pr_data.item()
N_0 = Z_data - Gamma * theta_data
gamma_list = np.linspace(-10 * target_sd, 10 * target_sd, 201)
target_theta = theta_data + gamma_list
target_theta = target_theta.reshape(1, len(gamma_list))
weight_val = get_weight(net, target_theta, N_0, Gamma.reshape(-1, 1))
interval_nn, pvalue_nn = get_CI(target_theta, weight_val, target_var, theta_data, return_pvalue=True)
print("interval_nn", interval_nn)
logs['covered_nn'] = 0
if interval_nn[0] <= rho <= interval_nn[1]:
logs['covered_nn'] = 1
print("covered_nn", logs['covered_nn'])
logs['interval_nn'] = interval_nn
logs['width_nn'] = interval_nn[1] - interval_nn[0]
logs['pvalue_nn'] = pvalue_nn
interval_naive = tuple((norm.ppf(0.025) * target_sd, -norm.ppf(0.025) * target_sd)) + rho_hat
print("interval_naive", interval_naive)
logs['covered_naive'] = 0
if interval_naive[0] <= rho <= interval_naive[1]:
logs['covered_naive'] = 1
print("covered_naive", logs['covered_naive'])
logs['interval_naive'] = interval_naive
logs['width_naive'] = interval_naive[1] - interval_naive[0]
# logs['pvalue_naive'] = pvalue_naive
path = open('{}_n_{}_p_{}_nb_{}_rho_{}_{}.pickle'.format(args.logname, n, p, n_b, rho, j), 'wb')
pickle.dump(logs, path)
path.close()
if __name__ == "__main__":
main()
| en | 0.429811 | # generate data # generate training data # generate more data # logs['pvalue_naive'] = pvalue_naive | 1.871483 | 2 |
matrix/mm-data/fix-literals/gen.py | rebryant/Cloud-BDD | 2 | 6623332 | <reponame>rebryant/Cloud-BDD
#!/usr/bin/python
import subprocess
program = "../../mm_generate.py"
time = 7200
lcount = 336
lname = 'smirnov-family.lit'
def fname(suffix, blevel):
    """Build the output command-file name for one run configuration."""
    # Encode the literal count, budget level, and singleton suffix in the name.
    return "run-fix{:d}-b{:d}-{}.cmd".format(lcount, blevel, suffix)
def cmd(singleton = True, blevel = 2):
    """Assemble the mm_generate.py argument list for one configuration."""
    suffix = 'se' if singleton else 'nse'
    # Budget level 0 means "only level 6"; otherwise sweep blevel..6.
    if blevel == 0:
        brange = '6'
    else:
        brange = '%d:6' % blevel
    arguments = [program, '-k', '-B', brange, '-t', str(time),
                 '-L', lname, '-o', fname(suffix, blevel)]
    if singleton:
        # Enable singleton-exclusion mode.
        arguments.append('-e')
    return arguments
# Run every budget level (0..2) sequentially with singleton-exclusion on.
singleton = True
for blevel in range(3):
    command = cmd(singleton, blevel)
    # Python 2 print statement: announce the exact command line being run.
    print "Running: %s" % " ".join(command)
    process = subprocess.Popen(command)
    # Wait for each generator run to finish before starting the next.
    process.wait()
| #!/usr/bin/python
import subprocess
program = "../../mm_generate.py"
time = 7200
lcount = 336
lname = 'smirnov-family.lit'
def fname(suffix, blevel):
return "run-fix%d-b%d-%s.cmd" % (lcount, blevel, suffix)
def cmd(singleton = True, blevel = 2):
suffix = 'se' if singleton else 'nse'
brange = '6' if blevel == 0 else '%d:6' % blevel
ls = [program, '-k', '-B', brange, '-t', str(time), '-L', lname, '-o', fname(suffix, blevel)]
if singleton:
ls.append('-e')
return ls
singleton = True
for blevel in range(3):
command = cmd(singleton, blevel)
print "Running: %s" % " ".join(command)
process = subprocess.Popen(command)
process.wait() | ru | 0.258958 | #!/usr/bin/python | 2.218639 | 2 |
ble-gatt-service/configure_ble.py | noelmcloughlin/iot-ble-gatt-server | 2 | 6623333 | #!/usr/bin/env python
##################################
## Install or Restart BLE
##################################
import os, sys, shutil, getopt
from subprocess import Popen, PIPE, call
sys.path.append('../lib')
import osutils as utils
def device_libs():
    """Install the system and Python packages required to build BlueZ."""
    # Add the 'pi' user to the staff group.
    call(['sudo', 'usermod', '-G', 'staff', 'pi'])
    # Build dependencies, installed in the same two batches as before.
    package_groups = (
        ['libglib2.0-dev', 'libudev-dev', 'libical-dev', 'libreadline-dev'],
        ['libdbus-1-dev', 'python-dbus', 'git'],
    )
    for group in package_groups:
        utils.install_pkg(group)
    utils.install_pip(['gitpython'])
def bluez_version():
    """Return the installed bluetoothctl version string.

    Returns:
        str: the version reported by ``bluetoothctl -v`` (e.g. "5.50"),
        or the sentinel "1.0" when bluetoothctl is missing or its output
        cannot be parsed (which makes install_ble() trigger an install).
    """
    try:
        p = Popen(['bluetoothctl', '-v'], stdout=PIPE, stderr=PIPE)
        output, err = p.communicate()
        # communicate() yields bytes; the original returned str(bytes),
        # i.e. "b'5.50'", so the caller's comparison with '5.50' could
        # never match. Decode to get a plain version string.
        output = output.split()[1].decode()
    except Exception:
        # Missing binary, no output, or unexpected format.
        output = "1.0"
    return str(output)
def make_install_bluez_and_reboot(version):
    """Download, build, and install BlueZ `version`, then reboot.

    Bug fix: the original ignored its `version` parameter and used the
    module-level `wanted_version` global throughout; it now honours the
    argument it is given.

    Args:
        version: BlueZ release to install, e.g. "5.50".
    """
    print("\nInstall BlueZ %s" % version)
    call(["sudo", "apt-get", "update"])
    # Remove leftovers from any earlier build attempt.
    call("rm -fr bluez-*", shell=True)
    print("\nDownload BlueZ %s" % version)
    call(["wget", "www.kernel.org/pub/linux/bluetooth/bluez-" + version + ".tar.xz"])
    call(["tar", "xvf", "bluez-" + version + ".tar.xz"])
    print("\nMake BlueZ %s" % version)
    os.chdir(utils.workdir + "/bluez-" + version)
    call(["./configure", "--prefix=/usr", "--mandir=/usr/share/man", "--sysconfdir=/etc", "--localstatedir=/var", "--enable-experimental"])
    call(["make", "-j4"])
    print("\nInstall BlueZ %s binaries" % version)
    call(["sudo", "make", "install"])
    print("\n\nDone - rebooting")
    call(["sudo", "reboot"])
def restart_bluez_ble():
    """Restart the bluetooth service and re-advertise hci0 as "RPi"."""
    steps = [
        ['systemctl', 'restart', 'bluetooth'],
        ['hciconfig', 'hci0', 'down'],
        ['hciconfig', 'hci0', 'name', 'RPi'],
        ['hciconfig', 'hci0', 'up'],
        ['hciconfig', 'hci0', 'name', 'RPi'],
        # Re-enable LE advertising (mode 0).
        ['hciconfig', 'hci0', 'leadv', '0'],
    ]
    for step in steps:
        call(['sudo'] + step)
    #call(['sudo', 'systemctl', 'restart', 'ble-led-gatt'])
def install_ble():
    """Install BlueZ 5.50 unless that exact version is already present."""
    wanted_version = '5.50'
    if bluez_version() == wanted_version:
        print("\nbluez version %s already installed" % wanted_version)
    else:
        # Pull in build dependencies, then build/install and reboot.
        device_libs()
        make_install_bluez_and_reboot(wanted_version)
def usage():
    """Print the command-line synopsis and exit with status 2."""
    script = os.path.basename(__file__)
    print("\nUsage:\n%s -a [ install|restart ]\n" % script)
    sys.exit(2)
def main(argv):
    """Parse the -a/--action option and dispatch to install or restart."""
    try:
        opts, args = getopt.getopt(argv, "a:", ["action=", ])
    except getopt.GetoptError:
        usage()
    if not opts:
        usage()
    for opt, arg in opts:
        # Anything other than -a/--action, or an unknown action, is a
        # usage error (usage() exits the process).
        if opt not in ("-a", "--action"):
            usage()
        elif arg == 'install':
            install_ble()
        elif arg == 'restart':
            restart_bluez_ble()
        else:
            usage()
if __name__ == "__main__":
main(sys.argv[1:])
exit(0)
| #!/usr/bin/env python
##################################
## Install or Restart BLE
##################################
import os, sys, shutil, getopt
from subprocess import Popen, PIPE, call
sys.path.append('../lib')
import osutils as utils
def device_libs():
call(['sudo', 'usermod', '-G', 'staff', 'pi'])
utils.install_pkg(['libglib2.0-dev', 'libudev-dev', 'libical-dev', 'libreadline-dev'])
utils.install_pkg(['libdbus-1-dev', 'python-dbus', 'git'])
utils.install_pip(['gitpython',])
def bluez_version():
try:
p = Popen(['bluetoothctl', '-v'], stdout=PIPE, stderr=PIPE)
output, err = p.communicate()
output = output.split()[1]
except:
output = "1.0"
return str(output)
def make_install_bluez_and_reboot(version):
print("\nInstall BlueZ %s" % wanted_version)
call(["sudo", "apt-get", "update"])
call("rm -fr bluez-*", shell=True)
print("\nDownload BlueZ %s" % wanted_version)
call(["wget", "www.kernel.org/pub/linux/bluetooth/bluez-" + wanted_version + ".tar.xz"])
call(["tar", "xvf", "bluez-" + wanted_version + ".tar.xz"])
print("\nMake BlueZ %s" % wanted_version)
os.chdir( utils.workdir + "/bluez-" + wanted_version )
call(["./configure", "--prefix=/usr", "--mandir=/usr/share/man", "--sysconfdir=/etc", "--localstatedir=/var", "--enable-experimental"])
call(["make", "-j4"])
print("\nInstall BlueZ %s binaries" % wanted_version)
call(["sudo", "make", "install"])
print("\n\nDone - rebooting")
call(["sudo", "reboot"])
def restart_bluez_ble():
call(['sudo', 'systemctl', 'restart', 'bluetooth'])
call(['sudo', 'hciconfig', 'hci0', 'down'])
call(['sudo', 'hciconfig', 'hci0', 'name', 'RPi'])
call(['sudo', 'hciconfig', 'hci0', 'up'])
call(['sudo', 'hciconfig', 'hci0', 'name', 'RPi'])
call(['sudo', 'hciconfig', 'hci0', 'leadv', '0'])
#call(['sudo', 'systemctl', 'restart', 'ble-led-gatt'])
def install_ble():
wanted_version='5.50'
if bluez_version() != wanted_version:
device_libs()
make_install_bluez_and_reboot(wanted_version)
else:
print("\nbluez version %s already installed" % wanted_version)
def usage():
print("\nUsage:\n%s -a [ install|restart ]\n" % os.path.basename(__file__))
sys.exit(2)
def main(argv):
try:
opts, args = getopt.getopt(argv,"a:",["action=",])
except getopt.GetoptError:
usage()
if not opts:
usage()
for opt, arg in opts:
if opt in ("-a", "--action"):
if arg == 'install':
install_ble()
elif arg == 'restart':
restart_bluez_ble()
else:
usage()
else:
usage()
if __name__ == "__main__":
main(sys.argv[1:])
exit(0)
| de | 0.38349 | #!/usr/bin/env python ################################## ## Install or Restart BLE ################################## #call(['sudo', 'systemctl', 'restart', 'ble-led-gatt']) | 2.155553 | 2 |
orion/core/operators/country_details_task.py | orion-search/orion-backend | 19 | 6623334 | <reponame>orion-search/orion-backend
"""
HomogeniseCountryNamesOperator: Homogenises country names from Google Places API
and the World Bank. It uses a country mapping dictionary from `model_config.yaml`.
CountryDetailsOperator: Fetches additional country details from the restcountries API.
This includes things such as population, capital, region (continent), sub-region and
country codes.
"""
import logging
from sqlalchemy import create_engine, distinct
from sqlalchemy.orm import sessionmaker
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from orion.core.orms.mag_orm import (
WorldBankFemaleLaborForce,
WorldBankGovEducation,
WorldBankResearchDevelopment,
WorldBankGDP,
CountryAssociation,
AffiliationLocation,
CountryDetails,
)
import orion
import requests
from orion.packages.geo.enrich_countries import (
parse_country_details,
get_country_details,
)
google2wb = orion.config["country_name_mapping"]["google2wb"]
google2restcountries = orion.config["country_name_mapping"]["google2restcountries"]
class HomogeniseCountryNamesOperator(BaseOperator):
    """Homogenises country names from Google Places API and the World Bank.

    Reads the distinct country names geocoded by Google Places from
    ``AffiliationLocation`` and stores one ``CountryAssociation`` row per
    name, mapping it to the World Bank spelling where the two differ and
    falling back to the Google name itself otherwise.
    """
    @apply_defaults
    def __init__(self, db_config, country_name_mapping=google2wb, *args, **kwargs):
        super().__init__(**kwargs)
        self.db_config = db_config
        # Bug fix: the original assigned the module-level `google2wb`
        # unconditionally, silently ignoring a caller-supplied mapping.
        self.country_name_mapping = country_name_mapping
    def execute(self, context):
        # Connect to postgresdb and start from a clean association table
        # so reruns are idempotent.
        engine = create_engine(self.db_config)
        Session = sessionmaker(engine)
        s = Session()
        s.query(CountryAssociation).delete()
        s.commit()
        # Country names from Google Places API (non-null, non-empty).
        # SQLAlchemy needs `!= None` here (it compiles to IS NOT NULL).
        country_names = [
            country_name[0]
            for country_name in s.query(distinct(AffiliationLocation.country)).filter(
                (AffiliationLocation.country != None),
                (AffiliationLocation.country != ""),
            )
        ]
        logging.info(f"Countries from Google: {len(country_names)}")
        # Country names from the World Bank indicator tables.
        # NOTE(review): this set is collected but never used below; the
        # queries are kept so behaviour matches the original -- confirm
        # whether the whole loop can be dropped.
        wb_countries = set()
        for c1, c2, c3, c4 in zip(
            s.query(WorldBankGDP.country),
            s.query(WorldBankGovEducation.country),
            s.query(WorldBankFemaleLaborForce.country),
            s.query(WorldBankResearchDevelopment.country),
        ):
            wb_countries.update(c1, c2, c3, c4)
        # Store one association per Google name, mapped where needed.
        for country_name in country_names:
            s.add(
                CountryAssociation(
                    google_country=country_name,
                    wb_country=self.country_name_mapping.get(
                        country_name, country_name
                    ),
                )
            )
        s.commit()
class CountryDetailsOperator(BaseOperator):
    """Fetch country information from restcountries.

    For every Google country name stored in ``CountryAssociation``, queries
    the restcountries API, parses each response, and bulk-inserts the
    parsed rows (tagged with both the Google and World Bank names) into
    ``CountryDetails``.
    """
    @apply_defaults
    def __init__(
        self, db_config, country_name_mapping=google2restcountries, *args, **kwargs
    ):
        super().__init__(**kwargs)
        # DB connection string and the Google->restcountries name mapping
        # used as a fallback when the API rejects the Google spelling.
        self.db_config = db_config
        self.country_name_mapping = country_name_mapping
    def execute(self, context):
        # Connect to postgresdb
        engine = create_engine(self.db_config)
        Session = sessionmaker(engine)
        s = Session()
        # Clear previously stored details so reruns are idempotent.
        s.query(CountryDetails).delete()
        s.commit()
        # Query restcountries API with Google Places country names.
        d = {}
        for country_name in [
            country_name[0]
            for country_name in s.query(CountryAssociation.google_country)
        ]:
            try:
                d[country_name] = get_country_details(country_name)
            except requests.exceptions.HTTPError as h:
                # The API did not recognise the Google spelling; retry with
                # the mapped spelling, skipping names with neither a
                # mapping entry nor an API match.
                logging.info(f"{country_name} - {h}: Trying with country_mapping")
                try:
                    d[country_name] = get_country_details(
                        self.country_name_mapping[country_name]
                    )
                except requests.exceptions.HTTPError as h:
                    logging.info(f"Failed: {country_name}")
                    continue
                except KeyError as e:
                    # Raised by the mapping lookup, not the API call.
                    logging.info(f"{country_name} not in mapping.")
                    continue
        # Parse country info
        country_info = []
        for k, v in d.items():
            # These countries are not the first match so we choose `pos=1`
            if k in ["India", "United States", "Sudan"]:
                parsed_response = parse_country_details(v, pos=1)
            else:
                parsed_response = parse_country_details(v)
            parsed_response.update({"google_name": k})
            # Attach the World Bank spelling recorded for this Google name.
            parsed_response.update(
                {
                    "wb_name": s.query(CountryAssociation.wb_country)
                    .filter(CountryAssociation.google_country == k)
                    .first()[0]
                }
            )
            country_info.append(parsed_response)
        logging.info(f"Parsed countries: {len(country_info)}")
        s.bulk_insert_mappings(CountryDetails, country_info)
        s.commit()
| """
HomogeniseCountryNamesOperator: Homogenises country names from Google Places API
and the World Bank. It uses a country mapping dictionary from `model_config.yaml`.
CountryDetailsOperator: Fetches additional country details from the restcountries API.
This includes things such as population, capital, region (continent), sub-region and
country codes.
"""
import logging
from sqlalchemy import create_engine, distinct
from sqlalchemy.orm import sessionmaker
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from orion.core.orms.mag_orm import (
WorldBankFemaleLaborForce,
WorldBankGovEducation,
WorldBankResearchDevelopment,
WorldBankGDP,
CountryAssociation,
AffiliationLocation,
CountryDetails,
)
import orion
import requests
from orion.packages.geo.enrich_countries import (
parse_country_details,
get_country_details,
)
google2wb = orion.config["country_name_mapping"]["google2wb"]
google2restcountries = orion.config["country_name_mapping"]["google2restcountries"]
class HomogeniseCountryNamesOperator(BaseOperator):
"""Homogenises country names from Google Places API and the World Bank."""
@apply_defaults
def __init__(self, db_config, country_name_mapping=google2wb, *args, **kwargs):
super().__init__(**kwargs)
self.db_config = db_config
self.country_name_mapping = google2wb
def execute(self, context):
# Connect to postgresdb
engine = create_engine(self.db_config)
Session = sessionmaker(engine)
s = Session()
s.query(CountryAssociation).delete()
s.commit()
# Country names from Google Places API
country_names = [
country_name[0]
for country_name in s.query(distinct(AffiliationLocation.country)).filter(
(AffiliationLocation.country != None),
(AffiliationLocation.country != ""),
)
]
logging.info(f"Countries from Google: {len(country_names)}")
# Country names from the World Bank
wb_countries = set()
for c1, c2, c3, c4 in zip(
s.query(WorldBankGDP.country),
s.query(WorldBankGovEducation.country),
s.query(WorldBankFemaleLaborForce.country),
s.query(WorldBankResearchDevelopment.country),
):
wb_countries.update(c1, c2, c3, c4)
# Match country names
for country_name in country_names:
if country_name in self.country_name_mapping.keys():
s.add(
CountryAssociation(
google_country=country_name,
wb_country=self.country_name_mapping[country_name],
)
)
else:
s.add(
CountryAssociation(
google_country=country_name, wb_country=country_name
)
)
s.commit()
class CountryDetailsOperator(BaseOperator):
"""Fetch country information from restcountries."""
@apply_defaults
def __init__(
self, db_config, country_name_mapping=google2restcountries, *args, **kwargs
):
super().__init__(**kwargs)
self.db_config = db_config
self.country_name_mapping = country_name_mapping
def execute(self, context):
# Connect to postgresdb
engine = create_engine(self.db_config)
Session = sessionmaker(engine)
s = Session()
s.query(CountryDetails).delete()
s.commit()
# Query restcountries API with Google Places country names.
d = {}
for country_name in [
country_name[0]
for country_name in s.query(CountryAssociation.google_country)
]:
try:
d[country_name] = get_country_details(country_name)
except requests.exceptions.HTTPError as h:
logging.info(f"{country_name} - {h}: Trying with country_mapping")
try:
d[country_name] = get_country_details(
self.country_name_mapping[country_name]
)
except requests.exceptions.HTTPError as h:
logging.info(f"Failed: {country_name}")
continue
except KeyError as e:
logging.info(f"{country_name} not in mapping.")
continue
# Parse country info
country_info = []
for k, v in d.items():
# These countries are not the first match so we choose `pos=1`
if k in ["India", "United States", "Sudan"]:
parsed_response = parse_country_details(v, pos=1)
else:
parsed_response = parse_country_details(v)
parsed_response.update({"google_name": k})
parsed_response.update(
{
"wb_name": s.query(CountryAssociation.wb_country)
.filter(CountryAssociation.google_country == k)
.first()[0]
}
)
country_info.append(parsed_response)
logging.info(f"Parsed countries: {len(country_info)}")
s.bulk_insert_mappings(CountryDetails, country_info)
s.commit() | en | 0.734125 | HomogeniseCountryNamesOperator: Homogenises country names from Google Places API and the World Bank. It uses a country mapping dictionary from `model_config.yaml`. CountryDetailsOperator: Fetches additional country details from the restcountries API. This includes things such as population, capital, region (continent), sub-region and country codes. Homogenises country names from Google Places API and the World Bank. # Connect to postgresdb # Country names from Google Places API # Country names from the World Bank # Match country names Fetch country information from restcountries. # Connect to postgresdb # Query restcountries API with Google Places country names. # Parse country info # These countries are not the first match so we choose `pos=1` | 2.586119 | 3 |
adb.py | yaoshiu/Ash-s-Arknights-Helper | 0 | 6623335 | <filename>adb.py
import os
import random
__adb = os.path.abspath('platform-tools/adb.exe')
__arknights = 'com.hypergryph.arknights/com.u8.sdk.U8UnityContext'
__max0035, __max0036 = 0, 0
def devices() -> list[str]:
    """Returns a list of attached devices' serial numbers."""
    # `adb devices` prints a header line, one line per device, then a
    # trailing blank line; [1:-1] strips the header and that blank entry.
    # NOTE(review): each element keeps adb's "<serial>\t<state>" format,
    # and the slice assumes the trailing blank line is present -- confirm.
    connected = os.popen(__adb + ' devices').read()
    return connected.splitlines()[1:-1]
def connect(method: str = '-e', serial_number: str = '') -> None:
    """Connect adb to a device.

    Args:
        method: How to pick the target:
            '-d' -- the only device attached by USB,
            '-e' -- the only running emulator,
            '-s' -- the device named by `serial_number`.
        serial_number: Serial number of the device (used with '-s').
    """
    os.system(' '.join([__adb, method, serial_number]))
def wm_size() -> tuple[int, int]:
    """Return the screen resolution of the connected device.

    Fixes the original's meaningless `-> int and int` annotation and
    returns a reusable tuple instead of a single-use `map` object (the
    caller's `w, h = wm_size()` unpacking still works unchanged).

    Returns:
        (width, height) of the screen in pixels.
    """
    content = os.popen(__adb + ' shell wm size').read()
    # Output looks like "Physical size: 1920x1080"; token 2 is "1920x1080".
    resolution = content.split()[2]
    width, height = (int(part) for part in resolution.split('x'))
    return width, height
def wm_density() -> int:
    """Return the screen density (dpi) of the connected device."""
    # Output looks like "Physical density: 320"; token 2 is the number.
    output = os.popen(__adb + ' shell wm density').read()
    return int(output.split()[2])
def start_arknights() -> None:
    """Launch Arknights on the connected device via the activity manager."""
    command = __adb + ' shell am start -n ' + __arknights
    os.system(command)
def pull(device_path: str, computer_path: str) -> None:
    """Copy a file from the device to the computer.

    Args:
        device_path: Source path on the device.
        computer_path: Destination path on the computer.
    """
    os.system(' '.join([__adb, 'pull', device_path, computer_path]))
def push(computer_path: str, device_path: str) -> None:
    """Copy a file from the computer to the device.

    Args:
        computer_path: Source path on the computer.
        device_path: Destination path on the device.
    """
    os.system(' '.join([__adb, 'push', computer_path, device_path]))
def tap(x: int, y: int) -> None:
    """Tap the screen of the connected device at the given point.

    Args:
        x: Horizontal coordinate of the tap.
        y: Vertical coordinate of the tap.
    """
    os.system(' '.join([__adb, 'shell input tap', str(x), str(y)]))
def swipe(x1: int, y1: int, x2: int, y2: int) -> None:
    """Swipe the screen of the connected device between two points.

    Args:
        x1: Horizontal coordinate of the swipe start.
        y1: Vertical coordinate of the swipe start.
        x2: Horizontal coordinate of the swipe end.
        y2: Vertical coordinate of the swipe end.
    """
    coordinates = [str(value) for value in (x1, y1, x2, y2)]
    os.system(' '.join([__adb, 'shell input swipe'] + coordinates))
def home() -> None:
    """Press the HOME button (keycode 3) on the connected device."""
    command = __adb + ' shell input keyevent 3'
    os.system(command)
def __make_max() -> int and int:
    """Get the maximum size of the coordinates.
    Queries `getevent -p` once and caches the results in the module-level
    __max0035 / __max0036 globals so later accessors are free.
    Returns:
        The maximum size of the horizontal coordinate.
        The maximum size of the vertical coordinate.
    """
    global __max0035, __max0036
    contents = os.popen(__adb + ' shell getevent -p').read()
    # Scan the ABS event section of the dump for event codes 0035/0036
    # (presumably the multitouch X/Y position axes -- confirm); each code
    # is followed by "max <value>," in the getevent output.
    abs0003 = contents.find('ABS')
    __max0035 = contents.find('0035', abs0003)
    __max0035 = contents.find('max', __max0035)
    end0035 = contents.find(',', __max0035)
    __max0036 = contents.find('0036', abs0003)
    __max0036 = contents.find('max', __max0036)
    end0036 = contents.find(',', __max0036)
    # +4 skips past "max " to the start of the numeric value.
    __max0035, __max0036 = int(
        contents[__max0035 + 4: end0035]), int(contents[__max0036 + 4: end0036])
    return __max0035, __max0036
def get_max_x() -> int:
    """Return the maximum raw touch x coordinate, probing on first use.

    Returns:
        A int indicating the maximum size of the x coordinate.
    """
    if not __max0035:
        # Not cached yet: query the device once.
        __make_max()
    return __max0035
def get_max_y() -> int:
    """Return the maximum raw touch y coordinate, probing on first use.

    Returns:
        A int indicating the maximum size of the y coordinate.
    """
    if not __max0036:
        # Not cached yet: query the device once.
        __make_max()
    return __max0036
def __main() -> int:
    """Demo entry point: list devices, connect, report metrics, tap once.

    NOTE(review): annotated `-> int` but implicitly returns None -- confirm.
    """
    for device in devices():
        print(device)
    # serial_number = input('请输入要连接的设备:')  # (disabled) prompt: "enter the device to connect to"
    connect()
    horizontal, vertical = wm_size()
    print('该设备分辨率为:', horizontal, 'x', vertical)  # device resolution
    print('该设备ppi为为:', wm_density())  # device ppi
    print('0035max:', get_max_x())  # also populates the __max0035/36 cache
    print('0036max:', get_max_y())
    # Scale a random point in 1920x1080 screen space into the raw touch
    # coordinate range before tapping.
    # NOTE(review): the division yields floats, which `input tap` may
    # reject on some adb versions -- confirm int() is not needed here.
    tap(random.randint(1125, 1443) * __max0035 / 1920,
        random.randint(633, 779) * __max0036 / 1080)
if __name__ == '__main__':
__main()
| <filename>adb.py
import os
import random
__adb = os.path.abspath('platform-tools/adb.exe')
__arknights = 'com.hypergryph.arknights/com.u8.sdk.U8UnityContext'
__max0035, __max0036 = 0, 0
def devices() -> list[str]:
"""Returns a list of attached devices' serial numbers."""
connected = os.popen(__adb + ' devices').read()
return connected.splitlines()[1:-1]
def connect(method: str = '-e', serial_number: str = '') -> None:
"""Connect to a device.
Args:
* `method`: The method to connect.
Avaliable method:
* `-d`: Connect to the only device attached by USB.
* `-e`: Connect to the only emulator.
* `-s`: Connect to a specified device.
* `serial_number`: The serial number of device to be connected.
"""
os.system(__adb + ' ' + method + ' ' + serial_number)
def wm_size() -> int and int:
"""Returns the size of the screen resolution of the connected device.
Returns:
* The horizontal resolution of the screen.
* The vertical resolution of the screen.
"""
content = os.popen(__adb + ' shell wm size').read()
resolution = content.split()[2]
return map(int, resolution.split('x'))
def wm_density() -> int:
"""Returns the screen density of the connected device"""
return int(os.popen(__adb + ' shell wm density').read().split()[2])
def start_arknights() -> None:
"""Start Arknights on the connected device"""
os.system(__adb + ' shell am start -n ' + __arknights)
def pull(device_path: str, computer_path: str) -> None:
"""Copy files from device to computer.
Copy the file in the specified path on the device to the specified path on the computer.
Args:
`device_path`: The path of the file to be copied.
`computer_path`: The path of to be copied to.
"""
os.system(__adb + ' pull ' + device_path + ' ' + computer_path)
def push(computer_path: str, device_path: str) -> None:
"""Copy files from computer to device.
Copy the file in the specified path on the computer to the specified path on the device.
Args:
`computer_path`: The path of the file to be copied.
`device_path`: The path of the file to be copied to.
"""
os.system(__adb + ' push ' + computer_path + ' ' + device_path)
def tap(x: int, y: int) -> None:
"""Tap the device with the specified coordinates.
Args:
`x`: The x coordinate of the tap point.
`y`: The y coordinate of the tap point.
"""
os.system(__adb + ' shell input tap ' + str(x) + ' ' + str(y))
def swipe(x1: int, y1: int, x2: int, y2: int) -> None:
"""Swipe the screen of the device connected with the specified coordinates.
Args:
`x1`: The start x coordinate of the swipe.
`y1`: The start y coordinate of the swipe.
`x2`: The end x coordinate of the swipe.
`y2`: The end y coordinate of the swipe.
"""
os.system(__adb + ' shell input swipe ' + str(x1) +
' ' + str(y1) + ' ' + str(x2) + ' ' + str(y2))
def home() -> None:
"""Push the home botton of the connected device."""
os.system(__adb + ' shell input keyevent 3')
def __make_max() -> int and int:
"""Get the maximum size of the coordinates.
Returns:
The maximum size of the horizontal coordinate.
The maximum size of the vertical coordinate.
"""
global __max0035, __max0036
contents = os.popen(__adb + ' shell getevent -p').read()
abs0003 = contents.find('ABS')
__max0035 = contents.find('0035', abs0003)
__max0035 = contents.find('max', __max0035)
end0035 = contents.find(',', __max0035)
__max0036 = contents.find('0036', abs0003)
__max0036 = contents.find('max', __max0036)
end0036 = contents.find(',', __max0036)
__max0035, __max0036 = int(
contents[__max0035 + 4: end0035]), int(contents[__max0036 + 4: end0036])
return __max0035, __max0036
def get_max_x() -> int:
"""Get the maximum size of the x coordinate.
Returns:
A int indicating the maximum size of the x coordinate.
"""
if __max0035 == 0:
__make_max()
return __max0035
def get_max_y() -> int:
"""Get the maximum size of the y coordinate.
Returns:
A int indicating the maximum size of the y coordinate.
"""
if __max0036 == 0:
__make_max()
return __max0036
def __main() -> int:
for device in devices():
print(device)
# serial_number = input('请输入要连接的设备:')
connect()
horizontal, vertical = wm_size()
print('该设备分辨率为:', horizontal, 'x', vertical)
print('该设备ppi为为:', wm_density())
print('0035max:', get_max_x())
print('0036max:', get_max_y())
tap(random.randint(1125, 1443) * __max0035 / 1920,
random.randint(633, 779) * __max0036 / 1080)
if __name__ == '__main__':
__main()
| en | 0.744448 | Returns a list of attached devices' serial numbers. Connect to a device. Args: * `method`: The method to connect. Avaliable method: * `-d`: Connect to the only device attached by USB. * `-e`: Connect to the only emulator. * `-s`: Connect to a specified device. * `serial_number`: The serial number of device to be connected. Returns the size of the screen resolution of the connected device. Returns: * The horizontal resolution of the screen. * The vertical resolution of the screen. Returns the screen density of the connected device Start Arknights on the connected device Copy files from device to computer. Copy the file in the specified path on the device to the specified path on the computer. Args: `device_path`: The path of the file to be copied. `computer_path`: The path of to be copied to. Copy files from computer to device. Copy the file in the specified path on the computer to the specified path on the device. Args: `computer_path`: The path of the file to be copied. `device_path`: The path of the file to be copied to. Tap the device with the specified coordinates. Args: `x`: The x coordinate of the tap point. `y`: The y coordinate of the tap point. Swipe the screen of the device connected with the specified coordinates. Args: `x1`: The start x coordinate of the swipe. `y1`: The start y coordinate of the swipe. `x2`: The end x coordinate of the swipe. `y2`: The end y coordinate of the swipe. Push the home botton of the connected device. Get the maximum size of the coordinates. Returns: The maximum size of the horizontal coordinate. The maximum size of the vertical coordinate. Get the maximum size of the x coordinate. Returns: A int indicating the maximum size of the x coordinate. Get the maximum size of the y coordinate. Returns: A int indicating the maximum size of the y coordinate. # serial_number = input('请输入要连接的设备:') | 2.95377 | 3 |
Python/problem0222.py | 1050669722/LeetCode-Answers | 0 | 6623336 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def countNodes(self, root: "TreeNode") -> int:
        """Count the nodes of a binary tree.

        Counts recursively instead of materialising a full preorder value
        list just to take its length, removing the O(n) extra memory.
        The annotation is quoted because TreeNode is only defined in the
        commented header here; the unquoted form raised NameError at
        import time.

        Args:
            root: Root node of the (possibly empty) tree.

        Returns:
            Total number of nodes; 0 for an empty tree.
        """
        if root is None:
            return 0
        # This node plus every node in each subtree.
        return 1 + self.countNodes(root.left) + self.countNodes(root.right)
| # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def countNodes(self, root: TreeNode) -> int:
def preorderTraversal(root):
if not root:
return []
ans = []
ans.append(root.val)
ans += preorderTraversal(root.left)
ans += preorderTraversal(root.right)
return ans
return len(preorderTraversal(root))
| en | 0.60307 | # Definition for a binary tree node. # class TreeNode: # def __init__(self, x): # self.val = x # self.left = None # self.right = None | 3.723857 | 4 |
convertcrypt/blueprints/restapi/__init__.py | alexandermarquesm/ConvertCrypt | 0 | 6623337 | from flask import Blueprint
from flask_restful import Api
from .resources import PricePairResource, ValidTokens
bp = Blueprint('restapi', __name__, url_prefix="/api/v1")
api = Api(bp)
def init_app(app):
    """Register the v1 REST API blueprint and its resources on *app*.

    Args:
        app: The Flask application instance to extend.
    """
    # Mounted under the blueprint's /api/v1 prefix, e.g. /api/v1/<pair>/<amount>
    # -- presumably a price/conversion lookup for `amount` of `pair`; confirm.
    api.add_resource(PricePairResource, '/<string:pair>/<int:amount>')
    # Token validity check at /api/v1/token/<token>.
    api.add_resource(ValidTokens, '/token/<string:token>')
    app.register_blueprint(bp)
from flask_restful import Api
from .resources import PricePairResource, ValidTokens
bp = Blueprint('restapi', __name__, url_prefix="/api/v1")
api = Api(bp)
def init_app(app):
api.add_resource(PricePairResource, '/<string:pair>/<int:amount>')
api.add_resource(ValidTokens, '/token/<string:token>')
app.register_blueprint(bp) | none | 1 | 2.50955 | 3 | |
deprecated_code/workflows/mpi/test_op.py | ska-telescope/algorithm-reference-library | 22 | 6623338 | #!/usr/bin/env python
from mpi4py import MPI
import numpy as np
comm = MPI.COMM_WORLD
def fn_sum(buffer_a, buffer_b, t):
    """Custom MPI reduction: element-wise add buffer_a into buffer_b.

    Args:
        buffer_a: Incoming raw send buffer.
        buffer_b: Raw receive buffer, updated in place with the sum.
        t: MPI datatype of the buffer elements.
    """
    tc = MPI._typecode(t) # map MPI datatype -> Python typecode
    # View both raw buffers as numpy arrays of the matching dtype; the
    # in-place += writes the per-element sum back into buffer_b.
    array_a = np.frombuffer(buffer_a, dtype=tc)
    array_b = np.frombuffer(buffer_b, dtype=tc)
    array_b += array_a
# Register the commutative user-defined reduction backed by fn_sum.
op_sum = MPI.Op.Create(fn_sum, commute=True)
# Each rank contributes a 5-element int array filled with its own rank.
data = np.empty(5, dtype='i')
data.fill(comm.rank)
result = np.empty_like(data)
#comm.Allreduce(data, result, op=op_sum)
# NOTE(review): comm.reduce returns the reduced value on the root rank
# only (None elsewhere), so the assert below fails on non-root ranks when
# run with more than one process -- confirm whether Allreduce (commented
# out above) was the intended call.
result=comm.reduce(data, op=op_sum)
print(result)
# Sum over all ranks of `rank` is 0+1+...+(size-1).
assert np.allclose(result, sum(range(comm.size)))
| #!/usr/bin/env python
from mpi4py import MPI
import numpy as np
comm = MPI.COMM_WORLD
def fn_sum(buffer_a, buffer_b, t):
tc = MPI._typecode(t) # map MPI datatype -> Python typecode
array_a = np.frombuffer(buffer_a, dtype=tc)
array_b = np.frombuffer(buffer_b, dtype=tc)
array_b += array_a
op_sum = MPI.Op.Create(fn_sum, commute=True)
data = np.empty(5, dtype='i')
data.fill(comm.rank)
result = np.empty_like(data)
#comm.Allreduce(data, result, op=op_sum)
result=comm.reduce(data, op=op_sum)
print(result)
assert np.allclose(result, sum(range(comm.size)))
| en | 0.08775 | #!/usr/bin/env python # map MPI datatype -> Python typecode #comm.Allreduce(data, result, op=op_sum) | 2.442036 | 2 |
QssTools.py | x568059888/Gomoku | 0 | 6623339 | <filename>QssTools.py<gh_stars>0
def SetQss(file_path, obj):
    """Read a Qt stylesheet from *file_path* and apply it to *obj*."""
    with open(file_path, 'r') as stylesheet:
        contents = stylesheet.read()
    obj.setStyleSheet(contents)
| <filename>QssTools.py<gh_stars>0
def SetQss(file_path, obj):
with open(file_path, 'r') as f:
obj.setStyleSheet(f.read())
| none | 1 | 1.9872 | 2 | |
python_basics/datatypes/escape_sequence.py | danielkpodo/python-zero-to-mastery | 0 | 6623340 | # escaping double strings
# \" escapes a double quote inside a double-quoted string literal.
greeting = "Hello what\"s is your name"
print(greeting)
# using newline: \n splits the string across two output lines
ask = "How do you see yourself in ten years \n Mr narh what\"s your response"
print(ask)
# using tabs as a way to format strings: \t indents the printed line
question = "\t Who is your shepherd"
print(question)
| # escaping double strings
greeting = "Hello what\"s is your name"
print(greeting)
# using newline
ask = "How do you see yourself in ten years \n Mr narh what\"s your response"
print(ask)
# using tabs as a way to format strings
question = "\t Who is your shepherd"
print(question)
| en | 0.635422 | # escaping double strings # using newline # using tabs as a way to format strings | 3.744435 | 4 |
agape/load.py | harryscholes/agape | 0 | 6623341 | """Classes and functions to load data.
"""
import os
import pandas as pd
from .base import Base
__all__ = ["Genes", "Biogrid", "STRING"]
data = os.environ["AGAPEDATA"]
class Genes(Base):
    """Load S. pombe gene IDs from the genome GFF3 annotation file."""
    def __init__(self):
        super().__init__()
        df = pd.read_csv(os.path.join(data,
                         "schizosaccharomyces_pombe.genome.gff3"),
                         skiprows=6,
                         header=None,
                         sep="\t")
        # Keep the full annotation table for callers.
        self.df = df.copy()
        # Restrict to gene features and extract the bare gene ID from the
        # attributes column ("ID=gene:<id>;Name=...").
        df = df[df[2] == "gene"]
        _, df[8] = df[8].str.split("ID=gene:").str
        df[8], _ = df[8].str.split(";Name=").str
        self.genes = list(df[8].unique())
    def viability(self, phenotype=None):
        """Get genes annotated with a viability phenotype.
        # Arguments
            phenotype: str (optional), {viable, inviable, condition-dependent}
        # Returns
            DataFrame of all annotations when `phenotype` is None,
            otherwise a list of gene IDs with that phenotype.
        # Raises
            KeyError: if `phenotype` is not a known phenotype value.
        """
        # Lazily load and cache the FYPO viability annotations.
        if not hasattr(self, "viability_df"):
            df = pd.read_csv(os.path.join(data, "FYPOviability.tsv"), sep="\t",
                             header=None, names=["GeneID", "Phenotype"])
            self.viability_df = df
        if phenotype is None:
            return self.viability_df
        phenotypes = self.viability_df.Phenotype.unique()
        if phenotype not in phenotypes:
            # Single fully formatted message (the original passed two
            # arguments to KeyError, the first an f-string with no
            # placeholders); now consistent with Biogrid's error style.
            raise KeyError(
                f"`phenotype` must be one of: {', '.join(phenotypes)}")
        df = self.viability_df
        df = df[df.Phenotype == phenotype]
        return list(df.GeneID)
class Biogrid(Base):
    """Load S. pombe BioGRID database.

    Reads the organism-specific BioGRID tab2 export and keeps only
    interactions where both partners are S. pombe.
    """
    def __init__(self):
        super().__init__()
        f = "BIOGRID-ORGANISM-Schizosaccharomyces_pombe_972h-3.4.164.tab2.txt"
        df = pd.read_csv(os.path.join(data, f), sep="\t")
        # Drop cross-species interactions: keep rows where both interactor
        # taxon IDs are 284812 (presumably S. pombe 972h-, matching the
        # file name -- confirm).
        df = df[(df["Organism Interactor A"] == 284812) &
                (df["Organism Interactor B"] == 284812)]
        self.df = df
    def __call__(self, interaction_type=None, graph=False):
        """Call the class instance to filter the loaded interactions.
        # Arguments
            interaction_type: str, {physical, genetic}
            graph: bool, if True return edge list
        # Returns
            DataFrame: BioGRID database
        # Raises
            KeyError: if `interaction_type` is not {physical, genetic}
        """
        df = self.df
        if interaction_type is not None:
            # Validate against the experiment types actually in the data.
            interaction_types = df["Experimental System Type"].unique()
            if interaction_type not in interaction_types:
                raise KeyError(("`interaction_type` must be one of:",
                                f"{', '.join(interaction_types)}"))
            df = df[df["Experimental System Type"] == interaction_type]
        if graph:
            # Reduce to a two-column source/target edge list of
            # systematic gene names.
            df = df[['Systematic Name Interactor A',
                     'Systematic Name Interactor B']]
            df.columns = ["source", "target"]
        return df
class STRING(Base):
    """Load S. pombe STRING database."""

    def __init__(self):
        """Read the STRING links file and record the evidence channels."""
        super().__init__()
        f = "4896.protein.links.detailed.v10.5.txt"
        self.df = pd.read_csv(os.path.join(data, f), sep=" ")
        # Evidence channels available as score columns in the file.
        self.interaction_types = (
            'neighborhood', 'fusion', 'cooccurence', 'coexpression',
            'experimental', 'database')

    def get(self, interaction_type=None):
        """Filter the loaded interactions by evidence channel.

        # Arguments
        interaction_type: str, {neighborhood, fusion, cooccurence,
            coexpression, experimental, database}

        # Returns
        DataFrame: protein pairs with the requested evidence score, or the
            full table if `interaction_type` is None.

        # Raises
        KeyError: if `interaction_type` is not a known evidence channel.
        """
        if interaction_type is None:
            # Previously None slipped past the validation and crashed inside
            # pandas column indexing; return the whole table instead.
            return self.df
        if interaction_type not in self.interaction_types:
            raise KeyError(
                f"`interaction_type` must be one of: {self.interaction_types}")
        return self.df[["protein1", "protein2", interaction_type]]
| """Classes and functions to load data.
"""
import os
import pandas as pd
from .base import Base
__all__ = ["Genes", "Biogrid", "STRING"]
data = os.environ["AGAPEDATA"]
class Genes(Base):
"""Load S. pombe gene IDs.
"""
def __init__(self):
super().__init__()
df = pd.read_csv(os.path.join(data,
"schizosaccharomyces_pombe.genome.gff3"),
skiprows=6,
header=None,
sep="\t")
self.df = df.copy()
df = df[df[2] == "gene"]
_, df[8] = df[8].str.split("ID=gene:").str
df[8], _ = df[8].str.split(";Name=").str
self.genes = list(df[8].unique())
def viability(self, phenotype=None):
"""Get genes annotated with a viability phenotype.
# Arguments
phenotype: str (optional), {viable, inviable, condition-dependent}
"""
if not hasattr(self, "viability_df"):
df = pd.read_csv(os.path.join(data, "FYPOviability.tsv"), sep="\t",
header=None, names=["GeneID", "Phenotype"])
self.viability_df = df
if phenotype is None:
return self.viability_df
phenotypes = self.viability_df.Phenotype.unique()
if phenotype not in phenotypes:
raise KeyError(f"`phenotype` must be one of:", phenotypes)
df = self.viability_df
df = df[df.Phenotype == phenotype]
return list(df.GeneID)
class Biogrid(Base):
"""Load S. pombe BioGRID database.
"""
def __init__(self):
super().__init__()
f = "BIOGRID-ORGANISM-Schizosaccharomyces_pombe_972h-3.4.164.tab2.txt"
df = pd.read_csv(os.path.join(data, f), sep="\t")
df = df[(df["Organism Interactor A"] == 284812) &
(df["Organism Interactor B"] == 284812)]
self.df = df
def __call__(self, interaction_type=None, graph=False):
"""Call the class instance to filter the loaded interactions.
# Arguments
interaction_type: str, {physical, genetic}
graph: bool, if True return edge list
# Returns
DataFrame: BioGRID database
# Raises
KeyError: if `interaction_type` is not {physical, genetic}
"""
df = self.df
if interaction_type is not None:
interaction_types = df["Experimental System Type"].unique()
if interaction_type not in interaction_types:
raise KeyError(("`interaction_type` must be one of:",
f"{', '.join(interaction_types)}"))
df = df[df["Experimental System Type"] == interaction_type]
if graph:
df = df[['Systematic Name Interactor A',
'Systematic Name Interactor B']]
df.columns = ["source", "target"]
return df
class STRING(Base):
"""Load S. pombe STRING database.
"""
def __init__(self):
super().__init__()
f = "4896.protein.links.detailed.v10.5.txt"
self.df = pd.read_csv(os.path.join(data, f), sep=" ")
self.interaction_types = (
'neighborhood', 'fusion', 'cooccurence', 'coexpression',
'experimental', 'database')
def get(self, interaction_type=None):
"""Call the class instance to filter the loaded interactions.
# Arguments
interaction_type: str, {neighborhood, fusion, cooccurence,
coexpression, experimental, database}
# Returns
DataFrame: STRING database
# Raises
KeyError: if `interaction_type` is not in {neighborhood, fusion,
cooccurence, coexpression, experimental, database}
"""
if all((interaction_type is not None,
interaction_type not in self.interaction_types)):
raise KeyError(
f"`interaction_type` must be one of: {self.interaction_types}")
return self.df[["protein1", "protein2", interaction_type]]
| en | 0.592225 | Classes and functions to load data. Load S. pombe gene IDs. Get genes annotated with a viability phenotype. # Arguments phenotype: str (optional), {viable, inviable, condition-dependent} Load S. pombe BioGRID database. Call the class instance to filter the loaded interactions. # Arguments interaction_type: str, {physical, genetic} graph: bool, if True return edge list # Returns DataFrame: BioGRID database # Raises KeyError: if `interaction_type` is not {physical, genetic} Load S. pombe STRING database. Call the class instance to filter the loaded interactions. # Arguments interaction_type: str, {neighborhood, fusion, cooccurence, coexpression, experimental, database} # Returns DataFrame: STRING database # Raises KeyError: if `interaction_type` is not in {neighborhood, fusion, cooccurence, coexpression, experimental, database} | 3.20889 | 3 |
tests/layers.py | YusukeSuzuki/castanea2 | 0 | 6623342 | import unittest
import tensorflow as tf
import castanea2.layers as layers
class TestLayers(unittest.TestCase):
    """Smoke tests: building each layer on a 4-D placeholder must not raise."""

    def test_conv2d(self):
        inputs = tf.placeholder(tf.float32, shape=(32, 128, 128, 3))
        _ = layers.conv2d(inputs, 3, 32)

    def test_equalized_conv2d(self):
        inputs = tf.placeholder(tf.float32, shape=(32, 128, 128, 3))
        _ = layers.equalized_conv2d(inputs, 3, 32)
# Allow running this test module directly with `python <file>.py`.
if __name__ == '__main__':
    unittest.main()
| import unittest
import tensorflow as tf
import castanea2.layers as layers
class TestLayers(unittest.TestCase):
def test_conv2d(self):
x = tf.placeholder(tf.float32, shape=(32, 128, 128, 3))
y = layers.conv2d(x, 3, 32)
def test_equalized_conv2d(self):
x = tf.placeholder(tf.float32, shape=(32, 128, 128, 3))
y = layers.equalized_conv2d(x, 3, 32)
if __name__ == '__main__':
unittest.main()
| none | 1 | 2.747015 | 3 | |
telegram/plugins/msg.py | AntonioND/led-bot | 0 | 6623343 | #!/usr/bin/env python3.7
# SPDX-License-Identifier: MIT
#
# Copyright (c) 2020, <NAME> <<EMAIL>>
import telepot
import user
async def execute(bot, msg, chat_id, args, username):
    """Relay a message: `args` is "<dest_name> <text>"; the text is sent to
    the chat of the user registered under `dest_name`, prefixed with the
    sender's name."""
    recipient, text = args.split(" ", 1)
    recipient_id = user.get_id(recipient)
    await bot.sendMessage(recipient_id, username + ': ' + text)
| #!/usr/bin/env python3.7
# SPDX-License-Identifier: MIT
#
# Copyright (c) 2020, <NAME> <<EMAIL>>
import telepot
import user
async def execute(bot, msg, chat_id, args, username):
dest_name, msg = args.split(" ", 1)
dest_id = user.get_id(dest_name)
await bot.sendMessage(dest_id, username + ': ' + msg)
| en | 0.184149 | #!/usr/bin/env python3.7 # SPDX-License-Identifier: MIT # # Copyright (c) 2020, <NAME> <<EMAIL>> | 2.095721 | 2 |
yowsup/layers/auth/protocolentities/test_failure.py | zulu494/Anoa-Bot- | 1 | 6623344 | <reponame>zulu494/Anoa-Bot-<filename>yowsup/layers/auth/protocolentities/test_failure.py
from yowsup.layers.auth.protocolentities.failure import FailureProtocolEntity
from yowsup.structs import ProtocolTreeNode
from yowsup.structs.protocolentity import ProtocolEntityTest
import unittest
class FailureProtocolEntityTest(ProtocolEntityTest, unittest.TestCase):
    """Exercise FailureProtocolEntity against a canned <failure> node."""

    def setUp(self):
        self.node = ProtocolTreeNode("failure", {"reason": "not-authorized"})
        self.ProtocolEntity = FailureProtocolEntity
| from yowsup.layers.auth.protocolentities.failure import FailureProtocolEntity
from yowsup.structs import ProtocolTreeNode
from yowsup.structs.protocolentity import ProtocolEntityTest
import unittest
class FailureProtocolEntityTest(ProtocolEntityTest, unittest.TestCase):
def setUp(self):
self.ProtocolEntity = FailureProtocolEntity
self.node = ProtocolTreeNode("failure", {"reason": "not-authorized"}) | none | 1 | 2.256976 | 2 | |
tests/data/conftest.py | vasp-dev/py4vasp | 27 | 6623345 | <filename>tests/data/conftest.py
# Copyright © VASP Software GmbH,
# Licensed under the Apache License 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from contextlib import contextmanager
from IPython.core.formatters import DisplayFormatter
from unittest.mock import patch, MagicMock, PropertyMock
from pathlib import Path
import pytest
import py4vasp._util.version as version
import py4vasp.raw as raw
@pytest.fixture
def mock_file():
    """Fixture factory: patch ``raw.File`` so property `name` yields `ref`.

    Returns a context manager taking ``(name, ref)``; inside the context the
    File constructor and ``close()`` are no-ops and the patched property
    returns ``{"default": ref}``.  The three mocks are yielded so tests can
    assert how often each was called.
    """
    @contextmanager
    def _mock_file(name, ref):
        # Prevent File.__init__ from touching the filesystem.
        cm_init = patch.object(raw.File, "__init__", autospec=True, return_value=None)
        # Replace the property under test with a controllable PropertyMock.
        cm_sut = patch.object(raw.File, name, new_callable=PropertyMock)
        cm_close = patch.object(raw.File, "close", autospec=True)
        with cm_init as init, cm_sut as sut, cm_close as close:
            sut.return_value = {"default": ref}
            yield {"init": init, "sut": sut, "close": close}
    return _mock_file
@pytest.fixture
def check_read():
    """Fixture: verify ``cls.from_file`` works for every supported source.

    The returned checker exercises four construction paths — an already
    open ``raw.File``, the default file (``None``), an explicit filename,
    and a directory ``Path`` — and uses the ``mocks`` dict produced by the
    ``mock_file`` fixture to assert that ``File.__init__``/``close`` are
    invoked only when the data class has to open the file itself.
    """
    def _check_read(cls, mocks, ref, default_filename=None):
        _check_read_from_open_file(cls, mocks, ref)
        _check_read_from_default_file(cls, mocks, ref, default_filename)
        _check_read_from_filename(cls, mocks, ref)
        _check_read_from_path(cls, mocks, ref, default_filename)

    def _check_read_from_open_file(cls, mocks, ref):
        # An already open File must be used as-is: no init, no close; only
        # the property under test (sut) is read.
        with raw.File() as file:
            file._path = Path.cwd()
            obj = _create_obj(cls, file, _assert_not_called, mocks)
            _check_raw_data(obj, ref, _assert_only_sut, mocks)

    def _check_read_from_default_file(cls, mocks, ref, default_filename):
        # Passing None must open (and close) the class's default filename.
        obj = _create_obj(cls, None, _assert_not_called, mocks)
        _check_raw_data(obj, ref, _assert_all_called, mocks)
        assert _first_init_arg(mocks) == default_filename

    def _check_read_from_filename(cls, mocks, ref):
        # A user-supplied filename must be forwarded verbatim to File().
        filename = "user_selected_file"
        obj = _create_obj(cls, filename, _assert_not_called, mocks)
        _check_raw_data(obj, ref, _assert_all_called, mocks)
        assert _first_init_arg(mocks) == filename

    def _check_read_from_path(cls, mocks, ref, default_filename):
        # A directory Path is combined with the default filename, if any.
        path = Path.cwd()
        obj = _create_obj(cls, path, _assert_not_called, mocks)
        _check_raw_data(obj, ref, _assert_all_called, mocks)
        if default_filename is None:
            assert _first_init_arg(mocks) == path
        else:
            assert _first_init_arg(mocks) == path / default_filename

    def _create_obj(cls, file, assertion, mocks):
        # Construction itself must be lazy: no file access yet.
        _reset_mocks(mocks)
        obj = cls.from_file(file)
        assertion(mocks)
        return obj

    def _check_raw_data(obj, ref, assertion, mocks):
        # Reading the data triggers the expected open/read/close pattern.
        _reset_mocks(mocks)
        with obj._data_dict_from_context() as actual:
            assert actual["default"] == ref
        assertion(mocks)

    def _assert_not_called(mocks):
        mocks["init"].assert_not_called()
        mocks["sut"].assert_not_called()
        mocks["close"].assert_not_called()

    def _assert_only_sut(mocks):
        mocks["init"].assert_not_called()
        mocks["sut"].assert_called_once()
        mocks["close"].assert_not_called()

    def _assert_all_called(mocks):
        mocks["init"].assert_called_once()
        mocks["sut"].assert_called_once()
        mocks["close"].assert_called_once()

    def _first_init_arg(mocks):
        # args[0] is the autospec'd `self`; args[1] is the filename/path.
        args, _ = mocks["init"].call_args
        return args[1]

    def _reset_mocks(mocks):
        for mock in mocks.values():
            mock.reset_mock()

    return _check_read
@pytest.fixture
def check_descriptors():
    """Fixture: verify that public accessor methods forward to private ones.

    ``descriptors`` maps a private method name to the public method names
    that should delegate to it; each private method is patched to return
    its own name so delegation can be asserted by return value.
    """
    def _check_descriptors(instance, descriptors):
        classname = f"{instance.__module__}.{instance.__class__.__name__}"
        for private_name, public_names in descriptors.items():
            fullname = f"{classname}.{private_name}"
            with patch(fullname, return_value=private_name):
                for public_name in public_names:
                    # The public method must return the patched sentinel.
                    assert private_name == getattr(instance, public_name)()
    return _check_descriptors
@pytest.fixture
def outdated_version():
    """A RawVersion one major release below the minimal supported VASP."""
    previous_major = version.minimal_vasp_version.major - 1
    return raw.RawVersion(previous_major)
@pytest.fixture
def format_():
    """The IPython display-formatter callable used to render objects."""
    formatter = DisplayFormatter()
    return formatter.format
| <filename>tests/data/conftest.py
# Copyright © VASP Software GmbH,
# Licensed under the Apache License 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from contextlib import contextmanager
from IPython.core.formatters import DisplayFormatter
from unittest.mock import patch, MagicMock, PropertyMock
from pathlib import Path
import pytest
import py4vasp._util.version as version
import py4vasp.raw as raw
@pytest.fixture
def mock_file():
@contextmanager
def _mock_file(name, ref):
cm_init = patch.object(raw.File, "__init__", autospec=True, return_value=None)
cm_sut = patch.object(raw.File, name, new_callable=PropertyMock)
cm_close = patch.object(raw.File, "close", autospec=True)
with cm_init as init, cm_sut as sut, cm_close as close:
sut.return_value = {"default": ref}
yield {"init": init, "sut": sut, "close": close}
return _mock_file
@pytest.fixture
def check_read():
def _check_read(cls, mocks, ref, default_filename=None):
_check_read_from_open_file(cls, mocks, ref)
_check_read_from_default_file(cls, mocks, ref, default_filename)
_check_read_from_filename(cls, mocks, ref)
_check_read_from_path(cls, mocks, ref, default_filename)
def _check_read_from_open_file(cls, mocks, ref):
with raw.File() as file:
file._path = Path.cwd()
obj = _create_obj(cls, file, _assert_not_called, mocks)
_check_raw_data(obj, ref, _assert_only_sut, mocks)
def _check_read_from_default_file(cls, mocks, ref, default_filename):
obj = _create_obj(cls, None, _assert_not_called, mocks)
_check_raw_data(obj, ref, _assert_all_called, mocks)
assert _first_init_arg(mocks) == default_filename
def _check_read_from_filename(cls, mocks, ref):
filename = "user_selected_file"
obj = _create_obj(cls, filename, _assert_not_called, mocks)
_check_raw_data(obj, ref, _assert_all_called, mocks)
assert _first_init_arg(mocks) == filename
def _check_read_from_path(cls, mocks, ref, default_filename):
path = Path.cwd()
obj = _create_obj(cls, path, _assert_not_called, mocks)
_check_raw_data(obj, ref, _assert_all_called, mocks)
if default_filename is None:
assert _first_init_arg(mocks) == path
else:
assert _first_init_arg(mocks) == path / default_filename
def _create_obj(cls, file, assertion, mocks):
_reset_mocks(mocks)
obj = cls.from_file(file)
assertion(mocks)
return obj
def _check_raw_data(obj, ref, assertion, mocks):
_reset_mocks(mocks)
with obj._data_dict_from_context() as actual:
assert actual["default"] == ref
assertion(mocks)
def _assert_not_called(mocks):
mocks["init"].assert_not_called()
mocks["sut"].assert_not_called()
mocks["close"].assert_not_called()
def _assert_only_sut(mocks):
mocks["init"].assert_not_called()
mocks["sut"].assert_called_once()
mocks["close"].assert_not_called()
def _assert_all_called(mocks):
mocks["init"].assert_called_once()
mocks["sut"].assert_called_once()
mocks["close"].assert_called_once()
def _first_init_arg(mocks):
args, _ = mocks["init"].call_args
return args[1]
def _reset_mocks(mocks):
for mock in mocks.values():
mock.reset_mock()
return _check_read
@pytest.fixture
def check_descriptors():
def _check_descriptors(instance, descriptors):
classname = f"{instance.__module__}.{instance.__class__.__name__}"
for private_name, public_names in descriptors.items():
fullname = f"{classname}.{private_name}"
with patch(fullname, return_value=private_name):
for public_name in public_names:
assert private_name == getattr(instance, public_name)()
return _check_descriptors
@pytest.fixture
def outdated_version():
return raw.RawVersion(version.minimal_vasp_version.major - 1)
@pytest.fixture
def format_():
return DisplayFormatter().format
| en | 0.5538 | # Copyright © VASP Software GmbH, # Licensed under the Apache License 2.0 (http://www.apache.org/licenses/LICENSE-2.0) | 2.082255 | 2 |
deprecated/grid_supply.py | mdbartos/RIPS | 1 | 6623346 | <reponame>mdbartos/RIPS
import pandas as pd
import numpy as np
import geopandas as gpd
from shapely import geometry
from scipy import spatial
#### SPECIFY SHAPEFILES
substations = '/home/akagi/Desktop/electricity_data/Substations.shp'
s = gpd.read_file(substations)
#STATIC
generation = '/home/akagi/Desktop/electricity_data/Generation.shp'
g_sta = gpd.read_file(generation)
# DYNAMIC
plant_860 = pd.read_excel('/home/akagi/Documents/EIA_form_data/eia8602012/PlantY2012.xlsx', header=1)
gen_860 = pd.read_excel('/home/akagi/Documents/EIA_form_data/eia8602012/GeneratorY2012.xlsx', sheetname='Operable', header=1)
plant_cap = pd.merge(plant_860, gen_860, on='Plant Code').groupby('Plant Code').sum()[['Summer Capacity (MW)', 'Winter Capacity (MW)', 'Nameplate Capacity (MW)']]
plant_chars = plant_860.set_index('Plant Code')[['Plant Name', 'Utility ID', 'NERC Region', 'Grid Voltage (kV)', 'Latitude', 'Longitude']]
g_dyn = pd.concat([plant_cap, plant_chars], axis=1).dropna(subset=['Longitude', 'Latitude'])
#### FIND NEAREST NEIGHBORS
tree = spatial.cKDTree(np.vstack(s.geometry.apply(lambda x: x.coords[0]).values))
node_query_sta = tree.query(np.vstack(g_sta.geometry.apply(lambda x: x.coords[0]).values))
node_query_dyn = tree.query(np.vstack(g_dyn[['Longitude', 'Latitude']].values))
sta_crosswalk = pd.DataFrame(np.column_stack([g_sta[['UNIQUE_ID', 'S_CAP_MW']].values, s.iloc[node_query_sta[1]]['UNIQUE_ID'].values.astype(int)]), columns=['GEN_ID', 'S_CAP_MW', 'SUB_ID'])
sta_crosswalk = sta_crosswalk[['GEN_ID', 'SUB_ID', 'S_CAP_MW']]
sta_crosswalk.to_csv('gen_to_sub_static.csv')
| import pandas as pd
import numpy as np
import geopandas as gpd
from shapely import geometry
from scipy import spatial
#### SPECIFY SHAPEFILES
substations = '/home/akagi/Desktop/electricity_data/Substations.shp'
s = gpd.read_file(substations)
#STATIC
generation = '/home/akagi/Desktop/electricity_data/Generation.shp'
g_sta = gpd.read_file(generation)
# DYNAMIC
plant_860 = pd.read_excel('/home/akagi/Documents/EIA_form_data/eia8602012/PlantY2012.xlsx', header=1)
gen_860 = pd.read_excel('/home/akagi/Documents/EIA_form_data/eia8602012/GeneratorY2012.xlsx', sheetname='Operable', header=1)
plant_cap = pd.merge(plant_860, gen_860, on='Plant Code').groupby('Plant Code').sum()[['Summer Capacity (MW)', 'Winter Capacity (MW)', 'Nameplate Capacity (MW)']]
plant_chars = plant_860.set_index('Plant Code')[['Plant Name', 'Utility ID', 'NERC Region', 'Grid Voltage (kV)', 'Latitude', 'Longitude']]
g_dyn = pd.concat([plant_cap, plant_chars], axis=1).dropna(subset=['Longitude', 'Latitude'])
#### FIND NEAREST NEIGHBORS
tree = spatial.cKDTree(np.vstack(s.geometry.apply(lambda x: x.coords[0]).values))
node_query_sta = tree.query(np.vstack(g_sta.geometry.apply(lambda x: x.coords[0]).values))
node_query_dyn = tree.query(np.vstack(g_dyn[['Longitude', 'Latitude']].values))
sta_crosswalk = pd.DataFrame(np.column_stack([g_sta[['UNIQUE_ID', 'S_CAP_MW']].values, s.iloc[node_query_sta[1]]['UNIQUE_ID'].values.astype(int)]), columns=['GEN_ID', 'S_CAP_MW', 'SUB_ID'])
sta_crosswalk = sta_crosswalk[['GEN_ID', 'SUB_ID', 'S_CAP_MW']]
sta_crosswalk.to_csv('gen_to_sub_static.csv') | en | 0.149557 | #### SPECIFY SHAPEFILES #STATIC # DYNAMIC #### FIND NEAREST NEIGHBORS | 2.481088 | 2 |
docxxslt/package.py | backbohne/docx-xslt | 6 | 6623347 | <reponame>backbohne/docx-xslt<gh_stars>1-10
from zipfile import ZipFile
class Package(object):
    """ZipFile wrapper to append/update/remove files from zip.

    Member contents are cached in ``self.content`` (name -> bytes/str);
    the archive is only touched by :meth:`read` and :meth:`write`.
    """

    def __init__(self, filename=None):
        self.filename = filename  # default archive used by read()/write()
        self.content = {}

    def read(self, filename=None):
        """Load every member of the archive into the in-memory cache."""
        filename = filename or self.filename
        with ZipFile(filename, 'r') as zip:
            # Use a distinct loop variable; previously the loop shadowed
            # the `filename` parameter.
            for name in zip.namelist():
                self.content[name] = zip.read(name)

    def write(self, filename=None):
        """Write the cached members out as a new archive."""
        filename = filename or self.filename
        with ZipFile(filename, 'w') as zip:
            for name, content in self.content.items():
                zip.writestr(name, content)

    def get(self, filename):
        """Return the cached content of `filename` or raise IndexError."""
        if self.has(filename):
            return self.content[filename]
        else:
            raise IndexError("%s does not exist" % filename)

    def has(self, filename):
        """Return True if `filename` is a cached member."""
        return filename in self.content

    def update(self, filename, content):
        """Replace the content of an existing member or raise IndexError."""
        if self.has(filename):
            self.content[filename] = content
        else:
            raise IndexError("%s does not exist" % filename)

    def append(self, filename, content):
        """Add a new member; raise IndexError if it already exists."""
        if self.has(filename):
            raise IndexError("%s already exists" % filename)
        else:
            self.content[filename] = content

    def remove(self, filename):
        """Delete a member from the cache or raise IndexError."""
        if self.has(filename):
            del self.content[filename]
        else:
            raise IndexError("%s does not exist" % filename)
| from zipfile import ZipFile
class Package(object):
"""ZipFile wrapper to append/update/remove files from zip"""
def __init__(self, filename=None):
self.filename = filename
self.content = {}
def read(self, filename=None):
filename = filename or self.filename
with ZipFile(filename, 'r') as zip:
for filename in zip.namelist():
self.content[filename] = zip.read(filename)
def write(self, filename=None):
filename = filename or self.filename
with ZipFile(filename, 'w') as zip:
for filename, content in self.content.items():
zip.writestr(filename, content)
def get(self, filename):
if self.has(filename):
return self.content[filename]
else:
raise IndexError("%s does not exists" % filename)
def has(self, filename):
return filename in self.content
def update(self, filename, content):
if self.has(filename):
self.content[filename] = content
else:
raise IndexError("%s does not exists" % filename)
def append(self, filename, content):
if self.has(filename):
raise IndexError("%s does already exists" % filename)
else:
self.content[filename] = content
def remove(self, filename):
if self.has(filename):
del self.content[filename]
else:
raise IndexError("%s does not exists" % filename) | en | 0.544446 | ZipFile wrapper to append/update/remove files from zip | 3.775686 | 4 |
caption_generation/part6.py | alexdseo/Falsified-Scientific-Literature-Generation | 0 | 6623348 | <filename>caption_generation/part6.py<gh_stars>0
import os

# Caption 500 generated images by POSTing each PNG to a locally running
# inception/v3 captioning service, appending every response to out.out.
# NOTE(review): the shell command is built by string concatenation and run
# via os.system — acceptable only because the filenames are trusted and
# generated locally.
for i in range(0,500):
    command='curl -X POST "localhost:8764/inception/v3/caption/image" --data-binary @generated_images/samples_16_' + str(i) + '.png >> out.out'
    os.system(command)
import os
for i in range(0,500):
command='curl -X POST "localhost:8764/inception/v3/caption/image" --data-binary @generated_images/samples_16_' + str(i) + '.png >> out.out'
os.system(command) | none | 1 | 2.124006 | 2 | |
src/modules/perception/node_camera.py | joeyzhu00/FusionAD | 33 | 6623349 | #!/usr/bin/env python
"""
Publishes raw video from OpenCV VideoCapture.
Subscribes to:
None
Publishes to:
/raw_USBcamera_images
"""
import cv2
import rospy
import roslib
import numpy as np
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
def main():
    """Capture frames from USB camera 0 and publish them at 30 Hz.

    Python 2 code (print statement); publishes sensor_msgs/Image on
    /raw_USBcamera_images until ROS shuts the node down.
    """
    #Initialize node
    rospy.init_node('camera')
    #Create bridge object
    bridge = CvBridge()
    #Create publisher and to publish raw image data
    pub = rospy.Publisher("/raw_USBcamera_images", Image, queue_size=1000)
    rate = rospy.Rate(30)
    #initialize camera
    cap = cv2.VideoCapture(0)
    print "Camera Initialized"

    while not rospy.is_shutdown():
        frameReadCorrectly, frame = cap.read()
        #If frame is empty, don't send anything (Stuff crashes)
        if frameReadCorrectly:
            # "bgr8" matches OpenCV's default BGR channel ordering.
            pub.publish(bridge.cv2_to_imgmsg(frame, "bgr8"))
        rate.sleep()
    # Release the camera handle once ROS signals shutdown.
    cap.release()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    try:
        main()
    except rospy.ROSInterruptException:
        # Normal exit path when ROS interrupts the node; nothing to clean up.
        pass
| #!/usr/bin/env python
"""
Publishes raw video from OpenCV VideoCapture.
Subscribes to:
None
Publishes to:
/raw_USBcamera_images
"""
import cv2
import rospy
import roslib
import numpy as np
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
def main():
#Initialize node
rospy.init_node('camera')
#Create bridge object
bridge = CvBridge()
#Create publisher and to publish raw image data
pub = rospy.Publisher("/raw_USBcamera_images", Image, queue_size=1000)
rate = rospy.Rate(30)
#initialize camera
cap = cv2.VideoCapture(0)
print "Camera Initialized"
while not rospy.is_shutdown():
frameReadCorrectly, frame = cap.read()
#If frame is empty, don't send anything (Stuff crashes)
if frameReadCorrectly:
pub.publish(bridge.cv2_to_imgmsg(frame, "bgr8"))
rate.sleep()
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
try:
main()
except rospy.ROSInterruptException:
pass
| en | 0.846919 | #!/usr/bin/env python Publishes raw video from OpenCV VideoCapture. Subscribes to: None Publishes to: /raw_USBcamera_images #Initialize node #Create bridge object #Create publisher and to publish raw image data #initialize camera #If frame is empty, don't send anything (Stuff crashes) | 2.892904 | 3 |
Bip44Coins.SOLANA.py | spletnik/bip_utils | 0 | 6623350 | <filename>Bip44Coins.SOLANA.py<gh_stars>0
#!/usr/bin/python
# Derive a Solana address/keypair from a BIP39 mnemonic.
# Usage: script.py <account_id> <index> <base64-encoded mnemonic>
# Prints a JSON object {"address": ..., "private_key": ...} to stdout.
# NOTE(review): this prints private key material to stdout — callers must
# treat the output as sensitive.
import sys
import binascii
import base64
from bip_utils import (
    Bip39WordsNum, Bip39MnemonicGenerator, Bip39SeedGenerator, Bip44Changes, Bip44Coins, Bip44
)

num_args = len(sys.argv)
args = str(sys.argv)

# print('Number of arguments:', num_args, 'arguments.' )
# print('Argument List:', args )

# Require all three positional arguments; exit silently otherwise.
if num_args < 4 :
    #print('Parameters are user_id, index')
    exit()

account_id = int(sys.argv[1])
index = int(sys.argv[2])
base64_message = sys.argv[3]

# print('Account ID: ', account_id)
# print('Account Index: ', index)

# Generate random mnemonic
#mnemonic = Bip39MnemonicGenerator().FromWordsNumber(Bip39WordsNum.WORDS_NUM_24)
#print("Mnemonic string: %s" % mnemonic)
#mnemonic = VAULT.get_solana_seed( KEY )

#message = "Python is fun"
# message = "gallery hospital reflect tray strike pyramid scrap two proud cute trend sunny bulk almost surface trap license drastic fiber tumble rare purity dentist dice"
# message_bytes = message.encode('ascii')
# base64_bytes = base64.b64encode(message_bytes)
# base64_message = base64_bytes.decode('ascii')
# print(base64_message)

# Decode the base64 argument back into the plain mnemonic phrase.
base64_bytes = base64_message.encode('ascii')
message_bytes = base64.b64decode(base64_bytes)
message = message_bytes.decode('ascii')
#print(message)
mnemonic = message

# Generate seed from mnemonic
seed_bytes = Bip39SeedGenerator(mnemonic).Generate()

# Construct from seed
bip44_mst_ctx = Bip44.FromSeed(seed_bytes, Bip44Coins.SOLANA)

# Print master key
# print("Master key (bytes): %s" % bip44_mst_ctx.PrivateKey().Raw().ToHex())
# print("Master key (extended): %s" % bip44_mst_ctx.PrivateKey().ToExtended())
# print("Master key (WIF): %s" % bip44_mst_ctx.PrivateKey().ToWif())

# Walk the BIP44 path: purpose / coin / account / external chain / index.
bip44_acc_ctx = bip44_mst_ctx.Purpose().Coin().Account( account_id )
bip44_chg_ctx = bip44_acc_ctx.Change(Bip44Changes.CHAIN_EXT)
bip44_addr_ctx = bip44_chg_ctx.AddressIndex( index )

#print("%d. Address: %s %s" % (account_id, bip44_addr_ctx.PublicKey().ToAddress(), bip44_addr_ctx.PrivateKey().ToExtended()) )

# Solana keypairs are 64 bytes: 32-byte private key followed by the
# 32-byte public key (compressed key with its leading prefix byte dropped).
priv_key_bytes = bip44_addr_ctx.PrivateKey().Raw().ToBytes()
pub_key_bytes = bip44_addr_ctx.PublicKey().RawCompressed().ToBytes()[1:]
key_pair = priv_key_bytes + pub_key_bytes

print("{\"address\": \"%s\", \"private_key\": \"%s\"}" % (bip44_addr_ctx.PublicKey().ToAddress(), key_pair.hex() ) )
| <filename>Bip44Coins.SOLANA.py<gh_stars>0
#!/usr/bin/python
import sys
import binascii
import base64
from bip_utils import (
Bip39WordsNum, Bip39MnemonicGenerator, Bip39SeedGenerator, Bip44Changes, Bip44Coins, Bip44
)
num_args = len(sys.argv)
args = str(sys.argv)
# print('Number of arguments:', num_args, 'arguments.' )
# print('Argument List:', args )
if num_args < 4 :
#print('Parameters are user_id, index')
exit()
account_id = int(sys.argv[1])
index = int(sys.argv[2])
base64_message = sys.argv[3]
# print('Account ID: ', account_id)
# print('Account Index: ', index)
# Generate random mnemonic
#mnemonic = Bip39MnemonicGenerator().FromWordsNumber(Bip39WordsNum.WORDS_NUM_24)
#print("Mnemonic string: %s" % mnemonic)
#mnemonic = VAULT.get_solana_seed( KEY )
#message = "Python is fun"
# message = "gallery hospital reflect tray strike pyramid scrap two proud cute trend sunny bulk almost surface trap license drastic fiber tumble rare purity dentist dice"
# message_bytes = message.encode('ascii')
# base64_bytes = base64.b64encode(message_bytes)
# base64_message = base64_bytes.decode('ascii')
# print(base64_message)
base64_bytes = base64_message.encode('ascii')
message_bytes = base64.b64decode(base64_bytes)
message = message_bytes.decode('ascii')
#print(message)
mnemonic = message
# Generate seed from mnemonic
seed_bytes = Bip39SeedGenerator(mnemonic).Generate()
# Construct from seed
bip44_mst_ctx = Bip44.FromSeed(seed_bytes, Bip44Coins.SOLANA)
# Print master key
# print("Master key (bytes): %s" % bip44_mst_ctx.PrivateKey().Raw().ToHex())
# print("Master key (extended): %s" % bip44_mst_ctx.PrivateKey().ToExtended())
# print("Master key (WIF): %s" % bip44_mst_ctx.PrivateKey().ToWif())
bip44_acc_ctx = bip44_mst_ctx.Purpose().Coin().Account( account_id )
bip44_chg_ctx = bip44_acc_ctx.Change(Bip44Changes.CHAIN_EXT)
bip44_addr_ctx = bip44_chg_ctx.AddressIndex( index )
#print("%d. Address: %s %s" % (account_id, bip44_addr_ctx.PublicKey().ToAddress(), bip44_addr_ctx.PrivateKey().ToExtended()) )
priv_key_bytes = bip44_addr_ctx.PrivateKey().Raw().ToBytes()
pub_key_bytes = bip44_addr_ctx.PublicKey().RawCompressed().ToBytes()[1:]
key_pair = priv_key_bytes + pub_key_bytes
print("{\"address\": \"%s\", \"private_key\": \"%s\"}" % (bip44_addr_ctx.PublicKey().ToAddress(), key_pair.hex() ) )
| en | 0.374315 | #!/usr/bin/python # print('Number of arguments:', num_args, 'arguments.' ) # print('Argument List:', args ) #print('Parameters are user_id, index') # print('Account ID: ', account_id) # print('Account Index: ', index) # Generate random mnemonic #mnemonic = Bip39MnemonicGenerator().FromWordsNumber(Bip39WordsNum.WORDS_NUM_24) #print("Mnemonic string: %s" % mnemonic) #mnemonic = VAULT.get_solana_seed( KEY ) #message = "Python is fun" # message = "gallery hospital reflect tray strike pyramid scrap two proud cute trend sunny bulk almost surface trap license drastic fiber tumble rare purity dentist dice" # message_bytes = message.encode('ascii') # base64_bytes = base64.b64encode(message_bytes) # base64_message = base64_bytes.decode('ascii') # print(base64_message) #print(message) # Generate seed from mnemonic # Construct from seed # Print master key # print("Master key (bytes): %s" % bip44_mst_ctx.PrivateKey().Raw().ToHex()) # print("Master key (extended): %s" % bip44_mst_ctx.PrivateKey().ToExtended()) # print("Master key (WIF): %s" % bip44_mst_ctx.PrivateKey().ToWif()) #print("%d. Address: %s %s" % (account_id, bip44_addr_ctx.PublicKey().ToAddress(), bip44_addr_ctx.PrivateKey().ToExtended()) ) | 2.256339 | 2 |