content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
from typing import Any, Optional
import torch
from InnerEye.ML.utils.image_util import get_class_weights
from InnerEye.ML.utils.supervised_criterion import SupervisedLearningCriterion
class SoftDiceLoss(SupervisedLearningCriterion):
    """
    Implementation of Soft-Dice Loss.

    Reference: Milletari, F., Navab, N., & Ahmadi, S. (2016). V-Net: Fully Convolutional Neural Networks for Volumetric
    Medical Image Segmentation. In International Conference on 3D Vision (3DV).
    """

    def __init__(self,
                 eps: float = 1e-10,
                 apply_softmax: bool = True,
                 class_weight_power: Optional[float] = None):
        """
        :param eps: A small constant to smooth Sorensen-Dice Loss function. Additionally, it avoids division by zero.
        :param apply_softmax: If true, the input to the loss function will be first fed through a Softmax operation.
            If false, the input to the loss function will be used as is.
        :param class_weight_power: power to raise 1/C to, where C is the number of voxels in each class. Should be
            non-negative to help increase accuracy on small structures.
        """
        super().__init__()
        #: Small value to avoid division by zero errors.
        self.eps = eps
        # Whether to normalize the network output with a softmax over dim=1 (the class dimension) first.
        self.apply_softmax = apply_softmax
        # Optional exponent for per-class weighting; None or 0.0 disables weighting (see forward_minibatch).
        self.class_weight_power = class_weight_power

    def forward_minibatch(self, output: torch.Tensor, target: torch.Tensor, **kwargs: Any) -> torch.Tensor:
        """
        Computes the forward pass of soft-dice loss. It assumes the output and target have Batch x Classes x ...
        dimensions, with the last dimensions being an arbitrary number of spatial dimensions.

        :param output: The output of the network.
        :param target: The target of the network.
        :return: The soft-dice loss.
        :raises ValueError: If the shape of the tensors is incorrect.
        :raises TypeError: If output or target are not torch.tensors.
        """
        # Check Types
        if not torch.is_tensor(output) or not torch.is_tensor(target):
            raise TypeError("Output and target must be torch.Tensors (type(output): {}, type(target): {})".
                            format(type(output), type(target)))
        # Check dimensions
        if len(output.shape) < 3:
            raise ValueError("The shape of the output and target must be at least 3, Batch x Class x ... "
                             "(output.shape: {})".format(output.shape))
        if output.shape != target.shape:
            raise ValueError("The output and target must have the same shape (output.shape: {}, target.shape: {})".
                             format(output.shape, target.shape))
        if self.apply_softmax:
            output = torch.nn.functional.softmax(output, dim=1)
        # Get the spatial dimensions; we'll sum numerator and denominator over these for efficiency.
        axes = list(range(2, len(output.shape)))
        # Eps is added to all products, avoiding division errors and problems
        # when a class does not exist in the current patch
        eps = torch.tensor([self.eps])
        if output.is_cuda:
            eps = eps.cuda(device=output.device)
        # Numerator: per (batch, class) sum of element-wise products over the spatial axes.
        intersection = torch.sum(output * target + eps, axes)
        if self.class_weight_power is not None and self.class_weight_power != 0.0:
            # Multiply target by the class weight.
            class_weights = get_class_weights(target, self.class_weight_power)
            # Scale each class column of the (batch, class) intersection matrix by its weight.
            # noinspection PyTypeChecker
            intersection = torch.einsum("ij,j->ij", intersection, class_weights)
        # Denominator: per (batch, class) sums of squares of prediction and target.
        output_sum_square = torch.sum(output * output + eps, axes)
        target_sum_square = torch.sum(target * target + eps, axes)
        sum_squares = output_sum_square + target_sum_square
        # Average per Batch and Class
        # noinspection PyTypeChecker
        return 1.0 - 2.0 * torch.mean(intersection / sum_squares)  # type: ignore
| [
2,
220,
16529,
22369,
438,
198,
2,
220,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
220,
49962,
739,
262,
17168,
13789,
357,
36393,
737,
4091,
38559,
24290,
287,
262,
29924,
6808,
329,
5964,
1321,
13,
198,
2,
... | 2.707134 | 1,598 |
import time
from threading import Thread
from srunner.scenariomanager.carla_data_provider import CarlaDataProvider
import scene_layout as scene_layout_parser # This should come from CARLA path
class ObjectFinder(object):
"""
Pseudo sensor that gives you the position of all the other dynamic objects and their states
"""
def __init__(self, world, reading_frequency):
"""
The object finder is used to give you the positions of all the
other dynamic objects in the scene and their properties.
"""
# Give the entire access there
# The vehicle where the class reads the speed
self._world = world
# Map used by the object finder
self._map = CarlaDataProvider.get_map()
# How often do you look at your speedometer in hz
self._reading_frequency = reading_frequency
self._callback = None
# Counts the frames
self._frame_number = 0
self._run_ps = True
self.find_objects()
def __call__(self):
""" We here work into getting all the dynamic objects """
return scene_layout_parser.get_dynamic_objects(self._world, self._map)
@threaded
| [
198,
11748,
640,
198,
198,
6738,
4704,
278,
1330,
14122,
198,
6738,
264,
16737,
13,
1416,
268,
2743,
5185,
3536,
13,
7718,
5031,
62,
7890,
62,
15234,
1304,
1330,
1879,
5031,
6601,
29495,
198,
11748,
3715,
62,
39786,
355,
3715,
62,
397... | 2.775744 | 437 |
#!/usr/bin/env python3
# coding: utf-8
import numpy as np
class VertWellMaker(object):
    """
    vertical well bound points and segments maker
    """

    def make(self, well, nw):
        """
        Build the boundary discretization for a vertical well.

        :param well: well object; its ``track[0]`` x/y is used as the well
            center and ``radius`` as the well-bore radius.
        :param nw: number of points used to discretize the well-bore circle.
        :return: ``[[x, y], pts, seg]`` -- the well center, the circle points
            and the connecting segments.
        """
        x = well.track[0].x
        y = well.track[0].y
        rw = well.radius
        # NOTE(review): self.__circle is defined elsewhere in this class (not
        # visible in this chunk); presumably it returns (points, segments) for
        # a circle of radius rw centered at (x, y) -- confirm against full file.
        pts, seg = self.__circle(x, y, nw, rw)
        return [[x, y], pts, seg]
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
299,
32152,
355,
45941,
628,
198,
4871,
24417,
5779,
48890,
7,
15252,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
11723,... | 2.054945 | 182 |
# File containing helper functions for computing similarity and clustering PDB active sites
import numpy as np
import random
import matplotlib.pyplot as plt
def output_aa_string(residues):
    """
    Convert 3 letter amino acid code to single letter amino acids

    Input: list of residues in three letter codes
    Output: string of residues in single code
    """
    # Mapping from three-letter residue codes to one-letter codes.
    aa_dict = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K',
               'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',
               'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W',
               'ALA': 'A', 'VAL': 'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M'}
    # Translate each residue's three-letter type and concatenate in order.
    return ''.join(aa_dict.get(res.type) for res in residues)
def match(a1, a2):
    """
    Find indexes of matches for both alignment a1 and alignment a2

    Input: two strings of equal length after alignment ("-" marks a gap)
    Outputs: indexes (into the ungapped sequences) where the same character
    occurs at the same alignment column in both strings
    """
    inds_1, inds_2 = [], []
    # i / j track the current position in the ungapped versions of a1 / a2.
    i, j = 0, 0
    for s in range(len(a1)):
        # Bug fix: compare characters with equality (==), not identity (is).
        # "is" only appeared to work because CPython interns 1-char strings.
        if a1[s] == a2[s] and a1[s] != "-":
            inds_1.append(i)
            inds_2.append(j)
        # Advance the ungapped counters only on non-gap columns.
        if a1[s] != "-":
            i += 1
        if a2[s] != "-":
            j += 1
    return inds_1, inds_2
def create_coord_matrix(site, index):
    """
    Create a matrix of CA atom x,y,z coordinates for every residue contained
    in an active site

    Inputs: active site and list of indices where two alignments match
    Output: matrix with x,y, and z coordinates for every CA atom
    """
    rows = []
    for res in np.take(site.residues, index):
        # Pick the first alpha-carbon atom of this residue.
        ca = [k for k, atom in enumerate(res.atoms)
              if atom.type == "CA" or atom.type == "CA A"][0]
        rows.append(list(res.atoms[ca].coords))
    if not rows:
        return np.empty([0, 3])
    return np.asarray(rows, dtype=float)
def initialize_k_mediods(data, k):
    """
    Randomly select k points and set as cluster centers

    Input: similarity matrix and # of clusters
    Output: centers for clusters (k distinct row indices)
    """
    candidate_indices = range(len(data))
    return random.sample(candidate_indices, k)
def assign_k_clusters(data, centers):
    """
    Assign each data point to its nearest cluster center

    Input: similarity matrix and k center indices
    Output: one list of point indices per center
    """
    center_rows = np.take(data, centers, axis=0)
    # For every point (column), the center with the highest similarity wins.
    nearest = np.argmax(center_rows, axis=0)
    return [list(np.where(nearest == c)[0]) for c in range(len(centers))]
def calculate_cost(data, centers, clusters):
    """
    Calculate the sum of similarities of cluster elements to centers
    """
    # For each center, pick its row of the similarity matrix restricted to the
    # members of its cluster, and accumulate all those similarities.
    return sum(np.sum(data[centers[c]][clusters[c]]) for c in range(len(centers)))
def recalculate_centers(data, k, clusters):
    """
    Recalculate cluster centers for data

    Input: matrix, number of clusters, and cluster assignments
    Output: new cluster centers (one (x, y) pair per cluster)
    """
    centers = []
    for k_i in range(k):
        # NOTE(review): elements of `clusters` are compared to the cluster id,
        # so this expects a flat label list (one label per point). Confirm
        # callers do not pass the list-of-lists built by assign_k_clusters.
        inds = [i for i, j in enumerate(clusters) if j == k_i]
        n = np.take(data, inds, axis=0)
        if len(inds) == 0:
            # Empty cluster: fall back to a randomly chosen data point.
            i = np.random.randint(len(data))
            centers.append((data[i,0], data[i,1]))
        elif len(inds) < 2:
            # Single-member cluster: the member itself is the center.
            centers.append((n[0][0], n[0][1]))
        else:
            # NOTE(review): axis=1 sums along each member's row; for a mean
            # center one would expect axis=0 -- verify this is intentional.
            result = np.sum(n, axis=1)/len(inds)
            # NOTE(review): (result[0], result[0]) repeats the first component
            # while the other branches use two distinct components -- possible
            # typo for (result[0], result[1]); confirm before changing.
            centers.append((result[0], result[0]))
    return centers
def dist_HC(active_sites, clusters, c_new, data):
    """
    Output the distance of the new cluster to all other clusters
    by computing the average similarity (average linkage)
    """
    new_members = clusters.get(c_new)
    distances = np.array([])
    for cluster_id in range(c_new):
        members = clusters.get(cluster_id)
        if members is None:
            # Cluster id no longer exists (e.g. merged away): mark with -inf.
            distances = np.append(distances, float("-inf"))
        else:
            # Average pairwise similarity between the new cluster and this one,
            # ignoring any non-finite entries in the submatrix.
            block = data[np.ix_(new_members, members)]
            avg = (np.ma.masked_invalid(block).sum()) / (len(new_members) * len(members))
            distances = np.append(distances, avg)
    return distances
def output_cluster_list(active_sites, clusters):
    """
    Output the active site names in list of atoms format
    """
    # Map each cluster of indices to the corresponding active-site objects.
    return [[active_sites[member] for member in cluster] for cluster in clusters]
| [
2,
9220,
7268,
31904,
5499,
329,
14492,
26789,
290,
32966,
1586,
350,
11012,
4075,
5043,
198,
198,
11748,
299,
32152,
355,
45941,
220,
198,
11748,
4738,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
4299,
5072,... | 2.330855 | 1,883 |
# -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2020, 2021 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Pytest configuration for REANA-DB."""
from datetime import datetime, timedelta
from uuid import uuid4
import mock
import pytest
from reana_db.models import Resource, RunStatus, User, Workflow
@pytest.fixture(scope="module")
def db():
    """Initialize database fixture."""
    # Imported lazily so the database is only configured when a test in this
    # module actually requests the fixture.
    from reana_db.database import init_db
    init_db()
    # Seed the default Resource rows that other fixtures/tests rely on.
    Resource.initialise_default_resources()
@pytest.fixture
def new_user(session, db):
    """Create and persist a new user with random credentials."""
    account = User(
        email="{}@reana.io".format(uuid4()), access_token="secretkey-{}".format(uuid4())
    )
    session.add(account)
    session.commit()
    return account
@pytest.fixture
def run_workflow(session, new_user):
    """Mock workflow run factory."""
    def _run_workflow(time_elapsed_seconds=0.5, finish=True):
        """Mock a workflow run.

        :param time_elapsed_seconds: simulated run duration in seconds.
        :param finish: if True, transition the workflow to ``finished``.
        """
        id_ = uuid4()
        workflow = Workflow(
            id_=str(id_),
            name="test_{}".format(id_),
            owner_id=new_user.id_,
            reana_specification=[],
            type_="serial",
            logs="",
            status=RunStatus.created,
        )
        # start workflow
        workflow.status = RunStatus.running
        session.add(workflow)
        session.commit()
        # NOTE(review): termination_value is never read in this function;
        # presumably MockDatetime (defined elsewhere in this file) reports
        # this moment as "now" -- confirm the patch below actually uses it.
        termination_value = datetime.now() + timedelta(seconds=time_elapsed_seconds)
        if finish:
            # Patch the models' clock so the finish timestamp is controlled.
            with mock.patch("reana_db.models.datetime", MockDatetime):
                Workflow.update_workflow_status(
                    session, workflow.id_, RunStatus.finished
                )
        return workflow

    return _run_workflow
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
4526,
31574,
13,
198,
2,
15069,
357,
34,
8,
12131,
11,
33448,
327,
28778,
13,
198,
2,
198,
2,
4526,
31574,
318,
1479,
3788,
26... | 2.344114 | 773 |
import os
import re
from shutil import copyfile, rmtree
import src.ffmpeg_helper as ffmpeg
from src.__MACRO__ import LOG, INFO, ERROR
from src.video_inference import process_frames, open_session
def frames_name_comparer(name):
    '''Builds the sort key for a frame filename: the leading frame number,
    then the optional "_" marker, so interpolated frames sort after their
    source frame.

    name(str) -- the current name to sort'''
    number, marker = re.findall('([0-9]+)(_)?', name)[0]
    return (int(number), marker)
def format_duration(seconds):
    '''Returns a formatted hh:mm:ss string to indicate the duration of a video.

    seconds(int) -- the duration to format, in seconds

    Bug fix: minutes are now reduced modulo the hour, so e.g. 3661 seconds
    formats as 01:01:01 instead of the previous 01:61:01.'''
    hours, remainder = divmod(seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    return '{:02d}:{:02d}:{:02d}'.format(hours, minutes, secs)
def convert(args):
    '''Converts a video by doubling its framerate.

    args(dict) -- the input parameters: source/output paths, working_dir,
    model_path, scale, interpolation mode, frame_quality, encoder settings
    and the temp buffer length (minutes per processed chunk).
    '''
    # Initial setup and info
    LOG('Processing {}'.format(args['source']))
    width, height, framerate, duration = ffmpeg.get_video_info(args['source'])
    if any((info is None for info in [width, height, framerate, duration])):
        ERROR('Error retrieving video info')
    if duration < 2:
        ERROR('The video file is either empty or too short')
    INFO('Total duration: {}'.format(format_duration(duration)))
    INFO('Framerate: {}/{}'.format(framerate[0], framerate[1]))
    INFO('Resolution: {}x{}'.format(width, height))

    # Validate the target resolution
    if args['scale'] is not None:
        if (args['scale'] * height) % width != 0:
            ERROR('The scaled resolution would produce a fractional height')
        if (args['scale'] * height // width) % 4 != 0:
            ERROR('The scaled resolution must be a multiple of 4 to be encoded correctly')

    # calculate the framerate parameters to encode the video chunks
    # NOTE(review): in_fps == out_fps in both branches; confirm that is the
    # intended contract of ffmpeg.create_video's two rate parameters.
    if args['interpolation'] == 'double':
        in_fps, out_fps = '{}/{}'.format(framerate[0] * 2, framerate[1]), '{}/{}'.format(framerate[0] * 2, framerate[1])
    else:
        in_fps, out_fps = '{}/{}'.format(framerate[0], framerate[1]), '{}/{}'.format(framerate[0], framerate[1])

    # Loop until all video chunks have been created
    frames_path = os.path.join(args['working_dir'], 'frames')
    # NOTE: 'temp_buffer_lenght' (sic) must match the key produced by the CLI
    # parser -- do not "fix" the spelling here alone. Value is minutes -> seconds.
    chunk_timestep, video_timestep, step_size = 1, 0, args['temp_buffer_lenght'] * 60
    chunks_paths = []
    with open_session(args['model_path'], frames_path) as session:
        while True:
            LOG('Converting {} to {}'.format(format_duration(video_timestep), format_duration(min(video_timestep + step_size, duration))))

            # Extract the frames from the n-th video chunk
            if os.path.isdir(frames_path):
                rmtree(frames_path)
            extract_ok = ffmpeg.extract_frames(
                args['source'], frames_path,
                [args['scale'], -1] if args['scale'] is not None else None,
                video_timestep, step_size, extension=args['frame_quality'])

            # progress checks
            if not extract_ok or (video_timestep == 0 and not os.listdir(frames_path)):
                rmtree(args['working_dir'])
                ERROR('Failed to extract frames')
            video_timestep += step_size

            # Inference pass on the n-th video chunk; an empty frames dir
            # means the whole source has been consumed.
            if not os.listdir(frames_path):
                break
            process_frames(frames_path, session, args['post_processing'] == 'shader')

            # sort the frames by alternating the original and the interpolated
            LOG('Preparing generated frames')
            frames = os.listdir(frames_path)
            frames.sort(key=frames_name_comparer)

            # duplicate the last frame (no interpolation available)
            copy_filename = '{}_{}'.format(re.findall('([0-9]+)', frames[-1])[0], frames[-1][-4:])
            copyfile(os.path.join(frames_path, frames[-1]), os.path.join(frames_path, copy_filename))
            frames += [copy_filename]
            INFO('{} total frame(s) to encode'.format(len(frames)))

            # rename the source frames to encode; iterate backwards so a
            # renamed file never collides with one not yet renamed
            for i in range(len(frames), 0, -1):
                source = os.path.join(frames_path, frames[i - 1])
                destination = os.path.join(frames_path, '{:05d}.{}'.format(i, args['frame_quality']))
                os.rename(source, destination)

            # encode the interpolated video
            LOG('Encoding video chunk #{}'.format(video_timestep // step_size))
            chunk_path = os.path.join(args['working_dir'], '_{}.mp4'.format(chunk_timestep))
            ffmpeg.create_video(os.path.join(frames_path, '%05d.{}'.format(args['frame_quality'])), chunk_path, in_fps, out_fps, args['encoder'], args['crf'], args['preset'])
            chunks_paths += [chunk_path]
            chunk_timestep += 1

    # prepare the list file (ffmpeg concat demuxer format)
    LOG('Preparing final merge, {} chunk(s) available'.format(len(chunks_paths)))
    list_path = os.path.join(args['working_dir'], 'list.txt')
    with open(list_path, 'w', encoding='utf-8') as txt:
        for path in chunks_paths:
            print('file \'{}\''.format(path), file=txt)

    # create the final resampled video
    LOG('Creating output video')
    ffmpeg.concat_videos(list_path, args['source'] if args['interpolation'] == 'double' else None, args['output'])
    rmtree(args['working_dir'])  # cleanup
    LOG('Video creation completed')
| [
11748,
28686,
198,
11748,
302,
198,
6738,
4423,
346,
1330,
4866,
7753,
11,
374,
16762,
631,
198,
11748,
12351,
13,
487,
43913,
62,
2978,
525,
355,
31246,
43913,
198,
6738,
12351,
13,
834,
44721,
13252,
834,
1330,
41605,
11,
24890,
11,
... | 2.439616 | 2,186 |
"""
Crafting - Griatch 2020
This is a general crafting engine. The basic functionality of crafting is to
combine any number of of items or tools in a 'recipe' to produce a new result.
item + item + item + tool + tool -> recipe -> new result
This is useful not only for traditional crafting but the engine is flexible
enough to also be useful for puzzles or similar.
## Installation
- Add the `CmdCraft` Command from this module to your default cmdset. This
allows for crafting from in-game using a simple syntax.
- Create a new module and add it to a new list in your settings file
(`server/conf/settings.py`) named `CRAFT_RECIPES_MODULES`, such as
`CRAFT_RECIPE_MODULES = ["world.recipes_weapons"]`.
- In the new module(s), create one or more classes, each a child of
`CraftingRecipe` from this module. Each such class must have a unique `.name`
property. It also defines what inputs are required and what is created using
this recipe.
- Objects to use for crafting should (by default) be tagged with tags using the
tag-category `crafting_material` or `crafting_tool`. The name of the object
doesn't matter, only its tag.
## Crafting in game
The default `craft` command handles all crafting needs.
::
> craft spiked club from club, nails
Here, `spiked club` specifies the recipe while `club` and `nails` are objects
the crafter must have in their inventory. These will be consumed during
crafting (by default only if crafting was successful).
A recipe can also require *tools* (like the `hammer` above). These must be
either in inventory *or* be in the current location. Tools are *not* consumed
during the crafting process.
::
> craft wooden doll from wood with knife
## Crafting in code
In code, you should use the helper function `craft` from this module. This
specifies the name of the recipe to use and expects all suitable
ingredients/tools as arguments (consumables and tools should be added together,
tools will be identified before consumables).
```python
from evennia.contrib.crafting import crafting
spiked_club = crafting.craft(crafter, "spiked club", club, nails)
```
The result is always a list with zero or more objects. A fail leads to an empty
list. The crafter should already have been notified of any error in this case
(this should be handle by the recipe itself).
## Recipes
A *recipe* is a class that works like an input/output blackbox: you initialize
it with consumables (and/or tools) if they match the recipe, a new
result is spit out. Consumables are consumed in the process while tools are not.
This module contains a base class for making new ingredient types
(`CraftingRecipeBase`) and an implementation of the most common form of
crafting (`CraftingRecipe`) using objects and prototypes.
Recipes are put in one or more modules added as a list to the
`CRAFT_RECIPE_MODULES` setting, for example:
```python
CRAFT_RECIPE_MODULES = ['world.recipes_weapons', 'world.recipes_potions']
```
Below is an example of a crafting recipe and how `craft` calls it under the
hood. See the `CraftingRecipe` class for details of which properties and
methods are available to override - the craft behavior can be modified
substantially this way.
```python
from evennia.contrib.crafting.crafting import CraftingRecipe
class PigIronRecipe(CraftingRecipe):
# Pig iron is a high-carbon result of melting iron in a blast furnace.
name = "pig iron" # this is what crafting.craft and CmdCraft uses
tool_tags = ["blast furnace"]
consumable_tags = ["iron ore", "coal", "coal"]
output_prototypes = [
{"key": "Pig Iron ingot",
"desc": "An ingot of crude pig iron.",
"tags": [("pig iron", "crafting_material")]}
]
# for testing, conveniently spawn all we need based on the tags on the class
tools, consumables = PigIronRecipe.seed()
recipe = PigIronRecipe(caller, *(tools + consumables))
result = recipe.craft()
```
If the above class was added to a module in `CRAFT_RECIPE_MODULES`, it could be
called using its `.name` property, as "pig iron".
The [example_recipes](api:evennia.contrib.crafting.example_recipes) module has
a full example of the components for creating a sword from base components.
----
"""
from copy import copy
from evennia.utils.utils import iter_to_str, callables_from_module, inherits_from, make_iter
from evennia.commands.cmdset import CmdSet
from evennia.commands.command import Command
from evennia.prototypes.spawner import spawn
from evennia.utils.create import create_object
_RECIPE_CLASSES = {}
def _load_recipes():
    """
    Delayed loading of recipe classes. This parses
    `settings.CRAFT_RECIPE_MODULES`.
    """
    from django.conf import settings

    global _RECIPE_CLASSES
    # Already populated on a previous call -- nothing to do.
    if _RECIPE_CLASSES:
        return
    module_paths = ["evennia.contrib.crafting.example_recipes"]
    if hasattr(settings, "CRAFT_RECIPE_MODULES"):
        module_paths += make_iter(settings.CRAFT_RECIPE_MODULES)
    for module_path in module_paths:
        # Register every recipe class found in the module, keyed by its name.
        for candidate in callables_from_module(module_path).values():
            if inherits_from(candidate, CraftingRecipeBase):
                _RECIPE_CLASSES[candidate.name] = candidate
class CraftingError(RuntimeError):
    """
    Crafting error.

    Base exception of the crafting system; raising it from a recipe hook
    aborts the crafting attempt (see `CraftingRecipeBase.craft`).
    """
class CraftingValidationError(CraftingError):
    """
    Error if crafting validation failed.

    Raised by `pre_craft` when the given inputs do not satisfy the recipe.
    """
class CraftingRecipeBase:
    """
    The recipe handles all aspects of performing a 'craft' operation. This is
    the base of the crafting system, intended to be replaced if you want to
    adapt it for very different functionality - see the `CraftingRecipe` child
    class for an implementation of the most common type of crafting using
    objects.

    Example of usage:
    ::

        recipe = CraftRecipe(crafter, obj1, obj2, obj3)
        result = recipe.craft()

    Note that the most common crafting operation is that the inputs are
    consumed - so in that case the recipe cannot be used a second time (doing so
    will raise a `CraftingError`)

    Process:

    1. `.craft(**kwargs)` - this starts the process on the initialized recipe. The kwargs
       are optional but will be passed into all of the following hooks.
    2. `.pre_craft(**kwargs)` - this normally validates inputs and stores them in
       `.validated_inputs`. Raises `CraftingValidationError` otherwise.
    3. `.do_craft(**kwargs)` - should return the crafted item(s) or the empty list. Any
       crafting errors should be immediately reported to user.
    4. `.post_craft(crafted_result, **kwargs)` - always called, even if `pre_craft`
       raised a `CraftingError` or `CraftingValidationError`.
       Should return `crafted_result` (modified or not).
    """

    name = "recipe base"
    # if set, allow running `.craft` more than once on the same instance.
    # don't set this unless crafting inputs are *not* consumed by the crafting
    # process (otherwise subsequent calls will fail).
    allow_reuse = False

    def __init__(self, crafter, *inputs, **kwargs):
        """
        Initialize the recipe.

        Args:
            crafter (Object): The one doing the crafting.
            *inputs (any): The ingredients of the recipe to use.
            **kwargs (any): Any other parameters that are relevant for
                this recipe.
        """
        self.crafter = crafter
        self.inputs = inputs
        # Extra flags stored now and merged into every later hook call.
        self.craft_kwargs = kwargs
        self.allow_craft = True
        self.validated_inputs = []

    def msg(self, message, **kwargs):
        """
        Send message to crafter. This is a central point to override if wanting
        to change crafting return style in some way.

        Args:
            message(str): The message to send.
            **kwargs: Any optional properties relevant to this send.
        """
        # NOTE(review): **kwargs is accepted but not forwarded to crafter.msg;
        # only a fixed message-type is passed -- confirm this is intended.
        self.crafter.msg(message, {"type": "crafting"})

    def pre_craft(self, **kwargs):
        """
        Hook to override.

        This is called just before crafting operation and is normally
        responsible for validating the inputs, storing data on
        `self.validated_inputs`.

        Args:
            **kwargs: Optional extra flags passed during initialization or
                `.craft(**kwargs)`.

        Raises:
            CraftingValidationError: If validation fails.
        """
        if self.allow_craft:
            # Default behavior: accept all inputs as-is (shallow copy).
            self.validated_inputs = self.inputs[:]
        else:
            raise CraftingValidationError

    def do_craft(self, **kwargs):
        """
        Hook to override.

        This performs the actual crafting. At this point the inputs are
        expected to have been verified already. If needed, the validated
        inputs are available on this recipe instance.

        Args:
            **kwargs: Any extra flags passed at initialization.

        Returns:
            any: The result of crafting.
        """
        return None

    def post_craft(self, crafting_result, **kwargs):
        """
        Hook to override.

        This is called just after crafting has finished. A common use of this
        method is to delete the inputs.

        Args:
            crafting_result (any): The outcome of crafting, as returned by `do_craft`.
            **kwargs: Any extra flags passed at initialization.

        Returns:
            any: The final crafting result.
        """
        return crafting_result

    def craft(self, raise_exception=False, **kwargs):
        """
        Main crafting call method. Call this to produce a result and make
        sure all hooks run correctly.

        Args:
            raise_exception (bool): If crafting would return `None`, raise
                exception instead.
            **kwargs (any): Any other parameters that is relevant
                for this particular craft operation. This will temporarily
                override same-named kwargs given at the creation of this recipe
                and be passed into all of the crafting hooks.

        Returns:
            any: The result of the craft, or `None` if crafting failed.

        Raises:
            CraftingValidationError: If recipe validation failed and
                `raise_exception` is True.
            CraftingError: If trying to rerun a no-rerun recipe, or if crafting
                would return `None` and `raise_exception` is set.
        """
        craft_result = None
        if self.allow_craft:
            # override/extend craft_kwargs from initialization.
            craft_kwargs = copy(self.craft_kwargs)
            craft_kwargs.update(kwargs)
            try:
                try:
                    # this assigns to self.validated_inputs
                    self.pre_craft(**craft_kwargs)
                except (CraftingError, CraftingValidationError):
                    if raise_exception:
                        raise
                else:
                    # Only run the actual craft when validation succeeded.
                    craft_result = self.do_craft(**craft_kwargs)
                finally:
                    # post_craft always runs, even after a validation failure.
                    craft_result = self.post_craft(craft_result, **craft_kwargs)
            except (CraftingError, CraftingValidationError):
                if raise_exception:
                    raise
            # possibly turn off re-use depending on class setting
            self.allow_craft = self.allow_reuse
        elif not self.allow_reuse:
            raise CraftingError("Cannot re-run crafting without re-initializing recipe first.")
        if craft_result is None and raise_exception:
            raise CraftingError(f"Crafting of {self.name} failed.")
        return craft_result
class CraftingRecipe(CraftingRecipeBase):
"""
The CraftRecipe implements the most common form of crafting: Combining (and
consuming) inputs to produce a new result. This type of recipe only works
with typeclassed entities as inputs and outputs, since it's based on Tags
and Prototypes.
There are two types of crafting ingredients: 'tools' and 'consumables'. The
difference between them is that the former is not consumed in the crafting
process. So if you need a hammer and anvil to craft a sword, they are
'tools' whereas the materials of the sword are 'consumables'.
Examples:
::
class FlourRecipe(CraftRecipe):
name = "flour"
tool_tags = ['windmill']
consumable_tags = ["wheat"]
output_prototypes = [
{"key": "Bag of flour",
"typeclass": "typeclasses.food.Flour",
"desc": "A small bag of flour."
"tags": [("flour", "crafting_material"),
}
class BreadRecipe(CraftRecipe):
name = "bread"
tool_tags = ["roller", "owen"]
consumable_tags = ["flour", "egg", "egg", "salt", "water", "yeast"]
output_prototypes = [
{"key": "bread",
"desc": "A tasty bread."
}
## Properties on the class level:
- `name` (str): The name of this recipe. This should be globally unique.
### tools
- `tool_tag_category` (str): What tag-category tools must use. Default is
'crafting_tool'.
- `tool_tags` (list): Object-tags to use for tooling. If more than one instace
of a tool is needed, add multiple entries here.
- `tool_names` (list): Human-readable names for tools. These are used for informative
messages/errors. If not given, the tags will be used. If given, this list should
match the length of `tool_tags`.:
- `exact_tools` (bool, default True): Must have exactly the right tools, any extra
leads to failure.
- `exact_tool_order` (bool, default False): Tools must be added in exactly the
right order for crafting to pass.
### consumables
- `consumable_tag_category` (str): What tag-category consumables must use.
Default is 'crafting_material'.
- `consumable_tags` (list): Tags for objects that will be consumed as part of
running the recipe.
- `consumable_names` (list): Human-readable names for consumables. Same as for tools.
- `exact_consumables` (bool, default True): Normally, adding more consumables
than needed leads to a a crafting error. If this is False, the craft will
still succeed (only the needed ingredients will be consumed).
- `exact_consumable_order` (bool, default False): Normally, the order in which
ingredients are added does not matter. With this set, trying to add consumables in
another order than given will lead to failing crafting.
- `consume_on_fail` (bool, default False): Normally, consumables remain if
crafting fails. With this flag, a failed crafting will still consume
consumables. Note that this will also consume any 'extra' consumables
added not part of the recipe!
### outputs (result of crafting)
- `output_prototypes` (list): One or more prototypes (`prototype_keys` or
full dicts) describing how to create the result(s) of this recipe.
- `output_names` (list): Human-readable names for (prospective) prototypes.
This is used in error messages. If not given, this is extracted from the
prototypes' `key` if possible.
### custom error messages
custom messages all have custom formatting markers. Many are empty strings
when not applicable.
::
{missing}: Comma-separated list of tool/consumable missing for missing/out of order errors.
{excess}: Comma-separated list of tool/consumable added in excess of recipe
{inputs}: Comma-separated list of any inputs (tools + consumables) involved in error.
{tools}: Comma-sepatated list of tools involved in error.
{consumables}: Comma-separated list of consumables involved in error.
{outputs}: Comma-separated list of (expected) outputs
{t0}..{tN-1}: Individual tools, same order as `.tool_names`.
{c0}..{cN-1}: Individual consumables, same order as `.consumable_names`.
{o0}..{oN-1}: Individual outputs, same order as `.output_names`.
- `error_tool_missing_message`: "Could not craft {outputs} without {missing}."
- `error_tool_order_message`:
"Could not craft {outputs} since {missing} was added in the wrong order."
- `error_tool_excess_message`: "Could not craft {outputs} (extra {excess})."
- `error_consumable_missing_message`: "Could not craft {outputs} without {missing}."
- `error_consumable_order_message`:
"Could not craft {outputs} since {missing} was added in the wrong order."
- `error_consumable_excess_message`: "Could not craft {outputs} (excess {excess})."
- `success_message`: "You successfully craft {outputs}!"
- `failure_message`: "" (this is handled by the other error messages by default)
## Hooks
1. Crafting starts by calling `.craft(**kwargs)` on the parent class. The
`**kwargs` are optional, extends any `**kwargs` passed to the class
constructor and will be passed into all the following hooks.
3. `.pre_craft(**kwargs)` should handle validation of inputs. Results should
be stored in `validated_consumables/tools` respectively. Raises `CraftingValidationError`
otherwise.
4. `.do_craft(**kwargs)` will not be called if validation failed. Should return
a list of the things crafted.
5. `.post_craft(crafting_result, **kwargs)` is always called, also if validation
failed (`crafting_result` will then be falsy). It does any cleanup. By default
this deletes consumables.
Use `.msg` to conveniently send messages to the crafter. Raise
`evennia.contrib.crafting.crafting.CraftingError` exception to abort
crafting at any time in the sequence. If raising with a text, this will be
shown to the crafter automatically
"""
name = "crafting recipe"
# this define the overall category all material tags must have
consumable_tag_category = "crafting_material"
# tag category for tool objects
tool_tag_category = "crafting_tool"
# the tools needed to perform this crafting. Tools are never consumed (if they were,
# they'd need to be a consumable). If more than one instance of a tool is needed,
# there should be multiple entries in this list.
tool_tags = []
# human-readable names for the tools. This will be used for informative messages
# or when usage fails. If empty, use tag-names.
tool_names = []
# if we must have exactly the right tools, no more
exact_tools = True
# if the order of the tools matters
exact_tool_order = False
# error to show if missing tools
error_tool_missing_message = "Could not craft {outputs} without {missing}."
# error to show if tool-order matters and it was wrong. Missing is the first
# tool out of order
error_tool_order_message = (
"Could not craft {outputs} since {missing} was added in the wrong order."
)
# if .exact_tools is set and there are more than needed
error_tool_excess_message = (
"Could not craft {outputs} without the exact tools (extra {excess})."
)
# a list of tag-keys (of the `tag_category`). If more than one of each type
# is needed, there should be multiple same-named entries in this list.
consumable_tags = []
# these are human-readable names for the items to use. This is used for informative
# messages or when usage fails. If empty, the tag-names will be used. If given, this
# must have the same length as `consumable_tags`.
consumable_names = []
# if True, consume valid inputs also if crafting failed (returned None)
consume_on_fail = False
# if True, having any wrong input result in failing the crafting. If False,
# extra components beyond the recipe are ignored.
exact_consumables = True
# if True, the exact order in which inputs are provided matters and must match
# the order of `consumable_tags`. If False, order doesn't matter.
exact_consumable_order = False
# error to show if missing consumables
error_consumable_missing_message = "Could not craft {outputs} without {missing}."
# error to show if consumable order matters and it was wrong. Missing is the first
# consumable out of order
error_consumable_order_message = (
"Could not craft {outputs} since {missing} was added in the wrong order."
)
# if .exact_consumables is set and there are more than needed
error_consumable_excess_message = (
"Could not craft {outputs} without the exact ingredients (extra {excess})."
)
# this is a list of one or more prototypes (prototype_keys to existing
# prototypes or full prototype-dicts) to use to build the result. All of
# these will be returned (as a list) if crafting succeeded.
output_prototypes = []
# human-readable name(s) for the (expected) result of this crafting. This will usually only
# be used for error messages (to report what would have been). If not given, the
# prototype's key or typeclass will be used. If given, this must have the same length
# as `output_prototypes`.
output_names = []
# general craft-failure msg to show after other error-messages.
failure_message = ""
# show after a successful craft
success_message = "You successfully craft {outputs}!"
def __init__(self, crafter, *inputs, **kwargs):
"""
Args:
crafter (Object): The one doing the crafting.
*inputs (Object): The ingredients (+tools) of the recipe to use. The
The recipe will itself figure out (from tags) which is a tool and
which is a consumable.
**kwargs (any): Any other parameters that are relevant for
this recipe. These will be passed into the crafting hooks.
Notes:
Internally, this class stores validated data in
`.validated_consumables` and `.validated_tools` respectively. The
`.validated_inputs` property (from parent) holds a list of everything
types in the order inserted to the class constructor.
"""
super().__init__(crafter, *inputs, **kwargs)
self.validated_consumables = []
self.validated_tools = []
# validate class properties
if self.consumable_names:
assert len(self.consumable_names) == len(self.consumable_tags), (
f"Crafting {self.__class__}.consumable_names list must "
"have the same length as .consumable_tags."
)
else:
self.consumable_names = self.consumable_tags
if self.tool_names:
assert len(self.tool_names) == len(self.tool_tags), (
f"Crafting {self.__class__}.tool_names list must "
"have the same length as .tool_tags."
)
else:
self.tool_names = self.tool_tags
if self.output_names:
assert len(self.consumable_names) == len(self.consumable_tags), (
f"Crafting {self.__class__}.output_names list must "
"have the same length as .output_prototypes."
)
else:
self.output_names = [
prot.get("key", prot.get("typeclass", "unnamed"))
if isinstance(prot, dict)
else str(prot)
for prot in self.output_prototypes
]
assert isinstance(
self.output_prototypes, (list, tuple)
), "Crafting {self.__class__}.output_prototypes must be a list or tuple."
# don't allow reuse if we have consumables. If only tools we can reuse
# over and over since nothing changes.
self.allow_reuse = not bool(self.consumable_tags)
@classmethod
def seed(cls, tool_kwargs=None, consumable_kwargs=None, location=None):
"""
This is a helper class-method for easy testing and application of this
recipe. When called, it will create simple dummy ingredients with names
and tags needed by this recipe.
Args:
tool_kwargs (dict, optional): Will be passed as `**tool_kwargs` into the `create_object`
call for each tool. If not given, the matching
`tool_name` or `tool_tag` will be used for key.
consumable_kwargs (dict, optional): This will be passed as
`**consumable_kwargs` into the `create_object` call for each consumable.
If not given, matching `consumable_name` or `consumable_tag`
will be used for key.
location (Object, optional): If given, the created items will be created in this
location. This is a shortcut for adding {"location": <obj>} to both the
consumable/tool kwargs (and will *override* any such setting in those kwargs).
Returns:
tuple: A tuple `(tools, consumables)` with newly created dummy
objects matching the recipe ingredient list.
Example:
::
tools, consumables = SwordRecipe.seed(location=caller)
recipe = SwordRecipe(caller, *(tools + consumables))
result = recipe.craft()
Notes:
If `key` is given in `consumable/tool_kwargs` then _every_ created item
of each type will have the same key.
"""
if not tool_kwargs:
tool_kwargs = {}
if not consumable_kwargs:
consumable_kwargs = {}
if location:
tool_kwargs['location'] = location
consumable_kwargs['location'] = location
tool_key = tool_kwargs.pop("key", None)
cons_key = consumable_kwargs.pop("key", None)
tool_tags = tool_kwargs.pop("tags", [])
cons_tags = consumable_kwargs.pop("tags", [])
tools = []
for itag, tag in enumerate(cls.tool_tags):
tools.append(
create_object(
key=tool_key or (cls.tool_names[itag] if cls.tool_names else tag.capitalize()),
tags=[(tag, cls.tool_tag_category), *tool_tags],
**tool_kwargs,
)
)
consumables = []
for itag, tag in enumerate(cls.consumable_tags):
consumables.append(
create_object(
key=cons_key
or (cls.consumable_names[itag] if cls.consumable_names else tag.capitalize()),
tags=[(tag, cls.consumable_tag_category), *cons_tags],
**consumable_kwargs,
)
)
return tools, consumables
    def pre_craft(self, **kwargs):
        """
        Do pre-craft checks, including input validation.

        Check so the given inputs are what is needed. This operates on
        `self.inputs` which is set to the inputs added to the class
        constructor. Validated data is stored as lists on `.validated_tools`
        and `.validated_consumables` respectively.

        Args:
            **kwargs: Any optional extra kwargs passed during initialization of
                the recipe class.

        Raises:
            CraftingValidationError: If validation fails. At this point the crafter
                is expected to have been informed of the problem already.

        """

        def _check_completeness(
            tagmap,
            taglist,
            namelist,
            exact_match,
            exact_order,
            error_missing_message,
            error_order_message,
            error_excess_message,
        ):
            """Compare tagmap (inputs) to taglist (required).

            Destructively pops matched objects out of `tagmap`; whatever is
            left over afterwards is 'excess' input.
            """
            valids = []
            for itag, tagkey in enumerate(taglist):
                found_obj = None
                for obj, objtags in tagmap.items():
                    if tagkey in objtags:
                        found_obj = obj
                        break
                    if exact_order:
                        # if we get here the first remaining input did not
                        # carry the tag we expected next - order is wrong
                        err = self._format_message(
                            error_order_message, missing=obj.get_display_name(looker=self.crafter)
                        )
                        self.msg(err)
                        raise CraftingValidationError(err)

                # since we pop from the mapping, it gets ever shorter
                match = tagmap.pop(found_obj, None)
                if match:
                    valids.append(found_obj)
                elif exact_match:
                    # nothing in tagmap carried this required tag
                    err = self._format_message(
                        error_missing_message,
                        missing=namelist[itag] if namelist else tagkey.capitalize(),
                    )
                    self.msg(err)
                    raise CraftingValidationError(err)

            if exact_match and tagmap:
                # something is left in tagmap, that means it was never popped and
                # thus this is not an exact match
                err = self._format_message(
                    error_excess_message,
                    excess=[obj.get_display_name(looker=self.crafter) for obj in tagmap],
                )
                self.msg(err)
                raise CraftingValidationError(err)

            return valids

        # get tools and consumables from self.inputs; only real in-game objects
        # (ObjectDB typeclasses with tags) are considered
        tool_map = {
            obj: obj.tags.get(category=self.tool_tag_category, return_list=True)
            for obj in self.inputs
            if obj
            and hasattr(obj, "tags")
            and inherits_from(obj, "evennia.objects.models.ObjectDB")
        }
        tool_map = {obj: tags for obj, tags in tool_map.items() if tags}
        # anything already classified as a tool is excluded from consumables
        consumable_map = {
            obj: obj.tags.get(category=self.consumable_tag_category, return_list=True)
            for obj in self.inputs
            if obj
            and hasattr(obj, "tags")
            and obj not in tool_map
            and inherits_from(obj, "evennia.objects.models.ObjectDB")
        }
        consumable_map = {obj: tags for obj, tags in consumable_map.items() if tags}

        # we set these so they are available for error management at all times,
        # they will be updated with the actual values at the end
        self.validated_tools = [obj for obj in tool_map]
        self.validated_consumables = [obj for obj in consumable_map]

        tools = _check_completeness(
            tool_map,
            self.tool_tags,
            self.tool_names,
            self.exact_tools,
            self.exact_tool_order,
            self.error_tool_missing_message,
            self.error_tool_order_message,
            self.error_tool_excess_message,
        )
        consumables = _check_completeness(
            consumable_map,
            self.consumable_tags,
            self.consumable_names,
            self.exact_consumables,
            self.exact_consumable_order,
            self.error_consumable_missing_message,
            self.error_consumable_order_message,
            self.error_consumable_excess_message,
        )

        # regardless of flags, the tools/consumable lists must contain exactly
        # all the recipe needs now.
        if len(tools) != len(self.tool_tags):
            raise CraftingValidationError(
                f"Tools {tools}'s tags do not match expected tags {self.tool_tags}"
            )
        if len(consumables) != len(self.consumable_tags):
            raise CraftingValidationError(
                f"Consumables {consumables}'s tags do not match "
                f"expected tags {self.consumable_tags}"
            )

        self.validated_tools = tools
        self.validated_consumables = consumables
def do_craft(self, **kwargs):
"""
Hook to override. This will not be called if validation in `pre_craft`
fails.
This performs the actual crafting. At this point the inputs are
expected to have been verified already.
Returns:
list: A list of spawned objects created from the inputs, or None
on a failure.
Notes:
This method should use `self.msg` to inform the user about the
specific reason of failure immediately.
We may want to analyze the tools in some way here to affect the
crafting process.
"""
return spawn(*self.output_prototypes)
def post_craft(self, craft_result, **kwargs):
"""
Hook to override.
This is called just after crafting has finished. A common use of
this method is to delete the inputs.
Args:
craft_result (list): The crafted result, provided by `self.do_craft`.
**kwargs (any): Passed from `self.craft`.
Returns:
list: The return(s) of the craft, possibly modified in this method.
Notes:
This is _always_ called, also if validation in `pre_craft` fails
(`craft_result` will then be `None`).
"""
if craft_result:
self.msg(self._format_message(self.success_message))
elif self.failure_message:
self.msg(self._format_message(self.failure_message))
if craft_result or self.consume_on_fail:
# consume the inputs
for obj in self.validated_consumables:
obj.delete()
return craft_result
# access function
def craft(crafter, recipe_name, *inputs, raise_exception=False, **kwargs):
    """
    Access function. Craft a given recipe from a source recipe module. A
    recipe module is a Python module containing recipe classes. Note that this
    requires `settings.CRAFT_RECIPE_MODULES` to be added to a list of one or
    more python-paths to modules holding Recipe-classes.

    Args:
        crafter (Object): The one doing the crafting.
        recipe_name (str): The `CraftRecipe.name` to use. This uses fuzzy-matching
            if the result is unique.
        *inputs: Suitable ingredients and/or tools (Objects) to use in the crafting.
        raise_exception (bool, optional): If crafting failed for whatever
            reason, raise `CraftingError`. The user will still be informed by the
            recipe.
        **kwargs: Optional kwargs to pass into the recipe (will passed into
            recipe.craft).

    Returns:
        list: Crafted objects, if any.

    Raises:
        CraftingError: If `raise_exception` is True and crafting failed to
            produce an output. KeyError: If `recipe_name` failed to find a
            matching recipe class (or the hit was not precise enough.)

    Notes:
        If no recipe_module is given, will look for a list `settings.CRAFT_RECIPE_MODULES` and
        lastly fall back to the example module `"evennia.contrib."`

    """
    # delayed loading/caching of recipes
    _load_recipes()

    RecipeClass = _RECIPE_CLASSES.get(recipe_name, None)
    if not RecipeClass:
        # try a startswith fuzzy match
        matches = [key for key in _RECIPE_CLASSES if key.startswith(recipe_name)]
        if not matches:
            # try in-match
            matches = [key for key in _RECIPE_CLASSES if recipe_name in key]
        if len(matches) == 1:
            # bugfix: look up the class for the matched key. Previously the key
            # (a string) was assigned directly, which would crash when called
            # as `RecipeClass(...)` below.
            RecipeClass = _RECIPE_CLASSES[matches[0]]
    if not RecipeClass:
        raise KeyError(
            f"No recipe in settings.CRAFT_RECIPE_MODULES has a name matching {recipe_name}"
        )
    recipe = RecipeClass(crafter, *inputs, **kwargs)
    return recipe.craft(raise_exception=raise_exception)
# craft command/cmdset
class CraftingCmdSet(CmdSet):
    """
    Store crafting command.
    """

    key = "Crafting cmdset"

    def at_cmdset_creation(self):
        # bugfix: without populating the set here, the cmdset stored no
        # command at all and adding it to an object had no effect.
        self.add(CmdCraft())
class CmdCraft(Command):
    """
    Craft an item using ingredients and tools

    Usage:
      craft <recipe> [from <ingredient>,...] [using <tool>, ...]

    Examples:
      craft snowball from snow
      craft puppet from piece of wood using knife
      craft bread from flour, butter, water, yeast using owen, bowl, roller
      craft fireball using wand, spellbook

    Notes:
        Ingredients must be in the crafter's inventory. Tools can also be
        things in the current location, like a furnace, windmill or anvil.

    """

    key = "craft"
    locks = "cmd:all()"
    help_category = "General"
    arg_regex = r"\s|$"

    def parse(self):
        """
        Handle parsing of:
        ::

            <recipe> [FROM <ingredients>] [USING <tools>]

        Examples:
        ::

            craft snowball from snow
            craft puppet from piece of wood using knife
            craft bread from flour, butter, water, yeast using owen, bowl, roller
            craft fireball using wand, spellbook

        Sets `.recipe` (str), `.ingredients` (list of str) and `.tools`
        (list of str) for `func` to use.
        """
        self.args = args = self.args.strip().lower()
        recipe, ingredients, tools = "", "", ""
        if "from" in args:
            recipe, *rest = args.split(" from ", 1)
            rest = rest[0] if rest else ""
            ingredients, *tools = rest.split(" using ", 1)
            # bugfix: the star-unpack above leaves `tools` as a (possibly
            # empty) list; without collapsing it back to a string,
            # `tools.split(",")` below raised AttributeError for any
            # `craft ... from ...` input.
            tools = tools[0] if tools else ""
        elif "using" in args:
            recipe, *tools = args.split(" using ", 1)
            tools = tools[0] if tools else ""
        self.recipe = recipe.strip()
        self.ingredients = [ingr.strip() for ingr in ingredients.split(",")]
        self.tools = [tool.strip() for tool in tools.split(",")]

    def func(self):
        """
        Perform crafting.

        Will check the `craft` locktype. If a consumable/ingredient does not pass
        this check, we will check for the 'crafting_consumable_err_msg'
        Attribute, otherwise will use a default. If failing on a tool, will use
        the `crafting_tool_err_msg` if available.

        """
        caller = self.caller

        if not self.args or not self.recipe:
            self.caller.msg("Usage: craft <recipe> from <ingredient>, ... [using <tool>,...]")
            return

        ingredients = []
        for ingr_key in self.ingredients:
            if not ingr_key:
                continue
            obj = caller.search(ingr_key, location=self.caller)
            # since ingredients are consumed we need extra check so we don't
            # try to include characters or accounts etc.
            if not obj:
                return
            if (
                not inherits_from(obj, "evennia.objects.models.ObjectDB")
                or obj.sessions.all()
                or not obj.access(caller, "craft", default=True)
            ):
                # We don't allow to include puppeted objects nor those with the
                # 'negative' permission 'nocraft'.
                caller.msg(
                    obj.attributes.get(
                        "crafting_consumable_err_msg",
                        default=f"{obj.get_display_name(looker=caller)} can't be used for this.",
                    )
                )
                return
            ingredients.append(obj)

        tools = []
        for tool_key in self.tools:
            if not tool_key:
                continue
            # tools are not consumed, can also exist in the current room
            obj = caller.search(tool_key)
            if not obj:
                # consistency: bare `return` like every other early exit here
                return
            if not obj.access(caller, "craft", default=True):
                caller.msg(
                    obj.attributes.get(
                        "crafting_tool_err_msg",
                        default=f"{obj.get_display_name(looker=caller)} can't be used for this.",
                    )
                )
                return
            tools.append(obj)

        # perform craft and make sure result is in inventory
        # (the recipe handles all returns to caller)
        result = craft(caller, self.recipe, *(tools + ingredients))
        if result:
            for obj in result:
                obj.location = caller
| [
37811,
198,
14467,
278,
532,
20914,
963,
12131,
198,
198,
1212,
318,
257,
2276,
21671,
3113,
13,
383,
4096,
11244,
286,
21671,
318,
284,
198,
24011,
500,
597,
1271,
286,
286,
3709,
393,
4899,
287,
257,
705,
29102,
431,
6,
284,
4439,
... | 2.490637 | 15,967 |
from random import randint, random

# NOTE(review): presumably the stylesheet bundle consumed by the app's asset
# pipeline - confirm against the framework configuration that reads this.
scss_files = ["purple.css"]
| [
6738,
4738,
1330,
43720,
600,
11,
4738,
198,
198,
1416,
824,
62,
16624,
796,
14631,
14225,
1154,
13,
25471,
8973,
628
] | 3.095238 | 21 |
import torch
from torchvision import datasets,transforms
import numpy as np
from torch.utils.data.sampler import SubsetRandomSampler
def get_test_loader_cifar(
        batch_size,
        dataset="cifar10",
        output_height=32,
        output_width=32,
        shuffle=True,
        num_workers=16,
        pin_memory=True,
        data_dir="./data/"):
    """
    Utility function for loading and returning a multi-process
    test iterator over the CIFAR-10 or CIFAR-100 dataset.

    If using CUDA, num_workers should be set to 1 and pin_memory to True.

    Params
    ------
    - batch_size: how many samples per batch to load.
    - dataset: either "cifar10" or "cifar100".
    - output_height / output_width: kept for interface compatibility; only
      used by the (currently disabled) Resize transform.
    - shuffle: whether to shuffle the dataset after every epoch.
    - num_workers: number of subprocesses to use when loading the dataset.
    - pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
      True if using GPU.
    - data_dir: path directory to the dataset.

    Returns
    -------
    - data_loader: test set iterator.

    Raises
    ------
    - ValueError: if `dataset` is not one of the supported names.
    """
    if dataset == "cifar10":
        normalize = transforms.Normalize((0.4914, 0.4822, 0.4465),
                                         (0.247, 0.243, 0.261))
        # define transform
        transform = transforms.Compose([
            # transforms.Resize((output_width, output_height)),
            transforms.ToTensor(),
            normalize,
        ])
        test_set = datasets.CIFAR10(
            root=data_dir, train=False,
            download=True, transform=transform,
        )
    elif dataset == "cifar100":
        normalize = transforms.Normalize(mean=[0.507, 0.487, 0.441],
                                         std=[0.267, 0.256, 0.276])
        # define transform
        transform = transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])
        test_set = datasets.CIFAR100(
            root=data_dir, train=False,
            download=True, transform=transform,
        )
    else:
        # previously an unknown name fell through and raised a confusing
        # UnboundLocalError below; fail fast with a clear message instead.
        raise ValueError(f"Unsupported dataset: {dataset!r} "
                         "(expected 'cifar10' or 'cifar100')")

    data_loader = torch.utils.data.DataLoader(
        test_set, batch_size=batch_size, shuffle=shuffle,
        num_workers=num_workers, pin_memory=pin_memory,
    )
    return data_loader
| [
11748,
28034,
198,
6738,
28034,
10178,
1330,
40522,
11,
7645,
23914,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
28034,
13,
26791,
13,
7890,
13,
37687,
20053,
1330,
3834,
2617,
29531,
16305,
20053,
628,
198,
198,
4299,
651,
62,
9288,
... | 2.212831 | 982 |
from flask import current_app, request, abort, jsonify
from datetime import datetime
from . import forecast as forecast_
from .autocomplete import autocomplete as autocomplete_
@current_app.route('/forecast')
@current_app.route('/autocomplete') | [
6738,
42903,
1330,
1459,
62,
1324,
11,
2581,
11,
15614,
11,
33918,
1958,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
764,
1330,
11092,
355,
11092,
62,
198,
6738,
764,
2306,
42829,
6677,
1330,
1960,
42829,
6677,
355,
1960,
42829,... | 3.514286 | 70 |
#!/usr/bin/env python2
"""
This module includes the out-of-process support code for codeintel2
Reference: http://bugs.activestate.com/show_bug.cgi?id=93455
"""
from .driver import Driver
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
198,
37811,
198,
1212,
8265,
3407,
262,
503,
12,
1659,
12,
14681,
1104,
2438,
329,
2438,
48779,
17,
198,
198,
26687,
25,
2638,
1378,
32965,
13,
15791,
44146,
13,
785,
14,
12860,
... | 3.114754 | 61 |
from __future__ import print_function
import numpy as np
class ClassifierTrainer(object):
    """ The trainer class performs SGD with momentum on a cost function """

    def train(self, X, y,
              model, learning_rate_decay=0.95, sample_batches=True,
              num_epochs=30, batch_size=100, acc_frequency=None,
              verbose=False, optimizer=None):
        """
        Optimize the parameters of a model to minimize a loss function. We use
        training data X and y to compute the loss and gradients, and periodically
        check the accuracy on the validation set.

        Inputs:
        - X: Array of training data; each X[i] is a training sample.
        - y: Vector of training labels; y[i] gives the label for X[i].
        - model: Model of ConvNet
        - learning_rate_decay: The learning rate is multiplied by this after each
          epoch.
        - sample_batches: If True, use a minibatch of data for each parameter update
          (stochastic gradient descent); if False, use the entire training set for
          each parameter update (gradient descent).
        - num_epochs: The number of epochs to take over the training data.
        - batch_size: The number of training samples to use at each iteration.
        - acc_frequency: If set to an integer, we compute the training and
          validation set error after every acc_frequency iterations.
        - verbose: If True, print status after each epoch.
        - optimizer: Object with an `update(model)` method and a
          `learning_rate` attribute; performs the parameter update.

        Returns a tuple of:
        - loss_history: List containing the value of the loss function at each
          iteration.
        - train_acc_history: List storing the training set accuracy at each epoch.
        """
        N = X.shape[0]

        if sample_batches:
            iterations_per_epoch = N // batch_size  # using SGD
        else:
            iterations_per_epoch = 1  # using GD
        num_iters = num_epochs * iterations_per_epoch
        epoch = 0
        loss_history = []
        train_acc_history = []
        for it in range(num_iters):
            # get batch of data
            if sample_batches:
                batch_mask = np.random.choice(N, batch_size)
                X_batch = X[batch_mask]
                y_batch = y[batch_mask]
            else:
                # no SGD used, full gradient descent
                X_batch = X
                y_batch = y

            # evaluate cost and gradient
            out, cost = model.forward(X_batch, y_batch)
            # bugfix: record the loss each iteration. Previously loss_history
            # was initialized and returned but never appended to, so it was
            # always empty despite what the docstring promises.
            loss_history.append(cost)
            model.backward()
            optimizer.update(model)

            # every epoch perform an evaluation on the validation set
            first_it = (it == 0)
            epoch_end = (it + 1) % iterations_per_epoch == 0
            acc_check = (acc_frequency is not None and it % acc_frequency == 0)
            if first_it or epoch_end or acc_check:
                if it > 0 and epoch_end:
                    # decay the learning rate
                    optimizer.learning_rate *= learning_rate_decay
                    epoch += 1

                # evaluate train accuracy on at most 1000 random samples to
                # keep the check cheap for large training sets
                if N > 1000:
                    train_mask = np.random.choice(N, 1000)
                    X_train_subset = X[train_mask]
                    y_train_subset = y[train_mask]
                else:
                    X_train_subset = X
                    y_train_subset = y
                scores_train, _ = model.forward(X_train_subset, y_train_subset)
                y_pred_train = np.argmax(scores_train, axis=1)
                train_acc = np.mean(y_pred_train == y_train_subset)
                train_acc_history.append(train_acc)

                # print progress if needed
                if verbose:
                    print('Finished epoch %d / %d: cost %f, train: %f, lr %e'
                          % (epoch, num_epochs, cost, train_acc, optimizer.learning_rate))

        # return the training history statistics
        return loss_history, train_acc_history
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
299,
32152,
355,
45941,
628,
198,
4871,
5016,
7483,
2898,
10613,
7,
15252,
2599,
198,
220,
220,
220,
37227,
383,
21997,
1398,
17706,
26147,
35,
351,
12858,
319,
257,
1575,
2163,... | 2.1546 | 1,837 |
"""
Basic and Monitor-Curve Exponent Transfer Functions
===================================================
Defines the exponent transfer functions:
- :func:`colour.models.exponent_function_basic`
- :func:`colour.models.exponent_function_monitor_curve`
References
----------
- :cite: `TheAcademyofMotionPictureArtsandSciences2020` : The Academy of
Motion Picture Arts and Sciences, Science and Technology Council, & Academy
Color Encoding System (ACES) Project Subcommittee. (2020). Specification
S-2014-006 - Common LUT Format (CLF) - A Common File Format for Look-Up
Tables. Retrieved June 24, 2020, from http://j.mp/S-2014-006
"""
from __future__ import annotations
import numpy as np
from colour.hints import (
FloatingOrArrayLike,
FloatingOrNDArray,
Literal,
NDArray,
Union,
)
from colour.utilities import (
as_float,
as_float_array,
suppress_warnings,
validate_method,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"exponent_function_basic",
"exponent_function_monitor_curve",
]
def exponent_function_basic(
    x: FloatingOrArrayLike,
    exponent: FloatingOrArrayLike = 1,
    style: Union[
        Literal[
            "basicFwd",
            "basicRev",
            "basicMirrorFwd",
            "basicMirrorRev",
            "basicPassThruFwd",
            "basicPassThruRev",
        ],
        str,
    ] = "basicFwd",
) -> FloatingOrNDArray:
    """
    Apply the *basic* exponent transfer function to ``x``.

    Parameters
    ----------
    x
        Data to undergo the basic exponent conversion.
    exponent
        Exponent value used for the conversion.
    style
        Transfer function behaviour:

        -   *basicFwd* / *basicRev*: basic (or inverse) power law; values
            less than zero are clamped to zero.
        -   *basicMirrorFwd* / *basicMirrorRev*: power law for values greater
            than or equal to zero, mirrored for values less than zero (i.e.
            rotationally symmetric around the origin).
        -   *basicPassThruFwd* / *basicPassThruRev*: power law for values
            greater than or equal to zero; values less than zero are passed
            through unchanged.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Exponentially converted data.

    Examples
    --------
    >>> exponent_function_basic(0.18, 2.2)  # doctest: +ELLIPSIS
    0.0229932...
    >>> exponent_function_basic(0.18, 2.2, 'basicRev')  # doctest: +ELLIPSIS
    0.4586564...
    >>> exponent_function_basic(-0.18, 2.2, 'basicMirrorFwd')
    ... # doctest: +SKIP
    -0.0229932...
    """

    x = as_float_array(x)
    exponent = as_float_array(exponent)
    style = validate_method(
        style,
        [
            "basicFwd",
            "basicRev",
            "basicMirrorFwd",
            "basicMirrorRev",
            "basicPassThruFwd",
            "basicPassThruRev",
        ],
        '"{0}" style is invalid, it must be one of {1}!',
    )

    def _power_forward(values: NDArray) -> NDArray:
        """Basic power law."""
        return values**exponent

    def _power_reverse(values: NDArray) -> NDArray:
        """Inverse of the basic power law."""
        return values ** (as_float_array(1) / exponent)

    # "...rev" styles invert the power law; everything else is forward
    transform = _power_reverse if style.endswith("rev") else _power_forward

    # validate_method lower-cases the style, so prefix tests are stable here
    if style.startswith("basicmirror"):
        # rotationally symmetric around the origin
        converted = np.where(x >= 0, transform(x), -transform(-x))
    elif style.startswith("basicpassthru"):
        # negative values pass through untouched
        converted = np.where(x >= 0, transform(x), x)
    else:
        # plain basicFwd / basicRev: clamp negatives to zero
        converted = np.where(x >= 0, transform(x), 0)

    return as_float(converted)
def exponent_function_monitor_curve(
    x: FloatingOrArrayLike,
    exponent: FloatingOrArrayLike = 1,
    offset: FloatingOrArrayLike = 0,
    style: Union[
        Literal[
            "monCurveFwd",
            "monCurveRev",
            "monCurveMirrorFwd",
            "monCurveMirrorRev",
        ],
        str,
    ] = "monCurveFwd",
) -> FloatingOrNDArray:
    """
    Define the *Monitor Curve* exponent transfer function.

    Parameters
    ----------
    x
        Data to undergo the monitor curve exponential conversion.
    exponent
        Exponent value used for the conversion.
    offset
        Offset value used for the conversion.
    style
        Defines the behaviour for the transfer function to operate:

        -   *monCurveFwd*: *Monitor Curve Forward* exponential behaviour
            where the definition applies a power law function with a linear
            segment near the origin.
        -   *monCurveRev*: *Monitor Curve Reverse* exponential behaviour
            where the definition applies a power law function with a linear
            segment near the origin.
        -   *monCurveMirrorFwd*: *Monitor Curve Mirror Forward* exponential
            behaviour where the definition applies a power law function with a
            linear segment near the origin and mirrors the function for values
            less than zero (i.e. rotationally symmetric around the origin).
        -   *monCurveMirrorRev*: *Monitor Curve Mirror Reverse* exponential
            behaviour where the definition applies a power law function with a
            linear segment near the origin and mirrors the function for values
            less than zero (i.e. rotationally symmetric around the origin).

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Exponentially converted data.

    Examples
    --------
    >>> exponent_function_monitor_curve(  # doctest: +ELLIPSIS
    ...     0.18, 2.2, 0.001)
    0.0232240...
    >>> exponent_function_monitor_curve(  # doctest: +ELLIPSIS
    ...     -0.18, 2.2, 0.001)
    -0.0002054...
    >>> exponent_function_monitor_curve(  # doctest: +ELLIPSIS
    ...     0.18, 2.2, 0.001, 'monCurveRev')
    0.4581151...
    >>> exponent_function_monitor_curve(  # doctest: +ELLIPSIS
    ...     -0.18, 2.2, 0.001, 'monCurveRev')
    -157.7302795...
    >>> exponent_function_monitor_curve(  # doctest: +ELLIPSIS
    ...     0.18, 2.2, 2, 'monCurveMirrorFwd')
    0.1679399...
    >>> exponent_function_monitor_curve(  # doctest: +ELLIPSIS
    ...     -0.18, 2.2, 0.001, 'monCurveMirrorFwd')
    -0.0232240...
    >>> exponent_function_monitor_curve(  # doctest: +ELLIPSIS
    ...     0.18, 2.2, 0.001, 'monCurveMirrorRev')
    0.4581151...
    >>> exponent_function_monitor_curve(  # doctest: +ELLIPSIS
    ...     -0.18, 2.2, 0.001, 'monCurveMirrorRev')
    -0.4581151...
    """

    x = as_float_array(x)
    exponent = as_float_array(exponent)
    offset = as_float_array(offset)
    style = validate_method(
        style,
        [
            "monCurveFwd",
            "monCurveRev",
            "monCurveMirrorFwd",
            "monCurveMirrorRev",
        ],
        '"{0}" style is invalid, it must be one of {1}!',
    )

    # `s` is the slope of the linear segment near the origin. For
    # offset == 0 the expression divides by zero and yields NaN; the
    # warnings are suppressed and the NaNs replaced by 1 below, reducing
    # the curve to a pure power law in that case.
    with suppress_warnings(python_warnings=True):
        s = as_float_array(
            ((exponent - 1) / offset)
            * ((exponent * offset) / ((exponent - 1) * (offset + 1)))
            ** exponent
        )

    s[np.isnan(s)] = 1

    def monitor_curve_forward(
        x: NDArray, offset: NDArray, exponent: NDArray
    ) -> NDArray:
        """Define the *Monitor Curve Forward* function."""

        # breakpoint between the linear segment and the power segment
        x_break = offset / (exponent - 1)

        return np.where(
            x >= x_break,
            ((x + offset) / (1 + offset)) ** exponent,
            x * s,
        )

    def monitor_curve_reverse(
        y: NDArray, offset: NDArray, exponent: NDArray
    ) -> NDArray:
        """Define the *Monitor Curve Reverse* function."""

        # image of the forward breakpoint: where the inverse switches from
        # the power segment back to the linear segment
        y_break = (
            (exponent * offset) / ((exponent - 1) * (1 + offset))
        ) ** exponent

        return np.where(
            y >= y_break,
            ((1 + offset) * (y ** (1 / exponent))) - offset,
            y / s,
        )

    if style == "moncurvefwd":
        return as_float(monitor_curve_forward(x, offset, exponent))
    elif style == "moncurverev":
        return as_float(monitor_curve_reverse(x, offset, exponent))
    elif style == "moncurvemirrorfwd":
        # mirror: apply the forward curve to |x| and restore the sign
        return as_float(
            np.where(
                x >= 0,
                monitor_curve_forward(x, offset, exponent),
                -monitor_curve_forward(-x, offset, exponent),
            )
        )
    else:  # style == 'moncurvemirrorrev'
        return as_float(
            np.where(
                x >= 0,
                monitor_curve_reverse(x, offset, exponent),
                -monitor_curve_reverse(-x, offset, exponent),
            )
        )
| [
37811,
198,
26416,
290,
18289,
12,
26628,
303,
5518,
3471,
20558,
40480,
198,
10052,
4770,
18604,
198,
198,
7469,
1127,
262,
28622,
4351,
5499,
25,
198,
198,
12,
220,
220,
1058,
20786,
25,
63,
49903,
13,
27530,
13,
11201,
3471,
62,
88... | 2.349466 | 4,773 |
import unittest
import logging
import redique, time
import threading

# show DEBUG-level log output while the tests in this module run
logging.basicConfig(level=logging.DEBUG)
11748,
555,
715,
395,
198,
11748,
18931,
198,
11748,
2266,
2350,
11,
640,
198,
11748,
4704,
278,
198,
6404,
2667,
13,
35487,
16934,
7,
5715,
28,
6404,
2667,
13,
30531,
8
] | 3.516129 | 31 |
from typing import Union
class SnowflakeID(int):
    """An ``int`` subclass representing a Discord Snowflake ID."""


# An identifier may be given either as a snowflake or as a plain string.
MIdentifier = Union[SnowflakeID, str]
| [
6738,
19720,
1330,
4479,
628,
198,
4871,
7967,
47597,
2389,
7,
600,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1432,
6629,
257,
39462,
7967,
47597,
4522,
13,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1208,
628,
198,
4... | 2.818182 | 55 |
#progetto eseguito da Leonardo Fiore(322767) e Federico Putamorsi(320041)
import g2d
from x7_BubbleBobbleGame import BubbleBobbleGame
gui = BubbleBobbleGUI() | [
2,
1676,
1136,
1462,
1658,
1533,
5013,
78,
12379,
38083,
23238,
382,
7,
18,
24403,
3134,
8,
304,
35089,
3713,
5930,
321,
35255,
7,
18,
2167,
3901,
8,
198,
198,
11748,
308,
17,
67,
198,
6738,
2124,
22,
62,
33,
549,
903,
18861,
903,... | 2.639344 | 61 |
import math
import numpy as np
from numba import cuda
from numba.types import float32
from replay_trajectory_classification.bins import atleast_2d
# Precompute this constant as a float32. Numba will inline it at compile time.
SQRT_2PI = np.float32((2 * math.pi)**0.5)
# float32 machine epsilon. NOTE(review): not referenced in this chunk -
# presumably a guard against division by zero elsewhere in the module; confirm.
EPS = np.finfo(np.float32).eps
# Device-only helper: callable from kernels (e.g. `kde1`), not from the host.
@cuda.jit(device=True)
def gaussian_pdf(x, mean, sigma):
    '''Compute the value of a Gaussian probability density function at x with
    given mean and sigma.'''
    return math.exp(-0.5 * ((x - mean) / sigma)**2) / (sigma * SQRT_2PI)
# cuda.jit('(float32[:, :], float32[:, :], float32[:], float32[:])')
@cuda.jit()
def kde1(eval_points, samples, bandwidths, out):
    """
    Product-kernel Gaussian KDE evaluated on the GPU: `out[i]` receives the
    mean over all samples of the product of per-dimension Gaussian densities
    at `eval_points[i]`.

    Parameters
    ----------
    eval_points : ndarray, shape (n_eval_points, n_bandwidths)
    samples : ndarray, shape (n_samples, n_bandwidths)
    bandwidths : ndarray, shape (n_bandwidths,)
    out : ndarray, shape (n_eval_points,)

    """
    n_bandwidths = len(bandwidths)
    n_samples = len(samples)
    n_eval_points = len(eval_points)

    # grid-stride loop: each thread starts at its global index and strides by
    # the total grid size, so any launch configuration covers all points
    for thread_id in range(cuda.grid(1), n_eval_points, cuda.gridsize(1)):
        eval_point = eval_points[thread_id]
        sum_kernel = float32(0.0)
        for sample_ind in range(n_samples):
            # product kernel: multiply the 1D Gaussian densities across dims
            product_kernel = float32(1.0)
            for bandwidth_ind in range(n_bandwidths):
                product_kernel *= gaussian_pdf(
                    eval_point[bandwidth_ind],
                    samples[sample_ind, bandwidth_ind],
                    bandwidths[bandwidth_ind]
                )
            sum_kernel += product_kernel
        out[thread_id] = sum_kernel / n_samples
def get_marks_by_place_bin_centers(marks, place_bin_centers):
    """Pair every spike mark with every place-bin center.

    Parameters
    ----------
    marks : ndarray, shape (n_spikes, n_features)
    place_bin_centers : ndarray, shape (n_position_bins, n_position_dims)

    Returns
    -------
    marks_by_place_bin_centers : ndarray, shape (n_spikes * n_position_bins,
                                                 n_features + n_position_dims)
        Marks are tiled once per bin; bin centers are repeated once per spike.
    """
    n_spikes = marks.shape[0]
    n_bins = place_bin_centers.shape[0]
    tiled_marks = np.tile(marks, reps=(n_bins, 1))
    repeated_centers = np.repeat(place_bin_centers, n_spikes, axis=0)
    return np.concatenate((tiled_marks, repeated_centers), axis=1)
def estimate_log_intensity(density, occupancy, mean_rate):
    '''Ground-process intensity on the log scale.

    Computes ``log(mean_rate) + log(density) - log(occupancy)`` elementwise.

    Parameters
    ----------
    density : ndarray, shape (n_bins,)
    occupancy : ndarray, shape (n_bins,)
    mean_rate : float

    Returns
    -------
    log_intensity : ndarray, shape (n_bins,)
    '''
    log_rate = np.log(mean_rate)
    log_density = np.log(density)
    log_occupancy = np.log(occupancy)
    return log_rate + log_density - log_occupancy
def pin_arrays(
    decoding_marks, encoding_marks, place_bin_centers, encoding_positions,
    bandwidths, stream=0
):
    '''Stage KDE inputs on the GPU using pinned (page-locked) host memory.

    Parameters
    ----------
    decoding_marks : ndarray, shape (n_decoding_spikes, n_marks)
    encoding_marks : ndarray, shape (n_encoding_spikes, n_marks)
    place_bin_centers : ndarray, shape (n_bins, n_position_dims)
    encoding_positions : ndarray, shape (n_encoding_spikes, n_position_dims)
    bandwidths : ndarray, shape (n_marks + n_position_dims,)
    stream : numba.cuda.stream or int, optional
        CUDA stream used for the asynchronous host-to-device copies.

    Returns
    -------
    tuple
        ``(d_eval_points, d_encoding_samples, d_bandwidths, d_pdf,
        n_decoding_spikes, pdf)`` -- the device arrays the KDE kernel needs,
        the number of decoding spikes, and the pinned host buffer ``pdf``
        that results are later copied back into.  (The previous docstring
        claimed a joint_mark_intensity array was returned, which did not
        match the code.)
    '''
    decoding_marks = np.atleast_2d(decoding_marks)
    # Every (decoding spike, place bin) pair becomes one KDE evaluation point.
    eval_points = (get_marks_by_place_bin_centers(decoding_marks, place_bin_centers)
                   .astype(np.float32))
    # Encoding samples concatenate mark and position dimensions.
    encoding_samples = (np.concatenate((encoding_marks, encoding_positions), axis=1)
                        .astype(np.float32))
    n_decoding_spikes, n_marks = decoding_marks.shape
    n_bins, n_position_dims = place_bin_centers.shape
    n_eval_points = len(eval_points)
    pdf = np.empty((n_eval_points,), dtype=np.float32)
    # Page-lock the host arrays so the copies below can run asynchronously.
    with cuda.pinned(eval_points, encoding_samples, bandwidths, pdf):
        # Copy the arrays to the GPU
        d_eval_points = cuda.to_device(eval_points, stream=stream)
        d_encoding_samples = cuda.to_device(encoding_samples, stream=stream)
        d_bandwidths = cuda.to_device(bandwidths, stream=stream)
        # Allocate memory on the GPU for the result
        d_pdf = cuda.device_array_like(pdf, stream=stream)
    return (d_eval_points, d_encoding_samples, d_bandwidths, d_pdf,
            n_decoding_spikes, pdf)
def estimate_multiunit_likelihood_gpu_pinned_arrays(multiunits,
                                                    encoding_marks,
                                                    mark_std,
                                                    place_bin_centers,
                                                    encoding_positions,
                                                    position_std,
                                                    occupancy,
                                                    mean_rates,
                                                    summed_ground_process_intensity,
                                                    is_track_interior=None,
                                                    time_bin_size=1,
                                                    n_streams=16):
    '''Estimate the marked point-process log likelihood on the GPU.

    Uses pinned (page-locked) host arrays and several CUDA streams so that
    host-to-device copies and kernel launches for different electrodes can
    overlap.

    Parameters
    ----------
    multiunits : ndarray, shape (n_time, n_marks, n_electrodes)
    encoding_marks : list of ndarrays, len (n_electrodes,)
    mark_std : float
    place_bin_centers : ndarray, shape (n_bins, n_position_dims)
    encoding_positions : list of ndarrays, len (n_electrodes,)
    position_std : float
    occupancy : ndarray, (n_bins,)
    mean_rates : list, len (n_electrodes,)
    summed_ground_process_intensity : ndarray, shape (n_bins,)
    is_track_interior : None or ndarray, shape (n_bins,), optional
    time_bin_size : float, optional
    n_streams : int, optional
        Maximum number of concurrent CUDA streams.

    Returns
    -------
    log_likelihood : ndarray, shape (n_time, n_bins)
        Bins outside the track interior are set to NaN.
    '''
    if is_track_interior is None:
        # BUGFIX: `np.bool` was deprecated in NumPy 1.20 and removed in
        # 1.24 -- use the builtin `bool` instead.
        is_track_interior = np.ones((place_bin_centers.shape[0],),
                                    dtype=bool)
    n_time, n_marks, n_electrodes = multiunits.shape
    # No-spike contribution of the ground process, identical for all times.
    log_likelihood = (-time_bin_size * summed_ground_process_intensity *
                      np.ones((n_time, 1)))
    streams = [cuda.stream() for _ in range(min(n_streams, n_electrodes))]
    n_position_dims = place_bin_centers.shape[1]
    # One bandwidth per mark dimension followed by one per position dimension.
    bandwidths = (np.concatenate(
        ([mark_std] * n_marks,
         [position_std] * n_position_dims,
         )
    ).astype(np.float32))
    # Stage 1: pin host arrays and start asynchronous copies per electrode.
    device_arrays = []
    with cuda.defer_cleanup():
        for elec_ind, (multiunit, enc_marks, enc_pos, mean_rate) in enumerate(zip(
                np.moveaxis(multiunits, -1, 0), encoding_marks, encoding_positions, mean_rates)):
            nan_multiunit = np.isnan(multiunit)
            is_spike = np.any(~nan_multiunit, axis=1)
            nan_mark_dims = np.all(nan_multiunit, axis=0)
            if is_spike.sum() > 0:
                device_arrays.append(pin_arrays(
                    multiunit[np.ix_(is_spike, ~nan_mark_dims)],
                    enc_marks,
                    place_bin_centers[is_track_interior],
                    enc_pos,
                    bandwidths,
                    stream=streams[elec_ind % n_streams]
                ))
            else:
                # Electrode with no spikes: keep list positions aligned.
                device_arrays.append([[], [], [], [], [], []])
        # Stage 2: run the KDE kernel for every electrode with spikes.
        for elec_ind, (d_eval_points, d_encoding_samples, d_bandwidths, d_pdf,
                       _, _) in enumerate(device_arrays):
            if len(d_eval_points) > 0:
                stream = streams[elec_ind % n_streams]
                n_eval_points = d_eval_points.shape[0]
                kde1.forall(n_eval_points, stream=stream)(
                    d_eval_points, d_encoding_samples, d_bandwidths, d_pdf)
        # Stage 3: copy results back into the pinned host buffers.
        for elec_ind, (_, _, _, d_pdf, _, pdf) in enumerate(
                device_arrays):
            if len(d_pdf) > 0:
                stream = streams[elec_ind % n_streams]
                d_pdf.copy_to_host(pdf, stream=stream)
    # BUGFIX: `copy_to_host` with a stream is asynchronous; make sure all
    # copies have completed before the host buffers are read below (the
    # original read `pdf` without synchronizing).
    cuda.synchronize()
    # Stage 4: accumulate per-electrode log intensities into the likelihood.
    n_bins = np.sum(is_track_interior)
    for elec_ind, ((_, _, _, _, n_decoding_spikes, pdf), multiunit, mean_rate) in enumerate(
            zip(device_arrays, np.moveaxis(multiunits, -1, 0), mean_rates)):
        is_spike = np.any(~np.isnan(multiunit), axis=1)
        if is_spike.sum() > 0:
            log_intensity = (
                estimate_log_intensity(
                    pdf.reshape((n_decoding_spikes, n_bins), order='F') + EPS,
                    occupancy[is_track_interior] + EPS,
                    mean_rate))
            # Guard rows that underflowed to -inf everywhere.
            is_inf = np.all(np.isneginf(log_intensity), axis=1)
            log_intensity[is_inf] = np.spacing(1)
            log_likelihood[np.ix_(
                is_spike, is_track_interior)] += log_intensity
    log_likelihood[:, ~is_track_interior] = np.nan
    return log_likelihood
def estimate_intensity(density, occupancy, mean_rate):
    '''Ground-process intensity on the linear scale.

    Exponentiates `estimate_log_intensity`, i.e. returns
    ``mean_rate * density / occupancy`` computed in log space.

    Parameters
    ----------
    density : ndarray, shape (n_bins,)
    occupancy : ndarray, shape (n_bins,)
    mean_rate : float

    Returns
    -------
    intensity : ndarray, shape (n_bins,)
    '''
    log_intensity = estimate_log_intensity(density, occupancy, mean_rate)
    return np.exp(log_intensity)
def fit_multiunit_likelihood_gpu_pinned_arrays(position,
                                               multiunits,
                                               place_bin_centers,
                                               mark_std,
                                               position_std,
                                               is_track_interior=None,
                                               **kwargs):
    '''Fit the encoding model for the GPU multiunit likelihood.

    Parameters
    ----------
    position : ndarray, shape (n_time, n_position_dims)
    multiunits : ndarray, shape (n_time, n_marks, n_electrodes)
    place_bin_centers : ndarray, shape ( n_bins, n_position_dims)
    mark_std : float
    position_std : float
    is_track_interior : None or ndarray, shape (n_bins,)
    **kwargs
        Extra entries copied verbatim into the returned encoding model.

    Returns
    -------
    encoding_model : dict
        Keys: encoding_marks, encoding_positions,
        summed_ground_process_intensity, occupancy, mean_rates, mark_std,
        position_std, plus any **kwargs.
    '''
    if is_track_interior is None:
        # BUGFIX: `np.bool` was deprecated in NumPy 1.20 and removed in
        # 1.24 -- use the builtin `bool` instead.
        is_track_interior = np.ones((place_bin_centers.shape[0],),
                                    dtype=bool)
    position = atleast_2d(position)
    place_bin_centers = atleast_2d(place_bin_centers)
    not_nan_position = np.all(~np.isnan(position), axis=1)
    # Occupancy: position density evaluated only on interior track bins.
    occupancy = np.zeros((place_bin_centers.shape[0],), dtype=np.float32)
    occupancy[is_track_interior] = estimate_position_density(
        place_bin_centers[is_track_interior],
        position[not_nan_position],
        position_std)
    mean_rates = []
    ground_process_intensities = []
    encoding_marks = []
    encoding_positions = []
    for multiunit in np.moveaxis(multiunits, -1, 0):
        # Ground process intensity per electrode: a spike is any time bin
        # with at least one non-NaN mark dimension.
        nan_multiunit = np.isnan(multiunit)
        is_spike = np.any(~nan_multiunit, axis=1)
        nan_mark_dims = np.all(nan_multiunit, axis=0)
        mean_rates.append(is_spike.mean())
        marginal_density = np.zeros(
            (place_bin_centers.shape[0],), dtype=np.float32)
        if is_spike.sum() > 0:
            marginal_density[is_track_interior] = estimate_position_density(
                place_bin_centers[is_track_interior],
                position[is_spike & not_nan_position], position_std)
        # eps keeps downstream log() finite for zero-density bins.
        ground_process_intensities.append(
            estimate_intensity(marginal_density, occupancy, mean_rates[-1])
            + np.finfo(np.float32).eps)
        encoding_marks.append(
            multiunit[np.ix_(is_spike & not_nan_position, ~nan_mark_dims)
                      ].astype(np.float32))
        encoding_positions.append(position[is_spike & not_nan_position])
    summed_ground_process_intensity = np.sum(
        np.stack(ground_process_intensities, axis=0), axis=0, keepdims=True)
    return {
        'encoding_marks': encoding_marks,
        'encoding_positions': encoding_positions,
        'summed_ground_process_intensity': summed_ground_process_intensity,
        'occupancy': occupancy,
        'mean_rates': mean_rates,
        'mark_std': mark_std,
        'position_std': position_std,
        **kwargs,
    }
| [
11748,
10688,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
997,
7012,
1330,
269,
15339,
198,
6738,
997,
7012,
13,
19199,
1330,
12178,
2624,
198,
6738,
24788,
62,
9535,
752,
652,
62,
4871,
2649,
13,
65,
1040,
1330,
379,
293,
459... | 1.987469 | 5,985 |
import pandas as pd
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import r2_score
import pickle
def train_model(
    xtrain_df, ytrain_df, xtest_df, ytest_df, num_params=500, max_dp=6,
    min_samp_splt=2, lrn_rt=0.01, losss='ls', feat_filename=None
):
    """Train a Gradient Boosting Regressor and return its test R^2 score.

    Fits scikit-learn's GradientBoostingRegressor on the training frames,
    scores it on the test frames, and pickles the fitted model to
    'trained_model.sav'.  Optionally writes a normalized feature-importance
    list to a user-named CSV file.

    Parameters
    ----------
    xtrain_df, ytrain_df, xtest_df, ytest_df : pandas.DataFrame
        Training/testing features and targets.
    num_params : int
        Number of boosting stages ('n_estimators').
    max_dp : int
        Maximum tree depth.
    min_samp_splt : int
        Minimum samples required to split a node.
    lrn_rt : float
        Learning rate.
    losss : str
        Loss function name.  NOTE: 'ls' was renamed 'squared_error' in
        scikit-learn >= 1.0 -- confirm against the pinned sklearn version.
    feat_filename : str or None
        If given, must be a .csv path; a feature-importance list is written.

    Returns
    -------
    float
        R^2 score of the trained model on the test set.
    """
    # Reshape the inputs into numpy arrays for scikit-learn.
    xtrain = xtrain_df.values
    ytrain = ytrain_df.values
    xtest = xtest_df.values
    ytest = ytest_df.values
    # Defining the parameters of GBM
    params = {
        'n_estimators': num_params, 'max_depth': max_dp, 'min_samples_split':
        min_samp_splt, 'learning_rate': lrn_rt, 'loss': losss
    }
    model = GradientBoostingRegressor(**params)
    model.fit(xtrain, ytrain)
    ypred = model.predict(xtest)
    test_score = r2_score(ytest, ypred)
    # BUGFIX: persist the model with a context manager so the file handle is
    # closed (the original passed an anonymous open() to pickle.dump).
    filename = 'trained_model.sav'
    with open(filename, 'wb') as model_file:
        pickle.dump(model, model_file)
    # Optional feature-importance export; both branches previously returned
    # the same value, so the redundant else was removed.
    if feat_filename:
        assert isinstance(feat_filename, str)
        feature_importance_list(feat_filename, xtest_df)
    return test_score
def predict_model():
    """Unpickle the model saved by ``train_model`` and return it.

    Returns
    -------
    object
        The estimator stored in 'trained_model.sav'.
    """
    filename = 'trained_model.sav'
    # BUGFIX: use a context manager so the file handle is closed (the
    # original passed an anonymous open() to pickle.load and leaked it).
    with open(filename, 'rb') as model_file:
        return pickle.load(model_file)
def feature_importance_list(feat_filename, xtest_df):
    """Write relative feature importances of the saved model to a CSV file.

    Loads the pickled model from 'trained_model.sav', scales each feature's
    importance to a percentage of the largest one, and writes the feature
    names (sorted ascending by importance) with their scaled scores.

    Parameters
    ----------
    feat_filename : str
        Output path; must end in '.csv'.
    xtest_df : pandas.DataFrame
        Test-feature frame; its column names label the importances.

    Raises
    ------
    ValueError
        If ``feat_filename`` is not a .csv path.  (Was a bare ``Exception``;
        ValueError is more specific and still an Exception subclass.)
    """
    # BUGFIX: `'.csv' in feat_filename` accepted names like 'x.csv.bak';
    # require the extension to actually end the filename.
    if not feat_filename.endswith('.csv'):
        raise ValueError(
            'Error: Feature importance file must be a .csv file.'
        )
    filename = 'trained_model.sav'
    # BUGFIX: close the model file deterministically via a context manager.
    with open(filename, 'rb') as model_file:
        new_model = pickle.load(model_file)
    feature_importance = new_model.feature_importances_
    # Scale to a percentage of the most important feature.
    feature_importance = 100.0 * (
        feature_importance / feature_importance.max()
    )
    sorted_idx = np.argsort(feature_importance)
    names = xtest_df.columns[sorted_idx]
    scores = feature_importance[sorted_idx]
    df_feats = pd.DataFrame({'Feature Name': names, 'Feature Importance': scores})
    df_feats.to_csv(feat_filename)
    print(
        "The feature importance list was created as", feat_filename
    )
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
1072,
11306,
1330,
17701,
1153,
45686,
278,
8081,
44292,
198,
6738,
1341,
35720,
13,
4164,
10466,
1330,
374,
17,
62,
26675,
198,
11748,
229... | 2.631579 | 1,083 |
import os
from setuptools import setup, find_packages
# Use the package README as the long description shown on package indexes.
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.txt')) as f:
    README = f.read()
# Packaging metadata for the f5_aws deployment tooling.
setup(name='f5_aws',
      version='1.0.5',
      description='Code to deploy BIG-IP, network, and applications in AWS VPC',
      long_description=README,
      classifiers=[
          "Development Status :: 3 - Alpha",
          "Programming Language :: Python",
      ],
      author='Chris Mutzel, Alex Applebaum',
      author_email='c.mutzel@f5.com, a.applebaum@f5.com',
      zip_safe=False,
      include_package_data=True,
      packages=find_packages()
      )
| [
11748,
28686,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
1456,
796,
28686,
13,
6978,
13,
397,
2777,
776,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
4008,
198,
4480,
1280,
7,
418,
13,
6... | 2.368613 | 274 |
from typing import Optional, Dict, Text, Any, List, Union
from rasa.core.channels import OutputChannel, UserMessage
from kairon import Utility
from kairon.chat.agent_processor import AgentProcessor
from kairon.chat.handlers.channels.clients.whatsapp import WhatsappClient
from kairon.chat.handlers.channels.messenger import MessengerHandler
import json
import logging
from http import HTTPStatus
import html
from tornado.escape import json_decode
from kairon.shared.chat.processor import ChatDataProcessor
logger = logging.getLogger(__name__)
class Whatsapp:
    """Whatsapp input channel to parse incoming webhooks and send msgs."""

    def __init__(self, page_access_token: Text) -> None:
        """Init whatsapp input channel."""
        self.page_access_token = page_access_token
        # Cache of the most recently handled webhook payload.
        self.last_message: Dict[Text, Any] = {}

    @classmethod
    async def message(
        self, message: Dict[Text, Any], metadata: Optional[Dict[Text, Any]], bot: str
    ) -> None:
        """Handle an incoming event from the whatsapp webhook.

        NOTE(review): declared ``@classmethod`` yet the first parameter is
        named ``self`` and the body reads ``self.client``, which is never
        assigned in this class -- presumably provided elsewhere; verify.
        """
        # quick reply and user message both share 'text' attribute
        # so quick reply should be checked first
        if message.get("type") == "button":
            text = message["button"]["payload"]
        elif message.get("type") == "text":
            text = message["text"]['body']
        elif message.get("type") in {"image", "audio", "document", "video"}:
            # Media messages: resolve the attachment to a downloadable URL.
            attachment_info = self.client.get_attachment(message[message["type"]]['id'])
            text = attachment_info.get("url")
            if Utility.check_empty_string(text):
                logger.warning(f"Unable to find url for attachment. Message: {attachment_info}")
        else:
            logger.warning(f"Received a message from whatsapp that we can not handle. Message: {message}")
            return
        # NOTE(review): the full `message` dict is forwarded as metadata;
        # the `metadata` parameter of this method is unused here.
        await self._handle_user_message(text, message["from"], message, bot)

    async def _handle_user_message(
        self, text: Text, sender_id: Text, metadata: Optional[Dict[Text, Any]], bot: str
    ) -> None:
        """Pass on the text to the dialogue engine for processing.

        NOTE(review): ``self.name`` and ``self.process_message`` are not
        defined in this class in this view -- presumably supplied by a
        subclass or mixin; verify.
        """
        out_channel = WhatsappBot(self.client)
        # Mark the inbound message as read before responding.
        await out_channel.mark_as_read(metadata["id"])
        user_msg = UserMessage(
            text, out_channel, sender_id, input_channel=self.name(), metadata=metadata
        )
        try:
            await self.process_message(bot, user_msg)
        except Exception:
            logger.exception("Exception when trying to handle webhook for whatsapp message.")
@staticmethod
class WhatsappBot(OutputChannel):
    """A bot that uses whatsapp to communicate."""

    def __init__(self, whatsapp_client: WhatsappClient) -> None:
        """Init whatsapp output channel.

        BUGFIX: ``__init__`` was decorated with ``@classmethod``, which made
        every instantiation raise ``TypeError`` (the implicitly bound class
        plus the new instance gave the initializer one positional argument
        too many).  The decorator is removed so attributes are set on the
        instance as intended.
        """
        self.whatsapp_client = whatsapp_client
        super().__init__()

    def send(self, recipient_id: Text, element: Any) -> None:
        """Sends a message to the recipient using the messenger client."""
        # this is a bit hacky, but the client doesn't have a proper API to
        # send messages but instead expects the incoming sender to be present
        # which we don't have as it is stored in the input channel.
        self.whatsapp_client.send(element, recipient_id, "text")

    async def send_text_message(
        self, recipient_id: Text, text: Text, **kwargs: Any
    ) -> None:
        """Send a message through this channel."""
        self.send(recipient_id, {"preview_url": True, "body": text})

    async def send_image_url(
        self, recipient_id: Text, image: Text, **kwargs: Any
    ) -> None:
        """Sends an image. Default will just post the url as a string."""
        # NOTE(review): the `image` argument is ignored; only kwargs["link"]
        # is forwarded -- confirm callers always pass `link`.
        link = kwargs.get("link")
        self.send(recipient_id, {"link": link})

    async def mark_as_read(self, msg_id: Text) -> None:
        """Mark user message as read.
        Args:
            msg_id: message id
        """
        self.whatsapp_client.send_action(
            {"messaging_product": "whatsapp", "status": "read", "message_id": msg_id}
        )

    async def send_custom_json(
        self,
        recipient_id: Text,
        json_message: Union[List, Dict[Text, Any]],
        **kwargs: Any,
    ) -> None:
        """Sends custom json data to the output."""
        # Default to WhatsApp "interactive" messages when no type is given.
        messaging_type = kwargs.get("messaging_type") or "interactive"
        self.whatsapp_client.send(json_message, recipient_id, messaging_type)
class WhatsappHandler(MessengerHandler):
"""Whatsapp input channel implementation. Based on the HTTPInputChannel."""
| [
6738,
19720,
1330,
32233,
11,
360,
713,
11,
8255,
11,
4377,
11,
7343,
11,
4479,
198,
198,
6738,
374,
15462,
13,
7295,
13,
354,
8961,
1330,
25235,
29239,
11,
11787,
12837,
198,
198,
6738,
479,
958,
261,
1330,
34030,
198,
6738,
479,
9... | 2.607653 | 1,751 |
import copy
import json
from decimal import Decimal
from typing import Optional
from enum import Enum
import boto3
from boto3.dynamodb.types import TypeSerializer, TypeDeserializer
from botocore import exceptions
| [
11748,
4866,
198,
11748,
33918,
198,
6738,
32465,
1330,
4280,
4402,
198,
6738,
19720,
1330,
32233,
198,
6738,
33829,
1330,
2039,
388,
198,
198,
11748,
275,
2069,
18,
198,
6738,
275,
2069,
18,
13,
67,
4989,
375,
65,
13,
19199,
1330,
59... | 3.666667 | 60 |
# -*- coding: utf-8 -*-
"""
Script to run zope.testrunner in a gevent monkey-patched environment.
Using ``python -m gevent.monkey zope-testrunner ...`` is insufficient.
This is because up through 1.5a2 there is a serious bug in the way the
monkey-patcher patches the spawned process. The net effect is that the gevent
threadpool isn't functional.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Patch the standard library before anything else is imported.
import gevent.monkey
gevent.monkey.patch_all()
# pylint:disable=wrong-import-position, wrong-import-order
import sys
from zope.testrunner import run
# Force our fixed arguments ahead of whatever the caller passed.
sys.argv[:] = [
    'zope-testrunner',
    '--test-path=src',
    '-v',
    '--color',
] + sys.argv[1:]
# Echo the effective command line for debuggability.
print(sys.argv)
run()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
7391,
284,
1057,
1976,
3008,
13,
9288,
16737,
287,
257,
4903,
1151,
21657,
12,
8071,
1740,
2858,
13,
198,
198,
12814,
7559,
29412,
532,
76,
4903,
1151,
13,
... | 2.975904 | 249 |
# MMDetection config: Mask R-CNN (ResNet-50 + FPN) on the HULA compartment
# instance-segmentation dataset, composed from the shared _base_ configs.
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn_HULA_compartment.py',
    '../_base_/datasets/HULA_compartment_instance.py',
    '../_base_/schedules/schedule_1x_HULA_maskrcnn.py',
    '../_base_/default_runtime.py'
]
# Where checkpoints and logs for this run are written.
work_dir = "/data/syed/mmdet/run15_maskrcnn_customaug/"
# Single-GPU training; fixed seed for reproducibility.
gpu_ids = range(0, 1)
seed = 0
| [
62,
8692,
62,
796,
685,
198,
220,
220,
220,
705,
40720,
62,
8692,
62,
14,
27530,
14,
27932,
62,
6015,
20471,
62,
81,
1120,
62,
69,
21999,
62,
39,
6239,
32,
62,
5589,
1823,
13,
9078,
3256,
198,
220,
220,
220,
705,
40720,
62,
8692... | 2.053333 | 150 |
"""
# Definition for a Node.
class Node:
def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):
self.val = val
self.left = left
self.right = right
self.next = next
"""
| [
37811,
198,
2,
30396,
329,
257,
19081,
13,
198,
4871,
19081,
25,
198,
220,
220,
220,
825,
11593,
15003,
834,
7,
944,
11,
1188,
25,
493,
796,
657,
11,
1364,
25,
705,
19667,
6,
796,
6045,
11,
826,
25,
705,
19667,
6,
796,
6045,
11,... | 2.296296 | 108 |
#
# Author: Salo Shp <SaloShp@Gmail.Com>
#
from __future__ import absolute_import
# Package version, exposed both as a tuple and as a dotted string.
VERSION = (1, 0, 0, 0)
__version__ = VERSION
__versionstr__ = '.'.join(map(str, VERSION))
import platform
import sys
import subprocess
# NOTE(review): wildcard import -- presumably provides parse_cli_args,
# init_logging, init_config, init_elastic and `logging`; verify.
from .utils import *
args = parse_cli_args()
logger = init_logging()
cfg = init_config(args)
# Read the hardware UUID via dmidecode (typically requires root).
hwuuid_proc = subprocess.Popen('dmidecode -s system-uuid'.split(),stdout=subprocess.PIPE)
hwuuid = hwuuid_proc.stdout.readline().rstrip().decode("utf-8").lower()
if cfg.debug:
  logger.setLevel(logging.DEBUG)
logger.debug("Configuration - '{}'".format(cfg.__dict__))
# Snapshot of the host platform for reporting.
# NOTE(review): platform.dist() / platform.linux_distribution() were removed
# in Python 3.8 -- this module assumes an older interpreter; verify.
hostplatform = dict()
hostplatform['sysversion'] = sys.version.split('\n')
hostplatform['dist'] = platform.dist()
hostplatform['linux_distribution'] = platform.linux_distribution()
hostplatform['system'] = platform.system()
hostplatform['machine'] = platform.machine()
hostplatform['platform'] = platform.platform()
hostplatform['uname'] = platform.uname()
hostplatform['version'] = platform.version()
hostplatform['mac_ver'] = platform.mac_ver()
hostplatform['hwuuid'] = hwuuid
es = init_elastic(cfg)
###
logger.info("Initiated - %s" % hostplatform)
| [
2,
198,
2,
6434,
25,
4849,
78,
911,
79,
1279,
50,
7335,
2484,
79,
31,
38,
4529,
13,
5377,
29,
198,
2,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
43717,
796,
357,
16,
11,
657,
11,
657,
11,
657,
8,
198,
... | 2.915601 | 391 |
from discord import Message, Embed, Color, TextChannel, Role, utils
from discord.ext import commands
from discord.ext.commands import Context as CommandContext
from peewee import ModelSelect
import checks
from awaiter import AdvancedAwaiter, AwaitCanceled, AwaitTimedOut
from database import *
| [
6738,
36446,
1330,
16000,
11,
13302,
276,
11,
5315,
11,
8255,
29239,
11,
20934,
11,
3384,
4487,
201,
198,
6738,
36446,
13,
2302,
1330,
9729,
201,
198,
6738,
36446,
13,
2302,
13,
9503,
1746,
1330,
30532,
355,
9455,
21947,
201,
198,
673... | 3.517241 | 87 |
import shapefile
import gmsh
import sys
import meshio
# A class holding the intersection point coordinates between two attributes is created.
# A database class is defined in order to characterize the imported geometry (shapefile).
| [
11748,
5485,
7753,
220,
220,
220,
220,
198,
11748,
308,
907,
71,
198,
11748,
25064,
198,
11748,
19609,
952,
198,
220,
220,
220,
220,
628,
198,
2,
317,
1398,
351,
262,
36177,
276,
966,
22715,
1022,
734,
379,
81,
7657,
318,
2727,
220,... | 3.746032 | 63 |
#!/usr/bin/env python
from setuptools import setup, find_packages
# Packaging metadata for the LR2 Internet Ranking scraper library.
setup(
    name='lr2irscraper',
    version='2.2',
    description='LR2 Internet Ranking Scraper Library',
    author='nakt',
    author_email='nakt_azdim@walkure.net',
    install_requires=['pandas>=0.21', 'requests', 'lxml'],
    python_requires='>=3.7',
    packages=find_packages(exclude=('tests', 'docs')),
    test_suite='tests'
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
14050,
17,
343,
1416,
38545,
3256,
198,
220,
220,
220,
2196,
11639,
17,
... | 2.463415 | 164 |
# Libraries
from selenium import webdriver
import json
import codecs
from time import sleep
# Import keys: unit-name -> unit-id mapping produced by an earlier step.
with codecs.open("scraper_siglas-uc/files/docs.json", "rU", "utf-8") as js:
    data = json.load(js)[0]
# Url generator: Buscacursos search for semester 2022-1, filtered by
# academic unit id.
base_url = lambda ID: f"https://buscacursos.uc.cl/?cxml_semestre=2022-1&cxml_sigla=&cxml_nrc=&cxml_nombre=&cxml_categoria=TODOS&cxml_area_fg=TODOS&cxml_formato_cur=TODOS&cxml_profesor=&cxml_campus=TODOS&cxml_unidad_academica={ID}&cxml_horario_tipo_busqueda=si_tenga&cxml_horario_tipo_busqueda_actividad=TODOS#resultados"
# Init webdriver
options = webdriver.ChromeOptions()
options.add_argument("--incognito")
driver = webdriver.Chrome(executable_path="scraper_siglas-uc/webdriver/chromedriver.exe", options=options)
# Scrape siglas (course codes) for each academic unit.
lista_siglas = dict()
for esc, num in data.items():
    driver.get(base_url(num))
    # Fixed wait for the results table to render.
    # NOTE(review): an explicit WebDriverWait would be more robust than sleep.
    sleep(5)
    siglas = driver.find_elements_by_xpath("//*[@id='wrapper']/div/div/div[3]/table/tbody/tr/td[2]/div")
    lista_siglas[esc] = [sigla.text for sigla in siglas]
    print(esc, ":", [sigla.text for sigla in siglas])
# Close driver
driver.close()
# Save results
with codecs.open("scraper_siglas-uc/outputs/siglas.json", "w", "utf-8") as js:
    json.dump(lista_siglas, js)
2,
46267,
198,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
11748,
33918,
198,
11748,
40481,
82,
198,
6738,
640,
1330,
3993,
198,
198,
2,
17267,
8251,
198,
198,
4480,
40481,
82,
13,
9654,
7203,
1416,
38545,
62,
82,
328,
21921,... | 2.317164 | 536 |
#! /usr/bin/env python 
# -*- coding: utf-8
"""
Python implementation of Non-Stationary Gabor Transform (NSGT)
derived from MATLAB code by NUHAG, University of Vienna, Austria

Thomas Grill, 2011-2015
http://grrrr.org/nsgt

Round-trip demo: forward NSGT of an audio file, inverse transform,
reconstruction-error report, optional re-synthesis and plotting.
NOTE(review): this is a Python 2 script (print statements, imap, xrange).
"""
import numpy as np
from nsgt import NSGT,LogScale,LinScale,MelScale,OctScale
from scikits.audiolab import Sndfile,Format
import os
from itertools import imap
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("input", type=str, help="input audio file")
parser.add_argument("--output", type=str, help="output audio file")
parser.add_argument("--fmin", type=float, default=80, help="minimum frequency (default=%(default)s)")
parser.add_argument("--fmax", type=float, default=22050, help="maximum frequency (default=%(default)s)")
parser.add_argument("--scale", choices=("oct","log","mel"), default='oct', help="frequency scale (oct,log,mel)")
parser.add_argument("--bins", type=int, default=24, help="frequency bins (total or per octave, default=%(default)s)")
parser.add_argument("--real", action='store_true', help="assume real signal")
parser.add_argument("--matrixform", action='store_true', help="use regular time division (matrix form)")
parser.add_argument("--reducedform", action='store_true', help="if real==1: omit bins for f=0 and f=fs/2 (lossy=1), or also the transition bands (lossy=2)")
parser.add_argument("--time", type=int, default=1, help="timing calculation n-fold (default=%(default)s)")
parser.add_argument("--plot", action='store_true', help="plot results (needs installed matplotlib and scipy packages)")
args = parser.parse_args()
if not os.path.exists(args.input):
    parser.error("Input file '%s' not found"%args.input)
# Read audio data
sf = Sndfile(args.input)
fs = sf.samplerate
s = sf.read_frames(sf.nframes)
# Downmix multichannel audio to mono.
if len(s.shape) > 1: 
    s = np.mean(s, axis=1)
scales = {'log':LogScale,'lin':LinScale,'mel':MelScale,'oct':OctScale}
try:
    scale = scales[args.scale]
except KeyError:
    parser.error('scale unknown')
scl = scale(args.fmin, args.fmax, args.bins)
times = []
for _ in xrange(args.time or 1):
    # NOTE(review): cputime() is never imported or defined in this file --
    # NameError at runtime; presumably a timing helper; verify.
    t1 = cputime()
    # calculate transform parameters
    Ls = len(s)
    nsgt = NSGT(scl, fs, Ls, real=args.real, matrixform=args.matrixform, reducedform=args.reducedform)
    # forward transform 
    c = nsgt.forward(s)
    # c = N.array(c)
    # print "c",len(c),N.array(map(len,c))
    # inverse transform 
    s_r = nsgt.backward(c)
    t2 = cputime()
    times.append(t2-t1)
norm = lambda x: np.sqrt(np.sum(np.abs(np.square(x))))
rec_err = norm(s-s_r)/norm(s)
print "Reconstruction error: %.3e"%rec_err
print "Calculation time: %.3f±%.3fs (min=%.3f s)"%(np.mean(times),np.std(times)/2,np.min(times))
if args.output:
    print "Writing audio file '%s'"%args.output
    sf = Sndfile(args.output, mode='w', format=Format('wav','pcm24'), channels=1, samplerate=fs)
    sf.write_frames(s_r)
    sf.close()
    print "Done"
if args.plot:
    print "Preparing plot"
    import matplotlib.pyplot as pl
    # interpolate CQT to get a grid
    # NOTE(review): interpolate() is not imported/defined here -- verify.
    x = np.linspace(0, Ls, 2000)
    hf = -1 if args.real else len(c)//2
    grid = interpolate(imap(np.abs, c[2:hf]), Ls)(x)
    # display grid
    pl.imshow(np.log(np.flipud(grid.T)), aspect=float(grid.shape[0])/grid.shape[1]*0.5, interpolation='nearest')
    print "Plotting"
    pl.show()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
220,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
37906,
7822,
286,
8504,
12,
12367,
560,
402,
4820,
26981,
357,
8035,
19555,
8,
198,
34631,
422,
36775,
4878... | 2.578544 | 1,305 |
from pathlib import Path
from musicscore.musicstream.streamvoice import SimpleFormat
from musicscore.musictree.treescoretimewise import TreeScoreTimewise
from musicxmlunittest import XMLTestCase
path = Path(__file__)
| [
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
1928,
873,
7295,
13,
28965,
5532,
13,
5532,
38888,
1330,
17427,
26227,
198,
6738,
1928,
873,
7295,
13,
14664,
713,
631,
13,
33945,
3798,
9997,
320,
413,
786,
1330,
12200,
26595,
14967,
413,... | 3.4 | 65 |
"""
Import the local file cars.csv and split the data set equally into test set
and training set.
"""
import pandas as pd
#version 01
# Importing the dataset
dataset = pd.read_csv('cars.csv')
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
dataset_train, dataset_test = train_test_split(dataset, test_size = 0.5, random_state = 0)
dataset_train.to_csv('dataset_train.csv', index = False)
dataset_test.to_csv("dataset_test.csv", index = False)
#version 02:
# Importing the dataset
dataset = pd.read_csv('cars.csv')
features = dataset.iloc[:, 1:].values
labels = dataset.iloc[:, 0].values #price is the labels
#print (dataset.columns.tolist())
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
features_train, features_test, labels_train, labels_test = train_test_split(features, labels, test_size = 0.5, random_state = 0)
| [
37811,
198,
20939,
262,
1957,
2393,
5006,
13,
40664,
290,
6626,
262,
1366,
900,
8603,
656,
1332,
900,
220,
198,
392,
3047,
900,
13,
220,
628,
198,
37811,
628,
198,
11748,
19798,
292,
355,
279,
67,
628,
198,
2,
9641,
5534,
198,
2,
... | 2.963636 | 330 |
# Generated by Django 2.0 on 2018-01-29 22:37
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
319,
2864,
12,
486,
12,
1959,
2534,
25,
2718,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
628
] | 2.904762 | 42 |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2019 Giuliano França
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
====================================================================================================
Disclaimer:
THIS PLUGIN IS JUST A PROTOTYPE. YOU MUST USE THE C++ RELEASE PLUGIN FOR PRODUCTION.
YOU CAN FIND THE C++ RELEASE PLUGIN FOR YOUR SPECIFIC PLATFORM IN RELEASES FOLDER:
"gfTools > plug-ins > release"
How to use:
* Copy the parent folder to the MAYA_SCRIPT_PATH.
* To find MAYA_SCRIPT_PATH paste this command in a Python tab:
import os; os.environ["MAYA_SCRIPT_PATH"].split(";")
* In Maya, go to Windows > Settings/Preferences > Plug-in Manager.
* Browse for "gfTools > plug-ins > dev > python"
* Find gfTools_P.py and import it.
Requirements:
* Maya 2017 or above.
Description:
Convert vector to euler rotation.
Attributes:
* Vector: The vector to be converted to euler rotation.
* Out Euler: The output euler rotation converted from an vector.
Todo:
* NDA
Sources:
* NDA
This code supports Pylint. Rc file in project.
"""
import math
import maya.api._OpenMaya_py2 as om2
def maya_useNewAPI():
    """ Function to Maya recognize the use of the Python API 2.0. """
    # pylint: disable=invalid-name, unnecessary-pass
    # The mere presence of this symbol tells Maya's plug-in loader that this
    # module uses OpenMaya API 2.0; no body is required.
    pass
def INPUT_ATTR(FNATTR):
    """ Configure an input attribute: writable, readable, storable, keyable. """
    # pylint: disable=invalid-name
    for flag in ("writable", "readable", "storable", "keyable"):
        setattr(FNATTR, flag, True)
def OUTPUT_ATTR(FNATTR):
    """ Configure an output attribute: readable only, not writable/storable/keyable. """
    # pylint: disable=invalid-name
    for flag, state in (("writable", False), ("readable", True),
                        ("storable", False), ("keyable", False)):
        setattr(FNATTR, flag, state)
class VectorToEuler(om2.MPxNode):
    """ Main class of gfUtilVectorToEuler node. """

    # Node registration data.
    # NOTE(review): left empty here -- presumably filled in by the plug-in
    # registration code elsewhere; verify before registering the node.
    kNodeName = ""
    kNodeClassify = ""
    kNodeID = ""

    # Static MObjects populated by initialize().
    inVector = om2.MObject()
    outEuler = om2.MObject()
    outEulerX = om2.MObject()
    outEulerY = om2.MObject()
    outEulerZ = om2.MObject()

    def __init__(self):
        """ Constructor. """
        om2.MPxNode.__init__(self)

    @staticmethod
    def creator():
        """ Maya creator function. """
        return VectorToEuler()

    @staticmethod
    def initialize():
        """
        Defines the set of attributes for this node. The attributes declared in this function are assigned
        as static members to VectorToEuler class. Instances of VectorToEuler will use these attributes to create plugs
        for use in the compute() method.
        """
        uAttr = om2.MFnUnitAttribute()
        nAttr = om2.MFnNumericAttribute()
        VectorToEuler.inVector = nAttr.createPoint("vector", "vec")
        INPUT_ATTR(nAttr)
        # One angle unit-attribute per axis, grouped into a compound output.
        VectorToEuler.outEulerX = uAttr.create("outEulerX", "oex", om2.MFnUnitAttribute.kAngle, 0.0)
        VectorToEuler.outEulerY = uAttr.create("outEulerY", "oey", om2.MFnUnitAttribute.kAngle, 0.0)
        VectorToEuler.outEulerZ = uAttr.create("outEulerZ", "oez", om2.MFnUnitAttribute.kAngle, 0.0)
        VectorToEuler.outEuler = nAttr.create("outEuler", "oe", VectorToEuler.outEulerX, VectorToEuler.outEulerY, VectorToEuler.outEulerZ)
        OUTPUT_ATTR(nAttr)
        VectorToEuler.addAttribute(VectorToEuler.inVector)
        VectorToEuler.addAttribute(VectorToEuler.outEuler)
        VectorToEuler.attributeAffects(VectorToEuler.inVector, VectorToEuler.outEuler)

    def compute(self, plug, dataBlock):
        """
        Node computation method:
            * plug is a connection point related to one of our node attributes (either an input or an output).
            * dataBlock contains the data on which we will base our computations.
        """
        # pylint: disable=no-self-use
        vector = dataBlock.inputValue(VectorToEuler.inVector).asFloat3()
        # Treat the input components as degrees and convert to radians.
        vVector = om2.MVector([unit * (math.pi / 180.0) for unit in vector])
        eEuler = om2.MEulerRotation(vVector)
        outEulerHandle = dataBlock.outputValue(VectorToEuler.outEuler)
        outEulerHandle.set3Double(eEuler.x, eEuler.y, eEuler.z)
        outEulerHandle.setClean()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
15269,
357,
66,
8,
13130,
35267,
10115,
40178,
64,
198,
198,
36393,
13789,
198,
198,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
... | 2.721897 | 1,877 |
"""Stack with a linked list"""
class Node:
    """One cell of a singly linked list: a payload plus a reference to the
    cell underneath it (``None`` terminates the chain)."""
    def __init__(self, item, next) -> None:
        """Store the payload and the link to the following node."""
        self.item = item
        self.next = next
class Stack:
    """Last in first out (LIFO) stack backed by a singly linked list.

    ``self.last`` always refers to the Node holding the most recently
    pushed item, or ``None`` when the stack is empty.
    """
    def __init__(self) -> None:
        """Create an empty stack."""
        self.last = None
    def push(self, item):
        """Put *item* on top of the stack (O(1))."""
        self.last = Node(item, self.last)
    def pop(self):
        """Remove and return the top item (O(1)).

        Raises:
            IndexError: if the stack is empty. (The original code crashed
                with an incidental AttributeError on ``None`` here; an
                explicit IndexError matches the ``list.pop`` convention.)
        """
        if self.last is None:
            raise IndexError("pop from empty stack")
        item = self.last.item
        self.last = self.last.next
        return item
| [
37811,
25896,
351,
257,
6692,
1351,
37811,
198,
198,
4871,
19081,
25,
198,
220,
220,
220,
37227,
19667,
25,
7502,
276,
8053,
37811,
198,
220,
220,
220,
825,
11593,
15003,
834,
7,
944,
11,
2378,
11,
1306,
8,
4613,
6045,
25,
198,
220,... | 2.373541 | 257 |
import os
# Run 10-fold cross-validation training on the CK+ dataset, one subprocess
# per fold (folds are numbered 1..10 by the trainer's --fold flag).
for i in range(10):
    #cmd = 'python mainpro_CK+.py --model VGG19 --bs 32 --lr 0.01 --fold %d' %(i+1)
    #cmd = 'python mainpro_CK+.py --model VGG19 --bs 64 --lr 0.01 --fold %d' %(i+1)
    #cmd = 'python mainpro_CK+.py --model Resnet18 --bs 64 --lr 0.01 --fold %d' %(i+1)
    cmd = 'python mainpro_CK+.py --model Resnet34 --bs 64 --lr 0.01 --fold %d' %(i+1)
    os.system(cmd)
# BUG FIX: the active command trains Resnet34, but the original message
# claimed VGG19 had been trained (stale copy from the commented-out variant).
print("Train Resnet34 ok!")
| [
11748,
28686,
198,
198,
1640,
1312,
287,
2837,
7,
940,
2599,
198,
220,
220,
220,
1303,
28758,
796,
705,
29412,
1388,
1676,
62,
34,
42,
27613,
9078,
1377,
19849,
569,
11190,
1129,
1377,
1443,
3933,
1377,
14050,
657,
13,
486,
1377,
1137... | 2.183246 | 191 |
from typing import List
| [
6738,
19720,
1330,
7343,
628,
198
] | 4.333333 | 6 |
import sympy
import itertools
def functionstr(var, expr):
    """
    Return a string that defines a function ``D`` that can evaluate `expr` and
    its first two derivatives with respect to `var`.
    The returned definition uses common subexpression elimination (CSE).
    Parameters
    ----------
    var : `sympy.Symbol` or str
        The function's variable.
    expr : `sympy.Expression` or str or float
        SymPy-compatible expression. Any free symbols other than `var` will be
        taken as parameters that must be in scope when the returned code is
        executed. Use of special functions and constructs is not currently
        allowed.
    Returns
    -------
    str
        Python code that defines the function ``D``.
    """
    var = sympy.sympify(var)
    expr = sympy.sympify(expr)
    # exprs = [f, f', f''] — expression plus its first two derivatives.
    exprs = [expr]
    for n in range(1,3):
        exprs.append(exprs[-1].diff(var))
    # xs: list of (symbol, subexpression) pairs from CSE; exprs rewritten in
    # terms of those symbols.
    xs, exprs = sympy.cse(exprs, optimizations='basic')
    # appearance maps each CSE pair (and each (None, final-expr) sentinel) to
    # the index of the EARLIEST derivative that needs it, so its assignment
    # can be emitted just-in-time inside D's body.
    appearance = {(None, expr): n for (n, expr) in enumerate(exprs)}
    for x in reversed(xs):
        appearance[x] = 2
        for other in appearance:
            if x[0] in other[1].free_symbols:
                appearance[x] = min(appearance[x], appearance[other])
    # variable: the set of symbols that (transitively) depend on `var`;
    # everything else is constant w.r.t. `var` and is hoisted out of D.
    variable = {var}
    for x in xs:
        if (x[1].free_symbols & variable):
            variable.add(x[0])
    lines = \
        ["# - Code generated with functionstr() from ../symbolic/generate.py - #"]
    # Hoist var-independent CSE assignments to module level (computed once).
    for x in xs:
        if x[0] not in variable:
            lines.append("{} = {}".format(*x))
    lines.append("def D({}, derivatives=0):".format(var))
    # NOTE: _derivative_names is defined elsewhere in this module — presumably
    # it yields names like f, dfdx, d2fdx2 for the given variable; confirm.
    deriv_names = [name for name,_ in zip(_derivative_names(var), range(3))]
    for n, (name, expr) in enumerate(zip(deriv_names, exprs)):
        # Emit the var-dependent CSE assignments first needed at level n,
        # then the n-th derivative itself, then an early-return branch.
        for x in xs:
            if x[0] in variable and appearance[x] == n:
                lines.append("    {} = {}".format(*x))
        lines.append("    {} = {}".format(name, expr))
        lines.append("    if derivatives == {}: return {}".format(
            n, ", ".join(deriv_names[:n+1])))
    lines.append(
        '    raise ValueError("derivatives must be 0, 1 or 2")')
    lines.append(
        "# ----------------------- End generated code ----------------------- #")
    return "\n".join(lines)
11748,
10558,
88,
198,
198,
11748,
340,
861,
10141,
198,
198,
4299,
2163,
2536,
7,
7785,
11,
44052,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
8229,
257,
4731,
326,
15738,
257,
2163,
7559,
35,
15506,
326,
460,
13446,
4600,
... | 2.444924 | 926 |
import os
from functools import partial
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.popup import Popup
from kivy.uix.listview import ListView, ListItemButton
from kivy.properties import ObjectProperty, NumericProperty
from kivy.adapters.listadapter import ListAdapter
from designer.helper_functions import get_kivy_designer_dir
RECENT_FILES_NAME = 'recent_files'
class RecentManager(object):
    '''RecentManager is responsible for retrieving/storing the list of recently
       opened/saved projects.
    '''

    def __init__(self):
        '''Start with an empty history. Call load_files() to populate it
        from disk.
        '''
        # BUG FIX: the original class never initialized these attributes, so
        # add_file()/load_files() raised AttributeError on a fresh instance.
        # 5 matches the usual recent-files menu length — TODO confirm against
        # the rest of the project.
        self.list_files = []
        self.max_recent_files = 5

    def add_file(self, _file):
        '''Add _file to the top of the recent list (de-duplicating it),
        trim the list to max_recent_files, and persist it to disk.
        '''
        # Remove a previous occurrence so the path moves to index 0.
        try:
            self.list_files.remove(_file)
        except ValueError:
            # Path was not in the list yet; nothing to remove.
            pass
        self.list_files.insert(0, _file)
        # Recent files should not be greater than max_recent_files
        while len(self.list_files) > self.max_recent_files:
            self.list_files.pop()
        self.store_files()

    def store_files(self):
        '''Write the list of files to disk, one path per line.
        '''
        _string = ''
        for _file in self.list_files:
            _string += _file + '\n'
        recent_file_path = os.path.join(get_kivy_designer_dir(),
                                        RECENT_FILES_NAME)
        # `with` guarantees the handle is closed even if write() fails
        # (the original leaked the handle on error).
        with open(recent_file_path, 'w') as f:
            f.write(_string)

    def load_files(self):
        '''Load the list of files from disk, keeping only paths that
        still exist.
        '''
        recent_file_path = os.path.join(get_kivy_designer_dir(),
                                        RECENT_FILES_NAME)
        if not os.path.exists(recent_file_path):
            return
        with open(recent_file_path, 'r') as f:
            for line in f:
                file_path = line.strip()
                if os.path.exists(file_path):
                    self.list_files.append(file_path)
class RecentDialog(BoxLayout):
    '''RecentDialog shows the list of recent files retrieved from RecentManager
    It emits, 'on_select' event when a file is selected and select_button is
    clicked and 'on_cancel' when cancel_button is pressed.
    '''
    listview = ObjectProperty(None)
    ''':class:`~kivy.uix.listview.ListView` used for showing file paths.
    :data:`listview` is a :class:`~kivy.properties.ObjectProperty`
    '''
    select_button = ObjectProperty(None)
    ''':class:`~kivy.uix.button.Button` used to select the list item.
    :data:`select_button` is a :class:`~kivy.properties.ObjectProperty`
    '''
    cancel_button = ObjectProperty(None)
    ''':class:`~kivy.uix.button.Button` to cancel the dialog.
    :data:`cancel_button` is a :class:`~kivy.properties.ObjectProperty`
    '''
    adapter = ObjectProperty(None)
    ''':class:`~kivy.uix.listview.ListAdapter` used for selecting files.
    :data:`adapter` is a :class:`~kivy.properties.ObjectProperty`
    '''
    # Register the two custom kivy events this widget can dispatch.
    __events__ = ('on_select', 'on_cancel')
    def get_selected_project(self, *args):
        '''
        Get the path of the selected project
        '''
        # Assumes exactly one item is selected; IndexError if none —
        # presumably guaranteed by the UI flow, TODO confirm.
        return self.adapter.selection[0].text
    def on_select_button(self, *args):
        '''Event handler for 'on_release' event of select_button.
        '''
        # Forward button presses to this widget's 'on_select' event.
        self.select_button.bind(on_press=partial(self.dispatch, 'on_select'))
    def on_cancel_button(self, *args):
        '''Event handler for 'on_release' event of cancel_button.
        '''
        # Forward button presses to this widget's 'on_cancel' event.
        self.cancel_button.bind(on_press=partial(self.dispatch, 'on_cancel'))
    def on_select(self, *args):
        '''Default event handler for 'on_select' event.
        '''
        pass
    def on_cancel(self, *args):
        '''Default event handler for 'on_cancel' event.
        '''
        pass
| [
11748,
28686,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
198,
6738,
479,
452,
88,
13,
84,
844,
13,
3524,
39786,
1330,
8315,
32517,
198,
6738,
479,
452,
88,
13,
84,
844,
13,
16539,
1330,
20969,
198,
6738,
479,
452,
88,
13,
84,
... | 2.252129 | 1,761 |
# pylint: disable=C0111,R0902,R0904,R0912,R0913,R0915,E1101
# Smartsheet Python SDK.
#
# Copyright 2017 Smartsheet.com, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from .primitive_object_value import PrimitiveObjectValue
from .object_value import *
class StringObjectValue(PrimitiveObjectValue):
"""Smartsheet StringObjectValue data model."""
    def __init__(self, value=None, base_obj=None):
        """Initialize the StringObjectValue model.

        :param value: the string value wrapped by this object; forwarded to
            PrimitiveObjectValue.
        :param base_obj: the owning smartsheet client object, kept for API
            calls made by the base classes.
        """
        super(StringObjectValue, self).__init__(value, base_obj)
        # NOTE(review): _base is reset to None and immediately reassigned when
        # base_obj is given — mirrors the SDK's generated-model boilerplate.
        self._base = None
        if base_obj is not None:
            self._base = base_obj
        # Name-mangled flag marking the end of __init__ (used by the SDK's
        # __setattr__ machinery elsewhere — presumably; confirm in base class).
        self.__initialized = True
@property
| [
2,
279,
2645,
600,
25,
15560,
28,
34,
486,
1157,
11,
49,
2931,
2999,
11,
49,
2931,
3023,
11,
49,
2931,
1065,
11,
49,
2931,
1485,
11,
49,
2931,
1314,
11,
36,
1157,
486,
198,
2,
2439,
5889,
25473,
11361,
26144,
13,
198,
2,
198,
... | 3.086735 | 392 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This script waits until the OEF is up and running."""
import argparse
import asyncio
import logging
from threading import Timer
from typing import Optional
from oef.agents import AsyncioCore, OEFAgent
# Module-level logger plus the command-line interface for the health check.
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser("oef_healthcheck", description=__doc__)
parser.add_argument("--oef-addr", type=str, default="127.0.0.1",
                    help="TCP/IP address of the OEF Agent")
parser.add_argument("--oef-port", type=int, default=10000,
                    help="TCP/IP port of the OEF Agent")
class OEFHealthCheck(object):
    """A health check class.

    Connects a throwaway OEFAgent to the node and reports whether the
    connection succeeded.
    """
    def __init__(
        self,
        oef_addr: str,
        oef_port: int,
        loop: Optional[asyncio.AbstractEventLoop] = None,
    ):
        """
        Initialize.
        :param oef_addr: IP address of the OEF node.
        :param oef_port: Port of the OEF node.
        :param loop: optional event loop. NOTE(review): this parameter is
            never used in the visible body — presumably vestigial; confirm.
        """
        self.oef_addr = oef_addr
        self.oef_port = oef_port
        # _result: outcome of the last run(); _stop: set when the attempt ends.
        self._result = False
        self._stop = False
        self._core = AsyncioCore()
        # Agent named "check" used purely as a connection probe.
        self.agent = OEFAgent(
            "check", core=self._core, oef_addr=self.oef_addr, oef_port=self.oef_port
        )
        # Wire the agent's connection callbacks to this object's handlers.
        self.agent.on_connect_success = self.on_connect_ok
        self.agent.on_connection_terminated = self.on_connect_terminated
        self.agent.on_connect_failed = self.exception_handler
    def exception_handler(self, url=None, ex=None):
        """Handle exception during a connection attempt."""
        print("An error occurred. Exception: {}".format(ex))
        self._stop = True
    def on_connect_ok(self, url=None):
        """Handle a successful connection."""
        print("Connection OK!")
        self._result = True
        self._stop = True
    def on_connect_terminated(self, url=None):
        """Handle a connection failure."""
        print("Connection terminated.")
        self._stop = True
    def run(self) -> bool:
        """
        Run the check, asynchronously.
        :return: True if the check is successful, False otherwise.
        """
        self._result = False
        self._stop = False
        # Watchdog: abort the attempt after 1.5s.
        # NOTE(review): stop_connection_attempt is not defined in this file's
        # visible scope — presumably imported/defined elsewhere; confirm.
        t = Timer(1.5, stop_connection_attempt, args=(self,))
        try:
            print("Connecting to {}:{}...".format(self.oef_addr, self.oef_port))
            self._core.run_threaded()
            t.start()
            self._result = self.agent.connect()
            self._stop = True
            if self._result:
                print("Connection established. Tearing down connection...")
                self.agent.disconnect()
                t.cancel()
            else:
                print("A problem occurred. Exiting...")
        except Exception as e:
            print(str(e))
        finally:
            # Best-effort teardown regardless of outcome.
            t.join(1.0)
            self.agent.stop()
            self.agent.disconnect()
            self._core.stop()
        return self._result
def main(oef_addr, oef_port):
    """Launch the health check against the given OEF node and return its result."""
    return OEFHealthCheck(oef_addr, oef_port).run()
if __name__ == "__main__":
args = parser.parse_args()
main(args.oef_addr, args.oef_port)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
16529,
26171,
198,
2,
198,
2,
220,
220,
15069,
2864,
12,
23344,
376,
7569,
13,
20185,
15302,
198,
2,
19... | 2.457143 | 1,610 |
from fixtures.builder import FixtureBuilder
| [
6738,
34609,
13,
38272,
1330,
376,
9602,
32875,
628
] | 5 | 9 |
import os
import numpy as np
import tifffile as tiff
import matplotlib.pyplot as plt
# Render an RGB preview of one satellite tile from a multi-band image.
image_id = '6120_2_2'
# NOTE(review): M and stretch_8bit are not defined in this file's visible
# scope — presumably helper functions from the surrounding notebook/module
# (M loads the multispectral array for an image id, stretch_8bit contrast-
# stretches to displayable 8-bit). Confirm before running standalone.
m = M(image_id)
# 837x851 matches this tile's height x width — TODO confirm for other tiles.
img = np.zeros((837, 851, 3))
# Map multispectral bands to display channels.
img[:, :, 0] = m[:, :, 4] # red
img[:, :, 1] = m[:, :, 2] # green
img[:, :, 2] = m[:, :, 1] # blue
fig, axes = plt.subplots(ncols=1, nrows=1, figsize=(8, 8))
axes.imshow(stretch_8bit(img))
axes.set_title('RGB')
axes.axis('off')
plt.show()
| [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
256,
361,
487,
576,
355,
256,
733,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
628,
628,
198,
9060,
62,
312,
796,
705,
21,
10232,
62,
17,
62,
17,
6,
1... | 2.030303 | 198 |
from datetime import datetime
from itertools import count
from therminator import db
from therminator.models import *
# Infinite per-field generators of unique fixture values: each entry yields
# "Prefix 0", "Prefix 1", ... on successive next() calls.
# BUG FIX: the original dict listed the 'user.name' key twice; the second
# entry silently overrode the first (both produced "User %d").
GENERATORS = {
    'user.name': map(lambda n: "User %d" % n, count()),
    'user.email': map(lambda n: "user%d@example.com" % n, count()),
    'home.name': map(lambda n: "Home %d" % n, count()),
    'sensor.name': map(lambda n: "Sensor %d" % n, count()),
}
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
340,
861,
10141,
1330,
954,
198,
6738,
10811,
1084,
1352,
1330,
20613,
198,
6738,
10811,
1084,
1352,
13,
27530,
1330,
1635,
198,
198,
35353,
1137,
1404,
20673,
796,
1391,
198,
220,
220,
220... | 2.541176 | 170 |
from django.contrib import admin
from django.urls import path
from . import views
# URL routes for the software-definitions glossary: list view plus
# create/update/delete term views (pk identifies the term to edit).
urlpatterns = [
    path('', views.view_definitionssoftware, name='view_definitionssoftware'),
    path('create/', views.term_create, name='term_create'),
    path('update/<pk>/', views.term_update, name='term_update'),
    path('delete/<pk>/', views.term_delete, name='term_delete'),
]
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
764,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
3256,
5009,
13,
1177,
62,
4299,
50101... | 2.875969 | 129 |
'''
Here we sort the arrays to ascending order;
descending order can be done similarly.
The idea behind bubble sort is
Keep iterating thru the array and swapping neighboring elements
until an iteration in which no swap takes place, then we know
the array is sorted.
Author: phunc20
'''
import time
# Driver script exercising two bubble-sort implementations.
# NOTE(review): test, em_bubble_sort and same2_but_diff are not defined in
# this file's visible scope — presumably defined earlier in the module;
# confirm before running standalone.
print("em_bubble_sort")
print("-------------------------------------")
### Example taking 1 iteration (an already-sorted array): O(n)
##print(em_bubble_sort([1,2,3,4,5,6]))
#test([1,2,3,4,5,6], em_bubble_sort)
### Example taking 2 iterations
##print(em_bubble_sort([1,3,2,4,5,6]))
#test([1,3,2,4,5,6], em_bubble_sort)
### average O(n^2)
##print(em_bubble_sort([4,2,3,1,6,5]))
#test([4,2,3,1,6,5], em_bubble_sort)
### worst O(n^2)
##print(em_bubble_sort([6,5,4,3,2,1]))
#test([6,5,4,3,2,1], em_bubble_sort)
# Run the same four cases (sorted, near-sorted, average, reversed) in a loop.
for arr in [[1,2,3,4,5,6],
            [1,3,2,4,5,6],
            [4,2,3,1,6,5],
            [6,5,4,3,2,1],]:
    test(arr, em_bubble_sort)
print(end="\n")
print("same2_but_diff")
print("N.B. This is a wrong implementation.\n`for-else` is to be used along w/ `break`, not like this")
print("-------------------------------------")
for arr in [[1,2,3,4,5,6],
            [1,3,2,4,5,6],
            [4,2,3,1,6,5],
            [6,5,4,3,2,1],]:
    test(arr, same2_but_diff)
'''
-------------
Analysis
-------------
def em_bubble_sort(arr):
while True: c1 x
corrected = False c2 x
for i in range(0, len(arr) - 1): c3 (n-1)*x
if arr[i] > arr[i+1]: c4 (n-1)*x
arr[i], arr[i+1] = arr[i+1], arr[i] c5 (n-1)*x
corrected = True c6 (n-1)*x
if not corrected: c7 x
# Then the arr must be sorted because 0
# arr[i] > arr[i+1] never takes place. 0
return arr c8 1
If the smallest element is at the last entry, we see that in this case x is at least n-1.
Therefore, we see that the worst case, if it exists, is at least O(n^2).
'''
| [
7061,
6,
198,
4342,
356,
3297,
262,
26515,
284,
41988,
1502,
26,
198,
20147,
1571,
1502,
460,
307,
1760,
12470,
13,
198,
198,
464,
2126,
2157,
14310,
3297,
318,
198,
220,
220,
220,
9175,
11629,
803,
33834,
262,
7177,
290,
38869,
19651... | 1.826122 | 1,248 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# C++ version Copyright (c) 2006-2007 Erin Catto http://www.box2d.org
# Python version by Ken Lauer / sirkne at gmail dot com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
"""
The framework's base is FrameworkBase. See its help for more information.
"""
from time import time
from Box2D import (b2World, b2AABB, b2CircleShape, b2Color, b2Vec2)
from Box2D import (b2ContactListener, b2DestructionListener, b2DrawExtended)
from Box2D import (b2Fixture, b2FixtureDef, b2Joint)
from Box2D import (b2GetPointStates, b2QueryCallback, b2Random)
from Box2D import (b2_addState, b2_dynamicBody, b2_epsilon, b2_persistState)
from Box2D.examples.settings import fwSettings
class fwDestructionListener(b2DestructionListener):
    """
    The destruction listener callback:
    "SayGoodbye" is called when a joint or shape is deleted.

    No overrides are added here; the Box2D base-class behavior is used as-is.
    """
class FrameworkBase(b2ContactListener):
    """
    The base of the main testbed framework.
    If you are planning on using the testbed framework and:
    * Want to implement your own renderer (other than Pygame, etc.):
    You should derive your class from this one to implement your own tests.
    See empty.py or any of the other tests for more information.
    * Do NOT want to implement your own renderer:
    You should derive your class from Framework. The renderer chosen in
    fwSettings (see settings.py) or on the command line will automatically
    be used for your test.
    """
    # Display name and optional multi-line description shown by SimulationLoop.
    name = "None"
    description = None
    # Screen y-coordinate where status text starts (see Print()).
    TEXTLINE_START = 30
    # Debug-draw colors for the various overlay elements.
    colors = {
        'mouse_point': b2Color(0, 1, 0),
        'bomb_center': b2Color(0, 0, 1.0),
        'bomb_line': b2Color(0, 1.0, 1.0),
        'joint_line': b2Color(0.8, 0.8, 0.8),
        'contact_add': b2Color(0.3, 0.95, 0.3),
        'contact_persist': b2Color(0.3, 0.3, 0.95),
        'contact_normal': b2Color(0.4, 0.9, 0.4),
    }
    def __reset(self):
        """ Reset all of the variables to their starting values.
        Not to be called except at initialization."""
        # Box2D-related
        self.points = []
        self.world = None
        self.bomb = None
        self.mouseJoint = None
        self.settings = fwSettings
        self.bombSpawning = False
        self.bombSpawnPoint = None
        self.mouseWorld = None
        self.using_contacts = False
        self.stepCount = 0
        # NOTE(review): t_draws / t_steps are NOT initialized here; Step()
        # guards their use with bare try/except — presumably a subclass or
        # Framework __init__ creates them. Confirm.
        # Box2D-callbacks
        self.destructionListener = None
        self.renderer = None
    def Step(self, settings):
        """
        The main physics step.
        Takes care of physics drawing (callbacks are executed after the world.Step() )
        and drawing additional information.
        """
        self.stepCount += 1
        # Don't do anything if the setting's Hz are <= 0
        if settings.hz > 0.0:
            timeStep = 1.0 / settings.hz
        else:
            timeStep = 0.0
        renderer = self.renderer
        # If paused, display so
        if settings.pause:
            if settings.singleStep:
                settings.singleStep = False
            else:
                timeStep = 0.0
            self.Print("****PAUSED****", (200, 0, 0))
        # Set the flags based on what the settings show
        if renderer:
            # convertVertices is only applicable when using b2DrawExtended. It
            # indicates that the C code should transform box2d coords to screen
            # coordinates.
            is_extended = isinstance(renderer, b2DrawExtended)
            renderer.flags = dict(drawShapes=settings.drawShapes,
                                  drawJoints=settings.drawJoints,
                                  drawAABBs=settings.drawAABBs,
                                  drawPairs=settings.drawPairs,
                                  drawCOMs=settings.drawCOMs,
                                  convertVertices=is_extended,
                                  )
        # Set the other settings that aren't contained in the flags
        self.world.warmStarting = settings.enableWarmStarting
        self.world.continuousPhysics = settings.enableContinuous
        self.world.subStepping = settings.enableSubStepping
        # Reset the collision points
        self.points = []
        # Tell Box2D to step
        t_step = time()
        self.world.Step(timeStep, settings.velocityIterations,
                        settings.positionIterations)
        self.world.ClearForces()
        t_step = time() - t_step
        # Update the debug draw settings so that the vertices will be properly
        # converted to screen coordinates
        t_draw = time()
        if renderer is not None:
            renderer.StartDraw()
        self.world.DrawDebugData()
        # If the bomb is frozen, get rid of it.
        if self.bomb and not self.bomb.awake:
            self.world.DestroyBody(self.bomb)
            self.bomb = None
        # Take care of additional drawing (fps, mouse joint, slingshot bomb,
        # contact points)
        if renderer:
            # If there's a mouse joint, draw the connection between the object
            # and the current pointer position.
            if self.mouseJoint:
                p1 = renderer.to_screen(self.mouseJoint.anchorB)
                p2 = renderer.to_screen(self.mouseJoint.target)
                renderer.DrawPoint(p1, settings.pointSize,
                                   self.colors['mouse_point'])
                renderer.DrawPoint(p2, settings.pointSize,
                                   self.colors['mouse_point'])
                renderer.DrawSegment(p1, p2, self.colors['joint_line'])
            # Draw the slingshot bomb
            if self.bombSpawning:
                renderer.DrawPoint(renderer.to_screen(self.bombSpawnPoint),
                                   settings.pointSize, self.colors['bomb_center'])
                renderer.DrawSegment(renderer.to_screen(self.bombSpawnPoint),
                                     renderer.to_screen(self.mouseWorld),
                                     self.colors['bomb_line'])
            # Draw each of the contact points in different colors.
            if self.settings.drawContactPoints:
                for point in self.points:
                    if point['state'] == b2_addState:
                        renderer.DrawPoint(renderer.to_screen(point['position']),
                                           settings.pointSize,
                                           self.colors['contact_add'])
                    elif point['state'] == b2_persistState:
                        renderer.DrawPoint(renderer.to_screen(point['position']),
                                           settings.pointSize,
                                           self.colors['contact_persist'])
            if settings.drawContactNormals:
                for point in self.points:
                    p1 = renderer.to_screen(point['position'])
                    p2 = renderer.axisScale * point['normal'] + p1
                    renderer.DrawSegment(p1, p2, self.colors['contact_normal'])
            renderer.EndDraw()
            t_draw = time() - t_draw
            # Clamp to epsilon so the 1/t rate computations below cannot
            # divide by zero.
            t_draw = max(b2_epsilon, t_draw)
            t_step = max(b2_epsilon, t_step)
            # NOTE(review): the bare excepts below swallow the AttributeError
            # raised when t_draws/t_steps were never created (see __reset);
            # the else-branches keep a rolling window of the last few rates.
            try:
                self.t_draws.append(1.0 / t_draw)
            except:
                pass
            else:
                if len(self.t_draws) > 2:
                    self.t_draws.pop(0)
            try:
                self.t_steps.append(1.0 / t_step)
            except:
                pass
            else:
                if len(self.t_steps) > 2:
                    self.t_steps.pop(0)
            if settings.drawFPS:
                self.Print("Combined FPS %d" % self.fps)
            if settings.drawStats:
                self.Print("bodies=%d contacts=%d joints=%d proxies=%d" %
                           (self.world.bodyCount, self.world.contactCount,
                            self.world.jointCount, self.world.proxyCount))
                self.Print("hz %d vel/pos iterations %d/%d" %
                           (settings.hz, settings.velocityIterations,
                            settings.positionIterations))
                if self.t_draws and self.t_steps:
                    self.Print("Potential draw rate: %.2f fps Step rate: %.2f Hz"
                               "" % (sum(self.t_draws) / len(self.t_draws),
                                     sum(self.t_steps) / len(self.t_steps))
                               )
    def ShiftMouseDown(self, p):
        """
        Indicates that there was a left click at point p (world coordinates)
        with the left shift key being held down.
        """
        self.mouseWorld = p
        if not self.mouseJoint:
            self.SpawnBomb(p)
    def MouseDown(self, p):
        """
        Indicates that there was a left click at point p (world coordinates)
        """
        if self.mouseJoint is not None:
            return
        # Create a mouse joint on the selected body (assuming it's dynamic)
        # Make a small box.
        aabb = b2AABB(lowerBound=p - (0.001, 0.001),
                      upperBound=p + (0.001, 0.001))
        # Query the world for overlapping shapes.
        query = fwQueryCallback(p)
        self.world.QueryAABB(query, aabb)
        if query.fixture:
            body = query.fixture.body
            # A body was selected, create the mouse joint
            self.mouseJoint = self.world.CreateMouseJoint(
                bodyA=self.groundbody,
                bodyB=body,
                target=p,
                maxForce=1000.0 * body.mass)
            body.awake = True
    def MouseUp(self, p):
        """
        Left mouse button up.
        """
        # Release any drag joint and, if a slingshot was armed, fire it.
        if self.mouseJoint:
            self.world.DestroyJoint(self.mouseJoint)
            self.mouseJoint = None
        if self.bombSpawning:
            self.CompleteBombSpawn(p)
    def MouseMove(self, p):
        """
        Mouse moved to point p, in world coordinates.
        """
        self.mouseWorld = p
        if self.mouseJoint:
            self.mouseJoint.target = p
    def SpawnBomb(self, worldPt):
        """
        Begins the slingshot bomb by recording the initial position.
        Once the user drags the mouse and releases it, then
        CompleteBombSpawn will be called and the actual bomb will be
        released.
        """
        self.bombSpawnPoint = worldPt.copy()
        self.bombSpawning = True
    def CompleteBombSpawn(self, p):
        """
        Create the slingshot bomb based on the two points
        (from the worldPt passed to SpawnBomb to p passed in here)
        """
        if not self.bombSpawning:
            return
        # Launch velocity is proportional to the drag distance.
        multiplier = 30.0
        vel = self.bombSpawnPoint - p
        vel *= multiplier
        self.LaunchBomb(self.bombSpawnPoint, vel)
        self.bombSpawning = False
    def LaunchBomb(self, position, velocity):
        """
        A bomb is a simple circle which has the specified position and velocity.
        position and velocity must be b2Vec2's.
        """
        # Only one bomb at a time: destroy the previous one first.
        if self.bomb:
            self.world.DestroyBody(self.bomb)
            self.bomb = None
        self.bomb = self.world.CreateDynamicBody(
            allowSleep=True,
            position=position,
            linearVelocity=velocity,
            fixtures=b2FixtureDef(
                shape=b2CircleShape(radius=0.3),
                density=20,
                restitution=0.1)
        )
    def LaunchRandomBomb(self):
        """
        Create a new bomb and launch it at the testbed.
        """
        # Random x above the scene, aimed back toward the origin.
        p = b2Vec2(b2Random(-15.0, 15.0), 30.0)
        v = -5.0 * p
        self.LaunchBomb(p, v)
    def SimulationLoop(self):
        """
        The main simulation loop. Don't override this, override Step instead.
        """
        # Reset the text line to start the text from the top
        self.textLine = self.TEXTLINE_START
        # Draw the name of the test running
        self.Print(self.name, (127, 127, 255))
        if self.description:
            # Draw the name of the test running
            for s in self.description.split('\n'):
                self.Print(s, (127, 255, 127))
        # Do the main physics step
        self.Step(self.settings)
    def ConvertScreenToWorld(self, x, y):
        """
        Return a b2Vec2 in world coordinates of the passed in screen
        coordinates x, y
        NOTE: Renderer subclasses must implement this
        """
        raise NotImplementedError()
    def DrawStringAt(self, x, y, str, color=(229, 153, 153, 255)):
        """
        Draw some text, str, at screen coordinates (x, y).
        NOTE: Renderer subclasses must implement this
        """
        raise NotImplementedError()
    def Print(self, str, color=(229, 153, 153, 255)):
        """
        Draw some text at the top status lines
        and advance to the next line.
        NOTE: Renderer subclasses must implement this
        """
        raise NotImplementedError()
    def PreSolve(self, contact, old_manifold):
        """
        This is a critical function when there are many contacts in the world.
        It should be optimized as much as possible.
        """
        # Bail out early unless some consumer actually wants contact info,
        # or once the per-step point cap has been reached.
        if not (self.settings.drawContactPoints or
                self.settings.drawContactNormals or self.using_contacts):
            return
        elif len(self.points) > self.settings.maxContactPoints:
            return
        manifold = contact.manifold
        if manifold.pointCount == 0:
            return
        state1, state2 = b2GetPointStates(old_manifold, manifold)
        if not state2:
            return
        worldManifold = contact.worldManifold
        # TODO: find some way to speed all of this up.
        self.points.extend([dict(fixtureA=contact.fixtureA,
                                 fixtureB=contact.fixtureB,
                                 position=worldManifold.points[i],
                                 normal=worldManifold.normal.copy(),
                                 state=state2[i],
                                 )
                            for i, point in enumerate(state2)])
    # These can/should be implemented in the test subclass: (Step() also if necessary)
    # See empty.py for a simple example.
    def FixtureDestroyed(self, fixture):
        """
        Callback indicating 'fixture' has been destroyed.
        """
        pass
    def JointDestroyed(self, joint):
        """
        Callback indicating 'joint' has been destroyed.
        """
        pass
    def Keyboard(self, key):
        """
        Callback indicating 'key' has been pressed down.
        """
        pass
    def KeyboardUp(self, key):
        """
        Callback indicating 'key' has been released.
        """
        pass
def main(test_class):
    """Announce, instantiate and run *test_class*.

    When fwSettings.onlyInit is set, the test is constructed but not run
    (useful for smoke-testing initialization).
    """
    print("Loading %s..." % test_class.name)
    instance = test_class()
    if not fwSettings.onlyInit:
        instance.run()
# This module is a library, not an entry point: refuse direct execution.
if __name__ == '__main__':
    print('Please run one of the examples directly. This is just the base for '
          'all of the frameworks.')
    exit(1)
# Your framework classes should follow this format. If it is the 'foobar'
# framework, then your file should be 'backends/foobar_framework.py' and you
# should have a class 'FoobarFramework' that subclasses FrameworkBase. Ensure
# proper capitalization for portability.
from Box2D.examples import backends
# Resolve the renderer backend named in fwSettings by convention:
# '<name>_framework' module containing a '<Name>Framework' class.
try:
    framework_name = '%s_framework' % (fwSettings.backend.lower())
    # level=1 forces a relative import from this package's 'backends'.
    __import__('backends', globals(), fromlist=[framework_name], level=1)
    framework_module = getattr(backends, framework_name)
    Framework = getattr(framework_module,
                        '%sFramework' % fwSettings.backend.capitalize())
except Exception as ex:
    # Any failure (missing module, missing class) falls back to pygame.
    print('Unable to import the back-end %s: %s' % (fwSettings.backend, ex))
    print('Attempting to fall back on the pygame back-end.')
    from Box2D.examples.backends.pygame_framework import PygameFramework as Framework
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
327,
4880,
2196,
15069,
357,
66,
8,
4793,
12,
12726,
28894,
327,
45807,
2638,
1378,
2503,
13,
3524,
17,
... | 2.16701 | 7,772 |
"""Builds network used in brain of learning agents.
Author: Yoshinari Motokawa <yoshinari.moto@fuji.waseda.jp>
"""
from typing import List
from core.utils.logging import initialize_logging
from omegaconf import DictConfig
from torch import nn
from .customs.categorical_dqn import CategoricalDQN
from .customs.conv_mlp import ConvMLP
from .customs.da3 import DA3
from .customs.da3_iqn import DA3_IQN
from .customs.da6 import DA6
from .customs.iqn import IQN
from .customs.qr_dqn import QRDQN
from .mlp import MLP
logger = initialize_logging(__name__)
| [
37811,
15580,
82,
3127,
973,
287,
3632,
286,
4673,
6554,
13,
198,
198,
13838,
25,
28563,
259,
2743,
6543,
482,
6909,
1279,
88,
3768,
259,
2743,
13,
76,
2069,
31,
20942,
7285,
13,
86,
839,
64,
13,
34523,
29,
198,
37811,
198,
198,
6... | 2.78 | 200 |
from layers import ShuffleNetv2, CEM, RPN, SAM, CEM_FILTER
from tensorflow.keras.models import Model
import tensorflow.keras.backend as K
import tensorflow.keras as keras
import tensorflow as tf
import numpy as np
class ThunderNet_bb(Model):
    """ thundernet without roi part"""
    # BUG FIX: the original body contained a bare ``@tf.function`` decorator
    # followed only by commented-out code and then a module-level import.
    # A decorator with nothing to decorate is a SyntaxError, so it has been
    # removed. The commented-out sketch it pointed at is kept for reference:
    # def compute_output_shape(self, input_shape):
    #     return tf.TensorShape([10, 20, 20, 245])
from tensorflow.keras import layers
if __name__ == "__main__":
print("summary for ThunderNet_bb")
#K.clear_session()
shape = (10, 320, 320, 3)
nx = np.random.rand(*shape).astype(np.float32)
g = ThunderNet_bb()
g.build((None, 320,320,3))
g.summary()
t = keras.Input(shape=nx.shape[1:], batch_size=nx.shape[0])
sam_result, rpn_result, rpn_cls_score, rpn_cls_pred = g(nx, training=False)
#sam_result = g(t, training=False)
g.summary()
print('thundernet_backbone result: \nsam_result, rpn_result, rpn_cls_score, rpn_cls_pred: ', list(map(K.int_shape, [sam_result, rpn_result, rpn_cls_score, rpn_cls_pred])))
| [
6738,
11685,
1330,
911,
18137,
7934,
85,
17,
11,
327,
3620,
11,
371,
13137,
11,
28844,
11,
327,
3620,
62,
46700,
5781,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
13,
27530,
1330,
9104,
198,
11748,
11192,
273,
11125,
13,
6122,
292,
... | 2.425968 | 439 |
for key in IMG_PATHS.keys():
start_time = time.time()
print"importing {0}".format(key)
LEAF_COLLECTION.load_leaves_folder(IMG_PATHS[key], num_sections=SECTIONS)
print """done importing and measuring all {0} with
{1} sections per leaf in {2} seconds""".format(key,
str(SECTIONS),
str(time.time()-start_time)) | [
1640,
1994,
287,
8959,
38,
62,
47,
1404,
7998,
13,
13083,
33529,
198,
220,
220,
220,
220,
220,
220,
220,
923,
62,
2435,
796,
640,
13,
2435,
3419,
198,
220,
220,
220,
220,
220,
220,
220,
3601,
1,
11748,
278,
1391,
15,
92,
1911,
1... | 1.842553 | 235 |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.plotting.register_matplotlib_converters()
df = pd.read_csv("DATASET.csv")
df.set_index("Date", inplace=True)
df.index = pd.to_datetime(df.index, format="%d/%m/%y")
df.Transakce = df['Demand'].astype(float)
df.plot()
len(df)
train_len = len(df)-90
train = df.iloc[:train_len]
test = df.iloc[train_len:]
len(test)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(train)
scaled_train = scaler.transform(train)
scaled_test = scaler.transform(test)
from keras.preprocessing.sequence import TimeseriesGenerator
n_input = 90
n_features=1
generator = TimeseriesGenerator(scaled_train, scaled_train, length=n_input, batch_size=10)
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM, GRU
from keras.layers import Dropout
model = Sequential()
model.add(LSTM(40, return_sequences=True, activation='relu', input_shape=(n_input, n_features)))
#model.add(LSTM(40,return_sequences=False, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mae')
model.summary()
model.fit_generator(generator,epochs=100)
model.history.history.keys()
loss_per_epoch = model.history.history['loss']
plt.plot(range(len(loss_per_epoch)),loss_per_epoch)
first_eval_batch = scaled_train[-90:]
first_eval_batch
first_eval_batch = first_eval_batch.reshape((1, 90, n_features))
test_predictions = []
first_eval_batch = scaled_train[-n_input:]
current_batch = first_eval_batch.reshape((1, n_input, n_features))
for i in range(len(test)):
current_pred = model.predict(current_batch)[0]
test_predictions.append(current_pred)
current_batch = np.append(current_batch[:,1:,:],[[current_pred]],axis=1)
true_predictions = scaler.inverse_transform(test_predictions)
true_predictions
test['Predictions'] = true_predictions
Y_true = df.iloc[train_len:]
Y_pred = test["Predictions"]
from sklearn.metrics import mean_squared_error,mean_absolute_error
MSE = mean_squared_error(Y_true,Y_pred)
MAE = mean_absolute_error(Y_true,Y_pred)
plt.plot(test["Predictions"], label="Pred", color="black", zorder=1)
plt.plot(test["Demand"], label="True", color="lightgray", zorder=0)
plt.legend(loc="upper right")
plt.xlabel('Days', fontsize=10)
plt.ylabel('Demand', fontsize=10)
print(MSE)
print(MAE) | [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
30094,
13,
29487,
889,
13,
30238,
62,
6759,
29487,
8019,
62,
1102,
332,
1010,
3419,
198,
198,
7568... | 2.649438 | 890 |
import os
import re
import os
import json
DB_PATH = "%s/%s" % (
os.getenv("XRAY_DATABASE_DIR"), os.getenv("XRAY_DATABASE"))
(ROI_X1, ROI_X2), (ROI_Y1, ROI_Y2) = roi_xy()
def slice_xy():
'''Return (X1, X2), (Y1, Y2) from XRAY_ROI, exclusive end (for xrange)'''
# SLICE_X12Y100:SLICE_X27Y149
# Note XRAY_ROI_GRID_* is something else
m = re.match(
r'SLICE_X([0-9]*)Y([0-9]*):SLICE_X([0-9]*)Y([0-9]*)',
os.getenv('XRAY_ROI'))
ms = [int(m.group(i + 1)) for i in range(4)]
return ((ms[0], ms[2] + 1), (ms[1], ms[3] + 1))
def gen_tiles(tile_types=None, tilegrid=None):
'''
tile_types: list of tile types to keep, or None for all
tilegrid: cache the tilegrid database
'''
tilegrid = tilegrid or load_tilegrid()
for tile_name, tilej in tilegrid.items():
if tile_in_roi(tilej) and (tile_types is None
or tilej['type'] in tile_types):
yield (tile_name, tilej)
def gen_sites(site_types=None, tilegrid=None):
'''
site_types: list of site types to keep, or None for all
tilegrid: cache the tilegrid database
'''
tilegrid = tilegrid or load_tilegrid()
for tile_name, tilej in tilegrid.items():
if not tile_in_roi(tilej):
continue
for site_name, site_type in tilej['sites'].items():
if site_types is None or site_type in site_types:
yield (tile_name, site_name, site_type)
#print(list(gen_tiles(['CLBLL_L', 'CLBLL_R', 'CLBLM_L', 'CLBLM_R'])))
#print(list(gen_sites(['SLICEL', 'SLICEM'])))
#print(list(gen_sites(['SLICEM'])))
# we know that all bits for CLB MUXes are in frames 30 and 31, so filter all other bits
| [
11748,
28686,
198,
11748,
302,
198,
11748,
28686,
198,
11748,
33918,
198,
198,
11012,
62,
34219,
796,
36521,
82,
14,
4,
82,
1,
4064,
357,
198,
220,
220,
220,
28686,
13,
1136,
24330,
7203,
55,
30631,
62,
35,
1404,
6242,
11159,
62,
34... | 2.134161 | 805 |
from flask import Flask, render_template, url_for, redirect, request, flash, jsonify, session
from modules.graphs import create_figures
from modules.video import *
import os
from modules.tools import GetUniqueID
import json
app = Flask(__name__)
app.secret_key = '1234567890abcde'
widgetfile = open('data/about_the_widgets.json', 'r')
widgets = dict(json.load(widgetfile))
ALLOWED_EXTENSIONS = ['wav']
@app.route("/")
@app.route('/about-the-widget')
@app.route("/graph-audio-upload", methods=['GET'])
@app.route('/interactive-visualization', methods=['POST'])
@ app.route('/visualize-with-video')
@ app.route('/video-download', methods=['POST'])
if __name__ == "__main__":
app.run()
| [
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
19016,
62,
1640,
11,
18941,
11,
2581,
11,
7644,
11,
33918,
1958,
11,
6246,
198,
6738,
13103,
13,
34960,
82,
1330,
2251,
62,
5647,
942,
198,
6738,
13103,
13,
15588,
1330,
1635,
198,
... | 2.869919 | 246 |
import json
with open('moves_type.json') as poke_data:
move_list = []
type_list = []
d = json.load(poke_data)
for i in d :
move = i['move_name']
type = i['type']
move_list.append('move('+move.lower()+').')
type_list.append('have_type('+move.lower()+','+type.lower()+').')
out = open('movefact.pl','w')
for i in type_list :
out.write(i+'\n')
for i in move_list :
out.write(i+'\n')
out.close()
| [
11748,
33918,
198,
4480,
1280,
10786,
76,
5241,
62,
4906,
13,
17752,
11537,
355,
22620,
62,
7890,
25,
198,
220,
220,
220,
1445,
62,
4868,
796,
17635,
198,
220,
220,
220,
2099,
62,
4868,
796,
17635,
198,
220,
220,
220,
288,
796,
3391... | 2.070175 | 228 |
import time
from multiprocessing import Process
if __name__ == '__main__':
main()
| [
11748,
640,
198,
6738,
18540,
305,
919,
278,
1330,
10854,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.935484 | 31 |
import sys
from plot.table import TabularStats
from crawlstats import CST, MonthlyCrawl
if __name__ == '__main__':
plot_crawls = sys.argv[1:]
plot_name = 'charsets'
column_header = 'charset'
if len(plot_crawls) == 0:
plot_crawls = MonthlyCrawl.get_latest(3)
print(plot_crawls)
else:
plot_name += '-' + '-'.join(plot_crawls)
plot = CharsetStats()
plot.read_data(sys.stdin)
plot.transform_data(CharsetStats.MAX_CHARSETS,
CharsetStats.MIN_AVERAGE_COUNT,
None)
plot.save_data_percentage(plot_name, dir_name='plots', type_name='charset')
plot.plot(plot_crawls, plot_name, column_header)
| [
11748,
25064,
198,
198,
6738,
7110,
13,
11487,
1330,
16904,
934,
29668,
198,
6738,
27318,
34242,
1330,
46429,
11,
27573,
34,
13132,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
7110,
62,
... | 2.10241 | 332 |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 25 23:10:08 2019
@author: Ham
HackerRanch Challenge: Validating UID
Task
ABCXYZ company has up to 100 employees.
The company decides to create a unique identification number (UID)
for each of its employees.
The company has assigned you the task of validating
all the randomly generated UIDs.
A valid UID must follow the rules below:
It must contain at least 2 uppercase English alphabet characters.
It must contain at least 3 digits (0 - 9).
It should only contain alphanumeric characters (A - Z, a - z & 0 - 9).
No character should repeat.
There must be exactly characters in a valid UID.
Input Format
The first line contains an integer T, the number of test cases.
The next T lines contains an employee's UID.
Output Format
For each test case, print 'Valid' if the UID is valid.
Otherwise, print 'Invalid', on separate lines.
Do not print the quotation marks.
Sample Input
2
B1CD102354
B1CDEF2354
Sample Output
Invalid
Valid
"""
import string
import collections
import io
STDIN_SIO = io.StringIO("""
2
B1CD102354
B1CDEF2354
""".strip())
def is_valid_uid(s):
"""doc"""
#print(s)
if len(s) != 10:
return False
c = collections.Counter(s)
upp = 0
dig = 0
ld = string.ascii_letters + string.digits
for k, v in c.items():
if v > 1:
return False # repeating chars => False
if k in string.digits:
dig += 1
elif k in string.ascii_uppercase:
upp += 1
elif k not in ld:
return False # neither digit nor letter => False
return (upp > 1) and (dig > 2) # >=2 UPPERCASE and >=3 digits
if __name__ == '__main__':
for _ in range(int(STDIN_SIO.readline().strip())):
print("Valid" if is_valid_uid(STDIN_SIO.readline().strip())
else "Invalid")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
2892,
3158,
1679,
2242,
25,
940,
25,
2919,
13130,
201,
198,
201,
198,
31,
9800,
25,
4345,
201,
198,
201,
198,
39,
10735,
49,
3702,
... | 2.439206 | 806 |
from .general import *
from .text_classification import *
from .sequence_labeling import *
from .summarization import *
from .text_matching import *
from .kg_link_prediction import * | [
6738,
764,
24622,
1330,
1635,
198,
6738,
764,
5239,
62,
4871,
2649,
1330,
1635,
198,
6738,
764,
43167,
62,
18242,
278,
1330,
1635,
198,
6738,
764,
16345,
3876,
1634,
1330,
1635,
198,
6738,
764,
5239,
62,
15699,
278,
1330,
1635,
198,
6... | 3.568627 | 51 |
# sweetscomplete.entity.customer.Customer read/add/edit/delete
# tell python where to find module source code
import os,sys
sys.path.append(os.path.realpath("src"))
import pprint
import db.mongodb.connection
from datetime import date
from sweetscomplete.entity.customer import Customer
from sweetscomplete.domain.customer import CustomerService
# setting up the connection + collection
conn = db.mongodb.connection.Connection('localhost', 27017, Customer)
service = CustomerService(conn, 'sweetscomplete')
# initialize test data
key = '00000000000'
doc = '''\
{
"customerKey" : "%key%",
"firstName" : "Fred",
"lastName" : "Flintstone",
"phoneNumber" : "+0-000-000-0000",
"email" : "fred@first.man.com",
"socialMedia": {
"GO": {
"label": "google",
"url": "https:\/\/google.com\/fflintstone"
},
"LN": {
"label": "line",
"url": "https:\/\/line.com\/fred"
}
},
"userName" : "fflintstone",
"password" : "password",
"streetAddressOfBuilding" : "123 Rocky Way",
"buildingName" : "",
"floor" : "1",
"roomApartmentCondoNumber" : "",
"city" : "Bedrock",
"stateProvince" : "MI",
"locality" : "North America",
"country" : "ZZ",
"postalCode" : "00000",
"latitude" : 111.1111,
"longitude" : 222.2222,
"secondaryPhoneNumbers" : ["111-222-3333","444-555-6666"],
"secondaryEmailAddresses" : [],
"dateOfBirth" : "0000-00-00",
"gender" : "Male"
}'''.replace('%key%',key)
# adding a new customer
print("\nAdding a Single Test Customer")
customer = Customer(doc)
if service.addOne(customer) :
print("\nCustomer " + key + " added successfully")
# running a query for a single item
print("\nFetch Customer by Key")
doc = service.fetchByKey(key)
if doc :
print(doc.toJson())
# updating a single customer
newPhone = '+9-999-999-9999'
updateDoc = {
'phoneNumber' :newPhone,
'email' : 'new.email@fred.com'
};
result = service.editOneByKey(key, updateDoc)
if not result :
print("\nUnable to find this product key: " + key)
else :
print("\nProduct " + key + " updated successfully")
print(result.toJson())
# bad key should not perform updates
badkey = 'badkey'
result = service.editOneByKey(badkey, updateDoc)
if not result :
print("\nUnable to find this product key: " + badkey + "\n")
else :
print("\nProduct " + badkey + " updated successfully\n")
print(result.toJson())
# running a query for a single customer
print("\nQuery by Phone Number")
query = dict({ "phoneNumber" : newPhone })
doc = service.fetchOne(query)
if doc :
print("\nSingle Customer")
print(doc.toJson())
# deleting the test customer
query = dict({"customerKey" : key})
if service.deleteOne(query) :
print("\nCustomer " + key + " deleted successfully")
# running a query all customers
print("\nList of Customer Names and Keys")
for doc in service.fetchKeysAndNames(0, 6) :
print(doc.getFullName() + ' [' + doc.getKey() + ']')
# deleting all instances of the test customer
query = dict({"customerKey" : key})
if service.deleteAll(query) :
print("\nAll Customers " + key + " deleted successfully")
| [
2,
42402,
20751,
13,
26858,
13,
23144,
263,
13,
44939,
1100,
14,
2860,
14,
19312,
14,
33678,
198,
198,
2,
1560,
21015,
810,
284,
1064,
8265,
2723,
2438,
198,
11748,
28686,
11,
17597,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
6978... | 2.343394 | 1,491 |
import envi
import envi.bits as e_bits
import struct
# from disasm import H8ImmOper, H8RegDirOper, H8RegIndirOper, H8AbsAddrOper, H8PcOffsetOper, H8RegMultiOper, H8MemIndirOper
import envi.archs.h8.regs as e_regs
import envi.archs.h8.const as e_const
import envi.archs.h8.operands as h8_operands
bcc = [
('bra', envi.IF_NOFALL | envi.IF_BRANCH),
('brn', envi.IF_BRANCH | envi.IF_COND),
('bhi', envi.IF_BRANCH | envi.IF_COND),
('bls', envi.IF_BRANCH | envi.IF_COND),
('bcc', envi.IF_BRANCH | envi.IF_COND),
('bcs', envi.IF_BRANCH | envi.IF_COND),
('bne', envi.IF_BRANCH | envi.IF_COND),
('beq', envi.IF_BRANCH | envi.IF_COND),
('bvc', envi.IF_BRANCH | envi.IF_COND),
('bvs', envi.IF_BRANCH | envi.IF_COND),
('bpl', envi.IF_BRANCH | envi.IF_COND),
('bmi', envi.IF_BRANCH | envi.IF_COND),
('bge', envi.IF_BRANCH | envi.IF_COND),
('blt', envi.IF_BRANCH | envi.IF_COND),
('bgt', envi.IF_BRANCH | envi.IF_COND),
('ble', envi.IF_BRANCH | envi.IF_COND),
]
# 60-67, 70-77, 7d, 7f, 7c, 7e (converge?)
bit_dbles = [
('bset', 0),
('bset', 0),
('bnot', 0),
('bnot', 0),
('bclr', 0),
('bclr', 0),
('btst', 0),
('bist', 0),
('bor', 0),
('bior', 0),
('bxor', 0),
('bixor', 0),
('band', 0),
('biand', 0),
('bst', 0),
('bist', 0),
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
]
bit_dbles.extend(bit_dbles)
bit_dbles[0x2e] = ('bld', 0)
bit_dbles[0x2f] = ('bild', 0)
data_0b = (
(4, 0, 1, 'adds'),
None,
None,
None,
None,
(2, e_const.IF_W, 1, 'inc'),
None,
(4, e_const.IF_L, 1, 'inc'),
(4, 0, 2, 'adds'),
(4, 0, 4, 'adds'),
None,
None,
None,
(2, e_const.IF_W, 2, 'inc'),
None,
(4, e_const.IF_L, 2, 'inc'),
)
data_1b = (
(4, 0, 1, 'subs'),
None,
None,
None,
None,
(2, e_const.IF_W, 1, 'dec'),
None,
(4, e_const.IF_L, 1, 'dec'),
(4, 0, 2, 'subs'),
(4, 0, 4, 'subs'),
None,
None,
None,
(2, e_const.IF_W, 2, 'dec'),
None,
(4, e_const.IF_L, 2, 'dec'),
)
shift_info = []
for name in ('shll', 'shal', 'shlr', 'shar', 'rotxl', 'rotl', 'rotxr', 'rotr'):
shift_info.append((name, 1, 0))
shift_info.append((name, 2, 0))
shift_info.append(None)
shift_info.append((name, 4, 0))
shift_info.append((name, 1, 2))
shift_info.append((name, 2, 2))
shift_info.append(None)
shift_info.append((name, 4, 2))
for nothing in range(0x14, 0x17):
for xnothing in range(16):
shift_info.append(None)
for name1, name2 in (('not', 'extu'), ('neg', 'exts')):
shift_info.append((name1, 1, 0))
shift_info.append((name1, 2, 0))
shift_info.append(None)
shift_info.append((name1, 4, 0))
shift_info.append(None)
shift_info.append((name2, 2, 0))
shift_info.append(None)
shift_info.append((name2, 4, 0))
mnem_79a = (
'mov',
'add',
'cmp',
'sub',
'or',
'xor',
'and',
)
'''
8DII add.b immediate 2 states
08SD add.b regdir 2 states
791DIIII add.w imm 4 states
09SD add.w regdir 2 states
7a1EIIII add.l imm 6 states
0aSD add.l regdir 2 states
0b0D adds #1, ERd 2 states
0b8D adds #2, ERd 2 states
0b9D adds #4, ERd 2 states
9DII addx #xx:8, Rd 2 states
0eSD addx Rs, Rd 2 states
eDII and.b #xx:8, Rd 2 states
16SD and.b Rs, Rd 2 states
796DIIII and.w #xx:16, Rd 4 states
66SD and.w Rs, Rd 2 states
7a6DIIIIIIII and.l #xx:32, ERd 6 states
01f066SD and.l Rs, ERd 4 states
06II andc #xx:8, CCR 2 states
76ID band #xx:3, Rd 2 states
7cD076I0 band #xx:3, @ERd 6 states
7eAb76I0 band #xx:3, @aa:8 6 states
4CDS bcc d:8 4 states
58C0DISP bcc d:16 6 states
'''
# table: ( subtable, mnem, decoder, tsize, iflags)
main_table = [(None, 'DECODE_ERROR', 0, 0, 0) for x in range(256)]
main_table[0x0] = (False, 'nop', None, 0, 0)
main_table[0x1] = (False, None, p_01, 0, 0)
main_table[0xa] = (False, None, p_0a_1a, 0, 0)
main_table[0xb] = (False, None, p_0b_1b, 0, 0)
main_table[0xf] = (False, None, p_0f_1f, 0, 0)
main_table[0x10] = (False, None, p_shift_10_11_12_13_17, 0, 0)
main_table[0x11] = (False, None, p_shift_10_11_12_13_17, 0, 0)
main_table[0x12] = (False, None, p_shift_10_11_12_13_17, 0, 0)
main_table[0x13] = (False, None, p_shift_10_11_12_13_17, 0, 0)
main_table[0x17] = (False, None, p_shift_10_11_12_13_17, 0, 0)
main_table[0x1a] = (False, None, p_0a_1a, 0, 0)
main_table[0x1b] = (False, None, p_0b_1b, 0, 0)
main_table[0x1f] = (False, None, p_0f_1f, 0, 0)
main_table[0x02] = (False, 'stc', p_CCR_Rd, 1, e_const.IF_B)
main_table[0x03] = (False, 'ldc', p_Rs_CCR, 1, e_const.IF_B)
main_table[0x04] = (False, 'orc', p_i8_CCR, 1, 0)
main_table[0x05] = (False, 'xorc', p_i8_CCR, 1, 0)
main_table[0x06] = (False, 'andc', p_i8_CCR, 1, 0)
main_table[0x07] = (False, 'ldc', p_i8_CCR, 1, e_const.IF_B)
main_table[0x08] = (False, 'add', p_Rs_Rd, 1, e_const.IF_B)
main_table[0x09] = (False, 'add', p_Rs_Rd, 2, e_const.IF_W)
main_table[0x0c] = (False, 'mov', p_Rs_Rd, 1, e_const.IF_B)
main_table[0x0d] = (False, 'mov', p_Rs_Rd, 2, e_const.IF_W)
main_table[0x0e] = (False, 'addx', p_Rs_Rd, 1, 0)
main_table[0x14] = (False, 'or', p_Rs_Rd, 1, e_const.IF_B)
main_table[0x15] = (False, 'xor', p_Rs_Rd, 1, e_const.IF_B)
main_table[0x16] = (False, 'and', p_Rs_Rd, 1, e_const.IF_B)
main_table[0x18] = (False, 'sub', p_Rs_Rd, 1, e_const.IF_B)
main_table[0x19] = (False, 'sub', p_Rs_Rd, 2, e_const.IF_W)
main_table[0x1c] = (False, 'cmp', p_Rs_Rd, 1, e_const.IF_B)
main_table[0x1d] = (False, 'cmp', p_Rs_Rd, 2, e_const.IF_W)
main_table[0x1e] = (False, 'subx', p_Rs_Rd, 1, 0)
# mov.b set
for opbyte in range(0x20, 0x30):
main_table[opbyte] = (False, 'mov', p_aAA8_Rd, 1, e_const.IF_B)
for opbyte in range(0x30, 0x40):
main_table[opbyte] = (False, 'mov', p_Rs_aAA8, 1, e_const.IF_B)
# generate Bcc opcodes
for opbyte in range(16):
mnem, iflags = bcc[opbyte]
main_table[0x40 + opbyte] = (False, mnem, p_disp8, 1, iflags)
main_table[0x50] = (False, 'mulxu', p_Rs_Rd_mul, 1, e_const.IF_B)
main_table[0x51] = (False, 'divxu', p_Rs_Rd_mul, 1, e_const.IF_B)
main_table[0x52] = (False, 'mulxu', p_Rs_ERd, 2, e_const.IF_W)
main_table[0x53] = (False, 'divxu', p_Rs_ERd, 2, e_const.IF_W)
main_table[0x54] = (False, 'rts', None, 0, envi.IF_RET | envi.IF_NOFALL) # 5470
main_table[0x55] = (False, 'bsr', p_disp8, 0, envi.IF_CALL)
main_table[0x56] = (False, 'rte', None, 0, envi.IF_RET | envi.IF_NOFALL) # 5670
main_table[0x57] = (False, 'trapa', p_i2, 0, envi.IF_NOFALL)
main_table[0x58] = (False, 'error', p_disp16, 2, 0)
main_table[0x59] = (False, 'jmp', p_aERn, 3, envi.IF_BRANCH | envi.IF_NOFALL)
main_table[0x5a] = (False, 'jmp', p_aAA24, 0, envi.IF_BRANCH | envi.IF_NOFALL)
main_table[0x5b] = (False, 'jmp', p_aaAA8, 0, envi.IF_BRANCH | envi.IF_NOFALL)
main_table[0x5c] = (False, 'bsr', p_disp16, 0, envi.IF_CALL)
main_table[0x5d] = (False, 'jsr', p_aERn, 3, envi.IF_CALL)
main_table[0x5e] = (False, 'jsr', p_aAA24, 0, envi.IF_CALL)
main_table[0x5f] = (False, 'jsr', p_aaAA8, 0, envi.IF_CALL)
# all bit instructions are B. may set 0->1
main_table[0x60] = (False, 'bset', p_Rn_Rd, 1, 0)
main_table[0x70] = (False, 'bset', p_i3_Rd, 1, 0)
main_table[0x61] = (False, 'bnot', p_Rn_Rd, 1, 0)
main_table[0x71] = (False, 'bnot', p_i3_Rd, 1, 0)
main_table[0x62] = (False, 'bclr', p_Rn_Rd, 1, 0)
main_table[0x72] = (False, 'bclr', p_i3_Rd, 1, 0)
main_table[0x63] = (False, 'btst', p_Rn_Rd, 1, 0)
main_table[0x73] = (False, 'btst', p_i3_Rd, 1, 0)
main_table[0x64] = (False, 'or', p_Rs_Rd, 2, e_const.IF_W)
main_table[0x65] = (False, 'xor', p_Rs_Rd, 2, e_const.IF_W)
main_table[0x66] = (False, 'and', p_Rs_Rd, 2, e_const.IF_W)
main_table[0x67] = (False, 'bitdoubles', p_Bit_Doubles, 1, 0)
main_table[0x68] = (False, 'mov', p_68_69_6e_6f, 1, e_const.IF_B)
main_table[0x69] = (False, 'mov', p_68_69_6e_6f, 2, e_const.IF_W)
main_table[0x6a] = (False, 'mov', p_6A_6B, 1, 0)
main_table[0x6b] = (False, 'mov', p_6A_6B, 2, e_const.IF_W)
# main_table[0x6c] = (False, 'mov', p_Mov_6C, 1, IF_B)
# main_table[0x6d] = (False, 'mov', p_Mov_6C, 2, IF_W)
main_table[0x6c] = (False, 'mov', p_6c_6d_0100, 1, e_const.IF_B)
main_table[0x6d] = (False, 'mov', p_6c_6d_0100, 2, e_const.IF_W)
main_table[0x6e] = (False, 'mov', p_68_69_6e_6f, 1, e_const.IF_B)
main_table[0x6f] = (False, 'mov', p_68_69_6e_6f, 2, e_const.IF_W)
for opbyte in range(0x74, 0x78):
main_table[opbyte] = (False, 'bitdoubles', p_Bit_Doubles, 1, 0)
main_table[0x78] = (False, 'mov', p_Mov_78, 1, 0)
main_table[0x79] = (False, 'p79', p_79, 2, e_const.IF_W)
main_table[0x7a] = (False, 'p7a', p_7a, 4, e_const.IF_L)
main_table[0x7b] = (False, 'eepmov', p_eepmov, 0, 0)
main_table[0x7c] = (False, '7Cmnem', p_7c, 1, 0)
main_table[0x7d] = (False, '7Dmnem', p_7d, 1, 0)
main_table[0x7e] = (False, '7Emnem', p_7e, 1, 0)
main_table[0x7f] = (False, '7Fmnem', p_7f, 1, 0)
for opbyte in range(0x80, 0x90):
main_table[opbyte] = (False, 'add', p_i8_Rd, 1, e_const.IF_B)
for opbyte in range(0x90, 0xa0):
main_table[opbyte] = (False, 'addx', p_i8_Rd, 1, 0)
for opbyte in range(0xa0, 0xb0):
main_table[opbyte] = (False, 'cmp', p_i8_Rd, 1, e_const.IF_B)
for opbyte in range(0xb0, 0xc0):
main_table[opbyte] = (False, 'subx', p_i8_Rd, 1, 0)
for opbyte in range(0xc0, 0xd0):
main_table[opbyte] = (False, 'or', p_i8_Rd, 1, e_const.IF_B)
for opbyte in range(0xd0, 0xe0):
main_table[opbyte] = (False, 'xor', p_i8_Rd, 1, e_const.IF_B)
for opbyte in range(0xe0, 0xf0):
main_table[opbyte] = (False, 'and', p_i8_Rd, 1, e_const.IF_B)
for opbyte in range(0xf0, 0x100):
main_table[opbyte] = (False, 'mov', p_i8_Rd, 1, e_const.IF_B)
| [
11748,
551,
8903,
198,
11748,
551,
8903,
13,
9895,
355,
304,
62,
9895,
198,
11748,
2878,
198,
198,
2,
422,
595,
8597,
1330,
367,
23,
24675,
18843,
11,
367,
23,
8081,
35277,
18843,
11,
367,
23,
8081,
5497,
343,
18843,
11,
367,
23,
... | 1.75 | 6,004 |
# /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import find_packages, setup
ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name="django-zurb-foundation-6",
version="6.7.4",
description="Django Zurb Foundation package for Django 4.",
long_description=long_description,
long_description_content_type="text/markdown",
author="Jeremy Bywater",
author_email="nougats-dance0b@icloud.com",
url="https://github.com/jlbyh2o/django-zurb-foundation-6",
license='Unlicense',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
classifiers=[
'Environment :: Web Environment',
'Framework :: Django :: 4',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: The Unlicense (Unlicense)',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Development Status :: 4 - Beta',
'Topic :: Utilities'],
python_requires=">3.6",
)
| [
2,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
198,
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
11,
9058,
198,
198,
13252,
2394,
62,
34720,
796,... | 2.549234 | 457 |
if __name__ == "__main__":
import sys; sys.path.insert(0, '..')
from pprint import pprint
from collections import Counter, defaultdict
from copy import copy
from contextlib import contextmanager
from os.path import join
import json
from extensible_provn.prov_magics import ProvMagic
STATS_VIEW = True
NAMES = Counter()
DICTS = defaultdict(dict)
ENABLED = True
RESULT = []
TEMP = []
STATS = defaultdict(lambda: defaultdict(Counter))
VALUES = {}
SAME = {}
BASE = ""
LINE = None
HIDE = {"dot:hide": "true"}
SPECIFIC = {"dot:specific": "true"}
BLACK = {"dot:color":"#000000"}
NAMESPACE = "version:"
SCRIPT = "script:"
@contextmanager
| [
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1330,
25064,
26,
25064,
13,
6978,
13,
28463,
7,
15,
11,
705,
492,
11537,
198,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
6738,
17268,
1330,
15034,
11,
427... | 2.958904 | 219 |
################################################################################
# Module: template.__init__.py
# Description: Template module for everything UMI template.
# License: MIT, see full license in LICENSE.txt
# Web: https://github.com/samuelduchesne/archetypal
################################################################################
from .umi_base import *
from .gas_material import *
from .glazing_material import *
from .opaque_material import *
from .opaque_construction import *
from .schedule import *
from .window import *
from .structure import *
from .conditioning import *
from .load import *
from .ventilation import *
from .dhw import *
from .zone_construction_set import *
from .zonedefinition import *
from .building_template import *
| [
29113,
29113,
14468,
198,
2,
19937,
25,
11055,
13,
834,
15003,
834,
13,
9078,
198,
2,
12489,
25,
37350,
8265,
329,
2279,
471,
8895,
11055,
13,
198,
2,
13789,
25,
17168,
11,
766,
1336,
5964,
287,
38559,
24290,
13,
14116,
198,
2,
5313... | 4.134409 | 186 |
import mechanize
import urllib2
from tqdm import tqdm
import sys
MechBrowser = mechanize.Browser()
MechBrowser.set_handle_equiv(True)
MechBrowser.set_handle_redirect(True)
MechBrowser.set_handle_referer(True)
MechBrowser.set_handle_robots(False)
print "\n#####################################"
print "# => Brute Force Login <= #"
print "# trustn01 #"
print "#####################################"
with open('passwords.txt') as f:
passwords = f.read().splitlines()
LoginUrl = "http://localhost:8000/auth"
flag = False
print ("\nConnecting to: %s ......\n" % (LoginUrl))
print("Attemping brute force for user 'intern@wondoughbank.com', PROGRESS\n")
for x in tqdm(passwords):
LoginData = 'target=http%3A%2F%2Flocalhost%3A8080%2Foauth&appname=Sample+Application&username=intern%40wondoughbank.com&password=' + str(x)
LoginHeader = {"content-type" : "application/x-www-form-urlencoded"}
LoginRequest = urllib2.Request(LoginUrl, LoginData, LoginHeader)
LoginResponse = MechBrowser.open(LoginRequest)
# print("[.]Checking (username / password): intern@wondoughbank.com / %s" % (x))
if LoginResponse.geturl() == "http://localhost:8080/":
print ("\n[*]SUCCESS! Password for intern@wondoughbank.com is: %s" % (x))
break
| [
11748,
3962,
1096,
198,
11748,
2956,
297,
571,
17,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
11748,
25064,
198,
198,
28452,
46532,
796,
3962,
1096,
13,
46532,
3419,
198,
28452,
46532,
13,
2617,
62,
28144,
62,
4853,
452,
7,... | 2.709812 | 479 |
from webdnn.graph.operators.elementwise import Elementwise
class Greater(Elementwise):
"""Greater(name)
return 1 if x0 is greater than x1 elementwisely
.. math::
f(x) = x0 > x1 ? 1 : 0;
Signature
.. code::
y, = op(x0, x1)
- **x0, x1** - Input variables.
- **y** - Output variable.
"""
pass
| [
6738,
3992,
67,
20471,
13,
34960,
13,
3575,
2024,
13,
30854,
3083,
1330,
11703,
3083,
628,
198,
4871,
18169,
7,
20180,
3083,
2599,
198,
220,
220,
220,
37227,
13681,
263,
7,
3672,
8,
628,
220,
220,
220,
1441,
352,
611,
2124,
15,
318,... | 2.204819 | 166 |
# coding=utf-8
#
# This file is part of Hypothesis (https://github.com/DRMacIver/hypothesis)
#
# Most of this work is copyright (C) 2013-2015 David R. MacIver
# (david@drmaciver.com), but it contains contributions by others. See
# https://github.com/DRMacIver/hypothesis/blob/master/CONTRIBUTING.rst for a
# full list of people who may hold copyright, and consult the git log if you
# need to determine who owns an individual contribution.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
#
# END HEADER
from __future__ import division, print_function, absolute_import
import sqlite3
import threading
from abc import abstractmethod
from contextlib import contextmanager
from hypothesis.internal.compat import text_type
class Backend(object):
"""Interface class for storage systems.
Simple text key -> value mapping. values are of the type returned by
data_type() but keys are always unicode text (str in python 3, unicode in
python 2).
Every (key, value) pair appears at most once. Saving a duplicate will just
silently do nothing.
"""
@abstractmethod # pragma: no cover
def data_type(self):
"""Returns the type of data that is suitable for values in this DB."""
@abstractmethod # pragma: no cover
def save(self, key, value):
"""Save a single value matching this key."""
def delete(self, key, value):
"""Remove this value from this key.
This method is optional but should fail silently if not
supported. Note that if you do not support it you may see
performance degradation over time as a number of values have to
be ignored on each run
"""
@abstractmethod # pragma: no cover
def fetch(self, key):
"""yield the values matching this key."""
@abstractmethod # pragma: no cover
def close(self):
"""Close database connection whenever such is used."""
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
21209,
313,
8497,
357,
5450,
1378,
12567,
13,
785,
14,
7707,
14155,
40,
332,
14,
36362,
313,
8497,
8,
198,
2,
198,
2,
4042,
286,
428,
670,
318,
6634,
357,
... | 3.162328 | 653 |
import typing as t
import uuid
from ..filter import Column as _Column, ValueType
if t.TYPE_CHECKING:
from .database import Database
from .model import Model
T_Model = t.TypeVar('T_Model', bound='Model')
class Column(_Column):
    """
    Declares a plain column on the class level of a #Model subclass. Values of
    columns declared with this class are assigned directly to the #Model
    instance (unlike, for example, the #ForeignKey column type).

    # Example

    ```py
    from baserow.orm import Column, Model

    class Product(Model):
      name = Column('Name')
      price = Column('Price')
    ```

    A column is known under four different names:

    1. The attribute name it is bound to on the Model class, used to refer to
       the column from code.
    2. The user defined name of the column (aka. field) in the Baserow table.
    3. The Baserow internal field ID.
    4. An ORM internal UUID, freshly generated for every Column instance. It
       serves as a placeholder while constructing #Filter#s; the #Query swaps
       it for the Baserow internal field ID on execution.

    A model only specifies the attribute name and the user defined name. Use
    the #DatabaseMapping.generate() method, or the `baserow.orm` CLI, to
    produce a #DatabaseMapping that resolves attribute names to Baserow
    internal field IDs via the user defined names.
    """

    def __init__(self, user_name: str) -> None:
        """
        # Arguments
        user_name: The user specified column name in the Baserow table.
        """
        assert isinstance(user_name, str)
        # Initialize the base class with a unique placeholder ID; references
        # to it inside #Filter objects are later rewritten to the actual
        # internal field ID.
        super().__init__(f'{user_name}.{uuid.uuid4()}')
        self._user_name = user_name

    def from_baserow(self, db: 'Database', value: t.Any) -> t.Any:
        """
        Translate *value*, as received from Baserow for this column, into the
        value that gets assigned to the Model instance attribute.
        """
        return value

    def to_baserow(self, value: t.Any) -> t.Any:
        """
        Translate a value held by a model instance into its JSON form for a
        request to Baserow.
        """
        return value

    def from_python(self, value: t.Any) -> t.Any:
        """
        Translate a value assigned to the row from Python.
        """
        return value

    @property
    def id(self) -> str:
        """
        The internal placeholder ID used while constructing #Filter#s.
        """
        return self._name

    @property
    def name(self) -> str:
        """
        The user defined name of the column.
        """
        return self._user_name
class ForeignKey(Column):
"""
The #ForeignKey is a special column that should be used to represent a "link row" field in Baserow. Model
attributes of this column type will not receive the raw data from the Baserow API as values, but instead
an instance of #LinkedRow which automatically loads the linked rows as another Model from the database on
access.
"""
@property
class Ref(t.NamedTuple):
"""
A reference to another row.
"""
id: int
name: ValueType
class LinkedRow(t.Sequence[T_Model]):
"""
Represents a "link row" field. Loads instances of the linked Model on acces.
"""
@property
| [
198,
11748,
19720,
355,
256,
198,
11748,
334,
27112,
198,
198,
6738,
11485,
24455,
1330,
29201,
355,
4808,
39470,
11,
11052,
6030,
198,
198,
361,
256,
13,
25216,
62,
50084,
2751,
25,
198,
220,
422,
764,
48806,
1330,
24047,
198,
220,
4... | 3.245473 | 994 |
# -*- coding: utf-8 -*-
import pymongo
from pymongo.errors import ConnectionFailure
import CPUtimer
import os
import csv
import codecs
if __name__ == '__main__':
    # Script entry point; main() is expected to be defined elsewhere in this
    # module (not visible in this excerpt).
    main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
279,
4948,
25162,
198,
6738,
279,
4948,
25162,
13,
48277,
1330,
26923,
50015,
198,
11748,
9135,
45016,
198,
11748,
28686,
198,
11748,
269,
21370,
198,
11748,
40481,
... | 2.806452 | 62 |
# -*- coding: utf-8 -*-
# import logging
from os.path import join
from gensim.models import Word2Vec
import numpy as np
import random
from utils.cache import LMDBClient
from utils import data_utils
from utils.data_utils import Singleton
from utils import settings
import gensim
# logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
EMB_DIM = 100
# class MySenteces():
# def __init__(self, dirname):
# self.dirname = dirname
# def __iter__(self):
# for fname in os.listdir(self.dirname):
# for line in open(os.path.join(self.dirname, fname)):
# yield line.split()
@Singleton
if __name__ == '__main__':
    # Train word embeddings for the 'aminer' dataset. EmbeddingModel is a
    # @Singleton-decorated class defined in this module (definition not
    # visible in this excerpt); Instance() returns the shared instance.
    wf_name = 'aminer'
    emb_model = EmbeddingModel.Instance()
    emb_model.train(wf_name)
    print('loaded')
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
1330,
18931,
198,
6738,
28686,
13,
6978,
1330,
4654,
198,
6738,
308,
641,
320,
13,
27530,
1330,
9678,
17,
53,
721,
198,
11748,
299,
32152,
355,
45941,
198,
117... | 2.334262 | 359 |
# Create the tables for the database (idempotent thanks to IF NOT EXISTS).
import sqlite3

conn = sqlite3.connect('Database/final.db')
try:
    cursor = conn.cursor()
    cursor.execute(""" CREATE TABLE IF NOT EXISTS cust_image_master (Customer_ID VARCHAR(10) PRIMARY KEY, Image BLOB, Counter INTEGER) """)
    cursor.execute(""" CREATE TABLE IF NOT EXISTS location_master (Location_ID VARCHAR(10) PRIMARY KEY, Location_Name TEXT) """)
    cursor.execute(""" CREATE TABLE IF NOT EXISTS customer_master (Customer_ID VARCHAR(10) PRIMARY KEY, Gender CHARACTER, Age INTEGER, Counter INTEGER) """)
    cursor.execute(""" CREATE TABLE IF NOT EXISTS visit_register (Visit_Date DATE, Visit_Time DATE, Customer_ID VARCHAR(10), Location_ID VARCHAR(10), New_Customer TEXT, Sentiment TEXT) """)
    cursor.close()
    # Commit explicitly: the sqlite3 module's implicit transaction handling
    # has changed across Python versions (3.6 stopped committing before DDL),
    # so never rely on auto-commit for schema changes.
    conn.commit()
finally:
    # Guarantee the connection is released even if a statement fails.
    conn.close()
| [
2,
4441,
262,
8893,
329,
6831,
198,
11748,
44161,
578,
18,
198,
198,
37043,
796,
44161,
578,
18,
13,
8443,
10786,
38105,
14,
20311,
13,
9945,
11537,
198,
66,
21471,
796,
48260,
13,
66,
21471,
3419,
198,
198,
66,
21471,
13,
41049,
72... | 3.032129 | 249 |
import base64
from unittest import TestCase
from osbot_aws.helpers.Lambda_Package import Lambda_Package
from osbot_utils.utils.Dev import Dev
from osbot_jupyter.api.CodeBuild_Jupyter import CodeBuild_Jupyter
| [
11748,
2779,
2414,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
6738,
28686,
13645,
62,
8356,
13,
16794,
364,
13,
43,
4131,
6814,
62,
27813,
1330,
21114,
6814,
62,
27813,
198,
6738,
28686,
13645,
62,
26791,
13,
26791,
13,
13... | 2.864865 | 74 |
# Demo calls for the exercise functions fun1/fun2/fun3, which are defined
# earlier in this file (not visible in this excerpt).
fun1(1)
fun1(5)
fun1(6)
fun1(7)

fun2("Laurence","Svekis")
fun2("Linda","Jones")
fun2("Mike","Smith")
# fun2 also returns a value that can be captured and printed.
myName = fun2("Laurence","Svekis")
print(myName)

# fun3 takes two numbers and returns a result.
num1 = fun3(6,7)
num2 = fun3(126,2317)
print(num1, num2)
198,
12543,
16,
7,
16,
8,
198,
12543,
16,
7,
20,
8,
198,
12543,
16,
7,
21,
8,
198,
12543,
16,
7,
22,
8,
198,
198,
12543,
17,
7203,
14772,
495,
1198,
2430,
50,
303,
74,
271,
4943,
198,
12543,
17,
7203,
43,
22261,
2430,
25784,
... | 1.9 | 110 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Setup script for gdal-utils.
from glob import glob
# from importlib.metadata import entry_points
from pathlib import Path
from setuptools import setup, find_packages
from osgeo_utils import (
__package_name__,
__version__,
__author__,
__author_email__,
__maintainer__,
__maintainer_email__,
__description__,
__license_type__,
__url__,
)
# Trove classifiers describing the package for PyPI.
classifiers = [
    'Development Status :: 5 - Production/Stable',
    'Intended Audience :: Developers',
    'Intended Audience :: Science/Research',
    'License :: OSI Approved :: MIT License',
    'Operating System :: OS Independent',
    'Programming Language :: Python :: 3',
    'Topic :: Scientific/Engineering :: GIS',
    'Topic :: Scientific/Engineering :: Information Analysis',
]

# Long description for PyPI, read from the repository README.
__readme__ = open('README.rst', encoding="utf-8").read()
__readme_type__ = 'text/x-rst'

package_root = '.'  # package sources are under this dir
packages = find_packages(package_root)  # include all packages under package_root
package_dir = {'': package_root}  # packages sources are under package_root
# Command line scripts.
# NOTE(review): the reverse sort order is deliberate-looking but its reason is
# not evident from this file — confirm before changing.
scripts = sorted(glob('./osgeo_utils/*.py'), reverse=True)  # command line scripts
def define_entry_points(scripts, entry_points=None):
    """
    Return a dict defining scripts that get installed to PYTHONHOME/Scripts.

    :param scripts: iterable of script paths, e.g. './osgeo_utils/gdal_edit.py'.
    :param entry_points: optional existing entry_points dict to extend; a new
        dict is created when omitted (the original implementation silently
        ignored this argument).
    :return: entry_points dict of the form:

        console_scripts : [
            # CLI_command = dirname.filename:function
            'gdal_edit = osgeo_utils.gdal_edit:main',
            'gdal_merge = osgeo_utils.gdal_merge:main',
            ... ]
    """
    console_scripts = []
    for f in scripts:
        name = Path(f).stem  # 'gdal_edit' from 'gdal_edit.py'
        # Append the entry-point *string* itself. The original code appended a
        # one-element list, producing [['gdal_edit = ...']], which is not a
        # valid setuptools console_scripts specification.
        console_scripts.append(f'{name} = osgeo_utils.{name}:main')
    if entry_points is None:
        entry_points = {}
    entry_points['console_scripts'] = console_scripts
    return entry_points
# Package metadata is sourced from the osgeo_utils dunder attributes imported
# above, keeping setup.py and the package itself in sync.
setup(
    name=__package_name__,
    version=__version__,
    author=__author__,
    author_email=__author_email__,
    maintainer=__maintainer__,
    maintainer_email=__maintainer_email__,
    long_description=__readme__,
    long_description_content_type=__readme_type__,
    description=__description__,
    license=__license_type__,
    classifiers=classifiers,
    url=__url__,
    python_requires='>=3.6.0',
    packages=packages,
    package_dir=package_dir,
    # scripts=glob('./scripts/*.py'), # use entry_points console_script instead
    install_requires=['gdal'],
    extras_require={'numpy': ['numpy > 1.0.0']},
    entry_points=define_entry_points(scripts),
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
31122,
4226,
329,
308,
31748,
12,
26791,
13,
198,
6738,
15095,
1330,
15095,
198,
2,
422,
1330,
8019,
13,
38993,
13... | 2.638298 | 940 |
import enum
from dataclasses import dataclass, field
from heapq import heappop, heappush
from fileinput import FileInput
from math import inf
from typing import Iterable, NamedTuple, Optional
HALLWAY_LENGTH = 7
ROOMS = 4
ROOM_SIZE = 2
# Memory layout of the Burrow implementation
#
# Hn == hallway[n] - represents a spot in the hallway, which can be occupied by an amphipod
# Un == unwrapped_hallway[n] - only used in distance calculation, never stored actually
#
# U0 U1 U2 U3 U4 U5 U6 U7 U8 U9 U10
# H0 H1 ___ H2 ___ H3 ___ H4 ___ H5 H6
# R01 R11 R21 R31
# R00 R10 R20 R30
#
# In handsight, it would probably be better to store spots within the room,
# in the opposite order, so that `room[i][0]` is the hallway spot.
#
# The 'Spot' enum is designed in such a way, that an amphipod (= non-empty spot)
# wants to be in the same room as its value; in other words, given an amphipod,
# it wants to go to rooms[that_amphipod].
@dataclass(order=True)
class State:
"""State represents a burrow after n moves were performed.
To solve the challenge, we'll want to find the end state with the lowest energy.
"""
burrow: Burrow = field(compare=False)
energy: int = field(default=0, compare=True)
def can_move_from_spot_to_room(self, hallway_idx: int, room_idx: int) -> bool:
"""Checks if the path from the hallway spot to a room
is not obstructed by any other amphipod."""
left = hallway_idx + 1
right = room_idx + 2
if left > right:
spots = self.burrow.hallway[right:left-1]
else:
spots = self.burrow.hallway[left:right]
return all(spot == Spot.Empty for spot in spots)
def can_move_into_room(self, room_idx: int) -> Optional[int]:
"""Checks if the amphipod can move into a room.
An amphipod can move into a room if there's an empty spot,
and any other amphipods below (towards the "window") are the expected/valid
amphipods.
Returns None if the move can't be performed, and a room_spot_idx otherwise.
"""
for room_spot_idx in range(ROOM_SIZE):
occupant = self.burrow.rooms[room_idx][room_spot_idx]
if occupant == Spot.Empty:
return room_spot_idx
elif occupant != room_idx:
return None
def can_move_from_room(self, room_idx: int) -> Optional[int]:
"""Checks if anyone from the given room should move.
Returns room_spot_idx of the amphipod to move, as long
as it's in the incorrect room, or any amphipods below are also in
the incorrect room.
"""
room = self.burrow.rooms[room_idx]
# Empty room - nothing to move from
if room[0] == Spot.Empty:
return None
# Check if room is "valid" - it contains only valid amphipods or empty spots
# => no need to move in this case
if all(s == Spot.Empty or s == room_idx for s in room):
return None
# Find the top item
for idx in range(ROOM_SIZE):
if room[idx] == Spot.Empty:
return idx-1
return ROOM_SIZE - 1
def possible_moves(self) -> Iterable["State"]:
"""Generates all of the possible moves from the current state -
first generating all moves **to** a room, and then by
generating moves **from** all rooms.
"""
# First check if someone from the hallway can move to its room
for idx in range(HALLWAY_LENGTH):
amphipod = self.burrow.hallway[idx]
# Check if there's an amphipod at this idx and
# it has a free path to its room
if amphipod != Spot.Empty and self.can_move_from_spot_to_room(idx, amphipod) \
and (insert_into_spot := self.can_move_into_room(amphipod)) is not None:
energy_delta = abs(unwrap_hallway_index(idx) - 2 * (amphipod + 1))
energy_delta += ROOM_SIZE - insert_into_spot
energy_delta *= amphipod.energy_multiplier()
yield State(
self.burrow.to_room(idx, amphipod, insert_into_spot),
self.energy + energy_delta
)
# Then, check possible moves into the hallway
for room_idx in range(ROOMS):
from_spot = self.can_move_from_room(room_idx)
if from_spot is None:
continue
amphipod = self.burrow.rooms[room_idx][from_spot]
# Generate all possible spots to move to
for hallway_idx in range(HALLWAY_LENGTH):
if self.burrow.hallway[hallway_idx] != Spot.Empty \
or not self.can_move_from_spot_to_room(hallway_idx, room_idx):
continue
energy_delta = abs(unwrap_hallway_index(hallway_idx) - 2 * (room_idx + 1))
energy_delta += ROOM_SIZE - from_spot
energy_delta *= amphipod.energy_multiplier()
yield State(
self.burrow.to_hallway(hallway_idx, room_idx, from_spot),
self.energy + energy_delta
)
def find_cheapest(initial_state: State) -> State:
"""Uses Dijkstra's algorithm to find the least-energy
solved state.
Because we don't care about the specific steps towards a solution,
there's no need to keep track of the `prev`/`came_from` mapping.
It takes a few seconds to find the solution, I wonder if
it can be improved by switching to A*; or maybe it's possible to solve
the task in an entirely different way...
"""
min_energies: dict[Burrow, float] = {initial_state.burrow: 0}
queue: list[State] = [initial_state]
while queue:
state = heappop(queue)
if state.is_end():
return state
for new_state in state.possible_moves():
if new_state.energy < min_energies.get(new_state.burrow, inf):
min_energies[new_state.burrow] = new_state.energy
heappush(queue, new_state)
raise RuntimeError("no solution found")
if __name__ == "__main__":
initial_burrow = Burrow.from_input(FileInput())
initial_state = State(initial_burrow)
cheapest_end_state = find_cheapest(initial_state)
print(cheapest_end_state.energy)
| [
11748,
33829,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
11,
2214,
198,
6738,
24575,
80,
1330,
339,
1324,
404,
11,
339,
1324,
1530,
198,
6738,
2393,
15414,
1330,
9220,
20560,
198,
6738,
10688,
1330,
1167,
198,
6738,
19720,
133... | 2.295487 | 2,792 |
from setuptools import setup, find_packages
from codecs import open
from os import path
# Resolve the directory containing this setup.py so the README can be read
# regardless of the current working directory.
here = path.abspath(path.dirname(__file__))

with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='Classifying Personality of a Person Based On Users Twitter Data',
    version='1.0.0',
    install_requires=["pandas >= 1.0.2",
                      "pickleshare >= 0.7.5",
                      "numpy >= 1.17.2",
                      "scikit-learn >= 0.21.3",
                      "nltk >= 3.4.5",
                      "tweepy >= 3.8.0",
                      "unicodecsv >= 0.14.1"],
    description='A Natural Language Processing (NLP), Machine Learning and Data Mining project, which will automate the screening process before hiring a professional or can be used in psychiatry to check effectivity of patient therapy.',
    long_description=long_description,
    url='https://github.com/heitorsampaio/Personality-Classification-Based-on-Twitter-Data',
    # Bug fix: setuptools expects the keyword 'author' (singular). The
    # original 'authors=' keyword is unknown to setuptools and was silently
    # dropped, leaving the author metadata unset.
    author=' Heitor Sampaio ',
    author_email='horlando.heitor@gmail.com',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    keywords=' Suggestion Mining using textblob nlp tweepy and nltk',
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
6738,
40481,
82,
1330,
1280,
198,
6738,
28686,
1330,
3108,
198,
198,
1456,
796,
3108,
13,
397,
2777,
776,
7,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
4008,
198,
198,
44... | 2.463333 | 600 |
#!/usr/bin/env python3
import argparse
import logging
import sys
from typing import Tuple
from .base import (
DEFAULT_HTTP_PORT,
DEFAULT_LOG_LEVEL,
DEFAULT_PORT,
LOG_LEVELS,
ProtocolType,
)
from .config import OptionType, config
from .proxy import ProxyServer
from .tunnel_client import TunnelClient
from .tunnel_gui import GUIClient
from .utils import configure_logging
_logger = logging.getLogger(__name__)
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
1822,
29572,
198,
11748,
18931,
198,
11748,
25064,
198,
6738,
19720,
1330,
309,
29291,
198,
198,
6738,
764,
8692,
1330,
357,
198,
220,
220,
220,
5550,
38865,
62,
40717,
62,
... | 2.829412 | 170 |
from fastlmmhpc.util.runner import *
import logging
import unittest
import cStringIO
class DistributableTest(object) : #implements IDistributable
'''
This is a class for distributing any testing. It shouldn't be confused with TestDistributable which is a class for
testing the distributable classes.
'''
@staticmethod
#start of IDistributable interface--------------------------------------
_work_count = None
@property
@property
#optional override -- the str name of the instance is used by the cluster as the job name
#end of IDistributable interface---------------------------------------
| [
6738,
3049,
75,
3020,
71,
14751,
13,
22602,
13,
16737,
1330,
1635,
198,
11748,
18931,
198,
11748,
555,
715,
395,
198,
11748,
269,
10100,
9399,
198,
198,
4871,
46567,
540,
14402,
7,
15252,
8,
1058,
1303,
320,
1154,
902,
4522,
396,
2455... | 3.775148 | 169 |
# Copyright (c) 2015 SONATA-NFV, 5GTANGO, UBIWHERE, Paderborn University
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, 5GTANGO, UBIWHERE, Paderborn University
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
#
# This work has also been performed in the framework of the 5GTANGO project,
# funded by the European Commission under Grant number 761493 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.5gtango.eu).
import unittest
import tempfile
import shutil
import os
import tngsdk.package as tngpkg
class TngSdkPackagePythonApiTest(unittest.TestCase):
    """
    Test case to check that the tool can be used from external Python code,
    e.g. by directly calling its run method.
    """
| [
2,
220,
15069,
357,
66,
8,
1853,
311,
1340,
13563,
12,
21870,
53,
11,
642,
19555,
1565,
11230,
11,
471,
3483,
47357,
11,
350,
5067,
6286,
2059,
198,
2,
11096,
371,
34874,
15731,
1137,
53,
1961,
13,
198,
2,
198,
2,
49962,
739,
262,... | 3.644444 | 495 |
import sqlite3 as sql
| [
11748,
44161,
578,
18,
355,
44161,
628
] | 3.285714 | 7 |
__author__ = 'renhao.cui' | [
834,
9800,
834,
796,
705,
918,
23778,
13,
66,
9019,
6
] | 2.272727 | 11 |
#-------------------------------------------------------------------------------------------------------------------------------
# HUMAN VS BOT DIFFERENTIATION
# FILE NAME: test.py
# DEVELOPED BY: Vigneshwar Ravichandar, Moulishankar M R
# TOPICS: Multiclass Classification, Machine Learning, TensorFlow
#-------------------------------------------------------------------------------------------------------------------------------

# IMPORTING REQUIRED MODULES
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
print(f"TensorFlow version: {tf.__version__}")

DATASET_PATH = 'data/Dataset.csv'
MODEL_PATH = './model/botModel'

# DATA PREPROCESSING
data = pd.read_csv(DATASET_PATH)
print(data.describe())
# Feature columns (4..7) and label column (8); kept for reference/inspection —
# only the interactive `response` below is fed to the model.
x = data.iloc[:,[4,5,6,7]].values
y = data.iloc[:,8].values

# OPENING THE TRAINED MODEL
model = tf.keras.models.load_model(MODEL_PATH)

# Collect one value per feature interactively from the user.
features = ['Check Status','Captcha Attempts','No of Login Attempts','Avg Time between Attempts']
response = []
for i in range(len(features)):
    response.append(float(input(f"Please mention the {features[i]}: ")))

# Bug fix: Sequential.predict_classes() was removed in TensorFlow 2.6.
# Recover its behaviour from predict(): argmax for a multi-unit (softmax)
# output, 0.5 threshold for a single sigmoid unit.
probs = model.predict(np.array([response]))
if probs.shape[-1] > 1:
    res = int(np.argmax(probs, axis=-1)[0])
else:
    res = int(probs[0][0] > 0.5)

if res == 1:
    print("It might be a Human.")
elif res == 0:
    print("It might be a Bot.")
| [
2,
10097,
47232,
24305,
198,
198,
2,
45850,
1565,
22269,
347,
2394,
360,
5064,
24302,
3525,
40,
6234,
198,
198,
2,
45811,
36751,
25,
1332,
13,
9078,
198,
198,
2,
5550,
18697,
3185,
1961,
11050,
25,
569,
570,
5069,
5767,
28146,
488,
... | 3.282322 | 379 |
from tkinter import *
import smtplib

# Main Screen Init
master = Tk()
# Bug fix: Tk.title is a *method*. The original `master.title = 'Email App'`
# replaced the bound method with a string and never set the window title;
# it must be called instead.
master.title('Email App')

# Functions
# NOTE(review): send() and reset() are referenced by the buttons below; they
# are defined elsewhere in this file (not visible in this excerpt).

# Labels
Label(master, text="Email App", font=('Calibri',15)).grid(row=0, sticky=N)
Label(master, text="Please use the form below to send an email", font=('Calibri',11)).grid(row=1, sticky=W, padx=5 ,pady=10)
Label(master, text="Email", font=('Calibri', 11)).grid(row=2,sticky=W, padx=5)
Label(master, text="Password", font=('Calibri', 11)).grid(row=3,sticky=W, padx=5)
Label(master, text="To", font=('Calibri', 11)).grid(row=4,sticky=W, padx=5)
Label(master, text="Subject", font=('Calibri', 11)).grid(row=5,sticky=W, padx=5)
Label(master, text="Body", font=('Calibri', 11)).grid(row=6,sticky=W, padx=5)
# Status label used for feedback messages (e.g. validation errors) in red.
notif = Label(master, text="", font=('Calibri', 11),fg="red")
notif.grid(row=7,sticky=S)

# Storage: Tk variables backing the entry widgets below.
temp_username = StringVar()
temp_password = StringVar()
temp_receiver = StringVar()
temp_subject = StringVar()
temp_body = StringVar()

# Entries
usernameEntry = Entry(master, textvariable = temp_username)
usernameEntry.grid(row=2,column=0)
passwordEntry = Entry(master, show="*", textvariable = temp_password)  # mask the password
passwordEntry.grid(row=3,column=0)
receiverEntry = Entry(master, textvariable = temp_receiver)
receiverEntry.grid(row=4,column=0)
subjectEntry = Entry(master, textvariable = temp_subject)
subjectEntry.grid(row=5,column=0)
bodyEntry = Entry(master, textvariable = temp_body)
bodyEntry.grid(row=6,column=0)

# Buttons
Button(master, text = "Send", command = send).grid(row=7, sticky=W, pady=15, padx=5)
Button(master, text = "Reset", command = reset).grid(row=7, sticky=W, padx=45, pady=40)

# Mainloop
master.mainloop()
| [
6738,
256,
74,
3849,
1330,
1635,
201,
198,
11748,
895,
83,
489,
571,
201,
198,
201,
198,
2,
13383,
15216,
44707,
201,
198,
9866,
220,
220,
220,
220,
220,
220,
796,
309,
74,
3419,
201,
198,
9866,
13,
7839,
796,
705,
15333,
2034,
6,... | 2.494169 | 686 |
import logging
import celery
from django.apps import apps
from django.conf import settings
from elasticsearch import Elasticsearch
from elasticsearch import helpers
logger = logging.getLogger(__name__)
@celery.shared_task(bind=True)
| [
11748,
18931,
198,
198,
11748,
18725,
1924,
198,
198,
6738,
42625,
14208,
13,
18211,
1330,
6725,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
198,
6738,
27468,
12947,
1330,
48567,
12947,
198,
6738,
27468,
12947,
1330,
49385,
628,
... | 3.507246 | 69 |
from collections import OrderedDict
from cornflake.exceptions import ValidationError
| [
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
6738,
11676,
47597,
13,
1069,
11755,
1330,
3254,
24765,
12331,
628,
628,
628,
628,
628,
628,
628,
198
] | 3.703704 | 27 |
# VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)
VERSION = '0.1.0.dev'

# Indicates if this version is a release version: any version string
# containing 'dev' is treated as a development (pre-release) build.
RELEASE = 'dev' not in VERSION
| [
2,
44156,
2849,
815,
307,
350,
8905,
21734,
11670,
357,
4023,
1378,
2503,
13,
29412,
13,
2398,
14,
7959,
14,
431,
862,
14,
431,
79,
12,
15,
21734,
8,
198,
43717,
796,
705,
15,
13,
16,
13,
15,
13,
7959,
6,
198,
198,
2,
1423,
16... | 2.815385 | 65 |
"""
Python has a function int(s), which parses an integer value out of a given string s. For example, the following program..
s = "12" + "3" # s == "123"
n = int(s) # value of n is 123
n = n + 10 # int value can be used in arithmetic expression
print (n) # Output 133
...would output 123. In program below, the variable nameBirth references a String with a person's name and a birth year (for example "John Smith, 1981"). Calculate and output the person's age in the year 2011. Do not write any function definitions, and do not output anything else but the age as a number!.
For example, if the value of the nameBirth would be
John Smith, 1981
...your program should output 30
"""
import random

# Sample pools of first and last names used to build random test inputs.
first = [ "John", "James", "Bill", "Arnold", "Lisa", "Ann", "Kimberly", "Monica" ]
last = [ "Smith", "Jones", "Williams", "Brown", "Wilson", "Taylor", "Johnson" ];

for i in range(3):
    # Build a '"First Last", 19XX' test string with a random birth year.
    name = random.choice(first) + " " + random.choice(last)
    # Bug fix: zero-pad the two-digit year. The original str(randint(0, 99))
    # produced three-digit "years" such as "193" for values below 10.
    name_age = name + ", 19" + str(random.randint(0,99)).zfill(2)
    print ("Name and b.year:", name_age)
    # output_age() is the exercise solution; it is expected to be defined
    # above this snippet (not visible in this excerpt).
    print ("Age:", output_age(name_age))
37811,
198,
37906,
468,
257,
2163,
493,
7,
82,
828,
543,
13544,
274,
281,
18253,
1988,
503,
286,
257,
1813,
4731,
264,
13,
1114,
1672,
11,
262,
1708,
1430,
492,
198,
220,
198,
82,
796,
366,
1065,
1,
1343,
366,
18,
1,
1303,
264,
... | 3.215976 | 338 |
# ======================================================================================================================
# File: GUI/TabWaters.py
# Project: AlphaBrew
# Description: Extensions and functionality for the main GUI window.
# Author: Jared Julien <jaredjulien@gmail.com>
# Copyright: (c) 2020 Jared Julien
# ----------------------------------------------------------------------------------------------------------------------
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ----------------------------------------------------------------------------------------------------------------------
# ======================================================================================================================
# Import Statements
# ----------------------------------------------------------------------------------------------------------------------
from PySide2 import QtCore, QtWidgets
import qtawesome
from GUI.Base.TabWater import Ui_TabWater
from Model.Waters import Waters
from Model.Water import Water
from Model.MeasurableUnits import ConcentrationType, PercentType
# ======================================================================================================================
# Waters Tab Class
# ----------------------------------------------------------------------------------------------------------------------
class TabWaters(QtWidgets.QWidget):
    """Extends the MainWindow Waters tab widget containing a subset of controls specific to waters in the
    recipe.

    Please note: this is one area where this tool becomes opinionated.  To simplify the calculations and
    inputs, the tool assumes that you will only use water from a single source and allows you to combine it
    with some ratio of distilled water.  This is done to prevent the user from needing to balance water
    percentages and to make the mash pH calculations much simpler.  This should suit nearly every beer brewed.

    This does present a quirk that a water source be specified even if using 100% distilled water.  I'm living
    with that for the time being and will adjust later if it bothers me enough.
    """

    # ==================================================================================================================
    # Properties
    # ------------------------------------------------------------------------------------------------------------------
    @property
    def sourcePercentage(self):
        """Returns the PercentType value for the currently selected "source" percentage, based upon the slider."""
        return PercentType(self.ui.ratio.value(), '%')

    # ------------------------------------------------------------------------------------------------------------------
    @property
    def distilledPercentage(self):
        """Returns the PercentType of distilled water set by the slider (the complement of sourcePercentage)."""
        return PercentType(100 - self.ui.ratio.value(), '%')

    # ------------------------------------------------------------------------------------------------------------------
    @property
    def sourceAmount(self):
        """Return the source amount in gallons calculated from the mash size.  This isn't necessarily the same
        amount as that which is contained in the actual recipe.  This is intended primarily as a helper to
        calculate and populate the new water when a source gets added to the recipe."""
        return self.recipe.mash.totalWater * self.sourcePercentage

    # ------------------------------------------------------------------------------------------------------------------
    @property
    def distilledAmount(self):
        """Same story as with the source amount, just for the distilled portion."""
        return self.recipe.mash.totalWater * self.distilledPercentage

    # ==================================================================================================================
    # Methods
    # ------------------------------------------------------------------------------------------------------------------
    def on_load(self):
        """Fires when the recipe gets loaded to re-associate the recipe model with the Qt table in this tab.

        Copies the first (source) water's values into the input widgets and refreshes the derived chemistry.
        """
        if len(self.recipe.waters) == 0:
            # If there is nothing to be loaded then bail out now.
            return

        # By convention the zeroth water is the source water (see on_load_source).
        source: Water = self.recipe.waters[0]
        self.ui.sourceName.setText(source.name)
        self.ui.sourceCalcium.setValue(source.calcium.ppm)
        self.ui.sourceMagnesium.setValue(source.magnesium.ppm)
        self.ui.sourceSodium.setValue(source.sodium.ppm)
        self.ui.sourceChloride.setValue(source.chloride.ppm)
        self.ui.sourceSulfate.setValue(source.sulfate.ppm)
        self.ui.sourceBicarbonate.setValue(source.bicarbonate.ppm)
        self.ui.sourcePh.setValue(source.ph)
        self.ui.ratio.setValue(source.percentage.percent)

        self.on_change()

    # ------------------------------------------------------------------------------------------------------------------
    def on_change(self):
        """Fires whenever the recipe is changed. Recalculates the brewing water chemistry for display."""
        self.ui.mixedCalcium.setText(str(self.recipe.waters.calcium))
        self.ui.mixedMagnesium.setText(str(self.recipe.waters.magnesium))
        self.ui.mixedSodium.setText(str(self.recipe.waters.sodium))
        self.ui.mixedChloride.setText(str(self.recipe.waters.chloride))
        self.ui.mixedSulfate.setText(str(self.recipe.waters.sulfate))
        self.ui.mixedBicarbonate.setText(str(self.recipe.waters.bicarbonate))
        self.ui.mixedPh.setText(f'{self.recipe.waters.ph:.1f}')

        # Fall back to the boil volume when no source water has been added yet.
        if len(self.recipe.waters) > 0:
            self.ui.sourceVolume.setText(str(self.recipe.waters[0].amount))
        else:
            self.ui.sourceVolume.setText(str(self.recipe.boilVolume))

        # The optional second water entry is the distilled portion.
        if len(self.recipe.waters) > 1:
            self.ui.distilledVolume.setText(str(self.recipe.waters[1].amount))
        else:
            self.ui.distilledVolume.setText('0.0 gal')

    # ------------------------------------------------------------------------------------------------------------------
    def on_library_selection_changed(self):
        """Fires when the user makes a selection in the bottom table.

        Enables the load button only when at least one library row is selected.
        """
        selection = self.ui.library.selectionModel().selectedIndexes()
        selected = len(selection) > 0
        self.ui.load.setEnabled(selected)

    # ------------------------------------------------------------------------------------------------------------------
    def on_load_source(self):
        """Fires when the user clicks the load button to load in a selected water profile."""
        # Iterate though the selection(s) the user has made.
        for index in self.ui.library.selectedIndexes():
            # We get an index for each cell, lets filter down to a single column so that we can focus on row
            # indexes.
            if index.column() != 0:
                continue

            # Get the data through the proxy as the indexes don't align with the library when filtering.
            selected = self.proxy.data(index, QtCore.Qt.UserRole)

            # Make a copy of the water so as to not modify the version in the library when working with recipe.
            water = selected.copy(self.recipe)
            water.amount = self.sourceAmount

            if len(self.recipe.waters) == 0:
                # Add the new water into the recipe.
                self.recipe.waters.append(water)
            else:
                # Update the zeroth entry, which is forced to be the source water.
                self.recipe.waters[0] = water

            # Force a reload of the source water inputs now that we just updated the recipe.
            self.on_load()
            self.recipe.changed.emit()

            # Only a single source water is supported, so stop after the first selected row.
            break

    # ------------------------------------------------------------------------------------------------------------------
    def on_source_changed(self, attribute, value):
        """Fires when the user changes one of the controls for the source water and posts the updates back to
        the recipe.  `attribute` is injected above in a lambda function to allow this single handler function
        to handle updates from all of the source water inputs."""
        if len(self.recipe.waters) == 0:
            # If there are no waters yet then lets get one added to the recipe.
            water = Water(self.recipe)
            water.amount = self.sourceAmount
            self.recipe.waters.append(water)
        else:
            # When there is a water pull it for updates.
            water = self.recipe.waters[0]

        # All inputs except name and pH are ion concentrations entered in ppm.
        if attribute not in ['name', 'ph']:
            value = ConcentrationType(value, 'ppm')
        setattr(water, attribute, value)

        self.recipe.changed.emit()

    # ------------------------------------------------------------------------------------------------------------------
    def on_ratio_change(self):
        """Fires when the ratio for distilled vs source water is changed.  Calculates new values for the
        amounts of source and distilled water and recalculates the brewing percentages."""
        self.ui.sourcePercent.setText(str(self.sourcePercentage))
        self.ui.distilledPercent.setText(str(self.distilledPercentage))

        if len(self.recipe.waters) == 0:
            # We cannot update the ratio when there is no water profile yet.
            return

        self.recipe.waters[0].amount = self.sourceAmount
        if self.distilledPercentage.percent > 0:
            # There is some portion of distilled water in this recipe.
            if len(self.recipe.waters) == 1:
                # Distiled water has not yet been added to the recipe - do that now.
                distilled = Water(
                    self.recipe,
                    name='Distilled Water',
                    amount=self.distilledAmount,
                )
                self.recipe.waters.append(distilled)
            else:
                # Distilled water is already here, lets just update it.
                self.recipe.waters[1].amount = self.distilledAmount
            self.recipe.changed.emit()
        elif len(self.recipe.waters) > 1:
            # The user no longer calls for distilled water but it's still in the recipe - remove it.
            # NOTE(review): no changed.emit() on this branch — confirm the UI still refreshes after removal.
            self.recipe.waters.pop(1)
# End of File
| [
2,
38093,
10052,
4770,
1421,
28,
201,
198,
2,
220,
220,
220,
220,
220,
220,
220,
9220,
25,
220,
25757,
14,
33349,
54,
8605,
13,
9078,
201,
198,
2,
220,
220,
220,
220,
4935,
25,
220,
12995,
44029,
201,
198,
2,
12489,
25,
220,
497... | 3.078307 | 3,780 |
import argparse
import numpy as np
import os
import json
import torch
from models import *
from run_manager import RunManager
from utils import *
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, default='Exp/test')
parser.add_argument('--gpu', help='gpu available', default='0')
parser.add_argument('--manual_seed', default=0, type=int)

""" dataset """
parser.add_argument('--dataset', type=str, default='imagenet')
parser.add_argument('--test_batch_size', type=int, default=250)

""" train config """
parser.add_argument('--n_worker', type=int, default=4)
parser.add_argument('--dropout', type=float, default=0.3)

""" gene setting """
parser.add_argument("--model_type", default='gpu', type=str)
parser.add_argument("--darts_gene", default=None, type=str)
parser.add_argument("--mobile_gene", default=None, type=str)

if __name__ == '__main__':
    args = parser.parse_args()

    # Seed every RNG so the evaluation run is reproducible.
    torch.manual_seed(args.manual_seed)
    torch.cuda.manual_seed_all(args.manual_seed)
    np.random.seed(args.manual_seed)

    os.makedirs(args.path, exist_ok=True)
    run_config = ImagenetRunConfig(
        **args.__dict__
    )

    # NOTE(security): eval() on a CLI-provided genotype string executes arbitrary
    # code.  This is acceptable only for trusted, locally supplied experiment
    # configs; do not expose these flags to untrusted input.
    if args.darts_gene is not None:
        if args.dataset == 'imagenet':
            from models.darts_nets_imagenet.augment_cnn import AugmentCNNImageNet
            net = AugmentCNNImageNet(num_classes=run_config.data_provider.n_classes, genotype=eval(args.darts_gene),
                                     drop_out=args.dropout)
        elif args.dataset == 'cifar10':
            from models.darts_nets_cifar.augment_cnn import AugmentCNN
            net = AugmentCNN(n_classes=run_config.data_provider.n_classes, genotype=eval(args.darts_gene),
                             drop_out=args.dropout)
        # NOTE(review): any other dataset value leaves `net` undefined and the
        # RunManager construction below raises NameError - presumably only the
        # two datasets above are supported; confirm before adding more.
    else:
        from models.normal_nets.proxyless_nets import proxyless_network
        gene = args.mobile_gene
        net = proxyless_network(
            structure=args.model_type, genotypes=eval(gene), n_classes=run_config.data_provider.n_classes,
            dropout_rate=args.dropout,
        )

    # build run manager
    run_manager = RunManager(args.path, net, run_config)
    run_manager.load_model()

    output_dict = {}

    print('Test on test set')
    loss, acc1, acc5 = run_manager.validate(is_test=True, return_top5=True)
    log = 'test_loss: %f\t test_acc1: %f\t test_acc5: %f' % (loss, acc1, acc5)
    run_manager.write_log(log, prefix='test')
    output_dict = {
        **output_dict,
        'test_loss': '%f' % loss, 'test_acc1': '%f' % acc1, 'test_acc5': '%f' % acc5,
        'total_params': '%f' % run_manager.total_params, 'gpu_latency': '%f' % run_manager.gpu_latency,
    }
    # Bug fix: the file handle passed to json.dump was never closed; use a
    # context manager so the results file is flushed and released.
    with open('%s/test output' % args.path, 'w') as results_file:
        json.dump(output_dict, results_file, indent=4)
| [
11748,
1822,
29572,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
28034,
198,
198,
6738,
4981,
1330,
1635,
198,
6738,
1057,
62,
37153,
1330,
5660,
13511,
198,
6738,
3384,
4487,
1330,
1635,
198,
198,... | 2.356589 | 1,161 |
# Test using 'pytest tests/'
# Or with coverage 'coverage run -m pytest tests/' use 'coverage report' after.
from tests import _INPUT_FILE_PATH, _N_TRAIN
import torch
import numpy as np
from src.data.dataset import Dataset
from transformers import DistilBertTokenizer
# Test training data
# Smoke-tests the project's Dataset wrapper: builds the training set with a real
# DistilBERT tokenizer (downloads the vocab on first run) and checks the shape of
# the first item plus the overall label space.
device = torch.device("cpu")
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
train_set = Dataset(_INPUT_FILE_PATH,tokenizer=tokenizer,device=device)
# Each item is expected to be a (tokenized-inputs dict, label tensor) pair.
item_dict, label = train_set[0]
assert isinstance(item_dict, dict)
assert "input_ids" in item_dict.keys(), "Tokenization error: Input_ids not in dict item"
assert "attention_mask" in item_dict.keys(), "Tokenization error: Masks not in dict item"
assert item_dict['input_ids'].shape == item_dict['attention_mask'].shape, "Input ids and masks not the same shape"
assert len(train_set) == _N_TRAIN, "Incorrect number of training examples"
# label.item() implies label is a 0-d tensor; the value must be an int class id.
assert isinstance(label.item(), int), "Label is not integer"
# The dataset is expected to cover exactly the four classes 0..3.
assert all(np.sort(train_set.classes()) == np.array([0, 1, 2, 3])), "Label error: Classes missing or not between 0 and 3."
| [
2,
6208,
1262,
705,
9078,
9288,
5254,
14,
6,
198,
2,
1471,
351,
5197,
705,
1073,
1857,
1057,
532,
76,
12972,
9288,
5254,
14,
6,
779,
705,
1073,
1857,
989,
6,
706,
13,
198,
6738,
5254,
1330,
4808,
1268,
30076,
62,
25664,
62,
34219,... | 3.132565 | 347 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Image classification task definition."""
import tensorflow as tf
from official.core import input_reader
from official.core import task_factory
from official.vision.beta.dataloaders import classification_input
from official.vision.beta.projects.yolo.configs import darknet_classification as exp_cfg
from official.vision.beta.projects.yolo.dataloaders import classification_tfds_decoder as cli
from official.vision.beta.tasks import image_classification
@task_factory.register_task_cls(exp_cfg.ImageClassificationTask)
class ImageClassificationTask(image_classification.ImageClassificationTask):
  """A task for image classification."""

  def build_inputs(self, params, input_context=None):
    """Builds classification input.

    Args:
      params: the data config for this input pipeline; `params.tfds_name`
        selects the TFDS decoder over the default TFRecord decoder.
      input_context: optional tf.distribute.InputContext for sharding.

    Returns:
      A tf.data.Dataset producing parsed classification examples.
    """
    num_classes = self.task_config.model.num_classes
    input_size = self.task_config.model.input_size
    # TFDS-backed datasets need the YOLO-project decoder; raw TFRecords use the
    # standard classification decoder.
    if params.tfds_name:
      decoder = cli.Decoder()
    else:
      decoder = classification_input.Decoder()
    parser = classification_input.Parser(
        output_size=input_size[:2],
        num_classes=num_classes,
        dtype=params.dtype)
    reader = input_reader.InputReader(
        params,
        dataset_fn=tf.data.TFRecordDataset,
        decoder_fn=decoder.decode,
        parser_fn=parser.parse_fn(params.is_training))
    dataset = reader.read(input_context=input_context)
    return dataset

  def train_step(self, inputs, model, optimizer, metrics=None):
    """Does forward and backward.

    Args:
      inputs: a dictionary of input tensors.
      model: the model, forward pass definition.
      optimizer: the optimizer for this training step.
      metrics: a nested structure of metrics objects.

    Returns:
      A dictionary of logs.
    """
    features, labels = inputs
    # Sparse integer labels are one-hot encoded only when the loss config asks for it.
    if self.task_config.losses.one_hot:
      labels = tf.one_hot(labels, self.task_config.model.num_classes)
    num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
    with tf.GradientTape() as tape:
      outputs = model(features, training=True)
      # Casting output layer as float32 is necessary when mixed_precision is
      # mixed_float16 or mixed_bfloat16 to ensure output is casted as float32.
      outputs = tf.nest.map_structure(
          lambda x: tf.cast(x, tf.float32), outputs)
      # Computes per-replica loss.
      loss = self.build_losses(
          model_outputs=outputs, labels=labels, aux_losses=model.losses)
      # Scales loss as the default gradients allreduce performs sum inside the
      # optimizer.
      scaled_loss = loss / num_replicas
      # For mixed_precision policy, when LossScaleOptimizer is used, loss is
      # scaled for numerical stability.
      if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
        scaled_loss = optimizer.get_scaled_loss(scaled_loss)
    tvars = model.trainable_variables
    grads = tape.gradient(scaled_loss, tvars)
    # Scales back gradient before apply_gradients when LossScaleOptimizer is
    # used.
    if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
      grads = optimizer.get_unscaled_gradients(grads)
    # Apply gradient clipping.
    if self.task_config.gradient_clip_norm > 0:
      grads, _ = tf.clip_by_global_norm(
          grads, self.task_config.gradient_clip_norm)
    optimizer.apply_gradients(list(zip(grads, tvars)))
    logs = {self.loss: loss}
    # Prefer task-level metrics when provided; otherwise fall back to the
    # metrics compiled into the Keras model.
    if metrics:
      self.process_metrics(metrics, labels, outputs)
      logs.update({m.name: m.result() for m in metrics})
    elif model.compiled_metrics:
      self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
      logs.update({m.name: m.result() for m in model.metrics})
    return logs
| [
2,
15069,
33448,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,... | 2.856669 | 1,507 |
# -*- coding: utf-8 -*-
"""
This is a user configuration file.
"""
# Edit summary appended to every bot edit.  The Thai text appears to read
# "if anything is wrong, please report [[User talk:Nullzero|here]]" - TODO
# confirm with a Thai speaker before changing.
botSummary = u" หากผิดพลาดโปรดแจ้ง[[User talk:Nullzero|ที่นี่]]"

# Sandbox-like pages the bot operates on (Thai Wikipedia page titles).
sandboxPages = [u"วิกิพีเดีย:ทดลองเขียน",
                u"วิกิพีเดีย:สอนการใช้งาน_(จัดรูปแบบ)/กระดาษทด",
                u"วิกิพีเดีย:สอนการใช้งาน_(แหล่งข้อมูลอื่น)/กระดาษทด",
                u"วิกิพีเดีย:สอนการใช้งาน_(แก้ไข)/กระดาษทด",
                u"วิกิพีเดีย:สอนการใช้งาน_(วิกิพีเดียลิงก์)/กระดาษทด"]
# put the sandbox page in the first position,
# then put other tutorial pages.
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
220,
532,
9,
12,
198,
37811,
198,
1212,
318,
257,
2836,
8398,
2393,
13,
198,
37811,
198,
198,
13645,
22093,
796,
334,
1,
220,
19567,
104,
19567,
110,
19567,
223,
19567,
250,
19567,
112,
... | 0.906412 | 577 |
import numpy as np
| [
11748,
299,
32152,
355,
45941,
198
] | 3.166667 | 6 |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Ne pas oublier d'installer seaborn et matplotlib sur la VM d'Airflow
import seaborn as sns
# Ne pas oublier d'installer mlflow sur la VM d'Airflow
import mlflow
import mlflow.sklearn
# Authentification à Google Cloud avec la clé correspondant au compte de service MLflow
import os
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from google.cloud import storage
EXPERIMENT_ID = 1 # Choose the correct MLflow experiment ID.
# Don't forget to add the key for Storage.
# NOTE(review): hardcoded Windows path to the service-account key - this must
# exist on the machine running the script; consider an env var instead.
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r'C:\blent-data-engineer\storage-key.json'
sns.set(rc={'figure.figsize':(14,10)})
# TODO : to be modified
project_id = "data-engineer-duration"
# NOTE(review): tracking server address is a hardcoded external IP.
mlflow.set_tracking_uri("http://34.65.91.126:5000/")
client = storage.Client()
def plot_mae(X, y, model):
    """Scatter the true trip durations against the model's predictions.

    The figure is written to ``MAE.png`` so it can be logged to MLflow as an
    artifact, and the (closed) figure object is returned.
    """
    figure = plt.figure()
    plt.scatter(y, model.predict(X))
    plt.xlabel("Durée réelle du trajet")
    plt.ylabel("Durée estimée du trajet")
    figure.savefig("MAE.png")
    plt.close(figure)
    return figure
def load_data(path):
    """Load the trips CSV and return the learning base ``(X, y)``.

    The target ``y`` is the real trip duration in seconds, derived from the
    pickup/dropoff timestamps; rows are restricted to trips lasting strictly
    between 60 and 3600 seconds.
    """
    trips = pd.read_csv(path)
    for column in ('pickup_datetime', 'dropoff_datetime'):
        trips[column] = pd.to_datetime(trips[column])
    # Break the pickup timestamp into its time-of-day components.
    trips['pickup_hour'] = trips['pickup_datetime'].dt.hour
    trips['pickup_minute'] = trips['pickup_datetime'].dt.minute
    trips['pickup_second'] = trips['pickup_datetime'].dt.second
    # Real trip duration, in seconds.
    trips['duration'] = (trips['dropoff_datetime'] - trips['pickup_datetime']).dt.seconds
    kept = trips[(trips['duration'] < 3600) & (trips['duration'] > 60)]
    feature_columns = ['trip_distance', 'PULocationID', 'DOLocationID', 'pickup_hour',
                       'pickup_minute', 'pickup_second']
    return kept[feature_columns], kept['duration']
def train_model(path):
    """Train a random-forest trip-duration model and log everything to MLflow.

    Logs the hyper-parameters, the MAE metric, the prediction scatter plot
    (``MAE.png``) and the fitted model itself under EXPERIMENT_ID.

    :param path: path to the trips CSV consumed by :func:`load_data`.
    """
    # Bug fix: `mae` was called below but never imported or defined anywhere in
    # this module, raising a NameError at runtime.  Import it explicitly
    # (scikit-learn is already a dependency of this module).
    from sklearn.metrics import mean_absolute_error as mae

    X, y = load_data(path)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
    # Forest hyper-parameters, logged below for traceability.
    n_estimators = 50
    max_depth = 12
    with mlflow.start_run(experiment_id=EXPERIMENT_ID):
        rf = RandomForestRegressor(n_estimators=n_estimators, max_depth=max_depth)
        rf.fit(X_train, y_train)  # Fit / optimise the forest.
        mae_score = mae(y_test, rf.predict(X_test))
        # Writes MAE.png to disk so it can be attached as an artifact below.
        plot_mae(X_test, y_test, rf)

        mlflow.log_param("n_estimators", n_estimators)
        mlflow.log_param("max_depth", max_depth)
        mlflow.log_metric("mae", mae_score)
        mlflow.log_artifact("MAE.png")
        mlflow.sklearn.log_model(rf, "model")
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
2,
3169,
38836,
267,
549,
2505,
288,
6,
17350,
263,
384,
397,
1211,
2123,
2603,
29487,
8019,
969,
... | 2.38111 | 1,207 |
# Recursive functions are functions that call themselves, each time with
# different arguments.


def fact(n):
    """Return n! computed recursively."""
    # Base case: 0! == 1! == 1; every other call reduces n by one.
    if n <= 1:
        return 1
    return n * fact(n - 1)


def degree(base, power):
    """Return ``base`` raised to a non-negative integer ``power``, recursively."""
    if power == 0:
        return 1
    return base * degree(base, power - 1)


# Bug fix: fact() and degree() were called below but never defined anywhere in
# the file, so the script crashed with a NameError; they are now defined above.

# factorial
print(fact(10))

# Do the same with a loop (for comparison).
factorial = 1
for i in range(1, 11):
    factorial = factorial * i  # can be shortened to: factorial *= i
print(factorial)

# A function raising a number to a power:
print(degree(2, 10))

# Do the same with a loop (for comparison).
deg = 1
for i in range(1, 11):
    deg = deg * 2
print(deg)
| [
220,
220,
220,
1303,
12466,
254,
16843,
31583,
35072,
21169,
21727,
18849,
38857,
22177,
45035,
16843,
220,
141,
226,
35072,
22177,
31583,
141,
228,
18849,
18849,
532,
220,
141,
235,
20375,
15166,
220,
20375,
16843,
220,
141,
226,
35072,
... | 1.30137 | 365 |
#!/usr/bin/python3
import nmap
scanner = nmap.PortScanner()

print("Welcome, this is a simple Nmap automation tool")
print("<--------------------------------------------------->")

ip_addr = input("Please enter the IP address you want to scan : ")
print(f"You entered the IP address : {ip_addr}")

resp = input(
    """
    Please enter the type of scan you want to run
    1) SYN ACK Scan
    2) UDP Scan
    3) Comprehensive scan
    """
)
print(f"You have selected the option : {resp}\n")

# Map each menu choice to its nmap arguments and the protocol whose open
# ports are reported afterwards.
SCAN_OPTIONS = {
    "1": ("--privileged -v -sS", "tcp"),
    "2": ("--privileged -v -sU", "udp"),
    "3": ("--privileged -v -sS -sV -sC -A -O", "tcp"),
}

if resp in SCAN_OPTIONS:
    arguments, protocol = SCAN_OPTIONS[resp]
    print(f"Nmap version : {scanner.nmap_version()}")
    scanner.scan(ip_addr, "1-1024", arguments)
    print(scanner.scaninfo())
    print(f"IP status : {scanner[ip_addr].state()}")
    print(scanner[ip_addr].all_protocols())
    print(f"Open ports : {scanner[ip_addr][protocol].keys()}")
else:
    # Bug fix: the original used `elif resp >= "4"`, a lexicographic string
    # comparison that silently ignored inputs such as "0" or "abc"; every
    # non-menu input now gets feedback.
    print("Please enter a valid option")
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
299,
8899,
198,
198,
35836,
1008,
796,
299,
8899,
13,
13924,
33351,
1008,
3419,
198,
198,
4798,
7203,
14618,
11,
428,
318,
257,
2829,
399,
8899,
22771,
2891,
4943,
198,
4798,
... | 2.483766 | 616 |
"""Module contains Resource model."""
from enum import Enum
import os
import posixpath
from typing import Any, Dict, List, Union # noqa: F401
from requests.models import Response # noqa: F401 pylint: disable=unused-import
from crux._utils import DEFAULT_CHUNK_SIZE, Headers
from crux.models.model import CruxModel
from crux.models.permission import Permission
class Resource(CruxModel):
    """Resource Model."""

    def __init__(
        self,
        id=None,  # type: str # id name is by design pylint: disable=redefined-builtin
        dataset_id=None,  # type: str
        folder_id=None,  # type: str
        folder=None,  # type: str
        name=None,  # type: str
        size=None,  # type: str
        type=None,  # type: str # type name is by design pylint: disable=redefined-builtin
        config=None,  # type: Dict[str, Any]
        provenance=None,  # type: str
        as_of=None,  # type: str
        created_at=None,  # type: str
        modified_at=None,  # type: str
        storage_id=None,  # type: str
        description=None,  # type: str
        media_type=None,  # type: str
        tags=None,  # type: List[str]
        labels=None,  # type: Dict[str, str]
        connection=None,
        raw_response=None,  # type: Dict[Any, Any]
    ):
        # type: (...) -> None
        """
        Attributes:
            id (str): Resource Id. Defaults to None.
            dataset_id (str): Dataset Identity. Defaults to None.
            folder_id (str): Folder Identity. Defaults to None.
            folder (str): Folder Name. Defaults to None.
            name (str): Resource name. Defaults to None.
            size (str): Resource size. Defaults to None.
            type (str): Resource type. Defaults to None.
            config (str): Resource config. Defaults to None.
            provenance (list): Resource Provenance. Defaults to None.
            as_of (str): Resource as_of. Defaults to None.
            created_at (str): Resource created_at. Defaults to None.
            modified_at (str): Resource modified_at. Defaults to None.
            storage_id (str): Resource storage Identity. Defaults to None.
            description (str): Resource description. Defaults to None.
            media_type (str): Resource Media Type. Defaults to None.
            tags (:obj:`list` of :obj:`str`): Resource tags. Defaults to None.
            labels (dict): Dictionary containing Label Key and Values.
                Defaults to None.
            connection (crux._client.CruxClient): Connection Object. Defaults to None.
            raw_response (dict): Response Content. Defaults to None.
        """
        self._id = id
        self._dataset_id = dataset_id
        self._folder_id = folder_id
        self._description = description
        self._name = name
        # Size is normalised to 0 when unset so the `size` property can cast it.
        self._size = size if size else 0
        self._type = type
        self._config = config
        self._provenance = provenance
        self._as_of = as_of
        self._created_at = created_at
        self._storage_id = storage_id
        self._media_type = media_type
        self._modified_at = modified_at
        self._tags = tags
        self._labels = labels if labels else {}  # type: Dict[str, str]
        # Folder may be lazily resolved via the API; see the `folder` property.
        self._folder = folder
        self.connection = connection
        self.raw_response = raw_response

    @property
    def id(self):
        """str: Gets the Resource ID."""
        return self._id

    @property
    def description(self):
        """str: Gets the Resource Description."""
        return self._description

    @property
    def media_type(self):
        """str: Gets the Resource Media Type."""
        return self._media_type

    @property
    def dataset_id(self):
        """str: Gets the Dataset ID."""
        return self._dataset_id

    @property
    def folder_id(self):
        """str: Gets the Folder ID."""
        return self._folder_id

    @property
    def storage_id(self):
        """str: Gets the Storage ID."""
        return self._storage_id

    @property
    def name(self):
        """str: Gets the Resource Name."""
        return self._name

    @property
    def config(self):
        """str: Gets the config."""
        return self._config

    @property
    def provenance(self):
        """str: Gets the Provenance."""
        return self._provenance

    @property
    def type(self):
        """str: Gets the Resource Type."""
        return self._type

    @property
    def tags(self):
        """:obj:`list` of :obj:`str`: Gets the Resource Tags."""
        return self._tags

    @property
    def labels(self):
        """dict: Gets the Resource labels."""
        return self._labels

    @property
    def as_of(self):
        """str: Gets the as_of."""
        return self._as_of

    @property
    def created_at(self):
        """str: Gets created_at."""
        return self._created_at

    @property
    def modified_at(self):
        """str: Gets modified_at."""
        return self._modified_at

    @property
    def size(self):
        """int: Gets the size."""
        return int(self._size)

    @property
    def path(self):
        """str: Compute or Get the resource path."""
        return posixpath.join(self.folder, self.name)

    @property
    def folder(self):
        """str: Compute or Get the folder name (lazily fetched via the API)."""
        if self._folder:
            return self._folder

        self._folder = self._get_folder()
        return self._folder

    def to_dict(self):
        # type: () -> Dict[str, Any]
        """Transforms Resource object to Resource Dictionary.

        Returns:
            dict: Resource Dictionary.
        """
        return {
            "resourceId": self.id,
            "datasetId": self.dataset_id,
            "description": self.description,
            "folderId": self.folder_id,
            "mediaType": self.media_type,
            "name": self.name,
            "size": self.size,
            "type": self.type,
            "config": self.config,
            "provenance": self.provenance,
            "asOf": self.as_of,
            "tags": self.tags,
            "labels": self.labels,
            "storageId": self.storage_id,
            "createdAt": self.created_at,
            "modifiedAt": self.modified_at,
        }

    @classmethod
    def from_dict(cls, a_dict):
        # type: (Dict[str, Any]) -> Any
        """Transforms Resource Dictionary to Resource object.

        Args:
            a_dict (dict): Resource Dictionary.

        Returns:
            crux.models.Resource: Resource Object.
        """
        id = a_dict[  # id name is by design pylint: disable=redefined-builtin
            "resourceId"
        ]
        dataset_id = a_dict["datasetId"]
        # Bug fix: `description` was read from the dictionary twice; keep one read.
        description = a_dict["description"]
        folder_id = a_dict["folderId"]
        storage_id = a_dict["storageId"]
        media_type = a_dict["mediaType"]
        name = a_dict["name"]
        type = a_dict[  # type name is by design pylint: disable=redefined-builtin
            "type"
        ]
        # Optional keys: fall back to empty/None defaults.
        tags = a_dict.get("tags", [])
        config = a_dict.get("config")
        labels = {}
        for label in a_dict.get("labels", []):
            labels[label["labelKey"]] = label["labelValue"]
        provenance = a_dict["provenance"]
        created_at = a_dict["createdAt"]
        modified_at = a_dict["modifiedAt"]
        size = a_dict["size"]
        as_of = a_dict["asOf"]

        return cls(
            dataset_id=dataset_id,
            id=id,
            folder_id=folder_id,
            media_type=media_type,
            storage_id=storage_id,
            description=description,
            name=name,
            tags=tags,
            labels=labels,
            type=type,
            config=config,
            provenance=provenance,
            created_at=created_at,
            modified_at=modified_at,
            size=size,
            as_of=as_of,
        )

    def delete(self):
        # type: () -> bool
        """Deletes Resource from Dataset.

        Returns:
            bool: True if it is deleted.
        """
        headers = Headers(
            {"content-type": "application/json", "accept": "application/json"}
        )
        return self.connection.api_call(
            "DELETE", ["resources", self.id], headers=headers
        )

    def update(self, name=None, description=None, tags=None):
        # type: (str, str, List[str]) -> bool
        """Updates the metadata for Resource.

        Args:
            name (str): Name of resource. Defaults to None.
            description (str): Description of the resource. Defaults to None.
            tags (:obj:`list` of :obj:`str`): List of tags. Defaults to None.

        Returns:
            bool: True, if resource is updated.

        Raises:
            ValueError: It is raised if name, description and tags are all unset.
            TypeError: It is raised if tags are not of type List.
        """
        headers = Headers(
            {"content-type": "application/json", "accept": "application/json"}
        )
        body = {}  # type: Dict[str, Union[str, List, Dict]]

        if name is not None:
            body["name"] = name
        if description is not None:
            body["description"] = description
        if tags is not None:
            if isinstance(tags, list):
                body["tags"] = tags
            else:
                raise TypeError("Tags should be of type list")

        if body:
            response = self.connection.api_call(
                "PUT", ["resources", self.id], headers=headers, json=body
            )
            response_dict = response.json()
            # Refresh the local fields from whatever the server echoed back.
            if "name" in response_dict:
                self._name = response.json().get("name")
            if "tags" in response_dict and tags is not None:
                self._tags = response.json().get("tags")
            if "description" in response_dict:
                self._description = response.json().get("description")
            return True
        else:
            raise ValueError("Name, Description or Tags should be set")

    def add_permission(self, identity_id, permission):
        # type: (str, str) -> Union[bool, Permission]
        """Adds permission to the resource.

        Args:
            identity_id: Identity Id to be set.
            permission: Permission to be set.

        Returns:
            crux.models.Permission: Permission Object.
        """
        headers = Headers(
            {"content-type": "application/json", "accept": "application/json"}
        )
        return self.connection.api_call(
            "PUT",
            ["permissions", self.id, identity_id, permission],
            model=Permission,
            headers=headers,
        )

    def delete_permission(self, identity_id, permission):
        # type: (str, str) -> bool
        """Deletes permission from the resource.

        Args:
            identity_id (str): Identity Id for the deletion.
            permission (str): Permission for the deletion.

        Returns:
            bool: True if it is able to delete it.
        """
        headers = Headers(
            {"content-type": "application/json", "accept": "application/json"}
        )
        return self.connection.api_call(
            "DELETE", ["permissions", self.id, identity_id, permission], headers=headers
        )

    def list_permissions(self):
        # type: () -> List[Permission]
        """Lists the permission on the resource.

        Returns:
            list (:obj:`crux.models.Permission`): List of Permission Objects.
        """
        headers = Headers({"accept": "application/json"})
        return self.connection.api_call(
            "GET",
            ["resources", self.id, "permissions"],
            model=Permission,
            headers=headers,
        )

    def add_label(self, label_key, label_value):
        # type: (str, str) -> bool
        """Adds label to Resource.

        Args:
            label_key (str): Label Key for Resource.
            label_value (str): Label Value for Resource.

        Returns:
            bool: True if label is added, False otherwise.
        """
        headers = Headers(
            {"content-type": "application/json", "accept": "application/json"}
        )
        response_result = self.connection.api_call(
            "PUT",
            [
                "datasets",
                self.dataset_id,
                "resources",
                self.id,
                "labels",
                label_key,
                label_value,
            ],
            headers=headers,
        )

        if response_result:
            # Mirror the change locally so `labels` stays in sync with the API.
            self._labels[label_key] = label_value
            return True
        else:
            return False

    def delete_label(self, label_key):
        # type: (str) -> bool
        """Deletes label from Resource.

        Args:
            label_key (str): Label Key for Resource.

        Returns:
            bool: True if label is deleted, False otherwise.
        """
        headers = Headers(
            {"content-type": "application/json", "accept": "application/json"}
        )
        response_result = self.connection.api_call(
            "DELETE",
            ["datasets", self.dataset_id, "resources", self.id, "labels", label_key],
            headers=headers,
        )

        if response_result:
            try:
                del self._labels[label_key]
            except KeyError:
                return False
            return True
        else:
            return False

    def add_labels(self, labels_dict):
        # type: (dict) -> bool
        """Adds multiple labels to Resource.

        Args:
            labels_dict (dict): Labels (key/value pairs) to add to the Resource.

        Returns:
            bool: True if the labels were added, False otherwise.
        """
        headers = Headers(
            {"content-type": "application/json", "accept": "application/json"}
        )

        labels_list = []

        for label_key, label_value in labels_dict.items():
            if label_key is not None and label_value is not None:
                # Enum keys are serialised by their value.
                label_key = (
                    label_key.value if isinstance(label_key, Enum) else label_key
                )
                labels_list.append(
                    {"labelKey": str(label_key), "labelValue": str(label_value)}
                )

        data = {"labels": labels_list}

        response_result = self.connection.api_call(
            "PUT",
            ["datasets", self.dataset_id, "resources", self.id, "labels"],
            headers=headers,
            json=data,
        )

        if response_result:
            for label_key, label_value in labels_dict.items():
                self._labels[label_key] = label_value
            return True
        else:
            return False

    def _get_folder(self):
        # type: () -> str
        """Fetches the folder of the resource.

        Returns:
            str: Folder name of the resource.
        """
        headers = Headers(
            {"content-type": "application/json", "accept": "application/json"}
        )
        response = self.connection.api_call(
            "GET", ["resources", self.id, "folderpath"], headers=headers
        )

        return response.json().get("path")

    def refresh(self):
        # type: () -> bool
        """Refresh Resource model from API backend.

        Returns:
            bool: True, if it is able to refresh the model,
                False otherwise.
        """
        headers = Headers(
            {"content-type": "application/json", "accept": "application/json"}
        )
        resource_object = self.connection.api_call(
            "GET", ["resources", self.id], headers=headers, model=Resource
        )

        self.__dict__.update(resource_object.__dict__)
        return True
# https://github.com/python/mypy/issues/2477, mypy is performing checking with Python2
class MediaType(Enum):  # type: ignore
    """MediaType Enumeration Model."""

    JSON = "application/json"
    NDJSON = "application/x-ndjson"
    CSV = "text/csv"
    PARQUET = "application/parquet"
    AVRO = "avro/binary"

    @classmethod
    def detect(cls, file_name):
        # type: (str) -> str
        """Detects the media_type from the file extension.

        Args:
            file_name (str): Absolute or Relative Path of the file.

        Returns:
            str: MediaType extension.

        Raises:
            LookupError: If file type is not supported.
        """
        # Upper-cased extension (without the dot) must match a member name.
        extension = os.path.splitext(file_name)[1][1:].upper()
        if extension not in cls.__members__:
            raise LookupError("File/Media Type not supported.")
        return cls[extension].value  # type: ignore
| [
37811,
26796,
4909,
20857,
2746,
526,
15931,
198,
198,
6738,
33829,
1330,
2039,
388,
198,
11748,
28686,
198,
11748,
1426,
844,
6978,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
7343,
11,
4479,
220,
1303,
645,
20402,
25,
376,
21844,
... | 2.144783 | 8,012 |
from wq.db import rest
from .models import GeometryModel, PointModel
from django.conf import settings
if settings.WITH_GIS:
    # Module import side effect: register the GIS models with the wq REST
    # router only when GIS support is enabled in the project settings.
    rest.router.register_model(
        GeometryModel,
        fields="__all__",
    )
    rest.router.register_model(
        PointModel,
        fields="__all__",
    )
| [
6738,
266,
80,
13,
9945,
1330,
1334,
198,
6738,
764,
27530,
1330,
2269,
15748,
17633,
11,
6252,
17633,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
198,
361,
6460,
13,
54,
10554,
62,
38,
1797,
25,
198,
220,
220,
220,
1334,
1... | 2.368 | 125 |
import sys, os
sys.path.append(os.path.abspath(os.getcwd()))
from Visualize import Visualizer
from ZF3D.Camera import Camera
import glob
import os
import cv2
import numpy as np
import pandas as pd
def genSplitViewImages(image_dir):
    """Generate side-by-side split-view images from paired camera frames.

    Reads matching ``*.jpg`` frames from ``<image_dir>/imgF`` (front view) and
    ``<image_dir>/imgT`` (top view), stacks each pair horizontally and writes
    the result into ``<image_dir>/img1``, creating that folder if needed.
    Frames already present in the output folder are skipped.
    """
    front_dir = os.path.join(image_dir, 'imgF')
    top_dir = os.path.join(image_dir, 'imgT')
    output_dir = os.path.join(image_dir, 'img1')
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    for frame in os.listdir(front_dir):
        if not frame.endswith(".jpg"):
            continue
        name = os.path.split(frame)[-1]
        destination = os.path.join(output_dir, name)
        if os.path.exists(destination):
            # Already generated on a previous run.
            continue
        front = cv2.imread(os.path.join(front_dir, name))
        top = cv2.imread(os.path.join(top_dir, name))
        cv2.imwrite(destination, np.hstack((front, top)))
if __name__ == "__main__":
    # Hardcoded sequence from the 3D-ZeF20 training split; adjust as needed.
    image_dir = "data/3DZeF20/train/ZebraFish-02"
    genSplitViewImages(image_dir)
| [
198,
11748,
25064,
11,
28686,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
6978,
13,
397,
2777,
776,
7,
418,
13,
1136,
66,
16993,
3419,
4008,
198,
6738,
15612,
1096,
1330,
15612,
7509,
198,
6738,
1168,
37,
18,
35,
13,
35632,
1330,
... | 2.065789 | 532 |
# Author
# Peter Svenningsson
# Email
# peter.o.svenningsson@gmail.com
###########
# IMPORTS #
###########
# Standard
import time
import csv
import json
# 3rd party
import numpy as np
import matplotlib.pyplot as plt
# Local
from dataset.dataset_QSNMC import QSNMCDataset
from models.izencovich_neuron import SimpleIzencovichModel,IzencovichModelExtended
from models.LIF import LIFModel, LIF
from evaluators.MD_star import MDStarComparator
from GA.population import Population
from GA.encoders import RealNumberEncoder
from GA.mutators import CreepMutation
from GA.selection import TournamentCrossover
from GA.elitism import Elitism
import seaborn as sns
#############
# CONSTANTS #
#############
##########
# SCRIPT #
##########
# Evaluate a hand-tuned extended Izhikevich-style neuron model against the
# QSNMC recordings, scored with the MD* spike-train similarity measure.
comparator = MDStarComparator(delta = 1)
dataset = QSNMCDataset()
# Alias so the model class can be swapped in one place.
NeuronModel = IzencovichModelExtended
# NOTE(review): these parameters were presumably produced by the GA machinery
# imported above -- confirm before editing them by hand.
individual = NeuronModel(dataset,a=0.3017337265776834,b=0.14916560882651994,c=-59.33629988414421,d=106.57674262730798,v_rest=-54.61904283240068,v_threshold=-50.286389631670055,k=1.6259421609654119,C=125.52690852164866,polynomial_a=-0.07730008943907689, polynomial_b=-0.03043767818048064)
# Simulate once; spike times are cached on the individual for plotting below.
individual.simulate_spiking_times()
predicted_spike_times = individual.spike_times
spike_trains = dataset.spike_trains
score = comparator.evaluate(individual, spike_trains, voided_time = 16, voided_end = 32.7 ) # voided time validation 32.7 # voided time training 16, 32.7
print("score is {}".format(score))
def plot_input_output(v, I, title: str, filename: str):
    """Plot a membrane-potential trace against the injected current and save it.

    The two signals share the x-axis (seconds, derived from the module-level
    ``dataset.dt``) and get separate y-axes via ``twinx``.  Both axes are
    zoomed to the 25-28 s window and the figure is saved as ``<filename>.jpg``.

    :param v: membrane potential samples (mV).
    :param I: input current samples (pA); its length defines the time span.
    :param title: figure title (currently unused; kept for interface stability).
    :param filename: output file name without the ``.jpg`` extension.
    """
    sns.set(style = "dark")
    plt.rcParams.update({'font.size': 14})
    plt.rc('xtick', labelsize=12)
    plt.rc('ytick', labelsize=12)
    # Renamed from `time`: the old name shadowed the stdlib `time` module
    # imported at the top of this file.
    times = [dataset.dt * x for x in range(0, len(I))]
    # Initialize
    fig, ax1 = plt.subplots(figsize=(12, 4))
    ax1.plot(times, v, 'tab:blue', label='Membrane Potential', alpha = 0.4)
    ax1.set_xlabel('time (s)')
    # Plot output (left axis: membrane potential)
    ax1.set_ylabel('Membrane Potential mV', color='tab:blue')
    ax1.tick_params('y', colors='tab:blue')
    ax1.set_ylim(-150, 55)
    ax2 = ax1.twinx()
    # Plot input current (right axis)
    ax2.plot(times, I, 'tab:red', label='Input Current', alpha = 0.4)
    ax2.set_ylim(-25, 25)
    # Zoom both x-axes to the same 25-28 s window.
    ax2.set_xlim(25, 28)
    ax1.set_xlim(25, 28)
    ax2.set_ylabel('Input Current (pA)', color='tab:red')
    ax2.tick_params('y', colors='tab:red')
    fig.tight_layout()
    ax1.legend(loc=1)
    ax2.legend(loc=3)
    plt.savefig(filename+'.jpg')
def plot_voltage_voltage(individual, dataset, data_sample: int, title: str, filename: str, limits = (30,36)):
    """Plot predicted vs. observed membrane voltage on twin y-axes and save it.

    Predicted voltage and its spike markers go on the left axis, the observed
    recording and its spike markers on the right axis.  The figure is saved
    as ``<filename>.jpg``; an intermediate snapshot is also written to
    ``individual.jpg`` (kept for parity with the original behaviour).

    :param individual: fitted neuron model exposing ``v``, ``i_ext`` and
        ``spike_times``.
    :param dataset: dataset exposing ``dt``, ``gold_voltages`` and
        ``spike_trains``.
    :param data_sample: index of the observed recording to compare against.
    :param title: figure title (currently unused; kept for interface stability).
    :param filename: output file name without the ``.jpg`` extension.
    :param limits: (start, end) x-axis window in seconds.
    """
    # Apply the seaborn style before the font settings: sns.set() resets
    # rcParams, so the order matters.  The original code re-applied the same
    # rcParams/rc triple six times; applying it once after sns.set() yields
    # the same final configuration.
    sns.set(style = "dark")
    plt.rcParams.update({'font.size': 22})
    plt.rc('xtick', labelsize=18)
    plt.rc('ytick', labelsize=18)
    # Renamed from `time`: the old name shadowed the stdlib `time` module.
    times = [dataset.dt * x for x in range(0, len(individual.i_ext))]
    predicted_voltage = individual.v
    observed_voltage = dataset.gold_voltages[data_sample]
    # Initialize
    fig, ax1 = plt.subplots(figsize=(16, 6))
    fig.subplots_adjust(right=0.75)
    fig.subplots_adjust(bottom=0.2)
    # Faint white horizontal guide lines at 0, +/-50 and +/-100 mV.
    for level in (0, 50, -50, 100, -100):
        ax1.plot(times, np.ones_like(times) * level, 'w', alpha=0.8)
    ax1.plot(times, predicted_voltage, 'tab:blue', label='Predicted voltage', alpha = 0.5)
    ax1.plot(individual.spike_times, np.ones_like(individual.spike_times)*60, 'bo', label='Spiking times for predicted voltage', alpha=0.5)
    # Intermediate snapshot, preserved from the original implementation.
    plt.savefig('individual.jpg')
    ax1.set_xlabel('time (s)', fontsize=22)
    # Left axis: predicted voltage
    ax1.set_ylabel('Predicted voltage (mV)', fontsize=22, color='tab:blue')
    ax1.tick_params('y', colors='tab:blue')
    ax1.set_ylim(-140, 120)
    ax2 = ax1.twinx()
    # Right axis: observed voltage
    ax2.plot(times, observed_voltage, 'tab:green', label='Observed voltage', alpha = 0.5)
    ax2.plot(dataset.spike_trains[data_sample], np.ones_like(dataset.spike_trains[data_sample])*45, 'go', label='Spiking times for observed voltage', alpha=0.5)
    ax2.set_ylim(-140, 120)
    ax2.set_ylabel('Observed voltage (mV)', fontsize=22, color='tab:green')
    ax2.tick_params('y', colors='tab:green')
    ax1.legend(loc=1, prop={'size': 14})
    ax2.legend(loc=3, prop={'size': 14})
    ax1.set_xlim(*limits)
    plt.savefig(filename+'.jpg')
# Plot measured data: the last gold-voltage recording against the shared
# stimulus current.
plot_input_output(dataset.gold_voltages[-1], dataset.current, "", 'measured_voltage')
# Plot model performance: the simulated trace against its input current.
plot_input_output(
    individual.v, individual.i_ext, "The score achieved is " + str(round(score,5)), 'summary_of_model_results'
)
# Plot comparison: one figure per time window; the file name encodes the
# sample index and the window so repeated runs do not overwrite each other.
dataset_sample = 12
for limits in [(12.5,16), (18,24), (24,30), (32.7,36), (36,39)]:
    title = "Sample {} - Comparison of observed to predicted voltage with score {} \n" \
            " {} predicted spikes, {:0.1f} average observed spikes".format(
        dataset_sample,str(round(score,4)), len(individual.spike_times), np.mean([len(train) for train in dataset.spike_trains]))
    filename = "predicted_observed_comparison sample {}, {}".format(dataset_sample,str(limits))
    plot_voltage_voltage(individual, dataset, dataset_sample, title, filename, limits)
| [
2,
6434,
198,
2,
5613,
44611,
23400,
1559,
198,
2,
9570,
198,
2,
279,
2357,
13,
78,
13,
82,
574,
23400,
1559,
31,
14816,
13,
785,
198,
7804,
21017,
198,
2,
30023,
33002,
1303,
198,
7804,
21017,
198,
2,
8997,
198,
11748,
640,
198,
... | 2.407423 | 2,425 |
import config as cf
from app.base_app import BaseApp
from app.inference_app import InferenceApp
from app.inference_cascade_app import InferenceCascadeApp
from data.db.file_list_loader import FileListLoader
from utils import log
from utils.cpu_gpu_switcher import CpuGpuSwitcher
class EvaluateRuntimeApp(BaseApp):
    """Benchmark inference runtime across model/hardware combinations.

    An object of this class will run inference on all combinations of
    cascade/single net and GPU/CPU configurations.  Final runtimes can be
    taken from the saved log file.  This app is not extending but using an
    InferenceApp.
    """
    def __init__(self, cascade_session_key: str, single_session_key: str, max_positive_test_imgs: int,
                 max_negative_test_imgs: int):
        """Create a new EvaluateRuntimeApp.

        :param cascade_session_key: The session key of the serialized cascade model which should be evaluated.
            If None, cf.get("default_evaluation_model_cascade") will be used.
        :param single_session_key: The session key of the serialized single-net model which should be evaluated.
            If None, cf.get("default_evaluation_model_single") will be used.
        :param max_positive_test_imgs: The maximum number of foreground images which should be evaluated.
        :param max_negative_test_imgs: The maximum number of background images which should be evaluated.
        """
        self._cascade_session_key = cascade_session_key
        self._single_session_key = single_session_key
        self._max_positive_test_imgs = max_positive_test_imgs
        self._max_negative_test_imgs = max_negative_test_imgs
        # prevent using image patches instead of the original images
        cf.set("cache_dataset", False)
        # sample images only once to ensure that all apps use the exact same files
        self._img_infos = FileListLoader().sample_image_infos(max_positive_test_imgs, max_negative_test_imgs)
        # initialize the base class only after all evaluation state is ready
        BaseApp.__init__(self)
    def _run_inference(self, enable_gpu: bool, use_cascade: bool):
        """Run a single combination of enable_gpu and use_cascade.

        Results are not returned; runtimes are read from the log afterwards.

        :param enable_gpu: Whether the GPU should be used. If not, the CPU will be used.
        :param use_cascade: Whether the cascade should be used. If not, the single net will be used.
        :return:
        """
        # banner to visually separate this combination in the shared log file
        log.log("")
        log.log("")
        log.log("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
        log.log("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
        log.log("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
        log.log("~~~~~~~~~~~~~~~~~~~~~~~~~~ {} / {} ~~~~~~~~~~~~~~~~~~".format(
            "Cascade" if use_cascade else "Single Net",
            "GPU" if enable_gpu else "CPU",
        ))
        log.log("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
        log.log("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
        log.log("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
        # the GPU should already be enabled, but let's get sure
        if enable_gpu:
            CpuGpuSwitcher().enable_gpu()
        else:
            CpuGpuSwitcher().disable_gpu()
        # create an inference app without running it yet
        if use_cascade:
            app_inference = InferenceCascadeApp(self._cascade_session_key)
        else:
            app_inference = InferenceApp(self._single_session_key)
        # run inference
        _ = app_inference.run_inference_on_images(self._img_infos, merge=cf.get("inference_merge"))
        # tidy up to ensure that a later run does not benefit from anything done in a previous run
        app_inference.clean()
        app_inference = None
        for img_info in self._img_infos:
            # drop cached raw pixels so the next combination reloads from disk
            img_info.clear_raw_img_cache()
| [
11748,
4566,
355,
30218,
198,
6738,
598,
13,
8692,
62,
1324,
1330,
7308,
4677,
198,
6738,
598,
13,
259,
4288,
62,
1324,
1330,
554,
4288,
4677,
198,
6738,
598,
13,
259,
4288,
62,
66,
28966,
62,
1324,
1330,
554,
4288,
34,
28966,
4677,... | 2.889942 | 1,372 |