hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf48293b639976cdfb4e580a43a76bd9e45bf43 | 2,349 | py | Python | temp_hold/uf3.py | JasonGibsonUfl/quantumMLdev | 341ed1678e9c8bc80a3beb751ad26c83c8b7b97f | [
"MIT"
] | 1 | 2021-06-04T14:31:10.000Z | 2021-06-04T14:31:10.000Z | temp_hold/uf3.py | JasonGibsonUfl/quantumMLdev | 341ed1678e9c8bc80a3beb751ad26c83c8b7b97f | [
"MIT"
] | 2 | 2021-03-24T17:58:17.000Z | 2021-03-24T18:46:37.000Z | temp_hold/uf3.py | JasonGibsonUfl/quantumMLdev | 341ed1678e9c8bc80a3beb751ad26c83c8b7b97f | [
"MIT"
] | 1 | 2021-02-16T20:15:27.000Z | 2021-02-16T20:15:27.000Z | from uf3.util import json_io
import os
from uf3.util import user_config
from quantumml.rest import MLRester
from uf3.data.composition import ChemicalSystem
from uf3.regression.least_squares import WeightedLinearModel
from uf3.representation.bspline import BSplineConfig
from uf3.forcefield.calculator import UFCalculator
from pymatgen.io.ase import AseAtomsAdaptor
from pymatgen.core import Structure
from uf3.data import geometry
import ast
import numpy as np
class UFCC(UFCalculator):
    """UF3 calculator wrapper that accepts pymatgen ``Structure`` objects.

    ``UFCalculator`` operates on ASE ``Atoms``; this subclass converts incoming
    pymatgen structures to ASE atoms before delegating to the parent class.
    """
    def _structure_to_ase_atoms(self, structure):
        """Convert a pymatgen ``Structure`` to an ASE ``Atoms`` object."""
        adaptor = AseAtomsAdaptor()
        return adaptor.get_atoms(structure)
    def get_potential_energy(self, structure, force_consistent=None):
        """Evaluate the total energy of a configuration.

        :param structure: pymatgen ``Structure`` to evaluate.
        :param force_consistent: unused; kept for ASE calculator API compatibility.
        :return: total energy from the parent UF calculator.
        """
        atoms = self._structure_to_ase_atoms(structure)
        energy = super().get_potential_energy(atoms)
        return energy
    def get_forces(self, atoms=None):
        """Evaluate forces on a configuration.

        :param atoms: pymatgen ``Structure`` (the parameter is named ``atoms``
            for ASE API compatibility); converted to ASE atoms before evaluation.
        :return: forces from the parent UF calculator.
        """
        # Bug fix: the original body referenced an undefined name ``structure``,
        # raising NameError on every call; convert the passed argument instead.
        ase_atoms = self._structure_to_ase_atoms(atoms)
        forces = super().get_forces(ase_atoms)
        return forces
    @staticmethod
    def rebuild(element_list):
        """Rebuild a UF3 ``WeightedLinearModel`` from the remote ML database.

        :param element_list: chemical elements defining the system.
        :return: a ``WeightedLinearModel`` loaded with the stored coefficients.
        """
        # NOTE(review): no handling for an empty query result; the original
        # carried a commented-out try/except around this call.
        with MLRester() as mlr:
            query_results = (mlr.get_uf3(element_list))[-1]
        def str_to_tuple(dictionary):
            """Convert stringified tuple keys like "('A', 'B')" back to tuples
            and their values to numpy arrays; short element keys pass through."""
            new_dict = {}
            for key in dictionary:
                if len(key) > 2:
                    new_key = tuple(
                        key.strip(")")
                        .strip("(")
                        .replace(",", "")
                        .replace("'", "")
                        .split()
                    )
                    new_dict[new_key] = np.array(dictionary[key])
                else:
                    new_dict[key] = dictionary[key]
            return new_dict
        degree = query_results["degree"]
        model_data = ast.literal_eval(query_results["model_data"])
        knots_map = str_to_tuple(model_data["knots"])
        coefficients = str_to_tuple(model_data["coefficients"])
        chemical_system = ChemicalSystem(element_list=element_list, degree=degree)
        bspline_config = BSplineConfig(
            chemical_system, knots_map=knots_map, knot_spacing="custom"
        )
        model = WeightedLinearModel(bspline_config)
        model.load(coefficients)
        return model
| 32.625 | 82 | 0.622393 |
acf4830b1e927f873045b4f32a4b7b680d012a99 | 2,795 | py | Python | main.py | minwoopark2003/PythonDiscordMusicBot | c9e7c0e0d4e83509402db85e0bf7ea80377b607a | [
"MIT"
] | null | null | null | main.py | minwoopark2003/PythonDiscordMusicBot | c9e7c0e0d4e83509402db85e0bf7ea80377b607a | [
"MIT"
] | null | null | null | main.py | minwoopark2003/PythonDiscordMusicBot | c9e7c0e0d4e83509402db85e0bf7ea80377b607a | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
import os
from music import Music
#Attribute for MyHelp class, controlling name, aliases, and cooldown
# Shared command attributes for the help command: custom name, aliases,
# and a per-user cooldown of 2 uses per 5 seconds.
attributes = {
  'name': "help",
  'aliases': ["helper", "helps"],
  'cooldown': commands.Cooldown(2, 5.0, commands.BucketType.user)
}
# NOTE(review): help_object appears unused; bot.help_command is later
# assigned a My_help instance instead -- confirm this line is needed.
help_object = commands.HelpCommand(command_attrs=attributes)
# creating bot object with commands.bot
bot = commands.Bot(command_prefix='!')
# Bot token comes from the environment; raises KeyError if TOKEN is unset.
token = os.environ['TOKEN']
# adding a cog(it is like a module) to the bot
bot.add_cog(Music(bot))
#iterator variable: counts !hello invocations so greetings rotate
count = 0
# array of potential greetings
greetings = ['Hello', 'Salutations','Hola', 'Sup']
@bot.command(name='hello', description='Saying hello to the bot')
async def hello(ctx):
    """Reply with a rotating greeting addressed to the invoking user."""
    global count
    greeting = greetings[count % 4]
    await ctx.send(f'{greeting} {ctx.author.name}')
    count += 1
# This the help class
class My_help(commands.HelpCommand):
    """Custom help command that renders all help output as Discord embeds."""
    def get_command_signature(self, command):
        # Drop alias names: show only the prefixed qualified name plus signature.
        return f"{self.clean_prefix}{command.qualified_name} {command.signature}"
    async def send_error_message(self, error):
        # Report help-command errors (e.g. unknown command) as an "Error" embed.
        await self.get_destination().send(
            embed=discord.Embed(title="Error", description=error)
        )
    async def send_bot_help(self, mapping):
        # Bare "!help": one embed field per cog listing its command signatures.
        embed = discord.Embed(title="Help Commands")
        for cog, cog_commands in mapping.items():
            signatures = [self.get_command_signature(cmd) for cmd in cog_commands]
            if not signatures:
                continue
            embed.add_field(
                name=getattr(cog, "qualified_name", "Main"),
                value="\n".join(signatures),
                inline=True,
            )
        await self.get_destination().send(embed=embed)
    async def send_command_help(self, command):
        # "!help <command>" (marked as not working in the original source).
        embed = discord.Embed(title=command.signature, description=command.description)
        await self.get_destination().send(embed=embed)
    async def send_cog_help(self, cog):
        # "!help <cog>": embed carrying only the cog's description.
        embed = discord.Embed(title="", description=cog.description)
        await self.get_destination().send(embed=embed)
'''@help.error
async def help_error(ctx, error):
if isinstance(error, commands.CommandOnCooldown):
await ctx.send(embed = discord.Embed(title = "You are on cooldown!", description = f"Try again in {error.retry_after:.2f}s"))
'''
# This notifies us that the bot is currently active
@bot.event
async def on_ready():
    # Fired by discord.py once the gateway connection is established.
    print('Bot is Online')
#@bot.event
#async def on_voice_state_update(data):
# return
# setting up the help command on the bot
# Replace the default help command with the custom embed-based one.
bot.help_command = My_help(command_attrs=attributes)
# run the bot (blocking call: starts the event loop and logs in with the token)
bot.run(token)
| 30.714286 | 131 | 0.719499 |
acf483d4a4ee00423eb89f15a18bb851df7f89d3 | 268 | py | Python | tests/conftest.py | JoshuaZero/awesome_python_test | 358813173d324e510e9793ea57ccfcac4146e2e5 | [
"MIT"
] | null | null | null | tests/conftest.py | JoshuaZero/awesome_python_test | 358813173d324e510e9793ea57ccfcac4146e2e5 | [
"MIT"
] | 11 | 2020-07-01T06:42:40.000Z | 2021-09-22T19:21:40.000Z | tests/conftest.py | JoshuaZero/awesome_python_test | 358813173d324e510e9793ea57ccfcac4146e2e5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Dummy conftest.py for awesome_test.
If you don't know what this is for, just leave it empty.
Read more about conftest.py under:
https://pytest.org/latest/plugins.html
"""
# import pytest
import scipy
print(help(scipy))
| 17.866667 | 60 | 0.664179 |
acf485e83d69ba24d227f5089b0975b4ce370a79 | 3,489 | py | Python | back/ticker_counts.py | aur3us/Reddit-Stock-Trends | b920d667f9e9c5267064775441806c14ca52dc4b | [
"MIT"
] | null | null | null | back/ticker_counts.py | aur3us/Reddit-Stock-Trends | b920d667f9e9c5267064775441806c14ca52dc4b | [
"MIT"
] | null | null | null | back/ticker_counts.py | aur3us/Reddit-Stock-Trends | b920d667f9e9c5267064775441806c14ca52dc4b | [
"MIT"
] | null | null | null | import configparser
import json
import pandas as pd
import praw
import re
import os
import yfinance as yf
from collections import Counter
from functools import reduce
from operator import add
from typing import Set
from datetime import datetime
from tqdm import tqdm
# JB 02/07/2021 - Configparser introduced to scrape out some hardcode and allow removal of sensitive passwords
# Upper bound on reddit submissions pulled per run.
WEBSCRAPER_LIMIT = 20000
config = configparser.ConfigParser()
config.read('./config/config.ini')
# Stop/block word lists are stored as JSON arrays inside the ini file.
stop_words = json.loads(config['FilteringOptions']['StopWords'])
block_words = json.loads(config['FilteringOptions']['BlockWords'])
# Known-valid ticker symbols; verify_ticker() looks entries up by symbol.
with open('./config/tickers.json') as tickerFile:
    tickerList = json.load(tickerFile)
# Scrape subreddits `r/robinhoodpennystocks` and `r/pennystocks`
# Current it does fetch a lot of additional data like upvotes, comments, awards etc but not using anything apart from title for now
reddit = praw.Reddit('ClientSecrets')
# praw accepts a "+"-joined multireddit string; the list comes from config.
subreddits = "+".join(json.loads(config['FilteringOptions']['Subreddits']))
# Lazy listing of the newest submissions across the configured subreddits.
new_bets = reddit.subreddit(subreddits).new(limit=WEBSCRAPER_LIMIT)
posts = [[post.id,
          post.title,
          post.score,
          post.num_comments,
          post.upvote_ratio,
          post.total_awards_received] for post in tqdm(new_bets, desc="Selecting relevant data from webscraper", total=WEBSCRAPER_LIMIT)]
posts = pd.DataFrame(posts, columns=["id",
                                     "title",
                                     "score",
                                     "comments",
                                     "upvote_ratio",
                                     "total_awards"])
def verify_ticker(tic):
    """Return True if ``tic`` is in the loaded ticker list with a truthy value.

    :param tic: candidate ticker symbol (upper-case string).
    """
    try:
        # Narrowed from a bare ``except: pass`` -- only an unknown symbol
        # should mean "invalid"; other errors now surface instead of hiding.
        return bool(tickerList[tic])
    except KeyError:
        return False
def extract_ticker(body: str, re_string: str = "[$][A-Za-z]*|[A-Z][A-Z]{1,}") -> Set[str]:
    """Collect candidate ticker symbols from a block of text via regex."""
    candidates = set(re.findall(re_string, str(body)))
    tickers = set()
    for raw in candidates:
        # Skip empty matches, blocked words, and configured stop words.
        if not raw or raw in block_words or raw.lower() in stop_words:
            continue
        try:
            tickers.add(raw.replace("$", "").upper())
        except Exception as err:
            print(err)
    return tickers
# Extract tickers from all titles and create a new column
posts["Tickers"] = posts["title"].apply(extract_ticker)
ticker_sets = posts.Tickers.to_list()
# Count number of occurances of the Ticker and verify id the Ticker exists
# NOTE(review): reduce() without an initial value raises TypeError when
# ticker_sets is empty (no posts scraped) -- confirm that cannot happen.
counts = reduce(add, map(Counter, ticker_sets))
verified_tics = {}
for ticker, ticker_count in tqdm(counts.items(), desc="Filtering verified ticks"):
    # If ticker is found more than 3 times and ticker is valid
    if ticker_count > 3 and verify_ticker(ticker):
        verified_tics[ticker] = ticker_count
# Create Datable of just mentions
tick_df = pd.DataFrame(verified_tics.items(), columns=["Ticker", "Mentions"])
tick_df.sort_values(by=["Mentions"], inplace=True, ascending=False)
tick_df.reset_index(inplace=True, drop=True)
# Output CSV is named after today's date, written under ./data.
date_created = datetime.today().strftime('%Y-%m-%d')
csv_filename = f"{date_created}_tick_df"
directory_output = "./data"
if not os.path.exists(directory_output):
    os.mkdir(directory_output)
full_output_path = f"{directory_output}/{csv_filename}.csv"
# NOTE(review): the opened handle is unused -- to_csv writes to the path itself.
with open(full_output_path, "w+") as file: # Use file to refer to the file object
    # Save to file to load into yahoo analysis script
    tick_df.to_csv(full_output_path, index=False)
print(tick_df.head())
| 32.009174 | 137 | 0.671253 |
acf486632d0c3c64c7ad623e2c282426b1b1bd97 | 496 | py | Python | bin/ConvertRagooOrderingtoTour.py | sc-zhang/ALLHiC_adjuster | 877885eb42579c6713b913ec216f67c587798cd7 | [
"MIT"
] | 4 | 2020-05-27T09:13:09.000Z | 2021-08-04T10:55:15.000Z | bin/ConvertRagooOrderingtoTour.py | sc-zhang/ALLHiC_adjuster | 877885eb42579c6713b913ec216f67c587798cd7 | [
"MIT"
] | null | null | null | bin/ConvertRagooOrderingtoTour.py | sc-zhang/ALLHiC_adjuster | 877885eb42579c6713b913ec216f67c587798cd7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
def convert_ord_to_tour(in_ord, out_tour):
    """Convert a RaGOO ordering file to a tour file.

    Each input line holds a scaffold name and an orientation ("+"/"-");
    the output is a single space-separated line of name+orientation tokens.
    """
    with open(in_ord, 'r') as src, open(out_tour, 'w') as dst:
        entries = []
        for record in src:
            fields = record.strip().split()
            entries.append(fields[0] + fields[1])
        dst.write(' '.join(entries))
if __name__ == "__main__":
    # Require exactly two arguments: the original unpacked sys.argv[1:] into
    # two names, which raised ValueError whenever extra arguments were passed.
    if len(sys.argv) != 3:
        print("Usage: python "+sys.argv[0]+" <in_ordering> <out_tour>")
    else:
        in_ord, out_tour = sys.argv[1], sys.argv[2]
        convert_ord_to_tour(in_ord, out_tour)
| 22.545455 | 65 | 0.65121 |
acf486a09a0debcb6307f86be37e351825f8c771 | 1,506 | py | Python | goatools/test_data/cli/find_enrichment_dflts.py | cinaljess/goatools | 8cd603df88fcb49da970e37c445cdb1bdd17ee01 | [
"BSD-2-Clause"
] | null | null | null | goatools/test_data/cli/find_enrichment_dflts.py | cinaljess/goatools | 8cd603df88fcb49da970e37c445cdb1bdd17ee01 | [
"BSD-2-Clause"
] | null | null | null | goatools/test_data/cli/find_enrichment_dflts.py | cinaljess/goatools | 8cd603df88fcb49da970e37c445cdb1bdd17ee01 | [
"BSD-2-Clause"
] | 1 | 2022-03-17T03:14:32.000Z | 2022-03-17T03:14:32.000Z | """Defaults for find_enrichment parseargs to be used in tests."""
__copyright__ = "Copyright (C) 2016-2018, DV Klopfenstein, H Tang, All rights reserved."
__author__ = "DV Klopfenstein"
import os
import collections as cx
# pylint: disable=too-few-public-methods
class ArgsDict(object):
    """Defaults for find_enrichment parseargs to be used in tests."""
    # Repository root, three directory levels above this file.
    repo = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../..")
    def __init__(self):
        # Repo-relative path builder; identical output to
        # '{REPO}<tail>'.format(REPO=self.repo).
        def rel(tail):
            return self.repo + tail
        self.namespace = {
            'alpha': 0.05,
            'compare': False,
            'filenames': [
                rel('/data/study'),
                rel('/data/population'),
                rel('/data/association'),
            ],
            'goslim': rel('/goslim_generic.obo'),
            'indent': False,
            'method': 'bonferroni,sidak,holm,fdr_bh',
            'min_overlap': 0.7,
            'no_propagate_counts': False,
            'obo': rel('/go-basic.obo'),
            'outfile': rel('/goea.txt'),
            'outfile_detail': None,
            'pval': 0.05,
            'pval_field': 'uncorrected',
            'pvalcalc': 'fisher',
            'ratio': None,
            'sections': None,
        }
        # Namedtuple type whose fields mirror the namespace keys (argparse-like).
        self.ntobj = cx.namedtuple("Namespace", " ".join(self.namespace))
# Copyright (C) 2016-2018, DV Klopfenstein, H Tang, All rights reserved.
| 35.857143 | 88 | 0.571713 |
acf487208fdc4748ac08abe212237fafaf0c7eeb | 1,955 | py | Python | slider-agent/src/main/python/resource_management/libraries/script/hook.py | turningme/incubator-retired-slider | 1d4f519d763210f46e327338be72efa99e65cb5d | [
"Apache-2.0"
] | 60 | 2015-01-05T10:51:11.000Z | 2018-12-15T03:48:09.000Z | slider-agent/src/main/python/resource_management/libraries/script/hook.py | turningme/incubator-retired-slider | 1d4f519d763210f46e327338be72efa99e65cb5d | [
"Apache-2.0"
] | null | null | null | slider-agent/src/main/python/resource_management/libraries/script/hook.py | turningme/incubator-retired-slider | 1d4f519d763210f46e327338be72efa99e65cb5d | [
"Apache-2.0"
] | 87 | 2015-01-14T05:14:15.000Z | 2018-12-25T14:14:56.000Z | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
__all__ = ["Hook"]
from resource_management.libraries.script import Script
import subprocess
import sys
class Hook(Script):
  """
  Executes a hook for a command for custom service. stdout and stderr are written to
  tmpoutfile and to tmperrfile respectively.
  """
  HOOK_METHOD_NAME = "hook" # This method is always executed at hooks
  def choose_method_to_execute(self, command_name):
    """
    Changes logics of resolving method name: every hook script, regardless of
    the requested command name, executes its method named "hook".
    """
    return super(Hook, self).choose_method_to_execute(self.HOOK_METHOD_NAME)
  def run_custom_hook(self, command):
    """
    Runs custom hook: re-uses this process's own command line, rewriting it to
    point at the custom hook's script and base directory, then re-invokes the
    interpreter on the rewritten arguments.
    """
    # NOTE(review): args aliases sys.argv, so the replacements below mutate
    # sys.argv in place -- confirm later code does not rely on the originals.
    args = sys.argv
    #Hook script to run
    args[0] = args[0].replace('before-'+args[1], command)
    args[0] = args[0].replace('after-'+args[1], command)
    #Hook script base directory
    args[3] = args[3].replace('before-'+args[1], command)
    args[3] = args[3].replace('after-'+args[1], command)
    # Command name is the part after the dash, e.g. "before-START" -> "START".
    args[1] = command.split("-")[1]
    cmd = [sys.executable]
    cmd.extend(args)
    # Non-zero exit of the subprocess is treated as a hook failure.
    if subprocess.call(cmd) != 0:
      self.fail_with_error("Error: Unable to run the custom hook script " +
                           cmd.__str__())
| 29.179104 | 83 | 0.706394 |
acf4873b5d5f7dacff3e64723eccba7289a4f635 | 7,735 | py | Python | src/biome/text/modules/heads/language_modelling.py | javispp/biome-text | d4f49fde92c5f5efec51e2a285bb243bf7b25c0f | [
"Apache-2.0"
] | 62 | 2019-04-09T02:20:57.000Z | 2022-02-19T12:29:27.000Z | src/biome/text/modules/heads/language_modelling.py | javispp/biome-text | d4f49fde92c5f5efec51e2a285bb243bf7b25c0f | [
"Apache-2.0"
] | 213 | 2019-03-20T15:40:47.000Z | 2021-07-05T16:00:02.000Z | src/biome/text/modules/heads/language_modelling.py | javispp/biome-text | d4f49fde92c5f5efec51e2a285bb243bf7b25c0f | [
"Apache-2.0"
] | 8 | 2020-09-03T18:16:16.000Z | 2021-08-13T08:38:24.000Z | import logging
from typing import Any
from typing import Dict
from typing import Optional
from typing import Tuple
import numpy
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.data import Instance
from allennlp.data import TextFieldTensors
from allennlp.modules import SoftmaxLoss
from allennlp.nn.util import get_text_field_mask
from allennlp.nn.util import get_token_ids_from_text_field_tensors
from biome.text import vocabulary
from biome.text.backbone import ModelBackbone
from biome.text.metrics import Metrics
from biome.text.modules.configuration import ComponentConfiguration
from biome.text.modules.heads.task_head import TaskHead
from biome.text.modules.heads.task_head import TaskName
from biome.text.modules.heads.task_prediction import LanguageModellingPrediction
class LanguageModelling(TaskHead):
    """
    Task head for next-token language modelling, i.e., a model to predict the next token
    in a sequence of tokens.
    """
    # Identifier used to register/dispatch this head type.
    task_name = TaskName.language_modelling
    _LOGGER = logging.getLogger(__name__)
    def __init__(
        self,
        backbone: ModelBackbone,
        dropout: float = None,
        bidirectional: bool = False,
    ) -> None:
        """Create the head.

        Parameters
        ----------
        backbone
            Shared model backbone (featurizer + encoder).
        dropout
            Optional dropout probability applied to the contextual embeddings.
        bidirectional
            Must match the bidirectionality of the backbone encoder.
        """
        super(LanguageModelling, self).__init__(backbone)
        # Prediction object returned for empty/failed inputs.
        self._empty_prediction = LanguageModellingPrediction(
            lm_embeddings=numpy.array([]), mask=numpy.array([])
        )
        self.bidirectional = bidirectional
        # This head models words, so word-level features are mandatory.
        if not backbone.featurizer.has_word_features:
            raise ConfigurationError(
                "`LanguageModelling` defines a word-level next token language model. "
                "Please check your `features` configuration to enable at least `words` features."
            )
        if backbone.encoder.is_bidirectional() is not bidirectional:
            raise ConfigurationError(
                "Bidirectionality of contextualizer must match bidirectionality of "
                "language model. "
                f"Contextualizer bidirectional: {backbone.encoder.is_bidirectional()}, "
                f"language model bidirectional: {bidirectional}"
            )
        # A bidirectional encoder concatenates forward/backward halves, so each
        # direction contributes half of the encoder output dimension.
        if self.bidirectional:
            self._forward_dim = backbone.encoder.get_output_dim() // 2
        else:
            self._forward_dim = backbone.encoder.get_output_dim()
        if dropout:
            self._dropout = torch.nn.Dropout(dropout)
        else:
            # No-op stand-in so forward() can call self._dropout unconditionally.
            self._dropout = lambda x: x
        self._metrics = Metrics(perplexity={"type": "perplexity"})
        # Softmax loss over the word vocabulary (one embedding row per word).
        self._loss = SoftmaxLoss(
            num_words=vocabulary.words_vocab_size(self.backbone.vocab),
            embedding_dim=self._forward_dim,
        )
    def on_vocab_update(self):
        """Rebuild the softmax loss when the word vocabulary size has changed."""
        num_words = vocabulary.words_vocab_size(self.backbone.vocab)
        if len(self._loss.softmax_b) != num_words:
            self._loss = SoftmaxLoss(
                num_words=num_words,
                embedding_dim=self._forward_dim,
            )
    def featurize(self, text: str) -> Optional[Instance]:
        """Turn raw text into an ``Instance`` with a single "text" field."""
        instance = self.backbone.featurizer(text, to_field="text", aggregate=True)
        return instance
    def forward(self, text: TextFieldTensors) -> Dict[str, Any]: # type: ignore
        """Compute the (optionally bidirectional) next-token loss and embeddings."""
        mask = get_text_field_mask(text)
        contextual_embeddings = self.backbone.forward(text, mask)
        token_ids = get_token_ids_from_text_field_tensors(text)
        assert isinstance(contextual_embeddings, torch.Tensor)
        # Use token_ids to compute targets
        # targets are next token ids with respect to first token in the seq
        # e.g. token_ids [[1, 3, 5, 7],..[]], forward_targets=[[3,5,7],..]
        forward_targets = torch.zeros_like(token_ids)
        forward_targets[:, 0:-1] = token_ids[:, 1:]
        if self.bidirectional:
            # The backward direction predicts the previous token instead.
            backward_targets = torch.zeros_like(token_ids)
            backward_targets[:, 1:] = token_ids[:, 0:-1]
        else:
            backward_targets = None
        # add dropout
        contextual_embeddings_with_dropout = self._dropout(contextual_embeddings)
        # compute softmax loss
        try:
            forward_loss, backward_loss = self._compute_loss(
                contextual_embeddings_with_dropout, forward_targets, backward_targets
            )
        except IndexError:
            raise IndexError(
                "Word token out of vocabulary boundaries, please check your vocab is correctly set"
                " or created before starting training."
            )
        # Padding targets are 0, so this counts real (non-padding) targets.
        num_targets = torch.sum((forward_targets > 0).long())
        if num_targets > 0:
            if self.bidirectional:
                average_loss = (
                    0.5 * (forward_loss + backward_loss) / num_targets.float()
                )
            else:
                average_loss = forward_loss / num_targets.float()
        else:
            average_loss = torch.tensor(0.0)
        for metric in self._metrics.get_dict(is_train=self.training).values():
            # Perplexity needs the value to be on the cpu
            metric(average_loss.to("cpu"))
        return dict(
            loss=average_loss,
            lm_embeddings=contextual_embeddings,
            mask=mask,
        )
    def get_metrics(self, reset: bool = False) -> Dict[str, float]:
        """Return current metric values; optionally reset their internal state."""
        return {
            metric_name: metric.get_metric(reset)
            for metric_name, metric in self._metrics.get_dict(
                is_train=self.training
            ).items()
        }
    def _compute_loss(
        self,
        lm_embeddings: torch.Tensor,
        forward_targets: torch.Tensor,
        backward_targets: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Summed (forward, backward) losses; backward is None when unidirectional."""
        # If bidirectional, lm_embeddings is shape (batch_size, timesteps, dim * 2)
        # If unidirectional, lm_embeddings is shape (batch_size, timesteps, dim)
        # forward_targets, backward_targets (None in the unidirectional case) are
        # shape (batch_size, timesteps) masked with 0
        if self.bidirectional:
            forward_embeddings, backward_embeddings = lm_embeddings.chunk(2, -1)
            backward_loss = self._loss_helper(backward_embeddings, backward_targets)
        else:
            forward_embeddings = lm_embeddings
            backward_loss = None
        forward_loss = self._loss_helper(forward_embeddings, forward_targets)
        return forward_loss, backward_loss
    def _loss_helper(
        self,
        direction_embeddings: torch.Tensor,
        direction_targets: torch.Tensor,
    ) -> torch.Tensor:
        """Softmax loss for one direction, restricted to non-padding positions."""
        mask = direction_targets > 0
        # we need to subtract 1 to undo the padding id since the softmax
        # does not include a padding dimension
        # shape (batch_size * timesteps, )
        non_masked_targets = direction_targets.masked_select(mask) - 1
        # shape (batch_size * timesteps, embedding_dim)
        non_masked_embeddings = direction_embeddings.masked_select(
            mask.unsqueeze(-1)
        ).view(-1, self._forward_dim)
        return self._loss(non_masked_embeddings, non_masked_targets)
    def _make_task_prediction(
        self,
        single_forward_output: Dict[str, numpy.ndarray],
        instance: Instance,
    ) -> LanguageModellingPrediction:
        """Package one example's forward output as a ``LanguageModellingPrediction``."""
        task_prediction = LanguageModellingPrediction(
            lm_embeddings=single_forward_output["lm_embeddings"],
            mask=single_forward_output["mask"],
        )
        # Loss is only present when targets were available (e.g. training/eval).
        if "loss" in single_forward_output:
            task_prediction.loss = float(single_forward_output["loss"])
        return task_prediction
class LanguageModellingConfiguration(ComponentConfiguration[LanguageModelling]):
    """Configuration for language model head components."""
| 36.14486 | 99 | 0.655074 |
acf48849b4840799a63c009c92dbff0ce498eb75 | 794 | py | Python | ProjectApplication/grant_management/migrations/0054_medium_add_createmodified.py | code-review-doctor/project-application | d85b40b69572efbcda24ce9c40803f76d8ffd192 | [
"MIT"
] | 5 | 2020-07-29T10:00:11.000Z | 2022-02-19T11:00:34.000Z | ProjectApplication/grant_management/migrations/0054_medium_add_createmodified.py | code-review-doctor/project-application | d85b40b69572efbcda24ce9c40803f76d8ffd192 | [
"MIT"
] | 471 | 2019-09-20T14:37:28.000Z | 2022-03-25T14:16:34.000Z | ProjectApplication/grant_management/migrations/0054_medium_add_createmodified.py | code-review-doctor/project-application | d85b40b69572efbcda24ce9c40803f76d8ffd192 | [
"MIT"
] | 5 | 2020-03-15T12:42:47.000Z | 2022-02-15T18:06:52.000Z | # Generated by Django 3.0.10 on 2020-10-08 11:10
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated by Django 3.0.10 (see file header): adds audit timestamp
    # columns to the grant_management "medium" model.
    dependencies = [
        ('grant_management', '0053_add_file_validator'),
    ]
    operations = [
        migrations.AddField(
            model_name='medium',
            name='created_on',
            # auto_now_add fills the column on insert; the one-off default
            # (timezone.now) backfills existing rows, hence preserve_default=False.
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, help_text='Date and time at which the entry was created'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='medium',
            name='modified_on',
            # auto_now updates the column on every save; nullable so existing
            # rows need no backfill.
            field=models.DateTimeField(auto_now=True, help_text='Date and time at which the entry was modified', null=True),
        ),
    ]
| 30.538462 | 151 | 0.642317 |
acf488c7ed1aef50afcc67926612a6eeb58f9598 | 870 | py | Python | atomate/feff/workflows/presets.py | Zhuoying/atomate | 067023f0f740d3abac47b7ae7743c1c31eff8a06 | [
"BSD-3-Clause-LBNL"
] | 167 | 2017-01-26T00:14:19.000Z | 2022-03-18T20:47:58.000Z | atomate/feff/workflows/presets.py | Zhuoying/atomate | 067023f0f740d3abac47b7ae7743c1c31eff8a06 | [
"BSD-3-Clause-LBNL"
] | 422 | 2016-12-16T18:21:15.000Z | 2022-03-23T22:13:19.000Z | atomate/feff/workflows/presets.py | Zhuoying/atomate | 067023f0f740d3abac47b7ae7743c1c31eff8a06 | [
"BSD-3-Clause-LBNL"
] | 158 | 2016-12-16T18:28:00.000Z | 2022-03-28T11:40:03.000Z | from atomate.feff.workflows.core import get_wf_xas
__author__ = "Kiran Mathew"
__email__ = "kmathew@lbl.gov"
def wf_Xanes_K_edge(structure, c=None):
    """Build a FEFF XANES K-edge workflow for the given structure.

    ``c`` is an optional settings dict; recognised keys are FEFF_CMD,
    DB_FILE, METADATA and ABSORBING_ATOM.
    """
    settings = c or {}
    feff_cmd = settings.get("FEFF_CMD", "feff_mpi")
    db_file = settings.get("DB_FILE", None)
    metadata = settings.get("METADATA", {})
    absorbing_atom = settings.get("ABSORBING_ATOM")
    # FEFF input tags for a reciprocal-space, RPA core-hole K-edge calculation.
    user_tag_settings = {
        "RPATH": -1,
        "SCF": "7 0 30 0.2 3",
        "FMS": "9 0",
        "LDOS": "-30.0 30.0 0.1",
        "RECIPROCAL": "",
        "EDGE": "K",
        "COREHOLE": "RPA",
    }
    # Record the absorbing site alongside any caller-supplied metadata.
    metadata["absorbing_atom_idx"] = str(absorbing_atom)
    return get_wf_xas(
        absorbing_atom,
        structure,
        edge="K",
        feff_cmd=feff_cmd,
        db_file=db_file,
        metadata=metadata,
        user_tag_settings=user_tag_settings,
        use_primitive=False,
    )
| 22.894737 | 64 | 0.570115 |
acf4894808e8e4e71f93be92bd2b00ce01578d30 | 6,893 | py | Python | suites/SUMO_HVT_tracker/mobs.py | kotmasha/kodlab-uma-client | 0a8219100f5b6408b1f2df815477d4ba37811fb9 | [
"MIT"
] | null | null | null | suites/SUMO_HVT_tracker/mobs.py | kotmasha/kodlab-uma-client | 0a8219100f5b6408b1f2df815477d4ba37811fb9 | [
"MIT"
] | null | null | null | suites/SUMO_HVT_tracker/mobs.py | kotmasha/kodlab-uma-client | 0a8219100f5b6408b1f2df815477d4ba37811fb9 | [
"MIT"
] | null | null | null | ### UMA tracker agents: data structures and their operators
import cPickle
import bsddb
import numpy as np
from collections import deque
import copy
## A more standard rounding operation
def my_round(x):
    """Round half up: nearest integer (as a float), ties going toward +infinity."""
    floor_x = np.floor(x)
    return floor_x if x < floor_x + 0.5 else np.floor(x + 1)
## version of str for signed numbers
def signedstr(x):
    """Render ``x`` with an explicit sign: non-negative values get a '+' prefix."""
    prefix = '' if x < 0 else '+'
    return prefix + str(x)
## Complex integers class
# - possibly there is something better, but I couldn't find it
#
class icomplex(object):
    """Gaussian (complex) integer: a complex number with integer components.

    Inputs are floored to integers on construction. Arithmetic accepts any
    operand exposing ``real``/``imag`` attributes, including plain ints.
    """
    def __init__(self,x,y):
        self.real=int(np.floor(x))
        self.imag=int(np.floor(y))
    def __repr__(self):
        return str(self.real)+signedstr(self.imag)+'j'
    def __str__(self):
        return str(self.real)+signedstr(self.imag)+'j'
    def __eq__(self,other):
        return self.real==other.real and self.imag==other.imag
    def __ne__(self,other):
        return not self==other
    def __add__(self,z):
        return icomplex(self.real+z.real,self.imag+z.imag)
    def __sub__(self,z):
        return icomplex(self.real-z.real,self.imag-z.imag)
    def __mul__(self,z):
        return icomplex(self.real*z.real-self.imag*z.imag,self.real*z.imag+self.imag*z.real)
    def conj(self):
        """Complex conjugate."""
        return icomplex(self.real,-self.imag)
    def __abs__(self):
        # Squared magnitude (an integer), not the Euclidean norm.
        return (self*self.conj()).real
    def __complex__(self):
        return complex(self.real,self.imag)
    def __floordiv__(self,scale): # $scale$ must be a non-zero integer
        return icomplex(self.real / scale,self.imag / scale)
    def __mod__(self,scale): # $scale$ must be a non-zero integer
        return icomplex(self.real % scale,self.imag % scale)
    def pow(self,x):
        """Non-negative integer power of this Gaussian integer."""
        if isinstance(x,int):
            if x==0:
                return 1
            elif x>0:
                # Bug fix: the original recursed via the builtin pow(self, x-1),
                # which requires __pow__ and raised TypeError; recurse on this
                # method instead.
                return self*self.pow(x-1)
            else:
                raise Exception('Complex integer has only non-negative integer powers.')
        else:
            raise Exception('Complex integer has only non-negative integer powers.')
# Unit steps in the Gaussian-integer plane, used as direction vectors.
up=icomplex(0,1)
down=icomplex(0,-1)
right=icomplex(1,0)
left=icomplex(-1,0)
# Zero displacement.
nulik=icomplex(0,0)
## Tracker physical state (to be "pan-tilt-zoom" when this matures...)
# - viewport is a grid of size $self._res$;
# - viewport coords are come first ($self._state[0]$);
# - $depth$ is the maximal zoom-in depth;
# - viewports do not overlap except when containing each other.
# - actions are:
# zoomin, zoomout, pan by arbitrary vector in least-significant units
class ptz(object):
    """Pan-tilt-zoom viewport state.

    The state is a deque of per-level positions, most significant first; each
    entry addresses a cell of the parent viewport's res-by-res grid (intended
    to be an icomplex, though any value supporting %, //, +, * by/with ints
    and exposing real/imag works).
    """
    def __init__(self,res):
        # res is a positive integer: the grid resolution at every zoom level.
        self._res=res
        self._state=deque([])
    def __repr__(self):
        return str(list(self._state))
    ## Least significant position at the tail of self._state
    def zoomin(self,pos):
        """Descend one zoom level into cell $pos$ (taken modulo the resolution)."""
        self._state.append(pos % self._res)
        return self
    def zoomout(self):
        """Ascend one zoom level (no-op at the top level)."""
        if self._state:
            self._state.pop()
        return self
    def state_all(self):
        """Return a copy of the state deque."""
        return deque(self._state)
    def depth(self):
        """Current zoom depth."""
        return len(self._state)
    ## Comparison operations ("refinement")
    # - $other$ refines $self$ iff $self._state$ is a prefix of $other._state$
    def __le__(self,other):
        s=copy.deepcopy(self._state)
        o=copy.deepcopy(other.state_all())
        if len(s)>len(o):
            return False
        while len(s):
            if s.popleft()!=o.popleft():
                return False
        return True
    def __ge__(self,other):
        return other<=self
    def __eq__(self,other):
        return self<=other and other<=self
    ## Addition: of two comparable viewports keep the finer one; otherwise
    # coarsen $other$ until it is refined by $self$.
    def __add__(self,other):
        temp=copy.deepcopy(other)
        if self<=temp:
            return temp
        elif temp<=self:
            return self
        else:
            while not temp<=self:
                temp.zoomout()
            return temp
    ## Subtraction: drop $other$'s last level if it is a prefix of $self$,
    # otherwise return $self$ unchanged.
    def __sub__(self,other):
        temp=copy.deepcopy(other)
        return temp.zoomout() if other<=self else self
    ## Panning by $panvec$, expressed in units of the current (deepest) level.
    # The viewport is left unchanged (self returned either way) if the move
    # would leave the addressable $res**depth$ square.
    def pan(self,panvec):
        depth=len(self._state)
        bound=pow(self._res,depth)
        # Absolute position of the viewport in least-significant units.
        pos=panvec
        for ind in range(depth):  # range (not xrange): works on Python 2 and 3
            pos+=self._state[ind]*pow(self._res,depth-1-ind)
        if pos.real<bound and pos.imag<bound and pos.real>=0 and pos.imag>=0:
            # Re-encode the absolute position as per-level digits, base res.
            new_state=[]
            for ind in range(depth):
                new_state.append(pos % self._res)
                pos //= self._res
            new_state.reverse()
            # Bug fix: keep the state a deque -- the original assigned a plain
            # list here, which broke the popleft() calls in __le__ after a pan.
            self._state=deque(new_state)
        return self
## Tracker marker (may have several): an integer-indexed database of
# ptz objects of a fixed resolution.
#
class marker(object):
    """Integer-indexed, file-backed store of values (here: ptz states).

    Backed by a bsddb record-number database (Python 2 only). Record numbers
    start at 1, so every external location ``loc`` is stored under key loc+1.
    ``default`` is the value assumed for locations never written.
    """
    def __init__(self,filename,default):
        # 'n' always creates a fresh database file, truncating any existing one.
        self._db=bsddb.rnopen(filename,'n')
        self._default=default
    def mark(self,loc,content):
        """Add ``content`` (via +) to the value at ``loc`` and persist."""
        if loc+1 in self._db:
            self._db[loc+1]=cPickle.dumps(cPickle.loads(self._db[loc+1])+content)
        else:
            self._db[loc+1]=cPickle.dumps(self._default+content)
        self.sync()
    def unmark(self,loc,content):
        """Remove ``content`` (via -) from the value at ``loc`` and persist."""
        if loc+1 in self._db:
            self._db[loc+1]=cPickle.dumps(cPickle.loads(self._db[loc+1])-content)
        else:
            self._db[loc+1]=cPickle.dumps(self._default-content)
        self.sync()
    def moveto(self,loc):
        """Position the db cursor at ``loc`` (creating a default entry if absent)
        and return the value stored there."""
        if loc+1 not in self._db:
            self._db[loc+1]=cPickle.dumps(self._default)
        self._db.set_location(loc+1)
        return cPickle.loads(self._db[loc+1])
    def report(self,st,en=None):
        """Value at ``st``, or the list of values for locations st..en inclusive.

        In the range form, locations <= 0 or never written report the default.
        NOTE(review): en=0 (falsy) falls into the single-location branch --
        confirm that is intended.
        """
        rep=[]
        if en:
            for loc in xrange(st+1,en+2):
                if loc<=1 or loc not in self._db:
                    rep.append(self._default)
                else:
                    rep.append(cPickle.loads(self._db[loc]))
            return rep
        else:
            return cPickle.loads(self._db[st+1]) if st+1 in self._db else self._default
    #def next(self):
    #    ind,pickle=self._db.next()
    #    return ind,cPickle.loads(pickle)
    #
    #def previous(self):
    #    ind,pickle=self._db.previous()
    #    return ind,cPickle.loads(pickle)
    def sync(self):
        """Flush pending writes to disk."""
        self._db.sync()
| 30.100437 | 92 | 0.591179 |
acf489969cb69117f26f4fd97fca45611e1d9522 | 359 | py | Python | src/old/string-info.py | smorenburg/python | 74b1e72944dfd244f0169e8a7adb9e29ed1a7d27 | [
"MIT"
] | null | null | null | src/old/string-info.py | smorenburg/python | 74b1e72944dfd244f0169e8a7adb9e29ed1a7d27 | [
"MIT"
] | null | null | null | src/old/string-info.py | smorenburg/python | 74b1e72944dfd244f0169e8a7adb9e29ed1a7d27 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
message = input('Enter a message: ')
print('First character:', message[0])
print('Last character:', message[-1])
print('Middle character:', message[int(len(message) / 2)])
print('Even index characters:', message[0::2])
print('Odd index characters:', message[1::2])
print('Reversed message:', message[::-1])
print('Message:', message)
| 29.916667 | 58 | 0.688022 |
acf48999214d7704b6c0edea98b3f9abd031bee6 | 4,229 | py | Python | infinite/lindblad/bose_hubbard.py | ryuikaneko/lindblad_gutzwiller | 2c3fc59f3bec5d449c0c994d41f0b8f4da94fe6d | [
"MIT"
] | 1 | 2022-02-01T15:37:20.000Z | 2022-02-01T15:37:20.000Z | infinite/lindblad/bose_hubbard.py | ryuikaneko/lindblad_gutzwiller | 2c3fc59f3bec5d449c0c994d41f0b8f4da94fe6d | [
"MIT"
] | null | null | null | infinite/lindblad/bose_hubbard.py | ryuikaneko/lindblad_gutzwiller | 2c3fc59f3bec5d449c0c994d41f0b8f4da94fe6d | [
"MIT"
] | 1 | 2022-02-01T15:37:29.000Z | 2022-02-01T15:37:29.000Z | #!/usr/bin/env python
import numpy as np
import scipy.linalg
def make_op(nsps):
    """Build the local bosonic operators on a truncated Fock space.

    nsps is the number of states per site (max occupation + 1).  Returns
    (identity, annihilation a, hopping a + a^dagger, number n, n^2) as
    dense float64 matrices of shape (nsps, nsps).
    """
    op_id = np.eye(nsps, dtype=np.float64)
    op_a = np.zeros((nsps, nsps), dtype=np.float64)
    op_hop = np.zeros((nsps, nsps), dtype=np.float64)
    op_n = np.zeros((nsps, nsps), dtype=np.float64)
    op_n2 = np.zeros((nsps, nsps), dtype=np.float64)
    for m in range(nsps - 1):
        # Matrix element <m| a |m+1> = sqrt(m+1); hop is the symmetrized pair.
        amp = np.sqrt(m + 1)
        op_a[m, m + 1] = amp
        op_hop[m, m + 1] = amp
        op_hop[m + 1, m] = amp
    for m in range(nsps):
        op_n[m, m] = m
        op_n2[m, m] = m ** 2
    return op_id, op_a, op_hop, op_n, op_n2
def make_ham(op_id, op_a, op_hop, op_n, op_n2, z, J, U, mu, phi):
    """Single-site Gutzwiller mean-field Bose-Hubbard Hamiltonian.

    z: coordination number, J: hopping, U: interaction, mu: chemical
    potential, phi: mean-field order parameter <a>.
    """
    kinetic = -z * J * phi * op_hop
    onsite = 0.5 * U * op_n2 - (0.5 * U + mu) * op_n
    constant_shift = z * J * phi ** 2 * op_id
    return kinetic + onsite + constant_shift
def calc_phys(op_a, op_n, op_n2, vec):
    """Expectation values <a>, <n>, <n^2> in the state *vec*.

    Normalizes by <vec|vec>; assumes vec is real (no conjugation is
    applied on the bra side) — TODO confirm for complex callers.
    """
    weight = np.linalg.norm(vec) ** 2

    def expect(op):
        return vec.dot(op.dot(vec)) / weight

    return expect(op_a), expect(op_n), expect(op_n2)
def calc_gs(op_id, op_a, op_hop, op_n, op_n2, z, J, U, mu, nsps):
    """Self-consistent Gutzwiller mean-field ground state.

    Iterates phi -> diagonalize H(phi) -> phi' until |phi' - phi| < 1e-12
    or 1000 iterations.  Returns (ground energy, kinetic/interaction/
    chemical-potential energy parts, ground-state vector, converged phi,
    last phi increment, <n>, <n^2>).
    """
    max_iter = 1000
    tol = 1e-12
    phi = nsps  # deliberately large starting guess
    delta = 0.0
    for _ in range(max_iter):
        ham = make_ham(op_id, op_a, op_hop, op_n, op_n2, z, J, U, mu, phi)
        _, states = scipy.linalg.eigh(ham)
        phi_next, n, n2 = calc_phys(op_a, op_n, op_n2, states[:, 0])
        delta = np.abs(phi_next - phi)
        phi = phi_next
        if delta < tol:
            break
    # One final diagonalization at the converged order parameter.
    ham = make_ham(op_id, op_a, op_hop, op_n, op_n2, z, J, U, mu, phi)
    energies, states = scipy.linalg.eigh(ham)
    phi, n, n2 = calc_phys(op_a, op_n, op_n2, states[:, 0])
    ene_J = -z * J * phi ** 2
    ene_U = 0.5 * U * (n2 - n)
    ene_mu = -mu * n
    return energies[0], ene_J, ene_U, ene_mu, states[:, 0], phi, delta, n, n2
def vec2rho(vec):
    """Pure-state density matrix |vec><vec| (bra side conjugated)."""
    ket = np.asarray(vec)
    return np.outer(ket.conj(), ket)
def calc_drhodt(gamma, op_n, op_n2, op_ham, rho):
    """Lindblad master-equation right-hand side.

    drho/dt = -i[H, rho] + gamma * (n rho n - {n^2, rho}/2), i.e. unitary
    evolution plus number-operator dephasing with rate gamma.
    """
    commutator = op_ham.dot(rho) - rho.dot(op_ham)
    dissipator = op_n.dot(rho.dot(op_n)) - 0.5 * (op_n2.dot(rho) + rho.dot(op_n2))
    return -1j * commutator + gamma * dissipator
# Classic fourth-order Runge-Kutta time step for the density matrix.
def calc_RK(dt, gamma, op_n, op_n2, op_ham, rho):
    """Advance rho by one step of size dt under the Lindblad RHS."""
    def deriv(state):
        return calc_drhodt(gamma, op_n, op_n2, op_ham, state)

    k1 = dt * deriv(rho)
    k2 = dt * deriv(rho + 0.5 * k1)
    k3 = dt * deriv(rho + 0.5 * k2)
    k4 = dt * deriv(rho + k3)
    return rho + (k1 + 2 * k2 + 2 * k3 + k4) / 6.0
def main():
    """Driver: solve for the Gutzwiller mean-field ground state of the
    Bose-Hubbard model, print its properties, then time-evolve the
    corresponding density matrix under the Lindblad equation with RK4,
    printing trace, |phi| and |energy| at every step."""
    # Coordination number selects the lattice dimension.
    z = 2 ## 1D
    # z = 4 ## 2D
    # z = 6 ## 3D
    J = 1.0
    U = 1.0
    ## nsps = n_{max states per site} = n_{max occupation} + 1
    # nsps = 2
    nsps = 11
    mu = 0.371
    # Dephasing rate for the Lindblad dissipator (0 = purely unitary).
    gamma = 0.0
    # gamma = 0.1
    #
    op_id, op_a, op_hop, op_n, op_n2 = make_op(nsps)
    #
    # Ground-state search and a one-line summary of its observables.
    print("# z nsps J U mu ene ene_J ene_U ene_mu phi error(phi) n n^2 vec")
    ene, ene_J, ene_U, ene_mu, vec, phi, dphi, n, n2 = calc_gs(op_id,op_a,op_hop,op_n,op_n2,z,J,U,mu,nsps)
    print("#",end="")
    print(z,nsps,J,U,mu,end=" ")
    print(ene,ene_J,ene_U,ene_mu,end=" ")
    print(phi,dphi,n,n2,end=" ")
    print(' '.join(str(x) for x in vec),end=" ")
    print()
    #
    # rho = vec2rho(vec)
    # print(rho)
    # print(np.trace(rho))
    # print(phi)
    # phi1 = np.trace(rho.dot(op_a))
    # print(phi1)
    # op_ham = make_ham(op_id,op_a,op_hop,op_n,op_n2,z,J,U,mu,phi)
    # ene1 = np.trace(rho.dot(op_ham))
    # print(ene1)
    #
    # op_ham = make_ham(op_id,op_a,op_hop,op_n,op_n2,z,J,U,mu,phi)
    # drhodt = calc_drhodt(gamma,op_n,op_n2,op_ham,rho)
    # print(drhodt)
    # print(np.trace(drhodt))
    # phi1 = np.trace(drhodt.dot(op_a))
    # print(phi1)
    # ene1 = np.trace(drhodt.dot(op_ham))
    # print(ene1)
    #
    print()
    # Real-time Lindblad evolution starting from the ground-state density
    # matrix; the Hamiltonian is rebuilt each step because phi (and thus
    # the mean field) changes self-consistently along the trajectory.
    dt = 0.01
    Nsteps = 100000
    op_ham = make_ham(op_id,op_a,op_hop,op_n,op_n2,z,J,U,mu,phi)
    rho = vec2rho(vec)
    for steps in range(Nsteps):
        rho1 = calc_RK(dt,gamma,op_n,op_n2,op_ham,rho)
        # Instantaneous observables of the updated state.
        phi1 = np.trace(rho1.dot(op_a))
        ene1 = np.trace(rho1.dot(op_ham))
        # print(dt*steps,np.trace(rho1),phi1,ene1)
        print(dt*steps,np.abs(np.trace(rho1)),np.abs(phi1),np.abs(ene1))
        rho = rho1
        phi = phi1
        op_ham = make_ham(op_id,op_a,op_hop,op_n,op_n2,z,J,U,mu,phi)
# Run the simulation only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| 31.325926 | 106 | 0.59423 |
acf489edc0102f3344bdd5a07c84277c83eb880d | 1,037 | py | Python | EopTool/interface/Plot.py | AdrianLundell/adrians-geotools | a070ef39295ffd447faff66b6ca626ca59c79cbd | [
"MIT"
] | null | null | null | EopTool/interface/Plot.py | AdrianLundell/adrians-geotools | a070ef39295ffd447faff66b6ca626ca59c79cbd | [
"MIT"
] | null | null | null | EopTool/interface/Plot.py | AdrianLundell/adrians-geotools | a070ef39295ffd447faff66b6ca626ca59c79cbd | [
"MIT"
] | null | null | null | import tkinter as tk
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
import matplotlib.pyplot as plt
import numpy as np
class Plot(tk.Frame):
    """Tkinter widget embedding a matplotlib figure grid with a toolbar."""

    def __init__(self, master, subplot_rows, subplot_cols, *args, **kwargs):
        super().__init__(master, *args, **kwargs)
        # Create the figure/axes grid, then wire it to a Tk canvas + toolbar.
        self.fig, self.axes = plt.subplots(subplot_rows, subplot_cols, figsize=(5, 7))
        self.canvas = FigureCanvasTkAgg(self.fig, master=self)
        self.toolbar = NavigationToolbar2Tk(self.canvas, self)
        self.toolbar.update()
        self.canvas.get_tk_widget().pack(expand=True, fill='both', side="top")
        self.fig.set_tight_layout(True)

    def clear(self):
        """Clear all axes (works for both a single axes and an array)."""
        targets = self.axes if isinstance(self.axes, np.ndarray) else [self.axes]
        for axis in targets:
            axis.clear()

    def draw(self):
        """Re-render the canvas."""
        self.canvas.draw()
acf48ac7e9725d815798770f23b332421aef0937 | 730 | py | Python | iPresence/televisita/forms.py | toninoes/Proyecto | f9eb33fa10119cb93726c26d338396d3f38d2ddb | [
"MIT"
] | null | null | null | iPresence/televisita/forms.py | toninoes/Proyecto | f9eb33fa10119cb93726c26d338396d3f38d2ddb | [
"MIT"
] | null | null | null | iPresence/televisita/forms.py | toninoes/Proyecto | f9eb33fa10119cb93726c26d338396d3f38d2ddb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django import forms
from django.contrib.auth.models import User
from django.contrib.localflavor.es.forms import *
class MyModelChoiceField(forms.ModelChoiceField):
    """ModelChoiceField whose option labels are prefixed with the user's role.

    Role flags are checked in priority order: student (ALU), workplace
    tutor (LAB), teaching tutor (DOC).
    """
    def label_from_instance(self, obj):
        # Bug fix: previously 'rol' was unbound (NameError) for a user with
        # none of the three role flags set; fall back to '???' instead.
        if obj.es_Alumno:
            rol = 'ALU'
        elif obj.es_tutorLaboral:
            rol = 'LAB'
        elif obj.es_tutorDocente:
            rol = 'DOC'
        else:
            rol = '???'
        return "%s: %s %s" % (rol, obj.first_name, obj.last_name)
class AutorizadoForm(forms.Form):
autorizado = MyModelChoiceField(queryset=User.objects.exclude(is_active=False).exclude(puede_hablar=False).exclude(is_staff=True).order_by("-es_tutorDocente", "-es_tutorLaboral", "-es_Alumno"), empty_label=None)
enviarMail = forms.BooleanField(initial=False, required=False)
| 33.181818 | 212 | 0.750685 |
acf48b496fe5b069938322d72e2d5c089bf685fb | 2,884 | py | Python | profiles_api/models.py | PhurinatFlint/profile-rest-api | 608cfbc79121670d537da369b62f5ceac21284c1 | [
"MIT"
] | null | null | null | profiles_api/models.py | PhurinatFlint/profile-rest-api | 608cfbc79121670d537da369b62f5ceac21284c1 | [
"MIT"
] | null | null | null | profiles_api/models.py | PhurinatFlint/profile-rest-api | 608cfbc79121670d537da369b62f5ceac21284c1 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import AbstractBaseUser # StandardBaseClass that you need to use when overriding or customizing
from django.contrib.auth.models import PermissionsMixin # StandardBaseClass that you need to use when overriding or customizing
from django.contrib.auth.models import BaseUserManager
from django.conf import settings
class UserProfileManager(BaseUserManager):
    """Manager for user profiles.

    Provides creation helpers that hash passwords and persist through the
    manager's database alias.
    """

    def create_user(self, email, name, password=None):
        """Create and save a new user profile.

        Args:
            email: required; normalized (domain lower-cased) before saving.
            name: display name.
            password: plain-text password, hashed via set_password; None
                yields an unusable password.

        Raises:
            ValueError: if email is falsy.
        """
        if not email:
            raise ValueError('User must have an email address')
        email = self.normalize_email(email)
        user = self.model(email=email, name=name)
        user.set_password(password)  # store only the hash, never the raw password
        user.save(using=self._db)  # standard pattern for saving via the manager's db
        return user

    def create_superuser(self, email, name, password):
        """Create and save a superuser with full permissions and staff access."""
        user = self.create_user(email, name, password)
        user.is_superuser = True  # field provided by PermissionsMixin
        user.is_staff = True
        user.save(using=self._db)
        return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
    """Database model for users in the system (email is the login field)."""

    # Login identifier; unique means at most one account per address.
    email = models.EmailField(max_length=255, unique=True)
    # Human-readable display name.
    name = models.CharField(max_length=255)
    # Deactivation flag: False disables the account without deleting it.
    is_active = models.BooleanField(default=True)
    # Grants access to the Django admin when True.
    is_staff = models.BooleanField(default=False)

    # Custom model manager that knows how to create users/superusers.
    objects = UserProfileManager()

    # Django auth integration: authenticate with email; 'name' is also
    # prompted for by createsuperuser.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['name']

    def get_full_name(self):
        """Retrieve full name of user."""
        return self.name

    def get_short_name(self):
        """Retrieve short name of user."""
        return self.name

    def __str__(self):
        """Return the user's string representation (their email)."""
        return self.email
class ProfileFeedItem(models.Model):
    """A single status update posted by a user profile."""

    # Owning user; deleting the user cascades to their feed items.
    # The first argument of a ForeignKey names the remote model; using
    # settings.AUTH_USER_MODEL keeps this decoupled from the concrete class.
    user_profile = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE
    )
    # The status message itself.
    status_text = models.CharField(max_length=255)
    # Set once automatically when the row is first inserted.
    create_on = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        """Return the model as a string (its status text)."""
        return self.status_text
| 43.044776 | 151 | 0.707351 |
acf48c272ca02d9fbc87b915c03b412254c6ec96 | 57 | py | Python | flityard/__init__.py | karantan/flityard | ddf8fd727f0988ea1cc5fc697e97043274e8c177 | [
"MIT"
] | null | null | null | flityard/__init__.py | karantan/flityard | ddf8fd727f0988ea1cc5fc697e97043274e8c177 | [
"MIT"
] | null | null | null | flityard/__init__.py | karantan/flityard | ddf8fd727f0988ea1cc5fc697e97043274e8c177 | [
"MIT"
] | null | null | null | """A very simple library."""
__version__ = '0.3.0.dev0'
| 14.25 | 28 | 0.631579 |
acf48c9b72efd2dcebd3184b9755bffaab139a12 | 28,207 | py | Python | python/oanda/models/limit_order_transaction.py | KoenBal/OANDA_V20_Client | e67b9dbaddff6ed23e355d3ce7f9c9972799c702 | [
"MIT"
] | 1 | 2018-10-25T03:57:32.000Z | 2018-10-25T03:57:32.000Z | python/oanda/models/limit_order_transaction.py | KoenBal/OANDA_V20_Client | e67b9dbaddff6ed23e355d3ce7f9c9972799c702 | [
"MIT"
] | null | null | null | python/oanda/models/limit_order_transaction.py | KoenBal/OANDA_V20_Client | e67b9dbaddff6ed23e355d3ce7f9c9972799c702 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
OANDA v20 REST API
The full OANDA v20 REST API Specification. This specification defines how to interact with v20 Accounts, Trades, Orders, Pricing and more. To authenticate use the string 'Bearer ' followed by the token which can be obtained at https://www.oanda.com/demo-account/tpa/personal_token # noqa: E501
OpenAPI spec version: 3.0.23
Contact: api@oanda.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from oanda.models.client_extensions import ClientExtensions # noqa: F401,E501
from oanda.models.stop_loss_details import StopLossDetails # noqa: F401,E501
from oanda.models.take_profit_details import TakeProfitDetails # noqa: F401,E501
from oanda.models.trailing_stop_loss_details import TrailingStopLossDetails # noqa: F401,E501
class LimitOrderTransaction(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'time': 'str',
'user_id': 'int',
'account_id': 'str',
'batch_id': 'str',
'request_id': 'str',
'type': 'str',
'instrument': 'str',
'units': 'str',
'price': 'str',
'time_in_force': 'str',
'gtd_time': 'str',
'position_fill': 'str',
'trigger_condition': 'str',
'reason': 'str',
'client_extensions': 'ClientExtensions',
'take_profit_on_fill': 'TakeProfitDetails',
'stop_loss_on_fill': 'StopLossDetails',
'trailing_stop_loss_on_fill': 'TrailingStopLossDetails',
'trade_client_extensions': 'ClientExtensions',
'replaces_order_id': 'str',
'cancelling_transaction_id': 'str'
}
attribute_map = {
'id': 'id',
'time': 'time',
'user_id': 'userID',
'account_id': 'AccountID',
'batch_id': 'batchID',
'request_id': 'requestID',
'type': 'type',
'instrument': 'instrument',
'units': 'units',
'price': 'price',
'time_in_force': 'timeInForce',
'gtd_time': 'gtdTime',
'position_fill': 'positionFill',
'trigger_condition': 'triggerCondition',
'reason': 'reason',
'client_extensions': 'clientExtensions',
'take_profit_on_fill': 'takeProfitOnFill',
'stop_loss_on_fill': 'stopLossOnFill',
'trailing_stop_loss_on_fill': 'trailingStopLossOnFill',
'trade_client_extensions': 'tradeClientExtensions',
'replaces_order_id': 'replacesOrderID',
'cancelling_transaction_id': 'cancellingTransactionID'
}
def __init__(self, id=None, time=None, user_id=None, account_id=None, batch_id=None, request_id=None, type=None, instrument=None, units=None, price=None, time_in_force=None, gtd_time=None, position_fill=None, trigger_condition=None, reason=None, client_extensions=None, take_profit_on_fill=None, stop_loss_on_fill=None, trailing_stop_loss_on_fill=None, trade_client_extensions=None, replaces_order_id=None, cancelling_transaction_id=None): # noqa: E501
"""LimitOrderTransaction - a model defined in Swagger""" # noqa: E501
self._id = None
self._time = None
self._user_id = None
self._account_id = None
self._batch_id = None
self._request_id = None
self._type = None
self._instrument = None
self._units = None
self._price = None
self._time_in_force = None
self._gtd_time = None
self._position_fill = None
self._trigger_condition = None
self._reason = None
self._client_extensions = None
self._take_profit_on_fill = None
self._stop_loss_on_fill = None
self._trailing_stop_loss_on_fill = None
self._trade_client_extensions = None
self._replaces_order_id = None
self._cancelling_transaction_id = None
self.discriminator = None
if id is not None:
self.id = id
if time is not None:
self.time = time
if user_id is not None:
self.user_id = user_id
if account_id is not None:
self.account_id = account_id
if batch_id is not None:
self.batch_id = batch_id
if request_id is not None:
self.request_id = request_id
if type is not None:
self.type = type
if instrument is not None:
self.instrument = instrument
if units is not None:
self.units = units
if price is not None:
self.price = price
if time_in_force is not None:
self.time_in_force = time_in_force
if gtd_time is not None:
self.gtd_time = gtd_time
if position_fill is not None:
self.position_fill = position_fill
if trigger_condition is not None:
self.trigger_condition = trigger_condition
if reason is not None:
self.reason = reason
if client_extensions is not None:
self.client_extensions = client_extensions
if take_profit_on_fill is not None:
self.take_profit_on_fill = take_profit_on_fill
if stop_loss_on_fill is not None:
self.stop_loss_on_fill = stop_loss_on_fill
if trailing_stop_loss_on_fill is not None:
self.trailing_stop_loss_on_fill = trailing_stop_loss_on_fill
if trade_client_extensions is not None:
self.trade_client_extensions = trade_client_extensions
if replaces_order_id is not None:
self.replaces_order_id = replaces_order_id
if cancelling_transaction_id is not None:
self.cancelling_transaction_id = cancelling_transaction_id
@property
def id(self):
"""Gets the id of this LimitOrderTransaction. # noqa: E501
The Transaction's Identifier. # noqa: E501
:return: The id of this LimitOrderTransaction. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this LimitOrderTransaction.
The Transaction's Identifier. # noqa: E501
:param id: The id of this LimitOrderTransaction. # noqa: E501
:type: str
"""
self._id = id
@property
def time(self):
"""Gets the time of this LimitOrderTransaction. # noqa: E501
The date/time when the Transaction was created. # noqa: E501
:return: The time of this LimitOrderTransaction. # noqa: E501
:rtype: str
"""
return self._time
@time.setter
def time(self, time):
"""Sets the time of this LimitOrderTransaction.
The date/time when the Transaction was created. # noqa: E501
:param time: The time of this LimitOrderTransaction. # noqa: E501
:type: str
"""
self._time = time
@property
def user_id(self):
"""Gets the user_id of this LimitOrderTransaction. # noqa: E501
The ID of the user that initiated the creation of the Transaction. # noqa: E501
:return: The user_id of this LimitOrderTransaction. # noqa: E501
:rtype: int
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""Sets the user_id of this LimitOrderTransaction.
The ID of the user that initiated the creation of the Transaction. # noqa: E501
:param user_id: The user_id of this LimitOrderTransaction. # noqa: E501
:type: int
"""
self._user_id = user_id
@property
def account_id(self):
"""Gets the account_id of this LimitOrderTransaction. # noqa: E501
The ID of the Account the Transaction was created for. # noqa: E501
:return: The account_id of this LimitOrderTransaction. # noqa: E501
:rtype: str
"""
return self._account_id
@account_id.setter
def account_id(self, account_id):
"""Sets the account_id of this LimitOrderTransaction.
The ID of the Account the Transaction was created for. # noqa: E501
:param account_id: The account_id of this LimitOrderTransaction. # noqa: E501
:type: str
"""
self._account_id = account_id
@property
def batch_id(self):
"""Gets the batch_id of this LimitOrderTransaction. # noqa: E501
The ID of the \"batch\" that the Transaction belongs to. Transactions in the same batch are applied to the Account simultaneously. # noqa: E501
:return: The batch_id of this LimitOrderTransaction. # noqa: E501
:rtype: str
"""
return self._batch_id
@batch_id.setter
def batch_id(self, batch_id):
"""Sets the batch_id of this LimitOrderTransaction.
The ID of the \"batch\" that the Transaction belongs to. Transactions in the same batch are applied to the Account simultaneously. # noqa: E501
:param batch_id: The batch_id of this LimitOrderTransaction. # noqa: E501
:type: str
"""
self._batch_id = batch_id
@property
def request_id(self):
"""Gets the request_id of this LimitOrderTransaction. # noqa: E501
The Request ID of the request which generated the transaction. # noqa: E501
:return: The request_id of this LimitOrderTransaction. # noqa: E501
:rtype: str
"""
return self._request_id
@request_id.setter
def request_id(self, request_id):
"""Sets the request_id of this LimitOrderTransaction.
The Request ID of the request which generated the transaction. # noqa: E501
:param request_id: The request_id of this LimitOrderTransaction. # noqa: E501
:type: str
"""
self._request_id = request_id
@property
def type(self):
"""Gets the type of this LimitOrderTransaction. # noqa: E501
The Type of the Transaction. Always set to \"LIMIT_ORDER\" in a LimitOrderTransaction. # noqa: E501
:return: The type of this LimitOrderTransaction. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this LimitOrderTransaction.
The Type of the Transaction. Always set to \"LIMIT_ORDER\" in a LimitOrderTransaction. # noqa: E501
:param type: The type of this LimitOrderTransaction. # noqa: E501
:type: str
"""
allowed_values = ["CREATE", "CLOSE", "REOPEN", "CLIENT_CONFIGURE", "CLIENT_CONFIGURE_REJECT", "TRANSFER_FUNDS", "TRANSFER_FUNDS_REJECT", "MARKET_ORDER", "MARKET_ORDER_REJECT", "FIXED_PRICE_ORDER", "LIMIT_ORDER", "LIMIT_ORDER_REJECT", "STOP_ORDER", "STOP_ORDER_REJECT", "MARKET_IF_TOUCHED_ORDER", "MARKET_IF_TOUCHED_ORDER_REJECT", "TAKE_PROFIT_ORDER", "TAKE_PROFIT_ORDER_REJECT", "STOP_LOSS_ORDER", "STOP_LOSS_ORDER_REJECT", "TRAILING_STOP_LOSS_ORDER", "TRAILING_STOP_LOSS_ORDER_REJECT", "ORDER_FILL", "ORDER_CANCEL", "ORDER_CANCEL_REJECT", "ORDER_CLIENT_EXTENSIONS_MODIFY", "ORDER_CLIENT_EXTENSIONS_MODIFY_REJECT", "TRADE_CLIENT_EXTENSIONS_MODIFY", "TRADE_CLIENT_EXTENSIONS_MODIFY_REJECT", "MARGIN_CALL_ENTER", "MARGIN_CALL_EXTEND", "MARGIN_CALL_EXIT", "DELAYED_TRADE_CLOSURE", "DAILY_FINANCING", "RESET_RESETTABLE_PL"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
@property
def instrument(self):
"""Gets the instrument of this LimitOrderTransaction. # noqa: E501
The Limit Order's Instrument. # noqa: E501
:return: The instrument of this LimitOrderTransaction. # noqa: E501
:rtype: str
"""
return self._instrument
@instrument.setter
def instrument(self, instrument):
"""Sets the instrument of this LimitOrderTransaction.
The Limit Order's Instrument. # noqa: E501
:param instrument: The instrument of this LimitOrderTransaction. # noqa: E501
:type: str
"""
self._instrument = instrument
@property
def units(self):
"""Gets the units of this LimitOrderTransaction. # noqa: E501
The quantity requested to be filled by the Limit Order. A posititive number of units results in a long Order, and a negative number of units results in a short Order. # noqa: E501
:return: The units of this LimitOrderTransaction. # noqa: E501
:rtype: str
"""
return self._units
@units.setter
def units(self, units):
"""Sets the units of this LimitOrderTransaction.
The quantity requested to be filled by the Limit Order. A posititive number of units results in a long Order, and a negative number of units results in a short Order. # noqa: E501
:param units: The units of this LimitOrderTransaction. # noqa: E501
:type: str
"""
self._units = units
@property
def price(self):
"""Gets the price of this LimitOrderTransaction. # noqa: E501
The price threshold specified for the Limit Order. The Limit Order will only be filled by a market price that is equal to or better than this price. # noqa: E501
:return: The price of this LimitOrderTransaction. # noqa: E501
:rtype: str
"""
return self._price
@price.setter
def price(self, price):
"""Sets the price of this LimitOrderTransaction.
The price threshold specified for the Limit Order. The Limit Order will only be filled by a market price that is equal to or better than this price. # noqa: E501
:param price: The price of this LimitOrderTransaction. # noqa: E501
:type: str
"""
self._price = price
@property
def time_in_force(self):
"""Gets the time_in_force of this LimitOrderTransaction. # noqa: E501
The time-in-force requested for the Limit Order. # noqa: E501
:return: The time_in_force of this LimitOrderTransaction. # noqa: E501
:rtype: str
"""
return self._time_in_force
@time_in_force.setter
def time_in_force(self, time_in_force):
"""Sets the time_in_force of this LimitOrderTransaction.
The time-in-force requested for the Limit Order. # noqa: E501
:param time_in_force: The time_in_force of this LimitOrderTransaction. # noqa: E501
:type: str
"""
allowed_values = ["GTC", "GTD", "GFD", "FOK", "IOC"] # noqa: E501
if time_in_force not in allowed_values:
raise ValueError(
"Invalid value for `time_in_force` ({0}), must be one of {1}" # noqa: E501
.format(time_in_force, allowed_values)
)
self._time_in_force = time_in_force
@property
def gtd_time(self):
"""Gets the gtd_time of this LimitOrderTransaction. # noqa: E501
The date/time when the Limit Order will be cancelled if its timeInForce is \"GTD\". # noqa: E501
:return: The gtd_time of this LimitOrderTransaction. # noqa: E501
:rtype: str
"""
return self._gtd_time
@gtd_time.setter
def gtd_time(self, gtd_time):
"""Sets the gtd_time of this LimitOrderTransaction.
The date/time when the Limit Order will be cancelled if its timeInForce is \"GTD\". # noqa: E501
:param gtd_time: The gtd_time of this LimitOrderTransaction. # noqa: E501
:type: str
"""
self._gtd_time = gtd_time
@property
def position_fill(self):
"""Gets the position_fill of this LimitOrderTransaction. # noqa: E501
Specification of how Positions in the Account are modified when the Order is filled. # noqa: E501
:return: The position_fill of this LimitOrderTransaction. # noqa: E501
:rtype: str
"""
return self._position_fill
@position_fill.setter
def position_fill(self, position_fill):
"""Sets the position_fill of this LimitOrderTransaction.
Specification of how Positions in the Account are modified when the Order is filled. # noqa: E501
:param position_fill: The position_fill of this LimitOrderTransaction. # noqa: E501
:type: str
"""
allowed_values = ["OPEN_ONLY", "REDUCE_FIRST", "REDUCE_ONLY", "DEFAULT"] # noqa: E501
if position_fill not in allowed_values:
raise ValueError(
"Invalid value for `position_fill` ({0}), must be one of {1}" # noqa: E501
.format(position_fill, allowed_values)
)
self._position_fill = position_fill
@property
def trigger_condition(self):
"""Gets the trigger_condition of this LimitOrderTransaction. # noqa: E501
Specification of which price component should be used when determining if an Order should be triggered and filled. This allows Orders to be triggered based on the bid, ask, mid, default (ask for buy, bid for sell) or inverse (ask for sell, bid for buy) price depending on the desired behaviour. Orders are always filled using their default price component. This feature is only provided through the REST API. Clients who choose to specify a non-default trigger condition will not see it reflected in any of OANDA's proprietary or partner trading platforms, their transaction history or their account statements. OANDA platforms always assume that an Order's trigger condition is set to the default value when indicating the distance from an Order's trigger price, and will always provide the default trigger condition when creating or modifying an Order. A special restriction applies when creating a guaranteed Stop Loss Order. In this case the TriggerCondition value must either be \"DEFAULT\", or the \"natural\" trigger side \"DEFAULT\" results in. So for a Stop Loss Order for a long trade valid values are \"DEFAULT\" and \"BID\", and for short trades \"DEFAULT\" and \"ASK\" are valid. # noqa: E501
:return: The trigger_condition of this LimitOrderTransaction. # noqa: E501
:rtype: str
"""
return self._trigger_condition
@trigger_condition.setter
def trigger_condition(self, trigger_condition):
"""Sets the trigger_condition of this LimitOrderTransaction.
Specification of which price component should be used when determining if an Order should be triggered and filled. This allows Orders to be triggered based on the bid, ask, mid, default (ask for buy, bid for sell) or inverse (ask for sell, bid for buy) price depending on the desired behaviour. Orders are always filled using their default price component. This feature is only provided through the REST API. Clients who choose to specify a non-default trigger condition will not see it reflected in any of OANDA's proprietary or partner trading platforms, their transaction history or their account statements. OANDA platforms always assume that an Order's trigger condition is set to the default value when indicating the distance from an Order's trigger price, and will always provide the default trigger condition when creating or modifying an Order. A special restriction applies when creating a guaranteed Stop Loss Order. In this case the TriggerCondition value must either be \"DEFAULT\", or the \"natural\" trigger side \"DEFAULT\" results in. So for a Stop Loss Order for a long trade valid values are \"DEFAULT\" and \"BID\", and for short trades \"DEFAULT\" and \"ASK\" are valid. # noqa: E501
:param trigger_condition: The trigger_condition of this LimitOrderTransaction. # noqa: E501
:type: str
"""
allowed_values = ["DEFAULT", "INVERSE", "BID", "ASK", "MID"] # noqa: E501
if trigger_condition not in allowed_values:
raise ValueError(
"Invalid value for `trigger_condition` ({0}), must be one of {1}" # noqa: E501
.format(trigger_condition, allowed_values)
)
self._trigger_condition = trigger_condition
@property
def reason(self):
"""Gets the reason of this LimitOrderTransaction. # noqa: E501
The reason that the Limit Order was initiated # noqa: E501
:return: The reason of this LimitOrderTransaction. # noqa: E501
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""Sets the reason of this LimitOrderTransaction.
The reason that the Limit Order was initiated # noqa: E501
:param reason: The reason of this LimitOrderTransaction. # noqa: E501
:type: str
"""
allowed_values = ["CLIENT_ORDER", "REPLACEMENT"] # noqa: E501
if reason not in allowed_values:
raise ValueError(
"Invalid value for `reason` ({0}), must be one of {1}" # noqa: E501
.format(reason, allowed_values)
)
self._reason = reason
@property
def client_extensions(self):
"""Gets the client_extensions of this LimitOrderTransaction. # noqa: E501
:return: The client_extensions of this LimitOrderTransaction. # noqa: E501
:rtype: ClientExtensions
"""
return self._client_extensions
@client_extensions.setter
def client_extensions(self, client_extensions):
"""Sets the client_extensions of this LimitOrderTransaction.
:param client_extensions: The client_extensions of this LimitOrderTransaction. # noqa: E501
:type: ClientExtensions
"""
self._client_extensions = client_extensions
@property
def take_profit_on_fill(self):
"""Gets the take_profit_on_fill of this LimitOrderTransaction. # noqa: E501
:return: The take_profit_on_fill of this LimitOrderTransaction. # noqa: E501
:rtype: TakeProfitDetails
"""
return self._take_profit_on_fill
@take_profit_on_fill.setter
def take_profit_on_fill(self, take_profit_on_fill):
"""Sets the take_profit_on_fill of this LimitOrderTransaction.
:param take_profit_on_fill: The take_profit_on_fill of this LimitOrderTransaction. # noqa: E501
:type: TakeProfitDetails
"""
self._take_profit_on_fill = take_profit_on_fill
@property
def stop_loss_on_fill(self):
"""Gets the stop_loss_on_fill of this LimitOrderTransaction. # noqa: E501
:return: The stop_loss_on_fill of this LimitOrderTransaction. # noqa: E501
:rtype: StopLossDetails
"""
return self._stop_loss_on_fill
@stop_loss_on_fill.setter
def stop_loss_on_fill(self, stop_loss_on_fill):
"""Sets the stop_loss_on_fill of this LimitOrderTransaction.
:param stop_loss_on_fill: The stop_loss_on_fill of this LimitOrderTransaction. # noqa: E501
:type: StopLossDetails
"""
self._stop_loss_on_fill = stop_loss_on_fill
    @property
    def trailing_stop_loss_on_fill(self):
        """Gets the trailing_stop_loss_on_fill of this LimitOrderTransaction.  # noqa: E501

        :return: The trailing_stop_loss_on_fill of this LimitOrderTransaction.  # noqa: E501
        :rtype: TrailingStopLossDetails
        """
        # Plain accessor over the private backing field.
        return self._trailing_stop_loss_on_fill
    @trailing_stop_loss_on_fill.setter
    def trailing_stop_loss_on_fill(self, trailing_stop_loss_on_fill):
        """Sets the trailing_stop_loss_on_fill of this LimitOrderTransaction.

        :param trailing_stop_loss_on_fill: The trailing_stop_loss_on_fill of this LimitOrderTransaction.  # noqa: E501
        :type: TrailingStopLossDetails
        """
        # Stored verbatim; no validation is applied by the generated code.
        self._trailing_stop_loss_on_fill = trailing_stop_loss_on_fill
    @property
    def trade_client_extensions(self):
        """Gets the trade_client_extensions of this LimitOrderTransaction.  # noqa: E501

        :return: The trade_client_extensions of this LimitOrderTransaction.  # noqa: E501
        :rtype: ClientExtensions
        """
        # Plain accessor over the private backing field.
        return self._trade_client_extensions
    @trade_client_extensions.setter
    def trade_client_extensions(self, trade_client_extensions):
        """Sets the trade_client_extensions of this LimitOrderTransaction.

        :param trade_client_extensions: The trade_client_extensions of this LimitOrderTransaction.  # noqa: E501
        :type: ClientExtensions
        """
        # Stored verbatim; no validation is applied by the generated code.
        self._trade_client_extensions = trade_client_extensions
    @property
    def replaces_order_id(self):
        """Gets the replaces_order_id of this LimitOrderTransaction.  # noqa: E501

        The ID of the Order that this Order replaces (only provided if this Order replaces an existing Order).  # noqa: E501

        :return: The replaces_order_id of this LimitOrderTransaction.  # noqa: E501
        :rtype: str
        """
        # Plain accessor; may be unset when this Order does not replace another.
        return self._replaces_order_id
    @replaces_order_id.setter
    def replaces_order_id(self, replaces_order_id):
        """Sets the replaces_order_id of this LimitOrderTransaction.

        The ID of the Order that this Order replaces (only provided if this Order replaces an existing Order).  # noqa: E501

        :param replaces_order_id: The replaces_order_id of this LimitOrderTransaction.  # noqa: E501
        :type: str
        """
        # Stored verbatim; no validation is applied by the generated code.
        self._replaces_order_id = replaces_order_id
    @property
    def cancelling_transaction_id(self):
        """Gets the cancelling_transaction_id of this LimitOrderTransaction.  # noqa: E501

        The ID of the Transaction that cancels the replaced Order (only provided if this Order replaces an existing Order).  # noqa: E501

        :return: The cancelling_transaction_id of this LimitOrderTransaction.  # noqa: E501
        :rtype: str
        """
        # Plain accessor; may be unset when this Order does not replace another.
        return self._cancelling_transaction_id
    @cancelling_transaction_id.setter
    def cancelling_transaction_id(self, cancelling_transaction_id):
        """Sets the cancelling_transaction_id of this LimitOrderTransaction.

        The ID of the Transaction that cancels the replaced Order (only provided if this Order replaces an existing Order).  # noqa: E501

        :param cancelling_transaction_id: The cancelling_transaction_id of this LimitOrderTransaction.  # noqa: E501
        :type: str
        """
        # Stored verbatim; no validation is applied by the generated code.
        self._cancelling_transaction_id = cancelling_transaction_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
    def to_str(self):
        """Returns the string representation of the model"""
        # Pretty-print the plain-dict form produced by to_dict().
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        # Delegate to to_str() so repr(), print() and pprint() all agree.
        return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, LimitOrderTransaction):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 38.745879 | 1,214 | 0.659269 |
acf48cc67fa7249f8b782e86d8986462fb84d79b | 1,771 | py | Python | game_of_pyfe/utils.py | jglezt/game-of-pyfe | 4a8865617436cefa1ecd93188787ee343d7bc2b2 | [
"MIT"
] | null | null | null | game_of_pyfe/utils.py | jglezt/game-of-pyfe | 4a8865617436cefa1ecd93188787ee343d7bc2b2 | [
"MIT"
] | null | null | null | game_of_pyfe/utils.py | jglezt/game-of-pyfe | 4a8865617436cefa1ecd93188787ee343d7bc2b2 | [
"MIT"
] | null | null | null | """
Utilities for game of pyfe.
"""
import os
from typing import List
import numpy as np
def cls():
    """Clear the terminal screen (Windows ``cls``, otherwise ``clear``)."""
    command = "cls" if os.name == "nt" else "clear"
    os.system(command)
def validate_board(board: np.ndarray) -> np.ndarray:
    """Validate the Game of life board.

    Validates that the board complies with the following parameters.
    1. Has shape of (n, m) with n, m >= 2.
    2. Only contains 1's (life) and 0's (death).

    Arguments
    ---------
    board: Game of life board.

    Raises
    ------
    TypeError if it does not comply with the shape requirement.
    ValueError if it does not hold only 1's and 0's.

    Returns
    -------
    The original board.
    """
    board_shape = board.shape
    if not (len(board_shape) == 2 and board_shape[0] >= 2 and board_shape[1] >= 2):
        raise TypeError("board does not contain the correct shape")
    # Vectorized replacement for the former nested Python loop; argwhere
    # returns indices in row-major order, so the first hit matches the cell
    # the original loop would have reported.
    invalid = np.argwhere((board != 0) & (board != 1))
    if invalid.size:
        i, j = invalid[0]
        raise ValueError(
            "Board contains a {} in index [{}, {}]".format(board[i][j], i, j)
        )
    return board
def create_printable_board(board: np.array) -> List[List[int]]:
    """Translate a board into terminal-printable character codes.

    Dead cells (0) become the code point for a space (32) and live cells (1)
    become the code point for a full block (9608).

    Arguments
    ---------
    board: Game of life board.

    Returns
    -------
    A list of rows, each a list of integer code points describing the
    life state of every cell.
    """
    space_code = 32
    block_code = 9608
    printable = board.copy()
    printable[printable == 0] = space_code
    printable[printable == 1] = block_code
    return printable.tolist()
| 23.932432 | 85 | 0.607566 |
acf48d83117e1576be6ffdadec40e87048b8b68e | 2,774 | py | Python | sdks/python/apache_beam/runners/direct/consumer_tracking_pipeline_visitor.py | jxub/beam | 8222fcc978a54d98d385c108fb5fcf7615d74829 | [
"Apache-2.0"
] | null | null | null | sdks/python/apache_beam/runners/direct/consumer_tracking_pipeline_visitor.py | jxub/beam | 8222fcc978a54d98d385c108fb5fcf7615d74829 | [
"Apache-2.0"
] | 71 | 2018-05-23T22:20:02.000Z | 2019-04-30T15:37:46.000Z | sdks/python/apache_beam/runners/direct/consumer_tracking_pipeline_visitor.py | jxub/beam | 8222fcc978a54d98d385c108fb5fcf7615d74829 | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""ConsumerTrackingPipelineVisitor, a PipelineVisitor object."""
# pytype: skip-file
from __future__ import absolute_import
from typing import TYPE_CHECKING
from typing import Dict
from typing import List
from typing import Set
from apache_beam import pvalue
from apache_beam.pipeline import PipelineVisitor
if TYPE_CHECKING:
from apache_beam.pipeline import AppliedPTransform
class ConsumerTrackingPipelineVisitor(PipelineVisitor):
  """For internal use only; no backwards-compatibility guarantees.

  Visitor for extracting value-consumer relations from the graph.

  Tracks the AppliedPTransforms that consume each PValue in the Pipeline.
  This is used to schedule consuming PTransforms to consume input after the
  upstream transform has produced and committed output.
  """
  def __init__(self):
    # Maps each PValue to the transforms that read it.
    self.value_to_consumers = {
    }  # type: Dict[pvalue.PValue, List[AppliedPTransform]]
    # Transforms that start the pipeline (no inputs, or a PBegin input).
    self.root_transforms = set()  # type: Set[AppliedPTransform]
    # Short, stable step name ('s0', 's1', ...) per visited transform.
    self.step_names = {}  # type: Dict[AppliedPTransform, str]
    self._num_transforms = 0
    self._views = set()

  @property
  def views(self):
    """Returns a list of side inputs extracted from the graph.

    Returns:
      A list of pvalue.AsSideInput.
    """
    return list(self._views)

  def visit_transform(self, applied_ptransform):
    # type: (AppliedPTransform) -> None
    inputs = list(applied_ptransform.inputs)
    if not inputs:
      # No inputs at all: this transform is a root of the pipeline.
      self.root_transforms.add(applied_ptransform)
    else:
      for input_value in inputs:
        if isinstance(input_value, pvalue.PBegin):
          self.root_transforms.add(applied_ptransform)
        self.value_to_consumers.setdefault(input_value, []).append(
            applied_ptransform)
    self.step_names[applied_ptransform] = 's%d' % self._num_transforms
    self._num_transforms += 1
    self._views.update(applied_ptransform.side_inputs)
| 34.675 | 79 | 0.753064 |
acf48d9d912a1d0ad07f8119b78067636aee54cd | 3,019 | py | Python | mlflow/entities/run_data.py | akarloff/mlflow | be9774a76b4b6dcdb8cc2147a93d7c8676438292 | [
"Apache-2.0"
] | 3 | 2019-10-07T01:12:25.000Z | 2020-07-06T04:27:51.000Z | mlflow/entities/run_data.py | akarloff/mlflow | be9774a76b4b6dcdb8cc2147a93d7c8676438292 | [
"Apache-2.0"
] | 15 | 2019-10-07T01:11:46.000Z | 2022-03-08T23:33:53.000Z | mlflow/entities/run_data.py | akarloff/mlflow | be9774a76b4b6dcdb8cc2147a93d7c8676438292 | [
"Apache-2.0"
] | 6 | 2019-11-28T13:23:35.000Z | 2020-07-08T19:22:12.000Z | from mlflow.entities._mlflow_object import _MLflowObject
from mlflow.entities.metric import Metric
from mlflow.entities.param import Param
from mlflow.entities.run_tag import RunTag
from mlflow.protos.service_pb2 import RunData as ProtoRunData, Param as ProtoParam,\
RunTag as ProtoRunTag
class RunData(_MLflowObject):
    """
    Run data (metrics and parameters).
    """

    def __init__(self, metrics=None, params=None, tags=None):
        """
        Construct a new :py:class:`mlflow.entities.RunData` instance.

        :param metrics: List of :py:class:`mlflow.entities.Metric`.
        :param params: List of :py:class:`mlflow.entities.Param`.
        :param tags: List of :py:class:`mlflow.entities.RunTag`.
        """
        # The raw Metric objects are retained so that to_proto() can emit
        # them without having to rebuild full Metric instances.
        self._metric_objs = metrics or []
        self._metrics = dict((m.key, m.value) for m in self._metric_objs)
        self._params = dict((p.key, p.value) for p in (params or []))
        self._tags = dict((t.key, t.value) for t in (tags or []))

    @property
    def metrics(self):
        """
        Dictionary of string key -> metric value for the current run.

        For each metric key, the metric value with the latest timestamp is
        returned. In case there are multiple values with the same latest
        timestamp, the maximum of these values is returned.
        """
        return self._metrics

    @property
    def params(self):
        """Dictionary of param key (string) -> param value for the current run."""
        return self._params

    @property
    def tags(self):
        """Dictionary of tag key (string) -> tag value for the current run."""
        return self._tags

    def _add_metric(self, metric):
        # Track both the raw object and its key/value projection.
        self._metric_objs.append(metric)
        self._metrics[metric.key] = metric.value

    def _add_param(self, param):
        self._params[param.key] = param.value

    def _add_tag(self, tag):
        self._tags[tag.key] = tag.value

    def to_proto(self):
        """Serialize this instance into a ``RunData`` protobuf message."""
        proto_data = ProtoRunData()
        proto_data.metrics.extend([m.to_proto() for m in self._metric_objs])
        proto_data.params.extend(
            [ProtoParam(key=k, value=v) for k, v in self.params.items()])
        proto_data.tags.extend(
            [ProtoRunTag(key=k, value=v) for k, v in self.tags.items()])
        return proto_data

    def to_dictionary(self):
        """Return the run data as a plain ``dict`` of dicts."""
        return dict(
            metrics=self.metrics,
            params=self.params,
            tags=self.tags,
        )

    @classmethod
    def from_proto(cls, proto):
        """Build a :py:class:`RunData` from a ``RunData`` protobuf message."""
        run_data = cls()
        for metric_msg in proto.metrics:
            run_data._add_metric(Metric.from_proto(metric_msg))
        for param_msg in proto.params:
            run_data._add_param(Param.from_proto(param_msg))
        for tag_msg in proto.tags:
            run_data._add_tag(RunTag.from_proto(tag_msg))
        return run_data
| 37.271605 | 100 | 0.650547 |
acf48f8d35810dbecaab39c6af7a66d21e5e05ec | 1,748 | py | Python | parameter/vh.py | andyfaff/avant | b5a72db1d82fd9d722557dded08fb76dac0f7dad | [
"MIT"
] | null | null | null | parameter/vh.py | andyfaff/avant | b5a72db1d82fd9d722557dded08fb76dac0f7dad | [
"MIT"
] | null | null | null | parameter/vh.py | andyfaff/avant | b5a72db1d82fd9d722557dded08fb76dac0f7dad | [
"MIT"
] | 1 | 2021-09-14T02:15:15.000Z | 2021-09-14T02:15:15.000Z | from .util import findGauss, findUniform
import matplotlib.pyplot as plt
import numpy as np
def Gauss(name):
    """Return the Gaussian prior on the head volume ``v_h`` of a molecule.

    Input: name of molecule
    Output: Gauss prior object containing pdf, logpdf, cdf, ppf and rvs methods
    """
    return findGauss(name, 'v_h')
def uniform(name):
    """Return the uniform-prior bounds on ``v_h`` for a molecule.

    Input: name of molecule
    Output: 2-D array whose lower and upper bound are read as
        ``bounds[0, 0]`` and ``bounds[0, 1]``
    """
    return findUniform(name, 'v_h')
def plotGauss(name):  # pragma: no cover
    """Plot the Gaussian prior pdf for the head volume of ``name``.

    Input: name of molecule
    Output: matplotlib.pyplot graph of the given prior
    """
    # Sample the x axis between the uniform-prior bounds.
    bounds = uniform(name)
    lower = bounds[0, 0]
    upper = bounds[0, 1]
    xs = np.linspace(lower, upper, 100)
    # Draw the Gaussian prior over that range.
    prior = Gauss(name)
    plt.xlabel('Head Volume [Å]')
    plt.ylabel('pdf')
    plt.title(name)
    plt.plot(xs, prior.pdf(xs))
    plt.show()
def plotUniform(name):  # pragma: no cover
    """Plot the uniform prior pdf for the head volume of ``name``.

    The x range extends beyond the bounds (0.5*lb .. 1.3*ub) so the step
    edges of the distribution are visible.
    """
    bounds = uniform(name)
    lower = bounds[0, 0]
    upper = bounds[0, 1]
    xs = np.linspace(0.5 * lower, 1.3 * upper, 100)
    # 1.0 inside [lower, upper], 0.0 outside.
    ys = np.where((xs >= lower) & (xs <= upper), 1.0, 0.0)
    plt.xlabel('Head volume [Å]')
    plt.ylabel('pdf')
    plt.title(name)
    plt.plot(xs, ys)
    plt.show()
| 26.892308 | 100 | 0.622998 |
acf48fa1fb39fd69836f21d5d87af4869b983cc6 | 5,563 | py | Python | gaphor/UML/actions/actionstoolbox.py | Texopolis/gaphor | 3b190620075fd413258af1e7a007b4b2167a7564 | [
"Apache-2.0"
] | 1 | 2022-01-30T15:33:53.000Z | 2022-01-30T15:33:53.000Z | gaphor/UML/actions/actionstoolbox.py | burakozturk16/gaphor | 86267a5200ac4439626d35d306dbb376c3800107 | [
"Apache-2.0"
] | null | null | null | gaphor/UML/actions/actionstoolbox.py | burakozturk16/gaphor | 86267a5200ac4439626d35d306dbb376c3800107 | [
"Apache-2.0"
] | 1 | 2022-01-23T18:36:27.000Z | 2022-01-23T18:36:27.000Z | """The definition for the actions section of the toolbox."""
from functools import partial
from gaphas.item import SE
from gaphor import UML
from gaphor.core import gettext
from gaphor.diagram.diagramtoolbox import ToolDef, ToolSection, new_item_factory
from gaphor.UML import diagramitems
from gaphor.UML.recipes import owner_package
def activity_config(new_item, name=None):
    """Attach a newly placed activity node to an owning UML Activity.

    When ``name`` is given, the subject is renamed to "New <name>". If the
    subject is not yet linked to an Activity, an existing Activity in the
    diagram's package (or a top-level one when there is no package) is
    reused; otherwise a fresh Activity is created in that package.
    """
    subject = new_item.subject
    if name:
        subject.name = gettext("New {name}").format(name=name)
    if subject.activity:
        return
    diagram = new_item.diagram
    package = owner_package(diagram.owner)
    if package:
        candidates = [i for i in package.ownedType if isinstance(i, UML.Activity)]
    else:
        candidates = diagram.model.lselect(
            lambda e: isinstance(e, UML.Activity) and e.package is None
        )
    if candidates:
        subject.activity = candidates[0]
        return
    activity = subject.model.create(UML.Activity)
    activity.name = gettext("Activity")
    activity.package = package
    subject.activity = activity
def partition_config(new_item):
    """Configure a new partition item with two named swimlanes."""
    activity_config(new_item)
    first = new_item.subject
    first.name = gettext("Swimlane One")
    new_item.partition = first
    # The second lane is a fresh partition bound to the same activity.
    second = first.model.create(UML.ActivityPartition)
    second.name = gettext("Swimlane Two")
    second.activity = first.activity
    new_item.partition = second
# Toolbox section for activity diagrams. Each ToolDef pairs an action id, a
# translated label, an icon name, an optional keyboard shortcut, and an item
# factory; config_func (where present) attaches the new node to a UML
# Activity via activity_config / partition_config above.
actions = ToolSection(
    gettext("Actions"),
    (
        ToolDef(
            "toolbox-action",
            gettext("Action"),
            "gaphor-action-symbolic",
            "a",
            new_item_factory(
                diagramitems.ActionItem,
                UML.Action,
                config_func=partial(activity_config, name=gettext("Action")),
            ),
            handle_index=SE,
        ),
        ToolDef(
            "toolbox-initial-node",
            gettext("Initial node"),
            "gaphor-initial-node-symbolic",
            "j",
            new_item_factory(
                diagramitems.InitialNodeItem,
                UML.InitialNode,
                config_func=activity_config,
            ),
            handle_index=SE,
        ),
        ToolDef(
            "toolbox-activity-final-node",
            gettext("Activity final node"),
            "gaphor-activity-final-node-symbolic",
            "f",
            new_item_factory(
                diagramitems.ActivityFinalNodeItem,
                UML.ActivityFinalNode,
                config_func=activity_config,
            ),
            handle_index=SE,
        ),
        ToolDef(
            "toolbox-flow-final-node",
            gettext("Flow final node"),
            "gaphor-flow-final-node-symbolic",
            "w",
            new_item_factory(
                diagramitems.FlowFinalNodeItem,
                UML.FlowFinalNode,
                config_func=activity_config,
            ),
            handle_index=SE,
        ),
        ToolDef(
            "toolbox-decision-node",
            gettext("Decision/merge node"),
            "gaphor-decision-node-symbolic",
            "g",
            new_item_factory(
                diagramitems.DecisionNodeItem,
                UML.DecisionNode,
                config_func=activity_config,
            ),
            handle_index=SE,
        ),
        ToolDef(
            "toolbox-fork-node",
            gettext("Fork/join node"),
            "gaphor-fork-node-symbolic",
            "<Shift>R",
            new_item_factory(
                diagramitems.ForkNodeItem,
                UML.JoinNode,
                config_func=activity_config,
            ),
            # Fork/join uses its second handle, not the south-east one.
            handle_index=1,
        ),
        ToolDef(
            "toolbox-object-node",
            gettext("Object node"),
            "gaphor-object-node-symbolic",
            "<Shift>O",
            new_item_factory(
                diagramitems.ObjectNodeItem,
                UML.ObjectNode,
                config_func=partial(activity_config, name=gettext("Object node")),
            ),
            handle_index=SE,
        ),
        ToolDef(
            "toolbox-partition",
            gettext("Swimlane"),
            "gaphor-activity-partition-symbolic",
            "<Shift>P",
            new_item_factory(
                diagramitems.PartitionItem,
                UML.ActivityPartition,
                config_func=partition_config,
            ),
            handle_index=SE,
        ),
        ToolDef(
            "toolbox-flow",
            gettext("Control/object flow"),
            "gaphor-control-flow-symbolic",
            "<Shift>F",
            new_item_factory(diagramitems.FlowItem),
        ),
        ToolDef(
            "toolbox-send-signal-action",
            gettext("Send signal action"),
            "gaphor-send-signal-action-symbolic",
            None,
            new_item_factory(
                diagramitems.SendSignalActionItem,
                UML.SendSignalAction,
                config_func=partial(activity_config, name=gettext("Send signal")),
            ),
            handle_index=SE,
        ),
        ToolDef(
            "toolbox-accept-event-action",
            gettext("Accept event action"),
            "gaphor-accept-event-action-symbolic",
            None,
            new_item_factory(
                diagramitems.AcceptEventActionItem,
                UML.AcceptEventAction,
                config_func=partial(activity_config, name=gettext("Accept event")),
            ),
            handle_index=SE,
        ),
    ),
)
| 30.398907 | 83 | 0.540176 |
acf48fc016b2ebd1fcc8078963a5aa51625f7681 | 307 | py | Python | contributions/anwar.py | praneeth2yadav/Devathon-21 | 16bce7a7725f72ccfbf4211ebeb004de1292c1ed | [
"MIT"
] | 1 | 2021-10-03T07:23:47.000Z | 2021-10-03T07:23:47.000Z | contributions/anwar.py | praneeth2yadav/Devathon-21 | 16bce7a7725f72ccfbf4211ebeb004de1292c1ed | [
"MIT"
] | null | null | null | contributions/anwar.py | praneeth2yadav/Devathon-21 | 16bce7a7725f72ccfbf4211ebeb004de1292c1ed | [
"MIT"
] | null | null | null | greet = "Hello world!"
print(greet)
space = 0
for letter in greet[1:]:
print(letter + (" " * space) + letter)
space += 1
print("\nI became (or am becoming) a programmer the shine a light in a dark room of numbers")
print("Thanks for viewing and to Justen Phelps for creating the repo!")
input()
| 21.928571 | 93 | 0.674267 |
acf48fd97a8fc761d8c091e81f5c1d92fde22538 | 13,939 | py | Python | utils/FeatureGenerator.py | dair-iitd/LocationTagger | 4485ac5f2e8e74b213387b3312116c9465a8795f | [
"Apache-2.0"
] | 2 | 2021-03-01T09:41:04.000Z | 2021-11-15T02:07:44.000Z | utils/FeatureGenerator.py | dair-iitd/LocationTagger | 4485ac5f2e8e74b213387b3312116c9465a8795f | [
"Apache-2.0"
] | null | null | null | utils/FeatureGenerator.py | dair-iitd/LocationTagger | 4485ac5f2e8e74b213387b3312116c9465a8795f | [
"Apache-2.0"
] | null | null | null | import re
import os
import sys
import json
import tqdm
import spacy
import pickle
import argparse
import functools
import itertools
from rake_nltk import Rake
from collections import defaultdict
# from gensim.models import Word2Vec
from nltk.parse import CoreNLPParser
# from nltk.parse.corenlp import CoreNLPDependencyParser
from utils import common
class FeatureBuilder:
    """Builds per-token feature lists for a text using CoreNLP taggers.

    Each token of the input text maps to a list whose first element is the
    token itself, followed by string feature flags appended by the ``set*``
    helpers inside :meth:`buildFeatures`.
    """

    def __init__(self, options):
        # `options` must carry the resource paths plus the CoreNLP
        # tokenizer/POS/NER clients and a Rake instance (see getFeatures).
        self.options = options
        # NOTE(review): readlines() keeps trailing newlines, but the phrase
        # lookup in setDescriptivePhrases uses keys without one — confirm
        # the intended file format (one phrase per line).
        self.descriptive_phrases = set(open(self.options.descriptive_phrases_path, "r").readlines())
        # word -> "G<group-id>": cluster/group id feature per known word.
        self.word_vectors = {word: "G" + groupid for word, groupid in [line.strip().split(" ") for line in open(options.word_vectors_path, "r").readlines()]}
        # Stop words, lower-cased; lines starting with "//" are comments.
        self.stop_words = [line.strip().lower() for line in open(options.stop_words_path, "r").readlines() if not line.startswith("//")]

    def buildFeatures(self, text):
        """Tokenize, tag and featurize ``text``.

        Returns a list of ``[token, feature, ...]`` lists, one per token.
        Requires a running StanfordCoreNLP server on localhost:9000; the
        broad except below turns any tagging failure into a hint message.
        """
        try:
            tokens = list(self.options.tok_parser.tokenize(text))
            pos_tags = list(self.options.pos_tagger.tag(tokens))
            ner_tags = list(self.options.ner_tagger.tag(tokens))
            # Re-derive tokens from the POS output so both stay aligned.
            tokens = list([token for token, tag in pos_tags])
        except:
            error = ""
            error += "Please start the StanfordCoreNLPServer!" + "\n"
            error += "cd /home/goelshashank007/Documents/btp/java/stanford-corenlp-full-2018-10-05/" + "\n"
            error += "Start the CoreNLP Server: java -mx4g -cp \"*\" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -preload tokenize,pos,ner,depparse -status_port 9000 -port 9000 -timeout 15000" + "\n"
            error += "Stop the CoreNLP Server: wget \"localhost:9000/shutdown?key=`cat /tmp/corenlp.shutdown`\" -O -"
            raise Exception(error)
        # One feature list per token; the token itself is element 0.
        self.features = [[token] for token in tokens]

        def setFirstCharCaps():
            # Capitalized tokens, except at position 0 or right after
            # sentence-ending punctuation.
            for i, token in enumerate(tokens):
                if(token[0].isupper()):
                    if((i == 0) or (tokens[i - 1] == "?") or (tokens[i - 1] == ".") or (tokens[i - 1] == "!")):
                        continue
                    self.features[i].append("FIRST_CHAR_CAPS")

        def setNumbers():
            # Integer/float/scientific-notation literals.
            for i, token in enumerate(tokens):
                if(re.match(r"[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?", token)):
                    self.features[i].append("IS_NUMBER")

        def setPrevPosTag():
            # Each token carries the POS tag of its predecessor ("NA" first).
            self.features[0].append("NA")
            for i, (token, tag) in enumerate(pos_tags[:-1]):
                self.features[i + 1].append(tag.lower())

        def setNoun():
            # "NOUN_P" for proper nouns (nnp), "NOUN" for the other nn* tags.
            for i, (token, tag) in enumerate(pos_tags):
                tag = tag.lower()
                if(tag.startswith("nn")):
                    self.features[i].append("NOUN_P" if tag == "nnp" else "NOUN")

        def setAdjectives():
            for i, (token, tag) in enumerate(pos_tags):
                tag = tag.lower()
                if(tag.startswith("jj")):
                    self.features[i].append("ADJ")

        def setNerTags():
            # Propagate any non-"O" named-entity tag as a feature.
            for i, (token, tag) in enumerate(ner_tags):
                if(tag != "O"):
                    self.features[i].append(tag)

        def setDescriptivePhrases():
            # Flags adjective+noun bigrams (skipping 1-char tokens between
            # them) and bigrams found in the descriptive-phrases lexicon.
            i = 0
            while(i < len(tokens) - 1):
                token = tokens[i]
                tag = pos_tags[i][1].lower()
                k = 1
                while(1):
                    token_plus_k = tokens[i + k]
                    tag_plus_k = pos_tags[i + k][1].lower()
                    if(len(token_plus_k) > 1):
                        break
                    k += 1
                    if(i + k == len(tokens)):
                        break
                if(i + k < len(tokens)):
                    if(((tag == "jj") and (tag_plus_k in ["nn", "nns", "nnp"])) or ((token + " " + token_plus_k).lower() in self.descriptive_phrases)):
                        self.features[i].append("DESC_PHRASE")
                        self.features[i + k].append("DESC_PHRASE")
                        # Skip past the matched phrase.
                        i += k
                i += 1

        def setWhInducedTarget():
            # WH-word/noun + "to" + verb patterns ("where to eat", ...) mark
            # a target span; a following noun is included as well.
            for i in range(len(tokens)):
                if(i < len(tokens) - 3):
                    if(pos_tags[i][1].lower().startswith("w") and (not tokens[i].lower() == "who") and pos_tags[i + 1][1].lower() == "to" and pos_tags[i + 2][1].lower().startswith("v") or (pos_tags[i][1].lower().startswith("n") and pos_tags[i + 1][1].lower() == "to" and pos_tags[i + 2][1].lower().startswith("v"))):
                        self.features[i].append("WH_INDUCED_TARGET")
                        self.features[i + 1].append("WH_INDUCED_TARGET")
                        self.features[i + 2].append("WH_INDUCED_TARGET")
                        if (i < len(tokens) - 4):
                            if(pos_tags[i + 3][1].lower().startswith("n")):
                                self.features[i + 3].append("WH_INDUCED_TARGET")

        def setTypeIndicatorBasedOnVerb():
            # Rake keyphrases with score > 4 mark their member tokens; any
            # failure (e.g. empty text) is deliberately ignored.
            try:
                text = " ".join(tokens).lower()
                self.options.rake.extract_keywords_from_text(text)
                phrases = [phrase for score, phrase in self.options.rake.get_ranked_phrases_with_scores() if score > 4.0]
                positions = [(text[:re.search(phrase, text).start()].count(" ") + index) for phrase in phrases for index in range(phrase.count(" ") + 1) if phrase in text]
                for i in positions:
                    self.features[i].append("TYPE_INDICATOR_BASED_ON_VB")
            except:
                pass

        def setWordAfterNumber():
            for i in range(1, len(tokens)):
                if("IS_NUMBER" in self.features[i - 1]):
                    self.features[i].append("wordAfterNUM")

        def setTypeIndPlusWhCombo():
            # Conjunction of the two earlier indicator features.
            for i in range(len(tokens)):
                if("TYPE_INDICATOR_BASED_ON_VB" in self.features[i] and "WH_INDUCED_TARGET" in self.features[i]):
                    self.features[i].append("TYPE_INDICATOR_VB_PLUS_WH")

        def setWordVector():
            # Word-cluster group id ("G<id>") for known words.
            for i, token in enumerate(tokens):
                if(token in self.word_vectors):
                    self.features[i].append(self.word_vectors[token])

        def setWordCount():
            # Frequency of each non-stop-word token within this text.
            word_count = defaultdict(int)
            for i, token in enumerate(tokens):
                if(token in self.stop_words):
                    continue
                word_count[token] += 1
            for i, token in enumerate(tokens):
                if(token in word_count):
                    self.features[i].append("NUM_%d" % word_count[token])

        # Order matters: setWordAfterNumber and setTypeIndPlusWhCombo read
        # features written by earlier helpers.
        setFirstCharCaps()
        setNumbers()
        setPrevPosTag()
        setNoun()
        setAdjectives()
        setNerTags()
        setDescriptivePhrases()
        setWhInducedTarget()
        setTypeIndicatorBasedOnVerb()
        setWordAfterNumber()
        setTypeIndPlusWhCombo()
        setWordVector()
        setWordCount()
        return self.features
class FeatureProcessor:
    """Post-processes the token feature lists produced by FeatureBuilder."""

    def __init__(self, options):
        # `options` provides the spaCy pipeline used by processSentences1.
        self.options = options

    def processFeatures(self, features):
        """Split the feature list into sentences, add dependency-based
        flags, and return the re-joined flat list of token features."""
        self.features = features

        def sentenceSplitter():
            # groupby alternates runs of non-terminator and terminator
            # tokens; zip_longest glues each run back to its punctuation.
            self.sentences = [list(x[1]) for x in itertools.groupby(self.features, lambda l: l[0] in set(['.', '?','!',';']))]
            self.sentences = list(map(lambda x: x[0] + x[1], itertools.zip_longest(self.sentences[::2], self.sentences[1::2], fillvalue = [])))

        def processSentences1():
            # Mark content words whose dependency-head chain (per spaCy)
            # passes through a TYPE_INDICATOR word of the same sentence.
            allowed = set(['ADJ', 'NOUN', 'PROPN'])
            notallowed = set(['PRP$', 'PDT', 'WDT', 'WP$'])
            for i in range(len(self.sentences)):
                sentence = self.sentences[i]
                type_indicator_words = set([features[0] for features in sentence if any("TYPE_INDICATOR" in feature for feature in features)])
                if(not type_indicator_words):
                    continue
                text = " ".join([features[0] for features in sentence])
                document = self.options.nlp(text)
                # NOTE(review): zip assumes spaCy re-tokenizes `text` into
                # exactly the same tokens as CoreNLP did — confirm.
                for index, (token, features) in enumerate(zip(document, sentence)):
                    if((token.pos_ in allowed) and (not token.text in type_indicator_words) and (not token.tag_ in notallowed)):
                        head = token
                        while(head.head != head):
                            if(head.text in type_indicator_words):
                                sentence[index].append("PREDICTED_ATTR_BY_FEAT")
                                break
                            head = head.head
                self.sentences[i] = sentence

        # def processSentences2():
        #     def checkSimilar(word1, word2):
        #         try:
        #             if(word1 == word2):
        #                 return 1
        #             for word, score in self.gensim_model.wv.similar_by_vector(word1):
        #                 if(word2 == word):
        #                     return 1
        #             return 0
        #         except:
        #             return 0
        #
        #     for i, sentence in enumerate(self.sentences):
        #         string = " ".join([features[0] for features in sentence])
        #         search_for_to = 0
        #         in_location_indicators, going_location_indicators = {}, {}
        #         in_word_in_consideration, going_word_in_consideration = "", ""
        #
        #         dependency_parse = self.options.dep_parser.raw_parse(string).__next__().triples()
        #         for head, tag, body in dependency_parse:
        #             if(checkSimilar(head[0], "travelling") and body[1] == "TO"):
        #                 search_for_to = 1
        #             if(search_for_to and (head[1] == "TO") and (tag in ["pobj", "dep"])):
        #                 search_for_to = 0
        #                 if("NN" in body[1]):
        #                     going_location_indicators[body[0]] = body[1]
        #                     going_word_in_consideration = body[0]
        #             elif(going_word_in_consideration and (head[0] == going_word_in_consideration)):
        #                 if(tag in ["nn", "dobj", "dep"]):
        #                     going_location_indicators[body[0]] = body[1]
        #             if((checkSimilar(head[0], "in") or checkSimilar(head[0], "near")) and (tag in ["pobj", "dep"])):
        #                 if("NN" in body[1]):
        #                     in_location_indicators[body[0]] = body[1]
        #                     in_word_in_consideration = body[0]
        #             elif(in_word_in_consideration and (head[0] == in_word_in_consideration)):
        #                 if(tag in ["nn", "dobj", "dep"]):
        #                     in_location_indicators[body[0]] = body[1]
        #
        #         for j, features in enumerate(sentence):
        #             if((features[0] in in_location_indicators) and (" DATE " not in features)):
        #                 features.append("IN_LOC_INDICATOR")
        #                 features.append(in_location_indicators[features[0]])
        #             if((features[0] in going_location_indicators) and (" DATE " not in features)):
        #                 features.append("GOING_LOC_INDICATOR")
        #                 features.append(going_location_indicators[features[0]])
        #             sentence[j] = features
        #
        #         self.sentences[i] = sentence

        sentenceSplitter()
        processSentences1()
        # processSentences2()
        # Flatten the sentences back into one token-feature list.
        self.features = list(itertools.chain.from_iterable(self.sentences))
        return self.features
def getFeatures(sentences, labels = None):
    """Convert sentences (and optional per-token labels) to CoNLL-style text.

    :param sentences: iterable of raw sentence strings.
    :param labels: optional list, parallel to ``sentences``, of per-token
        label lists; when omitted every token is labelled "O".
    :return: one "token FEAT... label" line per token; tokens separated by
        newlines and sentences by blank lines.

    Requires a running StanfordCoreNLP server on localhost:9000 and the
    spaCy model ``en_core_web_sm``.
    """
    project_root_path = common.getProjectRootPath()
    defaults = {}
    defaults["descriptive_phrases_path"] = project_root_path / "data/features/DescriptivePhrases.txt"
    defaults["word_vectors_path"] = project_root_path / "data/features/WordVectors.txt"
    defaults["stop_words_path"] = project_root_path / "data/features/StopWords.txt"
    # defaults["gensim_model_path"] = project_root_path / "data/features/Gensim.model"
    defaults["allowed_features_file_path"] = project_root_path / "data/features/features.txt"

    parser = argparse.ArgumentParser()
    parser.add_argument("--descriptive_phrases_path", type = str, default = str(defaults["descriptive_phrases_path"]))
    parser.add_argument("--word_vectors_path", type = str, default = str(defaults["word_vectors_path"]))
    parser.add_argument("--stop_words_path", type = str, default = str(defaults["stop_words_path"]))
    # parser.add_argument("--gensim_model_path", type = str, default = str(defaults["gensim_model_path"]))
    parser.add_argument("--allowed_features_file_path", type = str, default = str(defaults["allowed_features_file_path"]))
    options = parser.parse_args("")

    options.rake = Rake()
    options.nlp = spacy.load("en_core_web_sm")
    # options.gensim = Word2Vec.load(options.gensim_model_path)
    options.tok_parser = CoreNLPParser(url = "http://localhost:9000")
    options.pos_tagger = CoreNLPParser(url = "http://localhost:9000", tagtype = "pos")
    options.ner_tagger = CoreNLPParser(url = "http://localhost:9000", tagtype = "ner")
    # options.dep_parser = CoreNLPDependencyParser(url = "http://localhost:9000")

    allowed_features = set([line.strip() for line in open(options.allowed_features_file_path, "r").readlines()])
    featureBuilder = FeatureBuilder(options)
    featureProcessor = FeatureProcessor(options)

    features = []
    bar = tqdm.tqdm(total = len(sentences))
    if(labels is None):
        # BUG FIX: the old default was the *string* "O" for each sentence,
        # which made zip() below truncate every sentence to a single token.
        # Use a per-sentence sentinel and expand it to one "O" per token.
        labels = [None] * len(sentences)
    for sentence, ilabels in zip(sentences, labels):
        ifeatures = featureBuilder.buildFeatures(sentence)
        ifeatures = featureProcessor.processFeatures(ifeatures)
        if(ilabels is None):
            ilabels = ["O"] * len(ifeatures)
        item = []
        for token_features, label in zip(ifeatures, ilabels):
            item.append(token_features[0] + " " + " ".join([feature for feature in token_features[1:] if feature in allowed_features]) + " " + label)
        features.append("\n".join(item))
        bar.update()
    bar.close()
    return "\n\n".join(features)
| 45.552288 | 316 | 0.559725 |
acf4901a4c5ee5ae5b9d29c07c1842ed22941389 | 57,213 | py | Python | deep_learning/keras/keras/legacy/layers.py | xpennec/applications | 50aefdf14de308fc3c132784ebba9d329e47b087 | [
"MIT"
] | 21 | 2019-01-12T17:59:41.000Z | 2022-03-08T17:42:56.000Z | deep_learning/keras/keras/legacy/layers.py | farrell236/applications | 0e1ab139ade2a0b3ba6f04f6fd93822b1dd5ae2f | [
"MIT"
] | 7 | 2019-01-24T11:44:58.000Z | 2020-04-21T21:13:37.000Z | deep_learning/keras/keras/legacy/layers.py | farrell236/applications | 0e1ab139ade2a0b3ba6f04f6fd93822b1dd5ae2f | [
"MIT"
] | 8 | 2019-01-24T11:36:05.000Z | 2021-06-15T20:59:50.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import types as python_types
import warnings
from ..engine import Layer, InputSpec
from .. import backend as K
from ..utils.generic_utils import func_dump, func_load, has_arg
from ..utils import conv_utils
from .. import regularizers
from .. import constraints
from .. import activations
from .. import initializers
class Merge(Layer):
    """A `Merge` layer can be used to merge a list of tensors
    into a single tensor, following some merge `mode`.
    # Example
    ```python
    model1 = Sequential()
    model1.add(Dense(32, input_dim=32))
    model2 = Sequential()
    model2.add(Dense(32, input_dim=32))
    merged_model = Sequential()
    merged_model.add(Merge([model1, model2], mode='concat', concat_axis=1))
    ```
    # Arguments
        layers: Can be a list of Keras tensors or
            a list of layer instances. Must be more
            than one layer/tensor.
        mode: String or lambda/function. If string, must be one
            of: 'sum', 'mul', 'concat', 'ave', 'cos', 'dot', 'max'.
            If lambda/function, it should take as input a list of tensors
            and return a single tensor.
        concat_axis: Integer, axis to use in mode `concat`.
        dot_axes: Integer or tuple of integers,
            axes to use in mode `dot` or `cos`.
        output_shape: Either a shape tuple (tuple of integers),
            or a lambda/function
            to compute `output_shape`
            (only if merge mode is a lambda/function).
            If the argument is a tuple,
            it should be expected output shape, *not* including the batch size
            (same convention as the `input_shape` argument in layers).
            If the argument is callable,
            it should take as input a list of shape tuples
            (1:1 mapping to input tensors)
            and return a single shape tuple, including the
            batch size (same convention as the
            `compute_output_shape` method of layers).
        node_indices: Optional list of integers containing
            the output node index for each input layer
            (in case some input layers have multiple output nodes).
            will default to an array of 0s if not provided.
        tensor_indices: Optional list of indices of output tensors
            to consider for merging
            (in case some input layer node returns multiple tensors).
        output_mask: Mask or lambda/function to compute the output mask (only
            if merge mode is a lambda/function). If the latter case, it should
            take as input a list of masks and return a single mask.
    """

    def __init__(self, layers=None, mode='sum', concat_axis=-1,
                 dot_axes=-1, output_shape=None, output_mask=None,
                 arguments=None, node_indices=None, tensor_indices=None,
                 name=None):
        warnings.warn('The `Merge` layer is deprecated '
                      'and will be removed after 08/2017. '
                      'Use instead layers from `keras.layers.merge`, '
                      'e.g. `add`, `concatenate`, etc.', stacklevel=2)
        self.layers = layers
        self.mode = mode
        self.concat_axis = concat_axis
        self.dot_axes = dot_axes
        self._output_shape = output_shape
        self.node_indices = node_indices
        self._output_mask = output_mask
        self.arguments = arguments if arguments else {}

        # NOTE: this legacy layer deliberately does NOT call
        # `super().__init__`; it sets up all base-`Layer` attributes by hand
        # below so it can defer (or skip) the usual build machinery.
        self._initial_weights = None
        self._updates = []
        self._losses = []
        self._per_input_updates = {}
        self._per_input_losses = {}
        # Layer parameters.
        self._inbound_nodes = []
        self._outbound_nodes = []
        self.constraints = {}
        self._trainable_weights = []
        self._non_trainable_weights = []
        self.supports_masking = True
        self.uses_learning_phase = False
        self.input_spec = None  # Compatible with anything.
        self.stateful = False
        self.trainable = True
        if not name:
            prefix = self.__class__.__name__.lower()
            name = prefix + '_' + str(K.get_uid(prefix))
        self.name = name

        if layers:
            # This exists for backwards compatibility.
            # equivalent to:
            # merge = Merge(layers=None)
            # output = merge([input_tensor_1, input_tensor_2])
            if not node_indices:
                # By default we connect to
                # the 1st output stream in the input layer.
                node_indices = [0 for _ in range(len(layers))]
            if not tensor_indices:
                tensor_indices = [0 for _ in range(len(layers))]
            self._arguments_validation(layers, mode,
                                       concat_axis, dot_axes,
                                       node_indices, tensor_indices)
            self.built = True
            # Collect the concrete output tensors/masks of each input layer
            # and immediately call self on them, wiring the graph now.
            input_tensors = []
            input_masks = []
            for i, layer in enumerate(layers):
                node_index = node_indices[i]
                tensor_index = tensor_indices[i]
                inbound_node = layer._inbound_nodes[node_index]
                input_tensors.append(inbound_node.output_tensors[tensor_index])
                input_masks.append(inbound_node.output_masks[tensor_index])
            self(input_tensors, mask=input_masks)
        else:
            self.built = False

    def _arguments_validation(self, layers, mode, concat_axis, dot_axes,
                              node_indices, tensor_indices):
        """Validates user-passed arguments and raises exceptions
        as appropriate.
        """
        if not callable(mode):
            if mode not in {'sum', 'mul', 'concat', 'ave', 'cos', 'dot', 'max'}:
                raise ValueError('Invalid merge mode: ' + str(mode))
        if not isinstance(layers, (list, tuple)) or len(layers) < 2:
            raise TypeError('A Merge should only be applied to a list of '
                            'layers with at least 2 elements. Found: ' +
                            str(layers))

        if tensor_indices is None:
            tensor_indices = [None for _ in range(len(layers))]

        # Resolve the static output shape of each input layer node.
        input_shapes = []
        for i, layer in enumerate(layers):
            layer_output_shape = layer.get_output_shape_at(node_indices[i])
            if isinstance(layer_output_shape, list):
                # Case: the layer has multiple output tensors
                # and we only need a specific one.
                layer_output_shape = layer_output_shape[tensor_indices[i]]
            input_shapes.append(layer_output_shape)

        if mode in {'sum', 'mul', 'ave', 'cos', 'max'}:
            # Elementwise modes require all inputs to share one shape.
            input_shapes_set = set(input_shapes)
            if len(input_shapes_set) > 1:
                raise ValueError('Only layers of same output shape can '
                                 'be merged using ' + mode + ' mode. ' +
                                 'Layer shapes: %s' % input_shapes)
        if mode in {'cos', 'dot'}:
            if len(layers) > 2:
                raise ValueError(mode + ' merge takes exactly 2 layers')
            shape1 = input_shapes[0]
            shape2 = input_shapes[1]
            n1 = len(shape1)
            n2 = len(shape2)
            # Normalize `dot_axes` into a two-element list of non-negative
            # ints (one axis per input tensor).
            if isinstance(dot_axes, int):
                if dot_axes < 0:
                    self.dot_axes = [dot_axes % n1, dot_axes % n2]
                else:
                    self.dot_axes = [dot_axes, ] * 2
            if not isinstance(self.dot_axes, (list, tuple)):
                raise TypeError('Invalid type for dot_axes - '
                                'should be a list.')
            if len(self.dot_axes) != 2:
                raise ValueError('Invalid format for dot_axes - '
                                 'should contain two elements.')
            if not isinstance(self.dot_axes[0], int) or not isinstance(self.dot_axes[1], int):
                raise ValueError('Invalid format for dot_axes - '
                                 'list elements should be "int".')
            if shape1[self.dot_axes[0]] != shape2[self.dot_axes[1]]:
                raise ValueError('Dimension incompatibility using dot mode: '
                                 '%s != %s. ' % (shape1[self.dot_axes[0]], shape2[self.dot_axes[1]]) +
                                 'Layer shapes: %s, %s' % (shape1, shape2))
        elif mode == 'concat':
            # All shapes must agree except along the concat axis.
            reduced_inputs_shapes = [list(shape) for shape in input_shapes]
            shape_set = set()
            for i in range(len(reduced_inputs_shapes)):
                del reduced_inputs_shapes[i][self.concat_axis]
                shape_set.add(tuple(reduced_inputs_shapes[i]))
            if len(shape_set) > 1:
                raise ValueError('"concat" mode can only merge '
                                 'layers with matching '
                                 'output shapes except for the concat axis. '
                                 'Layer shapes: %s' % (input_shapes))

    def call(self, inputs, mask=None):
        if not isinstance(inputs, list) or len(inputs) <= 1:
            raise TypeError('Merge must be called on a list of tensors '
                            '(at least 2). Got: ' + str(inputs))
        # Case: "mode" is a lambda or function.
        if callable(self.mode):
            arguments = self.arguments
            # Pass the mask through only when the user function accepts it.
            if has_arg(self.mode, 'mask'):
                arguments['mask'] = mask
            return self.mode(inputs, **arguments)

        # Pre-defined string modes.
        if self.mode == 'sum' or self.mode == 'ave':
            s = inputs[0]
            for i in range(1, len(inputs)):
                s += inputs[i]
            if self.mode == 'ave':
                s /= len(inputs)
            return s
        elif self.mode == 'concat':
            return K.concatenate(inputs, axis=self.concat_axis)
        elif self.mode == 'mul':
            s = inputs[0]
            for i in range(1, len(inputs)):
                s *= inputs[i]
            return s
        elif self.mode == 'max':
            s = inputs[0]
            for i in range(1, len(inputs)):
                s = K.maximum(s, inputs[i])
            return s
        elif self.mode == 'dot':
            l1 = inputs[0]
            l2 = inputs[1]
            output = K.batch_dot(l1, l2, self.dot_axes)
            return output
        elif self.mode == 'cos':
            # Cosine proximity: batch dot product normalized by the
            # product of the two norms (clipped away from zero).
            l1 = inputs[0]
            l2 = inputs[1]
            denominator = K.sqrt(K.batch_dot(l1, l1, self.dot_axes) *
                                 K.batch_dot(l2, l2, self.dot_axes))
            denominator = K.maximum(denominator, K.epsilon())
            output = K.batch_dot(l1, l2, self.dot_axes) / denominator
            output = K.expand_dims(output, 1)
            return output
        else:
            raise ValueError('Unknown merge mode.')

    def compute_output_shape(self, input_shape):
        # Must have multiple input shape tuples.
        assert isinstance(input_shape, list)
        # Case: callable self._output_shape.
        if callable(self.mode):
            if callable(self._output_shape):
                output_shape = self._output_shape(input_shape)
                return output_shape
            elif self._output_shape is not None:
                return (input_shape[0][0],) + tuple(self._output_shape)
            else:
                raise ValueError('The Merge layer ' + self.name +
                                 ' has a callable `mode` argument, '
                                 'and we cannot infer its output shape '
                                 'because no `output_shape` '
                                 'argument was provided. '
                                 'Make sure to pass a shape tuple '
                                 '(or callable) '
                                 '`output_shape` to Merge.')
        # Pre-defined merge modes.
        input_shapes = input_shape
        if self.mode in ['sum', 'mul', 'ave', 'max']:
            # All tuples in input_shapes should be the same.
            return input_shapes[0]
        elif self.mode == 'concat':
            output_shape = list(input_shapes[0])
            for shape in input_shapes[1:]:
                # Any unknown (None) dim along the concat axis makes the
                # whole concat dim unknown.
                if output_shape[self.concat_axis] is None or shape[self.concat_axis] is None:
                    output_shape[self.concat_axis] = None
                    break
                output_shape[self.concat_axis] += shape[self.concat_axis]
            return tuple(output_shape)
        elif self.mode in ['dot', 'cos']:
            # Drop the contracted axes (and the batch axis of the second
            # input) to obtain the batched dot-product shape.
            shape1 = list(input_shapes[0])
            shape2 = list(input_shapes[1])
            shape1.pop(self.dot_axes[0])
            shape2.pop(self.dot_axes[1])
            shape2.pop(0)
            output_shape = shape1 + shape2
            if len(output_shape) == 1:
                output_shape += [1]
            return tuple(output_shape)

    def compute_mask(self, inputs, mask=None):
        if mask is None or all([m is None for m in mask]):
            return None
        assert hasattr(mask, '__len__') and len(mask) == len(inputs)
        if self.mode in ['sum', 'mul', 'ave', 'max']:
            # Output is masked where ALL inputs are masked.
            masks = [K.expand_dims(m, 0) for m in mask if m is not None]
            return K.all(K.concatenate(masks, axis=0), axis=0, keepdims=False)
        elif self.mode == 'concat':
            # Make a list of masks while making sure
            # the dimensionality of each mask
            # is the same as the corresponding input.
            masks = []
            for input_i, mask_i in zip(inputs, mask):
                if mask_i is None:
                    # Input is unmasked. Append all 1s to masks,
                    masks.append(K.ones_like(input_i, dtype='bool'))
                elif K.ndim(mask_i) < K.ndim(input_i):
                    # Mask is smaller than the input, expand it
                    masks.append(K.expand_dims(mask_i))
                else:
                    masks.append(mask_i)
            concatenated = K.concatenate(masks, axis=self.concat_axis)
            return K.all(concatenated, axis=-1, keepdims=False)
        elif self.mode in ['cos', 'dot']:
            # Masking is not defined for dot-product modes.
            return None
        elif callable(self.mode):
            if callable(self._output_mask):
                return self._output_mask(mask)
            else:
                return self._output_mask
        else:
            # This should have been caught earlier.
            raise ValueError('Invalid merge mode: {}'.format(self.mode))

    def get_config(self):
        # Callables cannot be serialized directly: lambdas are dumped to
        # bytecode via `func_dump`, named functions are stored by name, and
        # anything else is kept 'raw'. The companion `*_type` tag tells
        # `from_config` how to restore each value.
        if isinstance(self.mode, python_types.LambdaType):
            mode = func_dump(self.mode)
            mode_type = 'lambda'
        elif callable(self.mode):
            mode = self.mode.__name__
            mode_type = 'function'
        else:
            mode = self.mode
            mode_type = 'raw'

        if isinstance(self._output_shape, python_types.LambdaType):
            output_shape = func_dump(self._output_shape)
            output_shape_type = 'lambda'
        elif callable(self._output_shape):
            output_shape = self._output_shape.__name__
            output_shape_type = 'function'
        else:
            output_shape = self._output_shape
            output_shape_type = 'raw'

        if isinstance(self._output_mask, python_types.LambdaType):
            output_mask = func_dump(self._output_mask)
            output_mask_type = 'lambda'
        elif callable(self._output_mask):
            output_mask = self._output_mask.__name__
            output_mask_type = 'function'
        else:
            output_mask = self._output_mask
            output_mask_type = 'raw'

        return {'name': self.name,
                'mode': mode,
                'mode_type': mode_type,
                'concat_axis': self.concat_axis,
                'dot_axes': self.dot_axes,
                'output_shape': output_shape,
                'output_shape_type': output_shape_type,
                'output_mask': output_mask,
                'output_mask_type': output_mask_type,
                'arguments': self.arguments}

    @classmethod
    def from_config(cls, config):
        # Mirror of `get_config`: rebuild `mode`, `output_shape` and
        # `output_mask` from their serialized (lambda/function/raw) form.
        # NOTE(review): 'function' entries are looked up in this module's
        # globals, so user functions must be importable here to deserialize.
        config = config.copy()
        mode_type = config.pop('mode_type')
        if mode_type == 'function':
            mode = globals()[config['mode']]
        elif mode_type == 'lambda':
            mode = func_load(config['mode'], globs=globals())
        else:
            mode = config['mode']

        output_shape_type = config.pop('output_shape_type', None)
        if output_shape_type == 'function':
            output_shape = globals()[config['output_shape']]
        elif output_shape_type == 'lambda':
            output_shape = func_load(config['output_shape'],
                                     globs=globals())
        else:
            output_shape = config.get('output_shape')

        output_mask_type = config.pop('output_mask_type', None)
        if output_mask_type == 'function':
            output_mask = globals()[config['output_mask']]
        elif output_mask_type == 'lambda':
            output_mask = func_load(config['output_mask'],
                                    globs=globals())
        else:
            output_mask = config.get('output_mask')

        config['mode'] = mode
        config['output_shape'] = output_shape
        config['output_mask'] = output_mask
        return super(Merge, cls).from_config(config)
def merge(inputs, mode='sum', concat_axis=-1,
          dot_axes=-1, output_shape=None, output_mask=None,
          arguments=None, name=None):
    """Functional merge, to apply to Keras tensors (NOT layers).
    Returns a Keras tensor.
    # Example
    ```python
    tensor_a = Input(shape=(32,))
    tensor_b = Input(shape=(32,))
    merged_tensor = merge([tensor_a, tensor_b], mode='concat', concat_axis=1)
    ```
    # Arguments
        mode: String or lambda/function. If string, must be one
            of: 'sum', 'mul', 'concat', 'ave', 'cos', 'dot', 'max'.
            If lambda/function, it should take as input a list of tensors
            and return a single tensor.
        concat_axis: Integer, axis to use in mode `concat`.
        dot_axes: Integer or tuple of integers,
            axes to use in mode `dot` or `cos`.
        output_shape: Shape tuple (tuple of integers), or lambda/function
            to compute output_shape (only if merge mode is a lambda/function).
            If the latter case, it should take as input a list of shape tuples
            (1:1 mapping to input tensors) and return a single shape tuple,
            including the batch size
            (same convention as the `compute_output_shape` method of layers).
        node_indices: Optional list of integers containing
            the output node index for each input layer
            (in case some input layers have multiple output nodes).
            will default to an array of 0s if not provided.
        tensor_indices: Optional list of indices of output tensors
            to consider for merging
            (in case some input layer node returns multiple tensors).
    """
    warnings.warn('The `merge` function is deprecated '
                  'and will be removed after 08/2017. '
                  'Use instead layers from `keras.layers.merge`, '
                  'e.g. `add`, `concatenate`, etc.', stacklevel=2)
    # Only tensors carrying `_keras_history` (layer, node, tensor index)
    # can be traced back to their producing layers.
    all_keras_tensors = True
    for x in inputs:
        if not hasattr(x, '_keras_history'):
            all_keras_tensors = False
            break
    if all_keras_tensors:
        # All inputs are Keras tensors: recover their origin layers/nodes
        # and let the Merge constructor wire the graph immediately.
        input_layers = []
        node_indices = []
        tensor_indices = []
        for x in inputs:
            input_layer, node_index, tensor_index = x._keras_history
            input_layers.append(input_layer)
            node_indices.append(node_index)
            tensor_indices.append(tensor_index)
        merge_layer = Merge(input_layers, mode=mode,
                            concat_axis=concat_axis,
                            dot_axes=dot_axes,
                            output_shape=output_shape,
                            output_mask=output_mask,
                            arguments=arguments,
                            node_indices=node_indices,
                            tensor_indices=tensor_indices,
                            name=name)
        # The Merge constructor already called the layer; return the
        # output tensor of the node it created.
        return merge_layer._inbound_nodes[0].output_tensors[0]
    else:
        # Plain (non-Keras) tensors: build an unconnected Merge layer and
        # apply it to the raw inputs.
        merge_layer = Merge(mode=mode,
                            concat_axis=concat_axis,
                            dot_axes=dot_axes,
                            output_shape=output_shape,
                            output_mask=output_mask,
                            arguments=arguments,
                            name=name)
        return merge_layer(inputs)
class MaxoutDense(Layer):
    """A dense maxout layer.
    A `MaxoutDense` layer takes the element-wise maximum of
    `nb_feature` `Dense(input_dim, output_dim)` linear layers.
    This allows the layer to learn a convex,
    piecewise linear activation function over the inputs.
    Note that this is a *linear* layer;
    if you wish to apply activation function
    (you shouldn't need to --they are universal function approximators),
    an `Activation` layer must be added after.
    # Arguments
        output_dim: int > 0.
        nb_feature: number of Dense layers to use internally.
        init: name of initialization function for the weights of the layer
            (see [initializations](../initializations.md)),
            or alternatively, Theano function to use for weights
            initialization. This parameter is only relevant
            if you don't pass a `weights` argument.
        weights: list of Numpy arrays to set as initial weights.
            The list should have 2 elements, of shape `(input_dim, output_dim)`
            and (output_dim,) for weights and biases respectively.
        W_regularizer: instance of [WeightRegularizer](../regularizers.md)
            (eg. L1 or L2 regularization), applied to the main weights matrix.
        b_regularizer: instance of [WeightRegularizer](../regularizers.md),
            applied to the bias.
        activity_regularizer: instance of [ActivityRegularizer](../regularizers.md),
            applied to the network output.
        W_constraint: instance of the [constraints](../constraints.md) module
            (eg. maxnorm, nonneg), applied to the main weights matrix.
        b_constraint: instance of the [constraints](../constraints.md) module,
            applied to the bias.
        bias: whether to include a bias
            (i.e. make the layer affine rather than linear).
        input_dim: dimensionality of the input (integer). This argument
            (or alternatively, the keyword argument `input_shape`)
            is required when using this layer as the first layer in a model.
    # Input shape
        2D tensor with shape: `(nb_samples, input_dim)`.
    # Output shape
        2D tensor with shape: `(nb_samples, output_dim)`.
    # References
        - [Maxout Networks](http://arxiv.org/abs/1302.4389)
    """

    def __init__(self, output_dim,
                 nb_feature=4,
                 init='glorot_uniform',
                 weights=None,
                 W_regularizer=None,
                 b_regularizer=None,
                 activity_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 bias=True,
                 input_dim=None,
                 **kwargs):
        warnings.warn('The `MaxoutDense` layer is deprecated '
                      'and will be removed after 06/2017.')
        self.output_dim = output_dim
        self.nb_feature = nb_feature
        # Resolve string identifiers into initializer/regularizer/constraint
        # objects via the respective module-level `get` helpers.
        self.init = initializers.get(init)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.bias = bias
        self.initial_weights = weights
        self.input_spec = InputSpec(ndim=2)
        self.input_dim = input_dim
        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(MaxoutDense, self).__init__(**kwargs)

    def build(self, input_shape):
        input_dim = input_shape[1]
        self.input_spec = InputSpec(dtype=K.floatx(),
                                    shape=(None, input_dim))
        # One linear map per maxout feature:
        # W has shape (nb_feature, input_dim, output_dim).
        self.W = self.add_weight((self.nb_feature, input_dim, self.output_dim),
                                 initializer=self.init,
                                 name='W',
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        if self.bias:
            self.b = self.add_weight((self.nb_feature, self.output_dim,),
                                     initializer='zero',
                                     name='b',
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
        else:
            self.b = None

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
        self.built = True

    def compute_output_shape(self, input_shape):
        assert input_shape and len(input_shape) == 2
        return (input_shape[0], self.output_dim)

    def call(self, x):
        # no activation, this layer is only linear.
        output = K.dot(x, self.W)
        if self.bias:
            output += self.b
        # Maxout: take the maximum over the feature axis (axis=1).
        output = K.max(output, axis=1)
        return output

    def get_config(self):
        config = {'output_dim': self.output_dim,
                  'init': initializers.serialize(self.init),
                  'nb_feature': self.nb_feature,
                  'W_regularizer': regularizers.serialize(self.W_regularizer),
                  'b_regularizer': regularizers.serialize(self.b_regularizer),
                  'activity_regularizer': regularizers.serialize(self.activity_regularizer),
                  'W_constraint': constraints.serialize(self.W_constraint),
                  'b_constraint': constraints.serialize(self.b_constraint),
                  'bias': self.bias,
                  'input_dim': self.input_dim}
        base_config = super(MaxoutDense, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class Highway(Layer):
    """Densely connected highway network.
    Highway layers are a natural extension of LSTMs to feedforward networks.
    # Arguments
        init: name of initialization function for the weights of the layer
            (see [initializations](../initializations.md)),
            or alternatively, Theano function to use for weights
            initialization. This parameter is only relevant
            if you don't pass a `weights` argument.
        activation: name of activation function to use
            (see [activations](../activations.md)),
            or alternatively, elementwise Theano function.
            If you don't specify anything, no activation is applied
            (ie. "linear" activation: a(x) = x).
        weights: list of Numpy arrays to set as initial weights.
            The list should have 2 elements, of shape `(input_dim, output_dim)`
            and (output_dim,) for weights and biases respectively.
        W_regularizer: instance of [WeightRegularizer](../regularizers.md)
            (eg. L1 or L2 regularization), applied to the main weights matrix.
        b_regularizer: instance of [WeightRegularizer](../regularizers.md),
            applied to the bias.
        activity_regularizer: instance of [ActivityRegularizer](../regularizers.md),
            applied to the network output.
        W_constraint: instance of the [constraints](../constraints.md) module
            (eg. maxnorm, nonneg), applied to the main weights matrix.
        b_constraint: instance of the [constraints](../constraints.md) module,
            applied to the bias.
        bias: whether to include a bias
            (i.e. make the layer affine rather than linear).
        input_dim: dimensionality of the input (integer). This argument
            (or alternatively, the keyword argument `input_shape`)
            is required when using this layer as the first layer in a model.
    # Input shape
        2D tensor with shape: `(nb_samples, input_dim)`.
    # Output shape
        2D tensor with shape: `(nb_samples, input_dim)`.
    # References
        - [Highway Networks](http://arxiv.org/abs/1505.00387v2)
    """

    def __init__(self,
                 init='glorot_uniform',
                 activation=None,
                 weights=None,
                 W_regularizer=None,
                 b_regularizer=None,
                 activity_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 bias=True,
                 input_dim=None,
                 **kwargs):
        warnings.warn('The `Highway` layer is deprecated '
                      'and will be removed after 06/2017.')
        # `transform_bias` was removed from the public API; drop it
        # silently (with a warning) for old saved configs.
        if 'transform_bias' in kwargs:
            kwargs.pop('transform_bias')
            warnings.warn('`transform_bias` argument is deprecated and '
                          'has been removed.')
        self.init = initializers.get(init)
        self.activation = activations.get(activation)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.bias = bias
        self.initial_weights = weights
        self.input_spec = InputSpec(ndim=2)
        self.input_dim = input_dim
        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(Highway, self).__init__(**kwargs)

    def build(self, input_shape):
        input_dim = input_shape[1]
        self.input_spec = InputSpec(dtype=K.floatx(),
                                    shape=(None, input_dim))
        # Two square weight matrices: W for the transform path and
        # W_carry for the carry (gate) path.
        self.W = self.add_weight((input_dim, input_dim),
                                 initializer=self.init,
                                 name='W',
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        self.W_carry = self.add_weight((input_dim, input_dim),
                                       initializer=self.init,
                                       name='W_carry')
        if self.bias:
            self.b = self.add_weight((input_dim,),
                                     initializer='zero',
                                     name='b',
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
            # Carry bias is initialized to ones so the gate starts mostly
            # open (favoring the transform path early in training).
            self.b_carry = self.add_weight((input_dim,),
                                           initializer='one',
                                           name='b_carry')
        else:
            self.b_carry = None

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
        self.built = True

    def call(self, x):
        # Transform gate T(x) = sigmoid(x . W_carry + b_carry).
        y = K.dot(x, self.W_carry)
        if self.bias:
            y += self.b_carry
        transform_weight = activations.sigmoid(y)
        # Candidate output H(x) = activation(x . W + b).
        y = K.dot(x, self.W)
        if self.bias:
            y += self.b
        act = self.activation(y)
        act *= transform_weight
        # Highway combination: T * H(x) + (1 - T) * x.
        output = act + (1 - transform_weight) * x
        return output

    def get_config(self):
        config = {'init': initializers.serialize(self.init),
                  'activation': activations.serialize(self.activation),
                  'W_regularizer': regularizers.serialize(self.W_regularizer),
                  'b_regularizer': regularizers.serialize(self.b_regularizer),
                  'activity_regularizer': regularizers.serialize(self.activity_regularizer),
                  'W_constraint': constraints.serialize(self.W_constraint),
                  'b_constraint': constraints.serialize(self.b_constraint),
                  'bias': self.bias,
                  'input_dim': self.input_dim}
        base_config = super(Highway, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
def AtrousConvolution1D(*args, **kwargs):
    """Deprecated alias for `Conv1D`.

    Maps the legacy `atrous_rate` keyword (default 1) onto the modern
    `dilation_rate` argument, warns about the deprecation, and returns
    the equivalent `Conv1D` layer.
    """
    from ..layers import Conv1D
    # Translate the old keyword; absence means a dilation rate of 1.
    kwargs['dilation_rate'] = kwargs.pop('atrous_rate', 1)
    warnings.warn('The `AtrousConvolution1D` layer '
                  ' has been deprecated. Use instead '
                  'the `Conv1D` layer with the `dilation_rate` '
                  'argument.')
    return Conv1D(*args, **kwargs)
def AtrousConvolution2D(*args, **kwargs):
    """Deprecated alias for `Conv2D`.

    Maps the legacy `atrous_rate` keyword (default 1) onto the modern
    `dilation_rate` argument, warns about the deprecation, and returns
    the equivalent `Conv2D` layer.
    """
    from ..layers import Conv2D
    # Translate the old keyword; absence means a dilation rate of 1.
    kwargs['dilation_rate'] = kwargs.pop('atrous_rate', 1)
    warnings.warn('The `AtrousConvolution2D` layer '
                  ' has been deprecated. Use instead '
                  'the `Conv2D` layer with the `dilation_rate` '
                  'argument.')
    return Conv2D(*args, **kwargs)
class Recurrent(Layer):
"""Abstract base class for recurrent layers.
Do not use in a model -- it's not a valid layer!
Use its children classes `LSTM`, `GRU` and `SimpleRNN` instead.
All recurrent layers (`LSTM`, `GRU`, `SimpleRNN`) also
follow the specifications of this class and accept
the keyword arguments listed below.
# Example
```python
# as the first layer in a Sequential model
model = Sequential()
model.add(LSTM(32, input_shape=(10, 64)))
# now model.output_shape == (None, 32)
# note: `None` is the batch dimension.
# for subsequent layers, no need to specify the input size:
model.add(LSTM(16))
# to stack recurrent layers, you must use return_sequences=True
# on any recurrent layer that feeds into another recurrent layer.
# note that you only need to specify the input size on the first layer.
model = Sequential()
model.add(LSTM(64, input_dim=64, input_length=10, return_sequences=True))
model.add(LSTM(32, return_sequences=True))
model.add(LSTM(10))
```
# Arguments
weights: list of Numpy arrays to set as initial weights.
The list should have 3 elements, of shapes:
`[(input_dim, output_dim), (output_dim, output_dim), (output_dim,)]`.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
implementation: one of {0, 1, or 2}.
If set to 0, the RNN will use
an implementation that uses fewer, larger matrix products,
thus running faster on CPU but consuming more memory.
If set to 1, the RNN will use more matrix products,
but smaller ones, thus running slower
(may actually be faster on GPU) while consuming less memory.
If set to 2 (LSTM/GRU only),
the RNN will combine the input gate,
the forget gate and the output gate into a single matrix,
enabling more time-efficient parallelization on the GPU.
Note: RNN dropout must be shared for all gates,
resulting in a slightly reduced regularization.
input_dim: dimensionality of the input (integer).
This argument (or alternatively, the keyword argument `input_shape`)
is required when using this layer as the first layer in a model.
input_length: Length of input sequences, to be specified
when it is constant.
This argument is required if you are going to connect
`Flatten` then `Dense` layers upstream
(without it, the shape of the dense outputs cannot be computed).
Note that if the recurrent layer is not the first layer
in your model, you would need to specify the input length
at the level of the first layer
(e.g. via the `input_shape` argument)
# Input shapes
3D tensor with shape `(batch_size, timesteps, input_dim)`,
(Optional) 2D tensors with shape `(batch_size, output_dim)`.
# Output shape
- if `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each with shape `(batch_size, units)`.
- if `return_sequences`: 3D tensor with shape
`(batch_size, timesteps, units)`.
- else, 2D tensor with shape `(batch_size, units)`.
# Masking
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
set to `True`.
# Note on using statefulness in RNNs
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- specify `stateful=True` in the layer constructor.
- specify a fixed batch size for your model, by passing
if sequential model:
`batch_input_shape=(...)` to the first layer in your model.
else for functional model with 1 or more Input layers:
`batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
- specify `shuffle=False` when calling fit().
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
# Note on specifying the initial state of RNNs
You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`. The value of
`initial_state` should be a tensor or list of tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by
calling `reset_states` with the keyword argument `states`. The value of
`states` should be a numpy array or list of numpy arrays representing
the initial state of the RNN layer.
"""
def __init__(self, return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
implementation=0,
**kwargs):
super(Recurrent, self).__init__(**kwargs)
self.return_sequences = return_sequences
self.return_state = return_state
self.go_backwards = go_backwards
self.stateful = stateful
self.unroll = unroll
self.implementation = implementation
self.supports_masking = True
self.input_spec = [InputSpec(ndim=3)]
self.state_spec = None
self.dropout = 0
self.recurrent_dropout = 0
def compute_output_shape(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
if self.return_sequences:
output_shape = (input_shape[0], input_shape[1], self.units)
else:
output_shape = (input_shape[0], self.units)
if self.return_state:
state_shape = [(input_shape[0], self.units) for _ in self.states]
return [output_shape] + state_shape
else:
return output_shape
def compute_mask(self, inputs, mask):
if isinstance(mask, list):
mask = mask[0]
output_mask = mask if self.return_sequences else None
if self.return_state:
state_mask = [None for _ in self.states]
return [output_mask] + state_mask
else:
return output_mask
def step(self, inputs, states):
raise NotImplementedError
def get_constants(self, inputs, training=None):
return []
def get_initial_state(self, inputs):
# build an all-zero tensor of shape (samples, output_dim)
initial_state = K.zeros_like(inputs) # (samples, timesteps, input_dim)
initial_state = K.sum(initial_state, axis=(1, 2)) # (samples,)
initial_state = K.expand_dims(initial_state) # (samples, 1)
initial_state = K.tile(initial_state, [1, self.units]) # (samples, output_dim)
initial_state = [initial_state for _ in range(len(self.states))]
return initial_state
def preprocess_input(self, inputs, training=None):
return inputs
    def __call__(self, inputs, initial_state=None, **kwargs):
        """Wrap Layer.__call__ to support user-supplied initial states."""
        # If there are multiple inputs, they should be the main input plus
        # the `initial_state` tensors, e.g. when loading a model from file.
        if isinstance(inputs, (list, tuple)) and len(inputs) > 1 and initial_state is None:
            initial_state = inputs[1:]
            inputs = inputs[0]
        # If `initial_state` is specified and it is a Keras tensor, add it
        # to the inputs and temporarily extend the input spec to include
        # the state, so the functional graph records the dependency.
        if initial_state is None:
            return super(Recurrent, self).__call__(inputs, **kwargs)
        if not isinstance(initial_state, (list, tuple)):
            initial_state = [initial_state]
        # All states must be the same kind: either all Keras tensors or
        # all plain values; a mixture cannot be represented in the graph.
        is_keras_tensor = hasattr(initial_state[0], '_keras_history')
        for tensor in initial_state:
            if hasattr(tensor, '_keras_history') != is_keras_tensor:
                raise ValueError('The initial state of an RNN layer cannot be'
                                 ' specified with a mix of Keras tensors and'
                                 ' non-Keras tensors')
        if is_keras_tensor:
            # Compute the full input spec, including state.
            input_spec = self.input_spec
            state_spec = self.state_spec
            if not isinstance(input_spec, list):
                input_spec = [input_spec]
            if not isinstance(state_spec, list):
                state_spec = [state_spec]
            self.input_spec = input_spec + state_spec
            # Compute the full inputs, including state.
            inputs = [inputs] + list(initial_state)
            # Perform the call.
            output = super(Recurrent, self).__call__(inputs, **kwargs)
            # Restore the original input spec.
            self.input_spec = input_spec
            return output
        else:
            # Non-symbolic states are forwarded through kwargs instead.
            kwargs['initial_state'] = initial_state
            return super(Recurrent, self).__call__(inputs, **kwargs)
    def call(self, inputs, mask=None, training=None, initial_state=None):
        """Run the recurrence over the time dimension via K.rnn."""
        # input shape: `(samples, time (padded with zeros), input_dim)`
        # note that the .build() method of subclasses MUST define
        # self.input_spec and self.state_spec with complete input shapes.
        if isinstance(inputs, list):
            # Symbolic initial states arrive appended to the inputs list.
            initial_state = inputs[1:]
            inputs = inputs[0]
        elif initial_state is not None:
            pass
        elif self.stateful:
            initial_state = self.states
        else:
            initial_state = self.get_initial_state(inputs)
        if isinstance(mask, list):
            mask = mask[0]
        if len(initial_state) != len(self.states):
            raise ValueError('Layer has ' + str(len(self.states)) +
                             ' states but was passed ' +
                             str(len(initial_state)) +
                             ' initial states.')
        input_shape = K.int_shape(inputs)
        timesteps = input_shape[1]
        # Unrolling requires a statically known time dimension > 1.
        if self.unroll and timesteps in [None, 1]:
            raise ValueError('Cannot unroll a RNN if the '
                             'time dimension is undefined or equal to 1. \n'
                             '- If using a Sequential model, '
                             'specify the time dimension by passing '
                             'an `input_shape` or `batch_input_shape` '
                             'argument to your first layer. If your '
                             'first layer is an Embedding, you can '
                             'also use the `input_length` argument.\n'
                             '- If using the functional API, specify '
                             'the time dimension by passing a `shape` '
                             'or `batch_shape` argument to your Input layer.')
        constants = self.get_constants(inputs, training=None)
        preprocessed_input = self.preprocess_input(inputs, training=None)
        last_output, outputs, states = K.rnn(self.step,
                                             preprocessed_input,
                                             initial_state,
                                             go_backwards=self.go_backwards,
                                             mask=mask,
                                             constants=constants,
                                             unroll=self.unroll,
                                             input_length=timesteps)
        if self.stateful:
            # Carry the final states over to the next batch.
            updates = []
            for i in range(len(states)):
                updates.append((self.states[i], states[i]))
            self.add_update(updates, inputs)
        # Properly set learning phase.
        if 0 < self.dropout + self.recurrent_dropout:
            last_output._uses_learning_phase = True
            outputs._uses_learning_phase = True
        if self.return_sequences:
            output = outputs
        else:
            output = last_output
        if self.return_state:
            if not isinstance(states, (list, tuple)):
                states = [states]
            else:
                states = list(states)
            return [output] + states
        else:
            return output
    def reset_states(self, states=None):
        """Reset recurrent states to zeros, or to the given `states` values."""
        if not self.stateful:
            raise AttributeError('Layer must be stateful.')
        batch_size = self.input_spec[0].shape[0]
        if not batch_size:
            raise ValueError('If a RNN is stateful, it needs to know '
                             'its batch size. Specify the batch size '
                             'of your input tensors: \n'
                             '- If using a Sequential model, '
                             'specify the batch size by passing '
                             'a `batch_input_shape` '
                             'argument to your first layer.\n'
                             '- If using the functional API, specify '
                             'the time dimension by passing a '
                             '`batch_shape` argument to your Input layer.')
        # initialize state if None
        # NOTE(review): when the state variables have not been created yet,
        # any `states` argument passed in is silently ignored -- confirm
        # this is the intended behaviour.
        if self.states[0] is None:
            self.states = [K.zeros((batch_size, self.units))
                           for _ in self.states]
        elif states is None:
            # No explicit values: zero out the existing state variables.
            for state in self.states:
                K.set_value(state, np.zeros((batch_size, self.units)))
        else:
            if not isinstance(states, (list, tuple)):
                states = [states]
            if len(states) != len(self.states):
                raise ValueError('Layer ' + self.name + ' expects ' +
                                 str(len(self.states)) + ' states, '
                                 'but it received ' + str(len(states)) +
                                 ' state values. Input received: ' +
                                 str(states))
            for index, (value, state) in enumerate(zip(states, self.states)):
                if value.shape != (batch_size, self.units):
                    raise ValueError('State ' + str(index) +
                                     ' is incompatible with layer ' +
                                     self.name + ': expected shape=' +
                                     str((batch_size, self.units)) +
                                     ', found shape=' + str(value.shape))
                # Assumes `value` is a numpy array with .shape -- TODO confirm.
                K.set_value(state, value)
def get_config(self):
config = {'return_sequences': self.return_sequences,
'return_state': self.return_state,
'go_backwards': self.go_backwards,
'stateful': self.stateful,
'unroll': self.unroll,
'implementation': self.implementation}
base_config = super(Recurrent, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class ConvRecurrent2D(Recurrent):
    """Abstract base class for convolutional recurrent layers.

    Do not use in a model -- it's not a functional layer!

    # Arguments
        filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the convolution).
        kernel_size: An integer or tuple/list of n integers, specifying the
            dimensions of the convolution window.
        strides: An integer or tuple/list of n integers,
            specifying the strides of the convolution.
            Specifying any stride value != 1 is incompatible with specifying
            any `dilation_rate` value != 1.
        padding: One of `"valid"` or `"same"` (case-insensitive).
        data_format: A string,
            one of `channels_last` (default) or `channels_first`.
            The ordering of the dimensions in the inputs.
            `channels_last` corresponds to inputs with shape
            `(batch, time, ..., channels)`
            while `channels_first` corresponds to
            inputs with shape `(batch, time, channels, ...)`.
            It defaults to the `image_data_format` value found in your
            Keras config file at `~/.keras/keras.json`.
            If you never set it, then it will be "channels_last".
        dilation_rate: An integer or tuple/list of n integers, specifying
            the dilation rate to use for dilated convolution.
            Currently, specifying any `dilation_rate` value != 1 is
            incompatible with specifying any `strides` value != 1.
        return_sequences: Boolean. Whether to return the last output
            in the output sequence, or the full sequence.
        go_backwards: Boolean (default False).
            If True, process the input sequence backwards.
        stateful: Boolean (default False). If True, the last state
            for each sample at index i in a batch will be used as initial
            state for the sample of index i in the following batch.

    # Input shape
        5D tensor with shape `(num_samples, timesteps, channels, rows, cols)`.

    # Output shape
        - if `return_sequences`: 5D tensor with shape
            `(num_samples, timesteps, channels, rows, cols)`.
        - else, 4D tensor with shape `(num_samples, channels, rows, cols)`.

    # Masking
        This layer supports masking for input data with a variable number
        of timesteps. To introduce masks to your data,
        use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
        set to `True`.
        **Note:** for the time being, masking is only supported with Theano.

    # Note on using statefulness in RNNs
        You can set RNN layers to be 'stateful', which means that the states
        computed for the samples in one batch will be reused as initial states
        for the samples in the next batch.
        This assumes a one-to-one mapping between
        samples in different successive batches.
        To enable statefulness:
            - specify `stateful=True` in the layer constructor.
            - specify a fixed batch size for your model, by passing
                a `batch_input_shape=(...)` to the first layer in your model.
                This is the expected shape of your inputs *including the batch
                size*.
                It should be a tuple of integers, e.g. `(32, 10, 100)`.
        To reset the states of your model, call `.reset_states()` on either
        a specific layer, or on your entire model.
    """
    def __init__(self, filters,
                 kernel_size,
                 strides=(1, 1),
                 padding='valid',
                 data_format=None,
                 dilation_rate=(1, 1),
                 return_sequences=False,
                 go_backwards=False,
                 stateful=False,
                 **kwargs):
        """Store and normalise the convolutional-recurrence hyperparameters."""
        super(ConvRecurrent2D, self).__init__(**kwargs)
        self.filters = filters
        # Normalise ints/lists to canonical 2-tuples and strings.
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2, 'dilation_rate')
        self.return_sequences = return_sequences
        self.go_backwards = go_backwards
        self.stateful = stateful
        # Input is 5D: (samples, time, rows, cols, channels), or the
        # channels-first equivalent.
        self.input_spec = [InputSpec(ndim=5)]
        self.state_spec = None
    def compute_output_shape(self, input_shape):
        """Compute the 4D/5D output shape after the spatial convolution."""
        if isinstance(input_shape, list):
            input_shape = input_shape[0]
        if self.data_format == 'channels_first':
            rows = input_shape[3]
            cols = input_shape[4]
        elif self.data_format == 'channels_last':
            rows = input_shape[2]
            cols = input_shape[3]
        # Apply standard convolution arithmetic to each spatial axis.
        rows = conv_utils.conv_output_length(rows,
                                             self.kernel_size[0],
                                             padding=self.padding,
                                             stride=self.strides[0],
                                             dilation=self.dilation_rate[0])
        cols = conv_utils.conv_output_length(cols,
                                             self.kernel_size[1],
                                             padding=self.padding,
                                             stride=self.strides[1],
                                             dilation=self.dilation_rate[1])
        if self.return_sequences:
            if self.data_format == 'channels_first':
                output_shape = (input_shape[0], input_shape[1],
                                self.filters, rows, cols)
            elif self.data_format == 'channels_last':
                output_shape = (input_shape[0], input_shape[1],
                                rows, cols, self.filters)
        else:
            if self.data_format == 'channels_first':
                output_shape = (input_shape[0], self.filters, rows, cols)
            elif self.data_format == 'channels_last':
                output_shape = (input_shape[0], rows, cols, self.filters)
        if self.return_state:
            # Exactly two state shapes are appended -- presumably the hidden
            # and cell states of a ConvLSTM; confirm for other subclasses.
            if self.data_format == 'channels_first':
                output_shape = [output_shape] + [(input_shape[0], self.filters, rows, cols) for _ in range(2)]
            elif self.data_format == 'channels_last':
                output_shape = [output_shape] + [(input_shape[0], rows, cols, self.filters) for _ in range(2)]
        return output_shape
    def get_config(self):
        """Serialise constructor arguments, merged over the parent config."""
        config = {'filters': self.filters,
                  'kernel_size': self.kernel_size,
                  'strides': self.strides,
                  'padding': self.padding,
                  'data_format': self.data_format,
                  'dilation_rate': self.dilation_rate,
                  'return_sequences': self.return_sequences,
                  'go_backwards': self.go_backwards,
                  'stateful': self.stateful}
        base_config = super(ConvRecurrent2D, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| 44.978774 | 110 | 0.570465 |
acf4904726d46eab241d07312015bfa4b8d7b898 | 12,469 | py | Python | fi.py | wade1011/Python-Financial-Independence-Calculator | 3d679e602b6a6a87594d959d9a8db4c48d32c9b8 | [
"MIT"
] | null | null | null | fi.py | wade1011/Python-Financial-Independence-Calculator | 3d679e602b6a6a87594d959d9a8db4c48d32c9b8 | [
"MIT"
] | null | null | null | fi.py | wade1011/Python-Financial-Independence-Calculator | 3d679e602b6a6a87594d959d9a8db4c48d32c9b8 | [
"MIT"
] | null | null | null | """
Financial Independence Console Application
Parses files output by the Financial Independence console application, displays
a maximum, minimum and average value for each simulations result set.
Version: 1.0
Author: Wade Casey
Date: 02/06/2018
"""
import os
import random
# =========================================================================== #
# Validation Functions #
# =========================================================================== #
def validate_positive_integer(user_input):
    """Validate that *user_input* parses as a non-negative integer.

    Args:
        user_input: raw value entered by the user.

    Returns:
        The parsed non-negative integer, or the sentinel string
        'invalid_input' when the value is negative or not an integer.
    """
    try:
        number = int(user_input)
    except ValueError:
        print("\nInvalid input, please enter an integer.")
        return "invalid_input"
    if number < 0:
        print("\nPlease enter a non-negative integer.")
        return "invalid_input"
    return number
def validate_integer(user_input):
    """Validate that *user_input* parses as an integer (any sign).

    Args:
        user_input: raw value entered by the user.

    Returns:
        The parsed integer, or the sentinel string 'invalid_input'.
    """
    try:
        parsed = int(user_input)
    except ValueError:
        print("\nInvalid input, please enter an integer.")
        return "invalid_input"
    return parsed
def validate_float(user_input):
    """Validate that *user_input* parses as a real number.

    Args:
        user_input: raw value entered by the user.

    Returns:
        The parsed float, or the sentinel string 'invalid_input' when the
        value cannot be interpreted as a number.
    """
    try:
        return float(user_input)
    except (TypeError, ValueError):
        # Bug fix: catch only conversion failures -- the previous bare
        # `except:` also swallowed KeyboardInterrupt/SystemExit and any
        # genuine programming error.
        print("\nPlease enter a real number.")
        return "invalid_input"
# =========================================================================== #
# Input Functions #
# =========================================================================== #
def get_annual_spend():
    """Prompt until the user supplies a valid non-negative annual spend.

    Bug fix: loops instead of recursing, so repeated invalid input can no
    longer exhaust the recursion limit.
    """
    while True:
        print("\nHow much did you spend last year to support your current lifestyle?")
        annual_spend = validate_positive_integer(input("(Must be positive integer): "))
        if annual_spend != "invalid_input":
            return annual_spend
def get_inflation_rate():
    """Prompt until the user supplies a valid base inflation rate.

    Bug fix: loops instead of recursing, so repeated invalid input can no
    longer exhaust the recursion limit.
    """
    while True:
        print("\nPlease enter the base inflation rate:")
        inflation_rate = validate_float(input("(e.g. 2% should be entered as 0.02): "))
        if inflation_rate != "invalid_input":
            return inflation_rate
def get_savings_balance():
    """Prompt until the user supplies a valid integer savings balance.

    Bug fix: loops instead of recursing, so repeated invalid input can no
    longer exhaust the recursion limit.
    """
    while True:
        print("\nHow much do you currently have saved for investment?")
        savings_balance = validate_integer(input("(Must be an integer): "))
        if savings_balance != "invalid_input":
            return savings_balance
def get_interest_rate():
    """Prompt until the user supplies a valid base annual interest rate.

    Bug fix: loops instead of recursing, so repeated invalid input can no
    longer exhaust the recursion limit.
    """
    while True:
        print("\nPlease enter the base annual interest rate:")
        interest_rate = validate_float(input("(e.g. 4% should be entered as 0.04): "))
        if interest_rate != "invalid_input":
            return interest_rate
def get_num_years():
    """Prompt until the user supplies a year count in the range 1..9999.

    Bug fix: loops instead of recursing, so repeated invalid input can no
    longer exhaust the recursion limit. All original messages preserved.
    """
    while True:
        print("\nHow many years do you want to test?")
        num_years = validate_positive_integer(input("(Must be positive integer, less than 10,000): "))
        if num_years == "invalid_input":
            continue
        if num_years > 9999:
            print("\nPlease enter a value less than 10,000.")
            continue
        if num_years > 0:
            return num_years
        # Zero passes the non-negative validator but is not a usable span.
        print("\nPlease enter a positive integer.")
def get_inflation_change():
    """Prompt until a valid maximum yearly inflation change is supplied.

    Bug fix: loops instead of recursing, so repeated invalid input can no
    longer exhaust the recursion limit.
    """
    while True:
        print("\nPlease enter the expected maximum change for inflation in a given year:")
        inflation_change = validate_float(input("(e.g. 0.25% should be entered as 0.0025): "))
        if inflation_change != "invalid_input":
            return inflation_change
def get_interest_change():
    """Prompt until a valid maximum yearly interest change is supplied.

    Bug fix: loops instead of recursing, so repeated invalid input can no
    longer exhaust the recursion limit.
    """
    while True:
        print("\nPlease enter the expected maximum change for interest in a given year:")
        interest_change = validate_float(input("(e.g. 1% should be entered as 0.01): "))
        if interest_change != "invalid_input":
            return interest_change
def get_simulations_to_run():
    """Prompt until the user supplies a simulation count in 1..9999.

    Bug fix: loops instead of recursing, so repeated invalid input can no
    longer exhaust the recursion limit. All original messages preserved.
    """
    while True:
        print("\nHow many simulations should be run?")
        simulations_to_run = validate_positive_integer(input("(Must be positive integer, less than 10,000): "))
        if simulations_to_run == "invalid_input":
            continue
        if simulations_to_run > 9999:
            print("\nPlease enter a value less than 10,000.")
            continue
        if simulations_to_run > 0:
            return simulations_to_run
        # Zero passes the non-negative validator but would divide by zero later.
        print("\nPlease enter a positive integer.")
# =========================================================================== #
# Functions #
# =========================================================================== #
def run_again():
    """Prompt the user to 'Quit' or 'Restart' the application.

    Bug fix: loops (instead of recursing) until a recognised option is
    entered, so repeated invalid input cannot exhaust the recursion limit.
    """
    while True:
        decision = input("--------------------------\nWhat would you like to do?\n'Quit' or 'Restart'?\n ")
        choice = decision.upper()
        if choice in ("QUIT", "Q"):
            print("--------------------------\nfi.py has closed.\n--------------------------")
            quit()
        elif choice in ("RESTART", "R"):
            print("--------------------------\nfi.py has restarted.\n--------------------------")
            begin()
            return
        else:
            print("--------------------------\n\n", decision, "is not a valid option.\n")
def run_simulation(annual_spend, inflation_rate, savings_balance,
                   interest_rate, num_years, inflation_change, interest_change):
    """Project the savings balance over *num_years* of living expenses.

    Each simulated year:
      1. grows `annual_spend` by the current inflation rate,
      2. deducts it from `savings_balance`,
      3. applies the current interest rate to the remainder,
      4. randomly perturbs both rates (via `modify_rate`) for next year,
      5. records the resulting balance.

    Returns:
        list[float]: the end-of-year balance for every simulated year.
    """
    balances = []
    for _ in range(num_years):
        # Expression forms kept identical to preserve exact float results.
        annual_spend = annual_spend + (annual_spend * inflation_rate)
        savings_balance = savings_balance - annual_spend
        savings_balance = savings_balance + (savings_balance * interest_rate)
        inflation_rate = modify_rate(inflation_rate, inflation_change)
        interest_rate = modify_rate(interest_rate, interest_change)
        balances.append(savings_balance)
    return balances
def modify_rate(rate, change):
    """Return *rate* randomly perturbed by at most +/- *change*.

    Explanation:
        random.random() yields a value in [0, 1), so
        1 - random.random() * 2 lies in (-1, 1]; scaling by *change*
        gives a symmetric offset in (-change, change], which is added
        to the rate.
    """
    # Bug fix: the previous expression (0.75 - random.random() * 2.5)
    # produced offsets in (-1.75, 0.75] * change, contradicting the
    # documented symmetric +/- change perturbation.
    return rate + change * (1 - random.random() * 2)
def begin():
    """
    Prompt the user for all simulation inputs, validate them, run the
    requested number of simulations, and write the results to file.
    Processed data is written to file 'output.txt' which will be created and
    stored in the same directory as 'fi.py'.
    """
    # Confirm the file 'output.txt' can be opened or created, and written to.
    # NOTE: mode '+w' is accepted by Python and equivalent to 'w+'.
    try:
        with open('output.txt', '+w') as f:
            f.write("")
    # Instruct user how to resolve the error then close the application.
    except IOError:
        input("\nError accessing output.txt from:\n" + os.getcwd() +
              "\nCheck you have permissions to read and write to files in this " +
              "directory then try again.\n\nPress and 'Enter' or 'Return' to quit the application.")
        quit()
    # Get values from the user so the calculations can be run.
    annual_spend = get_annual_spend()
    inflation_rate = get_inflation_rate()
    inflation_change = get_inflation_change()
    savings_balance = get_savings_balance()
    interest_rate = get_interest_rate()
    interest_change = get_interest_change()
    num_years = get_num_years()
    simulations_to_run = get_simulations_to_run()
    # Used to determine the percentage of simulations which end with a
    # positive final balance.
    successful_count = 0
    # Loop for the number of simulations the user requested.
    for i in range(0, simulations_to_run):
        # 'run_simulation' returns an array of savings balances remaining after
        # expenses for each year have been calculated and deducted.
        result = run_simulation(annual_spend, inflation_rate, savings_balance,
                                interest_rate, num_years, inflation_change, interest_change)
        # Open the file 'output.txt' in append mode.
        # For each value in the results array, format to two decimal places and
        # append it to the file. If the last result was non-negative, append
        # 'successful', else append 'unsuccessful'.
        with open('output.txt', 'a') as f:
            for val in result:
                f.write(format(val, '.2f') + " ")
            if float(result[len(result)-1]) < 0:
                f.write("unsuccessful")
            else:
                f.write("successful")
                successful_count += 1
            f.write("\n")
    # Calculate the percent of successful results, and write to the console.
    percent = (successful_count/simulations_to_run)*100
    print("\n----------------------------------------------")
    print("Simulation was successful in " + str(successful_count) + "/" +
          str(simulations_to_run) + " runs " + "(" + format(percent, '.2f') + "%)")
    print("----------------------------------------------")
    print("See 'output.txt' located in directory:\n" + os.getcwd() + ". for more detailed results.")
    # Once processing has finished, prompt user to 'Quit' or 'Restart' the application.
    run_again()
# Entry point: run the interactive application only when executed as a
# script, not when this module is imported.
if __name__ == '__main__':
    begin()
acf490dde93504358d6aeaf00be970971993515b | 344 | py | Python | project_euler/solutions/problem_53.py | cryvate/project-euler | 6ed13880d7916c34554559f5f71662a863735eda | [
"MIT"
] | null | null | null | project_euler/solutions/problem_53.py | cryvate/project-euler | 6ed13880d7916c34554559f5f71662a863735eda | [
"MIT"
] | 9 | 2017-02-20T23:41:40.000Z | 2017-04-16T15:36:54.000Z | project_euler/solutions/problem_53.py | cryvate/project-euler | 6ed13880d7916c34554559f5f71662a863735eda | [
"MIT"
def solve(max_n: int = 100, at_least: int = 1_000_000) -> int:
    """Count the values of C(n, r), 1 <= n <= max_n, that reach *at_least*.

    (Project Euler problem 53.) For each n the binomial coefficients are
    built incrementally: C(n, r) = C(n, r-1) * (n + 1 - r) // r. As soon
    as one coefficient reaches the threshold, the symmetry
    C(n, r) == C(n, n - r) gives the number of qualifying r values for
    that row as n - 2*r + 1.

    Args:
        max_n: largest n to examine.
        at_least: threshold a coefficient must meet to be counted.

    Returns:
        The number of pairs (n, r) with C(n, r) >= at_least.
    """
    counter = 0
    for n in range(1, max_n + 1):
        accumulate = 1
        # Bug fix: include the central coefficient r == n // 2; the old
        # bound range(1, n // 2) stopped one short and missed rows whose
        # only qualifying values are the central ones.
        for r in range(1, n // 2 + 1):
            accumulate = ((n + 1 - r) * accumulate) // r
            if accumulate >= at_least:
                counter += (n - 2 * r + 1)
                break
    return counter
| 22.933333 | 56 | 0.473837 |
acf4915d3861e0e786c2f7324f7e730beea1c47a | 867 | py | Python | setup.py | cebel/pymsql_tools | 30cfa7a2b0faf7d3c00df811064273907c3764b8 | [
"Apache-2.0"
] | 1 | 2017-09-29T17:59:35.000Z | 2017-09-29T17:59:35.000Z | setup.py | cebel/pymsql_tools | 30cfa7a2b0faf7d3c00df811064273907c3764b8 | [
"Apache-2.0"
] | null | null | null | setup.py | cebel/pymsql_tools | 30cfa7a2b0faf7d3c00df811064273907c3764b8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup, find_packages
# Packages live under the src/ directory (src layout).
PACKAGES = find_packages(where='src')
# Third-party runtime dependencies.
INSTALL_REQUIRES = [
    'pymysql',
]
setup(
    name="pymysql_tools",
    version='0.0.1',
    url='https://github.com/cebel/pymysql_tools/',
    author='Christian Ebeling',
    author_email='chr.ebeling@gmail.com',
    maintainer='Christian Ebeling',
    maintainer_email='chr.ebeling@gmail.com',
    # NOTE(review): description appears copied from PyMySQL itself;
    # presumably it should describe pymysql_tools -- confirm.
    description='Pure Python MySQL Driver',
    # NOTE(review): license says Apache 2.0 but the classifier below
    # declares MIT -- these should agree; confirm the intended license.
    license="Apache 2.0",
    packages=PACKAGES,
    install_requires=INSTALL_REQUIRES,
    # Map the package root to src/ to match find_packages(where='src').
    package_dir={'': 'src'},
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Topic :: Database',
    ],
)
| 27.09375 | 54 | 0.643599 |
acf492bb9bffe3125fccfa40019429cb5f67f527 | 345 | py | Python | scripts/figures/figure5/ready_model_resnet152/remote_run_data.py | netx-repo/PipeSwitch | f321d399e501b79ad51da13074e2aecda36cb06a | [
"Apache-2.0"
] | 81 | 2020-11-05T16:15:58.000Z | 2022-03-09T07:38:51.000Z | scripts/figures/figure5/ready_model_resnet152/remote_run_data.py | baizh1994/PipeSwitch | cb7b03f0777cc59038a449e55ce1492f7ec973c6 | [
"Apache-2.0"
] | 3 | 2020-12-21T13:00:09.000Z | 2021-06-13T15:08:46.000Z | scripts/figures/figure5/ready_model_resnet152/remote_run_data.py | baizh1994/PipeSwitch | cb7b03f0777cc59038a449e55ce1492f7ec973c6 | [
"Apache-2.0"
] | 22 | 2020-11-06T07:51:35.000Z | 2022-03-09T07:38:53.000Z | import os
import sys
from scripts.common.util import RunDocker
def main():
with RunDocker('pipeswitch:ready_model', 'figure5_ready_model_resnet152') as rd:
# Start the server: ready_model
rd.run('python PipeSwitch/scripts/run_data.py')
# Get and return the data point
if __name__ == '__main__':
main() | 24.642857 | 84 | 0.684058 |
acf4941c8d3be11f791a3822bbf1b77de1669d53 | 754 | py | Python | DB/structure/builder.py | cnzeki/PSENet | c7e0785404e12866171e9da678736abae9cdb8cb | [
"Apache-2.0"
] | null | null | null | DB/structure/builder.py | cnzeki/PSENet | c7e0785404e12866171e9da678736abae9cdb8cb | [
"Apache-2.0"
] | null | null | null | DB/structure/builder.py | cnzeki/PSENet | c7e0785404e12866171e9da678736abae9cdb8cb | [
"Apache-2.0"
] | null | null | null | from collections import OrderedDict
import torch
import structure.model
from concern.config import Configurable, State
class Builder(Configurable):
    """Configurable factory that instantiates a model from `structure.model`.

    Attributes:
        model: name of the model class to build (looked up on structure.model).
        model_args: keyword configuration forwarded to the model constructor.
    """
    model = State()
    model_args = State()

    def __init__(self, cmd=None, **kwargs):
        """Load configuration state; `cmd` may override the backbone.

        Args:
            cmd: optional dict of command-line overrides; only the
                'backbone' key is honoured here.
            **kwargs: configuration forwarded to `load_all`.
        """
        self.load_all(**kwargs)
        # Bug fix: avoid the mutable default argument ({}); use None and
        # substitute an empty dict so no default object is shared.
        cmd = cmd or {}
        if 'backbone' in cmd:
            self.model_args['backbone'] = cmd['backbone']

    @property
    def model_name(self):
        """Readable identifier: '<builder model>-<concrete model name>'."""
        return self.model + '-' + getattr(structure.model, self.model).model_name(self.model_args)

    def build(self, device, distributed=False, local_rank=0):
        """Instantiate the configured model class on *device*.

        Args:
            device: torch device passed to the model constructor.
            distributed: whether to build for distributed training.
            local_rank: process rank used in distributed mode.

        Returns:
            The constructed model instance.
        """
        Model = getattr(structure.model, self.model)
        model = Model(self.model_args, device,
                      distributed=distributed, local_rank=local_rank)
        return model
| 26.928571 | 98 | 0.661804 |
acf49526c9801ce6bd87631f93a2add2c1b0d08b | 19,677 | py | Python | pytorchvideo/models/slowfast.py | rushyaP/pytorchvideo | 875b2df67312f5f4c7d581a332701cc7eca11c14 | [
"Apache-2.0"
] | 1 | 2021-05-20T21:25:14.000Z | 2021-05-20T21:25:14.000Z | pytorchvideo/models/slowfast.py | rushyaP/pytorchvideo | 875b2df67312f5f4c7d581a332701cc7eca11c14 | [
"Apache-2.0"
] | null | null | null | pytorchvideo/models/slowfast.py | rushyaP/pytorchvideo | 875b2df67312f5f4c7d581a332701cc7eca11c14 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from typing import Callable, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from pytorchvideo.layers.utils import set_attributes
from pytorchvideo.models.head import create_res_basic_head
from pytorchvideo.models.net import MultiPathWayWithFuse, Net
from pytorchvideo.models.resnet import create_bottleneck_block, create_res_stage
from pytorchvideo.models.stem import create_res_basic_stem
def create_slowfast(
*,
# SlowFast configs.
slowfast_channel_reduction_ratio: Union[Tuple[int], int] = (8,),
slowfast_conv_channel_fusion_ratio: int = 2,
slowfast_fusion_conv_kernel_size: Tuple[int] = (
7,
1,
1,
), # deprecated, use fusion_builder
slowfast_fusion_conv_stride: Tuple[int] = (
4,
1,
1,
), # deprecated, use fusion_builder
fusion_builder: Callable[
[int, int], nn.Module
] = None, # Args: fusion_dim_in, stage_idx
# Input clip configs.
input_channels: Tuple[int] = (3, 3),
# Model configs.
model_depth: int = 50,
model_num_class: int = 400,
dropout_rate: float = 0.5,
# Normalization configs.
norm: Callable = nn.BatchNorm3d,
# Activation configs.
activation: Callable = nn.ReLU,
# Stem configs.
stem_function: Tuple[Callable] = (
create_res_basic_stem,
create_res_basic_stem,
),
stem_dim_outs: Tuple[int] = (64, 8),
stem_conv_kernel_sizes: Tuple[Tuple[int]] = ((1, 7, 7), (5, 7, 7)),
stem_conv_strides: Tuple[Tuple[int]] = ((1, 2, 2), (1, 2, 2)),
stem_pool: Union[Callable, Tuple[Callable]] = (nn.MaxPool3d, nn.MaxPool3d),
stem_pool_kernel_sizes: Tuple[Tuple[int]] = ((1, 3, 3), (1, 3, 3)),
stem_pool_strides: Tuple[Tuple[int]] = ((1, 2, 2), (1, 2, 2)),
# Stage configs.
stage_conv_a_kernel_sizes: Tuple[Tuple[Tuple[int]]] = (
((1, 1, 1), (1, 1, 1), (3, 1, 1), (3, 1, 1)),
((3, 1, 1), (3, 1, 1), (3, 1, 1), (3, 1, 1)),
),
stage_conv_b_kernel_sizes: Tuple[Tuple[Tuple[int]]] = (
((1, 3, 3), (1, 3, 3), (1, 3, 3), (1, 3, 3)),
((1, 3, 3), (1, 3, 3), (1, 3, 3), (1, 3, 3)),
),
stage_conv_b_num_groups: Tuple[Tuple[int]] = ((1, 1, 1, 1), (1, 1, 1, 1)),
stage_conv_b_dilations: Tuple[Tuple[Tuple[int]]] = (
((1, 1, 1), (1, 1, 1), (1, 1, 1), (1, 1, 1)),
((1, 1, 1), (1, 1, 1), (1, 1, 1), (1, 1, 1)),
),
stage_spatial_strides: Tuple[Tuple[int]] = ((1, 2, 2, 2), (1, 2, 2, 2)),
stage_temporal_strides: Tuple[Tuple[int]] = ((1, 1, 1, 1), (1, 1, 1, 1)),
bottleneck: Union[Callable, Tuple[Tuple[Callable]]] = (
(
create_bottleneck_block,
create_bottleneck_block,
create_bottleneck_block,
create_bottleneck_block,
),
(
create_bottleneck_block,
create_bottleneck_block,
create_bottleneck_block,
create_bottleneck_block,
),
),
# Head configs.
head_pool: Callable = nn.AvgPool3d,
head_pool_kernel_sizes: Tuple[Tuple[int]] = ((8, 7, 7), (32, 7, 7)),
head_output_size: Tuple[int] = (1, 1, 1),
head_activation: Callable = None,
head_output_with_global_average: bool = True,
) -> nn.Module:
"""
Build SlowFast model for video recognition, SlowFast model involves a Slow pathway,
operating at low frame rate, to capture spatial semantics, and a Fast pathway,
operating at high frame rate, to capture motion at fine temporal resolution. The
Fast pathway can be made very lightweight by reducing its channel capacity, yet can
learn useful temporal information for video recognition. Details can be found from
the paper:
Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He.
"SlowFast networks for video recognition."
https://arxiv.org/pdf/1812.03982.pdf
::
Slow Input Fast Input
↓ ↓
Stem Stem
↓ ⭠ Fusion- ↓
Stage 1 Stage 1
↓ ⭠ Fusion- ↓
. .
↓ ↓
Stage N Stage N
↓ ⭠ Fusion- ↓
↓
Head
Args:
slowfast_channel_reduction_ratio (int): Corresponds to the inverse of the channel
reduction ratio, $\beta$ between the Slow and Fast pathways.
slowfast_conv_channel_fusion_ratio (int): Ratio of channel dimensions
between the Slow and Fast pathways.
DEPRECATED slowfast_fusion_conv_kernel_size (tuple): the convolutional kernel
size used for fusion.
DEPRECATED slowfast_fusion_conv_stride (tuple): the convolutional stride size
used for fusion.
fusion_builder (Callable[[int, int], nn.Module]): Builder function for generating
the fusion modules based on stage dimension and index
input_channels (tuple): number of channels for the input video clip.
model_depth (int): the depth of the resnet.
model_num_class (int): the number of classes for the video dataset.
dropout_rate (float): dropout rate.
norm (callable): a callable that constructs normalization layer.
activation (callable): a callable that constructs activation layer.
stem_function (Tuple[Callable]): a callable that constructs stem layer.
Examples include create_res_basic_stem. Indexed by pathway
stem_dim_outs (tuple): output channel size to stem.
stem_conv_kernel_sizes (tuple): convolutional kernel size(s) of stem.
stem_conv_strides (tuple): convolutional stride size(s) of stem.
stem_pool (Tuple[Callable]): a callable that constructs resnet head pooling layer.
Indexed by pathway
stem_pool_kernel_sizes (tuple): pooling kernel size(s).
stem_pool_strides (tuple): pooling stride size(s).
stage_conv_a_kernel_sizes (tuple): convolutional kernel size(s) for conv_a.
stage_conv_b_kernel_sizes (tuple): convolutional kernel size(s) for conv_b.
stage_conv_b_num_groups (tuple): number of groups for groupwise convolution
for conv_b. 1 for ResNet, and larger than 1 for ResNeXt.
stage_conv_b_dilations (tuple): dilation for 3D convolution for conv_b.
stage_spatial_strides (tuple): the spatial stride for each stage.
stage_temporal_strides (tuple): the temporal stride for each stage.
bottleneck (Tuple[Tuple[Callable]]): a callable that constructs bottleneck
block layer. Examples include: create_bottleneck_block.
Indexed by pathway and stage index
head_pool (callable): a callable that constructs resnet head pooling layer.
head_output_sizes (tuple): the size of output tensor for head.
head_activation (callable): a callable that constructs activation layer.
head_output_with_global_average (bool): if True, perform global averaging on
the head output.
Returns:
(nn.Module): SlowFast model.
"""
torch._C._log_api_usage_once("PYTORCHVIDEO.model.create_slowfast")
# Number of blocks for different stages given the model depth.
_num_pathway = len(input_channels)
_MODEL_STAGE_DEPTH = {
18: (1, 1, 1, 1),
50: (3, 4, 6, 3),
101: (3, 4, 23, 3),
152: (3, 8, 36, 3),
}
assert (
model_depth in _MODEL_STAGE_DEPTH.keys()
), f"{model_depth} is not in {_MODEL_STAGE_DEPTH.keys()}"
stage_depths = _MODEL_STAGE_DEPTH[model_depth]
# Fix up inputs
if isinstance(slowfast_channel_reduction_ratio, int):
slowfast_channel_reduction_ratio = (slowfast_channel_reduction_ratio,)
if isinstance(stem_pool, Callable):
stem_pool = (stem_pool,) * _num_pathway
if isinstance(bottleneck, Callable):
bottleneck = (bottleneck,) * len(stage_depths)
bottleneck = (bottleneck,) * _num_pathway
if fusion_builder is None:
fusion_builder = FastToSlowFusionBuilder(
slowfast_channel_reduction_ratio=slowfast_channel_reduction_ratio[0],
conv_fusion_channel_ratio=slowfast_conv_channel_fusion_ratio,
conv_kernel_size=slowfast_fusion_conv_kernel_size,
conv_stride=slowfast_fusion_conv_stride,
norm=norm,
activation=activation,
max_stage_idx=len(stage_depths) - 1,
).create_module
# Build stem blocks.
stems = []
for pathway_idx in range(_num_pathway):
stems.append(
stem_function[pathway_idx](
in_channels=input_channels[pathway_idx],
out_channels=stem_dim_outs[pathway_idx],
conv_kernel_size=stem_conv_kernel_sizes[pathway_idx],
conv_stride=stem_conv_strides[pathway_idx],
conv_padding=[
size // 2 for size in stem_conv_kernel_sizes[pathway_idx]
],
pool=stem_pool[pathway_idx],
pool_kernel_size=stem_pool_kernel_sizes[pathway_idx],
pool_stride=stem_pool_strides[pathway_idx],
pool_padding=[
size // 2 for size in stem_pool_kernel_sizes[pathway_idx]
],
norm=norm,
activation=activation,
)
)
stages = []
stages.append(
MultiPathWayWithFuse(
multipathway_blocks=nn.ModuleList(stems),
multipathway_fusion=fusion_builder(
fusion_dim_in=stem_dim_outs[0],
stage_idx=0,
),
)
)
# Build stages blocks.
stage_dim_in = stem_dim_outs[0]
stage_dim_out = stage_dim_in * 4
for idx in range(len(stage_depths)):
pathway_stage_dim_in = [
stage_dim_in
+ stage_dim_in
* slowfast_conv_channel_fusion_ratio
// slowfast_channel_reduction_ratio[0],
]
pathway_stage_dim_inner = [
stage_dim_out // 4,
]
pathway_stage_dim_out = [
stage_dim_out,
]
for reduction_ratio in slowfast_channel_reduction_ratio:
pathway_stage_dim_in = pathway_stage_dim_in + [
stage_dim_in // reduction_ratio
]
pathway_stage_dim_inner = pathway_stage_dim_inner + [
stage_dim_out // 4 // reduction_ratio
]
pathway_stage_dim_out = pathway_stage_dim_out + [
stage_dim_out // reduction_ratio
]
stage = []
for pathway_idx in range(_num_pathway):
depth = stage_depths[idx]
stage_conv_a_stride = (stage_temporal_strides[pathway_idx][idx], 1, 1)
stage_conv_b_stride = (
1,
stage_spatial_strides[pathway_idx][idx],
stage_spatial_strides[pathway_idx][idx],
)
stage.append(
create_res_stage(
depth=depth,
dim_in=pathway_stage_dim_in[pathway_idx],
dim_inner=pathway_stage_dim_inner[pathway_idx],
dim_out=pathway_stage_dim_out[pathway_idx],
bottleneck=bottleneck[pathway_idx][idx],
conv_a_kernel_size=stage_conv_a_kernel_sizes[pathway_idx][idx],
conv_a_stride=stage_conv_a_stride,
conv_a_padding=[
size // 2
for size in stage_conv_a_kernel_sizes[pathway_idx][idx]
],
conv_b_kernel_size=stage_conv_b_kernel_sizes[pathway_idx][idx],
conv_b_stride=stage_conv_b_stride,
conv_b_padding=[
size // 2
for size in stage_conv_b_kernel_sizes[pathway_idx][idx]
],
conv_b_num_groups=stage_conv_b_num_groups[pathway_idx][idx],
conv_b_dilation=stage_conv_b_dilations[pathway_idx][idx],
norm=norm,
activation=activation,
)
)
stages.append(
MultiPathWayWithFuse(
multipathway_blocks=nn.ModuleList(stage),
multipathway_fusion=fusion_builder(
fusion_dim_in=stage_dim_out,
stage_idx=idx + 1,
),
)
)
stage_dim_in = stage_dim_out
stage_dim_out = stage_dim_out * 2
if head_pool is None:
pool_model = None
elif head_pool == nn.AdaptiveAvgPool3d:
pool_model = [head_pool(head_output_size[idx]) for idx in range(_num_pathway)]
elif head_pool == nn.AvgPool3d:
pool_model = [
head_pool(
kernel_size=head_pool_kernel_sizes[idx],
stride=(1, 1, 1),
padding=(0, 0, 0),
)
for idx in range(_num_pathway)
]
else:
raise NotImplementedError(f"Unsupported pool_model type {pool_model}")
stages.append(PoolConcatPathway(retain_list=False, pool=nn.ModuleList(pool_model)))
head_in_features = stage_dim_in
for reduction_ratio in slowfast_channel_reduction_ratio:
head_in_features = head_in_features + stage_dim_in // reduction_ratio
stages.append(
create_res_basic_head(
in_features=head_in_features,
out_features=model_num_class,
pool=None,
output_size=head_output_size,
dropout_rate=dropout_rate,
activation=head_activation,
output_with_global_average=head_output_with_global_average,
)
)
return Net(blocks=nn.ModuleList(stages))
# TODO: move to pytorchvideo/layer once we have a common.py
class PoolConcatPathway(nn.Module):
"""
Given a list of tensors, perform optional spatio-temporal pool and concatenate the
tensors along the channel dimension.
"""
def __init__(
self,
retain_list: bool = False,
pool: Optional[nn.ModuleList] = None,
dim: int = 1,
) -> None:
"""
Args:
retain_list (bool): if True, return the concatenated tensor in a list.
pool (nn.module_list): if not None, list of pooling models for different
pathway before performing concatenation.
dim (int): dimension to performance concatenation.
"""
super().__init__()
set_attributes(self, locals())
def forward(self, x: List[torch.Tensor]) -> torch.Tensor:
if self.pool is not None:
assert len(x) == len(self.pool)
output = []
for ind in range(len(x)):
if x[ind] is not None:
if self.pool is not None and self.pool[ind] is not None:
x[ind] = self.pool[ind](x[ind])
output.append(x[ind])
if self.retain_list:
return [torch.cat(output, 1)]
else:
return torch.cat(output, 1)
class FastToSlowFusionBuilder:
def __init__(
self,
slowfast_channel_reduction_ratio: int,
conv_fusion_channel_ratio: float,
conv_kernel_size: Tuple[int],
conv_stride: Tuple[int],
norm: Callable = nn.BatchNorm3d,
norm_eps: float = 1e-5,
norm_momentum: float = 0.1,
activation: Callable = nn.ReLU,
max_stage_idx: int = 3,
) -> None:
"""
Given a list of two tensors from Slow pathway and Fast pathway, fusion information
from the Fast pathway to the Slow on through a convolution followed by a
concatenation, then return the fused list of tensors from Slow and Fast pathway in
order.
Args:
slowfast_channel_reduction_ratio (int): Reduction ratio from the stage dimension.
Used to compute conv_dim_in = fusion_dim_in // slowfast_channel_reduction_ratio
conv_fusion_channel_ratio (int): channel ratio for the convolution used to fuse
from Fast pathway to Slow pathway.
conv_kernel_size (int): kernel size of the convolution used to fuse from Fast
pathway to Slow pathway.
conv_stride (int): stride size of the convolution used to fuse from Fast pathway
to Slow pathway.
norm (callable): a callable that constructs normalization layer, examples
include nn.BatchNorm3d, None (not performing normalization).
norm_eps (float): normalization epsilon.
norm_momentum (float): normalization momentum.
activation (callable): a callable that constructs activation layer, examples
include: nn.ReLU, nn.Softmax, nn.Sigmoid, and None (not performing
activation).
max_stage_idx (int): Returns identity module if we exceed this
"""
set_attributes(self, locals())
def create_module(self, fusion_dim_in: int, stage_idx: int) -> nn.Module:
"""
Creates the module for the given stage
Args:
fusion_dim_in (int): input stage dimension
stage_idx (int): which stage this is
"""
if stage_idx > self.max_stage_idx:
return nn.Identity()
conv_dim_in = fusion_dim_in // self.slowfast_channel_reduction_ratio
conv_fast_to_slow = nn.Conv3d(
conv_dim_in,
int(conv_dim_in * self.conv_fusion_channel_ratio),
kernel_size=self.conv_kernel_size,
stride=self.conv_stride,
padding=[k_size // 2 for k_size in self.conv_kernel_size],
bias=False,
)
norm_module = (
None
if self.norm is None
else self.norm(
num_features=conv_dim_in * self.conv_fusion_channel_ratio,
eps=self.norm_eps,
momentum=self.norm_momentum,
)
)
activation_module = None if self.activation is None else self.activation()
return FuseFastToSlow(
conv_fast_to_slow=conv_fast_to_slow,
norm=norm_module,
activation=activation_module,
)
class FuseFastToSlow(nn.Module):
"""
Given a list of two tensors from Slow pathway and Fast pathway, fusion information
from the Fast pathway to the Slow on through a convolution followed by a
concatenation, then return the fused list of tensors from Slow and Fast pathway in
order.
"""
def __init__(
self,
conv_fast_to_slow: nn.Module,
norm: Optional[nn.Module] = None,
activation: Optional[nn.Module] = None,
) -> None:
"""
Args:
conv_fast_to_slow (nn.module): convolution to perform fusion.
norm (nn.module): normalization module.
activation (torch.nn.modules): activation module.
"""
super().__init__()
set_attributes(self, locals())
def forward(self, x):
x_s = x[0]
x_f = x[1]
fuse = self.conv_fast_to_slow(x_f)
if self.norm is not None:
fuse = self.norm(fuse)
if self.activation is not None:
fuse = self.activation(fuse)
x_s_fuse = torch.cat([x_s, fuse], 1)
return [x_s_fuse, x_f]
| 40.157143 | 95 | 0.601921 |
acf49588b3539b2323a92b6cfbcbe4c999754afb | 7,119 | py | Python | tests/Unit/IO/Test_VolumeData.py | Ambrou/spectre | a819ebbcca607d8af9683db3683bea14bf4ac23c | [
"MIT"
] | null | null | null | tests/Unit/IO/Test_VolumeData.py | Ambrou/spectre | a819ebbcca607d8af9683db3683bea14bf4ac23c | [
"MIT"
] | null | null | null | tests/Unit/IO/Test_VolumeData.py | Ambrou/spectre | a819ebbcca607d8af9683db3683bea14bf4ac23c | [
"MIT"
] | null | null | null | # Distributed under the MIT License.
# See LICENSE.txt for details.
from spectre import DataStructures as ds
from spectre.Spectral import Basis, Quadrature
import spectre.IO.H5 as spectre_h5
from spectre import Informer
import unittest
import numpy as np
import os
import numpy.testing as npt
class TestVolumeDataWriting(unittest.TestCase):
# Test Fixtures
def setUp(self):
# The tests in this class involve inserting vol files, the h5 file
# will be deleted and recreated for each test
self.file_name = os.path.join(Informer.unit_test_path(),
"IO/TestVolumeDataWriting.h5")
if os.path.isfile(self.file_name):
os.remove(self.file_name)
self.h5_file = spectre_h5.H5File(file_name=self.file_name,
append_to_file=True)
def tearDown(self):
self.h5_file.close()
if os.path.isfile(self.file_name):
os.remove(self.file_name)
# Testing the VolumeData Insert Function
def test_insert_vol(self):
self.h5_file.insert_vol(path="/element_data", version=0)
vol_file = self.h5_file.get_vol(path="/element_data")
self.assertEqual(vol_file.get_version(), 0)
# Test the header was generated correctly
def test_vol_get_header(self):
self.h5_file.insert_vol(path="/element_data", version=0)
vol_file = self.h5_file.get_vol(path="/element_data")
self.assertEqual(vol_file.get_header()[0:20], "#\n# File created on ")
class TestVolumeData(unittest.TestCase):
# Test Fixtures
def setUp(self):
# The tests in this class use a volume data file written using
# the write_volume_data() function
self.file_name = os.path.join(Informer.unit_test_path(),
"IO/TestVolumeData.h5")
if os.path.isfile(self.file_name):
os.remove(self.file_name)
self.h5_file = spectre_h5.H5File(file_name=self.file_name,
append_to_file=True)
self.tensor_component_data = np.random.rand(4, 8)
observation_ids = [0, 1]
observation_values = {0: 7.0, 1: 1.3}
grid_names = ["grid_1", "grid_2"]
basis = Basis.Legendre
quad = Quadrature.Gauss
# Insert .vol file to h5 file
self.h5_file.insert_vol("/element_data", version=0)
self.vol_file = self.h5_file.get_vol(path="/element_data")
# Set TensorComponent and ExtentsAndTensorVolumeData to
# be written
element_vol_data_grid_1 = [
ds.ElementVolumeData([2, 2, 2], [
ds.TensorComponent(
grid_names[0] + "/field_1",
ds.DataVector(self.tensor_component_data[2 * i])),
ds.TensorComponent(
grid_names[0] + "/field_2",
ds.DataVector(self.tensor_component_data[2 * i + 1]))
], [basis, basis, basis], [quad, quad, quad])
for i, observation_id in enumerate(observation_ids)
]
element_vol_data_grid_2 = [
ds.ElementVolumeData([2, 2, 2], [
ds.TensorComponent(
grid_names[1] + "/field_1",
ds.DataVector(self.tensor_component_data[2 * i + 1])),
ds.TensorComponent(
grid_names[1] + "/field_2",
ds.DataVector(self.tensor_component_data[2 * i]))
], [basis, basis, basis], [quad, quad, quad])
for i, observation_id in enumerate(observation_ids)
]
# Write extents and tensor volume data to volfile
for i, observation_id in enumerate(observation_ids):
self.vol_file.write_volume_data(
observation_id, observation_values[observation_id],
[element_vol_data_grid_1[i], element_vol_data_grid_2[i]])
def tearDown(self):
self.h5_file.close()
if os.path.isfile(self.file_name):
os.remove(self.file_name)
# Test that observation ids and values are retrieved correctly
def test_observation_id(self):
# Test observation Ids
obs_ids = set(self.vol_file.list_observation_ids())
expected_obs_ids = set([0, 1])
self.assertEqual(obs_ids, expected_obs_ids)
# Test observation values
expected_obs_values = {0: 7.0, 1: 1.3}
for obs_id in expected_obs_ids:
self.assertEqual(
self.vol_file.get_observation_value(observation_id=obs_id),
expected_obs_values[obs_id])
# Test to make sure information about the computation elements was found
def test_grids(self):
obs_id = self.vol_file.list_observation_ids()[0]
# Test grid names
grid_names = self.vol_file.get_grid_names(observation_id=obs_id)
expected_grid_names = ["grid_1", "grid_2"]
self.assertEqual(grid_names, expected_grid_names)
# Test extents
extents = self.vol_file.get_extents(observation_id=obs_id)
expected_extents = [[2, 2, 2], [2, 2, 2]]
self.assertEqual(extents, expected_extents)
bases = self.vol_file.get_bases(obs_id)
expected_bases = [["Legendre", "Legendre", "Legendre"],
["Legendre", "Legendre", "Legendre"]]
self.assertEqual(bases, expected_bases)
quadratures = self.vol_file.get_quadratures(obs_id)
expected_quadratures = [["Gauss", "Gauss", "Gauss"],
["Gauss", "Gauss", "Gauss"]]
# Test that the tensor components, and tensor data are retrieved correctly
def test_tensor_components(self):
obs_id = 0
# Test tensor component names
tensor_component_names = set(
self.vol_file.list_tensor_components(observation_id=obs_id))
expected_tensor_component_names = ['field_1', 'field_2']
self.assertEqual(tensor_component_names,
set(expected_tensor_component_names))
# Test tensor component data at specified obs_id
for i, expected_tensor_component_data in\
enumerate(self.tensor_component_data[:2]):
npt.assert_almost_equal(
np.asarray(
self.vol_file.get_tensor_component(
observation_id=obs_id,
tensor_component=expected_tensor_component_names[i]))
[0:8], expected_tensor_component_data)
# Test that the offset and length for certain grid is retrieved correctly
def test_offset_and_length_for_grid(self):
obs_id = self.vol_file.list_observation_ids()[0]
all_grid_names = self.vol_file.get_grid_names(observation_id=obs_id)
all_extents = self.vol_file.get_extents(observation_id=obs_id)
self.assertEqual(
spectre_h5.offset_and_length_for_grid(
grid_name='grid_1',
all_grid_names=all_grid_names,
all_extents=all_extents), (0, 8))
if __name__ == '__main__':
unittest.main(verbosity=2)
| 41.389535 | 79 | 0.62284 |
acf4959fc080fc0131545621140035070bdf7020 | 6,711 | py | Python | django/core/signing.py | kkoralsky/django | 924af638e4d4fb8eb46a19ac0cafcb2e83480cf3 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | django/core/signing.py | kkoralsky/django | 924af638e4d4fb8eb46a19ac0cafcb2e83480cf3 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | django/core/signing.py | kkoralsky/django | 924af638e4d4fb8eb46a19ac0cafcb2e83480cf3 | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2020-02-06T10:31:51.000Z | 2020-02-06T10:31:51.000Z | """
Functions for creating and restoring url-safe signed JSON objects.
The format used looks like this:
>>> signing.dumps("hello")
'ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk'
There are two components here, separated by a ':'. The first component is a
URLsafe base64 encoded JSON of the object passed to dumps(). The second
component is a base64 encoded hmac/SHA1 hash of "$first_component:$secret"
signing.loads(s) checks the signature and returns the deserialized object.
If the signature fails, a BadSignature exception is raised.
>>> signing.loads("ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk")
'hello'
>>> signing.loads("ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk-modified")
...
BadSignature: Signature failed: ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk-modified
You can optionally compress the JSON prior to base64 encoding it to save
space, using the compress=True argument. This checks if compression actually
helps and only applies compression if the result is a shorter string:
>>> signing.dumps(range(1, 20), compress=True)
'.eJwFwcERACAIwLCF-rCiILN47r-GyZVJsNgkxaFxoDgxcOHGxMKD_T7vhAml:1QaUaL:BA0thEZrp4FQVXIXuOvYJtLJSrQ'
The fact that the string is compressed is signalled by the prefixed '.' at the
start of the base64 JSON.
There are 65 url-safe characters: the 64 used by url-safe base64 and the ':'.
These functions make use of all of them.
"""
import base64
import datetime
import json
import re
import time
import zlib
from django.conf import settings
from django.utils import baseconv
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils.encoding import force_bytes, force_text
from django.utils.module_loading import import_string
_SEP_UNSAFE = re.compile(r'^[A-z0-9-_=]*$')
class BadSignature(Exception):
"""
Signature does not match
"""
pass
class SignatureExpired(BadSignature):
"""
Signature timestamp is older than required max_age
"""
pass
def b64_encode(s):
return base64.urlsafe_b64encode(s).strip(b'=')
def b64_decode(s):
pad = b'=' * (-len(s) % 4)
return base64.urlsafe_b64decode(s + pad)
def base64_hmac(salt, value, key):
return b64_encode(salted_hmac(salt, value, key).digest())
def get_cookie_signer(salt='django.core.signing.get_cookie_signer'):
Signer = import_string(settings.SIGNING_BACKEND)
key = force_bytes(settings.SECRET_KEY)
return Signer(b'django.http.cookies' + key, salt=salt)
class JSONSerializer:
"""
Simple wrapper around json to be used in signing.dumps and
signing.loads.
"""
def dumps(self, obj):
return json.dumps(obj, separators=(',', ':')).encode('latin-1')
def loads(self, data):
return json.loads(data.decode('latin-1'))
def dumps(obj, key=None, salt='django.core.signing', serializer=JSONSerializer, compress=False):
"""
Returns URL-safe, sha1 signed base64 compressed JSON string. If key is
None, settings.SECRET_KEY is used instead.
If compress is True (not the default) checks if compressing using zlib can
save some space. Prepends a '.' to signify compression. This is included
in the signature, to protect against zip bombs.
Salt can be used to namespace the hash, so that a signed string is
only valid for a given namespace. Leaving this at the default
value or re-using a salt value across different parts of your
application without good cause is a security risk.
The serializer is expected to return a bytestring.
"""
data = serializer().dumps(obj)
# Flag for if it's been compressed or not
is_compressed = False
if compress:
# Avoid zlib dependency unless compress is being used
compressed = zlib.compress(data)
if len(compressed) < (len(data) - 1):
data = compressed
is_compressed = True
base64d = b64_encode(data)
if is_compressed:
base64d = b'.' + base64d
return TimestampSigner(key, salt=salt).sign(base64d)
def loads(s, key=None, salt='django.core.signing', serializer=JSONSerializer, max_age=None):
"""
Reverse of dumps(), raises BadSignature if signature fails.
The serializer is expected to accept a bytestring.
"""
# TimestampSigner.unsign() returns str but base64 and zlib compression
# operate on bytes.
base64d = force_bytes(TimestampSigner(key, salt=salt).unsign(s, max_age=max_age))
decompress = False
if base64d[:1] == b'.':
# It's compressed; uncompress it first
base64d = base64d[1:]
decompress = True
data = b64_decode(base64d)
if decompress:
data = zlib.decompress(data)
return serializer().loads(data)
class Signer:
def __init__(self, key=None, sep=':', salt=None):
# Use of native strings in all versions of Python
self.key = key or settings.SECRET_KEY
self.sep = sep
if _SEP_UNSAFE.match(self.sep):
raise ValueError(
'Unsafe Signer separator: %r (cannot be empty or consist of '
'only A-z0-9-_=)' % sep,
)
self.salt = salt or '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
def signature(self, value):
return force_text(base64_hmac(self.salt + 'signer', value, self.key))
def sign(self, value):
return '%s%s%s' % (value, self.sep, self.signature(value))
def unsign(self, signed_value):
if self.sep not in signed_value:
raise BadSignature('No "%s" found in value' % self.sep)
value, sig = signed_value.rsplit(self.sep, 1)
if constant_time_compare(sig, self.signature(value)):
return force_text(value)
raise BadSignature('Signature "%s" does not match' % sig)
class TimestampSigner(Signer):
def timestamp(self):
return baseconv.base62.encode(int(time.time()))
def sign(self, value):
value = '%s%s%s' % (force_text(value), self.sep, self.timestamp())
return super().sign(value)
def unsign(self, value, max_age=None):
"""
Retrieve original value and check it wasn't signed more
than max_age seconds ago.
"""
result = super().unsign(value)
value, timestamp = result.rsplit(self.sep, 1)
timestamp = baseconv.base62.decode(timestamp)
if max_age is not None:
if isinstance(max_age, datetime.timedelta):
max_age = max_age.total_seconds()
# Check timestamp is not older than max_age
age = time.time() - timestamp
if age > max_age:
raise SignatureExpired(
'Signature age %s > %s seconds' % (age, max_age))
return value
| 32.897059 | 98 | 0.683356 |
acf4962a8c3bfc1eb4342042d794694dd131ce6f | 20 | py | Python | elliot/evaluation/metrics/accuracy/map/__init__.py | gategill/elliot | 113763ba6d595976e14ead2e3d460d9705cd882e | [
"Apache-2.0"
] | 175 | 2021-03-04T15:46:25.000Z | 2022-03-31T05:56:58.000Z | elliot/evaluation/metrics/accuracy/map/__init__.py | gategill/elliot | 113763ba6d595976e14ead2e3d460d9705cd882e | [
"Apache-2.0"
] | 15 | 2021-03-06T17:53:56.000Z | 2022-03-24T17:02:07.000Z | elliot/evaluation/metrics/accuracy/map/__init__.py | gategill/elliot | 113763ba6d595976e14ead2e3d460d9705cd882e | [
"Apache-2.0"
] | 39 | 2021-03-04T15:46:26.000Z | 2022-03-09T15:37:12.000Z | from .map import MAP | 20 | 20 | 0.8 |
acf49679e315752a611b48fb4e03c113bf1c255a | 3,363 | py | Python | text_cleaning_comparisson.py | asijit123/Python | 30050ab3aa7f89eb75e142bd5dfc9987861284a6 | [
"MIT"
] | null | null | null | text_cleaning_comparisson.py | asijit123/Python | 30050ab3aa7f89eb75e142bd5dfc9987861284a6 | [
"MIT"
] | 1 | 2020-10-01T14:14:31.000Z | 2020-10-01T14:14:58.000Z | text_cleaning_comparisson.py | asijit123/Python | 30050ab3aa7f89eb75e142bd5dfc9987861284a6 | [
"MIT"
] | 1 | 2020-10-01T14:10:27.000Z | 2020-10-01T14:10:27.000Z | #!/usr/bin/env python
# coding: utf-8
# In[63]:
from string import punctuation
from unidecode import unidecode
from time import process_time
from re import sub, compile
from nltk.corpus import stopwords, gutenberg
from nltk.tokenize import sent_tokenize
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
# Data for testing
emma = gutenberg.words('austen-emma.txt')
example_text = ' '.join(emma)
df = pd.DataFrame(data={'sentences': sent_tokenize(example_text)})
tokenizer = lambda s: clean_text(s).split()
vectorizer = CountVectorizer(encoding='ascii', decode_error='ignore',
strip_accents='ascii',
tokenizer=tokenizer, lowercase=False,
max_df=0.7,
min_df=0.0001
)
vectorizer.fit(df['sentences'])
STOP_WORDS = stopwords.words('english')
# Remove any charcater is non alphanumeric or space
pattern_cleaning = compile(r'[^\w\s]|\d')
pattern_stop_words = compile(r'\b(' + r'|'.join(STOP_WORDS) + r')\b\s*')
# First remove punctuation and numbers, then remove stop words
remove_punctuation_r = lambda s : sub(pattern_stop_words, '', sub(pattern_cleaning, '', s.lower()))
remove_short_words = lambda s : ' '.join(filter(lambda w: len(w) > 2, s.split()))
# Remove numbers, short words (one or two characters),
# punctuaction, non ascii characers and stop words
clean_text = lambda s: remove_short_words(remove_punctuation_r(s))
# Data cleaning functions
pattern_cleaning = compile(r'[^\w\s]|\d')
pattern_stop_words = compile(r'\b(' + r'|'.join(stopwords.words('english')) + r')\b\s*')
pattern_short_words = compile(r'\b[^\s]{0,2}\b')
exclude = punctuation
remove_punctuation_t = lambda s : unidecode(s).translate(str.maketrans('', '', exclude)).lower()
remove_punctuation_r = lambda s : sub(pattern_stop_words, '', sub(pattern_cleaning, '', s.lower()))
remove_stop_words = lambda s : ' '.join([word for word in s.split() if word not in STOP_WORDS])
remove_stop_words_2 = lambda s : sub(pattern_stop_words, '', s)
remove_stop_words_3 = lambda s : ' '.join(filter(lambda w: len(w) > 2 and not w in STOP_WORDS, s.split()))
remove_short_words = lambda s : ' '.join(filter(lambda w: len(w) > 2, s.split()))
remove_short_words_2 = lambda s : sub(pattern_stop_words, '', s)
clean_text_1 = lambda s: remove_short_words_2(remove_punctuation_r(s))
clean_text_2 = lambda s: remove_short_words(remove_punctuation_r(s))
clean_text_3 = lambda s: remove_stop_words(remove_short_words(remove_punctuation_t(s)))
clean_text_4 = lambda s: remove_stop_words_3(remove_punctuation_t(s))
clean_text_5 = lambda s: remove_stop_words_3(remove_punctuation_r(s))
# Comparing data cleaning ways
func = (clean_text_1,clean_text_2,clean_text_3,clean_text_4, clean_text_5)
title = ('Regex and unidecode, loop (short words)',
'Regex and unidecode, filter (short words)',
'Translate and unidecode, filter (short words) ,loops (stop words)',
'Translate and unidecode, filter (short words, stop words)',
'Regex, loop (short words, stop words)'
)
for f, t in zip(func, title):
print('*'*len(t))
print(t)
print('*'*len(t))
t0 = process_time()
print(df['sentences'].apply(f).head())
print(f'Time: {process_time()-t0}')
| 41.518519 | 110 | 0.690158 |
acf496a5ddc25b8c6c46e528b95f95d14132b955 | 33,238 | py | Python | plot_ratio.py | eugenelet/NeuralScale | 67ddedc19880d8e2e93304b07305e6ede8d76212 | [
"MIT"
] | 18 | 2020-04-11T14:24:06.000Z | 2022-03-11T08:04:48.000Z | plot_ratio.py | eugenelet/NeuralScale | 67ddedc19880d8e2e93304b07305e6ede8d76212 | [
"MIT"
] | null | null | null | plot_ratio.py | eugenelet/NeuralScale | 67ddedc19880d8e2e93304b07305e6ede8d76212 | [
"MIT"
] | 1 | 2020-07-14T14:19:45.000Z | 2020-07-14T14:19:45.000Z | '''
Plots accuracy of different methods under various parameter scale.
'''
import matplotlib as mpl
from matplotlib import pyplot as plt
import numpy as np
import pickle
import argparse
from utils import compute_params_
from model.VGG import vgg11, VGG
from model.preact_resnet import PreActResNet18, PreActResNet
from model.mobilenetv2 import MobileNetV2
from prune import pruner, prepare_pruning_list
import torchvision.transforms as transforms
import torchvision
import torch
import time
import os
parser = argparse.ArgumentParser(description='Plots accuracy of different methods under various parameter scale')
parser.add_argument('--dataset', default="CIFAR100", type=str,
help='dataset for experiment, choice: CIFAR10, CIFAR100, tinyimagenet', choices= ["CIFAR10", "CIFAR100", "tinyimagenet"])
parser.add_argument('--model', default="vgg", type=str,
help='model selection, choices: vgg, mobilenetv2, resnet18',
choices=["vgg", "mobilenetv2", "resnet18"])
parser.add_argument('--convcut', dest="convcut", action='store_true', default=False,
help='Show comparison with network that use convolutional layer for all shortcut layer (only for ResNet18 and MobileNetV2)')
parser.add_argument('--plot_normal', dest="plot_normal", action='store_true', default=False,
help='Plot using normal size without considering size constraints for publication')
args = parser.parse_args()
width = 496.85625 # textwidth of CVPR2020
colwidth = 237.13594 # columnwidth CVPR2020
def set_size(width, fraction=1):
""" Set aesthetic figure dimensions to avoid scaling in latex.
Parameters
----------
width: float
Width in pts
fraction: float
Fraction of the width which you wish the figure to occupy
Returns
-------
fig_dim: tuple
Dimensions of figure in inches
"""
# Width of figure
fig_width_pt = width * fraction
# Convert from pt to inches
inches_per_pt = 1 / 72.27
# Golden ratio to set aesthetic figure height
if args.convcut or args.plot_normal:
golden_ratio = (5**.5 - 0.5) / 2
else:
golden_ratio = (5**.5 - 0.85) / 2
# Figure width in inches
fig_width_in = fig_width_pt * inches_per_pt
# Figure height in inches
fig_height_in = fig_width_in * golden_ratio
fig_dim = (fig_width_in, fig_height_in)
return fig_dim
# Settings for plot fonts
if args.convcut or args.plot_normal:
nice_fonts = {
# Use LaTeX to write all text
"text.usetex": True,
# "font.family": "Times New Roman",
# Use 10pt font in plots, to match 10pt font in document
"axes.linewidth": 10 / 12.,
"lines.linewidth": 10 / 12.,
"lines.markersize": 30 / 12.,
"patch.linewidth": 10 / 12.,
"axes.labelsize": 10,
"font.size": 10,
# Make the legend/label fonts a little smaller
"legend.fontsize": 8,
"xtick.labelsize": 8,
"ytick.labelsize": 8,
}
else:
nice_fonts = {
# Use LaTeX to write all text
"text.usetex": True,
# "font.family": "Times New Roman",
# Use 10pt font in plots, to match 10pt font in document
"axes.linewidth": 4 / 12.,
"lines.linewidth": 4 / 12.,
"lines.markersize": 12 / 12.,
"patch.linewidth": 4 / 12.,
"axes.labelsize": 4,
"font.size": 4,
# Make the legend/label fonts a little smaller
"legend.fontsize": 3.5,
"xtick.labelsize": 3,
"ytick.labelsize": 3,
}
mpl.rcParams.update(nice_fonts)
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = ['Times New Roman'] + plt.rcParams['font.serif']
if args.dataset=="CIFAR100":
num_classes = 100
elif args.dataset=="tinyimagenet":
num_classes = 200
elif args.dataset=="CIFAR10":
num_classes = 10
if(args.dataset == "CIFAR10"):
print("Using Cifar10 Dataset")
normalize = transforms.Normalize(mean=[x/255.0 for x in [125.3, 123.0, 113.9]],
std=[x/255.0 for x in [63.0, 62.1, 66.7]])
transform_test = transforms.Compose([
transforms.ToTensor(),
normalize,
])
testset = torchvision.datasets.CIFAR10(root='/DATA/data_cifar10/', train=False,
download=True, transform=transform_test)
test_loader = torch.utils.data.DataLoader(testset, batch_size=100,
shuffle=True)
elif args.dataset == "CIFAR100":
print("Using Cifar100 Dataset")
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
transform_test = transforms.Compose([
transforms.ToTensor(),
normalize,
])
testset = torchvision.datasets.CIFAR100(root='/DATA/data_cifar100/', train=False,
download=True, transform=transform_test)
test_loader = torch.utils.data.DataLoader(testset, batch_size=100,
shuffle=False)
elif args.dataset == "tinyimagenet":
print("Using tiny-Imagenet Dataset")
valdir = os.path.join("/DATA/tiny-imagenet-200", 'test')
normalize = transforms.Normalize([0.4802, 0.4481, 0.3975], [0.2302, 0.2265, 0.2262])
kwargs = {'num_workers': 16}
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.ImageFolder(valdir, transforms.Compose([
transforms.ToTensor(),
normalize,
])),
batch_size=100, shuffle=False, pin_memory=True)
else:
print("Dataset does not exist! [Imagenet]")
exit()
def compute_latency(model):
    """Return the mean single-batch inference latency of `model` in ms.

    Runs 100 forward passes on one fixed batch taken from the module-level
    `test_loader`; the first 21 iterations are discarded as GPU warm-up.
    The model and batch are moved to CUDA, so a GPU is required.
    """
    latency = list()
    model = model.cuda()
    model.eval()
    last_time = time.time()
    # One fixed batch is reused for every timed forward pass; the target is
    # not needed for timing.
    data, target = next(test_loader.__iter__())
    data = data.cuda()
    for idx in range(100):
        with torch.no_grad():
            _ = model(data)
        cur_time = time.time()
        if idx > 20: # allow 20 runs for GPU to warm-up
            latency.append(cur_time - last_time)
        last_time = cur_time
    # Drop references and flush the CUDA cache so successive measurements of
    # different models do not interfere with each other.
    del model
    del data
    torch.cuda.empty_cache()
    return np.mean(latency) * 1000  # seconds -> milliseconds
# Accumulators filled per scaling ratio below: parameter counts and measured
# latencies for each architecture-scaling method.
#   low/high   - NeuralScale at descent iteration 1 / 15
#   convcut    - uniform scaling with convolution shortcuts
#   uni        - uniform (baseline) scaling
#   prune      - MorphNet (Taylor-FO) pruned configuration
param_low = []
param_high = []
param_convcut = []
param_uni = []
param_prune = []
latency_low = []
latency_high = []
latency_convcut = []
latency_uni = []
latency_prune = []
if args.model=="vgg":
config = [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']
if args.dataset == "CIFAR10":
config_prune = [64, 128, 249, 253, 268, 175, 87, 152] # VGG C10
elif args.dataset == "CIFAR100":
config_prune = [63, 125, 204, 215, 234, 174, 120, 241] # VGG C100
ratios = np.arange(0.25,2.1,0.25) # [0.25, 0.5 , 0.75, 1, 1.25, 1.5 , 1.75, 2]
for ratio in ratios:
# uniform
new_config = VGG.prepare_filters(VGG, config, ratio=ratio, neuralscale=False, num_classes=num_classes)
model = vgg11(config=new_config, num_classes=num_classes)
latency = compute_latency(model)
params = compute_params_(model)
param_uni.append(params)
latency_uni.append(latency)
# pruned
new_config = VGG.prepare_filters(VGG, config, ratio=ratio, neuralscale=False, num_classes=num_classes, pruned_filters=config_prune)
model = vgg11(config=new_config, num_classes=num_classes)
latency = compute_latency(model)
params = compute_params_(model)
param_prune.append(params)
latency_prune.append(latency)
## efficient
if args.dataset == "CIFAR100":
fname = "vgg2_10_eff_c100"
# fname = "vgg_eff_c100"
fname_convcut = "vgg100_c100_01"
elif args.dataset == "CIFAR10":
fname = "vgg_10_eff_c10"
fname_convcut = "vgg_c10_01"
# low
new_config = VGG.prepare_filters(VGG, config, ratio=ratio, neuralscale=True, descent_idx=0, prune_fname=fname, num_classes=num_classes)
model = vgg11(ratio=ratio, neuralscale=True, num_classes=num_classes, prune_fname=fname, descent_idx=0)
latency = compute_latency(model)
params = compute_params_(model)
param_low.append(params)
latency_low.append(latency)
# high
new_config = VGG.prepare_filters(VGG, config, ratio=ratio, neuralscale=True, descent_idx=14, prune_fname=fname, num_classes=num_classes)
model = vgg11(ratio=ratio, neuralscale=True, num_classes=num_classes, prune_fname=fname, descent_idx=14)
latency = compute_latency(model)
params = compute_params_(model)
param_high.append(params)
latency_high.append(latency)
elif args.model == "resnet18":
filters = [[64],[64,64],[64,64],[128,128],[128,128],[256,256],[256,256],[512,512],[512,512]]
if args.dataset == "CIFAR100":
filters_prune = [48, 46, 40, 41, 54, 91, 75, 73, 95, 157, 149, 149, 156, 232, 216, 140, 190] # resnet18 c100
ratios = np.arange(0.25,2.1,0.25) # [0.25, 0.5 , 0.75, 1, 1.25, 1.5 , 1.75, 2]
elif args.dataset == "tinyimagenet":
filters_prune = [82,90,78,80,96,180,104,96,194,312,182,178,376,546,562,454,294]
ratios = [0.25, 0.5, 0.75, 1.0]
for ratio in ratios:
# convcut
new_config = PreActResNet.prepare_filters(PreActResNet, filters, ratio=ratio, neuralscale=False, num_classes=num_classes)
model = PreActResNet18(filters=new_config, num_classes=num_classes, dataset=args.dataset, convcut=True)
latency = compute_latency(model)
params = compute_params_(model)
param_convcut.append(params)
latency_convcut.append(latency)
# uniform
new_config = PreActResNet.prepare_filters(PreActResNet, filters, ratio=ratio, neuralscale=False, num_classes=num_classes)
model = PreActResNet18(filters=new_config, num_classes=num_classes, dataset=args.dataset)
latency = compute_latency(model)
params = compute_params_(model)
param_uni.append(params)
latency_uni.append(latency)
# pruned
new_config = PreActResNet.prepare_filters(PreActResNet, filters, ratio=ratio, neuralscale=False, num_classes=num_classes, pruned_filters=filters_prune)
model = PreActResNet18(filters=new_config, num_classes=num_classes, dataset=args.dataset)
latency = compute_latency(model)
params = compute_params_(model)
param_prune.append(params)
latency_prune.append(latency)
## efficient
if args.dataset == "CIFAR100":
fname = "resnet18_10_eff_c100"
elif args.dataset == "tinyimagenet":
fname = "resnet18_10_eff_tinyimagenet"
# low
new_config = PreActResNet.prepare_filters(PreActResNet, filters, ratio=ratio, neuralscale=True, descent_idx=0, prune_fname=fname, num_classes=num_classes)
model = PreActResNet18(ratio=ratio, neuralscale=True, num_classes=num_classes, prune_fname=fname, descent_idx=0)
latency = compute_latency(model)
params = compute_params_(model)
param_low.append(params)
latency_low.append(latency)
# high
new_config = PreActResNet.prepare_filters(PreActResNet, filters, ratio=ratio, neuralscale=True, descent_idx=14, prune_fname=fname, num_classes=num_classes)
model = PreActResNet18(ratio=ratio, neuralscale=True, num_classes=num_classes, prune_fname=fname, descent_idx=14)
latency = compute_latency(model)
params = compute_params_(model)
param_high.append(params)
latency_high.append(latency)
elif args.model == "mobilenetv2":
filters = [[32],[16],[24,24],[32,32,32],[64,64,64,64],[96,96,96],[160,160,160],[320],[1280]]
if args.dataset == "CIFAR100":
filters_prune = [28, 16, 24, 21, 30, 31, 26, 56, 50, 49, 46, 83, 70, 58, 120, 101, 68, 134, 397]
ratios = np.arange(0.25,2.1,0.25) # [0.25, 0.5 , 0.75, 1, 1.25, 1.5 , 1.75, 2]
elif args.dataset == "tinyimagenet":
filters_prune = [28, 16, 24, 24, 32, 32, 30, 64, 59, 50, 41, 96, 73, 48, 160, 69, 47, 155, 360] # mobilenetv2 tinyimagenet
ratios = [0.25,0.5,0.75,1.0]
for ratio in ratios:
# convcut
new_config = MobileNetV2.prepare_filters(MobileNetV2, filters, ratio=ratio, neuralscale=False, num_classes=num_classes)
model = MobileNetV2(filters=new_config, num_classes=num_classes, dataset=args.dataset, convcut=True)
latency = compute_latency(model)
params = compute_params_(model)
param_convcut.append(params)
latency_convcut.append(latency)
# uniform
new_config = MobileNetV2.prepare_filters(MobileNetV2, filters, ratio=ratio, neuralscale=False, num_classes=num_classes)
model = MobileNetV2(filters=new_config, num_classes=num_classes, dataset=args.dataset)
latency = compute_latency(model)
params = compute_params_(model)
param_uni.append(params)
latency_uni.append(latency)
# pruned
new_config = MobileNetV2.prepare_filters(MobileNetV2, filters, ratio=ratio, neuralscale=False, num_classes=num_classes, pruned_filters=filters_prune)
model = MobileNetV2(filters=new_config, num_classes=num_classes, dataset=args.dataset)
latency = compute_latency(model)
params = compute_params_(model)
param_prune.append(params)
latency_prune.append(latency)
## efficient
if args.dataset == "CIFAR100":
fname = "mobilenetv2_10_eff_c100"
elif args.dataset == "tinyimagenet":
fname = "mobilenetv2_15_eff_tinyimagenet"
# low
new_config = MobileNetV2.prepare_filters(MobileNetV2, filters, ratio=ratio, neuralscale=True, descent_idx=0, prune_fname=fname, num_classes=num_classes)
model = MobileNetV2(ratio=ratio, neuralscale=True, num_classes=num_classes, prune_fname=fname, descent_idx=0)
latency = compute_latency(model)
params = compute_params_(model)
param_low.append(params)
latency_low.append(latency)
# high
new_config = MobileNetV2.prepare_filters(MobileNetV2, filters, ratio=ratio, neuralscale=True, descent_idx=14, prune_fname=fname, num_classes=num_classes)
model = MobileNetV2(ratio=ratio, neuralscale=True, num_classes=num_classes, prune_fname=fname, descent_idx=14)
latency = compute_latency(model)
params = compute_params_(model)
param_high.append(params)
latency_high.append(latency)
print("Parameters:")
print("Uniform", param_uni)
print("MorphNet (Taylor-FO)", param_prune)
print("NeuralScale (Iter=1)", param_low)
print("NeuralScale (Iter=15)", param_high)
if args.convcut:
print("Uniform (ConvCut)", param_convcut)
uni_test_acc = []
prune_uni_test_acc = []
high_test_acc = []
low_test_acc = []
convcut_test_acc = []
uni_test_acc_max = []
prune_uni_test_acc_max = []
high_test_acc_max = []
low_test_acc_max = []
convcut_test_acc_max = []
uni_test_acc_min = []
prune_uni_test_acc_min = []
high_test_acc_min = []
low_test_acc_min = []
convcut_test_acc_min = []
uni_test_acc_std = []
prune_uni_test_acc_std = []
high_test_acc_std = []
low_test_acc_std = []
convcut_test_acc_std = []
if args.dataset=="CIFAR10" or args.dataset=="CIFAR100":
ratios = np.arange(0.25,2.1,0.25)
elif args.dataset=="tinyimagenet":
ratios = [0.25,0.5,0.75,1.0]
for ratio in ratios:
uni_test_acc_tmp = []
prune_uni_test_acc_tmp = []
high_test_acc_tmp = []
low_test_acc_tmp = []
convcut_test_acc_tmp = []
num_samples = 5
for i in range(num_samples):
if args.model == "vgg":
if args.dataset == "CIFAR100":
# Baseline (Uniform Scale)
pkl_ld = pickle.load( open( "saved_plots/vgg/vgg_uni_c100_{}_{}.pk".format(i,int(ratio*100)), "rb" ) )
uni_test_acc_tmp.append(max(pkl_ld["test_acc"]))
# MorphNet (Taylor-FO)
pkl_ld = pickle.load( open( "saved_plots/vgg/vgg_pruned_c100_{}_{}.pk".format(i,int(ratio*100)), "rb" ) )
prune_uni_test_acc_tmp.append(max(pkl_ld["test_acc"]))
# neuralscale (Iteration=15, lr=0.1)
pkl_ld = pickle.load( open( "saved_plots/vgg/vgg_10_eff_c100_late_{}_{}.pk".format(i,int(ratio*100)), "rb" ) )
high_test_acc_tmp.append(max(pkl_ld["test_acc"]))
# neuralscale (Iteration=1, lr=0.1)
pkl_ld = pickle.load( open( "saved_plots/vgg/vgg_10_eff_c100_early_{}_{}.pk".format(i,int(ratio*100)), "rb" ) )
low_test_acc_tmp.append(max(pkl_ld["test_acc"]))
elif args.dataset == "CIFAR10":
# Baseline (Uniform Scale)
pkl_ld = pickle.load( open( "saved_plots/vgg/vgg_uni_c10_{}_{}.pk".format(i,int(ratio*100)), "rb" ) )
uni_test_acc_tmp.append(max(pkl_ld["test_acc"]))
# MorphNet (Taylor-FO)
pkl_ld = pickle.load( open( "saved_plots/vgg/vgg_pruned_c10_{}_{}.pk".format(i,int(ratio*100)), "rb" ) )
prune_uni_test_acc_tmp.append(max(pkl_ld["test_acc"]))
# neuralscale (Iteration=15, lr=0.1)
pkl_ld = pickle.load( open( "saved_plots/vgg/vgg_10_eff_c10_late_{}_{}.pk".format(i,int(ratio*100)), "rb" ) )
high_test_acc_tmp.append(max(pkl_ld["test_acc"]))
# neuralscale (Iteration=1, lr=0.1)
pkl_ld = pickle.load( open( "saved_plots/vgg/vgg_10_eff_c10_early_{}_{}.pk".format(i,int(ratio*100)), "rb" ) )
low_test_acc_tmp.append(max(pkl_ld["test_acc"]))
else:
print("Dataset Not Found...")
exit()
elif args.model == "resnet18":
if args.dataset == "CIFAR100":
# Baseline (Uniform Scale)
pkl_ld = pickle.load( open( "saved_plots/resnet18/resnet18_uni_c100_{}_{}.pk".format(i,int(ratio*100)), "rb" ) )
uni_test_acc_tmp.append(max(pkl_ld["test_acc"]))
# MorphNet (Taylor-FO)
pkl_ld = pickle.load( open( "saved_plots/resnet18/resnet18_pruned_c100_{}_{}.pk".format(i,int(ratio*100)), "rb" ) )
prune_uni_test_acc_tmp.append(max(pkl_ld["test_acc"]))
# neuralscale (Iteration=15)
pkl_ld = pickle.load( open( "saved_plots/resnet18/resnet18_10_eff_c100_late_{}_{}.pk".format(i,int(ratio*100)), "rb" ) )
high_test_acc_tmp.append(max(pkl_ld["test_acc"]))
# neuralscale (Iteration=1)
pkl_ld = pickle.load( open( "saved_plots/resnet18/resnet18_10_eff_c100_early_{}_{}.pk".format(i,int(ratio*100)), "rb" ) )
low_test_acc_tmp.append(max(pkl_ld["test_acc"]))
elif args.dataset == "tinyimagenet":
# Baseline (Uniform Scale)
pkl_ld = pickle.load( open( "saved_plots/resnet18/resnet18_uni_tinyimagenet_{}_{}.pk".format(i,int(ratio*100)), "rb" ) )
uni_test_acc_tmp.append(max(pkl_ld["test_acc"]))
# MorphNet (Taylor-FO)
pkl_ld = pickle.load( open( "saved_plots/resnet18/resnet18_pruned_tinyimagenet_{}_{}.pk".format(i,int(ratio*100)), "rb" ) )
prune_uni_test_acc_tmp.append(max(pkl_ld["test_acc"]))
# neuralscale (Iteration=15, lr=0.1)
pkl_ld = pickle.load( open( "saved_plots/resnet18/resnet18_10_eff_tinyimagenet_late_{}_{}.pk".format(i,int(ratio*100)), "rb" ) )
high_test_acc_tmp.append(max(pkl_ld["test_acc"]))
# neuralscale (Iteration=1, lr=0.1)
pkl_ld = pickle.load( open( "saved_plots/resnet18/resnet18_10_eff_tinyimagenet_early_{}_{}.pk".format(i,int(ratio*100)), "rb" ) )
low_test_acc_tmp.append(max(pkl_ld["test_acc"]))
# Uniform (Convcut)
pkl_ld = pickle.load( open( "saved_plots/resnet18/resnet18_convcut_tinyimagenet_{}_{}.pk".format(i,int(ratio*100)), "rb" ) )
convcut_test_acc_tmp.append(max(pkl_ld["test_acc"]))
else:
print("Dataset Not Found...")
elif args.model == "mobilenetv2":
if args.dataset == "CIFAR100":
# Baseline (Uniform Scale)
pkl_ld = pickle.load( open( "saved_plots/mobilenetv2/mobilenetv2_uni_c100_{}_{}.pk".format(i,int(ratio*100)), "rb" ) )
uni_test_acc_tmp.append(max(pkl_ld["test_acc"]))
# MorphNet (Taylor-FO)
pkl_ld = pickle.load( open( "saved_plots/mobilenetv2/mobilenetv2_pruned_c100_{}_{}.pk".format(i,int(ratio*100)), "rb" ) )
prune_uni_test_acc_tmp.append(max(pkl_ld["test_acc"]))
# neuralscale (Iteration=1)
pkl_ld = pickle.load( open( "saved_plots/mobilenetv2/mobilenetv2_10_eff_c100_early_{}_{}.pk".format(i,int(ratio*100)), "rb" ) )
low_test_acc_tmp.append(max(pkl_ld["test_acc"]))
# neuralscale (Iteration=15)
pkl_ld = pickle.load( open( "saved_plots/mobilenetv2/mobilenetv2_10_eff_c100_late_{}_{}.pk".format(i,int(ratio*100)), "rb" ) )
high_test_acc_tmp.append(max(pkl_ld["test_acc"]))
elif args.dataset == "tinyimagenet":
# Baseline (Uniform Scale)
pkl_ld = pickle.load( open( "saved_plots/mobilenetv2/mobilenetv2_uni_tinyimagenet_{}_{}.pk".format(i,int(ratio*100)), "rb" ) )
uni_test_acc_tmp.append(max(pkl_ld["test_acc"]))
# MorphNet (Taylor-FO)
pkl_ld = pickle.load( open( "saved_plots/mobilenetv2/mobilenetv2_pruned_tinyimagenet_{}_{}.pk".format(i,int(ratio*100)), "rb" ) )
prune_uni_test_acc_tmp.append(max(pkl_ld["test_acc"]))
# neuralscale (Iteration=15, lr=0.1)
pkl_ld = pickle.load( open( "saved_plots/mobilenetv2/mobilenetv2_15_eff_tinyimagenet_early_{}_{}.pk".format(i,int(ratio*100)), "rb" ) )
low_test_acc_tmp.append(max(pkl_ld["test_acc"]))
# neuralscale (Iteration=15, lr=0.1)
pkl_ld = pickle.load( open( "saved_plots/mobilenetv2/mobilenetv2_15_eff_tinyimagenet_late_{}_{}.pk".format(i,int(ratio*100)), "rb" ) )
high_test_acc_tmp.append(max(pkl_ld["test_acc"]))
# Uniform (Convcut)
pkl_ld = pickle.load( open( "saved_plots/mobilenetv2/mobilenetv2_convcut_tinyimagenet_{}_{}.pk".format(i,int(ratio*100)), "rb" ) )
convcut_test_acc_tmp.append(max(pkl_ld["test_acc"]))
else:
print("Dataset Not Found...")
uni_test_acc_tmp = np.array(uni_test_acc_tmp)
prune_uni_test_acc_tmp = np.array(prune_uni_test_acc_tmp)
high_test_acc_tmp = np.array(high_test_acc_tmp)
low_test_acc_tmp = np.array(low_test_acc_tmp)
uni_test_acc_max_tmp = uni_test_acc_tmp.max(axis=0)
prune_uni_test_acc_max_tmp = prune_uni_test_acc_tmp.max(axis=0)
high_test_acc_max_tmp = high_test_acc_tmp.max(axis=0)
low_test_acc_max_tmp = low_test_acc_tmp.max(axis=0)
uni_test_acc_min_tmp = uni_test_acc_tmp.min(axis=0)
prune_uni_test_acc_min_tmp = prune_uni_test_acc_tmp.min(axis=0)
high_test_acc_min_tmp = high_test_acc_tmp.min(axis=0)
low_test_acc_min_tmp = low_test_acc_tmp.min(axis=0)
uni_test_acc_std_tmp = uni_test_acc_tmp.std(axis=0)
prune_uni_test_acc_std_tmp = prune_uni_test_acc_tmp.std(axis=0)
high_test_acc_std_tmp = high_test_acc_tmp.std(axis=0)
low_test_acc_std_tmp = low_test_acc_tmp.std(axis=0)
uni_test_acc_tmp = uni_test_acc_tmp.mean(axis=0)
prune_uni_test_acc_tmp = prune_uni_test_acc_tmp.mean(axis=0)
high_test_acc_tmp = high_test_acc_tmp.mean(axis=0)
low_test_acc_tmp = low_test_acc_tmp.mean(axis=0)
uni_test_acc.append(uni_test_acc_tmp)
prune_uni_test_acc.append(prune_uni_test_acc_tmp)
high_test_acc.append(high_test_acc_tmp)
low_test_acc.append(low_test_acc_tmp)
uni_test_acc_max.append(uni_test_acc_max_tmp)
prune_uni_test_acc_max.append(prune_uni_test_acc_max_tmp)
high_test_acc_max.append(high_test_acc_max_tmp)
low_test_acc_max.append(low_test_acc_max_tmp)
uni_test_acc_min.append(uni_test_acc_min_tmp)
prune_uni_test_acc_min.append(prune_uni_test_acc_min_tmp)
high_test_acc_min.append(high_test_acc_min_tmp)
low_test_acc_min.append(low_test_acc_min_tmp)
uni_test_acc_std.append(uni_test_acc_std_tmp)
prune_uni_test_acc_std.append(prune_uni_test_acc_std_tmp)
high_test_acc_std.append(high_test_acc_std_tmp)
low_test_acc_std.append(low_test_acc_std_tmp)
if args.convcut:
convcut_test_acc_tmp = np.array(convcut_test_acc_tmp)
convcut_test_acc_max_tmp = convcut_test_acc_tmp.max(axis=0)
convcut_test_acc_min_tmp = convcut_test_acc_tmp.min(axis=0)
convcut_test_acc_std_tmp = convcut_test_acc_tmp.std(axis=0)
convcut_test_acc_tmp = convcut_test_acc_tmp.mean(axis=0)
convcut_test_acc.append(convcut_test_acc_tmp)
convcut_test_acc_max.append(convcut_test_acc_max_tmp)
convcut_test_acc_min.append(convcut_test_acc_min_tmp)
convcut_test_acc_std.append(convcut_test_acc_std_tmp)
uni_test_acc = np.array(uni_test_acc)
prune_uni_test_acc = np.array(prune_uni_test_acc)
high_test_acc = np.array(high_test_acc)
low_test_acc = np.array(low_test_acc)
uni_test_acc_max = np.array(uni_test_acc_max)
prune_uni_test_acc_max = np.array(prune_uni_test_acc_max)
high_test_acc_max = np.array(high_test_acc_max)
low_test_acc_max = np.array(low_test_acc_max)
uni_test_acc_min = np.array(uni_test_acc_min)
prune_uni_test_acc_min = np.array(prune_uni_test_acc_min)
high_test_acc_min = np.array(high_test_acc_min)
low_test_acc_min = np.array(low_test_acc_min)
if args.convcut:
convcut_test_acc = np.array(convcut_test_acc)
convcut_test_acc_max = np.array(convcut_test_acc_max)
convcut_test_acc_min = np.array(convcut_test_acc_min)
# PLOT ACCURACY vs PARAMETERS
if args.convcut or args.plot_normal:
plt.figure(0, figsize=set_size(colwidth))
else:
plt.figure(0, figsize=set_size(width,0.235))
# plt.figure(0)
plt.plot(param_high, high_test_acc, marker='o', label="NeuralScale (Iteration=15)")
plt.fill_between(param_high, high_test_acc_min, high_test_acc_max, alpha=0.1)
plt.plot(param_low, low_test_acc, marker='o', label="NeuralScale (Iteration=1)")
plt.fill_between(param_low, low_test_acc_min, low_test_acc_max, alpha=0.1)
plt.plot(param_prune, prune_uni_test_acc, marker='o', label="MorphNet (Taylor-FO)")
plt.fill_between(param_prune, prune_uni_test_acc_min, prune_uni_test_acc_max, alpha=0.1)
plt.plot(param_uni, uni_test_acc, marker='o', label="Uniform Scale (Baseline)")
plt.fill_between(param_uni, uni_test_acc_min, uni_test_acc_max, alpha=0.1)
if args.convcut:
plt.plot(param_convcut, convcut_test_acc, marker='o', label="Uniform Scale (Convcut)")
plt.fill_between(param_convcut, convcut_test_acc_min, convcut_test_acc_max, alpha=0.1)
# plt.title("Test Accuracy vs Parameters")
plt.xlabel("Parameters")
plt.ylabel("Accuracy")
plt.legend()
plt.grid()
plt.tight_layout()
if args.convcut:
plt.savefig("savefigs/param_acc_convcut_{}_{}.pdf".format(args.model,args.dataset))
else:
plt.savefig("savefigs/param_acc_{}_{}.pdf".format(args.model,args.dataset))
if args.convcut or args.plot_normal:
plt.figure(1, figsize=set_size(colwidth))
else:
plt.figure(1, figsize=set_size(width,0.235))
# plt.figure(1)
param_spec = np.arange(min(param_uni),max(param_uni),(max(param_uni)-min(param_uni))/20)
param_uni_interp = np.interp(param_spec, param_uni, uni_test_acc)
diff_high = np.interp(param_spec, param_high, high_test_acc) - param_uni_interp
diff_low = np.interp(param_spec, param_low, low_test_acc) - param_uni_interp
diff_prune = np.interp(param_spec, param_prune, prune_uni_test_acc) - param_uni_interp
if args.convcut:
diff_convcut = np.interp(param_spec, param_convcut, convcut_test_acc) - param_uni_interp
plt.plot(param_spec, diff_high, label="NeuralScale (Iteration=15)")
plt.plot(param_spec, diff_low, label="NeuralScale (Iteration=1)")
plt.plot(param_spec, diff_prune, label="MorphNet (Taylor-FO)")
if args.convcut:
plt.plot(param_spec, diff_convcut, label="Uniform Scale (Convcut)")
# plt.title("Test Accuracy Gap vs Parameters")
plt.xlabel("Parameters")
plt.ylabel("Accuracy Gap")
plt.legend()
plt.grid()
plt.tight_layout()
# if args.convcut:
# plt.savefig("savefigs/param_acc_gap_convcut_{}_{}.pdf".format(args.model, args.dataset))
# else:
# plt.savefig("savefigs/param_acc_gap_{}_{}.pdf".format(args.model, args.dataset))
# PLOT ACCURACY vs latency
if args.convcut or args.plot_normal:
plt.figure(2, figsize=set_size(colwidth))
else:
plt.figure(2, figsize=set_size(width,0.235))
# plt.figure(2)
plt.plot(latency_high, high_test_acc, marker='o', label="NeuralScale (Iteration=15)")
plt.fill_between(latency_high, high_test_acc_min, high_test_acc_max, alpha=0.1)
plt.plot(latency_low, low_test_acc, marker='o', label="NeuralScale (Iteration=1)")
plt.fill_between(latency_low, low_test_acc_min, low_test_acc_max, alpha=0.1)
plt.plot(latency_prune, prune_uni_test_acc, marker='o', label="MorphNet (Taylor-FO)")
plt.fill_between(latency_prune, prune_uni_test_acc_min, prune_uni_test_acc_max, alpha=0.1)
plt.plot(latency_uni, uni_test_acc, marker='o', label="Uniform Scale (Baseline)")
plt.fill_between(latency_uni, uni_test_acc_min, uni_test_acc_max, alpha=0.1)
if args.convcut:
plt.plot(latency_convcut, convcut_test_acc, marker='o', label="Uniform Scale (Convcut)")
plt.fill_between(latency_convcut, convcut_test_acc_min, convcut_test_acc_max, alpha=0.1)
# plt.title("Test Accuracy vs Latency")
plt.xlabel("Latency (ms)")
plt.ylabel("Accuracy")
plt.legend()
plt.grid()
plt.tight_layout()
if args.convcut:
plt.savefig("savefigs/latency_acc_convcut_{}_{}.pdf".format(args.model, args.dataset))
else:
plt.savefig("savefigs/latency_acc_{}_{}.pdf".format(args.model, args.dataset))
if args.convcut or args.plot_normal:
plt.figure(3, figsize=set_size(colwidth))
else:
plt.figure(3, figsize=set_size(width,0.235))
# plt.figure(3)
latency_spec = np.arange(min(latency_uni),max(latency_uni),(max(latency_uni)-min(latency_uni))/20)
latency_uni_interp = np.interp(latency_spec, latency_uni, uni_test_acc)
diff_high = np.interp(latency_spec, latency_high, high_test_acc) - latency_uni_interp
diff_low = np.interp(latency_spec, latency_low, low_test_acc) - latency_uni_interp
diff_prune = np.interp(latency_spec, latency_prune, prune_uni_test_acc) - latency_uni_interp
if args.convcut:
diff_convcut = np.interp(latency_spec, latency_convcut, convcut_test_acc) - latency_uni_interp
plt.plot(latency_spec, diff_high, label="NeuralScale (Iteration=15)")
plt.plot(latency_spec, diff_low, label="NeuralScale (Iteration=1)")
plt.plot(latency_spec, diff_prune, label="MorphNet (Taylor-FO)")
if args.convcut:
plt.plot(latency_spec, diff_convcut, label="Uniform Scale (Convcut)")
# plt.title("Test Accuracy Gap vs Latency")
plt.xlabel("Latency (ms)")
plt.ylabel("Accuracy Gap")
plt.legend()
plt.grid()
plt.tight_layout()
# if args.convcut:
# plt.savefig("savefigs/latency_acc_gap_convcut_{}_{}.pdf".format(args.model, args.dataset))
# else:
# plt.savefig("savefigs/latency_acc_gap_{}_{}.pdf".format(args.model, args.dataset))
print("===============")
print("Comparison Table")
print("===============")
print("Param latency Accuracy std")
print("Uniform")
for i, ratio in enumerate(ratios):
print("{} {} {} {}".format(param_uni[i], latency_uni[i], uni_test_acc[i], uni_test_acc_std[i]))
print("MorphNet (Taylor-FO)")
for i, ratio in enumerate(ratios):
print("{} {} {} {}".format(param_prune[i], latency_prune[i], prune_uni_test_acc[i], prune_uni_test_acc_std[i]))
print("NeuralScale (Iter=1)")
for i, ratio in enumerate(ratios):
print("{} {} {} {}".format(param_low[i], latency_low[i], low_test_acc[i], low_test_acc_std[i]))
print("NeuralScale (Iter=15)")
for i, ratio in enumerate(ratios):
print("{} {} {} {}".format(param_high[i], latency_high[i], high_test_acc[i], high_test_acc_std[i]))
if args.convcut:
print("Uniform (ConvCut)")
for i, ratio in enumerate(ratios):
print("{} {} {} {}".format(param_convcut[i], latency_convcut[i], convcut_test_acc[i], convcut_test_acc_std[i]))
plt.show()
| 48.311047 | 164 | 0.659125 |
acf4977d97bd9436d89084abdeea373626178120 | 122 | py | Python | chat/admin.py | thekonungr/wolfsschanze | a203c7c5bf2be17b779dd5e3e472c84fd245c7bf | [
"MIT"
] | null | null | null | chat/admin.py | thekonungr/wolfsschanze | a203c7c5bf2be17b779dd5e3e472c84fd245c7bf | [
"MIT"
] | null | null | null | chat/admin.py | thekonungr/wolfsschanze | a203c7c5bf2be17b779dd5e3e472c84fd245c7bf | [
"MIT"
] | null | null | null | from django.contrib import admin
from . import models
# Register your models here.
admin.site.register(models.ChatMessage) | 30.5 | 39 | 0.819672 |
acf497970db9e4db41a1e3e06a0208fafb517bdc | 459 | py | Python | Graphs/DisjointSet.py | Saicharan67/Interview-Coding-Questions | b28cccd41e380f15b833039d687931570908adfb | [
"MIT"
] | 12 | 2021-06-18T16:24:27.000Z | 2021-11-04T03:30:00.000Z | Graphs/DisjointSet.py | Saicharan67/Interview-Coding-Questions | b28cccd41e380f15b833039d687931570908adfb | [
"MIT"
] | 32 | 2021-10-01T07:15:00.000Z | 2021-11-05T15:35:53.000Z | Graphs/DisjointSet.py | Saicharan67/Interview-Coding-Questions | b28cccd41e380f15b833039d687931570908adfb | [
"MIT"
] | 21 | 2021-09-29T09:16:31.000Z | 2021-10-30T10:06:21.000Z | parent = [0]*100000
# Disjoint Set Union (Union-Find) with path compression and union by rank.
# parent[i] is element i's representative pointer; rank[i] is an upper bound
# on the height of the tree rooted at i.
parent = [0] * 100000
rank = [0] * 100000

def makeset(n):
    """Initialize elements 0..n-1 as singleton sets.

    Also resets ranks: the original left stale ranks behind when the
    structure was reused, which broke union-by-rank on a second makeset call.
    """
    for i in range(n):
        parent[i] = i
        rank[i] = 0

def findParent(node):
    """Return the representative of node's set, compressing the path."""
    if node == parent[node]:
        return node
    # Path compression: point every node on the path directly at the root.
    parent[node] = findParent(parent[node])
    return parent[node]

def union(u, v):
    """Merge the sets containing u and v using union by rank."""
    u = findParent(u)
    v = findParent(v)
    if u == v:
        # Already in the same set. The original fell through to the else
        # branch here and incremented rank on every redundant union.
        return
    if rank[u] < rank[v]:
        parent[u] = v
    elif rank[u] > rank[v]:
        parent[v] = u
    else:
        parent[v] = u
        rank[u] += 1
| 15.3 | 43 | 0.518519 |
acf497d64c42e89c7022d6eefb450ad8358533dc | 381 | py | Python | boards/migrations/0003_remove_topic_room.py | xxozmozxx2/scheduler | 887a1ee3bf3cb028b6e9f8d95e15168cebe64c52 | [
"MIT"
] | null | null | null | boards/migrations/0003_remove_topic_room.py | xxozmozxx2/scheduler | 887a1ee3bf3cb028b6e9f8d95e15168cebe64c52 | [
"MIT"
] | 2 | 2020-06-05T19:06:21.000Z | 2021-06-10T20:48:07.000Z | boards/migrations/0003_remove_topic_room.py | xxozmozxx2/scheduler | 887a1ee3bf3cb028b6e9f8d95e15168cebe64c52 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-09-20 10:00
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the `room` field from the Topic model (undoes 0002_topic_room)."""

    # Must run after the migration that added the field being removed.
    dependencies = [
        ('boards', '0002_topic_room'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='topic',
            name='room',
        ),
    ]
| 19.05 | 48 | 0.60105 |
acf499060ac4d21219fcb2017564d7acfe03ca0e | 501 | py | Python | Scripts/AverageRatings_RatingsCount.py | keya-desai/IT556_Worthless_without_coffee_DA-IICT_Final_Project | 8e072716efa0132d77094394663b8337f2da68c4 | [
"MIT"
] | 15 | 2018-05-08T21:25:58.000Z | 2021-08-05T07:06:21.000Z | Scripts/AverageRatings_RatingsCount.py | keya-desai/IT556_Worthless_without_coffee_DA-IICT_Final_Project | 8e072716efa0132d77094394663b8337f2da68c4 | [
"MIT"
] | 2 | 2019-01-30T16:14:00.000Z | 2019-10-03T17:06:05.000Z | Scripts/AverageRatings_RatingsCount.py | keya-desai/IT556_Worthless_without_coffee_DA-IICT_Final_Project | 8e072716efa0132d77094394663b8337f2da68c4 | [
"MIT"
] | 8 | 2018-05-08T13:00:27.000Z | 2020-12-04T10:49:18.000Z | import pandas as pd
import sys, csv ,operator
df = pd.read_csv('/Users/keyadesai/Desktop/reco/ratings.csv')
#print(len(df.groupby('book_id').groups.keys()))
dic = df.groupby('book_id')[['rating']].mean()
dic1 = df.groupby('book_id')[['rating']].count()
file = '/Users/keyadesai/Desktop/reco/avg_ratings1.csv'
dic.to_csv(file, sep=',')
fd1 = pd.read_csv('/Users/keyadesai/Desktop/reco/avg_ratings1.csv')
'''
file = '/Users/keyadesai/Desktop/reco/avg_ratings1.csv'
dic1.to_csv(file,sep =',')
''' | 23.857143 | 67 | 0.700599 |
acf49a72fffaf2d34c4aece62642c39a2eb816f1 | 1,902 | py | Python | x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/detectionoutput.py | aiyasin/X2Paddle | b37959f2ecdc09fdec7a38c01272126a7f3800e4 | [
"Apache-2.0"
] | null | null | null | x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/detectionoutput.py | aiyasin/X2Paddle | b37959f2ecdc09fdec7a38c01272126a7f3800e4 | [
"Apache-2.0"
] | null | null | null | x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/detectionoutput.py | aiyasin/X2Paddle | b37959f2ecdc09fdec7a38c01272126a7f3800e4 | [
"Apache-2.0"
] | 1 | 2021-02-22T09:05:44.000Z | 2021-02-22T09:05:44.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.fluid as fluid
def detectionoutput(x0,
                    x1,
                    x2,
                    nms_threshold,
                    nms_top_k,
                    keep_top_k,
                    nms_eta,
                    score_threshold,
                    background_label):
    """Emulate Caffe's DetectionOutput layer using Paddle ops.

    Args:
        x0: box location predictions (flattened; reshaped to per-prior boxes).
        x1: class confidence predictions (flattened; reshaped per prior).
        x2: prior boxes concatenated with their variances along axis 1 —
            split in half below. (assumed layout; TODO confirm against the
            Caffe PriorBox producer)
        nms_threshold, nms_top_k, keep_top_k, nms_eta, score_threshold,
        background_label: passed straight through to
            fluid.layers.detection_output.

    Returns:
        The NMS-filtered detection output tensor.
    """
    detection_output_layer_attrs = {
        "background_label": background_label,
        "nms_threshold": nms_threshold,
        "nms_top_k": nms_top_k,
        "keep_top_k": keep_top_k,
        "score_threshold": score_threshold,
        "nms_eta": nms_eta}
    # Split x2 into [prior boxes, prior-box variances] and flatten both to
    # shape (num_priors, 4).
    priorbox_list = paddle.split(x2, num_or_sections=2, axis=1)
    pb = priorbox_list[0]
    pbv = priorbox_list[1]
    pb = paddle.reshape(x=pb, shape=[-1, 4])
    pbv = paddle.reshape(x=pbv, shape=[-1, 4])
    # Number of prior boxes, taken dynamically from the flattened tensor.
    pb_dim = fluid.layers.shape(pb)[0]
    # Locations become (batch, num_priors, 4); confidences keep their first
    # dimension (0 = copy input dim in Paddle's reshape) and infer the class
    # count with -1.
    loc = paddle.reshape(x0, shape=[-1, pb_dim, 4])
    conf_flatten = paddle.reshape(x1, shape=[0, pb_dim, -1])
    out = fluid.layers.detection_output(loc=loc,
                                        scores=conf_flatten,
                                        prior_box=pb,
                                        prior_box_var=pbv,
                                        **detection_output_layer_attrs)
    return out
acf49ad0915403ec6bdb9fdb415847ace07aeb83 | 260 | py | Python | .history/Classiles/creating_lights_20210614211748.py | minefarmer/Coding101-OOP | d5655977559e3bd1acf6a4f185a6121cc3b05ce4 | [
"Unlicense"
] | null | null | null | .history/Classiles/creating_lights_20210614211748.py | minefarmer/Coding101-OOP | d5655977559e3bd1acf6a4f185a6121cc3b05ce4 | [
"Unlicense"
] | null | null | null | .history/Classiles/creating_lights_20210614211748.py | minefarmer/Coding101-OOP | d5655977559e3bd1acf6a4f185a6121cc3b05ce4 | [
"Unlicense"
] | null | null | null | """[Practice: Light Switch]
Variable name class
Returns:
[type]: [description]
"""
class Light:
    """A simple two-state light switch; starts off."""

    def __init__(self):
        # Lights start switched off.
        self.on = False

    def is_on(self):
        """Return True if the light is currently on.

        Bug fix: the parameter was misspelled `sel`, so the body's
        reference to `self` raised NameError whenever this was called.
        """
        return self.on

    def toggle(self):
        """Flip the light to the opposite state."""
        self.on = not self.on
| 13 | 29 | 0.557692 |
acf49b1148dd146bcf0cc37f79b3f2c9a961ebb7 | 15,526 | py | Python | tests/backend_test_base.py | tensorleap/tensorflow-onnx | 56f6070828928bbb0f30890b2229eec8b663213d | [
"Apache-2.0"
] | null | null | null | tests/backend_test_base.py | tensorleap/tensorflow-onnx | 56f6070828928bbb0f30890b2229eec8b663213d | [
"Apache-2.0"
] | null | null | null | tests/backend_test_base.py | tensorleap/tensorflow-onnx | 56f6070828928bbb0f30890b2229eec8b663213d | [
"Apache-2.0"
] | null | null | null | # SPDX-License-Identifier: Apache-2.0
"""Unit Test Base."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test,import-outside-toplevel
# pylint: disable=wrong-import-position
import logging
import os
import unittest
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.ops import lookup_ops
from common import get_test_config
from tf2onnxnightly import utils
from tf2onnxnightly.tfonnx import process_tf_graph
from tf2onnxnightly import optimizer
from tf2onnxnightly.tf_loader import tf_reset_default_graph, tf_session, tf_placeholder, from_function, freeze_session
from tf2onnxnightly.tf_loader import tf_optimize, is_tf2, get_hash_table_info
from tf2onnxnightly.tf_utils import compress_graph_def
from tf2onnxnightly.graph import ExternalTensorStorage
# Alias TF1-style symbols so the rest of this module runs under both TF1 and
# TF2 (where these functions live under tf.compat.v1). Under TF1 there is no
# lite alias here, so tf_lite stays None and callers must guard on it.
if is_tf2():
    tf_set_random_seed = tf.compat.v1.set_random_seed
    tf_tables_initializer = tf.compat.v1.tables_initializer
    tf_lite = tf.compat.v1.lite
else:
    tf_set_random_seed = tf.set_random_seed
    tf_tables_initializer = tf.tables_initializer
    tf_lite = None
class Tf2OnnxBackendTestBase(unittest.TestCase):
    """Shared base class for tf2onnx backend unit tests.

    Provides helpers to execute a TensorFlow function (eagerly or in a
    frozen graph session), convert it to ONNX (optionally via TFLite),
    run the ONNX model on the configured backend (onnxruntime or caffe2),
    and compare results against the TensorFlow reference outputs.
    """

    def setUp(self):
        """Load test config and reset global TF / tf2onnx state per test."""
        self.config = get_test_config()
        tf_reset_default_graph()
        # reset name generation on every test
        utils.INTERNAL_NAME = 1
        np.random.seed(1)  # Make it reproducible.
        self.logger = logging.getLogger(self.__class__.__name__)

    def tearDown(self):
        """Remove per-test artifacts unless debug mode asked to keep them."""
        if not self.config.is_debug_mode:
            utils.delete_directory(self.test_data_directory)

    @property
    def test_data_directory(self):
        """Per-test scratch directory under the configured temp dir."""
        return os.path.join(self.config.temp_dir, self._testMethodName)

    @staticmethod
    def assertAllClose(expected, actual, **kwargs):
        """Numpy allclose assertion (tolerances via **kwargs)."""
        np.testing.assert_allclose(expected, actual, **kwargs)

    @staticmethod
    def assertAllEqual(expected, actual, **kwargs):
        """Numpy exact array-equality assertion."""
        np.testing.assert_array_equal(expected, actual, **kwargs)

    def run_onnxcaffe2(self, onnx_graph, inputs):
        """Run test against caffe2 backend."""
        import caffe2.python.onnx.backend
        prepared_backend = caffe2.python.onnx.backend.prepare(onnx_graph)
        results = prepared_backend.run(inputs)
        return results

    def run_onnxruntime(self, model_path, inputs, output_names):
        """Run test against onnxruntime backend."""
        import onnxruntime as rt
        providers = ['CPUExecutionProvider']
        if rt.get_device() == "GPU":
            gpus = os.environ.get("CUDA_VISIBLE_DEVICES")
            if gpus is None or len(gpus) > 1:
                providers = ['CUDAExecutionProvider']
        opt = rt.SessionOptions()
        # in case of issues with the runtime, one can enable more logging
        # opt.log_severity_level = 0
        # opt.log_verbosity_level = 255
        # opt.enable_profiling = True
        m = rt.InferenceSession(model_path, opt, providers=providers)
        results = m.run(output_names, inputs)
        return results

    def run_backend(self, g, outputs, input_dict, large_model=False, postfix=""):
        """Serialize graph ``g`` to disk and execute it on the configured backend.

        Returns the list of output arrays produced by the backend.
        """
        tensor_storage = ExternalTensorStorage() if large_model else None
        model_proto = g.make_model("test", external_tensor_storage=tensor_storage)
        model_path = self.save_onnx_model(model_proto, input_dict, external_tensor_storage=tensor_storage,
                                          postfix=postfix)
        if self.config.backend == "onnxruntime":
            y = self.run_onnxruntime(model_path, input_dict, outputs)
        elif self.config.backend == "caffe2":
            y = self.run_onnxcaffe2(model_proto, input_dict)
        else:
            raise ValueError("unknown backend")
        return y

    def assert_results_equal(self, expected, actual, rtol, atol, check_value=True, check_shape=True, check_dtype=True):
        """Compare backend outputs against TF reference outputs element-wise."""
        for expected_val, actual_val in zip(expected, actual):
            if check_value:
                # Use the builtin ``object`` here: the ``np.object`` alias
                # this code previously compared against was removed in
                # NumPy 1.24 (deprecated since 1.20) and raised
                # AttributeError on import of newer NumPy.
                if expected_val.dtype == object:
                    # String tensors arrive as bytes; decode before comparing.
                    decode = np.vectorize(lambda x: x.decode('UTF-8'))
                    expected_val_str = decode(expected_val)
                    self.assertAllEqual(expected_val_str, actual_val)
                else:
                    self.assertAllClose(expected_val, actual_val, rtol=rtol, atol=atol)
            if check_dtype:
                self.assertEqual(expected_val.dtype, actual_val.dtype)
            # why a shape check is needed: issue when comparing [] with scalar
            # https://github.com/numpy/numpy/issues/11071
            if check_shape:
                self.assertEqual(expected_val.shape, actual_val.shape)

    def freeze_and_run_tf(self, func, feed_dict, outputs, as_session, premade_placeholders, large_model, constant_fold):
        """Execute ``func`` in TF and return (results, frozen graph_def, tables).

        Uses eager execution on TF2 unless ``as_session`` forces the
        graph/session path (needed e.g. for hash tables and placeholders).
        """
        np.random.seed(1)  # Make it reproducible.
        clean_feed_dict = {utils.node_name(k): v for k, v in feed_dict.items()}
        if is_tf2() and not as_session:
            #
            # use eager to execute the tensorflow func
            #
            # numpy doesn't work for all ops, make it tf.Tensor()
            input_tensors = [tf.TensorSpec(shape=v.shape, dtype=tf.as_dtype(v.dtype), name=utils.node_name(k))
                             for k, v in feed_dict.items()]
            input_list = [tf.convert_to_tensor(v, dtype=tf.as_dtype(v.dtype), name=utils.node_name(k))
                          for k, v in feed_dict.items()]
            tf.random.set_seed(1)
            result = func(*input_list)
            if isinstance(result, (list, tuple)):
                # list or tuple
                result = [x.numpy() for x in result]
            else:
                # single result
                result = [result.numpy()]
            # now make the eager functions a graph
            concrete_func = tf.function(func, input_signature=tuple(input_tensors))
            concrete_func = concrete_func.get_concrete_function()
            graph_def = from_function(concrete_func,
                                      input_names=list(feed_dict.keys()),
                                      output_names=outputs,
                                      large_model=large_model)
            initialized_tables = None
        else:
            #
            # use graph to execute the tensorflow func
            #
            with tf_session() as sess:
                tf_set_random_seed(1)
                input_list = []
                if not premade_placeholders:
                    for k, v in clean_feed_dict.items():
                        input_list.append(tf_placeholder(name=k, shape=v.shape, dtype=tf.as_dtype(v.dtype)))
                func(*input_list)
                variables_lib.global_variables_initializer().run()
                tf_tables_initializer().run()
                output_dict = []
                for out_name in outputs:
                    output_dict.append(sess.graph.get_tensor_by_name(out_name))
                result = sess.run(output_dict, feed_dict=feed_dict)
                graph_def = freeze_session(sess,
                                           input_names=list(feed_dict.keys()),
                                           output_names=outputs)
                # Export any initialized hash tables so the converter can
                # embed their contents in the ONNX model.
                table_names, key_dtypes, value_dtypes = get_hash_table_info(graph_def)
                initialized_tables = {}
                for n, k_dtype, val_dtype in zip(table_names, key_dtypes, value_dtypes):
                    h = lookup_ops.hash_table_v2(k_dtype, val_dtype, shared_name=n)
                    k, v = lookup_ops.lookup_table_export_v2(h, k_dtype, val_dtype)
                    initialized_tables[n] = (sess.run(k), sess.run(v))
            tf_reset_default_graph()
            with tf_session() as sess:
                tf.import_graph_def(graph_def, name='')
        graph_def = tf_optimize(list(feed_dict.keys()), outputs, graph_def, fold_constant=constant_fold)
        model_path = os.path.join(self.test_data_directory, self._testMethodName + "_after_tf_optimize.pb")
        utils.save_protobuf(model_path, graph_def)
        self.logger.debug("created file %s", model_path)
        return result, graph_def, initialized_tables

    def convert_to_tflite(self, graph_def, feed_dict, outputs):
        """Convert ``graph_def`` to a .tflite file; return its path or None."""
        if not feed_dict:
            return None  # Can't make TFlite model with no inputs
        tf_reset_default_graph()
        with tf_session() as sess:
            tf.import_graph_def(graph_def, name='')
            sess_inputs = [sess.graph.get_tensor_by_name(k) for k in feed_dict.keys()]
            sess_outputs = [sess.graph.get_tensor_by_name(n) for n in outputs]
            converter = tf_lite.TFLiteConverter.from_session(sess, sess_inputs, sess_outputs)
            #converter.optimizations = [tf.lite.Optimize.DEFAULT]
        from tensorflow.lite.python.convert import ConverterError
        try:
            tflite_model = converter.convert()
            tflite_path = os.path.join(self.test_data_directory, self._testMethodName + ".tflite")
            dir_name = os.path.dirname(tflite_path)
            if dir_name:
                os.makedirs(dir_name, exist_ok=True)
            with open(tflite_path, 'wb') as f:
                f.write(tflite_model)
            return tflite_path
        except ConverterError:
            # Not every graph is convertible; signal the caller to skip.
            return None

    def run_tflite(self, tflite_path, feed_dict):
        """Run a tflite model; return (results, output_names) or (None, None)."""
        try:
            interpreter = tf.lite.Interpreter(tflite_path)
            input_details = interpreter.get_input_details()
            output_details = interpreter.get_output_details()
            input_name_to_index = {n['name'].split(':')[0]: n['index'] for n in input_details}
            feed_dict_without_port = {k.split(':')[0]: v for k, v in feed_dict.items()}
            for k, v in feed_dict_without_port.items():
                interpreter.resize_tensor_input(input_name_to_index[k], v.shape)
            interpreter.allocate_tensors()
            # The output names might be different in the tflite but the order is the same
            output_names = [n['name'] for n in output_details]
            for k, v in feed_dict_without_port.items():
                interpreter.set_tensor(input_name_to_index[k], v)
            interpreter.invoke()
            result = [interpreter.get_tensor(output['index']) for output in output_details]
            return result, output_names
        except (RuntimeError, ValueError):
            # tflite sometimes converts from tf but produces an invalid model
            return None, None

    def run_test_case(self, func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-07, atol=1e-5,
                      convert_var_to_const=True, constant_fold=True, check_value=True, check_shape=True,
                      check_dtype=True, process_args=None, onnx_feed_dict=None, graph_validator=None, as_session=False,
                      large_model=False, premade_placeholders=False):
        """End-to-end test: run TF, convert to ONNX (and via TFLite), compare.

        Raises unittest.SkipTest when both the tf and tflite paths are
        disabled by the config; otherwise returns the last converted graph.
        """
        test_tf = not self.config.skip_tf_tests
        test_tflite = not self.config.skip_tflite_tests
        run_tfl_consistency_test = test_tf and test_tflite and self.config.run_tfl_consistency_test
        # optional - passed to process_tf_graph
        if process_args is None:
            process_args = {}
        # optional - pass distinct feed_dict to onnx runtime
        if onnx_feed_dict is None:
            onnx_feed_dict = feed_dict
        # NOTE: the parameter is intentionally overridden; input names are
        # always derived from feed_dict keys.
        input_names_with_port = list(feed_dict)
        tf_reset_default_graph()
        if tf_lite is None:
            test_tflite = False
        g = None
        expected, graph_def, initialized_tables = \
            self.freeze_and_run_tf(func, feed_dict, output_names_with_port, as_session,
                                   premade_placeholders, large_model, constant_fold)
        if test_tflite:
            tflite_path = self.convert_to_tflite(graph_def, feed_dict, output_names_with_port)
            test_tflite = tflite_path is not None
        if test_tf:
            tf_reset_default_graph()
            with tf_session() as sess:
                const_node_values = None
                if large_model:
                    const_node_values = compress_graph_def(graph_def)
                tf.import_graph_def(graph_def, name='')
                g = process_tf_graph(sess.graph, opset=self.config.opset,
                                     input_names=list(feed_dict.keys()),
                                     output_names=output_names_with_port,
                                     target=self.config.target,
                                     const_node_values=const_node_values,
                                     initialized_tables=initialized_tables,
                                     **process_args)
                g = optimizer.optimize_graph(g, catch_errors=False)
                actual = self.run_backend(g, output_names_with_port, onnx_feed_dict, large_model)
            self.assert_results_equal(expected, actual, rtol, atol, check_value, check_shape, check_dtype)
            if graph_validator:
                self.assertTrue(graph_validator(g))
        if test_tflite:
            tfl_results, tfl_outputs = self.run_tflite(tflite_path, feed_dict)
            test_tflite = tfl_results is not None
        if test_tflite:
            if run_tfl_consistency_test:
                # tflite must agree with the TF reference before we compare
                # the ONNX-from-tflite conversion against tflite.
                self.assert_results_equal(expected, tfl_results, rtol, atol, check_value, check_shape, check_dtype)
            tfl_process_args = process_args.copy()
            if 'inputs_as_nchw' in tfl_process_args:
                nchw_inps_with_port = tfl_process_args['inputs_as_nchw']
                tfl_process_args['inputs_as_nchw'] = [i.split(':')[0] for i in nchw_inps_with_port]
            input_names_without_port = [inp.split(':')[0] for inp in feed_dict.keys()]
            g = process_tf_graph(None, opset=self.config.opset,
                                 input_names=input_names_without_port,
                                 output_names=tfl_outputs,
                                 target=self.config.target,
                                 tflite_path=tflite_path,
                                 **tfl_process_args)
            g = optimizer.optimize_graph(g)
            onnx_feed_dict_without_port = {k.split(':')[0]: v for k, v in onnx_feed_dict.items()}
            onnx_from_tfl_res = self.run_backend(g, tfl_outputs, onnx_feed_dict_without_port, postfix="_from_tflite")
            self.assert_results_equal(tfl_results, onnx_from_tfl_res, rtol, atol, check_value, check_shape, check_dtype)
            if graph_validator:
                self.assertTrue(graph_validator(g))
        if g is None:
            raise unittest.SkipTest("Both tf and tflite marked to skip")
        return g

    def save_onnx_model(self, model_proto, feed_dict, postfix="", external_tensor_storage=None):
        """Write the ONNX model (plus test data in debug mode) to disk."""
        target_path = utils.save_onnx_model(self.test_data_directory, self._testMethodName + postfix, feed_dict,
                                            model_proto, include_test_data=self.config.is_debug_mode,
                                            as_text=self.config.is_debug_mode,
                                            external_tensor_storage=external_tensor_storage)
        self.logger.debug("create model file: %s", target_path)
        return target_path
| 47.625767 | 120 | 0.626369 |
acf49b44374155fed68ffa0a48984299f54ad542 | 686 | py | Python | ansible/roles/db/molecule/default/tests/test_default.py | Otus-DevOps-2020-02/souljapanic_infra | d577e898342a2b42470c96e0d28228d1226d798e | [
"MIT"
] | null | null | null | ansible/roles/db/molecule/default/tests/test_default.py | Otus-DevOps-2020-02/souljapanic_infra | d577e898342a2b42470c96e0d28228d1226d798e | [
"MIT"
] | 1 | 2020-04-01T14:54:38.000Z | 2020-04-01T14:54:38.000Z | ansible/roles/souljapanic.db/molecule/default/tests/test_default.py | Otus-DevOps-2020-02/souljapanic_infra | d577e898342a2b42470c96e0d28228d1226d798e | [
"MIT"
] | null | null | null | import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
# check if MongoDB is enabled and running
def test_mongo_running_and_enabled(host):
    """The mongod service must be active now and enabled at boot."""
    service = host.service("mongod")
    assert service.is_running
    assert service.is_enabled
# check if configuration file contains the required line
def test_config_file(host):
    """mongod.conf must exist and bind MongoDB to all interfaces."""
    conf = host.file('/etc/mongod.conf')
    assert conf.contains('bindIp: 0.0.0.0')
    assert conf.is_file
# check port
def test_port_listening(host):
    """MongoDB's default port 27017 must be listening on all interfaces."""
    socket = host.socket('tcp://0.0.0.0:27017')
    assert socket.is_listening
| 28.583333 | 63 | 0.758017 |
acf49d998b8adb4144a0911a23c2932c1a05059b | 2,128 | py | Python | Scripts/player.py | dhaval6552/ecommerce-2 | ab80fbbf15c0fbd37db94cfd7aa9a3ac0b46c737 | [
"MIT"
] | null | null | null | Scripts/player.py | dhaval6552/ecommerce-2 | ab80fbbf15c0fbd37db94cfd7aa9a3ac0b46c737 | [
"MIT"
] | null | null | null | Scripts/player.py | dhaval6552/ecommerce-2 | ab80fbbf15c0fbd37db94cfd7aa9a3ac0b46c737 | [
"MIT"
] | null | null | null | #!c:\users\dhava\djangopractice\ecommerce-2\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
from __future__ import print_function
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
# --------------------------------------------------------------------
# an image animation player
class UI(tkinter.Label):
    """Tk label that plays an animated image.

    ``im`` is either a list of Image frames or a single multi-frame image
    (a "sequence", advanced via seek). Frames are re-scheduled with
    Tk's ``after`` using each frame's ``duration`` info (default 100 ms).
    """
    def __init__(self, master, im):
        if isinstance(im, list):
            # list of images
            # NOTE(review): the first list element is sliced away and the
            # second frame (im[1]) is what gets displayed/re-pasted first;
            # im[0] appears never to be shown — confirm this is intended.
            self.im = im[1:]
            im = self.im[0]
        else:
            # sequence
            self.im = im
        if im.mode == "1":
            # 1-bit images need a BitmapImage with an explicit foreground.
            self.image = ImageTk.BitmapImage(im, foreground="white")
        else:
            self.image = ImageTk.PhotoImage(im)
        tkinter.Label.__init__(self, master, image=self.image, bg="black", bd=0)
        self.update()
        duration = im.info.get("duration", 100)
        # Kick off the animation loop; next() re-schedules itself.
        self.after(duration, self.next)
    def next(self):
        """Advance to the next frame and re-schedule; stop at the end."""
        if isinstance(self.im, list):
            try:
                # Pop the next frame off the list and paste it in place.
                im = self.im[0]
                del self.im[0]
                self.image.paste(im)
            except IndexError:
                return # end of list
        else:
            try:
                im = self.im
                im.seek(im.tell() + 1)
                self.image.paste(im)
            except EOFError:
                return # end of file
        duration = im.info.get("duration", 100)
        self.after(duration, self.next)
        self.update_idletasks()
# --------------------------------------------------------------------
# script interface
if __name__ == "__main__":
if not sys.argv[1:]:
print("Syntax: python player.py imagefile(s)")
sys.exit(1)
filename = sys.argv[1]
root = tkinter.Tk()
root.title(filename)
if len(sys.argv) > 2:
# list of images
print("loading...")
im = []
for filename in sys.argv[1:]:
im.append(Image.open(filename))
else:
# sequence
im = Image.open(filename)
UI(root, im).pack()
root.mainloop()
| 21.714286 | 80 | 0.49765 |
acf49e040dcbda76c7b89067a70730474007b1fc | 2,863 | py | Python | lego/apps/files/fields.py | ollfkaih/lego | b15aacaf09efe90e7f984d25b0e7bddbe12647e8 | [
"MIT"
] | 45 | 2017-10-24T12:09:06.000Z | 2021-11-03T21:21:03.000Z | lego/apps/files/fields.py | ollfkaih/lego | b15aacaf09efe90e7f984d25b0e7bddbe12647e8 | [
"MIT"
] | 980 | 2017-10-24T12:29:07.000Z | 2022-03-31T04:04:31.000Z | lego/apps/files/fields.py | wahello/lego | a0b02f3abc997fe96326e9c9c05b49847170041b | [
"MIT"
] | 23 | 2018-04-11T16:34:22.000Z | 2021-11-23T12:28:30.000Z | from urllib.parse import unquote
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.core.validators import URLValidator
from rest_framework import serializers
from lego.apps.files.constants import IMAGE
from lego.apps.files.thumbor import generate_url
from .models import File
from .storage import storage
url_validator = URLValidator()
class FileField(serializers.PrimaryKeyRelatedField):
    """DRF field referencing a stored File by ``key:token`` string.

    Incoming values are normally ``"<key>:<token>"``; the token must match
    the File's token. As a special case, if the incoming value is a URL it
    is treated as "unchanged" and the currently stored file id is reused
    (``access_granted`` skips the token check in that case).
    Serializes to a signed download URL.
    """
    default_error_messages = {
        "required": "This field is required.",
        "does_not_exist": 'Invalid pk "{pk_value}" - object does not exist.',
        "incorrect_type": "Incorrect type. Expected pk value, received {data_type}.",
        "incorrect_token": "Incorrect file token, you cannot access this file.",
    }
    # Optional list of File.file_type values this field accepts.
    allowed_types = None
    def __init__(self, allowed_types=None, **kwargs):
        super().__init__(**kwargs)
        self.allowed_types = allowed_types
        # Set per-validation in run_validation; True when the incoming value
        # was a URL (existing file kept, token check bypassed).
        # NOTE(review): this is mutable state on the field instance — verify
        # fields are not shared across concurrent serializer validations.
        self.access_granted = False
    def get_queryset(self):
        """Restrict candidate files to the allowed types, if any."""
        if not self.allowed_types:
            return File.objects.all()
        return File.objects.filter(file_type__in=self.allowed_types)
    def use_pk_only_optimization(self):
        return True
    def to_representation(self, value):
        # Serialize as a signed URL rather than the raw key.
        return storage.generate_signed_url(File.bucket, value.pk)
    def run_validation(self, data=None):
        # NOTE(review): DRF's default for ``data`` is ``serializers.empty``;
        # this override replaces it with None/"" — confirm required/partial
        # semantics still behave as expected.
        if data is None:
            data = ""
        # Remove urls, url is not valid as a file
        self.access_granted = False
        try:
            url_validator(str(data))
        except ValidationError:
            # Not a URL: fall through and validate as a key:token value.
            pass
        else:
            # A URL means "keep the existing file": substitute the stored id.
            data = getattr(self.parent.instance, f"{self.source}_id")
            self.access_granted = True
        return super().run_validation(data)
    def to_internal_value(self, data):
        """Resolve ``key:token`` (or a bare key when access was granted) to a File."""
        if self.access_granted:
            key, token = data, None
        else:
            try:
                key, token = data.split(":")
            except ValueError:
                # Missing or malformed token portion.
                self.fail("incorrect_token")
        try:
            file = self.get_queryset().get(key=key)
            if file.token != token and not self.access_granted:
                self.fail("incorrect_token")
            return file
        except ObjectDoesNotExist:
            self.fail("does_not_exist", pk_value=data)
        except (TypeError, ValueError):
            self.fail("incorrect_type", data_type=type(data).__name__)
class ImageField(FileField):
    """FileField restricted to images, rendered through thumbor.

    Serializes to a thumbor URL so images are resized on demand; pass an
    ``options`` dict to the constructor to control thumbor parameters.
    """
    # Default thumbor options; overridden per instance when provided.
    options = {}

    def __init__(self, options=None, **kwargs):
        # Only files of type IMAGE may be referenced by this field.
        kwargs["allowed_types"] = [IMAGE]
        super().__init__(**kwargs)
        if options:
            self.options = options

    def to_representation(self, value):
        key = unquote(value.pk)
        return generate_url(key, **self.options)
| 29.515464 | 85 | 0.640936 |
acf49e18f3d76cbda81f3010141fd2a8462b8fb6 | 37 | py | Python | ext/admin/__init__.py | jakobgrine/musicbot | 1382dcbdc47fd0327e0f09ad5ea870c73f5bc956 | [
"MIT"
] | null | null | null | ext/admin/__init__.py | jakobgrine/musicbot | 1382dcbdc47fd0327e0f09ad5ea870c73f5bc956 | [
"MIT"
] | null | null | null | ext/admin/__init__.py | jakobgrine/musicbot | 1382dcbdc47fd0327e0f09ad5ea870c73f5bc956 | [
"MIT"
] | 3 | 2020-08-03T16:00:36.000Z | 2020-10-23T13:43:37.000Z | from .admin import *
from . import *
| 12.333333 | 20 | 0.675676 |
acf49f5d58292b8a6c0c5ae080b1ed5ded0a1d59 | 4,039 | py | Python | task3/multicore/parsec-3.0/benchmark.py | YukioZzz/swiss-knife | 81fcbf1a9a12348829edb5fc3fe100fa148efaac | [
"MIT"
] | 2 | 2021-10-30T15:12:15.000Z | 2021-11-01T09:33:55.000Z | task3/multicore/parsec-3.0/benchmark.py | YukioZzz/swiss-knife | 81fcbf1a9a12348829edb5fc3fe100fa148efaac | [
"MIT"
] | null | null | null | task3/multicore/parsec-3.0/benchmark.py | YukioZzz/swiss-knife | 81fcbf1a9a12348829edb5fc3fe100fa148efaac | [
"MIT"
] | 7 | 2021-10-31T12:17:09.000Z | 2021-11-06T09:49:29.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import csv
import subprocess
import multiprocessing
import matplotlib.pyplot as plt
programs = ["blackscholes","bodytrack","ferret","fluidanimate","freqmine","swaptions","vips"] # remove raytrace, as its threaded version blocks, also remove facesim and x264, as they meet segment fault
# PARSEC input set names, ordered small -> large; indexed by "dataset".
inputs = ["simsmall","simmedium","simlarge"]
# Build configurations: [0] sequential baseline, [1] threaded.
configs = ["gcc-serial","gcc"]
# Directory where per-run CSV result files and plots are written.
resultDir = "/home/result/"
def benchmark(programs, threads, dataset, base):
    """Run each PARSEC app via parsecmgmt and record wall times to a CSV.

    Writes ``result_seq_d<dataset>`` (when base=True, sequential config)
    or ``result_t<threads>_d<dataset>`` under resultDir, with header
    ``apps,walltime`` and one ``<app>,<seconds>`` row per program.
    The file name/format is parsed back by plot(), so keep them in sync.
    """
    data_filename = resultDir + ("result_seq" + "_d" + str(dataset) if base == True else "result_t" + str(threads) + "_d" + str(dataset))
    # Sequential baseline uses the gcc-serial build; otherwise threaded gcc.
    config = configs[0] if base == True else configs[1]
    with open(data_filename, 'w') as f:
        f.write('apps,walltime\n')
        for app in programs:
            print(f'Running app {app} with {threads} thread(s) and {inputs[dataset]} dataset')
            f.write(f'{app},')
            # Run the benchmark and keep only the "real ..." line of `time`.
            out = subprocess.check_output(f'parsecmgmt -a run -p {app} -c {config} -i {inputs[dataset]} -n {threads} | grep real', shell=True, text=True)
            # Parse "realXmY.YYYs" into total seconds.
            seconds = re.findall(".*m(.*)s.*", out)[0]
            minutes = re.findall("real(.*)m.*", out)[0]
            wallTime = str(int(minutes)*60 + float(seconds))
            print('wall time:', wallTime)
            f.write(wallTime+'\n')
            # Flush so partial results survive an aborted run.
            f.flush()
f.flush()
import matplotlib.pyplot as plt
import pandas as pd
import fnmatch
def plot(programs, mode):
    """Plot speedup bars from the CSV files written by benchmark().

    mode == "threads": compare thread counts on the simlarge dataset
    (files result_t*_d2), speedup relative to the sequential simlarge
    baseline. mode == "dataset": compare datasets at 8 threads
    (files result_t8_d*), speedup relative to each dataset's baseline.
    Saves result_t.png or result_d.png in resultDir.
    """
    # NOTE(review): chdir is a process-wide side effect; callers after
    # plot() will run with cwd == resultDir.
    os.chdir(resultDir)
    name_list = programs
    results = []
    legends = []
    basefiles = []
    base_lists = []
    pattern = 'result_t*_d2' if mode == "threads" else 'result_t8_d*'
    for f_name in os.listdir('./'):
        if fnmatch.fnmatch(f_name, pattern):
            results.append(f_name)
        elif fnmatch.fnmatch(f_name, 'result_seq_d*'):
            basefiles.append(f_name)
    results.sort()
    basefiles.sort()
    # Baseline wall times, one list per dataset (sorted by file name).
    for basefile in basefiles:
        df = pd.read_csv(basefile, delimiter=',')
        base_list = list(df.to_dict()["walltime"].values())
        base_lists.append(base_list)
    # Legend labels: thread count substring, or the dataset index as int.
    legends = [(f_name[8:f_name.find('d')]
                if mode == "threads"
                else int(f_name[f_name.find('d')+1:])) for f_name in results]
    num_lists = []
    datasetSize = 0
    for result in results:
        df = pd.read_csv(result, delimiter=',')
        num_list = list(df.to_dict()["walltime"].values())
        if mode == "threads":
            # NOTE(review): base_lists[2] hard-codes the simlarge baseline;
            # assumes all three result_seq_d* files exist — verify.
            num_list = [base_lists[2][i] / num_list[i] for i in range(len(num_list))]
        if mode == "dataset":
            num_list = [base_lists[datasetSize][i] / num_list[i] for i in range(len(num_list))]
        num_lists.append(num_list)
        datasetSize = datasetSize + 1
    x =list(range(len(num_lists[0])))
    total_width = 0.8
    n = len(results)
    width = total_width / n
    plt.figure(figsize=(12, 6.5))
    plt.title("corenum comparison" if mode == "threads" else "dataset comparison")
    for i in range(n):
        plt.bar(x, num_lists[i], width=width, label=(legends[i]+" Threads" if mode == "threads" else inputs[legends[i]]))
        for j in range(len(x)):
            plt.text(x[j], 0, str(round(num_lists[i][j],1)), ha='center', va='bottom', fontsize=5, color='black')
        # Shift bar positions right for the next series.
        # NOTE(review): this inner loop shadows the outer loop variable `i`
        # (harmless since `for` rebinds it, but confusing).
        for i in range(len(x)):
            x[i] = x[i] + width
    # Tick positions one extra bar-width to the right of the originals.
    x = [i + width for i in list(range(len(num_lists[0])))]
    plt.xticks(x, labels=programs)
    plt.xlabel("Programs")
    plt.ylabel("Speed Up")
    plt.legend()
    plt.savefig('result_t.png' if mode == "threads" else f'result_d.png')
def main():
    """Run the full benchmark sweep, then produce both comparison plots."""
    # NOTE(review): cpu_count is computed but never used.
    cpu_count = multiprocessing.cpu_count()
    # Thread scaling sweep on the largest dataset.
    for threads in [1,2,4,8]:
        print(f'Threads Num: {threads}')
        benchmark(programs, threads, 2, False) #fix dataset to the large one
    # Dataset sweep: threaded (8) and sequential baseline for each dataset.
    for dataset in range(3):
        benchmark(programs, 8, dataset, False) #fix threads to 8
        benchmark(programs, 1, dataset, True) #test for sequential code
    plot(programs,"threads")
    plot(programs,"dataset")
if __name__ == '__main__':
    main()
| 35.743363 | 202 | 0.608319 |
acf49fc0e9ed625e61ce94e7cc85ad45e8a12da0 | 6,904 | py | Python | kubernetes/client/models/v1_component_status.py | sthagen/kubernetes-client-python | 3a183048d7d568ba5ea418bcfb8f61713908d3ea | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1_component_status.py | sthagen/kubernetes-client-python | 3a183048d7d568ba5ea418bcfb8f61713908d3ea | [
"Apache-2.0"
] | 3 | 2021-11-30T03:11:13.000Z | 2022-02-09T03:39:41.000Z | kubernetes/client/models/v1_component_status.py | sthagen/kubernetes-client-python | 3a183048d7d568ba5ea418bcfb8f61713908d3ea | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.24
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1ComponentStatus(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python-side attribute name -> declared OpenAPI type.
    openapi_types = {
        'api_version': 'str',
        'conditions': 'list[V1ComponentCondition]',
        'kind': 'str',
        'metadata': 'V1ObjectMeta'
    }
    # Python-side attribute name -> JSON key in the API wire format.
    attribute_map = {
        'api_version': 'apiVersion',
        'conditions': 'conditions',
        'kind': 'kind',
        'metadata': 'metadata'
    }
    def __init__(self, api_version=None, conditions=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """V1ComponentStatus - a model defined in OpenAPI"""  # noqa: E501
        # Fall back to a default client Configuration when none is supplied.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._api_version = None
        self._conditions = None
        self._kind = None
        self._metadata = None
        self.discriminator = None
        # Only assign provided values so setters/validation run once.
        if api_version is not None:
            self.api_version = api_version
        if conditions is not None:
            self.conditions = conditions
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
    @property
    def api_version(self):
        """Gets the api_version of this V1ComponentStatus.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V1ComponentStatus.  # noqa: E501
        :rtype: str
        """
        return self._api_version
    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1ComponentStatus.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this V1ComponentStatus.  # noqa: E501
        :type: str
        """
        self._api_version = api_version
    @property
    def conditions(self):
        """Gets the conditions of this V1ComponentStatus.  # noqa: E501

        List of component conditions observed  # noqa: E501

        :return: The conditions of this V1ComponentStatus.  # noqa: E501
        :rtype: list[V1ComponentCondition]
        """
        return self._conditions
    @conditions.setter
    def conditions(self, conditions):
        """Sets the conditions of this V1ComponentStatus.

        List of component conditions observed  # noqa: E501

        :param conditions: The conditions of this V1ComponentStatus.  # noqa: E501
        :type: list[V1ComponentCondition]
        """
        self._conditions = conditions
    @property
    def kind(self):
        """Gets the kind of this V1ComponentStatus.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V1ComponentStatus.  # noqa: E501
        :rtype: str
        """
        return self._kind
    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1ComponentStatus.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this V1ComponentStatus.  # noqa: E501
        :type: str
        """
        self._kind = kind
    @property
    def metadata(self):
        """Gets the metadata of this V1ComponentStatus.  # noqa: E501

        :return: The metadata of this V1ComponentStatus.  # noqa: E501
        :rtype: V1ObjectMeta
        """
        return self._metadata
    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1ComponentStatus.

        :param metadata: The metadata of this V1ComponentStatus.  # noqa: E501
        :type: V1ObjectMeta
        """
        self._metadata = metadata
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists and dicts via to_dict().
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1ComponentStatus):
            return False
        # Equality is structural: compare the serialized dicts.
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1ComponentStatus):
            return True
        return self.to_dict() != other.to_dict()
| 33.678049 | 312 | 0.630794 |
acf49fd37e297f98738ebb33c86986f76719e98a | 1,528 | py | Python | quickspy/qsparts.py | kirte2849/Quickspy | 767d0fb8ded283aa0d8122d77e15dc411f553994 | [
"MIT"
] | 1 | 2020-07-11T13:41:40.000Z | 2020-07-11T13:41:40.000Z | quickspy/qsparts.py | kirte2849/Quickspy | 767d0fb8ded283aa0d8122d77e15dc411f553994 | [
"MIT"
] | null | null | null | quickspy/qsparts.py | kirte2849/Quickspy | 767d0fb8ded283aa0d8122d77e15dc411f553994 | [
"MIT"
] | null | null | null | from collections import namedtuple
from quickspy.net import NetEngine
from quickspy.net import UrlManager
from quickspy.messenger import Logger
from quickspy.messenger import Messenger
# Registry entries pairing a client uuid with the component instance it owns.
RegedNE = namedtuple('RegedNE', ['uuid', 'NE'])  # NetEngine entry
RegedUP = namedtuple('RegedUP', ['uuid', 'UP'])  # UrlManager entry
RegedM = namedtuple('RegedM', ['uuid', 'M'])  # Messenger entry
RegedL = namedtuple('RegedL', ['uuid', 'L'])  # Logger entry
class NEMannager:
    """Registry handing out one NetEngine per uuid."""

    def __init__(self):
        # uuid -> RegedNE(uuid, NetEngine)
        self.pool = {}

    def reg(self, uuid):
        """Return the NetEngine registered for *uuid*, creating it on first use."""
        entry = self.pool.get(uuid)
        if entry is None:
            entry = RegedNE(uuid, NetEngine())
            self.pool[uuid] = entry
        return entry.NE
class UPMannager:
    """Registry handing out one UrlManager per uuid."""

    def __init__(self):
        # uuid -> RegedUP(uuid, UrlManager)
        self.pool = {}

    def reg(self, uuid):
        """Return the UrlManager registered for *uuid*, creating it on first use."""
        entry = self.pool.get(uuid)
        if entry is None:
            entry = RegedUP(uuid, UrlManager())
            self.pool[uuid] = entry
        return entry.UP
class MMannager:
    """Registry handing out one Messenger per uuid."""

    def __init__(self):
        # uuid -> RegedM(uuid, Messenger)
        self.pool = {}

    def reg(self, uuid):
        """Return the Messenger registered for *uuid*, creating it on first use."""
        entry = self.pool.get(uuid)
        if entry is None:
            entry = RegedM(uuid, Messenger())
            self.pool[uuid] = entry
        return entry.M
class LMannager:
    """Registry handing out one Logger per uuid."""

    def __init__(self):
        # uuid -> RegedL(uuid, Logger)
        self.pool = {}

    def reg(self, uuid):
        """Return the Logger registered for *uuid*, creating it on first use."""
        entry = self.pool.get(uuid)
        if entry is None:
            entry = RegedL(uuid, Logger())
            self.pool[uuid] = entry
        return entry.L
| 24.253968 | 47 | 0.565445 |
acf4a043a8483413981710cdbb7d093fbf34a7aa | 909 | py | Python | painelmma_api/dbrouters.py | ibamacsr/painelmma_api | a11a6cd63e312f09f445b139fcff8c11ab383764 | [
"MIT"
] | null | null | null | painelmma_api/dbrouters.py | ibamacsr/painelmma_api | a11a6cd63e312f09f445b139fcff8c11ab383764 | [
"MIT"
] | null | null | null | painelmma_api/dbrouters.py | ibamacsr/painelmma_api | a11a6cd63e312f09f445b139fcff8c11ab383764 | [
"MIT"
] | null | null | null | class SiscomRouter(object):
"""
A router to control all database operations on models in the
restApp application.
"""
def db_for_read(self, model, **hints):
"""
Attempts to read operations models go to painelmma_db.
"""
if model._meta.app_label == 'painelmma_api':
return 'painelmma_db'
return None
def db_for_write(self, model, **hints):
"""
Attempts to write operations models go to painelmma_db.
"""
if model._meta.app_label == 'painelmma_api':
return 'painelmma_db'
return None
def allow_relation(self, obj1, obj2, **hints):
"""
Allow relations if a model in the operations app is involved.
"""
if obj1._meta.app_label == 'painelmma_api' or \
obj2._meta.app_label == 'painelmma_api':
return True
return None | 31.344828 | 69 | 0.588559 |
acf4a086768aaddef6dc20b52d6e780d00236da9 | 5,304 | py | Python | tests/tests/retime/robustness/test_robustness_main.py | stevegolton/toppra | 846e2a7f5b87e0e1884b244b07d5fd661edcd9bd | [
"MIT"
] | 342 | 2017-07-26T17:37:19.000Z | 2022-03-28T19:50:27.000Z | tests/tests/retime/robustness/test_robustness_main.py | stevegolton/toppra | 846e2a7f5b87e0e1884b244b07d5fd661edcd9bd | [
"MIT"
] | 151 | 2017-11-30T06:14:29.000Z | 2022-03-29T02:06:08.000Z | tests/tests/retime/robustness/test_robustness_main.py | stevegolton/toppra | 846e2a7f5b87e0e1884b244b07d5fd661edcd9bd | [
"MIT"
] | 134 | 2017-08-18T21:35:39.000Z | 2022-03-25T03:43:08.000Z | import pytest
import numpy as np
import yaml
import re
import pandas
import tabulate
import time
try:
import pathlib
except ImportError:
import pathlib2 as pathlib
import toppra
import toppra.constraint as constraint
import toppra.algorithm as algo
import matplotlib.pyplot as plt
def test_robustness_main(request):
    """Load a problem suite filtered by regex, solve each problem, and report results.

    Problems come from ``problem_suite_1.yaml`` next to this file; each problem is
    expanded over the cross product of desired durations, solver wrappers, and
    gridpoint counts. The test fails if any matched problem cannot be solved.
    """
    toppra.setup_logging(request.config.getoption("--loglevel"))
    problem_regex = request.config.getoption("--robust_regex")
    visualize = request.config.getoption("--visualize")
    # parse problems from a configuration file
    parsed_problems = []
    path = pathlib.Path(__file__)
    path = path / '../problem_suite_1.yaml'
    problem_dict = yaml.load(path.resolve().read_text(), Loader=yaml.SafeLoader)
    for key in problem_dict:
        # NOTE(review): when 'ss_waypoints' does not have exactly 2 entries,
        # ss_waypoints from the PREVIOUS key is silently reused (and the first
        # such key would raise NameError) — presumably every suite entry has
        # exactly 2; confirm against problem_suite_1.yaml.
        if len(problem_dict[key]['ss_waypoints']) == 2:
            ss_waypoints = np.linspace(problem_dict[key]['ss_waypoints'][0],
                                       problem_dict[key]['ss_waypoints'][1],
                                       len(problem_dict[key]['waypoints']))
        for duration in problem_dict[key]['desired_duration']:
            for solver_wrapper in problem_dict[key]['solver_wrapper']:
                for nb_gridpoints in problem_dict[key]['nb_gridpoints']:
                    parsed_problems.append({
                        "name": key,
                        "problem_id": "{:}-{:5f}-{:}-{:}".format(key, duration, solver_wrapper, nb_gridpoints),
                        'waypoints': np.array(problem_dict[key]['waypoints'], dtype=float),
                        'ss_waypoints': ss_waypoints,
                        'vlim': np.r_[problem_dict[key]['vlim']],
                        'alim': np.r_[problem_dict[key]['alim']],
                        'desired_duration': duration,
                        'solver_wrapper': solver_wrapper,
                        'gridpoints': np.linspace(ss_waypoints[0], ss_waypoints[-1], nb_gridpoints),
                        'nb_gridpoints': nb_gridpoints
                    })
    parsed_problems_df = pandas.DataFrame(parsed_problems)
    # solve problems that matched the given regex
    all_success = True
    for row_index, problem_data in parsed_problems_df.iterrows():
        if re.match(problem_regex, problem_data['problem_id']) is None:
            continue
        t0 = time.time()
        path = toppra.SplineInterpolator(
            problem_data['ss_waypoints'],
            problem_data['waypoints'], bc_type='clamped')
        # Symmetric velocity/acceleration bounds: rows are (-limit, +limit).
        vlim = np.vstack((- problem_data['vlim'], problem_data['vlim'])).T
        alim = np.vstack((- problem_data['alim'], problem_data['alim'])).T
        pc_vel = constraint.JointVelocityConstraint(vlim)
        pc_acc = constraint.JointAccelerationConstraint(
            alim, discretization_scheme=constraint.DiscretizationType.Interpolation)
        t1 = time.time()
        # desired_duration == 0 means "as fast as possible" (plain TOPPRA);
        # otherwise use the specified-duration variant.
        if problem_data['desired_duration'] == 0:
            instance = algo.TOPPRA([pc_vel, pc_acc], path, gridpoints=problem_data['gridpoints'],
                                   solver_wrapper=problem_data['solver_wrapper'])
        else:
            instance = algo.TOPPRAsd([pc_vel, pc_acc], path, gridpoints=problem_data['gridpoints'],
                                     solver_wrapper=problem_data['solver_wrapper'])
            instance.set_desired_duration(problem_data['desired_duration'])
        t2 = time.time()
        jnt_traj = instance.compute_trajectory(0, 0)
        data = instance.problem_data
        t3 = time.time()
        if visualize:
            _t = np.linspace(0, jnt_traj.duration, 100)
            fig, axs = plt.subplots(2, 2)
            axs[0, 0].plot(data.K[:, 0], c="C0")
            axs[0, 0].plot(data.K[:, 1], c="C0")
            axs[0, 0].plot(data.sd_vec ** 2, c="C1")
            axs[0, 1].plot(_t, jnt_traj(_t))
            axs[1, 0].plot(_t, jnt_traj(_t, 1))
            axs[1, 1].plot(_t, jnt_traj(_t, 2))
            axs[0, 0].set_title("param")
            axs[0, 1].set_title("jnt. pos.")
            axs[1, 0].set_title("jnt. vel.")
            axs[1, 1].set_title("jnt. acc.")
            plt.show()
        # compute_trajectory returns None on failure; record status either way.
        if jnt_traj is None:
            all_success = False
            parsed_problems_df.loc[row_index, "status"] = "FAIL"
            parsed_problems_df.loc[row_index, "duration"] = None
        else:
            parsed_problems_df.loc[row_index, "status"] = "SUCCESS"
            parsed_problems_df.loc[row_index, "duration"] = jnt_traj.duration
        parsed_problems_df.loc[row_index, "t_init(ms)"] = (t1 - t0) * 1e3
        parsed_problems_df.loc[row_index, "t_setup(ms)"] = (t2 - t1) * 1e3
        parsed_problems_df.loc[row_index, "t_solve(ms)"] = (t3 - t2) * 1e3
    # get all rows with status different from NaN, then reports other columns.
    result_df = parsed_problems_df[parsed_problems_df["status"].notna()][
        ["status", "duration", "desired_duration", "name", "solver_wrapper",
         "nb_gridpoints", "problem_id", "t_init(ms)", "t_setup(ms)", "t_solve(ms)"]]
    result_df.to_csv('%s.result' % __file__)
    print("Test summary\n")
    print(tabulate.tabulate(result_df, result_df.columns))
    assert all_success, "Unable to solve some problems in the test suite"
| 44.949153 | 111 | 0.601056 |
acf4a0b8932373b93f33e3e98c35b1ec702746cf | 308 | py | Python | api/urls.py | mbarrientos/release-control | e6aa88ef3a938c034e5991e00d0b3f26c3cd6697 | [
"MIT"
] | null | null | null | api/urls.py | mbarrientos/release-control | e6aa88ef3a938c034e5991e00d0b3f26c3cd6697 | [
"MIT"
] | null | null | null | api/urls.py | mbarrientos/release-control | e6aa88ef3a938c034e5991e00d0b3f26c3cd6697 | [
"MIT"
] | null | null | null | from django.conf.urls import include, url
from rest_framework import routers
from api.views import ReleaseViewSet, ProjectViewSet
# Register the REST viewsets on a DRF DefaultRouter, which also provides the
# browsable API root view.
router = routers.DefaultRouter()
router.register(r'release', ReleaseViewSet)
router.register(r'project', ProjectViewSet)
# Mount every router-generated URL at the root of this URLconf.
urlpatterns = [
    url(r'', include(router.urls)),
]
| 23.692308 | 52 | 0.782468 |
acf4a1792199742373c0b71bbf8c3f58f90d7010 | 1,751 | py | Python | src/python/grpcio_tests/tests/unit/_server_test.py | benjaminp/grpc | dfb1a0f20624417bff408a14b12a23713085b999 | [
"Apache-2.0"
] | 91 | 2018-11-24T05:33:58.000Z | 2022-03-16T05:58:05.000Z | src/python/grpcio_tests/tests/unit/_server_test.py | benjaminp/grpc | dfb1a0f20624417bff408a14b12a23713085b999 | [
"Apache-2.0"
] | 11 | 2019-06-02T23:50:17.000Z | 2022-02-04T23:58:56.000Z | src/python/grpcio_tests/tests/unit/_server_test.py | benjaminp/grpc | dfb1a0f20624417bff408a14b12a23713085b999 | [
"Apache-2.0"
] | 18 | 2018-11-24T10:35:29.000Z | 2021-04-22T07:22:10.000Z | # Copyright 2018 The gRPC Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from concurrent import futures
import unittest
import grpc
class _ActualGenericRpcHandler(grpc.GenericRpcHandler):
    """Minimal concrete GenericRpcHandler used only to satisfy the type check."""
    def service(self, handler_call_details):
        # Never matches any RPC method.
        return None
class ServerTest(unittest.TestCase):
    """Verify that grpc.server rejects handlers that are not GenericRpcHandler."""
    def test_not_a_generic_rpc_handler_at_construction(self):
        # A non-handler object passed at construction time must raise, and the
        # error message must name the required base class.
        with self.assertRaises(AttributeError) as exception_context:
            grpc.server(
                futures.ThreadPoolExecutor(max_workers=5),
                handlers=[
                    _ActualGenericRpcHandler(),
                    object(),
                ])
        self.assertIn('grpc.GenericRpcHandler',
                      str(exception_context.exception))
    def test_not_a_generic_rpc_handler_after_construction(self):
        # Same check when handlers are added after the server exists.
        server = grpc.server(futures.ThreadPoolExecutor(max_workers=5))
        with self.assertRaises(AttributeError) as exception_context:
            server.add_generic_rpc_handlers([
                _ActualGenericRpcHandler(),
                object(),
            ])
        self.assertIn('grpc.GenericRpcHandler',
                      str(exception_context.exception))
if __name__ == '__main__':
    # Allow running this test module directly; verbosity=2 prints each test name.
    unittest.main(verbosity=2)
| 33.037736 | 74 | 0.683038 |
acf4a180b9e010905d8f8a0c95ac687537aa1ea1 | 5,487 | py | Python | test/quantum_volume/test_qv.py | 1ucian0/qiskit-ignis | 76920622401743a30d74005433e588a48d570691 | [
"Apache-2.0"
] | null | null | null | test/quantum_volume/test_qv.py | 1ucian0/qiskit-ignis | 76920622401743a30d74005433e588a48d570691 | [
"Apache-2.0"
] | null | null | null | test/quantum_volume/test_qv.py | 1ucian0/qiskit-ignis | 76920622401743a30d74005433e588a48d570691 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=undefined-loop-variable,invalid-name
"""
Run through Quantum volume
"""
import unittest
import qiskit
import qiskit.ignis.verification.quantum_volume as qv
from qiskit.providers.aer.noise import NoiseModel
from qiskit.providers.aer.noise.errors.standard_errors import depolarizing_error
SEED = 42
def qv_circuit_execution(qubit_lists: list, ntrials: int, shots: int):
    """
    create quantum volume circuits, simulate the ideal state and run a noisy simulation

    Args:
        qubit_lists (list): list of lists of qubits to apply qv circuits to
        ntrials (int): number of iterations (number of circuits)
        shots (int): number of shots per simulation

    Returns:
        tuple: a tuple of 2 lists:
            list of Results of the ideal statevector simulations
            list of Results of the noisy circuits simulations
    """
    # create the qv circuit
    qv_circs, qv_circs_nomeas = qv.qv_circuits(qubit_lists, ntrials)
    # get the ideal state
    statevector_backend = qiskit.Aer.get_backend('statevector_simulator')
    ideal_results = []
    for trial in range(ntrials):
        ideal_results.append(qiskit.execute(qv_circs_nomeas[trial],
                                            backend=statevector_backend).result())
    # define noise_model: depolarizing error on the 1-qubit (u2/u3) and
    # 2-qubit (cx) basis gates; u3 gets double the u2 error rate.
    noise_model = NoiseModel()
    p1q = 0.002
    p2q = 0.02
    noise_model.add_all_qubit_quantum_error(depolarizing_error(p1q, 1), 'u2')
    noise_model.add_all_qubit_quantum_error(depolarizing_error(2 * p1q, 1), 'u3')
    noise_model.add_all_qubit_quantum_error(depolarizing_error(p2q, 2), 'cx')
    # get the noisy results (fixed seed SEED keeps runs deterministic)
    backend = qiskit.Aer.get_backend('qasm_simulator')
    basis_gates = ['u1', 'u2', 'u3', 'cx']  # use U,CX for now
    exp_results = []
    for trial in range(ntrials):
        exp_results.append(
            qiskit.execute(qv_circs[trial], basis_gates=basis_gates, backend=backend,
                           noise_model=noise_model, shots=shots,
                           seed_simulator=SEED,
                           backend_options={'max_parallel_experiments': 0}).result())
    return ideal_results, exp_results
class TestQV(unittest.TestCase):
    """Tests for quantum volume circuit generation and fitting."""
    def test_qv_circuits(self):
        """ Test circuit generation """
        # Qubit list
        qubit_lists = [[0, 1, 2], [0, 1, 2, 4], [0, 1, 2, 4, 7]]
        ntrials = 5
        qv_circs, _ = qv.qv_circuits(qubit_lists, ntrials)
        self.assertEqual(len(qv_circs), ntrials,
                         "Error: Not enough trials")
        self.assertEqual(len(qv_circs[0]), len(qubit_lists),
                         "Error: Not enough circuits for the "
                         "number of specified qubit lists")
    def test_qv_circuits_with_seed(self):
        """Ensure seed is propagated to QuantumVolume objects."""
        qubit_lists = [list(range(5))]
        qv_circs, qv_circs_no_meas = qv.qv_circuits(qubit_lists, seed=3)
        # The per-circuit seed is encoded at the end of the instruction name;
        # 811 is the value derived from seed=3.
        meas_name = qv_circs[0][0].data[0][0].name
        no_meas_name = qv_circs_no_meas[0][0].data[0][0].name
        self.assertEqual(int(meas_name.split(',')[-1].rstrip(']')), 811)
        self.assertEqual(int(no_meas_name.split(',')[-1].rstrip(']')), 811)
    def test_measurements_in_circuits(self):
        """Ensure measurements are set or not on output circuits."""
        qubit_lists = [list(range(4))]
        qv_circs, qv_circs_no_meas = qv.qv_circuits(qubit_lists)
        qv_circs_measure_qubits = [
            x[1][0].index for x in qv_circs[0][0].data if x[0].name == 'measure']
        self.assertNotIn('measure',
                         [x[0].name for x in qv_circs_no_meas[0][0].data])
        self.assertEqual([0, 1, 2, 3], qv_circs_measure_qubits)
    def test_measurements_in_circuits_qubit_list_gap(self):
        """Test that there are no measurement instructions in output nomeas circuits."""
        # Non-contiguous qubit list: measured qubits must match it exactly.
        qubit_lists = [[1, 3, 5, 7]]
        qv_circs, qv_circs_no_meas = qv.qv_circuits(qubit_lists)
        qv_circs_measure_qubits = [
            x[1][0].index for x in qv_circs[0][0].data if x[0].name == 'measure']
        self.assertNotIn('measure',
                         [x[0].name for x in qv_circs_no_meas[0][0].data])
        self.assertEqual([1, 3, 5, 7], qv_circs_measure_qubits)
    def test_qv_fitter(self):
        """ Test the fitter"""
        qubit_lists = [[0, 1, 3], [0, 1, 3, 5], [0, 1, 3, 5, 7],
                       [0, 1, 3, 5, 7, 10]]
        ntrials = 5
        ideal_results, exp_results = qv_circuit_execution(qubit_lists,
                                                          ntrials,
                                                          shots=1024)
        qv_fitter = qv.QVFitter(qubit_lists=qubit_lists)
        qv_fitter.add_statevectors(ideal_results)
        qv_fitter.add_data(exp_results)
        # With only 5 noisy trials the first depth should not pass QV.
        qv_success_list = qv_fitter.qv_success()
        self.assertFalse(qv_success_list[0][0])
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 37.841379 | 88 | 0.630581 |
acf4a1a437d1ff98307369f82f80763d957c0118 | 647 | py | Python | Extended Programming Challenges Python/Flamaster/wrongs.py | szachovy/School-and-Training | 70f07c0d077da7ba1920d28d881fff7ddcbc37d9 | [
"MIT"
] | null | null | null | Extended Programming Challenges Python/Flamaster/wrongs.py | szachovy/School-and-Training | 70f07c0d077da7ba1920d28d881fff7ddcbc37d9 | [
"MIT"
] | null | null | null | Extended Programming Challenges Python/Flamaster/wrongs.py | szachovy/School-and-Training | 70f07c0d077da7ba1920d28d881fff7ddcbc37d9 | [
"MIT"
def dataSets():
    """Print the prompt asking the user how many data sets to process."""
    prompt = ('How many data sets?\n'
              'Typo number:\n')
    return print(prompt)
def printMenu():
    """Print the instructions for entering the words to shorten."""
    menu_text = ("Type words after space\n"
                 "I will shorten this words:\n")
    return print(menu_text)
def notletterCheck(words):
    """Validate that every word in *words* consists only of letters.

    On the first word containing a non-letter character (an empty string also
    fails, since ''.isalpha() is False), print an error message and exit the
    program with status 0, matching the original behavior.
    """
    for word in words:
        # Explicit check instead of `assert`: assertions are stripped when
        # Python runs with -O, which would silently disable this validation.
        if not str(word).isalpha():
            print('not every string is built from letters\n'
                  'run program again!')
            exit(0)
def howmanyCheck(number) -> int:
    """Convert *number* to an int; print an error and exit if it cannot be parsed."""
    try:
        return int(number)
    except ValueError:
        print('Please put integer value in order to type datasets')
        exit(0)
| 22.310345 | 67 | 0.576507 |
acf4a1e8488d6a5b7d7b644bfa56b09df35d1324 | 215 | py | Python | test/unit/core/test_async_event_loop.py | boto/botoflow | 49d8ed3bc9c57294504be82e933a051e1901b76e | [
"Apache-2.0"
] | 13 | 2016-06-15T06:10:57.000Z | 2021-10-30T03:52:28.000Z | test/unit/core/test_async_event_loop.py | DalavanCloud/botoflow | 49d8ed3bc9c57294504be82e933a051e1901b76e | [
"Apache-2.0"
] | 11 | 2016-09-15T01:48:08.000Z | 2019-01-09T06:11:44.000Z | test/unit/core/test_async_event_loop.py | DalavanCloud/botoflow | 49d8ed3bc9c57294504be82e933a051e1901b76e | [
"Apache-2.0"
] | 16 | 2016-06-05T03:42:04.000Z | 2022-03-01T17:43:14.000Z | import pytest
from botoflow.core import async_event_loop
pytestmark = pytest.mark.usefixtures('core_debug')
def test_smoke():
    """Smoke test: an empty event loop executes all (zero) tasks and returns None."""
    ev = async_event_loop.AsyncEventLoop()
    # PEP 8 (E711): compare against None with `is`, not `==`.
    assert ev.execute_all_tasks() is None
| 19.545455 | 50 | 0.767442 |
acf4a225925a5319340e27d0e7078757718293ba | 1,139 | py | Python | schedule/serializers.py | Yourcelium/Transport-Manager | 262c6a6b543b61debca64cb2de1e87e85f4645c8 | [
"MIT"
] | null | null | null | schedule/serializers.py | Yourcelium/Transport-Manager | 262c6a6b543b61debca64cb2de1e87e85f4645c8 | [
"MIT"
] | null | null | null | schedule/serializers.py | Yourcelium/Transport-Manager | 262c6a6b543b61debca64cb2de1e87e85f4645c8 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from .models import Resident, Destination, MedicalProvider, Trip, Issue
from django.contrib.auth.models import User
class UserSerializer(serializers.ModelSerializer):
    """Serialize every field of Django's built-in User model."""
    class Meta:
        model = User
        fields = '__all__'
class ResidentSerializer(serializers.ModelSerializer):
    """Serialize every field of the Resident model."""
    class Meta:
        model = Resident
        fields = '__all__'
class DestinationSerializer(serializers.ModelSerializer):
    """Serialize every field of the Destination model."""
    class Meta:
        model = Destination
        fields = '__all__'
class MedicalProviderSerializer(serializers.ModelSerializer):
    """Serialize a MedicalProvider with its destinations and residents nested."""
    # NOTE(review): nested many=True serializers assume the MedicalProvider
    # model exposes `destinations` and `residents` relations — confirm against
    # the models module.
    destinations = DestinationSerializer(many=True)
    residents = ResidentSerializer(many=True)
    class Meta:
        model = MedicalProvider
        fields = '__all__'
class TripSerializer(serializers.ModelSerializer):
    """Serialize a Trip with its related user, destination and resident nested."""
    arranged_by = UserSerializer()
    destination = DestinationSerializer()
    resident = ResidentSerializer()
    class Meta:
        model = Trip
        fields = '__all__'
class IssueSerializer(serializers.ModelSerializer):
    """Serialize an Issue with its related trip nested."""
    trip = TripSerializer()
    class Meta:
        model = Issue
        fields = '__all__'
| 26.488372 | 71 | 0.71115 |
acf4a253af4a80abf662ca52e8e278140d6c7cfc | 15,743 | py | Python | orttraining/orttraining/python/training/ortmodule/_training_manager.py | ccsquare/onnxruntime | 0a70c2de0074ec6b48234e5a462e312e82be9aec | [
"MIT"
] | 1 | 2021-09-15T08:20:39.000Z | 2021-09-15T08:20:39.000Z | orttraining/orttraining/python/training/ortmodule/_training_manager.py | ccsquare/onnxruntime | 0a70c2de0074ec6b48234e5a462e312e82be9aec | [
"MIT"
] | null | null | null | orttraining/orttraining/python/training/ortmodule/_training_manager.py | ccsquare/onnxruntime | 0a70c2de0074ec6b48234e5a462e312e82be9aec | [
"MIT"
] | null | null | null | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
from . import _utils, _io, _logger
from ._graph_execution_manager import GraphExecutionManager, RunStateInfo, _SkipCheck
from ._execution_agent import TrainingAgent
from onnxruntime.capi import _pybind_state as C
from onnxruntime.capi.onnxruntime_inference_collection import get_ort_device_type
import onnx
import torch
import warnings
from torch.utils.dlpack import from_dlpack, to_dlpack
class TrainingManager(GraphExecutionManager):
    """Concrete instance of GraphExecutionManager that is able to manage the training model

    TrainingManager is responsible for building and running the forward and backward graph of the training model
    """
    def __init__(self, model):
        super().__init__(model)
        # Export the wrapped module in TRAINING mode so dropout/batchnorm etc.
        # keep their training behavior in the exported ONNX graph.
        self._export_mode = torch.onnx.TrainingMode.TRAINING
    @staticmethod
    def execution_session_run_forward(execution_session, onnx_model, *inputs):
        """Runs the forward graph on execution_session with given model inputs and device"""
        # TODO: Try to reuse the output buffers as some of the output tensors are same sizes,
        # especially the backward graph outputs.
        # REVIEW(codemzs): Consolidate Training Agent with InferenceAgent on C++ side to not
        # have the need for passing IOBinding.
        state = C.PartialGraphExecutionState()
        forward_inputs = C.OrtValueVector()
        forward_inputs.reserve(len(inputs))
        for input in inputs:
            # bool tensors need the explicit flag so ORT interprets the dlpack
            # buffer correctly.
            forward_inputs.push_back(to_dlpack(input), input.dtype == torch.bool)
        forward_outputs = C.OrtValueVector()
        # Run and return module outputs.
        execution_session.run_forward(forward_inputs, forward_outputs, state)
        user_outputs = tuple(_utils._ortvalue_to_torch_tensor(forward_output) for forward_output in forward_outputs)
        # Keep (shape, device, dtype) per output so backward can materialize a
        # zero gradient when autograd hands us None for an output.
        output_info = [(output.shape, output.device, output.dtype) for output in user_outputs]
        run_info = RunStateInfo(state, output_info)
        # Return user outputs and forward run information
        return user_outputs, run_info
    def forward(self, *inputs, **kwargs):
        '''Forward pass starts here and continues at `_ORTModuleFunction.forward`
        ONNX model is exported the first time this method is executed.
        Next, we build a full training graph with module_graph_builder.
        Finally, we instantiate the ONNX Runtime InferenceSession.
        '''
        # Exporting module to ONNX for the first time
        build_gradient_graph = False
        if self._skip_check.is_set(_SkipCheck.SKIP_CHECK_BUILD_GRADIENT) == False:
            build_gradient_graph = self._export_model(*inputs, **kwargs)
            if build_gradient_graph:
                # If model was exported, then initialize the graph builder
                self._initialize_graph_builder(training=True)
            input_info = _io.parse_inputs_for_onnx_export(self._module_parameters,
                                                          self._onnx_model,
                                                          inputs,
                                                          kwargs)
            # Reinitialize graph builder if the inputs or initializers requiring gradient have changed.
            # Order of or operation is important here because we always need to call
            # _reinitialize_graph_builder irrespective of the value of build_gradient_graph.
            build_gradient_graph = self._reinitialize_graph_builder(input_info) or build_gradient_graph
            # Build the gradient graph
            if build_gradient_graph:
                self._build_graph()
        create_execution_session = False
        if self._skip_check.is_set(_SkipCheck.SKIP_CHECK_EXECUTION_AGENT) == False:
            device = _utils.get_device_from_module(self._original_module) or \
                _utils.get_device_from_inputs(inputs, kwargs)
            # The _training_session/_inference_session should be created every time
            # the graph was built or if the device changed between calls to forward
            create_execution_session = build_gradient_graph or self._device != device
            if self._device != device:
                self._device = device
        if create_execution_session:
            # Create execution session creates the training_session
            self._create_execution_agent()
            # disable some checks after execution session is created the first time
            if self._skip_check.is_disabled() == False:
                self._skip_check = _SkipCheck.SKIP_CHECK_BUILD_GRADIENT | _SkipCheck.SKIP_CHECK_EXECUTION_AGENT | _SkipCheck.SKIP_CHECK_DEVICE
                if self._loglevel <= _logger.LogLevel.WARNING:
                    warnings.warn("Fast path enabled - skipping checks for rebuilding gradient graph, execution agent creation, and device during training.",
                                  UserWarning)
        # NOTE: _ORTModuleFunction is (re)defined on every forward call and
        # closes over `self`, so it always sees the current execution agent,
        # graph info and device.
        class _ORTModuleFunction(torch.autograd.Function):
            '''Use a custom torch.autograd.Function to associate self.backward_graph as the
            gradient implementation for self.forward_graph.'''
            @staticmethod
            def forward(ctx, *inputs):
                '''Performs forward pass based on user input and PyTorch initializer
                Autograd Function's apply() doesn't support keyword arguments,
                so `*inputs` has all the arguments - keyword arguments converted
                to positional/keywords during `TrainingManager.forward`.
                Module outputs are returned to the user
                '''
                if self._skip_check.is_set(_SkipCheck.SKIP_CHECK_DEVICE) == False:
                    # Assert that the input and model device match
                    _utils._check_same_device(self._device, "Input argument to forward", *inputs)
                user_outputs, ctx.run_info = TrainingManager.execution_session_run_forward(self._execution_agent,
                                                                                           self._optimized_onnx_model,
                                                                                           *inputs)
                # Disable materializing grads then None object will not be
                # converted to a tensor filled with zeros prior to calling backward.
                # Save shape, device and type info to ctx for materializing tensor in backward if output grad is None.
                ctx.set_materialize_grads(False)
                # Mark the outputs tensors needed in backward computation
                # ORT is NOT relying on save_for_backward() to actually save the tensor,
                # as this tensor is also kept in ORT's PartialGraphState
                # This call is to invoke pytorch's version check to detect the potential inplace corruption
                for idx in self._graph_info.module_output_indices_requires_save_for_backward:
                    ctx.save_for_backward(user_outputs[idx])
                return user_outputs
            @staticmethod
            def backward(ctx, *grad_outputs):
                '''Performs backward pass based on grad wrt module output'''
                assert ctx.run_info is not None, 'forward() or __call__() methods must be called before backward()'
                if self._skip_check.is_set(_SkipCheck.SKIP_CHECK_DEVICE) == False:
                    _utils._check_same_device(self._device, "Input argument to backward", *grad_outputs)
                # Unpack saved_tensor to trigger version detection that catches inplace corruption
                _ = ctx.saved_tensors
                # Use IO binding
                # Push user output grads to ONNX backend.
                backward_inputs = C.OrtValueVector()
                # Preallocate length of the vector. And then delete as required towards the end.
                backward_inputs.reserve(len(grad_outputs))
                for idx, grad_output in enumerate(grad_outputs):
                    if idx in self._graph_info.output_grad_indices_non_differentiable:
                        assert grad_output is None, "ORT found the {}-th module output '{}' is " \
                                                    "non-differentiable according to the onnx graph. " \
                                                    "However, the gradient value is still provided by " \
                                                    "PyTorch's autograd engine." \
                                                    .format(idx, self._graph_info.user_output_names[idx])
                        continue
                    if grad_output is None:
                        # materialize_grads is off (see forward), so build the
                        # zero gradient ourselves from the recorded output info.
                        shape, device, dtype = ctx.run_info.output_info[idx]
                        if idx in self._graph_info.output_grad_indices_require_full_shape:
                            grad_output = torch.zeros(shape, device=device, dtype=dtype)
                        else:
                            grad_output = torch.tensor(0., device=device, dtype=dtype)
                    elif not grad_output.is_contiguous():
                        grad_output = grad_output.contiguous()
                    backward_inputs.push_back(to_dlpack(grad_output), grad_output.dtype == torch.bool)
                backward_inputs.shrink_to_fit()
                # Run and get results
                backward_outputs = C.OrtValueVector()
                self._execution_agent.run_backward(backward_inputs, backward_outputs, ctx.run_info.state)
                # Destroy the state immediately (as opposed to be at the mercy of garbage collector) so it does not
                # affect peak memory usage in a subsequent graph run.
                del ctx.run_info.state
                # Return input and initializer gradients
                num_user_input_grads = len(self._input_info.require_grad_names)
                results = []
                require_grad_names_set = set(self._input_info.require_grad_names)
                require_grad_names_index = 0
                for input_name in self._graph_info.user_input_names:
                    # Append to the results the backward output for each input that required grad
                    if input_name in require_grad_names_set:
                        results.append(_utils._torch_tensor_from_dl_pack(
                            backward_outputs.dlpack_at(require_grad_names_index),
                            backward_outputs[require_grad_names_index]))
                        require_grad_names_index += 1
                    else:
                        # input_name is not found in the self._input_info.require_grad_names list
                        # Append None to results for each input that did not require grad
                        results.append(None)
                # Append gradients of initializer to results
                # Go over each initializer, check if it required grad and append to results accordingly
                initializer_index = num_user_input_grads
                for initializer_name in self._graph_info.initializer_names:
                    if initializer_name in self._graph_initializer_names_to_train:
                        results.append(_utils._torch_tensor_from_dl_pack(
                            backward_outputs.dlpack_at(initializer_index),
                            backward_outputs[initializer_index]))
                        initializer_index += 1
                    else:
                        results.append(None)
                return tuple(results)
        return _io.unflatten_user_output(self._module_output_schema,
                                         _ORTModuleFunction.apply(
                                             *_io._combine_input_buffers_initializers(
                                                 self._graph_initializers,
                                                 self._graph_info.user_input_names,
                                                 self._input_info,
                                                 self._flattened_module.named_buffers(),
                                                 inputs,
                                                 kwargs,
                                                 self._device)))
    def _build_graph(self):
        """Build an optimized gradient graph using the module_graph_builder"""
        super()._build_graph()
        if self._save_onnx:
            onnx.save(self._optimized_onnx_model, self._save_onnx_prefix + '_training.onnx')
            inference_optimized_model = onnx.load_model_from_string(self._graph_builder.get_inference_optimized_model())
            onnx.save(inference_optimized_model, self._save_onnx_prefix + '_inference_optimized.onnx')
    def _create_execution_agent(self):
        """Creates a TrainingAgent that can run the forward and backward graph on the training model"""
        session_options, providers, provider_options = self._get_session_config()
        fw_feed_names = [input.name for input in self._optimized_onnx_model.graph.input]
        # All forward outputs and backward fetches live on the module's device.
        fw_outputs_device_info = [
            C.OrtDevice(get_ort_device_type(self._device.type),
                        C.OrtDevice.default_memory(),
                        _utils.get_device_index(self._device)
                        )] * len(self._graph_info.user_output_names)
        bw_fetches_names = [output.name for output in self._optimized_onnx_model.graph.output]
        bw_outputs_device_info = [
            C.OrtDevice(get_ort_device_type(self._device.type),
                        C.OrtDevice.default_memory(),
                        _utils.get_device_index(self._device)
                        )] * len(bw_fetches_names)
        self._execution_agent = TrainingAgent(self._optimized_onnx_model.SerializeToString(),
                                              fw_feed_names,
                                              fw_outputs_device_info,
                                              bw_fetches_names,
                                              bw_outputs_device_info,
                                              session_options,
                                              providers,
                                              provider_options)
    def _reinitialize_graph_builder(self, input_info):
        """Return true if the module graph builder was reinitialized"""
        # Model could have unused parameters which are dropped after export and so not a part of self._graph_initializer_names_to_train.
        # To see if any trainable initializers changed, compare self._graph_initializer_names_to_train
        # with initializers in module named_parameters that are known to the onnx graph.
        initializer_names_to_train_set_user_model = {name for name, param in
                                                     self._flattened_module.named_parameters()
                                                     if param.requires_grad and name in self._graph_initializer_names}
        # If inputs requiring gradient change from forward to the next, the module_gradient_graph_builder
        # needs to be reinitialized so it can compute the backward output for the new inputs that require_grad
        if input_info.require_grad_names != self._input_info.require_grad_names or \
                initializer_names_to_train_set_user_model != self._graph_initializer_names_to_train:
            self._input_info = input_info
            self._initialize_graph_builder(training=True)
            return True
        return False
| 56.024911 | 157 | 0.604269 |
acf4a27ad5bb5554777033421caeedb5bd2f1de8 | 6,336 | py | Python | tests/services/test_archive.py | code42/py42 | ae748ea737ddcc89a92b2cf17b7e0433198d60bc | [
"MIT"
] | 21 | 2020-04-21T20:33:08.000Z | 2022-02-24T19:09:52.000Z | tests/services/test_archive.py | code42/py42 | ae748ea737ddcc89a92b2cf17b7e0433198d60bc | [
"MIT"
] | 255 | 2020-03-31T18:56:17.000Z | 2022-03-29T17:16:54.000Z | tests/services/test_archive.py | code42/py42 | ae748ea737ddcc89a92b2cf17b7e0433198d60bc | [
"MIT"
] | 14 | 2020-03-31T19:11:26.000Z | 2022-03-14T20:42:51.000Z | import pytest
from tests.conftest import create_mock_response
import py42.settings
from py42.services.archive import ArchiveService
ARCHIVE_URI = "/api/Archive"
DEFAULT_GET_ARCHIVES_PARAMS = {
"pgNum": 1,
"pgSize": 100,
}
MOCK_GET_ARCHIVE_RESPONSE = """{"totalCount": 3000, "archives": ["foo"]}"""
MOCK_EMPTY_GET_ARCHIVE_RESPONSE = """{"totalCount": 3000, "archives": []}"""
MOCK_GET_ORG_RESTORE_HISTORY_RESPONSE = """{
"totalCount": 3000,
"restoreEvents": [
{
"eventName": "foo",
"eventUid": "123"
}
]
}"""
MOCK_EMPTY_GET_ORG_RESTORE_HISTORY_RESPONSE = (
"""{"totalCount": 3000, "restoreEvents": []}"""
)
MOCK_GET_ORG_COLD_STORAGE_RESPONSE = (
"""{"coldStorageRows": [{"archiveGuid": "fakeguid"}]}"""
)
MOCK_EMPTY_GET_ORG_COLD_STORAGE_RESPONSE = """{"coldStorageRows": []}"""
class TestArchiveService:
@pytest.fixture
def mock_get_archives_response(self, mocker):
return create_mock_response(mocker, MOCK_GET_ARCHIVE_RESPONSE)
@pytest.fixture
def mock_get_archives_empty_response(self, mocker):
return create_mock_response(mocker, MOCK_EMPTY_GET_ARCHIVE_RESPONSE)
@pytest.fixture
def mock_get_all_restore_history_response(self, mocker):
return create_mock_response(mocker, MOCK_GET_ORG_RESTORE_HISTORY_RESPONSE)
@pytest.fixture
def mock_get_all_restore_history_empty_response(self, mocker):
return create_mock_response(mocker, MOCK_EMPTY_GET_ORG_RESTORE_HISTORY_RESPONSE)
@pytest.fixture
def mock_get_all_org_cold_storage_response(self, mocker):
return create_mock_response(mocker, MOCK_GET_ORG_COLD_STORAGE_RESPONSE)
@pytest.fixture
def mock_get_all_org_cold_storage_empty_response(self, mocker):
return create_mock_response(mocker, MOCK_EMPTY_GET_ORG_COLD_STORAGE_RESPONSE)
def test_get_single_archive_calls_get_with_expected_uri(
self, mock_connection, successful_response
):
mock_connection.get.return_value = successful_response
service = ArchiveService(mock_connection)
service.get_single_archive("ARCHIVE_GUID")
uri = f"{ARCHIVE_URI}/ARCHIVE_GUID"
mock_connection.get.assert_called_once_with(uri)
def test_get_all_archives_from_value_calls_get_expected_number_of_times(
self,
mock_connection,
mock_get_archives_response,
mock_get_archives_empty_response,
):
device_guid = "42"
py42.settings.items_per_page = 1
service = ArchiveService(mock_connection)
mock_connection.get.side_effect = [
mock_get_archives_response,
mock_get_archives_response,
mock_get_archives_empty_response,
]
for _ in service.get_all_archives_from_value(device_guid, "backupSourceGuid"):
pass
py42.settings.items_per_page = 500
assert mock_connection.get.call_count == 3
def test_get_by_value_calls_get_with_expected_uri_and_params(self, mock_connection):
device_guid = "42"
service = ArchiveService(mock_connection)
for _ in service.get_all_archives_from_value(device_guid, "backupSourceGuid"):
pass
expected_params = {"pgNum": 1, "pgSize": 500, "backupSourceGuid": "42"}
mock_connection.get.assert_called_once_with(ARCHIVE_URI, params=expected_params)
def test_get_all_restore_history_calls_get_expected_number_of_times(
self,
mock_connection,
mock_get_all_restore_history_response,
mock_get_all_restore_history_empty_response,
):
py42.settings.items_per_page = 1
service = ArchiveService(mock_connection)
mock_connection.get.side_effect = [
mock_get_all_restore_history_response,
mock_get_all_restore_history_response,
mock_get_all_restore_history_empty_response,
]
for _ in service.get_all_restore_history(10, "orgId", "123"):
pass
py42.settings.items_per_page = 500
assert mock_connection.get.call_count == 3
def test_get_web_restore_info_calls_get_with_expected_url_and_params(
self, mock_connection
):
service = ArchiveService(mock_connection)
service.get_web_restore_info("src", "dest")
expected_params = {"srcGuid": "src", "destGuid": "dest"}
mock_connection.get.assert_called_once_with(
"/api/WebRestoreInfo", params=expected_params
)
def test_update_cold_storage_purge_date_calls_coldstorage_with_expected_data(
    self, mock_connection
):
    """Purge-date update PUTs the new expire date to /api/coldStorage/<guid>."""
    archive_service = ArchiveService(mock_connection)
    archive_service.update_cold_storage_purge_date("123", "2020-04-24")
    expected_json = {"archiveHoldExpireDate": "2020-04-24"}
    mock_connection.put.assert_called_once_with(
        "/api/coldStorage/123",
        params={"idType": "guid"},
        json=expected_json,
    )
def test_get_all_org_cold_storage_archives_calls_get_expected_number_of_times(
    self,
    mock_connection,
    mock_get_all_org_cold_storage_response,
    mock_get_all_org_cold_storage_empty_response,
):
    """Cold-storage paging issues one GET per page until an empty page."""
    py42.settings.items_per_page = 1
    archive_service = ArchiveService(mock_connection)
    mock_connection.get.side_effect = [
        mock_get_all_org_cold_storage_response,
        mock_get_all_org_cold_storage_response,
        mock_get_all_org_cold_storage_empty_response,
    ]
    # Drain the generator so every page is actually requested.
    list(archive_service.get_all_org_cold_storage_archives("orgId"))
    py42.settings.items_per_page = 500
    assert mock_connection.get.call_count == 3
def test_get_all_org_cold_storage_archives_calls_get_with_expected_uri_and_params(
    self, mock_connection, mock_get_all_org_cold_storage_empty_response
):
    """The first cold-storage page request includes org, paging and sort params."""
    archive_service = ArchiveService(mock_connection)
    mock_connection.get.side_effect = [mock_get_all_org_cold_storage_empty_response]
    # Pull at most one item; a single page fetch is all we need to inspect.
    next(archive_service.get_all_org_cold_storage_archives("orgId"), None)
    expected_params = {
        "orgId": "orgId",
        "incChildOrgs": True,
        "pgNum": 1,
        "pgSize": 500,
        "srtDir": "asc",
        "srtKey": "archiveHoldExpireDate",
    }
    mock_connection.get.assert_called_once_with("/api/ColdStorage", params=expected_params)
| 37.94012 | 88 | 0.702178 |
acf4a36e7869aa60388756c21e5a4eaaf0ad4bda | 362 | py | Python | bronze/f.py | HasanIjaz-HB/Quantum-Computing | 53c2df99cd2efbfb827857125991342f336a3097 | [
"MIT"
] | null | null | null | bronze/f.py | HasanIjaz-HB/Quantum-Computing | 53c2df99cd2efbfb827857125991342f336a3097 | [
"MIT"
] | null | null | null | bronze/f.py | HasanIjaz-HB/Quantum-Computing | 53c2df99cd2efbfb827857125991342f336a3097 | [
"MIT"
def f(circuit,qreg):
    """Apply X(q1), CCX(q0, q1 -> q2), X(q1) on *circuit*.

    Conjugating the Toffoli with X gates inverts the q1 control — assuming
    standard Qiskit gate semantics this marks q2 when q0=1 and q1=0.
    """
    inverted = qreg[1]
    circuit.x(inverted)
    circuit.ccx(qreg[0], inverted, qreg[2])
    circuit.x(inverted)
def f_8(circuit,quantum_reg):
    """Three-control oracle using q4 as an ancilla.

    Computes CCX(NOT q2, q1 -> q4), applies CCX(q4, q0 -> q3), then
    uncomputes the ancilla by repeating the first CCX and X.
    """
    inverted = quantum_reg[2]
    ancilla = quantum_reg[4]
    circuit.x(inverted)
    circuit.ccx(inverted, quantum_reg[1], ancilla)   # compute ancilla
    circuit.ccx(ancilla, quantum_reg[0], quantum_reg[3])
    circuit.ccx(inverted, quantum_reg[1], ancilla)   # uncompute ancilla
    circuit.x(inverted)
| 30.166667 | 58 | 0.740331 |
acf4a4b4af1d2490729f99b59dede858af65289d | 7,449 | py | Python | test/onnx_converter_test/train_scripts/deepfm.py | x-y-z/HugeCTR | 17bf942215df60827ece9dc015af5191ef9219b7 | [
"Apache-2.0"
] | 130 | 2021-10-11T11:55:28.000Z | 2022-03-31T21:53:07.000Z | test/onnx_converter_test/train_scripts/deepfm.py | Teora/HugeCTR | c55a63401ad350669ccfcd374aefd7a5fc879ca2 | [
"Apache-2.0"
] | 72 | 2021-10-09T04:59:09.000Z | 2022-03-31T11:27:54.000Z | test/onnx_converter_test/train_scripts/deepfm.py | Teora/HugeCTR | c55a63401ad350669ccfcd374aefd7a5fc879ca2 | [
"Apache-2.0"
] | 29 | 2021-11-03T22:35:01.000Z | 2022-03-30T13:11:59.000Z | import hugectr
from mpi4py import MPI
# DeepFM training script for HugeCTR: reads Norm-format Criteo-style data
# (13 dense + 26 sparse features), builds a DeepFM graph (deep MLP tower +
# FM second-order term + linear first-order term), then trains and snapshots.
# NOTE(review): file paths ("./dcn_data", "/onnx_converter/...") are
# environment-specific — confirm against the test harness before reuse.
solver = hugectr.CreateSolver(max_eval_batches = 300,
                              batchsize_eval = 16384,
                              batchsize = 16384,
                              lr = 0.001,
                              vvgpu = [[0]],
                              repeat_dataset = True)
reader = hugectr.DataReaderParams(data_reader_type = hugectr.DataReaderType_t.Norm,
                                  source = ["./dcn_data/file_list.txt"],
                                  eval_source = "./dcn_data/file_list_test.txt",
                                  check_type = hugectr.Check_t.Sum)
optimizer = hugectr.CreateOptimizer(optimizer_type = hugectr.Optimizer_t.Adam,
                                    update_type = hugectr.Update_t.Global,
                                    beta1 = 0.9,
                                    beta2 = 0.999,
                                    epsilon = 0.0000001)
model = hugectr.Model(solver, reader, optimizer)
# Input: 1 label, 13 dense features, one sparse group of 26 slots.
model.add(hugectr.Input(label_dim = 1, label_name = "label",
                        dense_dim = 13, dense_name = "dense",
                        data_reader_sparse_param_array =
                        [hugectr.DataReaderSparseParam("data1", 1, True, 26)]))
# Embedding: 11-dim vectors = 10 for the deep/FM part + 1 linear term per slot.
model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
                            workspace_size_per_gpu_in_mb = 183,
                            embedding_vec_size = 11,
                            combiner = "sum",
                            sparse_embedding_name = "sparse_embedding1",
                            bottom_name = "data1",
                            optimizer = optimizer))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape,
                            bottom_names = ["sparse_embedding1"],
                            top_names = ["reshape1"],
                            leading_dim=11))
# Split each 11-dim embedding into a 10-dim part (deep/FM) and a 1-dim part (linear).
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Slice,
                            bottom_names = ["reshape1"],
                            top_names = ["slice11", "slice12"],
                            ranges=[(0,10),(10,11)]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape,
                            bottom_names = ["slice11"],
                            top_names = ["reshape2"],
                            leading_dim=260))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape,
                            bottom_names = ["slice12"],
                            top_names = ["reshape3"],
                            leading_dim=26))
# Duplicate the dense features: one copy for the 10-dim projection, one for the 1-dim.
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Slice,
                            bottom_names = ["dense"],
                            top_names = ["slice21", "slice22"],
                            ranges=[(0,13),(0,13)]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.WeightMultiply,
                            bottom_names = ["slice21"],
                            top_names = ["weight_multiply1"],
                            weight_dims= [13,10]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.WeightMultiply,
                            bottom_names = ["slice22"],
                            top_names = ["weight_multiply2"],
                            weight_dims= [13,1]))
# Deep/FM input: 26*10 sparse dims + 13*10 dense dims = 390.
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat,
                            bottom_names = ["reshape2","weight_multiply1"],
                            top_names = ["concat1"]))
# Duplicate the 390-dim features: one copy for the deep MLP, one for FmOrder2.
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Slice,
                            bottom_names = ["concat1"],
                            top_names = ["slice31", "slice32"],
                            ranges=[(0,390),(0,390)]))
# Deep tower: three 400-unit ReLU+Dropout layers, then a 1-unit output.
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
                            bottom_names = ["slice31"],
                            top_names = ["fc1"],
                            num_output=400))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
                            bottom_names = ["fc1"],
                            top_names = ["relu1"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout,
                            bottom_names = ["relu1"],
                            top_names = ["dropout1"],
                            dropout_rate=0.5))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
                            bottom_names = ["dropout1"],
                            top_names = ["fc2"],
                            num_output=400))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
                            bottom_names = ["fc2"],
                            top_names = ["relu2"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout,
                            bottom_names = ["relu2"],
                            top_names = ["dropout2"],
                            dropout_rate=0.5))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
                            bottom_names = ["dropout2"],
                            top_names = ["fc3"],
                            num_output=400))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
                            bottom_names = ["fc3"],
                            top_names = ["relu3"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout,
                            bottom_names = ["relu3"],
                            top_names = ["dropout3"],
                            dropout_rate=0.5))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
                            bottom_names = ["dropout3"],
                            top_names = ["fc4"],
                            num_output=1))
# FM second-order interaction term over the shared 390-dim features.
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.FmOrder2,
                            bottom_names = ["slice32"],
                            top_names = ["fmorder2"],
                            out_dim=10))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReduceSum,
                            bottom_names = ["fmorder2"],
                            top_names = ["reducesum1"],
                            axis=1))
# First-order (linear) term: sum of the 1-dim sparse and dense contributions.
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat,
                            bottom_names = ["reshape3","weight_multiply2"],
                            top_names = ["concat2"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReduceSum,
                            bottom_names = ["concat2"],
                            top_names = ["reducesum2"],
                            axis=1))
# Final logit = deep tower output + FM term + linear term.
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Add,
                            bottom_names = ["fc4", "reducesum1", "reducesum2"],
                            top_names = ["add"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.BinaryCrossEntropyLoss,
                            bottom_names = ["add", "label"],
                            top_names = ["loss"]))
# Dump the graph before compiling so the ONNX converter test can consume it.
model.graph_to_json("/onnx_converter/graph_files/deepfm.json")
model.compile()
model.summary()
model.fit(max_iter = 2300, display = 200, eval_interval = 1000, snapshot = 2000, snapshot_prefix = "/onnx_converter/hugectr_models/deepfm")
| 58.195313 | 153 | 0.485971 |
acf4a4bd772ad6d0b6229a74734a90011ed23ecd | 42,031 | py | Python | napari/utils/events/event.py | Carreau/napari | 65c624cbd55102775d316568e28d284bb597f596 | [
"BSD-3-Clause"
] | null | null | null | napari/utils/events/event.py | Carreau/napari | 65c624cbd55102775d316568e28d284bb597f596 | [
"BSD-3-Clause"
] | 1 | 2021-02-12T23:43:05.000Z | 2021-02-12T23:43:05.000Z | napari/utils/events/event.py | Carreau/napari | 65c624cbd55102775d316568e28d284bb597f596 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# # LICENSE.txt
# Vispy licensing terms
# ---------------------
# Vispy is licensed under the terms of the (new) BSD license:
#
# Copyright (c) 2013-2017, Vispy Development Team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Vispy Development Team nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# Exceptions
# ----------
#
# The examples code in the examples directory can be considered public
# domain, unless otherwise indicated in the corresponding source file.
"""
The event module implements the classes that make up the event system.
The Event class and its subclasses are used to represent "stuff that happens".
The EventEmitter class provides an interface to connect to events and
to emit events. The EmitterGroup groups EventEmitter objects.
For more information see http://github.com/vispy/vispy/wiki/API_Events
"""
import inspect
import warnings
import weakref
from collections.abc import Sequence
from typing import (
Any,
Callable,
Dict,
Generator,
List,
Optional,
Tuple,
Type,
Union,
cast,
)
from typing_extensions import Literal
from vispy.util.logs import _handle_exception
from ..translations import trans
class Event:
    """Class describing events that occur and can be reacted to with callbacks.
    Each event instance contains information about a single event that has
    occurred such as a key press, mouse motion, timer activation, etc.
    Subclasses: :class:`KeyEvent`, :class:`MouseEvent`, :class:`TouchEvent`,
    :class:`StylusEvent`
    The creation of events and passing of events to the appropriate callback
    functions is the responsibility of :class:`EventEmitter` instances.
    Note that each event object has an attribute for each of the input
    arguments listed below.
    Parameters
    ----------
    type : str
        String indicating the event type (e.g. mouse_press, key_release)
    native : object (optional)
        The native GUI event object
    **kwargs : keyword arguments
        All extra keyword arguments become attributes of the event object.
    """

    def __init__(self, type: str, native: Any = None, **kwargs: Any):
        # stack of all sources this event has been emitted through
        self._sources: List[Any] = []
        self._handled: bool = False
        self._blocked: bool = False
        # Store args
        self._type = type
        self._native = native
        # Expose any extra keyword arguments as plain attributes.
        for k, v in kwargs.items():
            setattr(self, k, v)

    @property
    def source(self) -> Any:
        """The object that the event applies to (i.e. the source of the event)."""
        return self._sources[-1] if self._sources else None

    @property
    def sources(self) -> List[Any]:
        """List of objects that the event applies to (i.e. are or have
        been a source of the event). Can contain multiple objects in case
        the event traverses a hierarchy of objects.
        """
        return self._sources

    def _push_source(self, source: Any) -> None:
        # Called by an emitter just before delivering the event.
        self._sources.append(source)

    def _pop_source(self) -> Any:
        # Called by an emitter after all callbacks have run.
        return self._sources.pop()

    @property
    def type(self) -> str:
        # No docstring; documented in class docstring
        return self._type

    @property
    def native(self) -> Any:
        # No docstring; documented in class docstring
        return self._native

    @property
    def handled(self) -> bool:
        """This boolean property indicates whether the event has already been
        acted on by an event handler. Since many handlers may have access to
        the same events, it is recommended that each check whether the event
        has already been handled as well as set handled=True if it decides to
        act on the event.
        """
        return self._handled

    @handled.setter
    def handled(self, val) -> None:
        # Fix: setters return None; the previous `-> bool` annotation was wrong.
        self._handled = bool(val)

    @property
    def blocked(self) -> bool:
        """This boolean property indicates whether the event will be delivered
        to event callbacks. If it is set to True, then no further callbacks
        will receive the event. When possible, it is recommended to use
        Event.handled rather than Event.blocked.
        """
        return self._blocked

    @blocked.setter
    def blocked(self, val) -> None:
        # Fix: setters return None; the previous `-> bool` annotation was wrong.
        self._blocked = bool(val)

    def __repr__(self) -> str:
        # Try to generate a nice string representation of the event that
        # includes the interesting properties.
        # need to keep track of depth because it is
        # very difficult to avoid excessive recursion.
        global _event_repr_depth
        _event_repr_depth += 1
        try:
            if _event_repr_depth > 2:
                return "<...>"
            attrs = []
            for name in dir(self):
                if name.startswith('_'):
                    continue
                # select only properties
                if not hasattr(type(self), name) or not isinstance(
                    getattr(type(self), name), property
                ):
                    continue
                attr = getattr(self, name)
                attrs.append(f"{name}={attr!r}")
            return "<{} {}>".format(self.__class__.__name__, " ".join(attrs))
        finally:
            _event_repr_depth -= 1

    def __str__(self) -> str:
        """Shorter string representation"""
        return self.__class__.__name__

    # mypy fix for dynamic attribute access
    def __getattr__(self, name: str) -> Any:
        return object.__getattribute__(self, name)
# Recursion guard for Event.__repr__ (events may reference other events).
_event_repr_depth = 0
# A callback either takes the Event as its sole argument or takes nothing.
Callback = Union[Callable[[Event], None], Callable[[], None]]
# Bound-method callback stored as (weakref-to-instance, attribute name).
CallbackRef = Tuple['weakref.ReferenceType[Any]', str] # dereferenced method
# Like CallbackRef, but the first element may still be a strong reference.
CallbackStr = Tuple[
    Union['weakref.ReferenceType[Any]', object], str
] # dereferenced method
class _WeakCounter:
"""
Similar to collection counter but has weak keys.
It will only implement the methods we use here.
"""
def __init__(self):
self._counter = weakref.WeakKeyDictionary()
self._nonecount = 0
def update(self, iterable):
for it in iterable:
if it is None:
self._nonecount += 1
else:
self._counter[it] = self.get(it, 0) + 1
def get(self, key, default):
if key is None:
return self._nonecount
return self._counter.get(key, default)
class EventEmitter:
    """Encapsulates a list of event callbacks.
    Each instance of EventEmitter represents the source of a stream of similar
    events, such as mouse click events or timer activation events. For
    example, the following diagram shows the propagation of a mouse click
    event to the list of callbacks that are registered to listen for that
    event::
        User clicks |Canvas creates
        mouse on |MouseEvent: |'mouse_press' EventEmitter: |callbacks in sequence: # noqa
        Canvas | | | # noqa
            -->|event = MouseEvent(...) -->|Canvas.events.mouse_press(event) -->|callback1(event) # noqa
                | | -->|callback2(event) # noqa
                | | -->|callback3(event) # noqa
    Callback functions may be added or removed from an EventEmitter using
    :func:`connect() <vispy.event.EventEmitter.connect>` or
    :func:`disconnect() <vispy.event.EventEmitter.disconnect>`.
    Calling an instance of EventEmitter will cause each of its callbacks
    to be invoked in sequence. All callbacks are invoked with a single
    argument which will be an instance of :class:`Event <vispy.event.Event>`.
    EventEmitters are generally created by an EmitterGroup instance.
    Parameters
    ----------
    source : object
        The object that the generated events apply to. All emitted Events will
        have their .source property set to this value.
    type : str or None
        String indicating the event type (e.g. mouse_press, key_release)
    event_class : subclass of Event
        The class of events that this emitter will generate.
    """

    def __init__(
        self,
        source: Any = None,
        type: Optional[str] = None,
        event_class: Type[Event] = Event,
    ):
        # connected callbacks
        self._callbacks: List[Union[Callback, CallbackRef]] = []
        # used when connecting new callbacks at specific positions
        self._callback_refs: List[Optional[str]] = []
        # Parallel list: whether each callback accepts the Event argument.
        self._callback_pass_event: List[bool] = []
        # count number of times this emitter is blocked for each callback.
        # The ``None`` key counts whole-emitter blocks.
        self._blocked: Dict[Optional[Callback], int] = {None: 0}
        self._block_counter: _WeakCounter[Optional[Callback]] = _WeakCounter()
        # used to detect emitter loops
        self._emitting = False
        self.source = source
        self.default_args = {}
        if type is not None:
            self.default_args['type'] = type
        assert inspect.isclass(event_class)
        self.event_class = event_class
        self._ignore_callback_errors: bool = False # True
        self.print_callback_errors = 'reminders' # 'reminders'

    @property
    def ignore_callback_errors(self) -> bool:
        """Whether exceptions during callbacks will be caught by the emitter
        This allows it to continue invoking other callbacks if an error
        occurs.
        """
        return self._ignore_callback_errors

    @ignore_callback_errors.setter
    def ignore_callback_errors(self, val: bool):
        self._ignore_callback_errors = val

    @property
    def print_callback_errors(self) -> str:
        """Print a message and stack trace if a callback raises an exception
        Valid values are "first" (only show first instance), "reminders" (show
        complete first instance, then counts), "always" (always show full
        traceback), or "never".
        This assumes ignore_callback_errors=True. These will be raised as
        warnings, so ensure that the vispy logging level is set to at
        least "warning".
        """
        return self._print_callback_errors

    @print_callback_errors.setter
    def print_callback_errors(
        self,
        val: Union[
            Literal['first'],
            Literal['reminders'],
            Literal['always'],
            Literal['never'],
        ],
    ):
        if val not in ('first', 'reminders', 'always', 'never'):
            raise ValueError(
                trans._(
                    'print_callback_errors must be "first", "reminders", "always", or "never"',
                    deferred=True,
                )
            )
        self._print_callback_errors = val

    @property
    def callback_refs(self) -> Tuple[Optional[str], ...]:
        """The set of callback references"""
        return tuple(self._callback_refs)

    @property
    def callbacks(self) -> Tuple[Union[Callback, CallbackRef], ...]:
        """The set of callbacks"""
        return tuple(self._callbacks)

    @property
    def source(self) -> Any:
        """The object that events generated by this emitter apply to"""
        return (
            None if self._source is None else self._source()
        )  # get object behind weakref

    @source.setter
    def source(self, s):
        # Hold the source weakly so the emitter does not keep it alive.
        self._source = None if s is None else weakref.ref(s)

    def connect(
        self,
        callback: Union[Callback, CallbackRef, CallbackStr, 'EmitterGroup'],
        ref: Union[bool, str] = False,
        position: Union[Literal['first'], Literal['last']] = 'first',
        before: Union[str, Callback, List[Union[str, Callback]], None] = None,
        after: Union[str, Callback, List[Union[str, Callback]], None] = None,
    ):
        """Connect this emitter to a new callback.
        Parameters
        ----------
        callback : function | tuple
            *callback* may be either a callable object or a tuple
            (object, attr_name) where object.attr_name will point to a
            callable object. Note that only a weak reference to ``object``
            will be kept.
        ref : bool | str
            Reference used to identify the callback in ``before``/``after``.
            If True, the callback ref will automatically determined (see
            Notes). If False, the callback cannot be referred to by a string.
            If str, the given string will be used. Note that if ``ref``
            is not unique in ``callback_refs``, an error will be thrown.
        position : str
            If ``'first'``, the first eligible position is used (that
            meets the before and after criteria), ``'last'`` will use
            the last position.
        before : str | callback | list of str or callback | None
            List of callbacks that the current callback should precede.
            Can be None if no before-criteria should be used.
        after : str | callback | list of str or callback | None
            List of callbacks that the current callback should follow.
            Can be None if no after-criteria should be used.
        Notes
        -----
        If ``ref=True``, the callback reference will be determined from:
        1. If ``callback`` is ``tuple``, the second element in the tuple.
        2. The ``__name__`` attribute.
        3. The ``__class__.__name__`` attribute.
        The current list of callback refs can be obtained using
        ``event.callback_refs``. Callbacks can be referred to by either
        their string reference (if given), or by the actual callback that
        was attached (e.g., ``(canvas, 'swap_buffers')``).
        If the specified callback is already connected, then the request is
        ignored.
        If before is None and after is None (default), the new callback will
        be added to the beginning of the callback list. Thus the
        callback that is connected _last_ will be the _first_ to receive
        events from the emitter.
        """
        callbacks = self.callbacks
        callback_refs = self.callback_refs
        # Keep the caller's original object so it can be returned (this
        # allows ``connect`` to be used as a decorator).
        old_callback = callback
        callback, pass_event = self._normalize_cb(callback)
        if callback in callbacks:
            return
        # deal with the ref
        _ref: Union[str, None]
        if isinstance(ref, bool):
            if ref:
                if isinstance(callback, tuple):
                    _ref = callback[1]
                elif hasattr(callback, '__name__'):  # function
                    _ref = callback.__name__
                else:  # Method, or other
                    _ref = callback.__class__.__name__
            else:
                _ref = None
        elif isinstance(ref, str):
            _ref = ref
        else:
            raise TypeError(
                trans._(
                    'ref must be a bool or string',
                    deferred=True,
                )
            )
        if _ref is not None and _ref in self._callback_refs:
            raise ValueError(
                trans._('ref "{ref}" is not unique', deferred=True, ref=_ref)
            )
        # positions
        if position not in ('first', 'last'):
            raise ValueError(
                trans._(
                    'position must be "first" or "last", not {position}',
                    deferred=True,
                    position=position,
                )
            )
        # bounds: upper & lower bnds (inclusive) of possible cb locs
        # bounds[0] comes from ``before`` (latest allowed index), bounds[1]
        # from ``after`` (earliest allowed index); each criterion must match
        # exactly one existing callback (by ref string or by identity).
        bounds: List[int] = list()
        for ri, criteria in enumerate((before, after)):
            if criteria is None or criteria == []:
                bounds.append(len(callback_refs) if ri == 0 else 0)
            else:
                if not isinstance(criteria, list):
                    criteria = [criteria]
                for c in criteria:
                    count = sum(
                        c in [cn, cc]
                        for cn, cc in zip(callback_refs, callbacks)
                    )
                    if count != 1:
                        raise ValueError(
                            trans._(
                                'criteria "{criteria}" is in the current callback list {count} times:\n{callback_refs}\n{callbacks}',
                                deferred=True,
                                criteria=criteria,
                                count=count,
                                callback_refs=callback_refs,
                                callbacks=callbacks,
                            )
                        )
                matches = [
                    ci
                    for ci, (cn, cc) in enumerate(
                        zip(callback_refs, callbacks)
                    )
                    if (cc in criteria or cn in criteria)
                ]
                bounds.append(matches[0] if ri == 0 else (matches[-1] + 1))
        if bounds[0] < bounds[1]:  # i.e., "place before" < "place after"
            raise RuntimeError(
                trans._(
                    'cannot place callback before "{before}" and after "{after}" for callbacks: {callback_refs}',
                    deferred=True,
                    before=before,
                    after=after,
                    callback_refs=callback_refs,
                )
            )
        idx = bounds[1] if position == 'first' else bounds[0]  # 'last'
        # actually add the callback
        self._callbacks.insert(idx, callback)
        self._callback_refs.insert(idx, _ref)
        self._callback_pass_event.insert(idx, pass_event)
        return old_callback  # allows connect to be used as a decorator

    def disconnect(
        self, callback: Union[Callback, CallbackRef, None, object] = None
    ):
        """Disconnect a callback from this emitter.
        If no callback is specified, then *all* callbacks are removed.
        If the callback was not already connected, then the call does nothing.
        """
        if callback is None:
            self._callbacks = []
            self._callback_refs = []
            self._callback_pass_event = []
        elif isinstance(callback, (Callable, tuple)):
            callback, _pass_event = self._normalize_cb(callback)
            if callback in self._callbacks:
                idx = self._callbacks.index(callback)
                self._callbacks.pop(idx)
                self._callback_refs.pop(idx)
                self._callback_pass_event.pop(idx)
        else:
            # ``callback`` is an arbitrary object: drop every weakly-held
            # (obj, attr) callback bound to it, plus any dead weakrefs.
            index_list = []
            for idx, local_callback in enumerate(self._callbacks):
                if not (
                    isinstance(local_callback, Sequence)
                    and isinstance(local_callback[0], weakref.ref)
                ):
                    continue
                if (
                    local_callback[0]() is callback
                    or local_callback[0]() is None
                ):
                    index_list.append(idx)
            # Pop in reverse so earlier indices stay valid.
            for idx in index_list[::-1]:
                self._callbacks.pop(idx)
                self._callback_refs.pop(idx)
                self._callback_pass_event.pop(idx)

    @staticmethod
    def _get_proper_name(callback):
        # Resolve a bound method to its (instance, attribute-name) pair so
        # only a weak reference to the instance needs to be kept.
        assert inspect.ismethod(callback)
        obj = callback.__self__
        if (
            not hasattr(obj, callback.__name__)
            or getattr(obj, callback.__name__) != callback
        ):
            # some decorators will alter method.__name__, so that obj.method
            # will not be equal to getattr(obj, obj.method.__name__). We check
            # for that case here and traverse to find the right method here.
            for name in dir(obj):
                meth = getattr(obj, name)
                if inspect.ismethod(meth) and meth == callback:
                    return obj, name
            raise RuntimeError(
                trans._(
                    "During bind method {callback} of object {obj} an error happen",
                    deferred=True,
                    callback=callback,
                    obj=obj,
                )
            )
        return obj, callback.__name__

    @staticmethod
    def _check_signature(fun: Callable) -> bool:
        """
        Check if function will accept event parameter
        """
        signature = inspect.signature(fun)
        parameters_list = list(signature.parameters.values())
        if sum(map(_is_pos_arg, parameters_list)) > 1:
            raise RuntimeError(
                trans._(
                    "Binning function cannot have more than one positional argument",
                    deferred=True,
                )
            )
        # True if the callable can accept at least one positional argument
        # (the Event); False for zero-argument callbacks.
        return any(
            map(
                lambda x: x.kind
                in [
                    inspect.Parameter.POSITIONAL_ONLY,
                    inspect.Parameter.POSITIONAL_OR_KEYWORD,
                    inspect.Parameter.VAR_POSITIONAL,
                ],
                signature.parameters.values(),
            )
        )

    def _normalize_cb(
        self, callback
    ) -> Tuple[Union[CallbackRef, Callback], bool]:
        # Returns (normalized_callback, pass_event): bound methods become
        # (weakref, attr-name) pairs, and pass_event records whether the
        # callback should be invoked with the Event argument.
        # dereference methods into a (self, method_name) pair so that we can
        # make the connection without making a strong reference to the
        # instance.
        start_callback = callback
        if inspect.ismethod(callback):
            callback = self._get_proper_name(callback)
        # always use a weak ref
        if isinstance(callback, tuple) and not isinstance(
            callback[0], weakref.ref
        ):
            callback = (weakref.ref(callback[0]), *callback[1:])
        if isinstance(start_callback, Callable):
            callback = callback, self._check_signature(start_callback)
        else:
            obj = callback[0]()
            if obj is None:
                # Referent already garbage-collected; signature unknowable.
                callback = callback, False
            else:
                callback_fun = getattr(obj, callback[1])
                callback = callback, self._check_signature(callback_fun)
        return callback

    def __call__(self, *args, **kwargs) -> Event:
        """__call__(**kwargs)
        Invoke all callbacks for this emitter.
        Emit a new event object, created with the given keyword
        arguments, which must match with the input arguments of the
        corresponding event class. Note that the 'type' argument is
        filled in by the emitter.
        Alternatively, the emitter can also be called with an Event
        instance as the only argument. In this case, the specified
        Event will be used rather than generating a new one. This allows
        customized Event instances to be emitted and also allows EventEmitters
        to be chained by connecting one directly to another.
        Note that the same Event instance is sent to all callbacks.
        This allows some level of communication between the callbacks
        (notably, via Event.handled) but also requires that callbacks
        be careful not to inadvertently modify the Event.
        """
        # This is a VERY highly used method; must be fast!
        blocked = self._blocked
        # create / massage event as needed
        event = self._prepare_event(*args, **kwargs)
        # Add our source to the event; remove it after all callbacks have been
        # invoked.
        event._push_source(self.source)
        self._emitting = True
        try:
            if blocked.get(None, 0) > 0:  # this is the same as self.blocked()
                self._block_counter.update([None])
                return event
            rem: List[CallbackRef] = []
            # Iterate over copies so callbacks may connect/disconnect safely.
            for cb, pass_event in zip(
                self._callbacks[:], self._callback_pass_event[:]
            ):
                if isinstance(cb, tuple):
                    obj = cb[0]()
                    if obj is None:
                        rem.append(cb)  # add dead weakref
                        continue
                    old_cb = cb
                    cb = getattr(obj, cb[1], None)
                    if cb is None:
                        warnings.warn(
                            trans._(
                                "Problem with function {old_cb} of {obj} connected to event {self_}",
                                deferred=True,
                                old_cb=old_cb[1],
                                obj=obj,
                                self_=self,
                            ),
                            stacklevel=2,
                            category=RuntimeWarning,
                        )
                        continue
                cb = cast(Callback, cb)
                if blocked.get(cb, 0) > 0:
                    self._block_counter.update([cb])
                    continue
                self._invoke_callback(cb, event if pass_event else None)
                if event.blocked:
                    break
            # remove callbacks to dead objects
            for cb in rem:
                self.disconnect(cb)
        finally:
            self._emitting = False
            if event._pop_source() != self.source:
                raise RuntimeError(
                    trans._(
                        "Event source-stack mismatch.",
                        deferred=True,
                    )
                )
        return event

    def _invoke_callback(
        self, cb: Union[Callback, Callable[[], None]], event: Optional[Event]
    ):
        # Call ``cb`` with the event (or no args for zero-arg callbacks),
        # routing exceptions through the configured error policy.
        try:
            if event is not None:
                cb(event)
            else:
                cb()
        except Exception as e:
            # dead Qt object with living python pointer. not importing Qt
            # here... but this error is consistent across backends
            if (
                isinstance(e, RuntimeError)
                and 'C++' in str(e)
                and str(e).endswith(('has been deleted', 'already deleted.'))
            ):
                self.disconnect(cb)
                return
            _handle_exception(
                self.ignore_callback_errors,
                self.print_callback_errors,
                self,
                cb_event=(cb, event),
            )

    def _prepare_event(self, *args, **kwargs) -> Event:
        # When emitting, this method is called to create or otherwise alter
        # an event before it is sent to callbacks. Subclasses may extend
        # this method to make custom modifications to the event.
        if len(args) == 1 and not kwargs and isinstance(args[0], Event):
            event: Event = args[0]
            # Ensure that the given event matches what we want to emit
            assert isinstance(event, self.event_class)
        elif not args:
            _kwargs = self.default_args.copy()
            _kwargs.update(kwargs)
            event = self.event_class(**_kwargs)
        else:
            raise ValueError(
                trans._(
                    "Event emitters can be called with an Event instance or with keyword arguments only.",
                    deferred=True,
                )
            )
        return event

    def blocked(self, callback: Optional[Callback] = None) -> bool:
        """Return boolean indicating whether the emitter is blocked for
        the given callback.
        """
        return self._blocked.get(callback, 0) > 0

    def block(self, callback: Optional[Callback] = None):
        """Block this emitter. Any attempts to emit an event while blocked
        will be silently ignored. If *callback* is given, then the emitter
        is only blocked for that specific callback.
        Calls to block are cumulative; the emitter must be unblocked the same
        number of times as it is blocked.
        """
        self._blocked[callback] = self._blocked.get(callback, 0) + 1

    def unblock(self, callback: Optional[Callback] = None):
        """Unblock this emitter. See :func:`event.EventEmitter.block`.
        Note: Use of ``unblock(None)`` only reverses the effect of
        ``block(None)``; it does not unblock callbacks that were explicitly
        blocked using ``block(callback)``.
        """
        if callback not in self._blocked or self._blocked[callback] == 0:
            raise RuntimeError(
                trans._(
                    "Cannot unblock {self_} for callback {callback}; emitter was not previously blocked.",
                    deferred=True,
                    self_=self,
                    callback=callback,
                )
            )
        b = self._blocked[callback] - 1
        # Keep the ``None`` key around permanently; per-callback entries are
        # removed once their count reaches zero.
        if b == 0 and callback is not None:
            del self._blocked[callback]
        else:
            self._blocked[callback] = b

    def blocker(self, callback: Optional[Callback] = None):
        """Return an EventBlocker to be used in 'with' statements
        Notes
        -----
        For example, one could do::
            with emitter.blocker():
                pass  # ..do stuff; no events will be emitted..
        """
        return EventBlocker(self, callback)
class WarningEmitter(EventEmitter):
    """
    EventEmitter subclass used to allow deprecated events to be used with a
    warning message.
    """

    def __init__(
        self,
        message,
        category=FutureWarning,
        stacklevel=3,
        *args,
        **kwargs,
    ):
        # Remember the warning configuration; the warning fires at most
        # once per emitter instance (see _warn).
        self._message = message
        self._warned = False
        self._category = category
        self._stacklevel = stacklevel
        super().__init__(*args, **kwargs)

    def connect(self, cb, *args, **kwargs):
        """Warn on subscription to the deprecated event, then connect."""
        self._warn(cb)
        return super().connect(cb, *args, **kwargs)

    def _invoke_callback(self, cb, event):
        """Warn when the deprecated event actually fires, then invoke."""
        self._warn(cb)
        return super()._invoke_callback(cb, event)

    def _warn(self, cb):
        """Issue the deprecation warning, at most once."""
        if self._warned:
            return
        # (obj, attr) connections whose target attribute does not exist are
        # unimplemented stubs — don't warn about those.
        if isinstance(cb, tuple) and getattr(cb[0], cb[1], None) is None:
            return
        warnings.warn(
            self._message, category=self._category, stacklevel=self._stacklevel
        )
        self._warned = True
class EmitterGroup(EventEmitter):
    """EmitterGroup instances manage a set of related
    :class:`EventEmitters <vispy.event.EventEmitter>`.

    Its primary purpose is to provide organization for objects
    that make use of multiple emitters and to reduce the boilerplate code
    needed to initialize those emitters with default connections.

    EmitterGroup instances are usually stored as an 'events' attribute on
    objects that use multiple emitters. For example::

         EmitterGroup  EventEmitter
                 |       |
        Canvas.events.mouse_press
        Canvas.events.resized
        Canvas.events.key_press

    EmitterGroup is also a subclass of
    :class:`EventEmitters <vispy.event.EventEmitter>`,
    allowing it to emit its own
    events. Any callback that connects directly to the EmitterGroup will
    receive *all* of the events generated by the group's emitters.

    Parameters
    ----------
    source : object
        The object that the generated events apply to.
    auto_connect : bool
        If *auto_connect* is True, then one connection will
        be made for each emitter that looks like
        :func:`emitter.connect((source, 'on_' + event_name))
        <vispy.event.EventEmitter.connect>`.
        This provides a simple mechanism for automatically connecting a large
        group of emitters to default callbacks. By default, false.
    emitters : keyword arguments
        See the :func:`add <vispy.event.EmitterGroup.add>` method.
    """
    def __init__(
        self,
        source: Any = None,
        auto_connect: bool = False,
        **emitters: Union[Type[Event], EventEmitter, None],
    ):
        EventEmitter.__init__(self, source)
        self.auto_connect = auto_connect
        # Pattern used to look up default callbacks on the source object
        # (e.g. emitter 'resized' auto-connects to source.on_resized).
        self.auto_connect_format = "on_%s"
        self._emitters: Dict[str, EventEmitter] = dict()
        # whether the sub-emitters have been connected to the group:
        self._emitters_connected: bool = False
        self.add(**emitters)  # type: ignore
    def __getattr__(self, name) -> EventEmitter:
        # Only called when normal attribute lookup fails; re-running the
        # default lookup re-raises the usual AttributeError.  The
        # EventEmitter return annotation exists so that dynamically added
        # emitter attributes type-check -- NOTE(review): inferred from the
        # annotation; confirm against the project's typing conventions.
        return object.__getattribute__(self, name)
    def __getitem__(self, name: str) -> EventEmitter:
        """
        Return the emitter assigned to the specified name.
        Note that emitters may also be retrieved as an attribute of the
        EmitterGroup.
        """
        return self._emitters[name]
    def __setitem__(
        self, name: str, emitter: Union[Type[Event], EventEmitter, None]
    ):
        """
        Alias for EmitterGroup.add(name=emitter)
        """
        self.add(**{name: emitter})  # type: ignore
    def add(
        self,
        auto_connect: Optional[bool] = None,
        **kwargs: Union[Type[Event], EventEmitter, None],
    ):
        """Add one or more EventEmitter instances to this emitter group.

        Each keyword argument may be specified as either an EventEmitter
        instance or an Event subclass, in which case an EventEmitter will be
        generated automatically::

            # This statement:
            group.add(mouse_press=MouseEvent,
                      mouse_release=MouseEvent)
            # ..is equivalent to this statement:
            group.add(mouse_press=EventEmitter(group.source, 'mouse_press',
                                               MouseEvent),
                      mouse_release=EventEmitter(group.source, 'mouse_press',
                                                 MouseEvent))
        """
        # Fall back to the group-wide auto_connect policy when the caller
        # does not override it for this call.
        if auto_connect is None:
            auto_connect = self.auto_connect
        # check all names before adding anything
        for name in kwargs:
            if name in self._emitters:
                raise ValueError(
                    trans._(
                        "EmitterGroup already has an emitter named '{name}'",
                        deferred=True,
                        name=name,
                    )
                )
            elif hasattr(self, name):
                # Guard against shadowing real attributes/methods, since
                # emitters are exposed via attribute access.
                raise ValueError(
                    trans._(
                        "The name '{name}' cannot be used as an emitter; it is already an attribute of EmitterGroup",
                        deferred=True,
                        name=name,
                    )
                )
        # add each emitter specified in the keyword arguments
        for name, emitter in kwargs.items():
            # None is shorthand for a plain Event emitter.
            if emitter is None:
                emitter = Event
            # An Event subclass is wrapped in a freshly built EventEmitter
            # whose event type is the emitter's name.
            if inspect.isclass(emitter) and issubclass(emitter, Event):  # type: ignore
                emitter = EventEmitter(
                    source=self.source, type=name, event_class=emitter  # type: ignore
                )
            elif not isinstance(emitter, EventEmitter):
                raise RuntimeError(
                    trans._(
                        'Emitter must be specified as either an EventEmitter instance or Event subclass. (got {name}={emitter})',
                        deferred=True,
                        name=name,
                        emitter=emitter,
                    )
                )
            # give this emitter the same source as the group.
            emitter.source = self.source
            setattr(self, name, emitter)  # this is a bummer for typing.
            self._emitters[name] = emitter
            # Optionally wire the emitter to a default 'on_<name>' callback
            # on the source object, if one exists.
            if (
                auto_connect
                and self.source is not None
                and hasattr(self.source, self.auto_connect_format % name)
            ):
                emitter.connect((self.source, self.auto_connect_format % name))
            # If emitters are connected to the group already, then this one
            # should be connected as well.
            if self._emitters_connected:
                emitter.connect(self)
    @property
    def emitters(self) -> Dict[str, EventEmitter]:
        """List of current emitters in this group."""
        return self._emitters
    def __iter__(self) -> Generator[str, None, None]:
        """
        Iterates over the names of emitters in this group.
        """
        yield from self._emitters
    def block_all(self):
        """Block all emitters in this group."""
        # Block the group itself as well as each sub-emitter.
        self.block()
        for em in self._emitters.values():
            em.block()
    def unblock_all(self):
        """Unblock all emitters in this group."""
        self.unblock()
        for em in self._emitters.values():
            em.unblock()
    def connect(
        self,
        callback: Union[Callback, CallbackRef, 'EmitterGroup'],
        ref: Union[bool, str] = False,
        position: Union[Literal['first'], Literal['last']] = 'first',
        before: Union[str, Callback, List[Union[str, Callback]], None] = None,
        after: Union[str, Callback, List[Union[str, Callback]], None] = None,
    ):
        """Connect the callback to the event group. The callback will receive
        events from *all* of the emitters in the group.

        See :func:`EventEmitter.connect() <vispy.event.EventEmitter.connect>`
        for arguments.
        """
        # Lazily wire the sub-emitters into the group the first time a
        # listener is attached (avoids per-event overhead otherwise).
        self._connect_emitters(True)
        return EventEmitter.connect(
            self, callback, ref, position, before, after
        )
    def disconnect(self, callback: Optional[Callback] = None):
        """Disconnect the callback from this group. See
        :func:`connect() <vispy.event.EmitterGroup.connect>` and
        :func:`EventEmitter.connect() <vispy.event.EventEmitter.connect>` for
        more information.
        """
        ret = EventEmitter.disconnect(self, callback)
        # When the last listener goes away, detach the sub-emitters again.
        if len(self._callbacks) == 0:
            self._connect_emitters(False)
        return ret
    def _connect_emitters(self, connect):
        # Connect/disconnect all sub-emitters from the group. This allows the
        # group to emit an event whenever _any_ of the sub-emitters emit,
        # while simultaneously eliminating the overhead if nobody is listening.
        if connect:
            for emitter in self:
                # Deprecated (warning-wrapped) emitters are deliberately not
                # forwarded through the group.
                if not isinstance(self[emitter], WarningEmitter):
                    self[emitter].connect(self)
        else:
            for emitter in self:
                self[emitter].disconnect(self)
        self._emitters_connected = connect
    @property
    def ignore_callback_errors(self):
        # Mirrors the EventEmitter property; the setter below also fans the
        # value out to every sub-emitter.
        return super().ignore_callback_errors
    @ignore_callback_errors.setter
    def ignore_callback_errors(self, ignore):
        EventEmitter.ignore_callback_errors.fset(self, ignore)
        for emitter in self._emitters.values():
            if isinstance(emitter, EventEmitter):
                emitter.ignore_callback_errors = ignore
            elif isinstance(emitter, EmitterGroup):
                # NOTE(review): EmitterGroup is itself an EventEmitter
                # subclass, so the branch above matches first and this one
                # looks unreachable; no `ignore_callback_errors_all` method
                # is visible in this module either -- confirm.
                emitter.ignore_callback_errors_all(ignore)
    def blocker_all(self) -> 'EventBlockerAll':
        """Return an EventBlockerAll to be used in 'with' statements

        Notes
        -----
        For example, one could do::

            with emitter.blocker_all():
                pass  # ..do stuff; no events will be emitted..
        """
        return EventBlockerAll(self)
class EventBlocker:
    """Context manager that blocks an emitter (or one of its callbacks)
    for the duration of a ``with`` statement.

    The :attr:`count` property reports how many emissions were suppressed
    while the blocker was in effect, based on the target's
    ``_block_counter`` bookkeeping.
    """
    def __init__(self, target, callback=None):
        self.target = target
        self.callback = callback
        # Snapshot the already-recorded blocked-emission count so that
        # ``count`` reflects only what happened after this blocker was made.
        self._base_count = target._block_counter.get(callback, 0)
    @property
    def count(self):
        """Number of emissions blocked since this blocker was created."""
        total_blocked = self.target._block_counter.get(self.callback, 0)
        return total_blocked - self._base_count
    def __enter__(self):
        self.target.block(self.callback)
        return self
    def __exit__(self, *args):
        self.target.unblock(self.callback)
class EventBlockerAll:
    """Represents a block_all for an EmitterGroup to be used in a context
    manager (i.e. 'with' statement).

    On entry every emitter in the target group is blocked via
    ``block_all()``; on exit they are all unblocked again, even if the
    body raised.
    """
    def __init__(self, target):
        # target: the EmitterGroup whose emitters should be blocked.
        self.target = target
    def __enter__(self):
        self.target.block_all()
    def __exit__(self, *args):
        self.target.unblock_all()
def _is_pos_arg(param: inspect.Parameter):
"""
Check if param is positional or named and has no default parameter.
"""
return (
param.kind
in [
inspect.Parameter.POSITIONAL_ONLY,
inspect.Parameter.POSITIONAL_OR_KEYWORD,
]
and param.default == inspect.Parameter.empty
)
| 36.2962 | 133 | 0.579905 |
acf4a6744381e0de19667c6a48a25b06887a9f7b | 1,969 | py | Python | pypykatz/utils/crypto/cmdhelper.py | ebfe90/pypykatz | cd7ec54000aed2775ac3c58fb1fbdb82c93d3081 | [
"MIT"
] | 5 | 2019-04-20T05:34:01.000Z | 2019-10-12T01:26:09.000Z | pypykatz/utils/crypto/cmdhelper.py | ebfe90/pypykatz | cd7ec54000aed2775ac3c58fb1fbdb82c93d3081 | [
"MIT"
] | 1 | 2018-09-13T15:20:29.000Z | 2018-09-13T15:20:29.000Z | pypykatz/utils/crypto/cmdhelper.py | ebfe90/pypykatz | cd7ec54000aed2775ac3c58fb1fbdb82c93d3081 | [
"MIT"
] | 8 | 2018-09-11T22:02:22.000Z | 2019-11-27T08:52:20.000Z | #!/usr/bin/env python3
#
# Author:
# Tamas Jos (@skelsec)
#
class CryptoCMDHelper:
    """Registers and dispatches the standalone crypto subcommands of the
    pypykatz CLI: NT/LM password hashing, DCC v1/v2 (domain cached
    credentials) hashing, and Group Policy Preferences password decryption.
    """
    def __init__(self):
        # No 'live' (on-host) variants exist for these commands.
        self.live_keywords = []
        self.keywords = ['nt', 'lm', 'dcc', 'dcc2', 'gppass']
    def add_args(self, parser, live_parser):
        """Attach one argparse sub-parser per supported command."""
        nt = parser.add_parser('nt', help='Generates NT hash of the password')
        nt.add_argument('password', help='Password to be hashed')
        lm = parser.add_parser('lm', help='Generates LM hash of the password')
        lm.add_argument('password', help='Password to be hashed')
        dcc = parser.add_parser('dcc', help='Generates DCC v1 (domain cached credentials version 1) hash of the password')
        dcc.add_argument('username', help='username')
        dcc.add_argument('password', help='Password to be hashed')
        dcc2 = parser.add_parser('dcc2', help='Generates DCC v2 (domain cached credentials version 2) hash of the password')
        dcc2.add_argument('username', help='username')
        dcc2.add_argument('password', help='Password to be hashed')
        dcc2.add_argument('-i', '--iteration-count', type=int, default=10240, help='iteration-count')
        gppass = parser.add_parser('gppass', help='Decrypt GP passwords')
        gppass.add_argument('enc', help='Encrypted password string')
    def execute(self, args):
        """Dispatch parsed *args* to :meth:`run` when the command matches."""
        if args.command in self.keywords:
            self.run(args)
        # Short-circuit exactly as before: args.module is only touched when
        # live keywords exist and the command is 'live'.
        if len(self.live_keywords) > 0 and args.command == 'live' and args.module in self.live_keywords:
            self.run_live(args)
    def run(self, args):
        """Execute one crypto command and print its result to stdout."""
        # Imported lazily so that merely loading this helper stays cheap.
        from pypykatz.utils.crypto.winhash import NT, LM, MSDCC, MSDCCv2
        from pypykatz.utils.crypto.gppassword import gppassword
        command = args.command
        if command == 'nt':
            print(NT(args.password).hex())
        elif command == 'lm':
            print(LM(args.password).hex())
        elif command == 'dcc':
            print(MSDCC(args.username, args.password).hex())
        elif command == 'dcc2':
            print(MSDCCv2(args.username, args.password, args.iteration_count).hex())
        elif command == 'gppass':
            print(gppassword(args.enc))
| 36.462963 | 119 | 0.700356 |
acf4a6cee8e2ad763fbabd2a4f2ff88e8d6406be | 8,896 | py | Python | unittest/ops/greedy_legalize_unittest/greedy_legalize_unittest.py | xiefei1026/DREAMPlace | d674cce42caaa5490795c3b577eda43f80350a84 | [
"BSD-3-Clause"
] | 323 | 2019-02-28T10:09:53.000Z | 2022-03-24T04:00:01.000Z | unittest/ops/greedy_legalize_unittest/greedy_legalize_unittest.py | xiefei1026/DREAMPlace | d674cce42caaa5490795c3b577eda43f80350a84 | [
"BSD-3-Clause"
] | 61 | 2019-06-10T08:47:09.000Z | 2022-03-31T13:38:18.000Z | unittest/ops/greedy_legalize_unittest/greedy_legalize_unittest.py | xiefei1026/DREAMPlace | d674cce42caaa5490795c3b577eda43f80350a84 | [
"BSD-3-Clause"
] | 109 | 2019-03-22T17:32:16.000Z | 2022-03-26T14:31:05.000Z | ##
# @file greedy_legalize_unittest.py
# @author Yibo Lin
# @date Mar 2019
#
import os
import sys
import numpy as np
import unittest
import cairocffi as cairo
import time
import torch
from torch.autograd import Function, Variable
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
from dreamplace.ops.greedy_legalize import greedy_legalize
sys.path.pop()
def plot(figname,
        node_x, node_y,
        node_size_x, node_size_y,
        layout_xl, layout_yl, layout_xh, layout_yh,
        num_bins_x, num_bins_y,
        num_nodes, num_movable_nodes, num_physical_nodes, num_filler_nodes
        ):
    """Render a placement to a PNG via cairo.

    Draws the layout region, bin grid, fixed macros (red), filler cells
    (pale, if present) and movable cells (blue outlines) into *figname*.
    Coordinates are normalized so the layout plus a one-bin margin maps to
    the unit square; the y axis is flipped so the image matches the usual
    layout orientation.

    Assumes node_x/node_y/node_size_x/node_size_y are numpy arrays indexed
    movable nodes first, then terminals/terminal-NIs, then fillers --
    TODO confirm against the caller's ordering.
    """
    tt = time.time()
    width = 800
    height = 800
    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
    ctx = cairo.Context(surface)
    ctx.scale(width, height) # Normalizing the canvas
    # Extend the drawn region to include fixed macros that stick out of the
    # nominal layout rectangle.
    if num_movable_nodes < num_physical_nodes:
        layout_xl2 = min(np.amin(node_x[num_movable_nodes:num_physical_nodes]), layout_xl)
        layout_yl2 = min(np.amin(node_y[num_movable_nodes:num_physical_nodes]), layout_yl)
        layout_xh2 = max(np.amax(node_x[num_movable_nodes:num_physical_nodes]+node_size_x[num_movable_nodes:num_physical_nodes]), layout_xh)
        layout_yh2 = max(np.amax(node_y[num_movable_nodes:num_physical_nodes]+node_size_y[num_movable_nodes:num_physical_nodes]), layout_yh)
    else:
        layout_xl2 = layout_xl
        layout_yl2 = layout_yl
        layout_xh2 = layout_xh
        layout_yh2 = layout_yh
    bin_size_x = (layout_xh-layout_xl)/num_bins_x
    bin_size_y = (layout_yh-layout_yl)/num_bins_y
    # Map layout coordinates (with a one-bin margin) onto the unit square.
    def normalize_x(xx):
        return (xx - (layout_xl-bin_size_x))/(layout_xh-layout_xl+2*bin_size_x)
    def normalize_y(xx):
        return (xx - (layout_yl-bin_size_y))/(layout_yh-layout_yl+2*bin_size_y)
    # Stroke the outline of an axis-aligned rectangle.
    def draw_rect(x1, y1, x2, y2):
        ctx.move_to(x1, y1)
        ctx.line_to(x1, y2)
        ctx.line_to(x2, y2)
        ctx.line_to(x2, y1)
        ctx.close_path()
        ctx.stroke()
    # Lower-left coordinate of bin column/row i.
    def bin_xl(i):
        return layout_xl+i*bin_size_x
    def bin_yl(i):
        return layout_yl+i*bin_size_y
    # draw layout region
    ctx.set_source_rgb(1, 1, 1)
    draw_layout_xl = normalize_x(layout_xl2-1*bin_size_x)
    draw_layout_yl = normalize_y(layout_yl2-1*bin_size_y)
    draw_layout_xh = normalize_x(layout_xh2+1*bin_size_x)
    draw_layout_yh = normalize_y(layout_yh2+1*bin_size_y)
    ctx.rectangle(draw_layout_xl, draw_layout_yl, draw_layout_xh, draw_layout_yh)
    ctx.fill()
    ctx.set_line_width(0.001)
    ctx.set_source_rgba(0.1, 0.1, 0.1, alpha=0.8)
    draw_rect(normalize_x(layout_xl), normalize_y(layout_yl), normalize_x(layout_xh), normalize_y(layout_yh))
    #ctx.move_to(normalize_x(xl), normalize_y(yl))
    #ctx.line_to(normalize_x(xl), normalize_y(yh))
    #ctx.line_to(normalize_x(xh), normalize_y(yh))
    #ctx.line_to(normalize_x(xh), normalize_y(yl))
    #ctx.close_path()
    #ctx.stroke()
    # draw bins
    for i in range(1, num_bins_x):
        ctx.move_to(normalize_x(bin_xl(i)), normalize_y(layout_yl))
        ctx.line_to(normalize_x(bin_xl(i)), normalize_y(layout_yh))
        ctx.close_path()
        ctx.stroke()
    for i in range(1, num_bins_y):
        ctx.move_to(normalize_x(layout_xl), normalize_y(bin_yl(i)))
        ctx.line_to(normalize_x(layout_xh), normalize_y(bin_yl(i)))
        ctx.close_path()
        ctx.stroke()
    # draw cells
    # Compute normalized corner coordinates; y is flipped so the image is
    # oriented like the layout.
    node_xl = node_x
    node_yl = layout_yl+layout_yh-(node_y+node_size_y[0:len(node_y)]) # flip y
    node_xh = node_x+node_size_x[0:len(node_x)]
    node_yh = layout_yl+layout_yh-node_y # flip y
    node_xl = normalize_x(node_xl)
    node_yl = normalize_y(node_yl)
    node_xh = normalize_x(node_xh)
    node_yh = normalize_y(node_yh)
    ctx.set_line_width(0.001)
    #print("plot layout")
    # draw fixed macros
    ctx.set_source_rgba(1, 0, 0, alpha=0.5)
    for i in range(num_movable_nodes, num_physical_nodes):
        ctx.rectangle(node_xl[i], node_yl[i], node_xh[i]-node_xl[i], node_yh[i]-node_yl[i]) # Rectangle(xl, yl, w, h)
        ctx.fill()
    ctx.set_source_rgba(0, 0, 0, alpha=1.0) # Solid color
    for i in range(num_movable_nodes, num_physical_nodes):
        draw_rect(node_xl[i], node_yl[i], node_xh[i], node_yh[i])
    # draw fillers
    if len(node_xl) > num_physical_nodes: # filler is included
        ctx.set_line_width(0.001)
        ctx.set_source_rgba(230/255.0, 230/255.0, 250/255.0, alpha=0.3) # Solid color
        for i in range(num_physical_nodes, num_nodes):
            draw_rect(node_xl[i], node_yl[i], node_xh[i], node_yh[i])
    # draw cells
    ctx.set_line_width(0.002)
    ctx.set_source_rgba(0, 0, 1, alpha=0.8) # Solid color
    for i in range(num_movable_nodes):
        draw_rect(node_xl[i], node_yl[i], node_xh[i], node_yh[i])
    surface.write_to_png(figname) # Output to PNG
    print("[I] plotting to %s takes %.3f seconds" % (figname, time.time()-tt))
    #print(session.run(grads))
    #print(session.run(masked_grads))
class GreedyLegalizeOpTest(unittest.TestCase):
    def test_greedyLegalizeRandom(self):
        """Smoke-test GreedyLegalize on a tiny 3-cell placement.

        Builds a 4x4 layout with three overlapping movable cells, runs the
        CPU legalizer, plots the placement before and after, and prints the
        average displacement.  If a CUDA device is available the GPU path
        is exercised as well (its result is printed but not compared).
        """
        dtype = np.float64
        # Initial (illegal) cell positions and sizes.
        xx = np.array([1.0, 0.5, 3.0]).astype(dtype)
        yy = np.array([0.5, 0.8, 1.5]).astype(dtype)
        node_size_x = np.array([0.5, 1.5, 1.0]).astype(dtype)
        node_size_y = np.array([2.0, 2.0, 4.0]).astype(dtype)
        node_weights = np.ones_like(node_size_x)
        num_nodes = len(xx)
        # Layout region boundaries.
        xl = 1.0
        yl = 1.0
        xh = 5.0
        yh = 5.0
        # No fixed terminals, terminal NIs, or filler cells in this case.
        num_terminals = 0
        num_terminal_NIs = 0
        num_filler_nodes = 0
        num_movable_nodes = len(xx)-num_terminals-num_terminal_NIs-num_filler_nodes
        site_width = 1
        row_height = 2
        num_bins_x = 2
        num_bins_y = 2
        # Empty fence-region description (no fence regions).
        flat_region_boxes = np.zeros(0, dtype=dtype)
        flat_region_boxes_start = np.array([0], dtype=np.int32)
        node2fence_region_map = np.zeros(0, dtype=np.int32)
        plot("initial.png",
                xx, yy,
                node_size_x, node_size_y,
                xl, yl, xh, yh,
                num_bins_x, num_bins_y,
                num_movable_nodes+num_terminals+num_terminal_NIs+num_filler_nodes, num_movable_nodes, num_movable_nodes+num_terminals+num_terminal_NIs, num_filler_nodes)
        # test cpu
        custom = greedy_legalize.GreedyLegalize(
                torch.from_numpy(node_size_x), torch.from_numpy(node_size_y), torch.from_numpy(node_weights),
                flat_region_boxes=torch.from_numpy(flat_region_boxes), flat_region_boxes_start=torch.from_numpy(flat_region_boxes_start), node2fence_region_map=torch.from_numpy(node2fence_region_map),
                xl=xl, yl=yl, xh=xh, yh=yh,
                site_width=site_width, row_height=row_height,
                num_bins_x=num_bins_x, num_bins_y=num_bins_y,
                num_movable_nodes=num_movable_nodes,
                num_terminal_NIs=num_terminal_NIs,
                num_filler_nodes=num_filler_nodes)
        # Positions are packed as [all x | all y] in a single 1-D tensor.
        pos = Variable(torch.from_numpy(np.concatenate([xx, yy])))
        result = custom(pos, pos)
        print("custom_result = ", result)
        print("average displacement = %g" % (np.sum(np.absolute(result.numpy() - np.concatenate([xx, yy])))/num_movable_nodes))
        plot("final.png",
                result.numpy()[0:len(xx)], result.numpy()[len(xx):],
                node_size_x, node_size_y,
                xl, yl, xh, yh,
                num_bins_x, num_bins_y,
                num_movable_nodes+num_terminals+num_terminal_NIs+num_filler_nodes, num_movable_nodes, num_movable_nodes+num_terminals+num_terminal_NIs, num_filler_nodes)
        # test cuda
        if torch.cuda.device_count():
            custom_cuda = greedy_legalize.GreedyLegalize(
                    torch.from_numpy(node_size_x).cuda(), torch.from_numpy(node_size_y).cuda(), torch.from_numpy(node_weights).cuda(),
                    flat_region_boxes=torch.from_numpy(flat_region_boxes).cuda(), flat_region_boxes_start=torch.from_numpy(flat_region_boxes_start).cuda(), node2fence_region_map=torch.from_numpy(node2fence_region_map).cuda(),
                    xl=xl, yl=yl, xh=xh, yh=yh,
                    site_width=site_width, row_height=row_height,
                    num_bins_x=num_bins_x, num_bins_y=num_bins_y,
                    num_movable_nodes=num_movable_nodes,
                    num_terminal_NIs=num_terminal_NIs,
                    num_filler_nodes=num_filler_nodes)
            pos = Variable(torch.from_numpy(np.concatenate([xx, yy]))).cuda()
            result_cuda = custom_cuda(pos, pos)
            print("custom_result = ", result_cuda.data.cpu())
            #np.testing.assert_allclose(result, result_cuda.data.cpu())
# Allow running this test module directly (python greedy_legalize_unittest.py).
if __name__ == '__main__':
    unittest.main()
| 42.769231 | 230 | 0.655913 |
acf4a6fa90c01f593da52d1c7cba0c325d5d3ac9 | 266 | py | Python | python/proseco/utility/constants.py | ProSeCo-Planning/ros_proseco_planning | 484beedb01e5faafa7e03e95bc88b1e4f969285e | [
"BSD-3-Clause"
] | null | null | null | python/proseco/utility/constants.py | ProSeCo-Planning/ros_proseco_planning | 484beedb01e5faafa7e03e95bc88b1e4f969285e | [
"BSD-3-Clause"
] | null | null | null | python/proseco/utility/constants.py | ProSeCo-Planning/ros_proseco_planning | 484beedb01e5faafa7e03e95bc88b1e4f969285e | [
"BSD-3-Clause"
] | null | null | null | # The constant class contains all the constants used in the ProSeCo package.
# The gravitational constant [m/s^2]
GRAVITY = 9.807
# The ratio of a circle's circumference to its diameter [m/m]
PI = 3.14159265359
# The duration of an action [s]
ACTION_DURATION = 0.5
| 29.555556 | 76 | 0.74812 |
acf4a825c9d7ce4855130385b212ba595d171272 | 210 | py | Python | trans_ms/transport_management/doctype/electronics_details/electronics_details.py | mohsinalimat/transport | 3d32bd27f505f64b948f48d0bfc5c7ccaf61c4a2 | [
"MIT"
] | 4 | 2021-09-24T12:30:32.000Z | 2022-03-19T14:55:34.000Z | trans_ms/transport_management/doctype/electronics_details/electronics_details.py | mohsinalimat/transport | 3d32bd27f505f64b948f48d0bfc5c7ccaf61c4a2 | [
"MIT"
] | null | null | null | trans_ms/transport_management/doctype/electronics_details/electronics_details.py | mohsinalimat/transport | 3d32bd27f505f64b948f48d0bfc5c7ccaf61c4a2 | [
"MIT"
] | 7 | 2021-09-24T12:30:33.000Z | 2022-03-21T11:34:02.000Z | # Copyright (c) 2021, Aakvatech Limited and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class ElectronicsDetails(Document):
pass
| 23.333333 | 56 | 0.804762 |
acf4a95f12b4dcfb43c88783bdeefdb52476ac43 | 903 | py | Python | src/tests/test_service.py | marcusianlevine/compose-flow | 6739d3fafb832fa73004f569dc20ab8e76d17353 | [
"Apache-2.0"
] | 10 | 2018-05-18T19:42:18.000Z | 2020-12-08T11:07:52.000Z | src/tests/test_service.py | marcusianlevine/compose-flow | 6739d3fafb832fa73004f569dc20ab8e76d17353 | [
"Apache-2.0"
] | 25 | 2018-09-14T13:31:56.000Z | 2021-03-18T14:35:11.000Z | src/tests/test_service.py | marcusianlevine/compose-flow | 6739d3fafb832fa73004f569dc20ab8e76d17353 | [
"Apache-2.0"
] | 5 | 2018-05-18T19:42:08.000Z | 2018-10-11T16:33:04.000Z | import re
import shlex
from unittest import mock
from compose_flow.commands import Workflow
from tests import BaseTestCase
class ServiceTestCase(BaseTestCase):
def test_runs(self, *mocks):
"""
Basic test to ensure the command runs as expected
"""
argv = shlex.split("-e test service exec app /bin/bash")
workflow = Workflow(argv=argv)
service = workflow.subcommand
service.select_container = mock.MagicMock()
service.select_container.return_value = (
"container_id service_name something test_hostname"
)
workflow.run()
ssh_mock = self.sh_mock.ssh
ssh_args = ssh_mock.mock_calls[0][1]
args_s = " ".join(ssh_args)
command_re = re.compile(r"docker exec .* service_name\.container_id /bin/bash")
self.assertEqual(True, command_re.search(args_s) is not None)
| 25.083333 | 87 | 0.660022 |
acf4a972697937bca79efae523ba9dde68cfb983 | 1,122 | py | Python | agent.py | petrosgk/MalmoRL | 97af63d11430e5bce30bb84f79c6edbd075cf2ec | [
"MIT"
] | 20 | 2018-03-07T14:05:24.000Z | 2021-02-09T09:15:06.000Z | agent.py | petrosgk/MalmoRL | 97af63d11430e5bce30bb84f79c6edbd075cf2ec | [
"MIT"
] | 1 | 2018-04-03T03:01:05.000Z | 2018-04-03T09:45:48.000Z | agent.py | petrosgk/MalmoRL | 97af63d11430e5bce30bb84f79c6edbd075cf2ec | [
"MIT"
class BaseAgent(object):
    """Common interface for agents.

    Concrete agents must implement training (:meth:`fit`), evaluation
    (:meth:`test`) and state persistence (:meth:`save`/:meth:`load`).
    """
    def __init__(self, name, env):
        self.name = name
        self.env = env
    def fit(self, env, nb_steps):
        """Train the agent on *env* for *nb_steps* steps."""
        raise NotImplementedError
    def test(self, env, nb_episodes):
        """Evaluate the agent on *env* for *nb_episodes* episodes."""
        raise NotImplementedError
    def save(self, out_dir):
        """Persist the agent's state under *out_dir*."""
        raise NotImplementedError
    def load(self, out_dir):
        """Restore the agent's state from *out_dir*."""
        raise NotImplementedError
class Observer(BaseAgent):
    """Passive agent that merely advances the environment.

    Every step it issues action 0, which Malmo ignores, so episodes just
    play out without any learning.
    """
    def __init__(self, name, env):
        # NOTE(review): the base class is initialised with the environment's
        # action list, not the environment itself -- mirrors the original
        # behavior; confirm this is intentional.
        super(Observer, self).__init__(name, env.available_actions)
    def fit(self, env, nb_steps):
        """Advance *env* for *nb_steps* steps, resetting finished episodes."""
        env.reset()
        for _ in range(nb_steps):
            # Start a fresh episode whenever the previous one finished.
            if env.done:
                env.reset()
            # Always send the first action; it is ignored by Malmo.
            env.do(0)
    def test(self, env, nb_steps):
        # Fitting and testing for the observer agent are the same.
        self.fit(env, nb_steps)
    def save(self, out_dir):
        # Nothing to persist for a passive observer.
        pass
    def load(self, out_dir):
        pass
| 24.391304 | 67 | 0.595365 |
acf4a9dc53145b230f1dd102901298a45364407e | 15,037 | py | Python | lib/spack/spack/build_systems/cmake.py | mt-empty/spack | 4573261de5b32bb22289752b9023aa767b50b700 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2021-03-05T10:54:32.000Z | 2021-03-05T14:14:52.000Z | lib/spack/spack/build_systems/cmake.py | mt-empty/spack | 4573261de5b32bb22289752b9023aa767b50b700 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 32 | 2020-12-15T17:29:20.000Z | 2022-03-21T15:08:31.000Z | lib/spack/spack/build_systems/cmake.py | mt-empty/spack | 4573261de5b32bb22289752b9023aa767b50b700 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2021-07-19T20:31:27.000Z | 2021-07-19T21:14:14.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import inspect
import os
import platform
import re
from typing import List # novm
from llnl.util.filesystem import working_dir
import spack.build_environment
from spack.directives import conflicts, depends_on, variant
from spack.package import InstallError, PackageBase, run_after
# Regex to extract the primary generator from the CMake generator
# string.
_primary_generator_extractor = re.compile(r'(?:.* - )?(.*)')
def _extract_primary_generator(generator):
"""Use the compiled regex _primary_generator_extractor to extract the
primary generator from the generator string which may contain an
optional secondary generator.
"""
primary_generator = _primary_generator_extractor.match(generator).group(1)
return primary_generator
class CMakePackage(PackageBase):
"""Specialized class for packages built using CMake
For more information on the CMake build system, see:
https://cmake.org/cmake/help/latest/
This class provides three phases that can be overridden:
1. :py:meth:`~.CMakePackage.cmake`
2. :py:meth:`~.CMakePackage.build`
3. :py:meth:`~.CMakePackage.install`
They all have sensible defaults and for many packages the only thing
necessary will be to override :py:meth:`~.CMakePackage.cmake_args`.
For a finer tuning you may also override:
+-----------------------------------------------+--------------------+
| **Method** | **Purpose** |
+===============================================+====================+
| :py:meth:`~.CMakePackage.root_cmakelists_dir` | Location of the |
| | root CMakeLists.txt|
+-----------------------------------------------+--------------------+
| :py:meth:`~.CMakePackage.build_directory` | Directory where to |
| | build the package |
+-----------------------------------------------+--------------------+
The generator used by CMake can be specified by providing the
generator attribute. Per
https://cmake.org/cmake/help/git-master/manual/cmake-generators.7.html,
the format is: [<secondary-generator> - ]<primary_generator>. The
full list of primary and secondary generators supported by CMake may
be found in the documentation for the version of CMake used;
however, at this time Spack supports only the primary generators
"Unix Makefiles" and "Ninja." Spack's CMake support is agnostic with
respect to primary generators. Spack will generate a runtime error
if the generator string does not follow the prescribed format, or if
the primary generator is not supported.
"""
#: Phases of a CMake package
phases = ['cmake', 'build', 'install']
#: This attribute is used in UI queries that need to know the build
#: system base class
build_system_class = 'CMakePackage'
build_targets = [] # type: List[str]
install_targets = ['install']
build_time_test_callbacks = ['check']
#: The build system generator to use.
#:
#: See ``cmake --help`` for a list of valid generators.
#: Currently, "Unix Makefiles" and "Ninja" are the only generators
#: that Spack supports. Defaults to "Unix Makefiles".
#:
#: See https://cmake.org/cmake/help/latest/manual/cmake-generators.7.html
#: for more information.
generator = 'Unix Makefiles'
# https://cmake.org/cmake/help/latest/variable/CMAKE_BUILD_TYPE.html
variant('build_type', default='RelWithDebInfo',
description='CMake build type',
values=('Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel'))
# https://cmake.org/cmake/help/latest/variable/CMAKE_INTERPROCEDURAL_OPTIMIZATION.html
variant('ipo', default=False,
description='CMake interprocedural optimization')
# CMAKE_INTERPROCEDURAL_OPTIMIZATION only exists for CMake >= 3.9
conflicts('+ipo', when='^cmake@:3.8',
msg='+ipo is not supported by CMake < 3.9')
depends_on('cmake', type='build')
@property
def archive_files(self):
"""Files to archive for packages based on CMake"""
return [os.path.join(self.build_directory, 'CMakeCache.txt')]
    @property
    def root_cmakelists_dir(self):
        """The relative path to the directory containing CMakeLists.txt

        This path is relative to the root of the extracted tarball,
        not to the ``build_directory``. Defaults to the current directory.

        :return: directory containing CMakeLists.txt
        """
        # By default CMakeLists.txt is expected at the top of the staged
        # sources; packages override this property when it lives deeper.
        return self.stage.source_path
@property
def std_cmake_args(self):
"""Standard cmake arguments provided as a property for
convenience of package writers
:return: standard cmake arguments
"""
# standard CMake arguments
std_cmake_args = CMakePackage._std_args(self)
std_cmake_args += getattr(self, 'cmake_flag_args', [])
return std_cmake_args
    @staticmethod
    def _std_args(pkg):
        """Computes the standard cmake arguments for a generic package.

        Validates the chosen generator, picks the build type and IPO
        setting from the spec's variants, and assembles install prefix,
        rpath and prefix-path definitions.
        """
        # Packages that do not declare a generator get the default.
        try:
            generator = pkg.generator
        except AttributeError:
            generator = 'Unix Makefiles'
        # Make sure a valid generator was chosen
        valid_primary_generators = ['Unix Makefiles', 'Ninja']
        primary_generator = _extract_primary_generator(generator)
        if primary_generator not in valid_primary_generators:
            msg = "Invalid CMake generator: '{0}'\n".format(generator)
            msg += "CMakePackage currently supports the following "
            msg += "primary generators: '{0}'".\
                format("', '".join(valid_primary_generators))
            raise InstallError(msg)
        # Fall back to the documented defaults when the spec does not
        # carry the corresponding variant.
        try:
            build_type = pkg.spec.variants['build_type'].value
        except KeyError:
            build_type = 'RelWithDebInfo'
        try:
            ipo = pkg.spec.variants['ipo'].value
        except KeyError:
            ipo = False
        define = CMakePackage.define
        args = [
            '-G', generator,
            define('CMAKE_INSTALL_PREFIX', pkg.prefix),
            define('CMAKE_BUILD_TYPE', build_type),
        ]
        # CMAKE_INTERPROCEDURAL_OPTIMIZATION only exists for CMake >= 3.9
        if pkg.spec.satisfies('^cmake@3.9:'):
            args.append(define('CMAKE_INTERPROCEDURAL_OPTIMIZATION', ipo))
        # Verbose makefiles make build logs debuggable; only meaningful
        # for the Makefiles generator.
        if primary_generator == 'Unix Makefiles':
            args.append(define('CMAKE_VERBOSE_MAKEFILE', True))
        # On macOS, prefer plain libraries over frameworks/app bundles.
        if platform.mac_ver()[0]:
            args.extend([
                define('CMAKE_FIND_FRAMEWORK', "LAST"),
                define('CMAKE_FIND_APPBUNDLE', "LAST"),
            ])
        # Set up CMake rpath
        args.extend([
            define('CMAKE_INSTALL_RPATH_USE_LINK_PATH', False),
            define('CMAKE_INSTALL_RPATH',
                   spack.build_environment.get_rpaths(pkg)),
            define('CMAKE_PREFIX_PATH',
                   spack.build_environment.get_cmake_prefix_path(pkg))
        ])
        return args
@staticmethod
def define(cmake_var, value):
"""Return a CMake command line argument that defines a variable.
The resulting argument will convert boolean values to OFF/ON
and lists/tuples to CMake semicolon-separated string lists. All other
values will be interpreted as strings.
Examples:
.. code-block:: python
[define('BUILD_SHARED_LIBS', True),
define('CMAKE_CXX_STANDARD', 14),
define('swr', ['avx', 'avx2'])]
will generate the following configuration options:
.. code-block:: console
["-DBUILD_SHARED_LIBS:BOOL=ON",
"-DCMAKE_CXX_STANDARD:STRING=14",
"-DSWR:STRING=avx;avx2]
"""
# Create a list of pairs. Each pair includes a configuration
# option and whether or not that option is activated
if isinstance(value, bool):
kind = 'BOOL'
value = "ON" if value else "OFF"
else:
kind = 'STRING'
if isinstance(value, (list, tuple)):
value = ";".join(str(v) for v in value)
else:
value = str(value)
return "".join(["-D", cmake_var, ":", kind, "=", value])
def define_from_variant(self, cmake_var, variant=None):
"""Return a CMake command line argument from the given variant's value.
The optional ``variant`` argument defaults to the lower-case transform
of ``cmake_var``.
This utility function is similar to
:py:meth:`~.AutotoolsPackage.with_or_without`.
Examples:
Given a package with:
.. code-block:: python
variant('cxxstd', default='11', values=('11', '14'),
multi=False, description='')
variant('shared', default=True, description='')
variant('swr', values=any_combination_of('avx', 'avx2'),
description='')
calling this function like:
.. code-block:: python
[define_from_variant('BUILD_SHARED_LIBS', 'shared'),
define_from_variant('CMAKE_CXX_STANDARD', 'cxxstd'),
define_from_variant('SWR')]
will generate the following configuration options:
.. code-block:: console
["-DBUILD_SHARED_LIBS:BOOL=ON",
"-DCMAKE_CXX_STANDARD:STRING=14",
"-DSWR:STRING=avx;avx2]
for ``<spec-name> cxxstd=14 +shared swr=avx,avx2``
"""
if variant is None:
variant = cmake_var.lower()
if variant not in self.variants:
raise KeyError(
'"{0}" is not a variant of "{1}"'.format(variant, self.name))
value = self.spec.variants[variant].value
if isinstance(value, (tuple, list)):
# Sort multi-valued variants for reproducibility
value = sorted(value)
return self.define(cmake_var, value)
def flags_to_build_system_args(self, flags):
    """Produces a list of all command line arguments to pass the specified
    compiler flags to cmake. Note CMAKE does not have a cppflags option,
    so cppflags will be added to cflags, cxxflags, and fflags to mimic the
    behavior in other tools.

    Args:
        flags (dict): maps a flag category name (``cflags``, ``cxxflags``,
            ``fflags``, ``cppflags``, ``ldflags``, ``ldlibs``) to a list of
            flag strings.  ``ldflags`` and ``ldlibs`` must be present.

    The result is stored on ``self.cmake_flag_args`` (not returned).
    """
    # Has to be dynamic attribute due to caching
    setattr(self, 'cmake_flag_args', [])
    flag_string = '-DCMAKE_{0}_FLAGS={1}'
    langs = {'C': 'c', 'CXX': 'cxx', 'Fortran': 'f'}

    # Handle language compiler flags
    for lang, pre in langs.items():
        flag = pre + 'flags'
        # cmake has no explicit cppflags support -> add it to all langs
        lang_flags = ' '.join(flags.get(flag, []) + flags.get('cppflags',
                                                              []))
        if lang_flags:
            self.cmake_flag_args.append(flag_string.format(lang,
                                                           lang_flags))

    # Cmake has different linker arguments for different build types.
    # We specify for each of them.
    if flags['ldflags']:
        ldflags = ' '.join(flags['ldflags'])
        ld_string = '-DCMAKE_{0}_LINKER_FLAGS={1}'
        # cmake has separate linker arguments for types of builds.
        # (loop variable renamed from ``type`` to avoid shadowing the builtin)
        for build_type in ['EXE', 'MODULE', 'SHARED', 'STATIC']:
            self.cmake_flag_args.append(ld_string.format(build_type, ldflags))

    # CMake has libs options separated by language. Apply ours to each.
    if flags['ldlibs']:
        libs_flags = ' '.join(flags['ldlibs'])
        libs_string = '-DCMAKE_{0}_STANDARD_LIBRARIES={1}'
        for lang in langs:
            self.cmake_flag_args.append(libs_string.format(lang,
                                                           libs_flags))
@property
def build_dirname(self):
    """Directory name (not a full path) for the out-of-source build.

    :return: name of the subdirectory for building the package
    """
    # Short (7-character) DAG hash keeps the name unique per spec.
    return 'spack-build-{0}'.format(self.spec.dag_hash(7))
@property
def build_directory(self):
    """Full path of the directory used when building the package.

    :return: directory where to build the package
    """
    subdir = self.build_dirname
    return os.path.join(self.stage.path, subdir)
def cmake_args(self):
    """Arguments to pass to ``cmake`` beyond the standard ones.

    ``CMAKE_INSTALL_PREFIX`` and ``CMAKE_BUILD_TYPE`` are set
    automatically and must not be returned here.

    :return: list of arguments for cmake
    """
    # Default: no extra arguments; packages override this hook.
    return []
def cmake(self, spec, prefix):
    """Runs ``cmake`` in the build directory"""
    # NOTE(review): the list returned by ``std_cmake_args`` is extended
    # in place here -- presumably the property returns a fresh list each
    # access; confirm before restructuring.
    options = self.std_cmake_args
    options += self.cmake_args()
    # The directory holding the root CMakeLists.txt goes last.
    options.append(os.path.abspath(self.root_cmakelists_dir))
    # Run from (and create, if needed) the out-of-source build directory.
    with working_dir(self.build_directory, create=True):
        inspect.getmodule(self).cmake(*options)
def build(self, spec, prefix):
    """Make the build targets"""
    with working_dir(self.build_directory):
        if self.generator == 'Unix Makefiles':
            inspect.getmodule(self).make(*self.build_targets)
        elif self.generator == 'Ninja':
            # Pass "-v" (verbose) without mutating ``self.build_targets``:
            # the original ``self.build_targets.append("-v")`` permanently
            # modified a list that is typically a shared class attribute,
            # adding one more "-v" on every rebuild.
            inspect.getmodule(self).ninja("-v", *self.build_targets)
def install(self, spec, prefix):
    """Make the install targets"""
    with working_dir(self.build_directory):
        module = inspect.getmodule(self)
        if self.generator == 'Unix Makefiles':
            module.make(*self.install_targets)
        elif self.generator == 'Ninja':
            module.ninja(*self.install_targets)
# After the build phase, run the default build-time test callbacks
# (executed only when the package is installed with tests enabled).
run_after('build')(PackageBase._run_default_build_time_test_callbacks)
def check(self):
    """Searches the CMake-generated Makefile for the target ``test``
    and runs it if found.
    """
    # Map each supported generator to the helper that runs a target
    # only when the build system actually defines it.
    runners = {
        'Unix Makefiles': self._if_make_target_execute,
        'Ninja': self._if_ninja_target_execute,
    }
    with working_dir(self.build_directory):
        run = runners.get(self.generator)
        if run is not None:
            run('test', jobs_env='CTEST_PARALLEL_LEVEL')
            run('check')
# Sanity check that self.prefix exists after installation.
run_after('install')(PackageBase.sanity_check_prefix)
| 37.972222 | 90 | 0.594666 |
acf4aa4ef9f0df95c559afd7889a1e2074c6aced | 492 | py | Python | sigopt/cli/arguments/project.py | emattia/sigopt-python | e6b4e5240261ddbdc84a3b4061b8935873612c23 | [
"MIT"
] | 67 | 2015-03-01T02:16:47.000Z | 2021-05-10T16:17:21.000Z | sigopt/cli/arguments/project.py | emattia/sigopt-python | e6b4e5240261ddbdc84a3b4061b8935873612c23 | [
"MIT"
] | 150 | 2015-10-22T21:59:37.000Z | 2022-03-10T00:55:19.000Z | sigopt/cli/arguments/project.py | emattia/sigopt-python | e6b4e5240261ddbdc84a3b4061b8935873612c23 | [
"MIT"
] | 19 | 2016-07-10T03:46:33.000Z | 2022-02-05T12:13:01.000Z | import click
from sigopt.defaults import check_valid_project_id, get_default_project
def validate_project_id_callback(ctx, p, value): # pylint: disable=unused-argument
if value is None:
return get_default_project()
try:
check_valid_project_id(value)
except ValueError as ve:
raise click.BadParameter(str(ve)) from ve
return value
project_option = click.option(
"-p",
"--project",
callback=validate_project_id_callback,
help="Configure the project to use",
)
| 22.363636 | 83 | 0.756098 |
acf4ab36310af486cc7d319cf18035c3f82ca3b2 | 3,559 | py | Python | pyupnptools/upnp_soap.py | bjtj/python-upnp-tools | d33aadedc2a4419f3ffcc94993b54f34072e336a | [
"MIT"
] | null | null | null | pyupnptools/upnp_soap.py | bjtj/python-upnp-tools | d33aadedc2a4419f3ffcc94993b54f34072e336a | [
"MIT"
] | null | null | null | pyupnptools/upnp_soap.py | bjtj/python-upnp-tools | d33aadedc2a4419f3ffcc94993b54f34072e336a | [
"MIT"
] | null | null | null | from .upnp import *
from .upnp_xml import *
from collections import OrderedDict
class UPnPSoapRequest(OrderedDict):
    """A SOAP action request: an ordered mapping of argument name -> value
    for ``action_name`` on the service identified by ``service_type``,
    serializable (via ``str()``) to the SOAP envelope XML UPnP expects."""

    def __init__(self, service_type, action_name, params=None):
        super(UPnPSoapRequest, self).__init__()
        self.service_type = service_type
        self.action_name = action_name
        if params:
            self.update(params)

    def __str__(self):
        import xml.etree.ElementTree as ET
        from io import BytesIO
        soap_ns = 'http://schemas.xmlsoap.org/soap/envelope/'
        # Register prefixes so the serialized document uses s:/u: names.
        ET.register_namespace('s', soap_ns)
        ET.register_namespace('u', self.service_type)
        envelope = ET.Element('{%s}Envelope' % soap_ns)
        body = ET.SubElement(envelope, '{%s}Body' % soap_ns)
        action = ET.SubElement(
            body, '{%s}%s' % (self.service_type, self.action_name))
        for arg_name, arg_value in self.items():
            ET.SubElement(action, arg_name).text = arg_value
        buf = BytesIO()
        ET.ElementTree(envelope).write(
            buf, encoding='utf-8', xml_declaration=True)
        return buf.getvalue().decode('utf-8')

    @staticmethod
    def read(service_type, action_name, text):
        """Parse SOAP request XML into a UPnPSoapRequest."""
        request = UPnPSoapRequest(service_type, action_name)
        root = read_xml(text)
        for node in root:
            if get_tagname(node) != 'Body':
                continue
            for action_node in node:
                # action_node's tag is expected to equal action_name
                for prop_node in action_node:
                    if list(prop_node):
                        continue  # only leaf elements are arguments
                    request[get_tagname(prop_node)] = prop_node.text
        return request
class UPnPSoapResponse(OrderedDict):
    """A SOAP action response: ordered mapping of out-argument name -> value.

    ``service_type`` and ``action_name`` identify the action being answered.
    Previously they were only assigned inside :meth:`read`, so ``str()`` on
    a hand-built response raised ``AttributeError``; they can now be passed
    to the constructor (both default to ``None``, keeping the original
    ``UPnPSoapResponse()`` call signature working).
    """

    def __init__(self, service_type=None, action_name=None):
        super(UPnPSoapResponse, self).__init__()
        self.service_type = service_type
        self.action_name = action_name

    def __str__(self):
        from io import BytesIO
        import xml.etree.ElementTree as ET
        ns_table = {
            's': 'http://schemas.xmlsoap.org/soap/envelope/',
            'u': self.service_type
        }
        for k, v in ns_table.items():
            ET.register_namespace(k, v)
        envelope = ET.Element('{{{}}}Envelope'.format(ns_table['s']))
        body = ET.SubElement(envelope, '{{{}}}Body'.format(ns_table['s']))
        # UPnP names the response element "<ActionName>Response".
        action = ET.SubElement(body, '{{{}}}{}Response'.format(ns_table['u'],
                                                               self.action_name))
        for k, v in self.items():
            elem = ET.SubElement(action, k)
            elem.text = v
        et = ET.ElementTree(envelope)
        f = BytesIO()
        et.write(f, encoding='utf-8', xml_declaration=True)
        return f.getvalue().decode('utf-8')

    @staticmethod
    def read(text):
        """Parse SOAP response XML into a UPnPSoapResponse."""
        res = UPnPSoapResponse()
        root = read_xml(text)
        for node in root:
            if get_tagname(node) == 'Body':
                for action_node in node:
                    res.service_type = get_namespace(action_node)
                    response_action_name = get_tagname(action_node)
                    # Strip the trailing "Response" to recover the action name.
                    res.action_name = response_action_name[:-len('Response')]
                    for prop_node in action_node:
                        if not list(prop_node):
                            name = get_tagname(prop_node)
                            value = prop_node.text
                            res[name] = value
        return res
| 34.892157 | 81 | 0.527676 |
acf4ab526e02282f67f00f393e9e15703ca188d5 | 2,461 | py | Python | Application/db_operations.py | gdimitris/FleetManagerBackend | 1a5f0c26a4279894b6ed6507cf729f88502d0883 | [
"MIT"
] | null | null | null | Application/db_operations.py | gdimitris/FleetManagerBackend | 1a5f0c26a4279894b6ed6507cf729f88502d0883 | [
"MIT"
] | null | null | null | Application/db_operations.py | gdimitris/FleetManagerBackend | 1a5f0c26a4279894b6ed6507cf729f88502d0883 | [
"MIT"
] | 1 | 2020-05-05T05:42:00.000Z | 2020-05-05T05:42:00.000Z | from datetime import datetime
from sqlalchemy import and_
from Application import db
from Application.models import Researchers, LocationPoints
def update_researcher_timestamp(device_id, time):
    """Set ``last_updated`` for the researcher identified by ``device_id``.

    If no researcher row exists yet, a placeholder row (name/surname set
    to the string 'None') is created carrying the timestamp instead.
    """
    researcher = Researchers.query.filter(Researchers.phone_id == device_id).first()
    if researcher:
        researcher.last_updated = time
        commit_and_flush(researcher)
    else:
        insert_or_update_existing_researcher(device_id, 'None', 'None', time)
def insert_location_point_in_db(device_id, latitude, longitude, timestamp):
    """Persist one GPS fix for the phone identified by ``device_id``."""
    lp = LocationPoints(phone_id=device_id, latitude=latitude, longitude=longitude, timestamp=timestamp)
    commit_and_flush(lp)
def insert_or_update_existing_researcher(device_id, name, surname, timestamp=None):
    """Create a researcher row for ``device_id`` or update its name/surname.

    ``timestamp`` is only applied when the row is created for the first
    time; existing rows keep their ``last_updated`` value.
    """
    researcher = Researchers.query.filter(Researchers.phone_id == device_id).first()
    if not researcher:
        researcher = Researchers(phone_id=device_id, name=name, surname=surname, last_updated=timestamp)
    else:
        researcher.name = name
        researcher.surname = surname
    commit_and_flush(researcher)
def commit_and_flush(r):
    """Add ``r`` to the SQLAlchemy session and commit, rolling back on error.

    Database errors are deliberately swallowed (best-effort semantics kept
    from the original code); the session is always closed afterwards.
    """
    try:
        db.session.add(r)
        db.session.commit()
        db.session.flush()
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; DB failures still just roll back.
        db.session.rollback()
    finally:
        db.session.close()
def get_all_researchers_from_db():
    """Return every ``Researchers`` row."""
    return Researchers.query.all()
def get_location_points_with_id(phone_id):
    """Return all ``LocationPoints`` rows recorded by the given phone."""
    return LocationPoints.query.filter(LocationPoints.phone_id == phone_id).all()
def get_entries_with_phone_id(device_id):
    """Return the serialized location history of one device."""
    points = get_location_points_with_id(device_id)
    return [point.serialize for point in points]
def get_filtered_entries_from_db(device_id, start_unix_time, end_unix_time):
    """Serialized locations of one device in the [start, end) unix-time window.

    NOTE(review): ``fromtimestamp`` converts to local time -- presumably the
    stored timestamps are local too; confirm against the writers.
    """
    # ``fromtimestamp`` is a classmethod, so call it on the class directly
    # instead of via a throwaway ``datetime.now()`` instance.
    start_time = datetime.fromtimestamp(float(start_unix_time))
    end_time = datetime.fromtimestamp(float(end_unix_time))
    in_window = and_(
        LocationPoints.timestamp >= start_time,
        LocationPoints.timestamp < end_time,
        LocationPoints.phone_id == device_id,
    )
    matches = LocationPoints.query.filter(in_window).all()
    return [point.serialize for point in matches]
def get_locations_for_phone_ids(selected_phone_ids):
    """Serialized locations for every device in ``selected_phone_ids``."""
    matches = LocationPoints.query.filter(
        LocationPoints.phone_id.in_(selected_phone_ids)).all()
    return [point.serialize for point in matches]
| 33.256757 | 104 | 0.747664 |
acf4ababb9347d7130b63ef89f1b6e0d6d0e67be | 1,244 | py | Python | test/test_patch_apply.py | MFreidank/tensorflow-determinism | 4194f2ea4e453c0024b621f2db1ae7aa953b6f3e | [
"Apache-2.0"
] | 209 | 2020-06-30T18:58:49.000Z | 2022-03-25T20:26:09.000Z | test/test_patch_apply.py | MFreidank/tensorflow-determinism | 4194f2ea4e453c0024b621f2db1ae7aa953b6f3e | [
"Apache-2.0"
] | 23 | 2020-06-26T21:42:24.000Z | 2022-01-21T21:29:20.000Z | test/test_patch_apply.py | MFreidank/tensorflow-determinism | 4194f2ea4e453c0024b621f2db1ae7aa953b6f3e | [
"Apache-2.0"
] | 15 | 2020-07-20T16:48:16.000Z | 2022-02-18T07:12:14.000Z | # Copyright 2019 NVIDIA Corporation. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import tensorflow as tf
sys.path.insert(0, '..')
expected_exception = None
if len(sys.argv) > 2 and sys.argv[1] == "--expected-exception":
expected_exception_string = sys.argv[2]
if expected_exception_string == "TypeError":
expected_exception = TypeError
from fwd9m.tensorflow import patch
try:
patch()
except expected_exception as e:
print("Expected exception (%s) caught: " % expected_exception_string + str(e))
sys.exit(0)
| 32.736842 | 80 | 0.717846 |
acf4ac456f1cbcb4ca1f8f8103a130a488115259 | 6,599 | py | Python | dipy/io/utils.py | stefanv/dipy | 4d4518861a796502826f053c17161487db126487 | [
"BSD-3-Clause"
] | null | null | null | dipy/io/utils.py | stefanv/dipy | 4d4518861a796502826f053c17161487db126487 | [
"BSD-3-Clause"
] | null | null | null | dipy/io/utils.py | stefanv/dipy | 4d4518861a796502826f053c17161487db126487 | [
"BSD-3-Clause"
] | null | null | null | ''' Utility functions for file formats '''
import sys
import numpy as np
# True when this platform is little-endian.
sys_is_le = sys.byteorder == 'little'
# Numpy-style struct codes for the native and byte-swapped byte orders.
# (Conditional expressions replace the fragile ``cond and a or b`` idiom.)
native_code = '<' if sys_is_le else '>'
swapped_code = '>' if sys_is_le else '<'

# Default compression level injected when writing .gz / .bz2 via allopen().
default_compresslevel = 1

endian_codes = (  # numpy code, aliases
    ('<', 'little', 'l', 'le', 'L', 'LE'),
    ('>', 'big', 'BIG', 'b', 'be', 'B', 'BE'),
    (native_code, 'native', 'n', 'N', '=', '|', 'i', 'I'),
    (swapped_code, 'swapped', 's', 'S', '!'))
# We'll put these into the Recoder class after we define it
class Recoder(object):
    ''' class to return canonical code(s) from code or aliases

    The concept is a lot easier to read in the implementation and
    tests than it is to explain, so...

    >>> # If you have some codes, and several aliases, like this:
    >>> code1 = 1; aliases1=['one', 'first']
    >>> code2 = 2; aliases2=['two', 'second']
    >>> # You might want to do this:
    >>> codes = [[code1]+aliases1,[code2]+aliases2]
    >>> recodes = Recoder(codes)
    >>> recodes.code['one']
    1
    >>> recodes.code['second']
    2
    >>> recodes.code[2]
    2
    >>> # Or maybe you have a code, a label and some aliases
    >>> codes=((1,'label1','one', 'first'),(2,'label2','two'))
    >>> # you might want to get back the code or the label
    >>> recodes = Recoder(codes, fields=('code','label'))
    >>> recodes.code['first']
    1
    >>> recodes.code['label1']
    1
    >>> recodes.label[2]
    'label2'
    >>> # For convenience, you can get the first entered name by
    >>> # indexing the object directly
    >>> recodes[2]
    2
    '''
    def __init__(self, codes, fields=('code',)):
        ''' Create recoder object

        ``codes`` give a sequence of code, alias sequences
        ``fields`` are names by which the entries in these sequences can be
        accessed.

        By default ``fields`` gives the first column the name
        "code".  The first column is the vector of first entries
        in each of the sequences found in ``codes``.  Thence you can
        get the equivalent first column value with ob.code[value],
        where value can be a first column value, or a value in any of
        the other columns in that sequence.

        You can give other columns names too, and access them in the
        same way - see the examples in the class docstring.

        Parameters
        ------------
        codes : sequence of sequences
            Each sequence defines values (codes) that are equivalent
        fields : {('code',) string sequence}, optional
            names by which elements in sequences can be accessed
        '''
        self.fields = fields
        self.field1 = {} # a placeholder for the check below
        # Each field name becomes a dict attribute on the instance, mapping
        # *any* alias in a row to that row's value for this field.
        for name in fields:
            if name in self.__dict__:
                raise KeyError('Input name %s already in object dict'
                               % name)
            self.__dict__[name] = {}
        # field1 aliases the dict of the first (canonical) column.
        self.field1 = self.__dict__[fields[0]]
        self.add_codes(codes)

    def add_codes(self, codes):
        ''' Add codes to object

        >>> codes = ((1, 'one'), (2, 'two'))
        >>> rc = Recoder(codes)
        >>> rc.value_set() == set((1,2))
        True
        >>> rc.add_codes(((3, 'three'), (1, 'first')))
        >>> rc.value_set() == set((1,2,3))
        True
        '''
        # For every alias ``val`` in a row, record the row's value for each
        # named field (column index ``ind``).
        for vals in codes:
            for val in vals:
                for ind, name in enumerate(self.fields):
                    self.__dict__[name][val] = vals[ind]

    def __getitem__(self, key):
        ''' Return value from field1 dictionary (first column of values)

        Returns same value as ``obj.field1[key]`` and, with the
        default initializing ``fields`` argument of fields=('code',),
        this will return the same as ``obj.code[key]``

        >>> codes = ((1, 'one'), (2, 'two'))
        >>> Recoder(codes)['two']
        2
        '''
        return self.field1[key]

    def keys(self):
        ''' Return all available code and alias values

        Returns same value as ``obj.field1.keys()`` and, with the
        default initializing ``fields`` argument of fields=('code',),
        this will return the same as ``obj.code.keys()``

        >>> codes = ((1, 'one'), (2, 'two'), (1, 'repeat value'))
        >>> sorted(Recoder(codes).keys(), key=str)  # ordered for doctest
        [1, 2, 'one', 'repeat value', 'two']
        '''
        return self.field1.keys()

    def value_set(self, name=None):
        ''' Return set of possible returned values for column

        By default, the column is the first column.

        Returns same values as ``set(obj.field1.values())`` and,
        with the default initializing ``fields`` argument of
        fields=('code',), this will return the same as
        ``set(obj.code.values())``

        Parameters
        ------------
        name : {None, string}
            Where default of None gives result for first column

        Returns
        ---------
        val_set : set
            set of all values for `name`

        Examples
        -----------
        >>> codes = ((1, 'one'), (2, 'two'), (1, 'repeat value'))
        >>> vs = Recoder(codes).value_set()
        >>> vs == set([1, 2]) # Sets are not ordered, hence this test
        True
        >>> rc = Recoder(codes, fields=('code', 'label'))
        >>> rc.value_set('label') == set(('one', 'two', 'repeat value'))
        True
        '''
        if name is None:
            d = self.field1
        else:
            d = self.__dict__[name]
        return set(d.values())
# Endian code aliases: shadow the raw tuple above with a Recoder so any
# alias (e.g. 'little', 'le', '<') maps to its canonical numpy code.
endian_codes = Recoder(endian_codes)
def allopen(fname, *args, **kwargs):
''' Generic file-like object open
If input ``fname`` already looks like a file, pass through.
If ``fname`` ends with recognizable compressed types, use python
libraries to open as file-like objects (read or write)
Otherwise, use standard ``open``.
'''
if hasattr(fname, 'write'):
return fname
if args:
mode = args[0]
elif 'mode' in kwargs:
mode = kwargs['mode']
else:
mode = 'rb'
if fname.endswith('.gz'):
if ('w' in mode and
len(args) < 2 and
not 'compresslevel' in kwargs):
kwargs['compresslevel'] = default_compresslevel
import gzip
opener = gzip.open
elif fname.endswith('.bz2'):
if ('w' in mode and
len(args) < 3 and
not 'compresslevel' in kwargs):
kwargs['compresslevel'] = default_compresslevel
import bz2
opener = bz2.BZ2File
else:
opener = open
return opener(fname, *args, **kwargs)
| 31.574163 | 72 | 0.559782 |
acf4acab3577affcf2c2a69a23957d573bd9b46e | 596 | py | Python | 21. Merge Two Sorted Lists/solution.py | Rukeith/leetcode | 001aaf83968889d0cbc634dcc3bd1b59728d1ecc | [
"MIT"
] | 3 | 2018-06-19T08:36:32.000Z | 2019-03-25T04:05:04.000Z | 21. Merge Two Sorted Lists/solution.py | Rukeith/leetcode | 001aaf83968889d0cbc634dcc3bd1b59728d1ecc | [
"MIT"
] | null | null | null | 21. Merge Two Sorted Lists/solution.py | Rukeith/leetcode | 001aaf83968889d0cbc634dcc3bd1b59728d1ecc | [
"MIT"
] | null | null | null | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def mergeTwoLists(self, l1, l2):
        """Merge two already-sorted linked lists into one sorted list.

        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        # Dummy sentinel node avoids special-casing the head.
        sentinel = tail = ListNode(0)
        while l1 and l2:
            if l1.val < l2.val:
                tail.next, l1 = l1, l1.next
            else:
                # On ties, take from l2 (matches the original ordering).
                tail.next, l2 = l2, l2.next
            tail = tail.next
        # At most one list is non-empty now; splice on its remainder.
        tail.next = l1 or l2
        return sentinel.next
acf4acaed6b9061c26c90f0545ba5b792635f559 | 2,312 | py | Python | tests/test_connection.py | geronimo-iia/airflow-indexima | 7dc9aec646f7f75cfa90e320fff86549f98185c2 | [
"MIT"
] | 2 | 2019-12-04T15:51:49.000Z | 2019-12-20T09:18:01.000Z | tests/test_connection.py | geronimo-iia/airflow-indexima | 7dc9aec646f7f75cfa90e320fff86549f98185c2 | [
"MIT"
] | null | null | null | tests/test_connection.py | geronimo-iia/airflow-indexima | 7dc9aec646f7f75cfa90e320fff86549f98185c2 | [
"MIT"
] | null | null | null | from airflow_indexima.connection import apply_hive_extra_setting, extract_hive_extra_setting
def test_apply_hive_extra_setting_with_nothing(indexima_connection):
    """With no keyword settings, extra becomes an empty JSON object."""
    assert indexima_connection.extra is None
    updated = apply_hive_extra_setting(connection=indexima_connection)
    assert updated.extra == "{}"
def test_apply_hive_extra_setting_with_attribute_1(indexima_connection):
    """All four supported settings are serialized into the extra JSON."""
    updated = apply_hive_extra_setting(
        connection=indexima_connection,
        auth="CUSTOM",
        kerberos_service_name="my_service",
        timeout_seconds=90,
        socket_keepalive=True,
    )
    expected = (
        '{"auth": "CUSTOM", "kerberos_service_name": "my_service", '
        '"timeout_seconds": 90, "socket_keepalive": true}'
    )
    assert updated.extra == expected
def test_apply_hive_extra_setting_with_attribute_2(indexima_connection):
    """Alternative values (NONE auth, keepalive off) serialize correctly."""
    updated = apply_hive_extra_setting(
        connection=indexima_connection,
        auth="NONE",
        kerberos_service_name="my_service",
        timeout_seconds=91,
        socket_keepalive=False,
    )
    expected = (
        '{"auth": "NONE", "kerberos_service_name": "my_service", '
        '"timeout_seconds": 91, "socket_keepalive": false}'
    )
    assert updated.extra == expected
def test_extract_hive_extra_setting_1(indexima_connection):
    """Settings written by apply round-trip through extract."""
    updated = apply_hive_extra_setting(
        connection=indexima_connection,
        auth="NONE",
        kerberos_service_name="my_service",
        timeout_seconds=91,
        socket_keepalive=False,
    )
    extracted = extract_hive_extra_setting(connection=updated)
    assert extracted == ('NONE', 'my_service', 91, False)
def test_extract_hive_extra_setting_2(indexima_connection):
    """A missing kerberos_service_name extracts as None."""
    updated = apply_hive_extra_setting(
        connection=indexima_connection, auth="LDAP", timeout_seconds=91, socket_keepalive=True
    )
    extracted = extract_hive_extra_setting(connection=updated)
    assert extracted == ('LDAP', None, 91, True)
def test_extract_hive_extra_setting_without_data_1(indexima_connection):
    """Only timeout_seconds set: every other field extracts as None."""
    updated = apply_hive_extra_setting(connection=indexima_connection, timeout_seconds=90)
    assert extract_hive_extra_setting(connection=updated) == (None, None, 90, None)
def test_extract_hive_extra_setting_without_data_2(indexima_connection):
    """No settings at all: every field extracts as None."""
    updated = apply_hive_extra_setting(connection=indexima_connection)
    assert extract_hive_extra_setting(connection=updated) == (None, None, None, None)
| 35.030303 | 94 | 0.739619 |
acf4ad3e58660784e9ad155ec3b27520842d17fd | 3,406 | py | Python | CircuitPython_RGBMatrix/simple_scroller/code.py | claycooper/Adafruit_Learning_System_Guides | 890431bd4b9df929bc601e5886c2a735d89814f9 | [
"MIT"
] | null | null | null | CircuitPython_RGBMatrix/simple_scroller/code.py | claycooper/Adafruit_Learning_System_Guides | 890431bd4b9df929bc601e5886c2a735d89814f9 | [
"MIT"
] | null | null | null | CircuitPython_RGBMatrix/simple_scroller/code.py | claycooper/Adafruit_Learning_System_Guides | 890431bd4b9df929bc601e5886c2a735d89814f9 | [
"MIT"
] | null | null | null | # SPDX-FileCopyrightText: 2020 Jeff Epler for Adafruit Industries
#
# SPDX-License-Identifier: MIT
# This example implements a simple two line scroller using
# Adafruit_CircuitPython_Display_Text. Each line has its own color
# and it is possible to modify the example to use other fonts and non-standard
# characters.
import adafruit_display_text.label
import board
import displayio
import framebufferio
import rgbmatrix
import terminalio
# If there was a display before (protomatter, LCD, or E-paper), release it so
# we can create ours
displayio.release_displays()

# This next call creates the RGB Matrix object itself. It has the given width
# and height. bit_depth can range from 1 to 6; higher numbers allow more color
# shades to be displayed, but increase memory usage and slow down your Python
# code. If you just want to show primary colors plus black and white, use 1.
# Otherwise, try 3, 4 and 5 to see which effect you like best.
#
# These lines are for the Feather M4 Express. If you're using a different board,
# check the guide to find the pins and wiring diagrams for your board.
# If you have a matrix with a different width or height, change that too.
# If you have a 16x32 display, try with just a single line of text.
matrix = rgbmatrix.RGBMatrix(
    width=64, height=32, bit_depth=1,
    rgb_pins=[board.D6, board.D5, board.D9, board.D11, board.D10, board.D12],
    addr_pins=[board.A5, board.A4, board.A3, board.A2],
    clock_pin=board.D13, latch_pin=board.D0, output_enable_pin=board.D1)

# Associate the RGB matrix with a Display so that we can use displayio features
# (auto_refresh=False: the main loop below calls display.refresh() itself)
display = framebufferio.FramebufferDisplay(matrix, auto_refresh=False)

# Create two lines of text to scroll. Besides changing the text, you can also
# customize the color and font (using Adafruit_CircuitPython_Bitmap_Font).
# To keep this demo simple, we just used the built-in font.
# The Y coordinates of the two lines were chosen so that they looked good
# but if you change the font you might find that other values work better.
line1 = adafruit_display_text.label.Label(
    terminalio.FONT,
    color=0xff0000,
    text="This scroller is brought to you by CircuitPython RGBMatrix")
line1.x = display.width  # start just off the right-hand edge
line1.y = 8
line2 = adafruit_display_text.label.Label(
    terminalio.FONT,
    color=0x0080ff,
    text="Hello to all CircuitPython contributors worldwide <3")
line2.x = display.width
line2.y = 24

# Put each line of text into a Group, then show that group.
g = displayio.Group()
g.append(line1)
g.append(line2)
display.show(g)
# Scoot one label a pixel to the left; once it has scrolled completely off
# the left edge, send it back to the far right.  Used for both lines in the
# main loop below.
def scroll(line):
    """Advance ``line`` one pixel leftward, wrapping around the display."""
    line.x -= 1
    if line.x < -line.bounding_box[2]:
        line.x = display.width
# Same as scroll() but moving right instead of left.  Try switching which
# function is called for line2 in the loop below!
def reverse_scroll(line):
    """Advance ``line`` one pixel rightward, wrapping around the display."""
    line.x += 1
    if line.x >= display.width:
        line.x = -line.bounding_box[2]
# You can add more effects in this loop. For instance, maybe you want to set the
# color of each label to a different value.
while True:
    scroll(line1)
    scroll(line2)
    #reverse_scroll(line2)
    # Push the frame to the panel; 0 disables the frame-rate floor so the
    # loop runs as fast as it can.
    display.refresh(minimum_frames_per_second=0)
acf4af2c22c79e0d8c2696d803ee096e9155e790 | 5,868 | py | Python | day21/solve2.py | tomaszmj/AdventOfCode2020 | 28b59176d969b00e9630bd7a47a77bcd8275567b | [
"MIT"
] | null | null | null | day21/solve2.py | tomaszmj/AdventOfCode2020 | 28b59176d969b00e9630bd7a47a77bcd8275567b | [
"MIT"
] | null | null | null | day21/solve2.py | tomaszmj/AdventOfCode2020 | 28b59176d969b00e9630bd7a47a77bcd8275567b | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import Set, Dict, NamedTuple, List
class FoodEntry(NamedTuple):
ingredients: List[str]
allergens: List[str]
@classmethod
def from_string(cls, line: str):
allergens_index = line.find(" (")
ingredients_list = line[:allergens_index].split(" ")
allergens_list_str = line[allergens_index:]
if not allergens_list_str.startswith(" (contains ") or not allergens_list_str.endswith(")"):
raise ValueError(f"invalid allergens list on line: {line}")
allergens_list = allergens_list_str[len("(contains "):-1].replace(" ", "").split(",")
ingredients_list.sort()
allergens_list.sort()
return FoodEntry(ingredients=ingredients_list, allergens=allergens_list)
class IngredientPerAllergenEntry(NamedTuple):
allergen: str
possible_ingredients: List[str]
class Data:
def __init__(self):
self._entries = [] # const after all add_entry calls
self._possible_ingredients_set_per_allergen: Dict[str, Set[str]] = {} # const after all add_entry calls
self._all_ingredients_set: Set[str] = set() # const after all add_entry calls
self._choices: List[IngredientPerAllergenEntry] = [] # const after _initialize_internals
self._allergen_per_ingredient: Dict[str, str] = {} # variable used in _solve
self._ingredient_per_allergen: Dict[str, str] = {} # variable used in _solve
def add_entry(self, entry: FoodEntry):
self._entries.append(entry)
self._all_ingredients_set.update(entry.ingredients)
for allergen in entry.allergens:
if allergen in self._possible_ingredients_set_per_allergen:
self._possible_ingredients_set_per_allergen[allergen].intersection_update(set(entry.ingredients))
else:
self._possible_ingredients_set_per_allergen[allergen] = set(entry.ingredients)
def solve(self):
self._initialize_internals()
print(f"staring solve with {len(self._all_ingredients_set)} ingredients and "
f"{len(self._possible_ingredients_set_per_allergen)} allergens, choices:\n"
f"{self.possible_ingredients_per_allergen_to_str()}")
return self._solve()
def possible_ingredients_per_allergen_to_str(self) -> str:
return "\n".join(
f"{choice.allergen}: {choice.possible_ingredients} (total {len(choice.possible_ingredients)})"
for choice in self._choices
)
def chosen_ingredient_per_allergen_to_str(self) -> str:
allergens = (choice.allergen for choice in self._choices)
return "\n".join(
(f"{allergen}: {self._ingredient_per_allergen.get(allergen, '')}" for allergen in allergens)
)
def canonical_dangerous_ingredients(self) -> str:
data = list((allergen, ingredient) for allergen, ingredient in self._ingredient_per_allergen.items())
data.sort(key=lambda d: d[0]) # sort by allergen
return ",".join(d[1] for d in data) # return dangerous ingredients list sorted by allergen
def _solve(self) -> bool:
if len(self._ingredient_per_allergen) == len(self._choices):
is_valid = self._validate()
print(f"iteration of backtracking finished, validation: {is_valid}")
return is_valid
entry = self._choices[len(self._ingredient_per_allergen)]
for ingredient in entry.possible_ingredients:
if ingredient in self._allergen_per_ingredient:
continue
self._allergen_per_ingredient[ingredient] = entry.allergen
self._ingredient_per_allergen[entry.allergen] = ingredient
if self._solve():
return True
# else - backtrack...
del self._allergen_per_ingredient[ingredient]
del self._ingredient_per_allergen[entry.allergen]
return False
# In my first solution version _validate was crucial for backtracking.
# Then, with intersection of _possible_ingredients_set_per_allergen,
# it turned of that it is not really needed, i.e. the first finished iteration of
# backtracking finds the correct solution. I am leaving it here just as a sanity check.
def _validate(self) -> bool:
for i, entry in enumerate(self._entries):
allergens = set()
for ingredient in entry.ingredients:
allergen = self._allergen_per_ingredient.get(ingredient, "")
if not allergen:
continue
if allergen in allergens:
return False # allergens cannot repeat
allergens.add(allergen)
for expected_allergen in entry.allergens:
if expected_allergen not in allergens:
return False
return True
def _initialize_internals(self):
self._allergen_per_ingredient: Dict[str, str] = {}
self._ingredient_per_allergen: Dict[str, str] = {}
self._choices = []
for allergen, possible_ingredients in self._possible_ingredients_set_per_allergen.items():
entry = IngredientPerAllergenEntry(allergen=allergen, possible_ingredients=list(possible_ingredients))
self._choices.append(entry)
self._choices.sort(key=lambda c: len(c.possible_ingredients))
def main():
data = Data()
with open("data.txt") as f:
for i, line in enumerate(f):
entry = FoodEntry.from_string(line.strip())
data.add_entry(entry)
if data.solve():
canonical_dangerous_ingredients = data.canonical_dangerous_ingredients()
print(f"solve ok:\n{data.chosen_ingredient_per_allergen_to_str()}\nanswer: {canonical_dangerous_ingredients}")
else:
print("solve failed")
if __name__ == "__main__":
main()
| 44.454545 | 118 | 0.667007 |
acf4af3c4ca9d3f032c2afaeea5fc52805b147d4 | 2,856 | py | Python | awards/tests.py | HannahChege/Awards | 0dcf390ae85d4caf2076884e085eb91407b017ec | [
"Unlicense"
] | null | null | null | awards/tests.py | HannahChege/Awards | 0dcf390ae85d4caf2076884e085eb91407b017ec | [
"Unlicense"
] | null | null | null | awards/tests.py | HannahChege/Awards | 0dcf390ae85d4caf2076884e085eb91407b017ec | [
"Unlicense"
] | null | null | null | from django.test import TestCase
from .models import Profile, Project
from django.contrib.auth.models import User
class ProfileTestClass(TestCase):
    """Unit tests for the Profile model and its helper methods."""

    def setUp(self):
        # A Profile requires an auth User; create a fresh one for every test.
        self.user = User.objects.create(id=1, username='hannah')
        self.profile = Profile(dp='hannah.jpg', bio='Life is too short', contact="0711139310", user=self.user)

    def test_instance(self):
        """Profile objects should be instances of the Profile model."""
        self.assertTrue(isinstance(self.profile, Profile))

    def test_save_method(self):
        """save_profile() should persist the profile to the database."""
        self.profile.save_profile()
        profiles = Profile.objects.all()
        self.assertTrue(len(profiles) > 0)

    def test_delete_method(self):
        """A saved profile should be removable again."""
        self.profile.save_profile()
        # Bugfix: the original test only saved and never deleted anything.
        # Delete via the Django model API (Profile may not define a custom
        # delete_profile helper like Project does — TODO confirm).
        self.profile.delete()
        self.assertEqual(Profile.objects.count(), 0)

    def test_update_method(self):
        """A profile's bio can be updated through a queryset update."""
        self.profile.save_profile()
        # Bugfix: the original filtered on 'LIfe is too short' (typo), which
        # matched nothing, and asserted nothing afterwards.
        Profile.objects.filter(bio='Life is too short').update(bio='You only live once')
        updated = Profile.objects.get(user_id=self.profile.user_id)
        self.assertEqual(updated.bio, 'You only live once')

    def test_get_profile_by_id(self):
        """get_by_id() should return the same profile as a direct lookup."""
        self.profile.save_profile()
        this_pro = self.profile.get_by_id(self.profile.user_id)
        profile = Profile.objects.get(user_id=self.profile.user_id)
        # Bugfix: assertTrue(a, b) treats b as a failure message and never
        # compares the two values; use assertEqual instead.
        self.assertEqual(this_pro, profile)
class ProjectTestClass(TestCase):
    """Unit tests for the Project model and its helper methods."""

    def setUp(self):
        self.user = User.objects.create(id=1, username='hannah')
        # A Project is linked to both a Profile and a User.
        self.profile = Profile(dp='pic.jpg', bio='LIfe is too short', contact="0711139310", user=self.user)
        self.profile.save_profile()
        self.project = Project(title='projects', image='pic.jpg', description='projects',
                               url='https://www.test.com', profile=self.profile, user=self.user)

    def test_instance(self):
        """Project objects should be instances of the Project model."""
        self.assertTrue(isinstance(self.project, Project))

    def test_save_method(self):
        """save_project() should persist the project to the database."""
        self.project.save_project()
        projects = Project.objects.all()
        self.assertTrue(len(projects) > 0)

    def test_delete_method(self):
        """A saved project should be removable via delete_project()."""
        self.project.save_project()
        self.project.delete_project()

    def test_update_method(self):
        """A project's title can be updated through a queryset update."""
        self.project.save_project()
        Project.objects.filter(title='projects').update(title='project')
        projects = Project.objects.get(title='project')
        # Bugfix: assertTrue(a, b) treats b as a failure message and never
        # compares the title; use assertEqual instead.
        self.assertEqual(projects.title, 'project')
| 31.733333 | 154 | 0.637255 |
acf4af90d9d1d23557a1a91c0b60751fed02b1e4 | 915 | py | Python | example/plugins/documents.py | sakost/kutana | 7695902803f17e1ce6109b5f9a8a7c24126d322f | [
"MIT"
] | null | null | null | example/plugins/documents.py | sakost/kutana | 7695902803f17e1ce6109b5f9a8a7c24126d322f | [
"MIT"
] | null | null | null | example/plugins/documents.py | sakost/kutana | 7695902803f17e1ce6109b5f9a8a7c24126d322f | [
"MIT"
] | null | null | null | from kutana import Plugin, Attachment, get_path
# Single shared plugin instance that kutana's loader picks up from this module.
plugin = Plugin(name="Documents", description="Send documents")
@plugin.on_commands(["documents"])
async def _(msg, ctx):
    """Reply with three attachment flavours: a document, a graffiti (VK-only)
    and an audio message, using the bundled asset files."""

    def read_asset(relative_path):
        # Load a bundled asset file as raw bytes.
        with open(get_path(__file__, relative_path), "rb") as stream:
            return stream.read()

    # Plain document attachment
    document = Attachment.new(read_asset("assets/pizza.png"), "pizza.png")
    await ctx.reply("Document", attachments=document)

    # Graffiti (special for vk); some backends reject this attachment type
    graffiti = Attachment.new(read_asset("assets/pizza.png"), "pizza.png", type="graffiti")
    try:
        await ctx.reply("Graffiti", attachments=graffiti)
    except ValueError:
        await ctx.reply("Can't upload this type of attachments")

    # Audio (voice) message
    voice = Attachment.new(read_asset("assets/audio.ogg"), "audio.ogg", "voice")
    await ctx.reply("Audio message", attachments=voice)
acf4afa54af0d4ca21374fc2a89cebade9367d70 | 3,893 | py | Python | util/auto_gen.py | goosen78/gQuant | cc0bff4ac524ccfbe8097acd647a8b3fad5fe578 | [
"Apache-2.0"
] | null | null | null | util/auto_gen.py | goosen78/gQuant | cc0bff4ac524ccfbe8097acd647a8b3fad5fe578 | [
"Apache-2.0"
] | null | null | null | util/auto_gen.py | goosen78/gQuant | cc0bff4ac524ccfbe8097acd647a8b3fad5fe578 | [
"Apache-2.0"
] | null | null | null | import sys
import inspect
from pathlib import Path
# Make the repository's sibling "modules" directory importable before the
# nemo_gquant_modules import below: resolve this file, go one directory up,
# and prepend <repo>/modules to sys.path.
file_ = Path(__file__)
modulespath = '{}/modules'.format(file_.resolve().parents[1])
sys.path.insert(1, modulespath)
from nemo.backends.pytorch.nm import NeuralModule
from nemo_gquant_modules.nemoBaseNode import FeedProperty
TEMPLATE = """from gquant.dataframe_flow import Node
from .nemoBaseNode import NeMoBase
import nemo
import {}
"""
CLASS_TEMP = """
class {}(NeMoBase, Node):
def init(self):
NeMoBase.init(self, {})
"""
def _iter_candidate_classes(module):
    """Yield (name, cls) pairs for every class found in nemo submodules of *module*."""
    for _, member in inspect.getmembers(module, inspect.ismodule):
        if member.__package__.startswith('nemo'):
            yield from inspect.getmembers(member, inspect.isclass)


def _incompatible_default(nodecls):
    """Return the first ``__init__`` parameter whose default value is a class
    object or a DataCombination instance (these cannot be expressed in the
    generated wrapper), or None if the signature is compatible.

    May raise (e.g. ValueError from inspect.signature); callers must handle it.
    """
    sig = inspect.signature(nodecls.__init__)
    for name, para in sig.parameters.items():
        if name == 'self':
            # ignore the self parameter
            continue
        # Bugfix: compare against the public inspect.Parameter.empty sentinel
        # with an identity check; the old `!= inspect._empty` used a private
        # name and could invoke a default value's custom __eq__.
        if para.default is not inspect.Parameter.empty:
            if para.default.__class__.__name__ in ('type', 'DataCombination'):
                return para
    return None


def gen_module_file(module, overwrite=None):
    """Generate a gQuant wrapper module for every NeuralModule exported by the
    nemo submodules of *module*.

    Parameters
    ----------
    module : module
        NeMo (sub)package to scan for NeuralModule subclasses.
    overwrite : str or None
        Output file stem; defaults to the last component of module.__name__.

    Side effects: writes ../modules/nemo_gquant_modules/<name>.py and prints
    diagnostics for every skipped class.
    """
    file_str = TEMPLATE.format(module.__name__)
    seen = []
    for name, nodecls in _iter_candidate_classes(module):
        if nodecls in seen:
            # already wrapped (classes are re-exported by several submodules)
            continue
        if not issubclass(nodecls, NeuralModule):
            continue
        if nodecls.__module__ == 'nemo.backends.pytorch.nm':
            # skip the abstract base implementations
            continue
        try:
            bad = _incompatible_default(nodecls)
            if bad is not None:
                print(bad.default, bad)
                print(name, 'find class arg', bad.default.__class__.__name__)
                continue
            class_name = nodecls.__module__ + '.' + nodecls.__name__
            file_str += CLASS_TEMP.format(name + "Node", class_name)
            seen.append(nodecls)
        except Exception as e:
            print(e)
            print(name, 'is not compatible, as it uses instance for input/output ports')
            continue
    module_name = overwrite if overwrite is not None else module.__name__.split('.')[-1]
    with open('../modules/nemo_gquant_modules/' + module_name + '.py', 'w') as f:
        f.write(file_str)
# Generate wrapper modules for every NeMo collection of interest. Each import
# is done lazily right before its generation call.
import nemo.backends.pytorch.tutorials
gen_module_file(nemo.backends.pytorch.tutorials)
import nemo.backends.pytorch.common
gen_module_file(nemo.backends.pytorch.common)
import nemo.collections.asr
gen_module_file(nemo.collections.asr)
import nemo.collections.cv
gen_module_file(nemo.collections.cv)
import nemo.collections.nlp.nm
# nlp nests one package level deeper, so pin the output file name explicitly
gen_module_file(nemo.collections.nlp.nm, 'nlp')
import nemo.collections.simple_gan
gen_module_file(nemo.collections.simple_gan)
import nemo.collections.tts
gen_module_file(nemo.collections.tts)
| 36.726415 | 141 | 0.504238 |
acf4b06d167447e4384dcfaf58d9d259f0c3b215 | 15,126 | py | Python | amfe/constraint/constraint_formulation_lagrange_multiplier.py | ma-kast/AMfe | 99686cc313fb8904a093fb42e6cf0b38f8cfd791 | [
"BSD-3-Clause"
] | null | null | null | amfe/constraint/constraint_formulation_lagrange_multiplier.py | ma-kast/AMfe | 99686cc313fb8904a093fb42e6cf0b38f8cfd791 | [
"BSD-3-Clause"
] | null | null | null | amfe/constraint/constraint_formulation_lagrange_multiplier.py | ma-kast/AMfe | 99686cc313fb8904a093fb42e6cf0b38f8cfd791 | [
"BSD-3-Clause"
] | null | null | null | #
# Copyright (c) 2018 TECHNICAL UNIVERSITY OF MUNICH, DEPARTMENT OF MECHANICAL ENGINEERING, CHAIR OF APPLIED MECHANICS,
# BOLTZMANNSTRASSE 15, 85748 GARCHING/MUNICH, GERMANY, RIXEN@TUM.DE.
#
# Distributed under 3-Clause BSD license. See LICENSE file for more information.
#
import numpy as np
from scipy.sparse import csr_matrix, issparse
from scipy.sparse import hstack as sphstack
from scipy.sparse import vstack as spvstack
from .constraint_formulation import ConstraintFormulationBase
__all__ = ['SparseLagrangeMultiplierConstraintFormulation']
class SparseLagrangeMultiplierConstraintFormulation(ConstraintFormulationBase):
    r"""
    Sparse Lagrange multiplier formulation, including optional scaling of the
    constraint equations and penalty augmentation of the stiffness matrix.
    Returns sparse matrices, although it can be used with a non-sparse
    unconstrained system.

    The unconstrained system

    .. math::
        M_{\mathrm{raw}}(u, \dot{u}, t) \ddot{u} + h(u, \dot{u}, t) + B^T \lambda &= p(u, \dot{u}, t) \\
        g_{holo}(u, t) &= 0

    is transformed to :math:`M \ddot{x} + f_{int}(x, \dot{x}, t) = f_{ext}(x, \dot{x}, t)`
    with the augmented state :math:`x = (u, \lambda)`:

    .. math::
        \begin{bmatrix} M_{raw} & 0 \\
                        0 & 0
        \end{bmatrix} \begin{bmatrix} \ddot{u} \\
        \ddot{\lambda} \end{bmatrix} +
        \begin{bmatrix} h(u, \dot{u}, t) + s \cdot B^T \lambda + ps B^T g(u, t) \\
        s g(u, t)
        \end{bmatrix} =
        \begin{bmatrix} p(u, \dot{u}, t) \\
        0
        \end{bmatrix}

    and the linearized tangential stiffness matrix becomes

    .. math::
        \begin{bmatrix} K_{raw} + psB^T B & sB^T \\
                        sB & 0
        \end{bmatrix}

    with :math:`K_{\mathrm{raw}} = \partial(h-p)/\partial u` and
    :math:`D_{\mathrm{raw}} = \partial(h-p)/\partial \dot{u}`. The scaling
    factor *s* scales the constraint equations and the penalty factor *p*
    augments the stiffness matrix (Penalization) for better conditioning.

    Attributes
    ----------
    _M_full: csr_matrix
        Preallocated csr_matrix for M
    _D_full: csr_matrix
        Preallocated csr_matrix for D
    _K_full: csr_matrix
        Preallocated csr_matrix for K
    _f_int_full: numpy.array
        Preallocated ndarray for f_int
    _f_ext_full: numpy.array
        Preallocated ndarray for f_ext
    _scaling: float
        Scaling factor for scaling the constraint equation
    _penalty: float or None
        Penalty factor for Penalization of stiffness matrix K to achieve better conditioning
    """

    def __init__(self, no_of_dofs_unconstrained, M_func, h_func, B_func, p_func=None,
                 jac_h_u=None, jac_h_du=None, jac_p_u=None, jac_p_du=None,
                 g_func=None, b_func=None, a_func=None):
        super().__init__(no_of_dofs_unconstrained, M_func, h_func, B_func, p_func,
                         jac_h_u, jac_h_du, jac_p_u, jac_p_du,
                         g_func, b_func, a_func)
        # Number of holonomic constraints, deduced from the constraint function
        self._no_of_constraints = len(self._g_func(np.zeros(self._no_of_dofs_unconstrained), 0.0))
        # Lazily preallocated system matrices/vectors (built on first use)
        self._M_full = None
        self._D_full = None
        self._K_full = None
        self._f_int_full = None
        self._f_ext_full = None
        self._scaling = 1.0
        self._penalty = None

    def _preallocate_M(self, M):
        """
        Internal preallocation of the full mass matrix.

        Parameters
        ----------
        M: csr_matrix
            matrix containing the pattern of the M matrix before constraint formulation is carried out

        Returns
        -------
        M_full: csr_matrix
            zeroed matrix of the constrained size with the same sparsity pattern
        """
        if not isinstance(M, csr_matrix):
            if issparse(M):
                M = M.tocsr()
            else:
                M = csr_matrix(M)
        # Extend the row pointer with empty rows for the Lagrange multipliers
        indptr = np.concatenate((M.indptr, np.ones(self._no_of_constraints, dtype=M.indptr.dtype) * M.indptr[-1]))
        return csr_matrix((M.data * 0.0, M.indices, indptr),
                          shape=(M.shape[0] + self._no_of_constraints,
                                 M.shape[1] + self._no_of_constraints))

    def _preallocate_D(self, D):
        """
        Internal preallocation of the full linear damping matrix.

        Parameters
        ----------
        D: csr_matrix
            matrix containing the pattern of the D matrix before constraint formulation is carried out

        Returns
        -------
        D_full: csr_matrix
            preallocated matrix that will be returned after constraints are applied
        """
        # The extension scheme is identical to the mass matrix
        return self._preallocate_M(D)

    def _preallocate_f(self):
        """
        Internal preallocation of the f_int and f_ext vectors.

        Returns
        -------
        F_full: numpy.array
            preallocated F array that will be returned after constraints are applied
        """
        return np.zeros(self._no_of_dofs_unconstrained + self._no_of_constraints)

    @property
    def dimension(self):
        """
        Returns the dimension of the system after constraints have been applied

        Returns
        -------
        dim: int
            dimension of the system after constraints are applied
        """
        return self._no_of_dofs_unconstrained + self._no_of_constraints

    def set_options(self, **options):
        """
        Sets options for the Lagrange formulation

        Parameters
        ----------
        options: dict
            Key value dict describing the options to apply

        Notes
        -----
        Available Options:
            - 'scaling': float (scaling factor for constraint function)
            - 'penalty': float or None (scaling factor for Penalty Augmentation (if None, not applied))
        """
        self._scaling = options.get('scaling', self._scaling)
        self._penalty = options.get('penalty', self._penalty)

    def update(self):
        """
        Function that is called by observers if state has changed

        Returns
        -------
        None
        """
        self._no_of_constraints = len(self._g_func(np.zeros(self._no_of_dofs_unconstrained), 0.0))
        # Bugfix: invalidate the cached preallocations, whose shapes depend on
        # the (possibly changed) number of constraints; otherwise stale shapes
        # would be reused on the next evaluation
        self._M_full = None
        self._D_full = None
        self._K_full = None
        self._f_int_full = None
        self._f_ext_full = None

    def u(self, x, t):
        """
        Recovers the displacements of the unconstrained system.

        Parameters
        ----------
        x: numpy.array
            Global state vector of the system
        t: float
            time

        Returns
        -------
        u: numpy.array
            recovered displacements of the unconstrained system
        """
        return x[:self._no_of_dofs_unconstrained]

    def du(self, x, dx, t):
        """
        Recovers the velocities of the unconstrained system.

        Parameters
        ----------
        x: numpy.array
            Global state vector of the system
        dx: numpy.array
            First time derivative of global state vector of the constrained system
        t: float
            time

        Returns
        -------
        du: numpy.array
            recovered velocities of the unconstrained system
        """
        return dx[:self._no_of_dofs_unconstrained]

    def ddu(self, x, dx, ddx, t):
        """
        Recovers the accelerations of the unconstrained system.

        Parameters
        ----------
        x: numpy.array
            Global state vector of the system
        dx: numpy.array
            First time derivative of global state vector of the constrained system
        ddx: numpy.array
            Second time derivative of global state vector of the constrained system
        t: float
            time

        Returns
        -------
        ddu: numpy.array
            recovered accelerations of the unconstrained system
        """
        return ddx[:self._no_of_dofs_unconstrained]

    def lagrange_multiplier(self, x, t):
        """
        Recovers the lagrange multipliers of the unconstrained system

        Parameters
        ----------
        x: numpy.array
            Global state vector of the system
        t: float
            time

        Returns
        -------
        lambda_: numpy.array
            recovered Lagrange multipliers
        """
        return x[self.no_of_dofs_unconstrained:]

    def M(self, x, dx, t):
        r"""
        Returns the constrained mass matrix

        .. math::
            \begin{bmatrix} M_{raw} & 0 \\
                            0 & 0
            \end{bmatrix}

        Parameters
        ----------
        x: numpy.array
            Global state vector of the system
        dx: numpy.array
            First time derivative of global state vector of the constrained system
        t: float
            time

        Returns
        -------
        M: csr_matrix
            Constrained mass matrix
        """
        M = self._M_func(self.u(x, t), self.du(x, dx, t), t)
        if self._M_full is None:
            self._M_full = self._preallocate_M(M)
        if not isinstance(M, csr_matrix):
            if issparse(M):
                M = M.tocsr()
            else:
                M = csr_matrix(M)
        # Write the raw data into the preallocated, larger matrix in place
        self._M_full.indptr = np.concatenate((M.indptr, np.ones(self._no_of_constraints,
                                                                dtype=M.indptr.dtype) * M.indptr[-1]))
        self._M_full.indices = M.indices
        self._M_full.data = M.data
        return self._M_full

    def D(self, x, dx, t):
        r"""
        Returns the constrained damping matrix

        .. math::
            \begin{bmatrix} D_{raw} & 0 \\
                            0 & 0
            \end{bmatrix}

        Parameters
        ----------
        x: numpy.array
            Global state vector of the system
        dx: numpy.array
            First time derivative of global state vector of the constrained system
        t: float
            time

        Returns
        -------
        D: csr_matrix
            Constrained damping matrix

        Raises
        ------
        NotImplementedError
            if no Jacobian of h w.r.t. the velocities was provided
        """
        u = self.u(x, t)
        du = self.du(x, dx, t)
        if self._jac_h_du is not None:
            if self._jac_p_du is not None:
                D = self._jac_h_du(u, du, t) - self._jac_p_du(u, du, t)
            else:
                D = self._jac_h_du(u, du, t)
        else:
            raise NotImplementedError('Numerical differentiation of h is not implemented yet')

        if self._D_full is None:
            self._D_full = self._preallocate_D(D)
        # Bugfix: also accept dense matrices, consistent with _preallocate_M
        # and M(); the old code called .tocsr() unconditionally, which fails
        # for dense arrays
        if not isinstance(D, csr_matrix):
            if issparse(D):
                D = D.tocsr()
            else:
                D = csr_matrix(D)
        self._D_full.indptr = np.concatenate((D.indptr, np.ones(self._no_of_constraints,
                                                                dtype=D.indptr.dtype) * D.indptr[-1]))
        self._D_full.indices = D.indices
        self._D_full.data = D.data
        return self._D_full

    def f_int(self, x, dx, t):
        r"""
        Returns the constrained f_int vector

        .. math::
            \begin{bmatrix} h(u, \dot{u}, t) + s \cdot B^T \lambda + ps B^T g(u, t) \\
            s g(u, t)
            \end{bmatrix}

        (the :math:`ps B^T g` term only if a penalty factor is set)

        Parameters
        ----------
        x: numpy.array
            Global state vector of the system
        dx: numpy.array
            First time derivative of global state vector of the constrained system
        t: float
            time

        Returns
        -------
        f_int: numpy.array
            Constrained f_int vector
        """
        if self._f_int_full is None:
            self._f_int_full = self._preallocate_f()
        u = self.u(x, t)
        du = self.du(x, dx, t)
        B = self._B_func(u, t)
        g = self._g_func(u, t)
        self._f_int_full *= 0.0
        self._f_int_full[:self._no_of_dofs_unconstrained] = self._h_func(u, du, t) + \
            self._scaling * B.T.dot(x[self._no_of_dofs_unconstrained:])
        if self._penalty is not None:
            self._f_int_full[:self.no_of_dofs_unconstrained] += self._penalty * self._scaling * B.T.dot(g)
        self._f_int_full[self._no_of_dofs_unconstrained:] = self._scaling * g
        return self._f_int_full

    def f_ext(self, x, dx, t):
        r"""
        Returns the constrained f_ext vector

        .. math::
            \begin{bmatrix} p(u, \dot{u}, t) \\
            0
            \end{bmatrix}

        Parameters
        ----------
        x: numpy.array
            Global state vector of the system
        dx: numpy.array
            First time derivative of global state vector of the constrained system
        t: float
            time

        Returns
        -------
        f_ext: numpy.array
            Constrained f_ext vector
        """
        if self._f_ext_full is None:
            self._f_ext_full = self._preallocate_f()
        u = self.u(x, t)
        du = self.du(x, dx, t)
        self._f_ext_full *= 0.0
        if self._p_func is not None:
            self._f_ext_full[:self._no_of_dofs_unconstrained] = self._p_func(u, du, t)
        return self._f_ext_full

    def K(self, x, dx, t):
        r"""
        Returns the constrained stiffness matrix

        .. math::
            \begin{bmatrix} K_{raw} + psB^T B & sB^T \\
                            sB & 0
            \end{bmatrix}

        Parameters
        ----------
        x: numpy.array
            Global state vector of the system
        dx: numpy.array
            First time derivative of global state vector of the constrained system
        t: float
            time

        Returns
        -------
        K: csr_matrix
            Constrained stiffness matrix

        Notes
        -----
        Attention: d(B.T@g)/dq is evaluated as B.T@dg/dq, i.e. dB/dq is
        assumed to be zero, because dB/dq could be expensive to evaluate.
        """
        B = self._B_func(self.u(x, t), t)
        K = self._jac_h_u(self.u(x, t), self.du(x, dx, t), t)
        if self._penalty is not None:
            K += self._penalty * self._scaling * B.T.dot(B)
        return spvstack((sphstack((K, self._scaling * B.T), format='csr'),
                         sphstack((self._scaling * B, csr_matrix((self._no_of_constraints,
                                                                  self._no_of_constraints))), format='csr')),
                        format='csr')
| 30.495968 | 119 | 0.542972 |
acf4b1e7c4dc5052658e90470e28654aabd7c8d7 | 6,415 | py | Python | 4_multi_var.py | jooddang/tensorflow-learning | 6f437b4c039f3bc0b0cb1df602619d9abb723c2c | [
"MIT"
] | null | null | null | 4_multi_var.py | jooddang/tensorflow-learning | 6f437b4c039f3bc0b0cb1df602619d9abb723c2c | [
"MIT"
] | null | null | null | 4_multi_var.py | jooddang/tensorflow-learning | 6f437b4c039f3bc0b0cb1df602619d9abb723c2c | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
def primitive_multi_var():
    """Three-feature linear regression with one scalar weight per feature.

    Trains hypothesis = x1*w1 + x2*w2 + x3*w3 + b by gradient descent
    (lr=1e-5, 4001 steps) and prints cost/prediction every 100 steps.
    """
    x1_data = [58.0, 52.0, 53.0, 53.0, 56.0]
    x2_data = [100.0, 64.0, 51.0, 93.0, 79.0]
    x3_data = [51.0, 58.0, 63.0, 74.0, 85.0]
    y_data = [187.0, 184.0, 166.0, 168.0, 169.0]

    x1 = tf.placeholder(tf.float32)
    x2 = tf.placeholder(tf.float32)
    x3 = tf.placeholder(tf.float32)
    Y = tf.placeholder(tf.float32)

    w1 = tf.Variable(tf.random_normal([1]), name='weight1')
    w2 = tf.Variable(tf.random_normal([1]), name='weight2')
    w3 = tf.Variable(tf.random_normal([1]), name='weight3')
    b = tf.Variable(tf.random_normal([1]), name='bias')

    hypothesis = x1 * w1 + x2 * w2 + x3 * w3 + b
    cost = tf.reduce_mean(tf.square(hypothesis - Y))
    train = tf.train.GradientDescentOptimizer(learning_rate=1e-5).minimize(cost)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    feed = {x1: x1_data, x2: x2_data, x3: x3_data, Y: y_data}
    for step in range(4001):
        cost_val, hy_val, _ = sess.run([cost, hypothesis, train], feed_dict=feed)
        if step % 100 == 0:
            print(step, "cost: ", cost_val, "prediction: ", hy_val)
def matrix_multi_var():
    """Same regression as primitive_multi_var, vectorised with a weight matrix.

    Uses a single [3, 1] weight matrix and tf.matmul instead of three scalar
    weights; training setup (lr=1e-5, 4001 steps) is unchanged.
    """
    x_data = [[58.0, 100.0, 51.0],
              [52.0, 64.0, 58.0],
              [53.0, 51.0, 63.0],
              [53.0, 93.0, 74.0],
              [56.0, 79.0, 85.0]]
    y_data = [[187.0], [184.0], [166.0], [168.0], [169.0]]

    X = tf.placeholder(tf.float32, shape=[None, 3])
    Y = tf.placeholder(tf.float32, shape=[None, 1])
    W = tf.Variable(tf.random_normal([3, 1]), name='weight')
    b = tf.Variable(tf.random_normal([1]), name='bias')

    hypothesis = tf.matmul(X, W) + b
    cost = tf.reduce_mean(tf.square(hypothesis - Y))
    train = tf.train.GradientDescentOptimizer(learning_rate=1e-5).minimize(cost)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    for step in range(4001):
        cost_val, hy_val, _ = sess.run([cost, hypothesis, train],
                                       feed_dict={X: x_data, Y: y_data})
        if step % 100 == 0:
            print(step, "cost: ", cost_val, "prediction: ", hy_val)
# matrix_multi_var()
def multi_var_from_file():
    """Train the linear model on csv data loaded with numpy, then predict.

    Loads 4_multi_var_from_file.csv (3 feature columns + 1 target column),
    trains for 4001 steps and prints predictions for a few unseen inputs.
    """
    xy = np.loadtxt('4_multi_var_from_file.csv', delimiter=',', dtype=np.float32)
    x_data = xy[:, 0:3]
    y_data = xy[..., [-1]]

    X = tf.placeholder(tf.float32, shape=[None, 3])
    Y = tf.placeholder(tf.float32, shape=[None, 1])
    W = tf.Variable(tf.random_normal([3, 1]), name='weight')
    b = tf.Variable(tf.random_normal([1]), name='bias')

    hypothesis = tf.matmul(X, W) + b
    cost = tf.reduce_mean(tf.square(hypothesis - Y))
    train = tf.train.GradientDescentOptimizer(learning_rate=1e-5).minimize(cost)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # training loop
    for step in range(4001):
        cost_val, hy_val, _ = sess.run([cost, hypothesis, train],
                                       feed_dict={X: x_data, Y: y_data})
        if step % 100 == 0:
            print(step, "cost: ", cost_val, "prediction: ", hy_val)

    # inference on unseen score combinations
    print("my score will be: ", sess.run(hypothesis, feed_dict={X: [[100, 100, 100]]}))
    print("Other scores will be: ", sess.run(hypothesis,
                                             feed_dict={X: [[60, 70, 80], [55, 94, 29]]}))
# multi_var_from_file()
def multi_var_from_file_batch():
    """Train the linear model, streaming input through TF's queue-based batching.

    Reads the two csv files with a TextLineReader and feeds mini-batches of
    size 2. When you use string_input_producer(), do not insert #comments in
    the csv files — it will spit an inefficient data error.
    """
    # Bugfix: use relative paths (consistent with multi_var_from_file) instead
    # of hard-coded absolute, user-specific paths.
    filename_queue = tf.train.string_input_producer(
        ['4_multi_var_from_file.csv', '4_multi_var_from_file_2.csv'],
        shuffle=False)
    reader = tf.TextLineReader()
    _, value = reader.read(filename_queue)

    # Defaults used in case of empty columns; they also fix the column dtype
    record_defaults = [[0.], [0.], [0.], [0.]]
    xy = tf.decode_csv(value, record_defaults=record_defaults)
    train_x_batch, train_y_batch = tf.train.batch([xy[0:-1], xy[-1:]], batch_size=2)

    X = tf.placeholder(tf.float32, shape=[None, 3])
    Y = tf.placeholder(tf.float32, shape=[None, 1])
    W = tf.Variable(tf.random_normal([3, 1]), name='weight')
    b = tf.Variable(tf.random_normal([1]), name='bias')

    hypothesis = tf.matmul(X, W) + b
    cost = tf.reduce_mean(tf.square(hypothesis - Y))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-5)
    train = optimizer.minimize(cost)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # start populating filename queue
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # training
    for step in range(4001):
        x_batch, y_batch = sess.run([train_x_batch, train_y_batch])
        cost_val, hy_val, _ = sess.run([cost, hypothesis, train],
                                       feed_dict={X: x_batch, Y: y_batch})
        if step % 100 == 0:
            print(step, "cost: ", cost_val, "prediction: ", hy_val)

    coord.request_stop()
    coord.join(threads)

    # test
    print("my score will be: ", sess.run(hypothesis, feed_dict={X: [[100, 100, 100]]}))
    print("Other scores will be: ", sess.run(hypothesis,
                                             feed_dict={X: [[60, 70, 80], [55, 94, 29]]}))
# Run the queue-based variant when this file is executed.
multi_var_from_file_batch()
| 36.657143 | 100 | 0.566641 |
acf4b1f9aea318929865cb4e4d405e43d405a471 | 19 | py | Python | rpiwepd/lib/__init__.py | genwch/rpiwepd | 27ced8ef1255f17b475b231f6aefc1e4f8ab6a27 | [
"MIT"
] | null | null | null | rpiwepd/lib/__init__.py | genwch/rpiwepd | 27ced8ef1255f17b475b231f6aefc1e4f8ab6a27 | [
"MIT"
] | null | null | null | rpiwepd/lib/__init__.py | genwch/rpiwepd | 27ced8ef1255f17b475b231f6aefc1e4f8ab6a27 | [
"MIT"
] | null | null | null | from .epd import *
| 9.5 | 18 | 0.684211 |
acf4b21796377278d1c939cab768c70f4afa9e6b | 588 | py | Python | docs/pycode/array/rotate-image.py | ppipada/tech-interview-prep | 8842052a6471ab3ac9b55c30ef341da88e55796d | [
"MIT"
] | null | null | null | docs/pycode/array/rotate-image.py | ppipada/tech-interview-prep | 8842052a6471ab3ac9b55c30ef341da88e55796d | [
"MIT"
] | null | null | null | docs/pycode/array/rotate-image.py | ppipada/tech-interview-prep | 8842052a6471ab3ac9b55c30ef341da88e55796d | [
"MIT"
] | null | null | null | class Solution(object):
# Pythonic with extended slices and zip syntax
def rotate(self, A):
"""
:type matrix: List[List[int]]
:rtype: None Do not return anything, modify matrix in-place instead.
"""
A[:] = list(zip(*A[::-1]))
# first transpose and then flip left-right
def rotateManual(self, A):
n = len(A)
for i in range(n):
for j in range(i):
A[i][j], A[j][i] = A[j][i], A[i][j]
for row in A:
for j in range(n / 2):
row[j], row[~j] = row[~j], row[j] | 32.666667 | 76 | 0.491497 |
acf4b25563f1c8f24cd7a2b0f6ed83c740237b8d | 1,805 | py | Python | pyFAI/gui/model/DataModel.py | yugangzhang/pyFAI | e0453b279dac1f165f637e2a2ed1d4ddf57d31ba | [
"MIT"
] | 1 | 2021-04-28T20:09:13.000Z | 2021-04-28T20:09:13.000Z | pyFAI/gui/model/DataModel.py | yugangzhang/pyFAI | e0453b279dac1f165f637e2a2ed1d4ddf57d31ba | [
"MIT"
] | null | null | null | pyFAI/gui/model/DataModel.py | yugangzhang/pyFAI | e0453b279dac1f165f637e2a2ed1d4ddf57d31ba | [
"MIT"
] | null | null | null | # coding: utf-8
# /*##########################################################################
#
# Copyright (C) 2016-2018 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
from __future__ import absolute_import
__authors__ = ["V. Valls"]
__license__ = "MIT"
__date__ = "14/02/2017"
from .AbstractModel import AbstractModel
class DataModel(AbstractModel):
def __init__(self, parent=None):
super(DataModel, self).__init__(parent)
self.__value = None
def isValid(self):
return self.__value is not None
def value(self):
return self.__value
def setValue(self, value):
self.__value = value
self.wasChanged()
| 36.1 | 79 | 0.679224 |
acf4b26ae364f6b1512e77b3bd4eab9296511d5c | 21,013 | py | Python | ansible/my_env/lib/python2.7/site-packages/ansible/plugins/cliconf/__init__.py | otus-devops-2019-02/yyashkin_infra | 0cd0c003884155ac922e3e301305ac202de7028c | [
"MIT"
] | 1 | 2019-04-16T21:23:15.000Z | 2019-04-16T21:23:15.000Z | ansible/my_env/lib/python2.7/site-packages/ansible/plugins/cliconf/__init__.py | otus-devops-2019-02/yyashkin_infra | 0cd0c003884155ac922e3e301305ac202de7028c | [
"MIT"
] | 5 | 2020-02-26T20:10:50.000Z | 2021-09-23T23:23:18.000Z | ansible/my_env/lib/python2.7/site-packages/ansible/plugins/cliconf/__init__.py | otus-devops-2019-02/yyashkin_infra | 0cd0c003884155ac922e3e301305ac202de7028c | [
"MIT"
] | 1 | 2020-02-13T14:24:57.000Z | 2020-02-13T14:24:57.000Z | #
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from abc import abstractmethod
from functools import wraps
from ansible.plugins import AnsiblePlugin
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.module_utils._text import to_bytes, to_text
try:
from scp import SCPClient
HAS_SCP = True
except ImportError:
HAS_SCP = False
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
def enable_mode(func):
    """Decorator ensuring the CLI session is in privileged (enable) mode.

    The wrapped method runs only when the current prompt ends with '#';
    otherwise an AnsibleError is raised.
    """
    @wraps(func)
    def wrapped(self, *args, **kwargs):
        prompt = self._connection.get_prompt()
        prompt_text = to_text(prompt, errors='surrogate_or_strict').strip()
        if prompt_text.endswith('#'):
            return func(self, *args, **kwargs)
        raise AnsibleError('operation requires privilege escalation')
    return wrapped
class CliconfBase(AnsiblePlugin):
"""
A base class for implementing cli connections
.. note:: String inputs to :meth:`send_command` will be cast to byte strings
within this method and as such are not required to be made byte strings
beforehand. Please avoid using literal byte strings (``b'string'``) in
:class:`CliConfBase` plugins as this can lead to unexpected errors when
running on Python 3
List of supported rpc's:
:get_config: Retrieves the specified configuration from the device
:edit_config: Loads the specified commands into the remote device
:get: Execute specified command on remote device
:get_capabilities: Retrieves device information and supported rpc methods
:commit: Load configuration from candidate to running
:discard_changes: Discard changes to candidate datastore
Note: List of supported rpc's for remote device can be extracted from
output of get_capabilities()
:returns: Returns output received from remote device as byte string
Usage:
from ansible.module_utils.connection import Connection
conn = Connection()
conn.get('show lldp neighbors detail'')
conn.get_config('running')
conn.edit_config(['hostname test', 'netconf ssh'])
"""
__rpc__ = ['get_config', 'edit_config', 'get_capabilities', 'get', 'enable_response_logging', 'disable_response_logging']
def __init__(self, connection):
super(CliconfBase, self).__init__()
self._connection = connection
self.history = list()
self.response_logging = False
    def _alarm_handler(self, signum, frame):
        """Alarm handler raised in case of command timeout """
        # SIGALRM handler: the running command exceeded the configured play
        # context timeout, so log it and tear down the shell to unblock the
        # caller.
        display.display('closing shell due to command timeout (%s seconds).' % self._connection._play_context.timeout, log_only=True)
        self.close()
def send_command(self, command=None, prompt=None, answer=None, sendonly=False, newline=True, prompt_retry_check=False, check_all=False):
"""Executes a command over the device connection
This method will execute a command over the device connection and
return the results to the caller. This method will also perform
logging of any commands based on the `nolog` argument.
:param command: The command to send over the connection to the device
:param prompt: A single regex pattern or a sequence of patterns to evaluate the expected prompt from the command
:param answer: The answer to respond with if the prompt is matched.
:param sendonly: Bool value that will send the command but not wait for a result.
:param newline: Bool value that will append the newline character to the command
:param prompt_retry_check: Bool value for trying to detect more prompts
:param check_all: Bool value to indicate if all the values in prompt sequence should be matched or any one of
given prompt.
:returns: The output from the device after executing the command
"""
kwargs = {
'command': to_bytes(command),
'sendonly': sendonly,
'newline': newline,
'prompt_retry_check': prompt_retry_check,
'check_all': check_all
}
if prompt is not None:
if isinstance(prompt, list):
kwargs['prompt'] = [to_bytes(p) for p in prompt]
else:
kwargs['prompt'] = to_bytes(prompt)
if answer is not None:
if isinstance(answer, list):
kwargs['answer'] = [to_bytes(p) for p in answer]
else:
kwargs['answer'] = to_bytes(answer)
resp = self._connection.send(**kwargs)
if not self.response_logging:
self.history.append(('*****', '*****'))
else:
self.history.append((kwargs['command'], resp))
return resp
def get_base_rpc(self):
"""Returns list of base rpc method supported by remote device"""
return self.__rpc__
def get_history(self):
""" Returns the history file for all commands
This will return a log of all the commands that have been sent to
the device and all of the output received. By default, all commands
and output will be redacted unless explicitly configured otherwise.
:return: An ordered list of command, output pairs
"""
return self.history
def reset_history(self):
""" Resets the history of run commands
:return: None
"""
self.history = list()
def enable_response_logging(self):
"""Enable logging command response"""
self.response_logging = True
def disable_response_logging(self):
"""Disable logging command response"""
self.response_logging = False
@abstractmethod
def get_config(self, source='running', flags=None, format=None):
"""Retrieves the specified configuration from the device
This method will retrieve the configuration specified by source and
return it to the caller as a string. Subsequent calls to this method
will retrieve a new configuration from the device
:param source: The configuration source to return from the device.
This argument accepts either `running` or `startup` as valid values.
:param flags: For devices that support configuration filtering, this
keyword argument is used to filter the returned configuration.
The use of this keyword argument is device dependent adn will be
silently ignored on devices that do not support it.
:param format: For devices that support fetching different configuration
format, this keyword argument is used to specify the format in which
configuration is to be retrieved.
:return: The device configuration as specified by the source argument.
"""
pass
@abstractmethod
def edit_config(self, candidate=None, commit=True, replace=None, diff=False, comment=None):
"""Loads the candidate configuration into the network device
This method will load the specified candidate config into the device
and merge with the current configuration unless replace is set to
True. If the device does not support config replace an errors
is returned.
:param candidate: The configuration to load into the device and merge
with the current running configuration
:param commit: Boolean value that indicates if the device candidate
configuration should be pushed in the running configuration or discarded.
:param replace: If the value is True/False it indicates if running configuration should be completely
replace by candidate configuration. If can also take configuration file path as value,
the file in this case should be present on the remote host in the mentioned path as a
prerequisite.
:param comment: Commit comment provided it is supported by remote host
:return: Returns a json string with contains configuration applied on remote host, the returned
response on executing configuration commands and platform relevant data.
{
"diff": "",
"response": [],
"request": []
}
"""
pass
@abstractmethod
def get(self, command=None, prompt=None, answer=None, sendonly=False, newline=True, output=None, check_all=False):
"""Execute specified command on remote device
This method will retrieve the specified data and
return it to the caller as a string.
:param command: command in string format to be executed on remote device
:param prompt: the expected prompt generated by executing command, this can
be a string or a list of strings
:param answer: the string to respond to the prompt with
:param sendonly: bool to disable waiting for response, default is false
:param newline: bool to indicate if newline should be added at end of answer or not
:param output: For devices that support fetching command output in different
format, this keyword argument is used to specify the output in which
response is to be retrieved.
:param check_all: Bool value to indicate if all the values in prompt sequence should be matched or any one of
given prompt.
:return: The output from the device after executing the command
"""
pass
@abstractmethod
def get_capabilities(self):
"""Returns the basic capabilities of the network device
This method will provide some basic facts about the device and
what capabilities it has to modify the configuration. The minimum
return from this method takes the following format.
eg:
{
'rpc': [list of supported rpcs],
'network_api': <str>, # the name of the transport
'device_info': {
'network_os': <str>,
'network_os_version': <str>,
'network_os_model': <str>,
'network_os_hostname': <str>,
'network_os_image': <str>,
'network_os_platform': <str>,
},
'device_operations': {
'supports_diff_replace': <bool>, # identify if config should be merged or replaced is supported
'supports_commit': <bool>, # identify if commit is supported by device or not
'supports_rollback': <bool>, # identify if rollback is supported or not
'supports_defaults': <bool>, # identify if fetching running config with default is supported
'supports_commit_comment': <bool>, # identify if adding comment to commit is supported of not
'supports_onbox_diff: <bool>, # identify if on box diff capability is supported or not
'supports_generate_diff: <bool>, # identify if diff capability is supported within plugin
'supports_multiline_delimiter: <bool>, # identify if multiline demiliter is supported within config
'supports_diff_match: <bool>, # identify if match is supported
'supports_diff_ignore_lines: <bool>, # identify if ignore line in diff is supported
'supports_config_replace': <bool>, # identify if running config replace with candidate config is supported
'supports_admin': <bool>, # identify if admin configure mode is supported or not
'supports_commit_label': <bool>, # identify if commit label is supported or not
}
'format': [list of supported configuration format],
'diff_match': [list of supported match values],
'diff_replace': [list of supported replace values],
'output': [list of supported command output format]
}
:return: capability as json string
"""
pass
def commit(self, comment=None):
"""Commit configuration changes
This method will perform the commit operation on a previously loaded
candidate configuration that was loaded using `edit_config()`. If
there is a candidate configuration, it will be committed to the
active configuration. If there is not a candidate configuration, this
method should just silently return.
:return: None
"""
return self._connection.method_not_found("commit is not supported by network_os %s" % self._play_context.network_os)
def discard_changes(self):
"""Discard candidate configuration
This method will discard the current candidate configuration if one
is present. If there is no candidate configuration currently loaded,
then this method should just silently return
:returns: None
"""
return self._connection.method_not_found("discard_changes is not supported by network_os %s" % self._play_context.network_os)
def rollback(self, rollback_id, commit=True):
"""
:param rollback_id: The commit id to which configuration should be rollbacked
:param commit: Flag to indicate if changes should be committed or not
:return: Returns diff between before and after change.
"""
pass
def copy_file(self, source=None, destination=None, proto='scp', timeout=30):
"""Copies file over scp/sftp to remote device
:param source: Source file path
:param destination: Destination file path on remote device
:param proto: Protocol to be used for file transfer,
supported protocol: scp and sftp
:param timeout: Specifies the wait time to receive response from
remote host before triggering timeout exception
:return: None
"""
ssh = self._connection.paramiko_conn._connect_uncached()
if proto == 'scp':
if not HAS_SCP:
raise AnsibleError("Required library scp is not installed. Please install it using `pip install scp`")
with SCPClient(ssh.get_transport(), socket_timeout=timeout) as scp:
out = scp.put(source, destination)
elif proto == 'sftp':
with ssh.open_sftp() as sftp:
sftp.put(source, destination)
def get_file(self, source=None, destination=None, proto='scp', timeout=30):
"""Fetch file over scp/sftp from remote device
:param source: Source file path
:param destination: Destination file path
:param proto: Protocol to be used for file transfer,
supported protocol: scp and sftp
:param timeout: Specifies the wait time to receive response from
remote host before triggering timeout exception
:return: None
"""
"""Fetch file over scp/sftp from remote device"""
ssh = self._connection.paramiko_conn._connect_uncached()
if proto == 'scp':
if not HAS_SCP:
raise AnsibleError("Required library scp is not installed. Please install it using `pip install scp`")
with SCPClient(ssh.get_transport(), socket_timeout=timeout) as scp:
scp.get(source, destination)
elif proto == 'sftp':
with ssh.open_sftp() as sftp:
sftp.get(source, destination)
def get_diff(self, candidate=None, running=None, diff_match=None, diff_ignore_lines=None, path=None, diff_replace=None):
"""
Generate diff between candidate and running configuration. If the
remote host supports onbox diff capabilities ie. supports_onbox_diff in that case
candidate and running configurations are not required to be passed as argument.
In case if onbox diff capability is not supported candidate argument is mandatory
and running argument is optional.
:param candidate: The configuration which is expected to be present on remote host.
:param running: The base configuration which is used to generate diff.
:param diff_match: Instructs how to match the candidate configuration with current device configuration
Valid values are 'line', 'strict', 'exact', 'none'.
'line' - commands are matched line by line
'strict' - command lines are matched with respect to position
'exact' - command lines must be an equal match
'none' - will not compare the candidate configuration with the running configuration
:param diff_ignore_lines: Use this argument to specify one or more lines that should be
ignored during the diff. This is used for lines in the configuration
that are automatically updated by the system. This argument takes
a list of regular expressions or exact line matches.
:param path: The ordered set of parents that uniquely identify the section or hierarchy
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
:param diff_replace: Instructs on the way to perform the configuration on the device.
If the replace argument is set to I(line) then the modified lines are
pushed to the device in configuration mode. If the replace argument is
set to I(block) then the entire command block is pushed to the device in
configuration mode if any line is not correct.
:return: Configuration and/or banner diff in json format.
{
'config_diff': ''
}
"""
pass
def run_commands(self, commands=None, check_rc=True):
"""
Execute a list of commands on remote host and return the list of response
:param commands: The list of command that needs to be executed on remote host.
The individual command in list can either be a command string or command dict.
If the command is dict the valid keys are
{
'command': <command to be executed>
'prompt': <expected prompt on executing the command>,
'answer': <answer for the prompt>,
'output': <the format in which command output should be rendered eg: 'json', 'text'>,
'sendonly': <Boolean flag to indicate if it command execution response should be ignored or not>
}
:param check_rc: Boolean flag to check if returned response should be checked for error or not.
If check_rc is False the error output is appended in return response list, else if the
value is True an exception is raised.
:return: List of returned response
"""
pass
def check_edit_config_capability(self, operations, candidate=None, commit=True, replace=None, comment=None):
if not candidate and not replace:
raise ValueError("must provide a candidate or replace to load configuration")
if commit not in (True, False):
raise ValueError("'commit' must be a bool, got %s" % commit)
if replace and not operations['supports_replace']:
raise ValueError("configuration replace is not supported")
if comment and not operations.get('supports_commit_comment', False):
raise ValueError("commit comment is not supported")
if replace and not operations.get('supports_replace', False):
raise ValueError("configuration replace is not supported")
| 48.084668 | 140 | 0.642221 |
acf4b337a9e10a2e4f7a9f4c59d2a5e449a3f494 | 1,809 | py | Python | pyqtlet/leaflet/layer/layer.py | samhattangady/pyqtlet | 2242f63b0dce6dd6357aaa0c6fe23a991451bfdd | [
"BSD-2-Clause-FreeBSD"
] | 30 | 2018-05-24T17:38:11.000Z | 2021-11-02T19:34:03.000Z | pyqtlet/leaflet/layer/layer.py | samhattangady/pyqtlet | 2242f63b0dce6dd6357aaa0c6fe23a991451bfdd | [
"BSD-2-Clause-FreeBSD"
] | 27 | 2018-02-21T07:22:11.000Z | 2021-10-12T06:24:18.000Z | pyqtlet/leaflet/layer/layer.py | samhattangady/pyqtlet | 2242f63b0dce6dd6357aaa0c6fe23a991451bfdd | [
"BSD-2-Clause-FreeBSD"
] | 9 | 2018-06-11T06:50:44.000Z | 2021-05-17T15:26:26.000Z | from ..core import Evented
class Layer(Evented):
    """Base class for pyqtlet map layers, mirroring Leaflet's L.Layer.

    Each instance is backed by a uniquely-named JavaScript variable
    (``l0``, ``l1``, ...) created from the class-level counter, and the
    Python methods forward to the corresponding Leaflet calls via
    ``runJavaScript``.
    """
    # layerId is a static variable shared between all layers
    # It is used to give unique names to layers
    layerId = 0

    @property
    def layerName(self):
        return self._layerName

    @layerName.setter
    def layerName(self, name):
        self._layerName = name

    @property
    def jsName(self):
        # The JavaScript-side variable name is the same as the layer name.
        return self._layerName

    @property
    def map(self):
        return self._map

    @map.setter
    def map(self, map_):
        self._map = map_

    def __init__(self):
        super().__init__()
        self._map = None
        self._layerName = self._getNewLayerName()

    def _getNewLayerName(self):
        # Consume the shared counter to mint a unique JS variable name.
        layerName = 'l{}'.format(self.layerId)
        Layer.layerId += 1
        return layerName

    def addTo(self, map_):
        map_.addLayer(self)

    def removeFrom(self, map_):
        map_.removeLayer(self)

    def bindPopup(self, content, options=None):
        js = '{layerName}.bindPopup("{content}"'.format(
                layerName=self._layerName, content=content)
        if options:
            # Bug fix: '{options}' is a *named* replacement field, so the
            # argument must be passed by keyword. The original passed it
            # positionally, which raised KeyError('options') at runtime.
            js += ', {options}'.format(options=self._stringifyForJs(options))
        js += ')'
        self.runJavaScript(js)

    def unbindPopup(self):
        js = '{layerName}.unbindPopup()'.format(layerName=self._layerName)
        self.runJavaScript(js)

    def bindTooltip(self, content, options=None):
        js = '{layerName}.bindTooltip("{content}"'.format(
                layerName=self._layerName, content=content)
        if options:
            # Same named-field fix as in bindPopup above.
            js += ', {options}'.format(options=self._stringifyForJs(options))
        js += ')'
        self.runJavaScript(js)

    def unbindTooltip(self):
        js = '{layerName}.unbindTooltip()'.format(layerName=self._layerName)
        self.runJavaScript(js)
acf4b482ba79eb561201e345ff4d1a586d5bd78a | 2,514 | py | Python | train.py | Eminatex/EnlightenGAN-master | cfc408170fb2884209ca9dbf8ed4d1995400ab45 | [
"BSD-3-Clause"
] | null | null | null | train.py | Eminatex/EnlightenGAN-master | cfc408170fb2884209ca9dbf8ed4d1995400ab45 | [
"BSD-3-Clause"
] | null | null | null | train.py | Eminatex/EnlightenGAN-master | cfc408170fb2884209ca9dbf8ed4d1995400ab45 | [
"BSD-3-Clause"
] | null | null | null | import time#写一点注释
#其他账号写的注释
from options.train_options import TrainOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
from util.visualizer import Visualizer
def get_config(config):
    """Load a YAML configuration file and return its parsed contents.

    :param config: Path to the YAML file.
    :return: The deserialized document (typically a dict).
    """
    import yaml
    with open(config, 'r') as stream:
        # safe_load only constructs plain Python objects; yaml.load without an
        # explicit Loader can execute arbitrary constructors and is deprecated.
        return yaml.safe_load(stream)
# --- Training entry point (script-level) -------------------------------------
# Parses options, builds the data loader / model / visualizer, then runs the
# epoch loop with periodic visualization, loss printing and checkpointing.
opt = TrainOptions().parse()
config = get_config(opt.config)
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
print('#training images = %d' % dataset_size)

model = create_model(opt)
visualizer = Visualizer(opt)

# Global counter of training samples processed across all epochs.
total_steps = 0

for epoch in range(1, opt.niter + opt.niter_decay + 1):
    epoch_start_time = time.time()
    for i, data in enumerate(dataset):
        iter_start_time = time.time()
        total_steps += opt.batchSize
        # Number of samples processed within the current epoch.
        epoch_iter = total_steps - dataset_size * (epoch - 1)
        model.set_input(data)
        model.optimize_parameters(epoch)

        # Periodically push the current visuals to the visualizer.
        if total_steps % opt.display_freq == 0:
            visualizer.display_current_results(model.get_current_visuals(), epoch)

        # Periodically print (and optionally plot) the current losses.
        if total_steps % opt.print_freq == 0:
            errors = model.get_current_errors(epoch)
            t = (time.time() - iter_start_time) / opt.batchSize
            visualizer.print_current_errors(epoch, epoch_iter, errors, t)
            if opt.display_id > 0:
                visualizer.plot_current_errors(epoch, float(epoch_iter)/dataset_size, opt, errors)

        # Periodically overwrite the rolling 'latest' checkpoint.
        if total_steps % opt.save_latest_freq == 0:
            print('saving the latest model (epoch %d, total_steps %d)' %
                  (epoch, total_steps))
            model.save('latest')

    # End-of-epoch checkpoint: 'latest' plus an epoch-numbered snapshot.
    if epoch % opt.save_epoch_freq == 0:
        print('saving the model at the end of epoch %d, iters %d' %
              (epoch, total_steps))
        model.save('latest')
        model.save(epoch)

    print('End of epoch %d / %d \t Time Taken: %d sec' %
          (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))

    # Learning-rate schedule: with --new_lr, decay at fixed epoch milestones;
    # otherwise decay once per epoch after opt.niter.
    if opt.new_lr:
        if epoch == opt.niter:
            model.update_learning_rate()
        elif epoch == (opt.niter + 20):
            model.update_learning_rate()
        elif epoch == (opt.niter + 70):
            model.update_learning_rate()
        elif epoch == (opt.niter + 90):
            # NOTE(review): four consecutive decays at this milestone -- looks
            # intentional (a large drop) but confirm it is not a copy-paste slip.
            model.update_learning_rate()
            model.update_learning_rate()
            model.update_learning_rate()
            model.update_learning_rate()
    else:
        if epoch > opt.niter:
            model.update_learning_rate()
acf4b4b79aa3d6964d08e39d2864a9cb9af393b0 | 6,815 | py | Python | backend/forms/forms.py | slash26/DevOps | 0fe118e234157a58da009fe9b14a527bc5dbe7f7 | [
"MIT"
] | null | null | null | backend/forms/forms.py | slash26/DevOps | 0fe118e234157a58da009fe9b14a527bc5dbe7f7 | [
"MIT"
] | null | null | null | backend/forms/forms.py | slash26/DevOps | 0fe118e234157a58da009fe9b14a527bc5dbe7f7 | [
"MIT"
] | null | null | null | """This module contains forms classes for admin manage."""
from flask_wtf import FlaskForm
from wtforms import (StringField, HiddenField, TextAreaField,
PasswordField, SelectField, SubmitField,
FloatField)
from wtforms.validators import (DataRequired, Email,
Length, Regexp, ValidationError)
from backend.app import db
from backend.models.users import User
from backend.models.issues import Issue
class UniqueValue(object):
    """Custom validator.

    Validate for unique field value.
    Skips record in database with current user's id.
    """
    # pylint: disable=too-few-public-methods

    def __init__(self, model, property_to_find, message=None):
        self.model = model
        self.property_to_find = property_to_find
        # Fall back to a generic message when none was supplied.
        self.message = message or "This field's value is already exists in database."

    def __call__(self, form, field):
        # The current record (if the form carries an id) is excluded from the
        # uniqueness check so editing a record does not collide with itself.
        current_id = form.id.data if form.id.data else None
        duplicate = (
            db.session.query(self.model)
            .filter(self.model.id != current_id)
            .filter(self.property_to_find == field.data)
            .first()
        )
        if duplicate is not None:
            raise ValidationError(self.message)
# Shared validator instances used by the user forms below.
check_email = UniqueValue(
    User, User.email,
    message="This email is already exists in database.")
check_alias = UniqueValue(
    User, User.alias,
    message="This alias is already exists in database.")
class BaseForm(FlaskForm):
    """Adds csrf"""
    class Meta:
        # Enable CSRF protection for all forms derived from this base.
        csrf = True
class UserForm(BaseForm):
    """User info modifying form."""
    # Hidden primary key of the record being edited; also consumed by the
    # UniqueValue validators to skip the current record.
    id = HiddenField('id')
    name = StringField(
        'name',
        description=u'Length between 3 and 15 chars.',
        validators=[
            DataRequired(),
            Length(min=3, max=15),
            Regexp(
                r"^[\w]+$",
                message='Only letters, numbers and "_" may be used.')
        ]
    )
    alias = StringField(
        'alias',
        description=u'Length between 3 and 15 chars.',
        validators=[
            DataRequired(),
            check_alias,
            Length(min=3, max=15),
            Regexp(
                r"^[\w]+$",
                message='Only letters, numbers and "_" may be used.')
        ]
    )
    email = StringField('email', validators=[Email(), check_email])
    # Role choice values correspond to role table ids.
    role_id = SelectField(
        'role_id',
        choices=[
            ('1', 'admin'),
            ('2', 'moderator'),
            ('3', 'user')
        ],
        validators=[DataRequired()]
    )
    submit_button = SubmitField('Save')
class UserAddForm(BaseForm):
    """User add form."""
    # Same fields as UserForm plus a required password for the new account.
    id = HiddenField('id')
    name = StringField(
        'name',
        description=u'Length between 3 and 15 chars.',
        validators=[
            DataRequired(),
            Length(min=3, max=15),
            Regexp(
                r"^[\w]+$",
                message='Only letters, numbers and "_" may be used.')
        ]
    )
    alias = StringField(
        'alias',
        description=u'Length between 3 and 15 chars.',
        validators=[
            DataRequired(),
            check_alias,
            Length(min=3, max=15),
            Regexp(
                r"^[\w]+$",
                message='Only letters, numbers and "_" may be used.')
        ]
    )
    email = StringField('email', validators=[Email(), check_email])
    password = PasswordField(
        'password',
        description=u'Length between 3 and 20 chars.',
        validators=[
            DataRequired(),
            Length(min=3, max=20),
            Regexp(
                r"^[\w]+$",
                message='Only letters, numbers and "_" may be used.')
        ]
    )
    role_id = SelectField(
        'role_id',
        choices=[
            ('1', 'admin'),
            ('2', 'moderator'),
            ('3', 'user')
        ],
        validators=[DataRequired()]
    )
    submit_button = SubmitField('Save')
class IssueForm(BaseForm):
    """Issue edit form."""
    id = HiddenField('id')
    title = StringField(
        'title',
        description=u'Length between 3 and 15 chars.',
        validators=[
            DataRequired(),
            Length(min=3, max=15)
        ]
    )
    status = SelectField(
        'status',
        choices=[
            ('new', 'new'),
            ('on moderation', 'on moderation'),
            ('open', 'open'),
            ('closed', 'closed'),
            ('deleted', 'deleted'),
            ('pending close', 'pending close'),
        ],
        validators=[DataRequired()]
    )
    description = TextAreaField(
        'description',
        description=u'Length between 10 and 144 chars.',
        validators=[
            DataRequired(),
            Length(min=10, max=144)
        ]
    )
    # Coordinates are read-only in the form; they are set from the map UI.
    location_lat = FloatField(
        'location lat',
        render_kw={'readonly': True},
        validators=[
            DataRequired(),
        ]
    )
    location_lon = FloatField(
        # Bug fix: label was misspelled 'location lot'.
        'location lon',
        render_kw={'readonly': True},
        validators=[
            DataRequired(),
        ]
    )
    # Choice values correspond to category table ids.
    category_id = SelectField(
        'category',
        choices=[
            ('1', 'road accident'),
            ('2', 'infrastructure accident'),
            ('3', 'another accident'),
            ('4', 'accident with animals')
        ],
        validators=[DataRequired()]
    )
    submit_button = SubmitField('Save')
class LoginForm(BaseForm):
    """Login form."""
    # The login field is the user's e-mail address.
    email = StringField('login', validators=[Email()])
    password = PasswordField('password', validators=[DataRequired()])
    submit_button = SubmitField('Login')
class SearchUserForm(BaseForm):
    """Search form"""
    search = StringField(
        'search'
    )
    # Numeric choice values encode which column combination to search in.
    search_by = SelectField(
        'search_by',
        choices=[
            ('0', 'name'),
            ('1', 'alias'),
            ('2', 'email'),
            ('3', 'name+alias'),
            ('4', 'alias+email'),
            ('5', 'email+name'),
            ('6', 'email+name+alias')
        ]
    )
    order_by = SelectField(
        'order_by',
        choices=[
            ('0', 'id'),
            ('1', 'role'),
            ('2', 'delete date')
        ]
    )

    class Meta:
        # Search is submitted via GET, so CSRF protection is disabled here.
        csrf = False
class SearchIssuesForm(BaseForm):
    """Search form"""
    search = StringField(
        'search'
    )
    # Numeric choice values encode which issue column to search in.
    search_by = SelectField(
        'search_by',
        choices=[
            ('0', 'summary'),
            ('1', 'category'),
            ('2', 'description'),
        ]
    )
    order_by = SelectField(
        'order_by',
        choices=[
            ('0', 'summary'),
            ('1', 'category'),
        ]
    )

    class Meta:
        # Search is submitted via GET, so CSRF protection is disabled here.
        csrf = False
acf4b4e96dc3a2dad6a00d18b660b9055e6125e7 | 2,405 | py | Python | deploy/determined_deploy/aws/deployment_types/vpc.py | jojochuang/determined | 22a7cd4b497767d7420b26ead769ba7e61d7f90a | [
"Apache-2.0"
] | 1 | 2020-09-23T12:02:32.000Z | 2020-09-23T12:02:32.000Z | deploy/determined_deploy/aws/deployment_types/vpc.py | jojochuang/determined | 22a7cd4b497767d7420b26ead769ba7e61d7f90a | [
"Apache-2.0"
] | null | null | null | deploy/determined_deploy/aws/deployment_types/vpc.py | jojochuang/determined | 22a7cd4b497767d7420b26ead769ba7e61d7f90a | [
"Apache-2.0"
] | null | null | null | import boto3
from determined_deploy.aws import aws, constants
from determined_deploy.aws.deployment_types import base
class VPC(base.DeterminedDeployment):
    """Determined deployment that stands up a master in a dedicated VPC via
    a CloudFormation stack, then prints connection instructions."""
    ssh_command = "SSH to master Instance: ssh -i <pem-file> ubuntu@{master_ip}"
    det_ui = (
        "Configure the Determined CLI: export DET_MASTER={master_ip}\n"
        "View the Determined UI: http://{master_ip}:8080\n"
        "View Logs at: https://{region}.console.aws.amazon.com/cloudwatch/home?"
        "region={region}#logStream:group={log_group}"
    )
    # CloudFormation template used for this deployment flavor; subclasses
    # override only this attribute.
    template = "vpc.yaml"

    # Parameters forwarded from self.parameters into the CloudFormation stack.
    template_parameter_keys = [
        constants.cloudformation.KEYPAIR,
        constants.cloudformation.MASTER_INSTANCE_TYPE,
        constants.cloudformation.AGENT_INSTANCE_TYPE,
        constants.cloudformation.INBOUND_CIDR,
        constants.cloudformation.VERSION,
        constants.cloudformation.DB_PASSWORD,
        constants.cloudformation.MAX_IDLE_AGENT_PERIOD,
        constants.cloudformation.MAX_AGENT_STARTING_PERIOD,
        constants.cloudformation.MAX_DYNAMIC_AGENTS,
    ]

    def deploy(self) -> None:
        """Deploy (create or update) the CloudFormation stack and print the
        resulting master address / log locations."""
        self.before_deploy_print()
        cfn_parameters = self.consolidate_parameters()
        with open(self.template_path) as f:
            template = f.read()

        aws.deploy_stack(
            stack_name=self.parameters[constants.cloudformation.CLUSTER_ID],
            template_body=template,
            keypair=self.parameters[constants.cloudformation.KEYPAIR],
            boto3_session=self.parameters[constants.cloudformation.BOTO3_SESSION],
            parameters=cfn_parameters,
        )
        self.print_results(
            self.parameters[constants.cloudformation.CLUSTER_ID],
            self.parameters[constants.cloudformation.BOTO3_SESSION],
        )

    def print_results(self, stack_name: str, boto3_session: boto3.session.Session) -> None:
        """Print CLI/UI/SSH instructions derived from the stack's outputs."""
        output = aws.get_output(stack_name, boto3_session)

        master_ip = output[constants.cloudformation.DET_ADDRESS]
        region = output[constants.cloudformation.REGION]
        log_group = output[constants.cloudformation.LOG_GROUP]
        ui_command = self.det_ui.format(master_ip=master_ip, region=region, log_group=log_group)
        print(ui_command)

        ssh_command = self.ssh_command.format(master_ip=master_ip)
        print(ssh_command)
class FSx(VPC):
    """VPC deployment variant backed by the FSx CloudFormation template."""
    template = "fsx.yaml"
class EFS(VPC):
    """VPC deployment variant backed by the EFS CloudFormation template."""
    template = "efs.yaml"
| 36.439394 | 96 | 0.698545 |
acf4b5566ef92e73ca52607fc66c46e1a62de0f6 | 169 | py | Python | docker/__init__.py | glensc/python-docker-py | c66c7f8b0a8ca216e21c9fe1903eb79f4406a93e | [
"Apache-2.0"
] | 2 | 2016-06-28T03:59:36.000Z | 2017-03-16T22:31:29.000Z | docker/__init__.py | glensc/python-docker-py | c66c7f8b0a8ca216e21c9fe1903eb79f4406a93e | [
"Apache-2.0"
] | null | null | null | docker/__init__.py | glensc/python-docker-py | c66c7f8b0a8ca216e21c9fe1903eb79f4406a93e | [
"Apache-2.0"
] | null | null | null | # flake8: noqa
from .api import APIClient
from .client import Client, from_env
from .version import version, version_info
__version__ = version
__title__ = 'docker-py'
| 21.125 | 42 | 0.786982 |
acf4b55f15872efd59b3203daf730329a195f720 | 467 | py | Python | setup.py | Emma926/paradnn | 60a25ff20b928b309fed160e440be8a487a9fc6c | [
"Apache-2.0"
] | 41 | 2019-10-06T07:36:56.000Z | 2022-02-23T08:01:18.000Z | setup.py | Emma926/paradnn | 60a25ff20b928b309fed160e440be8a487a9fc6c | [
"Apache-2.0"
] | 1 | 2019-10-30T08:14:07.000Z | 2020-02-12T07:54:59.000Z | setup.py | Emma926/paradnn | 60a25ff20b928b309fed160e440be8a487a9fc6c | [
"Apache-2.0"
] | 19 | 2019-09-20T00:14:38.000Z | 2022-02-01T11:19:17.000Z | from setuptools import setup
setup(
name='paradnn',
version='1.0',
description='A tool that generates parameterized deep neural network models. It provides large “end-to-end” models covering current and future applications, and parameterizing the models to explore a much larger design space of DNN model attributes.',
author='Emma Wang',
author_email='emmawong926@gmail.com',
packages=['paradnn'],
install_requires=['python3', 'tensorflow'],
)
| 38.916667 | 254 | 0.749465 |
acf4b6235a8547c29eed4ac06aed47a958a52681 | 1,865 | py | Python | scripts/deploy_BalancerZAP1.py | apguerrera/LiquidityZAP | c03116f0684c4a416b6e945a5dd21153e945321c | [
"Apache-2.0"
] | 3 | 2020-10-23T19:08:38.000Z | 2021-06-26T15:19:26.000Z | scripts/deploy_BalancerZAP1.py | apguerrera/LiquidityZAP | c03116f0684c4a416b6e945a5dd21153e945321c | [
"Apache-2.0"
] | null | null | null | scripts/deploy_BalancerZAP1.py | apguerrera/LiquidityZAP | c03116f0684c4a416b6e945a5dd21153e945321c | [
"Apache-2.0"
] | 1 | 2021-05-10T12:39:23.000Z | 2021-05-10T12:39:23.000Z | from brownie import *
import time
# Axia Protocol Constants
# Mainnet contract addresses used by the deployment script below.
TOKEN_ADDRESS = '0x793786e2dd4cc492ed366a94b88a3ff9ba5e7546'
TOKEN_WETH_PAIR = '0x1e0693f129d05e5857a642245185ee1fca6a5096'
WETH_ADDRESS = '0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2'
# 60% WETH 10% AXIA BAL COMP SNX
BALANCER_POOL = '0x4833e8b56fc8e8a777fcc5e37cb6035c504c9478'
def deploy_balancer_zap():
    """Deploy the BalancerZAP contract, initialise it with the mainnet
    addresses above and return the deployed contract handle."""
    balancer_zap = BalancerZAP.deploy({"from": accounts[0]})
    # NOTE(review): call passes 4 addresses (token, balancerPool, WETH,
    # tokenWethPair); the original comment listed only 3 parameters --
    # confirm the order against the Solidity initBalancerZAP signature.
    balancer_zap.initBalancerZAP(TOKEN_ADDRESS, BALANCER_POOL, WETH_ADDRESS, TOKEN_WETH_PAIR, {"from": accounts[0]})
    print("BalancerZAP contract deployed at: " + str(balancer_zap))
    return balancer_zap
def main():
    """Brownie entry point: unlock the deployer account on mainnet and
    deploy the BalancerZAP contract."""
    if network.show_active() == 'mainnet':
        # replace with your keys
        accounts.load("liquidityzap")

    # Create Uniswap Liquidity Zap
    balancer_zap = deploy_balancer_zap()
# ╰─ brownie run deploy_BalancerZAP1.py --network mainnet ─╯
# Brownie v1.11.0 - Python development framework for Ethereum
# LiquidityzapProject is the active project.
# Running 'scripts/deploy_BalancerZAP1.py::main'...
# Enter the password to unlock this account:
# Transaction sent: 0x689500fa3a725772746ad0eff1b19266e4d0e1115cc911bad001fc964caf49bd
# Gas price: 66.0 gwei Gas limit: 743090
# Waiting for confirmation...
# BalancerZAP.constructor confirmed - Block: 11103424 Gas used: 743090 (100.00%)
# BalancerZAP deployed at: 0x575E0188cFC64d13107d00150dF5e495DFEDa664
# Transaction sent: 0x9e562f8f618bd673d7a1e3b2f02c17f61bafacd43fb7da4ccabab1297a785e1d
# Gas price: 65.0 gwei Gas limit: 131111
# Waiting for confirmation...
# BalancerZAP.initBalancerZAP confirmed - Block: 11103430 Gas used: 131111 (100.00%)
# BalancerZAP contract deployed at: 0x575E0188cFC64d13107d00150dF5e495DFEDa664
| 36.568627 | 116 | 0.770509 |
acf4b66651b07bff0bc4c4f1ec3e4cefcb4b646f | 3,493 | py | Python | assistive_gym/envs/stable_resting_pose.py | mlamsey/assistive-gym | 4c331fb7fb7293ddbef1930fdcbc61db84973625 | [
"MIT"
] | null | null | null | assistive_gym/envs/stable_resting_pose.py | mlamsey/assistive-gym | 4c331fb7fb7293ddbef1930fdcbc61db84973625 | [
"MIT"
] | null | null | null | assistive_gym/envs/stable_resting_pose.py | mlamsey/assistive-gym | 4c331fb7fb7293ddbef1930fdcbc61db84973625 | [
"MIT"
] | null | null | null | import numpy as np
import pybullet as p
from .env import AssistiveEnv
import math
import os
from .agents.human import Human
from .agents.human import right_arm_joints, left_arm_joints, torso_joints, head_joints
controllable_joints = right_arm_joints + left_arm_joints + torso_joints + head_joints
def configure_human(human):
    """Place the human model in a seated pose and attach weak position
    controllers to the arm/torso/head joints."""
    human.impairment = None
    # human.set_all_joints_stiffness(0.02)
    human.set_whole_body_frictions(lateral_friction=50., spinning_friction=10., rolling_friction=10.)
    joint_pos = default_sitting_pose(human)
    human.setup_joints(joint_pos, use_static_joints=False, reactive_force=None)
    # Base placed slightly above the seat plane; identity orientation.
    start_pos = [0, 0.05, 0.875]
    start_orient = [0, 0, 0, 1]
    human.set_base_pos_orient(start_pos, start_orient)
    # human.set_on_ground()

    joint_i = [pose[0] for pose in joint_pos]   # joint indices
    joint_th = [pose[1] for pose in joint_pos]  # target angles (degrees)
    joint_gains = [10.] * len(joint_i)
    # forces = [50.] * len(joint_i)
    forces = [1.] * len(joint_i)

    # tweak joint control
    # NOTE(review): this loop compares the *list position* i with
    # controllable_joints, which holds joint indices; it likely should test
    # `joint_i[i] not in controllable_joints` -- confirm intended behavior.
    for i in range(len(joint_gains)):
        if i not in controllable_joints:
            joint_gains[i] = 0.
            forces[i] = 0.

    human.control(joint_i, joint_th, joint_gains, forces)
# def set_joint_stiffnesses(human):
# human.set_joint_stiffness(human.j_)
def default_sitting_pose(human):
    """Builds the default seated configuration for the human model.

    Args:
        human: object exposing ``j_*`` joint-index attributes.

    Returns:
        A list of ``(joint_index, angle_in_degrees)`` pairs covering the
        arms, legs, and torso, in that order.
    """
    # Arms: shoulders slightly abducted, elbows bent at 90 degrees.
    arm_pose = [(human.j_right_shoulder_x, 30.),
                (human.j_left_shoulder_x, -30.),
                (human.j_right_shoulder_y, 0.),
                (human.j_left_shoulder_y, 0.),
                (human.j_right_elbow, -90.),
                (human.j_left_elbow, -90.)]
    # Legs: hips and knees bent for a seated posture.
    leg_pose = [(human.j_right_knee, 90.),
                (human.j_left_knee, 90.),
                (human.j_right_hip_x, -90.),
                (human.j_left_hip_x, -90.)]
    # Torso: upright.
    torso_pose = [(human.j_waist_x, 0.)]
    return arm_pose + leg_pose + torso_pose
class BasePoseEnv(AssistiveEnv):
    """Robot-free assistive environment for observing a passive human pose.

    The action passed to step() is ignored: a zero-valued action of the same
    length is applied each step, so the human settles under the joint
    controllers installed by configure_human().
    """

    def __init__(self, human):
        super(BasePoseEnv, self).__init__(robot=None, human=human, task='pose_analysis')

    def step(self, action):
        """Advances one simulation step with a zeroed action.

        Returns the standard gym (observation, reward, done, info) tuple;
        reward is always 0 and the episode ends after ``max_steps`` steps.
        """
        # Only len(action) matters; the contents are discarded.
        zero_action = np.zeros(len(action)).ravel()
        self.take_step(zero_action)
        observation = self._get_obs()
        self.steps += 1
        reward = 0
        done = self.steps >= self.max_steps
        info = {"n/a": 'n/a'}  # must be a dict
        return observation, reward, done, info

    def _get_obs(self, agent=None):
        """Returns the position and orientation of the human's head."""
        return self.human.get_pos_orient(self.human.head)

    def reset(self):
        """Rebuilds the scene (plane + chair URDF, configured human) and returns the first observation."""
        super(BasePoseEnv, self).reset()
        self.build_assistive_env(fixed_human_base=False)
        chair_path = os.path.join(self.directory, "primitives", "plane_chair.urdf")
        p.loadURDF(chair_path, flags=p.URDF_USE_SELF_COLLISION, physicsClientId=self.id)
        configure_human(self.human)
        p.resetDebugVisualizerCamera(cameraDistance=1.10, cameraYaw=40, cameraPitch=-45, cameraTargetPosition=[-0.2, 0, 0.75], physicsClientId=self.id)
        p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1, physicsClientId=self.id)
        return self._get_obs()
class StableRestingPoseEnv(BasePoseEnv):
    """Environment in which a controllable human settles into a stable resting pose."""

    def __init__(self):
        human = Human(controllable_joint_indices=controllable_joints, controllable=True)
        # Bug fix: the original called super(BasePoseEnv, self).__init__(...),
        # which skips BasePoseEnv.__init__ entirely (so robot=None and
        # task='pose_analysis' were never passed to AssistiveEnv). Starting the
        # super() chain from this class runs BasePoseEnv.__init__ as intended.
        super(StableRestingPoseEnv, self).__init__(human=human)
        self.steps = 0       # step counter read by BasePoseEnv.step
        self.max_steps = 100  # episode length in simulation steps
| 33.266667 | 151 | 0.659605 |
acf4b72b15c537d2083f80c48e5da395e171c7c8 | 5,958 | py | Python | behavior_regularized_offline_rl/brac/collect_data.py | gunpowder78/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-19T04:26:12.000Z | 2022-03-19T04:26:12.000Z | behavior_regularized_offline_rl/brac/collect_data.py | gunpowder78/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | null | null | null | behavior_regularized_offline_rl/brac/collect_data.py | gunpowder78/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-30T07:20:29.000Z | 2022-03-30T07:20:29.000Z | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main run file for data collection."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import importlib
import os
import time
from absl import app
from absl import flags
from absl import logging
import gin
import numpy as np
import tensorflow.compat.v1 as tf
from behavior_regularized_offline_rl.brac import dataset
from behavior_regularized_offline_rl.brac import policy_loader
from behavior_regularized_offline_rl.brac import train_eval_utils
from behavior_regularized_offline_rl.brac import utils
tf.compat.v1.enable_v2_behavior()

# Command-line flags: where data is written, which environment and behavior
# policy configuration to use, and how much data to collect/evaluate.
flags.DEFINE_string('root_dir',
                    os.path.join(os.getenv('HOME', '/'), 'tmp/offlinerl/data'),
                    'Root directory for saving data.')
flags.DEFINE_string('sub_dir', '0', 'sub directory for saving data.')
flags.DEFINE_string('env_name', 'HalfCheetah-v2', 'env name.')
flags.DEFINE_string('data_name', 'random', 'data name.')
flags.DEFINE_string('env_loader', 'mujoco', 'env loader, suite/gym.')
flags.DEFINE_string('config_dir',
                    'behavior_regularized_offline_rl.brac.configs',
                    'config file dir.')
flags.DEFINE_string('config_file', 'dcfg_pure', 'config file name.')
flags.DEFINE_string('policy_root_dir', None,
                    'Directory in which to find the behavior policy.')
flags.DEFINE_integer('n_samples', int(1e6), 'number of transitions to collect.')
flags.DEFINE_integer('n_eval_episodes', 20,
                     'number episodes to eval each policy.')
flags.DEFINE_multi_string('gin_file', None, 'Paths to the gin-config files.')
flags.DEFINE_multi_string('gin_bindings', None, 'Gin binding parameters.')

FLAGS = flags.FLAGS
def get_sample_counts(n, distr):
  """Splits a total of n samples across sub-datasets by weight.

  Args:
    n: total number of samples to distribute.
    distr: sequence of non-negative weights, one per sub-dataset.

  Returns:
    A list of ints summing exactly to n: each entry but the last is the
    floor of its normalized share, and the last entry absorbs the
    rounding remainder.
  """
  weights = np.asarray(distr, dtype=np.float64)
  weights = weights / weights.sum()
  counts = [int(n * w) for w in weights[:-1]]
  counts.append(n - sum(counts))
  return counts
def collect_n_transitions(tf_env, policy, data, n, log_freq=10000):
  """Runs `policy` in `tf_env` until `n` transitions are added to `data`.

  Throughput is logged every `log_freq` collected steps and once at the end.
  """
  collector = train_eval_utils.DataCollector(tf_env, policy, data)
  window_time = time.time()   # start of the current logging window
  window_step = 0             # steps collected at the window start
  collected = 0
  while collected < n:
    n_new = collector.collect_transition()
    collected += n_new
    if n_new > 0 and (collected % log_freq == 0 or collected == n):
      rate = (collected - window_step) / (time.time() - window_time)
      window_step = collected
      window_time = time.time()
      logging.info('(%d/%d) steps collected at %.4g steps/s.', collected,
                   n, rate)
def collect_data(
    log_dir,
    data_config,
    n_samples=int(1e6),
    env_name='HalfCheetah-v2',
    log_freq=int(1e4),
    n_eval_episodes=20,
    ):
  """Creates a dataset of transitions based on the desired config.

  Args:
    log_dir: directory where the dataset checkpoint is written.
    data_config: iterable of (policy_name, policy_cfg, weight) triples; the
      last element of each triple is that policy's relative share of
      n_samples.
    n_samples: total number of transitions to collect.
    env_name: environment name passed to the env factory.
    log_freq: logging period (in steps) during collection.
    n_eval_episodes: episodes used to evaluate each policy before collecting.
  """
  env = train_eval_utils.env_factory(env_name)
  obs_spec = env.observation_spec()
  act_spec = env.action_spec()

  # Split the total sample budget across the configured policies.
  weights = [cfg[-1] for cfg in data_config]
  per_policy_counts = get_sample_counts(n_samples, weights)

  # Keep the replay buffer on CPU regardless of the default device.
  with tf.device('/cpu:0'):
    data = dataset.Dataset(
        obs_spec,
        act_spec,
        n_samples,
        circular=False)
  data_ckpt = tf.train.Checkpoint(data=data)
  data_ckpt_name = os.path.join(log_dir, 'data')

  # Evaluate each behavior policy, then collect its share of transitions.
  start_time = time.time()
  test_results = collections.OrderedDict()
  for (policy_name, policy_cfg, _), n_transitions in zip(
      data_config, per_policy_counts):
    policy = policy_loader.load_policy(
        policy_loader.parse_policy_cfg(policy_cfg), act_spec)
    logging.info('Testing policy %s...', policy_name)
    eval_mean, eval_std = train_eval_utils.eval_policy_episodes(
        env, policy, n_eval_episodes)
    test_results[policy_name] = [eval_mean, eval_std]
    logging.info('Return mean %.4g, std %.4g.', eval_mean, eval_std)
    logging.info('Collecting data from policy %s...', policy_name)
    collect_n_transitions(env, policy, data, n_transitions, log_freq)

  # The buffer must be exactly full before the checkpoint is written.
  assert data.size == data.capacity
  data_ckpt.write(data_ckpt_name)
  time_cost = time.time() - start_time
  logging.info('Finished: %d transitions collected, '
               'saved at %s, '
               'time cost %.4gs.', n_samples, data_ckpt_name, time_cost)
def main(_):
  """Entry point: resolves the output directory and config, then collects data."""
  logging.set_verbosity(logging.INFO)
  gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_bindings)
  log_dir = os.path.join(
      FLAGS.root_dir,
      FLAGS.env_name,
      FLAGS.data_name,
      FLAGS.sub_dir,
      )
  utils.maybe_makedirs(log_dir)
  # The data config lives in a python module selected by flags.
  config_module = importlib.import_module(
      '{}.{}'.format(FLAGS.config_dir, FLAGS.config_file))
  data_config = config_module.get_data_config(FLAGS.env_name,
                                              FLAGS.policy_root_dir)
  collect_data(
      log_dir=log_dir,
      data_config=data_config,
      n_samples=FLAGS.n_samples,
      env_name=FLAGS.env_name,
      n_eval_episodes=FLAGS.n_eval_episodes)
if __name__ == '__main__':
  # app.run parses the command-line flags before dispatching to main.
  app.run(main)
| 35.047059 | 80 | 0.706781 |
acf4b72cebd987ddfa237f5a144cfbaa8bfc4bcb | 7,222 | py | Python | analysis_schema/BaseModelFunctions.py | chrishavlin/analysis_schema | 99c4fbd57d0afc87b81b9ba4cc9a25fba36086e2 | [
"MIT"
] | 2 | 2020-06-28T17:10:37.000Z | 2021-04-05T16:22:49.000Z | analysis_schema/BaseModelFunctions.py | chrishavlin/analysis_schema | 99c4fbd57d0afc87b81b9ba4cc9a25fba36086e2 | [
"MIT"
] | 7 | 2020-08-10T15:21:09.000Z | 2022-03-03T19:25:18.000Z | analysis_schema/BaseModelFunctions.py | chrishavlin/analysis_schema | 99c4fbd57d0afc87b81b9ba4cc9a25fba36086e2 | [
"MIT"
] | 1 | 2021-04-21T14:41:07.000Z | 2021-04-21T14:41:07.000Z | from inspect import getfullargspec
from typing import Optional
from pydantic import BaseModel
def show_plots(schema, files):
"""
This function accepts the schema model and runs it using yt code which returns
a list. This function iterates through the list and displays each output.
Args:
schema ([dict]): the analysis schema filled out with yt specificaions
"""
result = schema._run()
print(result)
for output in range(len(tuple(result))):
print("each output:", result[output])
if files == "Jupter":
result[output].show()
if files != "Jupyter":
result[output].save()
print("Files with output have been created!")
class ytBaseModel(BaseModel):
"""
A class to connect attributes and their values to yt operations and their
keywork arguements.
Args:
BaseModel ([type]): A pydantic basemodel in the form of a json schema
Raises:
AttributeError: [description]
Returns:
[list]: A list of yt classes to be run and then displayed
"""
_arg_mapping: dict = {} # mapping from internal yt name to schema name
_yt_operation: Optional[str]
# the list to store the data after it has been instaniated
_data_source = {}
def _run(self):
# the list that we'll use to eventually call our function
the_args = []
# this method actually executes the yt code
# first make sure yt is imported and then get our function handle. This assumes
# that our class name exists in yt's top level api.
import yt
print(self._yt_operation)
funcname = getattr(self, "_yt_operation", type(self).__name__)
print("found name:", funcname)
# if the function is not readily available in yt, move to the except block
# try:
func = getattr(yt, funcname)
print(f"pulled func {func}", type(func))
# now we get the arguments for the function:
# func_spec.args, which lists the named arguments and keyword arguments.
# ignoring vargs and kw-only args for now...
# see https://docs.python.org/3/library/inspect.html#inspect.getfullargspec
func_spec = getfullargspec(func)
print("spec", func_spec)
# the argument position number at which we have default values (a little
# hacky, should be a better way to do this, and not sure how to scale it to
# include *args and **kwargs)
n_args = len(func_spec.args) # number of arguments
print("number of args:", n_args)
if func_spec.defaults is None:
# no default args, make sure we never get there...
named_kw_start_at = n_args + 1
else:
# the position at which named keyword args start
named_kw_start_at = n_args - len(func_spec.defaults)
print(f"keywords start at {named_kw_start_at}")
# loop over the call signature arguments and pull out values from our pydantic
# class. this is recursive! will call _run() if a given argument value is also
# a ytBaseModel.
for arg_i, arg in enumerate(func_spec.args):
# check if we've remapped the yt internal argument name for the schema
if arg == "self":
continue
# if arg in self._arg_mapping:
# arg = self._arg_mapping[arg]
# get the value for this argument. If it's not there, attempt to set default
# values for arguments needed for yt but not exposed in our pydantic class
print("the arguemnt:", arg)
try:
arg_value = getattr(self, arg)
print("the arg value:", arg_value)
if arg_value is None and arg != "ds":
default_index = arg_i - named_kw_start_at
arg_value = func_spec.defaults[default_index]
print("defaults:", default_index, arg_value)
except AttributeError:
if arg_i >= named_kw_start_at:
# we are in the named keyword arguments, grab the default
# the func_spec.defaults tuple 0 index is the first named
# argument, so need to offset the arg_i counter
default_index = arg_i - named_kw_start_at
arg_value = func_spec.defaults[default_index]
print("defaults:", default_index, arg_value)
else:
raise AttributeError
# check if this argument is itself a ytBaseModel for which we need to run
# this should make this a fully recursive function?
# if hasattr(arg_value,'_run'):
if isinstance(arg_value, ytBaseModel) or isinstance(arg_value, ytParameter):
arg_value = arg_value._run()
the_args.append(arg_value)
print("the args list:", the_args)
# save the data from yt.load, so it can be used to instaniate the data objects
if funcname == "load":
arg_value = str(arg_value)
self._data_source[arg_value] = func(arg_value)
print("data source:", self._data_source)
# if ds is None, then find ._data_source and insert it in the first position
if the_args[0] is None:
if len(self._data_source) > 0:
ds = self._data_source["IsolatedGalaxy/galaxy0030/galaxy0030"]
the_args.remove(None)
the_args.insert(0, ds)
return func(*the_args)
else:
return func(*the_args)
class ytParameter(BaseModel):
_skip_these = ["comments"]
def _run(self):
p = [
getattr(self, key)
for key in self.schema()["properties"].keys()
if key not in self._skip_these
]
if len(p) > 1:
print("some error", p)
raise ValueError(
"whoops. ytParameter instances can only have single values"
)
return p[0]
class ytDataObjectAbstract(ytBaseModel):
# abstract class for all the data selectors to inherit from
def _run(self):
from yt.data_objects.data_containers import data_object_registry
the_args = []
funcname = getattr(self, "_yt_operation", type(self).__name__)
# get the function from the data object registry
val = data_object_registry[funcname]
# iterate through the arguments for the found data object
for arguments in val._con_args:
# print("the args:", arguments)
con_value = getattr(self, arguments)
# print(con_value)
# check that the argument is the correct instance
if isinstance(con_value, ytDataObjectAbstract):
# call the _run() function on the agrument
con_value = con_value._run()
the_args.append(con_value)
# if there is a dataset sitting in _data_source, add it to the args and call as
# a keyword argument
if len(self._data_source) > 0:
ds = list(self._data_source.values())[0]
return val(*the_args, ds=ds)
| 38.21164 | 88 | 0.608003 |
acf4b8e4fa7d98727e20fb25cc517c63eeae9bb4 | 2,268 | py | Python | test_data/build_inverted_and_forward.py | Razdeep/autocomplete | fbe73627d58805e137bef2ebb10945cd845c5928 | [
"MIT"
] | 26 | 2020-05-12T10:55:48.000Z | 2022-03-07T10:57:37.000Z | test_data/build_inverted_and_forward.py | Razdeep/autocomplete | fbe73627d58805e137bef2ebb10945cd845c5928 | [
"MIT"
] | 3 | 2021-08-04T18:27:03.000Z | 2022-02-08T11:22:53.000Z | test_data/build_inverted_and_forward.py | Razdeep/autocomplete | fbe73627d58805e137bef2ebb10945cd845c5928 | [
"MIT"
] | 3 | 2020-05-24T08:07:29.000Z | 2021-07-19T09:59:56.000Z | import sys
# Builds a forward index and an inverted index for the corpus <input>:
#   <input>.dict         one token per line (defines the token -> id mapping)
#   <input>.mapped.stats num_terms / (skipped) / (skipped) / num_docs
#   <input>              one document per line: "<doc_id> <token> <token> ..."
# Outputs:
#   <input>.forward      per document: "<count> <term ids in first-seen order>"
#   <input>.inverted     per term id 1..num_terms: "<count> <sorted doc ids>"


def build_dictionary(path):
    """Maps each token (one per line in *path*) to a 1-based id.

    Id 0 is reserved to mark the end of a string, so ids start at 1.
    """
    tokens = {}
    with open(path) as f:
        for token_id, line in enumerate(f, start=1):
            tokens[line.rstrip('\n')] = token_id
    return tokens


def read_stats(path):
    """Reads (num_terms, num_docs) from the .mapped.stats companion file."""
    with open(path) as f:
        num_terms = int(f.readline())
        print("terms: " + str(num_terms))
        f.readline()  # skip line: max num. of query terms
        f.readline()  # skip line: num. of completions
        num_docs = int(f.readline())
        print("universe: " + str(num_docs))
    return num_terms, num_docs


def build_indexes(path, tokens, num_terms, num_docs):
    """Scans the corpus once, building both the inverted and forward indexes."""
    inverted_index = [[] for _ in range(num_terms + 1)]  # id 0 is not assigned
    forward_index = [[] for _ in range(num_docs)]
    lines = 0
    with open(path, 'r') as f:
        for line in f:
            fields = line.rstrip('\n').split()
            doc_id = int(fields[0])
            mapped = []   # term ids in first-seen order
            seen = set()  # O(1) duplicate test (was O(n) list membership)
            discard = False
            for term in fields[1:]:
                try:
                    try:
                        term_id = tokens[term]
                    except KeyError:
                        print("'" + term + "' not found in dictionary")
                        print(line)
                        sys.exit(1)  # fix: a data error should exit non-zero
                    if term_id not in seen:
                        seen.add(term_id)
                        inverted_index[term_id].append(doc_id)
                        mapped.append(term_id)
                except UnicodeDecodeError:
                    # Kept from the original: a malformed term discards the
                    # whole document's forward list (it stays empty).
                    discard = True
            if not discard:
                # NOTE: posting lists are not sorted here!
                if doc_id >= num_docs:
                    # fix: this was a Python-2 print statement (a syntax error
                    # on Python 3); the assignment below still raises for an
                    # out-of-range doc id, as before.
                    print(doc_id, num_docs)
                forward_index[doc_id] = mapped
            lines += 1
            if lines % 1000000 == 0:
                print("processed " + str(lines) + " lines")
    return inverted_index, forward_index


def write_forward(path, forward_index):
    """Writes one line per document: <count> <term ids...>."""
    with open(path, 'w') as out:
        for doc_terms in forward_index:
            out.write(str(len(doc_terms)) + " ")
            out.write(" ".join(str(t) for t in doc_terms) + "\n")


def write_inverted(path, inverted_index):
    """Writes one line per term id (1..num_terms): <count> <sorted doc ids>."""
    with open(path, 'w') as out:
        for posting_list in inverted_index[1:]:
            unique = sorted(set(posting_list))  # remove any possible duplicate
            out.write(str(len(unique)) + " ")
            out.write(" ".join(str(d) for d in unique) + "\n")


def main():
    input_filename = sys.argv[1]
    print("building dictionary...")
    tokens = build_dictionary(input_filename + ".dict")
    num_terms, num_docs = read_stats(input_filename + ".mapped.stats")
    inverted_index, forward_index = build_indexes(
        input_filename, tokens, num_terms, num_docs)
    # Outputs are written only after indexing succeeds (the original created
    # and truncated both output files before any processing).
    write_forward(input_filename + ".forward", forward_index)
    write_inverted(input_filename + ".inverted", inverted_index)


if __name__ == "__main__":
    main()
| 30.24 | 74 | 0.542328 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.