id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
4833018 | # Copyright 2013 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface to the inspyred heuristic optimization framework
Wraps the GLPK solver by subclassing and extending :class:`Model`,
:class:`Variable`, and :class:`Constraint` from :mod:`interface`.
"""
import logging
import random
import types
log = logging.getLogger(__name__)
import sympy
import interface
class Variable(interface.Variable):
    """Inspyred-backend variable.

    Adds no behaviour on top of :class:`interface.Variable`; the subclass
    exists so this backend mirrors the structure of the other solver
    interfaces.
    """
    def __init__(self, *args, **kwargs):
        super(Variable, self).__init__(*args, **kwargs)
class Objective(interface.Objective):
    """Objective function for the inspyred backend.

    The expression may be a :mod:`sympy` expression or any other
    printable/callable object; ``__str__`` falls back to the object's own
    string conversion for non-sympy expressions.
    """

    def __init__(self, expression, *args, **kwargs):
        super(Objective, self).__init__(expression, *args, **kwargs)

    @property
    def value(self):
        # Last objective value stored by the optimizer.
        return self._value

    def __str__(self):
        if not isinstance(self.expression, sympy.Basic):
            # Non-sympy expressions render themselves.
            return self.expression.__str__()
        return super(Objective, self).__str__()

    @property
    def expression(self):
        return self._expression

    @expression.setter
    def expression(self, value):
        self._expression = value
class VariableBounder(object):
    """inspyred-style bounder that clips candidate values to variable bounds.

    Continuous variables are clamped to ``[lb, ub]``; integer variables are
    snapped to the nearest integer in ``[lb, ub]`` (ties go to the lower
    value); binary variables are snapped to the nearer of 0 and 1.
    """

    def __init__(self, model):
        # The model supplies the ordered list of decision variables.
        self.model = model

    def __call__(self, candidate, args):
        """Return a copy of *candidate* with every value forced into bounds.

        :param candidate: Sequence of raw values, aligned with
            ``self.model.variables``.
        :param args: inspyred's args dict (unused).
        :raises ValueError: If a variable has an unrecognised type.  The
            previous implementation silently skipped such variables, which
            returned a shorter candidate and misaligned all later values.
        """
        variables = self.model.variables
        bounded_candidate = list()
        for c, variable in zip(candidate, variables):
            if variable.type == 'continuous':
                bounded_candidate.append(max(min(c, variable.ub), variable.lb))
            elif variable.type == 'integer':
                # Nearest integer within bounds; min() keeps the first
                # (lower) value on ties.
                bounded_candidate.append(min(range(variable.lb, variable.ub + 1), key=lambda x: abs(x - c)))
            elif variable.type == 'binary':
                bounded_candidate.append(min([0, 1], key=lambda x: abs(x - c)))
            else:
                raise ValueError(
                    "Unsupported variable type %r for variable %r; expected "
                    "'continuous', 'integer' or 'binary'." % (variable.type, variable))
        return bounded_candidate
class Configuration(interface.EvolutionaryOptimizationConfiguration):
    """Configuration of an inspyred heuristic optimization run.

    Holds the algorithm instance plus population/termination settings and
    exposes the algorithm's pluggable operators (selector, variator,
    replacer, migrator, archiver, observer, terminator, topology) as
    properties.
    """

    class SubConfiguration(object):
        """Plain attribute namespace used to group related options."""
        pass

    def __init__(self, *args, **kwargs):
        super(Configuration, self).__init__(*args, **kwargs)
        # BUG FIX: the algorithm must be instantiated with the random
        # module (the old code stored the GA *class*, unlike the
        # `algorithm` setter which instantiates every algorithm).
        self._algorithm = inspyred.ec.GA(random)
        self._algorithm.terminator = [inspyred.ec.terminators.time_termination,
                                      inspyred.ec.terminators.generation_termination,
                                      inspyred.ec.terminators.evaluation_termination,
                                      inspyred.ec.terminators.diversity_termination,
                                      inspyred.ec.terminators.average_fitness_termination]
        # Population / termination settings (None means "not set").
        self.pop_size = 100
        self.seeds = []
        self.max_generations = 1
        self.max_evaluations = None
        self.max_time = None
        # Selection options.
        self.selector_config = self.SubConfiguration()
        self.selector_config.num_selected = None
        self.selector_config.tournament_size = 2
        self.selector_config.num_elites = 0
        # Variation options.
        self.variator_config = self.SubConfiguration()
        self.variator_config.mutation_rate = .1
        self.variator_config.crossover_rate = 1.
        self.variator_config.num_crossover_points = 1
        # Topology options (used by swarm algorithms).
        self.topology_config = self.SubConfiguration()
        self.topology_config.neighborhood_size = 5
        # Particle swarm options.
        self.swarm_config = self.SubConfiguration()
        self.swarm_config.inertia = 0.5
        self.swarm_config.cognitive_rate = 2.1
        self.swarm_config.social_rate = 2.1

    @property
    def selector(self):
        return self._algorithm.selector

    @selector.setter
    def selector(self, value):
        # Consistency fix: assign through the backing attribute like every
        # other setter (the old code went through the `algorithm` property).
        self._algorithm.selector = value

    @property
    def variator(self):
        return self._algorithm.variator

    @variator.setter
    def variator(self, value):
        self._algorithm.variator = value

    @property
    def replacer(self):
        return self._algorithm.replacer

    @replacer.setter
    def replacer(self, value):
        self._algorithm.replacer = value

    @property
    def migrator(self):
        return self._algorithm.migrator

    @migrator.setter
    def migrator(self, value):
        self._algorithm.migrator = value

    @property
    def archiver(self):
        return self._algorithm.archiver

    @archiver.setter
    def archiver(self, value):
        self._algorithm.archiver = value

    @property
    def observer(self):
        return self._algorithm.observer

    @observer.setter
    def observer(self, value):
        self._algorithm.observer = value

    @property
    def terminator(self):
        return self._algorithm.terminator

    @terminator.setter
    def terminator(self, value):
        self._algorithm.terminator = value

    @property
    def topology(self):
        return self._algorithm.topology

    @topology.setter
    def topology(self, value):
        if value == 'Ring':
            self._algorithm.topology = inspyred.swarm.topologies.ring_topology
        elif value == 'Star':
            self._algorithm.topology = inspyred.swarm.topologies.star_topology
        elif isinstance(value, types.FunctionType):
            self._algorithm.topology = value
        else:
            # BUG FIX: the format argument was missing, so the message
            # showed a literal '%s'.
            raise ValueError(
                "%s is not a supported topology. Try 'Star' or 'Ring' instead." % value)

    @property
    def algorithm(self):
        return self._algorithm

    @algorithm.setter
    def algorithm(self, value):
        """Replace the algorithm, recycling the previous operators if any."""
        init = False
        try:
            # Remember the currently configured operators so they survive
            # the switch to the new algorithm.
            previous_selector = self._algorithm.selector
            previous_variator = self._algorithm.variator
            previous_replacer = self._algorithm.replacer
        except AttributeError:
            # No algorithm configured yet (first assignment).
            init = True
        if value == "EvolutionaryComputation":
            # BUG FIX: instantiate with `random` like every other branch
            # (the class itself was stored before).
            self._algorithm = inspyred.ec.EvolutionaryComputation(random)
        elif value == "GeneticAlgorithm" or value == "GA":
            self._algorithm = inspyred.ec.GA(random)
        elif value == "ParticleSwarmOptimization" or value == "PSO":
            self._algorithm = inspyred.swarm.PSO(random)
        elif value == "AntColonySystem" or value == "ACS":
            self._algorithm = inspyred.swarm.ACS(random)
        elif value == "EvolutionaryStrategy" or value == "ES":
            self._algorithm = inspyred.ec.ES(random)
        elif value == "DifferentialEvolutionaryAlgorithm" or value == "DEA":
            self._algorithm = inspyred.ec.DEA(random)
        elif value == "SimulatedAnnealing" or value == "SA":
            self._algorithm = inspyred.ec.SA(random)
        elif value == "NSGA2":
            self._algorithm = inspyred.emo.NSGA2(random)
        elif value == "PAES":
            self._algorithm = inspyred.emo.PAES(random)
        elif value == "Pareto":
            self._algorithm = inspyred.emo.Pareto(random)
        else:
            # BUG FIX: the format argument was missing from the message.
            raise ValueError(
                "%s is not a supported algorithm. Try one of the following instead: "
                "'GeneticAlgorithm', 'ParticleSwarmOptimization', 'EvolutionaryStrategy'." % value)
        if init is False:
            # Recycle the operators of the previous algorithm.
            self._algorithm.selector = previous_selector
            self._algorithm.variator = previous_variator
            self._algorithm.replacer = previous_replacer
        # TODO: also recycle migrator/archiver/observer/terminator.

    def _evolve_kwargs(self):
        """Collect non-None keyword arguments for ``algorithm.evolve(...)``.

        BUG FIX: the options live partly on ``self`` and partly on the
        sub-configurations; the old code only did ``getattr(self, key)``,
        which raised AttributeError for e.g. 'tournament_size', and then
        returned an empty dict anyway (dead code).
        """
        sources = (self, self.selector_config, self.variator_config,
                   self.topology_config)
        valid_evolve_kwargs = (
            'max_generations', 'max_evaluations', 'pop_size',
            'neighborhood_size', 'tournament_size', 'mutation_rate')
        filtered_evolve_kwargs = dict()
        for key in valid_evolve_kwargs:
            for source in sources:
                attr_value = getattr(source, key, None)
                if attr_value is not None:
                    filtered_evolve_kwargs[key] = attr_value
                    break
        return filtered_evolve_kwargs
class Model(interface.Model):
    """Heuristic-optimization model driven by an inspyred algorithm.

    The objective expression may be a sympy expression (evaluated by
    substitution) or a callable taking a ``{variable: value}`` dict.
    """

    def __init__(self, algorithm=None, *args, **kwargs):
        super(Model, self).__init__(*args, **kwargs)
        self.configuration = Configuration()
        # Default to a genetic algorithm when none is requested.
        if algorithm is None:
            self.configuration.algorithm = "GA"
        else:
            self.configuration.algorithm = algorithm
        self._bounder = VariableBounder(self)
        # NOTE: the redundant no-op `self._generator = self._generator`
        # present in the original code has been removed.

    def _generator(self, random, args):
        """Create one random candidate respecting each variable's bounds."""
        individual = list()
        for variable in self.variables:
            if variable.type == 'continuous':
                individual.append(random.uniform(variable.lb, variable.ub))
            else:
                # Integer and binary variables get a random integer in range.
                individual.append(random.choice(range(variable.lb, variable.ub + 1)))
        return individual

    def _evaluator(self, candidates, args):
        """Compute the fitness of every candidate in the population."""
        fitness = list()
        for candidate in candidates:
            substitution_dict = dict(zip(self.variables, candidate))
            if isinstance(self.objective.expression, sympy.Basic):
                # Sympy expressions are evaluated by substitution.
                fitness.append(self.objective.expression.subs(substitution_dict))
            else:
                # Otherwise the expression is assumed to be callable.
                fitness.append(self.objective.expression(substitution_dict))
        return fitness

    def optimize(self, *args, **kwargs):
        """Run the configured algorithm and return the final population."""
        final_population = self.configuration.algorithm.evolve(
            generator=self._generator,
            evaluator=self._evaluator,
            bounder=self._bounder,
            pop_size=self.configuration.pop_size,
            # inspyred expects a boolean `maximize` flag.
            maximize={'max': True, 'min': False}[self.objective.direction],
            max_generations=self.configuration.max_generations,
            max_evaluations=self.configuration.max_evaluations,
            neighborhood_size=self.configuration.topology_config.neighborhood_size,
            mutation_rate=self.configuration.variator_config.mutation_rate,
            tournament_size=self.configuration.selector_config.tournament_size
        )
        return final_population
if __name__ == '__main__':
    # Demo: minimise the Rosenbrock function with this inspyred backend.
    # from optlang.interface import Objective, Variable
    import numpy
    import inspyred
    x = Variable('x', lb=0, ub=2)
    y = Variable('y', lb=0, ub=2)
    # Rosenbrock "banana" function; its global minimum is 0 at (1, 1).
    rosenbrock_obj = Objective((1 - x) ** 2 + 100 * (y - x ** 2) ** 2, name="<NAME>", direction='min')
    print("The rosenbrock function:", rosenbrock_obj)
    print("The global minimum at (x,y) = (1,1) is", rosenbrock_obj.expression.subs({x: 1, y: 1}))
    problem = Model(name='rosenbrock', algorithm='PSO')
    # problem = Model(name='rosenbrock')
    problem.objective = rosenbrock_obj

    def my_observer(population, num_generations, num_evaluations, args):
        # Print the best individual of each generation.
        best = max(population)
        print(('{0:6} -- {1} : {2}'.format(num_generations,
                                           best.fitness,
                                           str(best.candidate))))

    problem.configuration.max_generations = 100
    problem.configuration.terminator = inspyred.ec.terminators.generation_termination
    problem.configuration.observer = my_observer
    problem.configuration.selector = inspyred.ec.selectors.tournament_selection
    final_pop = problem.optimize()
    # Summarise the fitness of the final population.
    fitnesses = [individual.fitness for individual in final_pop]
    print(fitnesses)
    print("mean", numpy.mean(fitnesses))
    print("max", numpy.max(fitnesses))
    print("min", numpy.min(fitnesses))
    # print numpy.std(fitnesses)
| StarcoderdataPython |
3206500 | try:
from os import makedirs
from shutil import copyfile
from os.path import join, exists
except ImportError as err:
exit(err)
if __name__ == "__main__":
    # Path where the original Kaggle dogs-vs-cats training set was
    # uncompressed.
    original_dataset_dir = "C:/Users/e_sgouge/Documents/Etienne/Python/Reconnaissance_chiffre/datas/dogs-vs-cats/train"
    # Root of the smaller dataset we are about to build.
    base_dir = "C:/Users/e_sgouge/Documents/Etienne/Python/Reconnaissance_chiffre/datas/dogs_vs_cats"
    makedirs(base_dir, exist_ok=True)

    # Create train/validation/test directories, each with a cats/ and a
    # dogs/ sub-directory.
    class_dirs = {}
    for split in ("train", "validation", "test"):
        split_dir = join(base_dir, split)
        makedirs(split_dir, exist_ok=True)
        for animal in ("cats", "dogs"):
            class_dir = join(split_dir, animal)
            makedirs(class_dir, exist_ok=True)
            class_dirs[(split, animal)] = class_dir

    def copyFiles(filename, dir, start, stop):
        # Copy files named filename.format(i) for i in [start, stop) from
        # the original dataset into *dir*, skipping files already present.
        global original_dataset_dir
        for index in range(start, stop):
            fname = filename.format(index)
            src = join(original_dataset_dir, fname)
            dst = join(dir, fname)
            if not exists(dst):
                copyfile(src, dst)

    # 1000 training / 500 validation / 500 test images per class.
    split_ranges = (("train", 0, 1000),
                    ("validation", 1000, 1500),
                    ("test", 1500, 2000))
    for animal, pattern in (("cats", "cat.{}.jpg"), ("dogs", "dog.{}.jpg")):
        for split, start, stop in split_ranges:
            copyFiles(pattern, class_dirs[(split, animal)], start, stop)
| StarcoderdataPython |
166937 | <filename>tests/b901.py
"""
Should emit:
B901 - on lines 9, 36
"""
# NOTE(review): this is a flake8-bugbear *test fixture* file.  The
# "broken" functions below are intentionally wrong (they mix `return
# <value>` with `yield` in the same generator) so the linter can be
# checked against them -- do not "fix" them.
def broken():
    # B901: returns a value AND yields in the same generator.
    if True:
        return [1, 2, 3]
    yield 3
    yield 2
    yield 1


def not_broken():
    # A bare `return` inside a generator just stops iteration -- allowed.
    if True:
        return
    yield 3
    yield 2
    yield 1


def not_broken2():
    # Plain function returning a generator object -- allowed.
    return not_broken()


def not_broken3():
    # Bare return before `yield from` -- allowed.
    return
    yield from not_broken()


def broken2():
    # B901: returns a value in a generator (`yield from` makes it one).
    return [3, 2, 1]
    yield from not_broken()


async def not_broken4():
    # Coroutine, not a generator: returning a value is fine.
    import asyncio
    await asyncio.sleep(1)
    return 1


def actually_not_broken():
    # Explicitly silenced with `noqa`.
    yield 2
    return 1  # noqa


def not_broken5():
    # The `return` belongs to the nested non-generator function.
    def inner():
        return 2
    yield inner()


def not_broken6():
    # PEP 380: returning the value of a `yield from` is allowed.
    return (yield from [])


def not_broken7():
    x = yield from []
    return x


def not_broken8():
    # The delegated value is captured via a nonlocal before returning.
    x = None
    def inner(ex):
        nonlocal x
        x = ex
    inner((yield from []))
    return x


class NotBroken9(object):
    def __await__(self):
        # Custom awaitable: `return` after `yield from` is the awaited value.
        yield from function()
        return 42
| StarcoderdataPython |
1641456 | from django.test import TestCase, SimpleTestCase
from django.test.client import Client
from django.urls import reverse, resolve
from .views import *
from .models.productModel import Product, ProductReviewModel
# uso SimpleTestCase per verificare l'uguaglianza di due url
# SimpleTestCase is enough here: URL resolution needs no database.
class TestUrls(SimpleTestCase):
    """Check that every named Store URL resolves to the expected view."""

    def _assert_url_resolves(self, url_name, expected_view, args=None):
        # Helper: reverse the named URL, resolve it back, and assert the
        # resolved callable is the expected view function.  Replaces twelve
        # copies of the same two lines.
        url = reverse(url_name, args=args)
        self.assertEqual(resolve(url).func, expected_view)

    def test_index_url_is_resolved(self):
        self._assert_url_resolves('Store:Base', base)

    def test_login_url_is_resolved(self):
        self._assert_url_resolves('Store:Accedi', login_view)

    def test_register_url_is_resolved(self):
        self._assert_url_resolves('Store:Registration', register)

    def test_logout_is_resolves(self):
        self._assert_url_resolves('Store:Logout', logout_view)

    def test_search_url_is_resolves(self):
        self._assert_url_resolves('Store:SearchBar', search_bar)

    def test_product_review_url_is_resolves(self):
        self._assert_url_resolves('Store:ProductReview', product_review, args=[1])

    def test_price_url_is_resolves(self):
        self._assert_url_resolves('Store:price', price)

    def test_filter_price_is_resolves(self):
        self._assert_url_resolves('Store:filter-price', filter_price)

    def test_lista_prodotti_url_is_resolves(self):
        self._assert_url_resolves('Store:lista-prodotti', lista_prodotti_amministratore)

    def test_recommended_products_url_is_resolves(self):
        self._assert_url_resolves('Store:recommended-products', recommended_products_view)

    def test_modifica_prodotto_url_is_resolves(self):
        self._assert_url_resolves('Store:modifica-prodotto', modifica_prodotto, args=[1])

    def test_send_email_url_is_resolves(self):
        self._assert_url_resolves('Store:send-email', send_email, args=[1])
# Il client di test è una classe Python che funge da browser Web fittizio, consentendo di testare le visualizzazioni
# e interagire con l'applicazione basata su Django a livello di programmazione.
# The Django test client acts as a dummy browser, letting us exercise the
# views programmatically.
class StoreViewsTestCase(TestCase):
    """Smoke tests: the main Store views answer with HTTP 200."""

    def setUp(self):
        # Two clients: one logged in, one anonymous.
        self.clientLogged = Client()
        self.clientUnlogged = Client()
        self.usrLogin = User.objects.create(username='user', password='<PASSWORD>')
        self.clientLogged.login(username=self.usrLogin.username, password=self.usrLogin.password)
        self.url_risultati_ricerca = reverse('Store:SearchBar')
        self.url_registrazione = reverse('Store:Registration')
        self.url_recensioni = reverse('Store:ProductReview', args=[1])

    def test_risultati_ricerca(self):
        # A GET on the search page must succeed for a logged-in client.
        search_response = self.clientLogged.get(self.url_risultati_ricerca)
        self.assertEqual(search_response.status_code, 200)

    def test_registrazione(self):
        # Registration must be reachable anonymously.
        registration_response = self.clientUnlogged.get(self.url_registrazione)
        self.assertEqual(registration_response.status_code, 200)

    def test_recensioni(self):
        # Reviews are reachable both anonymously and when logged in.
        for client in (self.clientUnlogged, self.clientLogged):
            review_response = client.get(self.url_recensioni)
            self.assertEqual(review_response.status_code, 200)
class TestUser(TestCase):
    """Sanity checks for the default Django User model."""

    def setUp(self):
        # One regular user created through the manager helper.
        self.user = User.objects.create_user(
            username='testuser',
            email='<EMAIL>',
            password='<PASSWORD>',
        )

    def test_username(self):
        expected_username = 'testuser'
        self.assertEqual(self.user.username, expected_username)

    def test_email(self):
        expected_email = '<EMAIL>'
        self.assertEqual(self.user.email, expected_email)
class TestModelsStore(TestCase):
    """Model-level tests for products and their reviews."""

    def setUp(self):
        self.user = User.objects.create(username='user', password='<PASSWORD>')
        # A product and a five-star review attached to it.
        product_fields = dict(
            name='Montale',
            price=150,
            brand='Montale',
            description='test',
            quantity=10,
        )
        self.prodotto = Product.objects.create(**product_fields)
        self.recensione = ProductReviewModel.objects.create(
            product=self.prodotto,
            user=self.user,
            content='Very good',
            stars=5,
        )

    def test_aggiunta_recensione(self):
        # The review must point back to the product it was created for.
        review = self.recensione
        self.assertEqual(review.product.name, 'Montale')
| StarcoderdataPython |
3268157 | <filename>djangostripe/customer_service/apps.py
from django.apps import AppConfig
class CustomerServiceConfig(AppConfig):
    """Django application configuration for the customer_service app."""
    # Dotted path (label) of the application.
    name = 'customer_service'
3201161 | """
Module that contains the code to replace the long forms by short forms
in the corpus.
"""
import os
from typing import List, Tuple
import random
import string
from collections import OrderedDict
import pandas as pd
from engine.utils.preprocessing import Preprocessor
from engine.utils.preprocess_utils import delete_overlapping_tuples
from engine.preprocess.preprocess_superclass import Preprocess
class ReplaceLongForms(Preprocess):
    """
    When the instance of the class is executed, it will replace the
    long forms by short forms.
    You can define the probability of a substitution, and the min length
    of the abstracts.
    """
    def __init__(self, dataset: str, df_dictionary: pd.DataFrame, probability: float = 0.3, length_abstract: int = 200) -> None:
        super().__init__(dataset, df_dictionary)
        # Input/output sub-folders relative to the paths set by Preprocess.
        self.input_path = self.input_path + str("identified")
        self.output_path = self.output_path + str("replaced")
        # Probability that any given long-form occurrence is replaced.
        self.probability = probability
        self.preprocessor = Preprocessor(num_words_to_remove=50, remove_punctuation=False)
        # Abstracts shorter than this (in characters) are skipped.
        self.length_abstract = length_abstract
        # Length of the random key prefix shared by all rows of one batch.
        self.len_batch_key = 8
        # Number of replacements performed per long form, across all files.
        self.long_form_counts: OrderedDict[str, int] = OrderedDict(
            dict.fromkeys(self.dictionary.keys(), 0)
        )
        # Unique row keys of the abstracts where each long form was replaced.
        self.long_form_loc: OrderedDict[str, list] = OrderedDict({
            key: [] for key in self.dictionary.keys()
        })
        # ^ Creating the dict with dict.fromkeys was appending to all keys, that is why I changed

    def __call__(self) -> None:
        """
        When the instance of the class is executed, it will replace the
        long forms by short forms.
        """
        super().__call__()
        super().batch_run()

    def batch_run(self) -> None:
        """
        Empty because the super class method is used.
        """

    def decision(self) -> bool:
        """
        Return True/False based on a given probability.
        """
        return random.random() < self.probability

    def generate_key(self) -> str:
        """
        Function to generate a unique key for each abstract using the
        pubmed file name and the row.
        """
        # Random uppercase-alphanumeric key of length self.len_batch_key.
        return ''.join(
            random.choice(string.ascii_uppercase + string.digits)
            for _ in range(self.len_batch_key)
        )

    def replace_abstract(
        self,
        abstract: str,
        long_forms: List[str],
        span: List[Tuple[int, int]],
    ) -> Tuple[str, List[str], List[Tuple[int, int]]]:
        """
        Given an abstract, it will replace the long forms by short forms.
        If the short form was already in the text, it wont add more short forms.
        Will return the replaced abstract and the list spans.
        """
        replaced_abstract: str = abstract
        span_updated: List[Tuple[int, int]] = []
        long_forms_updated: List[str] = []
        # Map each (start, end) span to the long form found there.
        # NOTE(review): duplicate spans would collide in this dict -- assumed
        # not to occur in the identified data; verify upstream.
        dict_span_lf = dict(zip(span, long_forms))
        # Deal with tuple overlapping
        clean_tuples = delete_overlapping_tuples(span)
        # Iterave over each long form and span.
        # correction_index accumulates the length difference between the
        # short forms already substituted and the long forms they replaced,
        # so later spans (expressed in original-abstract coordinates) still
        # line up with the partially rewritten string.
        correction_index: int = 0
        for tup in clean_tuples:
            if self.decision():
                long_form = dict_span_lf[tup]
                # Splice the short form in place of the long form.
                replaced_abstract = str(
                    replaced_abstract[: tup[0] + correction_index] + self.dictionary[long_form] +
                    replaced_abstract[tup[1] + correction_index :],
                )
                # Span of the inserted short form in the NEW string.
                span_updated.append((
                    tup[0] + correction_index,
                    tup[0] + correction_index + len(self.dictionary[long_form])
                ))
                correction_index = correction_index + len(self.dictionary[long_form]
                                                          ) - len(long_form)
                long_forms_updated.append(long_form)
        return replaced_abstract, long_forms_updated, span_updated

    def single_run(self, filename: str) -> None:
        """
        Will load the csv file with the abstracts and replace the long
        forms by short forms. Will add a unique key to each output row.
        The key is generated using the filename.
        """
        # Open csv file with abstracts.
        # NOTE(review): eval-based converters execute the CSV cell contents;
        # safe only for trusted, self-produced files.
        df_abstracts = pd.read_csv(
            os.path.join(self.input_path, filename), converters={'long_forms': eval, 'span': eval}
        )
        df_results: pd.DataFrame = pd.DataFrame(
            columns=['long_forms', 'span_short_form', 'replaced_abstract', 'unique_key']
        )
        batch_key: str = self.generate_key()
        for i, row in df_abstracts.iterrows():
            # check that the list is not empy and the length of the abstract.
            if row['long_forms'] != [] and len(row['abstract']) > self.length_abstract:
                # replace long forms. Need to convert span to tuples
                replaced_abstract, long_forms_updated, span_updated = self.replace_abstract(
                    row['abstract'], row['long_forms'], row['span']
                )
                # Store in dataframe if not empty
                if long_forms_updated != []:
                    # Define unique key: batch prefix + row index.
                    unique_key: str = batch_key + "_" + str(i)
                    # Update the dictionaries with the counts and keys
                    for long_form in long_forms_updated:
                        self.long_form_counts[long_form] += 1
                        self.long_form_loc[long_form].append(unique_key)
                    # Export to df
                    df_results = df_results.append({
                        'long_forms': long_forms_updated, 'span_short_form': span_updated,
                        'replaced_abstract': replaced_abstract, 'unique_key': unique_key
                    },
                        ignore_index=True)
        # Export df to csv (one file per batch, named after the batch key).
        new_filename: str = "{}.csv".format(batch_key)
        df_results.to_csv(os.path.join(self.output_path, new_filename), index=False)
        # Export dictionary to csv (cumulative replacement statistics).
        df_counts = pd.DataFrame(
            list(self.long_form_counts.items()), columns=['long_form', 'counts']
        )
        df_counts['unique_key'] = list(self.long_form_loc.values())
        df_counts.to_csv(os.path.join(self.output_path, "counts.csv"), index=False)
3238950 | #!/usr/local/bin/python3
# -*- coding: utf-8 -*-
import sys
from PyQt5.QtWidgets import QWidget, QApplication
from PyQt5.QtGui import QPainter, QPainterPath
class Example(QWidget):
    """Small demo widget that paints a single cubic Bezier curve."""

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        # Window geometry, title, then show the widget.
        self.setGeometry(300, 300, 350, 350)
        self.setWindowTitle("Draw path (Paint bezier curve)")
        self.show()

    def paintEvent(self, e):
        # Repaint handler: draw the curve with antialiasing enabled.
        painter = QPainter()
        painter.begin(self)
        painter.setRenderHint(QPainter.Antialiasing)
        self.drawCurve(e, painter)
        painter.end()

    def drawCurve(self, e, p):
        # One cubic segment from (30, 30) back up to (350, 30).
        curve = QPainterPath()
        curve.moveTo(30, 30)
        curve.cubicTo(30, 30, 200, 350, 350, 30)
        p.drawPath(curve)
if __name__ == "__main__":
    # Start the Qt event loop with a single Example window.
    app = QApplication(sys.argv)
    window = Example()
    sys.exit(app.exec_())
| StarcoderdataPython |
1727450 | import os
import sys
import urllib
import urllib2
from bs4 import BeautifulSoup, Tag, NavigableString, Comment
import collections
import math
import csv
import codecs
import json
import inspect
import urlparse
import argparse
import traceback
import time
import datetime
import openpyxl
import Main_Extractor as main_ext
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.firefox.options import Options
from common import shared
from common import bsoup
from common import access_rest as rest
from common import page_group
from common import recurse_ftp as rec_ftp
from common import spreadsheet as sh
class PT_Extractor(main_ext.Extractor):
    def __init__(self):
        ''' Initializer for the Extractor class.

        Sets the province name, registers the three page groups
        (opendata, discovering, update) with their URLs and options, then
        initializes the base extractor and declares which command-line
        arguments apply to which extraction methods.
        '''
        # Set the province
        self.province = 'Ontario'
        # Create the page groups dictionary
        # NOTE(review): the list assignment is immediately overwritten by
        # the OrderedDict below and could be removed.
        self.page_groups = []
        self.page_groups = collections.OrderedDict()
        # Declare all the different types of pages
        pg_grp = page_group.PageGroup('opendata', "Ontario Open Data Catalogue")
        pg_grp.add_url('main_url', 'https://www.ontario.ca/search/data-catalogue')
        opts = {'format': ['csv', 'json', 'kml', 'mdb', 'txt', 'xls', 'xlsx', 'zip'],
                'status': ['open', 'to be opened', 'under review', 'restricted']}
        pg_grp.set_opts(opts)
        pg_grp.add_default('status', 'open')
        self.page_groups['opendata'] = pg_grp
        pg_grp = page_group.PageGroup('discovering', "Discovering Ontario")
        pg_grp.add_url('main_url', 'https://www.javacoeapp.lrc.gov.on.ca/geonetwork/srv/en/main.home')
        pg_grp.add_url('portal_url', 'https://www.javacoeapp.lrc.gov.on.ca/geonetwork/srv/en/main.search.embedded')
        pg_grp.add_url('mdata_url', 'https://www.javacoeapp.lrc.gov.on.ca/geonetwork/srv/en/metadata.show.embedded')
        self.page_groups['discovering'] = pg_grp
        pg_grp = page_group.PageGroup('update', "Update Discovering Ontario")
        pg_grp.add_url('mdata_url', 'https://www.javacoeapp.lrc.gov.on.ca/geonetwork/srv/en/metadata.show.embedded')
        self.page_groups['update'] = pg_grp
        # Initialize the Main Extractor to use its variables
        main_ext.Extractor.__init__(self)
        # Set the arguments for this extractor, each tied to the
        # extraction method(s) it applies to.
        self.argmt['word'] = main_ext.Ext_Arg('word',
                                              methlst=['opendata', 'discovering'])
        self.argmt['format'] = main_ext.Ext_Arg('format', methlst=['opendata'])
        self.argmt['status'] = main_ext.Ext_Arg('status', methlst=['opendata'])
        self.argmt['downloadable'] = main_ext.Ext_Arg('downloadable',
                                                      methlst=['discovering'])
        sp_opts = ['imagery', 'wms', 'mp3', 'application', 'maps', 'liowarehouse',
                   'publicwebservice', 'gisdata', 'opendata', 'non-commercialuse',
                   'geologyontario', 'elevation']
        self.argmt['subpage'] = main_ext.Ext_Arg('subpage',
                                                 methlst=['discovering'],
                                                 opts=sp_opts)
    def get_province(self):
        ''' Gets the province name of the extractor.
        :return: The province name of the extractor ('Ontario').
        '''
        return self.province
###################################################################################################################
    def get_discovering_mdata(self, mdata_soup, url, title_str):
        ''' Scrapes one Discovering Ontario metadata page into a record.

        :param mdata_soup: BeautifulSoup of the metadata page.
        :param url: URL of the metadata page (used for logging and output).
        :param title_str: Title of the dataset the page describes.
        :return: OrderedDict of metadata fields suitable for a CSV row.
            When the page fails check_result, every field except Title,
            Metadata URL and Notes is left empty.
        '''
        if self.check_result(mdata_soup, url, "Metadata Page - %s" % title_str, output=False):
            # Get date from adjacent element of <th> with text 'Date'
            date_str = bsoup.get_adj_text_by_label(mdata_soup, 'th', 'Date', url=url)
            # Get description from adjacent element of <th> with text 'Abstract'
            desc_str = bsoup.get_adj_text_by_label(mdata_soup, 'th', 'Abstract', url=url)
            #print desc_str
            # Get data type from adjacent element of <th> with text 'Environment description'
            dtype_str = bsoup.get_adj_text_by_label(mdata_soup, 'th', 'Environment description', url=url)
            # Get the publisher from adjacent element of <th> with text 'Organisation name'
            pub_str = bsoup.get_adj_text_by_label(mdata_soup, 'th', 'Organisation name', url=url)
            # Get Data URL(s): every 'Data for download' row contributes
            # the href of the link in its sibling <td>.
            download_items = mdata_soup.find_all('span', string='Data for download')
            data_links = []
            for i in download_items:
                th_html = i.parent
                td_html = th_html.find_next_sibling('td')
                data_link = td_html.a['href']
                data_links.append(data_link)
            # Get metadata type from adjacent element of <th> with text 'Metadata standard name'
            mdata_type = bsoup.get_adj_text_by_label(mdata_soup, 'th', 'Metadata standard name', url=url)
            # Get licence: first non-empty match among several headings
            # (in order of preference).
            #lic_str = bsoup.get_adj_text_by_label(mdata_soup, 'th', 'Access constraints')
            tags = [('th', 'Use limitation'), ('th', 'Other constraints'), ('th', 'Access constraints')]
            lic_str = self.get_mdata(mdata_soup, tags)
            #print mdata_url
            #print "lic_str: %s" % lic_str
            # Get the spatial reference (only present when the page has a
            # 'Reference System Information' section).
            ref_span = mdata_soup.find('span', string='Reference System Information')
            sp_str = ''
            if ref_span is not None:
                ref_td = bsoup.get_parent(ref_span, 'td')
                sp_str = bsoup.get_adj_text_by_label(ref_td, 'th', 'Code', url=url)
            # Fill in the access and download_str variables based on the number of
            # data_links
            if len(data_links) == 0:
                download_str = 'No'
                access = 'Contact the Province'
            elif len(data_links) == 1:
                download_str = data_links[0]
                access = 'Download/Web Accessible'
            else:
                download_str = 'Multiple Downloads'
                access = 'Download/Web Accessible'
        else:
            # Page could not be validated: emit an empty record so the
            # output still has one row per dataset.
            desc_str = ''
            pub_str = ''
            dtype_str = ''
            lic_str = ''
            date_str = ''
            access = ''
            download_str = ''
            sp_str = ''
            mdata_type = ''
            #mdata_url = ''
        # FOR DEBUG:
        # if self.debug:
        #     print "\n"
        #     print "Title: %s" % title_str
        #     print "Description: %s" % desc_str
        #     print "Publisher: %s" % pub_str
        #     print "Type: %s" % dtype_str
        #     print "Licensing: %s" % lic_str
        #     print "Date: %s" % date_str
        #     print "Access: %s" % access
        #     print "Download: %s" % download_str
        #     print "Spatial Reference: %s" % sp_str
        #     print "Metadata Type: %s" % mdata_type
        #     print "Metadata URL: %s" % url
        #     print "Notes: %s" % self.notes
        #     answer = raw_input("Press enter...")
        mdata_items = collections.OrderedDict()
        mdata_items['Title'] = title_str
        mdata_items['Description'] = desc_str
        mdata_items['Publisher'] = pub_str
        #pt_csv.add('Data URL'] = '|'.join(data_links)
        mdata_items['Type'] = dtype_str
        mdata_items['Licensing'] = lic_str
        mdata_items['Date'] = date_str
        mdata_items['Access'] = access
        mdata_items['Download'] = download_str
        mdata_items['Spatial Reference'] = sp_str
        mdata_items['Metadata Type'] = mdata_type
        mdata_items['Metadata URL'] = url
        mdata_items['Notes'] = self.notes
        return mdata_items
def get_category(self, cat_div):
# Get the onclick text to get the category
cat_text = cat_div['onclick']
#print "cat_text: %s" % cat_text
start_pos = cat_text.find("(")
end_pos = cat_text.find(")", start_pos)
category = cat_text[start_pos+2:end_pos-1]
#print "category: %s" % category
return category
def get_next_twenty(self, driver):
''' Gets the element with text 'Next 20'
:param driver: The Selenium driver
:return: Element with text 'Next 20'
'''
found_elements = driver.find_elements_by_class_name("secondary")
for el in found_elements:
if el.text.strip() == 'Next 20':
return el
def get_mdata(self, mdata_soup, heading_tups):
mdata_val = ''
for heading in heading_tups:
if mdata_val == '':
tag = heading[0]
name = heading[1]
mdata_val = bsoup.get_adj_text_by_label(mdata_soup, tag, name)
return mdata_val
    def extract_opendata(self): #, word=None, format=None, status=None):
        ''' Extracts the results from the Open Data Catalogue of Ontario.

        Filters are read from self.argmt (not parameters): 'word' is the
        keyword filter, 'format' the file-format filter and 'status' the
        dataset status ('open', 'to be opened', 'under review', anything
        else maps to restricted). Each dataset row is written to a CSV
        named "OpenData_results" via PT_CSV.
        :return: None
        '''
        # Get the filter parameters from the argument manager
        word = self.argmt['word'].get_value()
        format = self.argmt['format'].get_value()  # NOTE: shadows the builtin `format`
        status = self.argmt['status'].get_value()
        # URL examples for statuses:
        # https://www.ontario.ca/search/data-catalogue?sort=asc&status=%5B%2230%22%5D
        # https://www.ontario.ca/search/data-catalogue?sort=asc&status=["30"]
        # Full example of possible URL:
        # https://www.ontario.ca/search/data-catalogue?
        #   sort=asc
        #   &query=gis
        #   &filetype=%5B%22csv%22%5D
        #   &publisher=%5B%22Advanced%20Education%20and%20Skills%20Development%22%5D
        #   &topic=%5B%22Arts%20and%20culture%22%5D
        #   &status=%5B%2230%22%5D
        # Different statuses:
        #   30 - Open
        #   20 - To be opened
        #   10 - Under review
        #   0 - Restricted
        main_url = self.pg_grp.get_url('main_url')
        self.print_log("\nExtracting from %s" % self.pg_grp.get_title())
        self.print_title("Extracting Ontario's Open Data Catalogue")
        start_time = datetime.datetime.now()
        print "Process started at: %s" % start_time
        # Create the CSV file
        csv_fn = "OpenData_results"
        pt_csv = sh.PT_CSV(csv_fn, self)
        pt_csv.open_csv()
        # Set the parameters for the URL query (only non-empty filters are sent)
        params = collections.OrderedDict()
        if word is not None and not word == '':
            params['query'] = word
        if format is not None and not format == '':
            params['filetype'] = '["%s"]' % format
        if status is not None and not status == '':
            # Map the human-readable status to the site's numeric code
            if status.lower() == 'open':
                status_val = '30'
            elif status.lower() == 'to be opened':
                status_val = '20'
            elif status.lower() == 'under review':
                status_val = '10'
            else:
                status_val = '0'
            params['status'] = '["%s"]' % status_val
        # Build the URL query
        query_url = shared.get_post_query(main_url, params)
        print "\nQuery URL: %s" % query_url
        # Create a Selenium driver with a headless Firefox
        #options = Options()
        #options.add_argument("--headless")
        driver = webdriver.Firefox()#firefox_options=options)
        driver.implicitly_wait(20)
        driver.get(query_url)
        # Wait for the element with ID "rechercheEdelForm:lancerRecherche" to load
        shared.wait_page_load(driver, By.CLASS_NAME, "results-page")
        # Continually click 'Next 20' button until it no longer exists,
        # so that the page DOM contains every result before scraping
        next_20 = self.get_next_twenty(driver)
        while next_20 is not None:
            next_20.click()
            driver.implicitly_wait(20)
            next_20 = self.get_next_twenty(driver)
        # Grab the results of the results page once all results are shown
        page_html = driver.page_source
        # Create the BeautifulSoup object
        soup = BeautifulSoup(page_html, 'html.parser')
        # Get all the datasets on the current page (all datasets are in a 'div' with class 'dataset-item')
        page_list = soup.find_all('ul', attrs={'class': 'results-page'})
        # Get the number of records for status purposes by parsing the
        # "... results" text out of the matching <span>
        res_span = bsoup.find_tags_containing(soup, 'results', 'span')
        res_text = res_span.text
        res_text = res_text.strip()
        end_pos = res_text.find(' results')
        num_results = res_text[:end_pos]
        print "The number of results is %s" % num_results
        record_count = 0
        # Cycle through each page DIV
        for page in page_list:
            li_res = page.find_all('li')
            for li in li_res:
                record_count += 1
                #print "Extracting record %s of %s" % (record_count, num_results)
                msg = "Extracting record %s of %s" % (record_count, num_results)
                shared.print_oneliner(msg)
                # Get the title and URL from the result's <h3><a> element
                h3 = li.find('h3')
                a = h3.find('a')
                title_str = a.contents[0].strip()
                mdata_href = a['href']
                mdata_url = urlparse.urljoin(query_url, mdata_href)
                date_str = ''
                pub_str = ''
                dt_list = li.find_all('dt')
                for dt in dt_list:
                    # Get the date
                    if dt.text.find("Date added") > -1:
                        dd_html = dt.find_next_sibling('dd')
                        date_str = dd_html.string.strip()
                    # Get the publisher
                    elif dt.text.find("Publisher") > -1:
                        dd_html = dt.find_next_sibling('dd')
                        pub_str = dd_html.text.strip()
                # Get the rest of the values using the metadata URL
                # presumably ('class', 'thumbs-down') identifies an element
                # bsoup.get_soup waits for — TODO confirm in bsoup module
                attrb = ('class', 'thumbs-down')
                mdata_soup = bsoup.get_soup(mdata_url, True, attrb)
                if self.check_result(mdata_soup, mdata_url,
                                     "Metadata Page - %s" % title_str, output=False):
                    # Get the description
                    div_pgbody = mdata_soup.find('div', attrs={'id': 'pagebody'})
                    desc_str = bsoup.get_text(div_pgbody)
                    # Get the downloads
                    h2 = bsoup.get_adj_tags_by_text(mdata_soup, 'h2', 'Download data', True)
                    if len(h2) == 0:
                        download_str = 'No'
                        access = 'Contact the Province'
                        format_list = []
                    else:
                        dwnload_div = h2[0].parent
                        anchors = dwnload_div.find_all('a')
                        if len(anchors) == 0:
                            download_str = 'No'
                            access = 'Contact the Province'
                        elif len(anchors) == 1:
                            download_str = anchors[0]['href']
                            access = "Download/Web Accessible"
                        else:
                            download_str = 'Multiple Downloads'
                            access = "Download/Web Accessible"
                        # Get the formats from the downloads
                        # assumes each download anchor wraps an <abbr> holding
                        # the format label — TODO confirm against live page
                        format_list = []
                        for a in anchors:
                            format = a.abbr.text
                            format_list.append(format)
                else:
                    # Set the variables as empty strings if the metadata can't be loaded
                    desc_str = ''
                    date_str = ''
                    pub_str = ''
                    access = ''
                    download_str = ''
                    format_list = []
                    mdata_url = ''
                # Add all values to the CSV file
                pt_csv.add('Title', title_str)
                pt_csv.add('Description', shared.edit_description(desc_str))
                pt_csv.add('Date', date_str)
                pt_csv.add('Publisher', pub_str)
                pt_csv.add('Access', access)
                pt_csv.add('Download', download_str)
                #pt_csv.add('Licensing', lic_str)
                pt_csv.add('Available Formats', '|'.join(format_list))
                pt_csv.add('Metadata URL', mdata_url)
                pt_csv.add('Notes', self.notes)
                # Write the results to the CSV and clear per-record notes
                pt_csv.write_dataset()
                self.notes = ''
        driver.quit()
        pt_csv.close_csv()
        # Print ending time
        end_time = datetime.datetime.now()
        print "\nExtraction complete at %s." % end_time
        tot_time = end_time - start_time
        print "It took %s to complete." % tot_time
    def extract_discovering(self): #, word=None, downloadable=False):
        ''' Extracts results from the Land Information Ontario geoportal.

        Filters are read from self.argmt (not parameters): 'word' is the
        keyword filter, 'subpage' optionally restricts the run to one
        category, and 'downloadable' limits results to datasets with
        downloads. One "<Category>_results" CSV is written per category.
        :return: None
        '''
        # Get the filter parameters from the argument manager
        word = self.argmt['word'].get_value()
        subpage = self.argmt['subpage'].get_value()
        downloadable = self.argmt['downloadable'].get_value()
        self.print_log("\nExtracting from %s" % self.pg_grp.get_title())
        self.print_title("Extracting Discovering Ontario Data")
        start_time = datetime.datetime.now()
        print "Process started at: %s" % start_time
        main_url = self.pg_grp.get_url('main_url')
        portal_url = self.pg_grp.get_url('portal_url')
        mdata_query_url = self.pg_grp.get_url('mdata_url')
        # Load the main page to get a list of categories
        attrb = ['id', 'latest_updates']
        main_soup = bsoup.get_soup(main_url, True, attrb)
        print "\nMain URL: %s" % main_url
        if not self.check_result(main_soup, main_url, "Ontario Discover Portal"): return None
        divs = main_soup.find_all('div')
        # Find all <div> within <div> with class 'geosearchfields' to get the different categories
        cat_divs = []
        for div in divs:
            if div.has_attr('class'):
                #print div['class']
                if div['class'][0].find('geosearchfields') > -1:
                    cat_divs = div.find_all('div')
                    break
        # Convert the category list of elements to words
        categories = [self.get_category(c) for c in cat_divs]
        print "subpage: %s" % subpage
        print "categories: %s" % categories
        # If the subpage is in the available categories,
        #   set the list of categories to the subpage only
        for c in categories:
            if subpage.lower() == c.lower():
                categories = [c]
                break
        for category in categories:
            # Create the CSV file for this category
            csv_fn = "%s_results" % category.title()
            pt_csv = sh.PT_CSV(csv_fn, self)
            #if pt_csv.check_exists():
            #    print "CSV file for category '%s' already exists." \
            #          " Proceeding to next category." % category
            pt_csv.open_csv()
            # Set the parameters for the URL query
            params = collections.OrderedDict()
            if word is not None and not word == "": params['any'] = word
            params['category'] = category
            if downloadable: params['download'] = 'on'
            params['hitsPerPage'] = '1000'
            # Build the URL query
            query_url = shared.get_post_query(portal_url, params)
            print "\nQuery URL: %s" % query_url
            # Get the soup of the query URL
            xml_soup = bsoup.get_xml_soup(query_url)
            if not self.check_result(xml_soup, query_url, "Ontario Discover Portal Query"): continue
            items = xml_soup.find_all('div', attrs={'class': 'hit'})
            num_items = len(items)
            print "\nNumber of records: " + str(num_items)
            record_count = 0
            for index, item in enumerate(items):
                record_count += 1
                msg = "Extracting %s of %s records for category '%s'" % (record_count, num_items, category)
                shared.print_oneliner(msg)
                # Get the ID from <div> with class 'thumbnail_results';
                # the anchor id looks like "rating.link.<id>"
                thumb_div = item.find('div', attrs={'class': 'thumbnail_results'})
                rating_link = thumb_div.a['id']
                id_str = rating_link.replace("rating.link.", "")
                #print "ID: %s" % id_str
                # Get the title from <div> with class 'hittitle'
                title_str = item.find('div', attrs={'class': 'hittitle'}).text
                #answer = raw_input("Press enter...")
                # Metadata URL:
                mdata_url = '%s?id=%s' % (mdata_query_url, id_str)
                # HTML Access
                mdata_soup = bsoup.get_soup(mdata_url) #, True, ('class', 'padded-content'))
                mdata_items = self.get_discovering_mdata(mdata_soup, mdata_url, title_str)
                for k, v in mdata_items.items():
                    pt_csv.add(k, v)
                # Write the results to the CSV
                pt_csv.write_dataset()
            print
            pt_csv.close_csv()
        # Print ending time
        end_time = datetime.datetime.now()
        print "\nExtraction complete at %s." % end_time
        tot_time = end_time - start_time
        print "It took %s to complete." % tot_time
    def update_discover(self):
        ''' Extracts the latest Discovering Ontario information.

        Downloads the province's Data Distribution Catalogue XLSX, pulls
        every hyperlinked dataset ID out of column A of each sheet, and
        scrapes each dataset's metadata page into a dated
        "Discovering_Update_<YYYYMMDD>_results" CSV.
        :return: None
        '''
        self.print_title("Updating Discovering Ontario Data")
        start_time = datetime.datetime.now()
        print "Process started at: %s" % start_time
        excel_url = 'https://www.sse.gov.on.ca/sites/MNR-PublicDocs/EN/CMID/DataDistributionCatalogue.xlsx'
        mdata_query_url = self.pg_grp.get_url('mdata_url')
        now = datetime.datetime.now()
        date_str = now.strftime('%Y%m%d')
        # Download the XLSX file (Python 2 urllib; Windows-style path)
        xlsx_fn = 'files\\DataDistributionCatalogue_Update_%s.xlsx' % date_str
        xlsx_f = urllib.urlretrieve(excel_url, xlsx_fn)
        #r = requests.get(excel_url) # make an HTTP request
        wb = openpyxl.load_workbook(xlsx_fn)
        sheets = wb.sheetnames
        url_list = []
        for sh_idx, sh_name in enumerate(sheets):
            print "\nSheet %s:" % sh_name
            sheet = wb[sh_name]
            row_count = sheet.max_row
            # The first sheet has a 4-row header; later sheets start at row 1
            if sh_idx == 0:
                row_start = 5
            else:
                row_start = 1
            # NOTE(review): the upper bound `row_count - row_start + 1` skips
            # the last `row_start` rows — presumably `row_count + 1` was
            # intended; confirm against the workbook layout
            for row in range(row_start, row_count - row_start + 1):
                cell = sheet['A%s' % row]
                hp_link = cell.hyperlink
                if hp_link is not None:
                    #print hp_link
                    title_str = cell.value
                    url = hp_link.target
                    if url is not None:
                        print url
                        # Parse the URL to get the ID
                        id_str = url.split('=')[1]
                        # Metadata URL:
                        mdata_url = '%s?uuid=%s' % (mdata_query_url, id_str)
                        print mdata_url
                        url_list.append((title_str, mdata_url))
                    #answer = raw_input("Press enter...")
            #answer = raw_input("Press enter...")
        # Create the CSV file
        csv_fn = "Discovering_Update_%s_results" % date_str
        pt_csv = sh.PT_CSV(csv_fn, self)
        pt_csv.open_csv()
        for idx, info in enumerate(url_list):
            msg = "Extracting %s of %s records" % (idx + 1, len(url_list))
            shared.print_oneliner(msg)
            title_str, url = info
            mdata_soup = bsoup.get_soup(url, True)
            #mdata_f = codecs.open('mdata_soup.html', encoding='utf-8', mode='w')
            #mdata_f.write(unicode(mdata_soup))
            #mdata_f.close()
            #mdata_f.close()
            mdata_items = self.get_discovering_mdata(mdata_soup, url, title_str)
            for k, v in mdata_items.items():
                pt_csv.add(k, v)
            # Write the results to the CSV
            pt_csv.write_dataset()
        print
        pt_csv.close_csv()
        # Print ending time
        end_time = datetime.datetime.now()
        print "\nExtraction complete at %s." % end_time
        tot_time = end_time - start_time
        print "It took %s to complete." % tot_time
def main():
    '''Command-line entry point: pick the target page and run the extractor.

    Parses -p/--page, -w/--word, -f/--format, -t/--stat and -s/--silent;
    prompts interactively for the page when -p is omitted. All errors are
    written to the extractor's log before the log is closed.
    '''
    # Older argument set kept for reference:
    # parser.add_argument("-t", "--tool", help="The tool to use: %s" % ', '.join(tool_list))
    # parser.add_argument("-w", "--word", help="The key word(s) to search for.")
    # parser.add_argument("-f", "--format", help="The format(s) to search for.")
    # parser.add_argument("-c", "--category", help="The category(ies) to search for.")
    # parser.add_argument("-d", "--downloadable", help="Determines wheter to get only downloadable datasets.")
    # parser.add_argument("-l", "--html", help="The HTML file to scrape (only for OpenData website).")
    # parser.add_argument("-s", "--silent", action='store_true', help="If used, no extra parameters will be queried.")
    ext = Extractor()
    try:
        pages = ext.get_pagelist()
        parser = argparse.ArgumentParser()
        parser.add_argument("-p", "--page", help="The page to extract: %s or all" % ', '.join(pages.keys()))
        parser.add_argument("-w", "--word", help="The key word(s) to search for.")
        parser.add_argument("-f", "--format", help="The format(s) to search for.")
        parser.add_argument("-t", "--stat", help="The status to search for.")
        parser.add_argument("-s", "--silent", action='store_true', help="If used, no extra parameters will be queried.")
        args = parser.parse_args()
        # print args.echo
        # print "province: " + str(args.province)
        # print "format: " + str(args.format)
        page = args.page
        params = collections.OrderedDict()
        params['srch_word'] = args.word
        params['format'] = args.format
        params['status'] = args.stat
        silent = args.silent  # NOTE(review): parsed but never used below
        if page is None:
            # No page given on the command line: ask interactively
            answer = raw_input("Please enter the page you would like to use (%s or all): " % ', '.join(pages.keys()))
            if not answer == "":
                page = answer.lower()
            else:
                print "\nERROR: Please specify a web page."
                print "Exiting process."
                sys.exit(1)
        page = page.lower()
        print page
        ext.set_page(page)
        ext.set_params(params)
        ext.run()
    except Exception, err:
        # Log the failure (including traceback) instead of crashing
        ext.print_log('ERROR: %s\n' % str(err))
        ext.print_log(traceback.format_exc())
    ext.close_log()
    # geoportal_list = extract_geoportal(province)
# Script entry point. main() returns None, so sys.exit exits with status 0
# unless main() itself called sys.exit with a non-zero code.
if __name__ == '__main__':
    sys.exit(main())
| StarcoderdataPython |
133749 | import glob
import multiprocessing as mp
import os
import time
from argparse import ArgumentParser
from pathlib import Path
import tensorflow as tf
import numpy as np
from tqdm import tqdm
from data.preprocess import generate_flying_things_point_cloud, get_all_flying_things_frames
from data.preprocess import preprocess, merge_metadata
global output_directory
def preprocess_wrap(tfrecord_file):
    """Preprocess one Waymo .tfrecord file into the global `output_directory`.

    Module-level wrapper so multiprocessing.Pool can pickle the callable;
    it relies on `output_directory` being assigned in the __main__ block
    before the pool workers are started.
    """
    preprocess(tfrecord_file, output_directory, frames_per_segment=None)
def preprocess_flying_things(input_dir, output_dir, view='right'):
    """Convert FlyingThings3D frames into compressed scene-flow .npz files.

    For each consecutive frame pair of the 'val' and 'train' splits, builds
    a point cloud pair with colors, flow and mask via
    generate_flying_things_point_cloud and writes it to
    <output_dir>/<split>/frame_<i>.npz.

    :param input_dir: root of the FlyingThings3D data (layout below)
    :param output_dir: destination root; must contain 'val' and 'train' dirs
    :param view: which stereo view to use, 'left' or 'right'

    Data directory must be in shape of:
    parent-dir
        disparity
            train
                left
                right
            val
                left
                right
        disparity_change
            train
                left
                right
            val
                left
                right
        optical_flow
            train
                backward
                    left
                    right
                forward
                    left
                    right
            val
                backward
                    left
                    right
                forward
                    left
                    right
    """
    for dataset in ('val', 'train'):
        all_files_disparity, all_files_disparity_change, all_files_opt_flow, all_files_img = get_all_flying_things_frames(
            input_dir=input_dir, disp_dir='disparity/{}/disparity/{}'.format(dataset, view),
            opt_dir='flow/{}/{}/into_past'.format(dataset, view), disp_change_dir='disparity_change/{}/disparity_change/{}/into_past'.format(dataset, view),
            img_dir='FlyingThings3D_subset_image_clean/FlyingThings3D_subset/{}/image_clean/{}'.format(dataset, view))
        # Iterate frame pairs (i, i+1); the last frame has no successor
        for i in range(len(all_files_disparity) - 1):
            disparity = all_files_disparity[i]
            disparity_next_frame = all_files_disparity[i + 1]
            disparity_change = all_files_disparity_change[i]
            optical_flow = all_files_opt_flow[i]
            image = all_files_img[i]
            image_next_frame = all_files_img[i + 1]
            d = generate_flying_things_point_cloud(disparity, disparity_next_frame, disparity_change, optical_flow,
                                                   image, image_next_frame)
            # d = (points1, points2, color1, color2, flow, mask)
            np.savez_compressed(
                os.path.join(output_dir + '/' + dataset, 'frame_{}.npz'.format(i)), points1=d[0], points2=d[1], color1=d[2], color2=d[3], flow=d[4], mask=d[5])
            # NOTE(review): hard cap at 1000 frames per split — looks like a
            # leftover debug limit; confirm before a full production run
            if i == 1000:
                break
# https://github.com/tqdm/tqdm/issues/484
if __name__ == '__main__':
    # Preprocessing is CPU-only: hide every GPU from TensorFlow so pool
    # workers do not grab GPU memory, then verify none remain visible.
    tf.config.set_visible_devices([], 'GPU')
    visible_devices = tf.config.get_visible_devices()
    for device in visible_devices:
        assert device.device_type != 'GPU'
    parser = ArgumentParser()
    parser.add_argument('input_directory', type=str)
    parser.add_argument('output_directory', type=str)
    parser.add_argument('--n_cores', default=None, type=int)
    parser.add_argument('--dataset', default='waymo', type=str)
    args = parser.parse_args()
    print(f"Extracting frames from {args.input_directory} to {args.output_directory}")
    input_directory = Path(args.input_directory)
    if not input_directory.exists() or not input_directory.is_dir():
        print("Input directory does not exist")
        exit(1)
    output_directory = Path(args.output_directory)
    if not output_directory.exists():
        output_directory.mkdir(parents=True)
    #if list(output_directory.iterdir()):
    #    print("Output directory not empty! Please remove existing files as there is no merge.")
    #    exit(1)
    # Workers read the module-level `output_directory` (see preprocess_wrap),
    # so make it absolute before chdir-ing into the input directory below.
    output_directory = os.path.abspath(output_directory)
    # TODO also use multiple cores for preprocessing flying things dataset (?)
    if args.dataset == 'waymo':
        n_cores = mp.cpu_count()
        if args.n_cores is not None:
            if args.n_cores <= 0:
                print("Number of cores cannot be negative")
                exit(1)
            if args.n_cores > n_cores:
                # Fixed message: original lacked the space before the count
                print(f"Number of cores cannot be more than {n_cores}")
                exit(1)
            else:
                n_cores = args.n_cores
        print(f"{n_cores} number of cores available")
        pool = mp.Pool(n_cores)
        tfrecord_filenames = []
        os.chdir(input_directory)
        for file in glob.glob("*.tfrecord"):
            file_name = os.path.abspath(file)
            tfrecord_filenames.append(file_name)
        t = time.time()
        # imap_unordered keeps tqdm responsive while workers finish in any order
        for _ in tqdm(pool.imap_unordered(preprocess_wrap, tfrecord_filenames), total=len(tfrecord_filenames)):
            pass
        # Close Pool and let all the processes complete
        pool.close()
        pool.join()  # postpones the execution of next line of code until all processes in the queue are done.
        # Merge look up tables
        print("Merging individual metadata...")
        merge_metadata(os.path.abspath(output_directory))
        print(f"Preprocessing duration: {(time.time() - t):.2f} s")
    elif args.dataset == 'flying_things':
        preprocess_flying_things(input_dir=input_directory, output_dir=output_directory)
    else:
        raise ValueError('Dataset {} not available'.format(args.dataset))
| StarcoderdataPython |
1608935 | <reponame>xuyongzhi/Scan-to-BIM<filename>configs/strpoints/bev_strpoints_r50_fpn_1x_r.py<gh_stars>1-10
''' # pending items:
num_outs
assigner
img_norm_cfg
transform_method
'''
# Top-view projection variant used for the BEV input images
TOPVIEW = 'VerD' # better
#*******************************************************************************
from configs.common import DIM_PARSE, Track_running_stats
IMAGE_SIZE = DIM_PARSE.IMAGE_SIZE
# Dataset selector: 'beike2d' or 'stanford2d' (uncomment to switch)
DATA = 'beike2d'
#DATA = 'stanford2d'
# NOTE(review): the first assignment is immediately overridden — the config
# effectively trains on ['room']; swap/remove the lines to train on walls
classes= ['wall']
classes= ['room']
if DATA == 'beike2d':
    _obj_rep = 'XYXYSin2'
    _transform_method='moment_XYXYSin2'
    #_obj_rep = 'XYXYSin2WZ0Z1'
    #_transform_method='moment_XYXYSin2WZ0Z1'
    _obj_rep_out = _obj_rep
    # Rooms use the richer 3D-aware representation; index [1] picks the
    # 'moment_std_XYDRSin2Cos2Z0Z1' transform out of the three options
    if 'room' in classes:
        _obj_rep = 'XYXYSin2WZ0Z1'
        _transform_method = ['XYDRSin2Cos2Z0Z1', 'moment_std_XYDRSin2Cos2Z0Z1', 'moment_max_XYDRSin2Cos2Z0Z1'][1]
        _obj_rep_out='XYDRSin2Cos2Z0Z1'
elif DATA == 'stanford2d':
    _obj_rep = 'Rect4CornersZ0Z1'
    _transform_method = 'sort_4corners'
# Derive per-object dimensionality from the chosen representation
# (+1 class for background)
dim_parse = DIM_PARSE(_obj_rep, len(classes)+1)
_obj_dim = dim_parse.OBJ_DIM
#*******************************************************************************
# Optional grouping of class ids; None disables grouping
cls_groups = None
#cls_groups = [[1], [2]]
#*******************************************************************************
# GroupNorm for all heads/necks (32 groups)
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
# StrPoints detector: ResNet-50 backbone (4-channel BEV input) + FPN +
# point-set head producing classification and line/box regression
model = dict(
    type='StrPointsDetector',
    pretrained=None,
    backbone=dict(
        type='ResNet',
        depth=50,
        in_channels=4,
        num_stages=4,
        out_indices=( 0, 1, 2, 3),
        frozen_stages=-1,
        style='pytorch',
        basic_planes=64,
        max_planes=2048),
    neck=dict(
        type='FPN',
        in_channels=[ 256, 512, 1024, 2048],
        out_channels=256,
        start_level=0,
        add_extra_convs=True,
        num_outs=4,
        norm_cfg=norm_cfg),
    bbox_head=dict(
        type='StrPointsHead',
        obj_rep=_obj_rep,
        classes=classes,
        in_channels=256,
        feat_channels=256,
        point_feat_channels=256,
        stacked_convs=3,
        num_points=9,
        gradient_mul=0.1,
        point_strides=[4, 8, 16, 32],
        point_base_scale=1,
        norm_cfg=norm_cfg,
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=10.0,),
        cls_types=['refine', 'final'],
        loss_bbox_init=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.5),
        loss_bbox_refine=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0),
        transform_method=_transform_method,
        dcn_zero_base=False,
        corner_hm = False,
        corner_hm_only = False,
        move_points_to_center = 0,
        relation_cfg=dict(enable=0,
                          stage='refine',
                          score_threshold=0.2,
                          max_relation_num=120),
        adjust_5pts_by_4=False,
        cls_groups = cls_groups,
    )
)
#transform_method='minmax'))
#transform_method='center_size_istopleft'))
# training and testing settings
# Target assignment per stage: point-based for init, IoU-based (rotated 3D
# overlap) for refine, distance-based for the corner heatmap head
train_cfg = dict(
    init=dict(
        assigner=dict(type='PointAssigner', scale=4, pos_num=1, obj_rep=_obj_rep_out),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    refine=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.8,
            neg_iou_thr=0.3,
            min_pos_iou=0.2,
            ignore_iof_thr=-1,
            overlap_fun='dil_iou_dis_rotated_3d',
            obj_rep=_obj_rep_out),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    corner=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.6,
            neg_iou_thr=0.1,
            min_pos_iou=0,
            ignore_iof_thr=-1,
            overlap_fun='dis',
            ref_radius=2,
            obj_rep='corner'),
        allowed_border=-1,
        pos_weight=-1,
        gaussian_weight=True,
        debug=False),
)
# Inference-time filtering; rotated NMS implemented in ops/nms/nms_wrapper.py
test_cfg = dict(
    nms_pre=1000,
    min_bbox_size=0,
    score_thr=0.2,
    nms=dict(type='nms_rotated', iou_thr=0.2, min_width_length_ratio=0.3),
    max_per_img=150)
# Input normalization for the 4-channel top view; the commented variants
# are alternative normalization schemes that were experimented with
#img_norm_cfg = dict(
#    mean=[ 0, 0,0,0],
#    std=[ 255, 1,1,1 ], to_rgb=False, method='raw')
img_norm_cfg = dict(
    mean=[ 4.753, 0., 0., 0.],
    std=[ 16.158, 0.155, 0.153, 0.22], to_rgb=False, method='rawstd') # better
#img_norm_cfg = dict(
#    mean=[4.753, 0.044, 0.043, 0.102],
#    std=[ 16.158, 0.144, 0.142, 0.183], to_rgb=False, method='abs')
#
#img_norm_cfg = dict(
#    mean=[4.753, 11.142, 11.044, 25.969],
#    std=[ 16.158, 36.841, 36.229, 46.637], to_rgb=False, method='abs255')
# Training augmentation: pad to square, resize, random flip + rotation
train_pipeline = [
    dict(type='LoadTopviewFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='PadToSameHW_ForRotation',obj_rep=_obj_rep,pad_border_make_bboxes_pos=True),
    dict(type='ResizeImgLine', obj_rep=_obj_rep, img_scale=(IMAGE_SIZE, IMAGE_SIZE), keep_ratio=True, obj_dim=_obj_dim),
    dict(type='RandomLineFlip', flip_ratio=0.7, obj_rep=_obj_rep, direction='random'),
    dict(type='RandomRotate', rotate_ratio=0.9, obj_rep=_obj_rep),
    dict(type='NormalizeTopview', **img_norm_cfg),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_relations']),
]
# Test pipeline: same preprocessing, no random augmentation
# (flip disabled, rotate_ratio=0.0)
test_pipeline = [
    dict(type='LoadTopviewFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(IMAGE_SIZE, IMAGE_SIZE),
        flip=False,
        transforms=[
            dict(type='PadToSameHW_ForRotation', obj_rep=_obj_rep, pad_border_make_bboxes_pos=True),
            dict(type='ResizeImgLine', obj_rep=_obj_rep, keep_ratio=True, obj_dim=_obj_dim),
            dict(type='RandomLineFlip', obj_rep=_obj_rep),
            dict(type='RandomRotate', rotate_ratio=0.0, obj_rep=_obj_rep),
            dict(type='NormalizeTopview', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_relations']),
            dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_relations']),
        ])
]
filter_edges=True
# dataset settings
if DATA == 'beike2d':
    dataset_type = 'BeikeDataset'
    data_root = f'data/beike/processed_{IMAGE_SIZE}/'
    ann_file = data_root + 'json/'
    img_prefix_train = data_root + f'TopView_{TOPVIEW}/train.txt'
    img_prefix_test = data_root + f'TopView_{TOPVIEW}/test.txt'
    #img_prefix_test = img_prefix_train
elif DATA == 'stanford2d':
    dataset_type = 'Stanford_2D_Dataset'
    ann_file = 'data/stanford/'
    img_prefix_train = '12356'
    # NOTE(review): the second assignment overrides the first — area '4'
    # is the effective test split; commented line would test on '136'
    img_prefix_test = '5'
    img_prefix_test = '4'
    #img_prefix_test = '136'
# val reuses train_pipeline (augmented); only test uses test_pipeline
data = dict(
    imgs_per_gpu=7,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        obj_rep = _obj_rep,
        ann_file=ann_file,
        img_prefix=img_prefix_train,
        pipeline=train_pipeline,
        classes=classes,
        filter_edges=filter_edges),
    val=dict(
        type=dataset_type,
        obj_rep = _obj_rep,
        ann_file=ann_file,
        img_prefix=img_prefix_test,
        pipeline=train_pipeline,
        classes=classes,
        filter_edges=filter_edges),
    test=dict(
        type=dataset_type,
        obj_rep = _obj_rep,
        obj_rep_out = _obj_rep_out,
        ann_file=ann_file,
        img_prefix=img_prefix_test,
        pipeline=test_pipeline,
        classes=classes,
        filter_edges=filter_edges))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy: step decay at 50% and 80% of training with linear warmup
total_epochs = 3
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=20,
    warmup_ratio=1.0 / 3,
    step=[int(total_epochs*0.5), int(total_epochs*0.8)])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=1,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
# Work dir encodes dataset initial, norm-stats mode and object representation
tra_run = '' if Track_running_stats else '_nTrun'
work_dir = f'./work_dirs/{DATA[0]}TPV_r50_fpn{tra_run}_{_obj_rep}'
if _transform_method == 'moment_std_XYDRSin2Cos2Z0Z1':
    work_dir += '_Std_'
if _transform_method == 'moment_max_XYDRSin2Cos2Z0Z1':
    work_dir += '_Max_'
# NOTE(review): repeated assignments below select the checkpoint by
# overwriting — only the last assignment in each branch takes effect
if DATA == 'beike2d':
    load_from = './checkpoints/beike/jun17_wd_bev_L.pth'
    load_from = './checkpoints/beike/jun15_wd_bev.pth'
    if 'room' in classes:
        load_from = './checkpoints/beike/jun14_room_bev.pth'
        load_from = './checkpoints/beike/jun18_r_bev_L.pth'
elif DATA == 'stanford2d':
    load_from = './checkpoints/sfd/24May_bev_abcdif_train_6as.pth'
#load_from = None
resume_from = None
auto_resume = True
workflow = [('train', 1), ('val', 1)]
# Debug toggle: flip `if 0` to `if 1` for single-process data loading,
# train-only workflow and sparse checkpointing (currently dead code)
if 0:
    data['workers_per_gpu'] = 0
    workflow = [('train', 1),]
    checkpoint_config = dict(interval=10)
1772395 | <reponame>RainMark/python-progress-bar<filename>setup.py
# -*- coding:utf-8 -*-
from __future__ import print_function
from setuptools import setup, find_packages
from glob import glob
import pyprobar
# Read the requirements file; NOTE(review): glob(...)[0] raises IndexError
# when no requirements.* file exists and picks arbitrarily if there are
# several — confirm exactly one is shipped with the sdist
with open(glob('requirements.*')[0], encoding='utf-8') as f:
    all_reqs = f.read().split('\n')
# Drop VCS requirements ('git+...' lines): pip's install_requires
# cannot resolve them
install_requires = [x.strip() for x in all_reqs if 'git+' not in x]
# Long description for PyPI comes straight from the README
with open("README.md", "r", encoding='utf-8') as fr:
    long_description = fr.read()
setup(
    name = pyprobar.__name__ ,
    version=pyprobar.__version__,
    packages = find_packages(),
    include_package_data = True,
    description = " An easy-to-use and colorful progress bar for python." ,
    long_description=long_description,
    long_description_content_type="text/markdown",
    author = "K.y" ,
    author_email="<EMAIL>",
    url = "https://github.com/beidongjiedeguang/python-progress-bar" ,
    license = "MIT" ,
    install_requires=install_requires,
    classifiers=[
        'Operating System :: OS Independent',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
    keywords=[
        'Python Utils',
        'Machine Learning',
        'Neural Networks',
        'Natural Language Processing',
        'Computer Vision'
    ]
)
1636628 | #!/usr/bin/env python
'''
Original Training code made by <NAME> <<EMAIL>>
Moded by <NAME> <<EMAIL>>
Visit our website at www.theconstructsim.com
'''
# Standard library
import random
import time

# Third-party
import gym
import numpy
import tensorflow as tf
import tensorflow.contrib.layers as layers
from gym import wrappers

# ROS packages required
import rospy
import rospkg
from std_msgs.msg import Float64

# Training framework
import maddpg_training.common.tf_util as U
from maddpg_training.trainer.maddpg import MADDPGAgentTrainer

# import our training environment
import multi_UAV_env
# Hyper-parameters shared by all MADDPG trainers; read as a module-level
# global by get_trainers() and the training loop in __main__.
# NOTE(review): 'save_dir' is a hard-coded absolute path — confirm it is
# valid on the machine running the training.
params = { 'num_episodes' : 60000,
           'num_steps' : 2000,
           'lr' : 1e-2,
           'gamma' : 0.95,
           'batch_size' : 1024,
           'num_units' : 256 ,
           'exp_name' : None,
           'save_dir' : "/home/awesomelb/Multi-UAV_RL/src/Collaborative_Aerial_Transportation/collaborative_training/src/policy/",
           'save_rate' : 100,
           'load_dir' : "",
           'restore' : False,
           'display' : False,
           'benchmark' : False,
           'benchmark_iters': 100000,
           'benchmark_dir' : "./benchmark_files/",
           'plots_dir' : "./learning_curve/"
    }
def lstm_model(lstm_input, num_outputs, scope, lstm_state_in=None, reuse=False,
               num_units=256, LSTM_UNITS=32, timestep_lstm=8, init=1):
    """Build an LSTM-based policy network.

    A fully connected layer projects the input to `num_units`, the result is
    reshaped to (1, timestep_lstm, LSTM_UNITS) and fed through a BasicLSTMCell
    via dynamic_rnn, and a final linear layer maps to `num_outputs`.

    NOTE(review): the original signature placed the defaulted `lstm_state_in`
    before required parameters, which is a SyntaxError in Python; it has been
    moved after the required args. Any positional caller must be updated.
    The original `init == 1` / `init == 0` branches were byte-identical (and
    the latter used invalid `else if` syntax), so a single dynamic_rnn call
    preserves the behaviour of both.

    :param lstm_input: input tensor, commented in the original as
        (time_steps, batch_size, state_size) — TODO confirm at the call site
    :param num_outputs: size of the final linear output layer
    :param scope: tf variable scope name
    :param lstm_state_in: optional initial LSTM state (zero state when None)
    :param reuse: whether to reuse variables in `scope`
    :param init: kept for interface compatibility; both values behave the same
    :return: (out, lstm_state) — output tensor and final LSTM state
    """
    with tf.variable_scope(scope, reuse=reuse):
        out = lstm_input  # (time_steps, batch_size, state_size)
        out = layers.fully_connected(out, num_outputs=num_units, activation_fn=tf.nn.relu)
        out = tf.reshape(out, [1, timestep_lstm, LSTM_UNITS])
        lstm_cell = tf.contrib.rnn.BasicLSTMCell(num_units=LSTM_UNITS, state_is_tuple=True)
        # NOTE(review): the original also built an unused
        # lstm_cell.zero_state(1, tf.float32); dropped as it had no effect.
        lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
            lstm_cell,
            out,
            initial_state=lstm_state_in,
            sequence_length=timestep_lstm,
            time_major=False,
            dtype=tf.float32
        )
        out = layers.fully_connected(lstm_outputs, num_outputs=num_outputs, activation_fn=None)
        return out, lstm_state
def mlp_model(mlp_input, num_outputs, scope, reuse=False, num_units=256, rnn_cell=None):
    """Two-hidden-layer ReLU MLP with a linear output layer.

    Used as the MADDPG critic model in get_trainers().

    :param mlp_input: input tensor (observation features)
    :param num_outputs: size of the linear output layer
    :param scope: tf variable scope name
    :param reuse: whether to reuse variables in `scope`
    :param num_units: width of each hidden layer
    :param rnn_cell: unused; kept to match the trainer's model interface
    :return: output tensor of shape (..., num_outputs)
    """
    # This model takes as input an observation and returns values of all actions
    with tf.variable_scope(scope, reuse=reuse):
        out = mlp_input
        out = layers.fully_connected(out, num_outputs=num_units, activation_fn=tf.nn.relu)
        out = layers.fully_connected(out, num_outputs=num_units, activation_fn=tf.nn.relu)
        out = layers.fully_connected(out, num_outputs=num_outputs, activation_fn=None)
        return out
def get_trainers(env, num_adversaries, obs_shape_n=None, arglist=None):
    """Build one MADDPG trainer per UAV agent (4 agents, module-level params).

    BUG FIX: the only call site in this file invokes
    get_trainers(env, num_adversaries, obs_shape_n, arglist), but the original
    signature lacked num_adversaries, shifting every later argument by one
    position (obs_shape_n received num_adversaries, etc.). The signature now
    matches the caller; num_adversaries and arglist are currently unused by
    the body, which reads the module-level 'params' dict instead.

    :param env: the multi-UAV gym environment (provides action_space)
    :param num_adversaries: number of adversarial agents (unused here)
    :param obs_shape_n: per-agent observation shapes
    :param arglist: training arguments (unused; module 'params' is used)
    :return: list of MADDPGAgentTrainer instances
    """
    trainers = []
    critic_model = mlp_model
    policy_model = lstm_model
    trainer = MADDPGAgentTrainer
    for i in range(4):
        trainers.append(trainer(
            "agent_%d" % i, critic_model, policy_model, obs_shape_n, env.action_space, i, params,
            local_q_func=False))
    return trainers
if __name__ == '__main__':
    # NOTE(review): this script references names that are never imported or
    # defined in the visible file (gym, wrappers, numpy, np, tf, time, U,
    # pickle, Float64, reduce, arglist, num_adversaries, qlearn, nepisodes,
    # nsteps, epsilon_discount, initial_epsilon, highest_reward) -- confirm
    # the missing imports / launch-file parameters before running.
    rospy.init_node('multi_UAV_gym', anonymous=True, log_level=rospy.INFO)
    # Create the Gym environment
    env = gym.make('multi_UAV-v0')
    rospy.logdebug ( "Gym environment done")
    ####
    # publishers for per-step cumulated reward and per-episode reward
    reward_pub = rospy.Publisher('/multi_UAV/reward', Float64, queue_size=1)
    episode_reward_pub = rospy.Publisher('/multi_UAV/episode_reward', Float64, queue_size=1)
    ######
    # Set the logging system
    rospack = rospkg.RosPack()
    pkg_path = rospack.get_path('collaborative_training')
    outdir = pkg_path + '/training_results'
    env = wrappers.Monitor(env, outdir, force=True)
    rospy.logdebug("Monitor Wrapper started")
    last_time_steps = numpy.ndarray(0)
    # Loads parameters from the ROS param server
    # Parameters are stored in a yaml file inside the config directory
    # They are loaded at runtime by the launch file
    # Initialises the algorithm that we are going to use for learning
    start_time = time.time()
    # ---- MADDPG training loop (openai/maddpg-style) ----
    with U.single_threaded_session():
        # Create agent trainers
        obs_shape_n = [env.observation_space[i].shape for i in range(4)]
        trainers = get_trainers(env, num_adversaries, obs_shape_n, arglist)
        # Initialize
        U.initialize()
        # Load previous results, if necessary
        if arglist.load_dir == "":
            arglist.load_dir = arglist.save_dir
        if arglist.display or arglist.restore or arglist.benchmark:
            print('Loading previous state...')
            U.load_state(arglist.load_dir)
        episode_rewards = [0.0]  # sum of rewards for all agents
        agent_rewards = [[0.0] for _ in range(env.n)]  # individual agent reward
        final_ep_rewards = []  # sum of rewards for training curve
        final_ep_ag_rewards = []  # agent rewards for training curve
        agent_info = [[[]]]  # placeholder for benchmarking info
        saver = tf.train.Saver()
        obs_n = env.reset()
        episode_step = 0
        train_step = 0
        t_start = time.time()
        print('Starting iterations...')
        while True:
            # get action
            action_n = [agent.action(obs) for agent, obs in zip(trainers,obs_n)]
            # environment step
            new_obs_n, rew_n, done_n, info_n = env.step(action_n)
            episode_step += 1
            done = all(done_n)
            terminal = (episode_step >= arglist.max_episode_len)
            # collect experience
            for i, agent in enumerate(trainers):
                agent.experience(obs_n[i], action_n[i], rew_n[i], new_obs_n[i], done_n[i], terminal)
            obs_n = new_obs_n
            for i, rew in enumerate(rew_n):
                episode_rewards[-1] += rew
                agent_rewards[i][-1] += rew
            if done or terminal:
                # episode finished: reset env and start fresh reward slots
                obs_n = env.reset()
                episode_step = 0
                episode_rewards.append(0)
                for a in agent_rewards:
                    a.append(0)
                agent_info.append([[]])
            # increment global step counter
            train_step += 1
            # for benchmarking learned policies
            if arglist.benchmark:
                for i, info in enumerate(info_n):
                    agent_info[-1][i].append(info_n['n'])
                if train_step > arglist.benchmark_iters and (done or terminal):
                    file_name = arglist.benchmark_dir + arglist.exp_name + '.pkl'
                    print('Finished benchmarking, now saving...')
                    with open(file_name, 'wb') as fp:
                        pickle.dump(agent_info[:-1], fp)
                    break
                continue
            # for displaying learned policies
            if arglist.display:
                time.sleep(0.1)
                env.render()
                continue
            # update all trainers, if not in display or benchmark mode
            loss = None
            for agent in trainers:
                agent.preupdate()
            for agent in trainers:
                loss = agent.update(trainers, train_step)
            # save model, display training output
            if terminal and (len(episode_rewards) % arglist.save_rate == 0):
                U.save_state(arglist.save_dir, saver=saver)
                # print statement depends on whether or not there are adversaries
                if num_adversaries == 0:
                    print("steps: {}, episodes: {}, mean episode reward: {}, time: {}".format(
                        train_step, len(episode_rewards), np.mean(episode_rewards[-arglist.save_rate:]), round(time.time()-t_start, 3)))
                else:
                    print("steps: {}, episodes: {}, mean episode reward: {}, agent episode reward: {}, time: {}".format(
                        train_step, len(episode_rewards), np.mean(episode_rewards[-arglist.save_rate:]),
                        [np.mean(rew[-arglist.save_rate:]) for rew in agent_rewards], round(time.time()-t_start, 3)))
                t_start = time.time()
                # Keep track of final episode reward
                final_ep_rewards.append(np.mean(episode_rewards[-arglist.save_rate:]))
                for rew in agent_rewards:
                    final_ep_ag_rewards.append(np.mean(rew[-arglist.save_rate:]))
            # saves final episode reward for plotting training curve later
            if len(episode_rewards) > arglist.num_episodes:
                rew_file_name = arglist.plots_dir + arglist.exp_name + '_rewards.pkl'
                with open(rew_file_name, 'wb') as fp:
                    pickle.dump(final_ep_rewards, fp)
                agrew_file_name = arglist.plots_dir + arglist.exp_name + '_agrewards.pkl'
                with open(agrew_file_name, 'wb') as fp:
                    pickle.dump(final_ep_ag_rewards, fp)
                print('...Finished total of {} episodes.'.format(len(episode_rewards)))
                break
    # Starts the main training loop: the one about the episodes to do
    # NOTE(review): this second, tabular Q-learning loop appears to come from a
    # different template (qlearn, nepisodes, nsteps, etc. are undefined here)
    # -- confirm it is meant to coexist with the MADDPG loop above.
    for x in range(nepisodes):
        rospy.loginfo ("STARTING Episode #"+str(x))
        cumulated_reward = 0
        cumulated_reward_msg = Float64()
        episode_reward_msg = Float64()
        done = False
        if qlearn.epsilon > 0.05:
            qlearn.epsilon *= epsilon_discount
        # Initialize the environment and get first state of the robot
        rospy.logdebug("env.reset...")
        # Now We return directly the stringuified observations called state
        state = env.reset()
        rospy.logdebug("env.get_state...==>"+str(state))
        # for each episode, we test the robot for nsteps
        for i in range(nsteps):
            # Pick an action based on the current state
            action = qlearn.chooseAction(state)
            # Execute the action in the environment and get feedback
            rospy.logdebug("###################### Start Step...["+str(i)+"]")
            rospy.logdebug("haa+,haa-,hfe+,hfe-,kfe+,kfe- >> [0,1,2,3,4,5]")
            rospy.logdebug("Action to Perform >> "+str(action))
            nextState, reward, done, info = env.step(action)
            rospy.logdebug("END Step...")
            rospy.logdebug("Reward ==> " + str(reward))
            cumulated_reward += reward
            if highest_reward < cumulated_reward:
                highest_reward = cumulated_reward
            rospy.logdebug("env.get_state...[distance_from_desired_point,base_roll,base_pitch,base_yaw,contact_force,joint_states_haa,joint_states_hfe,joint_states_kfe]==>" + str(nextState))
            # Make the algorithm learn based on the results
            qlearn.learn(state, action, reward, nextState)
            # We publish the cumulated reward
            cumulated_reward_msg.data = cumulated_reward
            reward_pub.publish(cumulated_reward_msg)
            if not(done):
                state = nextState
            else:
                rospy.logdebug ("DONE")
                last_time_steps = numpy.append(last_time_steps, [int(i + 1)])
                break
            rospy.logdebug("###################### END Step...["+str(i)+"]")
        m, s = divmod(int(time.time() - start_time), 60)
        h, m = divmod(m, 60)
        episode_reward_msg.data = cumulated_reward
        episode_reward_pub.publish(episode_reward_msg)
        rospy.loginfo( ("EP: "+str(x+1)+" - [alpha: "+str(round(qlearn.alpha,2))+" - gamma: "+str(round(qlearn.gamma,2))+" - epsilon: "+str(round(qlearn.epsilon,2))+"] - Reward: "+str(cumulated_reward)+" Time: %d:%02d:%02d" % (h, m, s)))
    rospy.loginfo ( ("\n|"+str(nepisodes)+"|"+str(qlearn.alpha)+"|"+str(qlearn.gamma)+"|"+str(initial_epsilon)+"*"+str(epsilon_discount)+"|"+str(highest_reward)+"| PICTURE |"))
    l = last_time_steps.tolist()
    l.sort()
    rospy.loginfo("Overall score: {:0.2f}".format(last_time_steps.mean()))
    rospy.loginfo("Best 100 score: {:0.2f}".format(reduce(lambda x, y: x + y, l[-100:]) / len(l[-100:])))
    env.close()
| StarcoderdataPython |
1603762 | <reponame>holub008/skiscraper<gh_stars>0
import requests
from HTMLParser import HTMLParser
import mysql.connector
import urllib2
from configs import Configs
from RaceResults import RaceResult, StructuredRaceResults, RaceInfo, UnstructuredPDFRaceResults
# Module-level configuration and URL templates for the Birkie result scrapers.
config = Configs()
DB_USER = config.get_as_string("DB_USER")
DB_PASSWORD = config.get_as_string("DB_PASSWORD")
# division ids for forming the url correspond to list index
RACES = ["51K Skate","55K Classic","24K Skate","24K Classic"]
# bunch of boilerplate, only variable params are page # (100 per page), year, and divId
BASE_URL_FORMAT_2014ON = "http://birkie.pttiming.com/results/%d/index.php?page=1150&r_page=division&pageNum_rsOverall=%d&divID=%d"
URL_PREFETCH_2007ON = "http://results.birkie.com"
# yikes! this will spit raw sql errors if you supply malformed queries
BASE_URL_FORMAT_2007ON = "http://results.birkie.com/index.php?event_id=%s&page_number=%s"
# placeholder page/div values used only when building the canonical URL that
# is stored in the database for dedup purposes
URL_2007ON_DB_URL_PAGE = 0
URL_2014ON_DB_DIV_ID = 0
URL_PREFETCH_PRE2007 = "http://www.birkie.com/ski/events/birkie/results/"
# todo this is dynamic
BIRKIE_RACE_NAME = "American Birkebeiner"
BIRKIE_RACE_DIVISION = config.get_as_string("CITIZEN_DIVISION")
class Birkie2014Parser(HTMLParser):
    """
    a custom parser for results on birkie.pttiming.com

    Rows with class "dataRow" are result rows; within a row, cell 1 holds the
    place, cell 5 the name and cell 7 the finish time. Parsed rows accumulate
    in self.race_results.
    """
    def __init__(self):
        HTMLParser.__init__(self)
        self.in_data_row = False                      # inside a <tr class="dataRow">
        self.current_race_result = RaceResult("","","")
        self.current_td_data = []                     # text fragments of the current cell
        self.td_count = 0                             # 1-based cell index within the row
        self.race_results = []
    def handle_starttag(self, tag, attrs):
        if tag == "tr" and self.extract_class(attrs) == "dataRow":
            self.td_count = 0
            self.in_data_row = True
            self.current_race_result = RaceResult("","","")
        elif tag == "td" and self.in_data_row:
            self.td_count += 1
    def handle_endtag(self, tag):
        if tag == "tr" and self.in_data_row:
            # row finished: commit the result
            self.race_results.append(self.current_race_result)
            self.in_data_row = False
        elif tag == "td" and self.in_data_row:
            # cell finished: join fragments and route by cell position
            clean_data = " ".join(x.strip() for x in self.current_td_data)
            if self.td_count == 1:
                self.current_race_result.place = clean_data
            elif self.td_count == 5:
                self.current_race_result.name = clean_data
            elif self.td_count == 7:
                self.current_race_result.time = clean_data
            self.current_td_data = []
    # html parser is disgusting and gives the data not as an entire string
    def handle_data(self, data):
        if self.in_data_row:
            self.current_td_data.append(data)
    @staticmethod
    def extract_class(attrs):
        """Return the 'class' attribute value from an attrs list, or None."""
        for attr_pair in attrs:
            if len(attr_pair) == 2:
                if attr_pair[0] == "class":
                    return attr_pair[1]
        return None
class Birkie2007OnParser(Birkie2014Parser):
    """
    a custom parser for results on results.birkie.com

    DEDUP FIX: the original body was a byte-for-byte copy of
    Birkie2014Parser (same dataRow/td handling, same cell positions), so
    this class now simply inherits it; behavior is unchanged. The separate
    name is kept because the two sites could diverge in the future.
    """
    pass
class Birkie2007To2014Prefetcher(HTMLParser):
    """Scrapes the race-selection dropdown (<select id="divid">) on
    results.birkie.com, collecting the event ids and names whose text
    contains the requested year."""
    def __init__(self, season):
        """
        :param season: the season the birkie took place, one less than the year of the race you probably want (str)
        """
        HTMLParser.__init__(self)
        self.year = str(int(season) + 1)
        # todo parity between ids and names
        self.event_ids = []
        self.event_names = []
        self.current_id = -1           # 'value' attr of the option being read
        self.in_option = False
        self.in_race_select = False
    def handle_starttag(self, tag, attrs):
        if self.in_race_select and tag == "option":
            self.in_option = True
            self.current_id = self.extract_attr(attrs, "value")
        elif tag == "select" and self.extract_attr(attrs, "id") == "divid":
            self.in_race_select = True
    def handle_data(self, data):
        # quick hack, check if the season is a substring of the race name
        if self.in_option and self.year in data:
            self.event_ids.append(self.current_id)
            self.event_names.append(data)
    def handle_endtag(self, tag):
        if self.in_race_select and tag == "select":
            self.in_race_select = False
        if tag == "option":
            self.in_option = False
            self.current_id = -1
    @staticmethod
    def extract_attr(attrs, attr_name):
        """Return the value of attr_name from an HTMLParser attrs list, or None."""
        for attr_pair in attrs:
            if len(attr_pair) == 2:
                if attr_pair[0] == attr_name:
                    return attr_pair[1]
        return None
class BirkiePre2007Prefetcher(HTMLParser):
    """Scrapes birkie.com's results landing page for PDF result links that
    belong to one season (pre-2007 results exist only as PDFs)."""
    def __init__(self, season):
        """
        :param season: the season the birkie took place, one less than the year of the race your probably want (str)
        """
        HTMLParser.__init__(self)
        self.year = str(int(season) + 1)
        self.race_urls = []
        self.race_names = []
        self.current_header = ""          # text of the last <h3> year header seen
        self.in_results_div = False
        self.in_results_header = False
        self.in_result_list_element = False
        self.in_anchor = False
        self.current_result_kept = False  # whether the current <a> passed the filters
        self.current_race_name_parts = []
    def handle_starttag(self, tag, attrs):
        # track the div -> h3 header -> li -> a nesting; only anchors under a
        # header containing the wanted year are considered
        if tag == "div" and self.extract_attr(attrs, "class") == "col-xs-12 col-md-6":
            self.in_results_div = True
        elif self.in_results_div and tag == "h3":
            self.in_results_header = True
        elif self.in_results_div and tag == "li" and self.year in self.current_header:
            self.in_result_list_element = True
        elif self.in_result_list_element and tag == "a":
            self.in_anchor = True
            url = self.extract_attr(attrs, "href")
            if self.is_pdf_link(url) and self.is_non_duped_result(url):
                self.race_urls.append(url)
                self.current_result_kept = True
            else:
                self.current_result_kept = False
    def handle_data(self, data):
        if self.in_results_header:
            self.current_header = data
        elif self.in_anchor and self.current_result_kept:
            self.current_race_name_parts += data.split()
    def handle_endtag(self, tag):
        if self.in_results_div and tag == "div":
            self.in_results_div = False
        elif self.in_results_header and tag == "h3":
            self.in_results_header = False
        elif self.in_result_list_element and tag == "li":
            self.in_result_list_element = False
        elif self.in_anchor and tag == "a":
            self.in_anchor = False
            if self.current_result_kept:
                # race name = "<year header> <anchor text>"
                self.race_names.append(self.current_header + " " + " ".join(self.current_race_name_parts))
            self.current_race_name_parts = []
    @staticmethod
    def extract_attr(attrs, attr_name):
        """Return the value of attr_name from an HTMLParser attrs list, or None."""
        for attr_pair in attrs:
            if len(attr_pair) == 2:
                if attr_pair[0] == attr_name:
                    return attr_pair[1]
        return None
    @staticmethod
    def is_pdf_link(href):
        # todo it's too late for doing this right...
        return href.endswith(".pdf")
    @staticmethod
    def is_non_duped_result(href):
        # sometimes two pdfs contain more or less the same results
        # simple heuristic: these are always age group awards :)
        # todo this requires more tuning!
        return "age" not in href.lower()
def handle2014On(season, race_store):
    """Fetch structured results (2014+ format) from birkie.pttiming.com.

    :param season: the season of the race we want. note Birkies are in Feb, so they are a year ahead (str)
    :param race_store: store of all currently processed races (RaceResultStore)
    :return: void
    """
    year = int(season) + 1
    for div_ix, div in enumerate(RACES):
        # todo get date of race
        # NOTE(review): the stored URL uses the page/div placeholders, so all
        # four divisions share the same url_for_db; dedup relies on the race
        # name differing -- confirm RaceInfo equality includes the name.
        url_for_db = BASE_URL_FORMAT_2014ON % (year, URL_2007ON_DB_URL_PAGE, URL_2014ON_DB_DIV_ID)
        race_info = RaceInfo(season, BIRKIE_RACE_DIVISION, str(year), url_for_db, "%s %s" % (BIRKIE_RACE_NAME, div))
        if race_info in race_store:
            # todo logging
            print("Skipping race (%s) that has already been processed." % (race_info, ))
            continue
        total_results = []
        page = 0
        # page size defaults to 100; a smaller page means we've reached the end
        current_page_size = 100
        while current_page_size == 100:
            # fresh parser per page since the parser accumulates state
            parser = Birkie2014Parser()
            # division ids on the site are indexed from 1
            div_id = div_ix + 1
            url = BASE_URL_FORMAT_2014ON % (year, page, div_id)
            try:
                response = requests.get(url)
            except Exception as e:
                print("failed to fetch Birkie results at url (%s) with error: %s" % (url, str(e)))
                # BUG FIX: the original 'continue' skipped the page increment,
                # retrying the same URL forever on a persistent failure; give
                # up on this division instead.
                current_page_size = 0
                continue
            if response.status_code == 200:
                table = response.text[response.text.index("Overall Event Results"):]
                parser.feed(table)
                current_page_size = len(parser.race_results)
                total_results += parser.race_results
            else:
                print("Bad response code (%d) at url %s" % (response.status_code, url))
                current_page_size = 0
            page += 1
        race = StructuredRaceResults(race_info, total_results)
        race.serialize()
def handle2007To2015(season, race_store):
    """
    structured results are available through a different api 2007-2015
    todo merge like components with the 2014On fetcher- very similar (parser is the exact same...)
    :param season: the season of the race (str)
    :param race_store: store of all currently processed races (RaceResultStore)
    :return: void
    """
    year = str(int(season) + 1)
    # first, we have to figure out which races belong to this season
    prefetch_parser = Birkie2007To2014Prefetcher(season)
    try:
        response = requests.get(URL_PREFETCH_2007ON)
    except Exception as e:
        print("failed to prefetch birkie races belonging to season '%s'" % (season, ))
        return
    if response.status_code == 200:
        prefetch_parser.feed(response.text)
    else:
        print("failed to prefetch birkie races belonging to season '%s' due to response %d" % (season, response.status_code))
        # BUG FIX: previously fell through and iterated an empty parser;
        # there is nothing to do without a prefetch, so bail out.
        return
    # now we can fetch the results for the given ids
    for ix, race_id in enumerate(prefetch_parser.event_ids):
        race_name = prefetch_parser.event_names[ix]
        # todo get actual date of the race
        url_for_db = BASE_URL_FORMAT_2007ON % (race_id, URL_2007ON_DB_URL_PAGE)
        race_info = RaceInfo(season, BIRKIE_RACE_DIVISION, str(year), url_for_db, race_name)
        if race_info in race_store:
            # todo logging
            print("Skipping race (%s) that has already been processed." % (race_info))
            continue
        total_results = []
        # indexed from 1 here for some reason
        page = 1
        current_page_size = 100
        # we expect a full page to be of size 100, anything less means no more results
        while current_page_size >= 100:
            url = BASE_URL_FORMAT_2007ON % (race_id, page)
            # reset state of the parser by creating a new one :/
            parser = Birkie2007OnParser()
            try:
                response = requests.get(url)
            except Exception as e:
                print("Failed to get individual race at url %s" % (url,))
                # BUG FIX: the original fell through and dereferenced a stale
                # (or, on the first page, unbound) 'response'; abandon this
                # race instead.
                break
            if response.status_code == 200:
                parser.feed(response.text)
                partial_results = parser.race_results
                current_page_size = len(partial_results)
                total_results += partial_results
            else:
                print("Failed to get individual race at url %s due to response %d" % (url, response.status_code))
                current_page_size = 0
            page += 1
        race = StructuredRaceResults(race_info, total_results)
        race.serialize()
def handlePre2007Season(season, race_store):
    """
    attempt to find unstructured results on the main results page where some pdfs reside
    :param season: season the race took place, probably a year less than the race you are interested in (str)
    :param race_store: store of all currently processed races (RaceResultStore)
    :return: void
    """
    year = str(int(season) + 1)
    try:
        response = requests.get(URL_PREFETCH_PRE2007)
    except Exception as e:
        print("Error: failed to prefetch birkie races at url %s" % (URL_PREFETCH_PRE2007))
        return
    prefetch_parser = BirkiePre2007Prefetcher(season)
    if response.status_code == 200:
        prefetch_parser.feed(response.text)
    else:
        print("Error: failed to prefetch birkie races at url '%s' with return code %s" % (URL_PREFETCH_PRE2007, response.status_code))
        return
    # NOTE(review): this connection is never used for any query in this
    # function (only commit/close at the end); presumably serialize() writes
    # through its own connection -- confirm whether cnx can be removed.
    cnx = mysql.connector.connect(user=DB_USER, password=DB_PASSWORD, host="localhost")
    for ix, race_name in enumerate(prefetch_parser.race_names):
        url = prefetch_parser.race_urls[ix]
        race_info = RaceInfo(season, BIRKIE_RACE_DIVISION, year, url, race_name)
        if race_info in race_store:
            # todo logging
            print("Skipping processing of a race (%s) that has already been processed" % (race_info,))
            continue
        try:
            # PDFs are fetched with urllib2 rather than requests here
            response = urllib2.urlopen(url)
        except Exception as e:
            print("Failed to fetch pdf at url %s" % (url,))
            continue
        if response.getcode() == 200:
            UnstructuredPDFRaceResults(race_info, response.read()).serialize()
        else:
            # todo logging
            print("Unexpected return code (%d) for url (%s). Skipping..." % (response.getcode(), url, ))
    cnx.commit()
    cnx.close()
def fetch_season(season, race_store):
    """
    results are stored differently for different years.
    :param season: the season we are getting results for (str)
    :param race_store: store of all currently processed races (RaceResultStore)
    :return: void
    """
    # BUG FIX: the original's second branch compared the *string* season
    # against an int ('season >= 2007'), which raises TypeError on Python 3
    # and compares by type name (always True) on Python 2, so the pre-2007
    # handler was unreachable; compare the parsed int everywhere.
    season_int = int(season)
    if season_int >= 2013:
        handle2014On(season, race_store)
    elif season_int >= 2007:
        handle2007To2015(season, race_store)
    else:
        # attempt to find unstructured results on the main results pages
        handlePre2007Season(season, race_store)
if __name__ == "__main__":
    # BUG FIX: fetch_season requires a race_store argument; the original call
    # omitted it and raised TypeError immediately. An empty set behaves as an
    # empty store here, since the handlers only perform membership tests on it.
    # TODO(review): substitute a real RaceResultStore once its import is known.
    fetch_season("2009", set())
23529 | <filename>app/request.py
from app import app
import urllib.request,json
from .models import source
from .models import article
# Convenience aliases for the model classes
Source = source.Source
Article = article.Article
# Getting api key
api_key = app.config['NEWS_API_KEY']
# Getting the source base url
base_url = app.config["SOURCE_API_BASE_URL"]
# base url for article requests (format string filled with id/category + key)
article_url = app.config["ARTICLE_API_BASE_URL"]
def process_results(source_list):
    '''
    Function that processes the source result and transform them to a list of Objects

    Args:
        source_list: A list of dictionaries that contain source details

    Returns :
        source_results: A list of source objects
    '''
    processed = []
    for raw in source_list:
        # skip entries without a usable link
        if not raw.get('url'):
            continue
        processed.append(Source(
            raw.get('id'),
            raw.get('name'),
            raw.get('description'),
            raw.get('url'),
            raw.get('category'),
            raw.get('language'),
            raw.get('country'),
        ))
    return processed
def get_sources(category):
    '''
    Function that gets the json response to our url request

    Requests the News API source listing for the given category and returns a
    list of Source objects, or None when the 'sources' key is empty.
    '''
    get_sources_url = base_url.format(category,api_key)
    with urllib.request.urlopen(get_sources_url) as url:
        get_sources_data = url.read()
        get_sources_response = json.loads(get_sources_data)
        source_results = None
        if get_sources_response['sources']:
            source_results_list = get_sources_response['sources']
            source_results = process_results(source_results_list)
    return source_results
def get_articles(category):
    '''
    Function that gets the json response to our url request

    NOTE(review): this definition is shadowed by the later
    get_articles(source) further down this module (Python keeps only the
    last binding); consider renaming one of them.
    '''
    get_articles_url = article_url.format(category,api_key)
    with urllib.request.urlopen(get_articles_url) as url:
        get_articles_data = url.read()
        get_articles_response = json.loads(get_articles_data)
        article_results = None
        if get_articles_response['articles']:
            article_results_list = get_articles_response['articles']
            # BUG FIX: article dicts were being fed to process_results(), the
            # *source* processor, which produced Source objects with mostly
            # empty fields; use the article processor instead.
            article_results = process_articles(article_results_list)
    return article_results
def get_source(id):
    '''
    Fetch details for a single source and wrap them in a Source object.

    NOTE(review): the request is built from article_url (the *article*
    endpoint) and source fields are read off the top-level JSON object --
    confirm this is the intended endpoint; a dedicated source-detail URL
    seems more likely. Also note the parameter shadows the builtin id().
    '''
    get_sources_details_url = article_url.format(id,api_key)
    with urllib.request.urlopen(get_sources_details_url) as url:
        source_details_data = url.read()
        source_details_response = json.loads(source_details_data)
        source_object = None
        if source_details_response:
            id = source_details_response.get('id')
            name = source_details_response.get('name')
            description = source_details_response.get('description')
            url = source_details_response.get('url')
            category = source_details_response.get('category')
            language = source_details_response.get('language')
            country = source_details_response.get('country')
            source_object = Source(id,name,description,url,category,language,country)
    return source_object
def process_articles(article_list):
    '''
    Function that processes the article result and transform them to a list of Objects

    Args:
        article_list: A list of dictionaries that contain article details

    Returns :
        article_results: A list of article objects
    '''
    articles = []
    for raw in article_list:
        # only keep articles that carry a link
        if not raw.get('url'):
            continue
        articles.append(Article(
            raw.get('author'),
            raw.get('title'),
            raw.get('description'),
            raw.get('url'),
            raw.get('urlToImage'),
            raw.get('publishedAt'),
            raw.get('content'),
        ))
    return articles
def get_articles(source):
    '''
    Function that gets the json response to our url request

    NOTE(review): this re-definition replaces the earlier get_articles in this
    module (Python keeps the last binding); here the parameter is a source id
    rather than a category.
    '''
    get_articles_url = article_url.format(source,api_key)
    with urllib.request.urlopen(get_articles_url) as url:
        get_articles_data = url.read()
        get_articles_response = json.loads(get_articles_data)
        article_results = None
        if get_articles_response['articles']:
            article_results_list = get_articles_response['articles']
            article_results = process_articles(article_results_list)
    return article_results
117410 | <reponame>yanshengjia/algorithm<gh_stars>10-100
"""
TinyURL is a URL shortening service where you enter a URL such as https://leetcode.com/problems/design-tinyurl and it returns a short URL such as http://tinyurl.com/4e9iAk.
Design the encode and decode methods for the TinyURL service. There is no restriction on how your encode/decode algorithm should work. You just need to ensure that a URL can be encoded to a tiny URL and the tiny URL can be decoded to the original URL.
Solution:
1. Using simple counter
We use a list to store all encountered urls in a list and the index of urls represents the encoding of urls.
Pros:
* easy to understand
Cons:
* The range of URLs that can be decoded is limited by the range of int.
* If excessively large number of URLs have to be encoded, after the range of int is exceeded, integer overflow could lead to overwriting the previous URLs' encodings, leading to the performance degradation.
* The length of the URL isn't necessarily shorter than the incoming longURL. It is only dependent on the relative order in which the URLs are encoded.
* One problem with this method is that it is very easy to predict the next code generated, since the pattern can be detected by generating a few encoded URLs.
* If I'm asked to encode the same long URL several times, it will get several entries. That wastes codes and memory.
* People can find out how many URLs have already been encoded. Not sure I want them to know.
* People might try to get special numbers by spamming me with repeated requests shortly before their desired number comes up.
* Only using digits means the codes can grow unnecessarily large. Only offers a million codes with length 6 (or smaller). Using six digits or lower or upper case letters would offer (10+26*2)6 = 56,800,235,584 codes with length 6.
    2. Random number + Hashtable
To solve the problems in the previous solution, we can use random number as encodings of urls.
And use 2 hashtables to record url2code and code2url.
Pros:
* Save space and memory
* Dont have to worry about spamming because the encoding of urls can not be predicted.
Cons:
* Hash collision, we have to re-generate the code if collision happens.
"""
# Using simple counter
# > 40%
class Codec:
    """Shorten URLs by recording them in an append-only list; the short code
    is simply the URL's index in that list."""

    def __init__(self):
        self._stored_urls = []

    def encode(self, longUrl):
        """Encodes a URL to a shortened URL.

        :type longUrl: str
        :rtype: str
        """
        self._stored_urls.append(longUrl)
        return 'http://tinyurl.com/{}'.format(len(self._stored_urls) - 1)

    def decode(self, shortUrl):
        """Decodes a shortened URL to its original URL.

        :type shortUrl: str
        :rtype: str
        """
        index = int(shortUrl.rsplit('/', 1)[-1])
        return self._stored_urls[index]
# Random encoding + hashtable
# > 20%
import string
import random
class Codec:
    """Shorten URLs with a fixed-length random code; two dicts provide O(1)
    lookup in both directions, and re-encoding a known URL reuses its code."""

    def __init__(self):
        # 62-symbol alphabet: digits + ASCII letters
        self.chars = string.digits + string.ascii_letters
        self.encode_len = 6
        self.url2code = {}
        self.code2url = {}

    def encode(self, longUrl):
        """Encodes a URL to a shortened URL.

        :type longUrl: str
        :rtype: str
        """
        while longUrl not in self.url2code:
            code = ''.join(random.sample(self.chars, self.encode_len))
            # retry on the (unlikely) collision with an existing code
            if code not in self.code2url:
                self.code2url[code] = longUrl
                self.url2code[longUrl] = code
        # BUG FIX: look the code up in url2code instead of returning the
        # loop-local variable, which was unbound (UnboundLocalError) when
        # the same URL was encoded a second time.
        return 'http://tinyurl.com/' + self.url2code[longUrl]

    def decode(self, shortUrl):
        """Decodes a shortened URL to its original URL.

        :type shortUrl: str
        :rtype: str
        """
        # BUG FIX: use the configured code length rather than hard-coding 6
        return self.code2url[shortUrl[-self.encode_len:]]
1673604 | from queue import Queue
class FizzBuzz:
    """Concurrency-style FizzBuzz (LeetCode 1195).

    Four worker methods (fizz/buzz/fizzbuzz/number) each iterate 1..n and
    coordinate through four capacity-1 queues used as turn tokens: a worker
    blocks on its own queue, prints its value, then hands the token to
    whichever worker owns the next value.
    """
    def __init__(self, n: int):
        self.n = n
        # capacity-1 queues: holding the token grants the right to print
        self.fizz_continue = Queue(1)
        self.buzz_continue = Queue(1)
        self.fizzbuzz_continue = Queue(1)
        self.number_continue = Queue(1)
        # 1 is divisible by neither 3 nor 5, so the number worker goes first
        self.number_continue.put(True)
    @classmethod
    def is_fizz(cls, n):
        # divisible by 3 only
        return (n % 3 == 0) and (n % 5 != 0)
    @classmethod
    def is_buzz(cls, n):
        # divisible by 5 only
        return (n % 5 == 0) and (n % 3 != 0)
    @classmethod
    def is_fizzbuzz(cls, n):
        # divisible by both 3 and 5
        return (n % 3 == 0) and (n % 5 == 0)
    @classmethod
    def is_number(cls, n):
        # divisible by neither 3 nor 5
        return (n % 3 != 0) and (n % 5 != 0)
    def rouge(self, you_queue, condition, printer):
        # shared worker loop: for every value this worker is responsible for,
        # wait on the turn token, print, then pass the token onward
        for i in range(1, self.n + 1):
            if condition(i):
                you_queue.get()
                printer(i)
                next_queue = self.get_next_queue(i + 1)
                next_queue.put(True)
    def get_next_queue(self, n):
        # exactly one predicate is True for any n, so the True key maps to
        # the queue of the worker that must print n; the False entries merely
        # overwrite each other and are never looked up
        result_map = {
            self.is_buzz(n): self.buzz_continue,
            self.is_fizz(n): self.fizz_continue,
            self.is_fizzbuzz(n): self.fizzbuzz_continue,
            self.is_number(n): self.number_continue,
        }
        return result_map[True]
    # printFizz() outputs "fizz"
    def fizz(self, printFizz: 'Callable[[], None]') -> None:
        self.rouge(self.fizz_continue, self.is_fizz, lambda _: printFizz())
    # printBuzz() outputs "buzz"
    def buzz(self, printBuzz: 'Callable[[], None]') -> None:
        self.rouge(self.buzz_continue, self.is_buzz, lambda _: printBuzz())
    # printFizzBuzz() outputs "fizzbuzz"
    def fizzbuzz(self, printFizzBuzz: 'Callable[[], None]') -> None:
        self.rouge(self.fizzbuzz_continue, self.is_fizzbuzz, lambda _: printFizzBuzz())
    # printNumber(x) outputs "x", where x is an integer.
    def number(self, printNumber: 'Callable[[int], None]') -> None:
        self.rouge(self.number_continue, self.is_number, lambda i: printNumber(i))
158496 | # -*- coding: utf-8 -*-
###################################################################################
# Copyright (C) 2019 SuXueFeng
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###################################################################################
import logging
from odoo import api, fields, models
_logger = logging.getLogger(__name__)
class OaBaseModel(models.Model):
    """Base model for OA approval forms (流程审批表单基类).

    Mixes in mail.thread / mail.activity.mixin so each approval record gets
    chatter, followers and scheduled activities; records are displayed by
    their form_number.
    """
    _name = 'oa.base.model'
    _inherit = ['mail.thread', 'mail.activity.mixin']
    _description = "流程审批表单基类"
    _rec_name = 'form_number'

    # archive flag used by Odoo's default active filter
    active = fields.Boolean(string=u'Active', default=True)
    name = fields.Char(string='标题', required=True)
    form_number = fields.Char(string='单据编号', index=True, copy=False)
    company_id = fields.Many2one('res.company', string='公司', default=lambda self: self.env.user.company_id.id)
    remarks = fields.Text(string=u'备注')
    # computed count of ir.attachment records linked to this record
    attachment_number = fields.Integer(compute='_compute_attachment_number', string='附件')

    def attachment_image_preview(self):
        """Open an attachment-management window filtered to this record.

        NOTE(review): 'view_type' in act_window dicts is deprecated in newer
        Odoo versions -- confirm against the target Odoo release.
        """
        self.ensure_one()
        domain = [('res_model', '=', self._name), ('res_id', '=', self.id)]
        return {
            'domain': domain,
            'res_model': 'ir.attachment',
            'name': u'附件管理',
            'type': 'ir.actions.act_window',
            'view_id': False,
            'view_mode': 'kanban,tree,form',
            'view_type': 'form',
            'limit': 20,
            'context': "{'default_res_model': '%s','default_res_id': %d}" % (self._name, self.id)
        }

    def _compute_attachment_number(self):
        """Count attachments per record with a single grouped read_group."""
        attachment_data = self.env['ir.attachment'].read_group(
            [('res_model', '=', self._name), ('res_id', 'in', self.ids)], ['res_id'], ['res_id'])
        attachment = dict((data['res_id'], data['res_id_count']) for data in attachment_data)
        for expense in self:
            expense.attachment_number = attachment.get(expense.id, 0)
1790380 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
from requests import Response
from typing import Any, Optional, Mapping
Headers = Optional[Mapping[str, str]]
class RequestError(Exception):
    """Error raised when a request to the server fails (non-success response).

    Captures the request (url/headers/body) and the failing response so the
    caller can inspect the HTTP status code and raw body text.
    """
    def __init__(self, url: str, headers: Headers, body: Any, response: Response) -> None:
        self.url = url
        self.headers = headers
        self.body = body
        self.status_code = response.status_code
        self.text = response.text
        self.response = response
        super().__init__(self.message())

    def message(self) -> str:
        """Human-readable summary with the HTTP status code and raw body."""
        return "code {}, body: {}".format(self.status_code, self.text)
class FormatError(Exception):
    """Raised when the server response does not have the expected format
    (a required key is missing from the decoded JSON body)."""

    def __init__(self, url: str, headers: Headers, body: Any, json: str, key: str) -> None:
        self.url = url
        self.headers = headers
        self.body = body
        self.json = json
        self.key = key
        super().__init__(self.message())

    def message(self) -> str:
        # `json` here resolves to the module (the ctor parameter only shadows
        # it inside __init__); self.json holds the decoded payload.
        pretty = json.dumps(self.json, indent=4)
        return f"missing key: {self.key}, body: {pretty}"
# StarcoderdataPython (snippet boundary)
"""Functions to check the status of an existing game on BGA."""
import datetime
import logging
from logging.handlers import RotatingFileHandler
from bga_account import BGAAccount
from bga_game_list import get_game_list
from bga_game_list import update_games_cache
from creds_iface import get_discord_id
from utils import normalize_name
# Quiet the chatty discord library; keep our own logger at DEBUG.
logging.getLogger("discord").setLevel(logging.WARN)
LOG_FILENAME = "errs"
logger = logging.getLogger(__name__)
# Cap the log file at ~10 MB; backupCount=0 means it is truncated, not rotated.
handler = RotatingFileHandler(LOG_FILENAME, maxBytes=10000000, backupCount=0)
formatter = logging.Formatter("%(asctime)s | %(name)s | %(levelname)s | %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
async def get_tables_by_players(players, message, send_running_tables=True, game_target=""):
    """Send running tables option is for integration where people don't want to see existing tables.

    Looks up each BGA player name, gathers the tables they all share, and
    posts one status line per matching table (optionally filtered to
    ``game_target``). Progress messages sent while working are deleted once
    the listing is complete.
    """
    bga_ids = []
    tables = {}
    bga_account = BGAAccount()
    sent_messages = []
    for player in players:
        if player.startswith("<@"):
            await message.channel.send("Not yet set up to read discord tags.")
            bga_account.close_connection()
            return
        bga_id = bga_account.get_player_id(player)
        if bga_id == -1:
            await message.channel.send(f"Player {player} is not a valid bga name.")
            bga_account.close_connection()
            return
        bga_ids.append(bga_id)
        player_tables = bga_account.get_tables(bga_id)
        found_msg = await message.channel.send(f"Found {str(len(player_tables))} tables for {player}")
        sent_messages += [found_msg]
        tables.update(player_tables)
    bga_games, err_msg = get_game_list()
    if len(err_msg) > 0:
        await message.channel.send(err_msg)
        return
    normalized_bga_games = [normalize_name(game) for game in bga_games]
    # Keep only tables where every requested player is seated.
    player_tables = []
    for table_id in tables:
        table = tables[table_id]
        table_player_ids = table["player_display"]  # Table.player_display is the player Ids at this table
        if set(bga_ids).issubset(table_player_ids):
            # match the game if a game was specified
            normalized_game_name = get_bga_alias(table["game_name"])
            if len(game_target) == 0 or normalized_game_name == normalize_name(game_target):
                player_tables.append(table)
    for table in player_tables:
        sent_messages += [await message.channel.send("Getting table information...")]
        # BUG FIX: this log line previously interpolated the stale `table_id`
        # left over from the loop above (always the last table iterated).
        logger.debug(f"Checking table for bga_ids {str(bga_ids)} in table {str(table)}")
        # Check for game name by id as it may differ from name (i.e. 7 vs 'seven')
        game_name_list = [game for game in bga_games if table["game_id"] == str(bga_games[game])]
        if len(game_name_list) == 0:
            # Unknown game id: trust the table's own name and cache it.
            game_name = table["game_name"]
            new_game = {table["game_name"]: table["game_id"]}
            normalized_bga_games.append(normalize_name(table["game_name"]))
            update_games_cache(new_game)
        else:
            game_name = game_name_list[0]
        if normalize_name(game_name) not in normalized_bga_games:
            bga_account.close_connection()
            await message.channel.send(f"{game_name} is not a BGA game.")
            return
        # Only add table status lines for games we care about
        if len(game_target) > 0 and normalize_name(game_name) != normalize_name(game_target):
            continue
        if send_running_tables:
            await send_active_tables_list(message, bga_account, table, game_name)
    for sent_message in sent_messages:  # Only delete all status messages once we're done
        await sent_message.delete()
    if len(player_tables) == 0:
        # Try to convert bga names to discord names
        players_list = []
        for player_name in players:
            is_player_added = False
            if message.guild:
                player_id = get_discord_id(player_name, message)
                if player_id != -1:
                    players_list.append(f"<@!{player_id}>")
                    is_player_added = True
            # BUG FIX: was `elif not is_player_added`, which silently dropped a
            # player whose discord lookup failed inside a guild; fall back to
            # the raw BGA name instead.
            if not is_player_added:
                players_list.append(player_name)
        await message.channel.send(f"No {game_target} tables found for players [{', '.join(players_list)}].")
    bga_account.close_connection()
def get_bga_alias(game_name):
    """Return the normalized game-creation name for an in-game name.

    BGA uses different names *in game* than for game creation, so map the
    known in-game names back to their creation-time aliases; unknown names
    are returned normalized but otherwise unchanged.
    """
    aliases = {
        "redsevengame": "red7",
        "sechsnimmt": "6nimmt",
        "sevenwonders": "7wonders",
        "sevenwondersduel": "7wondersduel",
        "yatzy": "yahtzee",  # `yatzy` is due to it initially using the French name due to copyright concerns
    }
    # Normalize once instead of up to three times, and use dict.get instead
    # of a membership test followed by a second lookup.
    normalized = normalize_name(game_name)
    return aliases.get(normalized, normalized)
async def send_active_tables_list(message, bga_account, table, game_name):
    """Post one status line (players, age, progress, moves, link) for `table`."""
    # If a game has not started, but it is scheduled, it will be None here.
    if table["gamestart"]:
        gamestart = table["gamestart"]
    else:
        gamestart = table["scheduled"]
    # Age in whole days; timestamps appear to be unix epoch seconds.
    days_age = (datetime.datetime.utcnow() - datetime.datetime.fromtimestamp(int(gamestart))).days
    percent_done, num_moves, table_url = bga_account.get_table_metadata(table)
    percent_text = ""
    if percent_done:  # If it's at 0%, we won't get a number
        percent_text = f"\t\tat {percent_done}%"
    p_names = []
    for p_id in table["players"]:
        p_name = table["players"][p_id]["fullname"]
        # Would include this, but current_player_nbr seems to be the opposite value of expected for a player
        # if table["players"][p_id]["table_order"] == str(table["current_player_nbr"]):
        #     p_name = '**' + p_name + ' to play**'
        p_names.append(p_name)
    msg_to_send = f"__{game_name}__\t\t[{', '.join(p_names)}]\t\t{days_age} days old {percent_text}\t\t{num_moves} moves\n\t\t<{table_url}>\n"
    logger.debug("Sending:" + msg_to_send)
    await message.channel.send(msg_to_send)
# StarcoderdataPython (snippet boundary)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 26 13:35:57 2018
@author: cham
"""
# %%
#%pylab qt5
import numpy as np
from ruby import get_isochrone_grid, IsoGrid
# from ruby import isoc_interp, ezinterp
# from ezpadova.parsec import get_one_isochrone, get_photometry_list
from astropy.table import Table
from ruby.isochrone_interp import ezinterp
from ruby.imf import salpeter
from ezpadova import parsec
# %%
""" 1. define grid """
grid_feh = np.arange(-4., 1.2, 0.1) # 4
grid_logt = np.arange(6., 10.5, 0.1) # x2
# ig12 = get_isochrone_grid(
# grid_feh, grid_logt, model='parsec12s',
# phot='gaiaDR2',Zsun=0.0152, n_jobs=20, verbose=10)
# ig12.unify(model='parsec12s')
""" 2. download isochrones """
ig_gaiaDR2 = get_isochrone_grid(
grid_feh, grid_logt, model='parsec12s_r14',
phot='gaiaDR2', Zsun=0.0152, n_jobs=100, verbose=10)
ig_gaiaDR2.Zsun
# %%
from ruby.isochrone_grid import dump_ig, load_ig
dump_ig(ig_gaiaDR2,
"/media/cham/Seagate Expansion Drive/parsec/grid/gaiaDR2.dump")
# %%
from ruby.isochrone_grid import dump_ig, load_ig
ig_gaiaDR2 = load_ig(
"/media/cham/Seagate Expansion Drive/parsec/grid/gaiaDR2.dump")
# %%
import copy
ig = copy.copy(ig_gaiaDR2)
""" modify columns """
from astropy.table import Column
for i in range(ig.niso):
ig.data[i].add_column(
Column(np.log10(ig.data[i]["Zini"] / ig.Zsun), "feh_ini"))
# ig.data[i].remove_column("feh_ini")
ig.data[i].add_column(Column(np.log10(ig.data[i]["Z"] / ig.Zsun), "feh"))
# ig.data[i].remove_column("feh")
ig.data[i].add_column(Column(np.log10(ig.data[i]["Age"]), "logt"))
# ig.data[i].remove_column("logt")
# %%
# ig.unify(model='parsec12s_r14')
""" 3. select subset of isochrone [columns & rows]"""
# ig.sub_rows(cond=(('label', (0, 8)), ('Mini', (0.0, 8.0)), ('logTe', (3.6, 4.1)), ('logg', (2.0, 5.0))))
ig.sub_rows(cond=(('label', (0, 8)), ('Mini', (0.0, 8.0))))
colnames = ['feh_ini', 'logt', 'Mini', 'Mass', 'logL', 'logTe', 'logg',
'label', 'Mloss', 'feh', 'mbolmag', 'Gmag', 'G_BPmag', 'G_RPmag']
ig.sub_cols(colnames=colnames)
""" 4. interpolate along Mini, logTe, logg, etc."""
for i in range(ig.niso):
ig.data[i] = ezinterp(ig[i],
restrictions=(
('Mini', 0.02), ('logTe', 0.02), ('logg', 0.05)),
mode='linear', Mini='Mini')
""" 5. define volume weight """
ig.set_dt(0.2)
ig.set_dfeh(dfeh=0.5)
ig.set_dm()
ig.set_imf(salpeter)
""" 6. calculate total weight for each point """
from astropy.table import Column
for i in range(ig.niso):
w = ig.data[i]["dm"] * ig.data[i]["dt"] * ig.data[i]["dfeh"] * ig.data[i][
"imf"]
try:
ig.data[i].add_column(Column(w, "w"))
except ValueError:
ig.data[i]["w"] = w
ig.data[i].add_column(Column(10. ** ig.data[i]["logTe"], "teff"))
""" 7. combine all isochrones """
from astropy import table
combined_iso = table.vstack(list(ig.data))
# %%
# from joblib import dump, load
# dump(ig, "/media/cham/Seagate Expansion Drive/parsec/grid/gaiaDR2.dump")
# %%
""" 1. The prior from model """
rcParams.update({"font.size": 20})
H, xe, ye = np.histogram2d(
combined_iso["logTe"], combined_iso["logg"],
bins=(np.arange(3., 5, .005), np.arange(-1., 6., 0.05)), normed=False,
weights=combined_iso["w"])
fig = figure(figsize=(10, 8))
imshow(np.fliplr(np.log10(H.T)), cmap=cm.jet,
extent=(5.005, 2.995, 6.025, -1.025), aspect="auto")
colorbar()
xlim(4.5, 3.3)
ylim(5.5, -1)
title("log10(Prior)")
xlabel("$T_{\\rm eff}$ [K]")
ylabel("$\\log{g}$ [dex]")
fig.tight_layout()
fig.savefig("/home/cham/projects/gaia/figs/bayesian/log10_prior.pdf")
fig.savefig("/home/cham/projects/gaia/figs/bayesian/log10_prior.svg")
# %%
from ruby import IsoGrid
x_sun = IsoGrid.predict_from_chi2(
combined_iso,
var_colnames=["teff", "logg", "feh_ini"],
tlf=np.array(np.array([5750, 4.35, 0.0])),
tlf_err=np.array([100., 0.1, 0.1]),
return_colnames=("Mini", "logt", "feh_ini"),
q=(0.16, 0.50, 0.84))
tlf = np.array([5750, 4.35, 0.0]) # solar value
tlf_err = np.array([100., 0.1, 0.1])
var_colnames = ["teff", "logg", "feh_ini"]
# 1. convert isochrone(table) into array
sub_iso = np.array(combined_iso[var_colnames].to_pandas())
# 2. calculate chi2
chi2_values = 0
for i_var in range(len(var_colnames)):
chi2_values += ((sub_iso[:, i_var] - tlf[i_var]) / tlf_err[
i_var]) ** 2.
chi2_values *= -0.5
# 3. chi2 --> PDF
p_post = np.exp(chi2_values) * combined_iso["w"]
figure()
plt.hist(10 ** combined_iso["logTe"], weights=p_post,
bins=np.arange(5000, 6000, 30), normed=True)
figure()
plt.hist(combined_iso["logg"], weights=p_post, bins=np.arange(4, 5, 0.02),
normed=True)
figure()
plt.hist(combined_iso["feh_ini"], weights=p_post, bins=np.arange(-1, 1, 0.1),
normed=True)
figure()
plt.hist2d(10 ** combined_iso["logTe"], combined_iso["logg"],
bins=(np.arange(5200, 6250, 25), np.arange(4, 5, 0.05)),
weights=p_post, normed=True, cmap=cm.gray_r)
colorbar()
# %%
# H, xe, ye = np.histogram2d(
# 10.**combined_iso["logTe"], combined_iso["logg"],
# bins=(np.arange(3500., 10000, 50.), np.arange(-1., 6., 0.05)), normed=False,
# weights=combined_iso["w"])
#
# import seaborn as sns
# Y = np.vstack((10.**combined_iso["logTe"], combined_iso["logg"])).T
# ax = sns.kdeplot(Y, shade = True, cmap = "PuBu")
#
#
# figure()
# imshow(np.fliplr(np.log10(H.T)), cmap=cm.jet, extent=(10000+25, 3500-25, 6.025, -1.025), aspect="auto")
# colorbar()
# %%
# BUG FIX: this `try` had no handler (SyntaxError). Mirror the pattern used
# earlier in this script: add_column raises ValueError when the column name
# already exists, in which case overwrite the existing column in place.
try:
    combined_iso.add_column(Column(10. ** combined_iso["logTe"], "teff"))
except ValueError:
    combined_iso["teff"] = 10. ** combined_iso["logTe"]
# %%
test_tlf = np.array([5750, 4.35, 0.0]) # solar value
err_tlf = np.array([100., 0.1, 0.1])
test_tlf = np.array([5500, 2.5, 0.0])
err_tlf = np.array([100., 0.1, 0.1])
test_tlf = np.array([10. ** 3.68, 2.44, 0.0])
err_tlf = np.array([100., 0.1, 0.1])
def chi2(x, x0, err):
    """Gaussian log-likelihood term: -((x - x0) / err)^2 / 2."""
    residual = (x - x0) / err
    return -0.5 * residual ** 2
# %%timeit
grid_logt = ig.grid_logt
grid_feh = ig.grid_feh
# grid_mini = np.arange(0.3, 5.1, 0.1)
grid_logt = ig.grid_logt
# grid_feh = np.arange(-2.0, 0.8, 0.8)
grid_feh = np.array([-2.0, -1.0, 0., 0.4])
grid_mini = np.logspace(-1, 1, 30)
mesh_logt, mesh_feh, mesh_mini = np.meshgrid(grid_logt, grid_feh, grid_mini)
""" ind: 1:feh, 2:logt, 3:mini """
mesh_label = np.zeros_like(mesh_logt)
# basics, mini, logt, feh
mesh_logt_est = np.zeros_like(mesh_logt)
mesh_logt_err = np.zeros_like(mesh_logt)
mesh_feh_est = np.zeros_like(mesh_logt)
mesh_feh_err = np.zeros_like(mesh_logt)
mesh_mini_est = np.zeros_like(mesh_logt)
mesh_mini_err = np.zeros_like(mesh_logt)
# spectroscopic, teff, logg
mesh_teff = np.zeros_like(mesh_logt)
mesh_teff_est = np.zeros_like(mesh_logt)
mesh_teff_err = np.zeros_like(mesh_logt)
mesh_logg = np.zeros_like(mesh_logt)
mesh_logg_est = np.zeros_like(mesh_logt)
mesh_logg_err = np.zeros_like(mesh_logt)
n_all = np.prod(mesh_logt.shape)
ijk = []
for i in range(mesh_logt.shape[0]):
for j in range(mesh_logt.shape[1]):
for k in range(mesh_logt.shape[2]):
o_interp = ig.interp_mini(
mesh_logt[i, j, k], mesh_feh[i, j, k], mesh_mini[i, j, k],
return_colnames=(
'Mini', 'logt', "feh_ini", 'teff', 'logg', "label"))
mesh_teff[i, j, k] = o_interp[3]
mesh_logg[i, j, k] = o_interp[4]
ijk.append((i, j, k))
mesh_label[i, j, k] = o_interp[5]
print(len(ijk))
from ipyparallel import Client
rc = Client(profile="default")
dv = rc.direct_view()
dv.push({"combined_iso": combined_iso}).get()
dv.push({"mesh_teff": mesh_teff}).get()
dv.push({"mesh_logg": mesh_logg}).get()
dv.push({"mesh_feh": mesh_feh}).get()
dv.push({"flat_mini": mesh_mini.flatten()}).get()
dv.push({"flat_logt": mesh_logt.flatten()}).get()
dv.execute("import numpy as np").get()
dv.execute("from ruby.isochrone_grid import IsoGrid").get()
cmd = """
x_all = np.zeros((len(ijk), 3, 3))
for i, _ijk in enumerate(ijk):
_i, _j, _k = _ijk
if mesh_teff[_i, _j, _k]>0:
try:
x_all[i] = IsoGrid.predict_from_chi2(
combined_iso,
var_colnames=["teff", "logg", "feh_ini"],
tlf=np.array([mesh_teff[_i, _j, _k], mesh_logg[_i, _j, _k], mesh_feh[_i, _j, _k]]),
tlf_err=np.array([100., 0.1, 0.1]),
return_colnames=("Mini", "logt", "feh_ini"),
q=(0.16, 0.50, 0.84))
except ValueError as ae:
x_all[i] = np.nan
else:
x_all[i] = np.nan
"""
# %% test
dv.scatter("ijk", ijk[:100]).get()
dv["ijk"]
dv["len(ijk)"]
dv["x=1"]
# %%time  (Jupyter cell magic, garbled in extraction; not valid Python)
dv.execute(cmd).get()
# %%
dv.scatter("ijk", ijk[:]).get()
dv["ijk"]
dv["len(ijk)"]
dv["x=1"]
dv.execute(cmd).get()
# %%
x_all = dv.gather("x_all").get()
# from joblib import dump
# dump(x_all, "/home/cham/projects/gaia/data/x_all.dump")
for i, (_i, _j, _k) in enumerate(ijk):
mesh_mini_est[_i, _j, _k] = x_all[i][1, 0]
mesh_mini_err[_i, _j, _k] = (x_all[i][2, 0] - x_all[i][0, 0]) / 2.
mesh_logt_est[_i, _j, _k] = x_all[i][1, 1]
mesh_logt_err[_i, _j, _k] = (x_all[i][2, 1] - x_all[i][0, 1]) / 2.
# %%
rcParams.update({"font.size": 20})
fig = figure(figsize=(10, 8));
ax = fig.add_subplot(111)
im = ax.imshow(mesh_logt_err[2], vmin=0., vmax=0.5, cmap=cm.jet,
origin="lower",
extent=(-1 - 1 / 30., 1 + 1 / 30., 6. - 0.05, 10.1 + 0.05),
aspect="auto")
xticks = np.array([0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.])
ax.set_xticks(np.log10(xticks))
ax.set_xticklabels(xticks)
colorbar(im)
ax.contour(np.log10(grid_mini), grid_logt, mesh_logt_err[2], [0.1, 0.2, 0.4],
colors="k", linewidths=[1, 2, 3])
ax.set_xlabel("$M_{ini}$ [$M_\\odot$]")
ax.set_ylabel("$\\log_{10}$ (age / yr)")
ax.plot([0.4, 0.6], [10, 10], lw=1, c="k")
ax.text(.7, 10 - .06, "0.10")
ax.plot([0.4, 0.6], [9.75, 9.75], lw=2, c="k")
ax.text(.7, 9.75 - .06, "0.20")
ax.plot([0.4, 0.6], [9.5, 9.50], lw=3, c="k")
ax.text(.7, 9.5 - .06, "0.40")
ax.set_title("Error of $\\log_{10}$ (age / yr)")
fig.tight_layout()
fig.savefig("/home/cham/projects/gaia/figs/bayesian/logt_error.pdf")
fig.savefig("/home/cham/projects/gaia/figs/bayesian/logt_error.svg")
# %%
rcParams.update({"font.size": 20})
fig = figure(figsize=(10, 8));
ax = fig.add_subplot(111)
im = ax.imshow(np.abs(mesh_logt_est[2] - mesh_logt[2]), vmin=0, vmax=3,
cmap=cm.jet, origin="lower",
extent=(-1 - 1 / 30., 1 + 1 / 30., 6. - 0.05, 10.1 + 0.05),
aspect="auto")
xticks = np.array([0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.])
ax.set_xticks(np.log10(xticks))
ax.set_xticklabels(xticks)
colorbar(im)
ax.contour(np.log10(grid_mini), grid_logt,
np.abs(mesh_logt_est[2] - mesh_logt[2]), [0.5, 1.0, 2.0],
colors="w", linewidths=[1, 2, 3])
ax.set_xlabel("$M_{ini}$ [$M_\\odot$]")
ax.set_ylabel("$\\log_{10}$ (age / yr)")
ax.plot([0.4, 0.6], [10, 10], lw=1, c="k")
ax.text(.7, 10 - .06, "0.5")
ax.plot([0.4, 0.6], [9.75, 9.75], lw=2, c="k")
ax.text(.7, 9.75 - .06, "1.0")
ax.plot([0.4, 0.6], [9.5, 9.50], lw=3, c="k")
ax.text(.7, 9.5 - .06, "2.0")
ax.set_title("Bias of $\\log_{10}$ (age / yr)")
fig.tight_layout()
fig.savefig("/home/cham/projects/gaia/figs/bayesian/logt_bias.pdf")
fig.savefig("/home/cham/projects/gaia/figs/bayesian/logt_bias.svg")
# %%
rcParams.update({"font.size": 20})
fig = figure(figsize=(10, 8));
ax = fig.add_subplot(111)
im = ax.imshow(mesh_mini_err[2] / mesh_mini_est[2], vmin=0., vmax=0.3,
cmap=cm.jet, origin="lower",
extent=(-1 - 1 / 30., 1 + 1 / 30., 6. - 0.05, 10.1 + 0.05),
aspect="auto")
xticks = np.array([0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.])
ax.set_xticks(np.log10(xticks))
ax.set_xticklabels(xticks)
colorbar(im)
ax.contour(np.log10(grid_mini), grid_logt, mesh_mini_err[2] / mesh_mini_est[2],
[0.05, 0.1, 0.2], colors="w", linewidths=[1, 2, 3])
ax.set_xlabel("$M_{ini}$ [$M_\\odot$]")
ax.set_ylabel("$\\log_{10}$ (age / yr)")
ax.plot([0.4, 0.6], [10, 10], lw=1, c="k")
ax.text(.7, 10 - .06, "0.05")
ax.plot([0.4, 0.6], [9.75, 9.75], lw=2, c="k")
ax.text(.7, 9.75 - .06, "0.10")
ax.plot([0.4, 0.6], [9.5, 9.50], lw=3, c="k")
ax.text(.7, 9.5 - .06, "0.20")
ax.set_title("Error of $M_{ini}$ [$M_\\odot$]")
fig.tight_layout()
fig.savefig("/home/cham/projects/gaia/figs/bayesian/mass_error.pdf")
fig.savefig("/home/cham/projects/gaia/figs/bayesian/mass_error.svg")
# %%
rcParams.update({"font.size": 20})
fig = figure(figsize=(10, 8));
ax = fig.add_subplot(111)
im = ax.imshow(np.abs(mesh_mini_est[2] - mesh_mini[2]) / mesh_mini[2], vmin=0.,
vmax=0.1, cmap=cm.jet, origin="lower",
extent=(-1 - 1 / 30., 1 + 1 / 30., 6. - 0.05, 10.1 + 0.05),
aspect="auto")
xticks = np.array([0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.])
ax.set_xticks(np.log10(xticks))
ax.set_xticklabels(xticks)
colorbar(im)
ax.contour(np.log10(grid_mini), grid_logt,
np.abs(mesh_mini_est[2] - mesh_mini[2]) / mesh_mini[2],
[0.01, 0.05, 0.08], colors="w", linewidths=[1, 2, 3])
ax.set_xlabel("$M_{ini}$ [$M_\\odot$]")
ax.set_ylabel("$\\log_{10}$ (age / yr)")
ax.plot([0.4, 0.6], [10, 10], lw=1, c="k")
ax.text(.7, 10 - .06, "0.01")
ax.plot([0.4, 0.6], [9.75, 9.75], lw=2, c="k")
ax.text(.7, 9.75 - .06, "0.05")
ax.plot([0.4, 0.6], [9.5, 9.50], lw=3, c="k")
ax.text(.7, 9.5 - .06, "0.08")
ax.set_title("Bias of $M_{ini}$ [$M_\\odot$]")
fig.tight_layout()
fig.savefig("/home/cham/projects/gaia/figs/bayesian/mass_bias.pdf")
fig.savefig("/home/cham/projects/gaia/figs/bayesian/mass_bias.svg")
# %%
rcParams.update({"font.size": 20})
fig = figure(figsize=(10, 8));
ax = fig.add_subplot(111)
im = ax.imshow(mesh_label[2], vmin=0., vmax=7, cmap=cm.jet, origin="lower",
extent=(-1 - 1 / 30., 1 + 1 / 30., 6. - 0.05, 10.1 + 0.05),
aspect="auto")
xticks = np.array([0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.])
ax.set_xticks(np.log10(xticks))
ax.set_xticklabels(xticks)
colorbar(im)
ax.contour(np.log10(grid_mini), grid_logt, mesh_label[2], [0.5, 1.5, 2.5],
colors="w", linewidths=[1, 2, 3])
ax.set_xlabel("$M_{ini}$ [$M_\\odot$]")
ax.set_ylabel("$\\log_{10}$ (age / yr)")
ax.plot([0.4, 0.6], [10, 10], lw=1, c="k")
ax.text(.7, 10 - .06, "0.5")
ax.plot([0.4, 0.6], [9.75, 9.75], lw=2, c="k")
ax.text(.7, 9.75 - .06, "1.5")
ax.plot([0.4, 0.6], [9.5, 9.50], lw=3, c="k")
ax.text(.7, 9.5 - .06, "2.5")
ax.set_title("Labels")
fig.tight_layout()
fig.savefig("/home/cham/projects/gaia/figs/bayesian/label.pdf")
fig.savefig("/home/cham/projects/gaia/figs/bayesian/label.svg")
# %%
iso = ig.get_iso(9.7, 0)
figure()
plot(iso["teff"], iso["logg"], 'x-')
for i in range(len(iso)):
# text(iso["teff"][i], iso["logg"][i], "{:1.0f}".format(iso["label"][i]))
text(iso["teff"][i], iso["logg"][i], "{:1.3f}".format(iso["Mini"][i]))
# %%
figure()
plot(mesh_teff[2].flatten(), mesh_logg[2].flatten(), '.')
# %%
grid_teff = np.arange(8000., 4000, -100)
grid_logg = np.arange(6, -0.1, -0.2)
# grid_feh = np.array([0.])
# mesh_teff, mesh_logg, mesh_feh = np.meshgrid(grid_teff, grid_logg, grid_feh)
mesh_teff, mesh_logg = np.meshgrid(grid_teff, grid_logg)
flat_teff = mesh_teff.flatten()
flat_logg = mesh_logg.flatten()
dv.scatter("flat_teff", flat_teff).get()
dv.scatter("flat_logg", flat_logg).get()
dv["flat_teff.shape"]
cmd = """
x_all = np.zeros((len(flat_teff), 3, 3))
for i, (_teff, _logg) in enumerate(zip(flat_teff, flat_logg)):
try:
x_all[i] = IsoGrid.predict_from_chi2(
combined_iso,
var_colnames=["teff", "logg", "feh_ini"],
tlf=np.array([_teff, _logg, 0.]),
tlf_err=np.array([100., 0.1, 0.1]),
return_colnames=("Mini", "logt", "feh_ini"),
q=(0.16, 0.50, 0.84))
except ValueError as ae:
x_all[i] = np.nan
"""
dv.execute(cmd).get()
# %%
x2 = dv.gather("x_all").get()
mesh_mini_est = x2[:, 1, 0].reshape(*mesh_teff.shape)
mesh_mini_err = ((x2[:, 2, 0] - x2[:, 0, 0]) / 2.).reshape(*mesh_teff.shape)
mesh_logt_est = x2[:, 1, 1].reshape(*mesh_teff.shape)
mesh_logt_err = ((x2[:, 2, 1] - x2[:, 0, 1]) / 2.).reshape(*mesh_teff.shape)
# %%
figure()
imshow(mesh_mini_err)
# %%
if mesh_teff[i, j, k] > 0 and i > 0 and j > 0:
# do a chi2 matching
try:
x = ig.predict_from_chi2(
combined_iso,
var_colnames=["teff", "logg", "feh_ini"],
tlf=np.array(
[mesh_teff[i, j, k], mesh_logg[i, j, k], mesh_feh[i, j, k]]),
tlf_err=np.array([100., 0.1, 0.1]),
return_colnames=("Mini", "logt", "feh_ini"),
q=(0.16, 0.50, 0.84))
mesh_mini_est[i, j, k] = x[1, 0]
mesh_mini_err[i, j, k] = (x[2, 0] - x[0, 0]) / 2.
mesh_logt_est[i, j, k] = x[1, 1]
mesh_logt_err[i, j, k] = (x[2, 1] - x[0, 1]) / 2.
print(i, j, k, n_all, "success")
except ValueError as ae:
mesh_mini_est[i, j, k] = np.nan
mesh_mini_err[i, j, k] = np.nan
mesh_logt_est[i, j, k] = np.nan
mesh_logt_err[i, j, k] = np.nan
print(i, j, k, n_all, "failure")
else:
mesh_mini_est[i, j, k] = np.nan
mesh_mini_err[i, j, k] = np.nan
mesh_logt_est[i, j, k] = np.nan
mesh_logt_err[i, j, k] = np.nan
print(i, j, k, n_all, "failure")
x = ig.predict_from_chi2(
combined_iso,
var_colnames=["teff", "logg", "feh_ini"],
tlf=np.array([5500, 2.5, 0.0]),
tlf_err=np.array([100., 0.1, 0.1]),
return_colnames=("Mini", "logt", "feh_ini", "G"),
q=(0.16, 0.50, 0.84))
# %%
var_colnames = ["teff", "logg", "feh_ini"]
sub_iso = np.array(combined_iso[var_colnames].to_pandas())
chi2_values = 0
for ivar in range(len(var_colnames)):
chi2_values += ((sub_iso[:, ivar] - test_tlf[ivar]) / err_tlf[ivar]) ** 2.
chi2_values *= -0.5
p_post = np.exp(chi2_values) * combined_iso["w"]
u_feh_ini, inv_ind = np.unique(combined_iso["feh_ini"], return_inverse=True)
u_p_post = np.zeros(u_feh_ini.shape)
u_p_post[inv_ind] = u_p_post[inv_ind] + p_post
for i, _ in enumerate(inv_ind):
if _ < len(u_p_post):
u_p_post[_] += 0.5 * p_post[i]
u_p_post[_ + 1] += 0.5 * p_post[i]
else:
u_p_post[_] += p_post[i]
from scipy.interpolate import interp1d
interp1d(np.cumsum(u_p_post) / np.sum(u_p_post), u_feh_ini)((0.16, 0.50, 0.84))
figure()
plot(u_feh_ini, u_p_post)
plot(u_feh_ini, np.cumsum(u_p_post))
u_teff, inv_ind = np.unique(combined_iso["teff"], return_inverse=True)
u_p_post = np.zeros_like(u_teff)
u_p_post[inv_ind] += p_post
figure()
plot(u_teff, u_p_post)
plot(u_teff, np.cumsum(u_p_post))
# %% teff
hist, bin_edges = np.histogram(combined_iso["teff"],
bins=np.arange(3500., 10000, 50.), normed=True,
weights=p_post)
figure()
plt.step(bin_edges[:-1], hist)
# %% logg
hist, bin_edges = np.histogram(combined_iso["logg"],
np.arange(-1., 6., 0.1), normed=True,
weights=p_post)
figure()
plt.step(bin_edges[:-1], hist)
# %% Mini
hist, bin_edges = np.histogram(combined_iso["Mini"],
np.arange(0., 6., 0.2), normed=True,
weights=p_post)
figure()
plt.step(bin_edges[:-1], hist)
# %% logt
hist, bin_edges = np.histogram(combined_iso["logt"],
np.arange(0., 13., 0.2), normed=True,
weights=p_post)
figure()
plt.step(bin_edges[:-1], hist)
# %%
H, xe, ye = np.histogram2d(
combined_iso["teff"], combined_iso["logg"],
bins=(np.arange(3500., 10000, 100.), np.arange(-1., 6., 0.1)), normed=True,
weights=p_post)
H = np.log10(H)
figure()
imshow(np.fliplr(H.T), cmap=cm.jet,
extent=(10000 + 25, 3500 - 25, 6.025, -1.025),
aspect="auto", vmin=-10, vmax=np.nanmax(H))
# %%
H, xe, ye = np.histogram2d(
combined_iso["Mini"], combined_iso["logt"],
bins=(np.arange(0, 5, 0.1), np.arange(0, 13., 0.2)), normed=True,
weights=p_post)
H = np.log10(H)
# H *= H>-10
figure()
imshow(H.T, cmap=cm.gray_r,
extent=(0 - 0.025, 5 + 0.025, 6 - 0.05, 10.0 + 0.05),
aspect="auto", vmin=-10, vmax=np.nanmax(H))
# %%
figure()
plot(sub_iso[:, 1], chi2_values, 'o', alpha=0.5)
figure()
scatter(sub_iso[:, 0], sub_iso[:, 1], s=10, c=chi2_values, alpha=0.5, vmin=-10,
vmax=0, cmap=cm.gray_r)
colorbar()
xlim(6500, 5000)
# ylim(, 5500)  # FIXME: lower bound lost in extraction; line is not valid Python
# %%
figure()
plot(combined_iso["logTe"], combined_iso["logg"], '.')
from joblib import dump
dump(ig, "/media/cham/Seagate Expansion Drive/parsec/grid/gaiaDR2.dump")
# %%
def salpeter(m, ksi0=1.0):
    """Salpeter initial mass function: ksi0 * m ** -2.35."""
    alpha = -2.35
    return ksi0 * m ** alpha
x = np.arange(0.08, 12.0, 0.01)
y = salpeter(x)
figure()
plot(np.log10(x), np.log10(y))
i
d_mini
# %%
figure()
for isoc in ig.data:
ind = (isoc["label"] >= 1) & (isoc["label"] < 8) & (isoc["Mini"] < 12)
ind = (isoc["label"] < 9) & (isoc["Mini"] < 12)
plot(isoc["logTe"][ind], isoc["logg"][ind], 'kx', alpha=0.05)
# %%
figure();
x = ig.get_iso(9.0, 0)
plot(x["logTe"], x["logg"], "r-.")
for i in range(len(x)):
text(x["logTe"][i], x["logg"][i], "{}".format(x["label"][i]))
x = ig12.get_iso(9.0, 0)
plot(x["logTe"], x["logg"], "b-.")
for i in range(len(x)):
text(x["logTe"][i], x["logg"][i], "{}".format(x["label"][i]))
# %%
"""
Process:
0. Download grid d_logt=0.05, d_feh_ini=0.05
1. unify colnames
2. subcol & subrow, Mini<12.0, 1<=label<8
3. interpolate :
Mini 0.02
logTe 0.01
logg 0.01
4. calculate weight
5. add prior/weight
stage. The labels are:
0=PMS,
1=MS,
2=SGB,
3=RGB,
(4,5,6)=different stages of CHEB,
7=EAGB,
8=TPAGB.
"""
# %%
print(ig)
print(ig12)
# %%
ig12.get_iso(9.0, 0.0)
ig.get_iso(9.0, 0.0)["label"]
unify12(isoc)
# %%
# Mini
# Mass --> Mact
# isoc.rename_column("Mass", "Mact")
# Mass --> Mact
# define (minit, logt, feh) column names
x12 = ig12.get_iso(7.0, 0.0)
x = ig.get_iso(7.0, 0.0)
plot(isoc_interp(x, restrictions=(("Mini", 0.05),),
interp_config=(("label", "linear"),), M_ini="Mini"))
figure();
plot(x["Mini"][:-1], np.diff(x["Mini"]))
# %%
from ruby import IsoSet
IsoSet(isoc_list, vgrid_feh)
# %%
# get_one_isochrone(1e9, 0.0152, model="parsec12s",phot="gaia")
grid_feh = np.arange(-4., 1.2, 0.2)
grid_logt = np.arange(6., 10.5, 0.05)
vgrid_feh, vgrid_logt, grid_list, isoc_list = get_isochrone_grid(
grid_feh, grid_logt, model='parsec12s_r14', phot='gaiaDR2',
Zsun=0.0152, n_jobs=20, verbose=10)
np.sum([_.data.shape[0] for _ in isoc_list])
from ezpadova import parsec
print(parsec.get_one_isochrone(1e7, 0.02, model='parsec12s',
phot='gaia').colnames)
print(parsec.get_one_isochrone(1e7, 0.02, model='parsec12s_r14',
phot='gaia').colnames)
# %%
gdr1 = parsec.get_one_isochrone(1e9, 0.0152, model='parsec12s', phot='gaia')
gdr1_r14 = parsec.get_one_isochrone(1e9, 0.0152, model='parsec12s_r14',
phot='gaia')
gdr2 = parsec.get_one_isochrone(1e9, 0.0152, model='parsec12s', phot='gaiaDR2')
# %%
figure()
plot(gdr1["G"] - gdr1["G_RP"], gdr1["G"], '-')
scatter(gdr1["G"] - gdr1["G_RP"], gdr1["G"], s=10, c=gdr1["stage"],
cmap=cm.jet)
ind = gdr1_r14["label"] < 8
plot(gdr1_r14["Gmag"][ind] - gdr1_r14["G_RPmag"][ind], gdr1_r14["Gmag"][ind],
'-')
colorbar()
ylim(30, -30)
# %%
figure()
plot(gdr1["Gmag"] - gdr1["G_RPmag"], gdr1["Gmag"], '-')
plot(gdr2["Gmag"] - gdr2["G_RPmag"], gdr2["Gmag"], '-')
# StarcoderdataPython (snippet boundary)
from enum import Enum, auto
from autorepr import AutoRepr
from birdway import Unary, Type, Binary
class Token:
    """Base token: subclasses declare public class attributes, and every one
    of them must be supplied as a keyword argument at construction time."""

    def __init__(self, line=None, **attributes):
        # Source line the token came from (underscored, so excluded from
        # the attribute walk below and from equality).
        self._line = line
        # Require a value for every public attribute declared on the class.
        for attr in dir(self):
            if not attr.startswith("_"):
                if attr in attributes:
                    setattr(self, attr, attributes[attr])
                else:
                    raise TypeError(f"no value specified for {attr}")

    def __eq__(self, other):
        # Tokens are equal iff they are the same concrete type and every
        # public attribute matches; `_line` is deliberately ignored.
        # NOTE(review): defining __eq__ implicitly sets __hash__ to None,
        # making tokens unhashable — confirm that is intended.
        if type(self) is type(other):
            for attr in dir(self):
                if not attr.startswith("_"):
                    if getattr(self, attr) != getattr(other, attr):
                        return False
            return True
        else:
            return False
# Simple tokens carry no data: generate one empty Token subclass per name.
# Using type() instead of exec() builds the same classes without string
# evaluation — safer, and the construction is visible to linters.
for _token_name in [
    "KeywordMeta",
    "KeywordArgs",
    "KeywordParam",
    "KeywordRun",
    "KeywordIf",
    "KeywordThen",
    "KeywordElse",
    "KeywordPrintln",
    "KeywordOption",
    "BlockBegin",
    "BlockEnd",
    "TableBegin",
    "TableEnd",
    "OpeningParens",
    "ClosingParens",
    "FormattedStringDelimiter",
    "StringDelimiter",
    "LineEnd",
    "Association",
    "Separator",
    "Assignment",
    "Return",
    "FormattingExpressionBegin",
    "KeywordStruct",
    "KeywordEnum",
    "KeywordFunc",
    "KeywordFor",
    "KeywordFrom",
    "KeywordTo",
    "KeywordDo",
    "KeywordTry",
    "KeywordOn",
    "Range",
    "KeywordUse",
    "KeywordIn",
]:
    globals()[_token_name] = type(_token_name, (Token, AutoRepr), {})
del _token_name
class StringContent(Token, AutoRepr):
    # Literal text chunk of a string; `value` is required by Token.__init__.
    value = str()
class Identifier(Token, AutoRepr):
    # Bare identifier; `name` is required by Token.__init__.
    name = str()
class Integer(Token, AutoRepr):
    # Integer literal; `value` is required by Token.__init__.
    value = int()
class UnaryOperator(Token, AutoRepr):
    # Unary operator token; `operator` (a birdway Unary) is required.
    operator = Unary(1)
class BinaryOperator(Token, AutoRepr):
    # Binary operator token; `operator` (a birdway Binary) is required.
    operator = Binary(1)
class Variable(Token, AutoRepr):
    # Variable reference; `name` is required by Token.__init__.
    name = str()
class TypeName(Token, AutoRepr):
    # Type-name token; attribute name shadows the builtin `type` —
    # NOTE(review): consider renaming, but callers pass `type=` by keyword.
    type = Type(1)
# StarcoderdataPython (snippet boundary)
import keras
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras.applications.resnet50 import ResNet50
from keras.applications.densenet import DenseNet121
from keras.applications.densenet import DenseNet169
from keras.applications.densenet import DenseNet201
from keras.applications.inception_v3 import InceptionV3
from keras.applications.inception_resnet_v2 import InceptionResNetV2
from keras.applications.mobilenet import MobileNet
#from keras.applications.mobilenetv2 import MobileNetV2
from keras.layers import Activation
from keras.layers.normalization import BatchNormalization as BN
from keras.layers import Input
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
import numpy as np
def load_pretrained_model(args):
    """Instantiate an ImageNet-pretrained backbone (include_top=False).

    The input tensor is (args.height, args.width, C) with C=3 for RGB
    input and C=1 otherwise.

    Raises:
        ValueError: if args.model is not a known architecture name.
    """
    h = args.height
    w = args.width
    if args.chan == "rgb":
        shape = (h, w, 3)
    else:
        shape = (h, w, 1)
    print("Setting input shape for pretrained model to", shape)
    input_tensor = Input(shape=shape)
    # Dispatch table replaces the long if/elif chain; every constructor takes
    # the same keyword arguments.
    backbones = {
        "vgg16": VGG16,
        "vgg19": VGG19,
        "resnet50": ResNet50,
        "densenet121": DenseNet121,
        "densenet169": DenseNet169,
        "densenet201": DenseNet201,
        "inceptionv3": InceptionV3,
        "inceptionresnetv2": InceptionResNetV2,
        "mobilenet": MobileNet,
        # "mobilenetv2": MobileNetV2,  # import is commented out at file top
    }
    try:
        constructor = backbones[args.model]
    except KeyError:
        # BUG FIX: an unrecognized name (including the never-imported
        # "mobilenetv2") previously fell through and crashed on the return
        # with UnboundLocalError; fail with a clear message instead.
        raise ValueError(f"unknown model name: {args.model!r}")
    load_model = constructor(input_tensor=input_tensor, weights='imagenet', include_top=False)
    return load_model
def pretrained_model(args, num_classes):
    """Build the pretrained backbone plus a classification head.

    Pools the backbone output, stacks ``args.predlayers`` blocks of
    Dense(args.predsize) -> BatchNorm -> ReLU, and ends with a softmax over
    ``num_classes``. Returns ``(backbone, full_model)``.
    """
    backbone = load_pretrained_model(args)
    features = GlobalAveragePooling2D()(backbone.output)
    for _ in range(args.predlayers):
        features = Dense(args.predsize)(features)
        features = BN()(features)
        features = Activation('relu')(features)
    predictions = Dense(num_classes, activation='softmax')(features)
    full_model = Model(inputs=backbone.input, outputs=predictions)
    return backbone, full_model
# StarcoderdataPython (snippet boundary)
# <filename>populus/utils/formatting.py
from eth_utils import (
to_bytes,
to_text,
is_bytes,
)
def is_prefixed(value, prefix):
    """Return True if *value* starts with *prefix*.

    The prefix is coerced to match the value's type so bytes and text
    inputs can both be tested with a text prefix.
    """
    if is_bytes(value):
        needle = to_bytes(prefix)
    else:
        needle = to_text(text=prefix)
    return value.startswith(needle)
def is_dunderscore_prefixed(value):
    """Return True if *value* (bytes or text) begins with ``__``."""
    return is_prefixed(value, '__')
def remove_dunderscore_prefix(value):
    """Strip a leading ``__`` from *value*; return it unchanged otherwise."""
    return value[2:] if is_dunderscore_prefixed(value) else value
| StarcoderdataPython |
3218729 | <reponame>dpfranke/qtt<filename>qtt/tests/test_zi_hdawg8.py
import unittest
from unittest.mock import MagicMock, call
from qtt.instrument_drivers.virtualAwg.awgs.ZurichInstrumentsHDAWG8 import ZurichInstrumentsHDAWG8
from qtt.instrument_drivers.virtualAwg.awgs.common import AwgCommonError
class TestZurichInstrumentsHDAWG8(unittest.TestCase):
    """Tests for the HDAWG8 virtual-AWG backend against a mocked driver.

    The real QCoDeS instrument is replaced by a MagicMock, so the tests
    only verify which driver methods the backend invokes.
    """

    def setUp(self):
        # Subclass MagicMock so the mock carries the driver's class name.
        class ZIHDAWG8(MagicMock):
            pass

        self.awg = ZIHDAWG8()
        self.zi_hdawg8 = ZurichInstrumentsHDAWG8(self.awg, 0)

    def test_enable_outputs(self):
        """No argument enables all 8 channels; channel 8 is out of range."""
        self.zi_hdawg8.enable_outputs()
        calls = [call.enable_channel(ch) for ch in range(0, 8)]
        self.awg.assert_has_calls(calls)
        with self.assertRaises(AwgCommonError):
            self.zi_hdawg8.enable_outputs([0, 1, 2, 3, 8])
        self.zi_hdawg8.enable_outputs([6, 7])
        # Bug fix: the original used range(6, 7), which only verified the
        # call for channel 6 although channels 6 and 7 were both requested.
        calls = [call.enable_channel(ch) for ch in range(6, 8)]
        self.awg.assert_has_calls(calls)

    def test_disable_outputs(self):
        """No argument disables all 8 channels; channel 8 is out of range."""
        self.zi_hdawg8.disable_outputs()
        calls = [call.disable_channel(ch) for ch in range(0, 8)]
        self.awg.assert_has_calls(calls)
        with self.assertRaises(AwgCommonError):
            self.zi_hdawg8.disable_outputs([0, 1, 2, 3, 8])
        self.zi_hdawg8.disable_outputs([6, 7])
        # Bug fix: range(6, 8) so both requested channels are checked
        # (the original range(6, 7) covered channel 6 only).
        calls = [call.disable_channel(ch) for ch in range(6, 8)]
        self.awg.assert_has_calls(calls)

    def test_change_setting(self):
        """Known settings round-trip; unknown settings raise ValueError."""
        self.awg.get.return_value = 0
        self.zi_hdawg8.change_setting('sampling_rate', 2.4e9)
        self.assertEqual(self.zi_hdawg8.retrieve_sampling_rate(), 2.4e9) if False else None
        self.assertEqual(self.zi_hdawg8.retrieve_setting('sampling_rate'), 2.4e9)
        with self.assertRaises(ValueError):
            self.zi_hdawg8.change_setting('gain', 0.5)

    def test_update_sampling_rate(self):
        """Each supported rate maps onto the matching awgs_0_time index."""
        # NOTE(review): 1200.0 looks out of place in this descending list
        # (its neighbours suggest ~1.17e6) — confirm against the driver's
        # supported sample rates before relying on it.
        sample_rates = [2400000000.0, 1200000000.0, 600000000.0, 300000000.0, 150000000.0, 72000000.0, 37500000.0,
                        18750000.0, 9400000.0, 4500000.0, 2340000.0, 1200.0, 586000.0, 293000.0]
        for sample_rate in sample_rates:
            self.zi_hdawg8.update_sampling_rate(sample_rate)
        calls = [call.set('awgs_0_time', i) for i in range(0, 14)]
        self.awg.assert_has_calls(calls)
        with self.assertRaises(ValueError):
            self.zi_hdawg8.update_sampling_rate(99)

    def test_retrieve_sampling_rate(self):
        """Index 5 reported by the driver corresponds to 72 MHz."""
        sampling_rate_index = 5
        self.awg.get.return_value = sampling_rate_index
        self.assertEqual(72e6, self.zi_hdawg8.retrieve_sampling_rate())

    def test_update_gain(self):
        """Setting the gain updates sigouts_<ch>_range on all 8 channels."""
        self.zi_hdawg8.update_gain(0.5)
        calls = [call.set('sigouts_{}_range'.format(ch), 0.5) for ch in range(8)]
        self.awg.assert_has_calls(calls)

    def test_retrieve_gain(self):
        """A uniform gain is returned; mismatched channel gains raise."""
        self.awg.get.return_value = 0.2
        self.assertEqual(0.2, self.zi_hdawg8.retrieve_gain())
        with self.assertRaises(ValueError):
            # Returning the queried key makes every channel differ.
            self.awg.get.side_effect = lambda v: v
            self.zi_hdawg8.retrieve_gain()

    def test_upload_waveforms(self):
        """Waveforms are written as CSV and compiled into one program."""
        sequence_names = ['seq1', 'seq2', 'seq3']
        sequence_channels = [(1, 1), (1, 0, 1), (2, 0)]
        sequence_items = [range(10), range(1, 11), range(2, 12)]
        self.awg.generate_csv_sequence_program.return_value = 'program'
        self.zi_hdawg8.upload_waveforms(sequence_names, sequence_channels, sequence_items)
        calls = [call.waveform_to_csv('seq1', range(10)),
                 call.waveform_to_csv('seq2', range(1, 11)),
                 call.waveform_to_csv('seq3', range(2, 12)),
                 call.generate_csv_sequence_program(sequence_names, [2, 2, 3]),
                 call.upload_sequence_program(0, 'program')]
        self.awg.assert_has_calls(calls)
| StarcoderdataPython |
1636547 | <filename>py/test/pytests/countdown.py
# Copyright 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A count down monitor for better user interface in run-in tests.
Description
-----------
Count down and display system load. This is helpful for run-in phase to run
multiple stress tests (for example, CPU, memory, disk, GPU, ... etc) in
background so operator can see how long the run-in has been executed, and a
quick overview of system status. It also alarms if there's any abnormal status
(for example overheat) detected during run-in.
Test Procedure
--------------
This test is designed to run in parallel with other background tests.
No user interaction is needed but if there were abnormal events operator should
collect debug logs for fault analysis.
Dependency
----------
- Thermal in Device API (`cros.factory.device.thermal`) for system thermal
sensor readings.
Examples
--------
To run a set of tests for 120 seconds in parallel with countdown showing
progress, add this in test list::
{
"pytest_name": "countdown",
"args": {
"duration_secs": 120
}
}
To run 8 hours and alert if main sensor (CPU) reaches 60 Celcius and fail when
exceeding 65 Celcius::
{
"pytest_name": "countdown",
"args": {
"duration_secs": 28800,
"temp_criteria": [
["CPU", null, 60, 65]
]
}
}
"""
import collections
import logging
import os
import time
from cros.factory.device import device_utils
from cros.factory.test import event_log # TODO(chuntsen): Deprecate event log.
from cros.factory.test import session
from cros.factory.test import state
from cros.factory.test import test_case
from cros.factory.test import test_ui
from cros.factory.testlog import testlog
from cros.factory.utils.arg_utils import Arg
from cros.factory.utils import file_utils
from cros.factory.utils import time_utils
from cros.factory.utils import type_utils
from cros.factory.goofy.plugins import plugin_controller
_WARNING_TEMP_RATIO = 0.95
_CRITICAL_TEMP_RATIO = 0.98
Status = collections.namedtuple('Status',
['temperatures', 'fan_rpm', 'cpu_freq'])
class CountDownTest(test_case.TestCase):
  """A countdown test that monitors and logs various system status."""
  ui_class = test_ui.UI

  ARGS = [
      Arg('duration_secs', int, 'Duration of time to countdown.'),
      Arg('log_interval', int,
          'Interval of time in seconds to log system status.', default=120),
      Arg('ui_update_interval', int,
          'Interval of time in seconds to update system status on UI.',
          default=10),
      Arg('grace_secs', int,
          'Grace period before starting abnormal status detection.',
          default=120),
      Arg(
          'temp_max_delta', int,
          'Allowed difference between current and last temperature of a '
          'sensor.', default=None),
      Arg(
          'temp_criteria', list,
          'A list of rules to check that temperature is under the given range, '
          'rule format: (name, temp_sensor, warning_temp, critical_temp)',
          default=[]),
      Arg(
          'relative_temp_criteria', list,
          'A list of rules to check the difference between two temp sensors, '
          'rule format: (relation, first_sensor, second_sensor, max_diff). '
          'relation is a text output with warning messages to describe the two '
          'temp sensors in the rule', default=[]),
      Arg('fan_min_expected_rpm', int, 'Minimum fan rpm expected',
          default=None),
      Arg(
          'allow_invalid_temp', bool,
          'Allow invalid temperature e.g. values less then or equal to zero, '
          'which may mean thermal nodes are not ready in early builds.',
          default=False),
      Arg('cpu_min_expected_freq', int,
          'Minimum CPU frequency expected. (unit: MHz)', default=None),
      Arg('cpu_max_expected_freq', int,
          'Maximum CPU frequency expected. (unit: MHz)', default=None)
  ]

  def FormatSeconds(self, secs):
    """Format a duration in seconds as HH:MM:SS."""
    hours = int(secs / 3600)
    minutes = int((secs / 60) % 60)
    seconds = int(secs % 60)
    return '%02d:%02d:%02d' % (hours, minutes, seconds)

  def UpdateTimeAndLoad(self):
    """Refresh elapsed/remaining time and the loadavg display on the UI."""
    self.ui.SetHTML(
        self.FormatSeconds(self._elapsed_secs),
        id='cd-elapsed-time')
    self.ui.SetHTML(
        self.FormatSeconds(self.args.duration_secs - self._elapsed_secs),
        id='cd-remaining-time')
    # Use a context manager so the /proc handle is closed immediately;
    # the original leaked one file object per UI refresh.
    with open('/proc/loadavg') as f:
      load_avg = ' '.join(f.read().split()[0:3])
    self.ui.SetHTML(load_avg, id='cd-system-load')

  def UpdateUILog(self, sys_status):
    """Append one status snapshot line to the verbose log and UI panel."""
    # Simplify thermal output by the order of self._sensors
    log_items = [
        time_utils.TimeString(),
        'Temperatures: %s' %
        [sys_status.temperatures[sensor] for sensor in self._sensors],
        'Fan RPM: %s' % sys_status.fan_rpm,
        'CPU frequency (MHz): %s' % sys_status.cpu_freq
    ]
    log_str = '. '.join(log_items)
    self._verbose_log.write(log_str + os.linesep)
    self._verbose_log.flush()
    self.ui.AppendHTML(
        '<div>%s</div>' % test_ui.Escape(log_str),
        id='cd-log-panel',
        autoscroll=True)
    # Cap the panel at 512 entries so the DOM does not grow unboundedly
    # during long run-in sessions.
    self.ui.RunJS('const panel = document.getElementById("cd-log-panel");'
                  'if (panel.childNodes.length > 512)'
                  '  panel.removeChild(panel.firstChild);')

  def UpdateLegend(self, sensor_names):
    """Show the index-to-sensor-name legend used by the temperature list."""
    for i, sensor in enumerate(sensor_names):
      self.ui.AppendHTML(
          '<div class="cd-legend-item">[%d] %s</div>' % (i, sensor),
          id='cd-legend-item-panel')
    if sensor_names:
      self.ui.ToggleClass('cd-legend-panel', 'hidden', False)

  def DetectAbnormalStatus(self, status, last_status):
    """Compare the snapshot against criteria and log/report any warnings."""
    def GetTemperature(sensor):
      try:
        if sensor is None:
          sensor = self._main_sensor
        return status.temperatures[sensor]
      # Bug fix: status.temperatures is a dict keyed by sensor name (see
      # SnapshotStatus / the .items() iteration below), so a missing
      # sensor raises KeyError; the original caught IndexError only and
      # would crash instead of reporting "unavailable". IndexError is
      # kept for backward compatibility.
      except (KeyError, IndexError):
        return None

    warnings = []

    if self.args.temp_max_delta:
      if len(status.temperatures) != len(last_status.temperatures):
        warnings.append(
            'Number of temperature sensors differ (current: %d, last: %d) ' %
            (len(status.temperatures), len(last_status.temperatures)))

      for sensor in status.temperatures:
        current = status.temperatures[sensor]
        last = last_status.temperatures[sensor]
        # Ignore the case when both are None since it could just mean the
        # sensor doesn't exist. If only one of them is None, then there
        # is a problem.
        if last is None and current is None:
          continue
        if last is None or current is None:
          warnings.append(
              'Cannot read temperature sensor %s (current: %r, last: %r)' %
              (sensor, current, last))
        elif abs(current - last) > self.args.temp_max_delta:
          warnings.append(
              'Temperature sensor %s delta over %d (current: %d, last: %d)' %
              (sensor, self.args.temp_max_delta, current, last))

    for name, sensor, warning_temp, critical_temp in self.args.temp_criteria:
      temp = GetTemperature(sensor)
      if temp is None:
        warnings.append('%s temperature unavailable' % name)
        continue

      # Derive missing thresholds from the system's critical temperature.
      if warning_temp is None or critical_temp is None:
        try:
          sys_temp = self._dut.thermal.GetCriticalTemperature(sensor)
        except NotImplementedError:
          raise type_utils.TestFailure(
              'Failed to get the critical temperature of %r, please explicitly '
              'specify the value in the test arguments.' % name)
        if warning_temp is None:
          warning_temp = sys_temp * _WARNING_TEMP_RATIO
        if critical_temp is None:
          critical_temp = sys_temp * _CRITICAL_TEMP_RATIO

      if temp >= critical_temp:
        warnings.append(
            '%s over critical temperature (now: %.1f, critical: %.1f)' % (
                name, temp, critical_temp))
      elif temp >= warning_temp:
        warnings.append(
            '%s over warning temperature (now: %.1f, warning: %.1f)' %
            (name, temp, warning_temp))

    for (relation, first_sensor, second_sensor,
         max_diff) in self.args.relative_temp_criteria:
      first_temp = GetTemperature(first_sensor)
      second_temp = GetTemperature(second_sensor)
      if first_temp is None or second_temp is None:
        unavailable_sensor = []
        if first_temp is None:
          unavailable_sensor.append(first_sensor)
        if second_temp is None:
          unavailable_sensor.append(second_sensor)
        warnings.append(
            'Cannot measure temperature difference between %s: '
            'temperature %s unavailable' %
            (relation, ', '.join(unavailable_sensor)))
      elif abs(first_temp - second_temp) > max_diff:
        warnings.append('Temperature difference between %s over %d '
                        '(first: %d, second: %d)' %
                        (relation, max_diff, first_temp, second_temp))

    if self.args.fan_min_expected_rpm:
      for i, fan_rpm in enumerate(status.fan_rpm):
        if fan_rpm < self.args.fan_min_expected_rpm:
          warnings.append('Fan %d rpm %d less than min expected %d' %
                          (i, fan_rpm, self.args.fan_min_expected_rpm))

    if self.args.cpu_min_expected_freq:
      for cpu_freq in status.cpu_freq:
        if cpu_freq < self.args.cpu_min_expected_freq:
          warnings.append('CPU frequency %f MHz less than expected %d MHz' %
                          (cpu_freq, self.args.cpu_min_expected_freq))

    if self.args.cpu_max_expected_freq:
      for cpu_freq in status.cpu_freq:
        if cpu_freq > self.args.cpu_max_expected_freq:
          warnings.append('CPU frequency %f MHz larger than expected %d MHz' %
                          (cpu_freq, self.args.cpu_max_expected_freq))

    if not self.args.allow_invalid_temp:
      for sensor, temp in status.temperatures.items():
        if temp <= 0:
          warnings.append('Thermal zone %s reports abnormal temperature %d'
                          % (sensor, temp))

    # Warnings raised during the grace period are logged but not printed
    # to the operator console.
    in_grace_period = self._elapsed_secs < self.args.grace_secs
    if warnings:
      event_log.Log('warnings', elapsed_secs=self._elapsed_secs,
                    in_grace_period=in_grace_period, warnings=warnings)
      if not in_grace_period:
        for w in warnings:
          session.console.warn(w)

    with self._group_checker:
      testlog.CheckNumericParam('elapsed', self._elapsed_secs,
                                max=self.args.grace_secs)
      testlog.LogParam('temperatures', status.temperatures)
      testlog.LogParam('fan_rpm', status.fan_rpm)
      testlog.LogParam('cpu_freq', status.cpu_freq)
      testlog.LogParam('warnings', warnings)

  def SnapshotStatus(self):
    """Take one snapshot of temperatures, fan RPM and CPU frequency."""
    return Status(self._dut.thermal.GetAllTemperatures(),
                  self._dut.fan.GetFanRPM(),
                  self._cpu_freq_manager.GetCurrentFrequency())

  def setUp(self):
    self._dut = device_utils.CreateDUTInterface()
    self._main_sensor = self._dut.thermal.GetMainSensorName()
    # Normalize the sensors so main sensor is always the first one.
    sensors = sorted(self._dut.thermal.GetAllSensorNames())
    sensors.insert(0, sensors.pop(sensors.index(self._main_sensor)))
    self._sensors = sensors
    self._cpu_freq_manager = plugin_controller.GetPluginRPCProxy(
        'cpu_freq_manager')
    # Group checker for Testlog.
    self._group_checker = testlog.GroupParam(
        'system_status',
        ['elapsed', 'temperatures', 'fan_rpm', 'cpu_freq', 'warnings'])
    testlog.UpdateParam('elapsed', description='In grace period or not')
    self._start_secs = time.time()
    self._elapsed_secs = 0
    self._next_log_time = 0
    self._next_ui_update_time = 0
    self._verbose_log = None
    self.goofy = state.GetInstance()

  def runTest(self):
    verbose_log_path = session.GetVerboseTestLogPath()
    file_utils.TryMakeDirs(os.path.dirname(verbose_log_path))
    logging.info('Raw verbose logs saved in %s', verbose_log_path)
    self._verbose_log = open(verbose_log_path, 'a')
    try:
      last_status = self.SnapshotStatus()
      self.UpdateLegend(self._sensors)
      # Loop until count-down ends.
      while self._elapsed_secs < self.args.duration_secs:
        self.UpdateTimeAndLoad()
        current_time = time.time()
        if (current_time >= self._next_log_time or
            current_time >= self._next_ui_update_time):
          sys_status = self.SnapshotStatus()
          if current_time >= self._next_log_time:
            event_log.Log('system_status', elapsed_secs=self._elapsed_secs,
                          **sys_status._asdict())
            self.DetectAbnormalStatus(sys_status, last_status)
            last_status = sys_status
            self._next_log_time = current_time + self.args.log_interval
          if current_time >= self._next_ui_update_time:
            self.UpdateUILog(sys_status)
            self._next_ui_update_time = current_time + self.args.ui_update_interval
        self.Sleep(1)
        self._elapsed_secs = time.time() - self._start_secs
    finally:
      # Close the verbose log even when an exception aborts the loop;
      # the original leaked the handle on failure.
      self._verbose_log.close()
    self.goofy.WaitForWebSocketUp()
| StarcoderdataPython |
3221542 | import constant
import os
from time import gmtime, strftime
import getpass
import socket
from lazagne.config.header import Header
from lazagne.config.color import bcolors
from lazagne.config.constant import constant
import logging
import json
# --------------------------- Functions used to write ---------------------------
def write_header():
    """Append the report banner (date, user, host) to credentials.txt.

    Output goes to ``constant.folder_name``/credentials.txt.  Note the
    banner text intentionally preserves the original's "Credentsials"
    spelling so output stays byte-identical.
    """
    time = strftime("%Y-%m-%d %H:%M:%S", gmtime())
    header = '''|====================================================================|
| |
| Credentsials discovery |
| |
| ! BANG BANG ! |
| |
|====================================================================|\n
- Date: ''' + time + '''
- Username: ''' + getpass.getuser() + '''
- Hostname: ''' + socket.gethostname() + ''' \n
------------------------------ Results ------------------------------\n'''
    # Use a context manager so the handle is always closed; the original
    # leaked the file object returned by open().
    with open(constant.folder_name + os.sep + 'credentials.txt', "a+b") as f:
        f.write(header)
def write_footer():
    """Append the final password count to credentials.txt."""
    footer = '\n[+] %s passwords have been found.\n\n' % str(constant.nbPasswordFound)
    # Context manager fixes the file-handle leak in the original.
    with open(constant.folder_name + os.sep + 'credentials.txt', "a+b") as f:
        f.write(footer)
def write_credentials(pwdFound, category):
    """Append one category of recovered credentials to credentials.txt.

    Args:
        pwdFound: list of dicts, one per credential (key/value pairs).
        category: name of the software/category the credentials came from.
    """
    tmp = "############ %s passwords ############\r\n\r\n" % category
    for pwd in pwdFound:
        for p in pwd.keys():
            tmp = str(tmp) + str(p) + ": " + str(pwd[p]) + "\r\n"
        tmp = str(tmp) + "\r\n"
    # Context manager fixes the file-handle leak in the original.
    with open(constant.folder_name + os.sep + 'credentials.txt', "a+b") as f:
        f.write(tmp)
def checks_write(values, category):
    """Record *values* under *category* in the global results structure.

    Empty/None values are ignored; otherwise the entry is appended to
    constant.finalResults["Passwords"], creating the list on first use.
    """
    if not values:
        return
    passwords = constant.finalResults.setdefault("Passwords", [])
    passwords.append([{"Category": category}, values])
# --------------------------- End of functions used to write ---------------------------
# --------------------------- Output functions ---------------------------
def print_footer():
footer = '\n[+] %s passwords have been found.\n' % str(constant.nbPasswordFound)
if logging.getLogger().isEnabledFor(logging.INFO) == False:
footer += 'For more information launch it again with the -v option\n'
print footer
# print output if passwords have been found
def print_output(software_name, pwdFound):
    """Print the credentials recovered for one module and record them.

    Classifies each result dict by key name (password/key/hash/cmd),
    updates the global counters and hands the hits to checks_write().
    """
    if pwdFound:
        # if the debug logging level is not apply => print the title
        if logging.getLogger().isEnabledFor(logging.INFO) == False:
            Header().title(software_name)
        toWrite = []
        for pwd in pwdFound:
            password_category = False
            # detect which kinds of password has been found
            lower_list = [s.lower() for s in pwd.keys()]
            password = [s for s in lower_list if "password" in s]
            if password:
                password_category = password
            else:
                key = [s for s in lower_list if "key" in s] # for the wifi
                if key:
                    password_category = key
                else:
                    # NOTE(review): `hash` shadows the builtin here; local
                    # to this branch, but worth renaming eventually.
                    hash = [s for s in lower_list if "hash" in s]
                    if hash:
                        password_category = hash
                    else:
                        cmd = [s for s in lower_list if "cmd" in s]
                        if cmd:
                            password_category = cmd
            # No password found
            if not password_category:
                print_debug("FAILED", "Password not found !!!")
            else:
                print_debug("OK", '%s found !!!' % password_category[0].title())
                toWrite.append(pwd)
                # Store all passwords found on a table => for dictionary attack if master password set
                constant.nbPasswordFound += 1
                try:
                    # password_category holds lower-cased keys, so this
                    # lookup can miss when the original key had different
                    # casing — the bare except makes it best-effort.
                    constant.passwordFound.append(pwd[password_category[0]])
                except:
                    pass
            for p in pwd.keys():
                print '%s: %s' % (p, pwd[p])
            print
        # write credentials into a text file
        checks_write(toWrite, software_name)
    else:
        logging.info("[!] No passwords found\n")
def print_debug(error_level, message):
b = bcolors()
# print when password is found
if error_level == 'OK':
print b.OK + message + b.ENDC
# print when password is not found
elif error_level == 'FAILED':
print b.FAIL + message + b.ENDC
# print messages depending of their criticism
elif error_level == 'CRITICAL':
logging.error(b.FAIL + '[CRITICAL] ' + message + '\n' + b.ENDC)
elif error_level == 'ERROR':
logging.error(b.FAIL + '[ERROR] ' + message + '\n' + b.ENDC)
elif error_level == 'WARNING':
logging.warning(b.WARNING + message + '\n' + b.ENDC)
elif error_level == 'DEBUG':
logging.debug(message + '\n')
elif error_level == 'INFO':
logging.info(message + '\n')
else:
logging.info('[%s] %s' % (error_level, message))
# --------------------------- End of output functions ---------------------------
def parseJsonResultToBuffer(jsonString, color=False):
    """Render the harvested per-user results as a printable text buffer.

    Args:
        jsonString: despite the name, a list of already-parsed result
            dicts; each may contain a 'Passwords' list of
            [{'Category': ...}, [credential dicts]] pairs.
        color: when True, wrap titles and hits in ANSI colors (bcolors).

    Returns:
        The formatted buffer (str).

    Side effects:
        Increments constant.nbPasswordFound for every password rendered.
    """
    green = ''
    reset = ''
    title = ''
    if color:
        colors = bcolors()
        green = colors.OK
        title = colors.TITLE
        reset = colors.ENDC
    # Renamed locals: the original used `json` (shadowing the imported
    # module) as the loop variable and `buffer` (shadowing the Python 2
    # builtin) as the accumulator.
    output = ''
    try:
        for result in jsonString:
            if result:
                if 'Passwords' not in result:
                    output += 'No passwords found for this user !'
                else:
                    for all_passwords in result['Passwords']:
                        output += '{title_color}------------------- {password_category} -----------------{reset_color}\r\n'.format(title_color=title, password_category=all_passwords[0]['Category'], reset_color=reset)
                        for password_by_category in all_passwords[1]:
                            output += '\r\n{green_color}Password found !!!{reset_color}\r\n'.format(green_color=green, reset_color=reset)
                            constant.nbPasswordFound += 1
                            for dic in password_by_category.keys():
                                try:
                                    output += '%s: %s\r\n' % (dic, password_by_category[dic].encode('utf-8'))
                                except Exception:
                                    # Best-effort fallback for values that
                                    # cannot be strictly utf-8 encoded.
                                    output += '%s: %s\r\n' % (dic, password_by_category[dic].encode(encoding='utf-8',errors='replace'))
                            output += '\r\n'
    except Exception as e:
        print_debug('ERROR', 'Error parsing the json results: %s' % e)
        print_debug('ERROR', 'json content: %s' % jsonString)
    return output
| StarcoderdataPython |
3216394 | <gh_stars>100-1000
# encoding: utf8
from __future__ import unicode_literals
from unittest import TestCase
from alex.applications.PublicTransportInfoCS.hdc_slu import PTICSHDCSLU
from alex.applications.PublicTransportInfoCS.preprocessing import PTICSSLUPreprocessing
from alex.components.asr.utterance import Utterance, UtteranceNBList
from alex.components.slu.base import CategoryLabelDatabase
from alex.components.slu.da import DialogueAct, DialogueActItem
from alex.utils.config import as_project_path
class TestPTICSHDCSLU(TestCase):
    """Tests for the Czech public-transport handcrafted SLU parser."""

    def test_parse_with_mutliple_date_rel(self):
        """A repeated relative-date word yields a single date_rel item."""
        asr_hyp = UtteranceNBList()
        asr_hyp.add(0.1, Utterance("CHTEL BYCH ZITRA ZITRA JET"))
        cn = self.slu.parse(asr_hyp)
        # Consistency fix: use assertIn (as test_parse_meta does) instead
        # of the deprecated TestCase.assert_ alias.
        self.assertIn(DialogueActItem(dai="inform(date_rel=tomorrow)"), cn)

    def test_parse_meta(self):
        """Each utterance maps onto the expected dialogue-act item."""
        utterances_to_understand = [
            (u"ahoj", "hello()", ),
            (u"sbohem čau", "bye()", ),
            (u"jiné", "reqalts()", ),
            (u"začneme znovu", "restart()", ),
            (u"zopakuj", "repeat()", ),
            (u"promiň", "apology()", ),
            (u"co se zeptat", "help()", ),
            (u"haló", "canthearyou()", ),
            (u"nerozuměl jsem", "notunderstood()", ),
            (u"ano jo", "affirm()", ),
            (u"ne ano nechci", "negate()", ),
            (u"děkuji", "thankyou()", ),
            (u"dobře", "ack()", ),
            (u"chci jet", "inform(task=find_connection)", ),
            (u"jak bude", "inform(task=weather)", ),
            (u"nástupiště", "inform(task=find_platform)", ),
            (u"z jaké jede", "request(from_stop)", ),
            (u"kam to jede", "request(to_stop)", ),
            (u"kdy to jede", "request(departure_time)", ),
            (u"za jak dlouho", "request(departure_time_rel)", ),
            (u"kdy tam budem", "request(arrival_time)", ),
            (u"za jak dlouho tam přijedu", "request(arrival_time_rel)", ),
            (u"jak dlouho bude trvat cesta", "request(duration)", ),
            (u"kolik je hodin", "request(current_time)", ),
            (u"jak dlouho trvá přestup", "request(time_transfers)", ),
            (u"kolik přestupů", "request(num_transfers)", ),
            (u"nechci přestup bez jet přímo", "inform(num_transfers=0)", ),
            (u"jeden přestup", "inform(num_transfers=1)", ),
            (u"dva přestupy", "inform(num_transfers=2)", ),
            (u"tři přestupy", "inform(num_transfers=3)", ),
            (u"čtyři přestupy", "inform(num_transfers=4)", ),
            (u"libovolně přestupů", "inform(num_transfers=dontcare)", ),
            (u"jet přímo", "inform(num_transfers=0)", ),
            (u"alternativa libovolný", "inform(alternative=dontcare)", ),
            (u"alternativa první", "inform(alternative=1)", ),
            (u"alternativa druhá", "inform(alternative=2)", ),
            (u"alternativa třetí", "inform(alternative=3)", ),
            (u"alternativa čtvrtá", "inform(alternative=4)", ),
            (u"alternativa páté", "inform(alternative=5)", ),
            (u"předchozí spoj", "inform(alternative=prev)", ),
            (u"nechci předchozí spoj", "deny(alternative=prev)", ),
            (u"poslední spoj", "inform(alternative=last)", ),
            (u"nechci poslední spoj", "deny(alternative=last)", ),
            (u"další spoj", "inform(alternative=next)", ),
            (u"další", "inform(alternative=next)", ),
            (u"předchozí", "inform(alternative=prev)", ),
            (u"jako ve dne", "inform(ampm=pm)", ),
        ]
        for utt, res in utterances_to_understand:
            asr_hyp = UtteranceNBList()
            asr_hyp.add(0.79, Utterance(utt))
            cn = self.slu.parse(asr_hyp)
            self.assertIn(DialogueActItem(dai=res), cn)

    @classmethod
    def setUpClass(cls):
        """Build one SLU instance shared by all tests, with a tiny CLDB."""
        cfg = {
            'SLU': {
                'debug': True,
                'type': PTICSHDCSLU,
                PTICSHDCSLU: {
                    'preprocessing_cls': PTICSSLUPreprocessing
                },
            },
        }
        slu_type = cfg['SLU']['type']
        cldb = CategoryLabelDatabase()

        # Minimal in-memory category-label database covering only the
        # surface forms the tests exercise.
        class db:
            database = {
                "task": {
                    "find_connection": ["najít spojení", "najít spoj", "zjistit spojení",
                                        "zjistit spoj", "hledám spojení", 'spojení', 'spoj',
                                        ],
                    "find_platform": ["najít nástupiště", "zjistit nástupiště", ],
                    'weather': ['pocasi', ],
                },
                "number": {
                    "1": ["jednu"]
                },
                "time": {
                    "now": ["nyní", "teď", "teďka", "hned", "nejbližší", "v tuto chvíli", "co nejdřív"],
                    "18": ["osmnáct", "osmnact"]
                },
                "date_rel": {
                    "tomorrow": ["zítra", "zitra"],
                }
            }

        cldb.load(db_mod=db)
        preprocessing = cfg['SLU'][slu_type]['preprocessing_cls'](cldb)
        cls.slu = slu_type(preprocessing, cfg)
| StarcoderdataPython |
3227407 | # coding: spec
from _pytest.pytester import Testdir as TD, LineMatcher
from contextlib import contextmanager
from textwrap import dedent
import subprocess
import tempfile
import asyncio
import socket
import signal
import pytest
import shutil
import sys
import py
import os
this_dir = os.path.dirname(__file__)
@contextmanager
def listening():
    """Yield ``(socket, path)``: a listening AF_UNIX socket on a fresh path.

    NamedTemporaryFile is used only to reserve a unique filesystem name;
    the file is deleted before the socket binds to the same path.  The
    path is removed again on exit.
    """
    filename = None
    try:
        with tempfile.NamedTemporaryFile(delete=False) as fle:
            filename = fle.name
            # Free the reserved name so the socket can bind to it.
            fle.close()
            os.remove(filename)

            s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            s.settimeout(2)
            s.bind(filename)
            s.listen(1)

            yield s, filename

            s.close()
    finally:
        # Bug fix: clean up via `filename` (previously assigned but never
        # used).  The original referenced `fle.name` here, which raises
        # NameError if NamedTemporaryFile itself failed and `fle` was
        # never bound.
        if filename is not None and os.path.exists(filename):
            os.remove(filename)
def example_dir_factory(tmpdir_factory, name):
    """Copy example directory *name* to a tmp dir; return a Testdir factory.

    The returned object mimics pytest's TempdirFactory: scratch requests
    ("tmp-*") are delegated to the real factory, while everything else
    resolves to the copied example directory.  It also exposes the
    directory's "expected" output file as a property.
    """
    path = os.path.join(this_dir, name)
    assert os.path.isdir(path)
    expected_file = os.path.join(this_dir, name, "expected")
    assert os.path.isfile(expected_file)
    with open(expected_file, "r") as fle:
        expected = fle.read().strip()
    directory = tmpdir_factory.mktemp(name)
    # mktemp creates the directory; remove it so copytree can recreate it
    # populated with the example's contents.
    shutil.rmtree(directory)
    shutil.copytree(path, directory)
    class Factory:
        @property
        def expected(s):
            # The contents of the example's "expected" file.
            return expected
        def mktemp(s, p, **kwargs):
            if p.startswith("tmp-"):
                return tmpdir_factory.mktemp(p)
            else:
                return directory
    return Factory()
@pytest.mark.parametrize(
    "name", [name for name in os.listdir(this_dir) if name.startswith("example_")]
)
async it "shows correctly for failing fixtures", name, request, tmpdir_factory:
    # Run pytest inside each example_* directory and compare its terminal
    # output with the directory's "expected" transcript.
    factory = example_dir_factory(tmpdir_factory, name)
    testdir = TD(request, factory)
    expected = factory.expected
    result = testdir.runpytest("--tb", "short")
    assert not result.errlines
    # Capture only the final summary section: `lines` starts as an int
    # counter and flips to a list on the second "="-prefixed separator
    # line; from then on every line (including that separator) is kept.
    lines = 0
    for line in result.outlines:
        if line.startswith("=") and isinstance(lines, int):
            if lines < 1:
                lines += 1
            else:
                lines = []
        if isinstance(lines, list):
            lines.append(line)
    matcher = LineMatcher(lines)
    matcher.fnmatch_lines(expected.split("\n"))
@pytest.mark.async_timeout(4)
async it "cleans up tests properly on interrupt":
    # Launch pytest in interrupt_test/, send SIGINT after two seconds and
    # verify the cleanup output matches the "expected" transcript.
    directory = os.path.join(this_dir, "interrupt_test")
    expected_file = os.path.join(directory, "expected")
    assert os.path.isfile(expected_file)
    with open(expected_file, "r") as fle:
        expected = fle.read().strip()
    p = await asyncio.create_subprocess_exec(
        shutil.which("pytest"), cwd=directory, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
    )
    await asyncio.sleep(2)
    p.send_signal(signal.SIGINT)
    await p.wait()
    got = (await p.stdout.read()).decode().strip().split("\n")
    # Drop everything before the "collected ..." line so the comparison
    # starts at the same point as the expected transcript.
    while got and not got[0].startswith("collected"):
        got.pop(0)
    want = expected.strip().split("\n")
    if len(got) != len(want):
        # Dump what we actually got to ease debugging the mismatch.
        print("\n".join(got))
        assert False, "expected different number of lines in output"
    matcher = LineMatcher(got)
    matcher.fnmatch_lines(want)
| StarcoderdataPython |
106300 | # -*- coding: utf-8 -*-
"""Test user registration endpoint."""
from flask import url_for
from servicedesk.user.models import User
from tests.factories import UserFactory
def test_can_register(user, testapp):
    """A visitor can create a new account through the registration form."""
    users_before = len(User.query.all())
    # Start at the homepage and follow the "Create account" link.
    page = testapp.get("/").click("Create account")
    # Fill in the registration form.
    form = page.forms["registerForm"]
    form["username"] = "foobar"
    form["email"] = "<EMAIL>"
    form["password"] = "<PASSWORD>"
    form["confirm"] = "<PASSWORD>"
    response = form.submit().follow()
    assert response.status_code == 200
    # Exactly one new user row was persisted.
    assert len(User.query.all()) == users_before + 1
def test_sees_error_message_if_passwords_dont_match(user, testapp):
    """Mismatched password/confirm fields surface a validation error."""
    page = testapp.get(url_for("public.register"))
    form = page.forms["registerForm"]
    form["username"] = "foobar"
    form["email"] = "<EMAIL>"
    form["password"] = "<PASSWORD>"
    # Deliberately different from the password field.
    form["confirm"] = "secrets"
    response = form.submit()
    assert "Passwords must match" in response
def test_sees_error_message_if_user_already_registered(user, testapp):
    """Re-using an existing username surfaces a validation error."""
    # Shadow the fixture with a known, active, persisted user.
    user = UserFactory(active=True)
    user.save()
    page = testapp.get(url_for("public.register"))
    form = page.forms["registerForm"]
    # Same username as the already-registered user.
    form["username"] = user.username
    form["email"] = "<EMAIL>"
    form["password"] = "<PASSWORD>"
    form["confirm"] = "secret"
    response = form.submit()
    assert "Username already registered" in response
| StarcoderdataPython |
3203095 | from itertools import combinations
from fcapsy.decorators import metadata
from fcapsy import Concept, Context
@metadata(name='RiceSiffConcepts', short_name='RSConcepts')
def concept_subset(context: Context, similarity_measure) -> set:
    """Agglomeratively select a subset of formal concepts of *context*.

    Experimental implementation of
    Rice, <NAME>., and <NAME>. "Clusters, concepts, and pseudometrics."
    Electronic Notes in Theoretical Computer Science 40 (2001): 323-346.

    Starting from the atom concepts, repeatedly merges the pair(s) of
    worklist concepts with minimal intent distance
    (1 - similarity_measure) until at most one concept remains.

    Returns the accumulated set of concepts (the original annotation said
    ``list``, but a ``set`` is what is built and returned).
    """
    init_intent = context.Attributes.supremum
    init_extent = context.down(init_intent)
    init_concept = Concept(init_extent, init_intent)
    atoms = context.Objects.supremum.atoms()
    # init worklist with all atoms
    worklist = {Concept.from_intent(
        context.up(extent), context) for extent in atoms}
    # init resulting concepts with init_concept and worklist
    concepts = set(worklist)
    concepts.add(init_concept)
    while len(worklist) > 1:
        # create all possible pairs of different concepts from worklist
        concept_combinations = tuple(combinations(worklist, 2))
        # calculate all distances
        # NOTE(review): the comprehension variable `concepts` shadows the
        # result set of the same name; harmless (comprehension scope) but
        # confusingly named.
        distances = [1 - similarity_measure(
            concepts[0].intent, concepts[1].intent) for concepts in concept_combinations]
        # select minimal distance from all distances
        min_distance = min(distances)
        # get all possible pairs of concepts with minimal distance
        concept_pairs_min_distance = {concept_tuple for concept_tuple, distance in zip(
            concept_combinations, distances) if distance == min_distance}
        # flatten pairs and transform them to set
        concepts_from_pairs = {
            concept for concept_pair in concept_pairs_min_distance for concept in concept_pair}
        # calculate new concepts and add them to worklist and result concepts
        for concept_tuple in concept_pairs_min_distance:
            extent = concept_tuple[0].extent | concept_tuple[1].extent
            new_intent = context.up(extent)
            new_extent = context.down(new_intent)
            new_concept = Concept(new_extent, new_intent)
            worklist.add(new_concept)
            concepts.add(new_concept)
        # remove already processed concepts
        worklist = worklist.difference(concepts_from_pairs)
    return concepts
| StarcoderdataPython |
3367146 | <reponame>kiv-box/redis<filename>ansible/lib/ansible/modules/core/network/junos/junos_config.py<gh_stars>0
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: junos_config
version_added: "2.1"
author: "<NAME> (@privateip)"
short_description: Manage configuration on devices running Juniper JUNOS
description:
- This module provides an implementation for working with the active
configuration running on Juniper JUNOS devices. It provides a set
of arguments for loading configuration, performing rollback operations
and zeroing the active configuration on the device.
extends_documentation_fragment: junos
options:
lines:
description:
- This argument takes a list of C(set) or C(delete) configuration
lines to push into the remote device. Each line must start with
either C(set) or C(delete). This argument is mutually exclusive
with the I(src) argument.
required: false
default: null
src:
description:
- The I(src) argument provides a path to the configuration file
to load into the remote system. The path can either be a full
system path to the configuration file if the value starts with /
or relative to the root of the implemented role or playbook.
This argument is mutually exclusive with the I(lines) argument.
required: false
default: null
version_added: "2.2"
src_format:
description:
- The I(src_format) argument specifies the format of the configuration
found in I(src). If the I(src_format) argument is not provided,
the module will attempt to determine the format of the configuration
file specified in I(src).
required: false
default: null
choices: ['xml', 'set', 'text', 'json']
version_added: "2.2"
rollback:
description:
- The C(rollback) argument instructs the module to rollback the
current configuration to the identifier specified in the
argument. If the specified rollback identifier does not
exist on the remote device, the module will fail. To rollback
to the most recent commit, set the C(rollback) argument to 0.
required: false
default: null
zeroize:
description:
- The C(zeroize) argument is used to completely sanitize the
remote device configuration back to initial defaults. This
argument will effectively remove all current configuration
statements on the remote device.
required: false
default: null
confirm:
description:
- The C(confirm) argument will configure a time out value for
the commit to be confirmed before it is automatically
rolled back. If the C(confirm) argument is set to False, this
argument is silently ignored. If the value for this argument
is set to 0, the commit is confirmed immediately.
required: false
default: 0
comment:
description:
- The C(comment) argument specifies a text string to be used
when committing the configuration. If the C(confirm) argument
is set to False, this argument is silently ignored.
required: false
default: configured by junos_config
replace:
description:
- The C(replace) argument will instruct the remote device to
replace the current configuration hierarchy with the one specified
in the corresponding hierarchy of the source configuration loaded
from this module.
- Note this argument should be considered deprecated. To achieve
the equivalent, set the I(update) argument to C(replace). This argument
will be removed in a future release.
required: false
choices: ['yes', 'no']
default: false
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. The backup file is written to the C(backup)
folder in the playbook root directory. If the directory does not
exist, it is created.
required: false
default: no
choices: ['yes', 'no']
version_added: "2.2"
requirements:
- junos-eznc
notes:
- This module requires the netconf system service be enabled on
the remote device being managed.
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
netconf:
host: "{{ inventory_hostname }}"
username: ansible
password: <PASSWORD>
- name: load configure file into device
junos_config:
src: srx.cfg
comment: update config
provider: "{{ netconf }}"
- name: rollback the configuration to id 10
junos_config:
rollback: 10
provider: "{{ netconf }}"
- name: zero out the current configuration
junos_config:
zeroize: yes
provider: "{{ netconf }}"
- name: confirm a previous commit
junos_config:
provider: "{{ netconf }}"
"""
RETURN = """
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: path
sample: /playbooks/ansible/backup/config.2016-07-16@22:28:34
"""
import json
from xml.etree import ElementTree
import ansible.module_utils.junos
from ansible.module_utils.basic import get_exception
from ansible.module_utils.network import NetworkModule, NetworkError
from ansible.module_utils.netcfg import NetworkConfig
DEFAULT_COMMENT = 'configured by junos_config'
def guess_format(config):
    """Best-effort detection of a configuration payload's format.

    Tries JSON first, then XML; falls back to Junos `set` syntax when the
    text starts with `set`/`delete`, and plain `text` otherwise.
    """
    try:
        json.loads(config)
    except ValueError:
        pass
    else:
        return 'json'

    try:
        ElementTree.fromstring(config)
    except ElementTree.ParseError:
        pass
    else:
        return 'xml'

    if config.startswith(('set', 'delete')):
        return 'set'

    return 'text'
def config_to_commands(config):
    """Normalise a device configuration into a list of command lines.

    If *config* is already in `set` syntax it is parsed and split into
    lines; otherwise the hierarchical text config is flattened and less
    specific lines are dropped in favour of their more specific children.
    """
    set_format = config.startswith('set') or config.startswith('delete')
    candidate = NetworkConfig(indent=4, contents=config, device_os='junos')
    if not set_format:
        candidate = [c.line for c in candidate.items]
        commands = list()
        # this filters out less specific lines
        for item in candidate:
            for index, entry in enumerate(commands):
                if item.startswith(entry):
                    # a more specific line supersedes its shorter prefix
                    del commands[index]
                    break
            commands.append(item)
    else:
        commands = str(candidate).split('\n')
    return commands
def diff_commands(commands, config):
    """Return the subset of *commands* not already reflected in *config*.

    `set` lines are kept when their body (text after 'set ') is missing
    from the running config; `delete` lines are kept when some running
    line matches the deleted prefix.  Raises ValueError for any non-empty
    line that does not start with `set` or `delete`.
    """
    # NOTE: `unicode` is Python 2 only; quote characters are stripped so
    # quoted values compare equal to their candidate form
    config = [unicode(c).replace("'", '') for c in config]
    updates = list()
    visited = set()
    for item in commands:
        if len(item) > 0:
            if not item.startswith('set') and not item.startswith('delete'):
                raise ValueError('line must start with either `set` or `delete`')
            elif item.startswith('set') and item[4:] not in config:
                # item[4:] strips the leading 'set ' prefix
                updates.append(item)
            elif item.startswith('delete'):
                # item[7:] strips the leading 'delete ' prefix
                for entry in config:
                    if entry.startswith(item[7:]) and item not in visited:
                        updates.append(item)
                        visited.add(item)
    return updates
def load_config(module, result):
    """Load candidate configuration (lines or src file) onto the device.

    Determines the config format, filters `set`-format candidates against
    the running config, loads the result and records any diff in *result*.
    """
    candidate = module.params['lines'] or module.params['src']
    if isinstance(candidate, basestring):  # py2: str or unicode
        candidate = candidate.split('\n')
    kwargs = dict()
    kwargs['comment'] = module.params['comment']
    kwargs['confirm'] = module.params['confirm']
    kwargs['replace'] = module.params['replace']
    # commit unless running in check mode
    kwargs['commit'] = not module.check_mode
    if module.params['src']:
        config_format = module.params['src_format'] or guess_format(candidate)
    elif module.params['lines']:
        config_format = 'set'
    # NOTE(review): config_format is unbound when neither src nor lines is
    # set -- run() only calls here when one of them is present; confirm
    kwargs['config_format'] = config_format
    # this is done to filter out `delete ...` statements which map to
    # nothing in the config as that will cause an exception to be raised
    if config_format == 'set':
        config = module.config.get_config()
        config = config_to_commands(config)
        candidate = diff_commands(candidate, config)
    diff = module.config.load_config(candidate, **kwargs)
    if diff:
        result['changed'] = True
        result['diff'] = dict(prepared=diff)
def rollback_config(module, result):
    """Roll the device configuration back to the requested rollback id.

    Reads the rollback identifier and commit comment from module params
    and commits the rollback unless running in check mode.  Records any
    diff in *result*.
    """
    rollback = module.params['rollback']
    # bug fix: was `module.param['comment']`, which raised AttributeError
    # at runtime (the attribute is `params`)
    kwargs = dict(comment=module.params['comment'],
                  commit=not module.check_mode)
    diff = module.connection.rollback_config(rollback, **kwargs)
    if diff:
        result['changed'] = True
        result['diff'] = dict(prepared=diff)
def zeroize_config(module, result):
    """Factory-reset the device via `request system zeroize`.

    The command is only sent when not in check mode; the result is always
    reported as changed.
    """
    checkonly = module.check_mode
    if not checkonly:
        module.cli.run_commands('request system zeroize')
    result['changed'] = True
def confirm_config(module, result):
    """Confirm a pending (confirmed) commit and report whether it changed
    anything; in check mode the confirmation is only simulated."""
    result['changed'] = module.connection.confirm_commit(module.check_mode)
def run(module, result):
    """Dispatch to the requested operation based on the module params.

    Priority: rollback, then zeroize, then loading a config from
    src/lines; with no config source at all, confirm a pending commit.
    """
    params = module.params
    if params['rollback']:
        return rollback_config(module, result)
    if params['zeroize']:
        return zeroize_config(module, result)
    if params['src'] or params['lines']:
        return load_config(module, result)
    # no configuration source supplied: confirm a pending commit
    return confirm_config(module, result)
def main():
    """Main entry point for module execution.

    Builds the argument spec, instantiates the NetworkModule, optionally
    snapshots the running config, dispatches via run() and reports the
    result back to Ansible.
    """
    argument_spec = dict(
        lines=dict(type='list'),
        src=dict(type='path'),
        src_format=dict(choices=['xml', 'text', 'set', 'json']),
        # update operations
        replace=dict(default=False, type='bool'),
        confirm=dict(default=0, type='int'),
        comment=dict(default=DEFAULT_COMMENT),
        # config operations
        backup=dict(type='bool', default=False),
        rollback=dict(type='int'),
        zeroize=dict(default=False, type='bool'),
        transport=dict(default='netconf', choices=['netconf'])
    )
    # lines/src/rollback/zeroize are alternative configuration sources
    mutually_exclusive = [('lines', 'rollback'), ('lines', 'zeroize'),
                          ('rollback', 'zeroize'), ('lines', 'src'),
                          ('src', 'zeroize'), ('src', 'rollback')]
    # `replace` only makes sense together with a source file
    required_if = [('replace', True, ['src'])]
    module = NetworkModule(argument_spec=argument_spec,
                           mutually_exclusive=mutually_exclusive,
                           required_if=required_if,
                           supports_check_mode=True)
    result = dict(changed=False)
    if module.params['backup']:
        # snapshot the running config before making any change
        result['__backup__'] = module.config.get_config()
    try:
        run(module, result)
    except NetworkError:
        exc = get_exception()
        module.fail_json(msg=str(exc), **exc.kwargs)
    module.exit_json(**result)
# standard script entry-point guard
if __name__ == '__main__':
    main()
import fitbit
from django.contrib.auth.models import Group
from django.db import Error
from django.db.models import Q
from requests import get
from my_life_rest_api.settings import ML_URL
from .constants import *
from .models import *
from .serializers import *
from .utils import *
def add_user(data, is_admin=False):
    """Create a Django auth User (and, for non-admins, a CustomUser wrapper).

    Returns (True, user) on success or (False, error_message) on failure.
    For admins the plain auth User is returned; for regular users the
    CustomUser instance is returned.
    """
    email = data.get("email")
    first_name = data.get("first_name")
    last_name = data.get("last_name")
    password = data.get("password")
    if not is_admin:
        # birth_date is stored on CustomUser only, not on admin accounts
        birth_date = data.get("birth_date")
    # treat nullable fields
    phone_number = data.get("phone_number") if "phone_number" in data else None
    photo = data.get("photo") if "photo" in data else DEFAULT_USER_IMAGE
    # the email doubles as the username, so it must be unique
    if User.objects.filter(username=email).exists():
        error_message = "Email already taken. User was not added to the db."
        return False, error_message
    try:
        if is_admin:
            # create a user
            user = User.objects.create_user(username=email, email=email, first_name=first_name, last_name=last_name,
                                            password=password)
        else:
            # create a user
            auth_user = User.objects.create_user(username=email, email=email, first_name=first_name,
                                                 last_name=last_name, password=password)
            # create custom user
            user = CustomUser.objects.create(auth_user=auth_user, phone_number=phone_number, photo=photo,
                                             birth_date=birth_date)
    except Error:
        error_message = "Error while creating new user!"
        return False, error_message
    return True, user
def update_user(data, auth_user, user=None):
    """Apply partial user updates from *data*.

    *auth_user* is a queryset over the auth User; *user*, when given, is a
    queryset over CustomUser (phone/photo/birth_date live there).  Only
    keys present in *data* are touched.
    """
    if "email" in data:
        email = data.get("email")
        auth_user.update(email=email)
        # the username mirrors the email address
        auth_user.update(username=email)
    if "first_name" in data:
        first_name = data.get("first_name")
        auth_user.update(first_name=first_name)
    if "last_name" in data:
        last_name = data.get("last_name")
        auth_user.update(last_name=last_name)
    if "password" in data:
        # passwords must be hashed via set_password; queryset.update()
        # would store the raw string.  NOTE(review): auth_user[0] re-runs
        # the query, so this picks up an email change made above -- confirm
        pwd_user = User.objects.get(username=auth_user[0].username)
        pwd_user.set_password(data.get("password"))
        pwd_user.save()
    if "phone_number" in data and user is not None:
        phone_number = data.get("phone_number")
        user.update(phone_number=phone_number)
    if "photo" in data and user is not None:
        photo = data.get("photo")
        user.update(photo=photo)
    if "birth_date" in data and user is not None:
        birth_date = data.get("birth_date")
        user.update(birth_date=birth_date)
def delete_user(user):
    """Delete *user* from the database.

    Returns a (success, human-readable message) tuple.
    """
    try:
        user.delete()
    except Error:
        return False, "Error while deleting user"
    return True, "User successfully deleted"
def add_admin(data):
    """Create a hospital admin: auth User + HospitalAdmin + group membership.

    Returns (success, message).
    """
    hospital = data.get("hospital")
    state, content = add_user(data, is_admin=True)
    if not state:
        # content carries the error message from add_user
        return state, content
    user = content
    try:
        # link the user to an admin
        HospitalAdmin.objects.create(auth_user=user, hospital=hospital)
    except Exception:
        # roll back the half-created account
        user.delete()
        error_message = "Error while creating new admin!"
        return False, error_message
    try:
        admins_group, created = Group.objects.get_or_create(name="admins_group")
        admins_group.user_set.add(user)
    except Exception:
        user.delete()
        error_message = "Error while creating new admin!"
        return False, error_message
    state_message = "Admin registered successfully!"
    return True, state_message
def update_admin(request, username):
    """Update the admin identified by *username* from the request payload.

    Returns (success, message).
    """
    data = request.data
    state = True
    message = "Admin successfully updated!"
    admin = HospitalAdmin.objects.filter(auth_user__username=username)
    if not admin.exists():
        state, message = False, "User does not exist or user is not a admin!"
        return state, message
    try:
        auth_user = User.objects.filter(username=username)
        # admins have no CustomUser row, so no `user` queryset is passed
        update_user(data, auth_user)
    except Exception:
        state, message = False, "Error while updating admin!"
    return state, message
def get_admin(username):
    """Look up a hospital admin by auth username and serialize it.

    Returns (True, serialized data) or (False, error message).
    """
    admin = HospitalAdmin.objects.filter(auth_user__username=username)
    if admin.exists():
        return True, AdminSerializer(admin[0]).data
    return False, "User does not exist or user is not a admin!"
def add_client(data):
    """Create a client account: auth User + CustomUser + Client + group.

    Returns (success, message).
    """
    height = data.get("height")
    weight_goal = data.get("weight_goal")
    current_weight = data.get("current_weight")
    sex = data.get("sex")
    # optional medical flags default to False
    is_diabetic = data.get("is_diabetic", False)
    has_high_colesterol = data.get("has_high_colesterol", False)
    state, content = add_user(data)
    if not state:
        # content carries the error message from add_user
        return state, content
    custom_user = content
    user = custom_user.auth_user
    try:
        # link the user to a client
        Client.objects.create(user=custom_user, height=height, current_weight=current_weight, weight_goal=weight_goal,
                              sex=sex,
                              is_diabetic=is_diabetic, has_high_colesterol=has_high_colesterol)
    except Exception:
        # roll back the half-created account
        user.delete()
        error_message = "Error while creating new client!"
        return False, error_message
    # check if the client group exists, else create it
    # finally add client to group
    try:
        clients_group, created = Group.objects.get_or_create(name="clients_group")
        clients_group.user_set.add(user)
    except Exception:
        user.delete()
        error_message = "Error while creating new client!"
        return False, error_message
    state_message = "Client was registered successfully!"
    return True, state_message
def update_client(request, email):
    """Update the client identified by *email* from the request payload.

    Applies common user fields via update_user(), then any client-specific
    fields present in the payload.  Returns (success, message).
    """
    data = request.data
    state = True
    message = "Client successfully updated!"
    client = Client.objects.filter(user__auth_user__username=email)
    if not client.exists():
        return False, "User does not exist or user is not a client!"
    try:
        auth_user = User.objects.filter(username=email)
        user = CustomUser.objects.filter(auth_user=auth_user[0])
        update_user(data, auth_user, user)
        # client-specific fields; only keys present in the payload are touched
        for field in ("height", "current_weight", "weight_goal", "sex",
                      "is_diabetic", "has_high_colesterol"):
            if field in data:
                client.update(**{field: data.get(field)})
    except Exception:
        # fix: removed stray debugging `print(e)` that leaked raw exception
        # details to stdout
        state, message = False, "Error while updating client!"
    return state, message
def get_client(email):
    """Serialize a client plus today's Fitbit activity stats, if linked.

    Returns (success, payload-or-error-message).  On any Fitbit API
    failure the stored tokens are cleared so the client must re-link.
    """
    try:
        client = Client.objects.get(user__auth_user__username=email)
    except Client.DoesNotExist:
        state = False
        message = "User does not exist or user is not a client!"
        return state, message
    try:
        message = ClientSerializer(client).data
        # default to empty strings when no Fitbit account is linked
        message["steps"] = ""
        message["heart_rate"] = ""
        message["distance"] = ""
        fitbit_access_token = client.fitbit_access_token
        fitbit_refresh_token = client.fitbit_refresh_token
        if fitbit_access_token is not None and fitbit_refresh_token is not None:
            fitbit_api = fitbit.Fitbit(CLIENT_FITBIT_ID, CLIENT_FITBIT_SECRET, system="en_UK", oauth2=True,
                                       access_token=fitbit_access_token, refresh_token=fitbit_refresh_token,
                                       refresh_cb=client.refresh_cb)
            message["steps"] = fitbit_api.time_series("activities/steps", period="1d")["activities-steps"][0]["value"]
            message["distance"] = fitbit_api.time_series("activities/distance", period="1d")["activities-distance"][0][
                "value"]
            data = fitbit_api.time_series("activities/heart", period="1m")["activities-heart"]
            # keep only days that report a resting heart rate; take the newest
            heart_rates = sorted(filter(lambda e: "restingHeartRate" in e["value"], data), key=lambda e: e["dateTime"],
                                 reverse=True)
            message["heart_rate"] = heart_rates[0]["value"]["restingHeartRate"]
        state = True
    except Exception:
        # assume the tokens expired or were revoked; force a re-link
        client.fitbit_access_token = None
        client.fitbit_refresh_token = None
        client.save()
        state = False
        message = "Error while trying to fetch client information"
    return state, message
def get_client_photo(email):
    """Return the profile photo of the client identified by *email*.

    Returns (True, {"photo": ...}) or (False, error message).
    """
    try:
        client = Client.objects.get(user__auth_user__username=email)
    except Client.DoesNotExist:
        return False, "User does not exist or user is not a client!"
    return True, {"photo": client.user.photo}
def add_doctor(data, hospital):
    """Create a doctor account: auth User + CustomUser + Doctor + group.

    Returns (success, message).
    """
    state, content = add_user(data)
    if not state:
        # content carries the error message from add_user
        return state, content
    custom_user = content
    user = custom_user.auth_user
    try:
        # link the user to a doctor
        Doctor.objects.create(user=custom_user, hospital=hospital)
    except Exception:
        # roll back the half-created account
        user.delete()
        error_message = "Error while creating new doctor!"
        return False, error_message
    # check if the doctor group exists, else create it
    # finally add doctor to group
    try:
        doctors_group, created = Group.objects.get_or_create(name="doctors_group")
        doctors_group.user_set.add(user)
    except Exception:
        user.delete()
        error_message = "Error while creating new doctor!"
        return False, error_message
    state_message = "Doctor registered successfully!"
    return True, state_message
def update_doctor(request, email):
    """Update the doctor identified by *email* from the request payload.

    Returns (success, message).
    """
    data = request.data
    doctor = Doctor.objects.filter(user__auth_user__username=email)
    if not doctor.exists():
        return False, "User does not exist or user is not a doctor!"
    try:
        auth_user = User.objects.filter(username=email)
        user = CustomUser.objects.filter(auth_user=auth_user[0])
        update_user(data, auth_user, user)
    except Exception:
        # bug fix: error message previously said "client" (copy-paste from
        # update_client)
        return False, "Error while updating doctor!"
    return True, "Doctor successfully updated!"
def get_doctor(email):
    """Look up a doctor by auth username and serialize it.

    Returns (True, serialized data) or (False, error message).
    """
    doctor = Doctor.objects.filter(user__auth_user__username=email)
    if doctor.exists():
        return True, DoctorSerializer(doctor[0]).data
    return False, "User does not exist or user is not a doctor!"
def add_food_log(data, email):
    """Record a meal eaten by a client and compute its nutrient totals.

    Returns (success, message, alerts) where *alerts* is the value returned
    by process_meal_history_insert, or None when nothing was inserted.
    """
    day = data.get("day")
    type_of_meal = data.get("type_of_meal")
    meal_id = data.get("meal")
    number_of_servings = data.get("number_of_servings")
    alerts = None
    client = Client.objects.filter(user__auth_user__username=email)
    if not client.exists():
        state, message = False, "Client does not exist."
        return state, message, alerts
    current_client = Client.objects.get(user__auth_user__username=email)
    try:
        meal = Meal.objects.filter(id=meal_id)
        if not meal.exists():
            state, message = False, "Meal does not exist."
            return state, message, alerts
        current_meal = Meal.objects.get(id=meal_id)
        number_of_servings = float(number_of_servings)
        # scale the meal's per-serving nutrients by the amount eaten
        calories = number_of_servings * current_meal.calories
        proteins = number_of_servings * current_meal.proteins
        carbs = number_of_servings * current_meal.carbs
        fat = number_of_servings * current_meal.fat
        inserted_item = MealHistory.objects.create(day=day, type_of_meal=type_of_meal, client=current_client,
                                                   meal=current_meal, number_of_servings=number_of_servings,
                                                   calories=calories, proteins=proteins, carbs=carbs, fat=fat)
        alerts = process_meal_history_insert(current_client, inserted_item)
    except Exception:
        message = "Error while creating new food log!"
        return False, message, alerts
    message = "The food log was created with success"
    return True, message, alerts
def delete_food_log(meal_history):
    """Delete a food-log (MealHistory) entry.

    Returns (success, message).
    """
    try:
        meal_history.delete()
        state, message = True, "Food log successfully deleted"
    except Error:
        # bug fix: message previously said "user" (copy-paste from
        # delete_user) instead of "food log"
        state, message = False, "Error while deleting food log"
    return state, message
def get_food_log(email, day):
    """Return the client's meal history for *day*, grouped by meal type."""
    current = Client.objects.get(user__auth_user__username=email)
    history = MealHistory.objects.filter(day=day, client=current)
    return True, group_meals(history, current)
def update_food_log(request, meal_history):
    """Apply partial updates from the request to a MealHistory queryset.

    *meal_history* is a queryset (update() is called on it).  Returns
    (success, message).
    """
    data = request.data
    state = True
    message = "Food log successfully updated!"
    try:
        if "day" in data:
            day = data.get("day")
            meal_history.update(day=day)
        if "type_of_meal" in data:
            type_of_meal = data.get("type_of_meal")
            meal_history.update(type_of_meal=type_of_meal)
        if "meal" in data:
            meal_id = data.get("meal")
            meal = Meal.objects.filter(id=meal_id)
            if not meal.exists():
                state, message = False, "Meal does not exist."
                return state, message
            current_meal = Meal.objects.get(id=meal_id)
            # recompute nutrient totals for the newly selected meal
            populate_nutrient_values_meal_history(meal_history, meal=current_meal)
            meal_history.update(meal=current_meal)
        if "number_of_servings" in data:
            number_of_servings = float(data.get("number_of_servings"))
            meal_history.update(number_of_servings=number_of_servings)
            # NOTE(review): totals are recomputed *after* the update here,
            # the opposite order of the "meal" branch above -- confirm intended
            populate_nutrient_values_meal_history(meal_history, number_of_servings=number_of_servings)
    except Exception:
        state, message = False, "Error while updating Food log!"
    return state, message
def add_ingredient(data):
    """Create an Ingredient row from the request payload.

    Returns (success, message).
    """
    name = data.get("name")
    calories = data.get("calories")
    carbs = data.get("carbs")
    fat = data.get("fat")
    proteins = data.get("proteins")
    try:
        Ingredient.objects.create(name=name, calories=calories, carbs=carbs,
                                  fat=fat, proteins=proteins)
    except Exception:
        return False, "Error while creating new ingredient!"
    return True, "The ingredient was created with success"
def update_ingredient(data, ingredient_id):
    """Apply partial updates from *data* to an Ingredient.

    Only keys present in the payload are touched.  Returns (success, message).
    """
    ingredient = Ingredient.objects.filter(id=ingredient_id)
    if not ingredient.exists():
        return False, "Ingredient does not exist!"
    try:
        # same update order as before: calories, proteins, fat, carbs, name
        for field in ("calories", "proteins", "fat", "carbs", "name"):
            if field in data:
                ingredient.update(**{field: data.get(field)})
    except Exception:
        return False, "Error while updating ingredient!"
    return True, "Ingredient successfully updated!"
def delete_ingredient(ingredient_id):
    """Delete the Ingredient with the given primary key.

    Returns (success, message).
    """
    try:
        Ingredient.objects.get(id=ingredient_id).delete()
    except Ingredient.DoesNotExist:
        return False, "Ingredient does not exist!"
    return True, "Ingredient successfully deleted!"
def get_ingredients():
    """Serialize every Ingredient in the database."""
    serialized = [IngredientSerializer(item).data for item in Ingredient.objects.all()]
    return True, serialized
def get_ingredient(ingredient_id):
    """Fetch and serialize a single Ingredient.

    Returns (True, data) or (False, error message).
    """
    try:
        found = Ingredient.objects.get(id=ingredient_id)
    except Ingredient.DoesNotExist:
        return False, "Ingredient does not exist!"
    return True, IngredientSerializer(found).data
def add_new_meal(data, username, role="admin"):
    """Create a Meal with its ingredient quantities and nutrient totals.

    Admin-created meals are global (client=None); client-created meals are
    private to that client.  Returns (success, message).
    """
    name = data.get("name")
    category = data.get("category")
    ingredients = data.get("ingredients")
    # treat nullable fields
    client = Client.objects.get(user__auth_user__username=username) if role == "client" else None
    if not ingredients:
        # a meal must reference at least one ingredient
        error_message = "Error while creating new meal!"
        return False, error_message
    try:
        # create new meal
        meal = Meal.objects.create(name=name, category=category, client=client)
    except Exception:
        error_message = "Error while creating new meal!"
        return False, error_message
    try:
        # add ingredients quantities and nutrient values
        for ingredient_json in ingredients:
            # ingredients may be referenced either by id or by name
            if 'id' in ingredient_json:
                ingredient = Ingredient.objects.get(id=ingredient_json["id"])
            elif 'name' in ingredient_json:
                ingredient = Ingredient.objects.get(name=ingredient_json["name"])
            quantity = ingredient_json["quantity"]
            Quantity.objects.create(meal=meal, ingredient=ingredient, quantity=quantity)
            populate_nutrient_values(Meal.objects.filter(id=meal.id), Ingredient.objects.get(id=ingredient.id),
                                     quantity)
    except Ingredient.DoesNotExist:
        # roll back the meal if any referenced ingredient is unknown
        meal.delete()
        error_message = "Ingredient does not exist!"
        return False, error_message
    state_message = "Meal created successfully!"
    return True, state_message
def add_new_ingredient(data):
    """Create an Ingredient from the request payload.

    Returns (success, message).
    NOTE(review): near-duplicate of add_ingredient(); consider consolidating.
    """
    try:
        name = data.get("name")
        calories = data.get("calories")
        carbs = data.get("carbs")
        fat = data.get("fat")
        proteins = data.get("proteins")
    except Exception:
        error_message = "Error creating new ingredient! Request incomplete."
        return False, error_message
    try:
        # fix: dropped the unused `ingredient =` binding
        Ingredient.objects.create(name=name, calories=calories, carbs=carbs,
                                  fat=fat, proteins=proteins)
    except Exception:
        error_message = "Error while creating new ingredient!"
        return False, error_message
    return True, "Ingredient created successfully!"
def get_meals(username):
    """List all meals visible to *username*: global meals plus their own."""
    owner = Client.objects.get(user__auth_user__username=username)
    visible = Meal.objects.filter(Q(client__isnull=True) | Q(client=owner))
    return True, [MealSerializer(meal).data for meal in visible]
def add_doctor_patient_association(data, email):
    """Assign the requesting doctor (*email*) to the client named in *data*.

    Fails when the client does not exist or already has a doctor.
    Returns (success, message).
    """
    client_username = data.get("client")
    # `client` is a queryset (needed for .update()); `current_client` below
    # is the instance used to read the current doctor
    client = Client.objects.filter(user__auth_user__username=client_username)
    current_doctor = Doctor.objects.get(user__auth_user__username=email)
    if not client.exists():
        state, message = False, "Patient does not exist."
        return state, message
    current_client = Client.objects.get(user__auth_user__username=client_username)
    if current_client.doctor is None:
        client.update(doctor=current_doctor)
    else:
        error_message = "The patient already has a doctor associated."
        return False, error_message
    state_message = "The Doctor patient association was created with success"
    return True, state_message
def delete_doctor_patient_association(email):
    """Detach the client identified by *email* from their doctor.

    Returns (success, message).
    """
    try:
        Client.objects.filter(user__auth_user__username=email).update(doctor=None)
    except Exception:
        return False, "Error while deleting Doctor patient association"
    return True, "Doctor patient association successfully deleted"
def doctor_get_all_patients(username):
    """Serialize every client assigned to the doctor *username*.

    Returns (True, list of clients) or (False, error message).
    """
    try:
        doctor = Doctor.objects.get(user__auth_user__username=username)
        patients = Client.objects.filter(doctor=doctor)
        return True, [ClientSerializer(patient).data for patient in patients]
    except Doctor.DoesNotExist:
        return False, "Operation not allowed: you are not a doctor!"
    except Exception:
        return False, "Error while fetching doctor clients' data!"
def get_hospital_doctors(email):
    """List the doctors working at the requesting admin's hospital."""
    hospital = HospitalAdmin.objects.get(auth_user__username=email).hospital
    staff = Doctor.objects.filter(hospital=hospital)
    return True, [DoctorSerializer(doc).data for doc in staff]
def add_fitbit_token(data, email):
    """Store the client's Fitbit OAuth access/refresh token pair.

    Returns (success, message).
    """
    access = data.get("access_token")
    refresh = data.get("refresh_token")
    client = Client.objects.filter(user__auth_user__username=email)
    try:
        client.update(fitbit_access_token=access, fitbit_refresh_token=refresh)
        return True, "The fitbit token was added with success"
    except Exception:
        return False, "Error while adding fitbit token."
def classify_image(image_b64):
    """Classify a base64-encoded food photo via the ML service.

    Sends the image to the ML module and maps the top prediction label
    back to a Meal.  Returns (success, serialized-meal-or-error-message).
    """
    if image_b64 == "":
        return False, "Missing parameters"
    response = get(url=ML_URL, params={"image_b64": image_b64})
    state = False
    message = "Error while trying to classify food"
    if response.status_code == 200:
        # security fix: parse the JSON body instead of eval()-ing arbitrary
        # text returned by an external service (remote code execution risk)
        data = response.json()
        if data:  # non-empty list of predictions
            food = data[-1]["label"]  # get the last element (the one ml module has most confident)
            try:
                meal = Meal.objects.get(name__iexact=food)
                message = MealSerializer(meal).data
                state = True
            except Meal.DoesNotExist:
                message = "Recognized meal does not exist in the system!"
    return state, message
def classify_barcode(username, barcode):
    """Resolve a product barcode to a Meal visible to *username*.

    Looks the barcode up via get_product (a status of 1 in the response
    means the product was found) and matches the product name against
    global meals plus the client's own meals, case-insensitively.
    Returns (success, payload-or-error-message).
    """
    if barcode == "":
        state = False
        message = "Missing parameter: 'barcode'"
    else:
        response = get_product(barcode)
        state = False
        message = "Product not found."
        if response.get("status") == 1:
            product_name = response.get("product").get("product_name")
            message = "Error while trying to classify product"
            if product_name is not None:
                try:
                    client = Client.objects.get(user__auth_user__username=username)
                    meals = Meal.objects.filter(Q(client__isnull=True) | Q(client=client))
                    # first case-insensitive name match wins
                    meal = meals.filter(name__iexact=product_name)[0]
                    message = MealSerializer(meal).data
                    state = True
                except Exception:
                    message = "Item does not exist in the system!"
    return state, message
def get_client_doctor(username):
    """Return the serialized doctor assigned to the client, or None when
    the client has no doctor.

    Returns (success, payload-or-error-message).
    """
    client = Client.objects.get(user__auth_user__username=username)
    try:
        doctor = client.doctor
        state = True
        message = DoctorSerializer(doctor).data if doctor is not None else None
    except Exception:
        # bug fix: error message was copy-pasted from add_fitbit_token
        state, message = False, "Error while fetching the client's doctor."
    return state, message
def get_nutrients_ratio(username, day):
    """Compute nutrient ratios for *username* on *day* from meal history."""
    owner = Client.objects.get(user__auth_user__username=username)
    history = MealHistory.objects.filter(day=day, client=owner)
    if history.exists():
        totals = get_total_nutrients(history)
        return True, get_nutrients_info(owner, totals)
    return False, "The specified day has no history yet."
def get_nutrients_total(username, day):
    """Compute remaining nutrient allowances for *username* on *day*."""
    owner = Client.objects.get(user__auth_user__username=username)
    history = MealHistory.objects.filter(day=day, client=owner)
    if history.exists():
        totals = get_total_nutrients(history)
        return True, get_nutrients_left_values(owner, totals)
    return False, "The specified day has no history yet."
def get_nutrients_history(username, params):
    """Return a time series of one nutrient metric over a period.

    Validates metric then period (in that order) before querying.
    """
    metric = params["metric"]
    if metric not in ("calories", "fat", "carbs", "proteins"):
        return False, "Invalid metric!"
    period = params["period"]
    if period not in ("week", "month", "3-months"):
        return False, "Invalid period!"
    owner = Client.objects.get(user__auth_user__username=username)
    return True, get_nutrient_history(owner, metric, period)
def get_body_history(username, params):
    """Return Fitbit time-series data for one body metric over a period.

    Validates metric/period, then queries the Fitbit API with the client's
    stored OAuth tokens.  On API failure the tokens are cleared so the
    client must re-link.  Returns (success, payload-or-error-message).
    """
    metric = params["metric"]
    if metric not in ["steps", "distance", "calories", "floors", "heart"]:
        state = False
        message = "Invalid metric!"
        return state, message
    period = params["period"]
    if period not in ["week", "month", "3-months"]:
        state = False
        message = "Invalid period!"
        return state, message
    client = Client.objects.get(user__auth_user__username=username)
    fitbit_access_token = client.fitbit_access_token
    fitbit_refresh_token = client.fitbit_refresh_token
    if fitbit_access_token is None or fitbit_refresh_token is None:
        state = False
        message = "You have not integrated your Fitbit device yet!"
        return state, message
    try:
        fitbit_api = fitbit.Fitbit(CLIENT_FITBIT_ID, CLIENT_FITBIT_SECRET, system="en_UK", oauth2=True,
                                   access_token=fitbit_access_token, refresh_token=fitbit_refresh_token,
                                   refresh_cb=client.refresh_cb)
        message = get_body_history_values(fitbit_api, metric, period)
        state = True
    except Exception:
        # assume the tokens expired or were revoked; force a re-link
        client.fitbit_access_token = None
        client.fitbit_refresh_token = None
        client.save()
        state, message = False, "Error while accessing fitbit information."
    return state, message
def get_body_avg_heart_rate(username):
    """Return heart-rate chart data for a client from the Fitbit API.

    Requires a linked Fitbit device; on API failure the stored tokens are
    cleared so the client must re-link.  Returns (success, payload-or-error).
    """
    client = Client.objects.get(user__auth_user__username=username)
    fitbit_access_token = client.fitbit_access_token
    fitbit_refresh_token = client.fitbit_refresh_token
    if fitbit_access_token is None or fitbit_refresh_token is None:
        state = False
        message = "You have not integrated your Fitbit device yet!"
        return state, message
    try:
        fitbit_api = fitbit.Fitbit(CLIENT_FITBIT_ID, CLIENT_FITBIT_SECRET, system="en_UK", oauth2=True,
                                   access_token=fitbit_access_token, refresh_token=fitbit_refresh_token,
                                   refresh_cb=client.refresh_cb)
        message = get_client_heart_rate_chart(client, fitbit_api)
        state = True
    except Exception:
        # assume the tokens expired or were revoked; force a re-link
        client.fitbit_access_token = None
        client.fitbit_refresh_token = None
        client.save()
        state, message = False, "Error while accessing fitbit information."
    return state, message
def get_my_life_stat(username):
    """Return aggregate "my life" statistics for a client.

    Includes Fitbit-derived stats when the client has linked a device;
    otherwise falls back to stats without the Fitbit API.  On Fitbit API
    failure the stored tokens are cleared so the client must re-link.
    Returns (success, payload-or-error-message).
    """
    client = Client.objects.get(user__auth_user__username=username)
    fitbit_access_token = client.fitbit_access_token
    fitbit_refresh_token = client.fitbit_refresh_token
    if fitbit_access_token is None or fitbit_refresh_token is None:
        # no device linked: stats without Fitbit data
        message = get_my_life_stats(client)
        state = True
    else:
        try:
            fitbit_api = fitbit.Fitbit(CLIENT_FITBIT_ID, CLIENT_FITBIT_SECRET, system="en_UK", oauth2=True,
                                       access_token=fitbit_access_token, refresh_token=fitbit_refresh_token,
                                       refresh_cb=client.refresh_cb)
            message = get_my_life_stats(client, fitbit_api)
            state = True
        except Exception:
            # assume the tokens expired or were revoked; force a re-link
            client.fitbit_access_token = None
            client.fitbit_refresh_token = None
            client.save()
            state, message = False, "Error while accessing fitbit information."
    return state, message
def new_expo_token(data, username):
    """Register a new Expo push-notification token for the client."""
    owner = Client.objects.get(user__auth_user__username=username)
    ExpoToken.objects.create(client=owner, token=data["expo_token"])
    return True, "Expo Token registered successfully"
def get_client_expo_tokens(username):
    """List all Expo push tokens registered for the client."""
    owner = Client.objects.get(user__auth_user__username=username)
    entries = ExpoToken.objects.filter(client=owner)
    return True, [entry.token for entry in entries]
def delete_client_expo_tokens(data, username):
    """Delete one Expo token (when supplied) or all of the client's tokens."""
    owner = Client.objects.get(user__auth_user__username=username)
    token = data.get("expo_token")
    if token is None:
        ExpoToken.objects.filter(client=owner).delete()
        return True, "All client's expo tokens were deleted successfully"
    ExpoToken.objects.filter(client=owner, token=token).delete()
    return True, "Client's token was successfully deleted"
def reload_database():
    """Wipe the database and repopulate it with a fixed set of demo data.

    Seeds, in order: the Django superuser, three hospital admins, twenty
    clients, three doctors, the doctor-patient associations, a base set of
    ingredients and two composed meals, plus any extra meals/ingredients
    found in the ../db_data/ JSON files.

    Returns:
        bool: True only if every single insertion reported success.
    """
    try:
        #######################################
        #            WIPE DATABASE            #
        #######################################
        User.objects.all().delete()
        Ingredient.objects.all().delete()
        Meal.objects.all().delete()
        #######################################
        #         CREATE DJANGO ADMIN         #
        #######################################
        User.objects.create_superuser("admin", "<EMAIL>", "admin")
        cur_success = True
        #######################################
        #        CREATE USERS - ADMINS        #
        #######################################
        # (hospital, first_name, last_name, birth_date, phone_number)
        admin_rows = [
            ("Hospital São João", "António", "Martins", "1970-10-01", "910845367"),
            ("Hospital Santo António", "Rui", "Almeida", "1971-03-04", "910547367"),
            ("Hospital da Luz", "Pedro", "Silva", "1980-12-03", "910443377"),
        ]
        for hospital, first_name, last_name, birth_date, phone in admin_rows:
            success, state = add_admin({
                "hospital": hospital,
                "email": "<EMAIL>",
                "first_name": first_name,
                "last_name": last_name,
                "password": "<PASSWORD>",
                "birth_date": birth_date,
                "phone_number": phone
            })
            cur_success = cur_success and success
        #######################################
        #        CREATE USERS - CLIENTS       #
        #######################################
        # (height, weight_goal, current_weight, sex, first_name, last_name,
        #  birth_date, phone_number)
        client_rows = [
            (180, 75, 90, "M", "Vasco", "Almeida", "1975-11-05", "936545567"),
            (170, 70, 85, "F", "Ana", "Almeida", "1977-09-03", "936735367"),
            (190, 80, 100, "M", "Miguel", "Silva", "1990-10-04", "966735367"),
            (184, 80, 90, "M", "Miguel", "Oliveira", "1990-12-07", "966434367"),
            (165, 70, 90, "M", "António", "Silva", "1991-10-04", "965735367"),
            (168, 80, 90, "M", "Miguel", "Pedroseiro", "1980-10-04", "936735367"),
            (170, 60, 75, "F", "Fátima", "Silva", "1990-05-04", "964755367"),
            (180, 70, 75, "F", "Laura", "Silva", "1998-10-04", "916735367"),
            (195, 90, 110, "M", "Pedro", "Pereira", "1980-11-04", "966725567"),
            (160, 70, 100, "M", "Miguel", "Pereira", "1990-10-10", "916735360"),
            (180, 70, 100, "F", "Manuela", "Silva", "1990-10-10", "912684259"),
            (172, 80, 98, "M", "António", "Almeida", "1990-10-11", "968124520"),
            (174, 75, 99, "M", "Paulo", "Silva", "1990-10-20", "930407895"),
            (170, 80, 110, "M", "André", "Silva", "1990-06-04", "910348305"),
            (165, 70, 100, "M", "Miguel", "Matos", "1980-10-04", "930438012"),
            (165, 70, 100, "M", "Miguel", "Pedroso", "1980-06-04", "915005009"),
            (165, 70, 100, "M", "Alberto", "Matos", "1988-10-04", "910002068"),
            (165, 70, 100, "M", "Alberto", "Marques", "1980-10-06", "930002789"),
            (165, 70, 100, "M", "Agostinho", "Matos", "1980-12-12", "913000555"),
            (165, 70, 100, "F", "Albertina", "Matos", "1978-10-04", "908000458"),
        ]
        for height, weight_goal, current_weight, sex, first_name, last_name, birth_date, phone in client_rows:
            success, state = add_client({
                "height": height,
                "weight_goal": weight_goal,
                "current_weight": current_weight,
                "sex": sex,
                "email": "<EMAIL>",
                "first_name": first_name,
                "last_name": last_name,
                "password": "<PASSWORD>",
                "birth_date": birth_date,
                "phone_number": phone
            })
            cur_success = cur_success and success
        #######################################
        #        CREATE USERS - DOCTORS       #
        #######################################
        # (first_name, last_name, birth_date, phone_number, hospital)
        doctor_rows = [
            ("André", "Almeida", "1980-05-10", "966565565", "Hospital São João"),
            ("Rui", "Pereira", "1985-05-04", "964275097", "Hospital Santo António"),
            ("João", "Pereira", "1985-09-16", "914608627", "Hospital da Luz"),
        ]
        for first_name, last_name, birth_date, phone, hospital in doctor_rows:
            success, state = add_doctor({
                "email": "<EMAIL>",
                "first_name": first_name,
                "last_name": last_name,
                "password": "<PASSWORD>",
                "birth_date": birth_date,
                "phone_number": phone
            }, hospital=hospital)
            cur_success = cur_success and success
        #######################################
        #     CREATE USERS - DOCTOR-PATIENT   #
        #######################################
        # The seed data carries redacted e-mail placeholders, so the fifteen
        # original association calls all had identical arguments.
        for _ in range(15):
            success, state = add_doctor_patient_association({"client": "<EMAIL>"},
                                                            email="<EMAIL>")
            cur_success = cur_success and success
        #######################################
        #          CREATE INGREDIENTS         #
        #######################################
        # (name, calories, carbs, fat, proteins)
        ingredient_rows = [
            ("Flour", 364, 76.3, 1, 10.3),
            ("Water", 0, 0, 0, 0),
            ("Sugar", 389, 99.8, 0, 0),
            ("Salt", 0, 0, 0, 0),
            ("Oil", 884, 0, 100, 0),
            ("Tomato Sauce", 82, 18.9, 0.5, 4.3),
            ("Pepper", 251, 64, 3.3, 10.4),
            ("Mozzarella Cheese", 300, 2.2, 22.4, 22.2),
            ("Pork", 275, 0.8, 10, 19),
            ("Egg", 143, 0.7, 9.5, 12.6),
            ("Garlic", 149, 33.1, 0.5, 6.4),
            ("Onion", 32, 7.5, 0.1, 0.8),
            ("Cheese", 264, 0, 21.1, 18.5),
        ]
        ingredients = {}
        for name, calories, carbs, fat, proteins in ingredient_rows:
            ingredients[name] = Ingredient.objects.create(name=name, calories=calories,
                                                          carbs=carbs, fat=fat, proteins=proteins)
        #######################################
        #             CREATE MEALS            #
        #######################################
        # (meal name, category, [(ingredient name, quantity), ...])
        meal_rows = [
            ("Pizza", "Fast Food",
             [("Water", 42), ("Flour", 39.4), ("Sugar", 0.8), ("Salt", 1.25),
              ("Oil", 1.25), ("Tomato Sauce", 15.63), ("Pepper", 1.88),
              ("Mozzarella Cheese", 15.7)]),
            ("Hamburger", "Fast Food",
             [("Pork", 85), ("Salt", 10), ("Pepper", 10), ("Egg", 50),
              ("Garlic", 5), ("Onion", 15), ("Cheese", 28)]),
        ]
        for name, category, parts in meal_rows:
            meal = {"name": name, "category": category,
                    "ingredients": [{"id": ingredients[part].id, "quantity": quantity}
                                    for part, quantity in parts]}
            success, state = add_new_meal(meal, None)
            cur_success = cur_success and success
        # Bulk-load any extra meals/ingredients shipped as JSON.  Initialise
        # both lists first so a failed load degrades gracefully instead of
        # raising a NameError in the loops below (which the outer handler
        # would otherwise silently turn into a bare False).
        meals_json, ingredients_json = [], []
        try:
            meals_json, ingredients_json = load_from_files('../db_data/')
        except Exception as e:
            print(e)
        for ingredient in ingredients_json:
            success, state = add_new_ingredient(ingredient)
            if not success:
                print(ingredient)
            cur_success = cur_success and success
        for meal in meals_json:
            success, state = add_new_meal(meal, None)
            if not success:
                print(meal)
            cur_success = cur_success and success
        return cur_success
    except Exception as exc:
        # Surface the failure instead of swallowing it completely silently.
        print(exc)
        return False
| StarcoderdataPython |
1641041 | <reponame>nlantau/Codewars_2020_2021
# nlantau, 2021-11-03
def filter_string(a):
    """Return the integer formed by keeping only the digit characters of *a*.

    Raises ValueError when *a* contains no digits (int("") fails).
    """
    return int("".join(filter(str.isdigit, a)))

print(filter_string("123"))
| StarcoderdataPython |
1662013 | <filename>kafka_influxdb/reader/kafka_python.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
import logging
from kafka import KafkaConsumer
from kafka.common import ConsumerTimeout, KafkaUnavailableError
from kafka_influxdb.encoder.errors import EncoderError
from kafka_influxdb.reader.reader import ReaderAbstract
class Reader(ReaderAbstract):
    """
    A Kafka consumer based on kafka-python
    See: https://github.com/dpkp/kafka-python
    """

    def _connect(self):
        """Open the KafkaConsumer; wrap connection failures in EncoderError."""
        broker = "{0}:{1}".format(self.host, self.port)
        logging.info("Connecting to Kafka at %s...", broker)
        try:
            self.consumer = KafkaConsumer(self.topic,
                                          group_id=self.group,
                                          bootstrap_servers=[broker]
                                          )
        except KafkaUnavailableError as err:
            raise EncoderError(err)

    def _handle_read(self):
        """Yield raw payloads from Kafka until the consumer times out."""
        try:
            for record in self.consumer:
                yield record.value
        except ConsumerTimeout as timeout:
            logging.error("Kafka error: %s.", timeout)
            # The actual reconnect handling is done in the caller
            raise EncoderError(timeout)
1682390 | from OpenGL.GL import glBindTexture, glTexParameteri, GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, \
GL_TEXTURE_WRAP_T, GL_REPEAT, GL_TEXTURE_MIN_FILTER, GL_TEXTURE_MAG_FILTER, GL_LINEAR,\
glTexImage2D, GL_RGBA, GL_UNSIGNED_BYTE
from PIL import Image
# for use with GLFW
def load_texture(path, texture):
    """Upload the image file at *path* into the existing GL texture *texture*.

    Binds the texture, configures repeat wrapping and linear min/mag
    filtering, flips the image vertically (GL's texture origin is
    bottom-left while PIL's is top-left) and uploads it as RGBA bytes.
    Returns the same texture id that was passed in.
    """
    glBindTexture(GL_TEXTURE_2D, texture)
    # Set the texture wrapping parameters
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
    # Set texture filtering parameters
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
    # load image, flipped so row 0 ends up at GL's bottom-left origin
    image = Image.open(path)
    image = image.transpose(Image.FLIP_TOP_BOTTOM)
    img_data = image.convert("RGBA").tobytes()
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, image.width, image.height, 0, GL_RGBA, GL_UNSIGNED_BYTE, img_data)
    return texture
| StarcoderdataPython |
1615359 | <filename>setup.py
from setuptools import setup
import setuptools
# Packaging metadata for the "rubikenv" distribution: a Gym environment
# wrapping the Rubik's cube.
setup(name="rubikenv",
      version="0.1",
      description="Gym env for rubik cube",
      author="<NAME>",
      author_email="<EMAIL>",
      packages=setuptools.find_packages(),
      package_dir={"rubikenv": "rubikenv"},
      install_requires=[],
      extras_require={
          "dev": [],
      },
      license="Apache 2.0")
115213 | #!/usr/bin/python
'''
https://docs.python.org/2/reference/datamodel.html
'''
class Empty:
    """Minimal (Python 2 old-style) class with no attributes of its own."""
    pass
#base class is object
class BankAccount(object):
    """Demo bank account exercising Python's special ("dunder") methods.

    Most hooks below are deliberate stubs kept as a reference map of the
    data model (https://docs.python.org/2/reference/datamodel.html).
    The attribute hooks delegate to ``object`` so the class stays usable:
    stubbing __getattribute__/__setattr__ with ``pass`` would silently
    break every attribute read and write on the instance.
    """
    # class-level ("static") variable, shared by all instances
    class_variable = "bank"

    def __init__(self, initial_balance=0):
        self.balance = initial_balance
        self.index = 0  # iteration cursor used by __iter__/__next__

    # called when instances are compared (Python 2 only; ignored on Python 3)
    def __cmp__(self, other):
        pass

    # --- representation -------------------------------------------------
    # for bytes(x)
    def __bytes__(self):
        pass

    # for format(x, spec)
    def __format__(self, format_spec):
        pass

    # should return a valid python expression recreating the object
    def __repr__(self):
        pass

    # called by print(x) / str(x)
    def __str__(self):
        pass

    def __new__(cls, *args, **kwargs):
        # object.__new__ must be called WITHOUT the constructor arguments:
        # forwarding them raises TypeError on Python 3 (they are consumed
        # by __init__ instead).
        new_inst = super(BankAccount, cls).__new__(cls)
        return new_inst

    # called on last dereference; exceptions raised in a destructor are ignored
    def __del__(self):
        pass

    # hash(x); delegate so instances stay hashable (a stub returning None
    # would make hash(x) raise TypeError)
    def __hash__(self):
        return object.__hash__(self)

    # isinstance() hook (meaningful on metaclasses); receives the tested object
    def __instancecheck__(self, instance):
        pass

    # issubclass() hook (meaningful on metaclasses); receives the tested class
    def __subclasscheck__(self, subclass):
        pass

    # hook consulted by abc.ABCMeta.__subclasscheck__
    @classmethod
    def __subclasshook__(cls, subclass):
        return NotImplemented

    # --- container protocol (set/sequence-like) -------------------------
    # called on "x in s"
    def __contains__(self, x):
        pass

    # len(x)
    def __len__(self):
        return 4

    # --- mapping protocol ----------------------------------------------
    def __delitem__(self, key):
        pass

    # item assignment receives the assigned value too: d[key] = value
    def __setitem__(self, key, value):
        pass

    def __getitem__(self, key):
        pass

    # dict-subclass hook for missing keys
    def __missing__(self, non_existent_key):
        pass

    # --- binary numeric operators ---------------------------------------
    def __add__(self, other): pass
    def __sub__(self, other): pass
    def __mul__(self, other): pass
    def __truediv__(self, other): pass
    def __floordiv__(self, other): pass   # x // y
    def __mod__(self, other): pass
    def __divmod__(self, other): pass     # divmod(x, y)
    def __pow__(self, other): pass
    def __lshift__(self, other): pass
    def __rshift__(self, other): pass
    def __and__(self, other): pass
    def __xor__(self, other): pass
    def __or__(self, other): pass

    # --- reflected variants: used when self is the right-hand operand ---
    def __radd__(self, other): pass
    def __rsub__(self, other): pass
    def __rmul__(self, other): pass
    def __rtruediv__(self, other): pass
    def __rfloordiv__(self, other): pass
    def __rmod__(self, other): pass
    def __rdivmod__(self, other): pass
    def __rpow__(self, other): pass
    def __rlshift__(self, other): pass
    def __rrshift__(self, other): pass
    def __rand__(self, other): pass
    def __rxor__(self, other): pass
    def __ror__(self, other): pass

    # --- in-place variants (x += y, x <<= y, ...) -----------------------
    def __iadd__(self, other): pass
    def __isub__(self, other): pass
    def __imul__(self, other): pass
    def __itruediv__(self, other): pass
    def __ifloordiv__(self, other): pass
    def __imod__(self, other): pass
    # NOTE: "__idivmod__" is not part of the data model (divmod has no
    # augmented form); kept only because the original listed it.
    def __idivmod__(self, other): pass
    def __ipow__(self, other): pass
    def __ilshift__(self, other): pass
    def __irshift__(self, other): pass
    def __iand__(self, other): pass
    def __ixor__(self, other): pass
    def __ior__(self, other): pass

    # --- unary operators and numeric conversions ------------------------
    def __neg__(self): pass
    def __pos__(self): pass
    def __abs__(self): pass
    def __invert__(self): pass
    def __complex__(self): pass
    def __int__(self): pass
    def __float__(self): pass
    # round(x) and round(x, n) arrive through ONE hook with an optional
    # ndigits argument (the original defined __round__ twice, the second
    # definition silently shadowing the first)
    def __round__(self, n=None): pass
    # math.ceil/floor/trunc pass no argument besides the instance
    def __ceil__(self): pass
    def __floor__(self): pass
    def __trunc__(self): pass
    # used when the object serves as an index: a_list[x]
    def __index__(self): pass

    # --- rich comparisons ------------------------------------------------
    def __eq__(self, other): pass
    def __ne__(self, other): pass
    def __lt__(self, other): pass
    def __le__(self, other): pass
    def __gt__(self, other): pass
    def __ge__(self, other): pass

    # truth value: bool(x) on Python 3, __nonzero__ on Python 2
    def __bool__(self): pass
    def __nonzero__(self): pass
    def __unicode__(self): pass

    # --- copying / pickling ----------------------------------------------
    def __copy__(self): pass                        # copy.copy(x)
    def __deepcopy__(self, memo): pass              # copy.deepcopy(x) passes a memo dict
    def __getstate__(self): pass                    # pickle.dump(x, file)
    def __reduce__(self): pass
    def __reduce_ex__(self, protocol_version): pass
    def __setstate__(self, state): pass             # x = pickle.load(file) passes the state
    def __getnewargs__(self): pass

    # --- context manager: "with x:" --------------------------------------
    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_value, traceback):
        pass

    # --- attribute access hooks ------------------------------------------
    # called on EVERY attribute lookup; must delegate, otherwise methods
    # and data attributes would all silently resolve to None
    def __getattribute__(self, key):
        return object.__getattribute__(self, key)

    # fallback for attributes __getattribute__ could not find
    def __getattr__(self, key):
        raise AttributeError(key)

    # called on every attribute assignment; must delegate or nothing is stored
    def __setattr__(self, key, val):
        object.__setattr__(self, key, val)

    # called on "del x.attr" (the original misspelled this as __detattr__)
    def __delattr__(self, name):
        object.__delattr__(self, name)

    # dir(x) - list attributes and methods
    def __dir__(self):
        return sorted(set(dir(type(self)) + list(self.__dict__)))

    # CLASS WHICH ACTS LIKE FUNC - makes instances callable: myinstance()
    def __call__(self):
        pass

    # --- iterator protocol -----------------------------------------------
    # iter() must return an object exposing next/__next__ (need not be self)
    def __iter__(self):
        return self

    # yields the balance exactly once, then stops
    def __next__(self):
        if self.index == 1:
            raise StopIteration
        self.index = self.index + 1
        return self.balance

    # Python 2 looks the iterator step up under the name "next"
    next = __next__

    def __reversed__(self):
        pass

    # plain generator method
    def generate(self):
        yield self.balance

    # all methods virtual by default
    def deposit(self, amount):
        self.balance += amount

    def withdraw(self, amount):
        self.balance -= amount

    def overdrawn(self):
        return self.balance < 0

    # no private variables, but a single leading _ marks a name non-public
    # and two leading underscores trigger mangling to _ClassName__name
    def override_overdrawn(self):
        return self.__overdrawn()

    # private copy of the method so the mangled name resolves internally
    __overdrawn = overdrawn
# main code follows
if __name__ == "__main__":
my_account = BankAccount(15)
print my_account.__class__
print my_account.__doc__
# ptr to func
wptr = my_account.withdraw
wptr(5)
print my_account.balance
print my_account.override_overdrawn()
print "using iterator"
for i in my_account:
print i
print "using generator which generates iter and next"
for ch in my_account.generate():
print ch
# isinstance(obj, type1), issubclass(type1, type2), istype
| StarcoderdataPython |
1735973 | import sys
import os
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.multiprocessing as mp
from torch.nn.parallel import DataParallel
from datetime import datetime
from pepper_variant.modules.python.models.dataloader_predict import SequenceDataset
from pepper_variant.modules.python.models.ModelHander import ModelHandler
from pepper_variant.modules.python.Options import ImageSizeOptions, ImageSizeOptionsHP, TrainOptions
from pepper_variant.modules.python.DataStorePredict import DataStore
os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
def predict(options, input_filepath, input_files, output_filepath, threads):
    """Run the transducer model over image files and write type predictions to HDF5.

    Loads the model from options.model_path, moves it to GPU wrapped in
    DataParallel, then streams each input file through the network batch by
    batch and stores the per-candidate type outputs in
    <output_filepath>pepper_prediction.hdf.

    NOTE(review): requires CUDA — the model and tensors are moved with
    .cuda() unconditionally.
    """
    # image feature height depends on whether haplotype (HP) info is used
    if options.use_hp_info:
        image_features = ImageSizeOptionsHP.IMAGE_HEIGHT
    else:
        image_features = ImageSizeOptions.IMAGE_HEIGHT
    transducer_model, hidden_size, gru_layers, prev_ite = \
        ModelHandler.load_simple_model_for_training(options.model_path,
                                                    image_features=image_features,
                                                    num_classes=ImageSizeOptions.TOTAL_LABELS,
                                                    num_type_classes=ImageSizeOptions.TOTAL_TYPE_LABELS)
    transducer_model.eval()
    # NOTE(review): second eval() call is redundant (eval() returns the module)
    transducer_model = transducer_model.eval()
    # create output file
    output_filename = output_filepath + "pepper_prediction" + ".hdf"
    prediction_data_file = DataStore(output_filename, mode='w')
    torch.set_num_threads(threads)
    transducer_model = transducer_model.cuda()
    transducer_model.eval()
    transducer_model = DataParallel(transducer_model)
    sys.stderr.write("[" + str(datetime.now().strftime('%m-%d-%Y %H:%M:%S')) + "] " +
                     "INFO: TOTAL FILES: " + str(len(input_files)) + ".\n")
    sys.stderr.flush()
    batch_completed = 0
    for file_id, input_file in enumerate(input_files):
        # data loader
        input_data = SequenceDataset(input_filepath, input_file)
        data_loader = DataLoader(input_data,
                                 batch_size=options.batch_size,
                                 shuffle=False,
                                 num_workers=options.num_workers,
                                 collate_fn=SequenceDataset.my_collate)
        with torch.no_grad():
            for contigs, positions, depths, candidates, candidate_frequencies, images in data_loader:
                sys.stderr.flush()
                images = images.type(torch.FloatTensor)
                # fresh zero hidden/cell state per batch; the factor of 2 is
                # presumably for a bidirectional RNN — TODO confirm
                hidden = torch.zeros(images.size(0), 2 * TrainOptions.GRU_LAYERS, TrainOptions.HIDDEN_SIZE)
                cell_state = torch.zeros(images.size(0), 2 * TrainOptions.GRU_LAYERS, TrainOptions.HIDDEN_SIZE)
                images = images.cuda()
                hidden = hidden.cuda()
                cell_state = cell_state.cuda()
                # run inference
                # output_base, output_type = transducer_model(images, hidden, cell_state, False)
                outputs = transducer_model(images, hidden, cell_state, False)
                output_base, output_type = tuple(outputs)
                # only the type head is written out; the base head is discarded
                # output_base = output_base.detach().cpu().numpy()
                output_type = output_type.detach().cpu().numpy()
                prediction_data_file.write_prediction(batch_completed, contigs, positions, depths, candidates, candidate_frequencies, output_type)
                batch_completed += 1
        sys.stderr.write("[" + str(datetime.now().strftime('%m-%d-%Y %H:%M:%S')) + "] " +
                         "INFO: FILES COMPLETED: " + str(file_id + 1) + "/" + str(len(input_files)) + ".\n")
        sys.stderr.flush()
def predict_distributed_gpu(options, filepath, input_files, output_filepath, threads_per_caller):
    """
    Create a prediction table/dictionary of an images set using a trained model.
    :param options: Options for prediction.
    :param filepath: Path to image files to predict on.
    :param input_files: Path to individual image files to predict on.
    :param output_filepath: Path to output directory.
    :param threads_per_caller: How many threads to set per caller.
    :return:
    """
    # NOTE(review): despite the name, this simply forwards to predict() in the
    # current process; no multi-GPU distribution happens here.
    predict(options, filepath, input_files, output_filepath, threads_per_caller)
50985 | #<NAME>
#Language: Python
#Exercise 13 from: https://wiki.python.org.br/EstruturaSequencial
#Read the height (in metres) from standard input
altura = float(input("Qual sua altura? "))
#Compute the "ideal weight" for men and women (classic height-based formulas)
pesoIdealHomem = (72.7*altura)-58
pesoIdealMulher = (62.1*altura)-44.7
#Print both results with two decimal places (message is in Portuguese)
print("Seu peso ideal caso você seja homem é {:.2f} e se for mulher {:.2f}". format(pesoIdealHomem,pesoIdealMulher))
195937 | <reponame>clayne/mdec<filename>backend/ghidra/dump.py
from ghidra.app.decompiler import DecompInterface
import traceback
# Dump pseudo-C for every function in the current program to out.c.
out = open('out.c', 'w')
try:
    # One decompiler interface reused for every function; creating a fresh
    # DecompInterface per function (as the original did) is wasteful.
    di = DecompInterface()
    di.openProgram(currentProgram)
    for f in currentProgram.getFunctionManager().getFunctions(True):
        try:
            # timeout=0, no task monitor
            out.write(di.decompileFunction(f, 0, None).getDecompiledFunction().getC())
            out.write('\n')
        except Exception:
            # record the traceback but keep going with the remaining functions
            out.write(traceback.format_exc())
            out.write('Failed to decompile %s\n' % str(f))
finally:
    out.close()
1702299 | import torch
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau, StepLR
import numpy as np
import pandas as pd
from collections import Counter
import time, os, platform, sys, re
import torch.backends.cudnn as cudnn
def metric(probability, truth, threshold=0.5, reduction='none'):
    """Per-sample dice, split into positive and negative images.

    probability and truth must be torch tensors of identical flattened shape.
    Returns (dice, dice_neg, dice_pos, num_neg, num_pos), where a "negative"
    sample is one whose ground-truth mask is empty.
    """
    n_samples = len(truth)
    with torch.no_grad():
        prob_flat = probability.view(n_samples, -1)
        truth_flat = truth.view(n_samples, -1)
        assert (prob_flat.shape == truth_flat.shape)
        pred_bin = (prob_flat > threshold).float()
        truth_bin = (truth_flat > 0.5).float()
        truth_area = truth_bin.sum(-1)
        pred_area = pred_bin.sum(-1)
        # rows with an empty ground-truth mask vs. rows with any foreground
        neg_rows = torch.nonzero(truth_area == 0)
        pos_rows = torch.nonzero(truth_area >= 1)
        # negative dice is 1 iff the prediction is also empty
        dice_all_neg = (pred_area == 0).float()
        dice_all_pos = 2 * (pred_bin * truth_bin).sum(-1) / ((pred_bin + truth_bin).sum(-1))
        dice_neg = dice_all_neg[neg_rows]
        dice_pos = dice_all_pos[pos_rows]
        dice = torch.cat([dice_pos, dice_neg])
        num_neg = len(neg_rows)
        num_pos = len(pos_rows)
    return dice, dice_neg, dice_pos, num_neg, num_pos
class Meter:
    """Accumulates dice and IoU statistics over one epoch."""

    def __init__(self, phase, epoch):
        self.base_threshold = 0.5  # <<<<<<<<<<< here's the threshold
        self.base_dice_scores = []
        self.dice_neg_scores = []
        self.dice_pos_scores = []
        self.iou_scores = []

    def predict(self, X, threshold):
        """Binarize a sigmoid output array at *threshold* (uint8 mask)."""
        binarized = np.copy(X)
        return (binarized > threshold).astype('uint8')

    def update(self, targets, outputs):
        """Fold one batch of raw logits/targets into the running statistics."""
        probs = torch.sigmoid(outputs)
        dice, dice_neg, dice_pos, _, _ = metric(probs, targets, self.base_threshold)
        self.base_dice_scores.extend(dice.tolist())
        self.dice_pos_scores.extend(dice_pos.tolist())
        self.dice_neg_scores.extend(dice_neg.tolist())
        hard_preds = self.predict(probs, self.base_threshold)
        self.iou_scores.append(compute_iou_batch(hard_preds, targets, classes=[1]))

    def get_metrics(self):
        """Return ([dice, dice_neg, dice_pos], iou) epoch averages."""
        dices = [np.nanmean(self.base_dice_scores),
                 np.nanmean(self.dice_neg_scores),
                 np.nanmean(self.dice_pos_scores)]
        return dices, np.nanmean(self.iou_scores)
def epoch_log(phase, epoch, epoch_loss, meter, start):
    """Print the epoch's metrics and return (dice, iou).

    *phase*, *epoch* and *start* are accepted for signature compatibility
    but are not used in the printed line.
    """
    (dice, dice_neg, dice_pos), iou = meter.get_metrics()
    print("Loss: %0.4f | IoU: %0.4f | dice: %0.4f | dice_neg: %0.4f | dice_pos: %0.4f" % (
        epoch_loss, iou, dice, dice_neg, dice_pos))
    return dice, iou
def compute_ious(pred, label, classes, ignore_index=255, only_present=True):
    """Per-class IoU between one predicted mask and one ground-truth mask.

    NOTE: *pred* is modified IN PLACE (pixels marked ignore_index in the
    label are zeroed), matching the historical behaviour callers rely on.
    Classes absent from the label contribute NaN when only_present is True.
    Returns [1] when no score was collected at all.
    """
    pred[label == ignore_index] = 0
    scores = []
    for cls in classes:
        gt_mask = label == cls
        if only_present and np.sum(gt_mask) == 0:
            scores.append(np.nan)
            continue
        pred_mask = pred == cls
        inter = np.logical_and(pred_mask, gt_mask).sum()
        union = np.logical_or(pred_mask, gt_mask).sum()
        if union != 0:
            scores.append(inter / union)
    return scores if scores else [1]
def compute_iou_batch(outputs, labels, classes=None):
    """Mean IoU over a batch of ground-truth and predicted masks."""
    preds = np.copy(outputs)  # work on a copy: compute_ious mutates its input
    labels = np.array(labels)  # tensor to np
    per_image = [np.nanmean(compute_ious(p, l, classes))
                 for p, l in zip(preds, labels)]
    return np.nanmean(per_image)
class Seg_Trainer(object):
'''This class takes care of training and validation of our model'''
def __init__(self, dataloaders, model, criterion, out_dir, lr=5e-4, batch_size=8, epochs=20, use_sam=False):
self.batch_size = {"train": batch_size, "val": 4 * batch_size}
if self.batch_size["train"] < 24:
self.accumulation_steps = 24 // self.batch_size["train"]
else:
self.accumulation_steps = 1
# print(self.accumulation_steps)
self.out_dir = out_dir
self.lr = lr
self.num_epochs = epochs
self.best_loss = float("inf")
self.phases = ["train", "val"]
self.cuda = torch.cuda.is_available()
if self.cuda:
# torch.set_default_tensor_type("torch.cuda.FloatTensor")
cudnn.benchmark = True
# else:
# torch.set_default_tensor_type("torch.FloatTensor")
self.net = model
self.criterion = criterion
self.optimizer = optim.Adam(self.net.parameters(), lr=self.lr, weight_decay=1e-4, amsgrad=True)
self.scheduler = StepLR(self.optimizer, step_size=10, gamma=0.2)
self.dataloaders = dataloaders
self.losses = {phase: [] for phase in self.phases}
self.iou_scores = {phase: [] for phase in self.phases}
self.dice_scores = {phase: [] for phase in self.phases}
self.use_sam = use_sam
def forward(self, images, targets, usmasks=None):
if self.cuda:
images = images.cuda()
masks = targets.cuda()
if self.use_sam:
usmasks = usmasks.cuda()
if self.use_sam:
outputs = self.net(images, usmasks)
else:
outputs = self.net(images)
# masks=masks.unsqueeze(1)
loss = self.criterion(outputs, masks)
# print(loss)
return loss, outputs
def iterate(self, epoch, phase):
meter = Meter(phase, epoch)
start = time.strftime("%H:%M:%S")
print(f"Starting epoch: {epoch} | phase: {phase} | ⏰: {start}")
batch_size = self.batch_size[phase]
self.net.train(phase == "train")
dataloader = self.dataloaders[phase]
running_loss = 0.0
total_batches = len(dataloader)
# tk0 = tqdm(dataloader, total=total_batches)
self.optimizer.zero_grad()
if self.use_sam:
for itr, (images, targets, usmasks) in enumerate(dataloader):
loss, outputs = self.forward(images, targets, usmasks)
loss = loss / self.accumulation_steps
if phase == "train":
loss.backward()
if (itr + 1) % self.accumulation_steps == 0:
self.optimizer.step()
self.optimizer.zero_grad()
running_loss += loss.item()
outputs = outputs.detach().cpu()
meter.update(targets, outputs)
else:
for itr, (images, targets) in enumerate(dataloader):
loss, outputs = self.forward(images, targets)
loss = loss / self.accumulation_steps
if phase == "train":
loss.backward()
if (itr + 1) % self.accumulation_steps == 0:
self.optimizer.step()
self.optimizer.zero_grad()
running_loss += loss.item()
outputs = outputs.detach().cpu()
meter.update(targets, outputs)
# tk0.set_postfix(loss=(running_loss / ((itr + 1))))
epoch_loss = (running_loss * self.accumulation_steps) / total_batches
dice, iou = epoch_log(phase, epoch, epoch_loss, meter, start)
self.losses[phase].append(epoch_loss)
self.dice_scores[phase].append(dice)
self.iou_scores[phase].append(iou)
torch.cuda.empty_cache()
return epoch_loss
    def start(self):
        """Main training loop: alternate train/val epochs, checkpoint on new
        best validation loss, and append metrics to ``train_log.txt``.
        """
        for epoch in range(self.num_epochs):
            self.iterate(epoch, "train")
            # Snapshot of the state to save if this epoch turns out to be best.
            state = {
                "epoch": epoch,
                "best_loss": self.best_loss,
                "state_dict": self.net.state_dict(),
                "optimizer": self.optimizer.state_dict(),
            }
            with torch.no_grad():
                val_loss = self.iterate(epoch, "val")
                # self.scheduler.step(val_loss)
                # NOTE(review): scheduler.step() is assumed to return a truthy
                # value when the optimizer was restarted (warm restarts) —
                # confirm against the scheduler implementation.
                restart = self.scheduler.step()
            if val_loss < self.best_loss:
                print("******** New optimal found, saving state ********")
                state["best_loss"] = self.best_loss = val_loss
                torch.save(state, self.out_dir + '/model_lowest_loss.pth')
                with open(self.out_dir + '/train_log.txt', 'a') as acc_file:
                    acc_file.write('New optimal found (loss %s), state saved.\n' % val_loss)
            with open(self.out_dir + '/train_log.txt', 'a') as acc_file:
                acc_file.write('Epoch: %2d, Loss: %.8f, Dice: %.8f, IoU: %.8f.\n ' % (epoch,
                               self.losses['val'][-1],
                               self.dice_scores['val'][-1],
                               self.iou_scores['val'][-1]))
            if restart:
                with open(self.out_dir + '/train_log.txt', 'a') as acc_file:
                    acc_file.write('At Epoch: %2d, Optim Restart. Got Loss: %.8f, Dice: %.8f, IoU: %.8f.\n ' % (epoch,
                                   self.losses['val'][-1],
                                   self.dice_scores['val'][-1],
                                   self.iou_scores['val'][-1]))
            print()
| StarcoderdataPython |
1676441 | import math
from tqdm import tqdm
class VerboseMixin(object):
    """Mixin adding optional progress-bar and log output, gated on ``self.verbose``."""
    def _progress(self, iterator):
        """Wrap *iterator* in a tqdm bar when verbose; otherwise pass it through unchanged."""
        if not self.verbose:
            return iterator
        return tqdm(iterator, desc=self.__class__.__name__)
    def _log(self, message):
        """Print *message* prefixed with the concrete class name, when verbose."""
        if not self.verbose:
            return
        print(f"[{self.__class__.__name__}] {message}")
class NoFitMixin(object):
    """Mixin providing a no-op ``fit`` for stateless, scikit-learn-style transformers."""
    def fit(self, X, y=None, *args, **kwargs):
        """Ignore all arguments and return ``self`` (API compatibility only)."""
        return self
def optimal_clusters(n_samples: int) -> int:
    """
    Returns optimal number of clusters for the given number of samples
    (uneven distribution of character images results in fewer image classes
    being present in the smaller sample, preventing fixed number of clusters
    to yield desired results)

    The formula was developed via empirical trial-and-error testing.

    Raises:
        ValueError: if ``n_samples`` is below 100 (the formula is undefined
        there; the original raised an opaque ``math domain error`` instead).
    """
    if n_samples < 100:
        raise ValueError(
            f"optimal_clusters requires at least 100 samples, got {n_samples}"
        )
    return 60 + int(12 * math.log10(n_samples // 100))
| StarcoderdataPython |
1783716 | <reponame>Cipahi/explore_australia
""" file: rotation.py (explore_australia)
author: <NAME>, @jesserobertson
date: Thursday, 03 January 2019
description: Rotation of geographic points
"""
from scipy.linalg import expm, norm
import numpy as np
from shapely.geometry import Polygon, MultiPolygon, MultiLineString, LineString, LinearRing, \
Point, MultiPoint
def rotation_matrix(axis, angle):
    """
    Build the Rodrigues' rotation matrix for a counterclockwise/right-hand
    rotation of *angle* radians about *axis*.

    Parameters:
        axis - the vector to rotate about
        angle - the amount of rotation, in radians
    """
    # exp of the scaled cross-product (skew-symmetric) generator is the
    # rotation matrix about the unit axis.
    generator = np.cross(np.identity(3), (axis / norm(axis)) * angle)
    return expm(generator)
def geographic_to_spherical(points):
    """
    Convert geographic coordinates (longitude, latitude) to spherical
    coordinates (inclination, azimuth).

    Uses the physics/ISO convention: theta is the inclination in [0, pi],
    phi is the azimuth in [0, 2*pi).

    Parameters:
        points - an (N, 2) shaped array of longitude/latitude points, in degrees

    Returns:
        an (N, 2) shaped array of (theta/inclination, phi/azimuth) points,
        in radians
    """
    lon, lat = points.transpose()
    theta = np.radians(90 - lat)   # colatitude
    phi = np.radians(lon + 180)    # shift longitude range onto [0, 360)
    return np.column_stack([theta, phi])
def spherical_to_cartesian(points):
    """
    Convert spherical coordinates (inclination, azimuth) to cartesian
    coordinates (x, y, z) on the unit sphere (r = 1).

    Uses the physics/ISO convention: theta is the inclination in [0, pi],
    phi is the azimuth in [0, 2*pi).

    Parameters:
        points - an (N, 2) shaped array of (inclination, azimuth) points,
                 in radians

    Returns:
        an (N, 3) shaped array of (x, y, z) points
    """
    theta, phi = points.transpose()
    sin_theta = np.sin(theta)
    return np.column_stack([
        sin_theta * np.cos(phi),
        sin_theta * np.sin(phi),
        np.cos(theta),
    ])
def cartesian_to_spherical(points):
    """
    Convert cartesian coordinates (x, y, z) to spherical coordinates
    (inclination, azimuth), assuming r = 1.

    Uses the physics/ISO convention: theta is the inclination in [0, pi],
    phi is the azimuth in [0, 2*pi).

    Parameters:
        points - an (N, 3) shaped array of (x, y, z) points

    Returns:
        an (N, 2) shaped array of (inclination, azimuth) points, in radians
    """
    x, y, z = points.transpose()
    r = np.sqrt(x * x + y * y + z * z)
    theta = np.arccos(z / r)
    # arctan2 yields (-pi, pi]; fold negatives up into [0, 2*pi)
    phi = np.mod(np.arctan2(y, x), 2 * np.pi)
    return np.column_stack([theta, phi])
def spherical_to_geographic(points):
    """
    Convert spherical coordinates (inclination, azimuth) to geographic
    coordinates (longitude, latitude).

    Uses the physics/ISO convention: theta is the inclination in [0, pi],
    phi is the azimuth in [0, 2*pi).

    Parameters:
        points - an (N, 2) shaped array of (theta/inclination, phi/azimuth)
                 points, in radians

    Returns:
        an (N, 2) shaped array of longitude/latitude points, in degrees
    """
    theta, phi = points.transpose()
    return np.column_stack([np.degrees(phi) - 180, 90 - np.degrees(theta)])
def geographic_to_cartesian(points):
    """
    Convert geographic coordinates (longitude, latitude) into cartesian
    coordinates (x, y, z) assuming r = 1

    Parameters:
        points - an (N, 2) shaped array of longitude/latitude points, in degrees

    Returns:
        an (N, 3) shaped array of cartesian (x, y, z) points
    """
    # Compose the two elementwise transforms defined above (unit sphere).
    return spherical_to_cartesian(geographic_to_spherical(points))
def cartesian_to_geographic(points):
    """
    Convert cartesian coordinates (x, y, z) into geographic
    coordinates (longitude, latitude) assuming r = 1

    Parameters:
        points - an (N, 3) shaped array of cartesian (x, y, z) points

    Returns:
        an (N, 2) shaped array of longitude/latitude points, in degrees
    """
    # Inverse composition of geographic_to_cartesian.
    return spherical_to_geographic(cartesian_to_spherical(points))
def rotate(geom, pole, angle):
    """
    Rotate a shapely {Multi,}Polygon, {Multi,}LineString or {Multi,}Point
    through a given angle on a spherical surface

    Parameters:
        geom - the geometry to rotate, given in WGS84 positions
        pole - the axis through which to rotate the geometry, as a shapely
               Point in WGS84 positions (``pole.coords`` is read below)
        angle - the angle to rotate the geometry by, in degrees
                (converted with np.radians below)

    Returns:
        the rotated geometries
    """
    # Construct the rotation axis (unit vector through the pole point)
    pole = geographic_to_cartesian(np.asarray(pole.coords))[0]
    pole /= norm(pole)
    # Construct the rotation matrix and a function to rotate vectors
    rmatrix = rotation_matrix(pole, np.radians(angle))
    def rotator(points):
        "Rotate an (N, 2) lon/lat array about the pole and map back to lon/lat."
        vec = geographic_to_cartesian(points)
        vec_rot = (rmatrix @ vec.T).T
        return cartesian_to_geographic(vec_rot)
    # Handle different geometry types by dispatching on geom_type
    mapping = {
        'Point': _point,
        'MultiPoint': _multipoint,
        'Polygon': _polygon,
        'LineString': _linestring,
        'MultiPolygon': _multipolygon,
        'LinearRing': _linearring,
        'MultiLineString': _multilinestring
    }
    try:
        return mapping[geom.geom_type](geom, rotator)
    except KeyError:
        # NOTE(review): a KeyError raised *inside* a handler would also land
        # here and be misreported as an unknown geometry type — confirm this
        # is acceptable.
        msg = "Don't know how to rotate a {}".format(geom.geom_type)
        raise ValueError(msg)
    except AttributeError:
        raise ValueError("Object doesn't look like a geometry object")
# Reprojection helpers
def _point(geom, rotator):
    # rotator returns an (N, 2) array; a Point has a single coordinate row.
    return Point(*rotator(np.asarray(geom.coords)))
def _multipoint(geom, rotator):
    # Rotate each member point. Iterate over .geoms: iterating a multi-part
    # geometry directly was removed in Shapely 2.0 (.geoms works in 1.x too).
    return MultiPoint([_point(p, rotator) for p in geom.geoms])
def _polygon(geom, rotator):
    # Rotate the exterior shell and any interior rings (holes) separately.
    if geom.interiors:
        return Polygon(
            shell=_linearring(geom.exterior, rotator),
            holes=[_linearring(i, rotator) for i in geom.interiors]
        )
    else:
        return Polygon(_linearring(geom.exterior, rotator))
def _linestring(geom, rotator):
    # Rotate every vertex of the line in one vectorized call.
    return LineString(rotator(np.asarray(geom.coords)))
def _linearring(geom, rotator):
    # Rotate every vertex of the closed ring in one vectorized call.
    return LinearRing(rotator(np.asarray(geom.coords)))
def _multipolygon(geom, rotator):
    # Rotate each member polygon. Iterate over .geoms: iterating a multi-part
    # geometry directly was removed in Shapely 2.0 (.geoms works in 1.x too).
    return MultiPolygon([_polygon(g, rotator) for g in geom.geoms])
def _multilinestring(geom, rotator):
    # Rotate each member line. Iterate over .geoms: iterating a multi-part
    # geometry directly was removed in Shapely 2.0 (.geoms works in 1.x too).
    return MultiLineString([_linestring(g, rotator) for g in geom.geoms])
| StarcoderdataPython |
4830340 | <filename>LinearSVCKfold.py
"""10-fold cross-validation of a LinearSVC over TF-IDF features.

Loads and preprocesses the dataset for ``targetClass`` via the project's
``utils`` module, then reports the mean fold accuracy.
"""
import numpy as np
import pandas
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
from sklearn.svm import LinearSVC

import utils

# Which label mapping / dataset slice to evaluate.
targetClass = 2
data = utils.getDataset(targetClass)
data = utils.MapLabels(targetClass, data)
data = utils.preprocess(data)
model = LinearSVC()
kf = KFold(n_splits=10, random_state=43, shuffle=True)
vectorizer = TfidfVectorizer(min_df=5,
                             max_df=0.8,
                             sublinear_tf=True,
                             use_idf=True, ngram_range=(1, 2))
accurs = []
for train_index, test_index in kf.split(data):
    X_train, X_test = data.iloc[train_index]['Comment'], data.iloc[test_index]['Comment']
    y_train, y_test = data.iloc[train_index]['Label'], data.iloc[test_index]['Label']
    # Fit the vectorizer on the training fold only, to avoid test-set leakage.
    train_vectors = vectorizer.fit_transform(X_train)
    test_vectors = vectorizer.transform(X_test)
    print("Training Data")
    model.fit(train_vectors, y_train)
    prediction = model.predict(test_vectors)
    accur = accuracy_score(y_test, prediction)
    print("Score ", accur)
    accurs.append(accur)
# BUG FIX: the original used pandas.np.mean; the pandas.np namespace was
# removed in pandas 2.0 — use numpy directly.
print(np.mean(accurs))
1647380 | <reponame>backwardn/practical_cryptography_engineering
#!/usr/bin/env python3
# coding=utf-8
"""
This is a simple example of using the cryptography module to securely encrypt and decrypt data with
AES in GCM mode.
GCM (Galois Counter Mode) is a mode of operation for block ciphers. An AEAD (authenticated
encryption with additional data) mode is a type of block cipher mode that simultaneously encrypts
the message as awell as authenticating it. Additional unencrypted data may also be authenticated.
Additional means of verifying integrity such as HMAC are not necessary.
NOTE: There is a better way to do AES-GCM in Cryptography version 2.0 or newer using the AES-GCM construction which is
composed of the AES block cipher utilizing GCM mode. This version is intended to be compatible with version 1.7
or newer of the Cryptography module.
"""
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
def encrypt(key: bytes, iv: bytes, plaintext: bytes, associated_data: bytes) -> (bytes, bytes):
    """AES-GCM encrypt *plaintext* under *key*/*iv*, authenticating *associated_data*.

    The associated data is authenticated but NOT encrypted and must be passed
    unchanged to ``decrypt``. Returns a (ciphertext, tag) pair; the 128-bit
    tag is verified on decryption.
    """
    cipher = Cipher(algorithms.AES(key), modes.GCM(iv), backend=default_backend())
    encryptor = cipher.encryptor()
    encryptor.authenticate_additional_data(associated_data)
    # GCM is a stream-like mode, so no padding is required.
    ciphertext = encryptor.update(plaintext) + encryptor.finalize()
    return ciphertext, encryptor.tag
def decrypt(key: bytes, iv: bytes, ciphertext: bytes, associated_data: bytes, tag: bytes) -> bytes:
    """AES-GCM decrypt *ciphertext* and verify both *tag* and *associated_data*.

    Raises ``cryptography.exceptions.InvalidTag`` on authentication failure.
    """
    cipher = Cipher(algorithms.AES(key), modes.GCM(iv, tag), backend=default_backend())
    decryptor = cipher.decryptor()
    # The same AAD used at encryption time must be supplied, or finalize() fails.
    decryptor.authenticate_additional_data(associated_data)
    return decryptor.update(ciphertext) + decryptor.finalize()
if __name__ == '__main__':
    # The message we wish to transmit in a secret fashion
    data = b"a secret message!"
    # Associated data which will be authenticated but not encrypted (must be passed into both encrypt and decrypt)
    aad = b"authenticated but not encrypted payload"
    # Generate a random 256-bit key which must be kept secret
    key = os.urandom(32)
    # Generate a random 96-bit nonce. NIST recommends a 96-bit IV length for performance, but can be up to 2^64 - 1 bits
    nonce = os.urandom(12)
    # Encrypt a secret message
    cipher_text, tag = encrypt(key, nonce, data, aad)
    # Decrypt the secret message - if GCM fails to authenticate, an InvalidTag exception is raised
    decrypted = decrypt(key, nonce, cipher_text, aad, tag)
    # So the nonce, aad, and tag all get sent unencrypted along with the encrypted ciphertext.
    print('plain text: {!r}'.format(data))
    print('ciphertext: {!r}'.format(cipher_text))
    print('decrypted : {!r}'.format(decrypted))
| StarcoderdataPython |
127445 | from django import forms
class ClientErrorForm(forms.Form):
    """Validates client-side (JavaScript) error reports.

    All fields are optional so that partial reports are still accepted.
    """
    msg = forms.CharField(max_length=1024, required=False)  # error message text
    url = forms.CharField(max_length=256, required=False)   # page URL where the error occurred
    # NOTE(review): max_length=4 caps the line field at "9999" — confirm intended.
    line = forms.CharField(max_length=4, required=False)
| StarcoderdataPython |
120701 | <filename>desktop_local_tests/macos/test_macos_packet_capture_disrupt_reorder_services.py<gh_stars>100-1000
from desktop_local_tests.local_packet_capture_test_case_with_disrupter import LocalPacketCaptureTestCaseWithDisrupter
from desktop_local_tests.macos.macos_reorder_services_disrupter import MacOSDNSReorderServicesDisrupter
class TestMacOSPacketCaptureDisruptReorderServices(LocalPacketCaptureTestCaseWithDisrupter):
    '''Summary:

    Tests whether traffic leaving the user's device leaks outside of the VPN tunnel when the network
    service order is changed.

    Details:

    This test will connect to VPN then swap the priority of the primary and secondary network
    services. The test looks for leaking traffic once the service order is changed.

    Discussion:

    It's not 100% clear if, in the real world, services can change their order without user
    involvement. It is still however a good stress test of the application.

    Weaknesses:

    Packet capture tests can be noisy. Traffic can be detected as a leak but in actual fact may not
    be. For example, traffic might go to a server owned by the VPN provider to re-establish
    connections. In general this test is best used for manual exploring leaks rather than for
    automation.

    Scenarios:

    Requires two active network services.

    TODO:

    Consider a variant which changes the network "Location". This is much more likely to be
    something a user might do.
    '''

    def __init__(self, devices, parameters):
        # Inject the service-reordering disrupter; the base test case drives
        # the connect / disrupt / capture sequence.
        super().__init__(MacOSDNSReorderServicesDisrupter, devices, parameters)
| StarcoderdataPython |
1605243 | <filename>google-spiders/g4spiders-4in2-run-from-script/google_email_spider_4in2/google_email_spider_4in2/spiders/__init__.py<gh_stars>1-10
# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
import os
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
| StarcoderdataPython |
150054 | <reponame>noisy/-python-social-auth-steemconnect
from setuptools import setup, find_packages
# Package metadata for the SteemConnect python-social-auth backend.
setup(
    name='social-auth-steemconnect',
    version='0.0.3',
    packages=find_packages(),
    author='Krzysztof @noisy Szumny',
    author_email='<EMAIL>',
    description='SteemConnect backend for python-social-auth.',
    # NOTE(review): README.md is read eagerly when setup.py runs; source
    # distributions must ship it or installation fails — confirm MANIFEST.
    long_description=open('README.md').read(),
    license='LICENSE',
    url='https://github.com/wise-team/python-social-auth-steemconnect',
    keywords='django social auth oauth2 social-auth steem steemconnect steemit',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.4',
    ],
    install_requires=[
        'python-social-auth',
    ]
)
| StarcoderdataPython |
1671046 | <gh_stars>0
# Extracting characters with []
"""
Append [] after a string name to extract the character at that offset.
"""
letter = "Hello World"
print(letter[0])
# Caveat: strings are immutable.
# BUG FIX: the original assigned letter[0] = 'G' unguarded, so the script
# died with an unhandled TypeError before reaching the final print. Catch
# the exception to actually demonstrate immutability.
try:
    letter[0] = 'G'
except TypeError as exc:
    print(f"Strings are immutable: {exc}")
print(letter)
1787311 | import os
import sys
import requests as r
import time
import json
from signal import signal, SIGINT
import threading
from datetime import datetime
import math
import subprocess
import multiprocessing
from multiprocessing import Manager, Value
from ctypes import c_char_p
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
##globals##
threads = 8  # number of parallel download slices / worker processes
threadL = []  # worker Process handles, one per slice
orderAddr = []
order = []
startTimes = []
mainThread = None
# NOTE(review): Manager() starts a manager process at import time — importing
# this module has that side effect.
manager = Manager()
totalAddr = manager.Value(c_char_p, '')  # host .onion address, shared across processes
totalStartTime = Value('d', 0.0)
content = []
for i in range(threads):
    content.append(b'')#inits list with threads number of empty byte arrays
mode = '' #user, provider, or validator
fileName = ''
encKey = None
encNonce = None
#######################################################################################################################################
#######################################################encryption######################################################################
#######################################################################################################################################
def genKey():
    """Generate a fresh random 256-bit key and nonce and persist them to
    ``key.txt`` and ``nonce.txt`` in the working directory.

    The original truncated each file with a text-mode open, closed it, then
    reopened it in binary mode; a single ``with open(..., 'wb')`` both
    truncates and writes, and cannot leak the handle.
    """
    with open("key.txt", 'wb') as f:
        f.write(os.urandom(32))
    with open("nonce.txt", 'wb') as f:
        f.write(os.urandom(32))
def getKey(keyFile="", nonceFile=""):
    """Read and return ``[key, nonce]`` bytes from the two given files."""
    with open(keyFile, 'rb') as handle:
        key = handle.read()
    with open(nonceFile, 'rb') as handle:
        nonce = handle.read()
    return [key, nonce]
def enc(key=b"", nonce=b"", mess=b""):
    """AES-GCM encrypt *mess* under *key*/*nonce*; returns ciphertext only
    (the GCM tag is not finalized or returned)."""
    cipher = Cipher(algorithms.AES(key), modes.GCM(nonce), default_backend())
    return cipher.encryptor().update(mess)
def dec(key=b"", nonce=b"", mess=b""):
    """AES-GCM decrypt *mess* under *key*/*nonce*; no tag verification is
    performed (decryptor is never finalized)."""
    cipher = Cipher(algorithms.AES(key), modes.GCM(nonce), default_backend())
    return cipher.decryptor().update(mess)
# Regenerate the key/nonce files as an import-time side effect.
genKey()
def resetHost(resetMode):
    """Tear down worker processes and scratch files, returning the host to a
    clean state.

    Parameters:
        resetMode - when True, additionally clear the mode/address state and
                    remove the order/restart scratch files.
    """
    global threadL
    global orderAddr
    global order
    global startTimes
    global totalStartTime
    global mode
    global fileName
    global totalAddr
    for i in threadL:
        try: #May or may not already be deleted
            i.terminate()
        except: pass
    threadL = []
    orderAddr = []
    order = []
    startTimes = []
    totalStartTime.value = 0.0
    if resetMode == True:
        mode = ''
        totalAddr.value = ''
        try:
            os.system('rm restart.txt totalOrderAddress.txt totalOrder.txt onionShareOrder.txt onionshare*.txt order.txt image.zip*.txt >/dev/null 2>&1')
        except:
            pass
        fileName = ''
    # new memory and command line reset
    os.system("reset")
    os.system("ps aux > ps.txt")
    f = open("ps.txt", 'r')
    line = f.readline()
    while line != '':
        if line.find('onionshare') != -1:
            try:
                # BUG FIX: the original concatenated 'kill -9' directly onto
                # the PID (producing e.g. "kill -91234"); a separating space
                # is required for the command to be valid.
                os.system('kill -9 ' + line.split()[1] + ' >/dev/null 2>&1')
            except:
                pass
        line = f.readline()
    f.close()
    try:
        os.system('rm ps.txt')
    except:
        pass
def failingCheck():
    """Background watchdog: every two minutes, ask the host which share
    downloads have failed and restart the corresponding worker processes.

    BUG FIXES vs. the original (all previously swallowed by the blanket
    ``except: pass``, which made the watchdog a silent no-op on error):
      * ``fails.contetn`` -> ``fails.content`` (typo);
      * ``f = open(...).write(...); f.close()`` called .close() on the int
        returned by write();
      * ``int(line).rstrip()`` called .rstrip() on an int instead of the line.
    """
    global threadL
    while True:
        time.sleep(120)
        positions = []
        try:
            session = r.session()
            session.proxies = {}
            session.proxies['http'] = 'socks5h://localhost:9050'
            session.proxies['https'] = 'socks5h://localhost:9050'
            fails = session.get(totalAddr.value + '/reqFails.txt')
            with open('reqFails.txt', 'wb') as f:
                f.write(fails.content)
            with open('reqFails.txt', 'r') as f:
                for line in f.readlines():
                    positions.append(int(line.rstrip()))
            with open('totalOrder.txt', 'r') as f:
                lines = f.readlines()
            # Replace each failed worker with a fresh process for the same slice.
            for pos in positions:
                threadL[pos].terminate()
                threadL[pos] = multiprocessing.Process(target=getShare, args=(lines[pos].rstrip(), pos,))
                threadL[pos].daemon = True
                threadL[pos].start()
        except:
            pass
#######################################################################################################################################
########################################################request########################################################################
#######################################################################################################################################
def getShare(address, iter):
    """Download one encrypted file slice from *address* over Tor, decrypt it
    with the on-disk key/nonce, and write it to ``image.txt<iter>.txt``.

    Runs inside a worker process; terminates its own process handle when done.
    """
    global content
    session = r.session()
    session.proxies = {}
    # Route all traffic through the local Tor SOCKS proxy.
    session.proxies['http'] = 'socks5h://localhost:9050'
    session.proxies['https'] = 'socks5h://localhost:9050'
    res = session.get(address) #download file
    #content[iter] = res.content #append this slice's content to total content list
    ########################get key and nonce##################################
    [key, nonce] = getKey("key.txt","nonce.txt")
    ###########################################################################
    f = open("image.txt" + str(iter) + ".txt","wb" )
    f.write(dec(key, nonce, res.content))
    f.close()
    #print(type("-----Received content from thread " + iter))
    #for i in range(threads):
    #    print(len(content[i]))
    #This thread unneeded now, can safely kill it
    killMe(iter)
def getShareWithoutIter(address):
    """Download the order archive from *address* over Tor, save it as
    ``totalOrder.zip`` and unzip it in the working directory.
    """
    session = r.session()
    session.proxies = {}
    # Route the request through the local Tor SOCKS proxy.
    session.proxies['http'] = 'socks5h://localhost:9050'
    session.proxies['https'] = 'socks5h://localhost:9050'
    res = session.get(address) #download file
    #########save the zip and unzip it#########
    open("totalOrder.zip", 'wb').write(res.content)
    # NOTE(review): the sleep presumably waits for the write to settle before
    # shelling out to unzip — confirm it is actually needed.
    time.sleep(5)
    os.system("unzip -o totalOrder.zip")
###########################################
def createThreadsReq():
    """Polling state machine for the requester side.

    Step 1: poll ``stat.txt`` until it contains the host's .onion address,
            then download ``totalOrder.zip`` (which yields totalOrder.txt).
    Step 2: once ``totalOrder.txt`` exists, spawn one download process per
            listed slice address.
    Step 3: once every ``image.txt<i>.txt`` slice file exists, notify the
            host, concatenate the slices into ``image.txt`` and reset.
    """
    global totalAddr
    global content
    global mode
    flag = True
    flagTwo = True
    flagThree = True
    while flag:
        time.sleep(5)
        #Addresses written to file (Step 2)
        if os.path.isfile("totalOrder.txt") and flagTwo:
            print("Downloading file from host. This may take a while...")
            flagTwo = False
            #Need to make a thread for each address
            f = open("totalOrder.txt", 'r')
            lines = f.readlines()
            f.close()
            j = 0
            for line in lines:
                #t = threading.Thread(target=getShare,args=[line.strip('\n'), j])
                t = multiprocessing.Process(target=getShare,args=(line.strip('\n'), j,))
                t.daemon = True
                threadL.append(t)
                t.start()
                j += 1
        #Every slot in content has been written to (Step 3)
        allVal = True
        for i in range(0,threads):
            if os.path.isfile("image.txt" + str(i) + ".txt"):
                content[i] = True
            else:
                allVal = False
                break
        if allVal:
            getTime("Finished")
            session = r.session()
            session.proxies = {}
            session.proxies['http'] = 'socks5h://localhost:9050'
            session.proxies['https'] = 'socks5h://localhost:9050'
            session.get(totalAddr.value + '/finish') #tell server finished downloading
            # Stitch the decrypted slices together in index order.
            totalFile = open('image.txt', 'wb')
            for i in range(0, threads):
                iterFile = open('image.txt' + str(i) + '.txt', 'rb')
                totalFile.write(iterFile.read())
                iterFile.close()
            totalFile.close()
            flag = False
            resetReq()
        #totalOrder.txt not yet received (Step 1)
        elif flagThree:
            statF = open("stat.txt", 'r')
            totalAddr.value = statF.readline().rstrip()
            statF.close()
            #if file ready to be received from worker. totalAddr will hold the .onion address
            if totalAddr.value != '' and totalAddr.value != 'Executing' and totalAddr.value != 'Ready':
                flagThree = False
                getTime("Starting")
                getShareWithoutIter(totalAddr.value) #download totalOrder.txt
def resetReq():
    """Reset the requester-side state after a completed download: clear the
    per-slice flags, terminate workers, remove scratch files and stray
    onionshare processes, and truncate ``stat.txt``.
    """
    global content
    global threadL
    global mode
    global mainThread
    global totalAddr
    content = []
    for i in range(threads):
        content.append(False)
        #content.append(b'')#inits list with threads number of empty byte arrays
    #kill all threads before resetting
    for i in threadL:
        try: #May or may not already be deleted
            #i._delete()
            i.terminate()
        except: pass
    threadL = []
    mainThread = None
    totalAddr.value = ''
    mode = ''
    try:
        os.system('rm totalOrder.txt onionShareOrder.txt image.zip*.txt')
    except:
        pass
    #new memory and command line reset
    os.system("reset")
    os.system("ps aux > ps.txt")
    f = open("ps.txt", 'r')
    line = f.readline()
    while line != '':
        if line.find('onionshare') != -1:
            try:
                os.system('kill ' + line.split()[1])
            except:
                pass
        line = f.readline()
    f.close()
    # Truncate stat.txt so stale addresses are not re-read on the next run.
    f = open('stat.txt', 'w')
    f.close()
    try:
        os.system('rm ps.txt')
    except:
        pass
#kill specified thread
def killMe(iter):
    """Terminate the worker process registered at index *iter* in ``threadL``;
    silently ignore processes that are already gone."""
    #threadL[iter]._delete()
    try:
        threadL[iter].terminate()
    except:
        pass
#######################################################################################################################################
#####################################################controller########################################################################
#######################################################################################################################################
def getTime(mess):
    """Append a timestamped *mess* line to ``log.txt``.

    NOTE(review): the ``[24:]`` slice assumes the first line of log.txt starts
    with a 24-character timestamp whose suffix (presumably a timezone tail) is
    reused for new entries — confirm against the log file's actual format.
    """
    now = datetime.now()
    end = open('log.txt', 'r').readline().rstrip()[24:]
    #print(now.strftime("%a %b %d %Y %H:%M:%S" + end))
    # NOTE: the local name ``time`` shadows the imported ``time`` module
    # inside this function.
    time = now.strftime("%a %b %d %Y %H:%M:%S" + end)
    f = open('log.txt', 'a')
    f.write('\n' + time + " "+ mess)
    f.close()
def reqController():
    """Requester entry point: start the failure watchdog in a daemon process,
    run the download state machine to completion, then stop the watchdog."""
    #failThread = threading.Thread(target=failingCheck)
    failThread = multiprocessing.Process(target=failingCheck)
    failThread.daemon = True
    failThread.start()
    createThreadsReq()
    try: #May or may not already be deleted
        #failThread._delete()
        failThread.terminate()
    except: pass
if __name__ == '__main__':
    # Entry point: clean any stale state, then run the download controller.
    resetHost(False)
    reqController()
| StarcoderdataPython |
3379272 | ####################################
## batch code for WF simulation
####################################
import sys, array, os, getpass
from subprocess import call
import subprocess as subp
import time
import math as math
from subprocess import Popen, PIPE
# Print usage and bail out when called without the expected arguments.
if len(sys.argv)<2:
    print("========= Syntax ========")
    print("python BatchSimulation.py ....")
    print("<Output path (abs.)>")
    print("<number of jobs>")
    print("<number of events in each job>")
    print("<enable PMT after pulses ?(0 for disable)>")
    print("<enable S2 after pulses ?(0 for disable)>")
    print("<photon number lower>")
    print("<photon number upper>")
    print("<electron number lower>")
    print("<electron number upper>")
    print("<If enable S1-S2 correlation (0 for no, 1 for yes)>")
    print("<If use Public node (0 for no(xenon1t nodes); 1 for yes; 2 for kicp nodes)>")
    exit()
# Positional command-line arguments (see usage block above).
OutputGeneralPath = sys.argv[1]
NumJobs = int(sys.argv[2])
NumEvents = int(sys.argv[3])
PMTAfterpulseFlag = int(sys.argv[4])
S2AfterpulseFlag = int(sys.argv[5])
PhotonNumLower = int(sys.argv[6])
PhotonNumUpper = int(sys.argv[7])
ElectronNumLower = int(sys.argv[8])
ElectronNumUpper = int(sys.argv[9])
IfEnableS1S2Correlation = int(sys.argv[10])
IfUsePublicNodes = int(sys.argv[11])
# Throttle: fewer concurrent jobs on public nodes, more on dedicated ones.
MaxNumJob = 64
if not IfUsePublicNodes:
    MaxNumJob=200
##### Start batching #########
CurrentPath = os.getcwd()
print (CurrentPath)
CurrentUser = getpass.getuser()
# For each job: build a per-run output directory and SLURM submit script,
# then submit it once the user's running-job count drops below MaxNumJob.
for i in range(NumJobs):
    RunString = "%06d" % i
    # create folder
    OutputPath = OutputGeneralPath + "/" + RunString
    if os.path.exists(OutputPath):
        subp.call("rm -r "+OutputPath, shell=True)
    subp.call("mkdir -p "+OutputPath, shell=True)
    # define filenames
    SubmitFile = OutputPath+"/submit_"+ RunString + ".sh"
    SubmitOutputFilename = OutputPath+"/submit_"+ RunString + ".log"
    SubmitErrorFilename = OutputPath+"/submit_"+ RunString + ".log"
    # create the basic submit
    subp.call("echo '#!/bin/bash\n' >> "+SubmitFile, shell=True)
    subp.call("echo '#SBATCH --output="+SubmitOutputFilename+"' >> "+SubmitFile, shell=True)
    subp.call("echo '#SBATCH --error="+SubmitErrorFilename+"' >> "+SubmitFile, shell=True)
    subp.call("echo '#SBATCH --time=03:59:00' >> "+SubmitFile, shell=True)
    subp.call("echo '#SBATCH --account=pi-lgrandi' >> "+SubmitFile, shell=True)
    # Non-public runs target the dedicated xenon1t or kicp partitions.
    if IfUsePublicNodes==0:
        subp.call("echo '#SBATCH --qos=xenon1t' >> "+SubmitFile, shell=True)
        subp.call("echo '#SBATCH --partition=xenon1t\n' >> "+SubmitFile, shell=True)
    elif IfUsePublicNodes==2:
        subp.call("echo '#SBATCH --qos=xenon1t-kicp' >> "+SubmitFile, shell=True)
        subp.call("echo '#SBATCH --partition=kicp\n' >> "+SubmitFile, shell=True)
    Command = CurrentPath+"/./run_fax.sh "+str(PhotonNumLower)+" "+str(PhotonNumUpper)+" "+str(ElectronNumLower)+" "+str(ElectronNumUpper)+" "+str(PMTAfterpulseFlag)+" "+str(S2AfterpulseFlag)+" "+str(NumEvents)+" "+OutputGeneralPath+" "+RunString+" "+str(IfEnableS1S2Correlation)
    subp.call("echo '"+Command+"\n' >> "+SubmitFile, shell=True)
    SubmitPath = OutputPath
    #submit
    IfSubmitted=0
    while IfSubmitted==0:
        Partition = "sandyb" # public
        if not IfUsePublicNodes:
            Partition = "xenon1t"
        elif IfUsePublicNodes==2:
            Partition = "kicp"
        # Count the user's queued/running jobs on the partition (squeue | wc -l).
        p1 = Popen(["squeue","--partition="+Partition, "--user="+CurrentUser], stdout=PIPE)
        p2 = Popen(["wc", "-l"], stdin=p1.stdout, stdout=PIPE)
        p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits.
        output = p2.communicate()[0]
        # NOTE(review): this shell call repeats the same count just for its
        # exit status; its stdout also prints to the terminal — confirm intended.
        Status=subp.call("squeue --partition="+Partition+" --user="+CurrentUser +" | wc -l", shell=True)
        Output=int(output)
        #print(Status)
        print("Current job running number "+str(Output))
        if Status==0 and Output<MaxNumJob:
            #sbatch it
            subp.call("cd "+SubmitPath+";sbatch "+SubmitFile+";cd -", shell=True)
            IfSubmitted=1
            time.sleep(2.0)
        else:
            time.sleep(30)
| StarcoderdataPython |
3250114 | <gh_stars>0
# All rights reserved by forest fairy.
# You cannot modify or share anything without sacrifice.
# If you don't agree, keep calm and don't look at code bellow!
__author__ = "VirtualV <https://github.com/virtualvfix>"
__date__ = "13/10/17 20:54"
from config import CONFIG
from optparse import OptionGroup
from libs.core.options import Options
class Mods(Options):
    """
    Mods control options.
    """
    def __init__(self):
        super(Mods, self).__init__()
    def group(self, parser):
        """Return the optparse OptionGroup holding the Mods-related flags."""
        group = OptionGroup(parser, 'Mods')
        group.add_option('--enable-mods', dest='enable_mods', action="store_true", default=False,
                         help='Enable Mods if available. Mods will be ON before and OFF after testing.')
        return group
    @property
    def priority(self):
        # Ordering weight relative to the other Options subclasses.
        return 500
    def validate(self, options):
        """Apply the parsed options to the global CONFIG."""
        # enable mods
        if options.enable_mods:
            CONFIG.SYSTEM.MODS_ENABLE = True
| StarcoderdataPython |
168673 | """
This plugin adds support for the "Ashata Relay Board" family of USB controlled relay boards as a device. This device
can then be accessed by the ufotest system through the device manager. Relay channels can be switched on and off
individually. The Ashata Relay Boards are compatible with linux by using the system library and corresponding
command line tool "usbrelay". Ashata Relay Boards come with either 2, 4 or 8 controllable relays. The specifics of
the board used with this plugin can the defined in the ufotest config file. For more detailed information consult
the README.
"""
import time
import types
from ufotest.hooks import Action, Filter
from ufotest.devices import DeviceManager, AbstractDevice, Expose
from ufotest.util import run_command
# == IMPLEMENTING THE DEVICE
class AshataRelayBoard(AbstractDevice):
    """USB-controlled relay board driven through the ``usbrelay`` CLI.

    Relay channels are addressed as ``<base_name>_<index>`` with 1-based
    indices up to ``relay_count``.
    """

    name = 'ashata_relay_board'
    description = 'This is a USB controlled relay board with 4 Relay channels.'

    def __init__(self, config, relay_count: int, base_name: str):
        super(AshataRelayBoard, self).__init__()
        self.config = config
        self.relay_count = relay_count
        self.base_name = base_name
        # Valid 1-based channel indices for this board configuration.
        self.allowed_indices = range(1, self.relay_count + 1)

    def set_up(self):
        pass

    def tear_down(self):
        pass

    @Expose(name='activate_ashata_relay',
            description='Activates one of the relays identified by its COM index, such that it conducts electricity',
            args={'index': 'The integer index of the relay, starting at 1'})
    def activate_relay(self, index: int):
        """Switch relay *index* on; returns the usbrelay exit code."""
        # Raises KeyError if the index is not valid for this board config.
        self.check_index(index)
        # The board is controlled through the usbrelay command line tool.
        command = f'usbrelay {self.base_name}_{index}=1'
        exit_code, output = run_command(command)
        return exit_code

    @Expose(name='deactivate_ashata_relay',
            description='Deactivates one of the relays identified by its COM index, such that it does not conduct',
            args={'index': 'The integer index of the relay, starting at 1'})
    def deactivate_relay(self, index: int):
        """Switch relay *index* off; returns the usbrelay exit code."""
        # Raises KeyError if the index is not valid for this board config.
        self.check_index(index)
        # The board is controlled through the usbrelay command line tool.
        command = f'usbrelay {self.base_name}_{index}=0'
        exit_code, output = run_command(command)
        return exit_code

    @Expose(name='hard_reset_camera',
            description='Hard resets the camera by activating and deactivating the power line connection',
            args={})
    def hard_reset_camera(self):
        """Power-cycle the camera relay: off, wait one second, back on."""
        camera_index = self.config.get_ashata_relay_board_camera_index()
        self.deactivate_relay(camera_index)
        time.sleep(1)
        self.activate_relay(camera_index)

    # -- Utility methods

    def check_index(self, index: int):
        """Raise KeyError when *index* is not a valid relay channel."""
        if index not in self.allowed_indices:
            # BUG FIX: the original joined the range of ints directly
            # (",".join(self.allowed_indices)), which raises TypeError while
            # building the error message; stringify each index first.
            valid = ",".join(str(i) for i in self.allowed_indices)
            raise KeyError(
                f'The index {index} does not identify a valid COM port for the Ashata relay board! The configured '
                f'number of relays is {self.relay_count}. Please use one of the valid indices: '
                f'{valid}.'
            )
# == IMPLEMENTING THE HOOKS
# -- Modifying the config
# Fallback values used when the ufotest config file does not override them.
DEFAULT_BASE_NAME = 'QAAMZ'
DEFAULT_RELAY_COUNT = 4
DEFAULT_CAMERA_INDEX = 1
def get_base_name(config):
    """Return the configured usbrelay device name prefix (default QAAMZ)."""
    return config.get_data_or_default(
        ('ashata_relay_board', 'base_name'),
        DEFAULT_BASE_NAME
    )
def get_relay_count(config):
    """Return the configured number of relay channels as an int."""
    return int(config.get_data_or_default(
        ('ashata_relay_board', 'relay_count'),
        DEFAULT_RELAY_COUNT
    ))
def get_camera_index(config):
    """Return the relay index wired to the camera power line as an int."""
    return int(config.get_data_or_default(
        ('ashata_relay_board', 'camera_index'),
        DEFAULT_CAMERA_INDEX
    ))
@Action('pre_prepare', 10)
def register_custom_config_methods(config, namespace):
    # Bind the module-level getters onto the config object so other hooks can
    # call e.g. config.get_ashata_relay_board_base_name().
    setattr(config, 'get_ashata_relay_board_base_name', types.MethodType(get_base_name, config))
    setattr(config, 'get_ashata_relay_board_relay_count', types.MethodType(get_relay_count, config))
    setattr(config, 'get_ashata_relay_board_camera_index', types.MethodType(get_camera_index, config))
# -- Register the device itself
@Action('register_devices', 10)
def register_relay_board_device(config, device_manager: DeviceManager):
    """Instantiate the relay board from the config and register it."""
    relay_count = config.get_ashata_relay_board_relay_count()
    base_name = config.get_ashata_relay_board_base_name()
    device = AshataRelayBoard(config, relay_count, base_name)
    device_manager.register_device(device)
# -- Create a new CLI command
| StarcoderdataPython |
3322375 | <reponame>nhtoshiaki/Infeed
# Generated by Django 2.2 on 2019-05-07 19:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (2019-05-07).

    Introduces the Item, List and User models, rewires Source onto the new
    List model and drops the legacy SourceItem/SourceList models.
    Auto-generated: do not hand-edit operations.
    """
    dependencies = [
        ('feed', '0004_auto_20190429_2010'),
    ]
    operations = [
        migrations.CreateModel(
            name='Item',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('subtitle', models.CharField(max_length=200)),
                ('pub_date', models.DateTimeField()),
                ('url', models.CharField(max_length=200)),
                ('author', models.CharField(max_length=200)),
                ('summary', models.TextField()),
                ('readed', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='List',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('login', models.CharField(max_length=200)),
                # NOTE(review): password is a plain CharField — presumably
                # hashed elsewhere; confirm before relying on it.
                ('password', models.CharField(max_length=200)),
            ],
        ),
        migrations.AlterField(
            model_name='source',
            name='source_list',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='feed.List'),
        ),
        migrations.DeleteModel(
            name='SourceItem',
        ),
        migrations.DeleteModel(
            name='SourceList',
        ),
        migrations.AddField(
            model_name='list',
            name='list_user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='feed.User'),
        ),
        migrations.AddField(
            model_name='item',
            name='item_source',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='feed.Source'),
        ),
    ]
| StarcoderdataPython |
3259453 | """Tests for the :mod:`campy.graphics.gevents` module."""
from campy.graphics.gevents import GEvent, GMouseEvent, EventType, EventClassType
def test_create_empty_event():
    """A default-constructed GEvent is a null event with no event type."""
    event = GEvent()
    assert event.event_class == EventClassType.NULL_EVENT
    assert event.event_type is None
def test_mouse_clicked_event():
    """A GMouseEvent carries its class, type and click coordinates."""
    event = GMouseEvent(EventType.MOUSE_CLICKED, x=100, y=50)
    assert event.event_class == EventClassType.MOUSE_EVENT
    assert event.event_type == EventType.MOUSE_CLICKED
    assert event.x == 100
    assert event.y == 50
| StarcoderdataPython |
143612 | import os
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
from reformer.model import ReforBertLM, Reformer
from finetuning.pretrained_model import ReforBertPreTrainedModel
class ReforBertForQA(ReforBertPreTrainedModel):
    """Extractive QA head (SQuAD-style start/end span logits) on top of a
    ReforBert (Reformer-based) language model."""
    def __init__( self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        # self.device = config.device
        self.reforBert = ReforBertLM(
            num_tokens=config.vocab_size,
            dim=config.embedding_size,
            depth=config.depth,
            heads=config.heads,
            max_seq_len=config.max_seq_len,
            causal=True
        )
        # Projects each token embedding to two logits: span start and end.
        self.qa_outputs = nn.Linear(config.embedding_size, 2)
        self.init_weights()
    def from_pretrained(self, pretrained_model_path):
        """Load pretrained ReforBert weights from a checkpoint file onto CPU.

        Silently does nothing when the path does not exist.
        """
        device = torch.device("cpu")
        if os.path.isfile(pretrained_model_path):
            checkpoint = torch.load(pretrained_model_path, map_location= device)
            self.reforBert.load_state_dict(checkpoint['model_state_dict'])
    def forward(
        self,
        input_ids=None,
        token_type_ids = None, # segment ids
        start_positions=None,
        end_positions=None,
    ):
        """Return (start_logits, end_logits), prefixed by the loss when
        start/end positions are provided."""
        # 1. Run the inputs through reforBert.
        outputs, _, _ = self.reforBert(input_ids,token_type_ids)
        # 2. Pass the reforBert output through the linear classification layer.
        logits = self.qa_outputs(outputs)
        # 3. Split the two logit channels into start_logits and end_logits.
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
        # (start_logits, end_logits,) #+ outputs[1:]
        # NOTE: an error occurred here before because reforBert's output is
        # not a tuple, so outputs[1:] cannot be appended.
        outputs = (start_logits, end_logits,) #+ outputs[1:]
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)
            # Loss function: out-of-range positions are ignored.
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            # Losses for the predicted start and end positions.
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            # Total loss is the mean of both span losses.
            total_loss = (start_loss + end_loss) / 2
            outputs = (total_loss,) + outputs
        return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)
| StarcoderdataPython |
3296178 | # Copyright (c) 2018, <NAME>. All rights reserved.
# ISC License (ISCL) - see LICENSE file for details.
name = "pyrap"
from .pyrap import chkdir
from .pyrap import process
| StarcoderdataPython |
179050 | # Copyright (c) 2013 <NAME> <<EMAIL>>
#
# This file is part of OctoHub.
#
# OctoHub is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
import simplejson as json
class ResponseError(Exception):
    """Accessible attributes: error
    error (AttrDict): Parsed error response
    """
    def __init__(self, error):
        Exception.__init__(self, error)
        self.error = error
    def __str__(self):
        # Pretty-print the parsed API error as indented JSON.
        return json.dumps(self.error, indent=1)
class OctoHubError(Exception):
    # Generic OctoHub failure not tied to a parsed API error response.
    pass
| StarcoderdataPython |
1657189 | <reponame>dheera/termgraphics
from setuptools import setup
# Packaging metadata: a single-module distribution depending only on numpy.
setup(
    name='termgraphics',
    version='1.0.1',
    install_requires=['numpy'],
    py_modules=['termgraphics']
)
| StarcoderdataPython |
# For each of t test cases, read integers a and b and print whether b
# divides a**65.
t = int(input())
for _ in range(t):
    s = input().split(' ')
    a = int(s[0])
    b = int(s[1])
    # pow with a modulus computes a**65 % b directly, without materializing
    # the (potentially enormous) intermediate power.
    if pow(a, 65, b) == 0:
        print("Yes")
    else:
        print("No")
1647619 | # Apache License Version 2.0
#
# Copyright (c) 2021., Redis Labs Modules
# All rights reserved.
#
from redisbench_admin.utils.remote import (
PERFORMANCE_RTS_HOST,
PERFORMANCE_RTS_PORT,
PERFORMANCE_RTS_AUTH,
PERFORMANCE_RTS_USER,
REDIS_SOCKET_TIMEOUT,
REDIS_HEALTH_CHECK_INTERVAL,
REDIS_AUTH_SERVER_HOST,
REDIS_AUTH_SERVER_PORT,
)
def create_grafana_api_arguments(parser):
    """Register the Grafana-API CLI flags (listen port, Redis connection and
    auth-server location) on *parser*, defaulting from the redisbench_admin
    environment constants. Returns the same parser for chaining."""
    parser.add_argument("--port", type=str, default=5000)
    parser.add_argument("--redis_host", type=str, default=PERFORMANCE_RTS_HOST)
    parser.add_argument("--redis_port", type=int, default=PERFORMANCE_RTS_PORT)
    parser.add_argument("--redis_pass", type=str, default=PERFORMANCE_RTS_AUTH)
    parser.add_argument("--redis_user", type=str, default=PERFORMANCE_RTS_USER)
    parser.add_argument(
        "--redis_health_check_interval", type=int, default=REDIS_HEALTH_CHECK_INTERVAL
    )
    parser.add_argument(
        "--redis_socket_connect_timeout", type=int, default=REDIS_SOCKET_TIMEOUT
    )
    parser.add_argument("--auth_server_host", type=str, default=REDIS_AUTH_SERVER_HOST)
    parser.add_argument("--auth_server_port", type=int, default=REDIS_AUTH_SERVER_PORT)
    return parser
| StarcoderdataPython |
89261 | # coding=utf-8
import datetime
import logging
import traceback
from config import config
def parse_frequency(s):
    """Parse a frequency description such as ``"every 6 hours"``.

    Returns ``(count, unit)`` — e.g. ``(6, "hours")`` — or ``(None, None)``
    for ``None`` / ``"never"``.
    """
    if s is None or s == "never":
        return None, None
    _keyword, count, unit = s.split()
    return int(count), unit
class DefaultScheduler(object):
    """Periodic and queued task scheduler for the Plex plugin runtime.

    Tasks register via ``register``; persistent state lives in the plugin's
    global ``Dict`` store and two worker loops run on plugin ``Thread``s:
    one draining the one-shot queue, one firing periodic tasks.
    """
    queue_thread = None
    scheduler_thread = None
    running = False
    registry = None
    def __init__(self):
        self.queue_thread = None
        self.scheduler_thread = None
        self.running = False
        self.registry = []
        self.tasks = {}
        self.init_storage()
    def init_storage(self):
        """Ensure the persistent task/queue storage structure exists."""
        if "tasks" not in Dict:
            Dict["tasks"] = {"queue": []}
            Dict.Save()
        if "queue" not in Dict["tasks"]:
            Dict["tasks"]["queue"] = []
    def get_task_data(self, name):
        """Return the persisted data dict of task *name* (None if absent)."""
        if name not in Dict["tasks"]:
            raise NotImplementedError("Task missing! %s" % name)
        if "data" in Dict["tasks"][name]:
            return Dict["tasks"][name]["data"]
    def clear_task_data(self, name=None):
        """Reset persisted data for one task, or for all tasks when name is None."""
        if name is None:
            # full clean
            Log.Debug("Clearing previous task data")
            if Dict["tasks"]:
                for task_name in Dict["tasks"].keys():
                    if task_name == "queue":
                        Dict["tasks"][task_name] = []
                        continue
                    Dict["tasks"][task_name]["data"] = {}
                    Dict["tasks"][task_name]["running"] = False
                Dict.Save()
            return
        if name not in Dict["tasks"]:
            raise NotImplementedError("Task missing! %s" % name)
        Dict["tasks"][name]["data"] = {}
        Dict["tasks"][name]["running"] = False
        Dict.Save()
        Log.Debug("Task data cleared: %s", name)
    def register(self, task):
        """Register a task class; instantiated later by setup_tasks()."""
        self.registry.append(task)
    def setup_tasks(self):
        """Instantiate registered task classes and resolve their frequencies."""
        # discover tasks;
        self.tasks = {}
        for cls in self.registry:
            task = cls()
            try:
                # A user preference overrides the task's own default frequency.
                task_frequency = Prefs["scheduler.tasks.%s.frequency" % task.name]
            except KeyError:
                task_frequency = getattr(task, "frequency", None)
            self.tasks[task.name] = {"task": task, "frequency": parse_frequency(task_frequency)}
    def run(self):
        """Start both worker threads."""
        self.running = True
        self.scheduler_thread = Thread.Create(self.scheduler_worker)
        self.queue_thread = Thread.Create(self.queue_worker)
    def stop(self):
        """Signal both worker loops to exit on their next iteration."""
        self.running = False
    def task(self, name):
        """Return the task instance for *name*, or None when unknown."""
        if name not in self.tasks:
            return None
        return self.tasks[name]["task"]
    def is_task_running(self, name):
        """Return the task's running flag (None when the task is unknown)."""
        task = self.task(name)
        if task:
            return task.running
    def last_run(self, task):
        """Return the datetime of the task's last run, or None."""
        if task not in self.tasks:
            return None
        return self.tasks[task]["task"].last_run
    def next_run(self, task):
        """Return the next scheduled run time of a periodic task, or None."""
        if task not in self.tasks or not self.tasks[task]["task"].periodic:
            return None
        frequency_num, frequency_key = self.tasks[task]["frequency"]
        if not frequency_num:
            return None
        last = self.tasks[task]["task"].last_run
        use_date = last
        now = datetime.datetime.now()
        if not use_date:
            use_date = now
        # Never report a time in the past.
        return max(use_date + datetime.timedelta(**{frequency_key: frequency_num}), now)
    def run_task(self, name, *args, **kwargs):
        """Run task *name* synchronously; skipped when it is already running."""
        task = self.tasks[name]["task"]
        if task.running:
            Log.Debug("Scheduler: Not running %s, as it's currently running.", name)
            return False
        Log.Debug("Scheduler: Running task %s", name)
        try:
            task.prepare(*args, **kwargs)
            task.run()
        except Exception, e:
            Log.Error("Scheduler: Something went wrong when running %s: %s", name, traceback.format_exc())
        finally:
            # post_run always gets a chance to persist its state.
            try:
                task.post_run(Dict["tasks"][name]["data"])
            except:
                Log.Error("Scheduler: task.post_run failed for %s: %s", name, traceback.format_exc())
            Dict.Save()
            config.sync_cache()
    def dispatch_task(self, *args, **kwargs):
        """Queue a one-shot task invocation; picked up by queue_worker."""
        if "queue" not in Dict["tasks"]:
            Dict["tasks"]["queue"] = []
        Dict["tasks"]["queue"].append((args, kwargs))
    def signal(self, name, *args, **kwargs):
        """Broadcast signal *name* to every currently-running periodic task."""
        for task_name in self.tasks.keys():
            task = self.task(task_name)
            if not task:
                Log.Error("Scheduler: Task %s not found (?!)" % task_name)
                continue
            if not task.periodic:
                continue
            if task.running:
                Log.Debug("Scheduler: Sending signal %s to task %s (%s, %s)", name, task_name, args, kwargs)
                try:
                    status = task.signal(name, *args, **kwargs)
                except NotImplementedError:
                    Log.Debug("Scheduler: Signal ignored by %s", task_name)
                    continue
                if status:
                    Log.Debug("Scheduler: Signal accepted by %s", task_name)
                else:
                    Log.Debug("Scheduler: Signal not accepted by %s", task_name)
                continue
            Log.Debug("Scheduler: Not sending signal %s to task %s, because: not running", name, task_name)
    def queue_worker(self):
        """Worker loop: drain the persisted one-shot queue, one task per 5s."""
        Thread.Sleep(10.0)
        while 1:
            if not self.running:
                break
            # single dispatch requested?
            if Dict["tasks"]["queue"]:
                # work queue off
                queue = Dict["tasks"]["queue"][:]
                Dict["tasks"]["queue"] = []
                Dict.Save()
                for args, kwargs in queue:
                    Log.Debug("Queue: Dispatching single task: %s, %s", args, kwargs)
                    Thread.Create(self.run_task, True, *args, **kwargs)
                    Thread.Sleep(5.0)
            Thread.Sleep(1)
    def scheduler_worker(self):
        """Worker loop: fire periodic tasks whose frequency interval elapsed."""
        Thread.Sleep(10.0)
        while 1:
            if not self.running:
                break
            # scheduled tasks
            for name in self.tasks.keys():
                now = datetime.datetime.now()
                info = self.tasks.get(name)
                if not info:
                    Log.Error("Scheduler: Task %s not found (?!)" % name)
                    continue
                task = info["task"]
                if name not in Dict["tasks"] or not task.periodic:
                    continue
                if task.running:
                    continue
                frequency_num, frequency_key = info["frequency"]
                if not frequency_num:
                    continue
                # run legacy SARAM once
                if name == "SearchAllRecentlyAddedMissing" and ("hasRunLSARAM" not in Dict or not Dict["hasRunLSARAM"]):
                    task = self.tasks["LegacySearchAllRecentlyAddedMissing"]["task"]
                    task.last_run = None
                    name = "LegacySearchAllRecentlyAddedMissing"
                    Dict["hasRunLSARAM"] = True
                    Dict.Save()
                if not task.last_run or (task.last_run + datetime.timedelta(**{frequency_key: frequency_num}) <= now):
                    # fixme: scheduled tasks run synchronously. is this the best idea?
                    Thread.Create(self.run_task, True, name)
                    #Thread.Sleep(5.0)
                    #self.run_task(name)
                    Thread.Sleep(5.0)
            Thread.Sleep(1)
scheduler = DefaultScheduler()
| StarcoderdataPython |
1624221 | <reponame>sapphirecat/hashsum
# vim:fileencoding=utf-8
# Python 3.4+
import argparse
import hashlib
import sys
from traceback import print_exc
def hash_file(name, algo):
    """Return the hex digest of file *name* using hash algorithm *algo*.

    :param name: path of the file to hash.
    :param algo: any algorithm name accepted by :func:`hashlib.new`.
    """
    h = hashlib.new(algo)
    with open(name, 'rb') as f:
        # Feed the hash in fixed-size chunks so arbitrarily large files do
        # not get loaded into memory at once.
        for chunk in iter(lambda: f.read(65536), b''):
            h.update(chunk)
    return h.hexdigest()
def create_arg_parser(**kwargs):
    """Build the command-line parser for the hashing tool.

    Any keyword arguments are forwarded to ``argparse.ArgumentParser``.
    """
    parser = argparse.ArgumentParser(**kwargs)
    parser.add_argument(
        '-a', '--algo', default='sha256',
        help='uses a specific hash algorithm')
    parser.add_argument(
        'filename',
        help='the filename to be hashed')
    return parser
def main(argv=None):
    """CLI entry point: parse arguments, hash the file and print the digest."""
    args = sys.argv if argv is None else argv
    parser = create_arg_parser(prog=args[0],
                               description='Displays file hashes')
    opts = parser.parse_args(args[1:])
    # TODO: study the linux sha1sum output and format this likewise
    digest = hash_file(opts.filename, opts.algo)
    print("{0} = {1} {2}".format(digest, opts.algo, opts.filename))
if __name__ == '__main__':
main()
| StarcoderdataPython |
1758593 | from collections import OrderedDict
import numpy as np
def get_vocab(text):
    """Map each distinct token in *text* to its first-seen index.

    Returns an ``OrderedDict`` preserving first-occurrence order.
    """
    vocab = OrderedDict()
    for token in text:
        if token not in vocab:
            # The next index is simply the current vocabulary size.
            vocab[token] = len(vocab)
    return vocab
def get_token_pairs(window_size, text):
    """Build the unique co-occurrence pairs from sliding windows over *text*.

    :param window_size: each token is paired with the next window_size - 1
        tokens following it.
    :param text: sequence of tokens.
    :return: list of unique (token, token) pairs in first-occurrence order.
    """
    token_pairs = list()
    # Track membership in a set: testing "pair not in token_pairs" on the
    # list makes the loop quadratic in the number of pairs.
    seen = set()
    for i, word in enumerate(text):
        for j in range(i + 1, i + window_size):
            if j >= len(text):
                break
            pair = (word, text[j])
            if pair not in seen:
                seen.add(pair)
                token_pairs.append(pair)
    return token_pairs
def symmetrize(a):
    """Return ``a + a.T`` with the diagonal counted only once."""
    mirrored = a + a.transpose()
    # a + a.T doubles the diagonal, so subtract one copy of it.
    return mirrored - np.diag(np.diagonal(a))
def get_matrix(vocab, token_pairs):
    """Build the column-normalized symmetric co-occurrence matrix.

    :param vocab: mapping token -> matrix index (as built by ``get_vocab``).
    :param token_pairs: iterable of (token, token) co-occurrence pairs.
    :return: (V, V) float array whose non-zero columns each sum to 1 and
        whose all-zero columns stay exactly zero.
    """
    # Build matrix
    vocab_size = len(vocab)
    g = np.zeros((vocab_size, vocab_size), dtype='float')
    for word1, word2 in token_pairs:
        i, j = vocab[word1], vocab[word2]
        g[i][j] = 1
    # Symmetrize; subtract the diagonal so self-pairs are not double counted.
    g = g + g.T - np.diag(g.diagonal())
    # Normalize matrix by column.
    norm = np.sum(g, axis=0)
    # np.divide with `where=` but without `out=` leaves the masked entries
    # as uninitialized memory; pass an explicit zero array so columns with
    # zero norm are deterministically zero.
    g_norm = np.divide(g, norm, out=np.zeros_like(g), where=norm != 0)
    return g_norm
def text_rank(text, kw_num, window_size=4, steps=10, min_diff=1e-5, d=0.85):
    """Extract the top *kw_num* keywords from a token sequence via TextRank.

    :param text: sequence of (already tokenized/filtered) tokens.
    :param kw_num: number of keywords to return.
    :param window_size: co-occurrence window size for building edges.
    :param steps: maximum number of power-iteration steps.
    :param min_diff: convergence threshold on the change of the summed ranks.
    :param d: damping factor of the PageRank recurrence.
    :return: list of the kw_num highest-ranked tokens, best first.
    """
    # Build vocabulary
    vocab = get_vocab(text)
    # Get token_pairs from windows
    token_pairs = get_token_pairs(window_size, text)
    # Get normalized matrix
    g = get_matrix(vocab, token_pairs)
    # Initialization for weight(pagerank value)
    pr = np.array([1] * len(vocab))
    # Iteration
    previous_pr = 0
    for epoch in range(steps):
        # Damped PageRank update over the normalized co-occurrence matrix.
        pr = (1 - d) + d * np.dot(g, pr)
        if abs(previous_pr - sum(pr)) < min_diff:
            break
        else:
            previous_pr = sum(pr)
    # Get weight for each node
    node_weight = dict()
    for word, index in vocab.items():
        node_weight[word] = pr[index]
    predicted_kw = [k[0] for k in sorted(node_weight.items(), key=lambda x: x[1], reverse=True)[:kw_num]]
    return predicted_kw
| StarcoderdataPython |
57483 | #!/usr/bin/env python
# coding: utf-8
# In[2]:
# import matplotlib.pyplot as plt
# from scipy import interpolate
import numpy as np
# step = np.array([12, 6, 4, 3, 2])
# MAP5 = np.array([0.6480, 0.6797, 0.6898, 0.6921, 0.6982])
# step_new = np.arange(step.min(), step.max(), 0.1)
# # step_new = np.arange(2, 11, 0.1)
# func = interpolate.interp1d(step, MAP5, kind='cubic', fill_value="extrapolate")
# MAP5_new = func(step_new)
# plt.figure(figsize=(10,10))
# ax1 = plt.subplot(2,1,2)
# plt.sca(ax1)
# plt.xticks(fontsize=16)
# plt.yticks(fontsize=16)
# plt.xlabel("KEY-FRAME STEP", fontsize=16)
# plt.ylabel("MAP@5", fontsize=16)
# plt.title("MVOF STEP-MAP@5 CURVE", fontsize=16)
# plt.plot(step_new, MAP5_new, label="$MVOF\quad MAP@5$", linestyle='--')
# plt.scatter(step, MAP5, color="g")
# plt.hlines(0.7026, 13, 2, colors = "r", linestyles = "--", label="$DFF\qquad MAP@5$")
# plt.legend(loc="lower left", fontsize=16)
# ax2 = plt.subplot(2,1,1)
# plt.sca(ax2)
# the_table = plt.table(cellText=[list(np.flip(step, 0)), list(np.flip(MAP5, 0))],
# rowLabels=["STEP", "MAP@5"],
# # colLabels=list(np.flip(step, 0)),
# loc='lower center')
# the_table.set_fontsize(18)
# the_table.scale(1, 2)
# plt.axis('off')
# plt.show()
# In[4]:
import pickle
# Compare per-frame optical-flow features against motion-vector features and
# report the mean absolute values/differences. Paths are fixed to a local
# feature dump directory.
diffs = []
mvs = []
flows = []
for i in range(602):
    try:
        # Use context managers so the file handles are always closed
        # (the original open(...) calls leaked them).
        with open("/home/jingtun/feat_flow_compare/flow_%06d.pkl" % i, 'rb') as f:
            flow = pickle.load(f)
        with open("/home/jingtun/feat_flow_compare/mv_%06d.pkl" % i, 'rb') as f:
            mv = pickle.load(f)
        diff = flow - mv
        diffs.append(np.mean(abs(diff)))
        mvs.append(np.mean(abs(mv)))
        flows.append(np.mean(abs(flow)))
    # Only expected failures: missing, truncated or corrupt pickle files.
    # A bare "except:" here would also hide genuine bugs (e.g. NameError).
    except (OSError, EOFError, pickle.UnpicklingError):
        print("not fit")
print("diff abs mean : ", np.mean(diffs))
print("mv abs mean : ", np.mean(mvs))
print("flow abs mean : ", np.mean(flows))
| StarcoderdataPython |
1630528 | from mlpractice.stats.stats_utils import print_stats, _update_stats
from mlpractice.utils import ExceptionInterception
try:
from mlpractice_solutions.\
mlpractice_solutions.linear_classifier_solution import softmax
except ImportError:
softmax = None
from scipy.special import softmax as softmax_sample
import numpy as np
def test_all(softmax=softmax):
    """Run the whole softmax test battery, then record and print stats."""
    test_interface(softmax)
    test_public(softmax)
    test_default(softmax)
    test_normalization(softmax)
    test_random(softmax, 100)
    print('All tests passed!')
    _update_stats('linear_classifier', 'softmax')
    print_stats('linear_classifier')
def test_interface(softmax=softmax):
    """softmax must return an ndarray matching the input shape (1D and 2D)."""
    with ExceptionInterception():
        x1 = np.array([1, 2, 3])
        x2 = np.array([[1, 2, 3],
                       [1, 2, 3]])
        y1 = softmax(x1)
        y2 = softmax(x2)
        assert isinstance(y1, np.ndarray), \
            "softmax must return an ndarray"
        assert x1.shape == y1.shape, \
            "The output shape must match the input shape"
        assert isinstance(y2, np.ndarray), \
            "softmax must return an ndarray"
        assert x2.shape == y2.shape, \
            "The output shape must match the input shape"
def test_public(softmax=softmax):
    """Values must match scipy's softmax on a small 1D example."""
    with ExceptionInterception():
        x = np.array([1, 2, 3])
        y_sample = softmax_sample(x)
        y = softmax(x)
        assert np.all(np.abs(y - y_sample) < 10 ** -8)
def test_default(softmax=softmax):
    """For 2D input, softmax must operate row-wise (axis=1) like scipy's."""
    with ExceptionInterception():
        x = np.array([[1, 0.5, 0.2, 3],
                      [1, -1, 7, 3],
                      [2, 12, 13, 3]])
        y_sample = softmax_sample(x, axis=1)
        y = softmax(x)
        assert np.all(np.abs(y - y_sample) < 10 ** -8)
def test_normalization(softmax=softmax):
    """Large logits must not overflow (requires the max-subtraction trick)."""
    with ExceptionInterception():
        x = np.array([10000, 0, 0])
        y_sample = softmax_sample(x)
        y = softmax(x)
        assert np.all(np.abs(y - y_sample) < 10 ** -8)
def test_random(softmax=softmax, iterations=1):
    """Seeded randomized comparison against scipy's softmax on 3x4 inputs."""
    with ExceptionInterception():
        np.random.seed(42)
        for _ in range(iterations):
            x = np.random.rand(3, 4)
            y_sample = softmax_sample(x, axis=1)
            y = softmax(x)
            assert np.all(np.abs(y - y_sample) < 10 ** -8)
| StarcoderdataPython |
111494 | from numba.pycc import CC
from numpy import zeros
cc = CC('UnsatStor_inner_compiled')
@cc.export('UnsatStor_inner', '(int64,int64[:,::1],float64,float64,float64[:,:,::1],float64[:,:,::1])')
def UnsatStor_inner(NYrs, DaysMonth, MaxWaterCap, UnsatStor_0, infiltration, DailyET):
    """Daily unsaturated-zone water balance.

    For each simulated day: add infiltration to the carried-over storage,
    subtract evapotranspiration (capped by what is available) and clip the
    remaining storage at MaxWaterCap.

    :param NYrs: number of simulated years.
    :param DaysMonth: (NYrs, 12) array of days per month.
    :param MaxWaterCap: maximum storage of the unsaturated zone.
    :param UnsatStor_0: initial carry-over storage.
    :param infiltration: (NYrs, 12, 31) daily infiltration.
    :param DailyET: (NYrs, 12, 31) daily potential evapotranspiration.
    :return: (unsatstor, et, final carry-over storage); the first two are
        (NYrs, 12, 31) arrays.
    """
    unsatstor = zeros((NYrs, 12, 31))
    unsatstor_carryover = UnsatStor_0
    et = zeros((NYrs, 12, 31))
    for Y in range(NYrs):
        for i in range(12):
            for j in range(DaysMonth[Y][i]):
                # Start the day from yesterday's storage plus infiltration.
                unsatstor[Y][i][j] = unsatstor_carryover + infiltration[Y][i][j]
                if DailyET[Y][i][j] >= unsatstor[Y][i][j]:
                    # ET demand exceeds storage: evaporate everything.
                    et[Y][i][j] = unsatstor[Y][i][j]
                    unsatstor[Y][i][j] = 0
                else:
                    et[Y][i][j] = DailyET[Y][i][j]
                    unsatstor[Y][i][j] = unsatstor[Y][i][j] - DailyET[Y][i][j]
                # Clip storage at the soil's maximum water capacity.
                # (Removed the original dead "else: pass" branch.)
                if unsatstor[Y][i][j] > MaxWaterCap:
                    unsatstor[Y][i][j] = MaxWaterCap
                unsatstor_carryover = unsatstor[Y][i][j]
    return unsatstor, et, unsatstor_carryover
| StarcoderdataPython |
1633432 | <filename>prometheus/serverboards-prometheus.py
#!env/bin/python3
import serverboards_aio as serverboards
import sys
import asks
import time
import json
import urllib
import curio
from serverboards_aio import print
from pcolor import printc
asks.init('curio')
# Prometheus labels that carry no display value when naming a serie.
IGNORE_METRIC_NAMES = set(['instance', 'job'])
# Duration-suffix -> seconds multipliers; "ms" must be tested before "s".
td_to_s_multiplier = [
    ("ms", 0.001),
    ("s", 1),
    ("m", 60),
    ("h", 60 * 60),
    ("d", 24 * 60 * 60),
]
# NOTE(review): appears unused in this module — confirm before removing.
uuid_to_timer = {}
def time_description_to_seconds(td):
    """Convert a duration description to seconds as a float.

    Accepts a bare number, a plain numeric string, or a string with one of
    the suffixes ms/s/m/h/d (e.g. ``"5m"`` -> 300.0, ``"500ms"`` -> 0.5).
    """
    if type(td) in (int, float):
        return float(td)
    # "ms" must be checked before "s" so it is not matched as seconds.
    for suffix, factor in (("ms", 0.001), ("s", 1), ("m", 60),
                           ("h", 60 * 60), ("d", 24 * 60 * 60)):
        if td.endswith(suffix):
            return float(td[:-len(suffix)]) * factor
    # No recognized suffix: interpret the whole string as seconds.
    return float(td)
def decorate_serie(serie, name=None):
    """Convert a Prometheus result serie to Serverboards' preferred shape.

    The name is taken from *name*, else from the ``__name__`` label, else
    built from the remaining labels (skipping instance/job).
    """
    metric = serie.get("metric", {})
    label = name or metric.get("__name__", None)
    if not label:
        # Fall back to a "key: value" summary of the informative labels.
        label = ', '.join(
            "%s: %s" % (key, value)
            for key, value in metric.items()
            if key not in ("instance", "job")
        )
    return {
        "name": label,
        "values": serie.get("values", []),
    }
ssh = serverboards.Plugin("serverboards.core.ssh/daemon")
@serverboards.cache_ttl(3000)
async def port_tunnel(via, hostname, port):
    """Open (and cache) an SSH-forwarded local port to hostname:port via *via*."""
    newport = await ssh.open_port(service=via, hostname=hostname, port=port)
    await serverboards.debug("Opened new port: %s" % newport)
    return newport
@serverboards.cache_ttl(300)
async def service_get(service_id):
    """Fetch (and cache) a service record; empty config when id is falsy."""
    if not service_id:
        return {"config": {}}
    return await serverboards.service.get(service_id)
@serverboards.rpc_method
async def get(expression, service=None, start=None, end=None, step=None):
    """Query one or more PromQL range expressions and return decorated series.

    :param expression: newline-separated expressions; each line may be
        prefixed with ``name: `` to label its series.
    :param service: optional service id providing url/via config.
    :param start, end, step: range-query parameters; default to the last
        10 minutes with a 14 second step.
    """
    if not expression:
        raise Exception("An expression is required")
    service = await service_get(service)
    url = service.get("config", {}).get("url", "http://localhost:9090")
    via = service.get("config", {}).get("via")
    if via:
        # Reach Prometheus through an SSH tunnel opened on a local port.
        url = urllib.parse.urlparse(url)
        port = await port_tunnel(via, url.hostname, url.port)
        url = "http://localhost:%d" % port
    now = int(time.time())
    if not start:
        start = now - 600
    if not end:
        end = now
    if not step:
        step = 14
    ret = []
    # maybe several expresions, one per line
    for expr in expression.split('\n'):
        expr = expr.strip()
        name = None
        # NOTE(review): splitting on ':' keeps only d[1], so expressions that
        # themselves contain a colon would be truncated — confirm intended.
        if ':' in expr:
            d = expr.split(':')
            name = d[0]
            expr = d[1].strip()
        if not expr:
            continue
        params = {
            "query": expr,
            "start": start,
            "end": end,
            "step": step,
            "_": now
        }
        await serverboards.debug("Get data from %s, %s: %s" % (url, repr(via), expr))
        try:
            res = await asks.get(url + "/api/v1/query_range", params=params)
        except Exception:
            raise Exception(
                "Coult not connect to the Prometheus server. Is it running?")
        if res.status_code != 200:
            raise Exception(res.text)
        js = res.json()
        if js.get("status") != "success":
            raise Exception("Unknown response from prometheus")
        for x in js.get("data", {}).get("result", []):
            ret.append(decorate_serie(x, name=name))
    return ret
async def get_points(via=None, url=None, expression=None):
    """Run an instant PromQL query and return the raw result list."""
    if not url:
        url = "http://localhost:9090"
    if via:
        # Reach Prometheus through an SSH tunnel opened on a local port.
        url = urllib.parse.urlparse(url)
        port = await port_tunnel(via, url.hostname, url.port)
        url = "http://localhost:%d" % port
    now = int(time.time())
    params = {
        "query": expression,
        "time": now,
        "_": now
    }
    printc(url + "/api/v1/query", params)
    res = await asks.get(url + "/api/v1/query", params=params)
    return res.json()["data"]["result"]
watch_tasks = {}
@serverboards.rpc_method
async def watch_start(id=None, period=None, service=None, expression=None, **kwargs):
    """Start a periodic watcher that evaluates *expression* every *period*.

    NOTE(review): ``state`` and ``nstate`` are both initialized to None and
    never reassigned, so ``state != nstate`` is always False and the
    "trigger" event can never fire — this looks like a bug; confirm.
    """
    state = None
    via = service.get("config", {}).get("via")
    url = service.get("config", {}).get("url")
    period_s = time_description_to_seconds(period or "5m")
    nstate = None
    async def check_ok():
        while True:
            await serverboards.debug("Checking expression: %s" % (expression))
            p = await get_points(via=via, url=url, expression=expression)
            if state != nstate:
                await serverboards.rpc.event("trigger", {"id": id, "value": p})
            await curio.sleep(period_s)
    await serverboards.info("Start Prometheus watch %s" % id)
    # NOTE(review): curio.spawn is a coroutine; presumably this should be
    # `await curio.spawn(check_ok)` — confirm against the curio API.
    watch_tasks[id] = curio.spawn(check_ok)
    return id
@serverboards.rpc_method
async def watch_stop(id):
    """Cancel and forget the watcher previously started under *id*."""
    await serverboards.info("Stop Prometheus watch %s" % (id))
    await watch_tasks[id].cancel()
    # NOTE(review): curio's Task.join() is a coroutine; calling it without
    # await creates an unawaited coroutine — confirm whether this should be
    # `await watch_tasks[id].join()`.
    watch_tasks[id].join()
    del watch_tasks[id]
    return "ok"
@serverboards.cache_ttl(30)
async def get_values(via=None, url=None):
    """Return (and cache) all known metric names from Prometheus."""
    if not url:
        url = "http://localhost:9090"
    if via:
        # Reach Prometheus through an SSH tunnel opened on a local port.
        url = urllib.parse.urlparse(url)
        port = await port_tunnel(via, url.hostname, url.port)
        url = "http://localhost:%d" % port
    res = await asks.get(url + "/api/v1/label/__name__/values")
    return res.json()["data"]
@serverboards.cache_ttl(30)
async def get_tags(via=None, url=None, value="", tag=None):
    """For metric *value*: list its label names, or, when *tag* is given,
    the values of that label. Double-underscore labels are filtered out."""
    if not url:
        url = "http://localhost:9090"
    if via:
        # Reach Prometheus through an SSH tunnel opened on a local port.
        url = urllib.parse.urlparse(url)
        port = await port_tunnel(via, url.hostname, url.port)
        url = "http://localhost:%d" % port
    res = await asks.get(url + "/api/v1/series?match[]=%s" % value)
    data = res.json()["data"]
    # NOTE(review): leftover debugging output — consider removing.
    print("value", value, data)
    if not tag:
        ret = set()
        for d in data:
            ret.update(d.keys())
        return sorted([x for x in ret if not x.startswith('__')])
    else:
        ret = set()
        for d in data:
            ret.add(d.get(tag))
        return [x for x in ret if x and not x.startswith('__')]
BUILTINS = ["sum(", "min(", "max(", "avg(", "stddev(", "stdvar(",
"count(", "count_values(", "bottomk(", "topk(", "quantile("]
@serverboards.rpc_method
async def autocomplete_values(current="", via=None, url=None, **kwargs):
    """Suggest PromQL completions for the partial input *current*.

    Completes label values inside ``metric{label=`` fragments, label names
    after ``metric{``, and otherwise metric names plus aggregate builtins.
    """
    if not current:
        return []
    if '=' in current:
        # Completing a label value: metric{label=va...
        prefix, suffix = current.split('{')
        tag, suffix = suffix.split('=')
        if suffix.startswith('"'):
            suffix = suffix[1:]
        options = ['%s{%s="%s"}' % (prefix, tag, x)
                   for x in (await get_tags(via, url, prefix, tag))
                   if x.startswith(suffix)
                   ]
    elif '{' in current:
        # Completing a label name: metric{la...
        prefix, suffix = current.split('{')
        options = ['%s{%s="' % (prefix, x)
                   for x in (await get_tags(via, url, prefix))
                   if x.startswith(suffix)
                   ]
    else:
        options = (await get_values(via, url)) + BUILTINS
    # Every '_'-separated fragment of the input must appear in a candidate.
    for cpart in \
            current.lower().replace('{', '_').replace('=', '_').split('_'):
        options = [x for x in options if cpart in x.lower()]
    return sorted(options)
async def connect_url_via_status(url, via):
    """Probe *url* (optionally through an SSH tunnel via service *via*).

    Returns "ok" on HTTP 200, "nok" on other status codes, "down" when the
    request fails, and "ssh-proxy-error" when the tunnel cannot be opened.
    """
    # print("Check ", url, via)
    if via:
        url = urllib.parse.urlparse(url)
        try:
            port = await port_tunnel(via, url.hostname, url.port)
            await serverboards.debug("Opened tunel localhost:%s -> %s:%s", port, url.hostname, url.port, service_id=via)
        except Exception as e:
            await serverboards.error("Could not open tunel to %s:%s: %s" % (url.hostname, url.port, e), service_id=via)
            return "ssh-proxy-error"
        url = "http://localhost:%d" % port
    # print("Check url", url)
    try:
        res = await asks.get(url)
    except Exception:
        return "down"
    if res.status_code == 200:
        return "ok"
    else:
        return "nok"
@serverboards.rpc_method
async def prometheus_is_up(service):
    """Health check for a Prometheus service (default port 9090)."""
    return await connect_url_via_status(
        url=service["config"].get("url") or "http://localhost:9090",
        via=service["config"].get("via"))
@serverboards.rpc_method
async def agent_is_up(service):
    """Health check for the agent service.

    NOTE(review): identical to prometheus_is_up (same default port 9090) —
    confirm whether the agent should probe a different default URL.
    """
    return await connect_url_via_status(
        url=service["config"].get("url") or "http://localhost:9090",
        via=service["config"].get("via"))
@serverboards.rpc_method
async def node_exporter_is_up(service):
    """Health check for a node_exporter service (default port 9100);
    tunnels through the configured "server" service when present."""
    return await connect_url_via_status(
        url=service["config"].get("url") or "http://localhost:9100",
        via=service["config"].get("server"))
async def test():
    """Self-test used by the "test" CLI mode; exits with status 1 on failure.

    Requires a reachable Prometheus/node_exporter on localhost (or the
    mock environment loaded by test_mode).
    """
    # res=get(expression="prometheus_rule_evaluation_failures_total")
    # print(json.dumps(res, indent=2))
    printc("START")
    try:
        res = await autocomplete_values("up")
        printc(json.dumps(res, indent=2))
        res = await get_points(expression="up == 1")
        assert res
        res = await get(service="XXX", expression="up")
        assert res
        res = await node_exporter_is_up({
            "config": {
                "url": None,
                "server": None,
            }
        })
        printc(res)
        assert res == "ok"
        # An unroutable address must report the service as down.
        res = await node_exporter_is_up({
            "config": {
                "url": "https://127.255.255.255:1000/",
                "server": None,
            }
        })
        printc(res)
        assert res == "down"
    except Exception:
        import traceback
        traceback.print_exc()
        sys.exit(1)
    printc("Success")
if __name__ == '__main__':
    # "test" subcommand runs the self-test against mock.yaml; otherwise the
    # plugin enters its normal serverboards event loop.
    if len(sys.argv) == 2 and sys.argv[1] == 'test':
        import yaml
        # NOTE(review): yaml.load without an explicit Loader is deprecated
        # and unsafe on untrusted input; mock.yaml is local, but confirm.
        serverboards.test_mode(test, yaml.load(open("mock.yaml")))
        print("Failed!")
        sys.exit(1)
    else:
        serverboards.loop()
| StarcoderdataPython |
3267336 | from __future__ import print_function
from __future__ import unicode_literals
import time
import re
from tiny_test_fw import DUT, App, TinyFW
from ttfw_bl import BL602App, BL602DUT
@TinyFW.test_method(app=BL602App.BL602App, dut=BL602DUT.BL602TyMbDUT, test_suite_name='sdk_app_pwm_tc')
def sdk_app_pwm_tc(env, extra_data):
    """Flash the PWM sample app onto a BL602 board and verify it boots."""
    # first, flash dut
    # then, test
    dut = env.get_dut("port0", "fake app path")
    print('Flashing app')
    dut.flash_app(env.log_path, env.get_variable('flash'))
    print('Starting app')
    dut.start_app()
    try:
        # The boot banner must appear within 0.5s or the test fails.
        dut.expect("Booting BL602 Chip...", timeout=0.5)
        print('BL602 booted')
        dut.halt()
    except DUT.ExpectTimeout:
        print('ENV_TEST_FAILURE: BL602 example test failed')
        raise
if __name__ == '__main__':
sdk_app_pwm_tc()
| StarcoderdataPython |
3316538 | """ Test suite for Application. """
import unittest
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock
import os
import sys
#pylint: disable=line-too-long,wrong-import-position
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../src')))
from application import Application
class TestApplication(unittest.TestCase):
    """ Test cases for Application: session start, authorization and
    subscription flow, using mocked collaborators throughout. """
    def setUp(self):
        """Create fresh mocks for every Application collaborator."""
        self.mock_session = Mock()
        self.mock_authorizer = Mock()
        self.mock_subscriber = Mock()
        self.mock_options = Mock()
    def testSessionStartFail(self):
        """ Verify that if the session fails to start, no authorization or
        subscriptions are made.
        Plan:
        Set up and verify the following mocks:
        a. start() returns False.
        b. authorize() is not called.
        c. subscribe() is not called.
        """
        application = Application(self.mock_session,
                                  self.mock_authorizer,
                                  self.mock_subscriber,
                                  self.mock_options)
        self.mock_session.start.return_value = False
        application.run()
        self.mock_session.start.assert_called_once()
        self.mock_authorizer.authorize.assert_not_called()
        self.mock_subscriber.subscribe.assert_not_called()
    def testSessionAuthorizeFail(self):
        """ Verify that if authorization fails, no subscriptions are made.
        Plan:
        Set up and verify the following mocks:
        a. start() returns True.
        b. authorize() fails and returns False.
        c. subscribe() is not called.
        """
        application = Application(self.mock_session,
                                  self.mock_authorizer,
                                  self.mock_subscriber,
                                  self.mock_options)
        self.mock_session.start.return_value = True
        self.mock_authorizer.authorize.return_value = False
        application.run()
        self.mock_session.start.assert_called_once()
        self.mock_authorizer.authorize.assert_called_once()
        self.mock_subscriber.subscribe.assert_not_called()
    def testSubscribeWithConfig(self):
        """ Verify the correct topics and fields are used when subscribing on
        the session.
        Plan:
        Set up and verify the following mocks:
        a. start() returns True.
        b. authorize() succeeds and returns True.
        c. subscribe() is called with the same topics and fields configured in
        the options.
        """
        expected_topics = ["IBM US Equity", "MSFT US Equity"]
        expected_fields = ["LAST_PRICE", "BID", "ASK"]
        service_name = "//blp/mktdata"
        self.mock_options.configure_mock(**{
            "service": service_name,
            "topics": expected_topics,
            "fields": expected_fields
        })
        application = Application(self.mock_session,
                                  self.mock_authorizer,
                                  self.mock_subscriber,
                                  self.mock_options)
        self.mock_session.start.return_value = True
        self.mock_authorizer.authorize.return_value = True
        application.run()
        self.mock_session.start.assert_called_once()
        self.mock_authorizer.authorize.assert_called_once()
        # subscribe() is inspected positionally: (topics, fields).
        actual_topics = self.mock_subscriber.subscribe.call_args[0][0]
        actual_fields = self.mock_subscriber.subscribe.call_args[0][1]
        self.assertEqual(expected_topics, actual_topics)
        self.assertEqual(expected_fields, actual_fields)
if __name__ == "__main__":
unittest.main()
__copyright__ = """
Copyright 2020. Bloomberg Finance L.P.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: The above
copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
| StarcoderdataPython |
1720726 | <reponame>rochus/transitionscalespace
#!/usr/bin/env python
import random
import numpy as np
#try:
# import ujson as json
#except:
import json
def dist(X, Y, dist_type='angle'):
    """Compute the distance between two coordinate vectors.

    Args:
        X, Y (np.ndarray): coordinate vectors. For 'angle', both are
            assumed to be unit vectors (points on a unit sphere) --
            TODO confirm all callers pass normalized vectors.
        dist_type (str): 'angle' for angular (great-circle) distance,
            'euclidean' for the L2 norm of the difference.

    Returns:
        float: the distance between X and Y.

    Raises:
        RuntimeError: if dist_type is not recognized.
    """
    if dist_type == 'angle':
        # this computes the angular distance between two inputs. this is
        # primarily useful for particles that live on a unit sphere.
        # otherwise, results are not guaranteed to be useful.
        # Bugfix: clamp the inner product to [-1, 1] before arccos --
        # floating-point round-off can push the dot product of two unit
        # vectors slightly outside that range, which would yield NaN.
        return np.arccos(np.clip(X.T.dot(Y), -1.0, 1.0))
    elif dist_type == 'euclidean':
        return np.linalg.norm(X - Y)
    else:
        raise RuntimeError(f"Unknown dist_type '{dist_type}'")
class Particle:
    """A single particle with a position and push/pull force accumulators.

    Positions are renormalized after every update, so the default 'angle'
    metric measures great-circle distance -- TODO confirm all agents using
    this class live on the unit sphere.
    """

    def __init__(self, X, **kwargs):
        """Initialize a particle.

        Args:
            X (array-like): Initial position of the particle

        Keyword Args:
            dist_type (str): distance metric, one of 'euclidean', 'angle'
            alpha (float): step size applied to push/pull forces
            alpha_decay (float): per-update multiplicative decay of alpha
            mem (float): smoothing factor in [0, 1]; larger values keep
                more of the old position
        """
        self.X = X
        self.set_config(**kwargs)
        self.reset_push_pull()

    def set_config(self, **kwargs):
        """Set hyper-parameters from kwargs; unknown keys are ignored."""
        self.dist_type = kwargs.get('dist_type', 'angle')
        self.alpha = kwargs.get('alpha', 0.2)
        self.alpha_decay = kwargs.get('alpha_decay', 0.99999)
        self.mem = kwargs.get('mem', 0.8)

    def dist(self, X):
        """Distance from this particle to X under the configured metric."""
        return dist(self.X, X, self.dist_type)

    def normalize(self):
        """Normalize X.

        This is only useful when the agent that uses the particle system
        lives on a unit sphere.
        """
        self.X /= np.linalg.norm(self.X)

    def apply(self):
        """Apply the accumulated push/pull forces to the position."""
        # compute new position; pushes repel 5x as strongly as pulls attract
        Xnew = self.X + (self.alpha * self.pull - 5.0 * self.alpha * self.push)
        Xold = self.X
        # smoothed update + renormalize (only meaningful for unit-sphere agents)
        tmp = self.mem * Xold + (1.0 - self.mem) * Xnew
        tmp /= np.linalg.norm(tmp)
        # Bugfix: the original branched on tmp[2] >= 0 (an experimental
        # hemisphere check marked "test if this is ok") but assigned the
        # identical value on both branches; the dead conditional is removed.
        self.X = tmp
        # slowly decay alpha
        self.alpha *= self.alpha_decay

    def reset_push_pull(self):
        """Zero the force accumulators (same shape as the position)."""
        self.push = np.zeros(self.X.shape)
        self.pull = np.zeros(self.X.shape)

    def from_json(self, state_dict):
        """Restore configuration and position from a to_json() dict."""
        self.set_config(**state_dict)
        X = state_dict['X']
        self.X = np.asarray(X)
        self.reset_push_pull()

    def to_json(self):
        """Serialize configuration and position to a JSON-ready dict."""
        state_dict = {
            # meta
            'dist_type': self.dist_type,
            'alpha': self.alpha,
            'alpha_decay': self.alpha_decay,
            'mem': self.mem,
            # data: assumes a 3-component position vector
            'X': [float(self.X[0]), float(self.X[1]), float(self.X[2])],
        }
        return state_dict
class PushPullParticleSystem:
    """A growing set of particles with local push/pull interactions.

    A new particle is spawned whenever an input falls outside every
    existing particle's receptive field (radius ``mindist``); nearby
    particles repel each other within ``mindist`` and attract each other
    up to ``maxdist``.
    """

    def __init__(self, **kwargs):
        super(PushPullParticleSystem, self).__init__()
        self.particles = []
        self.set_config(**kwargs)

    def set_config(self, **kwargs):
        """Set system hyper-parameters; unknown kwargs are ignored."""
        # settings for particle interactions
        self.mindist = kwargs.get('mindist', 0.1)
        self.maxdist = kwargs.get('maxdist', 2.0 * self.mindist)
        self.dist_type = kwargs.get('dist_type', 'euclidean')
        # update and interaction strengths for particles; passed forward
        # during construction of new particles
        self.alpha = kwargs.get('alpha', 0.15)
        self.alpha_decay = kwargs.get('alpha_decay', 0.99999)
        self.mem = kwargs.get('mem', 0.8)
        # threshold and random chance for creation of a new symbol
        self.mindist_rnd_threshold = kwargs.get('mindist_rnd_threshold', 0.9)
        self.mindist_rnd_chance = kwargs.get('mindist_rnd_chance', 0.1)

    def __len__(self):
        return len(self.particles)

    def __getitem__(self, i):
        assert i < len(self), "Index out of bounds"
        return self.particles[i]

    def save_json(self, path: str):
        """Serialize configuration and all particles to a JSON file.

        Bugfix: the original appended particles[0] once before the loop,
        so the first particle appeared twice in the saved file (and was
        duplicated again on every save/load round trip).
        """
        state_dict = {
            # meta
            'dist_type': self.dist_type,
            'mindist': self.mindist,
            'maxdist': self.maxdist,
            'mindist_rnd_threshold': self.mindist_rnd_threshold,
            'mindist_rnd_chance': self.mindist_rnd_chance,
            'alpha': self.alpha,
            'alpha_decay': self.alpha_decay,
            'mem': self.mem,
            # data
            'particles': [p.to_json() for p in self.particles],
        }
        with open(path, 'w') as f:
            json.dump(state_dict, f, indent=1)

    def load_json(self, path: str):
        """Restore configuration and particles from a JSON file."""
        with open(path, 'r') as f:
            state_dict = json.load(f)
        self.set_config(**state_dict)
        for p in state_dict['particles']:
            self.particles.append(Particle(np.array([0, 0, 0])))
            self.particles[-1].from_json(p)

    def avg_dist(self):
        """Compute the average distance between closest particles."""
        # Unreachable placeholder code after the raise has been removed.
        raise NotImplementedError("Not yet done.")

    def spawn_particle(self, X):
        """Create a new particle at X, inheriting the system's settings."""
        particle_args = {
            # Bugfix: key was 'dist', which Particle.set_config ignores
            # (it reads 'dist_type'), so spawned particles always fell
            # back to the default 'angle' metric.
            'dist_type': self.dist_type,
            'alpha': self.alpha,
            'alpha_decay': self.alpha_decay,
            'mem': self.mem,
        }
        self.particles.append(Particle(X, **particle_args))
        return self.particles[-1]

    def update(self, X):
        """Feed one input into the system and update nearby particles.

        Args:
            X (array-like): Euclidean coordinate / input

        Returns:
            int: index of the winning (closest covering) particle
        """
        # no particle yet, so let's create the first one
        if len(self) == 0:
            self.spawn_particle(X)
            return 0
        # compute distance from all particles to the target
        ds = [p.dist(X) for p in self.particles]
        # indices of all particles whose receptive field covers X
        rs = [i for i in range(len(ds)) if ds[i] <= self.mindist]
        # if we have no particle close to this location, spawn a new one
        if len(rs) == 0:
            self.spawn_particle(X)
            rs.append(len(self) - 1)
        else:
            # close to minimal distance and some luck? create a new particle
            md = min(ds)
            rnd = False
            if md > (self.mindist_rnd_threshold * self.mindist):
                rnd = random.random() > (1 - self.mindist_rnd_chance)
            if rnd:
                self.spawn_particle(X)
        # winner-take-all: pick the closest of the covering particles
        w = rs[0]
        for i in range(1, len(rs)):
            if ds[rs[i]] < ds[w]:
                w = rs[i]
        # reset the covered particles' push/pull accumulators
        for ri in rs:
            self[ri].reset_push_pull()
        # compute local push-pull interactions of particles
        for ri in rs:
            # skip the winner; it stays anchored to the current input
            if ri == w:
                continue
            # local interactions between all covered particles, O(N^2)
            for rj in rs:
                # skip self-reinforcing interactions
                if ri == rj:
                    continue
                # unit vector pointing from particle ri towards rj
                v = self[rj].X - self[ri].X
                v /= np.linalg.norm(v)
                # Bugfix: the metric was not forwarded, so inter-particle
                # distances were always angular even in euclidean systems.
                d = dist(self[rj].X, self[ri].X, self.dist_type)
                # (weighted) push within mindist, pull up to maxdist
                if d <= self.mindist:
                    self[ri].push += d * v
                elif d <= self.maxdist:
                    self[ri].pull += d * v
        # apply accumulated forces to everyone but the winner
        for ri in rs:
            if ri == w:
                continue
            self[ri].apply()
        return w
| StarcoderdataPython |
1743780 | """Policies which use NumPy as a numerical backend."""
from garage.np.policies.fixed_policy import FixedPolicy
from garage.np.policies.policy import Policy
from garage.np.policies.scripted_policy import ScriptedPolicy
# Public API of the numpy-backed policies package.
__all__ = [
    'FixedPolicy',
    'Policy',
    'ScriptedPolicy',
]
| StarcoderdataPython |
import psutil

# Overall virtual-memory usage as a percentage of total RAM.
print(psutil.virtual_memory().percent)
# Per-CPU utilization sampled over a 1-second interval; the second
# argument (percpu=True) returns one percentage per logical core.
print(psutil.cpu_percent(1,True))
3215043 | <filename>moduledev/module.py<gh_stars>0
import os
import shlex
import shutil
from abc import ABCMeta, abstractmethod
from glob import glob
from . import util
# Tcl modulefile template written to <root>/module/<name>_modulefile when a
# tree is set up; %s is interpolated with the tree's root directory (see
# ModuleTree.setup). Each installed version symlinks to this file, which
# then sources the shared and per-version .modulefile fragments.
_modulefile_template = """#%%Module1.0
set MODULENAME [ file tail [ file dirname $ModulesCurrentModulefile ] ]
set MODULEVERSION [ file tail $ModulesCurrentModulefile ]
set MODULEBASE %s
set basedir $MODULEBASE/$MODULENAME/$MODULEVERSION
conflict $MODULENAME
if { [ file exists $MODULEBASE/$MODULENAME/.modulefile ] } {
source $MODULEBASE/$MODULENAME/.modulefile
}
if { [ file exists $MODULEBASE/$MODULENAME/$MODULEVERSION/.modulefile ] } {
source $MODULEBASE/$MODULENAME/$MODULEVERSION/.modulefile
}
proc ModulesHelp { } {
global dotversion
global MODULENAME
global MODULEVERSION
global DESCRIPTION
global HELPTEXT
global MAINTAINER
puts stderr "\\t$MODULENAME $MODULEVERSION - $DESCRIPTION\\n\\tMaintainer: $MAINTAINER\\n"
puts stderr "\\n$HELPTEXT"
}
module-whatis $DESCRIPTION
"""
class ModuleTree:
    """The on-disk root of a module tree: a `module` dir (master modulefile),
    a `modulefile` dir (per-category symlinks) and one directory per
    installed module."""

    def __init__(self, root_dir):
        # Normalize once so later joins and symlinks use absolute paths.
        self.root_dir = os.path.abspath(root_dir)

    @property
    def name(self):
        # The tree name is encoded in the master file "<name>_modulefile".
        modulefile = self.master_module_file()
        if modulefile is not None:
            return os.path.basename(modulefile).split("_")[0]
        else:
            return None

    def module_dir(self):
        """Directory that holds the master modulefile."""
        return os.path.join(self.root_dir, "module")

    def modulefile_dir(self):
        """Directory that holds per-category modulefile symlinks."""
        return os.path.join(self.root_dir, "modulefile")

    def master_module_file(self):
        """Return the master module file if it exists, None otherwise."""
        # NOTE(review): takes the first glob match; assumes at most one
        # "*modulefile" entry exists -- confirm setup() is the only writer.
        files = glob(os.path.join(self.module_dir(), "*modulefile"))
        if len(files):
            return files[0]
        else:
            return None

    def _master_module_file_name(self, name):
        """Construct the name of the master module file"""
        return os.path.join(self.module_dir(), f"{name}_modulefile")

    def exists(self):
        """Return true if the root directory exists"""
        return os.path.lexists(self.root_dir)

    def valid(self):
        """
        Check if the module root tree is set up: root and bookkeeping
        directories exist and are writeable, and a master modulefile
        is present.
        """
        return (
            self.exists()
            and util.writeable_dir(self.root_dir)
            and util.writeable_dir(self.modulefile_dir())
            and util.writeable_dir(self.module_dir())
            and self.master_module_file() is not None
        )

    def module_names(self):
        # Every child of the root except the two bookkeeping directories
        # is a module name.
        return [
            m for m in os.listdir(self.root_dir) if m != "module" and m != "modulefile"
        ]

    def modules(self, all_versions=False):
        """Yield Module objects for every installed module; with
        all_versions=True, yield one per version. Parse errors are
        silently ignored."""
        if not self.valid():
            raise RuntimeError(
                "Cannot get available modules from a "
                "module tree that has not been setup"
            )
        for m in self.module_names():
            loader = self.load_module(m, parse_error_handler=util.ignore_error)
            if all_versions:
                for v in loader.available_versions():
                    version_loader = self.load_module(
                        m, v, parse_error_handler=util.ignore_error
                    )
                    yield version_loader.module
            else:
                yield loader.module

    def can_setup(self, name):
        """Return True if the root directory of this tree can be setup"""
        # Root must exist, be writeable, and be completely empty.
        return (
            self.exists()
            and os.path.exists(self.root_dir)
            and os.access(self.root_dir, os.W_OK)
            and not len(os.listdir(self.root_dir))
        )

    def setup(self, name):
        """Set up the module root tree."""
        if not self.can_setup(name):
            raise ValueError(
                "Module tree must be set up in an empty, " "writeable directory"
            )
        os.makedirs(str(self.modulefile_dir()))
        os.makedirs(str(self.module_dir()))
        # Write the master modulefile with the tree root interpolated in.
        f = open(self._master_module_file_name(name), "w")
        f.write(_modulefile_template % self.root_dir)
        f.close()

    def init_module(self, module, overwrite=False):
        """
        Create a module, throwing an exception if any files are in
        the way of the module (unless overwrite=True, which clears them).

        :return: a ModuleBuilder used to build the module.
        """
        builder = ModuleBuilder(self, module)
        if not builder.clean():
            if overwrite:
                builder.clear()
            else:
                raise ValueError(
                    f"Some files exist in the module tree " f"where {module} should be."
                )
        builder.build()
        return builder

    def shared_module(self, module, version, error_handler=util.raise_value_error):
        """
        Get the module object for a shared module, if it exists.

        :param module: a module name
        :param version: a module version
        :param error_handler: a callback handler of an error if the module parsing fails
        :return: a Module object if a shared module exists for this module, otherwise None.
        """
        loader = ModuleLoader(self, module, version)
        if loader.shared_exists():
            loader.load(force_shared=True, error_handler=error_handler)
            return loader.module
        else:
            return None

    def module_clean(self, module):
        """
        Return True if nothing is in place where a module would be initialized.
        """
        builder = ModuleBuilder(self, module)
        return builder.clean()

    def module_exists(self, name, version=None):
        """
        Check for the existence of a valid module

        :param name: the name of the module
        :param version: a version number
        :return: True if the module is found.
        """
        loader = ModuleLoader(self, name, version)
        return loader.valid()

    def load_module(
        self, name, version=None, parse_error_handler=util.raise_value_error
    ):
        """
        Locate and parse the module from the filesystem identified by the
        given name and version.

        :param name: the name of the module
        :param version: the version of the module. if none is provided, the
            latest is loaded
        :param parse_error_handler: a function which handles parse error
            messages. If none is provided, an exception is raised.
        :return: a ModuleLoader used to load the module.
        """
        loader = ModuleLoader(self, name, version)
        if not loader.valid():
            raise ValueError(
                f"Module {name}-{version} does not appear to "
                f"be a valid module in the tree {self.root_dir}"
            )
        loader.load(error_handler=parse_error_handler)
        return loader
class ModuleLocation(metaclass=ABCMeta):
    """Resolves module file locations relative to a module tree"""

    @abstractmethod
    def __init__(self, module_tree):
        self.module_tree = module_tree
        # Subclasses populate this with a Module instance.
        self.module = None

    @abstractmethod
    def category_name(self):
        raise NotImplementedError

    @abstractmethod
    def shared(self):
        raise NotImplementedError

    @abstractmethod
    def name(self):
        raise NotImplementedError

    @abstractmethod
    def version(self):
        raise NotImplementedError

    def available_versions(self):
        """All version-shaped subdirectories of this module's base dir."""
        return [v for v in os.listdir(self.module_base()) if util.valid_version(v)]

    def moduledotfile_path(self):
        """Path of the .modulefile fragment (shared or per-version)."""
        base = self.module_base()
        if self.shared():
            return os.path.join(base, ".modulefile")
        else:
            return os.path.join(base, self.version(), ".modulefile")

    def shared_moduledotfile_path(self):
        """Path of the shared .modulefile fragment, regardless of mode."""
        return os.path.join(self.module_base(), ".modulefile")

    def module_base(self):
        """
        :return: The path to the base of the module without the version
        """
        return os.path.join(self.module_tree.root_dir, self.name())

    def module_path(self):
        """Path of this specific version's directory."""
        return os.path.join(self.module_base(), self.version())

    def modulefile_base(self):
        """Directory holding this module's per-version modulefile links."""
        return os.path.join(
            self.module_tree.modulefile_dir(), self.category_name(), self.name()
        )

    def modulefile_path(self):
        """Path of this version's modulefile symlink."""
        return os.path.join(self.modulefile_base(), self.version())

    def clean(self):
        """Return false if files exist where the module resolves to. Note this
        does not imply validity or readability"""
        return not os.path.exists(self.module_path()) and not os.path.exists(
            self.modulefile_path()
        )

    def valid(self):
        # NOTE(review): os.readlink raises OSError when modulefile_path()
        # exists but is not a symlink -- confirm callers only reach this
        # on trees created by this tool (build() always makes a symlink).
        return (
            util.writeable_dir(self.module_base())
            and self.version() is not None
            and util.writeable_dir(self.module_path())
            and os.path.exists(self.moduledotfile_path())
            and os.readlink(self.modulefile_path())
            == self.module_tree.master_module_file()
        )

    def path_exists(self, path):
        """Return true if the path that the path object implies already exists."""
        return os.path.lexists(path.resolve(self.module_path()))

    def add_path(self, source, path_obj, link=True):
        """Copy or link the contents of the source path to the path implied
        in the destination path object."""
        dest = path_obj.resolve(self.module_path())
        # Symlink by default; otherwise copy the whole tree.
        cp = os.symlink if link else shutil.copytree
        cp(os.path.abspath(source), dest)
        self.module.paths.append(path_obj)

    def remove_path(self, path_obj):
        """Delete the path on disk and drop it from the module metadata."""
        loc = path_obj.resolve(self.module_path())
        # Unlink symlinks, recursively remove real directories.
        rm = os.unlink if os.path.islink(loc) else shutil.rmtree
        rm(path_obj.resolve(self.module_path()))
        self.module.remove_path(path_obj)

    def save_module_file(self):
        """Write the module's dot-file to disk; requires a loaded module."""
        if self.module is None:
            raise RuntimeError("Cannot save unloaded module")
        with open(self.moduledotfile_path(), "w") as f:
            f.write(self.module.dump())

    def clear(self):
        """Remove this version from disk; if it was the last version, also
        remove the module base and modulefile directories."""
        if os.path.exists(self.modulefile_path()):
            os.unlink(self.modulefile_path())
        shutil.rmtree(self.module_path(), ignore_errors=True)
        if len(self.available_versions()) == 0:
            shutil.rmtree(self.module_base())
            shutil.rmtree(self.modulefile_base())
class ModuleBuilder(ModuleLocation):
    """Resolves paths for -- and materializes -- a module that is about to
    be written into the tree, taking its identity from a Module object."""

    def __init__(self, module_tree, module):
        super(ModuleBuilder, self).__init__(module_tree)
        self.module = module

    def category_name(self):
        """The module's own category, falling back to the tree name."""
        if self.module.category:
            return self.module.category
        return self.module_tree.name

    def shared(self):
        """Whether the module file is shared across versions."""
        return self.module.shared

    def name(self):
        """Name of the module being built."""
        return self.module.name

    def version(self):
        """Version of the module being built."""
        return self.module.version

    def build(self):
        """Create the on-disk layout: the modulefile symlink, the module
        version directory, and the module dot-file."""
        modulefile = self.modulefile_path()
        os.makedirs(os.path.dirname(modulefile), exist_ok=True)
        os.symlink(self.module_tree.master_module_file(), modulefile)
        os.makedirs(self.module_path())
        self.save_module_file()
class ModuleLoader(ModuleLocation):
    """A module loader class."""

    def __init__(self, module_tree, name, version=None):
        """
        Loads a module. If no version is specified, the latest version is used.

        :param module_tree: a ModuleTree object
        :param name: The name of the module
        :param version: The version of the module
        """
        super(ModuleLoader, self).__init__(module_tree)
        self._name = name
        self._version = version

    def category_name(self):
        # The category is recovered from where the modulefile symlink
        # lives: modulefile/<category>/<name>.
        files = glob(os.path.join(self.module_tree.modulefile_dir(), "*", self.name()))
        # NOTE(review): assumes at least one match; IndexError if the
        # module has no modulefile entry -- confirm callers check valid().
        return os.path.basename(os.path.dirname(files[0]))

    def shared(self):
        # If no per-version dot-file exists, the shared one is in effect.
        return not os.path.exists(
            os.path.join(
                self.module_tree.root_dir, self.name(), self.version(), ".modulefile"
            )
        )

    def shared_exists(self):
        """Return True if a shared .modulefile fragment is on disk."""
        return os.path.exists(self.shared_moduledotfile_path())

    def name(self):
        return self._name

    def version(self):
        # With no explicit version, resolve to the highest available one.
        if self._version is None:
            available_versions = self.available_versions()
            if len(available_versions) == 0:
                raise ValueError(f"No versions found for module {self.name()}")
            return max(available_versions, key=util.version_key)
        else:
            return self._version

    def load(self, force_shared=False, error_handler=util.raise_value_error):
        """Parse the dot-file into self.module, optionally forcing the
        shared fragment even when a per-version one exists."""
        self.module = Module.from_file(
            self.moduledotfile_path(),
            self.module_tree,
            self.name(),
            self.version(),
            force_shared or self.shared(),
            self.category_name(),
            error_handler,
        )
class Path:
    """One path-manipulation directive of a module file.

    Holds an operation (e.g. "prepend-path"), an environment variable
    name, and a path expressed relative to the "$basedir" placeholder.
    """

    def __init__(self, path, operation="prepend-path", name="PATH"):
        self.operation = operation
        self.name = name
        if "$basedir" in path:
            # Already expressed relative to the module base; keep as-is.
            self.path = path
        else:
            # Re-root the final path component under the $basedir placeholder.
            leaf = os.path.basename(path.rstrip("/"))
            self.path = os.path.join("$basedir", leaf)

    def __repr__(self):
        return f"{self.operation} {self.name} {self.path}"

    def resolve(self, basedir):
        """Return the path with the $basedir placeholder expanded."""
        return self.path.replace("$basedir", basedir)
class Module:
    """In-memory representation of one module version: identity, metadata,
    path directives and free-form extra modulefile commands."""

    def __init__(
        self,
        root,
        name,
        version,
        maintainer="no_maintainer",
        helptext="",
        description="",
        extra_vars=None,
        category=None,
        shared=True,
        extra_commands=None,
    ):
        """
        Initialize a module.

        :param root: the ModuleTree object under which this module exists
        :param name: the name of the module (corresponding to the tool name)
        :param version: the version of the module
        :param maintainer: name and email address of the maintainer
        :param helptext: the helptext for the module
        :param description: longer form description of the module
        :param extra_vars: a dict of extra variables to add
        :param category: a category for the module
        :param shared: whether the module file is shared among multiple versions
            or, if false, is specific to this version
        :param extra_commands: list of extra lines to add to the module file
        """
        # Fresh containers per instance (avoids shared mutable defaults).
        if extra_commands is None:
            extra_commands = []
        if extra_vars is None:
            extra_vars = {}
        self.root = root
        self.name = name
        self.version = version
        self.maintainer = maintainer
        self.helptext = helptext
        self.description = description
        self.category = category
        self.shared = shared
        self.extra_vars = extra_vars
        self.extra_commands = extra_commands
        self.paths = []

    @classmethod
    def from_file(
        cls,
        filename,
        root,
        name,
        version,
        shared,
        category=None,
        error_handler=util.raise_value_error,
    ):
        """parse a module file

        :param filename: the path to the module dotfile
        :param name: the package name for the module
        :param version: the version of the module
        :param shared: whether the moduledotfile is located at the shared location
        :param category: the category of the module
        :param error_handler: a function which handles any parse errors during
            parsing. If there is a parse error and a handler is provided, the
            line is not interpreted and the error handler is called. The
            default handler raises a value error with the given error message.
        :return: a new module parsed from the given file
        """
        module = cls(root, name, version, shared=shared, category=category)
        for line in open(filename):
            # Tokenize with shell quoting rules so quoted values survive.
            try:
                fields = shlex.split(line.strip())
            except ValueError as e:
                error_handler(f"parse error in (unknown): {e}")
                continue
            if len(fields) == 0:
                continue
            if fields[0] == "set":
                # NOTE(review): if the handler does not raise, execution
                # continues and fields[2] below may IndexError -- confirm
                # non-raising handlers are only used with valid files.
                if len(fields) < 3:
                    error_handler(f"Unparsable line in (unknown):\n{line}")
                # Well-known variables map to attributes; everything else
                # is preserved verbatim in extra_vars.
                if fields[1] == "MAINTAINER":
                    module.maintainer = fields[2]
                elif fields[1] == "HELPTEXT":
                    module.helptext = fields[2]
                elif fields[1] == "DESCRIPTION":
                    module.description = fields[2]
                else:
                    module.extra_vars.update({fields[1]: fields[2]})
            elif fields[0] == "prepend-path" or fields[0] == "append-path":
                module.paths.append(
                    Path(path=fields[2], operation=fields[0], name=fields[1])
                )
            else:
                # Unrecognized lines are kept as raw extra commands.
                module.extra_commands.append(line.strip())
        return module

    def remove_path(self, path_obj):
        """
        Remove the path from the module if the path_obj.path itself matches
        any of the paths in the module.

        :param path_obj: a path object to compare to
        :return:
        """
        self.paths = [p for p in self.paths if p.path != path_obj.path]

    def __repr__(self):
        return f"{self.name}-{self.version}"

    def dump(self):
        """Dump the module file as a string"""
        text = (
            f"""set MAINTAINER "{self.maintainer}"
set HELPTEXT "{self.helptext}"
set DESCRIPTION "{self.description}"\n"""
            + "\n".join([f'set {k} "{v}"' for k, v in self.extra_vars.items()])
            + "\n"
            + "\n".join(str(p) for p in self.paths)
            + "\n"
            + "\n".join(self.extra_commands)
        )
        return text
| StarcoderdataPython |
1751524 | <reponame>lcgong/dpillars<filename>domainics/json.py
# -*- coding: utf-8 -*-
import json
import datetime
from .domobj import DSetBase, DObject
from .db.dtable import dsequence
from decimal import Decimal
def loads(s):
    """Deserialize the JSON document *s* into a Python object."""
    return json.loads(s)
def dumps(obj):
    """Serialize *obj* to a JSON string using the domain-aware encoder."""
    return json.dumps(obj, cls=DefaultJSONEncoder)
class DefaultJSONEncoder(json.JSONEncoder):
    """JSON encoder aware of dates, decimals and domain objects."""

    def default(self, obj):
        # Dates and datetimes serialize to their ISO-8601 text form.
        if isinstance(obj, (datetime.date, datetime.datetime)):
            return obj.isoformat()
        # Decimals degrade to floats (keeps plain JSON numbers).
        if isinstance(obj, (Decimal)):
            return float(obj)
        # Domain objects know how to express themselves as JSON data.
        if hasattr(obj, '__json_object__'):
            return obj.__json_object__()
        return super(DefaultJSONEncoder, self).default(obj)
| StarcoderdataPython |
1797234 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""create_gt_txt_from_mat.py"""
import os
import argparse
import tqdm
import numpy as np
from scipy.io import loadmat
from cython_bbox import bbox_overlaps
# Maps a WIDER FACE event-id prefix (the text before the first '_' in an
# image name) to its event directory name on disk.
# NOTE(review): key '60' is absent while '61' is present -- confirm this is
# intentional for the dataset split being processed.
_MAP = {
    '0': '0--Parade',
    '1': '1--Handshaking',
    '2': '2--Demonstration',
    '3': '3--Riot',
    '4': '4--Dancing',
    '5': '5--Car_Accident',
    '6': '6--Funeral',
    '7': '7--Cheering',
    '8': '8--Election_Campain',
    '9': '9--Press_Conference',
    '10': '10--People_Marching',
    '11': '11--Meeting',
    '12': '12--Group',
    '13': '13--Interview',
    '14': '14--Traffic',
    '15': '15--Stock_Market',
    '16': '16--Award_Ceremony',
    '17': '17--Ceremony',
    '18': '18--Concerts',
    '19': '19--Couple',
    '20': '20--Family_Group',
    '21': '21--Festival',
    '22': '22--Picnic',
    '23': '23--Shoppers',
    '24': '24--Soldier_Firing',
    '25': '25--Soldier_Patrol',
    '26': '26--Soldier_Drilling',
    '27': '27--Spa',
    '28': '28--Sports_Fan',
    '29': '29--Students_Schoolkids',
    '30': '30--Surgeons',
    '31': '31--Waiter_Waitress',
    '32': '32--Worker_Laborer',
    '33': '33--Running',
    '34': '34--Baseball',
    '35': '35--Basketball',
    '36': '36--Football',
    '37': '37--Soccer',
    '38': '38--Tennis',
    '39': '39--Ice_Skating',
    '40': '40--Gymnastics',
    '41': '41--Swimming',
    '42': '42--Car_Racing',
    '43': '43--Row_Boat',
    '44': '44--Aerobics',
    '45': '45--Balloonist',
    '46': '46--Jockey',
    '47': '47--Matador_Bullfighter',
    '48': '48--Parachutist_Paratrooper',
    '49': '49--Greeting',
    '50': '50--Celebration_Or_Party',
    '51': '51--Dresses',
    '52': '52--Photographers',
    '53': '53--Raid',
    '54': '54--Rescue',
    '55': '55--Sports_Coach_Trainer',
    '56': '56--Voter',
    '57': '57--Angler',
    '58': '58--Hockey',
    '59': '59--people--driving--car',
    '61': '61--Street_Battle'
}
def get_gt_boxes(gt_dir):
    """Load WIDER FACE validation ground truth from .mat files.

    gt_dir must contain wider_face_val.mat, wider_easy_val.mat,
    wider_medium_val.mat and wider_hard_val.mat.
    """
    def _load(name):
        return loadmat(os.path.join(gt_dir, name))

    gt_mat = _load('wider_face_val.mat')
    hard_mat = _load('wider_hard_val.mat')
    medium_mat = _load('wider_medium_val.mat')
    easy_mat = _load('wider_easy_val.mat')
    return (gt_mat['face_bbx_list'],
            gt_mat['event_list'],
            gt_mat['file_list'],
            hard_mat['gt_list'],
            medium_mat['gt_list'],
            easy_mat['gt_list'])
def norm_score(pred):
    """Min-max normalize detection scores in place.

    Args:
        pred (dict): nested mapping {event: {image: np.ndarray}} where each
            array has shape (N, 5) and the last column holds the raw score.
            (Values must be numpy arrays -- the slicing below requires it;
            TODO confirm all callers build pred this way.)
    """
    max_score = 0
    min_score = 1
    for _, k in pred.items():
        for _, v in k.items():
            # Bugfix: `if v:` on a multi-row numpy array raises
            # "truth value of an array ... is ambiguous"; test
            # emptiness explicitly instead.
            if v.size == 0:
                continue
            _min = np.min(v[:, -1])
            _max = np.max(v[:, -1])
            max_score = max(_max, max_score)
            min_score = min(_min, min_score)
    # NOTE(review): if all scores are identical, diff is 0 and the
    # division below divides by zero (as in the original) -- confirm
    # inputs always span more than one score value.
    diff = max_score - min_score
    for _, k in pred.items():
        for _, v in k.items():
            if v.size == 0:
                continue
            v[:, -1] = (v[:, -1] - min_score) / diff
def image_eval(pred, gt, ignore, iou_thresh):
    """ single image evaluation
    pred: Nx5 array of [x, y, w, h, score] detections
    gt: Nx4 array of [x, y, w, h] ground-truth boxes
    ignore: per-gt-box flag; 0 marks boxes excluded from scoring
    iou_thresh: minimum IoU for a detection to match a gt box
    """
    # Work on copies: the corner-form conversion below mutates in place.
    _pred = pred.copy()
    _gt = gt.copy()
    pred_recall = np.zeros(_pred.shape[0])
    recall_list = np.zeros(_gt.shape[0])
    proposal_list = np.ones(_pred.shape[0])
    # Convert [x, y, w, h] to [x1, y1, x2, y2] corner form.
    _pred[:, 2] = _pred[:, 2] + _pred[:, 0]
    _pred[:, 3] = _pred[:, 3] + _pred[:, 1]
    _gt[:, 2] = _gt[:, 2] + _gt[:, 0]
    _gt[:, 3] = _gt[:, 3] + _gt[:, 1]
    # Pairwise IoU between every prediction and every gt box (cython_bbox).
    # NOTE(review): predictions are assumed sorted by descending score so
    # that prefix counts are meaningful -- confirm callers sort first.
    overlaps = bbox_overlaps(_pred[:, :4], _gt)
    for h in range(_pred.shape[0]):
        gt_overlap = overlaps[h]
        max_overlap, max_idx = gt_overlap.max(), gt_overlap.argmax()
        if max_overlap >= iou_thresh:
            if ignore[max_idx] == 0:
                # Best match is an "ignored" gt box: exclude both the gt
                # box and this proposal from scoring.
                recall_list[max_idx] = -1
                proposal_list[h] = -1
            elif recall_list[max_idx] == 0:
                # First time this gt box is recalled.
                recall_list[max_idx] = 1
        # Cumulative number of recalled gt boxes after the first h+1
        # predictions.
        r_keep_index = np.where(recall_list == 1)[0]
        pred_recall[h] = len(r_keep_index)
    return pred_recall, proposal_list
def img_pr_info(thresh_num, pred_info, proposal_list, pred_recall):
    """Accumulate per-image proposal/recall counts over score thresholds.

    Args:
        thresh_num (int): number of evenly spaced score thresholds.
        pred_info (np.ndarray): (N, 5) detections with the score in
            column 4, assumed sorted by descending score.
        proposal_list (np.ndarray): per-detection flag; 1 marks proposals
            that count towards precision.
        pred_recall (np.ndarray): cumulative recall counts from image_eval.

    Returns:
        np.ndarray: (thresh_num, 2) array; column 0 is the number of
        counted proposals above each threshold, column 1 the recall count.
    """
    pr_info = np.zeros((thresh_num, 2)).astype('float')
    for t in range(thresh_num):
        thresh = 1 - (t + 1) / thresh_num
        r_index = np.where(pred_info[:, 4] >= thresh)[0]
        # Bugfix: `if r_index:` on a numpy index array raises
        # "truth value of an array ... is ambiguous" as soon as more
        # than one prediction passes the threshold; check the element
        # count instead.
        if r_index.size > 0:
            # Last (lowest-scoring) prediction above the threshold.
            r_index = r_index[-1]
            p_index = np.where(proposal_list[:r_index + 1] == 1)[0]
            pr_info[t, 0] = len(p_index)
            pr_info[t, 1] = pred_recall[r_index]
        else:
            pr_info[t, 0] = 0
            pr_info[t, 1] = 0
    return pr_info
def dataset_pr_info(thresh_num, pr_curve, count_face):
    """Turn accumulated counts into (precision, recall) rows.

    pr_curve[:, 0] holds proposal counts and pr_curve[:, 1] matched-face
    counts per threshold; count_face is the dataset-wide number of faces.
    """
    curve = np.zeros((thresh_num, 2))
    for row in range(thresh_num):
        num_proposals, num_matched = pr_curve[row, 0], pr_curve[row, 1]
        curve[row, 0] = num_matched / num_proposals  # precision
        curve[row, 1] = num_matched / count_face     # recall
    return curve
def voc_ap(rec, prec):
    """Compute VOC-style average precision from recall/precision arrays.

    Appends sentinel points, takes the upper envelope of the precision
    curve, and integrates precision over the recall segments where the
    recall value actually moves.
    """
    # Sentinel points at recall 0 and 1.
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    # Make precision monotonically non-increasing (upper envelope),
    # sweeping right to left.
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # Indices where recall changes value; only those segments add area.
    change = np.where(mrec[1:] != mrec[:-1])[0]
    # Sum of (delta recall) * precision over the changing segments.
    return np.sum((mrec[change + 1] - mrec[change]) * mpre[change + 1])
def evaluation(pred, gt_path, iou_thresh=0.5):
    """
    evaluation
    """
    # NOTE(review): despite its name, this function only rewrites the
    # ground-truth .mat annotations as per-difficulty text files; the
    # `pred` and `iou_thresh` arguments are unused here.
    facebox_list, event_list, file_list, hard_gt_list, medium_gt_list, easy_gt_list = get_gt_boxes(gt_path)
    event_num = len(event_list)
    settings = ['easy', 'medium', 'hard']
    setting_gts = [easy_gt_list, medium_gt_list, hard_gt_list]
    for setting_id in range(3):
        # different setting
        gt_list = setting_gts[setting_id]
        # [hard, medium, easy]
        pbar = tqdm.tqdm(range(event_num))
        outputTxtDir = './bbx_gt_txt/'
        if not os.path.exists(outputTxtDir):
            os.makedirs(outputTxtDir)
        # One output file per difficulty; remove any previous run's file
        # up front because records are appended below.
        outputTxtFile = outputTxtDir + settings[setting_id] + '.txt'
        if os.path.exists(outputTxtFile):
            os.remove(outputTxtFile)
        for i in pbar:
            pbar.set_description('Processing {}'.format(settings[setting_id]))
            img_list = file_list[i][0]
            sub_gt_list = gt_list[i][0]
            gt_bbx_list = facebox_list[i][0]
            for j in range(len(img_list)):
                gt_boxes = gt_bbx_list[j][0]
                # Indices (1-based, MATLAB-style) of the boxes kept for
                # this difficulty setting.
                keep_index = sub_gt_list[j][0]
                imgName = img_list[j][0][0]
                imgPath = _MAP[imgName.split('_')[0]] + '/' + imgName + '.jpg'
                faceNum = len(keep_index)
                with open(outputTxtFile, 'a') as txtFile:
                    txtFile.write(imgPath + '\n')
                    txtFile.write(str(faceNum) + '\n')
                    if faceNum == 0:
                        # Keep the record format aligned: an extra 0 line
                        # stands in for the empty box list.
                        txtFile.write(str(faceNum) + '\n')
                    for index in keep_index:
                        # Convert the MATLAB 1-based index to 0-based.
                        curI = index[0] - 1
                        bbox = gt_boxes[curI]
                        txtFile.write('%d %d %d %d\n' % (bbox[0], bbox[1], bbox[2], bbox[3]))
                # Redundant: the with-block above already closed the file.
                txtFile.close()
if __name__ == '__main__':
    # CLI entry point: -p/--pred is accepted for interface compatibility
    # but unused by evaluation(); -g/--gt points at the directory holding
    # the ground-truth .mat files.
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--pred')
    parser.add_argument('-g', '--gt', default='./eval_tools/ground_truth/')
    args = parser.parse_args()
    evaluation(args.pred, args.gt)
| StarcoderdataPython |
3217213 | '''
'''
import keras
import tensorflow as tf
from keras.models import Model
from keras import backend as K
from keras.layers import Input, merge, Conv2D, ZeroPadding2D, UpSampling2D, Dense, concatenate, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D, GlobalAveragePooling2D, MaxPooling2D
from keras.layers.core import Dense, Dropout, Activation
from keras.layers import BatchNormalization, Dropout, Flatten, Lambda
from keras.layers.advanced_activations import ELU, LeakyReLU
from keras.optimizers import Adam, RMSprop, SGD
from keras.regularizers import l2
from keras.layers.noise import GaussianDropout
import numpy as np
# Smoothing term used by the dice helpers below (dice_coef re-declares it
# locally).
smooth = 1.
# Default dropout probability and activation name for model builders.
dropout_rate = 0.5
act = "relu"
def mean_iou(y_true, y_pred):
    """Mean IoU metric averaged over prediction thresholds 0.5..0.95.

    Uses tf.metrics.mean_iou (TF1), whose running counters are TF local
    variables; the control dependency on up_opt forces the counter update
    to run before the score is read each step.
    """
    prec = []
    for t in np.arange(0.5, 1.0, 0.05):
        # Binarize predictions at the current threshold.
        y_pred_ = tf.to_int32(y_pred > t)
        score, up_opt = tf.metrics.mean_iou(y_true, y_pred_, 2)
        # Re-initialize the metric's local variables for this threshold.
        K.get_session().run(tf.local_variables_initializer())
        with tf.control_dependencies([up_opt]):
            score = tf.identity(score)
        prec.append(score)
    return K.mean(K.stack(prec), axis=0)
# Custom loss function
def dice_coef(y_true, y_pred):
    """Soft Dice coefficient between two (probability) masks, in [0, 1]."""
    eps = 1.  # smoothing: avoids 0/0 when both masks are empty
    yt = K.flatten(y_true)
    yp = K.flatten(y_pred)
    overlap = K.sum(yt * yp)
    return (2. * overlap + eps) / (K.sum(yt) + K.sum(yp) + eps)
def bce_dice_loss(y_true, y_pred):
    # Combined loss: half binary cross-entropy minus the differentiable Dice
    # coefficient, so that maximising Dice overlap lowers the loss.
    return 0.5 * keras.losses.binary_crossentropy(y_true, y_pred) - dice_coef(y_true, y_pred)
# Evaluation metric: IoU
def compute_iou(img1, img2):
    """Per-image overlap score (0-100) for two batches of masks.

    NOTE(review): despite its name, this returns the Dice coefficient as a
    percentage, 2*|A&B| / (|A|+|B|) * 100, with 100 for a pair of empty
    masks -- it is not intersection-over-union. Kept as-is for callers.
    """
    img1 = np.array(img1)
    img2 = np.array(img2)
    if img1.shape[0] != img2.shape[0]:
        raise ValueError("Shape mismatch: the number of images mismatch.")
    scores = np.zeros((img1.shape[0],), dtype=np.float32)
    for idx in range(img1.shape[0]):
        # Binarise at 0.5 and drop singleton dimensions.
        mask_a = np.squeeze(img1[idx] > 0.5)
        mask_b = np.squeeze(img2[idx] > 0.5)
        if mask_a.shape != mask_b.shape:
            raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
        overlap = np.logical_and(mask_a, mask_b)
        total = mask_a.sum() + mask_b.sum()
        # Both masks empty: define the score as a perfect 100.
        scores[idx] = 100 if total == 0 else 2. * overlap.sum() * 100.0 / total
    return scores
# Evaluation metric: Dice
def compute_dice(im1, im2, empty_score=1.0):
    """
    Computes the Dice coefficient, a measure of set similarity.
    Parameters
    ----------
    im1 : array-like, bool
        Any array of arbitrary size. If not boolean, will be converted.
    im2 : array-like, bool
        Any other array of identical size. If not boolean, will be converted.
    Returns
    -------
    dice : float
        Dice coefficient as a float on range [0,1].
        Maximum similarity = 1
        No similarity = 0
        Both are empty (sum eq to zero) = empty_score
    Raises
    ------
    ValueError
        If the two inputs do not have identical shapes.
    Notes
    -----
    The order of inputs for `dice` is irrelevant. The result will be
    identical if `im1` and `im2` are switched.
    """
    # np.bool was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # bool is the supported equivalent and behaves identically here.
    im1 = np.asarray(im1).astype(bool)
    im2 = np.asarray(im2).astype(bool)
    if im1.shape != im2.shape:
        raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
    im_sum = im1.sum() + im2.sum()
    if im_sum == 0:
        return empty_score
    # Compute Dice coefficient
    intersection = np.logical_and(im1, im2)
    return 2. * intersection.sum() / im_sum
########################################
# 2D Standard
########################################
def standard_unit(input_tensor, stage, nb_filter, kernel_size=3):
    """Two Conv2D+Dropout layers: the basic building block of every net here."""
    # Settings shared by both convolutions in the block.
    conv_kwargs = dict(
        activation=act,
        kernel_initializer='he_normal',
        padding='same',
        kernel_regularizer=l2(1e-4),
    )
    x = Conv2D(nb_filter, (kernel_size, kernel_size),
               name='conv' + stage + '_1', **conv_kwargs)(input_tensor)
    x = Dropout(dropout_rate, name='dp' + stage + '_1')(x)
    x = Conv2D(nb_filter, (kernel_size, kernel_size),
               name='conv' + stage + '_2', **conv_kwargs)(x)
    x = Dropout(dropout_rate, name='dp' + stage + '_2')(x)
    return x
########################################
"""
Standard U-Net [Ronneberger et.al, 2015]
Total params: 7,759,521
"""
def U_Net(img_rows, img_cols, color_type=1, num_class=1):
    """Build the standard U-Net [Ronneberger et al., 2015].

    Args:
        img_rows, img_cols: input spatial dimensions.
        color_type: number of input channels (default 1 = grayscale).
        num_class: number of output mask channels (sigmoid per channel).

    Returns:
        An uncompiled Keras Model mapping an image to a segmentation mask.
    """
    # Filters per resolution level; encoder descends this list, decoder ascends.
    nb_filter = [32,64,128,256,512]
    # Handle Dimension Ordering for different backends.
    # NOTE(review): K.image_dim_ordering() and Model(input=..., output=...)
    # are legacy Keras-1.x APIs, used consistently throughout this file.
    global bn_axis
    if K.image_dim_ordering() == 'tf':
        bn_axis = 3
        img_input = Input(shape=(img_rows, img_cols, color_type), name='main_input')
    else:
        bn_axis = 1
        img_input = Input(shape=(color_type, img_rows, img_cols), name='main_input')
    # Encoder: four conv blocks, each followed by 2x2 max-pooling.
    conv1_1 = standard_unit(img_input, stage='11', nb_filter=nb_filter[0])
    pool1 = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(conv1_1)
    conv2_1 = standard_unit(pool1, stage='21', nb_filter=nb_filter[1])
    pool2 = MaxPooling2D((2, 2), strides=(2, 2), name='pool2')(conv2_1)
    conv3_1 = standard_unit(pool2, stage='31', nb_filter=nb_filter[2])
    pool3 = MaxPooling2D((2, 2), strides=(2, 2), name='pool3')(conv3_1)
    conv4_1 = standard_unit(pool3, stage='41', nb_filter=nb_filter[3])
    pool4 = MaxPooling2D((2, 2), strides=(2, 2), name='pool4')(conv4_1)
    # Bottleneck.
    conv5_1 = standard_unit(pool4, stage='51', nb_filter=nb_filter[4])
    # Decoder: transposed-conv upsampling + skip connection + conv block.
    up4_2 = Conv2DTranspose(nb_filter[3], (2, 2), strides=(2, 2), name='up42', padding='same')(conv5_1)
    conv4_2 = concatenate([up4_2, conv4_1], name='merge42', axis=bn_axis)
    conv4_2 = standard_unit(conv4_2, stage='42', nb_filter=nb_filter[3])
    up3_3 = Conv2DTranspose(nb_filter[2], (2, 2), strides=(2, 2), name='up33', padding='same')(conv4_2)
    conv3_3 = concatenate([up3_3, conv3_1], name='merge33', axis=bn_axis)
    conv3_3 = standard_unit(conv3_3, stage='33', nb_filter=nb_filter[2])
    up2_4 = Conv2DTranspose(nb_filter[1], (2, 2), strides=(2, 2), name='up24', padding='same')(conv3_3)
    conv2_4 = concatenate([up2_4, conv2_1], name='merge24', axis=bn_axis)
    conv2_4 = standard_unit(conv2_4, stage='24', nb_filter=nb_filter[1])
    up1_5 = Conv2DTranspose(nb_filter[0], (2, 2), strides=(2, 2), name='up15', padding='same')(conv2_4)
    conv1_5 = concatenate([up1_5, conv1_1], name='merge15', axis=bn_axis)
    conv1_5 = standard_unit(conv1_5, stage='15', nb_filter=nb_filter[0])
    # 1x1 sigmoid convolution produces the per-pixel mask.
    unet_output = Conv2D(num_class, (1, 1), activation='sigmoid', name='output', kernel_initializer = 'he_normal', padding='same', kernel_regularizer=l2(1e-4))(conv1_5)
    model = Model(input=img_input, output=unet_output)
    return model
"""
wU-Net for comparison
Total params: 9,282,246
"""
def wU_Net(img_rows, img_cols, color_type=1, num_class=1):
    """Build a "wide" U-Net: same topology as U_Net, wider filter counts.

    Used as a parameter-matched baseline against Nest_Net (UNet++).

    Args:
        img_rows, img_cols: input spatial dimensions.
        color_type: number of input channels (default 1 = grayscale).
        num_class: number of output mask channels (sigmoid per channel).

    Returns:
        An uncompiled Keras Model mapping an image to a segmentation mask.
    """
    # nb_filter = [32,64,128,256,512]
    # Widened filter counts so total params roughly match Nest_Net.
    nb_filter = [35,70,140,280,560]
    # Handle Dimension Ordering for different backends (legacy Keras-1.x API).
    global bn_axis
    if K.image_dim_ordering() == 'tf':
        bn_axis = 3
        img_input = Input(shape=(img_rows, img_cols, color_type), name='main_input')
    else:
        bn_axis = 1
        img_input = Input(shape=(color_type, img_rows, img_cols), name='main_input')
    # Encoder.
    conv1_1 = standard_unit(img_input, stage='11', nb_filter=nb_filter[0])
    pool1 = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(conv1_1)
    conv2_1 = standard_unit(pool1, stage='21', nb_filter=nb_filter[1])
    pool2 = MaxPooling2D((2, 2), strides=(2, 2), name='pool2')(conv2_1)
    conv3_1 = standard_unit(pool2, stage='31', nb_filter=nb_filter[2])
    pool3 = MaxPooling2D((2, 2), strides=(2, 2), name='pool3')(conv3_1)
    conv4_1 = standard_unit(pool3, stage='41', nb_filter=nb_filter[3])
    pool4 = MaxPooling2D((2, 2), strides=(2, 2), name='pool4')(conv4_1)
    # Bottleneck.
    conv5_1 = standard_unit(pool4, stage='51', nb_filter=nb_filter[4])
    # Decoder with skip connections.
    up4_2 = Conv2DTranspose(nb_filter[3], (2, 2), strides=(2, 2), name='up42', padding='same')(conv5_1)
    conv4_2 = concatenate([up4_2, conv4_1], name='merge42', axis=bn_axis)
    conv4_2 = standard_unit(conv4_2, stage='42', nb_filter=nb_filter[3])
    up3_3 = Conv2DTranspose(nb_filter[2], (2, 2), strides=(2, 2), name='up33', padding='same')(conv4_2)
    conv3_3 = concatenate([up3_3, conv3_1], name='merge33', axis=bn_axis)
    conv3_3 = standard_unit(conv3_3, stage='33', nb_filter=nb_filter[2])
    up2_4 = Conv2DTranspose(nb_filter[1], (2, 2), strides=(2, 2), name='up24', padding='same')(conv3_3)
    conv2_4 = concatenate([up2_4, conv2_1], name='merge24', axis=bn_axis)
    conv2_4 = standard_unit(conv2_4, stage='24', nb_filter=nb_filter[1])
    up1_5 = Conv2DTranspose(nb_filter[0], (2, 2), strides=(2, 2), name='up15', padding='same')(conv2_4)
    conv1_5 = concatenate([up1_5, conv1_1], name='merge15', axis=bn_axis)
    conv1_5 = standard_unit(conv1_5, stage='15', nb_filter=nb_filter[0])
    # 1x1 sigmoid convolution produces the per-pixel mask.
    unet_output = Conv2D(num_class, (1, 1), activation='sigmoid', name='output', kernel_initializer = 'he_normal', padding='same', kernel_regularizer=l2(1e-4))(conv1_5)
    model = Model(input=img_input, output=unet_output)
    return model
"""
Standard UNet++ [Zhou et.al, 2018]
Total params: 9,041,601
"""
def Nest_Net(img_rows, img_cols, color_type=1, num_class=1, deep_supervision=False):
    """Build UNet++ (nested U-Net) [Zhou et al., 2018].

    Node conv{i}_{j} sits at encoder depth i and dense-skip column j; each
    upsampled node is concatenated with ALL same-depth predecessors, forming
    the nested dense skip pathways that distinguish UNet++ from U-Net.

    Args:
        img_rows, img_cols: input spatial dimensions.
        color_type: number of input channels (default 1 = grayscale).
        num_class: number of output mask channels (sigmoid per channel).
        deep_supervision: if True, the model outputs the four intermediate
            masks (columns 2-5 at full resolution); otherwise only the final one.

    Returns:
        An uncompiled Keras Model (legacy Keras-1.x Model(input=, output=) API).
    """
    nb_filter = [32,64,128,256,512]
    # Handle Dimension Ordering for different backends.
    global bn_axis
    if K.image_dim_ordering() == 'tf':
        bn_axis = 3
        img_input = Input(shape=(img_rows, img_cols, color_type), name='main_input')
    else:
        bn_axis = 1
        img_input = Input(shape=(color_type, img_rows, img_cols), name='main_input')
    # Column 1 (encoder backbone) interleaved with the dense skip columns.
    conv1_1 = standard_unit(img_input, stage='11', nb_filter=nb_filter[0])
    pool1 = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(conv1_1)
    conv2_1 = standard_unit(pool1, stage='21', nb_filter=nb_filter[1])
    pool2 = MaxPooling2D((2, 2), strides=(2, 2), name='pool2')(conv2_1)
    # Node X(1,2): up from X(2,1), skip from X(1,1).
    up1_2 = Conv2DTranspose(nb_filter[0], (2, 2), strides=(2, 2), name='up12', padding='same')(conv2_1)
    conv1_2 = concatenate([up1_2, conv1_1], name='merge12', axis=bn_axis)
    conv1_2 = standard_unit(conv1_2, stage='12', nb_filter=nb_filter[0])
    conv3_1 = standard_unit(pool2, stage='31', nb_filter=nb_filter[2])
    pool3 = MaxPooling2D((2, 2), strides=(2, 2), name='pool3')(conv3_1)
    up2_2 = Conv2DTranspose(nb_filter[1], (2, 2), strides=(2, 2), name='up22', padding='same')(conv3_1)
    conv2_2 = concatenate([up2_2, conv2_1], name='merge22', axis=bn_axis)
    conv2_2 = standard_unit(conv2_2, stage='22', nb_filter=nb_filter[1])
    # Dense skip: X(1,3) sees X(1,1) AND X(1,2).
    up1_3 = Conv2DTranspose(nb_filter[0], (2, 2), strides=(2, 2), name='up13', padding='same')(conv2_2)
    conv1_3 = concatenate([up1_3, conv1_1, conv1_2], name='merge13', axis=bn_axis)
    conv1_3 = standard_unit(conv1_3, stage='13', nb_filter=nb_filter[0])
    conv4_1 = standard_unit(pool3, stage='41', nb_filter=nb_filter[3])
    pool4 = MaxPooling2D((2, 2), strides=(2, 2), name='pool4')(conv4_1)
    up3_2 = Conv2DTranspose(nb_filter[2], (2, 2), strides=(2, 2), name='up32', padding='same')(conv4_1)
    conv3_2 = concatenate([up3_2, conv3_1], name='merge32', axis=bn_axis)
    conv3_2 = standard_unit(conv3_2, stage='32', nb_filter=nb_filter[2])
    up2_3 = Conv2DTranspose(nb_filter[1], (2, 2), strides=(2, 2), name='up23', padding='same')(conv3_2)
    conv2_3 = concatenate([up2_3, conv2_1, conv2_2], name='merge23', axis=bn_axis)
    conv2_3 = standard_unit(conv2_3, stage='23', nb_filter=nb_filter[1])
    up1_4 = Conv2DTranspose(nb_filter[0], (2, 2), strides=(2, 2), name='up14', padding='same')(conv2_3)
    conv1_4 = concatenate([up1_4, conv1_1, conv1_2, conv1_3], name='merge14', axis=bn_axis)
    conv1_4 = standard_unit(conv1_4, stage='14', nb_filter=nb_filter[0])
    # Bottleneck and final decoder column.
    conv5_1 = standard_unit(pool4, stage='51', nb_filter=nb_filter[4])
    up4_2 = Conv2DTranspose(nb_filter[3], (2, 2), strides=(2, 2), name='up42', padding='same')(conv5_1)
    conv4_2 = concatenate([up4_2, conv4_1], name='merge42', axis=bn_axis)
    conv4_2 = standard_unit(conv4_2, stage='42', nb_filter=nb_filter[3])
    up3_3 = Conv2DTranspose(nb_filter[2], (2, 2), strides=(2, 2), name='up33', padding='same')(conv4_2)
    conv3_3 = concatenate([up3_3, conv3_1, conv3_2], name='merge33', axis=bn_axis)
    conv3_3 = standard_unit(conv3_3, stage='33', nb_filter=nb_filter[2])
    up2_4 = Conv2DTranspose(nb_filter[1], (2, 2), strides=(2, 2), name='up24', padding='same')(conv3_3)
    conv2_4 = concatenate([up2_4, conv2_1, conv2_2, conv2_3], name='merge24', axis=bn_axis)
    conv2_4 = standard_unit(conv2_4, stage='24', nb_filter=nb_filter[1])
    up1_5 = Conv2DTranspose(nb_filter[0], (2, 2), strides=(2, 2), name='up15', padding='same')(conv2_4)
    conv1_5 = concatenate([up1_5, conv1_1, conv1_2, conv1_3, conv1_4], name='merge15', axis=bn_axis)
    conv1_5 = standard_unit(conv1_5, stage='15', nb_filter=nb_filter[0])
    # One 1x1 sigmoid head per full-resolution column (for deep supervision).
    nestnet_output_1 = Conv2D(num_class, (1, 1), activation='sigmoid', name='output_1', kernel_initializer = 'he_normal', padding='same', kernel_regularizer=l2(1e-4))(conv1_2)
    nestnet_output_2 = Conv2D(num_class, (1, 1), activation='sigmoid', name='output_2', kernel_initializer = 'he_normal', padding='same', kernel_regularizer=l2(1e-4))(conv1_3)
    nestnet_output_3 = Conv2D(num_class, (1, 1), activation='sigmoid', name='output_3', kernel_initializer = 'he_normal', padding='same', kernel_regularizer=l2(1e-4))(conv1_4)
    nestnet_output_4 = Conv2D(num_class, (1, 1), activation='sigmoid', name='output_4', kernel_initializer = 'he_normal', padding='same', kernel_regularizer=l2(1e-4))(conv1_5)
    if deep_supervision:
        model = Model(input=img_input, output=[nestnet_output_1,
                                               nestnet_output_2,
                                               nestnet_output_3,
                                               nestnet_output_4])
    else:
        model = Model(input=img_input, output=[nestnet_output_4])
    return model
if __name__ == '__main__':
    # Smoke test: build each architecture at 96x96x1 and print its summary
    # (useful for comparing the parameter counts quoted in the docstrings).
    model = U_Net(96,96,1)
    model.summary()
    model = wU_Net(96,96,1)
    model.summary()
    model = Nest_Net(96,96,1)
    model.summary()
| StarcoderdataPython |
151772 | """
Given two sorted linked lists, merge them so that the resulting linked list is also sorted.
Consider two sorted linked lists and the merged list below them as an example. Click here to view the solution in C++, Java, JavaScript, and Ruby.
head1 -> 4 -> 8 -> 15 -> 19 -> null
head2 -> 7 -> 9 -> 10 -> 16 -> null
head1 -> 4 -> 7 -> 8 -> 9-> 10 -> 15 -> 16 -> 19 -> NULL
"""
class Node:
    """Singly linked list node; the first node doubles as the list handle.

    Bug fixes vs. the original: ``get()`` and ``erase()`` referenced a
    nonexistent ``self.head`` attribute and an undefined ``self.length()``
    method (evidently copied from a wrapper LinkedList class), so they
    always crashed with AttributeError. They now traverse from ``self``
    (index 0 == this node), and ``length()`` is provided.
    """

    def __init__(self, data = None):
        self.data = data   # payload stored in this node
        self.next = None   # next node in the chain, or None at the tail

    def insert(self, A):
        """Chain the values of A onto this node (overwrites any existing tail)."""
        node = self
        for data in A:
            node.next = Node(data)
            node = node.next

    def length(self):
        """Return the number of nodes from this node (inclusive) to the end."""
        count = 0
        node = self
        while node is not None:
            count += 1
            node = node.next
        return count

    def display(self):
        """Print all data values from this node to the tail as a list."""
        elements = []
        node = self
        while node is not None:
            elements.append(node.data)
            node = node.next
        print(elements)

    def get(self, index):
        """Return the data at position ``index`` (0 == this node), or None."""
        if index >= self.length():
            print ("ERROR: 'Get' index out of range!")
            return None
        node = self
        for _ in range(index):
            node = node.next
        return node.data

    def erase(self, index):
        """Remove the node at position ``index`` (0 == this node)."""
        if index >= self.length():
            print ("ERROR: 'Erase' Index out of range!")
            return
        if index == 0:
            # This object itself cannot be unlinked from outside, so remove
            # the head by copying the next node's contents into it.
            if self.next is None:
                self.data = None
            else:
                self.data = self.next.data
                self.next = self.next.next
            return
        prev = self
        for _ in range(index - 1):
            prev = prev.next
        prev.next = prev.next.next
def merge_sorted(head1, head2):
    """Merge two already-sorted linked lists in place; return the new head.

    O(m+n) time, O(1) extra space: nodes are relinked, never copied. The
    merge is stable (ties taken from the first list).
    """
    # An empty list merged with anything is just the other list.
    if head1 is None:
        return head2
    if head2 is None:
        return head1

    # The smaller first node becomes the merged head.
    if head1.data <= head2.data:
        merged_head, head1 = head1, head1.next
    else:
        merged_head, head2 = head2, head2.next

    tail = merged_head
    # Repeatedly splice the smaller current node onto the merged tail.
    while head1 is not None and head2 is not None:
        if head1.data <= head2.data:
            picked, head1 = head1, head1.next
        else:
            picked, head2 = head2, head2.next
        tail.next = picked
        tail = picked

    # Exactly one list may still have nodes; link the remainder wholesale.
    tail.next = head1 if head1 is not None else head2
    return merged_head
def create_linked_list(A):
    """Build a linked list from a sequence of values; return the head Node.

    Robustness fix: an empty sequence now returns None (the original raised
    IndexError on ``A[0]``).
    """
    if not A:
        return None
    node = Node(A[0])
    # Remaining values are chained after the head.
    node.insert(A[1:])
    return node
# --- Demo: build two sorted lists, merge them, and print each stage. ---
array1 = [2, 3, 5, 6]
linkedList1 = create_linked_list(array1)
print("Original1:")
linkedList1.display()
array2 = [1, 4, 10]
linkedList2 = create_linked_list(array2)
print("\nOriginal2:")
linkedList2.display()
new_head = merge_sorted(linkedList1, linkedList2)
print("\nMerged:")
new_head.display()
stop = True  # NOTE(review): unused flag -- looks like a leftover debugger breakpoint anchor
"""
Runtime Complexity: Linear, O(m+n) where m and n are lengths of both linked lists
Memory Complexity: Constant, O(1)
Maintain a head and a tail pointer on the merged linked list.
Then choose the head of the merged linked list by comparing the first node of both linked lists.
For all subsequent nodes in both lists, you choose the smaller current node, link it to the tail of the merged list,
and move the current pointer of that list one step forward.
Continue this while there are some remaining elements in both the lists. If there are still some elements in only one of the lists,
you link this remaining list to the tail of the merged list. Initially, the merged linked list is NULL.
Compare the value of the first two nodes and make the node with the smaller value the head node of the merged linked list.
In this example, it is 4 from head1. Since it’s the first and only node in the merged list, it will also be the tail. Then move head1 one step forward.
""" | StarcoderdataPython |
3267178 | <gh_stars>0
import logging
import pathlib
from django.test import SimpleTestCase
from bbapp.loader import Load, log as loaderlog
from bbapp.models import Player, Batting
log = logging.getLogger(__name__)
# Remember the root logger's effective level so setUp can restore it per test.
root_level = logging.getLogger().getEffectiveLevel()
class SimpleLoaderTest(SimpleTestCase):
    """Tests for bbapp.loader.Load: CSV file labelling and model-field mapping."""

    def setUp(self):
        # Restore both loggers to the root logger's effective level before each test.
        log.setLevel(root_level)
        loaderlog.setLevel(root_level)

    def test_label_files(self):
        log.debug('load files from directory')
        here = pathlib.Path(__file__)
        load = Load()
        load.find_and_label_files(
            here.parent / '../../../data/baseballdatabank-master'
        )
        log.debug('Labeled files: %s', load)
        # The Player model must be mapped to People.csv.
        player_mapping = next(m for m in load.mappings if m.typ == Player)
        self.assertEqual(
            'People.csv', player_mapping.file.name
        )
        self.assertIn('id', player_mapping.headerList,
                      'Expected player to use local map from playerID to id')
        # The Batting model must be mapped to Batting.csv.
        batting_mapping = next(m for m in load.mappings if m.typ == Batting)
        self.assertEqual(
            'Batting.csv', batting_mapping.file.name
        )

    def test_get_model_fields(self):
        load = Load()
        player_mapping = next(m for m in load.mappings if m.typ == Player)
        player_fields = player_mapping.fields
        log.debug('player fields: %s', player_fields)
        self.assertIn('id', player_fields,
                      'Expected field to be in player field list')
        self.assertIn('deathCountry', player_fields,
                      'Expected field to be in player field list')
        self.assertIn('bbrefID', player_fields,
                      'Expected field to be in player field list')
        self.assertNotIn('player_id', player_fields,
                         'Unexpected field in player field list')
        # Batting must expose its own key plus the foreign key to Player.
        batting_mapping = next(m for m in load.mappings if m.typ == Batting)
        batting_fields = batting_mapping.fields
        log.debug('batting fields: %s', batting_fields)
        self.assertIn('player_id', batting_fields,
                      'Expected field to be in batting field list')
        self.assertIn('id', batting_fields,
                      'Expected field to be in batting field list')

    def test_log_levels(self):
        log.info('hi')
| StarcoderdataPython |
176293 | #!/usr/bin/env python
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree.
import getpass
import json
import os
import socket
import StringIO
import sys
import tempfile
import unittest
import nuclide_server_manager
from nuclide_server_test_base import NuclideServerTestBase
from nuclide_server_manager import NuclideServerManager
WORK_DIR = os.path.dirname(os.path.realpath(__file__))  # directory containing this test file
TARGET_SCRIPT = os.path.join(WORK_DIR, 'nuclide_server_manager.py')  # script under test
class NuclideServerManagerTest(NuclideServerTestBase):
    """Integration tests for NuclideServerManager (Python 2 era: StringIO,
    assertEquals).

    Each test launches real server processes via nuclide_server_manager and
    inspects the JSON blob the manager prints to stdout.
    """

    def verify_key(self, text):
        # Sanity-check that *text* contains a PEM-encoded RSA private key.
        self.assertTrue('BEGIN RSA PRIVATE KEY' in text) # nolint
        self.assertTrue('END RSA PRIVATE KEY' in text)

    def verify_cert(self, text):
        # Sanity-check that *text* contains a PEM-encoded certificate.
        self.assertTrue('BEGIN CERTIFICATE' in text)
        self.assertTrue('END CERTIFICATE' in text)

    def start_nuclide_server_and_get_outut(self, args):
        """Start a server with *args* and return everything it printed.

        NOTE(review): "outut" is a typo for "output"; the name is kept
        because callers in this class use it.
        """
        parser = nuclide_server_manager.get_option_parser()
        options, _ = parser.parse_args(args)
        manager = NuclideServerManager(options)
        try:
            # Redirect stdout to a stream for capturing.
            original_stdout = sys.stdout
            sys.stdout = stdout_io = StringIO.StringIO()
            self.assertEquals(manager.start_nuclide(), 0)
            return stdout_io.getvalue()
        finally:
            # Restore stdout.
            sys.stdout = original_stdout

    def start_nuclide_server_and_verify_output(self, args, workspace, port, secure, version=None):
        """Start a server, parse its JSON output, verify the basics.

        Returns the (port, version, pid) reported by the server.
        """
        out = self.start_nuclide_server_and_get_outut(args)
        json_ret = json.loads(out)
        # Verify workspace gets resolved.
        self.assertEquals(os.path.realpath(workspace), json_ret['workspace'])
        if secure:
            # A secure server must report its key, certificate and CA chain.
            self.assertTrue('key' in json_ret)
            self.verify_key(json_ret['key'])
            self.assertTrue('cert' in json_ret)
            self.verify_cert(json_ret['cert'])
            self.assertTrue('ca' in json_ret)
            self.verify_cert(json_ret['ca'])
            hostname = '%s.nuclide.%s' % (getpass.getuser(), socket.gethostname())
            self.assertEquals(hostname, json_ret['hostname'])
        return json_ret['port'], json_ret['version'], json_ret['pid']

    def start_nuclide_server_twice_and_verify(
            self, workspace, port=None, secure=False, upgrade=False):
        """Start the server twice for one workspace; verify reuse vs. upgrade.

        With *upgrade* set, the mock package version is bumped between the
        two starts, so the second start must replace the first (new pid);
        otherwise the already-running server must be reused (same pid).
        """
        args = [TARGET_SCRIPT]
        if port is not None:
            args.append('-p')
            args.append(str(port))
        args.append('-w')
        args.append(workspace)
        # Set timeout
        args.append('-t')
        args.append('10')
        if secure:
            args.append('-d')
            # Send the test certs files to temp dir.
            args.append(tempfile.gettempdir())
        else:
            args.append('-k')
        # Suppress nohup logging.
        # See nuclide_server.py for details.
        args.append('-q')
        # Pick a random version to start with.
        version = 100
        # Generate version file for the mock.
        with open(NuclideServerManager.package_file, 'w') as f:
            json.dump({'version': '0.%s.0' % version}, f)
        # Get the port from the started Nuclide server, and use it in the next step.
        port1, version1, pid1 = self.start_nuclide_server_and_verify_output(
            args, workspace, port, secure)
        self.assertEquals(version1, str(version))
        if port is not None:
            self.assertEquals(port1, port)
        if upgrade:
            # Bump up the version for upgrade.
            version += 1
            with open(NuclideServerManager.package_file, 'w') as f:
                json.dump({'version': '0.%s.0' % version}, f)
        # Try to start Nuclide server again.
        port2, version2, pid2 = self.start_nuclide_server_and_verify_output(
            args, workspace, port1, secure)
        # Verify it returns with same port that is passed in.
        self.assertEquals(port1, port2)
        self.assertEquals(version2, str(version))
        if upgrade:
            # An upgrade must have restarted the server process.
            self.assertNotEquals(pid1, pid2)
        else:
            # Without an upgrade the original process keeps running.
            self.assertEquals(pid1, pid2)

    def test_nuclide_server_manager_on_http(self):
        # Two workspaces on two explicit ports => two listed servers.
        self.start_nuclide_server_twice_and_verify(port=9090, workspace='.')
        self.start_nuclide_server_twice_and_verify(port=9091, workspace='..')
        manager = NuclideServerManager({})
        servers = manager.list_servers()
        self.assertEquals(len(servers), 2)
        port0 = servers[0].port
        port1 = servers[1].port
        # A good enough test.
        self.assertEquals(port0 + port1, 9090 + 9091)

    def test_upgrade_on_given_port(self):
        self.start_nuclide_server_twice_and_verify(port=9090, workspace='.', upgrade=True)

    # This tests the find open port feature and uses http.
    def test_find_open_port(self):
        # Without specifying the port, it will find an open port and start the server.
        self.start_nuclide_server_twice_and_verify(workspace='..')

    def test_find_open_port_and_upgrade(self):
        # Nuclide server shall be able to find open port and upgrade it on the same port.
        self.start_nuclide_server_twice_and_verify(workspace='.', upgrade=True)

    def test_nuclide_server_manager_on_https(self):
        self.start_nuclide_server_twice_and_verify(workspace='.', secure=True)
        manager = NuclideServerManager({})
        servers = manager.list_servers()
        self.assertEquals(len(servers), 1)
        # Must be one of the open ports.
        self.assertTrue(servers[0].port in nuclide_server_manager.OPEN_PORTS)

    def test_common_name(self):
        parser = nuclide_server_manager.get_option_parser()
        # Start a Nuclide server using default parameters.
        # It will get a default common name like user.nuclide.host
        options, _ = parser.parse_args([])
        manager = NuclideServerManager(options)
        self.assertEquals(len(manager.list_servers()), 0)
        self.assertEquals(manager.start_nuclide(), 0)
        self.assertEquals(len(manager.list_servers()), 1)
        # Change the default common name and verify it upgrades the existing server.
        options, _ = parser.parse_args(['-n', 'localhost'])
        manager = NuclideServerManager(options)
        self.assertEquals(manager.start_nuclide(), 0)
        servers = manager.list_servers()
        self.assertEquals(len(servers), 1)
        # Verify the new common name.
        self.assertEquals(servers[0].get_common_name(), 'localhost')
if __name__ == '__main__':
    # Run every NuclideServerManagerTest case.
    unittest.main()
| StarcoderdataPython |
41400 | import os
import sys
import tkinter as tk
from configparser import ConfigParser
from tkinter import filedialog # for Python 3
from tkinter import messagebox
from config.Dialogs import Dialogs
from UI.helpers.open_folder import open_folder
# https://stackoverflow.com/questions/31170616/how-to-access-a-method-in-one-inherited-tkinter-class-from-another-inherited-tki
class SetupMenu(tk.Menu):
    """'Setup' cascade menu: save, load and reset the UI state.

    The UI state is the set of checked extensions, the source/target path
    entries and the similarity entry on the main window. Persistence is
    delegated to config.Dialogs.

    Improvements over the original: the AppData/DEFAULTS directory-creation
    code, the default-file path lookups and the "gather current UI state"
    code were each duplicated across three or four methods; they are now
    factored into private helpers (_ensure_defaults_dir, _gather_setup).
    """

    def __init__(self, parent, main):
        tk.Menu.__init__(self, parent)
        self.window = main        # main application window (holds the entries)
        self.dialogs = Dialogs()  # persistence helper for setup files
        setupMenu = tk.Menu(parent, tearoff=False)
        parent.add_cascade(label="Setup", underline=0, menu=setupMenu)
        setupMenu.add_command(
            label="Save as", command=self.save_as_DIALOGS)
        setupMenu.add_command(label="Open", command=self.open_DIALOGS)
        setupMenu.add_command(label="Save to defaults",
                              command=self.save_to_default_DIALOGS)
        setupMenu.add_command(label="Reset to defaults",
                              command=self.reset_to_default_DIALOGS)
        setupMenu.add_command(label="Defaults reset",
                              command=self.reset_default_DIALOGS)
        setupMenu.add_separator()
        setupMenu.add_command(label="Exit", underline=1, command=self.quit)

    def quit(self):
        """Terminate the whole application."""
        sys.exit(0)

    def _ensure_defaults_dir(self):
        """Create the AppData and DEFAULTS folders if they do not exist yet."""
        if not os.path.isdir(self.dialogs.AppData_folder_path):
            os.mkdir(self.dialogs.AppData_folder_path)
        if not os.path.isdir(self.dialogs.DEFAULTS_folder_path):
            os.mkdir(self.dialogs.DEFAULTS_folder_path)

    def _gather_setup(self):
        """Read the current UI state: (checkboxes, source, target, similarity)."""
        window = self.window
        return (
            list(window.checkbars.state()),
            window.source_path_entry.get(),
            window.target_path_entry.get(),
            float(window.similarity_entry.get()),
        )

    def save_as_DIALOGS(self):
        """Save the current UI state to a user-chosen setup file."""
        DIALOGS_path = self.dialogs.get_dialogs_path_save()
        if DIALOGS_path:
            checkedboxes, source_path, target_path, similarity = self._gather_setup()
            self.dialogs.saving_dialogs_to_file(
                DIALOGS_path,
                checkedboxes,
                source_path,
                target_path,
                similarity
            )
            messagebox.showinfo(
                "Done!",
                f"You saved setup file in:\n{DIALOGS_path}"
            )
        else:
            messagebox.showinfo(
                "Ouch!",
                "You haven't saved config!"
            )

    def open_DIALOGS(self):
        """Load a user-chosen setup file into the UI."""
        setup_path = self.dialogs.get_dialogs_path_open()
        if setup_path:
            config = self.dialogs.read_config_file(setup_path)
            self.set_setup_dialogs(config)
        else:
            messagebox.showinfo(
                "Ouch!",
                "You haven't choose any file!"
            )

    def save_to_default_DIALOGS(self):
        """Persist the current UI state as the defaults file."""
        self._ensure_defaults_dir()
        checkedboxes, source_path, target_path, similarity = self._gather_setup()
        self.dialogs.saving_dialogs_to_file(
            self.dialogs.DEFAULT_file_path,
            checkedboxes,
            source_path,
            target_path,
            similarity
        )

    def reset_to_default_DIALOGS(self):
        """Load the defaults file into the UI, creating it first if missing."""
        self._ensure_defaults_dir()
        if not os.path.exists(self.dialogs.DEFAULT_file_path):
            self.dialogs.create_DEFAULT_file()
        config = self.dialogs.read_config_file(self.dialogs.DEFAULT_file_path)
        self.set_setup_dialogs(config)

    def reset_default_DIALOGS(self):
        """Regenerate the defaults file from scratch and load it into the UI."""
        self._ensure_defaults_dir()
        # create_DEFAULT_file overwrites the file if it already exists.
        self.dialogs.create_DEFAULT_file()
        config = self.dialogs.read_config_file(self.dialogs.DEFAULT_file_path)
        self.set_setup_dialogs(config)

    def set_setup_dialogs(self, config):
        """Push the values from a parsed setup *config* into the UI widgets."""
        window = self.window
        window.source_path_entry = window.entry_set(
            window.source_path_entry, self.dialogs.get_source_path(
                config)
        )
        window.target_path_entry = window.entry_set(
            window.target_path_entry, self.dialogs.get_target_path(
                config)
        )
        picks = self.dialogs.get_checked_extensions(config)
        window.checkbars.set_state(picks)
        window.similarity_entry = window.entry_set(
            window.similarity_entry, self.dialogs.get_similarity(config)
        )
| StarcoderdataPython |
3284152 | from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render, redirect
from django.views import View
from django import http
import re,json,logging
from django.db import DatabaseError
from django.urls import reverse
from django.contrib.auth import login, authenticate, logout
from django_redis import get_redis_connection
from carts.utils import merge_carts_cookies_redis
from goods.models import SKU
from users.models import User, Address
from meiduo_mall.utils.views import LoginRequiredJSONMixin
from meiduo_mall.utils.response_code import RETCODE
from celery_tasks.email.tasks import send_verify_email
from users.utils import generate_verify_email_url,check_verify_email_token
from . import constants
# Create your views here.
logger=logging.getLogger('django')  # module-level logger (Django's configured 'django' logger)
class UserBrowseHistory(LoginRequiredJSONMixin, View):
    '''User product-browse history, kept per user as a Redis list (newest first).'''
    def post(self,request):
        '''Record that the logged-in user viewed the given SKU.'''
        # Receive parameters
        json_str = request.body.decode()
        json_dict = json.loads(json_str)
        sku_id = json_dict.get('sku_id')
        # Validate parameters: the SKU must exist
        try:
            SKU.objects.get(id=sku_id)
        except SKU.DoesNotExist:
            return http.HttpResponseForbidden('参数sku_id错误')
        # Save sku_id into Redis
        redis_conn = get_redis_connection('history')
        user = request.user
        pl = redis_conn.pipeline()
        # De-duplicate first
        pl.lrem('history_%s' % user.id, 0, sku_id)
        # Then push: the most recently viewed item goes to the front
        pl.lpush('history_%s' % user.id, sku_id)
        # Finally trim the list to the 5 most recent items
        pl.ltrim('history_%s' % user.id, 0, 4)
        # Execute the whole pipeline at once
        pl.execute()
        # Respond with the result
        return http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK'})
    def get(self, request):
        """Return the logged-in user's recently browsed products (newest first)."""
        # Get the logged-in user
        user = request.user
        # Connect to the 'history' Redis database
        redis_conn = get_redis_connection('history')
        # Read the whole history list (at most 5 ids thanks to ltrim above)
        sku_ids = redis_conn.lrange('history_%s' % user.id, 0, -1)  # (0, 4)
        # Convert each SKU model into a plain dict for JSON serialisation
        skus = []
        for sku_id in sku_ids:
            sku = SKU.objects.get(id=sku_id)
            skus.append({
                'id': sku.id,
                'name': sku.name,
                'price': sku.price,
                'default_image_url': sku.default_image.url
            })
        return http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK', 'skus': skus})
class UpdateTitleAddressView(LoginRequiredJSONMixin,View):
    # Update the title of a shipping address
    def put(self,request,address_id):
        '''Update the title of the address identified by address_id.'''
        # Receive parameter: title
        json_dict=json.loads(request.body.decode())
        title = json_dict.get('title')
        # Validate parameter
        if not title:
            return http.HttpResponseForbidden('缺少title')
        # Look up the address whose title should be updated
        try:
            # Overwrite the address title with the new one
            address = Address.objects.get(id=address_id)
            address.title=title
            address.save()
        except Exception as e:
            logger.error(e)
            return http.JsonResponse({'code': RETCODE.DBERR, 'errmsg': '更新标题失败'})
        # Respond with the result
        return http.JsonResponse({'code': RETCODE.OK, 'errmsg': '更新标题成功'})
class DefaultAddressView(LoginRequiredJSONMixin,View):
    '''Set the user's default shipping address.'''
    def put(self,request,address_id):
        '''Mark the address identified by address_id as the user's default.'''
        try:
            # Look up the address that should become the user's default
            address=Address.objects.get(id=address_id)
            # Attach it as the default address of the logged-in user
            request.user.default_address=address
            request.user.save()
        except Exception as e:
            logger.error(e)
            return http.JsonResponse({'code': RETCODE.DBERR, 'errmsg': '设置默认地址失败'})
        return http.JsonResponse({'code': RETCODE.OK, 'errmsg': '设置默认地址成功'})
class UpdateDestoryAddressView(LoginRequiredMixin,View):
    '''Update or (logically) delete a shipping address.

    Fixes over the original: removed a dead ``pass`` after ``put``'s final
    return, and corrected the success message, which wrongly said
    '新增地址成功' ("address created") in the *update* handler.
    '''
    def put(self,request,address_id):
        '''Overwrite the address identified by address_id with the submitted fields.'''
        # Receive parameters
        json_str=request.body.decode()
        json_dict=json.loads(json_str)
        receiver = json_dict.get('receiver')
        province_id = json_dict.get('province_id')
        city_id = json_dict.get('city_id')
        district_id = json_dict.get('district_id')
        place = json_dict.get('place')
        mobile = json_dict.get('mobile')
        tel = json_dict.get('tel')
        email = json_dict.get('email')
        # Validate parameters (tel and email are optional)
        if not all([receiver, province_id, city_id, district_id, place, mobile]):
            return http.HttpResponseForbidden('缺少必传参数')
        if not re.match(r'^1[3-9]\d{9}$', mobile):
            return http.HttpResponseForbidden('参数mobile有误')
        if tel:
            if not re.match(r'^(0[0-9]{2,3}-)?([2-9][0-9]{6,7})+(-[0-9]{1,4})?$', tel):
                return http.HttpResponseForbidden('参数tel有误')
        if email:
            if not re.match(r'^[a-z0-9][\w\.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$', email):
                return http.HttpResponseForbidden('参数email有误')
        try:
            # Overwrite the stored address with the newly submitted data
            Address.objects.filter(id=address_id).update(
                user=request.user,
                title=receiver,
                receiver=receiver,
                province_id=province_id,
                city_id=city_id,
                district_id=district_id,
                place=place,
                mobile=mobile,
                tel=tel,
                email=email
            )
        except Exception as e:
            logger.error(e)
            return http.JsonResponse({'code':RETCODE.DBERR,'errmsg':'修改地址失败'})
        # Respond with the fresh address so the front end can refresh in place
        address = Address.objects.get(id=address_id)
        address_dict = {
            "id": address.id,
            "title": address.title,
            "receiver": address.receiver,
            "province": address.province.name,
            "city": address.city.name,
            "district": address.district.name,
            "place": address.place,
            "mobile": address.mobile,
            "tel": address.tel,
            "email": address.email
        }
        return http.JsonResponse({'code':RETCODE.OK,'errmsg':'修改地址成功','address':address_dict})
    def delete(self,request,address_id):
        '''Logically delete the address (sets is_deleted=True; the row is kept).'''
        try:
            address=Address.objects.get(id=address_id)
            address.is_deleted = True
            address.save()
        except Exception as e:
            logger.error(e)
            return http.JsonResponse({'code': RETCODE.DBERR, 'errmsg': '删除地址失败'})
        # Respond with the result
        return http.JsonResponse({'code': RETCODE.OK, 'errmsg': '删除地址成功'})
class AddressCreateView(LoginRequiredJSONMixin,View):
    '''Create a new shipping address for the logged-in user.'''
    def post(self,request):
        '''Validate the submitted fields and create the address.'''
        # Refuse if the user already has too many addresses.
        # NOTE(review): with '>' a user can end up with LIMIT+1 addresses;
        # '>=' may be intended -- confirm against product requirements.
        #count = Address.objects.filter(user=request.user).count()
        count = request.user.addresses.count()  # one-to-many reverse lookup via related_name
        if count > constants.USER_ADDRESS_COUNTS_LIMIT:
            return http.JsonResponse({'code':RETCODE.THROTTLINGERR,'errmsg':'超出用户地址上限'})
        # Receive parameters
        json_str=request.body.decode()
        json_dict=json.loads(json_str)
        receiver = json_dict.get('receiver')
        province_id = json_dict.get('province_id')
        city_id = json_dict.get('city_id')
        district_id = json_dict.get('district_id')
        place = json_dict.get('place')
        mobile = json_dict.get('mobile')
        tel = json_dict.get('tel')
        email = json_dict.get('email')
        # Validate parameters (tel and email are optional)
        if not all([receiver, province_id, city_id, district_id, place, mobile]):
            return http.HttpResponseForbidden('缺少必传参数')
        if not re.match(r'^1[3-9]\d{9}$', mobile):
            return http.HttpResponseForbidden('参数mobile有误')
        if tel:
            if not re.match(r'^(0[0-9]{2,3}-)?([2-9][0-9]{6,7})+(-[0-9]{1,4})?$', tel):
                return http.HttpResponseForbidden('参数tel有误')
        if email:
            if not re.match(r'^[a-z0-9][\w\.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$', email):
                return http.HttpResponseForbidden('参数email有误')
        # Persist the submitted address
        try:
            address=Address.objects.create(
                user=request.user,
                title=receiver,
                receiver=receiver,
                province_id=province_id,
                city_id=city_id,
                district_id=district_id,
                place=place,
                mobile=mobile,
                tel=tel,
                email=email
            )
            # If the user has no default address yet, make this one the default
            if not request.user.default_address:
                request.user.default_address = address
                request.user.save()
        except Exception as e:
            logger.error(e)
            return http.JsonResponse({'code':RETCODE.DBERR,'errmsg':'新增地址失败'})
        # Creation succeeded: return the new address so the front end can
        # refresh the list in place.
        address_dict = {
            "id": address.id,
            "title": address.title,
            "receiver": address.receiver,
            "province": address.province.name,
            "city": address.city.name,
            "district": address.district.name,
            "place": address.place,
            "mobile": address.mobile,
            "tel": address.tel,
            "email": address.email
        }
        return http.JsonResponse({'code':RETCODE.OK,'errmsg':'新增地址成功','address':address_dict})
class AddressView(LoginRequiredMixin, View):
    """Render the user's address-management page."""

    def get(self, request):
        """Query the current user's live addresses and render the page."""
        login_user = request.user
        # Only non-deleted addresses belonging to the current user.
        addresses = Address.objects.filter(user=request.user, is_deleted=False)
        # Model instances are not understood by JsonResponse / Vue.js (only
        # the Django/Jinja2 template engines can render them), so convert
        # each one into a plain dict.
        address_list = [
            {
                "id": item.id,
                "title": item.title,
                "receiver": item.receiver,
                "province": item.province.name,
                "city": item.city.name,
                "district": item.district.name,
                "place": item.place,
                "mobile": item.mobile,
                "tel": item.tel,
                "email": item.email
            }
            for item in addresses
        ]
        context = {
            # '0' acts as a placeholder when no default address exists yet,
            # which keeps the front-end template from breaking.
            'default_address_id': login_user.default_address_id or '0',
            'addresses': address_list,
        }
        return render(request, 'user_center_site.html', context)
class VerifyEmailView(View):
    """Activate a user's email address from a verification link."""

    def get(self, request):
        """Validate the token carried by the link and mark the email active."""
        token = request.GET.get('token')
        if not token:
            return http.HttpResponseForbidden('缺少必填参数')
        # Recover the user encoded in the signed token (user_id -> user).
        user = check_verify_email_token(token)
        if not user:
            return http.HttpResponseBadRequest('无效的token')
        try:
            user.email_active = True
            user.save()
        except Exception as e:
            logger.error(e)
            return http.HttpResponseServerError('激活失败')
        # After activation, send the user back to their profile page.
        return redirect(reverse('users:info'))
class EmailView(LoginRequiredJSONMixin, View):
    """Bind an email address to the logged-in user and send a verification."""

    def put(self, request):
        """Save the submitted email and send the user a verification link."""
        # request.body is bytes; decode before parsing the JSON payload.
        json_str = request.body.decode()
        json_dict = json.loads(json_str)
        email = json_dict.get('email')
        # Validate the parameter.
        if not email:
            return http.HttpResponseForbidden('缺少email参数')
        if not re.match(r'^[a-z0-9][\w\.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$', email):
            return http.HttpResponseForbidden('参数email有误')
        try:
            # Store the email on the current user record.
            request.user.email = email
            request.user.save()
        except Exception as e:
            logger.error(e)
            return http.JsonResponse({'code':RETCODE.DBERR,'errmsg':'添加邮箱失败'})
        # Send the verification email asynchronously (Celery task).
        # FIX: removed a leftover debug print of the verification URL.
        verify_url = generate_verify_email_url(request.user)
        send_verify_email.delay(email, verify_url)
        return http.JsonResponse({'code':RETCODE.OK,'errmsg':'OK'})
# class UserInfoView(View):
# '''用户中心'''
# def get(self,request):
# '''提供用户中心页面'''
# if request.user.is_authenticated:#django 自带判断用户是否登陆方法
# return render(request,'user_center_info.html')
# else:
# return redirect(reverse('users:login'))
class UserInfoView(LoginRequiredMixin, View):
    """User-center profile page (login enforced by LoginRequiredMixin)."""

    def get(self, request):
        """Render the profile page with the current user's basic info."""
        # LoginRequiredMixin already redirects anonymous users, so no manual
        # is_authenticated check is needed here (the old hand-rolled check
        # and the duplicated docstring were removed).
        context = {
            'username': request.user.username,
            'mobile': request.user.mobile,
            'email': request.user.email,
            'email_active': request.user.email_active
        }
        return render(request, 'user_center_info.html', context=context)
# Log out of the current session
class LogoutView(View):
    """Log the current user out."""

    def get(self, request):
        """Clear the session and the username cookie, then go home."""
        # Django's logout() flushes the session state.
        logout(request)
        response = redirect(reverse('contents:index'))
        # Drop the cookie the front end uses to display the username.
        response.delete_cookie('username')
        return response
class LoginView(View):
    """Username/password login."""

    def get(self, request):
        """Return the login page.

        :param request: request object
        :return: rendered login page
        """
        return render(request, 'login.html')

    def post(self, request):
        """Authenticate the user and establish session state.

        :param request: request object
        :return: redirect to ``next``/home on success, login page on failure
        """
        username = request.POST.get('username')
        password = request.POST.get('password')
        remembered = request.POST.get('remembered')
        # Validate parameters (must mirror the front-end checks).
        if not all([username, password]):
            return http.HttpResponseForbidden('缺少必填参数')
        # Username: 5-20 letters, digits, underscores or hyphens.
        if not re.match(r'^[a-zA-Z0-9_-]{5,20}$', username):
            return http.HttpResponseForbidden('请输入正确的用户名或手机号')
        # Password: 8-20 alphanumeric characters.
        if not re.match(r'^[0-9A-Za-z]{8,20}$', password):
            return http.HttpResponseForbidden('密码最少8位,最长20位')
        # Authenticate against the configured backends.
        user = authenticate(username=username, password=password)
        if user is None:
            return render(request, 'login.html', {'account_errmsg': '用户名或密码错误'})
        # Establish the session.
        login(request, user)
        # Use `remembered` to pick the session lifetime ("remember me").
        if remembered != 'on':
            # Not remembered: the session expires when the browser closes.
            request.session.set_expiry(0)  # unit: seconds
        else:
            # Remembered: fall back to the default lifetime (two weeks).
            request.session.set_expiry(None)
        # Honor the ?next= redirect target when present, otherwise go home.
        # FIX: removed a dead `response = redirect(...)` that was immediately
        # overwritten; renamed `next` so it no longer shadows the builtin.
        next_url = request.GET.get('next')
        if next_url:
            response = redirect(next_url)
        else:
            response = redirect(reverse('contents:index'))
        # Expose the username via cookie so the page header can display it.
        # response.set_cookie(key, value, max_age)
        response.set_cookie('username', user.username, max_age=3600 * 24 * 15)  # cached 15 days
        # After a successful login, merge the cookie cart into the Redis cart.
        response = merge_carts_cookies_redis(request=request, user=user, response=response)
        # Redirect to the chosen target.
        return response
class MobileCountView(View):
"""判断手机号是否重复注册"""
def get(self, request, mobile):
"""
:param mobile: 手机号
:return: JSON
"""
count = User.objects.filter(mobile=mobile).count()
return http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK', 'count': count})
class UsernameCountView(View):
"""判断用户名是否重复注册"""
def get(self, request, username):
"""
:param username: 用户名
:return: JSON
"""
# 实现主体业务逻辑:使用username查询对应的记录的条数(filter返回的是满足条件的结果集)
count = User.objects.filter(username=username).count()
# 响应结果
return http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK', 'count': count})
class RegisterView(View):
"""用户注册"""
def get(self, request):
"""提供用户注册页面"""
return render(request, 'register.html')
def post(self, request):
"""实现用户注册业务逻辑"""
# 接收参数:表单参数
username = request.POST.get('username')
password = request.POST.get('password')
password2 = request.POST.get('<PASSWORD>')
mobile = request.POST.get('mobile')
sms_code_client = request.POST.get('sms_code')
allow = request.POST.get('allow')
# 校验参数:前后端的校验需要分开,避免恶意用户越过前端逻辑发请求,要保证后端的安全,前后端的校验逻辑相同
# 判断参数是否齐全:all([列表]):会去校验列表中的元素是否为空,只要有一个为空,返回false
if not all([username, password, password2, mobile, allow]):
return http.HttpResponseForbidden('缺少必传参数')
# 判断用户名是否是5-20个字符
if not re.match(r'^[a-zA-Z0-9_-]{5,20}$', username):
return http.HttpResponseForbidden('请输入5-20个字符的用户名')
if re.match(r'^\d{1,90}$', username):
return http.HttpResponseForbidden('用户名不能是纯数字')
# 判断密码是否是8-20个数字
if not re.match(r'^[0-9A-Za-z]{8,20}$', password):
return http.HttpResponseForbidden('请输入8-20位的密码')
# 判断两次密码是否一致
if password != <PASSWORD>:
return http.HttpResponseForbidden('两次输入的密码不一致')
# 判断手机号是否合法
if not re.match(r'^1[3-9]\d{9}$', mobile):
return http.HttpResponseForbidden('请输入正确的手机号码')
#连接redis数据库
redis_conn = get_redis_connection('verify_code')
# 判断短信验证码是否输入正确
sms_code_server = redis_conn.get('sms_%s' % mobile)
if sms_code_server is None:
return render(request, 'register.html', {'sms_code_errmsg': '短信验证码已失效'})
if sms_code_client != sms_code_server.decode():
return render(request, 'register.html', {'sms_code_errmsg': '输入短信验证码有误'})
# 判断是否勾选用户协议
if allow != 'on':
return http.HttpResponseForbidden('请勾选用户协议')
# 保存注册数据:是注册业务的核心
# return render(request, 'register.html', {'register_errmsg': '注册失败'})
try:
user = User.objects.create_user(username=username, password=password, mobile=mobile)
except DatabaseError:
return render(request, 'register.html', {'register_errmsg':'注册失败'})
# 实现状态保持
login(request, user)
# 响应结果:重定向到首页
# return http.HttpResponse('注册成功,重定向到首页')
# return redirect('/')
# reverse('contents:index') == '/'
return redirect(reverse('contents:index'))
| StarcoderdataPython |
from click.testing import CliRunner
import time
import pytest
import json
from calm.dsl.cli import main as cli
from calm.dsl.cli.constants import APPLICATION
from calm.dsl.tools import get_logging_handle
# Module-level logger for these CLI tests.
LOG = get_logging_handle(__name__)
# Blueprint file used by both launch tests below.
BP_FILE_PATH = "tests/cli/runtime_helpers/ahv/blueprint.py"
# Pre-filled runtime values for the non-interactive launch test.
LAUNCH_PARAMS = "tests/cli/runtime_helpers/ahv/editable_params.py"
# Application states in which the app is no longer busy and can be deleted.
NON_BUSY_APP_STATES = [
    APPLICATION.STATES.STOPPED,
    APPLICATION.STATES.RUNNING,
    APPLICATION.STATES.ERROR,
]
@pytest.mark.slow
def test_ahv_substrate_editables_interactive_mode():
    """Tests interactive mode for getting runtime values under ahv substrate.

    Creates a blueprint, launches it while answering every runtime-editable
    prompt via stdin, then cleans up both the app and the blueprint.
    """
    runner = CliRunner()
    # NOTE(review): this launch runs before the blueprint is created below and
    # its result is discarded -- it looks like a stray leftover; confirm
    # whether it can be removed.
    command = "launch bp --file {}".format(BP_FILE_PATH)
    result = runner.invoke(cli, command)
    # create blueprint under a timestamped (hence unique) name
    BP_NAME = "Test_Runtime_Bp_{}".format(int(time.time()))
    command = "create bp --file={} --name={}".format(BP_FILE_PATH, BP_NAME)
    LOG.info("Creating Bp {}".format(BP_NAME))
    result = runner.invoke(cli, command)
    LOG.debug(result.output)
    if result.exit_code:
        # Clean up before failing so reruns do not hit a name clash.
        delete_bp(BP_NAME)
        pytest.fail("Error occured in bp creation")
    # launch blueprint, answering each interactive prompt in order
    APP_NAME = "Test_Runtime_App_{}".format(int(time.time()))
    command = "launch bp {} -a {}".format(BP_NAME, APP_NAME)
    # One stdin line per prompt; the order must match the CLI's prompt order.
    input = [
        "5",
        "22",
        "@@{calm_application_name}@@-@@{calm_array_index}@@",
        "y",  # Edit categories
        "y",  # Delete category
        "AppFamily",  # Category Family
        "y",  # Delete more categories
        "AppTier",  # Category Family
        "n",  # Delete category
        "y",  # Add category
        "6",  # Index of category (AppFamily:DevOps)
        "n",  # Add more category
        "vlan.0",  # Nic name
        "CLONE_FROM_IMAGE",  # Operation
        "Centos7",  # Image name
        "DISK",  # Device Type of 2nd disk
        "PCI",  # Device Bus
        "ALLOCATE_STORAGE_CONTAINER",  # Operation
        "10",  # Disk size
        "1",  # vCPUS
        "1",  # Cores per vCPU
        "1",  # Memory(GiB)
        "y",  # Connection status for serial port 0
        "n",  # Connection status for serial port 1 (original comment repeated port 0)
        "y",  # Edit guest customization
        "Sample data",  # User data
        "bar1",  # Variable value
        "bar2",  # Variable value
    ]
    input = "\n".join(input)
    result = runner.invoke(cli, command, input=input)
    # Best-effort cleanup; failures here must not mask the launch result.
    try:
        delete_app(APP_NAME)
    except Exception:
        pass
    try:
        delete_bp(BP_NAME)
    except Exception:
        pass
    if result.exit_code:
        pytest.fail("App creation failed")
@pytest.mark.slow
def test_ahv_substrate_editables_non_interactive_mode():
    """Tests non-interactive mode for getting runtime values under ahv
    substrate: runtime values come from a params file instead of stdin."""
    runner = CliRunner()
    # NOTE(review): this launch runs before the blueprint exists and its
    # result is ignored; kept for parity with the original flow -- confirm
    # whether it is needed.
    result = runner.invoke(cli, "launch bp --file {}".format(BP_FILE_PATH))
    # Create the blueprint under a timestamped (hence unique) name.
    bp_name = "Test_Runtime_Bp_{}".format(int(time.time()))
    create_cmd = "create bp --file={} --name={}".format(BP_FILE_PATH, bp_name)
    LOG.info("Creating Bp {}".format(bp_name))
    result = runner.invoke(cli, create_cmd)
    LOG.debug(result.output)
    if result.exit_code:
        # Clean up before failing so reruns do not hit a name clash.
        delete_bp(bp_name)
        pytest.fail("Error occured in bp creation")
    # Launch with the runtime values supplied by the params file (-l).
    app_name = "Test_Runtime_App_{}".format(int(time.time()))
    launch_cmd = "launch bp {} -a {} -l {}".format(bp_name, app_name,
                                                   LAUNCH_PARAMS)
    result = runner.invoke(cli, launch_cmd)
    # Best-effort cleanup of the app first, then the blueprint.
    for cleanup in (lambda: delete_app(app_name), lambda: delete_bp(bp_name)):
        try:
            cleanup()
        except Exception:
            pass
    if result.exit_code:
        pytest.fail("App creation failed")
def delete_bp(name):
    """Delete a blueprint by name via the CLI and assert it succeeded."""
    runner = CliRunner()
    outcome = runner.invoke(cli, "delete bp {}".format(name))
    LOG.debug(outcome.output)
    assert outcome.exit_code == 0, "Error occured in blueprint deletion"
def delete_app(name):
    """Delete an application once it has reached a non-busy state."""
    runner = CliRunner()
    # Deletion fails while the app is busy, so wait for a terminal state.
    _wait_for_non_busy_state(name)
    outcome = runner.invoke(cli, "delete app {}".format(name))
    LOG.debug(outcome.output)
    assert outcome.exit_code == 0, "Error occured in application deletion"
def _wait_for_non_busy_state(name):
    """Poll the app via the CLI until it leaves its busy/transient state."""
    runner = CliRunner()

    def current_state():
        # Describe the app as JSON and extract its state.
        res = runner.invoke(cli, ["describe", "app", name, "--out=json"])
        return json.loads(res.output)["status"]["state"]

    state = current_state()
    LOG.info("App State: {}".format(state))
    LOG.debug("App Terminal states: {}".format(NON_BUSY_APP_STATES))
    while state not in NON_BUSY_APP_STATES:
        time.sleep(5)
        state = current_state()
        LOG.info("App State: {}".format(state))
| StarcoderdataPython |
# repository: CFWLoader/supreme-bassoon
from sklearn import tree
from sklearn.datasets import load_iris
import graphviz
import os
# Directory of this script (used by the commented-out export below).
dir_path = os.path.dirname(os.path.realpath(__file__))
# Classic iris dataset: 150 samples, 4 features, 3 classes.
iris_data = load_iris()
# Decision tree using Gini impurity as the split criterion.
clf = tree.DecisionTreeClassifier(criterion="gini")
clf = clf.fit(iris_data.data, iris_data.target)
# NOTE: this scores on the training data itself, so the accuracy is
# optimistic -- there is no train/test split here.
print(clf.score(iris_data.data, iris_data.target))
# dot_data = tree.export_graphviz(clf, out_file=os.path.join(dir_path, "tree.dot"),
#                                 feature_names=iris_data.feature_names, class_names=iris_data.target_names,
#                                 filled=True, rounded=True, special_characters=True)
# simple_salesforce/api.py
"""Core classes and exceptions for Simple-Salesforce"""
# Default Salesforce REST API version; has to be defined prior to the
# login import below, which uses it.
DEFAULT_API_VERSION = '52.0'
import base64
import json
import logging
import re
from collections import OrderedDict, namedtuple
from urllib.parse import urljoin, urlparse
import requests
from .bulk import SFBulkHandler
from .exceptions import SalesforceGeneralError
from .login import SalesforceLogin
from .util import date_to_iso8601, exception_handler
from .metadata import SfdcMetadataApi
# pylint: disable=invalid-name
logger = logging.getLogger(__name__)
# Lightweight records for the API-usage figures parsed out of the
# Sforce-Limit-Info response header (see Salesforce.parse_api_usage).
Usage = namedtuple('Usage', 'used total')
PerAppUsage = namedtuple('PerAppUsage', 'used total name')
# pylint: disable=too-many-instance-attributes
class Salesforce:
"""Salesforce Instance
An instance of Salesforce is a handy way to wrap a Salesforce session
for easy use of the Salesforce REST API.
"""
_parse_float = None
# pylint: disable=too-many-arguments,too-many-locals,too-many-branches
def __init__(
self,
username=None,
password=<PASSWORD>,
security_token=None,
session_id=None,
instance=None,
instance_url=None,
organizationId=None,
version=DEFAULT_API_VERSION,
proxies=None,
session=None,
client_id=None,
domain=None,
consumer_key=None,
privatekey_file=None,
privatekey=None,
parse_float=None,
):
"""Initialize the instance with the given parameters.
Available kwargs
Password Authentication:
* username -- the Salesforce username to use for authentication
* password -- the password for the username
* security_token -- the security token for the username
* domain -- The domain to using for connecting to Salesforce. Use
common domains, such as 'login' or 'test', or
Salesforce My domain. If not used, will default to
'login'.
OAuth 2.0 JWT Bearer Token Authentication:
* consumer_key -- the consumer key generated for the user
Then either
* privatekey_file -- the path to the private key file used
for signing the JWT token
OR
* privatekey -- the private key to use
for signing the JWT token
Direct Session and Instance Access:
* session_id -- Access token for this session
Then either
* instance -- Domain of your Salesforce instance, i.e.
`na1.salesforce.com`
OR
* instance_url -- Full URL of your instance i.e.
`https://na1.salesforce.com
Universal Kwargs:
* version -- the version of the Salesforce API to use, for example
`29.0`
* proxies -- the optional map of scheme to proxy server
* session -- Custom requests session, created in calling code. This
enables the use of requests Session features not otherwise
exposed by simple_salesforce.
* parse_float -- Function to parse float values with. Is passed along to
https://docs.python.org/3/library/json.html#json.load
"""
if domain is None:
domain = 'login'
# Determine if the user passed in the optional version and/or
# domain kwargs
self.sf_version = version
self.domain = domain
self.session = session or requests.Session()
self.proxies = self.session.proxies
# override custom session proxies dance
if proxies is not None:
if not session:
self.session.proxies = self.proxies = proxies
else:
logger.warning(
'Proxies must be defined on custom session object, '
'ignoring proxies: %s', proxies
)
# Determine if the user wants to use our username/password auth or pass
# in their own information
if all(arg is not None for arg in (
username, password, security_token)):
self.auth_type = "password"
# Pass along the username/password to our login helper
self.session_id, self.sf_instance = SalesforceLogin(
session=self.session,
username=username,
password=password,
security_token=security_token,
sf_version=self.sf_version,
proxies=self.proxies,
client_id=client_id,
domain=self.domain)
elif all(arg is not None for arg in (
session_id, instance or instance_url)):
self.auth_type = "direct"
self.session_id = session_id
# If the user provides the full url (as returned by the OAuth
# interface for example) extract the hostname (which we rely on)
if instance_url is not None:
self.sf_instance = urlparse(instance_url).hostname
port = urlparse(instance_url).port
if port not in (None, 443):
self.sf_instance += ':' + str(port)
else:
self.sf_instance = instance
elif all(arg is not None for arg in (
username, password, organizationId)):
self.auth_type = 'ipfilter'
# Pass along the username/password to our login helper
self.session_id, self.sf_instance = SalesforceLogin(
session=self.session,
username=username,
password=password,
organizationId=organizationId,
sf_version=self.sf_version,
proxies=self.proxies,
client_id=client_id,
domain=self.domain)
elif all(arg is not None for arg in (
username, consumer_key, privatekey_file or privatekey)):
self.auth_type = "jwt-bearer"
# Pass along the username/password to our login helper
self.session_id, self.sf_instance = SalesforceLogin(
session=self.session,
username=username,
consumer_key=consumer_key,
privatekey_file=privatekey_file,
privatekey=privatekey,
proxies=self.proxies,
domain=self.domain)
else:
raise TypeError(
'You must provide login information or an instance and token'
)
self.auth_site = ('https://{domain}.salesforce.com'
.format(domain=self.domain))
self.headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + self.session_id,
'X-PrettyPrint': '1'
}
self.base_url = ('https://{instance}/services/data/v{version}/'
.format(instance=self.sf_instance,
version=self.sf_version))
self.apex_url = ('https://{instance}/services/apexrest/'
.format(instance=self.sf_instance))
self.bulk_url = ('https://{instance}/services/async/{version}/'
.format(instance=self.sf_instance,
version=self.sf_version))
self.metadata_url = ('https://{instance}/services/Soap/m/{version}/'
.format(instance=self.sf_instance,
version=self.sf_version))
self.tooling_url = '{base_url}tooling/'.format(base_url=self.base_url)
self.api_usage = {}
self._parse_float = parse_float
self._mdapi = None
    @property
    def mdapi(self):
        """Utility to interact with metadata api functionality.

        Built lazily on first access and cached for the lifetime of this
        instance; reuses this instance's session and auth headers.
        """
        if not self._mdapi:
            self._mdapi = SfdcMetadataApi(session=self.session,
                                          session_id=self.session_id,
                                          instance=self.sf_instance,
                                          metadata_url=self.metadata_url,
                                          api_version=self.sf_version,
                                          headers=self.headers)
        return self._mdapi
def describe(self, **kwargs):
"""Describes all available objects
Arguments:
* keyword arguments supported by requests.request (e.g. json, timeout)
"""
url = self.base_url + "sobjects"
result = self._call_salesforce('GET', url, name='describe', **kwargs)
json_result = self.parse_result_to_json(result)
if len(json_result) == 0:
return None
return json_result
def is_sandbox(self):
"""After connection returns is the organization in a sandbox"""
is_sandbox = None
if self.session_id:
is_sandbox = self.query_all("SELECT IsSandbox "
"FROM Organization LIMIT 1")
is_sandbox = is_sandbox.get('records', [{'IsSandbox': None}])[
0].get(
'IsSandbox')
return is_sandbox
    # SObject Handler
    def __getattr__(self, name):
        """Returns an `SFType` instance for the given Salesforce object type
        (given in `name`).

        The magic part of the SalesforceAPI, this function translates
        calls such as `salesforce_api_instance.Lead.metadata()` into fully
        constituted `SFType` instances to make a nice Python API wrapper
        for the REST API.

        Arguments:

        * name -- the name of a Salesforce object type, e.g. Lead or Contact
        """
        # fix to enable serialization
        # (https://github.com/heroku/simple-salesforce/issues/60):
        # dunder lookups must not be turned into SFType instances.
        if name.startswith('__'):
            return super().__getattr__(name)
        if name == 'bulk':
            # Deal with bulk API functions
            return SFBulkHandler(self.session_id, self.bulk_url, self.proxies,
                                 self.session)
        # Any other attribute name is treated as an SObject type name.
        return SFType(
            name, self.session_id, self.sf_instance, sf_version=self.sf_version,
            proxies=self.proxies, session=self.session)
    # User utility methods
    def set_password(self, user, password):
        """Sets the password of a user.

        salesforce dev documentation link:
        https://www.salesforce.com/us/developer/docs/api_rest/Content
        /dome_sobject_user_password.htm

        Arguments:

        * user: the userID of the user to set
        * password: the new password to assign to that user
        """
        url = self.base_url + 'sobjects/User/%s/password' % user
        params = {'NewPassword': password}
        result = self._call_salesforce('POST', url, data=json.dumps(params))
        # Salesforce returns 204 No Content when the request is successful.
        if result.status_code == 204:
            return None
        if result.status_code != 200:
            raise SalesforceGeneralError(url,
                                         result.status_code,
                                         'User',
                                         result.content)
        return self.parse_result_to_json(result)
# Generic Rest Function
def restful(self, path, params=None, method='GET', **kwargs):
"""Allows you to make a direct REST call if you know the path
Arguments:
* path: The path of the request
Example: sobjects/User/ABC123/password'
* params: dict of parameters to pass to the path
* method: HTTP request method, default GET
* other arguments supported by requests.request (e.g. json, timeout)
"""
url = self.base_url + path
result = self._call_salesforce(method, url, name=path, params=params,
**kwargs)
json_result = self.parse_result_to_json(result)
if len(json_result) == 0:
return None
return json_result
# Search Functions
def search(self, search):
"""Returns the result of a Salesforce search as a dict decoded from
the Salesforce response JSON payload.
Arguments:
* search -- the fully formatted SOSL search string, e.g.
`FIND {Waldo}`
"""
url = self.base_url + 'search/'
# `requests` will correctly encode the query string passed as `params`
params = {'q': search}
result = self._call_salesforce('GET', url, name='search', params=params)
json_result = self.parse_result_to_json(result)
if len(json_result) == 0:
return None
return json_result
def quick_search(self, search):
"""Returns the result of a Salesforce search as a dict decoded from
the Salesforce response JSON payload.
Arguments:
* search -- the non-SOSL search string, e.g. `Waldo`. This search
string will be wrapped to read `FIND {Waldo}` before being
sent to Salesforce
"""
search_string = 'FIND {{{search_string}}}'.format(search_string=search)
return self.search(search_string)
def limits(self, **kwargs):
"""Return the result of a Salesforce request to list Organization
limits.
"""
url = self.base_url + 'limits/'
result = self._call_salesforce('GET', url, **kwargs)
if result.status_code != 200:
exception_handler(result)
return self.parse_result_to_json(result)
# Query Handler
def query(self, query, include_deleted=False, **kwargs):
"""Return the result of a Salesforce SOQL query as a dict decoded from
the Salesforce response JSON payload.
Arguments:
* query -- the SOQL query to send to Salesforce, e.g.
SELECT Id FROM Lead WHERE Email = "wal<EMAIL>"
* include_deleted -- True if deleted records should be included
"""
url = self.base_url + ('queryAll/' if include_deleted else 'query/')
params = {'q': query}
# `requests` will correctly encode the query string passed as `params`
result = self._call_salesforce('GET', url, name='query',
params=params, **kwargs)
return self.parse_result_to_json(result)
    def query_more(
            self, next_records_identifier, identifier_is_url=False,
            include_deleted=False, **kwargs):
        """Retrieves more results from a query that returned more results
        than the batch maximum. Returns a dict decoded from the Salesforce
        response JSON payload.

        Arguments:

        * next_records_identifier -- either the Id of the next Salesforce
                                     object in the result, or a URL to the
                                     next record in the result.
        * identifier_is_url -- True if `next_records_identifier` should be
                               treated as a URL, False if
                               `next_records_identifier` should be treated as
                               an Id.
        * include_deleted -- True if the `next_records_identifier` refers to a
                             query that includes deleted records. Only used if
                             `identifier_is_url` is False
        """
        if identifier_is_url:
            # Don't use `self.base_url` here because the full URI is provided
            url = ('https://{instance}{next_record_url}'
                   .format(instance=self.sf_instance,
                           next_record_url=next_records_identifier))
        else:
            # Build the pagination URL from the record Id and the endpoint
            # matching the original query's deleted-records setting.
            endpoint = 'queryAll' if include_deleted else 'query'
            url = self.base_url + '{query_endpoint}/{next_record_id}'
            url = url.format(query_endpoint=endpoint,
                             next_record_id=next_records_identifier)
        result = self._call_salesforce('GET', url, name='query_more', **kwargs)
        return self.parse_result_to_json(result)
    def query_all_iter(self, query, include_deleted=False, **kwargs):
        """This is a lazy alternative to `query_all` - it does not construct
        the whole result set into one container, but returns objects from each
        page it retrieves from the API.

        Since `query_all` has always been eagerly executed, we reimplemented it
        using `query_all_iter`, only materializing the returned iterator to
        maintain backwards compatibility.

        The one big difference from `query_all` (apart from being lazy) is that
        we don't return a dictionary with `totalSize` and `done` here,
        we only return the records in an iterator.

        Arguments

        * query -- the SOQL query to send to Salesforce
        * include_deleted -- True if the query should include deleted records.
        """
        result = self.query(query, include_deleted=include_deleted, **kwargs)
        while True:
            # Yield every record of the current page before paginating.
            for record in result['records']:
                yield record
            # fetch next batch if we're not done else break out of loop
            if not result['done']:
                result = self.query_more(result['nextRecordsUrl'],
                                         identifier_is_url=True,
                                         **kwargs)
            else:
                return
def query_all(self, query, include_deleted=False, **kwargs):
"""Returns the full set of results for the `query`. This is a
convenience
wrapper around `query(...)` and `query_more(...)`.
The returned dict is the decoded JSON payload from the final call to
Salesforce, but with the `totalSize` field representing the full
number of results retrieved and the `records` list representing the
full list of records retrieved.
Arguments
* query -- the SOQL query to send to Salesforce, e.g.
SELECT Id FROM Lead WHERE Email = "<EMAIL>"
* include_deleted -- True if the query should include deleted records.
"""
records = self.query_all_iter(query, include_deleted=include_deleted,
**kwargs)
all_records = list(records)
return {
'records': all_records,
'totalSize': len(all_records),
'done': True,
}
def toolingexecute(self, action, method='GET', data=None, **kwargs):
"""Makes an HTTP request to an TOOLING REST endpoint
Arguments:
* action -- The REST endpoint for the request.
* method -- HTTP method for the request (default GET)
* data -- A dict of parameters to send in a POST / PUT request
* kwargs -- Additional kwargs to pass to `requests.request`
"""
# If data is None, we should send an empty body, not "null", which is
# None in json.
json_data = json.dumps(data) if data is not None else None
result = self._call_salesforce(
method,
self.tooling_url + action,
name="toolingexecute",
data=json_data, **kwargs
)
try:
response_content = result.json()
# pylint: disable=broad-except
except Exception:
response_content = result.text
return response_content
def apexecute(self, action, method='GET', data=None, **kwargs):
"""Makes an HTTP request to an APEX REST endpoint
Arguments:
* action -- The REST endpoint for the request.
* method -- HTTP method for the request (default GET)
* data -- A dict of parameters to send in a POST / PUT request
* kwargs -- Additional kwargs to pass to `requests.request`
"""
# If data is None, we should send an empty body, not "null", which is
# None in json.
json_data = json.dumps(data) if data is not None else None
result = self._call_salesforce(
method,
self.apex_url + action,
name="apexecute",
data=json_data, **kwargs
)
try:
response_content = result.json()
# pylint: disable=broad-except
except Exception:
response_content = result.text
return response_content
    def _call_salesforce(self, method, url, name="", **kwargs):
        """Utility method for performing HTTP call to Salesforce.

        Returns a `requests.result` object.
        """
        # Per-call headers are merged over the instance defaults.
        headers = self.headers.copy()
        additional_headers = kwargs.pop('headers', {})
        headers.update(additional_headers)
        result = self.session.request(
            method, url, headers=headers, **kwargs)
        # Any 3xx/4xx/5xx response is converted into an exception.
        if result.status_code >= 300:
            exception_handler(result, name=name)
        # Track API-usage figures reported by Salesforce in this header.
        sforce_limit_info = result.headers.get('Sforce-Limit-Info')
        if sforce_limit_info:
            self.api_usage = self.parse_api_usage(sforce_limit_info)
        return result
    @staticmethod
    def parse_api_usage(sforce_limit_info):
        """parse API usage and limits out of the Sforce-Limit-Info header

        Arguments:

        * sforce_limit_info: The value of response header 'Sforce-Limit-Info'
            Example 1: 'api-usage=18/5000'
            Example 2: 'api-usage=25/5000;
                per-app-api-usage=17/250(appName=sample-connected-app)'
        """
        result = {}
        # Org-wide usage, e.g. 'api-usage=18/5000'. re.match anchors at the
        # start; `[^-]?` tolerates one optional leading non-dash character.
        api_usage = re.match(r'[^-]?api-usage=(?P<used>\d+)/(?P<tot>\d+)',
                             sforce_limit_info)
        # Optional per-connected-app usage with the app name in parentheses.
        pau = r'.+per-app-api-usage=(?P<u>\d+)/(?P<t>\d+)\(appName=(?P<n>.+)\)'
        per_app_api_usage = re.match(pau, sforce_limit_info)
        if api_usage and api_usage.groups():
            groups = api_usage.groups()
            result['api-usage'] = Usage(used=int(groups[0]),
                                        total=int(groups[1]))
        if per_app_api_usage and per_app_api_usage.groups():
            groups = per_app_api_usage.groups()
            result['per-app-api-usage'] = PerAppUsage(used=int(groups[0]),
                                                      total=int(groups[1]),
                                                      name=groups[2])
        return result
# file-based deployment function
def deploy(self, zipfile, sandbox, **kwargs):
"""Deploy using the Salesforce Metadata API. Wrapper for
SfdcMetaDataApi.deploy(...).
Arguments:
* zipfile: a .zip archive to deploy to an org, given as (
"path/to/zipfile.zip")
* options: salesforce DeployOptions in .json format.
(https://developer.salesforce.com/docs/atlas.en-us.api_meta.meta
/api_meta/meta_deploy.htm)
Returns a process id and state for this deployment.
"""
asyncId, state = self.mdapi.deploy(zipfile, sandbox, **kwargs)
result = {'asyncId': asyncId, 'state': state}
return result
# check on a file-based deployment
def checkDeployStatus(self, asyncId, **kwargs):
"""Check on the progress of a file-based deployment via Salesforce
Metadata API.
Wrapper for SfdcMetaDataApi.check_deploy_status(...).
Arguments:
* asyncId: deployment async process ID, returned by Salesforce.deploy()
Returns status of the deployment the asyncId given.
"""
state, state_detail, deployment_detail, unit_test_detail = \
self.mdapi.check_deploy_status(asyncId, **kwargs)
results = {
'state': state,
'state_detail': state_detail,
'deployment_detail': deployment_detail,
'unit_test_detail': unit_test_detail
}
return results
    def parse_result_to_json(self, result):
        """Parse JSON from a Response object, preserving key order and using
        the float parser configured on this instance."""
        return result.json(object_pairs_hook=OrderedDict,
                           parse_float=self._parse_float)
class SFType:
"""An interface to a specific type of SObject"""
_parse_float = None
    # pylint: disable=too-many-arguments
    def __init__(
            self,
            object_name,
            session_id,
            sf_instance,
            sf_version=DEFAULT_API_VERSION,
            proxies=None,
            session=None,
            parse_float=None,
            ):
        """Initialize the instance with the given parameters.

        Arguments:

        * object_name -- the name of the type of SObject this represents,
                         e.g. `Lead` or `Contact`
        * session_id -- the session ID for authenticating to Salesforce
        * sf_instance -- the domain of the instance of Salesforce to use
        * sf_version -- the version of the Salesforce API to use
        * proxies -- the optional map of scheme to proxy server
        * session -- Custom requests session, created in calling code. This
                     enables the use of requests Session features not otherwise
                     exposed by simple_salesforce.
        * parse_float -- Function to parse float values with. Is passed along
                         to https://docs.python.org/3/library/json.html#json.load
        """
        self.session_id = session_id
        self.name = object_name
        self.session = session or requests.Session()
        # don't wipe out original proxies with None
        if not session and proxies is not None:
            self.session.proxies = proxies
        # Filled in from the Sforce-Limit-Info header after each call.
        self.api_usage = {}
        # Root endpoint for all REST calls against this SObject type.
        self.base_url = (
            'https://{instance}/services/data/v{sf_version}/sobjects'
            '/{object_name}/'.format(instance=sf_instance,
                                     object_name=object_name,
                                     sf_version=sf_version))
        self._parse_float = parse_float
def metadata(self, headers=None):
"""Returns the result of a GET to `.../{object_name}/` as a dict
decoded from the JSON payload returned by Salesforce.
Arguments:
* headers -- a dict with additional request headers.
"""
result = self._call_salesforce('GET', self.base_url, headers=headers)
return self.parse_result_to_json(result)
def describe(self, headers=None):
"""Returns the result of a GET to `.../{object_name}/describe` as a
dict decoded from the JSON payload returned by Salesforce.
Arguments:
* headers -- a dict with additional request headers.
"""
result = self._call_salesforce(
method='GET', url=urljoin(self.base_url, 'describe'),
headers=headers
)
return self.parse_result_to_json(result)
def describe_layout(self, record_id, headers=None):
"""Returns the layout of the object
Returns the result of a GET to
`.../{object_name}/describe/layouts/<recordid>` as a dict decoded from
the JSON payload returned by Salesforce.
Arguments:
* record_id -- the Id of the SObject to get
* headers -- a dict with additional request headers.
"""
custom_url_part = 'describe/layouts/{record_id}'.format(
record_id=record_id
)
result = self._call_salesforce(
method='GET',
url=urljoin(self.base_url, custom_url_part),
headers=headers
)
return self.parse_result_to_json(result)
def get(self, record_id, headers=None):
"""Returns the result of a GET to `.../{object_name}/{record_id}` as a
dict decoded from the JSON payload returned by Salesforce.
Arguments:
* record_id -- the Id of the SObject to get
* headers -- a dict with additional request headers.
"""
result = self._call_salesforce(
method='GET', url=urljoin(self.base_url, record_id),
headers=headers
)
return self.parse_result_to_json(result)
def get_by_custom_id(self, custom_id_field, custom_id, headers=None):
"""Return an ``SFType`` by custom ID
Returns the result of a GET to
`.../{object_name}/{custom_id_field}/{custom_id}` as a dict decoded
from the JSON payload returned by Salesforce.
Arguments:
* custom_id_field -- the API name of a custom field that was defined
as an External ID
* custom_id - the External ID value of the SObject to get
* headers -- a dict with additional request headers.
"""
custom_url = urljoin(
self.base_url, '{custom_id_field}/{custom_id}'.format(
custom_id_field=custom_id_field, custom_id=custom_id
)
)
result = self._call_salesforce(
method='GET', url=custom_url, headers=headers
)
return self.parse_result_to_json(result)
def create(self, data, headers=None):
"""Creates a new SObject using a POST to `.../{object_name}/`.
Returns a dict decoded from the JSON payload returned by Salesforce.
Arguments:
* data -- a dict of the data to create the SObject from. It will be
JSON-encoded before being transmitted.
* headers -- a dict with additional request headers.
"""
result = self._call_salesforce(
method='POST', url=self.base_url,
data=json.dumps(data), headers=headers
)
return self.parse_result_to_json(result)
def upsert(self, record_id, data, raw_response=False, headers=None):
"""Creates or updates an SObject using a PATCH to
`.../{object_name}/{record_id}`.
If `raw_response` is false (the default), returns the status code
returned by Salesforce. Otherwise, return the `requests.Response`
object.
Arguments:
* record_id -- an identifier for the SObject as described in the
Salesforce documentation
* data -- a dict of the data to create or update the SObject from. It
will be JSON-encoded before being transmitted.
* raw_response -- a boolean indicating whether to return the response
directly, instead of the status code.
* headers -- a dict with additional request headers.
"""
result = self._call_salesforce(
method='PATCH', url=urljoin(self.base_url, record_id),
data=json.dumps(data), headers=headers
)
return self._raw_response(result, raw_response)
def update(self, record_id, data, raw_response=False, headers=None):
"""Updates an SObject using a PATCH to
`.../{object_name}/{record_id}`.
If `raw_response` is false (the default), returns the status code
returned by Salesforce. Otherwise, return the `requests.Response`
object.
Arguments:
* record_id -- the Id of the SObject to update
* data -- a dict of the data to update the SObject from. It will be
JSON-encoded before being transmitted.
* raw_response -- a boolean indicating whether to return the response
directly, instead of the status code.
* headers -- a dict with additional request headers.
"""
result = self._call_salesforce(
method='PATCH', url=urljoin(self.base_url, record_id),
data=json.dumps(data), headers=headers
)
return self._raw_response(result, raw_response)
def delete(self, record_id, raw_response=False, headers=None):
"""Deletes an SObject using a DELETE to
`.../{object_name}/{record_id}`.
If `raw_response` is false (the default), returns the status code
returned by Salesforce. Otherwise, return the `requests.Response`
object.
Arguments:
* record_id -- the Id of the SObject to delete
* raw_response -- a boolean indicating whether to return the response
directly, instead of the status code.
* headers -- a dict with additional request headers.
"""
result = self._call_salesforce(
method='DELETE', url=urljoin(self.base_url, record_id),
headers=headers
)
return self._raw_response(result, raw_response)
def deleted(self, start, end, headers=None):
# pylint: disable=line-too-long
"""Gets a list of deleted records
Use the SObject Get Deleted resource to get a list of deleted records
for the specified object.
.../deleted/?start=2013-05-05T00:00:00+00:00&end=2013-05-10T00:00:00
+00:00
* start -- start datetime object
* end -- end datetime object
* headers -- a dict with additional request headers.
"""
url = urljoin(
self.base_url, 'deleted/?start={start}&end={end}'.format(
start=date_to_iso8601(start), end=date_to_iso8601(end)
)
)
result = self._call_salesforce(method='GET', url=url, headers=headers)
return self.parse_result_to_json(result)
def updated(self, start, end, headers=None):
# pylint: disable=line-too-long
"""Gets a list of updated records
Use the SObject Get Updated resource to get a list of updated
(modified or added) records for the specified object.
.../updated/?start=2014-03-20T00:00:00+00:00&end=2014-03-22T00:00:00
+00:00
* start -- start datetime object
* end -- end datetime object
* headers -- a dict with additional request headers.
"""
url = urljoin(
self.base_url, 'updated/?start={start}&end={end}'.format(
start=date_to_iso8601(start), end=date_to_iso8601(end)
)
)
result = self._call_salesforce(method='GET', url=url, headers=headers)
return self.parse_result_to_json(result)
def _call_salesforce(self, method, url, **kwargs):
"""Utility method for performing HTTP call to Salesforce.
Returns a `requests.result` object.
"""
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + self.session_id,
'X-PrettyPrint': '1'
}
additional_headers = kwargs.pop('headers', {})
headers.update(additional_headers or {})
result = self.session.request(method, url, headers=headers, **kwargs)
if result.status_code >= 300:
exception_handler(result, self.name)
sforce_limit_info = result.headers.get('Sforce-Limit-Info')
if sforce_limit_info:
self.api_usage = Salesforce.parse_api_usage(sforce_limit_info)
return result
# pylint: disable=no-self-use
def _raw_response(self, response, body_flag):
"""Utility method for processing the response and returning either the
status code or the response object.
Returns either an `int` or a `requests.Response` object.
"""
if not body_flag:
return response.status_code
return response
def parse_result_to_json(self, result):
""""Parse json from a Response object"""
return result.json(object_pairs_hook=OrderedDict,
parse_float=self._parse_float)
def upload_base64(self, file_path, base64_field='Body', headers=None,
**kwargs):
"""Upload base64 encoded file to Salesforce"""
data = {}
with open(file_path, "rb") as f:
body = base64.b64encode(f.read()).decode('utf-8')
data[base64_field] = body
result = self._call_salesforce(method='POST', url=self.base_url,
headers=headers, json=data, **kwargs)
return result
def update_base64(self, record_id, file_path, base64_field='Body',
headers=None, raw_response=False,
**kwargs):
"""Updated base64 image from file to Salesforce"""
data = {}
with open(file_path, "rb") as f:
body = base64.b64encode(f.read()).decode('utf-8')
data[base64_field] = body
result = self._call_salesforce(method='PATCH',
url=urljoin(self.base_url, record_id),
json=data,
headers=headers, **kwargs)
return self._raw_response(result, raw_response)
def get_base64(self, record_id, base64_field='Body', data=None,
headers=None, **kwargs):
"""Returns binary stream of base64 object at specific path.
Arguments:
* path: The path of the request
Example: sobjects/Attachment/ABC123/Body
sobjects/ContentVersion/ABC123/VersionData
"""
result = self._call_salesforce(method='GET', url=urljoin(
self.base_url, '{record_id}/{base64_field}'.format(
record_id=record_id, base64_field=base64_field)),
data=data,
headers=headers, **kwargs)
return result.content
| StarcoderdataPython |
56199 | <reponame>RTUITLab/Energomach-Hack-2021-RealityGang
# Generated by Django 3.2 on 2021-05-22 10:50
from django.db import migrations
class Migration(migrations.Migration):
    """Renames the `inn` field of `main_app.User` to `company`.

    Auto-generated by Django 3.2 (`makemigrations`); schema-only change,
    no data transformation.
    """

    dependencies = [
        ('main_app', '0004_auto_20210522_1349'),
    ]

    operations = [
        migrations.RenameField(
            model_name='user',
            old_name='inn',
            new_name='company',
        ),
    ]
| StarcoderdataPython |
1753001 | <filename>setup.py
import os.path
from distutils.core import setup
def read(fname):
    """Return the text contents of `fname`, resolved relative to this file.

    Uses a context manager so the file handle is closed deterministically
    (the original leaked the handle until garbage collection).
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as handle:
        return handle.read()
# Distribution metadata.  The [pygments.lexers] entry point registers
# SolidityLexer so Pygments discovers it automatically as a plug-in lexer.
setup(
    name='pygments-lexer-solidity',
    version='0.3.1',
    description='Solidity lexer for Pygments',
    long_description=read('README.rst'),
    license="BSD",
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://gitlab.com/veox/pygments-lexer-solidity',
    packages=['pygments_lexer_solidity'],
    classifiers=[
        'Environment :: Plugins',
        'Intended Audience :: Developers',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3'
    ],
    keywords='pygments lexer syntax highlight solidity sol ethereum',
    install_requires=[
        'pygments>=2.1'
    ],
    entry_points="""
[pygments.lexers]
solidity = pygments_lexer_solidity:SolidityLexer
"""
)
| StarcoderdataPython |
1753922 | <reponame>Akashdawari/Classifying-Classifiers-<filename>iris_dataset_classifiers.py<gh_stars>0
# -*- coding: utf-8 -*-
"""Iris_dataset_classifiers.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1De9mW9xFR0sMiAQvc08IzTQxe2_htH8J
"""
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
import numpy as np
import matplotlib.pyplot as plt
# One model per classifier family, compared side by side on the iris data.
svm_clf = SVC()
dt_clf = DecisionTreeClassifier()
log_clf = LogisticRegression()
rnd_clf = RandomForestClassifier()
clf = [svm_clf, dt_clf, log_clf, rnd_clf]

# All 6 unordered pairs of the 4 iris features.
arr = [[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]]

iris = load_iris()

# Overview figure: raw scatter of each feature pair, colored by species.
plt.figure(figsize=(14, 9))
for ot, feature_pair in enumerate(arr):
    X = load_iris().data[:, feature_pair]
    Y = load_iris().target
    plt.subplot(2, 3, ot + 1)
    # This formatter labels the colorbar ticks with the species names.
    formatter = plt.FuncFormatter(lambda j, *args: iris.target_names[int(j)])
    plt.scatter(X[:, 0], X[:, 1], c=iris.target)
    plt.colorbar(ticks=[0, 1, 2], format=formatter)
    plt.xlabel(iris.feature_names[feature_pair[0]], color="white")
    plt.ylabel(iris.feature_names[feature_pair[1]], color="white")
plt.show()

# Decision-surface figure, one per classifier.
for classifier in clf:
    # Parameters
    n_classes = 3
    scatter_colors = ["navy", "firebrick", "darkgreen"]
    plot_step = 0.02  # mesh resolution for the decision surface

    iris = load_iris()
    plt.figure(figsize=(14, 9))
    for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
                                    [1, 2], [1, 3], [2, 3]]):
        # We only take the two corresponding features
        X = iris.data[:, pair]
        y = iris.target
        x_train, x_test, y_train, y_test = train_test_split(X, y,
                                                            train_size=0.7)

        # BUG FIX: the original rebound the module-level `clf` list here
        # (`clf = classifier.fit(...)`), shadowing the list being iterated.
        # Bind the fitted estimator to its own name instead.
        model = classifier.fit(x_train, y_train)
        y_pred = model.predict(x_test)

        # Plot the decision boundary over a dense mesh.
        sub = plt.subplot(2, 3, pairidx + 1)
        x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                             np.arange(y_min, y_max, plot_step))
        Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
        Z = Z.reshape(xx.shape)

        acc = accuracy_score(y_test, y_pred)
        sub.set_title("Accuracy :- " + str(acc), color="lime")
        cs = plt.contourf(xx, yy, Z, alpha=0.5)
        plt.xlabel(iris.feature_names[pair[0]])
        plt.ylabel(iris.feature_names[pair[1]])
        plt.axis("tight")

        # Plot the training points on top of the decision surface.
        for i, color in zip(range(n_classes), scatter_colors):
            idx = np.where(y == i)
            plt.scatter(X[idx, 0], X[idx, 1], c=color,
                        label=iris.target_names[i],
                        cmap=plt.cm.Paired, alpha=1)
        plt.axis("tight")

    plt.suptitle("Decision surface of a " + classifier.__class__.__name__ +
                 " using paired features with accuracy of ", color="red")
    plt.legend()
    plt.show()
189176 | #!/usr/bin/env python
import hashlib
import itertools
import struct
import time
import multiprocessing
from ctypes import cdll
# Search target: only hashes that fit in the low `nbits` bits are of interest.
nbits = 19
mask = (1<<nbits)-1  # 0x7ffff
tot_values = pow(2,nbits)
def process_killer():
    """Ask the kernel to SIGKILL this process when its parent dies.

    Calls libc prctl(PR_SET_PDEATHSIG=1, SIGKILL=9); Linux-only.  Used by
    worker processes so they do not outlive the driver.
    """
    cdll['libc.so.6'].prctl(1,9)
def _left_rotate(n, b):
"""Left rotate a 32-bit integer n by b bits."""
return ((n << b) | (n >> (32 - b))) & 0xffffffff
def process_chunk(chunk, h0, h1, h2, h3, h4):
    """Process a chunk of data and return the new digest variables.

    Standard SHA-1 compression of one 64-byte `chunk` (FIPS 180-1), except
    that this variant only returns the updated h0 word -- h1..h4 are
    discarded by the caller.
    """
    assert len(chunk) == 64

    w = [0] * 80
    # Break chunk into sixteen 4-byte big-endian words w[i]
    for i in range(16):
        w[i] = struct.unpack(b'>I', chunk[i*4:i*4 + 4])[0]

    # Extend the sixteen 4-byte words into eighty 4-byte words
    for i in range(16, 80):
        w[i] = _left_rotate(w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16], 1)

    # Initialize hash value for this chunk
    a = h0
    b = h1
    c = h2
    d = h3
    e = h4

    for i in range(80):
        if 0 <= i <= 19:
            # Use alternative 1 for f from FIPS PB 180-1 to avoid bitwise not
            f = d ^ (b & (c ^ d))
            k = 0x5A827999
        elif 20 <= i <= 39:
            f = b ^ c ^ d
            k = 0x6ED9EBA1
        elif 40 <= i <= 59:
            f = (b & c) | (b & d) | (c & d)
            k = 0x8F1BBCDC
        elif 60 <= i <= 79:
            f = b ^ c ^ d
            k = 0xCA62C1D6

        # One round: mix, then shift the working registers down.
        a, b, c, d, e = ((_left_rotate(a, 5) + f + e + k + w[i]) & 0xffffffff,
                        a, _left_rotate(b, 30), c, d)

    # Add this chunk's hash to result so far (only h0 is tracked).
    h0 = (h0 + a) & 0xffffffff
    return h0
def sha1block(tinput):
    """Compress one (<=64-byte) block with the *modified* SHA-1 round.

    Pads `tinput` to a full 64-byte block with NULs and runs a single
    compression.  Note that h1 and h2 are passed to `process_chunk` in
    swapped order, so this is deliberately NOT standard SHA-1.  Always
    returns the resulting 32-bit h0 word as an int.
    """
    tinput = tinput.ljust(64, '\x00')
    h0 = 0x67452301
    h1 = 0xEFCDAB89
    h2 = 0x98BADCFE
    h3 = 0x10325476
    h4 = 0xC3D2E1F0
    # BUG FIX: the original had an `if (res & mask) == res: ... else: None`
    # filter *after* an unconditional `return res`, i.e. dead code.  The
    # module-level demo calls below rely on a non-None return (they pass the
    # result to hex()), so the always-return behavior is kept and the
    # unreachable branch removed.
    return process_chunk(tinput, h0, h2, h1, h3, h4)  # h1/h2 intentionally switched
#import IPython; IPython.embed()
'''
tl = list(xrange(256))
ss = {}
for i,t in enumerate(itertools.product(tl,repeat=5)):
if i % 0x100000 == 0:
if i == 0: continue
print hex(i), hex(len(ss))
break
tinput = "".join(map(chr,t))
r = sha1block(tinput)
if r != None:
# print "tinput: %s, res: %08x" % (tinput.encode("hex"),r)
ss[r] = tinput
for k in sorted(ss.keys()):
v = ss[k]
print "%08x <-- %s" % (k,v.encode('hex'))
'''
def bruteforce_process(tid, nproc, result_queue):
    """Worker: brute-force all 5-byte inputs through the modified SHA-1.

    Python 2 code (uses `xrange`).  `tid` is this worker's index, `nproc`
    the worker count, `result_queue` a multiprocessing queue receiving
    (hash, input) pairs.
    """
    # Die with the parent process (Linux prctl).
    process_killer()
    slen = pow(2,8*5)  # total candidate count: 2**40 five-byte strings
    tl = list(xrange(256))
    it = itertools.product(tl,repeat=5)
    # NOTE(review): `step` is computed but never used -- the islice below
    # starts every worker at offset `tid` and walks to the end, so all
    # workers scan nearly the same candidates.  The intended partitioning
    # was presumably islice(it, step, slen); verify before relying on this.
    step = (slen/nproc)*tid
    for x in itertools.islice(it,tid,slen):
        tinput = "".join(map(chr,x))
        r = sha1block(tinput)
        # NOTE(review): sha1block as written always returns a non-None int
        # (its masking filter is dead code), so this check never filters.
        if r != None:
            result_queue.put((r,tinput))
# Demo / sanity-check output (Python 2 print statements).
# NOTE(review): the first call is duplicated and its result discarded.
res = sha1block("\x01\x09\x54\x7f\x6b")
res = sha1block("\x01\x09\x54\x7f\x6b")
print hex(res)
res = sha1block("\x28\x71\x3a\xbc")
print hex(res)
res = sha1block("\xff\xff\xff\xff")
print hex(res)
res = sha1block("\x00\x00\x00\x00")
print hex(res)
res = sha1block("\x80\x00\x00\x00")
print hex(res)
res = sha1block("\x00\x00\x00\x01")
print hex(res)
# NOTE(review): bruteforce_process is defined but never launched here;
# presumably driven from another script via multiprocessing -- verify.
1602243 | <reponame>stat-kwon/notification<filename>src/spaceone/notification/manager/identity_manager.py
import logging
from spaceone.core.manager import BaseManager
from spaceone.core.connector.space_connector import SpaceConnector
_LOGGER = logging.getLogger(__name__)
class IdentityManager(BaseManager):
    """Convenience wrapper around the identity micro-service.

    Each getter forwards a request through the shared SpaceConnector
    dispatcher and returns the raw service response.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.identity_connector: SpaceConnector = self.locator.get_connector(
            'SpaceConnector', service='identity')

    def get_user(self, user_id, domain_id):
        """Fetch a single user record."""
        payload = {'user_id': user_id, 'domain_id': domain_id}
        return self.identity_connector.dispatch('User.get', payload)

    def get_project(self, project_id, domain_id):
        """Fetch a single project record."""
        payload = {'project_id': project_id, 'domain_id': domain_id}
        return self.identity_connector.dispatch('Project.get', payload)

    def get_service_account(self, service_account_id, domain_id):
        """Fetch a single service-account record."""
        payload = {
            'service_account_id': service_account_id,
            'domain_id': domain_id,
        }
        return self.identity_connector.dispatch('ServiceAccount.get', payload)
52002 | <gh_stars>1-10
import numpy as np
import sys, traceback
import cv2
# Index of the capture device to open.
device_id = 0

def nothing(x):
    """No-op callback required by cv2.createTrackbar."""
    return None
cv2.namedWindow('ESF_preview')
cv2.namedWindow('Trackbars')

# UI controls for the event-stream-like (ESF) temporal-contrast filter.
cv2.createTrackbar('ACC_RATE','Trackbars',80,100,nothing)
cv2.createTrackbar('MIN_GRAY','Trackbars',10,255,nothing)
cv2.createTrackbar('MIN_DIFF','Trackbars',1,10,nothing)
cv2.createTrackbar('SCALE','Trackbars',20,100,nothing)
cv2.createTrackbar('OFF/ON', 'Trackbars',0,1,nothing)

cap = cv2.VideoCapture(device_id, cv2.CAP_DSHOW)
cv2.waitKey(1500)  # give the camera time to initialize

if not cap.isOpened():
    print("Unable to connect with selected capturing device")
    cv2.destroyAllWindows()
    sys.exit(0)

# Grab one frame to learn the geometry and seed `previous_frame`.
ret, current_frame = cap.read()
height = 0
width = 0
channels = 1
if len(current_frame.shape) == 2:
    height, width = current_frame.shape
    channels = 1
else:
    height, width, channels = current_frame.shape

if channels > 1:
    current_frame = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)

current_frame = current_frame.astype(np.float32) * (1.0 / 255.0)
previous_frame = current_frame.copy()
weighted_esf = np.zeros((height, width, 1), np.uint8)

while True:
    # Read the filter parameters from the trackbars each frame.
    value = cv2.getTrackbarPos('ACC_RATE', 'Trackbars')
    acc_rate = value / 100.0
    value = cv2.getTrackbarPos('MIN_GRAY', 'Trackbars')
    min_gray = value / 255.0
    value = cv2.getTrackbarPos('MIN_DIFF', 'Trackbars')
    min_diff = value / 255.0
    value = cv2.getTrackbarPos('SCALE', 'Trackbars')
    max_esf = 0.25 * ((value + 1.0) / 100.0)
    scale_coeff = (1.0 / max_esf) * 255.0
    s = cv2.getTrackbarPos('OFF/ON', 'Trackbars')

    ret, current_frame = cap.read()
    # BUG FIX: the original released the camera on a failed read but then
    # kept looping on the released capture forever; stop cleanly instead.
    if np.shape(current_frame) == ():
        break

    if channels > 1:
        current_frame = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)

    if s == 1:
        current_frame = current_frame.astype(np.float32) * (1.0 / 255.0)
        # Relative temporal contrast: |cur - prev| / (cur + prev),
        # with thresholds to suppress noise in dark regions.
        frame_sum = current_frame + previous_frame  # renamed from `sum` (shadowed builtin)
        diff = cv2.absdiff(current_frame, previous_frame)
        diff[diff < min_diff] = 0
        frame_sum[frame_sum < min_gray] = 1000
        esf = cv2.multiply(diff, cv2.pow(frame_sum, -1.0))
        esf *= scale_coeff
        esf[esf > 255] = 255
        esf[esf < 0] = 0
        esf = esf.astype(np.uint8)
        esf = cv2.GaussianBlur(esf, (5, 5), 0)
        # Exponential moving average of the contrast map.
        weighted_esf = cv2.addWeighted(weighted_esf, 1.0 - acc_rate,
                                       esf, acc_rate, gamma=0)
        im_color = cv2.applyColorMap(weighted_esf, cv2.COLORMAP_JET)
        cv2.imshow('ESF_preview', im_color)
        previous_frame = current_frame.copy()
    else:
        cv2.imshow('ESF_preview', current_frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# BUG FIX: release the camera and close windows on every exit path
# (the original leaked the capture when quitting with 'q').
cap.release()
cv2.destroyAllWindows()
1733349 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 9 08:41:17 2015.
@author: mje
"""
import numpy as np
import numpy.random as npr
import os
import socket
import mne
# import pandas as pd
from mne.connectivity import spectral_connectivity
from mne.minimum_norm import (apply_inverse_epochs, read_inverse_operator)
# Permutation test.
def permutation_resampling(case, control, num_samples, statistic):
    """
    Permutation test.

    Estimate the two-sided p-value that `statistic` differs between
    `case` and `control` by resampling `num_samples` random permutations
    of the pooled data.

    Returns (pval, observed_diff, diffs), where `diffs` is the list of
    permuted differences.
    """
    observed_diff = abs(statistic(case) - statistic(control))
    num_case = len(case)

    combined = np.concatenate([case, control])
    diffs = []
    for i in range(num_samples):
        xs = npr.permutation(combined)
        diff = np.mean(xs[:num_case]) - np.mean(xs[num_case:])
        diffs.append(diff)

    # BUG FIX: `diffs` is a plain list; comparing it directly to a float
    # (`diffs > observed_diff`) raises TypeError on Python 3 whenever
    # `statistic` returns a built-in float (it only worked by accident when
    # the threshold was a NumPy scalar).  Compare on an ndarray instead.
    diffs_arr = np.asarray(diffs)
    pval = (np.sum(diffs_arr > observed_diff) +
            np.sum(diffs_arr < -observed_diff)) / float(num_samples)
    return pval, observed_diff, diffs
def permutation_test(a, b, num_samples, statistic):
    """
    Permutation test.

    Estimate the two-sided p-value that `statistic` differs between `a`
    and `b`, using `num_samples` random permutations of the pooled data.

    Returns (pval, observed_diff, diffs), where `diffs` is the list of
    permuted differences.
    """
    observed_diff = abs(statistic(b) - statistic(a))
    num_a = len(a)
    combined = np.concatenate([a, b])

    # Difference of group means for each random relabelling of the pool.
    diffs = [
        np.mean(shuffled[:num_a]) - np.mean(shuffled[num_a:])
        for shuffled in (npr.permutation(combined)
                         for _ in range(num_samples))
    ]

    # Fraction of permutations at least as extreme as the observed value.
    extreme = np.abs(diffs) >= np.abs(observed_diff)
    pval = np.sum(extreme) / float(num_samples)
    return pval, observed_diff, diffs
# ---------------------------------------------------------------------------
# Setup paths and prepare raw data
# ---------------------------------------------------------------------------
hostname = socket.gethostname()

if hostname == "Wintermute":
    data_path = "/home/mje/mnt/caa/scratch/"
    n_jobs = 1
else:
    data_path = "/projects/MINDLAB2015_MEG-CorticalAlphaAttention/scratch/"
    n_jobs = 1

subjects_dir = data_path + "fs_subjects_dir/"

# change dir to save files the right place
os.chdir(data_path)

fname_inv = data_path + '0001-meg-oct-6-inv.fif'
fname_epochs = data_path + '0001_p_03_filter_ds_ica-mc_tsss-epo.fif'
fname_evoked = data_path + "0001_p_03_filter_ds_ica-mc_raw_tsss-ave.fif"

# Parameters
snr = 1.0  # Standard assumption for average data but using it for single trial
lambda2 = 1.0 / snr ** 2
method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)

# Load data
inverse_operator = read_inverse_operator(fname_inv)
epochs = mne.read_epochs(fname_epochs)

# Get labels for FreeSurfer Brodmann parcellation.
labels = mne.read_labels_from_annot('0001', parc='PALS_B12_Brodmann',
                                    regexp="Brodmann",
                                    subjects_dir=subjects_dir)
labels_occ = labels[6:12]  # occipital Brodmann labels

# Single-trial source estimates per condition.
# NOTE(review): exec() creates module-level names such as `stcs_ctl_left`
# and `stcs_ent_left` that are referenced below; kept for compatibility,
# although a dict keyed by condition would be cleaner.
for cond in epochs.event_id.keys():
    stcs = apply_inverse_epochs(epochs[cond], inverse_operator, lambda2,
                                method, pick_ori="normal")
    exec("stcs_%s = stcs" % cond)

# BUG FIX: the original initialised `labels_name` with a comprehension and
# then appended the same names again in a loop, duplicating every entry.
labels_name = [label.name for label in labels_occ]

# Extract label time series (one course per occipital label).
ts_ctl_left = mne.extract_label_time_course(stcs_ctl_left,
                                            labels_occ,
                                            src=inverse_operator["src"],
                                            mode="mean_flip")

ts_ent_left = mne.extract_label_time_course(stcs_ent_left,
                                            labels_occ,
                                            src=inverse_operator["src"],
                                            mode="mean_flip")

stcs_all_left = stcs_ctl_left + stcs_ent_left
ts_all_left = np.asarray(mne.extract_label_time_course(
    stcs_all_left,
    labels_occ,
    src=inverse_operator["src"],
    mode="mean_flip"))

number_of_permutations = 2000
index = np.arange(0, len(ts_all_left))
# NOTE(review): `permutations_results` is allocated but never filled.
permutations_results = np.empty(number_of_permutations)
fmin, fmax = 7, 12  # alpha band (Hz)
tmin, tmax = 0, 1   # window of interest (s)
con_method = "plv"
diff_permuatation = np.empty([6, 6, number_of_permutations])

# Observed connectivity difference between conditions.
con_ctl, freqs_ctl, times_ctl, n_epochs_ctl, n_tapers_ctl =\
    spectral_connectivity(
        ts_ctl_left,
        method=con_method,
        mode='multitaper',
        sfreq=250,
        fmin=fmin, fmax=fmax,
        faverage=True,
        tmin=tmin, tmax=tmax,
        mt_adaptive=False,
        n_jobs=1,
        verbose=None)

con_ent, freqs_ent, times_ent, n_epochs_ent, n_tapers_ent =\
    spectral_connectivity(
        ts_ent_left,
        method=con_method,
        mode='multitaper',
        sfreq=250,
        fmin=fmin, fmax=fmax,
        faverage=True,
        tmin=tmin, tmax=tmax,
        mt_adaptive=False,
        n_jobs=1,
        verbose=None)

diff = con_ctl[:, :, 0] - con_ent[:, :, 0]

# Null distribution: shuffle trial labels and recompute the difference.
# NOTE(review): the split at 64 assumes 64 control trials -- verify against
# the actual epoch counts.
for i in range(number_of_permutations):
    index = np.random.permutation(index)
    tmp_ctl = ts_all_left[index[:64], :, :]
    tmp_case = ts_all_left[index[64:], :, :]

    con_ctl, freqs_ctl, times_ctl, n_epochs_ctl, n_tapers_ctl =\
        spectral_connectivity(
            tmp_ctl,
            method=con_method,
            mode='multitaper',
            sfreq=250,
            fmin=fmin, fmax=fmax,
            faverage=True,
            tmin=tmin, tmax=tmax,
            mt_adaptive=False,
            n_jobs=1)

    con_case, freqs_case, times_case, n_epochs_case, n_tapers_case =\
        spectral_connectivity(
            tmp_case,
            method=con_method,
            mode='multitaper',
            sfreq=250,
            fmin=fmin, fmax=fmax,
            faverage=True,
            tmin=tmin, tmax=tmax,
            mt_adaptive=False,
            n_jobs=1)

    diff_permuatation[:, :, i] = con_ctl[:, :, 0] - con_case[:, :, 0]

# Per-connection p-values.
# NOTE(review): entries where diff == 0 are left uninitialized (np.empty_like).
pval = np.empty_like(diff)
for h in range(diff.shape[0]):
    for j in range(diff.shape[1]):
        if diff[h, j] != 0:
            # BUG FIX: the original indexed diff_permuatation[h, h, :]
            # (wrong column) and diff[h, j, :] (three indices into a 2-D
            # array -> IndexError).  Count permuted differences at least as
            # extreme as the observed one.
            pval[h, j] = np.sum(
                np.abs(diff_permuatation[h, j, :]) >= np.abs(diff[h, j])
            ) / float(number_of_permutations)
3235642 | <filename>tensorflow/python/ops/ragged/ragged_batch_gather_with_default_op.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Array operations for RaggedTensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_dispatch # pylint: disable=unused-import
from tensorflow.python.ops.ragged import ragged_operators # pylint: disable=unused-import
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_shape
from tensorflow.python.ops.ragged import ragged_where_op
#===============================================================================
# ragged.batch_gather_with_default
#===============================================================================
def batch_gather_with_default(params,
                              indices,
                              default_value='',
                              name=None):
  """Same as `batch_gather` but inserts `default_value` for invalid indices.

  This operation is similar to `batch_gather` except that it will substitute
  the value for invalid indices with `default_value` as the contents.
  See `batch_gather` for more details.


  Args:
    params: A potentially ragged tensor with shape `[B1...BN, P1...PM]` (`N>=0`,
      `M>0`).
    indices: A potentially ragged tensor with shape `[B1...BN, I]` (`N>=0`).
    default_value: A value to be inserted in places where `indices` are out of
      bounds. Must be the same dtype as params and either a scalar or rank 1.
    name: A name for the operation (optional).

  Returns:
    A potentially ragged tensor with shape `[B1...BN, I, P2...PM]`.
    `result.ragged_rank = max(indices.ragged_rank, params.ragged_rank)`.

  #### Example:
    ```python
    >>> params = tf.ragged.constant([
          ['a', 'b', 'c'],
          ['d'],
          [],
          ['e']])
    >>> indices = tf.ragged.constant([[1, 2, -1], [], [], [0, 10]])
    >>> batch_gather_with_default(params, indices, 'FOO')
    [['b', 'c', 'FOO'], [], [], ['e', 'FOO']]
    ```
  """
  with ops.name_scope(name, 'RaggedBatchGatherWithDefault'):
    params = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        params, name='params',
    )
    indices = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        indices, name='indices',
    )
    default_value = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        default_value, name='default_value',
    )
    row_splits_dtype, (params, indices, default_value) = (
        ragged_tensor.match_row_splits_dtypes(params, indices, default_value,
                                              return_dtype=True))
    # TODO(hterry): lift this restriction and support default_values of
    #               rank > 1
    # BUG FIX: the original used `ndims is not 0 and ndims is not 1`, i.e.
    # *identity* comparison against int literals.  That only works by
    # accident of CPython's small-int caching and raises a SyntaxWarning on
    # Python >= 3.8; use value comparison instead.
    if default_value.shape.ndims not in (0, 1):
      raise ValueError('"default_value" must be a scalar or vector')
    upper_bounds = None
    if indices.shape.ndims is None:
      raise ValueError('Indices must have a known rank.')
    if params.shape.ndims is None:
      raise ValueError('Params must have a known rank.')

    num_batch_dimensions = indices.shape.ndims - 1
    pad = None
    # The logic for this works as follows:
    # - create a padded params, where:
    #    padded_params[b1...bn, 0] = default_value
    #    padded_params[b1...bn, i] = params[b1...bn, i-1] (i>0)
    # - create an `upper_bounds` Tensor that contains the number of elements
    #   in each innermost rank. Broadcast `upper_bounds` to be the same shape
    #   as `indices`.
    # - check to see which index in `indices` are out of bounds and substitute
    #   it with the index containing `default_value` (the first).
    # - call batch_gather with the indices adjusted.
    with ops.control_dependencies([
        check_ops.assert_greater_equal(array_ops.rank(params),
                                       array_ops.rank(indices))]):
      if ragged_tensor.is_ragged(params):
        row_lengths = ragged_array_ops.expand_dims(
            params.row_lengths(axis=num_batch_dimensions),
            axis=-1)
        upper_bounds = math_ops.cast(row_lengths, indices.dtype)

        pad_shape = _get_pad_shape(params, indices, row_splits_dtype)

        pad = ragged_tensor_shape.broadcast_to(
            default_value, pad_shape)
      else:
        params_shape = array_ops.shape(params)
        pad_shape = array_ops.concat([
            params_shape[:num_batch_dimensions],
            [1],
            params_shape[num_batch_dimensions + 1:params.shape.ndims]
        ], 0)
        upper_bounds = params_shape[num_batch_dimensions]
        pad = array_ops.broadcast_to(default_value, pad_shape)

      # Add `default_value` as the first value in the innermost (ragged) rank.
      pad = math_ops.cast(pad, params.dtype)
      padded_params = array_ops.concat(
          [pad, params], axis=num_batch_dimensions)

      # Adjust the indices by substituting out-of-bound indices to the
      # default-value index (which is the first element)
      shifted_indices = indices + 1
      is_out_of_bounds = (indices < 0) | (indices > upper_bounds)
      adjusted_indices = ragged_where_op.where(
          is_out_of_bounds,
          x=array_ops.zeros_like(indices), y=shifted_indices,
      )
      return array_ops.batch_gather(
          params=padded_params, indices=adjusted_indices, name=name)
def _get_pad_shape(params, indices, row_splits_dtype):
  """Gets the RaggedTensorDynamicShape for the pad tensor.

  The pad tensor holds the `default_value` row that the caller concatenates
  in front of `params` along the gather dimension, so its shape matches
  `params` except that the gathered-over dimension has size 1.

  Args:
    params: The (potentially ragged) tensor being gathered from.
    indices: The gather indices; the leading `indices.shape.ndims - 1`
      dimensions are treated as batch dimensions.
    row_splits_dtype: dtype used for the constructed dimension sizes.

  Returns:
    A `RaggedTensorDynamicShape` describing the pad tensor.
  """
  num_batch_dimensions = indices.shape.ndims - 1
  params_shape = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(
      params, dim_size_dtype=row_splits_dtype)
  # We want to create a pad tensor that can be concatenated with the params.
  if params.shape.ndims == indices.shape.ndims:
    # When params and indices are the same rank, the shape of the pad tensor is
    # almost identical to params, except the last dimension which has size = 1.
    if params_shape.num_inner_dimensions == 0:
      # Fully partitioned (ragged all the way in): replace the last
      # partitioned dimension with ones.
      pad_dims = params_shape.partitioned_dim_sizes[:-1] + (
          array_ops.ones_like(params_shape.partitioned_dim_sizes[-1]),)
      return ragged_tensor_shape.RaggedTensorDynamicShape(
          pad_dims, [])
    else:
      # Dense inner dimensions exist: shrink the innermost one to size 1.
      return ragged_tensor_shape.RaggedTensorDynamicShape(
          params_shape.partitioned_dim_sizes,
          array_ops.concat([params_shape.inner_dim_sizes[:-1], [1]], axis=0))
  else:
    # When the rank of indices < params, the pad has the same dimension as
    # params up to the 'num_batch_dimensions' rank. Every dimension after that
    # has size 1.
    pad_dims = None
    if num_batch_dimensions == 0:
      # No batch dimensions: the pad is a single row of ones-sized dims.
      pad_dims = (constant_op.constant(1, dtype=row_splits_dtype),) + (
          constant_op.constant([1], dtype=row_splits_dtype),) * (
              params_shape.num_partitioned_dimensions -
              num_batch_dimensions - 1)
    else:
      # Keep the batch dims of params; every partitioned dim from the gather
      # dimension onwards becomes size 1.
      batch_dimensions = params_shape.partitioned_dim_sizes[
          :num_batch_dimensions]
      gather_dimension = params_shape.partitioned_dim_sizes[
          num_batch_dimensions]
      pad_dims = batch_dimensions + (
          array_ops.ones_like(gather_dimension),) * (
              params_shape.num_partitioned_dimensions - num_batch_dimensions)
    return ragged_tensor_shape.RaggedTensorDynamicShape(
        pad_dims, params_shape.inner_dim_sizes)
| StarcoderdataPython |
3378687 | # -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
class UpgradeRedisRequest(Request):
    """Request object for the Tencent Cloud ``UpgradeRedis`` API action.

    Carries the target instance id and the desired memory size; the base
    ``Request`` class handles parameter storage and dispatch.
    """

    def __init__(self):
        super(UpgradeRedisRequest, self).__init__(
            'redis', 'qcloudcliV1', 'UpgradeRedis', 'redis.api.qcloud.com')

    def get_memSize(self):
        """Return the requested memory size, or None if not set."""
        params = self.get_params()
        return params.get('memSize')

    def set_memSize(self, memSize):
        """Record the target memory size for the upgrade."""
        self.add_param('memSize', memSize)

    def get_redisId(self):
        """Return the target Redis instance id, or None if not set."""
        params = self.get_params()
        return params.get('redisId')

    def set_redisId(self, redisId):
        """Record which Redis instance to upgrade."""
        self.add_param('redisId', redisId)
| StarcoderdataPython |
3387512 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 7 14:43:27 2018
@author: hfwittmann
"""
import numpy as np
# objective tests
from nim_perfect_play.nim_perfect_play import findWinningMove
class Accuracy:
    """Build a labelled dataset of Nim positions for accuracy evaluation.

    Random Nim positions are generated, and for each one the perfect-play
    successor position is computed with ``findWinningMove``.  Every position
    is labelled with whether it is winning and with the optimal move, and
    the interleaved (position, label, move) arrays are split into an
    in-sample (95%) and an out-of-sample (5%) portion.

    Attributes:
        positions, winning, move: full interleaved arrays; even rows come
            from the random positions, odd rows from their perfect-play
            successors.
        positions_InSample / positions_OutOfSample (and the matching
            winning_* / move_* attributes): the train/test split.
    """

    def __init__(self, maxHeapSize=7, numberOfHeaps=3, nofPositions=1000):
        shape = [nofPositions, numberOfHeaps]

        # Random starting positions: heap sizes drawn uniformly from
        # 1..maxHeapSize inclusive (np.random.randint's upper bound is
        # exclusive, hence the +1).
        random_positions = np.random.randint(1, maxHeapSize + 1, size=shape)

        # Per-position results for the random positions and their successors.
        next_positions = np.zeros(shape=shape)
        winning_from_random = np.zeros(shape[0], dtype=bool)
        winning_from_next = np.zeros(shape[0], dtype=bool)
        move_from_random = np.zeros([shape[0], 2])
        move_from_next = np.zeros([shape[0], 2])

        for heapIndex, heap in enumerate(random_positions):
            fWM = findWinningMove(heap)
            fWM_next = findWinningMove(fWM['next_position'])

            next_positions[heapIndex] = fWM['next_position']
            winning_from_random[heapIndex] = fWM['winning']
            move_from_random[heapIndex] = fWM['move']
            winning_from_next[heapIndex] = fWM_next['winning']
            move_from_next[heapIndex] = fWM_next['move']

        # Interleave random positions (even rows) with their perfect-play
        # successors (odd rows).
        # https://stackoverflow.com/questions/5347065/interweaving-two-numpy-arrays
        positions = np.empty(shape=[shape[0] * 2, shape[1]])
        positions[0::2, :] = random_positions
        positions[1::2, :] = next_positions

        winning = np.empty(shape=shape[0] * 2)
        winning[0::2] = winning_from_random
        winning[1::2] = winning_from_next

        move = np.zeros([shape[0] * 2, 2])
        move[0::2] = move_from_random
        move[1::2] = move_from_next

        # Split into in-sample (95%) and out-of-sample (5%) portions.
        # (The original comment claimed 90%, contradicting the value.)
        inSample_percentage = 0.95
        inSample = int(len(positions) * inSample_percentage)

        self.positions = positions
        self.positions_InSample = positions[:inSample]
        self.positions_OutOfSample = positions[inSample:]

        self.winning = winning
        self.winning_InSample = winning[:inSample]
        self.winning_OutOfSample = winning[inSample:]

        self.move = move
        self.move_InSample = move[:inSample]
        self.move_OutOfSample = move[inSample:]
| StarcoderdataPython |
3348627 | <gh_stars>1-10
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
import torch.nn.functional as F
from models.model import create_model, load_model
from utils.image import get_affine_transform
from utils.debugger import Debugger
import os.path as op
import os, uuid
import random
def getfilename(path):
  """Return the base name of *path* without its extension.

  Replaces the original ``lambda`` assignment (PEP 8 E731) with an
  equivalent named function; callers are unaffected.
  """
  return op.splitext(op.split(path)[-1])[0]
def random_filename(filename):
  """Return a collision-resistant name that keeps *filename*'s extension.

  The stem is replaced by a random UUID4 hex string (32 chars); the
  original extension, if any, is preserved.
  """
  _, ext = os.path.splitext(filename)
  return uuid.uuid4().hex + ext
class BaseDetector(object):
  """Base class for CenterNet-style detectors.

  Loads the network described by ``opt`` and provides the shared inference
  pipeline (``pre_process`` -> ``process`` -> ``post_process`` ->
  ``merge_outputs``) plus optional heatmap/offset visualisation for the web
  front-end.  Subclasses must implement the abstract hooks below.
  """

  def __init__(self, opt):
    # Pick the inference device from the first configured GPU id
    # (negative id means CPU-only).
    if opt.gpus[0] >= 0:
      opt.device = torch.device('cuda')
    else:
      opt.device = torch.device('cpu')

    print('Creating model...')
    self.model = create_model(opt.arch, opt.heads, opt.head_conv)
    self.model = load_model(self.model, opt.load_model)
    self.model = self.model.to(opt.device)
    self.model.eval()

    # Per-channel normalisation statistics, broadcastable over HxWx3 images.
    self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
    self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)
    self.max_per_image = 100
    self.num_classes = opt.num_classes
    self.scales = opt.test_scales
    self.opt = opt
    self.pause = True
    # COCO class names, indexed by (category id - 1).
    self.class_name = ['person', 'bicycle', 'car', 'motorcycle', 'airplane',
        'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
        'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
        'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
        'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
        'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
        'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',
        'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
        'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
        'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
        'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
        'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
        'scissors', 'teddy bear', 'hair drier', 'toothbrush']
    # English -> Chinese display names used by the web front-end.
    # Fixed: 'suitcase' and 'frisbee' previously carried the translations of
    # their neighbouring entries ('frisbee' and 'skis'), leaving 'frisbee'
    # and 'skis' with the same duplicate value.
    self.chinese_name = {'person': '人', 'bicycle': '自行车', 'car': '汽车', 'motorcycle': '摩托车',
                         'airplane': '飞机', 'bus': '公交车', 'train': '火车', 'truck': '卡车', 'boat': '船',
                         'traffic light': '红绿灯', 'fire hydrant': '消防栓', 'stop sign': '停止标志',
                         'parking meter': '停车收费表', 'bench': '长凳', 'bird': '鸟', 'cat': '猫', 'dog': '狗',
                         'horse': '马', 'sheep': '羊', 'cow': '牛', 'elephant': '大象', 'bear': '熊',
                         'zebra': '斑马', 'giraffe': '长颈鹿', 'backpack': '背包', 'umbrella': '雨伞',
                         'handbag': '手提包', 'tie': '领带', 'suitcase': '行李箱', 'frisbee': '飞盘',
                         'skis': '滑雪板', 'snowboard': '单板滑雪', 'sports ball': '运动球', 'kite': '风筝',
                         'baseball bat': '棒球棒', 'baseball glove': '棒球手套', 'skateboard': '滑板',
                         'surfboard': '冲浪板', 'tennis racket': '网球拍', 'bottle': '瓶子 ',
                         'wine glass': '红酒杯', 'cup': '杯子', 'fork': '叉子', 'knife': '刀', 'spoon': '勺子',
                         'bowl': '碗', 'banana': '香蕉', 'apple': '苹果', 'sandwich': '三明治',
                         'orange': '橙子', 'broccoli': '西兰花', 'carrot': '胡萝卜', 'hot dog': '热狗',
                         'pizza': '披萨', 'donut': '甜甜圈', 'cake': '蛋糕', 'chair': '椅子', 'couch': '沙发',
                         'potted plant': '盆栽', 'bed': '床', 'dining table': '餐桌', 'toilet': '马桶',
                         'tv': '电视', 'laptop': '笔记本电脑', 'mouse': '鼠标', 'remote': '遥控器',
                         'keyboard': '键盘', 'cell phone': '手机', 'microwave': '微波炉', 'oven': '烤箱',
                         'toaster': '烤面包机', 'sink': '洗碗槽', 'refrigerator': '冰箱', 'book': '书',
                         'clock': '时钟', 'vase': '花瓶', 'scissors': '剪刀', 'teddy bear': '泰迪熊',
                         'hair drier': '吹风机', 'toothbrush': '牙刷'}

  def pre_process(self, image, scale, meta=None):
    """Resize/normalise *image* at *scale* into a network input tensor.

    Returns (images, meta) where images is a 1x3xHxW float tensor
    (2x3xHxW when flip-test is enabled) and meta carries the affine
    transform centre/scale and the output resolution used by
    post-processing.
    """
    height, width = image.shape[0:2]
    new_height = int(height * scale)
    new_width = int(width * scale)
    if self.opt.fix_res:
      # Fixed network resolution: warp around the image centre.
      inp_height, inp_width = self.opt.input_h, self.opt.input_w
      c = np.array([new_width / 2., new_height / 2.], dtype=np.float32)
      s = max(height, width) * 1.0
    else:
      # Keep native resolution, padded up to a stride-friendly size.
      inp_height = (new_height | self.opt.pad) + 1
      inp_width = (new_width | self.opt.pad) + 1
      c = np.array([new_width // 2, new_height // 2], dtype=np.float32)
      s = np.array([inp_width, inp_height], dtype=np.float32)

    trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])
    resized_image = cv2.resize(image, (new_width, new_height))
    inp_image = cv2.warpAffine(
      resized_image, trans_input, (inp_width, inp_height),
      flags=cv2.INTER_LINEAR)
    inp_image = ((inp_image / 255. - self.mean) / self.std).astype(np.float32)

    images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height, inp_width)
    if self.opt.flip_test:
      # Append the horizontally flipped image for test-time augmentation.
      images = np.concatenate((images, images[:, :, :, ::-1]), axis=0)
    images = torch.from_numpy(images)
    meta = {'c': c, 's': s,
            'out_height': inp_height // self.opt.down_ratio,
            'out_width': inp_width // self.opt.down_ratio}
    return images, meta

  def process(self, images, anno=None, return_time=False):
    """Run the network; must return (output, dets[, forward_time]).

    Signature updated to match the call sites in run(), which pass an
    `anno` positional argument (the old stub omitted it).
    """
    raise NotImplementedError

  def post_process(self, dets, meta, scale=1):
    """Map raw detections back to original-image coordinates, per class."""
    raise NotImplementedError

  def merge_outputs(self, detections):
    """Merge the per-scale detection dicts into the final results dict."""
    raise NotImplementedError

  def debug(self, debugger, images, dets, output, scale=1):
    """Render intermediate outputs for debugging."""
    raise NotImplementedError

  def show_results(self, debugger, image, results, img_name=None):
    """Draw final detections and return the rendered image path.

    Signature updated to match the call sites in run(), which pass
    `img_name` as a keyword argument (the old stub omitted it).
    """
    raise NotImplementedError

  def run(self, image_or_path_or_tensor, meta=None):
    """Full inference pipeline for one image (ndarray, file path, or a
    pre-processed dataset sample dict).

    Returns a dict with per-class `results`, per-stage timings, the path of
    the rendered result image and any CAM overlay files written for the
    web UI.
    """
    load_time, pre_time, net_time, dec_time, post_time = 0, 0, 0, 0, 0
    merge_time, tot_time = 0, 0
    debugger = Debugger(dataset=self.opt.dataset, ipynb=(self.opt.debug == 3),
                        theme=self.opt.debugger_theme)
    start_time = time.time()
    pre_processed = False
    # Ground-truth annotations are only available for pre-processed dataset
    # samples.  Default to None so the non-debug self.process(...) call
    # below cannot hit an unbound name for ndarray/path inputs (previously
    # a NameError).
    anno = None
    if isinstance(image_or_path_or_tensor, np.ndarray):
      image = image_or_path_or_tensor
    elif isinstance(image_or_path_or_tensor, str):
      image = cv2.imread(image_or_path_or_tensor)
    else:
      image = image_or_path_or_tensor['image'][0].numpy()
      # Ground-truth targets shipped with the sample (used for
      # visualisation and the annotation-aware process() call).
      if self.opt.dataset in ['voc07', 'voc12', 'pascal']:
        anno = {
        }
      else:
        anno = {
          'hm': image_or_path_or_tensor['hm'][0].numpy(),
          'wh': image_or_path_or_tensor['wh'][0].numpy(),
          'reg': image_or_path_or_tensor['reg'][0].numpy(),
          'offset_map': image_or_path_or_tensor['offset_map'][0].numpy(),
          'offset_mask': image_or_path_or_tensor['offset_mask'][0].numpy(),
          'ct_list': image_or_path_or_tensor['ct_list'][0] if len(image_or_path_or_tensor['ct_list']) > 0 else [],
          'file_name': image_or_path_or_tensor['file_name'][0],
          'cat_id': image_or_path_or_tensor['cat_id'][0].tolist() if len(
            image_or_path_or_tensor['cat_id']) > 0 else [],
          'target_box': image_or_path_or_tensor['target_box'][0].numpy(),
        }
      pre_processed_images = image_or_path_or_tensor
      pre_processed = True

    loaded_time = time.time()
    load_time += (loaded_time - start_time)

    detections = []
    for scale in self.scales:
      scale_start_time = time.time()
      if not pre_processed:
        images, meta = self.pre_process(image, scale, meta)
      else:
        images = pre_processed_images['images'][scale][0]
        meta = pre_processed_images['meta'][scale]
        meta = {k: v.numpy()[0] for k, v in meta.items()}
      images = images.to(self.opt.device)
      # NOTE(review): the synchronize() calls in this method assume a CUDA
      # device and will fail on CPU-only runs -- confirm intended usage.
      torch.cuda.synchronize()
      pre_process_time = time.time()
      pre_time += pre_process_time - scale_start_time

      if self.opt.debug >= 1:
        output, dets, forward_time = self.process(images, None, return_time=True)
      else:
        # process() applies sigmoid to the heatmaps internally.
        output, dets, forward_time = self.process(images, anno, return_time=True)

      vis_offset = False
      if vis_offset:
        # Visualise the magnitude of the predicted offset map, with the
        # ground-truth centre points highlighted.
        ct_list = torch.tensor(anno['ct_list'])
        if len(ct_list) == 0:
          ct_list = torch.zeros(1, 2, device=output['hm'].device).long()
        ct_list = ct_list.view(ct_list.shape[0], ct_list.shape[-1])
        off_map = output['offset_map'][0].cpu()
        off_map = (off_map ** 2).sum(dim=0).unsqueeze(0).permute(1, 2, 0)
        torch.set_printoptions(profile="full")
        off_map = (off_map - off_map.min()) / (off_map.max() - off_map.min() + 1e-7)
        off_map = off_map.cpu().numpy()
        off_map[ct_list.T[1], ct_list.T[0]] = 1.
        off_map = cv2.applyColorMap((off_map * 255).astype(np.uint8), cv2.COLORMAP_JET)  # to 3 channels
        off_map = cv2.resize(off_map, (512, 512))
        draw_img = cv2.resize(image, (512, 512))
        fig = (draw_img * 0.5 + off_map * 0.5).astype(np.uint8)
        cv2.imwrite('offset_map/%s' % anno['file_name'], fig)

      vis_hm = False
      if vis_hm:  # visualise predicted heatmaps for the ground-truth classes
        det_cat = [int(ann_cat) for ann_cat in anno['cat_id']]  # classes present
        plot_feat = output['hm']
        for cat in det_cat:
          heatmap = cv2.resize(plot_feat[0, cat][None,].permute(1, 2, 0).cpu().numpy(),
                               (output['hm'].shape[-1] * 4, output['hm'].shape[-2] * 4))  # param: WH
          heatmap = cv2.applyColorMap((heatmap * 255).astype(np.uint8), cv2.COLORMAP_JET)  # to 3 channels
          draw_image = cv2.resize(image, (heatmap.shape[1], heatmap.shape[0]))
          fig = (draw_image * 0.5 + heatmap * 0.5).astype(np.uint8)
          cv2.imwrite('visual/merge/%s_%s.jpg' % (anno['file_name'].split('.')[0], self.class_name[cat]), fig)
          print('save visual/merge [%s_%s.jpg]' % (anno['file_name'].split('.')[0], self.class_name[cat]))

      torch.cuda.synchronize()
      net_time += forward_time - pre_process_time
      decode_time = time.time()
      dec_time += decode_time - forward_time

      if self.opt.debug >= 2:
        self.debug(debugger, images, dets, output, scale)

      dets = self.post_process(dets, meta, scale)  # grouped by class
      torch.cuda.synchronize()
      post_process_time = time.time()
      post_time += post_process_time - decode_time
      detections.append(dets)

    results = self.merge_outputs(detections)
    torch.cuda.synchronize()
    end_time = time.time()
    merge_time += end_time - post_process_time
    tot_time += end_time - start_time

    cam_li = []
    vis_cam = self.opt.vis_cam  # only enabled for web-UI visualisation
    if vis_cam:
      # Write one heatmap overlay per confidently detected class
      # (capped at 10 classes).
      det_cat = []
      for j in range(1, self.num_classes + 1):
        for bbox in results[j]:
          if bbox[4] > self.opt.vis_thresh:
            det_cat.append(j - 1)
      det_cat = list(set(det_cat))
      if len(det_cat) > 10:
        det_cat = det_cat[:10]
      plot_feat = torch.tensor(output['hm'])
      for cat in det_cat:
        heatmap = cv2.resize(plot_feat[0, cat][None,].permute(1, 2, 0).cpu().numpy(),
                             (output['hm'].shape[-1] * 4, output['hm'].shape[-2] * 4))  # param: WH
        heatmap = cv2.applyColorMap((heatmap * 255).astype(np.uint8), cv2.COLORMAP_JET)  # to 3 channels
        draw_image = cv2.resize(image, (heatmap.shape[1], heatmap.shape[0]))
        fig = (draw_image * 0.5 + heatmap * 0.5).astype(np.uint8)
        cam_filepath = './static/heatmap/cam_%s.jpg' % self.class_name[cat].replace(' ', '_')
        cv2.imwrite(cam_filepath, fig)
        cam_li.append((self.chinese_name[self.class_name[cat]], cam_filepath.replace('./static/', '')))
        print('save [%s]' % cam_filepath)

    res_img = ''
    if self.opt.debug >= 1:  # always taken in practice
      if isinstance(image_or_path_or_tensor, str):
        res_img = self.show_results(debugger, image, results, img_name=image_or_path_or_tensor)
      else:
        rand_name = random_filename(''.join(random.sample('zyxwvutsrqponmlkjihgfedcba', 7)) + '.jpg')
        res_img = self.show_results(debugger, image, results, img_name=rand_name)

    return {'results': results, 'tot': tot_time, 'load': load_time,
            'pre': pre_time, 'net': net_time, 'dec': dec_time,
            'post': post_time, 'merge': merge_time, 'img_path': res_img, 'cam_li': cam_li}
| StarcoderdataPython |
85590 | <filename>sdk/python/pulumi_aws/secretsmanager/outputs.py<gh_stars>100-1000
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'SecretReplica',
'SecretRotationRotationRules',
'SecretRotationRules',
'GetSecretRotationRotationRuleResult',
'GetSecretRotationRuleResult',
]
@pulumi.output_type
class SecretReplica(dict):
    """Output type describing one replication region of a Secrets Manager secret."""
    @staticmethod
    def __key_warning(key: str):
        # Map the provider's camelCase dict keys to the snake_case property
        # names users should access instead.
        suggest = None
        if key == "kmsKeyId":
            suggest = "kms_key_id"
        elif key == "lastAccessedDate":
            suggest = "last_accessed_date"
        elif key == "statusMessage":
            suggest = "status_message"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SecretReplica. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        # Warn on camelCase access, then defer to the plain dict lookup.
        SecretReplica.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        SecretReplica.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 region: str,
                 kms_key_id: Optional[str] = None,
                 last_accessed_date: Optional[str] = None,
                 status: Optional[str] = None,
                 status_message: Optional[str] = None):
        """
        :param str region: Region for replicating the secret.
        :param str kms_key_id: ARN, Key ID, or Alias.
        :param str last_accessed_date: Date that you last accessed the secret in the Region.
        :param str status: Status can be `InProgress`, `Failed`, or `InSync`.
        :param str status_message: Message such as `Replication succeeded` or `Secret with this name already exists in this region`.
        """
        pulumi.set(__self__, "region", region)
        if kms_key_id is not None:
            pulumi.set(__self__, "kms_key_id", kms_key_id)
        if last_accessed_date is not None:
            pulumi.set(__self__, "last_accessed_date", last_accessed_date)
        if status is not None:
            pulumi.set(__self__, "status", status)
        if status_message is not None:
            pulumi.set(__self__, "status_message", status_message)
    @property
    @pulumi.getter
    def region(self) -> str:
        """
        Region for replicating the secret.
        """
        return pulumi.get(self, "region")
    @property
    @pulumi.getter(name="kmsKeyId")
    def kms_key_id(self) -> Optional[str]:
        """
        ARN, Key ID, or Alias.
        """
        return pulumi.get(self, "kms_key_id")
    @property
    @pulumi.getter(name="lastAccessedDate")
    def last_accessed_date(self) -> Optional[str]:
        """
        Date that you last accessed the secret in the Region.
        """
        return pulumi.get(self, "last_accessed_date")
    @property
    @pulumi.getter
    def status(self) -> Optional[str]:
        """
        Status can be `InProgress`, `Failed`, or `InSync`.
        """
        return pulumi.get(self, "status")
    @property
    @pulumi.getter(name="statusMessage")
    def status_message(self) -> Optional[str]:
        """
        Message such as `Replication succeeded` or `Secret with this name already exists in this region`.
        """
        return pulumi.get(self, "status_message")
@pulumi.output_type
class SecretRotationRotationRules(dict):
    """Rotation schedule attached to a `secretsmanager.SecretRotation` resource."""
    @staticmethod
    def __key_warning(key: str):
        # Map the provider's camelCase key to the snake_case property name.
        suggest = None
        if key == "automaticallyAfterDays":
            suggest = "automatically_after_days"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SecretRotationRotationRules. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        # Warn on camelCase access, then defer to the plain dict lookup.
        SecretRotationRotationRules.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        SecretRotationRotationRules.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 automatically_after_days: int):
        """
        :param int automatically_after_days: Specifies the number of days between automatic scheduled rotations of the secret.
        """
        pulumi.set(__self__, "automatically_after_days", automatically_after_days)
    @property
    @pulumi.getter(name="automaticallyAfterDays")
    def automatically_after_days(self) -> int:
        """
        Specifies the number of days between automatic scheduled rotations of the secret.
        """
        return pulumi.get(self, "automatically_after_days")
@pulumi.output_type
class SecretRotationRules(dict):
    """Rotation schedule attached to a `secretsmanager.Secret` resource."""
    @staticmethod
    def __key_warning(key: str):
        # Map the provider's camelCase key to the snake_case property name.
        suggest = None
        if key == "automaticallyAfterDays":
            suggest = "automatically_after_days"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SecretRotationRules. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        # Warn on camelCase access, then defer to the plain dict lookup.
        SecretRotationRules.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        SecretRotationRules.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 automatically_after_days: int):
        """
        :param int automatically_after_days: Specifies the number of days between automatic scheduled rotations of the secret.
        """
        pulumi.set(__self__, "automatically_after_days", automatically_after_days)
    @property
    @pulumi.getter(name="automaticallyAfterDays")
    def automatically_after_days(self) -> int:
        """
        Specifies the number of days between automatic scheduled rotations of the secret.
        """
        return pulumi.get(self, "automatically_after_days")
@pulumi.output_type
class GetSecretRotationRotationRuleResult(dict):
    """Rotation-rule entry returned by the `getSecretRotation` data source."""
    def __init__(__self__, *,
                 automatically_after_days: int):
        # Number of days between automatic scheduled rotations of the secret.
        pulumi.set(__self__, "automatically_after_days", automatically_after_days)
    @property
    @pulumi.getter(name="automaticallyAfterDays")
    def automatically_after_days(self) -> int:
        return pulumi.get(self, "automatically_after_days")
@pulumi.output_type
class GetSecretRotationRuleResult(dict):
    """Rotation-rule entry returned by the `getSecret` data source."""
    def __init__(__self__, *,
                 automatically_after_days: int):
        # Number of days between automatic scheduled rotations of the secret.
        pulumi.set(__self__, "automatically_after_days", automatically_after_days)
    @property
    @pulumi.getter(name="automaticallyAfterDays")
    def automatically_after_days(self) -> int:
        return pulumi.get(self, "automatically_after_days")
| StarcoderdataPython |
1648357 | from . import vec
| StarcoderdataPython |
1795757 | from django.test import TestCase
from staff import models
class ModelTest(TestCase):
    """Model-layer tests for the staff app."""

    def test_department_str(self):
        """A department's string representation is its name."""
        department = models.Department.objects.create(
            name='Accounting'
        )
        self.assertEqual(str(department), department.name)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.