id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
4834632 | <filename>api/core/react-native/LibLedgerCore/binding_copy.gyp
{
  # GYP build definition for the Ledger Core bindings.
  # Paths are relative to this .gyp file; the '%' suffix marks an overridable default.
  'variables': {
    'core_library%': "../../../../../lib-ledger-core-build",
    'run_path%': "../../../../../lib-ledger-core-build/core/src/Release-iphonesimulator",
    'header_path%': "../../objc",
  },
  'targets': [
    # iOS / Objective-C static-library target; sources are discovered with glob.py.
    {
      'target_name': 'libledger-core-objc',
      'type': 'static_library',
      'conditions': [],
      'dependencies': [
        '../../../../djinni/support-lib/support_lib.gyp:djinni_objc'
      ],
      'sources': [
        "<!@(python glob.py ../../objc *.h *.m *.mm)",
        "<!@(python glob.py ../../objcpp *.h *.m *.mm)",
        "<!@(python glob.py ../../src/objc *.h *.m *.mm)",
      ],
      'include_dirs': [
        "<@(core_library)/include/ledger/core/api",
        "<@(header_path)",
      ],
      # Propagate include paths to every target that depends on this one.
      'all_dependent_settings': {
        'include_dirs': [
          "<@(core_library)/include/ledger/core/api",
          "<@(header_path)",
        ],
      },
      'libraries': ['<!(pwd)/<@(run_path)/libledger-core.dylib']
    },
    # Android shared-library (JNI) target.
    {
      'target_name': 'libledger-core-android',
      'android_unmangled_name': 1,
      'type': 'shared_library',
      'dependencies': [
        #'../../../../djinni/support-lib/support_lib.gyp:djinni_jni'
        '../../../../djinni/support-lib/support_lib.gyp:djinni_jni_main'
      ],
      'ldflags' : [ '-llog' ],
      'sources': [
        '<!@(python glob.py android/src/java *.java)',
      ],
      'include_dirs': [
        #"<@(core_library)/include/ledger/core/api",
        #"<!(echo %JAVA_HOME%)/include",
        #"<!(echo %ANDROID_NDK%)/sysroot/usr/include",
        #"/System/Library/Frameworks/JavaVM.framework/Versions/Current/Headers"
        "android/include",
        "android/src/jni",
        "android/src/jni/jni",
      ],
      'all_dependent_settings': {
        'include_dirs': [
          #"<@(core_library)/include/ledger/core/api",
          #"<!(echo %JAVA_HOME%)/include",
          #"<!(echo %ANDROID_NDK%)/sysroot/usr/include",
          #"/System/Library/Frameworks/JavaVM.framework/Versions/Current/Headers"
          "android/include",
          "android/src/jni",
          "android/src/jni/jni",
        ],
      },
    },
  ],
}
| StarcoderdataPython |
3311151 | <reponame>Matthew1906/100DaysOfPython
# Import Modules
from flask import Flask, render_template, redirect, url_for, request, flash
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, FloatField
from wtforms.validators import DataRequired
import requests
# Create App
app = Flask(__name__)
# NOTE(review): hard-coded secret key checked into source; move to an env var.
app.config['SECRET_KEY'] = '8BYkEfBA6O6donzWlSihBXox7C0sKR6b'
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///top-10-movies-collection.db"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False  # silence the modification-tracking warning
Bootstrap(app)
# Setup Database
movie_db = SQLAlchemy(app)
class Movie(movie_db.Model):
    """ORM model: one row per movie in the top-10 collection."""
    movie_id = movie_db.Column(movie_db.Integer, primary_key=True)
    # Unique title prevents the same film from being added twice.
    title = movie_db.Column(movie_db.String(255), unique=True, nullable=False)
    year = movie_db.Column(movie_db.Integer, nullable=False)
    description = movie_db.Column(movie_db.String(5000), nullable=False)
    rating = movie_db.Column(movie_db.Float, nullable=False)    # drives the ranking order
    ranking = movie_db.Column(movie_db.Integer, nullable=False)  # recomputed on every home() render
    review = movie_db.Column(movie_db.String(5000), nullable=False)
    img_url = movie_db.Column(movie_db.String(5000), nullable=False)  # poster image URL


# Create the table on import if it does not exist yet.
movie_db.create_all()
# When running the program for the first time, it is necessary to uncomment the code below, else, comment them
# first_movie = Movie(
# title = 'V for Vendetta',
# year = 2005,
# description = 'In a future British tyranny, a shadowy freedom fighter, known only by the alias of "V", plots to overthrow it with the help of a young woman.',
# rating = 8.1,
# ranking = 10,
# review = 'Utterly Spectacular!',
# img_url = 'https://upload.wikimedia.org/wikipedia/id/9/9f/Vforvendettamov.jpg'
# )
# movie_db.session.add(first_movie)
# movie_db.session.commit()
# Home -> display all movies
@app.route("/")
def home():
    """Render the index page, re-ranking every movie by descending rating."""
    ordered = movie_db.session.query(Movie).order_by(Movie.rating.desc())
    # Persist a 1-based rank so templates can display each movie's position.
    for position, film in enumerate(ordered, start=1):
        film.ranking = position
    movie_db.session.commit()
    return render_template("index.html", movies=ordered)
# Edit Movie Rating and Review
class EditForm(FlaskForm):
    """WTForms form for updating a movie's rating and review."""
    rating = FloatField(validators=[DataRequired()])
    review = StringField(validators=[DataRequired()])
    submit = SubmitField("Done")
@app.route("/edit/<movie_id>", methods=['GET', 'POST'])
def edit(movie_id):
    """Show the edit form (GET) or apply the submitted rating/review (POST)."""
    movie = Movie.query.get(movie_id)
    if request.method != 'POST':
        return render_template('edit.html', movie=movie, form=EditForm())
    movie.rating = request.form['rating']
    movie.review = request.form['review']
    movie_db.session.commit()
    return redirect(url_for('home'))
# Delete Movie
@app.route('/delete/<movie_id>')
def delete(movie_id):
    """Remove the movie with the given primary key and return home."""
    doomed = Movie.query.get(movie_id)
    movie_db.session.delete(doomed)
    movie_db.session.commit()
    return redirect(url_for('home'))
# Add Movie
class AddForm(FlaskForm):
    """WTForms form for searching TMDB by movie title."""
    title = StringField("Movie Title", validators=[DataRequired()])
    search = SubmitField("Find Movie")
@app.route('/add')
def add():
    """Render the page holding the TMDB title-search form."""
    return render_template('add.html', form = AddForm())
# Search Movies
@app.route('/search', methods=['POST', 'GET'])
def select():
    """Query TMDB for titles matching the submitted search and list them.

    The Movie objects built here are transient (never added to the session);
    they only feed the selection template.
    """
    found = requests.get(
        url='https://api.themoviedb.org/3/search/movie',
        params={
            'api_key': '<KEY>',
            'query': request.form['title'],
        },
    ).json()['results']
    candidates = [
        Movie(
            movie_id=item['id'],
            title=item['original_title'],
            year=item['release_date'][:4],
            description=item['overview'],
            ranking=1,  # placeholder; real rank is assigned on the home page
            rating=item['vote_average'],
            review='No Review',
            img_url='https://image.tmdb.org/t/p/w500/' + item["poster_path"],
        )
        for item in found
    ]
    return render_template('select.html', results=candidates)
# Save the Movies
@app.route('/save/<movie_id>')
def save(movie_id):
    """Fetch full TMDB details for one movie, persist it, then open its edit page."""
    details = requests.get(
        url='https://api.themoviedb.org/3/movie/' + movie_id,
        params={'api_key': '<KEY>'},
    ).json()
    new_movie = Movie(
        movie_id=details['id'],
        title=details['original_title'],
        year=details['release_date'][:4],
        description=details['overview'],
        ranking=1,  # placeholder; real rank is assigned on the home page
        rating=details['vote_average'],
        review='No Review',
        img_url='https://image.tmdb.org/t/p/w500/' + details['poster_path'],
    )
    movie_db.session.add(new_movie)
    movie_db.session.commit()
    return redirect(url_for('edit', movie_id=new_movie.movie_id))
# Driver Code
if __name__ == '__main__':
    # Development server only; debug enables the reloader and the debugger.
    app.run(debug=True)
3250211 | import timeit
import os
from objects import *
from serializers import *
from table import Table
class Evaluator:
    """Benchmark every serializer against every sample object.

    For each (object, serializer) pair, measures serialization and
    deserialization time with ``timeit`` over ``num_tests`` repetitions,
    verifies the round trip, and records size/time results into per-object
    ``Table`` instances.
    """
    # Sample payloads of increasing weight (light -> very heavy).
    OBJECTS_TO_EVALUATE = (
        PrimitiveObject(int1=9, float1=3.1415, int2=-5156, float2=1e128),  # Light object
        DictObject(dict1={"D" + str(i): i ** 2 for i in range(100)},  # Medium object
                   dict2={"a": -123456789},
                   dict3={s: s + s + s for s in ["a", "b", "c", "d", "e", "f"]}),
        RepeatedObject(str1="lorem ipsum." * 100,  # Heavy object
                       str2="a" * 1000,
                       arr1=[1, 2, 3, 5],
                       arr2=["Hello world!"] * 255),
        CompositeObject(int1=123456789987654321, float1=0.0001,  # Very heavy object
                        str1="Some more of an exciting and interesting text!" * 1000,
                        arr1=list(range(500)),
                        dict1={"INT" + str(i): str(i) * 100 for i in range(100)})
    )
    # One instance of every serialization backend under test.
    SERIALIZERS_TO_EVALUATE = (
        Native(),
        XML(),
        JSON(),
        Proto(),
        Avro(),
        Yaml(),
        MessagePack(),
    )
    # Row labels (serializer class names) and column labels for each Table.
    INDEX = list(map(lambda obj: obj.__class__.__name__, SERIALIZERS_TO_EVALUATE))
    COLUMNS = ['data_size', 'serialization_time', 'deserialization_time', 'total_time']
    num_tests: int            # timeit repetition count per measurement
    tables: dict[str, Table]  # object class name -> result table

    def __init__(self, num_tests):
        self.num_tests = num_tests
        # One result table per sample object, keyed by its class name.
        self.tables = {obj.__class__.__name__: Table(obj.__class__.__name__, self.INDEX, self.COLUMNS)
                       for obj in self.OBJECTS_TO_EVALUATE}

    def _evaluate_for(self, obj: ObjectToEvaluate, serializer: Serializer) -> None:
        """Time one (object, serializer) pair and record its results."""
        print(f"Measuring time for object {obj.__class__.__name__} with serializer {serializer.__class__.__name__}")
        # globals=locals() exposes `serializer` and `obj` to the timed statement.
        used_time_to_serialize = timeit.timeit(
            stmt="serializer.serialize(obj)",
            number=self.num_tests,
            globals=locals()
        )
        used_time_to_serialize = round(used_time_to_serialize, 6)
        serialized = serializer.serialize(obj)
        used_time_to_deserialize = timeit.timeit(
            stmt="serializer.deserialize(serialized)",
            number=self.num_tests,
            globals=locals()
        )
        used_time_to_deserialize = round(used_time_to_deserialize, 6)
        deserialized = serializer.deserialize(serialized)
        # Round-trip sanity check: the deserialized value must equal the original.
        assert deserialized == obj, \
            f"Serializer {serializer.__class__.__name__} returned wrong value for object {obj.__class__.__name__}"
        total_time = round(used_time_to_serialize + used_time_to_deserialize, 6)
        # NOTE(review): Table.set() is called with one positional value four times
        # in a row; presumably it fills cells in COLUMNS order -- confirm in table.py.
        self.tables[obj.__class__.__name__].set(len(serialized))
        self.tables[obj.__class__.__name__].set(used_time_to_serialize)
        self.tables[obj.__class__.__name__].set(used_time_to_deserialize)
        self.tables[obj.__class__.__name__].set(total_time)
        print("RESULTS:")
        print("Total size (in bytes):", len(serialized))
        print("Serialization time:", used_time_to_serialize, "s")
        print("Deserialization time:", used_time_to_deserialize, "s")
        print("Total time:", total_time, "s")
        print("Done")

    def evaluate_for_all_pairs(self):
        """Run the benchmark for every (object, serializer) combination."""
        for obj in self.OBJECTS_TO_EVALUATE:
            for serializer in self.SERIALIZERS_TO_EVALUATE:
                self._evaluate_for(obj, serializer)
        # Remove the scratch file left behind by the Avro serializer.
        # NOTE(review): placement after both loops reconstructed from context -- confirm.
        if os.path.exists("tmp.avro"):
            os.remove("tmp.avro")
| StarcoderdataPython |
3236748 | <reponame>TorgeirUstad/dlite
#!/usr/bin/env python
"""Demo: inspect and manipulate dlite's storage search path."""
from pathlib import Path

import dlite


def _list_paths():
    """Print every entry of dlite.storage_path followed by a blank line."""
    for entry in dlite.storage_path:
        print('- ' + entry)
    print()


print('dlite storage paths:')
_list_paths()

print('append path with glob pattern:')
here = Path(__file__).parent.absolute()
dlite.storage_path.append(f'{here}/*.json')
_list_paths()

print('delete second last path:')
del dlite.storage_path[-2]
_list_paths()

print('Predefined paths:')
for key, value in dlite.__dict__.items():
    if key.endswith('path'):
        print(f"dlite.{key}='{value}'")
| StarcoderdataPython |
133301 | <gh_stars>10-100
from .tableview_blueprint import tableview_blueprint
from .get_tableview import get_tableview as __get_tableview
from .post_tableview import post_tableview as __post_tableview
from .delete_tableview import delete_tableview as __delete_tableview
from .put_tableview import put_tableview as __put_tableview | StarcoderdataPython |
3303632 | <gh_stars>0
'''
Thevenin Equivalent-Circuit Model
given values of R1 and C1, the code is going to iterate to predict, in a discrete time,
the state of charge (z), the difussion-resistor current (ir1) and the output voltage (v)
EQUATIONS OF THE MODEL (Now using number 1 and 3):
1) z(i+1) = z(i) - ((t2 - t1)) * n * I(i) / Q
2) ir1(i+1) = exp(-(t2 - t1)/R1 * C1) * Ir1(i) + (1-exp(-(t2 - t1)/R1 * C1)) * I(i)
3) v(i) = ocv(i) - (R1 * Ir1(i)) - (R0 * I(i))
'''
#from numpy.core.function_base import linspace
import pandas as pd
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
import numpy as np
import math
##### Definition of interpolation function used with the csv data #####
def ocv_interpolation(soc_data, ocv_data, soc_in):
    """Linearly interpolate the open-circuit voltage for a given state of charge.

    soc_data/ocv_data form a lookup table sorted by ascending SOC.
    Below the first table point the first OCV value is returned (clamp);
    above the last point the legacy behaviour of returning 0 is kept so
    existing callers are unaffected.

    Bug fix: the original code indexed ``soc_data[i-1]`` with ``i == 0``,
    which in Python wraps to the LAST table entry and produced a bogus
    interpolation between the table ends for inputs at/below the first SOC.
    """
    for i in range(len(soc_data)):
        if soc_in <= soc_data[i]:
            # Clamp at the lower end, and short-circuit duplicate breakpoints
            # to avoid a zero division in the slope below.
            if i == 0 or soc_data[i - 1] == soc_data[i]:
                return ocv_data[i]
            fraction = (soc_in - soc_data[i - 1]) / (soc_data[i] - soc_data[i - 1])
            return ocv_data[i - 1] + (ocv_data[i] - ocv_data[i - 1]) * fraction
    # soc_in above the table: legacy sentinel (no extrapolation).
    return 0
###### Generic interpolation w/o extrapolation #####
def interpolation(x_data, y_data, x_in):
    """Piecewise-linear interpolation with clamping at both table ends.

    x_data must be sorted ascending; y_data holds the matching values.
    Inputs below/above the table are clamped to the first/last y value
    (no extrapolation, matching the original behaviour).

    Fixes over the original: the boundary checks are hoisted out of the
    loop (they were re-evaluated on every iteration), and a table too
    short to bracket x_in now raises ValueError instead of the original
    NameError from an unbound result variable.
    """
    if x_in < x_data[0]:
        return y_data[0]   # clamp below the table
    if x_in > x_data[-1]:
        return y_data[-1]  # clamp above the table
    for i in range(len(x_data) - 1):
        if x_data[i] <= x_in <= x_data[i + 1]:
            fraction = (x_in - x_data[i]) / (x_data[i + 1] - x_data[i])
            return y_data[i] + (y_data[i + 1] - y_data[i]) * fraction
    raise ValueError("x_data must contain at least two ascending points bracketing x_in")
# ---- Input data -------------------------------------------------------------
# SOC -> OCV lookup table (CSV with two columns: soc, ocv).
df = pd.read_csv('C:/Repositories/battery_characterizer/coulomb_tests/sococv.csv')
df.columns = ['soc', 'ocv']
soc_data = df.soc.values
ocv_data = df.ocv.values

# ---- Initial state ----------------------------------------------------------
v_0 = ocv_interpolation(soc_data, ocv_data, 0.2)  # OCV at the starting SOC
z = np.array([0.2])   # state of charge at t = 0
t = np.array([0])     # time axis
v = np.array([v_0])   # terminal voltage
i = np.array([0])     # current (A); charging < 0, discharging > 0

# ---- Model parameters -------------------------------------------------------
dfp = pd.read_csv('C:/Users/Diego/Desktop/battery_data/parameters/parameters.csv')
dfp.columns = ['r0', 'r1', 'c1']
r0 = dfp.r0.values
r1 = dfp.r1.values
c1 = dfp.c1.values
charge_n = 0.99      # charging efficiency
disch_n = 1          # discharging efficiency
R0 = 0.085           # internal resistance (ohm)
Q = 3.250            # capacity (Ah)
CC_charge = -1 * Q   # constant-current charge rate (i < 0 while charging)
CV = 4.2             # constant-voltage setpoint
CC_disch = 1 * Q     # constant-current discharge rate
eoc = -0.3           # end-of-charge current threshold (300 mA)
eod = 3.2            # end-of-discharge voltage threshold
Dt = 0.25 / 3600     # discrete integration step (hours)
ind = 0              # current sample index

# ---- CC charging: hold current until the terminal voltage reaches CV --------
n = charge_n
while v[ind] < CV:
    i = np.append(i, CC_charge)
    z = np.append(z, z[ind] - (n * Dt * i[ind]) / Q)
    v = np.append(v, ocv_interpolation(soc_data, ocv_data, z[ind + 1]) - i[ind + 1] * R0)
    t = np.append(t, t[ind] + Dt)
    ind += 1

# ---- CV charging: hold voltage until the current decays to eoc --------------
while i[ind] < eoc:
    v = np.append(v, CV)
    z = np.append(z, z[ind] - (n * Dt * i[ind]) / Q)
    i = np.append(i, (ocv_interpolation(soc_data, ocv_data, z[ind + 1]) - v[ind + 1]) / R0)
    t = np.append(t, t[ind] + Dt)
    ind += 1

# ---- CC discharging: hold current until the voltage drops to eod ------------
n = disch_n
while v[ind] > eod:
    i = np.append(i, CC_disch)
    z = np.append(z, z[ind] - (n * Dt * i[ind]) / Q)
    v = np.append(v, ocv_interpolation(soc_data, ocv_data, z[ind + 1]) - i[ind + 1] * R0)
    t = np.append(t, t[ind] + Dt)
    ind += 1

# ---- Plots: voltage, current and SOC over time ------------------------------
plt.plot(t, v)
plt.show()
plt.plot(t, i)
plt.show()
plt.plot(t, z)
plt.show()
| StarcoderdataPython |
11618 | # -*- Python -*-
# license
# license.
# ======================================================================
"""Looks name up in the [geonames database](http://www.geonames.org/).
[GeoNames Search Webservice API](http://www.geonames.org/export/geonames-search.html)
"""
import sys, os, urllib.request, json, time
from pathlib import Path
import logging; module_logger = logging.getLogger(__name__)
from .utilities import is_chinese
# ======================================================================
def geonames(name):
    """Look *name* up in the GeoNames database.

    Empty input is returned unchanged; Chinese names go through the
    dedicated province/county lookup, everything else through a plain
    name search.
    """
    if not name:
        return name
    if is_chinese(name):
        return _lookup_chinese(name=name)
    return _lookup("search", isNameRequired="true", name=name)
# ----------------------------------------------------------------------
def _lookup(feature, **args):
    """Run a GeoNames query and keep only administrative/populated entries."""
    def make(entry):
        # fcl "A" = country/region/state, "P" = city/village; skip everything else.
        if entry.get("fcl") not in ("A", "P"):
            return None
        return {
            "name": entry["toponymName"],
            "province": entry["adminName1"],
            "country": entry["countryName"],
            "latitude": entry["lat"],
            "longitude": entry["lng"],
        }
    return _get(feature, make, args)
# ----------------------------------------------------------------------
def _get(feature, result_maker, args):
args.update({"username": "acorg", "type": "json"})
url = "http://api.geonames.org/{}?{}".format(feature, urllib.parse.urlencode(args))
# module_logger.debug('_lookup {!r}'.format(url))
while True:
rj = json.loads(urllib.request.urlopen(url=url).read().decode("utf-8"))
try:
return [e2 for e2 in (result_maker(e1) for e1 in rj["geonames"]) if e2]
except KeyError:
if "the hourly limit of" in rj.get("status", {}).get("message"):
print(f"WARNING: {rj['status']['message']}", file=sys.stderr)
seconds_to_wait = 120
print(f"WARNING: about to wait {seconds_to_wait} seconds", file=sys.stderr)
time.sleep(seconds_to_wait)
else:
print(f"ERROR: {rj}", file=sys.stderr)
raise RuntimeError(str(rj))
except Exception as err:
print(f"ERROR: {rj}: {err}", file=sys.stderr)
raise RuntimeError(f"{rj}: {err}")
# ----------------------------------------------------------------------
def _lookup_chinese(name):
    """Resolve a Chinese place name.

    Long names (> 3 Hanzi) are treated as province + county and resolved to a
    single combined entry; short names are resolved as provinces only.
    """
    if len(name) > 3:
        results = []
        provinces = _find_chinese_province(name)
        if provinces:
            province = provinces[0]
            county = _find_chinese_county(name, province)
            if county:
                results = [{
                    "local_name": name,
                    "name": _make_chinese_name(province, county),
                    "province": _make_province_name(province),
                    "country": province["countryName"],
                    "latitude": county["lat"],
                    "longitude": county["lng"],
                }]
        return results

    def as_result(entry):
        province_name = _make_province_name(entry)
        return {
            "local_name": name,
            "name": province_name,
            "province": province_name,
            "country": entry["countryName"],
            "latitude": entry["lat"],
            "longitude": entry["lng"],
        }
    return [as_result(e) for e in _find_chinese_province(name)]
# ----------------------------------------------------------------------
def _find_chinese_province(name):
    """Find the ADM1 (province) entry whose Chinese name prefixes *name*."""
    def exact_match(prefix):
        return _get(
            "search",
            lambda e: e if e["name"] == prefix else None,
            {"isNameRequired": "true", "name_startsWith": prefix,
             "fclass": "A", "fcode": "ADM1", "lang": "cn"})
    matches = exact_match(name[:2])
    if not matches:
        # Inner Mongolia is written using 3 Hanzi
        matches = exact_match(name[:3])
    return matches
# ----------------------------------------------------------------------
def _make_province_name(entry):
r = entry["toponymName"].upper()
space_pos = r.find(' ', 6 if r[:6] == "INNER " else 0)
if space_pos >= 0:
r = r[:space_pos]
return r;
# ----------------------------------------------------------------------
def _find_chinese_county(full_name, province):
    """Find the county part of *full_name* within *province* (ADM3 first, then any)."""
    county_part = full_name[len(province["name"]):]
    hits = _get("search", lambda e: e,
                {"isNameRequired": "true", "name_startsWith": county_part,
                 "fclass": "A", "fcode": "ADM3",
                 "adminCode1": province["adminCode1"], "lang": "cn"})
    if not hits:
        # Fall back to an unconstrained search inside the same province.
        hits = _get("search", lambda e: e,
                    {"isNameRequired": "true", "name_startsWith": county_part,
                     "adminCode1": province["adminCode1"], "lang": "cn"})
    return hits[0] if hits else None
# ----------------------------------------------------------------------
def _make_chinese_name(province, county):
    """Join the normalized province and county display names with a space."""
    return "{} {}".format(_make_province_name(province), _make_county_name(county))
# ----------------------------------------------------------------------
def _make_county_name(county):
def remove_suffix(source, suffix):
if source[-len(suffix):] == suffix:
source = source[:-len(suffix)]
return source
def remove_apostrophe(source):
return source.replace("’", "")
r = county["toponymName"].upper()
r1 = remove_suffix(r, " ZIZHIXIAN")
if r1 != r:
r = remove_suffix(r1, "ZU")
else:
for s in [" QU", " XIAN", " SHI"]:
r2 = remove_suffix(r, s)
if r2 != r:
r = r2
break
r = remove_apostrophe(r)
return r
# ======================================================================
### Local Variables:
### eval: (if (fboundp 'eu-rename-buffer) (eu-rename-buffer))
### End:
| StarcoderdataPython |
3399931 | <filename>infermedica_api/webservice.py
# -*- coding: utf-8 -*-
"""
infermedica_api.webservice
~~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains function responsible for manging a global handlers for API Connector classes.
"""
from inspect import isclass
from typing import Optional, Any, Union
from . import exceptions, connectors
__api__ = None       # default (unaliased) connector, set by configure()
__api_aliased__ = {}  # alias -> connector mapping for named configurations
def get_api(alias: str = None) -> connectors.APIConnectorType:
    """Return a previously configured global API connector.

    :param alias: Alias of the configuration to retrieve; when omitted the
        default configuration is returned.
    :returns: An API connector object
    :raises: :class:`infermedica_api.exceptions.MissingConfiguration` when no
        matching configuration was created via :func:`configure`.
    """
    global __api__
    global __api_aliased__
    if alias:
        if alias in __api_aliased__:
            return __api_aliased__[alias]
        raise exceptions.MissingConfiguration(alias)
    if __api__ is None:
        raise exceptions.MissingConfiguration()
    return __api__
def configure(
    app_id: str,
    app_key: str,
    alias: Optional[str] = None,
    default: Optional[bool] = False,
    api_connector: Optional[Union[connectors.APIConnectorType, str]] = "APIv3Connector",
    **kwargs: Any
) -> connectors.APIConnectorType:
    """Create and register a global API connector.

    Several configurations (e.g. different credentials or language models)
    can be registered up front, each under a unique alias, and retrieved
    anywhere in the project with ``infermedica_api.get_api()`` or
    ``infermedica_api.get_api('<alias>')``.

    Usage:
        >>> import infermedica_api
        >>> infermedica_api.configure(app_id='YOUR_APP_ID', app_key='YOUR_APP_KEY')

        # Then somewhere in the project:
        >>> import infermedica_api
        >>> api = infermedica_api.get_api()
        >>> api.info()

    :param app_id: Infermedica API App Id
    :param app_key: Infermedica API App Key
    :param alias: (optional) Alias for this configuration; without it the
        configuration becomes the default returned by ``get_api()``
    :param default: (optional) When an alias is given, also make this
        configuration the default for ``get_api()`` without an alias
    :param api_connector: (optional) APIConnector class (possibly custom) or
        the name of one of the built-in connector classes
    :param kwargs: (optional) Extra options passed to the connector class
    :returns: An API connector object
    """
    global __api__
    global __api_aliased__
    connector_class = api_connector if isclass(api_connector) else getattr(connectors, api_connector)
    api_obj = connector_class(app_id=app_id, app_key=app_key, **kwargs)
    if alias:
        __api_aliased__[alias] = api_obj
        if default:
            __api__ = api_obj
    else:
        __api__ = api_obj
    return api_obj
| StarcoderdataPython |
from distutils.core import setup

# NOTE(review): distutils is deprecated and removed in Python 3.12; consider
# migrating to setuptools with an equivalent setup() call.
setup(
    name='sortedcounter',
    version='0.1',
    packages=['sortedcounter'],
    license='MIT',
    author='<NAME>',
    author_email = '<EMAIL>',
    url = 'https://github.com/ckingdev/sortedcounter',
    download_url = 'https://github.com/ckingdev/sortedcounter/archive/0.1.tar.gz',
    keywords = ['counter', 'collection', 'sorted', 'dict'],
    classifiers = [],
    description='A Counter like the collections class but with sorted keys.',
)
| StarcoderdataPython |
79795 | import pytest
from graphene import Field, ID, Int, ObjectType, String
from .. import graphql_compatibility
from ..extend import extend, external, requires
from ..main import build_schema
# Expected schema / SDL snapshots. Graphene 2 and graphene 3 print schemas
# differently (member order, default-value rendering, scalar docstrings), so
# every expectation exists in a *_2 / *_3 pair, selected by
# graphql_compatibility at assertion time.
PRODUCT_SCHEMA_2 = """schema {
query: Query
}
type Product {
sku: ID
size: Int
weight: Int
shippingEstimate: String
}
type Query {
product: Product
_entities(representations: [_Any]): [_Entity]
_service: _Service
}
scalar _Any
union _Entity = Product
type _Service {
sdl: String
}
"""
PRODUCT_SCHEMA_3 = """schema {
query: Query
}
type Query {
product: Product
_entities(representations: [_Any] = null): [_Entity]
_service: _Service
}
type Product {
sku: ID
size: Int
weight: Int
shippingEstimate: String
}
union _Entity = Product
\"\"\"Anything\"\"\"
scalar _Any
type _Service {
sdl: String
}
"""
# Federation SDL (the `_service { sdl }` payload) for the Product fixture.
PRODUCTION_RESPONSE_2 = """
extend type Product @key(fields: "sku") {
sku: ID @external
size: Int @external
weight: Int @external
shippingEstimate: String @requires(fields: "size weight")
}
type Query {
product: Product
}
"""
PRODUCTION_RESPONSE_3 = """type Query {
product: Product
}
extend type Product @key(fields: "sku") {
sku: ID @external
size: Int @external
weight: Int @external
shippingEstimate: String @requires(fields: "size weight")
}
"""
# Same fixture, but requires() receives the field names as a list.
MULTIPLE_FIELDS_SCHEMA_2 = """schema {
query: Query
}
type Product {
sku: ID
size: Int
weight: Int
shippingEstimate: String
}
type Query {
product: Product
_entities(representations: [_Any]): [_Entity]
_service: _Service
}
scalar _Any
union _Entity = Product
type _Service {
sdl: String
}
"""
MULTIPLE_FIELDS_SCHEMA_3 = """schema {
query: Query
}
type Query {
product: Product
_entities(representations: [_Any] = null): [_Entity]
_service: _Service
}
type Product {
sku: ID
size: Int
weight: Int
shippingEstimate: String
}
union _Entity = Product
\"\"\"Anything\"\"\"
scalar _Any
type _Service {
sdl: String
}
"""
MULTIPLE_FIELDS_RESPONSE_2 = """
extend type Product @key(fields: "sku") {
sku: ID @external
size: Int @external
weight: Int @external
shippingEstimate: String @requires(fields: "size weight")
}
type Query {
product: Product
}
"""
MULTIPLE_FIELDS_RESPONSE_3 = """type Query {
product: Product
}
extend type Product @key(fields: "sku") {
sku: ID @external
size: Int @external
weight: Int @external
shippingEstimate: String @requires(fields: "size weight")
}
"""
# Fixture for a @requires field that also takes an input argument.
INPUT_SCHEMA_2 = """schema {
query: Query
}
type Acme {
id: ID!
age: Int
foo(someInput: String): String
}
type Query {
acme: Acme
_entities(representations: [_Any] = null): [_Entity]
_service: _Service
}
scalar _Any
union _Entity = Acme
type _Service {
sdl: String
}
"""
INPUT_SCHEMA_3 = """schema {
query: Query
}
type Query {
acme: Acme
_entities(representations: [_Any] = null): [_Entity]
_service: _Service
}
type Acme {
id: ID!
age: Int
foo(someInput: String = null): String
}
union _Entity = Acme
\"\"\"Anything\"\"\"
scalar _Any
type _Service {
sdl: String
}
"""
INPUT_RESPONSE_2 = """
extend type Acme @key(fields: "id") {
id: ID! @external
age: Int @external
foo(someInput: String): String @requires(fields: "age")
}
type Query {
acme: Acme
}
"""
INPUT_RESPONSE_3 = """type Query {
acme: Acme
}
extend type Acme @key(fields: "id") {
id: ID! @external
age: Int @external
foo(someInput: String = null): String @requires(fields: "age")
}
"""
def test_chain_requires_failure():
    """Nesting requires() calls on a single field must be rejected."""
    with pytest.raises(AssertionError) as err:
        @extend("id")
        class A(ObjectType):
            id = external(ID())
            something = requires(requires(String(), fields="id"), fields="id")
    assert str(err.value) == "Can't chain `requires()` method calls on one field."
def test_requires_multiple_fields():
    """requires() accepts several fields given as one space-separated string."""
    @extend("sku")
    class Product(ObjectType):
        sku = external(ID())
        size = external(Int())
        weight = external(Int())
        shipping_estimate = requires(String(), fields="size weight")

    class Query(ObjectType):
        product = Field(Product)

    schema = build_schema(query=Query)
    graphql_compatibility.assert_schema_is(
        actual=schema,
        expected_2=PRODUCT_SCHEMA_2,
        expected_3=PRODUCT_SCHEMA_3,
    )
    # The federation service SDL must expose the @external/@requires directives.
    query = """
    query {
        _service {
            sdl
        }
    }
    """
    executed = graphql_compatibility.perform_graphql_query(schema, query)
    assert not executed.errors
    graphql_compatibility.assert_graphql_response_data(
        schema=schema,
        actual=executed.data["_service"]["sdl"].strip(),
        expected_2=PRODUCTION_RESPONSE_2,
        expected_3=PRODUCTION_RESPONSE_3,
    )
def test_requires_multiple_fields_as_list():
    """requires() accepts several fields given as a list of names."""
    @extend("sku")
    class Product(ObjectType):
        sku = external(ID())
        size = external(Int())
        weight = external(Int())
        shipping_estimate = requires(String(), fields=["size", "weight"])

    class Query(ObjectType):
        product = Field(Product)

    schema = build_schema(query=Query)
    graphql_compatibility.assert_schema_is(
        actual=schema,
        expected_2=MULTIPLE_FIELDS_SCHEMA_2,
        expected_3=MULTIPLE_FIELDS_SCHEMA_3,
    )
    # List input must render identically to the space-separated string form.
    query = """
    query {
        _service {
            sdl
        }
    }
    """
    executed = graphql_compatibility.perform_graphql_query(schema, query)
    assert not executed.errors
    graphql_compatibility.assert_graphql_response_data(
        schema=schema,
        actual=executed.data["_service"]["sdl"].strip(),
        expected_2=MULTIPLE_FIELDS_RESPONSE_2,
        expected_3=MULTIPLE_FIELDS_RESPONSE_3,
    )
def test_requires_with_input():
    """A @requires field may also declare input arguments.

    Regression test for https://github.com/preply/graphene-federation/pull/47.
    """
    @extend("id")
    class Acme(ObjectType):
        id = external(ID(required=True))
        age = external(Int())
        foo = requires(Field(String, someInput=String()), fields="age")

    class Query(ObjectType):
        acme = Field(Acme)

    schema = build_schema(query=Query)
    graphql_compatibility.assert_schema_is(
        actual=schema,
        expected_2=INPUT_SCHEMA_2,
        expected_3=INPUT_SCHEMA_3,
    )
    # The argument must survive into the federation service SDL.
    query = """
    query {
        _service {
            sdl
        }
    }
    """
    executed = graphql_compatibility.perform_graphql_query(schema, query)
    assert not executed.errors
    graphql_compatibility.assert_graphql_response_data(
        schema=schema,
        actual=executed.data["_service"]["sdl"].strip(),
        expected_2=INPUT_RESPONSE_2,
        expected_3=INPUT_RESPONSE_3,
    )
| StarcoderdataPython |
3296993 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from collections import deque
class Solution:
    def findBottomLeftValue(self, root: "TreeNode") -> int:
        """Return the value of the leftmost node on the deepest level.

        Level-order traversal: for each level remember its first (leftmost)
        node; when the queue empties, the last remembered node is the
        leftmost of the bottom row. Assumes a non-empty tree.

        Note: the TreeNode annotation is quoted (forward reference) because
        TreeNode is only defined in a comment in this file; the unquoted
        form raised NameError at class-definition time.
        """
        q = deque([root])
        leftmost = None
        while q:
            leftmost = q[0]  # first node of the current level
            for _ in range(len(q)):
                node = q.popleft()
                if node.left:
                    q.append(node.left)
                if node.right:
                    q.append(node.right)
        return leftmost.val
| StarcoderdataPython |
1664755 | <reponame>yunionyun/python_yunionsdk<gh_stars>1-10
from yunionclient.common import base
class Unifiedmonitor(base.ResourceBase):
    """Plain resource wrapper; all behavior comes from ResourceBase."""
    pass
class UnifiedmonitorManager(base.StandaloneManager):
    """Manager for unifiedmonitor resources (keywords drive the API routing)."""
    resource_class = Unifiedmonitor
    keyword = 'unifiedmonitor'
    keyword_plural = 'unifiedmonitors'
    _columns = ["Id", "Name", "Description"]  # columns shown in list output
| StarcoderdataPython |
1637470 | import emacspy, socket, tempfile, queue, threading
from emacspy import sym
from typing import Optional
import concurrent.futures, traceback
# Unbounded queue of callables waiting to run on the Emacs main thread.
_call_soon_queue: queue.Queue = queue.Queue(0)
# "Self-pipe" connection used to wake Emacs; created lazily by init().
_wakeup_conn: Optional[socket.socket] = None
# The thread this module was imported on, i.e. the Emacs main thread.
_emacs_thread = threading.current_thread()
def call_soon_in_main_thread(f):
    """Queue *f* for execution on the Emacs main thread and poke the wakeup socket."""
    _call_soon_queue.put(f)
    if _wakeup_conn is not None:
        # Any byte wakes Emacs; its filter drains the queue.
        _wakeup_conn.send(b'x')
def run_in_main_thread_future(f):
    """Schedule *f* on the Emacs main thread; return a Future for its result."""
    fut: concurrent.futures.Future = concurrent.futures.Future()

    def runner():
        try:
            fut.set_result(f())
        except Exception as err:
            traceback.print_exc()
            fut.set_exception(err)

    call_soon_in_main_thread(runner)
    return fut
def run_in_main_thread(f):
    """Run *f* on the Emacs main thread and block until it finishes."""
    if threading.current_thread() == _emacs_thread:
        # Waiting on ourselves would deadlock: the queue is only drained here.
        raise Exception('already on emacs main thread')
    return run_in_main_thread_future(f).result()
@emacspy.defun('emacspy-threads/wakeup')
def wakeup(p, data):
    """Network-process filter: drain and run every queued callable.

    The arguments (process, data) are supplied by Emacs and ignored.
    """
    while True:
        try:
            task = _call_soon_queue.get_nowait()
        except queue.Empty:
            return
        task()
def init():
    """Set up the self-pipe: a temporary UNIX socket that Emacs connects to so
    background threads can wake the main thread by writing a byte."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        sock_path = tmp_dir + '/socket'
        listener = socket.socket(socket.AF_UNIX)
        listener.bind(sock_path)
        listener.listen(1)
        # Ask Emacs to connect; its filter runs wakeup() on every byte received.
        emacspy.f.make_network_process(
            sym(":name"), "emacspy-wakeup",
            sym(":remote"), sock_path,
            sym(":filter"), sym('emacspy-threads/wakeup'))
        global _wakeup_conn
        _wakeup_conn, _ = listener.accept()
    # Run anything that was queued before the wakeup socket existed.
    wakeup(None, None)
| StarcoderdataPython |
54960 | # License: MIT
'''
:author: <NAME> (<EMAIL>)
:organization: ETS
'''
import ctypes as c
import logging
import os
class Tagger(object):
    """The ZPar English POS Tagger.

    Thin ctypes wrapper around the zpar shared library: loads the tagger
    model once at construction, then tags sentences or whole files.

    Fix over the original: ``tag_sentence`` contained a duplicated,
    unreachable ``return ans`` statement, which has been removed.
    """

    def __init__(self, modelpath, libptr, zpar_session_obj):
        """Bind the library entry points and load the tagger model.

        :param modelpath: path to the tagger model directory
        :param libptr: loaded ctypes library handle for zpar
        :param zpar_session_obj: opaque session pointer shared by all wrappers
        :raises OSError: if the model cannot be loaded from *modelpath*
        """
        super(Tagger, self).__init__()

        # save the zpar session object
        self._zpar_session_obj = zpar_session_obj

        # set up a logger
        self.logger = logging.getLogger(__name__)

        # get the library method that loads the tagger models
        self._load_tagger = libptr.load_tagger
        self._load_tagger.restype = c.c_int
        self._load_tagger.argtypes = [c.c_void_p, c.c_char_p]

        # get the library methods that tag sentences and files
        self._tag_sentence = libptr.tag_sentence
        self._tag_sentence.restype = c.c_char_p
        self._tag_sentence.argtypes = [c.c_void_p, c.c_char_p, c.c_bool]

        self._tag_file = libptr.tag_file
        self._tag_file.restype = None
        self._tag_file.argtypes = [c.c_void_p, c.c_char_p, c.c_char_p, c.c_bool]

        # a non-zero return code from the C loader signals failure
        if self._load_tagger(self._zpar_session_obj, modelpath.encode('utf-8')):
            raise OSError('Cannot find tagger model at {}\n'.format(modelpath))

    def tag_sentence(self, sentence, tokenize=True):
        """Tag one sentence and return the tagged text.

        :param sentence: the input sentence; blank input yields ""
        :param tokenize: whether zpar should tokenize the sentence itself
        :returns: the tagged sentence as a str
        """
        if not sentence.strip():
            # return empty string if the input is empty
            return ""
        # zpar expects a trailing newline + space after the sentence
        zpar_compatible_sentence = sentence.strip() + "\n "
        zpar_compatible_sentence = zpar_compatible_sentence.encode('utf-8')
        tagged_sent = self._tag_sentence(self._zpar_session_obj, zpar_compatible_sentence, tokenize)
        return tagged_sent.decode('utf-8')

    def tag_file(self, inputfile, outputfile, tokenize=True):
        """Tag *inputfile* and write the result to *outputfile*.

        :raises OSError: if *inputfile* does not exist
        """
        if os.path.exists(inputfile):
            self._tag_file(self._zpar_session_obj, inputfile.encode('utf-8'), outputfile.encode('utf-8'), tokenize)
        else:
            raise OSError('File {} does not exist.'.format(inputfile))

    def cleanup(self):
        """Drop all library/session references so the wrapper can be discarded."""
        self._load_tagger = None
        self._tag_sentence = None
        self._tag_file = None
        self._zpar_session_obj = None
| StarcoderdataPython |
3398647 | <filename>exceptions/exceptions.py
class RSAEcryptionException(Exception):
    """Base class for every error raised by the RSA encryption package."""
class NotAnIterableObject(RSAEcryptionException):
    """Raised when an argument that should be iterable is not."""
class NotATextMensage(RSAEcryptionException):
    """Raised when the message to encrypt/decrypt is not text.

    NOTE(review): "Mensage" is a typo for "Message", but the class name is
    part of the public API and is kept for backward compatibility.
    """
class CastError(RSAEcryptionException):
    """Raised when a value cannot be converted to the required type."""
class NotAKey(RSAEcryptionException):
    """Raised when an object is not a valid RSA key."""
class LoadKeyDictError(RSAEcryptionException):
    """Raised when a key dictionary cannot be loaded/parsed."""
1728517 | <reponame>kennethwdk/PINet
import torch
from torch import nn
from .utils import int_sample, float_sample
from .gcn_module import BasicGraphBlock, ResGraphBlock
from dataset import VIS_CONFIG
def build_pr_net(cfg, num_joints, input_channels=480):
    """Factory: build and return the pose-refinement network."""
    return PoseRefine(cfg, num_joints, input_channels)
class PoseRefine(nn.Module):
    """Graph-convolutional pose refinement head.

    Samples per-joint and per-center features from a feature map, runs them
    through a small GCN over the skeleton graph, and predicts a 2D offset
    for every joint of every pose proposal.
    """

    def __init__(self, cfg, num_joints, input_channels=480):
        super(PoseRefine, self).__init__()
        self.num_joints = num_joints
        dataset = cfg.DATASET.DATASET
        # 'ochuman' reuses the COCO skeleton definition; unknown datasets
        # default to COCO as well
        if 'ochuman' in dataset:
            dataset = 'COCO'
        elif 'crowdpose' in dataset:
            dataset = 'CROWDPOSE'
        else:
            dataset = 'COCO'
        # skeleton description (joint names, indices, limb connections)
        self.part_idx = VIS_CONFIG[dataset]['part_idx']
        self.part_labels = VIS_CONFIG[dataset]['part_labels']
        self.part_orders = VIS_CONFIG[dataset]['part_orders']
        self.num_layers = cfg.REFINE.NUM_LAYERS
        # learnable adjacency, initialized from the normalized skeleton graph
        init_graph = self.build_graph()
        self.adj = nn.Parameter(init_graph)
        self.gconv_head = BasicGraphBlock(input_channels, input_channels)
        gconv_layers = [ResGraphBlock(input_channels, input_channels, input_channels) for _ in range(self.num_layers)]
        self.gconv_layers = nn.ModuleList(gconv_layers)
        # maps concatenated (mean, max, center) features to per-joint offsets
        self.gconv_pred = nn.Sequential(
            nn.Conv1d(input_channels * 3, input_channels, 1, 1, 0),
            nn.BatchNorm1d(input_channels),
            nn.ReLU(inplace=True),
            nn.Conv1d(input_channels, self.num_joints * 2, 1, 1, 0)
        )

    def forward(self, features, proposals):
        """Predict refinement offsets for each proposal.

        proposals is a (coords, center_ind) pair; returns a tensor of shape
        (batch, num_people, num_joints * 2).
        """
        coords, center_ind = proposals
        feat_joints = float_sample(features, coords) # batch size x max people x num_joint x feat_dim
        feat_center = int_sample(features, center_ind)
        b, num_people, num_joints, feat_dim = feat_joints.shape
        # flatten people into the batch dimension for the GCN
        feats = feat_joints.reshape(b * num_people, num_joints, -1)
        feats = self.gconv_head(feats, self.adj)
        for i in range(self.num_layers):
            feats = self.gconv_layers[i](feats, self.adj)
        # pool over joints (mean and max) and append the center feature
        feat1 = torch.mean(feats, dim=1).reshape(b, num_people, feat_dim).permute(0, 2, 1)
        feat2 = torch.max(feats, dim=1)[0].reshape(b, num_people, feat_dim).permute(0, 2, 1)
        feats = torch.cat((feat1, feat2, feat_center.permute(0, 2, 1)), dim=1)
        refine_offset = self.gconv_pred(feats).permute(0, 2, 1).reshape(b, num_people, num_joints*2)
        return refine_offset

    def build_graph(self):
        """Build the symmetric-normalized skeleton adjacency matrix.

        Starts from identity (self-loops), adds an edge for every limb in
        part_orders, then applies D^{-1/2} A D^{-1/2} normalization.
        """
        graph = torch.eye(len(self.part_labels))
        for i, part in enumerate(self.part_labels):
            for (p1, p2) in self.part_orders:
                if p1 == part: graph[i, self.part_idx[p2]] = 1
                if p2 == part: graph[i, self.part_idx[p1]] = 1
        rowsum = graph.sum(1)
        r_inv_sqrt = torch.pow(rowsum, -0.5).flatten()
        r_inv_sqrt[torch.isinf(r_inv_sqrt)] = 0.
        r_mat_inv_sqrt = torch.diag(r_inv_sqrt)
        graph = r_mat_inv_sqrt.mm(graph).mm(r_mat_inv_sqrt)
        return graph
1658624 | <filename>torch_glow/tests/nodes/bmm_test.py
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests.utils import jitVsGlow
def test_bmm():
    """Check that aten::bmm is fused and matches PyTorch when run on Glow."""

    def fn(a, b):
        return (a + a).bmm(b)

    lhs = torch.randn(6, 4, 10)
    rhs = torch.randn(6, 10, 2)
    jitVsGlow(fn, lhs, rhs, expected_fused_ops={"aten::bmm"})
| StarcoderdataPython |
1727106 | <gh_stars>0
import contextlib
import errno
import os
import signal
import socket
import struct
def builtin_base(exc_type):
    """Return the first class in exc_type's MRO not defined by py3oserror."""
    return next((cls for cls in exc_type.mro()
                 if cls.__module__ != 'py3oserror'), None)
# pytest.raises isn't catching these errors on Python 2.6
# implement a simple version with standard exception-catching semantics
# also checks that the base class of the fake exception matches the real one
@contextlib.contextmanager
def raises(exc_type, check_base=True):
    """Assert that the managed block raises *exc_type*.

    Simple stand-in for pytest.raises with standard exception-catching
    semantics (pytest.raises misbehaves for these errors on Python 2.6).
    With check_base=True it also verifies that the base class of the fake
    exception matches the real builtin one.
    """
    caught = False
    try:
        yield
    except exc_type as e:
        caught = True
        if check_base:
            assert type(e) is builtin_base(exc_type)
    if not caught:
        raise AssertionError('Expected %s to be raised' % exc_type.__name__)
def raise_errno(exc_type, err):
    """Raise exc_type built from errno *err* and its standard message."""
    raise exc_type(err, os.strerror(err))
def test_blocking_io():
    from py3oserror import BlockingIOError
    reader, writer = socket.socketpair()
    reader.setblocking(0)
    # nothing was sent, so a non-blocking recv fails with EAGAIN
    with raises(BlockingIOError):
        reader.recv(1)
def test_child_process():
    from py3oserror import ChildProcessError
    # waitpid(-1) fails with ECHILD when there is no child process to wait for
    with raises(ChildProcessError):
        os.waitpid(-1, 0)
def test_connection_error():
    from py3oserror import ConnectionError
    left, right = socket.socketpair()
    right.close()
    # sending to a closed peer raises some ConnectionError subclass;
    # check_base=False since the concrete subclass is tested separately
    with raises(ConnectionError, check_base=False):
        left.send(b'x')
def test_broken_pipe():
    from py3oserror import BrokenPipeError
    left, right = socket.socketpair()
    right.close()
    # writing after the peer closed triggers EPIPE
    with raises(BrokenPipeError):
        left.send(b'x')
def test_connection_aborted():
    from py3oserror import ConnectionAbortedError
    with raises(ConnectionAbortedError):
        # TODO real-world example; until then, raise ECONNABORTED directly
        raise_errno(socket.error, errno.ECONNABORTED)
def test_connection_refused():
    from py3oserror import ConnectionRefusedError
    # bind to an ephemeral port to learn a local address, then close the
    # socket so the address is guaranteed to be unbound
    probe = socket.socket()
    try:
        probe.bind(('', 0))
        addr = probe.getsockname()
    finally:
        probe.close()
    with raises(ConnectionRefusedError):
        socket.create_connection(addr)
def test_connection_reset():
    from py3oserror import ConnectionResetError
    # connect a client and server socket
    listener = socket.socket()
    client = socket.socket()
    listener.bind(('127.0.0.1', 0))
    listener.listen(1)
    client.connect(listener.getsockname())
    server, _ = listener.accept()
    # force server to reset the connection:
    # SO_LINGER with a zero timeout makes close() send RST instead of FIN
    server.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
                      struct.pack('ii', 1, 0))
    # dropping the last reference closes the socket, triggering the reset
    server = None
    with raises(ConnectionResetError):
        client.recv(1)
def test_file_exists_error(tmpdir):
    from py3oserror import FileExistsError
    # tmpdir already exists, so mkdir must fail with EEXIST
    with raises(FileExistsError):
        os.mkdir(str(tmpdir))
def test_file_not_found_error():
    from py3oserror import FileNotFoundError
    # opening a missing path fails with ENOENT
    with raises(FileNotFoundError):
        open('does-not-exist.txt')
@contextlib.contextmanager
def sig_handler(signum, handler):
    """Temporarily install *handler* for *signum*, restoring the previous
    handler on exit."""
    previous = signal.signal(signum, handler)
    try:
        yield handler
    finally:
        signal.signal(signum, previous)
def test_interrupted():
    from py3oserror import InterruptedError
    left, right = os.pipe()
    # arm a 0.1s SIGALRM so the blocking read below is interrupted (EINTR)
    with sig_handler(signal.SIGALRM, lambda n,f: None):
        signal.setitimer(signal.ITIMER_REAL, 0.1)
        with raises(InterruptedError):
            os.read(left, 1)
def test_is_a_directory():
    from py3oserror import IsADirectoryError
    # opening a directory for reading fails with EISDIR
    with raises(IsADirectoryError):
        open(os.path.dirname(__file__))
def test_not_a_directory():
    from py3oserror import NotADirectoryError
    # listing a regular file fails with ENOTDIR
    with raises(NotADirectoryError):
        os.listdir(__file__)
def test_permission(tmpdir):
    from py3oserror import PermissionError
    path = str(tmpdir.join('test.txt'))
    # just create the file
    with open(path, 'w'):
        pass
    # make it read-only
    os.chmod(path, 0o400)
    # reopening for write now fails with EACCES
    with raises(PermissionError):
        open(path, 'w')
def test_process_lookup():
    from py3oserror import ProcessLookupError
    # get a pid that we know has exited, so that the signal will fail
    pid = os.fork()
    if pid == 0:
        os._exit(0)
    os.waitpid(pid, 0)
    # the child has been reaped, so signalling its pid fails with ESRCH
    with raises(ProcessLookupError):
        os.kill(pid, signal.SIG_DFL)
def test_timeout():
    from py3oserror import TimeoutError
    with raises(TimeoutError):
        # TODO real-world example; until then, raise ETIMEDOUT directly
        raise_errno(IOError, errno.ETIMEDOUT)
| StarcoderdataPython |
3371870 | from typing import Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
from sgkit.typing import ArrayLike
def _tuple_len(t: Union[int, Tuple[int, ...], str, Tuple[str, ...]]) -> int:
"""Return the length of a tuple, or 1 for an int or string value."""
if isinstance(t, int) or isinstance(t, str):
return 1
return len(t)
def _cohorts_to_array(
    cohorts: Sequence[Union[int, Tuple[int, ...], str, Tuple[str, ...]]],
    index: Optional[pd.Index] = None,
) -> ArrayLike:
    """Convert cohorts (values or tuples, by index or label) to an int array.

    Cohorts may be given as index locations (as used in ``sample_cohorts``)
    or as labels, in which case *index* is used to resolve each label to its
    index location.

    Parameters
    ----------
    cohorts
        A sequence of values or tuples representing cohorts or cohort tuples.
    index
        An index to turn labels into index locations, by default None.

    Returns
    -------
    An array of shape ``(len(cohorts), tuple_len)``, where ``tuple_len`` is
    the common tuple length, or 1 when ``cohorts`` holds plain values.

    Raises
    ------
    ValueError
        If the cohort tuples are not all the same length.

    Examples
    --------
    >>> import pandas as pd
    >>> from sgkit.cohorts import _cohorts_to_array
    >>> _cohorts_to_array([(0, 1), (2, 1)]) # doctest: +SKIP
    array([[0, 1],
           [2, 1]], dtype=int32)
    >>> _cohorts_to_array([("c0", "c1"), ("c2", "c1")], pd.Index(["c0", "c1", "c2"])) # doctest: +SKIP
    array([[0, 1],
           [2, 1]], dtype=int32)
    """
    if len(cohorts) == 0:
        return np.array([], np.int32)

    width = _tuple_len(cohorts[0])
    if any(_tuple_len(c) != width for c in cohorts):
        raise ValueError("Cohort tuples must all be the same length")

    # resolve cohort labels to index locations
    if index is not None:
        if isinstance(cohorts[0], str):
            cohorts = [index.get_loc(label) for label in cohorts]
        elif width > 1 and isinstance(cohorts[0][0], str):  # type: ignore
            cohorts = [tuple(index.get_loc(label) for label in t) for t in cohorts]  # type: ignore

    out = np.empty((len(cohorts), width), np.int32)
    for row, cohort in enumerate(cohorts):
        out[row, :] = cohort
    return out
| StarcoderdataPython |
1668865 | <gh_stars>1-10
# Package version string (single source of truth for the distribution).
__version__ = "0.3.19"
| StarcoderdataPython |
18459 | <filename>tools/com/test/test_alpha.py
from tools.com.alpha import Flow, Path
def test_flow():
    # build a Path and wrap some content in a Flow carrying extra metadata
    p = Path("TEST", "layer", ["TASK", "SUB"])
    f = Flow(content="abc123", path=p, format="text", a=1, b=7, c="aaaa")
    # expected serialization: path header, format line, one "key: value"
    # line per kwarg, then the content
    s = str(p).encode() + b"""
text
a: 1
b: 7
c: aaaa
abc123"""
    # the first 5 bytes of to_bytes() are skipped — presumably a fixed
    # prefix; TODO confirm against Flow.to_bytes
    assert f.to_bytes()[5:] == s
| StarcoderdataPython |
3309568 | import time, array, random, copy, math
import pandas as pd
import numpy as np
from math import sqrt
from deap import algorithms, base, creator, gp, benchmarks, tools
from deap.benchmarks.tools import diversity, convergence, hypervolume
from deap.tools import History
import json, codecs
import csv
from functions import resistance
def optimization_deap_resistance(lwlmin, lwlmax, bwlmin, bwlmax, tcmin, tcmax, lcfmin, lcfmax, lcbmin, lcbmax, cbmin, cbmax, cwpmin, cwpmax, cpmin, cpmax, cmmin, cmmax, dispmin):
    """Run a DEAP multi-objective GA over nine hull parameters.

    The two objectives are total resistance (weighted negative, i.e.
    minimized) and comfort ratio (maximized), averaged over the configured
    velocity and heel ranges.  GA settings are read from
    assets/data/parametersga.json; every evaluated individual is also
    appended to a CSV via exportresults().

    Returns (f1, f2, index): resistance, comfort ratio and genealogy index
    arrays covering every individual created during the run.
    """
    ### PARAMATERS
    gaconfig_obj = codecs.open('assets/data/parametersga.json', 'r', encoding='utf-8').read()
    gaconfig = json.loads(gaconfig_obj)
    # weight objectives (values) and whether minimized (negative) or maximized (positive)
    # NOTE(review): np.float is removed in NumPy >= 1.24 — confirm the pinned
    # NumPy version or switch to float(...)
    weight1 = np.float(gaconfig["weight1"])*(-1)/10 # resistance weight - multiplied by minus one to be minimized
    weight2 = np.float(gaconfig["weight2"])/10 # comfort ratio weight
    velocityrange = np.array(gaconfig["velocityrange"])
    heelrange = np.array(gaconfig["heelrange"])
    # decision-variable bounds, one (low, up) pair per hull parameter
    bound_low1, bound_up1 = lwlmin, lwlmax
    bound_low2, bound_up2 = bwlmin, bwlmax
    bound_low3, bound_up3 = tcmin, tcmax
    bound_low4, bound_up4 = lcfmin, lcfmax
    bound_low5, bound_up5 = lcbmin, lcbmax
    bound_low6, bound_up6 = cbmin, cbmax
    bound_low7, bound_up7 = cwpmin, cwpmax
    bound_low8, bound_up8 = cpmin, cpmax
    bound_low9, bound_up9 = cmmin, cmmax
    pop_size = np.int(gaconfig["popsize"]) # number of the population
    children_size = np.int(gaconfig["childrensize"]) # number of children to produce at each generation
    max_gen = np.int(gaconfig["maxgeneration"]) # number of times the algorithm is run
    mut_prob = np.int(gaconfig["mutprob"])/100 # probability of mutation
    halloffame_number = np.int(gaconfig["halloffamenumber"]) # number of best individuals selected
    indpb_value = np.int(gaconfig["indpb"])/100 # independent probability for each attribute to be mutated
    eta_value = np.int(gaconfig["eta"]) # crowding degree of the crossover. A high eta will produce children resembling to their parents, while a small eta will produce solutions much more different
    selectionmethod = np.int(gaconfig["selectionmethod"])
    mutationmethod = np.int(gaconfig["mutationmethod"])
    crossovermethod = np.int(gaconfig["crossovermethod"])
    NDIM = 2 # number of problem dimensions (objectives?)
    random.seed(a = 42) # control randomnesss
    savefile = "optimizationresistance"

    ### BUILD MODEL
    # generate one individual's nine attributes, each uniform within its bounds
    def uniform(low1, up1, low2, up2, low3, up3, low4, up4, low5, up5, low6, up6, low7, up7, low8, up8, low9, up9, size=None): # function to generate the attributes of the initial population
        return [random.uniform(low1, up1), random.uniform(low2, up2), random.uniform(low3, up3), random.uniform(low4, up4), random.uniform(low5, up5), random.uniform(low6, up6), random.uniform(low7, up7), random.uniform(low8, up8), random.uniform(low9, up9)]

    def evaluate(individual): # calculate the evaluating functions (objetive 1 = f1 and objective = f2)
        """Average resistance (f1) and comfort ratio (f2) over the velocity
        and heel grids; also logs the design via exportresults()."""
        lwl, bwl, tcan, lcf, lcb, cb, cwp, cp, cm = individual[0], individual[1], individual[2], individual[3], individual[4], individual[5], individual[6], individual[7], individual[8]
        divcan = lwl*bwl*tcan*cb
        awp = bwl*lwl*cwp
        dimensions = codecs.open('assets/data/dimensions.json', 'r', encoding='utf-8').read()
        dim = json.loads(dimensions)
        alcb_coefficient = np.float(dim["alcb_coefficient"])
        alcb = lwl*alcb_coefficient*tcan
        loa = lwl*1.05
        boa = bwl*1.1
        savefile="optimizationresistance"
        Rt, CR, Rv, Ri, Rr, Rincli, count = 0, 0, 0, 0, 0, 0, 0
        # accumulate resistance components over the (velocity, heel) grid
        for vboat in range (velocityrange[0], velocityrange[1], 1):
            for heel in range (heelrange[0], heelrange[1], 5):
                result = resistance(lwl, bwl, tcan, alcb, cp, cm, awp, divcan, lcb, lcf, vboat, heel)
                Rt, Rv, Ri, Rr, Rincli, CR, count = Rt+result[0], Rv+result[1], Ri+result[2], Rr+result[3], Rincli+result[4], CR+result[5], count+1
        Rt, CR, Rv, Ri, Rr, Rincli = Rt/count, CR/count, Rv/count, Ri/count, Rr/count, Rincli/count
        f1 = Rt
        f2 = CR
        exportresults(savefile, boa, tcan, divcan, lwl, bwl, awp, lcb, lcf, Rt, Rv, Ri, Rr, Rincli, CR, dispmin)
        return f1, f2

    def feasible(individual):
        # https://deap.readthedocs.io/en/master/tutorials/advanced/constraints.html
        # returns true if feasible, false otherwise
        # NOTE(review): as written, this returns True only when every ratio is
        # OUTSIDE its range, cs exceeds csmax and disp < dispmin — this looks
        # inverted relative to the usual constraint sense; confirm intent.
        lwl, bwl, tc, lcf, lcb, cb, cwp, cp, cm = individual[0], individual[1], individual[2], individual[3], individual[4], individual[5], individual[6], individual[7], individual[8]
        disp = lwl*bwl*tc*cb
        awp = bwl*lwl*cwp
        boa = bwl*1.2
        loa = lwl*1.05
        dispmass = disp*1025
        cs = boa*3.28084/(dispmass*2.20462/64)**(1/3) # capsize screening factor
        csmax = 2
        if (lwl/bwl) > 5 or (lwl/bwl) < 2.73:
            if (bwl/tc) > 6.5 or (bwl/tc) < 3.8: #(bwl/tcan) > 19.39 or (bwl/tcan) < 2.46 delft series seems to have unrealistic limits
                if (lwl/disp**(1/3)) > 8.5 or (lwl/disp**(1/3)) < 4.34:
                    if (awp/disp**(2/3)) > 12.67 or (awp/disp**(2/3)) < 3.78:
                        if cs > csmax:
                            if disp < dispmin:
                                return True
                            else:
                                return False
                        else:
                            return False
                    else:
                        return False
                else:
                    return False
            else:
                return False
        else:
            return False

    # create a function and assign the weights
    creator.create("FitnessMulti", base.Fitness, weights=(weight1, weight2))
    # define the type of each individual (array, list, ...) and inherit the Fitness attributes
    creator.create("Individual", array.array, typecode='d', fitness=creator.FitnessMulti)
    toolbox = base.Toolbox()
    toolbox.register("attr_float", uniform, bound_low1, bound_up1, bound_low2, bound_up2, bound_low3, bound_up3, bound_low4, bound_up4, bound_low5, bound_up5, bound_low6, bound_up6, bound_low7, bound_up7, bound_low8, bound_up8, bound_low9, bound_up9) # defines how to create an individual with attributes within the bounds
    # create the individual
    toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.attr_float)
    # create the population in a list
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    # defines what is the evaluating function
    toolbox.register("evaluate", evaluate)
    # crossover operator selected via config
    if crossovermethod == 1:
        toolbox.register("mate", tools.cxSimulatedBinaryBounded, low=[bound_low1, bound_low2, bound_low3, bound_low4, bound_low5, bound_low6, bound_low7, bound_low8, bound_low9], up=[bound_up1, bound_up2, bound_up3, bound_up4, bound_up5, bound_up6, bound_up7, bound_up8, bound_up9], eta=eta_value)
    if crossovermethod == 2:
        toolbox.register("mate", tools.cxOnePoint)
    if crossovermethod == 3:
        toolbox.register("mate", tools.cxTwoPoints)
    if crossovermethod == 4:
        toolbox.register("mate", tools.cxUniform, indpb=indpb_value)
    # mutation operator selected via config
    if mutationmethod == 1:
        toolbox.register("mutate", tools.mutPolynomialBounded, low=[bound_low1, bound_low2, bound_low3, bound_low4, bound_low5, bound_low6, bound_low7, bound_low8, bound_low9], up=[bound_up1, bound_up2, bound_up3, bound_up4, bound_up5, bound_up6, bound_up7, bound_up8, bound_up9], eta=eta_value, indpb=indpb_value)
    if mutationmethod == 2:
        # NOTE(review): lwl, bwl, tcan, lcf, lcb, cb, cwp, cp, cm are not
        # defined in this scope, so selecting mutationmethod == 2 raises
        # NameError — confirm the intended mu values.
        toolbox.register("mutate", tools.mutGaussian, mu=[lwl, bwl, tcan, lcf, lcb, cb, cwp, cp, cm], sigma=0.5, indpb=indpb_value)
    if selectionmethod == 1:
        toolbox.register("select", tools.selNSGA2)
    elif selectionmethod == 2:
        toolbox.register("select", tools.selSPEA2)
    history = History() # store the data to generate the genealogic diagram
    toolbox.decorate("mate", history.decorator) # store the mate data
    toolbox.decorate("mutate", history.decorator) # store the mutate data
    toolbox.decorate("evaluate", tools.DeltaPenalty(feasible, 99999)) # constraint handler, function and result that is returned
    toolbox.pop_size = pop_size # number of the population
    toolbox.children_size = children_size # number of children to produce at each generation
    toolbox.max_gen = max_gen # number of times the algorithm is run
    toolbox.mut_prob = mut_prob # probability of mutation
    hof = tools.HallOfFame(halloffame_number) # number of best individuals selected
    # per-generation fitness and size statistics for the logbook
    stats_fit = tools.Statistics(key=lambda ind: ind.fitness.values)
    stats_size = tools.Statistics(key=len)
    mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
    mstats.register("avg", np.mean, axis=0)
    mstats.register("std", np.std, axis=0)
    mstats.register("min", np.min, axis=0)
    mstats.register("max", np.max, axis=0)
    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "fitness", "size"
    logbook.chapters["fitness"].header = "std", "min", "avg", "max",
    logbook.chapters["size"].header = "min", "avg", "max"

    ### RUN MODEL
    def run_ea(toolbox, stats=mstats, verbose=False):
        """Initialize a population and run the (mu + lambda) evolutionary loop."""
        pop = toolbox.population(n=toolbox.pop_size)
        history.update(pop)
        pop = toolbox.select(pop, len(pop))
        return algorithms.eaMuPlusLambda(pop, toolbox,
                                         mu=toolbox.pop_size, # number of individuals to select for the next generation
                                         lambda_=toolbox.children_size, # number of children to produce at each generation
                                         cxpb=1-toolbox.mut_prob, # probability that an offspring is produced by crossover
                                         mutpb=toolbox.mut_prob, # probability that an offspring is produced by mutation
                                         stats=mstats,
                                         halloffame=hof, # contain the best individuals
                                         ngen=toolbox.max_gen, # number of generations (cycles/iterations)
                                         verbose=False) # print or not

    res, logbook = run_ea(toolbox, stats=mstats) # res: the last generated population
    fronts = tools.emo.sortLogNondominated(res, len(res)) # fronts: Pareto-optimal set of this last population

    ### PLOTS
    par1=[]
    for i, inds in enumerate(fronts): # only the first (best) front is kept in par1
        par = [toolbox.evaluate(ind) for ind in inds]
        if i == 0:
            par1 = par
    flength=len(history.genealogy_history)
    f1, f2, index = np.zeros(flength), np.zeros(flength), np.zeros(flength)
    # NOTE(review): x1..x7 and par1 are computed but never used below
    x1, x2, x3, x4, x5, x6, x7 = np.zeros(len(res)), np.zeros(len(res)), np.zeros(len(res)), np.zeros(len(res)), np.zeros(len(res)), np.zeros(len(res)), np.zeros(len(res))
    # genealogy_history is keyed from 1; i runs 1..flength-1 and reads entry
    # i+1, so entry 1 is never evaluated and f1[0]/f2[0] stay 0 —
    # TODO confirm the intended indexing
    for i in range (1, flength):
        f1[i]=np.float(evaluate(history.genealogy_history[i+1])[0])
        f2[i]=np.float(evaluate(history.genealogy_history[i+1])[1])
        index[i]=i
    return f1, f2, index
def exportresults(savefile, boa, tcan, divcan, lwl, bwl, awp, lcb, lcf, Rt, Rv, Ri, Rr, Rincli, CR, dispmin):
    """Append one evaluated hull design to the optimization CSV log.

    Recomputes the six feasibility constraints for the given dimensions and
    writes a row containing the resistance components, the capsize screening
    factor, the dimensions, each constraint-violation flag and an overall
    validity flag.  Returns the capsize screening factor.
    """
    # count existing rows to derive the next row index
    with open("assets/data/optimizationresistance.csv", "r") as csvfile:
        csvreader = csv.reader(csvfile)
        for _ in csvreader:
            pass
        index = csvreader.line_num
    print(index)
    # bug fix: the original unpacked 8 values into these 7 names, raising
    # ValueError ("too many values to unpack") on every call
    constraint1, constraint2, constraint3, constraint4, constraint5, constraint6, valid = False, False, False, False, False, False, False
    dispmass = divcan*1025
    # capsize screening factor (imperial units: feet and pounds)
    cs = boa*3.28084/(dispmass*2.20462/64)**(1/3)
    if (lwl/bwl) > 5 or (lwl/bwl) < 2.73:
        constraint1 = True
    if (bwl/tcan) > 6.5 or (bwl/tcan) < 3.8:
        constraint2 = True
    if (lwl/divcan**(1/3)) > 8.5 or (lwl/divcan**(1/3)) < 4.34:
        constraint3 = True
    if (awp/divcan**(2/3)) > 12.67 or (awp/divcan**(2/3)) < 3.78:
        constraint4 = True
    if divcan < dispmin:
        constraint5 = True
    if cs > 2:
        constraint6 = True
    # valid only when no constraint was violated
    if not any((constraint1, constraint2, constraint3, constraint4, constraint5, constraint6)):
        valid = True
    exportdata = [index, format(Rt, '.4f'), format(Rv, '.4f'), format(Ri, '.4f'), format(Rr, '.4f'), format(Rincli, '.4f'), format(CR, '.4f'), format(cs, '.4f'), format(lwl, '.4f'), format(bwl, '.4f'), format(tcan, '.4f'), format(divcan, '.4f'), format(awp, '.4f'), format(lcb, '.4f'), format(lcf, '.4f'), constraint1, constraint2, constraint3, constraint4, constraint5, constraint6, valid]
    with open("assets/data/optimizationresistance.csv", "a") as file:
        writer = csv.writer(file, delimiter=',')
        writer.writerow(exportdata)
    return cs
9412 | <reponame>xwu20/wmg_agent<filename>specs/test_gru_on_flat_babyai.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
### CONTROLS (non-tunable) ###
# general
TYPE_OF_RUN = test_episodes # train, test, test_episodes, render
NUM_EPISODES_TO_TEST = 1000
MIN_FINAL_REWARD_FOR_SUCCESS = 1.0
LOAD_MODEL_FROM = models/gru_flat_babyai.pth
SAVE_MODELS_TO = None
# worker.py
ENV = BabyAI_Env
ENV_RANDOM_SEED = 1
AGENT_RANDOM_SEED = 1
REPORTING_INTERVAL = 1
TOTAL_STEPS = 1
ANNEAL_LR = False
# A3cAgent
AGENT_NET = GRU_Network
# BabyAI_Env
BABYAI_ENV_LEVEL = BabyAI-GoToLocal-v0
USE_SUCCESS_RATE = True
SUCCESS_RATE_THRESHOLD = 0.99
HELDOUT_TESTING = False
NUM_TEST_EPISODES = 10000
OBS_ENCODER = Flat
BINARY_REWARD = True
### HYPERPARAMETERS (tunable) ###
# A3cAgent
A3C_T_MAX = 4
LEARNING_RATE = 4e-05
DISCOUNT_FACTOR = 0.9
GRADIENT_CLIP = 512.0
ENTROPY_TERM_STRENGTH = 0.02
ADAM_EPS = 1e-12
REWARD_SCALE = 2.0
WEIGHT_DECAY = 0.
# RNNs
NUM_RNN_UNITS = 96
OBS_EMBED_SIZE = 512
AC_HIDDEN_LAYER_SIZE = 4096
| StarcoderdataPython |
1606833 | """
`edit-flags` command test module
"""
import pytest
from tests.utils import (
ARCH,
GefUnitTestGeneric,
gdb_start_silent_cmd_last_line,
gdb_start_silent_cmd,
)
@pytest.mark.skipif(ARCH not in ["i686", "x86_64", "armv7l", "aarch64"],
                    reason=f"Skipped for {ARCH}")
class EditFlagsCommand(GefUnitTestGeneric):
    """`edit-flags` command test module"""

    def setUp(self) -> None:
        # Read the current flag-register display and remember how the carry
        # flag is rendered; res[1:-1] strips what is assumed to be a pair of
        # surrounding brackets — confirm against the command's output format.
        # The flag's case indicates its state (upper = set, lower = clear).
        res = gdb_start_silent_cmd_last_line("edit-flags")
        self.assertNoException(res)
        flags = res[1:-1].split()
        self.flag_name = "carry"
        self.initial_value = [f for f in flags if f.lower() == self.flag_name][0]
        return super().setUp()

    def test_cmd_edit_flags_disable(self):
        # set then clear: the flag must end up rendered in lower case
        res = gdb_start_silent_cmd_last_line("edit-flags",
                                             after=(f"edit-flags +{self.flag_name}",
                                                    f"edit-flags -{self.flag_name}"))
        self.assertNoException(res)
        self.assertIn(self.flag_name.lower(), res)

    def test_cmd_edit_flags_enable(self):
        # clear then set: the flag must end up rendered in upper case
        res = gdb_start_silent_cmd("edit-flags",
                                   after=(f"edit-flags -{self.flag_name}",
                                          f"edit-flags +{self.flag_name}"))
        self.assertNoException(res)
        self.assertIn(self.flag_name.upper(), res)

    def test_cmd_edit_flags_toggle(self):
        # toggling must flip the case recorded in setUp
        res = gdb_start_silent_cmd_last_line(f"edit-flags ~{self.flag_name}")
        self.assertNoException(res)
        if self.initial_value == self.flag_name.upper():
            self.assertIn(self.flag_name.lower(), res)
        else:
            self.assertIn(self.flag_name.upper(), res)
| StarcoderdataPython |
66050 | <reponame>GlobalFishingWatch/anchorages_pipeline
from __future__ import absolute_import
from apache_beam.options.pipeline_options import PipelineOptions
class PortEventsOptions(PipelineOptions):
    """Command-line options for the port entry/exit events Beam pipeline."""

    @classmethod
    def _add_argparse_args(cls, parser):
        # Use add_value_provider_argument for arguments to be templatable
        # Use add_argument as usual for non-templatable arguments
        required = parser.add_argument_group('Required')
        optional = parser.add_argument_group('Optional')

        # NOTE(review): this argument sits in the Required group but lacks
        # required=True, so argparse will not actually enforce it — confirm.
        required.add_argument('--anchorage_table',
                              help='Name of of anchorages table (BQ)')
        required.add_argument('--input_table', required=True,
                              help='Table to pull position messages from')
        required.add_argument('--state_table', required=True,
                              help='Table containing port state on each day')
        required.add_argument('--output_table', required=True,
                              help='Output table (BQ) to write results to.')
        required.add_argument('--start_date', required=True,
                              help="First date to look for entry/exit events.")
        required.add_argument('--end_date', required=True,
                              help="Last date (inclusive) to look for entry/exit events.")
        optional.add_argument('--config', default='anchorage_cfg.yaml',
                              help="Path to configuration file")
        optional.add_argument('--ssvid_filter',
                              help='Subquery or list of ssvid to limit processing to.\n'
                                   'If prefixed by @, load from given path')
        optional.add_argument('--wait_for_job', default=False, action='store_true',
                              help='Wait until the job finishes before returning.')
| StarcoderdataPython |
36090 | # Copyright 2022 Tiernan8r
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Constructs the quantum register, circuits of composite gates, and runs the
simulation of Grover's Algorithm
"""
import abc
from typing import Tuple
from qcp.matrices import SPARSE, DefaultMatrix, Matrix
class GeneralAlgorithm(abc.ABC):
    """Base class for quantum algorithm simulations.

    On construction it builds the initial state vector and the circuit
    matrix; subclasses supply the circuit and the measurement logic.
    """

    def __init__(self, size: int):
        # size = number of qubits; the state vector has 2**size entries
        assert size > 1, "need minimum of two qbits"
        self.size = size
        self.state = self.initial_state()
        self.circuit = self.construct_circuit()

    def initial_state(self) -> Matrix:
        """
        Creates a state vector corresponding to |0..0>
        (entry 0 set to 1, all other amplitudes zero)

        returns:
            Matrix: the state vector
        """
        entries: SPARSE = {i: {} for i in range(2 ** self.size)}
        entries[0][0] = 1
        return DefaultMatrix(entries)

    def construct_circuit(self) -> Matrix:
        """
        Construct the circuit for the algorithm

        Stub returning None; subclasses are expected to override it.
        NOTE(review): not marked @abstractmethod, so overriding is not
        enforced — confirm whether that is intentional.

        returns:
            Matrix: Matrix representing our the circuit for the algorithm
        """
        pass

    def run(self) -> Matrix:
        """
        Run the algorithm by applying the quantum circuit to the initial
        state

        When construct_circuit() returned None, the state is left unchanged.

        returns:
            Matrix: Column matrix representation of the final state
        """
        if self.circuit is not None:
            self.state = self.circuit * self.state
        return self.state

    def measure(self) -> Tuple[int, float]:
        """
        'measures' self.state by selecting a state weighted by its
        (amplitude ** 2)

        Stub returning None; subclasses are expected to override it.

        returns:
            Tuple[int, float]: The state observed and the probability of
            measuring said state
        """
        pass
| StarcoderdataPython |
3233646 | from datetime import datetime
import logging
from typing import List
import time
import hashlib
from scrapydd.models import session_scope, ProjectPackage, Project, Spider, \
Trigger, SpiderExecutionQueue, \
SpiderParameter, Session, User
from scrapydd.storage import ProjectStorage
from scrapydd.exceptions import ProjectNotFound, SpiderNotFound, \
ProjectAlreadyExists
from .workspace import SpiderSetting, find_package_version
from .models import Package
logger = logging.getLogger(__name__)
class SpiderNameAlreadyExist(Exception):
    """Raised when creating a spider whose name already exists in the project."""
class ProjectManager:
    def __init__(self, runner_factory,
                 project_storage_dir,
                 scheduler_manager,
                 default_project_storage_version=2,
                 ):
        """Wire up the collaborators used to manage projects.

        :param runner_factory: builds runners able to inspect project eggs.
        :param project_storage_dir: root directory for eggs and job data.
        :param scheduler_manager: used to remove spider schedules on delete.
        :param default_project_storage_version: storage layout for new projects.
        """
        self.runner_factory = runner_factory
        self.project_storage_dir = project_storage_dir
        self.default_project_storage_version = default_project_storage_version
        self.scheduler_manager = scheduler_manager
    async def upload_project(self, user, project_name, version, eggf):
        """Create (or fetch) *user*'s project *project_name* and upload *eggf*
        as a new package, auto-registering the spiders it contains.

        Returns the refreshed Project instance.
        """
        with session_scope() as session:
            project = session.query(Project)\
                .filter_by(name=project_name, owner=user).first()
            if project is None:
                project = Project()
                project.name = project_name
                project.storage_version = self.default_project_storage_version
                # user may be None for anonymous/single-user deployments
                if user:
                    project.owner_id = user.id
                project.version = version
                session.add(project)
                session.commit()
            ret = await self.upload_project_package(session, project,
                                                    f_egg=eggf,
                                                    version=version,
                                                    auto_populate_spiders=True)
            return ret
def create_project(self, session, user_id, project_name,
return_existing=False):
if hasattr(user_id, 'id'):
user_id = user_id.id
existing_project = session.query(Project)\
.filter_by(owner_id=user_id, name=project_name).first()
if existing_project and return_existing:
return existing_project
if existing_project:
raise ProjectAlreadyExists()
project = Project()
project.owner_id = user_id
project.name = project_name
project.storage_version = self.default_project_storage_version
session.add(project)
session.commit()
return project
def create_spider(self, session, project, spider_name):
existing_spiders = session.query(Spider)\
.filter_by(project_id=project.id)
for existing_spider in existing_spiders:
if existing_spider.name == spider_name:
raise SpiderNameAlreadyExist()
spider = Spider(project=project, name=spider_name)
session.add(spider)
session.commit()
return spider
    async def upload_project_package(self, session, project, f_egg, version,
                                     auto_populate_spiders=False):
        """
        Upload a new package for an existing project.
        :param session: Session
        :param project: Project
        :param f_egg: file-like obj of project egg binary
        :param version: package version
        :return: the project
        """
        # NOTE(review): the caller-supplied `version` is immediately
        # overwritten with a timestamp, so the parameter is effectively
        # ignored — confirm whether that is intended.
        version = str(int(time.time()))
        # inspect the egg to discover its spider names
        runner = self.runner_factory.build(f_egg)
        try:
            spiders = await runner.list()
            logger.debug('spiders: %s' % spiders)
        finally:
            runner.clear()
        # reuse the project's singleton package record, or create it
        package = project.package
        if not package:
            package = ProjectPackage()
            package.project = project
        package.type = 'scrapy'
        package.spider_list = ','.join(spiders)
        project_storage = ProjectStorage(self.project_storage_dir, project)
        f_egg.seek(0)
        # TODO: identifying egg by custom version should be removed.
        # we can extract version from egg metadata, and
        # the uploaded package should be versioned not only by the binary
        # version tag but also the uploaded time.
        # As the auto-generated package.id will not be available
        # before the data persisted, but file_path must be ready
        # before it, so the file_path cannot be related to the
        # package.id. Need another unique identifier here.
        egg_file_path = project_storage.put_egg(f_egg, version)
        project.package = package
        session.add(project)
        session.commit()
        # register any spider names not yet present in the project
        if auto_populate_spiders:
            for spider_name in spiders:
                existing_spider = session.query(Spider)\
                    .filter_by(project=project, name=spider_name).first()
                if not existing_spider:
                    new_spider = Spider(project=project, name=spider_name)
                    session.add(new_spider)
            session.commit()
        # also record a versioned Package history row for this upload
        package = Package()
        package.project = project
        package.type = 'scrapy'
        package.spider_list = ','.join(spiders)
        package.version = self._generate_project_package_version(project)
        f_egg.seek(0)
        package.egg_version = find_package_version(f_egg)
        package.file_path = egg_file_path
        package.create_date = datetime.now()
        f_egg.seek(0)
        package.checksum = self._compute_checksum(f_egg)
        session.add(package)
        session.commit()
        session.refresh(project)
        return project
def _generate_project_package_version(self, project):
try:
last_version = project.packages[0].version
return last_version + 1
except IndexError:
return 1
def _compute_checksum(self, f_egg):
f_egg.seek(0)
h = hashlib.sha1()
h.update(f_egg.read())
return h.hexdigest()
    def delete_project(self, user_id, project_id):
        """Delete a project and everything that hangs off it.

        Per spider this removes queued executions, stored parameters,
        schedules and historical job data; then the stored egg, all package
        records and finally the project row itself.

        :param user_id: currently unused — no ownership check happens here.
            NOTE(review): confirm authorization is enforced by the caller.
        :param project_id: primary key of the project to delete.
        """
        with session_scope() as session:
            project = session.query(Project).get(project_id)
            project_storage = ProjectStorage(
                self.project_storage_dir, project,
                self.default_project_storage_version)
            for spider in project.spiders:
                triggers = session.query(Trigger)\
                    .filter_by(spider_id=spider.id)
                session.query(SpiderExecutionQueue)\
                    .filter_by(spider_id=spider.id).delete()
                session.query(SpiderParameter)\
                    .filter_by(spider_id=spider.id).delete()
                session.commit()
                for trigger in triggers:
                    self.scheduler_manager\
                        .remove_schedule(spider, trigger_id=trigger.id)
                # NOTE(review): queue rows for this spider are deleted a
                # second time here — possibly to catch jobs enqueued while
                # schedules were being removed; confirm both passes are
                # actually needed.
                session.query(SpiderExecutionQueue)\
                    .filter_by(spider_id=spider.id).delete()
                for historical_job in spider.historical_jobs:
                    project_storage.delete_job_data(historical_job)
                    session.delete(historical_job)
                session.delete(spider)
            project_storage.delete_egg()
            for package in project.packages:
                self._delete_project_package(session, package)
            if project.package:
                session.delete(project.package)
            session.delete(project)
            session.commit()
    def _delete_project_package(self, session, package):
        """Delete one versioned Package row and commit immediately.

        Only the database row is removed; the egg file on disk is cleaned up
        separately via ``ProjectStorage.delete_egg()``.
        """
        session.delete(package)
        session.commit()
    def get_projects(self, session: Session, user: User) -> List[Project]:
        """
        Get projects by owner.
        :param session: pass a session from caller.
        :param user: the owner
        :return: list of Project

        NOTE(review): no ``.all()`` is called, so this actually returns the
        lazily-evaluated Query object rather than a materialized list.
        Iterating callers are unaffected, but list-only operations
        (``len``, indexing) are not available on the result.
        """
        projects = session.query(Project).filter_by(owner=user)
        return projects
def get_spider(self, session: Session, user: User,
project_id, spider_id) -> Spider:
project = session.query(Project).filter_by(owner=user,
id=project_id).first()
if not project:
raise ProjectNotFound()
spider = session.query(Spider).filter_by(project_id=project.id,
id=spider_id).first()
if not spider:
raise SpiderNotFound()
return spider
def get_project(self, session: Session, user: User, project_id) -> Project:
project = session.query(Project).filter_by(owner=user,
id=project_id).first()
if not project:
raise ProjectNotFound()
return project
    def get_project_by_name(self, session: Session, user: User,
                            project_name) -> Project:
        """Return *user*'s project named *project_name*, or None when absent.

        Unlike :meth:`get_project`, a missing project does not raise here;
        the caller is expected to handle the ``None`` result.
        """
        project = session.query(Project).filter_by(owner=user,
                                                   name=project_name).first()
        return project
    def get_job_figure(self, session: Session,
                       job: SpiderExecutionQueue) -> dict:
        """Build the effective SpiderSetting ("figure") for a queued job.

        Starts from the spider's stored figure JSON when present (otherwise
        a blank setting for the spider name), then overlays the spider's
        stored parameters.

        NOTE(review): the return annotation says ``dict`` but a
        SpiderSetting instance is returned.
        """
        # Re-query by id so the object is attached to *this* session — the
        # passed instance may come from elsewhere; TODO confirm.
        job = session.query(SpiderExecutionQueue).get(job.id)
        if job.spider.figure and job.spider.figure.text:
            figure = SpiderSetting.from_json(job.spider.figure.text)
        else:
            figure = SpiderSetting(job.spider_name)
        figure.spider = job.spider_name
        for parameter in job.spider.parameters:
            figure.spider_parameters[parameter.parameter_key] = parameter.value
        return figure
    def get_job_egg(self, session: Session, job: SpiderExecutionQueue):
        """Return a readable binary stream of the egg for the job's project.

        Prefers the newest versioned Package row; falls back to the legacy
        ProjectStorage location when the project has no Package records.
        The caller is responsible for closing the returned file object.
        """
        project = job.spider.project
        try:
            package = project.packages[0]
            return open(package.file_path, 'rb')
        except IndexError:
            logger.warning('get_job_egg IndexError when retrieving project '
                           'packages.')
            # Legacy fallback: read the egg straight from project storage.
            project_storage_dir = self.project_storage_dir
            project_storage = ProjectStorage(project_storage_dir, project)
            version, f_egg = project_storage.get_egg()
            logger.debug('get project version, project id: %s version: %s',
                         project.id, version)
            return f_egg
| StarcoderdataPython |
1627239 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = [
'GetDatasetGroupResult',
'AwaitableGetDatasetGroupResult',
'get_dataset_group',
'get_dataset_group_output',
]
@pulumi.output_type
class GetDatasetGroupResult:
    """Result shape of the ``getDatasetGroup`` invoke (SDK-generated code)."""
    def __init__(__self__, dataset_arns=None, dataset_group_arn=None, domain=None, tags=None):
        if dataset_arns and not isinstance(dataset_arns, list):
            raise TypeError("Expected argument 'dataset_arns' to be a list")
        pulumi.set(__self__, "dataset_arns", dataset_arns)
        if dataset_group_arn and not isinstance(dataset_group_arn, str):
            raise TypeError("Expected argument 'dataset_group_arn' to be a str")
        pulumi.set(__self__, "dataset_group_arn", dataset_group_arn)
        if domain and not isinstance(domain, str):
            raise TypeError("Expected argument 'domain' to be a str")
        pulumi.set(__self__, "domain", domain)
        if tags and not isinstance(tags, list):
            raise TypeError("Expected argument 'tags' to be a list")
        pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter(name="datasetArns")
    def dataset_arns(self) -> Optional[Sequence[str]]:
        """
        An array of Amazon Resource Names (ARNs) of the datasets that you want to include in the dataset group.
        """
        return pulumi.get(self, "dataset_arns")

    @property
    @pulumi.getter(name="datasetGroupArn")
    def dataset_group_arn(self) -> Optional[str]:
        """
        The Amazon Resource Name (ARN) of the dataset group.
        """
        return pulumi.get(self, "dataset_group_arn")

    @property
    @pulumi.getter
    def domain(self) -> Optional['DatasetGroupDomain']:
        """
        The domain associated with the dataset group. When you add a dataset to a dataset group, this value and the value specified for the Domain parameter of the CreateDataset operation must match.
        """
        return pulumi.get(self, "domain")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Sequence['outputs.DatasetGroupTag']]:
        """
        The tags associated with the dataset group.  (The upstream generator
        described these as "Application Insights application" tags, which is
        a copy-paste artifact.)
        """
        return pulumi.get(self, "tags")
class AwaitableGetDatasetGroupResult(GetDatasetGroupResult):
    """Awaitable wrapper: ``await`` immediately yields the resolved result."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The ``if False: yield`` makes this a generator function without
        # ever suspending; awaiting simply returns the plain result object
        # (standard Pulumi SDK codegen pattern).
        if False:
            yield self
        return GetDatasetGroupResult(
            dataset_arns=self.dataset_arns,
            dataset_group_arn=self.dataset_group_arn,
            domain=self.domain,
            tags=self.tags)
def get_dataset_group(dataset_group_arn: Optional[str] = None,
                      opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDatasetGroupResult:
    """
    Represents a dataset group that holds a collection of related datasets


    :param str dataset_group_arn: The Amazon Resource Name (ARN) of the dataset group to delete.
    """
    invoke_args = {'datasetGroupArn': dataset_group_arn}
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    result = pulumi.runtime.invoke('aws-native:forecast:getDatasetGroup',
                                   invoke_args, opts=opts,
                                   typ=GetDatasetGroupResult).value

    return AwaitableGetDatasetGroupResult(
        dataset_arns=result.dataset_arns,
        dataset_group_arn=result.dataset_group_arn,
        domain=result.domain,
        tags=result.tags)
@_utilities.lift_output_func(get_dataset_group)
def get_dataset_group_output(dataset_group_arn: Optional[pulumi.Input[str]] = None,
                             opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDatasetGroupResult]:
    """
    Represents a dataset group that holds a collection of related datasets

    The body is intentionally empty: ``lift_output_func`` wraps
    :func:`get_dataset_group` to provide the Output-based implementation.

    :param str dataset_group_arn: The Amazon Resource Name (ARN) of the dataset group to delete.
    """
    ...
| StarcoderdataPython |
1768763 | <reponame>sandbox-data-catalog/hiroshima-opendata-catalog<filename>ckanext-hiroshima/ckanext/hiroshima/lib/mailer.py
import ckan.plugins.toolkit as toolkit
import ckan.lib.mailer as mailer
from ckan.common import config
def send_confirm_mail_address(user_name, email, token):
    """Send the data-request confirmation mail with the completion link.

    :param user_name: display name rendered into the mail body.
    :param email: recipient address.
    :param token: opaque token embedded in the confirmation URL.
    :raises ckan.lib.mailer.MailerException: when SMTP delivery fails; the
        exception propagates unchanged to the caller.
    """
    host = config['ckan.host']
    # NOTE(review): the scheme is hard-coded to http; confirm this is
    # intended for deployments served over https.
    mail_extra_vars = {
        'name': user_name,
        'link': 'http://' + host + '/dataset/complete/' + str(token)
    }
    # The original wrapped this call in ``try/except MailerException: raise``,
    # which only re-raised the same exception; the no-op wrapper was removed.
    mailer.mail_recipient(
        recipient_name=None,
        recipient_email=email,
        subject=toolkit.render('mail/request_data_subject.txt', {}),
        body=toolkit.render('mail/request_data.txt', mail_extra_vars),
        headers=[])
def send_request_to_other_consortium(user_name, user_email, package_id,
                                     consortium_name, consortium_email):
    """Notify a consortium about a user's data request for one dataset.

    A ``Cc`` header is added when ``smtp.mail_cc`` is configured.

    :raises ckan.lib.mailer.MailerException: when SMTP delivery fails; the
        exception propagates unchanged to the caller.
    """
    host = config['ckan.host']
    mail_extra_vars = {
        'user_name': user_name,
        'user_email': user_email,
        'consortium_name': consortium_name,
        'link': 'http://' + host + '/dataset/' + str(package_id)
    }
    headers = {}
    cc_email = config.get('smtp.mail_cc')
    if cc_email:
        headers['Cc'] = u"%s" % (cc_email)
    # The redundant ``try/except MailerException: raise`` wrapper was
    # removed; delivery errors still propagate to the caller.
    mailer.mail_recipient(
        recipient_name=None,
        recipient_email=consortium_email,
        subject=toolkit.render('mail/request_consortium_subject.txt', {}),
        body=toolkit.render('mail/request_consortium.txt', mail_extra_vars),
        headers=headers)
raise
| StarcoderdataPython |
199977 | <gh_stars>1-10
import os
from conan.tools.files import rename
from conan.tools.microsoft import msvc_runtime_flag
from conans import CMake, ConanFile, tools
from conans.errors import ConanInvalidConfiguration
required_conan_version = ">=1.43.0"
class AwsSdkCppConan(ConanFile):
name = "aws-sdk-cpp"
license = "Apache-2.0"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/aws/aws-sdk-cpp"
description = "AWS SDK for C++"
topics = ("aws", "cpp", "cross-platform", "amazon", "cloud")
settings = "os", "arch", "compiler", "build_type"
_sdks = (
"access-management",
"accessanalyzer",
"acm",
"acm-pca",
"alexaforbusiness",
"amp",
"amplify",
"amplifybackend",
"apigateway",
"apigatewaymanagementapi",
"apigatewayv2",
"appconfig",
"appflow",
"appintegrations",
"application-autoscaling",
"application-insights",
"appmesh",
"appstream",
"appsync",
"athena",
"auditmanager",
"autoscaling",
"autoscaling-plans",
"awstransfer",
"backup",
"batch",
"braket",
"budgets",
"ce",
"chime",
"cloud9",
"clouddirectory",
"cloudformation",
"cloudfront",
"cloudhsm",
"cloudhsmv2",
"cloudsearch",
"cloudsearchdomain",
"cloudtrail",
"codeartifact",
"codebuild",
"codecommit",
"codedeploy",
"codeguru-reviewer",
"codeguruprofiler",
"codepipeline",
"codestar",
"codestar-connections",
"codestar-notifications",
"cognito-identity",
"cognito-idp",
"cognito-sync",
"comprehend",
"comprehendmedical",
"compute-optimizer",
"config",
"connect",
"connect-contact-lens",
"connectparticipant",
"cur",
"customer-profiles",
"databrew",
"dataexchange",
"datapipeline",
"datasync",
"dax",
"detective",
"devicefarm",
"devops-guru",
"directconnect",
"discovery",
"dlm",
"dms",
"docdb",
"ds",
"dynamodb",
"dynamodbstreams",
"ebs",
"ec2",
"ec2-instance-connect",
"ecr",
"ecr-public",
"ecs",
"eks",
"elastic-inference",
"elasticache",
"elasticbeanstalk",
"elasticfilesystem",
"elasticloadbalancing",
"elasticloadbalancingv2",
"elasticmapreduce",
"elastictranscoder",
"email",
"emr-containers",
"es",
"eventbridge",
"events",
"firehose",
"fms",
"forecast",
"forecastquery",
"frauddetector",
"fsx",
"gamelift",
"glacier",
"globalaccelerator",
"glue",
"greengrass",
"greengrassv2",
"groundstation",
"guardduty",
"health",
"healthlake",
"honeycode",
"iam",
"identity-management",
"identitystore",
"imagebuilder",
"importexport",
"inspector",
"iot",
"iot-data",
"iot-jobs-data",
"iot1click-devices",
"iot1click-projects",
"iotanalytics",
"iotdeviceadvisor",
"iotevents",
"iotevents-data",
"iotfleethub",
"iotsecuretunneling",
"iotsitewise",
"iotthingsgraph",
"iotwireless",
"ivs",
"kafka",
"kendra",
"kinesis",
"kinesis-video-archived-media",
"kinesis-video-media",
"kinesis-video-signaling",
"kinesisanalytics",
"kinesisanalyticsv2",
"kinesisvideo",
"kms",
"lakeformation",
"lambda",
"lex",
"lex-models",
"lexv2-models",
"lexv2-runtime",
"license-manager",
"lightsail",
"location",
"logs",
"lookoutvision",
"machinelearning",
"macie",
"macie2",
"managedblockchain",
"marketplace-catalog",
"marketplace-entitlement",
"marketplacecommerceanalytics",
"mediaconnect",
"mediaconvert",
"medialive",
"mediapackage",
"mediapackage-vod",
"mediastore",
"mediastore-data",
"mediatailor",
"meteringmarketplace",
"migrationhub-config",
"mobile",
"mobileanalytics",
"monitoring",
"mq",
"mturk-requester",
"mwaa",
"neptune",
"network-firewall",
"networkmanager",
"opsworks",
"opsworkscm",
"organizations",
"outposts",
"personalize",
"personalize-events",
"personalize-runtime",
"pi",
"pinpoint",
"pinpoint-email",
"polly",
"polly-sample",
"pricing",
"qldb",
"qldb-session",
"queues",
"quicksight",
"ram",
"rds",
"rds-data",
"redshift",
"redshift-data",
"rekognition",
"resource-groups",
"resourcegroupstaggingapi",
"robomaker",
"route53",
"route53domains",
"route53resolver",
"s3",
"s3-crt",
"s3-encryption",
"s3control",
"s3outposts",
"sagemaker",
"sagemaker-a2i-runtime",
"sagemaker-edge",
"sagemaker-featurestore-runtime",
"sagemaker-runtime",
"savingsplans",
"schemas",
"sdb",
"secretsmanager",
"securityhub",
"serverlessrepo",
"service-quotas",
"servicecatalog",
"servicecatalog-appregistry",
"servicediscovery",
"sesv2",
"shield",
"signer",
"sms",
"sms-voice",
"snowball",
"sns",
"sqs",
"ssm",
"sso",
"sso-admin",
"sso-oidc",
"states",
"storagegateway",
"sts",
"support",
"swf",
"synthetics",
"text-to-speech",
"textract",
"timestream-query",
"timestream-write",
"transcribe",
"transcribestreaming",
"transfer",
"translate",
"waf",
"waf-regional",
"wafv2",
"wellarchitected",
"workdocs",
"worklink",
"workmail",
"workmailmessageflow",
"workspaces",
"xray",
)
options = {
**{
"shared": [True, False],
"fPIC": [True, False],
"min_size": [True, False],
},
**{ x: [True, False] for x in _sdks},
}
default_options = {key: False for key in options.keys()}
default_options["fPIC"] = True
default_options["access-management"] = True
default_options["identity-management"] = True
default_options["monitoring"] = True
default_options["queues"] = True
default_options["s3-encryption"] = True
default_options["transfer"] = True
default_options["text-to-speech"] = True
generators = "cmake", "cmake_find_package"
short_paths = True
_cmake = None
    @property
    def _source_subfolder(self):
        # Directory (relative to the recipe folder) the upstream sources are
        # extracted into.
        return "source_subfolder"
@property
def _is_msvc(self):
return str(self.settings.compiler) in ["Visual Studio", "msvc"]
    @property
    def _internal_requirements(self):
        # High-level convenience SDKs mapped to the low-level SDKs they pull
        # in; consumed by package_id() and package_info().
        return {
            "access-management": ["iam", "cognito-identity"],
            "identity-management": ["cognito-identity", "sts"],
            "queues": ["sqs"],
            "s3-encryption": ["s3", "kms"],
            "text-to-speech": ["polly"],
            "transfer": ["s3"],
        }
    @property
    def _use_aws_crt_cpp(self):
        # Starting with 1.9 the SDK is built on top of the aws-crt-cpp stack
        # instead of the standalone aws-c-event-stream dependency.
        return tools.Version(self.version) >= "1.9"
    def export_sources(self):
        # Bundle the wrapper CMakeLists and all local patch files with the
        # recipe so they are available at build time.
        self.copy("CMakeLists.txt")
        for patch in self.conan_data.get("patches", {}).get(self.version, []):
            self.copy(patch["patch_file"])
    def config_options(self):
        # fPIC has no meaning on Windows.
        if self.settings.os == "Windows":
            del self.options.fPIC
        # The s3-crt SDK only exists once the build is based on aws-crt-cpp
        # (>= 1.9), so hide the option on older versions.
        if tools.Version(self.version) < "1.9":
            delattr(self.options, "s3-crt")
    def configure(self):
        # fPIC is irrelevant for shared builds.
        if self.options.shared:
            del self.options.fPIC
    def requirements(self):
        self.requires("aws-c-common/0.6.19")
        if self._use_aws_crt_cpp:
            # >= 1.9: the SDK sits on top of the aws-crt-cpp stack.
            self.requires("aws-c-cal/0.5.13")
            self.requires("aws-c-http/0.6.13")
            self.requires("aws-c-io/0.10.20")
            self.requires("aws-crt-cpp/0.17.23")
        else:
            self.requires("aws-c-event-stream/0.2.7")
        if self.settings.os != "Windows":
            # Non-Windows builds use libcurl + OpenSSL for HTTP/TLS
            # (Windows uses the platform WinHTTP/WinINet libraries instead,
            # see package_info()).
            self.requires("openssl/1.1.1n")
            self.requires("libcurl/7.80.0")
        if self.settings.os in ["Linux", "FreeBSD"]:
            if self.options.get_safe("text-to-speech"):
                self.requires("pulseaudio/14.2")
    def validate(self):
        """Reject configurations known to be broken upstream."""
        if (self.options.shared
                and self.settings.compiler == "gcc"
                and tools.Version(self.settings.compiler.version) < "6.0"):
            raise ConanInvalidConfiguration(
                "Doesn't support gcc5 / shared. "
                "See https://github.com/conan-io/conan-center-index/pull/4401#issuecomment-802631744"
            )
        if (tools.Version(self.version) < "1.9.234"
                and self.settings.compiler == "gcc"
                and tools.Version(self.settings.compiler.version) >= "11.0"
                and self.settings.build_type == "Release"):
            raise ConanInvalidConfiguration(
                "Versions prior to 1.9.234 don't support release builds on >= gcc 11 "
                "See https://github.com/aws/aws-sdk-cpp/issues/1505"
            )
        if self._use_aws_crt_cpp:
            if self._is_msvc and "MT" in msvc_runtime_flag(self):
                raise ConanInvalidConfiguration("Static runtime is not working for more recent releases")
        else:
            if self.settings.os == "Macos" and self.settings.arch == "armv8":
                raise ConanInvalidConfiguration(
                    "This version doesn't support arm8. "
                    "See https://github.com/aws/aws-sdk-cpp/issues/1542"
                )
    def package_id(self):
        # High-level components imply their low-level SDKs; reflect that in
        # the package id so either option spelling resolves to one binary.
        for hl_comp in self._internal_requirements.keys():
            if getattr(self.options, hl_comp):
                for internal_requirement in self._internal_requirements[hl_comp]:
                    setattr(self.info.options, internal_requirement, True)
    def source(self):
        # Download and unpack the release tarball described in conandata.yml.
        tools.get(**self.conan_data["sources"][self.version],
                  destination=self._source_subfolder, strip_root=True)
    def _configure_cmake(self):
        """Create (once) and cache the CMake helper with all SDK options."""
        if self._cmake:
            return self._cmake
        self._cmake = CMake(self)
        # Only build the requested SDKs (plus core, which is always needed).
        build_only = ["core"]
        for sdk in self._sdks:
            if self.options.get_safe(sdk):
                build_only.append(sdk)
        self._cmake.definitions["BUILD_ONLY"] = ";".join(build_only)

        self._cmake.definitions["ENABLE_UNITY_BUILD"] = True
        self._cmake.definitions["ENABLE_TESTING"] = False
        self._cmake.definitions["AUTORUN_UNIT_TESTS"] = False
        # Dependencies come from Conan, not from the SDK's vendored build.
        self._cmake.definitions["BUILD_DEPS"] = False
        if self.settings.os != "Windows":
            self._cmake.definitions["ENABLE_OPENSSL_ENCRYPTION"] = True

        self._cmake.definitions["MINIMIZE_SIZE"] = self.options.min_size
        if self._is_msvc and not self._use_aws_crt_cpp:
            self._cmake.definitions["FORCE_SHARED_CRT"] = "MD" in msvc_runtime_flag(self)
        if tools.cross_building(self):
            # try_run checks cannot execute when cross-building; pin results.
            self._cmake.definitions["CURL_HAS_H2_EXITCODE"] = "0"
            self._cmake.definitions["CURL_HAS_H2_EXITCODE__TRYRUN_OUTPUT"] = ""
            self._cmake.definitions["CURL_HAS_TLS_PROXY_EXITCODE"] = "0"
            self._cmake.definitions["CURL_HAS_TLS_PROXY_EXITCODE__TRYRUN_OUTPUT"] = ""
        self._cmake.configure()
        return self._cmake
    def build(self):
        # Apply local patches first, then drive the upstream CMake build.
        for patch in self.conan_data.get("patches", {}).get(self.version, []):
            tools.patch(**patch)
        cmake = self._configure_cmake()
        cmake.build()
    @property
    def _res_folder(self):
        # Package subfolder receiving the exported CMake helper scripts.
        return "res"
    def _create_project_cmake_module(self):
        """Export the SDK's own CMake helper scripts so downstream projects
        (e.g. aws-cdi-sdk) can build plugins against this package."""
        # package files needed to build other components (e.g. aws-cdi-sdk) with this SDK
        for file in [
            "cmake/compiler_settings.cmake",
            "cmake/initialize_project_version.cmake",
            "cmake/utilities.cmake",
            "cmake/sdk_plugin_conf.cmake",
            "toolchains/cmakeProjectConfig.cmake",
            "toolchains/pkg-config.pc.in",
            "aws-cpp-sdk-core/include/aws/core/VersionConfig.h"
        ]:
            self.copy(file, src=self._source_subfolder, dst=self._res_folder)
            # The scripts reference the source tree; repoint them at a
            # variable the consumer supplies.
            tools.replace_in_file(os.path.join(self.package_folder, self._res_folder, file), "CMAKE_CURRENT_SOURCE_DIR", "AWS_NATIVE_SDK_ROOT", strict=False)

        # avoid getting error from hook
        with tools.chdir(os.path.join(self.package_folder, self._res_folder)):
            rename(self, os.path.join("toolchains", "cmakeProjectConfig.cmake"), os.path.join("toolchains", "cmakeProjectConf.cmake"))
            tools.replace_in_file(os.path.join("cmake", "utilities.cmake"), "cmakeProjectConfig.cmake", "cmakeProjectConf.cmake")
    def package(self):
        self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
        cmake = self._configure_cmake()
        cmake.install()
        if self._is_msvc:
            # MSVC drops import libraries under bin/; move them into lib/.
            self.copy(pattern="*.lib", dst="lib", keep_path=False)
            tools.remove_files_by_mask(os.path.join(self.package_folder, "bin"), "*.lib")
        # Drop upstream CMake/pkg-config files; Conan generates its own.
        tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
        tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
        self._create_project_cmake_module()
    def package_info(self):
        """Declare the core component, one component per enabled SDK (plus a
        find_package() alias each), and platform-specific system libs."""
        self.cpp_info.set_property("cmake_file_name", "AWSSDK")

        # core component
        self.cpp_info.components["core"].set_property("cmake_target_name", "AWS::aws-sdk-cpp-core")
        self.cpp_info.components["core"].set_property("pkg_config_name", "aws-sdk-cpp-core")
        self.cpp_info.components["core"].libs = ["aws-cpp-sdk-core"]
        self.cpp_info.components["core"].requires = ["aws-c-common::aws-c-common-lib"]
        if self._use_aws_crt_cpp:
            self.cpp_info.components["core"].requires.extend([
                "aws-c-cal::aws-c-cal-lib",
                "aws-c-http::aws-c-http-lib",
                "aws-c-io::aws-c-io-lib",
                "aws-crt-cpp::aws-crt-cpp-lib",
            ])
        else:
            self.cpp_info.components["core"].requires.append("aws-c-event-stream::aws-c-event-stream-lib")

        # other components: explicitly enabled SDKs plus the low-level SDKs
        # implied by enabled high-level components.
        enabled_sdks = [sdk for sdk in self._sdks if self.options.get_safe(sdk)]
        for hl_comp in self._internal_requirements.keys():
            if getattr(self.options, hl_comp):
                for internal_requirement in self._internal_requirements[hl_comp]:
                    if internal_requirement not in enabled_sdks:
                        enabled_sdks.append(internal_requirement)

        for sdk in enabled_sdks:
            # TODO: there is no way to properly emulate COMPONENTS names for
            #       find_package(AWSSDK COMPONENTS <sdk>) in set_property()
            #       right now: see https://github.com/conan-io/conan/issues/10258
            self.cpp_info.components[sdk].set_property("cmake_target_name", "AWS::aws-sdk-cpp-{}".format(sdk))
            self.cpp_info.components[sdk].set_property("pkg_config_name", "aws-sdk-cpp-{}".format(sdk))
            self.cpp_info.components[sdk].requires = ["core"]
            if sdk in self._internal_requirements:
                self.cpp_info.components[sdk].requires.extend(self._internal_requirements[sdk])
            self.cpp_info.components[sdk].libs = ["aws-cpp-sdk-" + sdk]

            # TODO: to remove in conan v2 once cmake_find_package_* generators removed
            self.cpp_info.components[sdk].names["cmake_find_package"] = "aws-sdk-cpp-" + sdk
            self.cpp_info.components[sdk].names["cmake_find_package_multi"] = "aws-sdk-cpp-" + sdk
            component_alias = "aws-sdk-cpp-{}_alias".format(sdk)  # to emulate COMPONENTS names for find_package()
            self.cpp_info.components[component_alias].names["cmake_find_package"] = sdk
            self.cpp_info.components[component_alias].names["cmake_find_package_multi"] = sdk
            self.cpp_info.components[component_alias].requires = [sdk]

        # specific system_libs, frameworks and requires of components
        if self.settings.os == "Windows":
            self.cpp_info.components["core"].system_libs.extend([
                "winhttp", "wininet", "bcrypt", "userenv", "version", "ws2_32"
            ])
            if self.options.get_safe("text-to-speech"):
                self.cpp_info.components["text-to-speech"].system_libs.append("winmm")
        else:
            self.cpp_info.components["core"].requires.extend(["libcurl::curl", "openssl::openssl"])

        if self.settings.os in ["Linux", "FreeBSD"]:
            self.cpp_info.components["core"].system_libs.append("atomic")
            if self.options.get_safe("text-to-speech"):
                self.cpp_info.components["text-to-speech"].requires.append("pulseaudio::pulseaudio")

        if self.settings.os == "Macos":
            if self.options.get_safe("text-to-speech"):
                self.cpp_info.components["text-to-speech"].frameworks.append("CoreAudio")

        lib_stdcpp = tools.stdcpp_library(self)
        if lib_stdcpp:
            self.cpp_info.components["core"].system_libs.append(lib_stdcpp)

        # TODO: to remove in conan v2 once cmake_find_package_* generators removed
        self.cpp_info.filenames["cmake_find_package"] = "AWSSDK"
        self.cpp_info.filenames["cmake_find_package_multi"] = "AWSSDK"
        self.cpp_info.names["cmake_find_package"] = "AWS"
        self.cpp_info.names["cmake_find_package_multi"] = "AWS"
        self.cpp_info.components["core"].names["cmake_find_package"] = "aws-sdk-cpp-core"
        self.cpp_info.components["core"].names["cmake_find_package_multi"] = "aws-sdk-cpp-core"
        self.cpp_info.components["plugin_scripts"].requires = ["core"]
        self.cpp_info.components["plugin_scripts"].builddirs.extend([
            os.path.join(self._res_folder, "cmake"),
            os.path.join(self._res_folder, "toolchains")])
        self.cpp_info.components["plugin_scripts"].build_modules.append(os.path.join(self._res_folder, "cmake", "sdk_plugin_conf.cmake"))
| StarcoderdataPython |
1726325 | from copy import deepcopy
from mesh.generic.nodeState import LinkStatus
def findShortestPaths(numNodes, meshGraph, startNode):
    """Compute shortest hop paths from *startNode* (1-based) to every node.

    ``meshGraph`` is an adjacency matrix of link statuses; every good link
    costs 1 hop.  Returns a list indexed by destination node of path lists,
    each path a sequence of 1-based node ids from startNode onwards.
    ``pathArray`` rows are ``[node_id, distance, predecessor_list]`` with
    100 acting as "infinity" and ``[-1]`` as "no predecessor known".
    """
    ## Find shortest path to all other nodes using Dijkstra's algorithm

    # Initialize arrays
    pathArray = [[i+1,100,[-1]] for i in range(numNodes)]
    #visited = [startNode-1]
    visited = []
    pathArray[startNode-1][1] = 0 # path to self is zero
    unvisited = [i for i in range(numNodes)]
    #unvisited.remove(startNode-1) # start node has been visited

    # Start search
    currentNode = startNode-1
    pathArray[currentNode][2] = [startNode]
    while (unvisited): # continue until all nodes visited
        nextNode = -1
        # NOTE(review): nextLength is never used below.
        nextLength = 100
        for node in range(len(meshGraph[currentNode])):
            # NOTE(review): mapEntry does not depend on ``node`` — hoisting
            # it out of this loop would remove an O(n) recomputation per
            # neighbor.  Entries must equal LinkStatus.GoodLink to count;
            # callers passing plain 0/1 matrices rely on GoodLink comparing
            # equal to 1 — TODO confirm.
            mapEntry = [1 if (link == LinkStatus.GoodLink) else 0 for link in meshGraph[currentNode]] # filter out stale or no links
            if (mapEntry[node] > 0): # link exists to this node
                # Shorter path to this node
                if (mapEntry[node] + pathArray[currentNode][1] < pathArray[node][1]):
                    pathArray[node][1] = mapEntry[node] + pathArray[currentNode][1] # store shorter path to this node
                    # NOTE(review): only one predecessor is kept per node,
                    # although buildPaths iterates the predecessor list as if
                    # several equal-cost branches could be stored.
                    pathArray[node][2][0] = currentNode + 1 # store previous node

        # Visit next closest unvisited node
        visited.append(currentNode)
        unvisited.remove(currentNode)
        if len(unvisited) == 0: # all nodes visited so break out of loop
            break
        nextNode = unvisited[0]
        for node in unvisited:
            if (pathArray[node][1] < pathArray[nextNode][1]): # closer node
                nextNode = node
        currentNode = nextNode

    # Populate paths to all nodes
    pathMap = []
    for node in range(numNodes):
        currentNode = node + 1
        currentPath = [currentNode] # start path
        if (pathArray[node][2] != [-1]): # path information available
            paths = buildPaths(currentNode, startNode, currentPath, pathArray)
        else:
            paths = []
        #print("Shortest paths to node " + str(node+1) + ": ", paths)
        pathMap.append(paths)
    return pathMap
def buildPaths(currentNode, startNode, currentPath, pathArray):
    """Recursively expand *currentPath* back toward *startNode* by following
    the predecessor lists recorded in ``pathArray``.

    Returns a list of complete paths, each beginning at startNode.
    """
    outPaths = []
    paths = []
    for node in pathArray[currentNode-1][2]: # Check for path branches
        if (currentNode != node): # path continues
            # Copy before prepending so sibling branches stay independent.
            newPath = deepcopy(currentPath)
            newPath.insert(0,node)
            paths.append(newPath)
    if (len(paths) == 0): # no path found # TODO: Is this needed
        return [currentPath]
    # Iterate through path branches
    for path in paths:
        if (path[0] != startNode): # continue path
            newPaths = buildPaths(path[0], startNode, path, pathArray)
            for newPath in newPaths: # Add determined paths to output
                outPaths.append(newPath)
        else: # path ended
            outPaths.append(path)
    return outPaths
# Adjacency matrix of the test mesh: entry [i][j] == 1 when nodes i+1 and
# j+1 share a direct link (6-node ring/"circle" topology).
nodeArchitecture =[[0, 1, 0, 0, 0, 1], # circle
                   [1, 0, 1, 0, 0, 0],
                   [0, 1, 0, 1, 0, 0],
                   [0, 0, 1, 0, 1, 0],
                   [0, 0, 0, 1, 0, 1],
                   [1, 0, 0, 0, 1, 0]]
# nodeArchitecture =[[0, 0, 0, 1, 0, 0], # single diamond
# [0, 0, 1, 1, 1, 0],
# [0, 1, 0, 1, 0, 0],
# [1, 1, 1, 0, 0, 0],
# [0, 1, 0, 0, 0, 1],
# [0, 0, 0, 0, 1, 0]]
# Double diamond
# nodeArchitecture =[[0, 1, 1, 0, 0, 0, 0],
# [1, 0, 0, 1, 0, 0, 1],
# [1, 0, 0, 1, 0, 0, 0],
# [0, 1, 1, 0, 1, 1, 0],
# [0, 0, 0, 1, 0, 0, 1],
# [0, 0, 0, 1, 0, 0, 1],
# [0, 1, 0, 0, 1, 1, 0]]
# Tight grid
# nodeArchitecture = [[0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0],
# [1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0],
# [1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1],
# [1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
# [1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0],
# [0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0],
# [1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0],
# [0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1],
# [0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0],
# [0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0],
# [0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1],
# [0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0]]
# Find the shortest paths from every node to each other node.
# The original pre-allocated this table as ``[[]*n] * n`` which (because
# ``[] * n == []``) produced n references to ONE shared empty list; the
# aliasing was latent only because each slot was immediately rebound.
# Building the table directly avoids the trap entirely.
numNodes = len(nodeArchitecture)
allPaths = [findShortestPaths(numNodes, nodeArchitecture, node + 1)
            for node in range(numNodes)]
#paths = findShortestPaths(len(nodeArchitecture), nodeArchitecture, 12)
# Test relay logic: for each third node, decide whether it should relay
# traffic from sourceId to destId (it should when it lies on a route no
# longer than the direct shortest path).
sourceId = 4
destId = 6
relay = False
for node in range(len(nodeArchitecture)):
    currentNodeId = node + 1
    if currentNodeId == sourceId or currentNodeId == destId:
        continue
    # Hop counts are path lengths minus one (paths include both endpoints).
    lenPathToSource = len(allPaths[currentNodeId-1][sourceId-1][0])-1
    lenPathToDest = len(allPaths[currentNodeId-1][destId-1][0])-1
    lenSourceToDest = len(allPaths[sourceId-1][destId-1][0])-1
    #print(allPaths[sourceId-1][destId-1])
    #print(lenPathToSource, lenPathToDest, lenSourceToDest)
    if (lenSourceToDest >= (lenPathToDest + lenPathToSource)):
        relay = True
    # for path in allPaths[sourceId-1][destId-1]:
    #     if (currentNodeId in path or len(path) <): # sending node is on the path to the destination
    #         relay = True
    #         break
    #print("Current node, relay: ", currentNodeId, relay)
    # NOTE(review): resetting relay at the end of every iteration discards
    # the decision just made; this looks like leftover debug scaffolding
    # paired with the commented-out print above — confirm intent before
    # relying on `relay` after the loop.
    relay = False
| StarcoderdataPython |
65138 | <gh_stars>10-100
#!/usr/bin/env python2
##########################################################
#
# Script: txt2float.py
#
# Description: Convert GMT text grid files into float
#
##########################################################
# Basic modules
import os
import sys
import struct
from ParseHeader import *
class txt2float:
def __init__(self, hdr, infile, outfile):
self.valid = False
self.hdr = hdr
self.infile = infile
self.outfile = outfile
self.valid = True
def isValid(self):
return self.valid
def cleanup(self):
return
def _parseHdr(self):
fp = open(self.hdr, 'r')
data = fp.readlines()
fp.close()
p = ParseConfig(data)
p.showDict()
config = p.getDict()
self.ncols = int(config['ncols'])
self.nrows = int(config['nrows'])
return(0)
def main(self):
# Parse header
print "Parsing data header"
self._parseHdr()
ifp = open(self.infile, 'rb')
ofp = open(self.outfile, 'wb')
for j in xrange(0, self.nrows):
for i in xrange(0, self.ncols):
buf = ifp.readline()
val = float(buf.split()[2])
buf = struct.pack('f', val)
ofp.write(buf)
ifp.close()
ofp.close()
return 0
def usage():
    # Print the command-line synopsis for this script (Python 2 print).
    print "usage: %s <hdr> <infile> <outfile>" % (sys.argv[0])
    return
if __name__ == '__main__':
    # Expect exactly three arguments: header file, input grid, output file.
    if (len(sys.argv) != 4):
        usage()
        sys.exit(1)

    hdr = sys.argv[1]
    infile = sys.argv[2]
    outfile = sys.argv[3]
    prog = txt2float(hdr, infile, outfile)
    # main() returns 0 on success; propagate it as the process exit code.
    sys.exit(prog.main())
| StarcoderdataPython |
3353620 | <reponame>manakpandey/reacmchain<filename>api/ml/getDemand.py
def current_day():
    """Return today's day-of-year (1..366).

    The previous implementation added hand-maintained per-month offsets that
    were taken from a leap year (March started at 60 = 31 + 29), so every
    date after February 28 was off by one in non-leap years.  ``tm_yday``
    from the standard library handles both calendars correctly and replaces
    the 30-line if-chain.
    """
    from datetime import datetime
    return datetime.now().timetuple().tm_yday
def return_demand(input_array):
    """Predict demand for the given input using the pickled regression model.

    :param input_array: nested sequence holding one row of two features,
        e.g. ``[[3, 89]]``; a third column with today's day-of-year is
        appended before prediction.
    :return: the predicted value as a string.
    """
    import numpy as np
    import pandas as pd
    import pickle
    from sklearn import linear_model
    # Path is relative to the process working directory, not this module.
    filename = 'ml/finalized_model.sav'
    day = current_day()
    # input_array = [[3,89]]
    df = pd.DataFrame.from_records(input_array)
    # NOTE(review): assigning a one-element list works only when input_array
    # contains exactly one row — confirm multi-row inputs are never passed.
    df[2] = [day]
    loaded_model = pickle.load(open(filename, 'rb'))
    return str(loaded_model.predict(df)[0][0])
| StarcoderdataPython |
16980 | <filename>pygama/dsp/_processors/trap_filter.py
import numpy as np
from numba import guvectorize
@guvectorize(["void(float32[:], int32, int32, float32[:])",
              "void(float64[:], int32, int32, float64[:])",
              "void(int32[:], int32, int32, int32[:])",
              "void(int64[:], int32, int32, int64[:])"],
             "(n),(),()->(n)", nopython=True, cache=True)
def trap_filter(wf_in, rise, flat, wf_out):
    """
    Symmetric trapezoidal filter

    Fills ``wf_out`` in place with the recursive trapezoid response:
    each output is the previous output plus the newest sample, minus the
    samples leaving the rising edge, flat top and falling edge of the
    window.  The four loops are the warm-up phases in which successive
    parts of the trapezoid window slide onto the waveform, ending in the
    steady state of the full ``2*rise + flat`` window.
    """
    wf_out[0] = wf_in[0]
    # Phase 1: only the rising edge overlaps the start of the waveform.
    for i in range(1, rise):
        wf_out[i] = wf_out[i-1] + wf_in[i]
    # Phase 2: the flat top enters; samples start leaving the rising edge.
    for i in range(rise, rise+flat):
        wf_out[i] = wf_out[i-1] + wf_in[i] - wf_in[i-rise]
    # Phase 3: the falling edge enters the window.
    for i in range(rise+flat, 2*rise+flat):
        wf_out[i] = wf_out[i-1] + wf_in[i] - wf_in[i-rise] - wf_in[i-rise-flat]
    # Phase 4: steady state — the full trapezoid window is in play.
    for i in range(2*rise+flat, len(wf_in)):
        wf_out[i] = wf_out[i-1] + wf_in[i] - wf_in[i-rise] - wf_in[i-rise-flat] + wf_in[i-2*rise-flat]
1708390 | """
Setup Module to setup Python Handlers for LSST query templating
"""
import setuptools
setuptools.setup(
    name='jupyterlab_lsstquery',
    version='1.0.0-alpha6',
    packages=setuptools.find_packages(),
    install_requires=[
        'notebook',
    ],
    # Ship every data file in the package directory alongside the code.
    package_data={'jupyterlab_lsstquery': ['*']},
)
| StarcoderdataPython |
62994 | <reponame>banxi1988/iOSCodeGenerator
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ios_code_generator.generators import as_ios_swift_generator
from ios_code_generator.maps import settings_raw_type_map
from ios_code_generator.models import Model, Field
from ios_code_generator.models import model_property
__author__ = 'banxi'
class SettingsField(Field):
    """One settings entry parsed from the DSL.

    Each property renders a Swift fragment consumed by the
    UserDefaults-backed "settings" template; the emitted strings must match
    the template contract exactly.
    """
    @property
    def settings_name(self):
        # Property name as written into the generated Swift code.
        return self.name

    @property
    def settings_key(self):
        # UserDefaults key; namespaced with the model prefix when set.
        prefix = self.model.prefix
        if prefix:
            return "%s_%s" % (prefix, self.name)
        else:
            return self.name

    @property
    def settings_type(self):
        # Swift type for this field; unknown type chars fall back to String.
        return settings_raw_type_map.get(self.ftype, 'String')

    @property
    def settings_default_value(self):
        # Literal used when UserDefaults has no stored value yet.
        map = {
            'i': '0',
            'b': 'false',
            'f': '0',
            's': '""',
            '':'""'
        }
        return map.get(self.ftype, 'nil')

    @property
    def settings_type_annotation(self):
        # Value types (bool/int/float) are non-optional; everything else is
        # optional because UserDefaults may return nil for it.
        t = self.settings_type
        if self.ftype in ['b','i','f']:
            return t
        else:
            return t+"?"

    @property
    def settings_getter_type(self):
        # Name of the UserDefaults accessor used to read this field.
        map = {
            'i':'integer',
            'b':'bool',
            'f':'double',
            'u':'URL',
            's':'string',
            'd': 'object'
        }
        return map.get(self.ftype,'string')

    @property
    def settings_set_stmt(self):
        # Swift statement that writes the new value into UserDefaults.
        key = 'Keys.%s' % self.settings_name
        return 'userDefaults.set(newValue,forKey:%s)' % (key)

    @property
    def settings_get_stmt(self):
        # Swift statement that reads the value back; dates are stored as
        # plain objects and need a conditional cast.
        type = self.settings_getter_type
        key = 'Keys.%s' % self.settings_name
        stmt = 'return userDefaults.%s(forKey: %s)' % (type,key)
        if self.ftype == 'd':
            stmt += " as? Date"
        return stmt
@as_ios_swift_generator("settings")
class SettingsModel(Model):
    """Model describing a UserDefaults-backed Swift settings class."""

    field_class = SettingsField

    @property
    def settings_prefix(self):
        """Prefix used to namespace the generated settings keys."""
        return self.prefix

    @property
    def settings_sync_on_save(self):
        """Whether generated setters should synchronize after each write."""
        flag = self.model_config.get('sync_on_save', 'true').lower()
        return flag in ('1', 't', 'true', 'on')

    @classmethod
    def parse_source(cls, lines):
        """Parse DSL lines into a (model, fields) pair, linking each field
        back to its owning model."""
        first = lines[0]
        if first.startswith('-'):
            # Explicit model header line followed by the field lines.
            model = cls.parse_model_line(first)
            body = lines[1:]
        else:
            # No header: treat every line as a field of an anonymous model.
            model = cls(name=cls.FRAGMENT_NAME)
            body = lines
        fields = cls.parse_field_lines(body)
        for item in fields:
            item.model = model
        model.fields = fields
        return model, fields
| StarcoderdataPython |
3242300 | <gh_stars>100-1000
import flask
def success(data=None, status_code=200):
    """
    Factory method for creating a successful Flask response.
    :param data: Optional dict of JSON data to merge into the response
        envelope; defaults to an empty payload.
    :param status_code: Optional HTTP status code.
    :return: A tuple of (response object, status code) with the input data represented.
    """
    # Fix: avoid a mutable default argument -- a shared {} default could be
    # mutated by a caller and silently leak state across requests.
    data = data if data is not None else {}
    template = {
        'success': True,
        'message': None,
    }
    # Caller-supplied keys override the envelope defaults.
    resp_data = dict(template, **data)
    return flask.jsonify(resp_data), status_code
def error(status_code, message, failure, data=None):
    """
    Factory method for creating an error Flask response.
    :param status_code: The HTTP status code to associate with the response.
    :param message: A string describing the error.
    :param failure: A string describing the failure name/code; frontend logic checks this value as a
                    string.
    :param data: Optional dict of JSON data to merge into the response envelope.
    :return: A tuple of (response object, status code) with the input data represented.
    """
    # Fix: avoid a mutable default argument -- a shared {} default could be
    # mutated by a caller and silently leak state across requests.
    data = data if data is not None else {}
    template = {
        'success': False,
        'message': message,
        'failure': failure,
    }
    # Caller-supplied keys override the envelope defaults.
    resp_data = dict(template, **data)
    return flask.jsonify(resp_data), status_code
def undefined_error():
    """
    Factory method for creating an undefined error.
    :return: A tuple of (response object, status code) describing an undefined error.
    """
    # Fixed catch-all envelope for failures nothing else accounted for.
    payload = {
        'success': False,
        'message': 'There was an undefined server-side failure. This is probably a bug.',
        'failure': 'failure_undefined',
    }
    return flask.jsonify(payload), 500
| StarcoderdataPython |
69017 | from rest_framework.test import APIClient
from tests.app.serializers import QuoteSerializer
from tests.utils import decode_content
# Integration tests for the ?fields= / ?fields!= query-string filtering of
# DRF serializer output (drf-queryfields style). Expected payloads are the
# Monty Python fixture data served by the test app's /quotes/ endpoints.
def test_list_response_unfiltered():
    """With no filter, the list endpoint returns every serializer field."""
    response = APIClient().get('/quotes/')
    expected = [
        {
            'character': 'Customer',
            'line': "It's certainly uncontaminated by cheese",
            'sketch': 'CHEESE SHOP',
        },
        {
            'character': 'The Black Knight',
            'line': "It's just a flesh wound",
            'sketch': 'HOLY GRAIL',
        },
    ]
    content = decode_content(response)
    assert content == expected
def test_detail_response_unfiltered():
    """With no filter, the detail endpoint returns every serializer field."""
    response = APIClient().get('/quotes/parrot/')
    expected = {
        'character': 'Shopkeeper',
        'line': "Well, he's...he's, ah...probably pining for the fjords",
        'sketch': 'PET SHOP',
    }
    content = decode_content(response)
    assert content == expected
def test_list_response_filtered_includes():
    """?fields=a,b keeps only the named fields on list responses."""
    response = APIClient().get('/quotes/?fields=character,line')
    expected = [
        {
            'character': 'Customer',
            'line': "It's certainly uncontaminated by cheese",
        },
        {
            'character': 'The Black Knight',
            'line': "It's just a flesh wound",
        },
    ]
    content = decode_content(response)
    assert content == expected
def test_detail_response_filtered_includes():
    """?fields=a,b keeps only the named fields on detail responses."""
    response = APIClient().get('/quotes/parrot/?fields=character,line')
    expected = {
        'character': 'Shopkeeper',
        'line': "Well, he's...he's, ah...probably pining for the fjords",
    }
    content = decode_content(response)
    assert content == expected
def test_list_response_filtered_excludes():
    """?fields!=a drops the named field on list responses."""
    response = APIClient().get('/quotes/?fields!=character')
    expected = [
        {
            'line': "It's certainly uncontaminated by cheese",
            'sketch': 'CHEESE SHOP',
        },
        {
            'line': "It's just a flesh wound",
            'sketch': 'HOLY GRAIL',
        },
    ]
    content = decode_content(response)
    assert content == expected
def test_detail_response_filtered_excludes():
    """?fields!=a drops the named field on detail responses."""
    response = APIClient().get('/quotes/parrot/?fields!=character')
    expected = {
        'line': "Well, he's...he's, ah...probably pining for the fjords",
        'sketch': 'PET SHOP',
    }
    content = decode_content(response)
    assert content == expected
def test_response_filtered_with_some_bogus_fields():
    """Unknown field names are silently ignored; known ones still apply."""
    response = APIClient().get('/quotes/parrot/?fields=sketch,spam,eggs')
    expected = {
        'sketch': 'PET SHOP',
    }
    content = decode_content(response)
    assert content == expected
def test_response_filtered_with_only_bogus_fields():
    """If every requested field is unknown, the payload is empty."""
    response = APIClient().get('/quotes/parrot/?fields=blah')
    expected = {}
    content = decode_content(response)
    assert content == expected
def test_response_filtered_with_multiple_fields_in_separate_query_args():
    """Repeated ?fields= query args are unioned together."""
    response = APIClient().get('/quotes/parrot/?fields=character&fields=sketch')
    expected = {
        'character': 'Shopkeeper',
        'sketch': 'PET SHOP',
    }
    content = decode_content(response)
    assert content == expected
def test_response_filtered_with_include_and_exclude():
    """Includes and excludes can be combined in one request."""
    response = APIClient().get('/quotes/parrot/?fields=character&fields=sketch&fields!=line')
    expected = {
        'character': 'Shopkeeper',
        'sketch': 'PET SHOP',
    }
    content = decode_content(response)
    assert content == expected
def test_exclude_wins_for_ambiguous_filtering():
    """A field both included and excluded ends up excluded."""
    response = APIClient().get('/quotes/parrot/?fields=line,sketch&fields!=line')
    expected = {
        'sketch': 'PET SHOP',
    }
    content = decode_content(response)
    assert content == expected
def test_post_ignores_queryfields():
    # Ensures that fields aren't dropped for other types of request
    response = APIClient().post('/quotes/?fields=line,sketch')
    expected = {
        'request_method': 'POST',
        'serializer_instance_fields': ['character', 'line', 'sketch'],
        'request_query': {'fields': 'line,sketch'},
    }
    content = decode_content(response)
    assert content == expected
def test_instantiate_without_request_context():
    # just test that it doesn't crash or b0rk the serializer to omit request context
    data = {
        'character': 'the character',
        'line': 'the line',
        'sketch': 'the sketch',
    }
    serializer = QuoteSerializer(data=data)
    assert serializer.is_valid()
    assert sorted(serializer.get_fields()) == ['character', 'line', 'sketch']
| StarcoderdataPython |
def test():
    """Autograder check for an Altair choropleth exercise.

    Validates two student-created objects -- ``world_df`` (TopoJSON UrlData)
    and ``pop_dense_plot`` (a geoshape chart with a lookup transform).
    ``world_df``, ``pop_dense_plot``, ``__solution__`` (the solution source
    as a string) and ``__msg__`` (message helper) are injected into scope by
    the grading harness, not defined in this file.
    """
    # Here we can either check objects created in the solution code, or the
    # string value of the solution, available as __solution__. A helper for
    # printing formatted messages is available as __msg__. See the testTemplate
    # in the meta.json for details.
    # If an assertion fails, the message will be displayed
    assert not world_df is None, "Your answer for world_df does not exist. Have you loaded the TopoJSON data to the correct variable name?"
    assert "topo_feature" in __solution__, "The loaded data should be in TopoJSON format. In order to read TopoJSON file correctly, you need to use the alt.topo_feature() function."
    assert (
        "quantitative" in __solution__ or
        "pop_density:Q" in __solution__
    ), "Make sure you use pop_density column from gapminder_df for the color encoding. Hint: since pop_density column does not exist in world_df, Altair can't infer its data type and you need to specify that it is quantitative data."
    assert type(world_df) == alt.UrlData, "world_df does not appear to be an Altair UrlData object. Have you assigned the Altair UrlData object for the TopoJSON data to the correct variable?"
    assert world_df.url == data.world_110m.url, "Make sure you are loading the data from correct url."
    assert (world_df.format != alt.utils.schemapi.Undefined and
            world_df.format.type == 'topojson'
            ), "The loaded data should be in TopoJSON format. In order to read TopoJSON file correctly, you need to use the alt.topo_feature() function."
    assert world_df.format.feature == "countries", "Make sure to specify 'countries' feature when loading the TopoJSON file using alt.topo_feature()."
    assert not pop_dense_plot is None, "Your answer for pop_dense_plot does not exist. Have you assigned the plot to the correct variable name?"
    assert type(pop_dense_plot) == alt.Chart, "pop_dense_plot does not appear to be an Altair Chart object. Have you assigned the Altair Chart object for the plot to the correct variable?"
    assert pop_dense_plot.mark == 'geoshape', "Make sure you are using mark_geoshape for pop_dense_plot."
    # Accept the color encoding in any of Altair's equivalent spellings
    # (shorthand with :Q, or field plus an explicit type).
    assert pop_dense_plot.encoding.color != alt.utils.schemapi.Undefined and (
        pop_dense_plot.encoding.color.shorthand in {'pop_density:quantitative', 'pop_density:Q'} or
        (pop_dense_plot.encoding.color.shorthand == 'pop_density' and pop_dense_plot.encoding.color.type == 'quantitative') or
        pop_dense_plot.encoding.color.field in {'pop_density:quantitative', 'pop_density:Q'} or
        (pop_dense_plot.encoding.color.field == 'pop_density' and pop_dense_plot.encoding.color.type == 'quantitative')
    ), "Make sure you use pop_density column from gapminder_df for the color encoding. Hint: since pop_density column does not exist in world_df, Altair can't infer its data type and you need to specify that it is quantitative data."
    assert pop_dense_plot.encoding.color.scale != alt.utils.schemapi.Undefined and (
        pop_dense_plot.encoding.color.scale.scheme != alt.utils.schemapi.Undefined
    ), "Make sure to specify a colour scheme."
    assert pop_dense_plot.encoding.color.scale.domainMid == 81, "Make sure you set the domainMid of the color scale as the global median (81)."
    assert type(pop_dense_plot.transform) == list and (
        len(pop_dense_plot.transform) == 1 and
        pop_dense_plot.transform[0]['from'] != alt.utils.schemapi.Undefined and
        pop_dense_plot.transform[0]['from'].fields == ['pop_density'] and
        pop_dense_plot.transform[0]['from'].key
    ), "Make sure you use .transform_lookup() to lookup the column 'pop_density' from the gapminder_df data using 'id' as the connecting column. Hint: 'pop_density' should be inside a list."
    assert pop_dense_plot.projection != alt.utils.schemapi.Undefined and (
        pop_dense_plot.projection.scale == 80
    ), "Make sure you use 'equalEarth' projection. Hint: you can use .project() method with type argument to specify projection type."
    __msg__.good("You're correct, well done!")
| StarcoderdataPython |
1763811 | from kivy.config import Config
Config.set('input', 'mouse', 'mouse,multitouch_on_demand')
from kivy.app import App
from kivy.uix.gridlayout import GridLayout
from kivy.uix.popup import Popup
from kivy.uix.label import Label
import matplotlib.pyplot as plt
import pandas as pd
from multiprocessing import Process
class ActiveGridLayout(GridLayout):
    """Kivy root widget wiring a small active-learning demo together:
    pick a CSV, train an XGBoost "gold standard" model on it, run an
    active-learning loop against that model, and plot the results.

    NOTE(review): ``gold_model`` and ``active_result`` are class-level
    attributes; ``active_result = []`` is a shared mutable default across
    all instances (harmless here because methods rebind rather than
    mutate, but worth confirming only one instance ever exists).
    """
    gold_model = ''
    active_result = []
    @staticmethod
    def get_path():
        """Open a native file dialog and return the chosen path ('' on cancel)."""
        import tkinter as tk
        from tkinter import filedialog
        root = tk.Tk()
        # Hide the empty tkinter root window behind the dialog.
        root.withdraw()
        return filedialog.askopenfilename()
    @staticmethod
    def visualize_dataframe(filename):
        """Show the CSV at *filename* in a PySimpleGUI table window (blocks
        until the window is closed)."""
        import PySimpleGUI as sg
        # Header=None means you directly pass the columns names to the dataframe
        df = pd.read_csv(filename, sep=',', engine='python', header=None)
        # First row holds the column headers; the rest is the table body.
        data = df[1:].values.tolist()
        header_list = df.iloc[0].tolist()
        # given a pandas dataframe
        layout = [[sg.Table(values=data, max_col_width=5,
                            auto_size_columns=True,
                            vertical_scroll_only=False,
                            justification='right', alternating_row_color='blue',
                            key='_table_', headings=header_list)]]
        window = sg.Window('Table', layout)
        event, values = window.read()
        if event is None or event == 'Back':
            window.close()
    def train_gold_model(self, filename):
        """Train the XGBoost "gold standard" regressor on the CSV at
        *filename* (first 11 columns = features, column 11 = target),
        store it on ``self.gold_model`` and pop up the test metrics."""
        from sklearn.model_selection import train_test_split
        from xgboost import XGBRegressor
        # dataset
        dataset = pd.read_csv(filename)
        X = dataset.iloc[:, 0:11].values
        y = dataset.iloc[:, 11].values
        # Define our GOLD STANDARD MODEL
        gold_regressor = XGBRegressor(
            objective='reg:squarederror',
            n_estimators=500,
            learning_rate=0.01,
            max_depth=6,
            min_child_weight=1,
            subsample=0.8
        )
        # Train Test Split
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
        # Validation Split
        # NOTE(review): this second split re-splits the FULL dataset, so the
        # validation set can overlap X_test (leakage into early stopping) --
        # confirm whether it should split X_train instead.
        X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2)
        # Training ...
        gold_regressor.fit(X_train, y_train, eval_set=[(X_val, y_val)], eval_metric='mae', early_stopping_rounds=15,verbose=False)
        # Predicting the Test set results
        y_pred = gold_regressor.predict(X_test)
        # save model
        self.gold_model = gold_regressor
        # Prediction Metrics
        from sklearn.metrics import mean_absolute_error, r2_score
        mae = mean_absolute_error(y_pred, y_test)
        r2 = r2_score(y_pred, y_test)
        popup = Popup(title='Gold Standard Model', content=Label(text='Gold Model has been trained \n mean absolute '+
                      'error = {} \n R2 = {}'.format(mae, r2)),
                      auto_dismiss=True,
                      size_hint=(None, None), size=(400, 400))
        popup.open()
    def acive_learning_main(self, steps_number=10):
        """Run the active-learning loop against the trained gold model and
        store the accumulated results DataFrame on ``self.active_result``.

        NOTE(review): method name has a typo ("acive"); kept because the
        .kv layout presumably binds to it by this exact name.
        """
        import numpy as np
        from xgboost import XGBRegressor
        gold_regressor = self.gold_model
        # Part 1: choose grid for our metabolite conc
        # Allowed concentrations
        allowed_conc = {
            'nad': (0.033, 0.33),
            'folinic_acid': (0.0068, 0.068),
            'coa_conc': (0.026, 0.26),
            'nucleo_mix': (0.15, 1.5),
            'spermidine': (0.1, 1.0),
            'pga': (3.0, 30.0),
            'aa': (0.15, 1.5),
            'trna': (0.02, 0.2),
            'mg_gluta': (0.4, 4.0),
            'camp': (0.075, 0.75),
            'K_gluta': (8.0, 80.0)}
        # Part 2: make a random input for our model
        def random_input(allowed_conc, n=100, rounded=3, verbose=0):
            # Draw n random concentration vectors, one uniform sample per
            # metabolite range, rounded to `rounded` decimals.
            X_train = []
            for data_point in range(n):
                input_data = []
                if (data_point % 10000 == 0) and verbose:
                    print(data_point)
                for key, value in allowed_conc.items():
                    input_data.append(np.round(np.random.uniform(*value), rounded))
                X_train.append(input_data)
            X_train = np.array(X_train)
            return X_train
        # define our model that will be trained by active learning
        # same hyperparameter as Gold Standard model
        regressor = XGBRegressor(
            objective='reg:squarederror',
            n_estimators=500,
            learning_rate=0.01,
            max_depth=6,
            min_child_weight=1,
            subsample=0.8)
        def active_learning(regressor, gold_regressor, allowed_conc, test_size=100, steps=10):
            # Iteratively: fit on labeled points, score a large random pool,
            # keep the top `test_size` predicted points, label them with the
            # gold model, and refit on everything accumulated so far.
            ## first step
            # make first dataset
            X_train_1 = random_input(allowed_conc, test_size)
            # first fit
            regressor.fit(X_train_1, gold_regressor.predict(X_train_1))
            # save results
            result = pd.DataFrame(X_train_1)
            result['gold_yield'] = gold_regressor.predict(X_train_1)
            result['step'] = 'step_1'
            ## next steps loop
            for step in range(steps - 1):
                print('step: ', step)
                # make i th dataset
                X_train_1_1 = random_input(allowed_conc, 100000)
                df_1 = pd.DataFrame(X_train_1_1)
                df_1['pred_yield'] = regressor.predict(X_train_1_1)
                df_1 = df_1.sort_values(['pred_yield'], ascending=False)
                X_train_2 = df_1.iloc[0:test_size, 0:11].values
                # save and add results
                temp_result = pd.DataFrame(X_train_2)
                temp_result['gold_yield'] = gold_regressor.predict(X_train_2)
                temp_result['step'] = 'step_{}'.format(step + 2)
                result = pd.concat([result, temp_result], ignore_index=True)
                # update and refit regressor
                regressor.fit(result.iloc[:, 0:11].values, result.iloc[:, 11].values)
            popup = Popup(title='Active Learning',
                          content=Label(text='Active Learning Finished'),
                          auto_dismiss=True,
                          size_hint=(None, None), size=(400, 400))
            popup.open()
            return result, regressor
        self.active_result, _ = active_learning(regressor, gold_regressor, allowed_conc, steps=steps_number)
    @staticmethod
    def boxplot(data, group_name, quantity, title='', point_size=10):
        """Draw a seaborn boxplot of *quantity* grouped by *group_name*,
        overlaid with jittered individual points; blocks on plt.show()."""
        import seaborn as sns
        plt.figure(figsize=(6, 4))
        plt.style.use('seaborn-whitegrid')
        plt.style.use('seaborn-poster')
        # Usual boxplot
        sns.boxplot(x=group_name, y=quantity, data=data)
        # Add jitter with the swarmplot function.
        sns.swarmplot(x=group_name, y=quantity, data=data, color='k', size=point_size)
        plt.title(title)
        plt.show()
    def show_plot(self):
        """Plot the active-learning yields per step in a separate process so
        the blocking matplotlib window does not freeze the Kivy UI."""
        t = Process(target=self.boxplot, args=(self.active_result, 'step', 'gold_yield', 'Kivy_BoxPlot', 5))
        t.start()
class ActiveApp(App):
    """Kivy application wrapper; window title/icon plus the root widget."""
    title = 'Active Learning'
    icon = 'icon.png'
    def build(self):
        """Return the root widget shown in the app window."""
        return ActiveGridLayout()
if __name__ == '__main__':
    ActiveApp().run()
| StarcoderdataPython |
1752496 | from django.db import models
# Create your models here.
class image_upload(models.Model):
    """Django model storing a named uploaded image under MEDIA_ROOT/images/."""
    # Display name for the uploaded image.
    name = models.CharField(max_length=50)
    # Image file; path is stored relative to MEDIA_ROOT.
    img = models.ImageField(upload_to='images/')
| StarcoderdataPython |
9646 | <reponame>michalgagat/plugins_oauth
load("//tools/bzl:maven_jar.bzl", "maven_jar")
def external_plugin_deps(omit_commons_codec = True):
    """Declares the Maven jar dependencies for the OAuth plugin.

    Args:
      omit_commons_codec: when True (the default), skip declaring
        commons-codec -- for builds where the enclosing tree already
        provides it.
    """
    JACKSON_VERS = "2.10.2"
    maven_jar(
        name = "scribejava-core",
        artifact = "com.github.scribejava:scribejava-core:6.9.0",
        sha1 = "ed761f450d8382f75787e8fee9ae52e7ec768747",
    )
    maven_jar(
        name = "jackson-annotations",
        artifact = "com.fasterxml.jackson.core:jackson-annotations:" + JACKSON_VERS,
        sha1 = "3a13b6105946541b8d4181a0506355b5fae63260",
    )
    maven_jar(
        name = "jackson-databind",
        artifact = "com.fasterxml.jackson.core:jackson-databind:" + JACKSON_VERS,
        sha1 = "0528de95f198afafbcfb0c09d2e43b6e0ea663ec",
        # databind needs the matching annotations jar on its classpath.
        deps = [
            "@jackson-annotations//jar",
        ],
    )
    if not omit_commons_codec:
        maven_jar(
            name = "commons-codec",
            artifact = "commons-codec:commons-codec:1.4",
            sha1 = "4216af16d38465bbab0f3dff8efa14204f7a399a",
        )
| StarcoderdataPython |
9308 | <gh_stars>1-10
import datetime
import os
import sys
import unittest
from unittest import mock
import akismet
class AkismetTests(unittest.TestCase):
    """Shared fixtures for the akismet client tests.

    NOTE(review): subclasses exercise the live Akismet service; they need
    TEST_AKISMET_API_KEY / TEST_AKISMET_BLOG_URL set and network access.
    """
    # Credentials for the live test account, taken from the environment.
    api_key = os.getenv("TEST_AKISMET_API_KEY")
    blog_url = os.getenv("TEST_AKISMET_BLOG_URL")
    # Names of the env vars the akismet library itself reads for implicit
    # configuration (exercised by the configuration tests below).
    api_key_env_var = "PYTHON_AKISMET_API_KEY"
    blog_url_env_var = "PYTHON_AKISMET_BLOG_URL"
    def setUp(self):
        # Fresh, explicitly-configured client for each test.
        self.api = akismet.Akismet(key=self.api_key, blog_url=self.blog_url)
class AkismetConfigurationTests(AkismetTests):
    """
    Tests configuration of the Akismet class.
    """
    def test_config_from_args(self):
        """
        Configuring via explicit arguments succeeds.
        """
        api = akismet.Akismet(key=self.api_key, blog_url=self.blog_url)
        self.assertEqual(self.api_key, api.api_key)
        self.assertEqual(self.blog_url, api.blog_url)
    def test_bad_config_args(self):
        """
        Configuring with bad arguments fails.
        """
        with self.assertRaises(akismet.APIKeyError):
            akismet.Akismet(key="invalid", blog_url="http://invalid")
    def test_config_from_env(self):
        """
        Configuring via environment variables succeeds.
        """
        try:
            os.environ[self.api_key_env_var] = self.api_key
            os.environ[self.blog_url_env_var] = self.blog_url
            # Both the explicit-None and no-argument forms should fall back
            # to the environment variables.
            api = akismet.Akismet(key=None, blog_url=None)
            self.assertEqual(self.api_key, api.api_key)
            self.assertEqual(self.blog_url, api.blog_url)
            api = akismet.Akismet()
            self.assertEqual(self.api_key, api.api_key)
            self.assertEqual(self.blog_url, api.blog_url)
        finally:
            # Reset by assigning "" rather than deleting the variables;
            # presumably the library treats empty as unset -- TODO confirm.
            os.environ[self.api_key_env_var] = ""
            os.environ[self.blog_url_env_var] = ""
    def test_bad_config_env(self):
        """
        Configuring with bad environment variables fails.
        """
        try:
            os.environ[self.api_key_env_var] = "invalid"
            os.environ[self.blog_url_env_var] = "http://invalid"
            with self.assertRaises(akismet.APIKeyError):
                akismet.Akismet()
        finally:
            os.environ[self.api_key_env_var] = ""
            os.environ[self.blog_url_env_var] = ""
    def test_bad_url(self):
        """
        Configuring with a bad URL fails.
        """
        # Only http:// and https:// URLs should be accepted.
        bad_urls = (
            "example.com",
            "ftp://example.com",
            "www.example.com",
            "http//example.com",
            "https//example.com",
        )
        for url in bad_urls:
            with self.assertRaises(akismet.ConfigurationError):
                akismet.Akismet(key=self.api_key, blog_url=url)
    def test_missing_config(self):
        """
        Instantiating without any configuration fails.
        """
        with self.assertRaises(akismet.ConfigurationError):
            akismet.Akismet(key=None, blog_url=None)
        with self.assertRaises(akismet.ConfigurationError):
            akismet.Akismet()
    def test_user_agent(self):
        """
        The Akismet class creates the correct user-agent string.
        """
        api = akismet.Akismet(key=self.api_key, blog_url=self.blog_url)
        # Expected form: "Python/<major.minor> | akismet.py/<version>".
        expected_agent = "Python/{} | akismet.py/{}".format(
            "{}.{}".format(*sys.version_info[:2]), akismet.__version__
        )
        self.assertEqual(expected_agent, api.user_agent_header["User-Agent"])
class AkismetAPITests(AkismetTests):
    """
    Tests implementation of the Akismet API.

    NOTE(review): the non-mocked tests below hit the live Akismet service.
    """
    base_kwargs = {
        "user_ip": "127.0.0.1",
        "user_agent": "Mozilla",
        # Always send this when testing; Akismet recognizes it as a
        # test query and does not train/learn from it.
        "is_test": 1,
    }
    def test_verify_key_valid(self):
        """
        The verify_key operation succeeds with a valid key and URL.
        """
        self.assertTrue(akismet.Akismet.verify_key(self.api_key, self.blog_url))
    def test_verify_key_invalid(self):
        """
        The verify_key operation fails with an invalid key and URL.
        """
        self.assertFalse(akismet.Akismet.verify_key("invalid", "http://invalid"))
    def test_comment_check_spam(self):
        """
        The comment_check method correctly identifies spam.
        """
        check_kwargs = {
            # Akismet guarantees this will be classified spam.
            "comment_author": "<PASSWORD>",
            **self.base_kwargs,
        }
        self.assertTrue(self.api.comment_check(**check_kwargs))
    def test_comment_check_not_spam(self):
        """
        The comment_check method correctly identifies non-spam.
        """
        check_kwargs = {
            # Akismet guarantees this will not be classified spam.
            "user_role": "administrator",
            **self.base_kwargs,
        }
        self.assertFalse(self.api.comment_check(**check_kwargs))
    def test_submit_spam(self):
        """
        The submit_spam method succeeds.
        """
        spam_kwargs = {
            "comment_type": "comment",
            "comment_author": "<PASSWORD>",
            "comment_content": "<PASSWORD>",
            **self.base_kwargs,
        }
        self.assertTrue(self.api.submit_spam(**spam_kwargs))
    def test_submit_ham(self):
        """
        The submit_ham method succeeds.
        """
        ham_kwargs = {
            "comment_type": "comment",
            "comment_author": "Legitimate Author",
            "comment_content": "This is a legitimate comment.",
            "user_role": "administrator",
            **self.base_kwargs,
        }
        self.assertTrue(self.api.submit_ham(**ham_kwargs))
    def test_unexpected_verify_key_response(self):
        """
        Unexpected verify_key API responses are correctly handled.
        """
        # An unconfigured MagicMock response (its .text is not a recognized
        # value) should make the client raise ProtocolError.
        post_mock = mock.MagicMock()
        with mock.patch("requests.post", post_mock):
            with self.assertRaises(akismet.ProtocolError):
                akismet.Akismet.verify_key(self.api_key, self.blog_url)
    def test_unexpected_comment_check_response(self):
        """
        Unexpected comment_check API responses are correctly handled.
        """
        post_mock = mock.MagicMock()
        with mock.patch("requests.post", post_mock):
            with self.assertRaises(akismet.ProtocolError):
                check_kwargs = {"comment_author": "<PASSWORD>", **self.base_kwargs}
                self.api.comment_check(**check_kwargs)
    def test_unexpected_submit_spam_response(self):
        """
        Unexpected submit_spam API responses are correctly handled.
        """
        post_mock = mock.MagicMock()
        with mock.patch("requests.post", post_mock):
            with self.assertRaises(akismet.ProtocolError):
                spam_kwargs = {
                    "comment_type": "comment",
                    "comment_author": "<PASSWORD>",
                    "comment_content": "viagra-test-123",
                    **self.base_kwargs,
                }
                self.api.submit_spam(**spam_kwargs)
    def test_unexpected_submit_ham_response(self):
        """
        Unexpected submit_ham API responses are correctly handled.
        """
        post_mock = mock.MagicMock()
        with mock.patch("requests.post", post_mock):
            with self.assertRaises(akismet.ProtocolError):
                ham_kwargs = {
                    "comment_type": "comment",
                    "comment_author": "Legitimate Author",
                    "comment_content": "This is a legitimate comment.",
                    "user_role": "administrator",
                    **self.base_kwargs,
                }
                self.api.submit_ham(**ham_kwargs)
class AkismetRequestTests(AkismetTests):
    """
    Tests the requests constructed by the Akismet class.

    These mock requests.post and assert on the exact URL, form data and
    headers the client sends; no network access is needed.
    """
    def _get_mock(self, text):
        """
        Create a mock for requests.post() returning expected text.
        """
        post_mock = mock.MagicMock()
        post_mock.return_value.text = text
        return post_mock
    def _mock_request(self, method, endpoint, text, method_kwargs):
        """
        Issue a mocked request and verify requests.post() was called
        with the correct arguments.
        """
        # NOTE(review): this mutates the caller's method_kwargs dict in
        # place; fine for the literal dicts used below.
        method_kwargs.update(user_ip="127.0.0.1", user_agent="Mozilla", is_test=1)
        # The client is expected to add the configured blog URL itself.
        expected_kwargs = {"blog": self.blog_url, **method_kwargs}
        post_mock = self._get_mock(text)
        with mock.patch("requests.post", post_mock):
            getattr(self.api, method)(**method_kwargs)
        post_mock.assert_called_with(
            endpoint.format(self.api_key),
            data=expected_kwargs,
            headers=akismet.Akismet.user_agent_header,
        )
    def test_verify_key(self):
        """
        The request issued by verify_key() is correct.
        """
        post_mock = self._get_mock("valid")
        with mock.patch("requests.post", post_mock):
            akismet.Akismet.verify_key(self.api_key, self.blog_url)
        post_mock.assert_called_with(
            akismet.Akismet.VERIFY_KEY_URL,
            data={"key": self.api_key, "blog": self.blog_url},
            headers=akismet.Akismet.user_agent_header,
        )
    def test_comment_check(self):
        """
        The request issued by comment_check() is correct.
        """
        self._mock_request(
            "comment_check",
            akismet.Akismet.COMMENT_CHECK_URL,
            "true",
            {"comment_author": "<PASSWORD>"},
        )
    def test_submit_spam(self):
        """
        The request issued by submit_spam() is correct.
        """
        self._mock_request(
            "submit_spam",
            akismet.Akismet.SUBMIT_SPAM_URL,
            akismet.Akismet.SUBMIT_SUCCESS_RESPONSE,
            {"comment_content": "Bad comment", "comment_author": "<PASSWORD>ra-test-<PASSWORD>"},
        )
    def test_submit_ham(self):
        """
        The request issued by submit_ham() is correct.
        """
        self._mock_request(
            "submit_ham",
            akismet.Akismet.SUBMIT_HAM_URL,
            akismet.Akismet.SUBMIT_SUCCESS_RESPONSE,
            {
                "comment_content": "Good comment",
                "comment_author": "Legitimate commenter",
            },
        )
    def test_full_kwargs(self):
        """
        All optional Akismet arguments are correctly passed through.
        """
        modified_timestamp = datetime.datetime.now()
        posted_timestamp = modified_timestamp - datetime.timedelta(seconds=30)
        full_kwargs = {
            "referrer": "http://www.example.com/",
            "permalink": "http://www.example.com/#comment123",
            "comment_type": "comment",
            "comment_author": "Legitimate Author",
            "comment_author_email": "<EMAIL>",
            "comment_author_url": "http://www.example.com/",
            "comment_content": "This is a fine comment.",
            "comment_date_gmt": posted_timestamp.isoformat(),
            "comment_post_modified_gmt": modified_timestamp.isoformat(),
            "blog_lang": "en_us",
            "blog_charset": "utf-8",
            "user_role": "administrator",
            "recheck_reason": "edit",
        }
        self._mock_request(
            "comment_check", akismet.Akismet.COMMENT_CHECK_URL, "false", full_kwargs
        )
    def test_unknown_kwargs(self):
        """
        Unknown Akismet arguments are correctly rejected.
        """
        bad_kwargs = {"bad_arg": "bad_val"}
        with self.assertRaises(akismet.UnknownArgumentError):
            self._mock_request(
                "comment_check", akismet.Akismet.COMMENT_CHECK_URL, "false", bad_kwargs
            )
| StarcoderdataPython |
1711149 | from useless.stack import Stack
from useless.globals import * | StarcoderdataPython |
3271159 | from django.db import models
class Watch(models.Model):
    """A watch product: brand/model identity, price, target audience, photo."""
    # (stored value, human-readable label) pairs for the gender field.
    GENDER_CHOICES = (
        ('male', 'Male'),
        ('female', 'Female'),
        ('teenagers', 'Teenagers'),
        ('children', 'Children'),
        ('unisex', 'Unisex'),
    )
    brand = models.CharField(max_length=20, blank=False, null=False)
    model = models.CharField(max_length=20, blank=False, null=False)
    price = models.PositiveIntegerField(default=0, blank=False, null=False)
    # Fix: the default must be the stored VALUE ('male'), not the whole
    # (value, label) tuple GENDER_CHOICES[0], which would be persisted
    # verbatim and never validate against `choices`.
    gender = models.CharField(max_length=20, choices=GENDER_CHOICES, default=GENDER_CHOICES[0][0])
    image = models.ImageField(upload_to='images/watches_pics/')
    def __str__(self):
        return f'{self.brand} Model: {self.model}'
| StarcoderdataPython |
116034 | # -*- coding: utf-8 -*-
#@Author: <NAME>
#@Date: 2019-11-18 20:53:24
#@Last Modified by: <NAME>
#@Last Modified time: 2019-11-18 21:44:1
import numpy as np
import torch
import torch.nn.functional as F
import os
def compute_pairwise_distance(x):
    """Squared pairwise distances between the rows of ``x``.

    Given an ``(n, d)`` tensor, returns an ``(n, n)`` tensor whose
    ``[i, j]`` entry is ``||x_i - x_j||^2``, expanded via
    ``|x_i|^2 + |x_j|^2 - 2 <x_i, x_j>``.  Squared distances are returned
    (no square root), clamped from below for numerical safety.
    """
    sq_norms = (x * x).sum(dim=1)          # |x_i|^2, shape (n,)
    gram = torch.matmul(x, x.transpose(1, 0))  # <x_i, x_j>, shape (n, n)
    # Broadcast row norms down the columns and across the rows.
    dist = sq_norms.unsqueeze(1) + sq_norms.unsqueeze(0) - 2 * gram
    # Clamp away tiny negatives (and the exact zeros on the diagonal).
    return torch.clamp(dist, min=1e-6)
def middle_p(i, j, size):
    """Flat index of one intermediate grid point between flat indices i and j.

    Treats i and j as positions on a size-by-size grid and returns the
    "corner" point that shares a row with one of them and a column with the
    other -- the simplest single-waypoint path (more middle steps could be
    added later).
    """
    p_a = np.array([i // size, i % size])
    p_b = np.array([j // size, j % size])
    # Order the pair so that p_a carries the smaller column index.
    if p_a[1] > p_b[1]:
        p_a, p_b = p_b, p_a
    # Corner = larger row paired with the other point's column.
    if p_a[0] > p_b[0]:
        return p_a[0] * size + p_b[1]
    return p_b[0] * size + p_a[1]
def compute_norm_pairwise_distance(x):
    ''' computation of normalized pairwise distance matrix
    ---- Input
    - x: input tensor torch.Tensor (sample_number,2)
    ---- Return
    - matrix: output matrix torch.Tensor [sample_num, sample_num]

    Each pairwise "surface" distance is routed through a single
    intermediate grid point (see middle_p) and then each row is
    normalized by its sum.  Assumes sample_number lays out on a
    sqrt(n) x sqrt(n) grid -- TODO confirm with callers.
    '''
    x_pair_dist = compute_pairwise_distance(x)
    # NOTE(review): `connection` and the commented-out block below are dead
    # code left over from a straight-line-distance variant.
    connection=torch.zeros_like(x_pair_dist)
    size=np.sqrt(x.shape[0])
    # for i in range(x.shape[0]):
    #     for j in range(x.shape[0]):
    #         if i//size==j//size or i%size==j%size:
    #             connection=1
    # dist_straight=x_pair_dist*connection
    surface_dist=torch.zeros_like(x_pair_dist)
    # PERF: O(n^2) Python loop with a fresh tensor per pair; this is the
    # hot spot if sample_number grows.
    for i in range(x.shape[0]):
        for j in range(x.shape[0]):
            middle=torch.tensor(middle_p(i,j,size)).to(x.device).long()
            surface_dist[i,j]=x_pair_dist[i,middle]+x_pair_dist[middle,j]
    # Row-normalize; detach() keeps the normalizer out of the gradient path.
    normalizer = torch.sum(surface_dist, dim = -1,keepdim=True)
    x_norm_pair_dist = surface_dist / (normalizer + 1e-12).detach()
    return x_norm_pair_dist
def NDiv_loss_surface(x, y, alpha=1,mode=2):
    ''' NDiv loss function.
    ---- Input
    - x: (sample_number,2)
    #x is the 2d grid, the shortest path the min 2d
    - y: (sample_number,3)
    #y is the 3d points, the corresponidng to 2d is set by index
    - loss: normalized diversity loss.

    Compares the normalized pairwise-distance structure of the 2D grid x
    against that of the mapped 3D points y (matched by row index);
    `mode` selects how the two distance matrices are penalized against
    each other, with `alpha` scaling the grid-side distances.
    '''
    x=x.view(-1,2)
    y=y.view(-1,3)
    # Grid spacing for a 2-unit-wide domain; only used by mode 3's clamp floor.
    size=2/np.sqrt(x.shape[0])
    S = x.shape[0]
    x_norm_pair_dist = compute_norm_pairwise_distance(x)
    y_norm_pair_dist = compute_norm_pairwise_distance(y)
    # Mode 0: symmetric absolute difference of the two structures.
    if mode==0:
        ndiv_loss_matrix = torch.abs(x_norm_pair_dist - y_norm_pair_dist)
    # Mode 1: penalize only where y-distances exceed (scaled) x-distances.
    if mode==1:
        ndiv_loss_matrix = F.relu(y_norm_pair_dist-x_norm_pair_dist * alpha )
    # Mode 2 (default): penalize only where y-distances fall short.
    if mode==2:
        ndiv_loss_matrix = F.relu(x_norm_pair_dist * alpha - y_norm_pair_dist)
    # Mode 3: absolute difference with a floor proportional to grid spacing.
    if mode==3:
        ndiv_loss_matrix =torch.clamp(torch.abs(x_norm_pair_dist - y_norm_pair_dist),min=0.1*size)
    if mode==4:
        # NOTE(review): identical to mode 2 -- possibly a placeholder for a
        # variant that was never implemented.
        ndiv_loss_matrix = F.relu(x_norm_pair_dist * alpha - y_norm_pair_dist)
    # Average over the S*(S-1) ordered off-diagonal pairs.
    ndiv_loss = ndiv_loss_matrix.sum(-1).sum(-1) / (S * (S - 1))
    return ndiv_loss
if __name__ == '__main__':
    # Smoke test with random inputs.
    x=torch.rand(100,2)
    y=torch.rand(100,3)
    loss=NDiv_loss_surface(x,y)
| StarcoderdataPython |
3220574 | <reponame>tehmaze/piece
#!/usr/bin/env python2
import os
import sys
def convert(filename, stream=sys.stdout):
    """Convert one bitmap-font dump file to C source written to *stream*.

    Emits the glyph byte array plus a ``piece_font`` struct definition and
    returns the sanitized font name.  Python 2 only: relies on
    ``str.decode('hex')`` and on lines iterating as byte strings.
    """
    # Derive a C-identifier-safe name from the file name.
    fontname = os.path.splitext(os.path.basename(filename))[0]
    fontname = fontname.replace('-', '_')
    glyphs = []
    comments = []
    # Defaults used when the dump has no explicit Height/Width headers.
    h = 16
    w = 8
    with open(filename) as handle:
        for line in handle:
            line = line.rstrip()
            if line.startswith('# Height:'):
                h = int(line.split(': ')[1])
            elif line.startswith('# Width:'):
                w = int(line.split(': ')[1])
            elif line.startswith('#'):
                comments.append(line[1:].strip())
            else:
                # Data lines look like "<codepoint>:<hex byte string>".
                glyphs.extend(line.split(':')[1].decode('hex'))
    l = len(glyphs)
    # Carry the dump's comment lines over into the generated C file.
    if comments:
        for comment in comments:
            stream.write('// {0}\n'.format(comment))
        stream.write('\n')
    # NOTE(review): the array is declared with {t} = l * h entries, but only
    # l bytes are emitted below -- confirm the intended array size.
    stream.write('uint8_t piece_{0}_font_glyphs[{t}] = {{\n'.format(
        fontname,
        l=l,
        h=h,
        t=l * h,
    ))
    as_hex = lambda c: '0x%02x' % (ord(c),)
    # Index of the first byte of the final 12-byte row (no trailing comma).
    last = (len(glyphs) // 12) * 12
    for i in range(0, len(glyphs), 12):
        stream.write('    {0}'.format(
            ', '.join(map(as_hex, glyphs[i:i + 12])),
        ))
        if i != last:
            stream.write(',')
        stream.write('\n')
    stream.write('};\n\n')
    stream.write('static piece_font piece_{0}_font = {{\n'.format(fontname))
    stream.write('    "{0}",\n'.format(fontname))
    stream.write('    {w},\n    {h},\n    {l},\n'.format(w=w, h=h, l=l))
    stream.write('    piece_{0}_font_glyphs,\n'.format(fontname))
    stream.write('    0,\n    NULL\n')
    stream.write('};\n\n')
    return fontname
def convert_to(sources, target):
    """Convert every font dump in *sources* into one generated C file at
    *target*, plus a piece_font_init() that registers each converted font."""
    with open(target, 'w') as handle:
        handle.write('/* This file is generated, do not modify */\n\n')
        handle.write('/* Splint directives */\n/*@+charint@*/\n\n')
        handle.write('#include <stdint.h>\n')
        handle.write('#include <stdlib.h>\n\n')
        handle.write('#include "piece/font.h"\n')
        handle.write('#include "piece/util.h"\n\n')
        fontnames = []
        for source in sources:
            fontnames.append(convert(str(source), handle))
        # Deterministic registration order regardless of input order.
        fontnames.sort()
        handle.write('void piece_font_init(void) {\n')
        handle.write('    piece_fonts = piece_allocate(sizeof(piece_list));\n');
        handle.write('    piece_list_new(piece_fonts, piece_font_free_item);\n')
        for fontname in fontnames:
            handle.write('    piece_list_append(piece_fonts, &piece_{0}_font);\n'
                         .format(fontname))
        handle.write('    piece_font_init_alias();\n')
        handle.write('}\n\n')
def run():
    """Command-line entry point: convert one font file to C source on stdout.

    Returns None (interpreted as exit status 0 by ``sys.exit``).
    """
    import argparse
    import sys

    parser = argparse.ArgumentParser()
    parser.add_argument('font', nargs=1)
    options = parser.parse_args()
    # Bug fix: convert() requires an output stream as its second argument
    # (convert_to() calls it as convert(filename, handle)); the previous
    # single-argument call raised a TypeError. Write to stdout here.
    convert(options.font[0], sys.stdout)
# Script entry point: propagate run()'s return value as the process exit code.
if __name__ == '__main__':
    sys.exit(run())
| StarcoderdataPython |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
from functools import reduce
import operator
from seedot.compiler.antlr.seedotParser import seedotParser as SeeDotParser
import seedot.compiler.ast.ast as AST
from seedot.compiler.ast.astVisitor import ASTVisitor
class Type:
    """Base class for SeeDot types."""
    pass


class Int(Type):
    """Scalar integer type."""
    pass


class Tensor(Type):
    """Tensor type described by its shape; a float is a 0-dimensional tensor."""

    def __init__(self, shape: list):
        self.shape = shape
        self.dim = len(shape)

    def size(self):
        """Total number of elements in the tensor."""
        total = 1
        for extent in self.shape:
            total *= extent
        return total

    def isShapeOne(self):
        """True for a dimensionless tensor or one holding a single element."""
        return self.dim == 0 or self.size() == 1


def isInt(type: Type):
    """Return whether *type* is the scalar integer type."""
    return isinstance(type, Int)


def isTensor(type: Type):
    """Return whether *type* is a tensor type."""
    return isinstance(type, Tensor)


def isEqual(type1: Type, type2: Type):
    """Structural equality of two types; asserts on an Int/Tensor mix."""
    if isInt(type1) and isInt(type2):
        return True
    assert isTensor(type1) and isTensor(type2)
    if type1.dim != type2.dim:
        return False
    return type1.shape == type2.shape
class InferType(ASTVisitor):
    """AST visitor that computes and attaches a Type to every expression node.

    Each visit method propagates a copy of ``node.gamma`` (the typing
    environment mapping names to Types) to the children, stores the computed
    type in ``node.type`` and returns it.  Shape/type mismatches are reported
    via assertions.
    """

    def visitInt(self, node: AST.Int):
        node.type = Int()
        return node.type

    # Float is represented as a tensor with 0 dimension
    def visitFloat(self, node: AST.Float):
        node.type = Tensor([])
        return node.type

    def visitId(self, node: AST.ID):
        # Look the identifier up in the typing environment.
        node.type = node.gamma[node.name]
        return node.type

    def visitDecl(self, node: AST.Decl):
        node.type = Tensor(node.shape)
        return node.type

    # Matrix transpose
    def visitTransp(self, node: AST.Transp):
        node.expr.gamma = dict(node.gamma)
        exprType = self.visit(node.expr)
        # Transpose is only defined for 2-D tensors here.
        assert isTensor(exprType) and exprType.dim == 2
        [m, n] = exprType.shape
        node.type = Tensor([n, m])
        return node.type

    # Reshape the tensor with custom dimensions
    def visitReshape(self, node: AST.Reshape):
        node.expr.gamma = dict(node.gamma)
        exprType = self.visit(node.expr)
        assert isTensor(exprType) and exprType.dim >= 1
        # Reshape is valid if the total number of elements remain same after
        # reshape
        assert reduce(operator.mul, exprType.shape, 1) == reduce(
            operator.mul, node.shape, 1)
        node.type = Tensor(node.shape)
        return node.type

    # Reduces the shape of a tensor by choosing the maximum from a filter
    def visitMaxpool(self, node: AST.Maxpool):
        node.expr.gamma = dict(node.gamma)
        exprType = self.visit(node.expr)
        # NOTE(review): this destructuring raises ValueError (not the assert
        # below) when the input is not 4-D — confirm that is acceptable.
        [n1, n2, n3, n4] = exprType.shape
        # Implementation only performs maxpool over a 4D input
        assert isTensor(exprType) and exprType.dim == 4
        # Implementation needs node.dim to exactly divide matrix dimensions
        assert n2 % node.dim == 0 and n3 % node.dim == 0
        shape = [n1, n2 // node.dim, n3 // node.dim, n4]
        node.type = Tensor(shape)
        return node.type

    # Indexing a tensor
    def visitIndex(self, node: AST.Index):
        node.expr.gamma = dict(node.gamma)
        exprType = self.visit(node.expr)
        assert isTensor(exprType) and exprType.dim >= 1
        node.index.gamma = dict(node.gamma)
        indexType = self.visit(node.index)
        assert isInt(indexType)
        # Indexing strips the leading dimension.
        shape = exprType.shape[1:]
        node.type = Tensor(shape)
        return node.type

    # Currently assuming that the type of each expr is same
    def visitFuncCall(self, node: AST.FuncCall):
        type = None
        for expr in node.exprList:
            expr.gamma = dict(node.gamma)
            currType = self.visit(expr)
            if type != None:
                assert isEqual(type, currType)
            else:
                type = currType
        node.type = type
        return node.type

    def visitUop(self, node: AST.Uop):
        # A unary operator preserves the operand's type.
        node.expr.gamma = dict(node.gamma)
        node.type = self.visit(node.expr)
        return node.type

    # e BINOP f
    def visitBop1(self, node: AST.Bop1):
        """Dispatch a binary operation to the handler for its operator."""
        node.expr1.gamma = dict(node.gamma)
        eType = self.visit(node.expr1)

        node.expr2.gamma = dict(node.gamma)
        fType = self.visit(node.expr2)

        if node.op == SeeDotParser.MUL or node.op == SeeDotParser.SPARSEMUL:
            return self.visitBopMul(node, eType, fType)
        elif node.op == SeeDotParser.ADDCIR or node.op == SeeDotParser.SUBCIR:
            return self.visitBopAddOrSubCir(node, eType, fType)
        elif node.op == SeeDotParser.MULCIR:
            return self.visitBopMulCir(node, eType, fType)
        elif node.op == SeeDotParser.CONV:
            return self.visitBopConv(node, eType, fType)
        else:
            assert False

    # e * f OR e |*| f
    def visitBopMul(self, node: AST.Bop1, eType: Type, fType: Type):
        if isInt(eType) and isInt(fType):
            node.type = Int()
        elif isTensor(eType) and isTensor(fType):
            # Tensor() * Tensor(...): scalar scaling keeps the other shape.
            if eType.dim == 0:
                node.type = fType
            elif fType.dim == 0:
                node.type = eType
            # Tensor(...) * Tensor(...): 2-D matrix multiplication.
            else:
                assert eType.dim == 2 and fType.dim == 2
                [n1, n2] = eType.shape
                [n3, n4] = fType.shape
                assert n2 == n3
                node.type = Tensor([n1, n4])
        else:
            assert False
        return node.type

    # e <+> f OR e <-> f
    def visitBopAddOrSubCir(self, node: AST.Bop1, eType: Type, fType: Type):
        # Broadcast add/sub of a vector along the last axis of a tensor.
        assert isTensor(eType) and isTensor(fType)
        assert eType.dim >= fType.dim
        assert fType.dim == 1
        assert eType.shape[-1] == fType.shape[-1]
        shape = eType.shape
        node.type = Tensor(shape)
        return node.type

    # e <*> f - Point-wise multiplication
    def visitBopMulCir(self, node: AST.Bop1, eType: Type, fType: Type):
        assert isTensor(eType) and isTensor(fType)
        assert eType.dim >= 1
        assert eType.shape == fType.shape
        node.type = eType
        return node.type

    # e # f
    def visitBopConv(self, node: AST.Bop1, eType: Type, fType: Type):
        assert isTensor(eType) and isTensor(fType)
        assert eType.dim == 4 and fType.dim == 4
        # Implementation does Conv on 4D input on 4D filter
        # Input is padded with 0s to ensure that the output dimension of the
        # matrix is same as the input
        [n, h, w, cin] = eType.shape
        [hf, wf, cin_, cout] = fType.shape
        assert cin == cin_
        shape = [n, h, w, cout]
        node.type = Tensor(shape)
        return node.type

    # e + f OR e - f
    def visitBop2(self, node: AST.Bop2):
        node.expr1.gamma = dict(node.gamma)
        eType = self.visit(node.expr1)

        node.expr2.gamma = dict(node.gamma)
        fType = self.visit(node.expr2)

        if isInt(eType) and isInt(fType):
            pass
        elif isTensor(eType) and isTensor(fType):
            # Element-wise add/sub requires identical shapes.
            assert eType.shape == fType.shape
        else:
            assert False

        node.type = eType
        return node.type

    def visitFunc(self, node: AST.Func):
        """Type the built-in unary functions (relu, exp, argmax, sgn, tanh)."""
        node.expr.gamma = dict(node.gamma)
        eType = self.visit(node.expr)

        # relu(e)
        if node.op == SeeDotParser.RELU:
            assert isTensor(eType) and eType.dim >= 1
            node.type = eType
        # exp(e)
        elif node.op == SeeDotParser.EXP:
            # Currently supports exp() on a tensor with single element
            assert isTensor(eType) and eType.isShapeOne()
            node.type = eType
        # argmax(e)
        elif node.op == SeeDotParser.ARGMAX:
            assert isTensor(eType) and eType.dim >= 1
            node.type = Int()
        # sgn(e)
        elif node.op == SeeDotParser.SGN:
            assert isTensor(eType) and eType.isShapeOne()
            node.type = Int()
        # tanh(e)
        elif node.op == SeeDotParser.TANH:
            assert isTensor(eType) and eType.dim == 2
            node.type = eType
        else:
            assert False

        return node.type

    # $(x=[1:5]) e
    def visitSum(self, node: AST.Sum):
        # The summation index is an Int bound inside the body's environment.
        node.expr.gamma = dict(node.gamma)
        node.expr.gamma[node.name] = Int()
        eType = self.visit(node.expr)

        assert isTensor(eType)
        node.type = eType

        return node.type

    # e >= 0? f : g
    def visitCond(self, node: AST.Cond):
        node.expr.gamma = dict(node.gamma)
        eType = self.visit(node.expr)

        node.trueBlock.gamma = dict(node.gamma)
        fType = self.visit(node.trueBlock)

        node.falseBlock.gamma = dict(node.gamma)
        gType = self.visit(node.falseBlock)

        # The condition must be scalar; both branches must agree in type.
        assert isInt(eType) or (isTensor(eType) and eType.isShapeOne())
        assert (isInt(fType) and isInt(gType)) or (isTensor(fType)
                                                   and isTensor(gType) and fType.shape == gType.shape)

        node.type = fType
        return node.type

    # Let x = e in f
    def visitLet(self, node: AST.Let):
        node.decl.gamma = dict(node.gamma)
        eType = self.visit(node.decl)

        # The bound name is visible while typing the body.
        node.expr.gamma = dict(node.gamma)
        node.expr.gamma[node.name] = eType
        fType = self.visit(node.expr)

        node.type = fType
        return node.type
| StarcoderdataPython |
# bot/cogs/comics.py
import io
import random
import aiohttp
from bs4 import BeautifulSoup
from discord import Color, Embed, File
from discord.ext.commands import Cog, Context, command
from bot import config
from bot.core.bot import Bot
class Comics(Cog):
    """View random comics from popular sources.

    Most commands share the same shape: fetch a "random comic" page, scrape
    its ``og:image`` meta tag, download the image and post it as an embed.
    That flow is factored into the private helpers below.
    """

    def __init__(self, bot: Bot) -> None:
        self.bot = bot
        # Shared HTTP session reused by every command.
        self.session = aiohttp.ClientSession()

    def cog_unload(self) -> None:
        """Release the shared aiohttp session when the cog is unloaded.

        Fixes a connector leak: the session was previously never closed.
        close() is a coroutine but cog_unload is sync, so schedule it.
        """
        self.bot.loop.create_task(self.session.close())

    async def _get_soup(self, url: str, headers: dict = None) -> BeautifulSoup:
        """Fetch *url* and parse the response body as HTML."""
        async with self.session.get(url, headers=headers) as response:
            return BeautifulSoup(await response.text(), "html.parser")

    async def _get_image(self, url: str) -> io.BytesIO:
        """Download *url* into an in-memory buffer."""
        async with self.session.get(url) as response:
            return io.BytesIO(await response.read())

    async def _send_og_comic(self, ctx: Context, page_url: str, title: str, filename: str) -> None:
        """Scrape the og:image of *page_url* and send it as an embed."""
        async with ctx.typing():
            soup = await self._get_soup(page_url)
            img_url = soup.find(property="og:image")["content"]
            img = await self._get_image(img_url)
            embed = Embed(title=title, color=Color.blurple())
            embed.set_image(url=f"attachment://{filename}")
            await ctx.send(file=File(img, filename), embed=embed)

    @command()
    async def ohno(self, ctx: Context) -> None:
        """Sends a random 'Webcomic Name' comic."""
        await self._send_og_comic(ctx, "http://webcomicname.com/random", "Random Webcomic", "ohno.png")

    @command()
    async def smbc(self, ctx: Context) -> None:
        """Sends a random 'Saturday Morning' comic."""
        headers = {"Connection": "keep-alive"}
        async with ctx.typing():
            # Pick a random comic slug from the archive page's <select> element.
            soup = await self._get_soup("http://www.smbc-comics.com/comic/archive", headers)
            all_comics = soup.find("select", attrs={"name": "comic"})
            all_comics_url_stubs = [option["value"] for option in all_comics.findChildren()]
            comic_url = f"http://www.smbc-comics.com/{random.choice(all_comics_url_stubs)}"

            soup = await self._get_soup(comic_url, headers)
            img_url = soup.find(property="og:image")["content"]
            img = await self._get_image(img_url)
            embed = Embed(title="Random Sunday Morning", color=Color.blurple())
            embed.set_image(url="attachment://smbc.png")
            await ctx.send(file=File(img, "smbc.png"), embed=embed)

    @command()
    async def pbf(self, ctx: Context) -> None:
        """Sends a random 'The Perry Bible' comic."""
        await self._send_og_comic(ctx, "http://pbfcomics.com/random", "Random Perry Bible", "pbf.png")

    @command()
    async def cah(self, ctx: Context) -> None:
        """Sends a random 'Cyanide and Happiness' comic."""
        await self._send_og_comic(
            ctx, "http://explosm.net/comics/random", "Random Cyanide and Happiness", "cah.png"
        )

    @command()
    async def xkcd(self, ctx: Context, comic_type: str = "latest") -> None:
        """See the latest/a random 'xkcd' comic."""
        comic_type = comic_type.lower()

        # "latest"/"random" start from the current-comic endpoint; anything
        # else is treated as an explicit comic number.
        if comic_type not in ["latest", "random"]:
            url = f"https://xkcd.com/{comic_type}/info.0.json"
        else:
            url = "https://xkcd.com/info.0.json"

        if comic_type == "random":
            # Reuse the shared session instead of opening a throwaway one.
            async with self.session.get("https://xkcd.com/info.0.json") as r:
                data = await r.json()
            random_comic = random.randint(1, data["num"])
            url = f"https://xkcd.com/{random_comic}/info.0.json"

        async with self.session.get(url) as r:
            if r.status == 200:
                data = await r.json()
                day, month, year = data["day"], data["month"], data["year"]
                comic_num = data["num"]
                embed = Embed(
                    title=data["title"],
                    description=data["alt"],
                    color=Color.blurple(),
                )
                embed.set_image(url=data["img"])
                embed.set_footer(
                    text=f"Comic date : [{day}/{month}/{year}] | Comic Number - {comic_num}"
                )
                await ctx.send(embed=embed)
            else:
                # Invalid comic number: show usage along with the valid range.
                async with self.session.get("https://xkcd.com/info.0.json") as req:
                    data = await req.json()
                latest_comic_num = data["num"]
                help_embed = Embed(
                    title="XKCD HELP",
                    description=f"""
                    **{config.COMMAND_PREFIX}xkcd latest** - (Get the latest comic)
                    **{config.COMMAND_PREFIX}xkcd <num>** - (Enter a comic number | range 1 to {latest_comic_num})
                    **{config.COMMAND_PREFIX}xkcd random** - (Get a random comic)
                    """,
                )
                await ctx.send(embed=help_embed)

    @command()
    async def mrls(self, ctx: Context) -> None:
        """Sends a random 'Mr. Lovenstein' comic."""
        async with ctx.typing():
            soup = await self._get_soup("http://www.mrlovenstein.com/shuffle")
            # This site has no og:image meta tag; use the comic <img> id instead.
            img_url = f"http://www.mrlovenstein.com{soup.find(id='comic_main_image')['src']}"
            img = await self._get_image(img_url)
            embed = Embed(title="Random Mr. Lovenstein", color=Color.blurple())
            embed.set_image(url="attachment://mrls.png")
            await ctx.send(file=File(img, "mrls.png"), embed=embed)

    @command()
    async def chainsaw(self, ctx: Context) -> None:
        """Sends a random 'Chainsawsuit' comic."""
        await self._send_og_comic(
            ctx,
            "http://chainsawsuit.com/comic/random/?random&nocache=1",
            "Random Chainsawsuit",
            "chainsawsuit.png",
        )

    @command()
    async def sarah(self, ctx: Context) -> None:
        """Sends a random 'Sarah's Scribbles' comic."""
        await self._send_og_comic(
            ctx,
            "http://www.gocomics.com/random/sarahs-scribbles",
            "Random Sarah Scribbles",
            "sarahscribbles.png",
        )
def setup(bot: Bot) -> None:
    """discord.py extension entry point: register the Comics cog on the bot."""
    bot.add_cog(Comics(bot))
| StarcoderdataPython |
from rest_framework.serializers import ModelSerializer
from base.models import GrassMachine
class GrassMachineSerializer(ModelSerializer):
    """Serializer exposing the full public field set of GrassMachine."""

    class Meta:
        model = GrassMachine
        fields = [
            'id',
            'name',
            'serie_number',
            'battery_percentage',
            'model',
            'power',
            'voltage',
            'motor_type',
            'cut_type',
            'rotation_number',
            'qr_code_identifier',
            'owner',
        ]
# Advent of Code 2019 day 8: decode a Space Image Format picture.
WIDTH = 25
HEIGHT = 6
PIXELS_PER_LAYER = WIDTH * HEIGHT

# Read the flat stream of single-digit pixels.
digits = []
with open("input.txt", "r") as puzzle_input:
    for line in puzzle_input:
        for char in line:
            digits.append(int(char))

# Slice the digit stream into image layers.
layers = [digits[i:i + PIXELS_PER_LAYER]
          for i in range(0, len(digits), PIXELS_PER_LAYER)]

# For each pixel, the visible value is the first opaque (0 or 1) layer entry;
# 2 means transparent, so keep scanning deeper layers.
decoded = []
for index in range(PIXELS_PER_LAYER):
    for layer in layers:
        if layer[index] in (0, 1):
            decoded.append(layer[index])
            break

assert len(decoded) == PIXELS_PER_LAYER

# Re-fold the decoded pixels into rows of the final image.
rows = [decoded[start:start + WIDTH]
        for start in range(0, PIXELS_PER_LAYER, WIDTH)]
print(rows)

# Render: 1 -> '*', 0 -> ' '.
for row in rows:
    print("".join("*" if pixel == 1 else " " for pixel in row))

# Expected output:
#  **   **  *   ***** **
# *  * *  * *   ** *  * *
# *    *  *  * * *** * *
# * ** *  *   *  * ****
# *  * *  *   *  * *  * *
#  *** **    *   **** *  *
# bootstrap_admin/__init__.py
__version__ = '0.3.7.1'
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
"""
This file implements a DC-OPF for time series
That means that solves the OPF problem for a complete time series at once
"""
from GridCal.Engine.basic_structures import ZonalGrouping
from GridCal.Engine.Simulations.OPF.opf_templates import OpfTimeSeries, LpProblem, LpVariable, Logger
from GridCal.Engine.basic_structures import MIPSolvers
from GridCal.Engine.Core.time_series_opf_data import OpfTimeCircuit
from GridCal.Engine.Devices.enumerations import TransformerControlType, ConverterControlType, HvdcControlType, GenerationNtcFormulation
from GridCal.ThirdParty.pulp import *
def get_objective_function(Pg, Pb, LSlack, FSlack1, FSlack2, FCSlack1, FCSlack2,
                           hvdc_overload1, hvdc_overload2, hvdc_control1_slacks, hvdc_control2_slacks,
                           cost_g, cost_b, cost_l, cost_br):
    """
    Assemble the OPF objective: dispatch costs plus every slack penalty.
    :param Pg: generator LpVars (ng, nt)
    :param Pb: batteries LpVars (nb, nt)
    :param LSlack: Load slack LpVars (nl, nt)
    :param FSlack1: Branch overload slack1 (m, nt)
    :param FSlack2: Branch overload slack2 (m, nt)
    :param FCSlack1: Branch contingency overload slack1 (m, nt)
    :param FCSlack2: Branch contingency overload slack2 (m, nt)
    :param hvdc_overload1: HVDC overload slacks, from-to sense
    :param hvdc_overload2: HVDC overload slacks, to-from sense
    :param hvdc_control1_slacks: HVDC control slacks (positive sense)
    :param hvdc_control2_slacks: HVDC control slacks (negative sense)
    :param cost_g: Cost of the generators (ng, nt)
    :param cost_b: Cost of the batteries (nb, nt)
    :param cost_l: Cost of the loss of load (nl, nt)
    :param cost_br: Cost of the overload (m, nt)
    :return: LP expression to minimize
    """
    # Generation and storage dispatch costs.
    dispatch_cost = lpSum(cost_g * Pg) + lpSum(cost_b * Pb)

    # Penalty for shedding load.
    shedding_cost = lpSum(cost_l * LSlack)

    # Penalties for branch overloads in the base and contingency cases.
    overload_cost = lpSum(cost_br * (FSlack1 + FSlack2))
    contingency_cost = cost_br * lpSum(FCSlack1) + cost_br * lpSum(FCSlack2)

    # Penalties for the HVDC overload and control slacks.
    hvdc_cost = cost_br * lpSum(hvdc_overload1) + cost_br * lpSum(hvdc_overload2)
    hvdc_cost += cost_br * lpSum(hvdc_control1_slacks) + cost_br * lpSum(hvdc_control2_slacks)

    return dispatch_cost + shedding_cost + overload_cost + contingency_cost + hvdc_cost
def set_fix_generation(problem, Pg, P_profile, enabled_for_dispatch):
    """
    Pin the non-dispatchable generators to their profile values.
    :param problem: LP problem instance
    :param Pg: Array of generation variables
    :param P_profile: Array of fixed generation values
    :param enabled_for_dispatch: array of "enables" for dispatching generators
    :return: Nothing
    """
    # Indices of the generators that are NOT enabled for dispatch.
    fixed_idx = np.where(enabled_for_dispatch == False)[0]

    lpAddRestrictions2(problem=problem,
                       lhs=Pg[fixed_idx, :],
                       rhs=P_profile[fixed_idx, :],
                       name='fixed_generation',
                       op='=')
def get_power_injections(C_bus_gen, Pg, C_bus_bat, Pb, C_bus_load, LSlack, Pl):
    """
    Compose the net power injection per bus.
    :param C_bus_gen: Bus-Generators sparse connectivity matrix (n, ng)
    :param Pg: generator LpVars (ng, nt)
    :param C_bus_bat: Bus-Batteries sparse connectivity matrix (n, nb)
    :param Pb: Batteries LpVars (nb, nt)
    :param C_bus_load: Bus-Load sparse connectivity matrix (n, nl)
    :param LSlack: Load slack LpVars (nl, nt)
    :param Pl: Load values (nl, nt)
    :return: Power injection at the buses (n, nt)
    """
    generation = lpDot(C_bus_gen, Pg)
    storage = lpDot(C_bus_bat, Pb)
    # Load is reduced by the shedding slack before being subtracted.
    demand = lpDot(C_bus_load, Pl - LSlack)
    return generation + storage - demand
def add_dc_nodal_power_balance(numerical_circuit: OpfTimeCircuit, problem: LpProblem, theta, P, start_, end_):
    """
    Add the DC nodal power balance restrictions, island by island.
    :param numerical_circuit: OpfTimeCircuit instance
    :param problem: LpProblem instance
    :param theta: Voltage angles LpVars (n, nt)
    :param P: Power injection at the buses LpVars (n, nt)
    :param start_: start index of the simulated time interval
    :param end_: end index of the simulated time interval (-1 -> full profile length)
    :return: object array (n, nt) with the nodal balance restrictions
    """
    # nodal_restrictions = lpAddRestrictions2(problem=problem,
    #                                         lhs=lpDot(numerical_circuit.Bbus, theta),
    #                                         rhs=P[:, :],
    #                                         name='Nodal_power_balance_all',
    #                                         op='=')

    # do the topological computation
    calc_inputs = numerical_circuit.split_into_islands(ignore_single_node_islands=True)

    # generate the time indices to simulate
    if end_ == -1:
        end_ = len(numerical_circuit.time_array)

    # For every island, run the time series
    nodal_restrictions = np.empty((numerical_circuit.nbus, end_ - start_), dtype=object)
    for i, calc_inpt in enumerate(calc_inputs):

        # find the original indices
        bus_original_idx = np.array(calc_inpt.original_bus_idx)

        # re-pack the variables for the island and time interval
        P_island = P[bus_original_idx, :]  # the sizes already reflect the correct time span
        theta_island = theta[bus_original_idx, :]  # the sizes already reflect the correct time span
        # DC approximation: use the imaginary part of the admittance matrix.
        B_island = calc_inpt.Ybus.imag

        pqpv = calc_inpt.pqpv
        vd = calc_inpt.vd

        # Add nodal power balance for the non slack nodes
        # (fancy-index assignment stores one row of restrictions per bus).
        idx = bus_original_idx[pqpv]
        nodal_restrictions[idx] = lpAddRestrictions2(problem=problem,
                                                     lhs=lpDot(B_island[np.ix_(pqpv, pqpv)], theta_island[pqpv, :]),
                                                     rhs=P_island[pqpv, :],
                                                     name='Nodal_power_balance_pqpv_is' + str(i),
                                                     op='=')

        # Add nodal power balance for the slack nodes
        idx = bus_original_idx[vd]
        nodal_restrictions[idx] = lpAddRestrictions2(problem=problem,
                                                     lhs=lpDot(B_island[vd, :], theta_island),
                                                     rhs=P_island[vd, :],
                                                     name='Nodal_power_balance_vd_is' + str(i),
                                                     op='=')

        # slack angles equal to zero
        lpAddRestrictions2(problem=problem,
                           lhs=theta_island[vd, :],
                           rhs=np.zeros_like(theta_island[vd, :]),
                           name='Theta_vd_zero_is' + str(i),
                           op='=')

    return nodal_restrictions
def add_branch_loading_restriction(problem: LpProblem,
                                   nc: OpfTimeCircuit,
                                   theta, F, T,
                                   ratings, ratings_slack_from, ratings_slack_to,
                                   monitored, active):
    """
    Build the branch flow expressions and add the loading restrictions.
    :param problem: LpProblem instance
    :param nc: OpfTimeCircuit instance (branch impedances / control modes are read from it)
    :param theta: bus voltage angle LpVars (n, nt)
    :param F: array of branch "from" bus indices (m)
    :param T: array of branch "to" bus indices (m)
    :param ratings: array of branch ratings (m, nt)
    :param ratings_slack_from: branch overload slack LpVars in the from-to sense (m, nt)
    :param ratings_slack_to: branch overload slack LpVars in the to-from sense (m, nt)
    :param monitored: array of branch monitoring flags (m)
    :param active: array of branch active flags (m, nt)
    :return: branch flow expressions Pbr_f (m, nt) and phase shift variables tau (m, nt)
    """
    nbr, nt = ratings_slack_to.shape

    # from-to branch power restriction
    Pbr_f = np.zeros((nbr, nt), dtype=object)
    tau = np.zeros((nbr, nt), dtype=object)
    for m, t in product(range(nbr), range(nt)):

        if active[m, t]:

            # compute the branch susceptance
            if nc.branch_data.branch_dc[m]:
                bk = -1.0 / nc.branch_data.R[m]
            else:
                bk = -1.0 / nc.branch_data.X[m]

            # compute the flow
            if nc.branch_data.control_mode[m] == TransformerControlType.Pt:
                # is a phase shifter device (like phase shifter transformer or VSC with P control)
                tau[m, t] = LpVariable('Tau_{0}_{1}'.format(m, t), nc.branch_data.theta_min[m], nc.branch_data.theta_max[m])
                Pbr_f[m, t] = bk * (theta[F[m], t] - theta[T[m], t] + tau[m, t])
            else:
                # regular branch
                tau[m, t] = 0.0
                Pbr_f[m, t] = bk * (theta[F[m], t] - theta[T[m], t])

            # only monitored branches get the rating restrictions
            if monitored[m]:
                problem.add(Pbr_f[m, t] <= ratings[m, t] + ratings_slack_from[m, t], 'upper_rate_{0}_{1}'.format(m, t))
                problem.add(-ratings[m, t] - ratings_slack_to[m, t] <= Pbr_f[m, t], 'lower_rate_{0}_{1}'.format(m, t))
        else:
            Pbr_f[m, t] = 0

    return Pbr_f, tau
def formulate_contingency(problem: LpProblem, numerical_circuit: OpfTimeCircuit, flow_f, ratings, LODF, monitor,
                          lodf_tolerance):
    """
    Formulate the N-1 contingency flows and their overload restrictions.
    :param problem: LpProblem instance
    :param numerical_circuit: OpfTimeCircuit instance
    :param flow_f: branch flow expressions (m, nt)
    :param ratings: branch ratings (m, nt)
    :param LODF: line outage distribution factors matrix (m, m)
    :param monitor: array of branch monitoring flags (m)
    :param lodf_tolerance: minimum |LODF| for a contingency to be formulated
    :return: lists of contingency flows, overload slacks (both senses) and (t, m, c) index tuples
    """
    nbr, nt = ratings.shape

    # get the indices of the branches marked for contingency
    con_br_idx = numerical_circuit.branch_data.get_contingency_enabled_indices()

    # formulate contingency flows
    # this is done in a separated loop because all the flow variables must exist beforehand
    flow_lst = list()
    indices = list()  # (t, m, contingency_m)
    overload1_lst = list()
    overload2_lst = list()
    for t, m in product(range(nt), range(nbr)):  # for every branch

        if monitor[m]:  # the monitor variable is pre-computed in the previous loop
            _f = numerical_circuit.branch_data.F[m]
            _t = numerical_circuit.branch_data.T[m]

            for ic, c in enumerate(con_br_idx):  # for every contingency

                # skip self-contingencies and negligible LODF couplings
                if m != c and abs(LODF[m, c]) >= lodf_tolerance:
                    # compute the N-1 flow
                    contingency_flow = flow_f[m, t] + LODF[m, c] * flow_f[c, t]

                    # rating restriction in the sense from-to
                    overload1 = LpVariable("n-1_overload1_{0}_{1}_{2}".format(t, m, c), 0, 99999)
                    problem.add(contingency_flow <= (ratings[m, t] + overload1),
                                "n-1_ft_up_rating_{0}_{1}_{2}".format(t, m, c))

                    # rating restriction in the sense to-from
                    overload2 = LpVariable("n-1_overload2_{0}_{1}_{2}".format(t, m, c), 0, 99999)
                    problem.add((-ratings[m, t] - overload2) <= contingency_flow,
                                "n-1_tf_down_rating_{0}_{1}_{2}".format(t, m, c))

                    # store the variables
                    flow_lst.append(contingency_flow)
                    overload1_lst.append(overload1)
                    overload2_lst.append(overload2)
                    indices.append((t, m, c))

    return flow_lst, overload1_lst, overload2_lst, indices
def add_battery_discharge_restriction(problem: LpProblem, SoC0, Capacity, Efficiency, Pb, E, dt):
    """
    Add the battery energy-state restrictions.
    :param problem: LpProblem instance
    :param SoC0: State of Charge at t=0 (nb)
    :param Capacity: Capacities of the batteries (nb) in MWh/MW base
    :param Efficiency: Roundtrip efficiency per battery (nb)
    :param Pb: Batteries injection power LpVars (nb, nt)
    :param E: Batteries Energy state LpVars (nb, nt)
    :param dt: time increments in hours (nt-1)
    :return: Nothing, the restrictions are added to the problem
    """
    # set the initial state of charge
    lpAddRestrictions2(problem=problem,
                       lhs=E[:, 0],
                       rhs=SoC0 * Capacity,
                       name='initial_soc',
                       op='=')

    # compute the inverse of the efficiency because pulp does not divide by floats
    eff_inv = 1 / Efficiency

    # set the Energy values for t=1:nt
    for i in range(len(dt) - 1):
        t = i + 1

        # set the energy value Et = E(t-1) + dt * Pb / eff
        lpAddRestrictions2(problem=problem,
                           lhs=E[:, t],
                           rhs=E[:, t - 1] - dt[i] * Pb[:, t] * eff_inv,
                           name='initial_soc_t' + str(t) + '_',
                           op='=')
def formulate_hvdc_flow(problem: LpProblem, angles, Pinj, rates, active, Pt, control_mode, dispatchable, r, F, T,
                        logger: Logger = Logger(), inf=999999):
    """
    Formulate the HVDC link flows and add their restrictions to the problem.
    :param problem: LpProblem instance
    :param angles: bus voltage angle LpVars (n, nt)
    :param Pinj: bus power injection expressions (n, nt); modified in place with the link flows
    :param rates: HVDC rates (nhvdc, nt)
    :param active: HVDC active flags (nhvdc, nt)
    :param Pt: HVDC power set points (nhvdc, nt)
    :param control_mode: HVDC control modes (nhvdc)
    :param dispatchable: HVDC dispatchable flags (nhvdc)
    :param r: HVDC resistances (nhvdc)
    :param F: HVDC "from" bus indices (nhvdc)
    :param T: HVDC "to" bus indices (nhvdc)
    :param logger: Logger instance to report data problems
    :param inf: value used as the "infinity" bound of the slack variables
    :return: flows, overload slacks (both senses) and control slacks (both senses), each (nhvdc, nt)
    """
    # NOTE(review): the shared Logger() default is created once at import time,
    # so repeated calls accumulate errors into the same logger — consider a
    # None default instead.
    nhvdc, nt = rates.shape

    flow_f = np.zeros((nhvdc, nt), dtype=object)
    overload1 = np.zeros((nhvdc, nt), dtype=object)
    overload2 = np.zeros((nhvdc, nt), dtype=object)
    hvdc_control1 = np.zeros((nhvdc, nt), dtype=object)
    hvdc_control2 = np.zeros((nhvdc, nt), dtype=object)

    for t, i in product(range(nt), range(nhvdc)):

        if active[i, t]:

            _f = F[i]
            _t = T[i]

            hvdc_control1[i, t] = LpVariable('hvdc_control1_{0}_{1}'.format(i, t), 0, inf)
            hvdc_control2[i, t] = LpVariable('hvdc_control2_{0}_{1}'.format(i, t), 0, inf)
            P0 = Pt[i, t]

            if control_mode[i] == HvdcControlType.type_0_free:

                if rates[i, t] <= 0:
                    logger.add_error('Rate = 0', 'HVDC:{0} t:{1}'.format(i, t), rates[i, t])

                # formulate the hvdc flow as an AC line equivalent
                bk = 1.0 / r[i]  # TODO: yes, I know... DC...
                flow_f[i, t] = P0 + bk * (angles[_f, t] - angles[_t, t]) + hvdc_control1[i, t] - hvdc_control2[i, t]

                # add the injections matching the flow
                Pinj[_f, t] -= flow_f[i, t]
                Pinj[_t, t] += flow_f[i, t]

                # rating restriction in the sense from-to: eq.17
                overload1[i, t] = LpVariable('overload_hvdc1_{0}_{1}'.format(i, t), 0, inf)
                problem.add(flow_f[i, t] <= (rates[i, t] + overload1[i, t]), "hvdc_ft_rating_{0}_{1}".format(i, t))

                # rating restriction in the sense to-from: eq.18
                overload2[i, t] = LpVariable('overload_hvdc2_{0}_{1}'.format(i, t), 0, inf)
                problem.add((-rates[i, t] - overload2[i, t]) <= flow_f[i, t], "hvdc_tf_rating_{0}_{1}".format(i, t))

            elif control_mode[i] == HvdcControlType.type_1_Pset and not dispatchable[i]:
                # simple injections model: The power is set by the user
                flow_f[i, t] = P0 + hvdc_control1[i, t] - hvdc_control2[i, t]
                Pinj[_f, t] -= flow_f[i, t]
                Pinj[_t, t] += flow_f[i, t]

            elif control_mode[i] == HvdcControlType.type_1_Pset and dispatchable[i]:
                # simple injections model, the power is a variable and it is optimized
                P0 = LpVariable('hvdc_pf_{0}_{1}'.format(i, t), -rates[i, t], rates[i, t])
                flow_f[i, t] = P0 + hvdc_control1[i, t] - hvdc_control2[i, t]
                Pinj[_f, t] -= flow_f[i, t]
                Pinj[_t, t] += flow_f[i, t]

    return flow_f, overload1, overload2, hvdc_control1, hvdc_control2
class OpfDcTimeSeries(OpfTimeSeries):
    """DC linear optimal power flow over a time series, formulated with PuLP.

    The whole time window [start_idx, end_idx) is formulated at once
    (non-sequential) and handed to the MIP solver as a single problem.
    """

    def __init__(self, numerical_circuit: OpfTimeCircuit, start_idx, end_idx, solver: MIPSolvers = MIPSolvers.CBC,
                 batteries_energy_0=None, zonal_grouping: ZonalGrouping = ZonalGrouping.NoGrouping,
                 skip_generation_limits=False, consider_contingencies=False, LODF=None, lodf_tolerance=0.001):
        """
        DC time series linear optimal power flow
        :param numerical_circuit: NumericalCircuit instance
        :param start_idx: start index of the time series
        :param end_idx: end index of the time series
        :param solver: MIP solver_type to use
        :param batteries_energy_0: initial state of the batteries, if None the default values are taken
        :param zonal_grouping: ZonalGrouping mode (NoGrouping formulates the full branch model)
        :param skip_generation_limits: if True, generator limits are replaced by very large bounds
        :param consider_contingencies: if True, N-1 contingency constraints are added using the LODF
        :param LODF: line outage distribution factors matrix (needed when consider_contingencies)
        :param lodf_tolerance: LODF sensitivity below which a contingency pair is ignored
        """
        OpfTimeSeries.__init__(self, numerical_circuit=numerical_circuit, start_idx=start_idx, end_idx=end_idx,
                               solver=solver, skip_formulation=True)

        self.zonal_grouping = zonal_grouping
        self.skip_generation_limits = skip_generation_limits
        self.consider_contingencies = consider_contingencies
        self.LODF = LODF
        self.lodf_tolerance = lodf_tolerance

        # build the formulation
        self.problem = self.formulate(batteries_energy_0=batteries_energy_0)

    def formulate(self, batteries_energy_0=None):
        """
        Formulate the DC OPF time series in the non-sequential fashion (all to the solver_type at once)
        :param batteries_energy_0: initial energy state of the batteries (if none, the default is taken)
        :return: PuLP Problem instance
        """
        # general indices
        n = self.numerical_circuit.nbus
        m = self.numerical_circuit.nbr
        ng = self.numerical_circuit.ngen
        nb = self.numerical_circuit.nbatt
        nl = self.numerical_circuit.nload
        nt = self.end_idx - self.start_idx
        a = self.start_idx
        b = self.end_idx
        Sbase = self.numerical_circuit.Sbase

        # battery data (all powers/energies in per-unit of Sbase)
        Capacity = self.numerical_circuit.battery_enom / Sbase
        minSoC = self.numerical_circuit.battery_min_soc
        maxSoC = self.numerical_circuit.battery_max_soc
        if batteries_energy_0 is None:
            SoC0 = self.numerical_circuit.battery_soc_0
        else:
            SoC0 = (batteries_energy_0 / Sbase) / Capacity
        Pb_max = self.numerical_circuit.battery_pmax / Sbase
        Pb_min = self.numerical_circuit.battery_pmin / Sbase
        # a single round-trip efficiency approximated as the mean of both directions
        Efficiency = (self.numerical_circuit.battery_discharge_efficiency + self.numerical_circuit.battery_charge_efficiency) / 2.0
        cost_b = self.numerical_circuit.battery_cost[:, a:b]

        # generator data
        if self.skip_generation_limits:
            # BUG FIX: the original used np.zeros(ngen) * 99999999.0, which is
            # identically zero and clamped every generator to 0 MW instead of
            # removing the limits. Use very large symmetric bounds instead.
            Pg_max = np.full(self.numerical_circuit.ngen, 99999999.0)
            Pg_min = np.full(self.numerical_circuit.ngen, -99999999.0)
        else:
            Pg_max = self.numerical_circuit.generator_pmax / Sbase
            Pg_min = self.numerical_circuit.generator_pmin / Sbase
        P_profile = self.numerical_circuit.generator_p[:, a:b] / Sbase
        cost_g = self.numerical_circuit.generator_cost[:, a:b]
        enabled_for_dispatch = self.numerical_circuit.generator_dispatchable

        # load data
        Pl = (self.numerical_circuit.load_active[:, a:b] * self.numerical_circuit.load_s.real[:, a:b]) / Sbase
        cost_l = self.numerical_circuit.load_cost[:, a:b]

        # branch data
        branch_ratings = self.numerical_circuit.branch_rates[:, a:b] / Sbase
        br_active = self.numerical_circuit.branch_data.branch_active[:, a:b]
        F = self.numerical_circuit.F
        T = self.numerical_circuit.T
        cost_br = self.numerical_circuit.branch_cost[:, a:b]

        # Compute time delta in hours
        dt = np.zeros(nt)  # here nt = end_idx - start_idx
        for t in range(1, nt + 1):
            if a + t < nt:
                # NOTE(review): this guard compares the absolute index (a + t)
                # against the window length (nt), and `.seconds` ignores whole
                # days in the step; both look suspicious -- confirm intent
                # before changing.
                dt[t - 1] = (self.numerical_circuit.time_array[a + t] - self.numerical_circuit.time_array[a + t - 1]).seconds / 3600
            else:
                dt[t - 1] = 1.0

        # create LP variables
        Pg = lpMakeVars(name='Pg', shape=(ng, nt), lower=Pg_min, upper=Pg_max)
        Pb = lpMakeVars(name='Pb', shape=(nb, nt), lower=Pb_min, upper=Pb_max)
        E = lpMakeVars(name='E', shape=(nb, nt), lower=Capacity * minSoC, upper=Capacity * maxSoC)
        load_slack = lpMakeVars(name='LSlack', shape=(nl, nt), lower=0, upper=None)
        theta = lpMakeVars(name='theta', shape=(n, nt),
                           lower=self.numerical_circuit.bus_data.angle_min,
                           upper=self.numerical_circuit.bus_data.angle_max)
        branch_rating_slack1 = lpMakeVars(name='FSlack1', shape=(m, nt), lower=0, upper=None)
        branch_rating_slack2 = lpMakeVars(name='FSlack2', shape=(m, nt), lower=0, upper=None)

        # declare problem
        problem = LpProblem(name='DC_OPF_Time_Series')

        # set the fixed generation values (non-dispatchable units track their profile)
        set_fix_generation(problem=problem,
                           Pg=Pg,
                           P_profile=P_profile,
                           enabled_for_dispatch=enabled_for_dispatch)

        # compute the nodal power injections
        P = get_power_injections(C_bus_gen=self.numerical_circuit.generator_data.C_bus_gen,
                                 Pg=Pg,
                                 C_bus_bat=self.numerical_circuit.battery_data.C_bus_batt,
                                 Pb=Pb,
                                 C_bus_load=self.numerical_circuit.load_data.C_bus_load,
                                 LSlack=load_slack,
                                 Pl=Pl)

        # formulate the simple HVDC models
        hvdc_flow_f, hvdc_overload1, hvdc_overload2, \
         hvdc_control1_slacks, hvdc_control2_slacks = formulate_hvdc_flow(problem=problem,
                                                                          angles=theta,
                                                                          Pinj=P,
                                                                          rates=self.numerical_circuit.hvdc_data.rate[:, a:b] / Sbase,
                                                                          active=self.numerical_circuit.hvdc_data.active[:, a:b],
                                                                          Pt=self.numerical_circuit.hvdc_data.Pt[:, a:b],
                                                                          control_mode=self.numerical_circuit.hvdc_data.control_mode,
                                                                          dispatchable=self.numerical_circuit.hvdc_data.dispatchable,
                                                                          r=self.numerical_circuit.hvdc_data.r,
                                                                          F=self.numerical_circuit.hvdc_data.get_bus_indices_f(),
                                                                          T=self.numerical_circuit.hvdc_data.get_bus_indices_t(),
                                                                          logger=self.logger,
                                                                          inf=999999)

        # set the nodal power balance restrictions
        nodal_restrictions = add_dc_nodal_power_balance(numerical_circuit=self.numerical_circuit,
                                                        problem=problem,
                                                        theta=theta,
                                                        P=P,
                                                        start_=self.start_idx,
                                                        end_=self.end_idx)

        # add branch loading restrictions
        if self.zonal_grouping == ZonalGrouping.NoGrouping:
            load_f, tau = add_branch_loading_restriction(problem=problem,
                                                         nc=self.numerical_circuit,
                                                         theta=theta,
                                                         F=F,
                                                         T=T,
                                                         ratings=branch_ratings,
                                                         ratings_slack_from=branch_rating_slack1,
                                                         ratings_slack_to=branch_rating_slack2,
                                                         monitored=self.numerical_circuit.branch_data.monitor_loading,
                                                         active=br_active)
        elif self.zonal_grouping == ZonalGrouping.All:
            # zonal aggregation: no per-branch flow model
            load_f = np.zeros((self.numerical_circuit.nbr, nt))
            tau = np.ones((self.numerical_circuit.nbr, nt))
        else:
            raise ValueError()

        # if there are batteries, add the energy balance restrictions
        if nb > 0:
            add_battery_discharge_restriction(problem=problem,
                                              SoC0=SoC0,
                                              Capacity=Capacity,
                                              Efficiency=Efficiency,
                                              Pb=Pb, E=E, dt=dt)

        # optional N-1 contingency constraints via the LODF matrix
        if self.consider_contingencies:
            con_flow_lst, con_overload1_lst, con_overload2_lst, \
             con_br_idx = formulate_contingency(problem=problem,
                                                numerical_circuit=self.numerical_circuit,
                                                flow_f=load_f,
                                                ratings=branch_ratings,
                                                LODF=self.LODF,
                                                monitor=self.numerical_circuit.branch_data.monitor_loading,
                                                lodf_tolerance=self.lodf_tolerance)
        else:
            con_flow_lst = list()
            con_br_idx = list()
            con_overload1_lst = list()
            con_overload2_lst = list()

        # add the objective function (generation/battery cost + slack penalties)
        problem += get_objective_function(Pg=Pg,
                                          Pb=Pb,
                                          LSlack=load_slack,
                                          FSlack1=branch_rating_slack1,
                                          FSlack2=branch_rating_slack2,
                                          FCSlack1=con_overload1_lst,
                                          FCSlack2=con_overload2_lst,
                                          hvdc_overload1=hvdc_overload1,
                                          hvdc_overload2=hvdc_overload2,
                                          hvdc_control1_slacks=hvdc_control1_slacks,
                                          hvdc_control2_slacks=hvdc_control2_slacks,
                                          cost_g=cost_g,
                                          cost_b=cost_b,
                                          cost_l=cost_l,
                                          cost_br=cost_br)

        # Assign variables to keep
        # transpose them to be in the format of GridCal: time, device
        self.theta = theta.transpose()
        self.Pg = Pg.transpose()
        self.Pb = Pb.transpose()
        self.Pl = Pl.transpose()
        self.Pinj = P.transpose()
        self.phase_shift = tau.transpose()
        self.hvdc_flow = hvdc_flow_f.transpose()
        self.hvdc_slacks = (hvdc_overload1 + hvdc_overload2).transpose()
        self.E = E.transpose()
        self.load_shedding = load_slack.transpose()
        self.s_from = load_f.transpose()
        self.s_to = -load_f.transpose()
        self.overloads = (branch_rating_slack1 + branch_rating_slack2).transpose()
        self.rating = branch_ratings.T
        self.nodal_restrictions = nodal_restrictions

        self.contingency_flows_list = con_flow_lst
        self.contingency_indices_list = con_br_idx  # [(t, m, c), ...]
        self.contingency_flows_slacks_list = con_overload1_lst

        return problem
if __name__ == '__main__':
    from GridCal.Engine import *
    import pandas as pd

    # Demo: run a DC OPF time series over a bundled example grid and dump
    # the main result arrays to the console plus the loading to Excel.
    fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/Lynn 5 Bus pv.gridcal'
    # fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE39_1W.gridcal'
    # fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/grid_2_islands.xlsx'

    main_circuit = FileOpen(fname).open()

    # OPF options: DC linear formulation, solved daily with CBC
    opf_options = OptimalPowerFlowOptions(solver=SolverType.DC_OPF,
                                          time_grouping=TimeGrouping.Daily,
                                          mip_solver=MIPSolvers.CBC,
                                          power_flow_options=PowerFlowOptions())

    # run over the complete time profile
    optimal_power_flow_time_series = OptimalPowerFlowTimeSeries(grid=main_circuit,
                                                                options=opf_options,
                                                                start_=0,
                                                                end_=len(main_circuit.time_profile))
    optimal_power_flow_time_series.run()

    results = optimal_power_flow_time_series.results
    print('Angles\n', np.angle(results.voltage))
    print('Branch loading\n', results.loading)
    print('Gen power\n', results.generator_power)
    print('Nodal prices \n', results.shadow_prices)

    pd.DataFrame(results.loading).to_excel('opf_loading.xlsx')
3387023 | <gh_stars>0
#p3 size 791mm x 384mm
#sculpteo 940mm x 590
import svgwrite
#Adobe Illustrator
#72 ppi,
# Unit conversion: Adobe Illustrator uses 72 ppi, so this converts mm -> pt.
mm = 72 / 25.4
#manufacturer = "sculpteo"
# Active manufacturer preset; selects panel dimensions and joint widths below.
manufacturer = "ponoko_shelf_2_riser"
# Joint "node" (snap-off bump) geometry shared by all presets.
node_radius_ratio = 11
node_length = 7
# Overall opening / shelf span references (mm); currently unused downstream.
opening_length = 1898.65
max_shelf = 316.65
if manufacturer == "sculpteo":
    # Sculpteo panels: 940 x 590 mm. NOTE(review): this branch does not
    # define join_length or riser_depth, which riser()/slot() need -- it would
    # fail if selected; confirm before using.
    join_width = 6.1
    color_width = 41
    color_height = 96.475
    panel_width = 940
    panel_height = 590
    shelf_width = 939
    depth = 160
    riser_height = 290
elif manufacturer == "ponoko":
    # Ponoko P3 panels: 791 x 384 mm.
    color_width = 41
    color_height = 96.475
    panel_width = 791
    panel_height = 384
    shelf_width = 790
    depth = 160
    riser_depth=160
    riser_height = 290
    join_width = 5.9
    join_length = depth/2
elif manufacturer == "ponoko_shelf_2_riser":
    # Ponoko P3 layout with one shelf and two rows of risers per panel;
    # riser depth is whatever panel height remains after the shelf strip.
    color_width = 41
    color_height = 96.475
    panel_width = 791
    panel_height = 384
    shelf_width = 790
    depth = 160
    riser_height = 290
    riser_depth = (384-depth)/2
    join_width = 5.96
    join_length = depth/2
# Hairline stroke so the laser cutter treats the paths as cuts.
stroke_width="0.01"
#Ponoko P3 Template
dwg = svgwrite.Drawing('shelf_po_v4_4.svg',size=('2267.72','1116.85'),viewBox="0 0 2267.72 1116.85",profile="tiny")
current_group = dwg.add(dwg.g(id="Design_Template"))
# Orange 5 mm border required by the Ponoko template.
current_group.add(dwg.rect((0,0),(2267.72,1116.85),fill=svgwrite.rgb(246,146,30)))
# White rect inside the orange border (usable cutting area).
current_group.add(dwg.rect((5*mm,5*mm),(2239.37,1088.5),fill=svgwrite.rgb(255,255,255)))
# Because the template uses a 5 mm offset, all drawing helpers shift their
# coordinates by this amount.
transform_x_offset = 5*mm
transform_y_offset = 5*mm
#current_group = dwg.add(dwg.g(id="design", stroke=svgwrite.rgb(0, 0, 255),stroke_width=stroke_width,fill='none')
# All cut lines from here on go into this blue-stroked group.
current_group = dwg.add(dwg.g(id="ADD_your_design_here", stroke=svgwrite.rgb(0, 0, 255),stroke_width=stroke_width,fill='none'))
def addArc(p0, p1, ratio):
    """Add an elliptical arc that bulges to the right as it moves from p0 to p1.

    Coordinates are in design mm; `ratio` divides the x radius, flattening
    the bulge (larger ratio -> flatter arc).
    """
    # Absolute start point in document units, plus the relative end offset.
    start_x = p0[0] * mm + transform_x_offset
    start_y = p0[1] * mm + transform_y_offset
    delta_x = (p1[0] - p0[0]) * mm
    delta_y = (p1[1] - p0[1]) * mm
    x_radius = mm * 1 / ratio
    y_radius = mm * 1
    rotation = 0  # has no effect for circles
    path_data = "M %f,%f a %f,%f %f 0,0 %f,%f" % (
        start_x, start_y, x_radius, y_radius, rotation, delta_x, delta_y)
    current_group.add(dwg.path(d=path_data,
                               fill="none",
                               stroke=svgwrite.rgb(0, 0, 255), stroke_width=stroke_width
                               ))
def line(start, end):
    """Draw a straight cut line between two points given in design mm."""
    x1, y1 = start
    x2, y2 = end
    # Scale to document units, apply the template offset, round to 3 decimals.
    p1 = (round(x1 * mm + transform_x_offset, 3), round(y1 * mm + transform_y_offset, 3))
    p2 = (round(x2 * mm + transform_x_offset, 3), round(y2 * mm + transform_y_offset, 3))
    current_group.add(dwg.line(p1, p2))
def slot(x,y,top=True):
    """Cut a vertical joint slot centred on x, running down from y.

    The slot is join_width wide and join_length deep; each long side is
    interrupted by two small arcs ("nodes") that leave snap-fit bumps.
    If top is False the top edge is omitted (it coincides with the shelf edge).
    """
    if top:
        line((x-join_width/2, y), (x+join_width/2, y))
    # split the slot depth into three sections; the arcs sit at the two
    # internal boundaries (y1 and y2)
    y1 = round(join_length / 3, 2) + y
    y2 = round(2 * join_length / 3, 2) + y
    y3 = join_length + y
    # down the right side, with the two node arcs
    current_x = x+join_width/2
    line((current_x, y), (current_x, y1 - node_length/2))
    addArc((current_x, y1 - node_length/2),(current_x, y1 + node_length/2),node_radius_ratio)
    line( (current_x, y1 + node_length/2),(current_x, y2 - node_length/2))
    addArc((current_x, y2 - node_length/2),(current_x, y2 + node_length/2),node_radius_ratio)
    line( (current_x, y2 + node_length/2), (current_x, y3))
    # bottom edge
    line( (x+join_width/2, y3),(x-join_width/2, y3))
    # up the left side, mirroring the right
    current_x = x-join_width/2
    line((current_x, y3),(current_x, y2 + node_length/2))
    addArc((current_x, y2 + node_length/2),(current_x, y2 - node_length/2),node_radius_ratio)
    line((current_x, y2 - node_length/2), (current_x, y1 + node_length/2))
    addArc((current_x, y1 + node_length/2),(current_x, y1 - node_length/2),node_radius_ratio)
    line( (current_x, y1 - node_length/2),(current_x, y))
def shelf(x,y,top=True,bottom=True,left=True,right=True):
    """Cut one shelf outline at (x, y) with joint slots along its top edge.

    The boolean flags suppress individual edges so adjacent parts can share
    a single cut line on the panel.
    """
    if top:
        line((x, y), (x+shelf_width, y))
    if left:
        line((x, y), (x, y+depth))
    # one slot every (color_width + join_width) along the top edge;
    # the first slot starts one pitch in from the left
    curr_pos=0
    for incr in range(0,int(shelf_width/(color_width+join_width))):
        curr_pos+=color_width+join_width
        slot(curr_pos,y,top=False)
    if right:
        line((x+shelf_width, y), (x+shelf_width, y+depth))
    if bottom:
        line((x+shelf_width, y+depth), (x, y+depth))
def riser(x,y,top=True,bottom=True,left=True,right=True):
    """Cut one riser outline at (x, y) with a single centred joint slot.

    Edge flags behave as in shelf(): a False flag drops that edge so
    neighbouring parts can share the cut. The slot x offset (128.63) centres
    it on the riser. NOTE(review): the constant is hard-coded -- presumably
    derived from riser_height; confirm before changing dimensions.
    """
    if top:
        line((x, y), (x+riser_height, y))
    if left:
        line((x, y), (x, y+riser_depth))
    if bottom:
        line((x, y+riser_depth), (x+riser_height, y+riser_depth))
    if right:
        line((x+riser_height, y), (x+riser_height, y+riser_depth))
    slot(x+128.63,y,top=False)
# Lay out the parts on the panel for the selected manufacturer preset.
# Shared edges are suppressed (top=False, etc.) so neighbouring parts are
# separated by a single cut, and the outer rectangle is drawn explicitly.
if manufacturer == "sculpteo":
    # two stacked shelves, then a row of three risers below them
    shelf(0,0,top=False,left=False,right=False)
    shelf(0,depth,top=False,left=False,right=False)
    riser(0*riser_height,depth*2,top=False,bottom=False,left=False)
    riser(1*riser_height,depth*2,top=False,bottom=False,left=False)
    riser(2*riser_height,depth*2,top=False,bottom=False,left=False)
elif manufacturer == "ponoko":
    # one shelf and one row of risers, plus the outer cut rectangle
    shelf(0,0,top=False,left=False,right=False)
    riser(0*riser_height,depth*1,top=False,bottom=False,left=False)
    riser(1*riser_height,depth*1,top=False,bottom=False,left=False)
    riser(2*riser_height,depth*1,top=False,bottom=False,left=False,right=False)
    line((0,depth*2),(shelf_width,2*depth))
    line((0,0),(shelf_width,0))
    line((shelf_width,0),(shelf_width,2*depth))
    line((0,0),(0,2*depth))
elif manufacturer == "ponoko_shelf_2_riser":
    # one shelf and two rows of risers, plus the outer cut rectangle
    shelf(0,0,top=False,left=False,right=False)
    riser(0*riser_height,depth*1,top=False,bottom=False,left=False)
    riser(1*riser_height,depth*1,top=False,bottom=False,left=False)
    riser(2*riser_height,depth*1,top=False,bottom=False,left=False,right=False)
    line((0,depth+riser_depth),(shelf_width,depth+riser_depth))
    riser(0*riser_height,depth+riser_depth,top=False,bottom=False,left=False)
    riser(1*riser_height,depth+riser_depth,top=False,bottom=False,left=False)
    riser(2*riser_height,depth+riser_depth,top=False,bottom=False,left=False,right=False)
    line((0,depth+2*riser_depth),(shelf_width,depth+2*riser_depth))
    line((0,0),(shelf_width,0))
    line((shelf_width,0),(shelf_width,depth+2*riser_depth))
    line((0,0),(0,depth+2*riser_depth))
#dwg.add(dwg.line((0, 0), (10, 0), stroke=svgwrite.rgb(0, 0, 255),stroke_width="0.01"))
#dwg.add(dwg.text('Test', insert=(0, 0.2), fill='red'))
# write the final SVG to disk
dwg.save()
| StarcoderdataPython |
3282468 | <reponame>lyw07/kolibri
import os
import time
from django.apps.registry import AppRegistryNotReady
from django.core.management import call_command
from django.http.response import Http404
from django.utils.translation import gettext_lazy as _
from iceqube.classes import State
from iceqube.exceptions import JobNotFound
from iceqube.exceptions import UserCancelledError
from rest_framework import serializers
from rest_framework import viewsets
from rest_framework.decorators import list_route
from rest_framework.response import Response
from six import string_types
from .queue import get_queue
from kolibri.core.content.permissions import CanManageContent
from kolibri.core.content.utils.channels import get_mounted_drive_by_id
from kolibri.core.content.utils.channels import get_mounted_drives_with_channel_info
from kolibri.core.content.utils.paths import get_content_database_file_path
from kolibri.utils import conf
try:
from django.apps import apps
apps.check_apps_ready()
except AppRegistryNotReady:
import django
django.setup()
NETWORK_ERROR_STRING = _("There was a network error.")
DISK_IO_ERROR_STRING = _("There was a disk access error.")
CATCHALL_SERVER_ERROR_STRING = _("There was an unknown error.")
class TasksViewSet(viewsets.ViewSet):
    """API endpoints to start, monitor, cancel and clear asynchronous tasks
    (channel import/export, deletion, log export) on the iceqube queue."""

    permission_classes = (CanManageContent,)

    def list(self, request):
        """Return the serialized state of every job in the queue."""
        jobs_response = [_job_to_response(j) for j in get_queue().jobs]
        return Response(jobs_response)

    def create(self, request):
        # unimplemented. Call out to the task-specific APIs for now.
        pass

    def retrieve(self, request, pk=None):
        """Return the state of the single job whose id is ``pk``."""
        try:
            task = _job_to_response(get_queue().fetch_job(pk))
            return Response(task)
        except JobNotFound:
            raise Http404("Task with {pk} not found".format(pk=pk))

    def destroy(self, request, pk=None):
        # unimplemented for now.
        pass

    @list_route(methods=["post"])
    def startremotechannelimport(self, request):
        """Enqueue a channel metadata import from a remote server."""
        try:
            channel_id = request.data["channel_id"]
        except KeyError:
            raise serializers.ValidationError("The channel_id field is required.")

        # default to the central content server when no baseurl is given
        baseurl = request.data.get(
            "baseurl", conf.OPTIONS["Urls"]["CENTRAL_CONTENT_BASE_URL"]
        )

        job_metadata = {"type": "REMOTECHANNELIMPORT", "started_by": request.user.pk}

        job_id = get_queue().enqueue(
            call_command,
            "importchannel",
            "network",
            channel_id,
            baseurl=baseurl,
            extra_metadata=job_metadata,
            cancellable=True,
        )
        resp = _job_to_response(get_queue().fetch_job(job_id))

        return Response(resp)

    @list_route(methods=["post"])
    def startremotecontentimport(self, request):
        """Enqueue a content files import from a remote server."""
        try:
            channel_id = request.data["channel_id"]
        except KeyError:
            raise serializers.ValidationError("The channel_id field is required.")

        # optional arguments
        baseurl = request.data.get(
            "baseurl", conf.OPTIONS["Urls"]["CENTRAL_CONTENT_BASE_URL"]
        )
        node_ids = request.data.get("node_ids", None)
        exclude_node_ids = request.data.get("exclude_node_ids", None)

        if node_ids and not isinstance(node_ids, list):
            raise serializers.ValidationError("node_ids must be a list.")
        if exclude_node_ids and not isinstance(exclude_node_ids, list):
            raise serializers.ValidationError("exclude_node_ids must be a list.")

        job_metadata = {"type": "REMOTECONTENTIMPORT", "started_by": request.user.pk}

        job_id = get_queue().enqueue(
            call_command,
            "importcontent",
            "network",
            channel_id,
            baseurl=baseurl,
            node_ids=node_ids,
            exclude_node_ids=exclude_node_ids,
            extra_metadata=job_metadata,
            track_progress=True,
            cancellable=True,
        )

        resp = _job_to_response(get_queue().fetch_job(job_id))

        return Response(resp)

    @list_route(methods=["post"])
    def startdiskchannelimport(self, request):
        """Enqueue a channel metadata import from a mounted local drive."""
        # Load the required parameters
        try:
            channel_id = request.data["channel_id"]
        except KeyError:
            raise serializers.ValidationError("The channel_id field is required.")

        try:
            drive_id = request.data["drive_id"]
        except KeyError:
            raise serializers.ValidationError("The drive_id field is required.")

        try:
            drive = get_mounted_drive_by_id(drive_id)
        except KeyError:
            raise serializers.ValidationError(
                "That drive_id was not found in the list of drives."
            )

        job_metadata = {"type": "DISKCHANNELIMPORT", "started_by": request.user.pk}

        job_id = get_queue().enqueue(
            call_command,
            "importchannel",
            "disk",
            channel_id,
            drive.datafolder,
            extra_metadata=job_metadata,
            cancellable=True,
        )

        resp = _job_to_response(get_queue().fetch_job(job_id))
        return Response(resp)

    @list_route(methods=["post"])
    def startdiskcontentimport(self, request):
        """Enqueue a content files import from a mounted local drive."""
        try:
            channel_id = request.data["channel_id"]
        except KeyError:
            raise serializers.ValidationError("The channel_id field is required.")

        try:
            drive_id = request.data["drive_id"]
        except KeyError:
            raise serializers.ValidationError("The drive_id field is required.")

        try:
            drive = get_mounted_drive_by_id(drive_id)
        except KeyError:
            raise serializers.ValidationError(
                "That drive_id was not found in the list of drives."
            )

        # optional arguments
        node_ids = request.data.get("node_ids", None)
        exclude_node_ids = request.data.get("exclude_node_ids", None)

        if node_ids and not isinstance(node_ids, list):
            raise serializers.ValidationError("node_ids must be a list.")
        if exclude_node_ids and not isinstance(exclude_node_ids, list):
            raise serializers.ValidationError("exclude_node_ids must be a list.")

        job_metadata = {"type": "DISKCONTENTIMPORT", "started_by": request.user.pk}

        job_id = get_queue().enqueue(
            call_command,
            "importcontent",
            "disk",
            channel_id,
            drive.datafolder,
            node_ids=node_ids,
            exclude_node_ids=exclude_node_ids,
            extra_metadata=job_metadata,
            track_progress=True,
            cancellable=True,
        )

        resp = _job_to_response(get_queue().fetch_job(job_id))

        return Response(resp)

    @list_route(methods=["post"])
    def startdeletechannel(self, request):
        """
        Delete a channel and all its associated content from the server
        """
        if "channel_id" not in request.data:
            raise serializers.ValidationError("The 'channel_id' field is required.")

        channel_id = request.data["channel_id"]

        job_metadata = {"type": "DELETECHANNEL", "started_by": request.user.pk}

        task_id = get_queue().enqueue(
            call_command,
            "deletechannel",
            channel_id,
            track_progress=True,
            extra_metadata=job_metadata,
        )

        # attempt to get the created Task, otherwise return pending status
        resp = _job_to_response(get_queue().fetch_job(task_id))

        return Response(resp)

    @list_route(methods=["post"])
    def startdiskexport(self, request):
        """
        Export a channel to a local drive, and copy content to the drive.
        """
        # Load the required parameters
        try:
            channel_id = request.data["channel_id"]
        except KeyError:
            raise serializers.ValidationError("The channel_id field is required.")

        try:
            drive_id = request.data["drive_id"]
        except KeyError:
            raise serializers.ValidationError("The drive_id field is required.")

        # optional arguments
        node_ids = request.data.get("node_ids", None)
        exclude_node_ids = request.data.get("exclude_node_ids", None)

        if node_ids and not isinstance(node_ids, list):
            raise serializers.ValidationError("node_ids must be a list.")
        if exclude_node_ids and not isinstance(exclude_node_ids, list):
            raise serializers.ValidationError("exclude_node_ids must be a list.")

        job_metadata = {"type": "DISKEXPORT", "started_by": request.user.pk}

        task_id = get_queue().enqueue(
            _localexport,
            channel_id,
            drive_id,
            track_progress=True,
            cancellable=True,
            node_ids=node_ids,
            exclude_node_ids=exclude_node_ids,
            extra_metadata=job_metadata,
        )

        # attempt to get the created Task, otherwise return pending status
        resp = _job_to_response(get_queue().fetch_job(task_id))

        return Response(resp)

    @list_route(methods=["post"])
    def canceltask(self, request):
        """
        Cancel a task with its task id given in the task_id parameter.
        """

        if "task_id" not in request.data:
            raise serializers.ValidationError("The 'task_id' field is required.")
        if not isinstance(request.data["task_id"], string_types):
            raise serializers.ValidationError("The 'task_id' should be a string.")
        try:
            get_queue().cancel(request.data["task_id"])
            # Poll until the job reports CANCELED, giving up after 5 seconds.
            # BUG FIX: the original condition used ``or``, which kept polling
            # for the full 5 s even after the job was cancelled and spun
            # forever if the job never reached CANCELED; ``and`` implements
            # the intended "until cancelled or timed out" loop.
            waiting_time = 0
            interval = 0.1
            job = get_queue().fetch_job(request.data["task_id"])
            while job.state != State.CANCELED and waiting_time < 5.0:
                time.sleep(interval)
                waiting_time += interval
                job = get_queue().fetch_job(request.data["task_id"])
            if job.state != State.CANCELED:
                # 408: the cancellation did not complete within the timeout
                return Response(status=408)
            get_queue().clear_job(request.data["task_id"])
        except JobNotFound:
            pass

        return Response({})

    @list_route(methods=["post"])
    def cleartasks(self, request):
        """
        Cancels all running tasks.
        """
        get_queue().empty()
        return Response({})

    @list_route(methods=["post"])
    def deletefinishedtasks(self, request):
        """
        Delete all tasks that have succeeded, failed, or been cancelled.
        """
        get_queue().clear()
        return Response({})

    @list_route(methods=["get"])
    def localdrive(self, request):
        """List the mounted local drives and their channel information."""
        drives = get_mounted_drives_with_channel_info()

        # make sure everything is a dict, before converting to JSON
        assert isinstance(drives, dict)
        out = [mountdata._asdict() for mountdata in drives.values()]

        return Response(out)

    @list_route(methods=["post"])
    def startexportlogcsv(self, request):
        """
        Dumps in csv format the required logs.
        By default it will be dump contentsummarylog.

        :param: logtype: Kind of log to dump, summary or session
        :returns: An object with the job information
        """
        csv_export_filenames = {
            "session": "content_session_logs.csv",
            "summary": "content_summary_logs.csv",
        }
        log_type = request.data.get("logtype", "summary")

        if log_type in csv_export_filenames.keys():
            logs_dir = os.path.join(conf.KOLIBRI_HOME, "log_export")
            filepath = os.path.join(logs_dir, csv_export_filenames[log_type])
        else:
            raise Http404(
                "Impossible to create a csv export file for {}".format(log_type)
            )

        if not os.path.isdir(logs_dir):
            os.mkdir(logs_dir)

        job_type = (
            "EXPORTSUMMARYLOGCSV" if log_type == "summary" else "EXPORTSESSIONLOGCSV"
        )

        job_metadata = {"type": job_type, "started_by": request.user.pk}

        job_id = get_queue().enqueue(
            call_command,
            "exportlogs",
            log_type=log_type,
            output_file=filepath,
            overwrite="true",
            extra_metadata=job_metadata,
            track_progress=True,
        )

        resp = _job_to_response(get_queue().fetch_job(job_id))

        return Response(resp)
def _localexport(
    channel_id,
    drive_id,
    update_progress=None,
    check_for_cancel=None,
    node_ids=None,
    exclude_node_ids=None,
    extra_metadata=None,
):
    """Export a channel database plus its content files to a mounted drive.

    If the user cancels during the content copy, the partially written
    channel database on the drive is removed before re-raising.
    """
    drive = get_mounted_drive_by_id(drive_id)

    # Step 1: export the channel database itself.
    call_command(
        "exportchannel",
        channel_id,
        drive.datafolder,
        update_progress=update_progress,
        check_for_cancel=check_for_cancel,
    )

    # Step 2: copy the selected content files; clean up the database copied
    # in step 1 if the user cancels midway.
    try:
        call_command(
            "exportcontent",
            channel_id,
            drive.datafolder,
            node_ids=node_ids,
            exclude_node_ids=exclude_node_ids,
            update_progress=update_progress,
            check_for_cancel=check_for_cancel,
        )
    except UserCancelledError:
        db_path = get_content_database_file_path(
            channel_id, datafolder=drive.datafolder
        )
        try:
            os.remove(db_path)
        except OSError:
            # best-effort cleanup: a missing or locked file is not an error here
            pass
        raise
def _job_to_response(job):
    """Serialize an iceqube job into the dict shape the tasks API returns.

    A falsy ``job`` yields a placeholder entry in the SCHEDULED state.
    """
    if not job:
        return {
            "type": None,
            "started_by": None,
            "status": State.SCHEDULED,
            "percentage": 0,
            "progress": [],
            "id": None,
            "cancellable": False,
        }
    metadata = getattr(job, "extra_metadata", {})
    return {
        "type": metadata.get("type"),
        "started_by": metadata.get("started_by"),
        "status": job.state,
        "exception": str(job.exception),
        "traceback": str(job.traceback),
        "percentage": job.percentage_progress,
        "id": job.job_id,
        "cancellable": job.cancellable,
    }
| StarcoderdataPython |
1670241 | <gh_stars>10-100
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import reflection
class EventBackend(object):
    """Base class for event dump backends; subclasses implement ``dump``."""

    @classmethod
    def _check_entity(cls, e):
        """Return the upper-cased, unqualified class name of ``e``."""
        return reflection.get_class_name(e, fully_qualified=False).upper()

    @classmethod
    def _get_action_name(cls, action):
        """Get action name by inference.

        :param action: An action object.
        :returns: A string containing the inferred action name.
        """
        head, sep, tail = action.action.partition('_')
        if not sep:
            # No underscore: the whole action string is the name.
            return head.lower()
        name = tail.lower()
        if name == "operation":
            # Generic "*_OPERATION" actions carry the real operation name
            # in their inputs; fall back to "operation" if absent.
            name = action.inputs.get("operation", name)
        return name

    @classmethod
    def dump(cls, level, action, **kwargs):
        """A method for sub-class to override.

        :param level: An integer as defined by python logging module.
        :param action: The action that triggered this dump.
        :param dict kwargs: Additional parameters such as ``phase``,
                            ``timestamp`` or ``extra``.
        :returns: None
        """
        raise NotImplementedError
| StarcoderdataPython |
1797659 | from .trainer_pushpull import *
from .trainer_lasso import *
| StarcoderdataPython |
127038 | <reponame>willingc/escapement<filename>escapement/cli.py
"""Console script for escapement."""
import sys
import click
from .escapement import escapement
@click.command()
def main(args=None):
    """Console script for escapement."""
    # Print a banner, run the main escapement routine, and signal success
    # (the return value becomes the process exit code via sys.exit below).
    click.echo("Escapement: Understand your projects")
    escapement()
    return 0


if __name__ == "__main__":
    sys.exit(main())  # pragma: no cover
| StarcoderdataPython |
1690752 | # Copyright (c) 2019 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: <NAME>
from m5.params import *
from m5.SimObject import SimObject
from m5.util.fdthelper import *
class Display(SimObject):
    """Display model that emits the device-tree nodes (display-timings and
    a virtual DRM encoder endpoint) describing its video mode."""
    type = 'Display'
    cxx_header = "dev/arm/display.hh"

    # Video timing parameters; each maps to the device-tree property of the
    # same name under the display-timings node.
    clock_frequency = Param.Unsigned("clock-frequency property")
    hactive = Param.Unsigned("hactive property")
    vactive = Param.Unsigned("vactive property")
    hfront_porch = Param.Unsigned("hfront-porch property")
    hback_porch = Param.Unsigned("hback-porch property")
    hsync_len = Param.Unsigned("hsync-len property")
    vfront_porch = Param.Unsigned("vfront-porch property")
    vback_porch = Param.Unsigned("vback-porch property")
    vsync_len = Param.Unsigned("vsync-len property")

    # Cached endpoint FdtNode; populated by generateDeviceTree so that other
    # devices can link back to this display's encoder endpoint.
    _endpoint_node = None

    def endpointPhandle(self):
        """Return the phandle label used for the encoder endpoint node."""
        return "encoder_endpoint"

    def endpointNode(self):
        """Return the endpoint node built by generateDeviceTree.

        Must only be called after generateDeviceTree has run.
        """
        assert self._endpoint_node is not None
        return self._endpoint_node

    def generateDeviceTree(self, state):
        """Yield the virt-encoder device-tree node for this display."""
        # timing node: one entry per video timing parameter
        timing_node = FdtNode(self.timingNode())

        timing_node.append(FdtPropertyWords(
            "clock-frequency", [self.clock_frequency]))
        timing_node.append(FdtPropertyWords(
            "hactive", [self.hactive]))
        timing_node.append(FdtPropertyWords(
            "vactive", [self.vactive]))
        timing_node.append(FdtPropertyWords(
            "hfront-porch", [self.hfront_porch]))
        timing_node.append(FdtPropertyWords(
            "hback-porch", [self.hback_porch]))
        timing_node.append(FdtPropertyWords(
            "hsync-len", [self.hsync_len]))
        timing_node.append(FdtPropertyWords(
            "vfront-porch", [self.vfront_porch]))
        timing_node.append(FdtPropertyWords(
            "vback-porch", [self.vback_porch]))
        timing_node.append(FdtPropertyWords(
            "vsync-len", [self.vsync_len]))
        timing_node.appendPhandle(self.timingNode())

        # display timing node: marks the timing above as the native mode
        dispt_node = FdtNode("display-timings")
        dispt_node.append(FdtPropertyWords("native-mode",
                                           state.phandle(self.timingNode())))
        dispt_node.append(timing_node)

        # endpoint node: link target for the display controller's port
        endpoint_node = FdtNode("endpoint")
        endpoint_node.appendPhandle(
            self.endpointPhandle())

        # Assign node so that it can be retrieved
        self._endpoint_node = endpoint_node

        # port node wrapping the endpoint
        port_node = FdtNode("port")
        port_node.append(endpoint_node)

        # Virt-encoder
        node = FdtNode("virt-encoder")
        node.appendCompatible(["drm,virtual-encoder"])
        node.append(dispt_node)
        node.append(port_node)

        yield node
class Display1080p(Display):
    """1080p60 display: 1920x1080 at a 148.5 MHz pixel clock (standard
    1080p60 CEA timing values)."""
    clock_frequency = 148500000
    hactive = 1920
    vactive = 1080
    hfront_porch = 148
    hback_porch = 88
    hsync_len = 44
    vfront_porch = 36
    vback_porch = 4
    vsync_len = 5

    def timingNode(self):
        """Return the device-tree node name for this timing set."""
        return "timing_1080p60"
| StarcoderdataPython |
198037 | <reponame>Bhanditz/spyder<filename>spyder/widgets/tests/test_findinfiles.py
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
#
"""
Tests for findinfiles.py
"""
# Test library imports
import os
import pytest
import os.path as osp
from pytestqt import qtbot
# Local imports
import spyder.widgets.findinfiles
from spyder.widgets.findinfiles import FindInFilesWidget
# Absolute path of the directory containing this test file, used to locate
# the bundled "data" fixture files regardless of the pytest working directory.
LOCATION = os.path.realpath(os.path.join(os.getcwd(),
                            os.path.dirname(__file__)))
def process_search_results(results):
    """
    Transform result representation from the output of the widget to the
    test framework comparison representation.

    *results* maps arbitrary keys to ``(file, line, col)`` tuples; the return
    value maps each file basename to its sorted list of ``(line, col)``
    positions.
    """
    matches = {}
    for file, line, col in results.values():
        matches.setdefault(osp.basename(file), []).append((line, col))
    # Sort each file's positions once at the end instead of re-sorting the
    # whole list after every single insertion as the original did.
    return {filename: sorted(positions) for filename, positions in matches.items()}
def setup_findinfiles(qtbot, *args, **kwargs):
    """Create a FindInFilesWidget registered for clean-up with *qtbot*.

    This is a plain helper rather than a pytest fixture: every test in this
    module calls it directly (no test requests it by parameter name), and
    calling a fixture-decorated function directly is an error in pytest >= 4.
    Extra *args*/*kwargs* are forwarded to the widget constructor.
    """
    widget = FindInFilesWidget(None, *args, **kwargs)
    qtbot.addWidget(widget)
    return widget
def expected_results():
    """Matches that the case-sensitive 'spam' search in data/ must yield."""
    return {
        'spam.txt': [(1, 0), (1, 5), (3, 22)],
        'spam.py': [(2, 7), (5, 1), (7, 12)],
        'spam.cpp': [(2, 9), (6, 15), (8, 2), (11, 4),
                     (11, 10), (13, 12)],
    }
def expected_case_unsensitive_results():
    """Matches that the case-insensitive 'ham' search in data/ must yield."""
    return {
        'spam.txt': [(1, 10)],
        'ham.txt': [(1, 0), (1, 10), (3, 0), (4, 0),
                    (5, 4), (9, 0), (10, 0)],
    }
def test_findinfiles(qtbot):
    """Smoke test: the widget can be created, resized and shown."""
    widget = setup_findinfiles(qtbot)
    widget.resize(640, 480)
    widget.show()
    assert widget
def test_find_in_files_search(qtbot):
    """
    Search for a string present in a set of known files and compare the
    collected matches against the expected search results.
    """
    widget = setup_findinfiles(qtbot)
    widget.set_search_text("spam")
    widget.find_options.set_directory(osp.join(LOCATION, "data"))
    widget.find()
    # Block until the search worker signals completion.
    qtbot.waitSignal(widget.sig_finished).wait()
    found = process_search_results(widget.result_browser.data)
    assert found == expected_results()
def test_exclude_extension(qtbot):
    """Files matching the exclude regex (``\\.py$``) must not be reported."""
    find_in_files = setup_findinfiles(qtbot, exclude="\.py$")
    find_in_files.set_search_text("spam")
    find_in_files.find_options.set_directory(osp.join(LOCATION, "data"))
    find_in_files.find()
    blocker = qtbot.waitSignal(find_in_files.sig_finished)
    blocker.wait()
    matches = process_search_results(find_in_files.result_browser.data)
    # Equivalent to the original flag-and-break loop, but declarative.
    assert all(osp.splitext(file)[1] != '.py' for file in matches)
def test_case_unsensitive_search(qtbot):
    """A case-insensitive search for 'ham' must match regardless of case."""
    find_in_files = setup_findinfiles(qtbot, case_sensitive=False)
    find_in_files.set_search_text('ham')
    find_in_files.find_options.set_directory(osp.join(LOCATION, "data"))
    find_in_files.find()
    blocker = qtbot.waitSignal(find_in_files.sig_finished)
    blocker.wait()
    matches = process_search_results(find_in_files.result_browser.data)
    # Leftover debug print(matches) removed.
    assert expected_case_unsensitive_results() == matches
def test_case_sensitive_search(qtbot):
    """A case-sensitive search for 'HaM' must match only the exact casing."""
    find_in_files = setup_findinfiles(qtbot)
    find_in_files.set_search_text('HaM')
    find_in_files.find_options.set_directory(osp.join(LOCATION, "data"))
    find_in_files.find()
    blocker = qtbot.waitSignal(find_in_files.sig_finished)
    blocker.wait()
    matches = process_search_results(find_in_files.result_browser.data)
    # Leftover debug print(matches) removed.
    assert matches == {'ham.txt': [(9, 0)]}
# Allow running this test module directly with ``python test_findinfiles.py``.
if __name__ == "__main__":
    pytest.main()
| StarcoderdataPython |
157579 | <filename>spark/spark_streaming/src/main.py
from com.example.app.streaming_app import StreamingApp
from com.example.handler.spark import Spark
# Entry point: build the application's stream handler and hand it to the
# Spark driver, which presumably owns the streaming context's lifecycle
# (NOTE(review): confirm against Spark.start_streaming's implementation).
Spark.start_streaming(StreamingApp().handler)
#StreamingApp().handler
| StarcoderdataPython |
147924 | <gh_stars>1-10
# MIT License
#
# Copyright (c) 2021 Emc2356
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
managers for some of the classes
"""
from typing import Any, Iterable, List, Optional, Tuple

import pygame

from PygameHaze.types import *
from PygameHaze.constants import *
from PygameHaze.Classes import Button
from PygameHaze.Classes import InputField, InputFieldNumbers, InputFieldLetters
from PygameHaze.Classes import Particle
from PygameHaze.Classes import Animation
class _BaseManager:
    """
    Common list-backed behaviour shared by the manager classes.

    Subclasses are expected to override:
        __getitem__
        __iter__
        __next__
        __reversed__

    NOTE(review): double-underscore attributes are name-mangled per class, so
    a subclass that re-declares ``__items`` gets a *separate* attribute from
    the one the generic methods below operate on -- subclasses must call
    ``super().__init__()`` (and should override the container protocol if
    they keep their own storage).
    """

    def __init__(self):
        self.__items: List[Any] = []  # backing storage for the manager
        self.__i = 0                  # wrap-around cursor used by __next__

    def __getitem__(self, item) -> Any:
        """Index access; placeholder meant to be overridden."""
        pass

    def __iter__(self) -> Iterable[Any]:
        """Iteration; placeholder meant to be overridden."""
        pass

    def __next__(self) -> Any:
        """Return the next item, wrapping around at the end of the list."""
        if not self.__items:
            # The original recursed forever on an empty manager
            # (RecursionError); terminate the iteration instead.
            raise StopIteration
        try:
            item = self.__items[self.__i]
            self.__i += 1
        except IndexError:
            self.__i = 0
            item = self.__next__()
        return item

    def __reversed__(self) -> List[Any]:
        """Reversed view; placeholder meant to be overridden."""
        pass

    # global method
    def __setitem__(self, key, value) -> None:
        self.__items[key] = value

    def __delitem__(self, key) -> None:
        del self.__items[key]

    def __iadd__(self, other) -> "_BaseManager":
        if isinstance(other, type(self)):
            self.__items += other.__items
        else:
            raise TypeError(
                f"the given obj is not a instance of {type(self)} and it is a instance of the class {type(other)}"
            )
        # ``a += b`` rebinds ``a`` to the return value of __iadd__; the
        # original returned None, which clobbered the left operand.
        return self

    def __add__(self, other) -> "_BaseManager":
        if isinstance(other, type(self)):
            self.__items += other.__items
        else:
            raise TypeError(
                f"the given obj is not a instance of {type(self)} and it is a instance of the class {type(other)}"
            )
        # The original returned None, making ``a + b`` useless; return the
        # (mutated) left operand so the expression yields a manager.
        return self

    def __contains__(self, item) -> bool:
        return item in self.__items

    def __del__(self) -> None:
        # clear() replaces the original index-based deletion loop, which
        # raised IndexError as soon as the list started shrinking.
        self.__items.clear()

    def __len__(self) -> int:
        return len(self.__items)

    def __repr__(self) -> str:
        # Format: "[]" when empty, otherwise "[a,\nb,\nc]" (one item per
        # line), matching the original output exactly.
        if not self.__items:
            return "[]"
        return "[" + ",\n".join(f"{item}" for item in self.__items) + "]"

    def __str__(self) -> str:
        return self.__repr__()

    def __bool__(self) -> bool:
        return len(self) > 0
class ParticleManager(_BaseManager):
    """
    Creates a storage for the particles with more functions

    Methods:
    -----------
    draw(surface):
        draw all managed particles on the given surface
    update(dt, rects):
        shrink, apply gravity, move and collide the particles
    get_particles():
        return the list of managed particles
    add_particle(x, y, vel_x, vel_y, shrink_amount, size, color,
                 collision_tolerance, gravity):
        create and store a new particle
    """

    def __init__(self):
        super().__init__()  # initialise the base class's (mangled) storage too
        self.__items: List["Particle"] = []  # particles managed by this class
        self.__i = 0  # cursor for __next__ (the original never set it)

    def draw(self, surface: "pygame.surface.Surface") -> None:
        """Draw every managed particle onto *surface*."""
        for particle in self.__items:
            particle.draw(surface)

    def update(self, dt: float = 1, rects: Optional[List["pygame.Rect"]] = None) -> None:
        """Advance all particles by *dt* and drop the fully shrunk ones.

        *rects* are obstacle rectangles the particles may collide with.
        The original used a mutable default (``rects=[]``); ``None`` avoids
        sharing one list object across every call.
        """
        obstacles = [] if rects is None else rects
        for particle in self.__items:
            particle.update(dt, obstacles)
        # Remove particles that have shrunk away.
        self.__items = [particle for particle in self.__items if particle.size > 0]

    def get_particles(self) -> List["Particle"]:
        """Return the internal list of particles (not a copy)."""
        return self.__items

    def add_particle(
        self,
        x: int,
        y: int,
        vel_x: float,
        vel_y: float,
        shrink_amount: float,
        size: float = 7,
        color: Tuple[int, int, int] = (255, 255, 255),
        collision_tolerance: float = 10,
        gravity: float = 0.1,
    ) -> None:
        """Create a Particle from the given parameters and store it."""
        self.__items.append(
            Particle(
                x,
                y,
                vel_x,
                vel_y,
                shrink_amount,
                size,
                color,
                collision_tolerance,
                gravity,
            )
        )

    # Container protocol: implemented here against this class's own storage,
    # because the methods inherited from _BaseManager operate on that class's
    # name-mangled attributes and would otherwise report an empty manager.
    def __len__(self) -> int:
        return len(self.__items)

    def __bool__(self) -> bool:
        return bool(self.__items)

    def __contains__(self, item) -> bool:
        return item in self.__items

    def __getitem__(self, item) -> "Particle":
        return self.__items[item]

    def __iter__(self) -> Iterable["Particle"]:
        return iter(self.__items)

    def __next__(self) -> "Particle":
        if not self.__items:
            # The original recursed forever on an empty manager; stop instead.
            raise StopIteration
        try:
            item = self.__items[self.__i]
            self.__i += 1
        except IndexError:
            self.__i = 0
            item = self.__next__()
        return item

    def __reversed__(self) -> List["Particle"]:
        # The original called reversed() but discarded the iterator and
        # returned the list unreversed; return an actually reversed copy.
        return list(reversed(self.__items))

    def __repr__(self) -> str:
        if not self.__items:
            return "[]"
        return "[" + ",\n".join(f"{particle}" for particle in self.__items) + "]"

    def __str__(self) -> str:
        return self.__repr__()
# Public API: only the concrete manager is exported via ``from ... import *``.
__all__ = ["ParticleManager"]
| StarcoderdataPython |
52081 | '''
0052. N-Queens II
The n-queens puzzle is the problem of placing n queens on an n×n chessboard such that no two queens attack each other.
Given an integer n, return the number of distinct solutions to the n-queens puzzle.
Example:
Input: 4
Output: 2
Explanation: There are two distinct solutions to the 4-queens puzzle as shown below.
[
[".Q..", // Solution 1
"...Q",
"Q...",
"..Q."],
["..Q.", // Solution 2
"Q...",
"...Q",
".Q.."]
]
'''
class Solution:
    def totalNQueens(self, n: int) -> int:
        """Count the distinct solutions to the n-queens puzzle.

        Depth-first backtracking, one row at a time.  A square (row, col) is
        attacked iff its column, its main diagonal (row - col) or its
        anti-diagonal (row + col) is already occupied.
        """
        used_cols = set()
        used_main = set()   # row - col, constant along "\" diagonals
        used_anti = set()   # row + col, constant along "/" diagonals

        def place(row: int) -> int:
            # All rows filled: exactly one complete placement found.
            if row == n:
                return 1
            total = 0
            for col in range(n):
                if (col in used_cols
                        or (row - col) in used_main
                        or (row + col) in used_anti):
                    continue
                used_cols.add(col)
                used_main.add(row - col)
                used_anti.add(row + col)
                total += place(row + 1)
                used_anti.discard(row + col)
                used_main.discard(row - col)
                used_cols.discard(col)
            return total

        return place(0)
| StarcoderdataPython |
3316500 | <reponame>FarhanShoukat/DigitRecognition1438
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn import preprocessing
from sklearn.metrics import accuracy_score
from datetime import datetime
import time
# strftime/strptime format shared by the timing code below (HH:MM:SS).
fmt = '%H:%M:%S'


def get_current_time():
    """Return the current local time formatted as HH:MM:SS."""
    # The original also called time.ctime() and discarded the result; that
    # call has no side effects, so it was removed.
    return time.strftime(fmt)
# Columns 42..752 of the input CSVs are used as features.
feature_cols = range(42, 753)

# Load the labelled training split and the labelled Kaggle test subset.
train_x = np.genfromtxt(fname='trainData.csv', delimiter=',', dtype=int, skip_header=1, usecols=feature_cols)
train_y = np.genfromtxt(fname='trainLabels.csv', dtype=int, skip_header=1)
test_x = np.genfromtxt(fname='kaggleTestSubset.csv', delimiter=',', dtype=int, skip_header=1, usecols=feature_cols)
test_y = np.genfromtxt(fname='kaggleTestSubsetLabels.csv', dtype=int, skip_header=1)

# Normalise each sample (row) before computing neighbour distances.
train_x = preprocessing.normalize(train_x, axis=1, copy=True, return_norm=False)
test_x = preprocessing.normalize(test_x, axis=1, copy=True, return_norm=False)

start_stamp = get_current_time()
print('Fitting Data')
classifier = KNeighborsClassifier(n_neighbors=3, weights='distance', algorithm='auto', p=3,
                                  metric='minkowski', n_jobs=-1)
classifier.fit(train_x, train_y)
fit_stamp = get_current_time()
print("Time taken to train(sec):", datetime.strptime(fit_stamp, fmt) - datetime.strptime(start_stamp, fmt))

print('Predicting Data')
predictions = classifier.predict(test_x)
predict_stamp = get_current_time()
print("Time taken to predict(sec):", datetime.strptime(predict_stamp, fmt) - datetime.strptime(fit_stamp, fmt))

# Write "ID,Label" rows: 1-based row id followed by the predicted digit.
np.savetxt('result.csv', np.dstack((np.arange(1, predictions.size+1), predictions))[0], "%d,%d", header="ID,Label",
           comments='')
print("Accuracy is", accuracy_score(test_y, predictions)*100)
| StarcoderdataPython |
1762982 | #!/usr/bin/env python
#
# XML Parser.
# file : XmlParser.py
# author : <NAME> <<EMAIL>>
# since : 2011-07-20
# last modified : 2011-07-22
from xml.dom.minidom import parse
from lib.Functions import asciify
class XmlDataFormatException(Exception):
    """Raised when an XML file does not match the expected structure."""
class XmlDocument(object):
    """Minimal handle on a parsed XML file and its document element."""

    def __init__(self, filename):
        """Parse *filename*, keeping the DOM tree and its root node."""
        document = parse(filename)
        self._xml_doc = document
        self._root_node = document.documentElement
class XmlReader(object):
    """Base class providing parse-result storage for derived readers.

    NOTE(review): ``_data`` is a *class-level* dict, so every reader instance
    shares (and ``__del__`` clears) the same storage -- confirm this is
    intentional before keeping two readers alive at once.
    """
    # Shared class attribute, populated by the subclasses' _parse_* methods.
    _data={}
    def __del__(self):
        # Clears the shared class-level dict, not per-instance state.
        self._data.clear()
class InstructionReader(XmlReader):
    """A class to parse an instruction.xsd-validated ISA specification.

    Python 2 code (uses ``except Exception, e`` syntax).  Parsing results
    are accumulated into the inherited ``_data`` mapping and exposed via
    ``self.data`` / ``get_data()``.
    """
    def __init__(self, filename):
        # *filename* is a directory prefix; 'instructions.xml' is appended.
        self._document=XmlDocument(filename + 'instructions.xml')
        self._root_node=self._document._root_node
        self._parse_root()
        self._parse_formats()
        self._parse_assembler()
        self._parse_instructions()
        self.data = self._data
    def _parse_root(self):
        """Read the root element's language/size/api attributes."""
        language = asciify(self._root_node.attributes['language'].value)
        size = asciify(self._root_node.attributes['size'].value)
        api = asciify(self._root_node.attributes['api'].value)
        self._data['language'] = language
        # Sizes are stored as hexadecimal strings in the XML.
        self._data['size'] = int(size, 16)
        self._data['api'] = api
    def _parse_formats(self):
        """Collect instruction formats as (type, size, fields, fetch) tuples.

        Each field is a (name, start, end) bit-range tuple.
        """
        data=[]
        f_root = self._root_node.getElementsByTagName('formats')[0]
        formats = f_root.getElementsByTagName('format')
        for instruction_format in formats:
            f_type = asciify(
                instruction_format.attributes['type'].value)
            f_size = int(asciify(
                instruction_format.attributes['size'].value), 16)
            f_fetch = int(asciify(
                instruction_format.attributes['fetch'].value), 16)
            fields = instruction_format.getElementsByTagName('field')
            f_data=[]
            for field in fields:
                fd_name = asciify(field.attributes['name'].value)
                fd_start = int(asciify(field.attributes['start'].value), 16)
                fd_end = int(asciify(field.attributes['end'].value), 16)
                f_data.append((fd_name, fd_start, fd_end))
            data.append((f_type, f_size, tuple(f_data), f_fetch))
        self._data['formats'] = tuple(data)
    def _parse_instructions(self):
        """Collect per-instruction records.

        Each record is a tuple of (name, format, signature, expression,
        preset values, syntax symbols, implementation methods, replacements).
        """
        data=[]
        instructions=self._root_node.getElementsByTagName('instruction')
        for instruction in instructions:
            # add attributes
            i_name = asciify(instruction.attributes['name'].value)
            i_format = asciify(instruction.attributes['format'].value)
            # add signatures
            i_signature=[]
            s_root = instruction.getElementsByTagName('signature')[0]
            fields = s_root.getElementsByTagName('field')
            for field in fields:
                f_name = asciify(field.attributes['name'].value)
                i_signature.append(f_name)
            i_signature = tuple(i_signature)
            # add preset values
            i_values=[]
            f_root = instruction.getElementsByTagName('fields')[0]
            fields = f_root.getElementsByTagName('field')
            for field in fields:
                f_name = asciify(field.attributes['name'].value)
                f_value = asciify(field.attributes['value'].value)
                i_values.append((f_name, int(f_value, 16)))
            i_values = tuple(i_values)
            # add syntax
            i_syntax=[]
            s_root = instruction.getElementsByTagName('syntax')[0]
            fields = s_root.getElementsByTagName('field')
            expression = s_root.getElementsByTagName('expression')[0]
            symbols = s_root.getElementsByTagName('symbol')
            for symbol in symbols:
                s_kind=symbol.attributes['type'].value
                s_match=symbol.attributes['matches'].value
                i_syntax.append((asciify(s_match), asciify(s_kind)))
            i_expression = asciify(expression.attributes['pattern'].value)
            i_syntax = tuple(i_syntax)
            # add implementation
            i_implementation=[]
            im_root = instruction.getElementsByTagName('implementation')[0]
            methods = im_root.getElementsByTagName('method')
            for method in methods:
                im_name = asciify(method.attributes['name'].value)
                im_args = asciify(method.attributes['args'].value)
                im_args = im_args.split()
                # Arguments that look like hex literals are converted to int.
                for i in range(len(im_args)):
                    if im_args[i][:2] == '0x':
                        im_args[i] = int(im_args[i], 16)
                i_implementation.append(tuple((im_name, tuple(im_args))))
            i_implementation = tuple(i_implementation)
            # add replacements (optional element; missing ones are skipped)
            # NOTE(review): the broad ``except ... pass`` also swallows the
            # XmlDataFormatException raised just above -- confirm intended.
            i_replacements=[]
            try:
                r_root = instruction.getElementsByTagName('replacements')[0]
                replacements = r_root.getElementsByTagName('replacement')
                for replacement in replacements:
                    r_name = asciify(replacement.attributes['name'].value)
                    r_group = asciify(replacement.attributes['group'].value)
                    r_type = asciify(replacement.attributes['type'].value)
                    try:
                        i_replacements = (
                            r_name, int(r_group, 16), r_type)
                    except Exception, e:
                        raise XmlDataFormatException(e)
            except Exception, e:
                pass
            instruction=(i_name, i_format, i_signature,
                         i_expression, i_values, i_syntax,
                         i_implementation, i_replacements)
            data.append(instruction)
        self._data['instructions'] = tuple(data)
    def _parse_assembler(self):
        """{ name:<string> : pattern:<string> } -> assembler_syntax

        Reads an XML instruction specification and looks for the
        assembler syntax (label/reference/comment/hex patterns).
        """
        assembler = self._root_node.getElementsByTagName('assembler')[0]
        assembler_syntax = assembler.getElementsByTagName('syntax')[0]
        label = assembler_syntax.getElementsByTagName('label')[0]
        reference = assembler_syntax.getElementsByTagName('reference')[0]
        comment = assembler_syntax.getElementsByTagName('comment')[0]
        hexadacimal = assembler_syntax.getElementsByTagName('hex')[0]
        data=[]
        for element in [label, reference, comment, hexadacimal]:
            name = element.tagName.encode('ascii')
            pattern = element.attributes['pattern'].value.encode('ascii')
            data.append((name, pattern))
        self._data['assembler']=tuple(data)
    def get_data(self):
        """Return the accumulated parse results."""
        return self._data
class MachineReader(XmlReader):
    """A class to parse a machine.xsd-validated machine specification.

    Python 2 code (uses ``except Exception, e`` syntax).  Results are
    accumulated into the inherited ``_data`` mapping and exposed via
    ``self.data`` / ``get_data()``.
    """
    # Class-level placeholders; parsing writes into _data instead.
    _language=None
    _address_space=None
    _memory={}
    _registers={}
    _register_mappings={}
    def __init__(self, filename):
        # *filename* is a directory prefix; 'machine.xml' is appended.
        self._document=XmlDocument(filename + 'machine.xml')
        self._root_node=self._document._root_node
        self._parse_root()
        self._parse_language()
        self._parse_memory()
        self._parse_registers()
        self._parse_pipeline()
        self.data = self._data
    def _parse_root(self):
        """Read the machine name from the root element."""
        name = asciify(self._root_node.attributes['name'].value)
        self._data['name'] = name
    def _parse_language(self):
        """Read the target language name; re-raise as a format error."""
        try:
            language = self._root_node.getElementsByTagName('language')[0]
            l_name = asciify(language.attributes['name'].value)
            self._data['language'] = l_name
        except Exception, e:
            raise XmlDataFormatException(e.message)
    def _parse_memory(self):
        """Stores a segmentation profile for the machine's memory

        Reads an xml machine specification and looks for details
        of its memory organization: (address_space, word, addressable)
        followed by (name, start, end) tuples for text/data/stack.
        """
        memory=[]
        try:
            memory_node=self._root_node.getElementsByTagName('memory')[0]
            address_space = asciify(memory_node.attributes['address_space'].value)
            word = asciify(memory_node.attributes['word'].value)
            addressable = asciify(memory_node.attributes['addressable'].value)
            for attribute in [address_space, word, addressable]:
                #readable, but a potential bug for non-hex data
                memory.append(int(attribute, 16))
            text_node = memory_node.getElementsByTagName('text')[0]
            data_node = memory_node.getElementsByTagName('data')[0]
            stack_node = memory_node.getElementsByTagName('stack')[0]
            for segment in [text_node, data_node, stack_node]:
                s_name = asciify(segment.tagName)
                s_start = int(asciify(segment.attributes['start'].value), 16)
                s_end = int(asciify(segment.attributes['end'].value), 16)
                memory.append((s_name, s_start, s_end))
        except Exception, e:
            raise XmlDataFormatException(e.message)
        # TODO: address space is currently hard-coded to begin at 0x00.
        # this is just a marker for future refactoring
        self._data['memory']=tuple(memory)
    def _parse_registers(self):
        """Collect registers as (name, number, size, write, profile,
        visible, preset) tuples."""
        registers=[]
        registers_node=self._root_node.getElementsByTagName('registers')
        register_node_list=registers_node[0].getElementsByTagName('register')
        for register in register_node_list:
            name = asciify(register.attributes['name'].value)
            number = int(asciify(register.attributes['number'].value), 16)
            size = int(asciify(register.attributes['size'].value), 16)
            write = asciify(register.attributes['write'].value)
            # String-to-bool conversion: only the literal 'True' enables it.
            write = (write == 'True' and True or False)
            profile = asciify(register.attributes['profile'].value)
            visible = asciify(register.attributes['visible'].value)
            visible = (visible == 'True' and True or False)
            #prepared to be nice and leniant for this one
            preset = 0
            try:
                preset = int(asciify(register.attributes['preset'].value), 16)
            except:
                pass
            registers.append((name, number, size, write, profile,
                              visible, preset))
        self._data['registers'] = tuple(registers)
    def _parse_pipeline(self):
        """Collect the pipeline stage names and flags attribute."""
        try:
            pipeline = self._root_node.getElementsByTagName('pipeline')[0]
            stages = pipeline.getElementsByTagName('stage')
            flags = asciify(pipeline.attributes['flags'].value)
            pipeline_stages = []
            for stage in stages:
                name = asciify(stage.attributes['name'].value)
                pipeline_stages.append(name)
        except Exception, e:
            raise XmlDataFormatException(e.message)
        self._data['pipeline'] = tuple((pipeline_stages, flags))
    def get_data(self):
        """Return the accumulated parse results."""
        return self._data
if __name__ == '__main__':
    # Ad-hoc smoke test (Python 2 print syntax): parse both specification
    # files and dump one instruction record.  Deleting the readers clears
    # the shared XmlReader._data between the two runs.
    reader=InstructionReader('../config/instructions.xml')
    print reader.data['instructions'][13]
    del reader
    reader=MachineReader('../config/machine.xml')
    #print(reader.data)
    del reader
| StarcoderdataPython |
1600169 | <reponame>rwtaylor/mpcr-analyses-pipelines
#!/usr/bin/env python3
import sys
import re
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# This is copied from https://github.com/lh3/readfq
# Fast parsing of fasta/fastq
def readfq(fp):
    """Yield ``(name, sequence, quality)`` triples from a FASTA/FASTQ stream.

    *fp* is any iterable of newline-terminated lines.  For FASTA records
    (and truncated FASTQ records whose quality section is shorter than the
    sequence) *quality* is ``None``.  The record name is the header text up
    to the first space.
    """
    pending = None  # header line buffered from the previous iteration
    while True:
        if not pending:
            # Scan forward to the next record header ('>' FASTA, '@' FASTQ).
            for line in fp:
                if line[0] in '>@':
                    pending = line[:-1]
                    break
            if not pending:
                break  # stream exhausted without another header
        name = pending[1:].partition(" ")[0]
        pending = None
        chunks = []
        # Accumulate sequence lines until a new header or the '+' separator.
        for line in fp:
            if line[0] in '@+>':
                pending = line[:-1]
                break
            chunks.append(line[:-1])
        if not pending or pending[0] != '+':
            # FASTA record (or a record with no quality section at all).
            yield name, ''.join(chunks), None
            if not pending:
                break
        else:
            # FASTQ: read quality lines until they cover the sequence length.
            seq = ''.join(chunks)
            qual_chunks = []
            consumed = 0
            for line in fp:
                qual_chunks.append(line[:-1])
                consumed += len(line) - 1
                if consumed >= len(seq):
                    pending = None
                    yield name, seq, ''.join(qual_chunks)
                    break
            if pending:
                # EOF before enough quality characters: emit as FASTA.
                yield name, seq, None
                break
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#fasta_in = open("/Users/ryan/Projects/testpy/test.fasta")
#regions_out = open('/Users/ryan/Projects/testpy/out.fasta', 'w+')
# Regex matching runs of 1000 or more 'N' bases (assembly gaps).
nmatcher = re.compile(r'(N{1000,})')

# First go through each sequence and break scaffolds into contig regions at
# the N-blocks.  Each region is stored as (length, chrom, start, end).
regions = []
for name, seq, qual in readfq(sys.stdin):  # iterate sequences on stdin
    seq_start = 0
    if nmatcher.search(seq):
        for m in nmatcher.finditer(seq):
            # Region ends one character before the N-block and the next one
            # resumes one character after it.
            # NOTE(review): this drops the bases at m.start()-1 and m.end();
            # confirm the intended coordinate convention (0- vs 1-based).
            seq_end = m.start() - 1
            regions.append((seq_end - seq_start, name, seq_start, seq_end))
            seq_start = m.end() + 1
        # Trailing sequence after the last N-block, if any.
        if seq_start < len(seq):
            regions.append((len(seq) - seq_start, name, seq_start, len(seq)))
    else:
        # No N-blocks: the whole sequence is a single region.
        regions.append((len(seq) - seq_start, name, seq_start, len(seq)))

# Pack the regions into tasks.  A task holds at most task_seq_limit total
# bases, except that a single oversized region becomes its own task.
task_seq_limit = 5e6   # maximum total region length per task
region_seq_min = 0     # minimum region length to be included at all
task_counter = 0       # sequential task id
task_seq_cumsum = 0    # bases accumulated in the current task
tasks = []
for length, name, seq_start, seq_end in regions:
    if length >= region_seq_min:
        # Flush the current task before it would exceed the limit.
        if task_seq_cumsum > 0 and length + task_seq_cumsum > task_seq_limit:
            tasks.append(str(task_counter) + "," + str(task_seq_cumsum) + "," + task)
            task_counter += 1
            task_seq_cumsum = 0
        if task_seq_cumsum == 0:
            task = "--region " + name + ":" + str(seq_start) + "-" + str(seq_end)
            task_seq_cumsum = length
        else:
            task += " --region " + name + ":" + str(seq_start) + "-" + str(seq_end)
            task_seq_cumsum += length
# BUG FIX: the original never flushed the last accumulated task, so the
# final batch of regions was silently dropped from the output.
if task_seq_cumsum > 0:
    tasks.append(str(task_counter) + "," + str(task_seq_cumsum) + "," + task)

for line in tasks:
    sys.stdout.write(line + "\n")
| StarcoderdataPython |
22981 | <gh_stars>1-10
# Copyright (c) 2020. <NAME>, Ghent University
from os.path import join as jp
import numpy as np
from tomopal.crtomopy.crtomo.crc import (
Crtomo,
datread,
import_res,
mesh_geometry,
mtophase,
)
from ..parent import inventory
from ...geoview.diavatly import model_map # To plot results
# %% Directories
# Input here the folders to structure your project. It is not necessary to previously create them
# (except the data folder): they will be automatically generated once you
# initialize a crtomo object.
# Note: the function 'jp' simply joins the arguments to build a path.
main_dir = inventory.hello()  # Current working directory of the project
data_dir = jp(main_dir, "data", "demo")  # Data files directory
mesh_dir = jp(main_dir, "mesh", "demo")  # Mesh files directory
iso_dir = jp(main_dir, "iso", "demo")  # ISO file dir
ref_dir = jp(main_dir, "ref", "demo")  # Reference model files dir
start_dir = jp(main_dir, "start", "demo")  # Start model files dir
results_dir = jp(main_dir, "results", "demo")  # Results files directory
# %% Exe names
# Input here the path to your exe files.
mesh_exe_name = jp(main_dir, "mesh.exe")
crtomo_exe_name = jp(main_dir, "crtomo.exe")
# %% Create crtomo object
# Folders will be generated here if they don't exist already.
myinv = Crtomo(
    working_dir=main_dir,
    data_dir=data_dir,
    mesh_dir=mesh_dir,
    iso_dir=iso_dir,
    ref_dir=ref_dir,
    start_dir=start_dir,
    crtomo_exe=crtomo_exe_name,
    mesh_exe=mesh_exe_name,
)
# %% Generating the mesh
# Data file name A B M N in meters
df = jp(data_dir, "demo_elecs.dat")  # Path to electrode configuration file
dat = datread(df)  # Use built-in function to extract data (optional)
# Electrode spacing in meters
es = 5
# Electrodes elevation
# Data elevation file name X Z in meters
ef = jp(data_dir, "demo_elevation.dat")
elev = datread(ef)  # Use built-in function to extract data (optional)
# %% Build the mesh
# The following command generates the mesh in the folder indicated previously.
# It requires 3 arguments:
# the numpy array of electrodes position of shape (n, 4) (required)
# the electrode spacing (required)
# the elevation data (optional)
myinv.meshmaker(abmn=dat[:, [0, 1, 2, 3]], electrode_spacing=es, elevation_data=elev)
# If you already have generated a mesh, comment the line above and instead
# load the previously generated Mesh.dat file as described below.
# %% Read the mesh data (number of cells, blocks coordinates, x-y coordinates of the center of the blocks) from Mesh.dat
mshf = jp(mesh_dir, "Mesh.dat")  # Path to the generated 'Mesh.dat' file.
ncol, nlin, nelem, blocks, centerxy = mesh_geometry(mshf)  # Extract mesh properties
# %% Build configuration file
# 0 Mesh.dat file
mesh_file = mshf
# 1 elec.dat file
elec_file = jp(mesh_dir, "elec.dat")
# 2 Data file
data_file = jp(data_dir, "demo_data.dat")
# 3 Results folder file
# Specify the path where the results will be loaded
frname = (
    ""  # If you want to save the results in a sub-folder in the main results folder
)
result_folder = jp(results_dir, frname)
# 8 Flag for reference model constraint (0/1)
reference_model = 0
# Reference model file path (None disables the constraint).
reference_model_file = None
# %% 12 File for reference model (model weights)
reference_weights_file = None
# You can use the tool ModelMaker from mohinh to interactively create prior models, and automatically save the results
# in a dat file if you provide a file name.
# Otherwise you can access the final results with (ModelMaker object).final_results and export it yourself.
# Example with a background resistivity of 100 ohm.m :
# rfwm = ModelMaker(blocks=blocks, values_log=1, bck=100)
# my_model = rfwm.final_results
# Alternatively, use a simpler approach to produce a reference model file:
# with open(reference_weights_file, 'w') as rw:
#     rw.write(str(nelem)+'\n')
#     [rw.write('0.1'+'\n') for i in range(nelem)]
#     rw.close()
# %% 22 Maximum number of iterations
iterations = 20
# 23 Min data RMS
rms = 1.0000
# 24 Flag for DC inversion (0 = with IP / 1 = only DC)
dc = 1
# 25 Flag for robust inversion (0/1)
robust = 1
# 26 Flag for checking polarity (0/1)
check_polarity = 1
# 27 Flag for final phase improvement (0/1)
final_phase_improvement = 1
# 29 Relative magnitude error level (%)
error_level = 2.5
# 30 Minimum absolute magnitude error (ohm)
min_abs_error = 0.00015
# 31 Error in phase (mrad)
phase_error = 0.5
# 36 Flag for MGS inversion (0/1)
mgs = 0
# 37 Beta value
beta = 0.002
# 38 Flag for starting model (0/1)
starting_model = 0
# 39 Starting model file
starting_model_file = None
# %% 19 ISO file 1
iso_file1 = jp(iso_dir, "iso.dat")
# dm = datread(starting_model_file, start=1)[:, 0]
# isom = ModelMaker(blocks=blocks, values=dm, values_log=1, bck=1)
# #
# with open(iso_file1, 'w') as rw:
#     rw.write(str(nelem)+'\n')
#     [rw.write('{} 1'.format(str(i))+'\n') for i in isom.final_results]
#     rw.close()
# %% Generate configuration file
# If erase = 1, every item in the result folder will be deleted. If you don't want that, pick 0 instead.
# Use help(Crtomo.write_config) to see which parameters you can implement.
myinv.write_config(
    erase=1,
    mesh_file=mesh_file,
    elec_file=elec_file,
    data_file=data_file,
    result_folder=result_folder,
    reference_model=reference_model,
    reference_model_file=reference_model_file,
    reference_weights_file=reference_weights_file,
    iso_file1=iso_file1,
    iterations=iterations,
    rms=rms,
    dc=dc,
    robust=robust,
    check_polarity=check_polarity,
    final_phase_improvement=final_phase_improvement,
    error_level=error_level,
    min_abs_error=min_abs_error,
    phase_error=phase_error,
    mgs=mgs,
    beta=beta,
    starting_model=starting_model,
    starting_model_file=starting_model_file,
)
# Forward modeling example :
# # Results folder file
# fwname = 'fwd'  # If you want to save the results in a sub-folder in the main results folder
#
# result_folder_fwd = jp(results_dir, fwname)
#
# myfwd = Crtomo(working_dir=cwd,
#                data_dir=data_dir,
#                mesh_dir=mesh_dir,
#                crtomo_exe=crtomo_exe_name)
#
# # # res2mod(jp(result_folder, 'rho1.txt'))
# myfwd.write_config(mesh_file=mesh_file,
#                    elec_file=elec_file,
#                    fwd_only=1,
#                    result_folder=result_folder_fwd,
#                    starting_model_file=jp(cwd, 'rho1.dat'))
# myfwd.run()
# %% Run CRTOMO
# This will make your Crtomo object run the inversion. The configuration files are
# automatically saved in the results folder
myinv.run()
# %% Import results
# NOTE(review): `ipt` only exists after the dc == 0 branch and `files` only
# after the dc == 1 branch; code below must match the chosen mode.
if dc == 0:  # If you have IP results to load
    res, ip = import_res(result_folder=result_folder)
    m2p = mtophase(ncycles=1, pulse_l=3.5, tmin=0.02, tmax=2.83)
    ipt = ip[:] * m2p
else:  # if you only have resistivity data to load
    res, files = import_res(result_folder=result_folder, return_file=1)
# `rest` holds log10 resistivities: the cut is np.log10(4500) and values are
# converted back with 10 ** rest before plotting.
rest = np.copy(res[0])
# If you want to convert a crtomo result file in a prior model for future inversions for example:
# modf = res2mod(files[0])
# Let's plot the results:
# Remove outliers (arbitrary)
cut = np.log10(4500)
rest[rest > cut] = cut
# Define a linear space for the color map
res_levels = 10 ** np.linspace(min(rest), cut, 10)
rtp = 10 ** np.copy(rest)
# Use the model_map function to display the computed resistivity:
# log=1 because we want a logarithmic scale.
# cbpos is for the position of the color bar.
model_map(
    polygons=blocks,
    vals=rtp,
    log=1,
    cbpos=0.4,
    levels=res_levels,
    folder=result_folder,
    figname="demo_res_levels",
)
# %% if IP
if dc == 0:
    ip = np.copy(res[1])
    # crtomo works in phase so we perform the conversion to go back to "mv/v".
    m2p = mtophase(ncycles=1, pulse_l=3.5, tmin=0.02, tmax=2.83)
    ipt = np.copy(np.abs(ip / m2p))
    # Arbitrarily cut outliers
    hist = np.histogram(ipt, bins="auto")
    cut = 260
    ipt[ipt > cut] = cut
    # Define levels to be plotted
    ip_levels = [0, 10, 20, 30, 40, 50, 60, 70, 260]
    model_map(
        polygons=blocks,
        vals=ipt,
        log=0,
        levels=ip_levels,
        folder=result_folder,
        figname="demo_ip_level",
    )
| StarcoderdataPython |
3217403 | # -*- coding: utf-8 -*-
#
# Copyright 2017-2019 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import pytest
import networkx as nx
import pandas as pd
from stellargraph.mapper import DirectedGraphSAGENodeGenerator
from stellargraph.core.graph import StellarGraph, StellarDiGraph
def create_simple_graph():
    """
    Build a tiny directed chain graph (1 -> 2 -> 3) for testing.

    Each node carries a single feature ``f0`` equal to minus its integer id.

    Returns:
        A StellarDiGraph with 3 nodes and 2 edges.
    """
    graph = nx.DiGraph()
    graph.add_edge(1, 2)
    graph.add_edge(2, 3)
    node_ids = list(graph.nodes())
    feature_frame = pd.DataFrame(
        [(node_id, -1.0 * node_id) for node_id in node_ids],
        columns=["id", "f0"],
    ).set_index("id")
    return StellarDiGraph(graph, node_features=feature_frame)
class TestDirectedNodeGenerator(object):
    """
    Test various aspects of the directed GraphSAGE node generator, with the focus
    on the sampled neighbourhoods and the extracted features.

    All tests run on the 3-node chain graph 1 -> 2 -> 3 created by
    ``create_simple_graph``, where node ``i`` carries the feature ``-i``.
    A feature value of 0.0 marks a sampled "no neighbour" slot.
    """

    def sample_one_hop(self, num_in_samples, num_out_samples):
        # Helper: run one-hop sampling with the given in/out fan-outs and
        # verify the feature tree (root, in-neighbours, out-neighbours).
        g = create_simple_graph()
        nodes = list(g.nodes())
        in_samples = [num_in_samples]
        out_samples = [num_out_samples]
        gen = DirectedGraphSAGENodeGenerator(g, len(g), in_samples, out_samples)
        flow = gen.flow(node_ids=nodes, shuffle=False)

        # Obtain tree of sampled features; a binary tree over hops has
        # 2^(hops+1) - 1 feature arrays.
        features = gen.sample_features(nodes)
        num_hops = len(in_samples)
        tree_len = 2 ** (num_hops + 1) - 1
        assert len(features) == tree_len

        # Check node (root) features: node i must carry -i.
        node_features = features[0]
        assert len(node_features) == len(nodes)
        assert node_features.shape == (len(nodes), 1, 1)
        for idx, node in enumerate(nodes):
            assert node_features[idx, 0, 0] == -1.0 * node

        # Check in-node features (sampled predecessors).
        in_features = features[1]
        assert in_features.shape == (len(nodes), in_samples[0], 1)
        for n_idx in range(in_samples[0]):
            for idx, node in enumerate(nodes):
                if node == 1:
                    # None -> 1 (node 1 has no predecessor; zero-filled slot)
                    assert in_features[idx, n_idx, 0] == 0.0
                elif node == 2:
                    # 1 -> 2
                    assert in_features[idx, n_idx, 0] == -1.0
                elif node == 3:
                    # 2 -> 3
                    assert in_features[idx, n_idx, 0] == -2.0
                else:
                    assert False

        # Check out-node features (sampled successors).
        out_features = features[2]
        assert out_features.shape == (len(nodes), out_samples[0], 1)
        for n_idx in range(out_samples[0]):
            for idx, node in enumerate(nodes):
                if node == 1:
                    # 1 -> 2
                    assert out_features[idx, n_idx, 0] == -2.0
                elif node == 2:
                    # 2 -> 3
                    assert out_features[idx, n_idx, 0] == -3.0
                elif node == 3:
                    # 3 -> None (node 3 has no successor; zero-filled slot)
                    assert out_features[idx, n_idx, 0] == 0.0
                else:
                    assert False

    def test_one_hop(self):
        # Exercise one-hop sampling across fan-out combinations, including
        # the degenerate zero-sample cases.
        # Test 1 in-node and 1 out-node sampling
        self.sample_one_hop(1, 1)
        # Test 0 in-nodes and 1 out-node sampling
        self.sample_one_hop(0, 1)
        # Test 1 in-node and 0 out-nodes sampling
        self.sample_one_hop(1, 0)
        # Test 0 in-nodes and 0 out-nodes sampling
        self.sample_one_hop(0, 0)
        # Test 2 in-nodes and 3 out-nodes sampling
        self.sample_one_hop(2, 3)

    def test_two_hop(self):
        # Two-hop sampling with fan-out 1 at every hop: the feature "tree"
        # has 7 arrays (root, in, out, in-in, in-out, out-in, out-out).
        g = create_simple_graph()
        nodes = list(g.nodes())
        gen = DirectedGraphSAGENodeGenerator(
            g, batch_size=len(g), in_samples=[1, 1], out_samples=[1, 1]
        )
        flow = gen.flow(node_ids=nodes, shuffle=False)

        features = gen.sample_features(nodes)
        num_hops = 2
        tree_len = 2 ** (num_hops + 1) - 1
        assert len(features) == tree_len

        # Check node features
        node_features = features[0]
        assert len(node_features) == len(nodes)
        assert node_features.shape == (len(nodes), 1, 1)
        for idx, node in enumerate(nodes):
            assert node_features[idx, 0, 0] == -1.0 * node

        # Check in-node features
        in_features = features[1]
        assert in_features.shape == (len(nodes), 1, 1)
        for idx, node in enumerate(nodes):
            if node == 1:
                # *None -> 1
                assert in_features[idx, 0, 0] == 0.0
            elif node == 2:
                # *1 -> 2
                assert in_features[idx, 0, 0] == -1.0
            elif node == 3:
                # *2 -> 3
                assert in_features[idx, 0, 0] == -2.0
            else:
                assert False

        # Check out-node features
        out_features = features[2]
        assert out_features.shape == (len(nodes), 1, 1)
        for idx, node in enumerate(nodes):
            if node == 1:
                # 1 -> *2
                assert out_features[idx, 0, 0] == -2.0
            elif node == 2:
                # 2 -> *3
                assert out_features[idx, 0, 0] == -3.0
            elif node == 3:
                # 3 -> *None
                assert out_features[idx, 0, 0] == 0.0
            else:
                assert False

        # Check in-in-node features (predecessor of the predecessor)
        in_features = features[3]
        assert in_features.shape == (len(nodes), 1, 1)
        for idx, node in enumerate(nodes):
            if node == 1:
                # *None -> None -> 1
                assert in_features[idx, 0, 0] == 0.0
            elif node == 2:
                # *None -> 1 -> 2
                assert in_features[idx, 0, 0] == 0.0
            elif node == 3:
                # *1 -> 2 -> 3
                assert in_features[idx, 0, 0] == -1.0
            else:
                assert False

        # Check in-out-node features (successor of the predecessor)
        in_features = features[4]
        assert in_features.shape == (len(nodes), 1, 1)
        for idx, node in enumerate(nodes):
            if node == 1:
                # *None <- None -> 1
                assert in_features[idx, 0, 0] == 0.0
            elif node == 2:
                # *2 <- 1 -> 2
                assert in_features[idx, 0, 0] == -2.0
            elif node == 3:
                # *3 <- 2 -> 3
                assert in_features[idx, 0, 0] == -3.0
            else:
                assert False

        # Check out-in-node features (predecessor of the successor)
        out_features = features[5]
        assert out_features.shape == (len(nodes), 1, 1)
        for idx, node in enumerate(nodes):
            if node == 1:
                # 1 -> 2 <- *1
                assert out_features[idx, 0, 0] == -1.0
            elif node == 2:
                # 2 -> 3 <- *2
                assert out_features[idx, 0, 0] == -2.0
            elif node == 3:
                # 3 -> None <- *None
                assert out_features[idx, 0, 0] == 0.0
            else:
                assert False

        # Check out-out-node features (successor of the successor)
        out_features = features[6]
        assert out_features.shape == (len(nodes), 1, 1)
        for idx, node in enumerate(nodes):
            if node == 1:
                # 1 -> 2 -> *3
                assert out_features[idx, 0, 0] == -3.0
            elif node == 2:
                # 2 -> 3 -> *None
                assert out_features[idx, 0, 0] == 0.0
            elif node == 3:
                # 3 -> None -> *None
                assert out_features[idx, 0, 0] == 0.0
            else:
                assert False
| StarcoderdataPython |
4832612 | import sys
import pytest
sys.path.append(".")
sys.path.append("..")
sys.path.append("../..")
from Hologram.Network import NetworkManager
class TestNetworkManager(object):
    """Unit tests for NetworkManager construction and connection-state tracking."""

    def test_create_non_network(self):
        # A None network type puts the manager in network-agnostic mode,
        # which is considered always active.
        networkManager = NetworkManager.NetworkManager(None, '')
        assert networkManager.networkActive == True
        assert repr(networkManager) == 'Network Agnostic Mode'

    def test_invalid_create(self):
        # Fixed: ``pytest.raises`` no longer accepts ``message`` (removed in
        # pytest 4); ``match`` actually checks the raised exception's text.
        with pytest.raises(Exception, match='Invalid network type: invalid'):
            networkManager = NetworkManager.NetworkManager('invalid')

    def test_invalid_ppp_create(self):
        with pytest.raises(Exception, match='Invalid mode type: invalid-ppp'):
            networkManager = NetworkManager.NetworkManager('invalid-ppp')

    def test_network_connected(self):
        # networkConnected() must mark the manager active.
        networkManager = NetworkManager.NetworkManager(None, '')
        networkManager.networkConnected()
        assert networkManager.networkActive == True

    def test_network_disconnected(self):
        # networkDisconnected() must mark the manager inactive.
        networkManager = NetworkManager.NetworkManager(None, '')
        networkManager.networkDisconnected()
        assert networkManager.networkActive == False
| StarcoderdataPython |
3307810 | from keras.layers import ZeroPadding2D, BatchNormalization, Input, MaxPooling2D, AveragePooling2D, Conv2D, LeakyReLU, Flatten, Conv2DTranspose, Activation, add, Lambda, GaussianNoise, merge, concatenate, Dropout
from keras_contrib.layers.normalization import InstanceNormalization
from keras.layers.core import Dense, Flatten, Reshape
from keras.models import Model, load_model
from keras.optimizers import Adam, adam
from keras.activations import tanh
from keras.regularizers import l2
import keras.backend as K
from keras.initializers import RandomNormal
import cv2
from tensorflow.contrib.kfac.python.ops import optimizer
from collections import OrderedDict
from time import localtime, strftime
from scipy.misc import imsave, toimage
import numpy as np
import json
import sys
import time
import datetime
sys.path.append('..')
import load_data
import os
import csv
class UNIT():
    def __init__(self, lr = 1e-4, date_time_string_addition=''):
        """Build and compile the full UNIT model, then start the selected run.

        Constructs per-domain encoders/generators/discriminators plus the
        shared encoder/decoder, wires them into the joint training model,
        and (at the bottom) kicks off training or generation.

        Args:
            lr: Adam learning rate for the GAN optimizers.
            date_time_string_addition: suffix appended to the timestamped
                run-folder name.
        """
        self.channels = 3 # 1 for grayscale 3 RGB
        weight_decay = 0.0001/2
        # Load data
        nr_A_train_imgs = 1
        nr_B_train_imgs = 1
        nr_A_test_imgs = 1
        nr_B_test_imgs = None
        image_folder = 'dataset-name/'

        # Fetch data during training instead of pre caching all images - might be necessary for large datasets
        self.use_data_generator = False
        if self.use_data_generator:
            print('--- Using dataloader during training ---')
        else:
            print('--- Caching data ---')
        sys.stdout.flush()

        if self.use_data_generator:
            self.data_generator = load_data.load_data(
                self.channels, generator=True, subfolder=image_folder)
            # Only a couple of cached images are kept for visualization.
            nr_A_train_imgs=2
            nr_B_train_imgs=2

        data = load_data.load_data(self.channels,
                                   nr_A_train_imgs=nr_A_train_imgs,
                                   nr_B_train_imgs=nr_B_train_imgs,
                                   nr_A_test_imgs=nr_A_test_imgs,
                                   nr_B_test_imgs=nr_B_test_imgs,
                                   subfolder=image_folder)

        self.A_train = data["trainA_images"]
        self.B_train = data["trainB_images"]
        self.A_test = data["testA_images"]
        self.B_test = data["testB_images"]
        self.testA_image_names = data["testA_image_names"]
        self.testB_image_names = data["testB_image_names"]

        # Image geometry is inferred from the cached training data; the shared
        # latent space is 1/4 resolution with 256 channels.
        self.img_width = self.A_train.shape[2]
        self.img_height = self.A_train.shape[1]
        self.latent_dim = (int(self.img_height / 4), int(self.img_width / 4), 256)
        self.img_shape = (self.img_height, self.img_width, self.channels)
        self.date_time = strftime("%Y%m%d-%H%M%S", localtime()) + date_time_string_addition

        # Hyperparameters: lambda_0 weights the adversarial terms, lambda_1/2
        # the latent/reconstruction terms, lambda_3/4 their cycle variants.
        self.learning_rate = lr
        self.beta_1 = 0.5
        self.beta_2 = 0.999
        self.lambda_0 = 10
        self.lambda_1 = 0.1
        self.lambda_2 = 100
        self.lambda_3 = self.lambda_1 # cycle
        self.lambda_4 = self.lambda_2 # cycle

        # Optimizer
        opt = Adam(self.learning_rate, self.beta_1, self.beta_2)
        optStandAdam = Adam()

        # Simple Model
        self.superSimple = self.modelSimple()
        self.superSimple.compile(optimizer=optStandAdam,
                                 loss="mae")

        # Discriminator
        self.discriminatorA = self.modelMultiDiscriminator("discriminatorA")
        self.discriminatorB = self.modelMultiDiscriminator("discriminatorB")

        # Apply L2 weight decay and N(0, 0.02) initialization to every layer
        # that supports it (mirrors the original UNIT implementation).
        for layer in self.discriminatorA.layers:
            if hasattr(layer, 'kernel_regularizer'):
                layer.kernel_regularizer= l2(weight_decay)
                layer.bias_regularizer = l2(weight_decay)
            if hasattr(layer, 'kernel_initializer'):
                layer.kernel_initializer = RandomNormal(mean=0.0, stddev=0.02)
                layer.bias_initializer = RandomNormal(mean=0.0, stddev=0.02)
        for layer in self.discriminatorB.layers:
            if hasattr(layer, 'kernel_regularizer'):
                layer.kernel_regularizer= l2(weight_decay)
                layer.bias_regularizer = l2(weight_decay)
            if hasattr(layer, 'kernel_initializer'):
                layer.kernel_initializer = RandomNormal(mean=0.0, stddev=0.02)
                layer.bias_initializer = RandomNormal(mean=0.0, stddev=0.02)

        # One binary-crossentropy loss per discriminator scale (full, 1/2, 1/4).
        self.discriminatorA.compile(optimizer=opt,
                                    loss=['binary_crossentropy',
                                          'binary_crossentropy',
                                          'binary_crossentropy'],
                                    loss_weights=[self.lambda_0,
                                                  self.lambda_0,
                                                  self.lambda_0])
        self.discriminatorB.compile(optimizer=opt,
                                    loss=['binary_crossentropy',
                                          'binary_crossentropy',
                                          'binary_crossentropy'],
                                    loss_weights=[self.lambda_0,
                                                  self.lambda_0,
                                                  self.lambda_0])

        # Encoder
        self.encoderA = self.modelEncoder("encoderA")
        self.encoderB = self.modelEncoder("encoderB")
        self.encoderShared = self.modelSharedEncoder("encoderShared")
        self.decoderShared = self.modelSharedDecoder("decoderShared")

        # Generator
        self.generatorA = self.modelGenerator("generatorA")
        self.generatorB = self.modelGenerator("generatorB")

        # Input Encoder Decoder
        imgA = Input(shape=(self.img_shape))
        imgB = Input(shape=(self.img_shape))
        encodedImageA = self.encoderA(imgA)
        encodedImageB = self.encoderB(imgB)

        sharedA = self.encoderShared(encodedImageA)
        sharedB = self.encoderShared(encodedImageB)

        outSharedA = self.decoderShared(sharedA)
        outSharedB = self.decoderShared(sharedB)

        # Input Generator: outXy is image from domain X decoded into domain y.
        outAa = self.generatorA(outSharedA)
        outBa = self.generatorA(outSharedB)

        outAb = self.generatorB(outSharedA)
        outBb = self.generatorB(outSharedB)

        guess_outBa = self.discriminatorA(outBa)
        guess_outAb = self.discriminatorB(outAb)

        # Cycle: re-encode the cross-domain translations and translate back.
        cycle_encodedImageA = self.encoderA(outBa)
        cycle_encodedImageB = self.encoderB(outAb)

        cycle_sharedA = self.encoderShared(cycle_encodedImageA)
        cycle_sharedB = self.encoderShared(cycle_encodedImageB)

        cycle_outSharedA = self.decoderShared(cycle_sharedA)
        cycle_outSharedB = self.decoderShared(cycle_sharedB)

        cycle_Ab_Ba = self.generatorA(cycle_outSharedB)
        cycle_Ba_Ab = self.generatorB(cycle_outSharedA)

        # Train only generators
        self.discriminatorA.trainable = False
        self.discriminatorB.trainable = False

        self.encoderGeneratorModel = Model(inputs=[imgA, imgB],
                              outputs=[sharedA, sharedB,
                                       cycle_sharedA, cycle_sharedB,
                                       outAa, outBb,
                                       cycle_Ab_Ba, cycle_Ba_Ab,
                                       guess_outBa[0], guess_outAb[0],
                                       guess_outBa[1], guess_outAb[1],
                                       guess_outBa[2], guess_outAb[2]])

        for layer in self.encoderGeneratorModel.layers:
            if hasattr(layer, 'kernel_regularizer'):
                layer.kernel_regularizer= l2(weight_decay)
                layer.bias_regularizer = l2(weight_decay)
            if hasattr(layer, 'kernel_initializer'):
                layer.kernel_initializer = RandomNormal(mean=0.0, stddev=0.02)
                layer.bias_initializer = RandomNormal(mean=0.0, stddev=0.02)

        # 14 loss terms: 4 latent (VAE-style), 4 image reconstructions,
        # and 6 adversarial terms (3 scales x 2 directions).
        self.encoderGeneratorModel.compile(optimizer=opt,
                              loss=[self.vae_loss_CoGAN, self.vae_loss_CoGAN,
                                    self.vae_loss_CoGAN, self.vae_loss_CoGAN,
                                    'mae', 'mae',
                                    'mae', 'mae',
                                    'binary_crossentropy', 'binary_crossentropy',
                                    'binary_crossentropy', 'binary_crossentropy',
                                    'binary_crossentropy', 'binary_crossentropy'],
                              loss_weights=[self.lambda_1, self.lambda_1,
                                            self.lambda_3, self.lambda_3,
                                            self.lambda_2, self.lambda_2,
                                            self.lambda_4, self.lambda_4,
                                            self.lambda_0, self.lambda_0,
                                            self.lambda_0, self.lambda_0,
                                            self.lambda_0, self.lambda_0])

        #===============================================================================
        # Decide what to Run
        self.trainFullModel()
        #self.load_model_and_generate_synthetic_images("name_of_saved_model", epoch) # eg. "20180504-140511_test1", 180
        #self.loadAllWeightsToModelsIncludeDisc("name_of_saved_model", epoch)
        #===============================================================================
# Architecture functions
def resblk(self, x0, k):
# first layer
x = Conv2D(filters=k, kernel_size=3, strides=1, padding="same")(x0)
x = BatchNormalization(axis=3, momentum=0.9, epsilon=1e-05, center=True)(x, training=True)
x = Activation('relu')(x)
# second layer
x = Conv2D(filters=k, kernel_size=3, strides=1, padding="same")(x)
x = BatchNormalization(axis=3, momentum=0.9, epsilon=1e-05, center=True)(x, training=True)
x = Dropout(0.5)(x, training=True)
# merge
x = add([x, x0])
return x
#===============================================================================
# Loss function from PyTorch implementation from original article
def vae_loss_CoGAN(self, y_true, y_pred):
y_pred_2 = K.square(y_pred)
encoding_loss = K.mean(y_pred_2)
return encoding_loss
#===============================================================================
# Models
def modelMultiDiscriminator(self, name):
x1 = Input(shape=self.img_shape)
x2 = AveragePooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None)(x1)
x4 = AveragePooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None)(x2)
x1_out = self.modelDiscriminator(x1)
x2_out = self.modelDiscriminator(x2)
x4_out = self.modelDiscriminator(x4)
return Model(inputs=x1, outputs=[x1_out, x2_out, x4_out], name=name)
def modelDiscriminator(self, x):
# Layer 1
x = Conv2D(64, kernel_size=3, strides=2, padding='same')(x)
x = LeakyReLU(alpha=0.01)(x)
# Layer 2
x = Conv2D(128, kernel_size=3, strides=2, padding='same')(x)
x = LeakyReLU(alpha=0.01)(x)
# Layer 3
x = Conv2D(256, kernel_size=3, strides=2, padding='same')(x)
x = LeakyReLU(alpha=0.01)(x)
# Layer 4
x = Conv2D(512, kernel_size=3, strides=2, padding='same')(x)
x = LeakyReLU(alpha=0.01)(x)
# Layer 5
x = Conv2D(1, kernel_size=1, strides=1)(x)
prediction = Activation('sigmoid')(x)
return prediction
def modelEncoder(self, name):
inputImg = Input(shape=self.img_shape)
# Layer 1
x = ZeroPadding2D(padding=(3, 3))(inputImg)
x = Conv2D(64, kernel_size=7, strides=1, padding='valid')(x)
x = LeakyReLU(alpha=0.01)(x)
# Layer 2
x = ZeroPadding2D(padding=(1, 1))(x)
x = Conv2D(128, kernel_size=3, strides=2, padding='valid')(x)
x = LeakyReLU(alpha=0.01)(x)
# Layer 3
x = ZeroPadding2D(padding=(1, 1))(x)
x = Conv2D(256, kernel_size=3, strides=2, padding='valid')(x)
x = LeakyReLU(alpha=0.01)(x)
# Layer 4: 2 res block
x = self.resblk(x, 256)
# Layer 5: 3 res block
x = self.resblk(x, 256)
# Layer 6: 3 res block
z = self.resblk(x, 256)
return Model(inputs=inputImg, outputs=z, name=name)
def modelSharedEncoder(self, name):
input = Input(shape=self.latent_dim)
x = self.resblk(input, 256)
z = GaussianNoise(stddev=1)(x, training=True)
return Model(inputs=input, outputs=z, name=name)
def modelSharedDecoder(self, name):
input = Input(shape=self.latent_dim)
x = self.resblk(input, 256)
return Model(inputs=input, outputs=x, name=name)
def modelGenerator(self, name):
inputImg = Input(shape=self.latent_dim)
# Layer 1: 1 res block
x = self.resblk(inputImg, 256)
# Layer 2: 2 res block
x = self.resblk(x, 256)
# Layer 3: 3 res block
x = self.resblk(x, 256)
# Layer 4:
x = Conv2DTranspose(128, kernel_size=3, strides=2, padding='same')(x)
x = LeakyReLU(alpha=0.01)(x)
# Layer 5:
x = Conv2DTranspose(64, kernel_size=3, strides=2, padding='same')(x)
x = LeakyReLU(alpha=0.01)(x)
# Layer 6
x = Conv2DTranspose(self.channels, kernel_size=1, strides=1, padding='valid')(x)
z = Activation("tanh")(x)
return Model(inputs=inputImg, outputs=z, name=name)
def modelSimple(self):
inputImg = Input(shape=self.img_shape)
x = Conv2D(256, kernel_size=1, strides=1, padding='same')(inputImg)
x = Activation('relu')(x)
prediction = Conv2D(1, kernel_size=5, strides=1, padding='same')(x)
return Model(input=inputImg, output=prediction)
#===============================================================================
# Training
    def trainFullModel(self, epochs=100, batch_size=1, save_interval=1):
        """Adversarial training loop for the full UNIT model.

        Per batch: (1) translate both domains through the shared latent space,
        (2) train each discriminator on one real and one synthetic batch,
        (3) train the encoder/generator stack against its 14 loss terms.
        Loss histories are accumulated and written to disk at the end.

        Args:
            epochs: number of passes over the training data.
            batch_size: images per training step.
            save_interval: epoch interval between result-image dumps.
        """
        def run_training_iteration(loop_index, epoch_iterations, imgA, imgB):
            # One optimization step on a single (imgA, imgB) batch pair.
            # Flip was not done in article
            # if np.random.rand(1) > 0.5:
            #     imgA = cv2.flip(imgA[0], 1)
            #     imgA = imgA[np.newaxis,:,:,:]
            # if np.random.rand(1) > 0.5:
            #     imgB = cv2.flip(imgB[0], 1)
            #     imgB = imgB[np.newaxis,:,:,:]

            # Generate fake images (outXy = domain-X image rendered in domain y)
            encodedImageA = self.encoderA.predict(imgA)
            encodedImageB = self.encoderB.predict(imgB)

            sharedA = self.encoderShared.predict(encodedImageA)
            sharedB = self.encoderShared.predict(encodedImageB)

            outSharedA = self.decoderShared.predict(sharedA)
            outSharedB = self.decoderShared.predict(sharedB)

            outAa = self.generatorA.predict(outSharedA)
            outBa = self.generatorA.predict(outSharedB)

            outAb = self.generatorB.predict(outSharedA)
            outBb = self.generatorB.predict(outSharedB)

            # Train discriminator: one real batch, one synthetic batch each.
            dA_loss_real = self.discriminatorA.train_on_batch(imgA, real_labels)
            dA_loss_fake = self.discriminatorA.train_on_batch(outBa, synthetic_labels)
            dA_loss = np.add(dA_loss_real, dA_loss_fake)

            dB_loss_real = self.discriminatorB.train_on_batch(imgB, real_labels)
            dB_loss_fake = self.discriminatorB.train_on_batch(outAb, synthetic_labels)
            dB_loss = np.add(dB_loss_real, dB_loss_fake)

            # Train generator: dummy zero targets feed the latent (VAE) losses.
            g_loss = self.encoderGeneratorModel.train_on_batch([imgA, imgB],
                                                  [dummy, dummy,
                                                   dummy, dummy,
                                                   imgA, imgB,
                                                   imgA, imgB,
                                                   real_labels1, real_labels1,
                                                   real_labels2, real_labels2,
                                                   real_labels3, real_labels3])

            # Store training data
            epoch_list.append(epoch)
            loop_index_list.append(loop_index)

            # Discriminator loss
            loss_dA_real_list.append(dA_loss_real[0])
            loss_dA_fake_list.append(dA_loss_fake[0])
            loss_dB_real_list.append(dB_loss_real[0])
            loss_dB_fake_list.append(dB_loss_fake[0])

            dA_sum_loss_list.append(dA_loss[0])
            dB_sum_loss_list.append(dB_loss[0])

            # Generator loss (g_loss[0] is the weighted total, the rest
            # follow the output order of encoderGeneratorModel)
            loss_gen_list_1.append(g_loss[0])
            loss_gen_list_2.append(g_loss[1])
            loss_gen_list_3.append(g_loss[2])
            loss_gen_list_4.append(g_loss[3])
            loss_gen_list_5.append(g_loss[4])
            loss_gen_list_6.append(g_loss[5])
            loss_gen_list_7.append(g_loss[6])
            loss_gen_list_8.append(g_loss[7])
            loss_gen_list_9.append(g_loss[8])
            loss_gen_list_10.append(g_loss[9])
            loss_gen_list_11.append(g_loss[10])

            print('----------------Epoch-------640x480---------', epoch, '/', epochs - 1)
            print('----------------Loop index-----------', loop_index, '/', epoch_iterations - 1)
            print('Discriminator TOTAL loss: ', dA_loss[0] + dB_loss[0])
            print('Discriminator A loss total: ', dA_loss[0])
            print('Discriminator B loss total: ', dB_loss[0])
            print('Genarator loss total: ', g_loss[0])
            print('----------------Discriminator loss----')
            print('dA_loss_real: ', dA_loss_real[0])
            print('dA_loss_fake: ', dA_loss_fake[0])
            print('dB_loss_real: ', dB_loss_real[0])
            print('dB_loss_fake: ', dB_loss_fake[0])
            print('----------------Generator loss--------')
            print('Shared A: ', g_loss[1])
            print('Shared B: ', g_loss[2])
            print('Cycle shared A: ', g_loss[3])
            print('Cycle shared B: ', g_loss[4])
            print('OutAa MAE: ', g_loss[5])
            print('OutBb MAE: ', g_loss[6])
            print('Cycle_Ab_Ba MAE: ', g_loss[7])
            print('Cycle_Ba_Ab MAE: ', g_loss[8])
            print('guess_outBa: ', g_loss[9])
            print('guess_outAb: ', g_loss[10])
            print('guess_outBa: ', g_loss[11])
            print('guess_outAb: ', g_loss[12])
            print('guess_outBa: ', g_loss[13])
            print('guess_outAb: ', g_loss[14])
            sys.stdout.flush()

            if loop_index % 5 == 0:
                # Save temporary images continously
                self.save_tmp_images(imgA, imgB)
                self.print_ETA(start_time, epoch, epoch_iterations, loop_index)

        A_train = self.A_train
        B_train = self.B_train
        self.history = OrderedDict()

        self.epochs = epochs
        self.batch_size = batch_size

        # Per-iteration loss accumulators, flushed to disk at the end.
        loss_dA_real_list = []
        loss_dA_fake_list = []
        loss_dB_real_list = []
        loss_dB_fake_list = []
        dA_sum_loss_list = []
        dB_sum_loss_list = []

        loss_gen_list_1 = []
        loss_gen_list_2 = []
        loss_gen_list_3 = []
        loss_gen_list_4 = []
        loss_gen_list_5 = []
        loss_gen_list_6 = []
        loss_gen_list_7 = []
        loss_gen_list_8 = []
        loss_gen_list_9 = []
        loss_gen_list_10 = []
        loss_gen_list_11 = []

        epoch_list = []
        loop_index_list = []

        # Zero target tensor for the latent VAE-style losses.
        #dummy = []
        #dummy = shape=self.latent_dim
        #dummy = np.zeros(shape=self.latent_dim)
        #dummy = np.expand_dims(dummy, 0)
        dummy = np.zeros(shape = ((self.batch_size,) + self.latent_dim))

        self.writeMetaDataToJSON()
        self.saveImages('init', 1)
        sys.stdout.flush()

        # Start stopwatch for ETAs
        start_time = time.time()

        # One label tensor per discriminator scale (full, 1/2, 1/4 resolution).
        label_shape1 = (batch_size,) + self.discriminatorA.output_shape[0][1:]
        label_shape2 = (batch_size,) + self.discriminatorA.output_shape[1][1:]
        label_shape3 = (batch_size,) + self.discriminatorA.output_shape[2][1:]

        real_labels1 = np.ones(label_shape1)
        real_labels2 = np.ones(label_shape2)
        real_labels3 = np.ones(label_shape3)
        synthetic_labels1 = np.zeros(label_shape1)
        synthetic_labels2 = np.zeros(label_shape2)
        synthetic_labels3 = np.zeros(label_shape3)

        real_labels = [real_labels1, real_labels2, real_labels3]
        synthetic_labels = [synthetic_labels1, synthetic_labels2, synthetic_labels3]

        for epoch in range(epochs):
            if self.use_data_generator:
                loop_index = 1
                for images in self.data_generator:
                    imgA = images[0]
                    imgB = images[1]

                    # Run all training steps
                    run_training_iteration(loop_index, self.data_generator.__len__(), imgA, imgB)
                    print("-----------------Loop Index:", loop_index)
                    if loop_index % 20000 == 0: # 20000
                        self.saveCurrentModels(loop_index)
                        self.saveImages(loop_index, 1)
                    elif loop_index >= self.data_generator.__len__():
                        break
                    loop_index += 1

            else:  # Train with all data in cache
                A_train = self.A_train
                B_train = self.B_train
                random_order_A = np.random.randint(len(A_train), size=len(A_train))
                random_order_B = np.random.randint(len(B_train), size=len(B_train))
                epoch_iterations = max(len(random_order_A), len(random_order_B))
                min_nr_imgs = min(len(random_order_A), len(random_order_B))

                for loop_index in range(0, epoch_iterations, batch_size):
                    if loop_index + batch_size >= min_nr_imgs:
                        # If all images soon are used for one domain,
                        # randomly pick from this domain
                        if len(A_train) <= len(B_train):
                            indexes_A = np.random.randint(len(A_train), size=batch_size)
                            indexes_B = random_order_B[loop_index:
                                                       loop_index + batch_size]
                        else:
                            indexes_B = np.random.randint(len(B_train), size=batch_size)
                            indexes_A = random_order_A[loop_index:
                                                       loop_index + batch_size]
                    else:
                        indexes_A = random_order_A[loop_index:
                                                   loop_index + batch_size]
                        indexes_B = random_order_B[loop_index:
                                                   loop_index + batch_size]

                    imgA = A_train[indexes_A]
                    imgB = B_train[indexes_B]

                    run_training_iteration(loop_index, epoch_iterations, imgA, imgB)

            if epoch % 10 == 0:
                self.saveCurrentModels(epoch)

            if epoch % save_interval == 0:
                print('--------Saving images for epoch', epoch, '--------')
                self.saveImages(epoch, 3)

        # Create dictionary of losses and save to file
        # NOTE(review): loss_gen_list_10/11 are collected above but never
        # stored in the history -- confirm whether that is intentional.
        self.history = {
            'loss_dA_real_list': loss_dA_real_list,
            'loss_dA_fake_list': loss_dA_fake_list,
            'loss_dB_real_list': loss_dB_real_list,
            'loss_dB_fake_list': loss_dB_fake_list,
            'dA_sum_loss_list': dA_sum_loss_list,
            'dB_sum_loss_list': dB_sum_loss_list,
            'loss_gen_list_1': loss_gen_list_1,
            'loss_gen_list_2': loss_gen_list_2,
            'loss_gen_list_3': loss_gen_list_3,
            'loss_gen_list_4': loss_gen_list_4,
            'loss_gen_list_5': loss_gen_list_5,
            'loss_gen_list_6': loss_gen_list_6,
            'loss_gen_list_7': loss_gen_list_7,
            'loss_gen_list_8': loss_gen_list_8,
            'loss_gen_list_9': loss_gen_list_9,
            'loop_index': loop_index_list,
            'epoch': epoch_list}

        self.writeLossDataToFile()

        self.saveModel(self.discriminatorA, 'discriminatorA', epoch)
        self.saveModel(self.discriminatorB, 'discriminatorB', epoch)
        self.saveModel(self.generatorA, 'generatorA', epoch)
        self.saveModel(self.generatorB, 'generatorB', epoch)
        self.saveModel(self.encoderA, 'encoderA', epoch)
        self.saveModel(self.encoderB, 'encoderB', epoch)
        self.saveModel(self.decoderShared, 'decoderShared', epoch)
        self.saveModel(self.encoderShared, 'encoderShared', epoch)
        sys.stdout.flush()
def saveCurrentModels(self, epoch):
self.saveModel(self.discriminatorA, 'discriminatorA', epoch)
self.saveModel(self.discriminatorB, 'discriminatorB', epoch)
self.saveModel(self.generatorA, 'generatorA', epoch)
self.saveModel(self.generatorB, 'generatorB', epoch)
self.saveModel(self.encoderA, 'encoderA', epoch)
self.saveModel(self.encoderB, 'encoderB', epoch)
self.saveModel(self.decoderShared, 'decoderShared', epoch)
self.saveModel(self.encoderShared, 'encoderShared', epoch)
def trainSimpleModel(self, epochs=200, batch_size=1, T=1):
A_train = self.A_train
B_train = self.B_train
if T == 1:
X_train = self.A_train
Y_train = self.B_train
else:
Y_train = self.A_train
X_train = self.B_train
self.superSimple.fit(x=X_train, y=Y_train, batch_size=1, epochs=epochs, verbose=1, callbacks=None, validation_split=0.0,
validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0,
steps_per_epoch=None, validation_steps=None)
self.saveImagesSimpleModel(epoch=epochs, num_saved_images=10, T=T)
self.saveModel(self.superSimple, 'superSimpleModel', epochs)
#===============================================================================
# Save and load Models
def saveModel(self, model, model_name, epoch):
directory = os.path.join('saved_model', self.date_time)
if not os.path.exists(directory):
os.makedirs(directory)
model_path_w = 'saved_model/{}/{}_epoch_{}_weights.hdf5'.format(self.date_time, model_name, epoch)
model.save_weights(model_path_w)
model_path_m = 'saved_model/{}/{}_epoch_{}_model.json'.format(self.date_time, model_name, epoch)
model.save_weights(model_path_m)
def loadAllWeightsToModelsIncludeDisc(self, folder_name, epoch):
pathEncoderA = 'saved_model/{}/encoderA_epoch_{}_weights.hdf5'.format(folder_name, epoch)
pathEncoderB = 'saved_model/{}/encoderB_epoch_{}_weights.hdf5'.format(folder_name, epoch)
pathEncoderShared = 'saved_model/{}/encoderShared_epoch_{}_weights.hdf5'.format(folder_name, epoch)
pathDecoderShared = 'saved_model/{}/decoderShared_epoch_{}_weights.hdf5'.format(folder_name, epoch)
pathGeneratorA = 'saved_model/{}/generatorA_epoch_{}_weights.hdf5'.format(folder_name, epoch)
pathGeneratorB = 'saved_model/{}/generatorB_epoch_{}_weights.hdf5'.format(folder_name, epoch)
pathDiscriminatorA = 'saved_model/{}/discriminatorA_epoch_{}_weights.hdf5'.format(folder_name, epoch)
pathDiscriminatorB = 'saved_model/{}/discriminatorB_epoch_{}_weights.hdf5'.format(folder_name, epoch)
self.encoderA.load_weights(pathEncoderA)
self.encoderB.load_weights(pathEncoderB)
self.encoderShared.load_weights(pathEncoderShared)
self.decoderShared.load_weights(pathDecoderShared)
self.generatorA.load_weights(pathGeneratorA)
self.generatorB.load_weights(pathGeneratorB)
self.discriminatorA.load_weights(pathDiscriminatorA)
self.discriminatorB.load_weights(pathDiscriminatorB)
def loadAllWeightsToModels(self, folder_name, epoch):
pathEncoderA = 'saved_model/{}/encoderA_epoch_{}_weights.hdf5'.format(folder_name, epoch)
pathEncoderB = 'saved_model/{}/encoderB_epoch_{}_weights.hdf5'.format(folder_name, epoch)
pathEncoderShared = 'saved_model/{}/encoderShared_epoch_{}_weights.hdf5'.format(folder_name, epoch)
pathDecoderShared = 'saved_model/{}/decoderShared_epoch_{}_weights.hdf5'.format(folder_name, epoch)
pathGeneratorA = 'saved_model/{}/generatorA_epoch_{}_weights.hdf5'.format(folder_name, epoch)
pathGeneratorB = 'saved_model/{}/generatorB_epoch_{}_weights.hdf5'.format(folder_name, epoch)
self.encoderA.load_weights(pathEncoderA)
self.encoderB.load_weights(pathEncoderB)
self.encoderShared.load_weights(pathEncoderShared)
self.decoderShared.load_weights(pathDecoderShared)
self.generatorA.load_weights(pathGeneratorA)
self.generatorB.load_weights(pathGeneratorB)
def load_model_and_generate_synthetic_images(self, folder_name, epoch):
self.loadAllWeightsToModels(folder_name, epoch)
synthetic_images_B = self.predict_A_B(self.A_test)
synthetic_images_A = self.predict_B_A(self.B_test)
def save_image(image, name, domain):
if self.channels == 1:
image = image[:, :, 0]
image = np.clip(image/2 + 0.5, 0, 1)
directory = os.path.join('generate_images_test', folder_name, domain)
if not os.path.exists(directory):
os.makedirs(directory)
directory = os.path.join(directory, name)
try:
toimage(image, cmin=0, cmax=1).save(directory)
except Exception as e:
print("type error: " + str(e))
# Test A images
for i in range(len(synthetic_images_A)):
# Get the name from the image it was conditioned on
name = self.testB_image_names[i].strip('.png') + '_synthetic.png'
synt_A = synthetic_images_A[i]
save_image(synt_A, name, 'A')
# Test B images
for i in range(len(synthetic_images_B)):
# Get the name from the image it was conditioned on
name = self.testA_image_names[i].strip('.png') + '_synthetic.png'
synt_B = synthetic_images_B[i]
save_image(synt_B, name, 'B')
print('{} synthetic images have been generated and placed in ./generate_images/synthetic_images'
.format(len(self.A_test) + len(self.B_test)))
def predict_A_B(self, imgA):
encodedImageA = self.encoderA.predict(imgA)
sharedA = self.encoderShared.predict(encodedImageA)
outSharedA = self.decoderShared.predict(sharedA)
outAb = self.generatorB.predict(outSharedA)
return outAb
def predict_B_A(self, imgB):
encodedImageB = self.encoderB.predict(imgB)
sharedB = self.encoderShared.predict(encodedImageB)
outSharedB = self.decoderShared.predict(sharedB)
outBa = self.generatorA.predict(outSharedB)
return outBa
    def truncateAndSave(self, real_A, real_B, synthetic, reconstructed, epoch, sample, name, filename, tmp=False):
        """Clip image values at -1 and save a (possibly stacked) comparison image.

        Depending on which of ``real_A`` / ``real_B`` / ``reconstructed`` are
        None, the synthetic image is saved alone or horizontally stacked with
        the corresponding real/reconstructed images.
        """
        # Clamp below at -1 (tanh range floor) before saving.
        synthetic = synthetic.clip(min=-1)
        if reconstructed is not None:
            reconstructed = reconstructed.clip(min=-1)

        # Append and save
        if tmp:
            imsave('images/{}/{}.png'.format(
                self.date_time, name), synthetic)
        else:
            if real_A is None and real_B is None:
                imsave('images/{}/{}/{}_synt.png'.format(
                    self.date_time, name, filename), synthetic)
            elif real_B is None and reconstructed is None:
                image = np.hstack((real_A, synthetic))
            elif real_A is not None:
                image = np.hstack((real_B, real_A, synthetic, reconstructed))
            else:
                image = np.hstack((real_B, synthetic, reconstructed))

            # NOTE(review): if the first branch above runs (both reals None),
            # ``image`` is never assigned and this imsave raises NameError --
            # a ``return`` after that branch looks intended; confirm.
            imsave('images/{}/{}/epoch{}_sample{}.png'.format(
                self.date_time, name, epoch, sample), image)
    def saveImages(self, epoch, num_saved_images=1):
        """Render translation + cycle-reconstruction panels for test images.

        For each of the first ``num_saved_images`` test pairs, runs the full
        A->B and B->A translation chains plus their cycle reconstructions and
        saves side-by-side comparison images under ``images/<run>/A`` and ``B``.
        """
        directory = os.path.join('images', self.date_time)
        if not os.path.exists(os.path.join(directory, 'A')):
            os.makedirs(os.path.join(directory, 'A'))
            os.makedirs(os.path.join(directory, 'B'))

        for i in range(num_saved_images):
            imgA = self.A_test[i]
            imgB = self.B_test[i]
            # Add a batch dimension for predict().
            imgA = np.expand_dims(imgA, axis=0)
            imgB = np.expand_dims(imgB, axis=0)

            # Generate fake images (outXy = domain-X image rendered in domain y)
            encodedImageA = self.encoderA.predict(imgA)
            encodedImageB = self.encoderB.predict(imgB)

            sharedA = self.encoderShared.predict(encodedImageA)
            sharedB = self.encoderShared.predict(encodedImageB)

            outSharedA = self.decoderShared.predict(sharedA)
            outSharedB = self.decoderShared.predict(sharedB)

            outAa = self.generatorA.predict(outSharedA)
            outBa = self.generatorA.predict(outSharedB)

            outAb = self.generatorB.predict(outSharedA)
            outBb = self.generatorB.predict(outSharedB)

            # Cycle: translate the cross-domain outputs back to their source domain.
            encodedImageC_A = self.encoderA.predict(outBa)
            encodedImageC_B = self.encoderB.predict(outAb)

            sharedC_A = self.encoderShared.predict(encodedImageC_A)
            sharedC_B = self.encoderShared.predict(encodedImageC_B)

            outSharedC_A = self.decoderShared.predict(sharedC_A)
            outSharedC_B = self.decoderShared.predict(sharedC_B)

            outC_Ba = self.generatorA.predict(outSharedC_B)
            outC_Ab = self.generatorB.predict(outSharedC_A)

            print('')
            # Drop the batch (and, for grayscale, channel) dimension before saving.
            if self.channels == 1:
                imgA = imgA[0, :, :, 0]
                outAb0 = outAb[0, :, :, 0]
                outC_Ab = outC_Ab[0, :, :, 0]

                imgB = imgB[0, :, :, 0]
                outBa0 = outBa[0, :, :, 0]
                outC_Ba = outC_Ba[0, :, :, 0]

                self.truncateAndSave(imgA, imgB, outAb0, outC_Ba, epoch, i, 'B', None)
                self.truncateAndSave(imgB, imgA, outBa0, outC_Ab, epoch, i, 'A', None)
            else:
                imgA = imgA[0, :, :, :]
                outAb0 = outAb[0, :, :, :]
                outAa0 = outAa[0, :, :, :]

                imgB = imgB[0, :, :, :]
                outBa0 = outBa[0, :, :, :]
                outBb0 = outBb[0, :, :, :]

                outC_Ab = outC_Ab[0, :, :, :]
                outC_Ba = outC_Ba[0, :, :, :]

                self.truncateAndSave(None, imgA, outAb0, outC_Ba, epoch, i, 'A', None)
                self.truncateAndSave(None, imgB, outBa0, outC_Ab, epoch, i, 'B', None)
def saveImagesSimpleModel(self, epoch, num_saved_images=1, T=1):
    """Save real/synthetic image pairs produced by the `superSimple` model.

    Args:
        epoch: current training epoch (used in the output file name).
        num_saved_images: number of training samples to render.
        T: source-domain selector — 1 reads from A_train, 2 from
           B_train. Any other value leaves img_real unbound and raises
           NameError on the first loop iteration.
    """
    directory = os.path.join('images', self.date_time)
    # Create both per-domain output subfolders the first time through.
    if not os.path.exists(os.path.join(directory, 'A')):
        os.makedirs(os.path.join(directory, 'A'))
        os.makedirs(os.path.join(directory, 'B'))
    for i in range(num_saved_images):
        if T == 1:
            img_real = self.A_train[i]
            img_real = np.expand_dims(img_real, axis=0)
        if T == 2:
            img_real = self.B_train[i]
            img_real = np.expand_dims(img_real, axis=0)
        # Debug range printout for the input image.
        print("Max", np.max(img_real.flatten()))
        print("Min", np.min(img_real.flatten()))
        # Generate fake images
        img_synt = self.superSimple.predict(img_real)
        # Drop batch and channel axes (assumes single-channel 4-D tensors).
        img_real = img_real[0, :, :, 0]
        img_synt = img_synt[0, :, :, 0]
        # NOTE(review): this call passes 9 positional arguments while the
        # calls in saveImages pass 8 — confirm against truncateAndSave's
        # signature; this looks like a latent TypeError.
        self.truncateAndSave(img_real, None, img_synt, None, epoch, i, 'A', i, None)
def save_tmp_images(self, imgA, imgB):
    """Write a temporary side-by-side progress image for one A/B pair.

    Builds a 4-panel composite (real | reconstruction | translation |
    cycle-reconstruction) for both domains, saves the raw array as
    images/<date_time>/tmp.npy and a clipped preview as tmp.png.

    Failures are deliberately swallowed: the tmp file may be open in an
    image viewer on Windows, which makes the save raise. The original
    used a bare ``except:`` which also swallowed KeyboardInterrupt and
    SystemExit (blocking Ctrl-C during training); narrowed to Exception.
    """
    try:
        # Generate fake images
        encodedImageA = self.encoderA.predict(imgA)
        encodedImageB = self.encoderB.predict(imgB)
        sharedA = self.encoderShared.predict(encodedImageA)
        sharedB = self.encoderShared.predict(encodedImageB)
        outSharedA = self.decoderShared.predict(sharedA)
        outSharedB = self.decoderShared.predict(sharedB)
        outAa = self.generatorA.predict(outSharedA)
        outBa = self.generatorA.predict(outSharedB)
        outAb = self.generatorB.predict(outSharedA)
        outBb = self.generatorB.predict(outSharedB)
        # Cycle
        encodedImageC_A = self.encoderA.predict(outBa)
        encodedImageC_B = self.encoderB.predict(outAb)
        sharedC_A = self.encoderShared.predict(encodedImageC_A)
        sharedC_B = self.encoderShared.predict(encodedImageC_B)
        outSharedC_A = self.decoderShared.predict(sharedC_A)
        outSharedC_B = self.decoderShared.predict(sharedC_B)
        outC_Ba = self.generatorA.predict(outSharedC_B)
        outC_Ab = self.generatorB.predict(outSharedC_A)
        if self.channels == 1:
            # Grayscale: drop batch and channel axes.
            imgA = imgA[0, :, :, 0]
            outAa0 = outAa[0, :, :, 0]
            outAb0 = outAb[0, :, :, 0]
            outC_Ab0 = outC_Ab[0, :, :, 0]
            imgB = imgB[0, :, :, 0]
            outBb0 = outBb[0, :, :, 0]
            outBa0 = outBa[0, :, :, 0]
            outC_Ba0 = outC_Ba[0, :, :, 0]
        else:
            # Color: drop only the batch axis.
            imgA = imgA[0, :, :, :]
            outAa0 = outAa[0, :, :, :]
            outAb0 = outAb[0, :, :, :]
            outC_Ab0 = outC_Ab[0, :, :, :]
            imgB = imgB[0, :, :, :]
            outBb0 = outBb[0, :, :, :]
            outBa0 = outBa[0, :, :, :]
            outC_Ba0 = outC_Ba[0, :, :, :]
        # Rows: domain A on top, domain B below; columns: real,
        # reconstruction, translation, cycle-reconstruction.
        real_images = np.vstack((imgA, imgB))
        recon_images = np.vstack((outAa0, outBb0))
        synthetic_images = np.vstack((outAb0, outBa0))
        recon_cycle_images = np.vstack((outC_Ba0, outC_Ab0))
        image1 = np.hstack((real_images, recon_images))
        image2 = np.hstack((synthetic_images, recon_cycle_images))
        image_tot = np.hstack((image1, image2))
        # Map from the [-1, 1] tanh range to [0, 1] for the PNG preview —
        # presumably the generators emit tanh outputs; confirm.
        image_tot_clip = np.clip(image_tot/2 + 0.5, 0, 1)
        np.save('images/{}/{}.npy'.format(self.date_time, 'tmp'), image_tot)
        imsave('images/{}/{}.png'.format(self.date_time, 'tmp'), image_tot_clip)
    except Exception:  # Ignore if file is open
        pass
def print_ETA(self, start_time, epoch, epoch_iterations, loop_index):
    """Print elapsed training time and an estimated time to completion.

    Args:
        start_time: time.time() value captured when training started.
        epoch: zero-based index of the current epoch.
        epoch_iterations: amount of work per epoch (same unit as loop_index).
        loop_index: progress within the current epoch.
    """
    elapsed = time.time() - start_time
    # The +1e-5 guards against division by zero on the very first call.
    done = (epoch * epoch_iterations + loop_index) / self.batch_size + 1e-5
    total = self.epochs * epoch_iterations / self.batch_size
    remaining = total - done
    eta = round(elapsed / done * remaining)
    passed_time_string = str(datetime.timedelta(seconds=round(elapsed)))
    eta_string = str(datetime.timedelta(seconds=eta))
    print('Time passed', passed_time_string, ': ETA in', eta_string)
def writeLossDataToFile(self):
    """Dump the training-loss history to images/<date_time>/loss_output.csv.

    The CSV has one column per history key (alphabetical) and one row per
    recorded step; all history series are assumed to be equally long.
    """
    columns = sorted(self.history)
    series = [self.history[column] for column in columns]
    out_path = 'images/{}/loss_output.csv'.format(self.date_time)
    with open(out_path, 'w') as csv_file:
        writer = csv.writer(csv_file, delimiter=',')
        writer.writerow(columns)
        writer.writerows(zip(*series))
def writeMetaDataToJSON(self):
    """Write run hyperparameters and dataset sizes to meta_data.json.

    Creates images/<date_time>/ if needed and stores the metadata as
    {"meta_data": [{...}]} with keys sorted for stable diffs.
    """
    out_dir = os.path.join('images', self.date_time)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    # Save meta_data
    meta = {
        'Learning Rate': self.learning_rate,
        'beta 1': self.beta_1,
        'beta 2': self.beta_2,
        'img height': self.img_height,
        'img width': self.img_width,
        'channels': self.channels,
        'epochs': self.epochs,
        'batch size': self.batch_size,
        'number of S1 train examples': len(self.A_train),
        'number of S2 train examples': len(self.B_train),
        'number of S1 test examples': len(self.A_test),
        'number of S2 test examples': len(self.B_test),
    }
    with open('images/{}/meta_data.json'.format(self.date_time), 'w') as outfile:
        json.dump({'meta_data': [meta]}, outfile, sort_keys=True)
# Script entry point: fix the NumPy RNG seed for reproducibility, then
# construct the UNIT model (whatever work UNIT.__init__ performs —
# presumably building, and possibly training, the networks — happens here).
if __name__ == '__main__':
    np.random.seed(10)
    model = UNIT()
| StarcoderdataPython |
3371846 | <reponame>22014471/malonghui_Django<filename>back_end/mlh/apps/activity/views.py
from django.shortcuts import render
# Create your views here.
from rest_framework.generics import ListAPIView, RetrieveAPIView
from activity.models import Activity
from activity.serializers import ActivitySerializer, ActivityDetailSerializer
from mlh.utils.pagination import StandardResultsSetPagination
class ActivityView(ListAPIView):
    """Activity list endpoint.

    Returns all activities ordered by start time (newest first), using
    the project's standard pagination settings.
    """
    queryset = Activity.objects.all().order_by('-start_time')
    serializer_class = ActivitySerializer
    pagination_class = StandardResultsSetPagination
class ActivityDetailView(RetrieveAPIView):
    """Activity detail endpoint: retrieve a single activity by primary key."""
    queryset = Activity.objects.all()
    serializer_class = ActivityDetailSerializer
| StarcoderdataPython |
3347123 | # -*- coding:utf-8 -*-
# !/usr/bin/env python3
"""Tail
Usage:
tail <filename> [-n=<n>] [--encoding=<encoding>] [--no-more]
Options:
-n=<n> head number of the file [default: 5].
--encoding=<encoding> point the encoding of the file manually
--no-more don't use `more` to show
"""
import cchardet as chardet
import minghu6
from docopt import docopt
from minghu6.etc import fileecho
from color import color
from minghu6.text.more import more
def main(path, n, encoding=None, no_more=False):
    """Show the last `n` lines of `path`, auto-detecting the codec.

    If `encoding` is given it is used as-is; otherwise the codec is
    guessed with cchardet and rejected below 0.7 confidence. Output goes
    through the pager unless `no_more` is set.
    """
    try:
        with open(path, 'rb') as fh:
            tail_lines = fileecho.tail(fh, n)
        raw = b'\n'.join(tail_lines)
        guess = chardet.detect(raw)
        if encoding is not None:
            codec = encoding
        else:
            if guess['confidence'] <= 0.7:
                # Too unsure to decode — ask the user to pass --encoding.
                color.print_warn('Not Known encoding, may be %s.\n'
                                 'Please point it explictly' % guess['encoding'])
                return
            codec = guess['encoding']
        text = raw.decode(codec, errors='ignore')
        if no_more:
            color.print_info(text)
        else:
            more(text, print_color=True)
    except FileNotFoundError:
        color.print_err('%s not found' % path)
    except PermissionError:
        color.print_err('Permission denied: %s' % path)
def cli():
    """Console entry point: parse docopt arguments and delegate to main()."""
    args = docopt(__doc__, version=minghu6.__version__)
    main(args['<filename>'],
         int(args['-n']),
         encoding=args['--encoding'],
         no_more=args['--no-more'])
# Allow running this module directly as a script.
if __name__ == '__main__':
    cli()
| StarcoderdataPython |
1791434 | <gh_stars>0
#---- Class to hold information about a generic network device --------
class NetworkDevice():
def __init__(self, name, ip, user='cisco', pw='cisco'):
self.name = name
self.ip_address = ip
self.username = user
self.password = pw
self.os_type = 'unknown'
#---- Class to hold information about an IOS-XE network device --------
class NetworkDeviceIOS(NetworkDevice):
    """Network device running IOS / IOS-XE."""
    def __init__(self, name, ip, user='cisco', pw='cisco'):
        # Reuse the generic initializer, then tag the concrete OS type.
        NetworkDevice.__init__(self, name, ip, user, pw)
        self.os_type = 'ios'
#---- Class to hold information about an IOS-XR network device --------
class NetworkDeviceXR(NetworkDevice):
    """Network device running IOS-XR."""
    def __init__(self, name, ip, user='cisco', pw='cisco'):
        # Reuse the generic initializer, then tag the concrete OS type.
        NetworkDevice.__init__(self, name, ip, user, pw)
        self.os_type = 'ios-xr'
#---- Function to read device information from file -------------------
#---- Function to read device information from file -------------------
def read_device_info(devices_file):
    """Parse the device inventory file into device objects.

    Each line is expected to be: name,os_type,ip,username,password.
    Lines whose os_type is neither 'ios' nor 'ios-xr', and blank or
    malformed lines, are skipped. Returns a list of NetworkDevice
    subclasses.
    """
    devices_list = []
    # 'with' guarantees the file is closed even if a constructor raises
    # (the original leaked the handle on any exception before close()).
    with open(devices_file, 'r') as f:
        for line in f:
            device_info = line.strip().split(',')  # Get device info into list
            if len(device_info) < 5:
                continue  # blank or malformed line
            # Create a device object with this data
            if device_info[1] == 'ios':
                device = NetworkDeviceIOS(device_info[0], device_info[2],
                                          device_info[3], device_info[4])
            elif device_info[1] == 'ios-xr':
                device = NetworkDeviceXR(device_info[0], device_info[2],
                                         device_info[3], device_info[4])
            else:
                continue  # go to the next device in the file
            devices_list.append(device)  # add this device object to list
    return devices_list
#---- Function to go through devices printing them to table -----------
def print_device_info(devices_list):
    # Print the devices as a fixed-width table.
    # NOTE: Python 2 print statements — this module is not Python 3 compatible.
    print ''
    print 'Name OS-type IP address Username Password'
    print '------ ------- -------------- -------- --------'
    # Go through the list of devices, printing out values in nice format
    for device in devices_list:
        # Column widths: 8 / 8 / 16 / 8 / 8 characters.
        print '{0:8} {1:8} {2:16} {3:8} {4:8}'.format(device.name,
                                                      device.os_type,
                                                      device.ip_address,
                                                      device.username,
                                                      device.password)
    print ''
#---- Main: read device info, then print ------------------------------
# NOTE(review): runs at import time (no __main__ guard) and expects a
# file literally named 'devices' in the working directory — confirm intended.
devices = read_device_info('devices')
print_device_info(devices)
| StarcoderdataPython |
1731822 | from django.shortcuts import render
from django.http import JsonResponse
from django.http import HttpResponseBadRequest
from django.db.models import Count
from HMBBF.models import home_ad
from HMBBF.models import home_news
from HMBBF.models import home_Keyword
from HMBBF.models import Guest
from HMBBF.models import Theme
from HMBBF.models import live as Live
from HWHMBBF.models_extension import getReturnJSON
from HWHMBBF.Request_Check import method_check_GET,method_check_POST,User_ID_Check
def testReturn(res, msg="line success "):
    """Debug helper: echo the request object inside a JSON message."""
    payload = "{} : {}".format(msg, res)
    return JsonResponse({"msg": payload})
#首页数据
@method_check_GET
def home_information(res):
    """Home page payload: ad banners plus news entries, localized by ?lang=."""
    lan = res.GET.get("lang")
    ads = getReturnJSON(home_ad.objects.all(), lang=lan)
    news = getReturnJSON(home_news.objects.all(), lang=lan)
    return JsonResponse({"flag": "success", "data": {"ad": ads, "news": news}})
#首页页面得到热词
@method_check_GET
def home_seacher_keyword(res):
    """Return the hot search keywords shown on the home page search box."""
    # (Removed a leftover debug print("========") from the original.)
    lan = res.GET.get("lang")
    return JsonResponse({"flag": "success", "msg": "", "data": getReturnJSON(home_Keyword.objects.all(), lang=lan)})
#搜索
@method_check_GET
def home_seacher(res):
    """Home-page search endpoint (placeholder: echoes the request for now)."""
    # Removed an unreachable 'pass' statement that followed this return.
    return testReturn(res)
# 大会议程有多少天
@method_check_GET
def assembly_days(res):
    """Return the distinct conference days as [{"id": n, "date": "MM-DD"}, ...]."""
    # Group themes by date to get one entry per distinct day.
    result = Theme.objects.values("date").annotate(Count("date"))
    if result is None:
        # Defensive branch kept from the original; a queryset is normally
        # never None, so this is effectively dead code.
        data = {"flag": "fail"}
    else:
        data = {"flag": "success", "msg": ""}
        msg = []
        # enumerate(..., 1) replaces the original's manual len(msg)+1 id
        # bookkeeping; ids are 1-based as before.
        for seq, one in enumerate(result, 1):
            msg.append({"id": seq, "date": one["date"].strftime("%m-%d")})
        data["data"] = msg
    return JsonResponse(data)
#得到数据
@method_check_GET
@User_ID_Check
def theme_day(res):
    """Return the agenda (themes with their guests) for the requested day.

    Query params: lang (default "zh"), day (1-based index into the list
    of distinct theme dates, default 1).
    """
    # Removed an unused hard-coded `user_id = 1760` debug leftover.
    pars = res.GET if res.method == "GET" else res.POST
    lang = pars.get("lang", "zh")
    day = int(pars.get("day", "1"))
    # Distinct dates that have at least one theme; pick the day-th one.
    sel_result = Theme.objects.values("date").annotate(Count("date"))
    time = sel_result[day - 1]["date"].strftime("%Y-%m-%d")
    resu = Theme.objects.filter(date=time)
    data = []
    for i in resu:
        guests = i.theme_guest.all()
        # NOTE(review): guests are always serialized with lang="zh",
        # ignoring the request's `lang` parameter — confirm intended.
        guests_json = getReturnJSON(guests, lang="zh")
        one = i.json()
        one["theme_guest"] = guests_json
        data.append(one)
    return JsonResponse({"status": "success", "msg": "", "data": data})
#直播数据
@method_check_GET
def live(res):
    """Return the live-stream schedule for the requested day (default day 1)."""
    # Removed two leftover debug prints (request path and params).
    pars = res.GET
    day = pars.get("day", 1)
    # Distinct broadcast dates; `day` is a 1-based index into them.
    data = Live.objects.values("date").annotate(Count("date"))
    time = data[int(day) - 1]["date"].strftime("%Y-%m-%d")
    data = Live.objects.filter(date=time)
    data = [one.serializa() for one in data]
    # Renamed the local from `json` to avoid shadowing the json module name.
    payload = getReturnJSON(data)
    return JsonResponse({"status": "success", "msg": "", "data": payload})
@method_check_GET
def load_guests(res):
    """Return all guests serialized for the client."""
    # Removed a dataset-extraction artifact that was fused onto this line
    # and made it syntactically invalid.
    return JsonResponse({"status": "success", "msg": "", "data": getReturnJSON(Guest.objects.all())})
192025 | import pymongo
from bson import ObjectId, json_util
from datetime import date
import json
import os
class DBService:
    """Data-access layer over the `slrdb` MongoDB database.

    Wraps the collections used by the application (reviews, users,
    searches, results, terms) behind explicit methods; most read paths
    are MongoDB aggregation pipelines. The connection URI is read from
    the MONGO environment variable at construction time.

    Changes vs. the original: two leftover debug prints were removed
    from get_all_results_in_project; behavior is otherwise unchanged.
    """
    def __init__(self):
        # Raises KeyError immediately if MONGO is not set in the environment.
        self.db = pymongo.MongoClient(os.environ["MONGO"]).slrdb
    ###############
    #   COLLABS   #
    ###############
    def add_collab_to_project(self, project_id, user_sub):
        """Link user `user_sub` to the project (both directions); return the collab list."""
        self.db.reviews.update_one({"_id": ObjectId(project_id)}, {"$addToSet": {"collabs": user_sub}})
        self.db.users.update_one({"sub": user_sub}, {"$addToSet": {"projects": ObjectId(project_id)}})
        return self.get_collabs(project_id)
    def remove_collab_from_project(self, project_id, user_sub):
        """Unlink user `user_sub` from the project (both directions); return the collab list."""
        self.db.reviews.update_one({"_id": ObjectId(project_id)}, {"$pull": {"collabs": user_sub}})
        self.db.users.update_one({"sub": user_sub}, {"$pull": {"projects": ObjectId(project_id)}})
        return self.get_collabs(project_id)
    def get_collabs(self, project_id):
        """Return [{sub, username, name}, ...] for the project's collaborators."""
        pipeline = [
            {"$match": {
                "_id": ObjectId(project_id)
            }},
            {"$unwind": "$collabs"},
            {"$lookup": {
                "from": "users",
                "localField": "collabs",
                "foreignField": "sub",
                "as": "collabs"
            }},
            {"$project": {
                "_id": 0, "collabs.sub": 1, "collabs.username": 1, "collabs.name": 1
            }},
            {"$unwind": "$collabs"},
            {"$replaceRoot": {
                "newRoot": "$collabs"
            }}
        ]
        return list(self.db.reviews.aggregate(pipeline))
    ###############
    #   COMMENTS  #
    ###############
    def add_comment_to_result(self, project_id, result_id, user_id, comment):
        """Attach `comment` (mutated in place: _id/date/user added) to a result."""
        #TODO: Validate comment object
        comment["_id"] = ObjectId()
        comment["date"] = str(date.today())
        comment["user"] = user_id
        self.db.reviews.update_one({"_id": ObjectId(project_id), "results._id": ObjectId(result_id)}, {"$push": {"results.$.comments": comment}})
        return self.get_comments_for_result(project_id, result_id)
    def delete_comment_from_result(self, project_id, result_id, comment_id):
        """Remove a comment from a result; return the remaining comments."""
        self.db.reviews.update_one({"_id": ObjectId(project_id), "results._id": ObjectId(result_id)}, {"$pull": {"results.$.comments": {"_id": ObjectId(comment_id)}}})
        return self.get_comments_for_result(project_id, result_id)
    def get_comments_for_result(self, project_id, result_id):
        """Return the result's comments with each `user` expanded from the users collection."""
        pipeline = [
            {"$match": {
                "_id": ObjectId(project_id)
            }},
            {"$unwind": "$results"},
            {"$match": {
                "results._id": ObjectId(result_id)
            }},
            {"$replaceRoot": {
                "newRoot": "$results"
            }},
            {"$unwind": "$comments"},
            {"$lookup": {
                "from": "users",
                "localField": "comments.user",
                "foreignField": "sub",
                "as": "comments.user"
            }},
            {"$unwind": "$comments.user"},
            {"$project": {
                "_id": 0, "comments": 1
            }},
            {
                "$replaceRoot": {
                    "newRoot": "$comments"
                }
            },
            {
                "$project": {
                    "user._id": 0, "user.searches": 0, "user.projects": 0
                }
            }
        ]
        return list(self.db.reviews.aggregate(pipeline))
    #######################
    #   PROJECT LABELS    #
    #######################
    def add_label_to_project(self, project_id, label):
        """Add `label` (mutated in place: _id assigned) to the project; return it."""
        label["_id"] = ObjectId()
        self.db.reviews.update_one({"_id": ObjectId(project_id)}, {"$push": {"labels": label}})
        return label
    def remove_label_from_project(self, project_id, label_id):
        """Delete a label from the project and strip it from every result."""
        #Remove Label from project field
        self.db.reviews.update_one({"_id": ObjectId(project_id)}, {"$pull": {"labels": {"_id": ObjectId(label_id)}}})
        #Remove label from all results inside the project
        self.db.reviews.update_one({"_id": ObjectId(project_id)}, {"$pull": {"results.$[].labels": ObjectId(label_id)}})
        return self.get_labels_in_project(project_id)
    def update_label_in_project(self, project_id, label_id, label):
        """Overwrite a project label in place; returns the new label object."""
        #TODO: Validate label object
        # NOTE(review): unlike other methods, _id is stored as the raw
        # string label_id here, not wrapped in ObjectId — confirm intended.
        label["_id"] = label_id
        self.db.reviews.update_one({"_id": ObjectId(project_id), "labels._id": ObjectId(label_id)}, {"$set": {"labels.$": label}})
        return label
    def get_labels_in_project(self, project_id):
        """Return the project's `labels` array."""
        return self.db.reviews.find_one({"_id": ObjectId(project_id)})["labels"]
    #######################
    #    RESULT LABELS    #
    #######################
    def add_label_to_result(self, project_id, result_id, label_id):
        """Tag a result with an existing project label; return the result."""
        self.db.reviews.update_one({"_id": ObjectId(project_id), "results._id": ObjectId(result_id)}, {"$addToSet": {"results.$.labels": ObjectId(label_id)}})
        return self.get_result_in_project(project_id, result_id)
    def remove_label_from_result(self, project_id, result_id, label_id):
        """Remove a label tag from a result; return the result."""
        self.db.reviews.update_one({"_id": ObjectId(project_id), "results._id": ObjectId(result_id)},
                                   {"$pull": {"results.$.labels": ObjectId(label_id)}})
        return self.get_result_in_project(project_id, result_id)
    ################
    #   RESULTS    #
    ################
    def add_result_to_project(self, project_id, result_id):
        """Attach an (empty-labels, empty-comments) result entry to the project."""
        result = {
            "_id": ObjectId(result_id),
            "labels": [],
            "comments": []
        }
        self.db.reviews.update_one({"_id": ObjectId(project_id)}, {"$push": {"results": result}})
        return {
            "message": "Added successfully"
        }
    def remove_result_from_project(self, project_id, result_id):
        """Detach a result entry from the project."""
        self.db.reviews.update_one({"_id": ObjectId(project_id)}, {"$pull": {"results": {"_id": ObjectId(result_id)}}})
        return {
            "message": "Removed successfully"
        }
    def get_result_in_project(self, project_id, result_id):
        """Return one result with labels resolved, comments' users expanded
        and the underlying document joined in from the results collection."""
        pipeline = [
            {"$match": {
                "_id": ObjectId(project_id)
            }},
            {"$unwind": "$results"},
            {"$match": {
                "results._id": ObjectId(result_id)
            }},
            {"$lookup": {
                "from": "results",
                "localField": "results._id",
                "foreignField": "_id",
                "as": "results.result"
            }},
            {"$unwind": {
                "path": "$results.labels",
                "preserveNullAndEmptyArrays": True
            }},
            # Replace each label id with the full label object from the
            # project's labels array.
            {"$set": {
                "results.labels": {
                    "$filter": {
                        "input": "$labels",
                        "as": "labels",
                        "cond": {
                            "$eq": [
                                "$$labels._id",
                                "$results.labels"
                            ]
                        }
                    }
                }
            }},
            {"$unwind": {
                "path": "$results.labels",
                "preserveNullAndEmptyArrays": True
            }},
            {
                "$replaceRoot": {
                    "newRoot": "$results"
                }
            },
            {"$group": {
                "_id": "$_id",
                "labels": {
                    "$push": "$labels"
                },
                "comments": {
                    "$first": "$comments"
                },
                "result": {
                    "$first": "$result"
                }
            }},
            {"$unwind": {
                "path": "$comments",
                "preserveNullAndEmptyArrays": True
            }},
            {"$lookup": {
                "from": "users",
                "localField": "comments.user",
                "foreignField": "sub",
                "as": "comments.user"
            }},
            {"$unwind": {
                "path": "$comments.user",
                "preserveNullAndEmptyArrays": True
            }},
            {"$project": {
                "comments.user.searches": 0, "comments.user.projects": 0, "comments.user._id": 0
            }},
            {"$group": {
                "_id": "$_id",
                "labels": {
                    "$first": "$labels"
                },
                "comments": {
                    "$push": {
                        "$cond": [
                            {"$gt": ["$comments", {}]},
                            "$comments",
                            "$$REMOVE"
                        ]
                    }
                },
                "result": {
                    "$first": "$result"
                }
            }},
            {"$unwind": "$result"},
            {"$project": {
                "_id": 0
            }}
        ]
        return list(self.db.reviews.aggregate(pipeline))[0]
    def get_all_results_in_project(self, project_id, filter = None, sort_order = None):
        """Return all project results (labels resolved, users expanded).

        `filter` is a space-separated string of label ids a result must
        all carry; `sort_order` is "<field>_asc" or "<field>_desc" on the
        joined result document (default: date descending).
        """
        filters = []
        if filter is not None:
            # (Removed a leftover debug print of the parsed filter list.)
            filters = list(map(lambda x: ObjectId(x), filter.split(" ")))
        if sort_order is not None:
            sort = sort_order.split("_")
            if sort[1] == "asc":
                sort[1] = 1
            else:
                sort[1] = -1
        else:
            sort = ["date", -1]
        pipeline = [
            {"$match": {
                "_id": ObjectId(project_id)
            }},
            {"$unwind": "$results"},
            {"$lookup": {
                "from": "results",
                "localField": "results._id",
                "foreignField": "_id",
                "as": "results.result"
            }},
            {"$unwind": {
                "path": "$results.labels",
                "preserveNullAndEmptyArrays": True
            }},
            {"$set": {
                "results.labels": {
                    "$filter": {
                        "input": "$labels",
                        "as": "labels",
                        "cond": {
                            "$eq": [
                                "$$labels._id",
                                "$results.labels"
                            ]
                        }
                    }
                }
            }},
            {"$unwind": {
                "path": "$results.labels",
                "preserveNullAndEmptyArrays": True
            }},
            {
                "$replaceRoot": {
                    "newRoot": "$results"
                }
            },
            {"$group": {
                "_id": "$_id",
                "labels": {
                    "$push": {
                        "$cond": [
                            {"$gt": ["$labels", None]},
                            "$labels",
                            "$$REMOVE"
                        ]
                    }
                },
                "comments": {
                    "$first": "$comments"
                },
                "result": {
                    "$first": "$result"
                }
            }},
            {"$unwind": {
                "path": "$comments",
                "preserveNullAndEmptyArrays": True
            }},
            {"$lookup": {
                "from": "users",
                "localField": "comments.user",
                "foreignField": "sub",
                "as": "comments.user"
            }},
            {"$unwind": {
                "path": "$comments.user",
                "preserveNullAndEmptyArrays": True
            }},
            {"$project": {
                "comments.user.searches": 0, "comments.user.projects": 0, "comments.user._id": 0
            }},
            {"$group": {
                "_id": "$_id",
                "labels": {
                    "$first": "$labels"
                },
                "comments": {
                    "$push": {
                        "$cond": [
                            {"$gt": ["$comments", {}]},
                            "$comments",
                            "$$REMOVE"
                        ]
                    }
                },
                "result": {
                    "$first": "$result"
                }
            }},
            {"$project": {
                "_id": 0
            }},
            {"$unwind": "$result"},
            # Keep only results that carry every requested label.
            {"$match": {
                "$expr": {
                    "$cond": [
                        {"$gt": [filter, None]},
                        {"$setIsSubset": [filters, "$labels._id"]},
                        True
                    ]
                }
            }},
            {"$sort": {
                "result." + sort[0]: sort[1]
            }}
        ]
        # (Removed a leftover debug print(type(filters)).)
        return list(self.db.reviews.aggregate(pipeline))
    ################
    #   SEARCHES   #
    ################
    def add_search_to_project(self, project_id, search_id, add_results = False):
        """Attach a saved search (and optionally its results) to a project."""
        search_object = self.db.searches.find_one({"_id": ObjectId(search_id)})
        if add_results:
            results = search_object["results"]
        else:
            results = []
        self.db.reviews.update_one({"_id": ObjectId(project_id)}, {"$addToSet": {"searches": ObjectId(search_id),
                                                                                 "results": {"$each": results},
                                                                                 "terms": {"$each": search_object["terms"]}}})
        return {
            "message": "Added successfully"
        }
    def remove_search_from_project(self, project_id, search_id):
        """Detach a search from the project (its copied results/terms are kept)."""
        self.db.reviews.update_one({"_id": ObjectId(project_id)}, {"$pull": {"searches": ObjectId(search_id)}})
        return {
            "message": "Removed successfully"
        }
    ################
    #     META     #
    ################
    def change_meta_info(self, project_id, meta_object):
        """Set the project's name and description from `meta_object`."""
        #TODO: Validate meta object
        #{
        #   "name": "<name>",
        #   "description": "<description>"
        #}
        self.db.reviews.update_one({"_id": ObjectId(project_id)}, {"$set": {"name": meta_object["name"], "description": meta_object["description"]}})
        return meta_object
    def get_project(self, project_id):
        """Return the project overview: counts instead of full results,
        search/term/collab documents joined in, plus navigation links."""
        pipeline = [
            {"$match": {
                "_id": ObjectId(project_id)
            }},
            # Report only the number of results, not the full array.
            {"$set": {
                "results": {
                    "$size": "$results"
                }
            }},
            {
                "$lookup": {
                    "from": "searches",
                    "localField": "searches",
                    "foreignField": "_id",
                    "as": "searches"
                }
            },
            {"$unwind": "$searches"},
            {"$set": {
                "searches.terms": {
                    "$size": "$searches.terms"
                },
                "searches.results": {
                    "$size": "$searches.results"
                }
            }},
            {"$group": {
                "_id": "$_id",
                "doc": {
                    "$first": "$$ROOT"
                },
                "searches": {
                    "$push": "$searches"
                }
            }},
            {"$replaceRoot":
                {"newRoot": {
                    "$mergeObjects": [
                        "$doc",
                        {"searches": "$searches"}
                    ]
                }}
            },
            {
                "$lookup": {
                    "from": "terms",
                    "localField": "terms",
                    "foreignField": "_id",
                    "as": "terms"
                }
            },
            {
                "$lookup": {
                    "from": "users",
                    "localField": "collabs",
                    "foreignField": "sub",
                    "as": "collabs"
                }
            },
            {"$project": {
                "collabs._id": 0,
                "collabs.temp_searches": 0,
                "collabs.searches": 0,
                "collabs.projects": 0
            }},
            {"$addFields": {
                "_links": {
                    "results": "/project/" + project_id + "/results",
                    "collabs": "/project/" + project_id + "/collabs",
                    "labels": "/project/" + project_id + "/labels",
                    "meta": "/project/" + project_id + "/meta"
                }
            }}
        ]
        return list(self.db.reviews.aggregate(pipeline))[0]
    ################
    #     USER     #
    ################
    def find_user(self, username = None, name = None):
        """Find users by username and/or display name.

        Returns None when neither criterion is given, otherwise a list of
        matching user documents with private fields stripped.
        """
        # TODO: Validate user request object
        if username is None and name is None:
            return None
        matching_object = {}
        if username is not None:
            matching_object["username"] = username
        if name is not None:
            matching_object["name"] = name
        pipeline = [
            {"$match": matching_object},
            {"$project": {
                "_id": 0, "temp_searches": 0, "projects": 0, "searches": 0
            }}
        ]
        return list(self.db.users.aggregate(pipeline))
# Ad-hoc smoke test against the live database (requires MONGO to be set).
# A ~100-line block of commented-out experimentation code (old pipelines
# and alternative calls) was removed; see version control for history.
if __name__ == "__main__":
    db = DBService()
    print(db.find_user("<EMAIL>"))
| StarcoderdataPython |
140084 | <gh_stars>1-10
"""
doa.py
Direction of arrival (DOA) estimation.
Copyright (c) 2017 Idiap Research Institute, http://www.idiap.ch/
Written by <NAME> <<EMAIL>>
"""
import os
import math
import numpy as np
import scipy
_TOLERANCE = 1e-13
def load_pts_on_sphere(name='p4000'):
    """Load a precomputed set of points on the unit sphere.

    Args:
        name : dataset name; should always be 'p4000'

    Returns:
        pts : array of points on a unit sphere
    """
    # The .npy files ship inside the package's data/ directory.
    module_dir = os.path.split(__file__)[0]
    return np.load(os.path.join(module_dir, 'data', name + '.npy'))
def load_pts_horizontal(npts=360):
    """Unit vectors evenly spaced on the x-y plane circle.

    Args:
        npts : (default 360) number of points

    Returns:
        (npts, 3) array of unit vectors with z == 0
    """
    angles = np.arange(npts) * 2 * np.pi / npts
    return np.stack([np.cos(angles), np.sin(angles), np.zeros(npts)], axis=1)
def neighbor_list(pts, dist, scale_z=1.0):
    """For each point, indices of the other points within angular distance.

    Args:
        pts : array of points on a unit sphere
        dist : angular distance threshold (rad)
        scale_z : (default 1.0) z-axis scale; values below 1 admit more
                  neighbors along elevation

    Returns:
        list of lists of neighbor indices
    """
    if scale_z != 1.0:
        # Rescale z then re-normalize; operate on a copy so the caller's
        # array is left untouched.
        pts = np.copy(pts)
        pts[:, 2] *= scale_z
        pts /= np.linalg.norm(pts, axis=1, keepdims=True)
    # Pairwise cosine similarity; >= cos(dist) means "within dist".
    cos_sim = pts @ pts.T
    adjacency = cos_sim >= math.cos(dist)
    # A point is not its own neighbor.
    np.fill_diagonal(adjacency, False)
    return [list(np.nonzero(row)[0]) for row in adjacency]
_norm = np.linalg.norm


def angular_distance(a, b):
    """Angle in radians between 1-D vectors a and b; pi if either is ~zero."""
    a = np.asarray(a)
    b = np.asarray(b)
    assert a.ndim == 1
    assert a.shape == b.shape
    norm_product = _norm(a) * _norm(b)
    if norm_product < 1e-16:
        # Degenerate (near-zero) vector: treat as maximally distant.
        return math.pi
    cosine = np.dot(a, b) / norm_product
    # Clamp numerical overshoot before acos.
    if cosine >= 1.0:
        return 0.0
    if cosine <= -1.0:
        return math.pi
    return math.acos(cosine)
def azimuth_distance(a, b):
    """Angular distance between the x-y (azimuthal) projections of a and b."""
    return angular_distance(a[:2], b[:2])
def vec2ae(v):
    """Azimuth and elevation of one or many 3D vectors.

    Args:
        v : a single 3-vector, or an (n, 3) array of vectors

    Returns:
        [azimuth, elevation] in radians for a single vector, or an
        (n, 2) array of such pairs
    """
    v = np.asarray(v)
    if v.ndim == 1:
        x, y, z = v
    else:
        x, y, z = v[:, 0], v[:, 1], v[:, 2]
    # Length of the projection onto the x-y plane.
    horiz = np.sqrt(x ** 2 + y ** 2)
    return np.asarray([np.arctan2(y, x), np.arctan2(z, horiz)]).T
def vec2xsyma(v):
    """Angle between vector(s) and the +x axis, in [0, pi].

    Args:
        v : a single 3-vector, or an (n, 3) array of vectors

    Returns:
        the angle in radians (scalar or array)
    """
    v = np.asarray(v)
    if v.ndim == 1:
        x, y, z = v
    else:
        x, y, z = v[:, 0], v[:, 1], v[:, 2]
    # Distance from the x-axis in the y-z plane.
    off_axis = np.sqrt(y ** 2 + z ** 2)
    return np.arctan2(off_axis, x)
def vec2ysyma(v):
    """Signed angle from the x-z plane toward +y, in [-pi/2, pi/2].

    Equals pi/2 minus the angle between the vector and the y-axis.

    Args:
        v : a single 3-vector, or an (n, 3) array of vectors

    Returns:
        the angle in radians (scalar or array)
    """
    v = np.asarray(v)
    if v.ndim == 1:
        x, y, z = v
    else:
        x, y, z = v[:, 0], v[:, 1], v[:, 2]
    # Distance from the y-axis in the x-z plane.
    off_axis = np.sqrt(x ** 2 + z ** 2)
    return np.arctan2(y, off_axis)
def _u_sqr_minus_1(lam, m, tau):
n, d = m.shape
try:
u = np.linalg.solve(m.T * m - lam * np.eye(d), m.T * tau)
except np.linalg.LinAlgError as e:
print(e)
# TODO handle singular case
return -1
return np.asscalar(u.T * u - 1)
def _error_func(u, m, tau):
if not isinstance(u, np.matrix):
u = np.asmatrix(u).T
d = tau - m * u
return np.asscalar(d.T * d)
def _constraint_func(u):
if not isinstance(u, np.matrix):
u = np.asmatrix(u).T
return 1.0 - np.asscalar(u.T * u)
def doa_least_squares(pw_tdoa, m_pos, c=340.29):
    """DOA estimation by minimizing TDOA error (L2 norm).

    The objective is formulated with the far-field assumption:

        minimize   : f(u) = |tau - m u|^2
        subject to : g(u) = |u|^2 - 1 = 0

    where u is the direction of arrival, tau is the vector of estimated
    difference of path (TDOA times sound speed), and m is the difference
    of microphone positions.

    Args:
        pw_tdoa : pairwise TDOA estimate, result from apkit.pairwise_tdoa.
        m_pos   : list of vector position in 3D space in meters.
        c       : (default 340.29 m/s) speed of sound.

    Returns:
        list of optimal solutions of u

    Change vs. original: np.asscalar (removed in NumPy 1.23) replaced by
    the equivalent ndarray.item(); logic otherwise unchanged.
    """
    pairs = list(pw_tdoa.keys())
    tau = np.matrix([pw_tdoa[p] * c for p in pairs]).T
    m = np.matrix([m_pos[i] - m_pos[j] for i, j in pairs])
    # dimension
    n, d = m.shape
    # check rank of m by computing svd
    u, s, vh = np.linalg.svd(m)
    rank = int((s > _TOLERANCE).sum())
    if rank == d:
        assert False  # TODO pepper has coplanar microphones
    else:
        # not able to compute rank deficiency of more than one
        assert rank == d - 1
        sinv = np.diag(np.append(1. / s[:-1], 0))
        sinv = np.append(sinv, np.zeros((n - d, n - d)), axis=1)
        # pseudo-inverse times tau: min-norm solution of the least-squares
        # problem (without constraints); x is orthogonal to m's null space.
        x = vh.H * sinv * u.H * tau
        if x.T * x <= 1.0:
            # norm of x is below 1: add a null-space component y to bring
            # the norm up to exactly one
            y = (1 - x.T * x).item() ** .5
            # two possible solutions (mirror images across the mic plane)
            u1 = x + vh[-1].H * y
            u2 = x - vh[-1].H * y
            return [u1.A1, u2.A1]
        else:
            # otherwise the solution must be somewhere f is not stationary,
            # while f - lambda * g is stationary; therefore solve for
            # lambda (the Lagrange multiplier)
            lam = scipy.optimize.fsolve(_u_sqr_minus_1, 1e-5, (m, tau))
            u = np.linalg.solve(m.T * m - lam * np.eye(d), m.T * tau)
            # TODO: there are two solutions, one at minimum of f and
            #       the other at maximum. However only one is solved here.
            # hack: also try the normalized unconstrained solution and keep
            # whichever has the smaller residual
            u2 = x / np.linalg.norm(x)
            if _error_func(u2, m, tau) < _error_func(u, m, tau):
                u = u2
            ''' tried with directly optimize objective numerically,
            but doesn't work
            res = scipy.optimize.minimize(_error_func, u2, (m, tau),
                    constraints={'type':'ineq', 'fun':_constraint_func})
            u3 = np.asmatrix(res.x).T
            print u3
            print np.linalg.norm(tau - m * u3)
            '''
            return [u.A1]
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
| StarcoderdataPython |
76507 | <reponame>dcompane/controlm_py
# coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.220
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import controlm_py
from controlm_py.api.config_api import ConfigApi # noqa: E501
from controlm_py.rest import ApiException
class TestConfigApi(unittest.TestCase):
    """ConfigApi unit test stubs.

    Auto-generated by swagger-codegen: one stub per ConfigApi endpoint.
    Every stub currently passes unconditionally and is a placeholder for a
    real test of the corresponding Control-M configuration API call.
    """

    def setUp(self):
        # Fresh API client per test; no state is shared between test cases.
        self.api = ConfigApi()  # noqa: E501

    def tearDown(self):
        # ConfigApi holds no external resources here; nothing to clean up.
        pass

    def test_add_agent(self):
        """Test case for add_agent

        add agent to Server  # noqa: E501
        """
        pass

    def test_add_archive_rule(self):
        """Test case for add_archive_rule

        Add Workload Archiving rule  # noqa: E501
        """
        pass

    def test_add_external_user(self):
        """Test case for add_external_user

        Add and external user  # noqa: E501
        """
        pass

    def test_add_external_user_or_user_group_to_mft_folder(self):
        """Test case for add_external_user_or_user_group_to_mft_folder

        Add external user or user groups to virtual folder external users list.  # noqa: E501
        """
        pass

    def test_add_gateway(self):
        """Test case for add_gateway

        add gateway.  # noqa: E501
        """
        pass

    def test_add_host_to_hostgroup(self):
        """Test case for add_host_to_hostgroup

        add agent to hostgroup  # noqa: E501
        """
        pass

    def test_add_hub_to_cluster(self):
        """Test case for add_hub_to_cluster

        add hub to cluster.  # noqa: E501
        """
        pass

    def test_add_mft_folder(self):
        """Test case for add_mft_folder

        Add virtual folder  # noqa: E501
        """
        pass

    def test_add_mft_user_group(self):
        """Test case for add_mft_user_group

        Add user group.  # noqa: E501
        """
        pass

    def test_add_pgp_template(self):
        """Test case for add_pgp_template

        Add PGP Template  # noqa: E501
        """
        pass

    def test_add_remote_host(self):
        """Test case for add_remote_host

        add remote host to Server  # noqa: E501
        """
        pass

    def test_add_role(self):
        """Test case for add_role

        Add Authorization Role  # noqa: E501
        """
        pass

    def test_add_role_to_ldap_group(self):
        """Test case for add_role_to_ldap_group

        Add a role to LDAP group  # noqa: E501
        """
        pass

    def test_add_role_to_user(self):
        """Test case for add_role_to_user

        Add a role to user  # noqa: E501
        """
        pass

    def test_add_secret(self):
        """Test case for add_secret

        Add a new secret  # noqa: E501
        """
        pass

    def test_add_server(self):
        """Test case for add_server

        add server to the system  # noqa: E501
        """
        pass

    def test_add_user(self):
        """Test case for add_user

        Add user  # noqa: E501
        """
        pass

    def test_add_zos_template(self):
        """Test case for add_zos_template

        Add z/OS Template  # noqa: E501
        """
        pass

    def test_authorize_mft_ssh_cluster(self):
        """Test case for authorize_mft_ssh_cluster

        Authorize SSH Cluster  # noqa: E501
        """
        pass

    def test_authorize_mft_ssh_host(self):
        """Test case for authorize_mft_ssh_host

        Authorize SSH Host  # noqa: E501
        """
        pass

    def test_authorize_ssh_known_remotehost(self):
        """Test case for authorize_ssh_known_remotehost

        Authorize  # noqa: E501
        """
        pass

    def test_change_user_password(self):
        """Test case for change_user_password

        Change user password  # noqa: E501
        """
        pass

    def test_create_agent_certificate_signing_request(self):
        """Test case for create_agent_certificate_signing_request

        Create certificate signing request (CSR).  # noqa: E501
        """
        pass

    def test_create_run_as_user(self):
        """Test case for create_run_as_user

        Add a new Run-as user  # noqa: E501
        """
        pass

    def test_delete_agent(self):
        """Test case for delete_agent

        delete an agent from Server  # noqa: E501
        """
        pass

    def test_delete_archive_rule(self):
        """Test case for delete_archive_rule

        Delete Workload Archiving rule  # noqa: E501
        """
        pass

    def test_delete_authorization_role(self):
        """Test case for delete_authorization_role

        Delete Authorization Role  # noqa: E501
        """
        pass

    def test_delete_external_user(self):
        """Test case for delete_external_user

        Delete an external user  # noqa: E501
        """
        pass

    def test_delete_external_user_or_user_group_from_mft_folder(self):
        """Test case for delete_external_user_or_user_group_from_mft_folder

        Remove an external user or user group from an existing virtual folder in MFT.  # noqa: E501
        """
        pass

    def test_delete_host_from_group(self):
        """Test case for delete_host_from_group

        delete an agent from a hostgroup  # noqa: E501
        """
        pass

    def test_delete_host_group(self):
        """Test case for delete_host_group

        delete host group  # noqa: E501
        """
        pass

    def test_delete_mft_folder(self):
        """Test case for delete_mft_folder

        Delete a virtual folder.  # noqa: E501
        """
        pass

    def test_delete_mft_user_group(self):
        """Test case for delete_mft_user_group

        Delete user group.  # noqa: E501
        """
        pass

    def test_delete_pgp_template(self):
        """Test case for delete_pgp_template

        Delete PGP Template  # noqa: E501
        """
        pass

    def test_delete_remote_host(self):
        """Test case for delete_remote_host

        delete a remote host from Server  # noqa: E501
        """
        pass

    def test_delete_role_from_ldap_group(self):
        """Test case for delete_role_from_ldap_group

        Delete a role from LDAP group  # noqa: E501
        """
        pass

    def test_delete_run_as_user(self):
        """Test case for delete_run_as_user

        delete Run-as user  # noqa: E501
        """
        pass

    def test_delete_secret(self):
        """Test case for delete_secret

        Delete an existing secret  # noqa: E501
        """
        pass

    def test_delete_user(self):
        """Test case for delete_user

        Delete user  # noqa: E501
        """
        pass

    def test_delete_zos_template(self):
        """Test case for delete_zos_template

        Delete z/OS Template  # noqa: E501
        """
        pass

    def test_deploy_agent_certificate(self):
        """Test case for deploy_agent_certificate

        Deploy certificate (CRT).  # noqa: E501
        """
        pass

    def test_disable_agent(self):
        """Test case for disable_agent

        disable agent from the Server  # noqa: E501
        """
        pass

    def test_enable_agent(self):
        """Test case for enable_agent

        enable agent from the Server  # noqa: E501
        """
        pass

    def test_failover(self):
        """Test case for failover

        Perform Manual Failover on a specified Server  # noqa: E501
        """
        pass

    def test_generate_mft_rsa_ssh_key(self):
        """Test case for generate_mft_rsa_ssh_key

        Generate RSA SSH Key  # noqa: E501
        """
        pass

    def test_get_agent_certificate_expiration_date(self):
        """Test case for get_agent_certificate_expiration_date

        Get certificate expiration date.  # noqa: E501
        """
        pass

    def test_get_agent_parameters(self):
        """Test case for get_agent_parameters

        get agent parameters  # noqa: E501
        """
        pass

    def test_get_agents(self):
        """Test case for get_agents

        get Server agents  # noqa: E501
        """
        pass

    def test_get_all_archive_rules(self):
        """Test case for get_all_archive_rules

        Get all Workload Archiving rules  # noqa: E501
        """
        pass

    def test_get_all_authorization_roles(self):
        """Test case for get_all_authorization_roles

        Get Authorization Roles  # noqa: E501
        """
        pass

    def test_get_all_organization_groups(self):
        """Test case for get_all_organization_groups

        Get All organization groups  # noqa: E501
        """
        pass

    def test_get_all_roles_associated_with_organization_group(self):
        """Test case for get_all_roles_associated_with_organization_group

        Get Authorization Roles associated with an organization group  # noqa: E501
        """
        pass

    def test_get_all_users(self):
        """Test case for get_all_users

        Get users  # noqa: E501
        """
        pass

    def test_get_archive_statistics(self):
        """Test case for get_archive_statistics

        Get Workload Archiving statistics  # noqa: E501
        """
        pass

    def test_get_external_user_authorized_folders(self):
        """Test case for get_external_user_authorized_folders

        Get MFT external user authorized folders  # noqa: E501
        """
        pass

    def test_get_external_users(self):
        """Test case for get_external_users

        Get MFT external users that match the search criteria.  # noqa: E501
        """
        pass

    def test_get_fts_settings(self):
        """Test case for get_fts_settings

        Get File Transfer Server (FTS) configuration data.  # noqa: E501
        """
        pass

    def test_get_hostgroups(self):
        """Test case for get_hostgroups

        get Server hostgroups  # noqa: E501
        """
        pass

    def test_get_hosts_in_group(self):
        """Test case for get_hosts_in_group

        get hostgroup agents  # noqa: E501
        """
        pass

    def test_get_hub_status_details(self):
        """Test case for get_hub_status_details

        Get hub status.  # noqa: E501
        """
        pass

    def test_get_mft_configuration(self):
        """Test case for get_mft_configuration

        Get MFT Configuration  # noqa: E501
        """
        pass

    def test_get_mft_folders(self):
        """Test case for get_mft_folders

        Get MFT virtual folders that match the search criteria.  # noqa: E501
        """
        pass

    def test_get_mft_gateways(self):
        """Test case for get_mft_gateways

        Get MFT gateways  # noqa: E501
        """
        pass

    def test_get_mft_user_groups(self):
        """Test case for get_mft_user_groups

        Get all user groups that match the search criteria.  # noqa: E501
        """
        pass

    def test_get_pgp_templates(self):
        """Test case for get_pgp_templates

        Get PGP Templates  # noqa: E501
        """
        pass

    def test_get_remote_host_properties(self):
        """Test case for get_remote_host_properties

        get a remote host configuration from Server  # noqa: E501
        """
        pass

    def test_get_remote_hosts(self):
        """Test case for get_remote_hosts

        get Server remote hosts  # noqa: E501
        """
        pass

    def test_get_role(self):
        """Test case for get_role

        Get Authorization Role  # noqa: E501
        """
        pass

    def test_get_role_associates(self):
        """Test case for get_role_associates

        Get all authorization entities associated with role  # noqa: E501
        """
        pass

    def test_get_run_as_user(self):
        """Test case for get_run_as_user

        Get Run-as user  # noqa: E501
        """
        pass

    def test_get_run_as_users_list(self):
        """Test case for get_run_as_users_list

        Get Run-as user list that match the requested search criteria.  # noqa: E501
        """
        pass

    def test_get_server_parameters(self):
        """Test case for get_server_parameters

        get Server parameters  # noqa: E501
        """
        pass

    def test_get_servers(self):
        """Test case for get_servers

        get all the Servers name and hostname in the system  # noqa: E501
        """
        pass

    def test_get_user(self):
        """Test case for get_user

        Get user  # noqa: E501
        """
        pass

    def test_get_user_effective_rights(self):
        """Test case for get_user_effective_rights

        Get user real effective authorizations  # noqa: E501
        """
        pass

    def test_get_workflow_insights_status(self):
        """Test case for get_workflow_insights_status

        get Workflow Insights status  # noqa: E501
        """
        pass

    def test_get_zos_templates(self):
        """Test case for get_zos_templates

        Get z/OS Templates  # noqa: E501
        """
        pass

    def test_list_secrets(self):
        """Test case for list_secrets

        Get list of secret names  # noqa: E501
        """
        pass

    def test_ping_agent(self):
        """Test case for ping_agent

        ping to the agent in the Server  # noqa: E501
        """
        pass

    def test_recycle_item(self):
        """Test case for recycle_item

        recycle item  # noqa: E501
        """
        pass

    def test_remove_controlm_server(self):
        """Test case for remove_controlm_server

        Delete Server  # noqa: E501
        """
        pass

    def test_remove_gateway(self):
        """Test case for remove_gateway

        remove gateway.  # noqa: E501
        """
        pass

    def test_remove_hub_from_cluster(self):
        """Test case for remove_hub_from_cluster

        remove hub from cluster.  # noqa: E501
        """
        pass

    def test_remove_role_from_user(self):
        """Test case for remove_role_from_user

        Remove a role from a user  # noqa: E501
        """
        pass

    def test_send_archive_cleanup_request(self):
        """Test case for send_archive_cleanup_request

        Deletes data (jobs including outputs and logs) from the Workload Archiving database.  # noqa: E501
        """
        pass

    def test_set_agent_parameter(self):
        """Test case for set_agent_parameter

        set agent parameter  # noqa: E501
        """
        pass

    def test_set_system_param(self):
        """Test case for set_system_param

        set value of a an em system parameter  # noqa: E501
        """
        pass

    def test_setasprimary(self):
        """Test case for setasprimary

        Set secondary server as Primary on a specified Server  # noqa: E501
        """
        pass

    def test_test_run_as_user(self):
        """Test case for test_run_as_user

        Test existed Run-as user  # noqa: E501
        """
        pass

    def test_update_archive_rule(self):
        """Test case for update_archive_rule

        Edit Workload Archiving rule  # noqa: E501
        """
        pass

    def test_update_external_user(self):
        """Test case for update_external_user

        Update an external user  # noqa: E501
        """
        pass

    def test_update_fts_settings(self):
        """Test case for update_fts_settings

        Update File Transfer Server (FTS) configuration data.  # noqa: E501
        """
        pass

    def test_update_hosts_in_hostgroup(self):
        """Test case for update_hosts_in_hostgroup

        update agents in hostgroup.  # noqa: E501
        """
        pass

    def test_update_mft_configuration(self):
        """Test case for update_mft_configuration

        Update MFT Configuration  # noqa: E501
        """
        pass

    def test_update_mft_folder(self):
        """Test case for update_mft_folder

        Update an existing virtual folder in MFT.  # noqa: E501
        """
        pass

    def test_update_mft_user_group(self):
        """Test case for update_mft_user_group

        Update user group.  # noqa: E501
        """
        pass

    def test_update_pgp_template(self):
        """Test case for update_pgp_template

        Update PGP Template  # noqa: E501
        """
        pass

    def test_update_role(self):
        """Test case for update_role

        Update Authorization Role  # noqa: E501
        """
        pass

    def test_update_run_as_user(self):
        """Test case for update_run_as_user

        Update Run-as user  # noqa: E501
        """
        pass

    def test_update_secret(self):
        """Test case for update_secret

        Update an existing secret  # noqa: E501
        """
        pass

    def test_update_user(self):
        """Test case for update_user

        Update user  # noqa: E501
        """
        pass

    def test_update_zos_template(self):
        """Test case for update_zos_template

        Update z/OS Template  # noqa: E501
        """
        pass
# Allow running this test module directly: `python test_config_api.py`.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1634793 | <filename>qmctorch/sampler/metropolis.py
from tqdm import tqdm
import torch
from torch.distributions import MultivariateNormal
from time import time
from typing import Callable, Union, Dict
from .sampler_base import SamplerBase
from .. import log
class Metropolis(SamplerBase):
    """Metropolis-Hastings sampler over walker/electron positions."""

    def __init__(self,
                 nwalkers: int = 100,
                 nstep: int = 1000,
                 step_size: float = 0.2,
                 ntherm: int = -1,
                 ndecor: int = 1,
                 nelec: int = 1,
                 ndim: int = 3,
                 init: Union[Dict, None] = None,
                 move: Union[Dict, None] = None,
                 cuda: bool = False):
        """Metropolis Hasting generator

        Args:
            nwalkers (int, optional): Number of walkers. Defaults to 100.
            nstep (int, optional): Number of steps. Defaults to 1000.
            step_size (int, optional): length of the step. Defaults to 0.2.
            nelec (int, optional): total number of electrons. Defaults to 1.
            ntherm (int, optional): number of mc step to thermalize. Defaults to -1, i.e. keep only last position
            ndecor (int, optional): number of mc step for decorrelation. Defaults to 1.
            ndim (int, optional): total number of dimension. Defaults to 3.
            init (dict, optional): method to init the positions of the walkers.
                See Molecule.domain(). Defaults to {'min': -5, 'max': 5}.
            move (dict, optional): method to move the electrons.
                Defaults to {'type': 'all-elec', 'proba': 'normal'} \n
                'type':
                    'one-elec': move a single electron per iteration \n
                    'all-elec': move all electrons at the same time \n
                    'all-elec-iter': move all electrons by iterating through single elec moves \n
                'proba' :
                    'uniform': uniform in a cube \n
                    'normal': gaussian in a sphere \n
            cuda (bool, optional): turn CUDA ON/OFF. Defaults to False.

        Examples::
            >>> mol = Molecule('h2.xyz')
            >>> wf = SlaterJastrow(mol)
            >>> sampler = Metropolis(nwalkers=100, nelec=wf.nelec)
            >>> pos = sampler(wf.pdf)
        """
        # Use None sentinels instead of mutable dict defaults: configure_move()
        # stores `move` on self and may insert keys into it, which would
        # silently mutate a shared default dict across instances.
        if init is None:
            init = {'min': -5, 'max': 5}
        if move is None:
            move = {'type': 'all-elec', 'proba': 'normal'}

        SamplerBase.__init__(self, nwalkers, nstep,
                             step_size, ntherm, ndecor,
                             nelec, ndim, init, cuda)
        self.configure_move(move)
        self.log_data()

    def log_data(self):
        """log data about the sampler."""
        log.info(' Move type : {0}', self.movedict['type'])
        log.info(
            ' Move proba : {0}', self.movedict['proba'])

    def __call__(self, pdf: Callable, pos: Union[None, torch.Tensor] = None,
                 with_tqdm: bool = True) -> torch.Tensor:
        """Generate a series of point using MC sampling

        Args:
            pdf (callable): probability distribution function to be sampled
            pos (torch.tensor, optional): position to start with.
                Defaults to None.
            with_tqdm (bool, optional): use tqdm progress bar. Defaults to True.

        Returns:
            torch.tensor: positions of the walkers
        """
        _type_ = torch.get_default_dtype()
        # Floor value used so that a vanishing pdf never yields a 0/0 in the
        # acceptance ratio.
        # NOTE(review): `eps` stays unbound for any dtype other than
        # float32/float64 (e.g. float16) -- confirm intended dtype support.
        if _type_ == torch.float32:
            eps = 1E-7
        elif _type_ == torch.float64:
            eps = 1E-16

        if self.ntherm >= self.nstep:
            raise ValueError('Thermalisation longer than trajectory')

        with torch.no_grad():

            # Negative ntherm counts from the end of the trajectory.
            # NOTE(review): this rewrites self.ntherm in place, so it is only
            # applied on the first call of the sampler.
            if self.ntherm < 0:
                self.ntherm = self.nstep + self.ntherm

            self.walkers.initialize(pos=pos)

            fx = pdf(self.walkers.pos)
            fx[fx == 0] = eps
            pos, rate, idecor = [], 0, 0

            rng = tqdm(range(self.nstep),
                       desc='INFO:QMCTorch| Sampling',
                       disable=not with_tqdm)
            tstart = time()

            for istep in rng:

                # One entry (None) for global moves, one index per electron
                # for 'all-elec-iter'.
                for id_elec in self.fixed_id_elec_list:

                    # new positions
                    Xn = self.move(pdf, id_elec)

                    # new function
                    fxn = pdf(Xn)
                    fxn[fxn == 0.] = eps
                    df = fxn / fx

                    # accept the moves
                    index = self._accept(df)

                    # acceptance rate
                    rate += index.byte().sum().float().to('cpu') / \
                        (self.nwalkers * self._move_per_iter)

                    # update position/function value
                    self.walkers.pos[index, :] = Xn[index, :]
                    fx[index] = fxn[index]
                    fx[fx == 0] = eps

                # Store positions only after thermalization, every ndecor
                # steps to decorrelate the samples.
                if (istep >= self.ntherm):
                    if (idecor % self.ndecor == 0):
                        pos.append(self.walkers.pos.to('cpu').clone())
                    idecor += 1

            if with_tqdm:
                log.info(
                    " Acceptance rate : {:1.2f} %", (rate / self.nstep * 100))
                log.info(
                    " Timing statistics : {:1.2f} steps/sec.", self.nstep/(time()-tstart))
                log.info(
                    " Total Time : {:1.2f} sec.", (time()-tstart))

        return torch.cat(pos).requires_grad_()

    def configure_move(self, move: Dict):
        """Configure the electron moves

        Args:
            move (dict, optional): method to move the electrons. default('all-elec','normal') \n
                'type':
                    'one-elec': move a single electron per iteration \n
                    'all-elec': move all electrons at the same time \n
                    'all-elec-iter': move all electrons by iterating through single elec moves \n
                'proba' :
                    'uniform': uniform in a cube \n
                    'normal': gaussian in a sphere \n

        Raises:
            ValueError: If moves are not recognized
        """
        self.movedict = move

        # Fill in missing keys with defaults.
        if 'type' not in self.movedict.keys():
            print('Metropolis : Set 1 electron move by default')
            self.movedict['type'] = 'one-elec'

        if 'proba' not in self.movedict.keys():
            print('Metropolis : Set uniform trial move probability')
            self.movedict['proba'] = 'uniform'

        if self.movedict['proba'] == 'normal':
            # Sigma chosen so the Gaussian FWHM equals step_size.
            _sigma = self.step_size / \
                (2 * torch.sqrt(2 * torch.log(torch.as_tensor(2.))))
            self.multiVariate = MultivariateNormal(
                torch.zeros(self.ndim), _sigma * torch.eye(self.ndim))

        self._move_per_iter = 1
        if self.movedict['type'] not in [
                'one-elec', 'all-elec', 'all-elec-iter']:
            raise ValueError(
                " 'type' in move should be 'one-elec','all-elec', \
                'all-elec-iter'")

        if self.movedict['type'] == 'all-elec-iter':
            self.fixed_id_elec_list = range(self.nelec)
            self._move_per_iter = self.nelec
        else:
            self.fixed_id_elec_list = [None]

    def move(self, pdf: Callable, id_elec: int) -> torch.Tensor:
        """Move electron one at a time in a vectorized way.

        Args:
            pdf (callable): function to sample
            id_elec (int): index of the electron to move (None picks a random
                electron per walker)

        Returns:
            torch.tensor: new positions of the walkers
        """
        if self.nelec == 1 or self.movedict['type'] == 'all-elec':
            return self.walkers.pos + self._move(self.nelec)

        else:
            # clone and reshape data : Nwalker, Nelec, Ndim
            new_pos = self.walkers.pos.clone()
            new_pos = new_pos.view(self.nwalkers,
                                   self.nelec, self.ndim)

            # get indexes
            if id_elec is None:
                index = torch.LongTensor(self.nwalkers).random_(
                    0, self.nelec)
            else:
                index = torch.LongTensor(self.nwalkers).fill_(id_elec)

            # change selected data
            new_pos[range(self.nwalkers), index,
                    :] += self._move(1)

            return new_pos.view(self.nwalkers, self.nelec * self.ndim)

    def _move(self, num_elec: int) -> torch.Tensor:
        """Propose a displacement for the electrons.

        Args:
            num_elec (int): number of electrons to move

        Returns:
            torch.tensor: displacement of shape (nwalkers, num_elec * ndim)
        """
        if self.movedict['proba'] == 'uniform':
            # Uniform in a cube of side 2 * step_size centered at 0.
            d = torch.rand(
                (self.nwalkers, num_elec, self.ndim), device=self.device).view(
                self.nwalkers, num_elec * self.ndim)
            return self.step_size * (2. * d - 1.)

        elif self.movedict['proba'] == 'normal':
            displacement = self.multiVariate.sample(
                (self.nwalkers, num_elec)).to(self.device)
            return displacement.view(
                self.nwalkers, num_elec * self.ndim)

    def _accept(self, proba: torch.Tensor) -> torch.Tensor:
        """Accept the move or not.

        Args:
            proba (torch.tensor): acceptance probability of each move

        Returns:
            torch.tensor: boolean index of the accepted moves
        """
        proba[proba > 1] = 1.0
        tau = torch.rand_like(proba)
        index = (proba - tau >= 0).reshape(-1)
        return index.type(torch.bool)
| StarcoderdataPython |
4840415 | from google_hangouts_chat_bot.commands import Commands
from google_hangouts_chat_bot.event_handler import EventHandler
from tests.functional.helpers import load_payload
def test_added_to_room():
    """EventHandler greets the room when the bot is added to it."""
    event = load_payload("added_to_room")
    response = EventHandler(event, Commands()).process()
    greeting = (
        "Hello people! Thanks for adding me to *Testing room*!\n\n"
        "Please, type *help* for more information about the commands available."
    )
    assert response == {"text": greeting}
| StarcoderdataPython |
152357 | <reponame>HPCToolkit/hpctest
#====================#
# AMGMK PACKAGE FILE #
#====================#
from spack import *
class Amgmk(MakefilePackage):
    """ This microkernel contains three compute-intensive sections of the larger AMG benchmark.
        Optimizing performance for these three sections will improve the figure of merit of AMG.
        AMGmk, like the full AMG benchmark, is written in C. The three sections chosen to create
        this benchmark perform compressed sparse row (CSR) matrix vector multiply, algebraic
        multigrid (AMG) mesh relaxation, and a simple a * X + Y vector operation. OpenMP
        directives allow additional increases in performance. AMGmk uses no MPI parallelism and
        is meant to be studied as a single-CPU benchmark or OpenMP benchmark only. The run time
        of this benchmark is not linearly related to the figure of merit of the larger AMG
        benchmark because the exact proportion of time spent performing these three operations
        varies depending on the size of the problem and the specific linear system being solved.
    """

    homepage = "https://asc.llnl.gov/CORAL-benchmarks/"
    url = "https://asc.llnl.gov/CORAL-benchmarks/Micro/amgmk-v1.0.tar.gz"

    # NOTE(review): the second positional argument of version() is normally a
    # checksum; 'app/amgmk' looks like an HPCTest-specific path -- confirm.
    version('1.0', 'app/amgmk')

    variant('openmp', description='Build with OpenMP support', default=True)

    @property
    def build_targets(self):
        """Compose the argument list passed to `make`.

        Returns:
            list: make arguments selecting the HPCTest makefile and setting
            the CC, OFLAGS and LDFLAGS variables.
        """
        # Always build via the HPCTest makefile. Previously '-f
        # Makefile.hpctest' was only appended when +openmp was enabled,
        # silently using the wrong makefile for ~openmp builds.
        targets = ['-f', 'Makefile.hpctest']
        languages = 'CC = {}'.format(spack_cc)

        # Base optimization flags. The OpenMP flag is appended only when the
        # +openmp variant is active; it used to be added unconditionally and
        # then a second time (duplicated) for +openmp builds.
        cxxflags = '-g -O3'
        if '+openmp' in self.spec:
            cxxflags += ' ' + self.compiler.openmp_flag
        ldflags = cxxflags

        targets.append(languages)
        targets.append('OFLAGS = {0}'.format(cxxflags))
        targets.append('LDFLAGS = {0}'.format(ldflags))

        return targets

    def install(self, spec, prefix):
        """Copy the built `AMGMk` binary into the installation prefix."""
        mkdirp(prefix.bin)
        install('AMGMk', prefix.bin)
| StarcoderdataPython |
3325203 | import numpy as np
import os
import random
import cv2
import operator
import json
import time
from utils.tools import normalization, augment_bbox
# Change this path to the user's own dataset path.
# Raw string avoids the invalid "\D" escape sequence in "~\Desktop"
# (a DeprecationWarning in Python 3.6+, a SyntaxError in future versions).
desktop_path = os.path.expanduser(r"~\Desktop")
seq_path = os.path.join(desktop_path, "dataset", 'MOT')
class data():
    """Loader for MOT-style tracking data: detections / ground truth grouped
    by frame and by track id, plus training-batch builders for JI-Net and
    DeepTAMA."""

    def __init__(self, is_test=False, seq_names=None):
        """Load sequence names, per-sequence info and detection lists.

        Args:
            is_test (bool): if True read `det/det.txt` (detections) of the
                given sequences; otherwise read `gt/gt.txt` (ground truth)
                of the trainval group file.
            seq_names (list, optional): sequence names, required when
                is_test is True.
        """
        # None sentinel instead of a mutable [] default argument.
        if seq_names is None:
            seq_names = []
        if is_test:
            assert len(seq_names) > 0, "You should set the seq_lists during test"

        # Read sequence name and info
        self.seq_names, self.train_idxs, self.val_idxs = self.read_seq_names(is_test, seq_lists=seq_names)
        self.seq_infos = self.read_seq_info()

        # Create lists for training
        self.seq_lists = self.read_dets(self.seq_names, is_test)
        self.fr_lists = []
        self.id_lists = []
        for i in range(0, len(self.seq_lists)):
            fr_list = self.create_fr_lists(self.seq_lists[i])
            self.fr_lists.append(fr_list)
            # ID based Grouping is only available from GT
            if not is_test:
                id_list = self.create_id_lists(self.seq_lists[i])
                self.id_lists.append(id_list)

    def read_dets(self, seq_list, is_test=False):
        """
        Read ground truth tracking information seq-by-seq.
        Each sequence consists of float type data.
        :param seq_list: list of sequence names
        :return: list containing data of whole sequences
        """
        seq_lists = []
        for seq_name in seq_list:
            if is_test:
                det_path = os.path.join(seq_path, seq_name, "det", "det.txt")
            else:
                det_path = os.path.join(seq_path, seq_name, "gt", "gt.txt")
            # `with` closes the file handle (was previously leaked).
            with open(det_path) as det_file:
                lines = [line.rstrip('\n').split(',') for line in det_file if len(line) > 1]
            # Renamed from `seq_list`, which shadowed the parameter being iterated.
            if is_test:
                det_list = [list(list(map(float, line))) for line in lines]
            else:
                if 'MOT16' in seq_name:
                    # human related class labels in MOT16 dataset
                    valid_cls = [1, 2, 7]
                    # keep only considered (flag==1), sufficiently visible (>0.2),
                    # human-class ground-truth boxes
                    det_list = [list(map(round, list(map(float, line)))) for line in lines if
                                (int(line[6]) == 1) & (float(line[8]) > 0.2) & (int(line[7]) in valid_cls)]
                else:
                    det_list = [list(map(round, list(map(float, line)))) for line in lines]
            seq_lists.append(det_list)
        return seq_lists

    def create_fr_lists(self, seq_list):
        """
        Create usable lists sorted by frame number.
        format : [[fr, [id, x, y, w, h, conf, class], ...], [fr, ...], ...]
        :param seq_list: list, consists of information of each sequence
        :return: list, sorted by frame
        """
        fr_lists = [[1]]
        max_fr = 1

        seq_list = sorted(seq_list, key=operator.itemgetter(0, 1))
        for i in range(0, len(seq_list)):
            tmp_list = seq_list[i]
            cur_fr = int(tmp_list[0])
            tmp_fr_list = tmp_list[1:8]

            # Interpolate missing frames in list
            if cur_fr > max_fr:
                for fr in range(max_fr + 1, cur_fr + 1):
                    fr_lists.append([fr])
                max_fr = cur_fr
            fr_lists[cur_fr - 1].append(tmp_fr_list)

        return fr_lists

    def create_id_lists(self, seq_list):
        """
        Create usable lists arranged by id.
        format : [[id, [fr, x, y, w, h, conf, class], ...], [id, ...], ...]
        :param seq_list: list, consists of information of each sequence
        :return: list, sorted by id
        """
        id_lists = []

        seq_list = sorted(seq_list, key=operator.itemgetter(1, 0))
        for i in range(0, len(seq_list)):
            cur_id = seq_list[i][1]
            tmp_id_list = [seq_list[i][0], *seq_list[i][2:8]]
            # Pad missing ids so that id_lists[id-1][0] == id.
            while len(id_lists) < cur_id:
                id_lists.append([len(id_lists) + 1])
            id_lists[cur_id - 1].append(tmp_id_list)

        return id_lists

    def read_seq_names(self, is_test, seq_lists=None):
        """Return (seq_names, train_idxs, val_idxs) from the trainval group
        file, or the user-provided names during test."""
        # None sentinel instead of a mutable [] default argument.
        if seq_lists is None:
            seq_lists = []
        seq_names = []
        train_idxs = []
        val_idxs = []
        if is_test:
            seq_names = seq_lists
        else:
            with open(os.path.join('sequence_groups', 'trainval_group.json')) as json_file:
                json_data = json.load(json_file)
            seq_names.extend(json_data['train'])
            seq_names.extend(json_data['validation'])
            train_idxs = [i for i in range(len(json_data['train']))]
            val_idxs = [len(json_data['train']) + i for i in range(len(json_data['validation']))]

        return seq_names, train_idxs, val_idxs

    def read_seq_info(self):
        """Read per-sequence metadata files for all loaded sequences."""
        seq_infos = []
        for seq_name in self.seq_names:
            # Since MOT17 shares same sequences with MOT16
            if "MOT17" in seq_name:
                seq_name = "MOT16-" + seq_name.split('-')[1]
            # sequence info format [width, height, fps, total_frame_num]
            with open(os.path.join('sequence_infos', '{}.txt'.format(seq_name))) as seq_info_file:
                line = seq_info_file.readline()
            seq_infos.append(list(map(float, line.split(','))))

        return seq_infos

    def get_seq_info(self, seq_name):
        """Return the [width, height, fps, total_frame_num] info of a sequence."""
        name_idx = np.where(seq_name == np.array(self.seq_names))[0][0]
        return self.seq_infos[name_idx]

    def get_frame_info(self, seq_name, frame_num):
        """
        format : [fr, [id, x, y, w, h], [id, x, y, w, h], ..., [id, x, y, w, h]]
        :param seq_name: name of the sequence
        :param frame_num: current frame number
        :return: bgr image, current frame bbox list
        """
        seq_idx = np.where(np.array(self.seq_names) == seq_name)[0][0]
        fr_list = self.fr_lists[seq_idx]
        cur_img = self.read_bgr(seq_name, frame_num)
        if frame_num > len(fr_list):
            # Frame beyond the annotated range: image only, no boxes.
            return cur_img, np.array([])
        else:
            cur_fr_list = fr_list[frame_num - 1]
            assert frame_num == cur_fr_list[0], "Frame number doesn't match!"
            return cur_img, np.array(cur_fr_list[1:])

    def get_cropped_template(self, seq_name, fr, bbox):
        """Crop bbox [x, y, w, h] from frame `fr` and resize to 64x128.

        Returns (template, is_valid); is_valid is False when the clipped crop
        is smaller than 10 px in either dimension.
        """
        img = self.read_bgr(seq_name, fr)
        # Clip the box to the image bounds before cropping.
        template = img[max(0, int(bbox[1])):min(img.shape[0], int(bbox[1]) + int(bbox[3])),
                       max(0, int(bbox[0])):min(img.shape[1], int(bbox[0]) + int(bbox[2]))]
        is_valid = True
        if template.shape[0] < 10 or template.shape[1] < 10:
            is_valid = False
        else:
            template = cv2.resize(template, (64, 128))

        return template, is_valid

    def read_bgr(self, seq_name, frame_num):
        """Read frame `frame_num` of `seq_name` as a BGR image (cv2 default)."""
        img_path = os.path.join(seq_path, seq_name, "img1", "{0:06d}.jpg".format(int(frame_num)))
        img = cv2.imread(img_path)
        return img

    def create_jinet_batch(self, batch_sz, train_val):
        """
        Create a JI-Net training batch of stacked template pairs.

        Samples (anchor, positive) pairs from the same track within 3 seconds
        and (anchor, negative) pairs from different tracks; each accepted
        sample contributes one positive and one negative example, so batch_sz
        is assumed to be even.
        :param batch_sz: number of examples to collect (assumed even)
        :param train_val: "train" to sample from the train split, anything
            else for the validation split
        :return: (img_batch [batch_sz,128,64,6], label_batch [batch_sz,2])
        """
        collected_num = 0
        img_batch = np.zeros((batch_sz, 128, 64, 6), dtype='float')
        label_batch = []

        # Select train&test sequence indexes
        all_name_set = np.array(self.seq_names)
        if train_val == "train":
            name_set = np.array(self.seq_names)[self.train_idxs]
            seq_info = np.array(self.seq_infos)[self.train_idxs]
        else:
            name_set = np.array(self.seq_names)[self.val_idxs]
            seq_info = np.array(self.seq_infos)[self.val_idxs]
        seq_idxs = [np.where(all_name_set == name)[0][0] for name in name_set]

        while collected_num < batch_sz:
            if collected_num % 100 == 0:
                print('collected : {}'.format(collected_num))
            name_idx = random.choice(range(len(seq_idxs)))
            seq_name = name_set[name_idx]
            seq_idx = seq_idxs[name_idx]
            id_list = self.id_lists[seq_idx]
            fr_rate = seq_info[name_idx][2]
            # Positive pairs must lie within 3 seconds of the anchor.
            max_fr_diff = fr_rate * 3

            # Random anchor ID choice
            anchor_track_idx = random.choice(range(0, len(id_list)))
            anchor_track = id_list[anchor_track_idx][1:]
            if len(anchor_track) < 2:
                continue
            anchor_bb_idx = random.choice(range(0, len(anchor_track)))
            anchor_bb = np.array(anchor_track[anchor_bb_idx], dtype='float')

            # Random pos bb choice within the temporal window
            st_idx = 0
            for idx in range(anchor_bb_idx - 1, -1, -1):
                if abs(anchor_track[st_idx][0] - anchor_bb[0]) > max_fr_diff:
                    st_idx = idx + 1
                    break
            ed_idx = len(anchor_track) - 1
            for idx in range(anchor_bb_idx + 1, len(anchor_track), 1):
                if abs(anchor_track[ed_idx][0] - anchor_bb[0]) > max_fr_diff:
                    ed_idx = idx - 1
                    break
            if ed_idx == st_idx:
                continue
            pos_bb_idx = random.choice(range(st_idx, ed_idx + 1))
            while pos_bb_idx == anchor_bb_idx:
                pos_bb_idx = random.choice(range(st_idx, ed_idx + 1))
            pos_bb = np.array(anchor_track[pos_bb_idx], dtype='float')

            # Random neg ID & bb choice (different track than the anchor)
            neg_track_idx = random.choice(range(0, len(id_list)))
            while neg_track_idx == anchor_track_idx:
                neg_track_idx = random.choice(range(0, len(id_list)))
            neg_track = id_list[neg_track_idx][1:]
            if len(neg_track) == 0:
                continue
            neg_bb_idx = random.choice(range(0, len(neg_track)))
            neg_bb = np.array(neg_track[neg_bb_idx], dtype='float')

            # Get RGB templates after applying random noise
            cropped_anchor, is_valid1 = self.get_cropped_template(seq_name, anchor_bb[0], augment_bbox(anchor_bb[1:5]))
            cropped_pos, is_valid2 = self.get_cropped_template(seq_name, pos_bb[0], augment_bbox(pos_bb[1:5]))
            cropped_neg, is_valid3 = self.get_cropped_template(seq_name, neg_bb[0], augment_bbox(neg_bb[1:5], very_noisy=True))
            if not (is_valid1 and is_valid2 and is_valid3):
                continue
            anchor_img = normalization(cropped_anchor)
            pos_img = normalization(cropped_pos)
            neg_img = normalization(cropped_neg)

            # One positive ([1, 0]) and one negative ([0, 1]) example per
            # accepted sample; requires an even batch_sz.
            img_batch[collected_num, :, :, :] = np.concatenate((anchor_img, pos_img), 2)
            collected_num += 1
            img_batch[collected_num, :, :, :] = np.concatenate((anchor_img, neg_img), 2)
            collected_num += 1
            label_batch.extend([[1, 0], [0, 1]])

        return img_batch, np.array(label_batch)

    def create_deeptama_batch(self, max_trk_len, batch_sz, train_val):
        """
        Create a DeepTAMA (LSTM) training batch.

        Each accepted sample is a short track of template pairs plus shape
        differences, emitted once against a positive anchor and once against
        a negative anchor, so batch_sz is assumed to be even.
        :param max_trk_len: maximum track length (shorter tracks are
            zero-padded at the front)
        :param batch_sz: number of examples to collect (assumed even)
        :param train_val: "train" or validation split selector
        :return: (img_batch, shp_batch, label_batch, track_len)
        """
        all_name_set = np.array(self.seq_names)
        if train_val == "train":
            name_set = np.array(self.seq_names)[self.train_idxs]
            seq_info = np.array(self.seq_infos)[self.train_idxs]
        else:
            name_set = np.array(self.seq_names)[self.val_idxs]
            seq_info = np.array(self.seq_infos)[self.val_idxs]
        seq_idxs = [np.where(all_name_set == name)[0][0] for name in name_set]

        img_batch = np.zeros((batch_sz, max_trk_len, 128, 64, 6), dtype='float')
        shp_batch = np.zeros((batch_sz, max_trk_len, 3), dtype='float')
        label_batch = []
        track_len = []
        min_len = 1
        collected_num = 0
        while collected_num < batch_sz:
            if collected_num % 100 == 0:
                print("collected : {}".format(collected_num))

            # Get an anchor sequence
            name_idx = random.choice(range(len(seq_idxs)))
            seq_idx = seq_idxs[name_idx]
            seq_name = name_set[name_idx]
            fr_rate = seq_info[name_idx][2]
            # Track members must lie within 2 seconds of the anchor.
            max_fr_diff = fr_rate * 2

            # Get a positive anchor (retry up to 5 times for a long-enough track)
            anchor_idx = random.choice([i for i in range(0, len(self.id_lists[seq_idx]))])
            recur_cnt = 0
            while len(self.id_lists[seq_idx][anchor_idx][1:]) <= min_len and recur_cnt <= 5:
                anchor_idx = random.choice([i for i in range(0, len(self.id_lists[seq_idx]))])
                recur_cnt += 1
            if recur_cnt > 5:
                print('while 1 stuck')
                continue
            anchor_id = self.id_lists[seq_idx][anchor_idx][0]
            anchor_dets = self.id_lists[seq_idx][anchor_idx][1:]
            anchor_det_idx = random.choice([i for i in range(min_len, len(anchor_dets))])
            anchor_det = np.array(anchor_dets[anchor_det_idx], dtype='float')

            # Make a positive track
            # Limit a searching range
            # NOTE(review): unlike create_jinet_batch, this scan has no
            # `break`, so st_idx keeps the last index exceeding the window --
            # confirm whether that is intended.
            st_idx = 0
            for idx in range(anchor_det_idx - 1, -1, -1):
                if anchor_dets[idx][0] - anchor_det[0] > max_fr_diff:
                    st_idx = idx + 1

            # Infeasible case
            if (anchor_det_idx - st_idx) <= min_len:
                continue
            pos_pool = anchor_dets[st_idx:anchor_det_idx]
            sampling_num = random.choice([i for i in range(min_len, min(len(pos_pool), max_trk_len))])
            pos_dets = random.sample(pos_pool, sampling_num)
            pos_dets.sort(key=lambda x: x[0])

            # Take a negative anchor from a same frame of the positive anchor
            anchor_fr_dets = self.fr_lists[seq_idx][int(anchor_det[0]) - 1][1:]
            if not len(anchor_fr_dets) > 1:
                continue
            neg_det = random.sample(anchor_fr_dets, 1)[0]
            recur_cnt = 1
            while neg_det[0] == anchor_id and recur_cnt <= 5:
                neg_det = random.sample(anchor_fr_dets, 1)[0]
                recur_cnt += 1
            if recur_cnt > 5:
                print('while 2 stuck')
                continue
            neg_det[0] = anchor_det[0]
            neg_det = np.array(neg_det, dtype='float')

            # Make batch
            anchor_det[1:5] = augment_bbox(anchor_det[1:5])
            anchor_img, is_valid1 = self.get_cropped_template(seq_name, anchor_det[0], anchor_det[1:5])
            neg_det[1:5] = augment_bbox(neg_det[1:5], very_noisy=True)
            neg_img, is_valid2 = self.get_cropped_template(seq_name, neg_det[0], neg_det[1:5])
            if not (is_valid1 and is_valid2):
                continue
            anchor_img = normalization(anchor_img)
            neg_img = normalization(neg_img)
            # Shape descriptor: [frame, w, h].
            anchor_shp = np.array([anchor_det[0], *anchor_det[3:5]])
            neg_shp = np.array([neg_det[0], *neg_det[3:5]])

            tmp_pos_img_batch = np.zeros((0, 128, 64, 6), dtype='float')
            tmp_neg_img_batch = np.zeros((0, 128, 64, 6), dtype='float')
            tmp_pos_shp_batch = np.zeros((0, 3), dtype='float')
            tmp_neg_shp_batch = np.zeros((0, 3), dtype='float')
            cur_trk_len = len(pos_dets)
            # Front zero-padding so every track occupies max_trk_len steps.
            tmp_padding_img_batch = np.zeros((max_trk_len - cur_trk_len, 128, 64, 6), dtype='float')
            tmp_padding_shp_batch = np.zeros((max_trk_len - cur_trk_len, 3), dtype='float')
            is_valid3 = True
            for idx, pos_det in enumerate(pos_dets):
                pos_det = np.array(pos_det, dtype='float')
                # The most recent detection gets stronger bbox noise.
                if idx == len(pos_dets) - 1:
                    pos_det[1:5] = augment_bbox(pos_det[1:5], very_noisy=True)
                else:
                    pos_det[1:5] = augment_bbox(pos_det[1:5])
                pos_img, is_valid = self.get_cropped_template(seq_name, pos_det[0], pos_det[1:5])
                if not is_valid:
                    is_valid3 = False
                    break
                pos_img = normalization(pos_img)
                pos_shp = np.array([pos_det[0], *pos_det[3:5]], dtype='float')
                # NOTE(review): pos_shp_diff / neg_shp_diff are computed but
                # unused; the stored values below are the raw relative diffs.
                pos_shp_diff = pos_shp - anchor_shp
                pos_shp_diff[0] /= fr_rate
                pos_shp_diff[1:3] /= anchor_shp[1:3]
                neg_shp_diff = pos_shp - neg_shp
                neg_shp_diff[0] /= fr_rate
                neg_shp_diff[1:3] /= neg_shp[1:3]
                tmp_pos_img_batch = np.vstack(
                    (tmp_pos_img_batch, np.expand_dims(np.concatenate((pos_img, anchor_img), 2), 0)))
                tmp_pos_shp_batch = np.vstack(
                    (tmp_pos_shp_batch, np.expand_dims((pos_shp - anchor_shp) / anchor_shp, 0)))
                tmp_neg_img_batch = np.vstack(
                    (tmp_neg_img_batch, np.expand_dims(np.concatenate((pos_img, neg_img), 2), 0)))
                tmp_neg_shp_batch = np.vstack((tmp_neg_shp_batch, np.expand_dims((pos_shp - neg_shp) / neg_shp, 0)))
            if not is_valid3:
                continue
            tmp_pos_img_batch = np.vstack((tmp_padding_img_batch, tmp_pos_img_batch))
            tmp_neg_img_batch = np.vstack((tmp_padding_img_batch, tmp_neg_img_batch))
            tmp_pos_shp_batch = np.vstack((tmp_padding_shp_batch, tmp_pos_shp_batch))
            tmp_neg_shp_batch = np.vstack((tmp_padding_shp_batch, tmp_neg_shp_batch))

            # One positive and one negative example per accepted sample;
            # requires an even batch_sz.
            img_batch[collected_num, :, :, :, :] = tmp_pos_img_batch
            shp_batch[collected_num, :, :] = tmp_pos_shp_batch
            collected_num += 1
            img_batch[collected_num, :, :, :, :] = tmp_neg_img_batch
            shp_batch[collected_num, :, :] = tmp_neg_shp_batch
            collected_num += 1
            label_batch.extend([[1, 0], [0, 1]])
            track_len.extend([cur_trk_len, cur_trk_len])

        return img_batch, shp_batch, np.array(label_batch), track_len

    def get_deeptama_batch(self, max_trk_length, batch_sz, train_val):
        """Public wrapper around create_deeptama_batch."""
        img_batch, shp_batch, label_batch, track_len = self.create_deeptama_batch(max_trk_length, batch_sz, train_val)
        return img_batch, shp_batch, label_batch, track_len

    def get_jinet_batch(self, batch_sz, train_val):
        """Public wrapper around create_jinet_batch."""
        img_batch, label_batch = self.create_jinet_batch(batch_sz, train_val)
        return img_batch, label_batch
| StarcoderdataPython |
# Read two values and show a menu:
#   [1] add  [2] multiply  [3] greater  [4] new numbers  [5] quit
# The program performs the requested operation for each menu choice.
from time import sleep

n1 = int(input('1º VALOR: '))
n2 = int(input('2ª VALOR: '))
opcao = 0
while opcao != 5:
    print('''\
[1]somar
[2]multiplicar
[3]maior
[4]novos numeros
[5]sair do programa''')
    opcao = int(input('Digite aqui sua opção: '))
    if opcao == 1:
        print('{} + {} = {}'.format(n1, n2, n1 + n2))
    elif opcao == 2:
        print('{} x {} = {}'.format(n1, n2, n1*n2))
    elif opcao == 3:
        # Compare the two operands; ties are reported separately.
        if n1 == n2:
            print('Os dois valores são IGUAIS')
        else:
            maior, menor = (n1, n2) if n1 > n2 else (n2, n1)
            print('{} é maior que {}'.format(maior, menor))
    elif opcao == 4:
        n1 = int(input('1º VALOR: '))
        n2 = int(input('2ª VALOR: '))
    elif opcao == 5:
        print('Finalizando...')
    else:
        print('Opção inválida')
    # Separator and pause run after every choice, including quit.
    print('=-'*15)
    sleep(2)
print('FIM')
| StarcoderdataPython |
3262771 | <reponame>for-code0216/PoseNAS
""" Training augmented model """
import os
import sys
import argparse
import time
import glob
import numpy as np
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from tensorboardX import SummaryWriter
from core.loss import JointsMSELoss
from core.config import config
from core.config import update_config
from core.function import *
from models.model_augment import Network
from models import genotypes as gt
import dataset
from utils.utils import save_checkpoint
from utils.utils import create_logger
from utils.utils import count_parameters_in_MB
device = torch.device("cuda")
def parse_args():
    """Parse CLI arguments in two phases.

    Phase 1 parses only --cfg with parse_known_args so that update_config can
    load the experiment file *before* the remaining flags are parsed; the
    ordering of these calls is deliberate and load-bearing.
    """
    parser = argparse.ArgumentParser(description='Train keypoints network')
    # general: the config file is mandatory
    parser.add_argument('--cfg',
                        help='experiment configure file name',
                        required=True,
                        type=str)
    args, rest = parser.parse_known_args()
    # update the global config from the file before parsing override flags
    update_config(args.cfg)
    # searching: optional GPU override (applied later by reset_config)
    parser.add_argument('--gpus',
                        help='gpus',
                        type=str)
    args = parser.parse_args()
    return args
def reset_config(config, args):
    """Copy command-line overrides onto the global config (in place)."""
    gpu_override = args.gpus
    # An empty/None value means the flag was not supplied; keep the default.
    if gpu_override:
        config.GPUS = gpu_override
def main():
    """Train the searched pose network and keep the best checkpoint.

    Side effects: creates log/output directories, writes TensorBoard events,
    saves a checkpoint every epoch and final_state.pth at the end.
    """
    args = parse_args()
    reset_config(config, args)
    # tensorboard / logging setup
    logger, final_output_dir, tb_log_dir = create_logger(config, args.cfg, 'train', 'train')
    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED
    # NOTE(review): this unconditionally overrides the DETERMINISTIC/BENCHMARK
    # settings read from the config just above -- confirm that is intended.
    torch.backends.cudnn.benchmark = True
    model = Network(config, gt.PoseDecoder)
    model.init_weights()
    writer_dict = {
        'writer': SummaryWriter(log_dir=tb_log_dir),
        'train_global_steps': 0,
        'valid_global_steps': 0,
    }
    logger.info("param size = %fMB", count_parameters_in_MB(model))
    gpus = [int(i) for i in config.GPUS.split(',')]
    criterion = JointsMSELoss(use_target_weight = config.LOSS.USE_TARGET_WEIGHT).to(device)
    # wrap in DataParallel *after* init_weights so all replicas share weights
    model = nn.DataParallel(model, device_ids=gpus).to(device)
    logger.info("Logger is set - training start")
    # weights optimizer
    optimizer = torch.optim.Adam(model.parameters(), config.TRAIN.LR)
    # prepare dataloader (ImageNet normalization constants)
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
    # NOTE(review): eval() on a config-supplied dataset name executes arbitrary
    # code if the config is untrusted; getattr(dataset, name) would be safer.
    train_dataset = eval('dataset.'+config.DATASET.DATASET)(
        config,
        config.DATASET.ROOT,
        config.TRAIN.TRAIN_SET,
        True,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))
    valid_dataset = eval('dataset.'+config.DATASET.DATASET)(
        config,
        config.DATASET.ROOT,
        config.TRAIN.TEST_SET,
        False,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=config.TRAIN.BATCH_SIZE*len(gpus),
                                               shuffle=True,
                                               num_workers=config.WORKERS,
                                               pin_memory=True)
    valid_loader = torch.utils.data.DataLoader(valid_dataset,
                                               batch_size=config.TRAIN.BATCH_SIZE*len(gpus),
                                               shuffle=False,
                                               num_workers=config.WORKERS,
                                               pin_memory=True)
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, config.TRAIN.LR_STEP, config.TRAIN.LR_FACTOR)
    # training loop
    best_top1 = 0.
    best_model = False
    for epoch in range(config.TRAIN.EPOCHS):
        # training
        train(config, train_loader, model, criterion, optimizer, epoch,
              final_output_dir, tb_log_dir, writer_dict)
        # validation
        top1 = validate(
            config, valid_loader, valid_dataset, model, criterion,
            final_output_dir, tb_log_dir, writer_dict
        )
        # save a checkpoint every epoch; flag it as best when accuracy improved
        if best_top1 < top1:
            best_top1 = top1
            best_model = True
        else:
            best_model = False
        logger.info('=> saving checkpoint to {}'.format(final_output_dir))
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_state_dict': model.module.state_dict(),
            'perf': best_top1,
            'optimizer': optimizer.state_dict(),
        }, best_model, final_output_dir)
        lr_scheduler.step()
    final_model_state_file = os.path.join(
        final_output_dir, 'final_state.pth'
    )
    logger.info('=> saving final model state to {}'.format(
        final_model_state_file)
    )
    logger.info('=> best accuracy is {}'.format(best_top1))
    torch.save(model.module.state_dict(), final_model_state_file)
    writer_dict['writer'].close()
if __name__ == "__main__":
main()
| StarcoderdataPython |
139382 | <filename>fedml_api/distributed/fedavg_robust/FedAvgRobustAPI.py
from mpi4py import MPI
from fedml_api.distributed.fedavg_robust.FedAvgRobustAggregator import FedAvgRobustAggregator
from fedml_api.distributed.fedavg_robust.FedAvgRobustClientManager import FedAvgRobustClientManager
from fedml_api.distributed.fedavg_robust.FedAvgRobustServerManager import FedAvgRobustServerManager
from fedml_api.distributed.fedavg_robust.FedAvgRobustTrainer import FedAvgRobustTrainer
def FedML_init():
    """Initialize MPI and return (comm, process_id, worker_number)."""
    comm = MPI.COMM_WORLD
    return comm, comm.Get_rank(), comm.Get_size()
def FedML_FedAvgRobust_distributed(process_id, worker_number, device, comm, model, train_data_num, train_data_global,
                                   test_data_global, train_data_local_num_dict, train_data_local_dict,
                                   test_data_local_dict, poisoned_train_loader, targetted_task_test_loader,
                                   num_dps_poisoned_dataset, args):
    """Launch the robust FedAvg job: rank 0 runs the server, every other rank a client."""
    if process_id == 0:
        # Server side: aggregates client updates and evaluates the global model.
        init_server(args, device, comm, process_id, worker_number, model, train_data_num, train_data_global,
                    test_data_global, train_data_local_dict, test_data_local_dict, train_data_local_num_dict,
                    targetted_task_test_loader)
        return
    # Client side: local training (possibly on poisoned data).
    init_client(args, device, comm, process_id, worker_number, model, train_data_num, train_data_local_num_dict,
                train_data_local_dict, poisoned_train_loader, num_dps_poisoned_dataset)
def init_server(args, device, comm, rank, size, model, train_data_num, train_data_global, test_data_global,
                train_data_local_dict, test_data_local_dict, train_data_local_num_dict, targetted_task_test_loader):
    """Create the aggregator and run the FedAvgRobust server loop."""
    # One aggregator over size-1 workers (rank 0 is the server itself).
    aggregator = FedAvgRobustAggregator(train_data_global, test_data_global, train_data_num,
                                        train_data_local_dict, test_data_local_dict, train_data_local_num_dict,
                                        size - 1, device, model, targetted_task_test_loader, args)
    # Broadcast the initial model, then serve aggregation rounds.
    server_manager = FedAvgRobustServerManager(args, aggregator, comm, rank, size)
    server_manager.send_init_msg()
    server_manager.run()
def init_client(args, device, comm, process_id, size, model, train_data_num, train_data_local_num_dict,
                train_data_local_dict, poisoned_train_loader, num_dps_poisoned_dataset):
    """Create the local trainer and run the FedAvgRobust client loop."""
    # Client indices are 0-based while MPI ranks start at 1 for clients.
    trainer = FedAvgRobustTrainer(process_id - 1, train_data_local_dict, train_data_local_num_dict, train_data_num,
                                  device, model, poisoned_train_loader, num_dps_poisoned_dataset, args)
    FedAvgRobustClientManager(args, trainer, comm, process_id, size).run()
| StarcoderdataPython |
60454 | <reponame>mercycoach/FlaskTemplatelApp
# -*- coding: utf-8 -*-
"""Public section, including homepage and signup."""
from flask import Blueprint, flash, redirect, render_template, request, url_for
from flask_login import login_required, login_user, logout_user, current_user
from diytravelsite.extensions import login_manager
from diytravelsite.public.forms import LoginForm
from diytravelsite.user.forms import RegisterForm
from diytravelsite.user.models import User
from diytravelsite.utils import flash_errors
blueprint = Blueprint('public', __name__, static_folder='../static')
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: look up a user by primary key."""
    uid = int(user_id)
    return User.get_by_id(uid)
@blueprint.route('/', methods=['GET', 'POST'])
def index():
    """Home page with inline login handling."""
    form = LoginForm(request.form)
    if request.method == 'POST':
        if form.validate_on_submit():
            # The form validator looked up and authenticated form.user.
            login_user(form.user)
            flash('You are logged in.', 'success')
            return redirect(request.args.get('next') or url_for('user.members'))
        flash_errors(form)
    return render_template('public/index.html', form=form)
@blueprint.route('/login', methods=['GET', 'POST'])
def login():
    """Standalone login page.

    Fixes vs. the original:
    - url_for('index') / url_for('login') raised BuildError: these views live
      on the 'public' blueprint, so their endpoints are 'public.index' and
      'public.login'.
    - url_parse was never imported (NameError whenever ?next= was present);
      replaced with the stdlib urllib.parse.urlsplit.
    """
    if current_user and current_user.is_authenticated:
        return redirect(url_for('public.index'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.username.data).first()
        if user is None or not user.check_password(form.password.data):
            flash('Invalid username or password')
            return redirect(url_for('public.login'))
        login_user(user, remember=form.remember_me.data)
        # Only honor same-site ?next= targets to avoid an open redirect.
        from urllib.parse import urlsplit  # stdlib replacement for the undefined url_parse
        next_page = request.args.get('next')
        if not next_page or urlsplit(next_page).netloc != '':
            next_page = url_for('public.index')
        return redirect(next_page)
    return render_template('public/login.html', title='Sign In', form=form)
@blueprint.route('/logout/')
@login_required
def logout():
    """Log the current user out and redirect home."""
    logout_user()
    flash('You are logged out.', 'info')
    # NOTE(review): no 'home' view is defined on this blueprint in this file
    # (the root view is 'public.index'); confirm 'public.home' exists elsewhere
    # or this redirect raises BuildError at runtime.
    return redirect(url_for('public.home'))
@blueprint.route('/register/', methods=['GET', 'POST'])
def register():
    """Register a new user account."""
    form = RegisterForm(request.form)
    if not form.validate_on_submit():
        flash_errors(form)
        return render_template('public/register.html', form=form)
    User.create(username=form.username.data, email=form.email.data, password=form.password.data, active=True)
    flash('Thank you for registering. You can now log in.', 'success')
    # NOTE(review): 'public.home' endpoint is not defined in this file -- verify.
    return redirect(url_for('public.home'))
@blueprint.route('/about/')
def about():
    """About page."""
    # The login form is passed so the shared navbar login widget can render.
    form = LoginForm(request.form)
    return render_template('public/about.html', form=form)
# Simple template-only routes: each view just renders its page with a title.
@blueprint.route('/dashboard/')
def dashboard():
    """Dashboard page."""
    return render_template('public/dashboard.html', title='Dashboard')
@blueprint.route('/table_list/')
def table_list():
    """Table list page."""
    return render_template('public/table_list.html', title='table_list')
@blueprint.route('/typography/')
def typography():
    """Typography demo page."""
    return render_template('public/typography.html', title='Typography')
@blueprint.route('/icons/')
def icons():
    """Icons demo page."""
    return render_template('public/icons.html', title='Icons')
@blueprint.route('/maps/')
def maps():
    """Maps page."""
    return render_template('public/maps.html', title='Maps')
@blueprint.route('/notifications/')
def notifications():
    """Notifications page."""
    return render_template('public/notifications.html', title='Notifications')
@blueprint.route('/user_profile/')
def user_profile():
    """User profile page."""
    return render_template('public/user_profile.html', title='User Profile')
| StarcoderdataPython |
# Weather / time-of-day condition labels.
# (Repaired: the first line carried fused dataset-id junk that made it
# invalid Python.)
WEATHER = [
    "rainy",
    "thunderstorms",
    "sunny",
    "dusk",
    "dawn",
    "night",
    "snowy",
    "hazy rain",
    "windy",
    "partly cloudy",
    "overcast",
    "cloudy",
    ]
| StarcoderdataPython |
26917 | from __future__ import division
from netCDF4 import Dataset
import glob,os.path
import numpy as np
import numpy.ma as ma
from scipy.interpolate import UnivariateSpline
from matplotlib import cm
from matplotlib import ticker
import matplotlib.pyplot as plt
#import site
#site.addsitedir('/tera/phil/nchaparr/SAM2/sam_main/python')
#from Percentiles import *
from matplotlib.patches import Patch
import sys
#sys.path.insert(0, '/tera/phil/nchaparr/python')
import nchap_fun as nc
from Make_Timelist import *
import warnings
warnings.simplefilter('ignore', np.RankWarning)
#import pywt
from scipy import stats
from datetime import datetime
import fastfit as fsft
"""
In testing phase -- get_fit() for identifying ML top
To plot gradient maxima ie BL heights, and w on a 2d horizontal domain,
and get a histogram or contour plot of BL heigths
for an individual case
added function to get ticks and labels based on mean and standard deviation
"""
#TODO: a mess right now. but can be tidied up once regression code is included
def get_ticks(mean, stddev, max, min):
    """
    gets ticks and tick labels for a contour plot based on mean and
    standard deviation

    Arguments:
        mean, stddev, max, min
    Returns:
        label_list, tick_list
    """
    tick_list, label_list = [], []
    n_below = int(np.ceil((mean - min) / stddev))
    n_above = int(np.ceil((max - mean) / stddev))

    # Ticks below the mean: a single fractional tick at the minimum, or
    # whole-sigma ticks working up toward the mean.
    if n_below == 1:
        tick_list.append(min)
        label_list.append(r'$\mu - %.1f \sigma$' % ((mean - min) / stddev))
    else:
        for step in range(1, n_below):
            tick_list.append(mean - (n_below - step) * stddev)
            label_list.append(r'$\mu - %.1f \sigma$' % (n_below - step))

    tick_list.append(mean)
    label_list.append(r'$\mu$')

    # Ticks above the mean, mirrored logic.
    if n_above == 1:
        tick_list.append(max)
        label_list.append(r'$\mu + %.1f \sigma$' % ((max - mean) / stddev))
    else:
        for step in range(n_above - 1):
            tick_list.append(mean + (step + 1) * stddev)
            label_list.append(r'$\mu + %.1f \sigma$' % (step + 1))

    return label_list, tick_list
def get_fit(theta, height):
    """
    Fitting the local theta profile with three lines

    Brute-force search over breakpoint pairs (j, k) on a 290-level grid:
    line 1 fits below j, line 2 on [j, k), line 3 on [k, 290); each later
    segment is constrained to start where the previous line ends.  The pair
    minimizing the total residual sum of squares is selected, then the
    piecewise fit is rebuilt.  Returns (fitvals, RSS, j, k).
    (Python 2 code: uses print statements.)
    """
    fitvals = np.zeros_like(theta)
    # RSS[j, k] holds the total residual for breakpoints (j, k); NaN marks
    # unexplored pairs so they can be masked out before argmin.
    RSS = np.empty((290, 290))+ np.nan
    print RSS[0,0]
    for j in range(290):
        if j > 2:
            for k in range(290):
                if k>j+1 and k<289:
                    # Slope/intercept of each segment by least squares.
                    b_1 = (np.sum(np.multiply(height[:j], theta[:j])) - 1/j*np.sum(height[:j])*np.sum(theta[:j]))/(np.sum(height[:j]**2) - 1/j*np.sum(height[:j])**2)
                    a_1 = np.sum(np.multiply(height[:j], theta[:j]))/np.sum(height[:j]) - b_1*np.sum(height[:j]**2)/np.sum(height[:j])
                    b_2 = (np.sum(theta[j:k]) - (k-j)*(a_1+b_1*height[j]))/(np.sum(height[j:k]) - (k-j)*height[j])
                    a_2 = np.sum(np.multiply(height[j:k], theta[j:k]))/np.sum(height[j:k]) - b_2*np.sum(height[j:k]**2)/np.sum(height[j:k])
                    b_3 = (np.sum(theta[k:290]) - (290-k)*(a_2+b_2*height[k]))/(np.sum(height[k:290]) - (290-k)*height[k])
                    a_3 = np.sum(np.multiply(height[k:290], theta[k:290]))/np.sum(height[k:290]) - b_3*np.sum(height[k:290]**2)/np.sum(height[k:290])
                    RSS[j, k] = np.sum(np.add(theta[2:j], -(a_1+ b_1*height[2:j]))**2) + np.sum(np.add(theta[j:k], -(a_2+ b_2*height[j:k]))**2) + np.sum(np.add(theta[k:290], -(a_3+ b_3*height[k:290]))**2)
    # Mask unexplored entries and locate the best breakpoint pair.
    RSS = ma.masked_where(np.isnan(RSS), RSS)
    [j, k] = np.unravel_index(ma.argmin(RSS), RSS.shape)
    # Recompute coefficients at the optimum.  NOTE(review): this b_1 formula
    # differs from the one used inside the search loop (parenthesization and
    # height[2:j] vs height[:j]) -- looks unintentional; verify.
    b_1 = (np.sum(np.multiply(height[:j], theta[:j])) - 1/j*np.sum(height[:j]*np.sum(theta[:j])))/(np.sum(height[:j]**2) - 1/j*np.sum(height[2:j])**2)
    a_1 = np.sum(np.multiply(height[:j], theta[:j]))/np.sum(height[:j]) - b_1*np.sum(height[:j]**2)/np.sum(height[:j])
    b_2 = (np.sum(theta[j:k]) - (k-j)*(a_1+b_1*height[j]))/(np.sum(height[j:k]) - (k-j)*height[j])
    a_2 = np.sum(np.multiply(height[j:k], theta[j:k]))/np.sum(height[j:k]) - b_2*np.sum(height[j:k]**2)/np.sum(height[j:k])
    b_3 = (np.sum(theta[k:290]) - (290-k)*(a_2+b_2*height[k]))/(np.sum(height[k:290]) - (290-k)*height[k])
    a_3 = np.sum(np.multiply(height[k:290], theta[k:290]))/np.sum(height[k:290]) - b_3*np.sum(height[k:290]**2)/np.sum(height[k:290])
    fitvals[:j] = b_1*height[:j] + a_1
    fitvals[j:k] = b_2*height[j:k] + a_2
    fitvals[k:290] = b_3*height[k:290] + a_3
    return fitvals, RSS, j, k
# Driver script (Python 2): pick one model dump time, fit a 3-line profile to
# the local potential-temperature column at a fixed horizontal point, and plot
# the profile next to its piecewise fit.
#Lists of times relating to output (nc) files
dump_time_list, time_hrs = Make_Timelists(1, 600, 28800)
dump_time = dump_time_list[11]
print dump_time
for k in range(1):
    #getting variables from nc files
    [wvels, theta, tracer, height] = nc.Get_Var_Arrays("/tera2/nchaparr/Mar52014/runs/sam_case", "/OUT_3D/keep/NCHAPP1_testing_doscamiopdata_24_", dump_time, k+1)
    #getting points of maximum theta gradient, getting rid of this soon
    #[dvardz, grad_peaks] = nc.Domain_Grad(theta, height)
    #tops_indices=np.where(np.abs(grad_peaks - 1400)<10)
    #choosing one horizontal point
    for i in range(1):
        #top_index = [tops_indices[0][i], tops_indices[1][i]]
        #[i, j] = top_index
        # NOTE(review): the loop variable i is immediately overwritten here,
        # so only the single point (50, 50) is ever processed.
        [i, j] = [50, 50]
        thetavals = theta[:, i, j]
        startTime = datetime.now()
        #print 'Start', startTime#1
        # Index of the first level within 100 m of 2300 m: the fit ceiling.
        top = np.where(np.abs(height-2300)<100)[0][0]
        print top, height[top]
        # Fast compiled breakpoint search (see fastfit); J, K are breakpoints.
        RSS, J, K = fsft.get_fit(thetavals, height, top)
        #print J, height[J]
        #print 'RSS time', (datetime.now()-startTime)
        # Rebuild the three constrained least-squares line segments.
        fitvals = np.zeros_like(thetavals[:top])
        b_1 = (np.sum(np.multiply(height[9:J], thetavals[9:J])) - 1.0/(J-9)*np.sum(height[9:J]*np.sum(thetavals[9:J])))/(np.sum(height[9:J]**2) - 1.0/(J-9)*np.sum(height[9:J])**2)
        #print np.sum(np.multiply(height[9:J], thetavals[9:J])), - 1.0/(J-9)*np.sum(height[9:J]*np.sum(thetavals[9:J])), np.sum(height[9:J]**2), - 1.0/(J-9)*np.sum(height[9:J])**2
        a_1 = np.sum(np.multiply(height[9:J], thetavals[9:J]))/np.sum(height[9:J]) - b_1*np.sum(height[9:J]**2)/np.sum(height[9:J])
        b_2 = (np.sum(thetavals[J:K]) - (K-J)*(a_1+b_1*height[J]))/(np.sum(height[J:K]) - (K-J)*height[J])
        a_2 = np.sum(np.multiply(height[J:K], thetavals[J:K]))/np.sum(height[J:K]) - b_2*np.sum(height[J:K]**2)/np.sum(height[J:K])
        b_3 = (np.sum(thetavals[K:top]) - (top-K)*(a_2+b_2*height[K]))/(np.sum(height[K:top]) - (top-K)*height[K])
        a_3 = np.sum(np.multiply(height[K:top], thetavals[K:top]))/np.sum(height[K:top]) - b_3*np.sum(height[K:top]**2)/np.sum(height[K:top])
        #print b_2, b_3
        fitvals[:J] = b_1*height[:J] + a_1
        fitvals[J:K] = b_2*height[J:K] + a_2
        fitvals[K:top] = b_3*height[K:top] + a_3
        #set up plot: left panel fit only, right panel profile plus fit
        theFig = plt.figure(i)
        theFig.clf()
        theAx = theFig.add_subplot(121)
        theAx.set_title('Fit')
        theAx.set_xlabel(r'$\overline{\theta} (K)$')
        theAx.set_ylabel('z (m)')
        theAx1 = theFig.add_subplot(122)
        theAx1.set_title('Profile and Fit')
        theAx1.set_xlabel(r'$\overline{\theta} (K) $')
        theAx1.set_ylabel('z (m)')
        theAx1.plot(thetavals, height[:], 'wo')
        theAx.plot(fitvals[:J], height[:J], 'r-')
        theAx.plot(fitvals[J:K], height[J:K], 'b-')
        theAx.plot(fitvals[K:top], height[K:top], 'g-')
        theAx1.plot(fitvals[:top], height[:top], 'r-')
        theAx1.set_xlim(300, 320)
        theAx1.set_ylim(0, 2000)
        theAx.set_ylim(0, 2000)
        theAx.set_xlim(300, 320)
        plt.show()
| StarcoderdataPython |
1766911 | <filename>oldcode/bool_parser.py
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 12 16:40:16 2015
@author: noore
"""
from pythonds.basic.stack import Stack
import re
import numpy as np
class BoolParser(object):
    """Parse an 'and'/'or' expression over variables named b<digits>.

    evaluate() maps 'and' to min() of the operand values and 'or' to their
    sum, so the result is numeric rather than strictly boolean.
    NOTE(review): inferred from calcPrefix -- confirm intended semantics
    with callers.
    """

    # Operator precedence for the shunting-yard conversion; parentheses lowest.
    PREC = {'(': 1, ')': 1, 'and' : 2, 'or' : 2}

    @staticmethod
    def isBoolVariable(token):
        """Return True iff *token* is a variable of the form b<digits>, e.g. 'b42'."""
        # Raw string avoids the invalid-escape warning for '\d' on Python 3.12+.
        return re.findall(r'^b\d+$', token) != []

    def __init__(self, expr):
        # pad all parentheses with spaces to help the whitespace tokenizer
        expr = expr.replace('(', ' ( ')
        expr = expr.replace(')', ' ) ')
        self.prefix = self.infix_to_prefix(expr)
        self.postfix = self.infix_to_postfix(expr)

    def infix_to_prefix(self, infixexpr):
        """Convert an infix expression string to a prefix token list."""
        def invert_paranth(s):
            if s == '(':
                return ')'
            elif s == ')':
                return '('
            else:
                return s

        # BUG FIX: on Python 3, map() returns a lazy iterator that has no
        # .reverse() method; materialize it as a list first.
        tokenList = list(map(invert_paranth, infixexpr.split()))
        tokenList.reverse()
        postfixList = self.infix_to_postfix(' '.join(tokenList))
        postfixList.reverse()
        return postfixList

    def infix_to_postfix(self, infixexpr):
        """Shunting-yard conversion from an infix string to a postfix token list."""
        opStack = Stack()
        postfixList = []
        tokenList = infixexpr.split()

        for token in tokenList:
            if BoolParser.isBoolVariable(token):
                # if token is a boolean variable, just append to list
                postfixList.append(token)
            elif token == '(':
                # start a new nested expression in the stack
                opStack.push(token)
            elif token == ')':
                # end the nested expression by moving the operators
                # from the stack to the list (i.e. in reverse order)
                topToken = opStack.pop()
                while topToken != '(':
                    postfixList.append(topToken)
                    topToken = opStack.pop()
            else:
                # operator: pop anything of equal/higher precedence first
                while (not opStack.isEmpty()) and \
                      (BoolParser.PREC[opStack.peek()] >= BoolParser.PREC[token]):
                    postfixList.append(opStack.pop())
                opStack.push(token)

        while not opStack.isEmpty():
            postfixList.append(opStack.pop())
        return postfixList

    def __str__(self):
        return BoolParser.printPrefix(self.prefix)[0]

    @staticmethod
    def printPrefix(tokenList, pos=0):
        """Render a prefix token list as a nested op(a, b) string.

        Returns (text, next_pos) so recursive calls can resume after the
        subtree they consumed.
        """
        if BoolParser.isBoolVariable(tokenList[pos]):
            return tokenList[pos], pos+1
        else: # this is an operator
            s1, pos1 = BoolParser.printPrefix(tokenList, pos+1)
            s2, pos2 = BoolParser.printPrefix(tokenList, pos1)
            return tokenList[pos] + '(' + s1 + ', ' + s2 + ')', pos2

    @staticmethod
    def calcPrefix(tokenList, value_dict, pos=0, defval=0):
        """Evaluate a prefix token list; returns (value, next_pos).

        'and' -> min of operands, 'or' -> sum of operands; NaN operands are
        skipped.  Unknown variables fall back to *defval*.
        """
        if tokenList == []:
            return np.nan, 0
        if BoolParser.isBoolVariable(tokenList[pos]):
            return value_dict.get(tokenList[pos], defval), pos+1
        else: # this is an operator
            val1, pos1 = BoolParser.calcPrefix(tokenList, value_dict, pos+1, defval)
            val2, pos2 = BoolParser.calcPrefix(tokenList, value_dict, pos1, defval)
            if np.isnan(val1):
                val = val2
            elif np.isnan(val2):
                val = val1
            elif tokenList[pos] == 'and':
                val = min(val1, val2)
            elif tokenList[pos] == 'or':
                val = val1 + val2
            return val, pos2

    def evaluate(self, value_dict, defval=0):
        """Evaluate the parsed expression against *value_dict*."""
        return BoolParser.calcPrefix(self.prefix, value_dict, defval=defval)[0]
#expr = "((b2 or (b5 and b4)) or (b1 and b4) or (b1 and b2)) and (b5 or b5)"
#a = BoolParser(expr)
#print expr
#print a.prefix
#print a
#
#x = a.evaluate({'b1': 10, 'b2': 20, 'b3': 30, 'b4': 40, 'b5': 50, 'b6': 60})
#print x | StarcoderdataPython |
1674900 | <reponame>Berailitz/bupt-passport<filename>passport/mess.py
"""Utils."""
import datetime
import functools
import itertools
import logging
import logging.handlers
import threading
import time
from typing import Callable
def get_current_time():
    """Return the current local time formatted as YYYYmmddHHMMSS.

    Converted from an assigned lambda to a def (PEP 8 / E731) with no change
    in behavior or call signature.
    """
    return time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
def set_logger(log_file_path: str, console_level=logging.INFO, file_level=logging.INFO):
    """Initialize logging: console output plus a timed rotating log file.

    :param log_file_path: Path of the log file.
    :type log_file_path: str.
    """
    fmt = '[%(levelname)s] %(asctime)s %(filename)s:%(lineno)d %(message)s'
    datefmt = '%Y %b %d %H:%M:%S'
    logging.basicConfig(
        level=console_level,
        format=fmt,
        datefmt=datefmt,
    )
    # Rotate once per day at 04:00 local time, keeping the last 10 files.
    file_handler = logging.handlers.TimedRotatingFileHandler(
        filename=log_file_path,
        when='midnight',
        interval=1,
        backupCount=10,
        atTime=datetime.time(hour=4),
        encoding='utf8'
    )
    file_handler.setLevel(file_level)
    file_handler.setFormatter(logging.Formatter(fmt=fmt, datefmt=datefmt))
    logging.getLogger().addHandler(file_handler)
    logging.info("Start ....")
| StarcoderdataPython |
1670558 | import numpy as np
import pandas as pd
import random
ENG_INPUT_PATH = 'eng\English Wordlist.csv'
DEU_INPUT_PATH = 'deu\GoetheA1.csv'
JAP_INPUT_PATH = 'eng\AdvanceIELTS.csv'
file_lst = ['eng\English Wordlist.csv','deu\GoetheA1.csv','eng\AdvanceIELTS.csv', 'deu\Duolingo.csv']
class WordGenerator:
    """Vocabulary trainer backed by a CSV file with a 'Checked' progress column.

    Checked == 0 means unlearned; learned words carry the (1-based) number of
    the generation session that produced them, so the most recent batch can
    be identified and undone.
    """

    # Class-level defaults; overwritten per instance in __init__.
    input_df = ''
    input_path = ''

    def __init__(self, input):
        try:
            self.input_df = pd.DataFrame()
            self.input_df = pd.read_csv(input, header=0, encoding='latin-1')
            self.input_path = input
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; on failure input_df stays an empty DataFrame.
            print('Error when parsing input to dataframe')

    def GeneratedWord(self, k):
        """Sample k unlearned words, print them, and mark them as learned."""
        valid_df = self.input_df[self.input_df['Checked'] == 0]
        r_choices = valid_df.sample(n=k)
        print(self.input_path)
        print('')
        print(r_choices.head(k).sort_index())
        self.SaveLearnedWord(r_choices.index)

    def SaveLearnedWord(self, ids):
        """Stamp *ids* with the next session number and persist to disk."""
        next_val = self.input_df['Checked'].max() + 1
        # Vectorized assignment replaces the per-id loop (same result).
        self.input_df.loc[ids, 'Checked'] = next_val
        self.SaveToFile()

    def ResetWords(self):
        """Clear all progress markers."""
        # Integer 0 (instead of np.zeros floats) keeps the column integral.
        self.input_df.loc[:, 'Checked'] = 0
        self.SaveToFile()

    def ResetLasLearnedtWords(self):
        """Undo the most recent generation session.

        (Method name typo preserved for backward compatibility with callers.)
        """
        max_val = self.input_df['Checked'].max()
        if max_val == 0:
            return
        last_ids = self.input_df[self.input_df['Checked'] == max_val].index
        for id in last_ids:
            self.input_df.loc[id, 'Checked'] = 0
            print(str(id) + ' ' + str(self.input_df.loc[id, 'Checked']))
        self.SaveToFile()

    def RerollWords(self, k):
        """Discard the last generated batch and draw a new one."""
        self.ResetLasLearnedtWords()
        self.GeneratedWord(k)

    def ShowLastGeneratedWords(self):
        """Print the words from the most recent generation session."""
        max_val = self.input_df['Checked'].max()
        if max_val == 0:
            print('You have not take any words')
            return
        print('You generated time(s): ' + str(max_val))
        print(self.input_df[self.input_df['Checked'] == max_val])

    def SaveToFile(self):
        """Persist the dataframe back to its CSV file."""
        self.input_df.to_csv(self.input_path, index=False)

    def Stats(self):
        """Print totals for learned vs. remaining words."""
        uncheck = self.input_df[self.input_df['Checked'] == 0].shape[0]
        total = self.input_df.shape[0]
        print('Total: ' + str(total))
        print('Words remaining: ' + str(uncheck))
# Interactive REPL: dispatch menu choices to WordGenerator methods until the
# user enters anything outside 1-7 (which falls through to break/quit).
WordGen = WordGenerator(ENG_INPUT_PATH)
while True:
    print('1. Take words')
    print('2. Reroll Words')
    print('3. Reset Last Learned Words')
    print('4. Reset words')
    print('5. Stats')
    print('6. Show your newest words')
    print('7. ChangeList')
    print('Other.Quit')
    x = input()
    if (int(x) == 1):
        WordGen.GeneratedWord(15)
    elif (int(x) == 2):
        WordGen.RerollWords(15)
    elif(int(x) ==3):
        WordGen.ResetLasLearnedtWords()
    elif(int(x) == 4):
        WordGen.ResetWords()
    elif(int(x) == 5):
        WordGen.Stats()
    elif(int(x) == 6):
        WordGen.ShowLastGeneratedWords()
    elif(int(x) == 7):
        # Switch the active word list to one of the bundled CSV files.
        print('Chooose File:')
        l = len(file_lst)
        for i in range (0,l):
            print(str(i + 1) + '. ' +file_lst[i])
        print(str(l + 1) + '. Return')
        y = input()
        if int(y) <= l and int(y) > 0:
            WordGen = WordGenerator(file_lst[int(y)-1])
        else:
            print('Oki back to the last')
    else:
        break
6820 | <filename>dlk/core/schedulers/__init__.py
# Copyright 2021 cstsunfu. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""schedulers"""
import importlib
import os
from dlk.utils.register import Register
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
import math
# Global registries: scheduler configs and implementations register themselves
# here by name so they can be looked up from configuration files.
scheduler_config_register = Register("Schedule config register.")
scheduler_register = Register("Schedule register.")
class BaseScheduler(object):
    """Interface that concrete scheduler wrappers must implement."""

    def get_scheduler(self) -> LambdaLR:
        """Build and return the initialized learning-rate scheduler.

        Returns:
            LambdaLR
        """
        raise NotImplementedError

    def __call__(self) -> LambdaLR:
        """Alias for :meth:`get_scheduler`."""
        return self.get_scheduler()
def import_schedulers(schedulers_dir, namespace):
    """Import every scheduler module found in *schedulers_dir* under *namespace*.

    Skips private/hidden entries and anything that is neither a ``.py`` file
    nor a package directory.
    """
    for entry in os.listdir(schedulers_dir):
        full_path = os.path.join(schedulers_dir, entry)
        if entry.startswith("_") or entry.startswith("."):
            continue
        if not (entry.endswith(".py") or os.path.isdir(full_path)):
            continue
        module_name = entry[: entry.find(".py")] if entry.endswith(".py") else entry
        importlib.import_module(namespace + "." + module_name)
# automatically import any Python files in the schedulers directory so that
# each module's register decorators run when this package is imported
schedulers_dir = os.path.dirname(__file__)
import_schedulers(schedulers_dir, "dlk.core.schedulers")
| StarcoderdataPython |
1625937 | <reponame>zigonk/MST_inpainting<filename>utils/utils.py
import math
import os
import sys
import time
import numpy as np
import torch
import torch.nn.functional as F
import torchvision.transforms.functional as FF
import yaml
from PIL import Image
from torch.optim.lr_scheduler import LambdaLR
class Config(object):
    """Read-only view over a YAML config file; unknown keys resolve to None."""

    def __init__(self, config_path):
        with open(config_path, 'r') as f:
            self._yaml = f.read()
        self._dict = yaml.load(self._yaml, Loader=yaml.SafeLoader)
        # Remember where the config lives so relative paths can be resolved.
        self._dict['path'] = os.path.dirname(config_path)

    def __getattr__(self, name):
        # dict.get already returns None for missing keys, matching the
        # original is-not-None check plus fallthrough.
        return self._dict.get(name)

    def print(self):
        """Pretty-print the raw YAML configuration."""
        print('Model configurations:')
        print('---------------------------------')
        print(self._yaml)
        print('')
        print('---------------------------------')
        print('')
def create_dir(dir):
    """Create directory *dir* (and any missing parents) if it does not exist.

    Uses exist_ok=True, which removes the check-then-create race of the
    original os.path.exists test while keeping identical observable behavior.
    """
    os.makedirs(dir, exist_ok=True)
def stitch_images(inputs, outputs, img_per_row=2):
    """Tile input tensors and their output variants into one PIL image grid.

    Each row holds img_per_row samples; each sample occupies one column per
    category (the input plus one column per entry of *outputs*), separated by
    a small horizontal gap.
    """
    gap = 5
    columns = len(outputs) + 1
    # NOTE(review): .shape on an HxW slice yields (rows, cols), so this
    # unpack looks transposed (width gets rows); harmless only for square
    # crops -- confirm against callers.
    width, height = inputs[0][:, :, 0].shape
    img = Image.new('RGB', (width * img_per_row * columns + gap * (img_per_row - 1),
                            height * int(len(inputs) / img_per_row)))
    images = [inputs, *outputs]

    for ix in range(len(inputs)):
        xoffset = int(ix % img_per_row) * width * columns + int(ix % img_per_row) * gap
        yoffset = int(ix / img_per_row) * height
        for cat in range(len(images)):
            # Tensors are moved to CPU and cast to uint8 before pasting.
            im = np.array((images[cat][ix]).cpu()).astype(np.uint8).squeeze()
            im = Image.fromarray(im)
            img.paste(im, (xoffset + cat * width, yoffset))

    return img
def torch_show_all_params(model, rank=0):
    """Return the total number of scalar parameters in *model*.

    The ``rank`` argument is unused; it is kept with its default for
    backward compatibility with existing call sites.
    """
    # numel() is the built-in product over each parameter's size.
    return sum(p.numel() for p in model.parameters())
def get_lr_schedule_with_steps(decay_type, optimizer, drop_steps=None, gamma=None, total_steps=None):
    """Build a LambdaLR implementing one of several decay policies.

    decay_type: 'fix' (constant), 'linear' (ramp over total_steps),
    'cos' (half cosine over total_steps), or 'milestone' (multiply by gamma
    every drop_steps).  Raises NotImplementedError for anything else.
    """
    def lr_lambda(current_step):
        if decay_type == 'fix':
            return 1.0
        if decay_type == 'linear':
            return current_step / total_steps
        if decay_type == 'cos':
            return (math.cos(current_step / total_steps * math.pi) + 1) / 2
        if decay_type == 'milestone':
            return math.pow(gamma, int(current_step / drop_steps))
        raise NotImplementedError

    return LambdaLR(optimizer, lr_lambda)
def to_cuda(meta, device):
    """Move every tensor value in the dict *meta* to *device*, in place.

    Non-tensor values are left untouched; the (mutated) dict is returned
    for chaining.
    """
    for k, v in meta.items():
        # isinstance (rather than an exact type(...) is check) also covers
        # torch.Tensor subclasses such as nn.Parameter.
        if isinstance(v, torch.Tensor):
            meta[k] = v.to(device)
    return meta
class Progbar(object):
    """Displays a progress bar.
    Arguments:
        target: Total number of steps expected, None if unknown.
        width: Progress bar width on screen.
        verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)
        stateful_metrics: Iterable of string names of metrics that
            should *not* be averaged over time. Metrics in this list
            will be displayed as-is. All others will be averaged
            by the progbar before display.
        interval: Minimum visual progress update interval (in seconds).
    """
    def __init__(self,
                 target,
                 max_iters=None,
                 width=25,
                 verbose=1,
                 interval=0.05,
                 stateful_metrics=None):
        self.target = target
        self.width = width
        self.verbose = verbose
        self.interval = interval
        # max_iters / iters drive whether the ETA or the per-step timing is
        # shown (see update()); iters is advanced by add(), not by update().
        self.max_iters = max_iters
        self.iters = 0
        if stateful_metrics:
            self.stateful_metrics = set(stateful_metrics)
        else:
            self.stateful_metrics = set()
        # Dynamic (in-place, \r / \b rewriting) display is used on real TTYs
        # and inside notebook kernels; otherwise every update prints a new line.
        self._dynamic_display = ((hasattr(sys.stdout, 'isatty')
                                  and sys.stdout.isatty())
                                 or 'ipykernel' in sys.modules
                                 or 'posix' in sys.modules)
        self._total_width = 0
        self._seen_so_far = 0
        # We use a dict + list to avoid garbage collection
        # issues found in OrderedDict
        self._values = {}
        self._values_order = []
        self._start = time.time()
        self._last_update = 0
    def update(self, current, values=None):
        """Updates the progress bar.
        Arguments:
            current: Index of current step.
            values: List of tuples:
                `(name, value_for_last_step)`.
                If `name` is in `stateful_metrics`,
                `value_for_last_step` will be displayed as-is.
                Else, an average of the metric over time will be displayed.
        """
        values = values or []
        for k, v in values:
            if k not in self._values_order:
                self._values_order.append(k)
            if k not in self.stateful_metrics:
                # Averaged metrics are kept as [weighted_sum, total_weight],
                # weighted by the number of steps since the last update.
                if k not in self._values:
                    self._values[k] = [
                        v * (current - self._seen_so_far),
                        current - self._seen_so_far
                    ]
                else:
                    self._values[k][0] += v * (current - self._seen_so_far)
                    self._values[k][1] += (current - self._seen_so_far)
            else:
                # Stateful metrics are displayed as-is (last value wins).
                self._values[k] = v
        self._seen_so_far = current
        now = time.time()
        info = ' - %.0fs' % (now - self._start)
        if self.verbose == 1:
            # Throttle redraws to at most one per `interval` seconds, except
            # for the final step, which is always rendered.
            if (now - self._last_update < self.interval
                    and self.target is not None and current < self.target):
                return
            prev_total_width = self._total_width
            if self._dynamic_display:
                # Rewind the cursor so the new bar overwrites the previous one.
                sys.stdout.write('\b' * prev_total_width)
                sys.stdout.write('\r')
            else:
                sys.stdout.write('\n')
            if self.target is not None:
                # e.g. " 37/500 [==>....]" -- digits sized to the target.
                numdigits = int(np.floor(np.log10(self.target))) + 1
                barstr = '%%%dd/%d [' % (numdigits, self.target)
                bar = barstr % current
                prog = float(current) / self.target
                prog_width = int(self.width * prog)
                if prog_width > 0:
                    bar += ('=' * (prog_width - 1))
                    if current < self.target:
                        bar += '>'
                    else:
                        bar += '='
                bar += ('.' * (self.width - prog_width))
                bar += ']'
            else:
                bar = '%7d/Unknown' % current
            self._total_width = len(bar)
            sys.stdout.write(bar)
            if current:
                time_per_unit = (now - self._start) / current
            else:
                time_per_unit = 0
            # if self.target is not None and current < self.target:
            # Show an ETA while iterations remain; otherwise report the
            # observed time per step in s/ms/us.
            if self.max_iters is None or self.iters < self.max_iters:
                eta = time_per_unit * (self.target - current)
                if eta > 3600:
                    eta_format = '%d:%02d:%02d' % (eta // 3600,
                                                   (eta % 3600) // 60,
                                                   eta % 60)
                elif eta > 60:
                    eta_format = '%d:%02d' % (eta // 60, eta % 60)
                else:
                    eta_format = '%ds' % eta
                info = ' - ETA: %s' % eta_format
            else:
                if time_per_unit >= 1:
                    info += ' %.0fs/step' % time_per_unit
                elif time_per_unit >= 1e-3:
                    info += ' %.0fms/step' % (time_per_unit * 1e3)
                else:
                    info += ' %.0fus/step' % (time_per_unit * 1e6)
            for k in self._values_order:
                info += ' - %s:' % k
                if isinstance(self._values[k], list):
                    # Averaged metric: weighted_sum / total_weight.
                    avg = np.mean(self._values[k][0] /
                                  max(1, self._values[k][1]))
                    if abs(avg) > 1e-3:
                        info += ' %.4f' % avg
                    else:
                        info += ' %.4e' % avg
                else:
                    info += ' %s' % self._values[k]
            self._total_width += len(info)
            # Blank out any leftover characters from a longer previous line.
            if prev_total_width > self._total_width:
                info += (' ' * (prev_total_width - self._total_width))
            if self.target is not None and current >= self.target:
                info += '\n'
            sys.stdout.write(info)
            sys.stdout.flush()
        elif self.verbose == 2:
            # Semi-verbose mode: one summary line, only once the target is hit.
            # NOTE(review): this branch assumes every tracked metric is an
            # averaged [sum, weight] pair; a stateful metric here would fail.
            if self.target is None or current >= self.target:
                for k in self._values_order:
                    info += ' - %s:' % k
                    avg = np.mean(self._values[k][0] /
                                  max(1, self._values[k][1]))
                    if avg > 1e-3:
                        info += ' %.4f' % avg
                    else:
                        info += ' %.4e' % avg
                info += '\n'
                sys.stdout.write(info)
                sys.stdout.flush()
        self._last_update = now
    def add(self, n, values=None):
        """Advance the bar by *n* steps (also counts one iteration for ETA logic)."""
        self.iters += 1
        self.update(self._seen_so_far + n, values)
def postprocess(img, norm=False, simple_norm=False):
    """Map a batch of NCHW images from [-1, 1] to [0, 255] int tensors in NHWC.

    Images smaller than 256 px tall are first resized to 256x256. With
    ``norm`` the batch is min-max rescaled into [-1, 1] beforehand; with
    ``simple_norm`` it is linearly mapped via ``x * 2 - 1``.
    """
    if img.shape[2] < 256:
        img = F.interpolate(img, (256, 256))
    if norm:
        lo = img.min()
        span = img.max() - lo + 1e-7
        img = (img - lo) / span  # into [0, 1]
        img = img * 2 - 1        # into [-1, 1]
    if simple_norm:
        img = img * 2 - 1
    img = (img + 1) / 2 * 255.0
    return img.permute(0, 2, 3, 1).int()
def to_tensor(img):
    """Convert a numpy image array to a float CHW tensor via a PIL round-trip."""
    pil_img = Image.fromarray(img)
    return FF.to_tensor(pil_img).float()
def to_device(data, device):
    """Move *data* to *device*.

    Tensors are moved directly; dict values that are tensors are moved in
    place (the same dict is returned); lists are mapped recursively. Any
    other value is returned unchanged.
    """
    if isinstance(data, torch.Tensor):
        return data.to(device)
    if isinstance(data, dict):
        for key in data:
            if isinstance(data[key], torch.Tensor):
                data[key] = data[key].to(device)
        return data
    if isinstance(data, list):
        return [to_device(d, device) for d in data]
    # fix: the original fell off the end here and implicitly returned None,
    # silently discarding ints, strings, tuples, etc.
    return data
def imsave(img, path):
    """Save a tensor image to *path* as an 8-bit image file."""
    arr = img.cpu().numpy().astype(np.uint8).squeeze()
    Image.fromarray(arr).save(path)
| StarcoderdataPython |
3372408 | <gh_stars>100-1000
"""\
Code generator functions for wxTextCtrl objects
@copyright: 2002-2007 <NAME>
@copyright: 2014-2016 <NAME>
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
import common
import wcodegen
class PythonTextCtrlGenerator(wcodegen.PythonWidgetCodeWriter):
    # Instantiation template emitted for a wxTextCtrl in generated Python code;
    # the %(...)s placeholders are filled from the widget's properties.
    tmpl = '%(name)s = %(klass)s(%(parent)s, %(id)s, %(value)s%(style)s)\n'
class CppTextCtrlGenerator(wcodegen.CppWidgetCodeWriter):
    # Instantiation template emitted for a wxTextCtrl in generated C++ code;
    # the %(...)s placeholders are filled from the widget's properties.
    tmpl = '%(name)s = new %(klass)s(%(parent)s, %(id)s, %(value)s%(style)s);\n'
def initialize():
    """Register the wxTextCtrl code generators with the common registry."""
    klass = 'wxTextCtrl'
    common.class_names['EditTextCtrl'] = klass
    writers = (('python', PythonTextCtrlGenerator), ('C++', CppTextCtrlGenerator))
    for language, writer_cls in writers:
        common.register(language, klass, writer_cls(klass))
| StarcoderdataPython |
from random import randint
# Jogo de Par ou Impar: o jogador continua enquanto vencer; a primeira
# derrota encerra o jogo e mostra o total de vitórias.
print('-='*20)
print('VAMOS JOGAR PAR OU IMPAR')
print('-='*20)
v = 0  # contador de vitórias do jogador
while True:
    numero = int(input('Digite um numero'))
    computador = randint(0, 11)
    escolha = ' '
    total = numero+computador
    while escolha not in 'PI':
        # fix: entrada vazia causava IndexError em [0]; agora repete a pergunta.
        entrada = str(input('Par ou Impar? [P/I]')).strip().upper()
        escolha = entrada[0] if entrada else ' '
    print('Voçê jogou {} eo computador jogou {}. Total de {}'.format(numero,computador,total),end=' ')
    print('DEU PAR' if total%2==0 else 'DEU IMPAR')
    if escolha == 'P':
        if total%2==0:
            print('Voçê VENCEU!')
            v = v+1
        else:
            print('Voçê PERDEU!')
            # fix: o break era incondicional e encerrava o jogo mesmo na vitória.
            break
    elif escolha == 'I':
        # fix: a condição estava invertida — quem escolhe Impar vence com total ímpar.
        if total%2 != 0:
            print('Voçê VENCEU!')
            v = v+1
        else:
            print('Voçê PERDEU')
            # fix: faltava encerrar o jogo na derrota deste ramo.
            break
    print('Vamos jogar novamente...')
print('GAME OVER! Voçê venceu {} vezes.'.format(v))
| StarcoderdataPython |
1696749 | <gh_stars>0
from direction import Direction
from gpiozero import Robot
from motor import Motor
import os
from dotenv import load_dotenv
# Populate os.environ with the MOTOR_* pin assignments from a local .env file.
load_dotenv()
class Rover:
    """Differential-drive rover driven by two gpiozero motor channels whose
    GPIO pins are read from MOTOR_* environment variables."""

    def __init__(self) -> None:
        left_pins = (os.getenv('MOTOR_A_FL'), os.getenv(
            'MOTOR_A_RL'), os.getenv('MOTOR_A_PWML'))
        right_pins = (os.getenv('MOTOR_B_FR'), os.getenv(
            'MOTOR_B_RR'), os.getenv('MOTOR_B_PWMR'))
        self.rv = Robot(left_pins, right_pins)

    def move(self, direction: Direction):
        """Drive in the requested direction; any unrecognised value stops."""
        actions = {
            Direction.FORWARD: lambda: self.rv.forward(Motor.FWD_SPD),
            Direction.BACKWARD: lambda: self.rv.backward(Motor.RWD_SPD),
            Direction.FORWARD_LEFT: lambda: self.rv.forward(
                curve_left=Motor.CURVE_SPD),
            Direction.FORWARD_RIGHT: lambda: self.rv.forward(
                curve_right=Motor.CURVE_SPD),
            Direction.BACKWARD_LEFT: lambda: self.rv.backward(
                Motor.SPIN_TURN_SPD, curve_left=Motor.CURVE_SPD),
            Direction.BACKWARD_RIGHT: lambda: self.rv.backward(
                Motor.SPIN_TURN_SPD, curve_right=Motor.CURVE_SPD),
        }
        actions.get(direction, self.stop)()

    def stop(self):
        """Halt both motors immediately."""
        self.rv.stop()
| StarcoderdataPython |
class ProcessNotFoundError(Exception):
    """Raised when a process is not found."""
    # fix: removed the redundant `pass` — the docstring already forms the body.
class ProcessServiceError(Exception):
    """Raised when an error happens while running a process."""
    # fix: removed the redundant `pass` — the docstring already forms the body.
| StarcoderdataPython |
def substitute_dict(d, replacement):
    """Return a copy of *d* where each value found in *replacement* is swapped
    for its mapped value; other values are kept as-is.

    fix: the original used the `cond and a or b` idiom, which wrongly kept the
    original value whenever the replacement value was falsy (e.g. '' or 0).
    """
    return dict((k, replacement.get(v, v)) for k, v in d.items())
x = { "foo": "bar", "bum": "butt" }
y = { "butt": "poo" }
# fix: the bare `print expr` statement is Python-2-only syntax; the
# parenthesized call prints the same dict on both Python 2 and 3.
print(substitute_dict(x, y))
| StarcoderdataPython |
32767 | """ This Script contain the different function used in the framework
part1. Data processing
part2. Prediction and analisys
part3. Plotting
"""
import numpy as np
import librosa
import matplotlib.pyplot as plt
from sklearn import metrics
import os
import pickle
import time
import struct
""" Data processing """
def get_mel_spectrogram(file_path, mfcc_max_padding=0, n_fft=2048, hop_length=512, n_mels=128):
    """Generates/extracts Log-MEL Spectrogram coefficients with LibRosa.

    Returns a (n_mels, frames) array normalized to [-1, 1], zero-padded
    symmetrically on the time axis up to ``mfcc_max_padding`` frames, or
    None when the file cannot be parsed.

    NOTE(review): n_fft and hop_length are accepted but never forwarded to
    librosa.feature.melspectrogram — confirm whether that is intentional.
    """
    try:
        # Load audio file
        y, sr = librosa.load(file_path)
        # Normalize audio data between -1 and 1
        normalized_y = librosa.util.normalize(y)
        # Generate mel scaled filterbanks
        mel = librosa.feature.melspectrogram(normalized_y, sr=sr, n_mels=n_mels)
        # Convert sound intensity to log amplitude:
        mel_db = librosa.amplitude_to_db(abs(mel))
        # Normalize between -1 and 1
        normalized_mel = librosa.util.normalize(mel_db)
        # Should we require padding
        shape = normalized_mel.shape[1]
        # fix: the original wrote `mfcc_max_padding > 0 & shape < mfcc_max_padding`;
        # bitwise `&` binds tighter than the comparisons, so `shape` was ignored
        # and (negative) padding was attempted whenever mfcc_max_padding > 0.
        if mfcc_max_padding > 0 and shape < mfcc_max_padding:
            xDiff = mfcc_max_padding - shape
            xLeft = xDiff // 2
            xRight = xDiff - xLeft
            normalized_mel = np.pad(normalized_mel, pad_width=((0, 0), (xLeft, xRight)), mode='constant')
    except Exception as e:
        print("Error parsing wavefile: ", e)
        return None
    return normalized_mel
def get_mfcc(file_path, mfcc_max_padding=0, n_mfcc=40, robots_noise=None, noise_amp=1):
    """Generates/extracts MFCC coefficients with LibRosa.

    Optionally mixes in a noise recording (``robots_noise`` path) weighted by
    ``noise_amp`` before extraction. Returns an (n_mfcc, frames) array
    normalized to [-1, 1], right-padded with zeros up to ``mfcc_max_padding``
    frames, or None when the file cannot be parsed.
    """
    try:
        # Load audio file
        y, sr = librosa.load(file_path, sr=None)
        # fix: identity check for None instead of `!= None` equality.
        if robots_noise is not None:
            # NOTE(review): this assumes the noise clip is at least as long as
            # the signal and sampled compatibly — confirm against callers.
            y_n, _ = librosa.load(robots_noise)
            y = (y + noise_amp * y_n) / (noise_amp + 1)
        # Normalize audio data between -1 and 1
        normalized_y = librosa.util.normalize(y)
        # Compute MFCC coefficients
        mfcc = librosa.feature.mfcc(y=normalized_y, sr=sr, n_mfcc=n_mfcc)
        # Normalize MFCC between -1 and 1
        normalized_mfcc = librosa.util.normalize(mfcc)
        # Should we require padding
        shape = normalized_mfcc.shape[1]
        if shape < mfcc_max_padding:
            pad_width = mfcc_max_padding - shape
            normalized_mfcc = np.pad(normalized_mfcc,
                                     pad_width=((0, 0), (0, pad_width)),
                                     mode='constant',
                                     constant_values=(0,))
    except Exception as e:
        print("Error parsing wavefile: ", e)
        return None
    return normalized_mfcc
def add_padding(features, mfcc_max_padding=174):
    """Zero-pad each 2-D feature array along its time axis up to
    ``mfcc_max_padding`` columns (split evenly left/right); arrays already
    wide enough are passed through unchanged."""
    padded = []
    for feature in features:
        width = len(feature[0])
        if width < mfcc_max_padding:
            deficit = mfcc_max_padding - width
            left = deficit // 2
            right = deficit - left
            feature = np.pad(feature, pad_width=((0, 0), (left, right)), mode='constant')
        padded.append(feature)
    return padded
def scale(X, x_min, x_max, axis=0):
    """Min-max scale X along *axis* into [x_min, x_max].

    Constant slices (max == min) are mapped to x_min instead of dividing
    by zero.
    """
    lo = X.min(axis=axis)
    span = X.max(axis=axis) - lo
    span[span == 0] = 1  # avoid division by zero for constant columns
    return x_min + (X - lo) * (x_max - x_min) / span
def save_split_distributions(test_split_idx, train_split_idx, file_path=None):
    """Pickle the test/train split index collections to *file_path*.

    Returns the path on success, or False when no path was given.
    """
    # fix: the original referenced the undefined names `path` and `false`.
    if file_path is None:
        print("You must enter a file path to save the splits")
        return False
    # Create split dictionary
    split = {}
    split['test_split_idx'] = test_split_idx
    split['train_split_idx'] = train_split_idx
    with open(file_path, 'wb') as file_pi:
        pickle.dump(split, file_pi)
    # fix: `return file` referenced an undefined name; return the path so the
    # result is truthy and tells the caller where the data landed.
    return file_path
def load_split_distributions(file_path):
    """Load a split pickle written by save_split_distributions.

    Returns ``[test_split_idx, train_split_idx]``.
    """
    # fix: use a context manager so the file handle is closed (it was leaked).
    with open(file_path, 'rb') as file:
        data = pickle.load(file)
    return [data['test_split_idx'], data['train_split_idx']]
def find_dupes(array):
    """Return how many distinct values occur more than once in *array*."""
    seen = set()
    dupes = set()
    for item in array:
        if item in seen:
            dupes.add(item)
        else:
            seen.add(item)
    return len(dupes)
def read_header(filename):
    """Reads a WAV file's header data and returns a tuple of properties:
    (num_channels, sample_rate, bit_depth).

    Offsets are relative to the 36-byte fmt region that follows the 12-byte
    RIFF chunk descriptor in a canonical PCM WAV header.
    """
    # fix: close the file handle via a context manager (it was leaked).
    with open(filename, "rb") as wave:
        wave.read(12)          # skip the RIFF chunk descriptor
        fmat = wave.read(36)   # fmt sub-chunk (id, size, and format fields)
    num_channels = struct.unpack('<H', fmat[10:12])[0]
    sample_rate = struct.unpack("<I", fmat[12:16])[0]
    bit_depth = struct.unpack("<H", fmat[22:24])[0]
    return (num_channels, sample_rate, bit_depth)
def play_dataset_sample(dataset_row, audio_path):
    """Given a dataset row it returns an audio player and prints the audio
    properties (class, path, sample rate, bit depth, duration)."""
    row = dataset_row.iloc[0]
    # fix: the path was built twice from the same fields; compute it once.
    file_path = os.path.join(audio_path, row['fold'], row['file'])
    print("Class:", row['class'])
    print("File:", file_path)
    print("Sample rate:", row['sample_rate'])
    print("Bit depth:", row['bit_depth'])
    print("Duration {} seconds".format(row['duration']))
    # Sound preview
    # NOTE(review): `IP` (IPython) is not imported at the top of this file, so
    # this line raises NameError unless IPython is imported elsewhere — confirm.
    return IP.display.Audio(file_path)
"""
Prediction and analisys
"""
def evaluate_model(model, X_train, y_train, X_test, y_test):
    """Evaluate *model* silently on the train and test sets.

    Returns a ``(train_score, test_score)`` pair as produced by
    ``model.evaluate``.
    """
    scores = [model.evaluate(X, y, verbose=0)
              for X, y in ((X_train, y_train), (X_test, y_test))]
    return scores[0], scores[1]
def model_evaluation_report(model, X_train, y_train, X_test, y_test, calc_normal=True):
    """Print a loss/accuracy table comparing train vs. test performance.

    Accuracy is shown as a percentage. When ``calc_normal`` is True, also
    prints the loss gap normalized by the larger loss (a rough
    overfitting indicator, in percent).
    """
    dash = '-' * 38
    # Compute scores
    train_score, test_score = evaluate_model(model, X_train, y_train, X_test, y_test)
    # Print Train vs Test report
    print('{:<10s}{:>14s}{:>14s}'.format("", "LOSS", "ACCURACY"))
    print(dash)
    print('{:<10s}{:>14.4f}{:>14.4f}'.format( "Training:", train_score[0], 100 * train_score[1]))
    print('{:<10s}{:>14.4f}{:>14.4f}'.format( "Test:", test_score[0], 100 * test_score[1]))
    # Calculate and report normalized error difference?
    if (calc_normal):
        max_err = max(train_score[0], test_score[0])
        error_diff = max_err - min(train_score[0], test_score[0])
        normal_diff = error_diff * 100 / max_err
        print('{:<10s}{:>13.2f}{:>1s}'.format("Normal diff ", normal_diff, ""))
def acc_per_class(np_probs_array):
    """Given a confusion-matrix-like NumPy array, return per-class accuracy
    (diagonal count over row total, as a percentage) for each row."""
    accs = []
    for idx in range(np_probs_array.shape[0]):
        row = np_probs_array[idx]
        correct = row[idx].astype(int)
        total = row.sum().astype(int)
        accs.append((correct / total) * 100)
    return accs
"""
Plotting
"""
def plot_train_history(history, x_ticks_vertical=False):
    """Plot a Keras-style History: one figure for loss, one for accuracy.

    Each curve's best point (min loss / max accuracy) is marked and its value
    added to the legend. Expects history.history to contain the keys 'loss',
    'val_loss', 'accuracy' and 'val_accuracy'.
    """
    history = history.history
    # min loss / max accs
    min_loss = min(history['loss'])
    min_val_loss = min(history['val_loss'])
    max_accuracy = max(history['accuracy'])
    max_val_accuracy = max(history['val_accuracy'])
    # x pos for loss / acc min/max (epoch index of the first best value)
    min_loss_x = history['loss'].index(min_loss)
    min_val_loss_x = history['val_loss'].index(min_val_loss)
    max_accuracy_x = history['accuracy'].index(max_accuracy)
    max_val_accuracy_x = history['val_accuracy'].index(max_val_accuracy)
    # summarize history for loss, display min
    # NOTE(review): several markers below pass alpha=7, outside the valid
    # [0, 1] range (probably meant 0.7) — recent matplotlib rejects this; confirm.
    plt.figure(figsize=(16,8))
    plt.plot(history['loss'], color="#1f77b4", alpha=0.7)
    plt.plot(history['val_loss'], color="#ff7f0e", linestyle="--")
    plt.plot(min_loss_x, min_loss, marker='o', markersize=3, color="#1f77b4", alpha=0.7, label='Inline label')
    plt.plot(min_val_loss_x, min_val_loss, marker='o', markersize=3, color="#ff7f0e", alpha=7, label='Inline label')
    plt.title('Model loss', fontsize=20)
    plt.ylabel('Loss', fontsize=16)
    plt.xlabel('Epoch', fontsize=16)
    # Legend shows both series plus the numeric best values.
    plt.legend(['Train',
                'Test',
                ('%.3f' % min_loss),
                ('%.3f' % min_val_loss)],
               loc='upper right',
               fancybox=True,
               framealpha=0.9,
               shadow=True,
               borderpad=1)
    if (x_ticks_vertical):
        plt.xticks(np.arange(0, len(history['loss']), 5.0), rotation='vertical')
    else:
        plt.xticks(np.arange(0, len(history['loss']), 5.0))
    plt.show()
    # summarize history for accuracy, display max
    plt.figure(figsize=(16,6))
    plt.plot(history['accuracy'], alpha=0.7)
    plt.plot(history['val_accuracy'], linestyle="--")
    plt.plot(max_accuracy_x, max_accuracy, marker='o', markersize=3, color="#1f77b4", alpha=7)
    plt.plot(max_val_accuracy_x, max_val_accuracy, marker='o', markersize=3, color="orange", alpha=7)
    plt.title('Model accuracy', fontsize=20)
    plt.ylabel('Accuracy', fontsize=16)
    plt.xlabel('Epoch', fontsize=16)
    plt.legend(['Train',
                'Test',
                ('%.2f' % max_accuracy),
                ('%.2f' % max_val_accuracy)],
               loc='upper left',
               fancybox=True,
               framealpha=0.9,
               shadow=True,
               borderpad=1)
    plt.figure(num=1, figsize=(10, 6))
    if (x_ticks_vertical):
        plt.xticks(np.arange(0, len(history['accuracy']), 5.0), rotation='vertical')
    else:
        plt.xticks(np.arange(0, len(history['accuracy']), 5.0))
    plt.show()
def compute_confusion_matrix(y_true,
                             y_pred,
                             classes,
                             normalize=False):
    """Compute the confusion matrix for the given labels/predictions.

    When ``normalize`` is True, each row is divided by its total so entries
    become per-class proportions. ``classes`` is accepted for signature
    compatibility but not used here.
    """
    cm = metrics.confusion_matrix(y_true, y_pred)
    if not normalize:
        return cm
    return cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
def plot_confusion_matrix(cm,
                          classes,
                          normalized=False,
                          title=None,
                          cmap=plt.cm.Blues,
                          size=(10,10)):
    """Plots a confussion matrix.

    Renders *cm* as a colored image with per-cell value annotations; *classes*
    labels both axes. ``normalized`` only controls the number format of the
    annotations ('.2f' vs 'd') — it does not normalize the data.
    """
    fig, ax = plt.subplots(figsize=size)
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalized else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            # White text on dark cells, black on light ones, for readability.
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    plt.show()
193757 | """Template Tags"""
import itertools
from importlib import import_module
from collections import OrderedDict
from django.apps import apps
from django import template
from activflow.core.constants import REQUEST_IDENTIFIER
from activflow.core.helpers import (
activity_config,
wysiwyg_config
)
from activflow.core.models import Task
register = template.Library()
@register.filter(is_safe=True)
def label_with_class(value, arg):
    """Render the form field's <label> tag with the given CSS class applied."""
    css_attrs = {'class': arg}
    return value.label_tag(attrs=css_attrs)
@register.simple_tag(takes_context=True)
def activity_data(context, instance, option, _type):
"""Returns activity data as in field/value pair"""
app = context['app_title']
model = type(instance)
def filter(configuration):
"""Filter fields to display based on configuration"""
for field_name in configuration:
if option in configuration[field_name]:
yield field_name
def get_fields_from_config(model, instance, config):
"""Returns field/value pairs from configuration"""
return OrderedDict([
(
model().class_meta.get_field(field_name).verbose_name,
getattr(instance, field_name)
)
for field_name in itertools.islice(filter(config), len(config))
])
def get_all_fields(instance, exclude=None):
"""Returns all field/value pairs on the model"""
exclude = exclude or []
fields = [field for field in (
(
field.name,
field.verbose_name
) for field in instance.class_meta.get_fields()
) if field[0] not in ['id', 'task', 'task_id'] + exclude]
return {field[1]: getattr(
instance, field[0]) for field in fields}
if _type == 'model':
try:
field_config = activity_config(
app, model.__name__)['Fields']
return get_fields_from_config(model, instance, field_config)
except KeyError:
return get_all_fields(instance)
else:
related_model_fields = {}
for relation in model().class_meta.related_objects:
related_model = relation.related_model
for field in related_model().class_meta.fields:
if all([
field.get_internal_type() == 'ForeignKey',
field.related_model == model]
):
instances = related_model.objects.filter(
**{field.name: instance})
try:
field_config = activity_config(
app,
model.__name__
)['Relations'][related_model.__name__]
relatd_items_detail = [get_fields_from_config(
related_model,
inst,
field_config
) for inst in instances]
except KeyError:
relatd_items_detail = []
for inst in instances:
relatd_items_detail.append(
get_all_fields(inst, exclude=[field.name]))
related_model_fields[related_model.__name__] = relatd_items_detail
return related_model_fields
@register.simple_tag(takes_context=True)
def wysiwyg_form_fields(context):
    """Return the WYSIWYG field configuration for the current app/model,
    or None when no configuration exists."""
    try:
        return wysiwyg_config(context['app_title'], context['entity_title'])
    except (KeyError, AttributeError):
        return None
@register.simple_tag
def activity_title(ref, app):
    """Return the title of the activity model registered under *ref* in
    the app's FLOW definition."""
    flow_module = import_module(
        '{}.flow'.format(apps.get_app_config(app).name))
    return flow_module.FLOW[ref]['model']().title
@register.simple_tag
def activity_friendly_name(ref, app):
    """Return the human-friendly name registered under *ref* in the
    app's FLOW definition."""
    flow_module = import_module(
        '{}.flow'.format(apps.get_app_config(app).name))
    return flow_module.FLOW[ref]['name']
@register.simple_tag
def request_instance(task_identifier):
    """Return the request of the task with the given id, or None when the
    identifier is the generic request sentinel."""
    if task_identifier == REQUEST_IDENTIFIER:
        return None
    return Task.objects.get(id=task_identifier).request
| StarcoderdataPython |
48983 | <gh_stars>1-10
from datetime import datetime
from pyboletox.Contracts.Cnab.Retorno.Cnab400.detalhe import Detalhe as DetalheContract
from pyboletox.magicTrait import MagicTrait
class Detalhe(MagicTrait, DetalheContract):
    """CNAB400 return-file detail record (boleto occurrence).

    Plain data holder with fluent getters/setters; every setter returns
    ``self``. Dates are parsed/formatted with ``datetime``; monetary values
    are stored as given.
    """
    def __init__(self) -> None:
        super().__init__()
        self._carteira = None
        self._nossoNumero = None
        self._numeroDocumento = None
        self._numeroControle = None
        self._codigoLiquidacao = None
        self._ocorrencia = None
        self._ocorrenciaTipo = None
        self._ocorrenciaDescricao = None
        self._dataOcorrencia = None
        self._rejeicao = None
        self._dataVencimento = None
        self._dataCredito = None
        self._valor = None
        self._valorTarifa = None
        self._valorOutrasDespesas = None
        self._valorIOF = None
        self._valorAbatimento = None
        # fix: _valorDesconto was never initialized, so getValorDesconto()
        # raised AttributeError whenever it ran before setValorDesconto().
        self._valorDesconto = None
        self._valorRecebido = None
        self._valorMora = None
        self._valorMulta = None
        self._error = None
    def getCarteira(self):
        return self._carteira
    def setCarteira(self, carteira):
        self._carteira = carteira
        return self
    def getNossoNumero(self):
        return self._nossoNumero
    def setNossoNumero(self, nossoNumero):
        self._nossoNumero = nossoNumero
        return self
    def getNumeroDocumento(self):
        return self._numeroDocumento
    def setNumeroDocumento(self, numeroDocumento):
        # Strip surrounding blanks and leading zeros from the raw CNAB field.
        self._numeroDocumento = str(numeroDocumento).strip(' ').lstrip('0')
        return self
    def getNumeroControle(self):
        return self._numeroControle
    def setNumeroControle(self, numeroControle):
        self._numeroControle = numeroControle
        return self
    def getCodigoLiquidacao(self):
        return self._codigoLiquidacao
    def setCodigoLiquidacao(self, codigoLiquidacao):
        self._codigoLiquidacao = codigoLiquidacao
        return self
    def hasOcorrencia(self, *args):
        """With no arguments, report whether any ocorrencia is recorded;
        otherwise report whether the current code matches one of the given
        codes (passed either variadically or as a single list)."""
        ocorrencias = args
        # fix: the original tested `not self.getOcorrencia()`, returning True
        # precisely when NO ocorrencia was recorded — inverted semantics.
        if len(ocorrencias) == 0 and self.getOcorrencia():
            return True
        if len(ocorrencias) == 1 and isinstance(ocorrencias[0], list):
            ocorrencias = ocorrencias[0]
        if self.getOcorrencia() in ocorrencias:
            return True
        return False
    def getOcorrencia(self):
        return self._ocorrencia
    def setOcorrencia(self, ocorrencia):
        # Occurrence codes are zero-padded to two characters.
        self._ocorrencia = "{0:0>2s}".format(ocorrencia)
        return self
    def getOcorrenciaDescricao(self):
        return self._ocorrenciaDescricao
    def setOcorrenciaDescricao(self, ocorrenciaDescricao):
        self._ocorrenciaDescricao = ocorrenciaDescricao
        return self
    def getOcorrenciaTipo(self):
        return self._ocorrenciaTipo
    def setOcorrenciaTipo(self, ocorrenciaTipo):
        self._ocorrenciaTipo = ocorrenciaTipo
        return self
    def getDataOcorrencia(self, format='%d/%m/%Y'):
        return self._dataOcorrencia.strftime(format) if self._dataOcorrencia else None
    def setDataOcorrencia(self, dataOcorrencia, format='%d%m%y'):
        # All-zero / blank CNAB date fields mean "no date".
        self._dataOcorrencia = datetime.strptime(dataOcorrencia, format) if dataOcorrencia.strip("0 ") else None
        return self
    def getRejeicao(self):
        return self._rejeicao
    def setRejeicao(self, rejeicao):
        self._rejeicao = rejeicao
        return self
    def getDataVencimento(self, format='%d/%m/%Y'):
        return self._dataVencimento.strftime(format) if self._dataVencimento else None
    def setDataVencimento(self, dataVencimento, format='%d%m%y'):
        self._dataVencimento = datetime.strptime(dataVencimento, format) if dataVencimento.strip("0 ") else None
        return self
    def getDataCredito(self, format='%d/%m/%Y'):
        return self._dataCredito.strftime(format) if self._dataCredito else None
    def setDataCredito(self, dataCredito, format='%d%m%y'):
        self._dataCredito = datetime.strptime(dataCredito, format) if dataCredito.strip("0 ") else None
        return self
    def getValor(self):
        return self._valor
    def setValor(self, valor):
        self._valor = valor
        return self
    def getValorTarifa(self):
        return self._valorTarifa
    def setValorTarifa(self, valorTarifa):
        self._valorTarifa = valorTarifa
        return self
    def getValorOutrasDespesas(self):
        return self._valorOutrasDespesas
    def setValorOutrasDespesas(self, valorOutrasDespesas):
        self._valorOutrasDespesas = valorOutrasDespesas
        return self
    def getValorIOF(self):
        return self._valorIOF
    def setValorIOF(self, valorIOF):
        self._valorIOF = valorIOF
        return self
    def getValorAbatimento(self):
        return self._valorAbatimento
    def setValorAbatimento(self, valorAbatimento):
        self._valorAbatimento = valorAbatimento
        return self
    def getValorDesconto(self):
        return self._valorDesconto
    def setValorDesconto(self, valorDesconto):
        self._valorDesconto = valorDesconto
        return self
    def getValorRecebido(self):
        return self._valorRecebido
    def setValorRecebido(self, valorRecebido):
        self._valorRecebido = valorRecebido
        return self
    def getValorMora(self):
        return self._valorMora
    def setValorMora(self, valorMora):
        self._valorMora = valorMora
        return self
    def getValorMulta(self):
        return self._valorMulta
    def setValorMulta(self, valorMulta):
        self._valorMulta = valorMulta
        return self
    def hasError(self):
        # fix: the original compared the *bound method object*
        # (`self.getOcorrencia == ...`), which is always False. setError()
        # records the error flag in _ocorrenciaTipo, so that is what we test.
        # NOTE(review): the upstream PHP port compares the ocorrencia code —
        # confirm which field carries OCORRENCIA_ERRO in this codebase.
        return self.getOcorrenciaTipo() == self.OCORRENCIA_ERRO
    def getError(self):
        return self._error
    def setError(self, error):
        """Record an error message and flag the record as an error occurrence."""
        self._ocorrenciaTipo = self.OCORRENCIA_ERRO
        self._error = error
        return self
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.