id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
28290 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-17 13:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the ``articles`` app.

    Creates the Article, Author, Book, Carousel, Category, Chapter, Column,
    Nav and News tables.  All ``verbose_name`` values are Chinese, escaped as
    ``\\uXXXX`` sequences by the migration generator.
    """

    # First migration of the app: no previous migration state exists.
    initial = True

    dependencies = [
    ]

    operations = [
        # Blog article: content plus view/like counters and ordering flags.
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100, verbose_name='\u6807\u9898')),
                ('en_title', models.CharField(max_length=100, verbose_name='\u82f1\u6587\u6807\u9898')),
                ('img', models.CharField(default='/static/img/article/default.jpg', max_length=200)),
                ('tags', models.CharField(blank=True, help_text='\u7528\u9017\u53f7\u5206\u9694', max_length=200, null=True, verbose_name='\u6807\u7b7e')),
                ('summary', models.TextField(verbose_name='\u6458\u8981')),
                ('content', models.TextField(verbose_name='\u6b63\u6587')),
                ('view_times', models.IntegerField(default=0)),
                ('zan_times', models.IntegerField(default=0)),
                ('is_top', models.BooleanField(default=False, verbose_name='\u7f6e\u9876')),
                ('rank', models.IntegerField(default=0, verbose_name='\u6392\u5e8f')),
                # status: 0 = normal, 1 = draft, 2 = deleted (same choices everywhere below).
                ('status', models.IntegerField(choices=[(0, '\u6b63\u5e38'), (1, '\u8349\u7a3f'), (2, '\u5220\u9664')], default=0, verbose_name='\u72b6\u6001')),
                # NOTE(review): default=False is not a valid DateTimeField default --
                # presumably timezone.now (or no default) was intended; confirm
                # against the current model before regenerating migrations.
                ('pub_time', models.DateTimeField(default=False, verbose_name='\u53d1\u5e03\u65f6\u95f4')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
                ('update_time', models.DateTimeField(auto_now=True, verbose_name='\u66f4\u65b0\u65f6\u95f4')),
            ],
            options={
                'ordering': ['rank', '-is_top', '-pub_time', '-create_time'],
                'verbose_name': '\u6587\u7ae0',
                'verbose_name_plural': '\u6587\u7ae0',
            },
        ),
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, verbose_name='\u540d\u79f0')),
            ],
        ),
        # Book: same metadata layout as Article but without a content body
        # (content lives in the Chapter rows that point at the book).
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100, verbose_name='\u6807\u9898')),
                ('en_title', models.CharField(max_length=100, verbose_name='\u82f1\u6587\u6807\u9898')),
                ('img', models.CharField(default='/static/img/article/default.jpg', max_length=200)),
                ('tags', models.CharField(blank=True, help_text='\u7528\u9017\u53f7\u5206\u9694', max_length=200, null=True, verbose_name='\u6807\u7b7e')),
                ('summary', models.TextField(verbose_name='\u6458\u8981')),
                ('view_times', models.IntegerField(default=0)),
                ('zan_times', models.IntegerField(default=0)),
                ('is_top', models.BooleanField(default=False, verbose_name='\u7f6e\u9876')),
                ('rank', models.IntegerField(default=0, verbose_name='\u6392\u5e8f')),
                ('status', models.IntegerField(choices=[(0, '\u6b63\u5e38'), (1, '\u8349\u7a3f'), (2, '\u5220\u9664')], default=0, verbose_name='\u72b6\u6001')),
                # NOTE(review): same suspicious default=False as Article.pub_time.
                ('pub_time', models.DateTimeField(default=False, verbose_name='\u53d1\u5e03\u65f6\u95f4')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
                ('update_time', models.DateTimeField(auto_now=True, verbose_name='\u66f4\u65b0\u65f6\u95f4')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.Author', verbose_name='\u4f5c\u8005')),
            ],
            options={
                'ordering': ['rank', '-is_top', '-pub_time', '-create_time'],
                'verbose_name': '\u4e66\u7c4d',
                'verbose_name_plural': '\u4e66\u7c4d',
            },
        ),
        # Carousel: front-page rotating banner entries, each linked to an article.
        migrations.CreateModel(
            name='Carousel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100, verbose_name='\u6807\u9898')),
                ('summary', models.TextField(blank=True, null=True, verbose_name='\u6458\u8981')),
                ('img', models.CharField(default='/static/img/carousel/default.jpg', max_length=200, verbose_name='\u8f6e\u64ad\u56fe\u7247')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
                ('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.Article', verbose_name='\u6587\u7ae0')),
            ],
            options={
                'ordering': ['-create_time'],
                'verbose_name': '\u8f6e\u64ad',
                'verbose_name_plural': '\u8f6e\u64ad',
            },
        ),
        # Category: self-referential tree via the nullable 'parent' FK.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=40, verbose_name='\u540d\u79f0')),
                ('rank', models.IntegerField(default=0, verbose_name='\u6392\u5e8f')),
                ('status', models.IntegerField(choices=[(0, '\u6b63\u5e38'), (1, '\u8349\u7a3f'), (2, '\u5220\u9664')], default=0, verbose_name='\u72b6\u6001')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
                ('parent', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='articles.Category', verbose_name='\u4e0a\u7ea7\u5206\u7c7b')),
            ],
            options={
                'ordering': ['rank', '-create_time'],
                'verbose_name': '\u5206\u7c7b',
                'verbose_name_plural': '\u5206\u7c7b',
            },
        ),
        # Chapter: one section of a Book (note: no 'is_top' flag here).
        migrations.CreateModel(
            name='Chapter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100, verbose_name='\u6807\u9898')),
                ('en_title', models.CharField(max_length=100, verbose_name='\u82f1\u6587\u6807\u9898')),
                ('img', models.CharField(default='/static/img/article/default.jpg', max_length=200)),
                ('tags', models.CharField(blank=True, help_text='\u7528\u9017\u53f7\u5206\u9694', max_length=200, null=True, verbose_name='\u6807\u7b7e')),
                ('summary', models.TextField(verbose_name='\u6458\u8981')),
                ('content', models.TextField(verbose_name='\u6b63\u6587')),
                ('view_times', models.IntegerField(default=0)),
                ('zan_times', models.IntegerField(default=0)),
                ('rank', models.IntegerField(default=0, verbose_name='\u6392\u5e8f')),
                ('status', models.IntegerField(choices=[(0, '\u6b63\u5e38'), (1, '\u8349\u7a3f'), (2, '\u5220\u9664')], default=0, verbose_name='\u72b6\u6001')),
                # NOTE(review): same suspicious default=False as Article.pub_time.
                ('pub_time', models.DateTimeField(default=False, verbose_name='\u53d1\u5e03\u65f6\u95f4')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
                ('update_time', models.DateTimeField(auto_now=True, verbose_name='\u66f4\u65b0\u65f6\u95f4')),
                ('book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.Book', verbose_name='\u4e66\u540d')),
            ],
            options={
                'ordering': ['rank', '-pub_time', '-create_time'],
                'verbose_name': '\u7ae0\u8282',
                'verbose_name_plural': '\u7ae0\u8282',
            },
        ),
        # Column: curated collection of articles (many-to-many).
        migrations.CreateModel(
            name='Column',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=40, verbose_name='\u4e13\u680f\u5185\u5bb9')),
                ('summary', models.TextField(verbose_name='\u4e13\u680f\u6458\u8981')),
                ('status', models.IntegerField(choices=[(0, '\u6b63\u5e38'), (1, '\u8349\u7a3f'), (2, '\u5220\u9664')], default=0, verbose_name='\u72b6\u6001')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
                ('article', models.ManyToManyField(to='articles.Article', verbose_name='\u6587\u7ae0')),
            ],
            options={
                'ordering': ['-create_time'],
                'verbose_name': '\u4e13\u680f',
                'verbose_name_plural': '\u4e13\u680f',
            },
        ),
        # Nav: entries of the site navigation bar.
        migrations.CreateModel(
            name='Nav',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=40, verbose_name='\u5bfc\u822a\u6761\u5185\u5bb9')),
                ('url', models.CharField(blank=True, max_length=200, null=True, verbose_name='\u6307\u5411\u5730\u5740')),
                ('status', models.IntegerField(choices=[(0, '\u6b63\u5e38'), (1, '\u8349\u7a3f'), (2, '\u5220\u9664')], default=0, verbose_name='\u72b6\u6001')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
            ],
            options={
                'ordering': ['-create_time'],
                'verbose_name': '\u5bfc\u822a\u6761',
                'verbose_name_plural': '\u5bfc\u822a\u6761',
            },
        ),
        # News: items aggregated from external sources (see news_from choices).
        migrations.CreateModel(
            name='News',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100, verbose_name='\u6807\u9898')),
                ('summary', models.TextField(verbose_name='\u6458\u8981')),
                ('news_from', models.IntegerField(choices=[(0, 'oschina'), (1, 'chiphell'), (2, 'freebuf'), (3, 'cnBeta')], default=0, verbose_name='\u6765\u6e90')),
                ('url', models.CharField(max_length=200, verbose_name='\u6e90\u5730\u5740')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
                # NOTE(review): same suspicious default=False as Article.pub_time.
                ('pub_time', models.DateTimeField(default=False, verbose_name='\u53d1\u5e03\u65f6\u95f4')),
            ],
            options={
                'ordering': ['-title'],
                'verbose_name': '\u8d44\u8baf',
                'verbose_name_plural': '\u8d44\u8baf',
            },
        ),
        # FKs added after CreateModel because the target models are created above.
        migrations.AddField(
            model_name='book',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.Category', verbose_name='\u5206\u7c7b'),
        ),
        migrations.AddField(
            model_name='article',
            name='author',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.Author', verbose_name='\u4f5c\u8005'),
        ),
        migrations.AddField(
            model_name='article',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.Category', verbose_name='\u5206\u7c7b'),
        ),
    ]
| StarcoderdataPython |
import json
from typing import Dict, List, Optional, Set

import pytumblr

from api_tumblr.pytumblr_wrapper import RateLimitClient
# One Tumblr API key set is a list of 4 strings:
# consumer key, consumer secret, oauth token, oauth token secret.
API_KEYS_TYPE = List[str]
class BotSpecificConstants:
    """Values specific to my development environment and/or the social context of my bot, e.g. specific posts IDs where I need apply some override, or specific users I need to treat specially, etc"""

    def __init__(
        self,
        blogName: str,
        dash_blogName: str,
        REBLOG_START_TS: int,
        DASH_START_TS: int,
        private_clients_api_keys: List[API_KEYS_TYPE],
        dashboard_clients_api_keys: List[API_KEYS_TYPE],
        bridge_service_host: str,
        bridge_service_port: int,
        BRIDGE_SERVICE_REMOTE_HOST: str,
        BUCKET_NAME: str,
        ask_min_words: int,
        NO_REBLOG_IDS: Optional[Set[int]] = None,
        DEF_REBLOG_IDS: Optional[Set[int]] = None,
        FORCE_TRAIL_HACK_IDS: Optional[Set[int]] = None,
        USER_AVOID_LIST: Optional[Set[str]] = None,
        TAG_AVOID_LIST: Optional[Set[str]] = None,
        DASH_TAG_AVOID_LIST: Optional[Set[str]] = None,
        REPLY_USER_AUTO_ACCEPT_LIST: Optional[Set[str]] = None,
        bad_strings: Optional[Set[str]] = None,
        bad_strings_shortwords: Optional[Set[str]] = None,
        okay_superstrings: Optional[Set[str]] = None,
        likely_obscured_strings: Optional[Set[str]] = None,
        profane_strings: Optional[Set[str]] = None,
        hardstop_strings_review: Optional[Set[str]] = None,
        hardstop_strings_reject: Optional[Set[str]] = None,
        LIMITED_USERS: Optional[Dict[str, float]] = None,
        LIMITED_SUBSTRINGS: Optional[Dict[str, float]] = None,
        SCREENED_USERS: Optional[Set[str]] = None,
        NO_SCRAPE_USERS: Optional[Set[str]] = None,
    ):
        # BUG FIX: the defaults used to be mutable literals (`= set()`,
        # `= dict()`), which Python evaluates once and shares across every
        # call.  `hardstop_strings_review` is mutated in-place below, so the
        # shared default was silently polluted between instances.  Each
        # collection now defaults to None and gets a fresh container here.
        NO_REBLOG_IDS = set() if NO_REBLOG_IDS is None else NO_REBLOG_IDS
        DEF_REBLOG_IDS = set() if DEF_REBLOG_IDS is None else DEF_REBLOG_IDS
        FORCE_TRAIL_HACK_IDS = set() if FORCE_TRAIL_HACK_IDS is None else FORCE_TRAIL_HACK_IDS
        USER_AVOID_LIST = set() if USER_AVOID_LIST is None else USER_AVOID_LIST
        TAG_AVOID_LIST = set() if TAG_AVOID_LIST is None else TAG_AVOID_LIST
        DASH_TAG_AVOID_LIST = set() if DASH_TAG_AVOID_LIST is None else DASH_TAG_AVOID_LIST
        REPLY_USER_AUTO_ACCEPT_LIST = set() if REPLY_USER_AUTO_ACCEPT_LIST is None else REPLY_USER_AUTO_ACCEPT_LIST
        bad_strings = set() if bad_strings is None else bad_strings
        bad_strings_shortwords = set() if bad_strings_shortwords is None else bad_strings_shortwords
        okay_superstrings = set() if okay_superstrings is None else okay_superstrings
        likely_obscured_strings = set() if likely_obscured_strings is None else likely_obscured_strings
        profane_strings = set() if profane_strings is None else profane_strings
        hardstop_strings_review = set() if hardstop_strings_review is None else hardstop_strings_review
        hardstop_strings_reject = set() if hardstop_strings_reject is None else hardstop_strings_reject
        LIMITED_USERS = dict() if LIMITED_USERS is None else LIMITED_USERS
        LIMITED_SUBSTRINGS = dict() if LIMITED_SUBSTRINGS is None else LIMITED_SUBSTRINGS
        SCREENED_USERS = set() if SCREENED_USERS is None else SCREENED_USERS
        NO_SCRAPE_USERS = set() if NO_SCRAPE_USERS is None else NO_SCRAPE_USERS

        # TODO: standardize case in names
        self.blogName = blogName
        self.dash_blogName = dash_blogName
        # when reblog feature started
        self.REBLOG_START_TS = REBLOG_START_TS
        # when reblog-from-dash feature started
        self.DASH_START_TS = DASH_START_TS
        # don't reblog these post IDs -- generally used when I want to write about the bot and then reblog to the bot
        # i don't want a separate bot reblog "responding" to me
        self.NO_REBLOG_IDS = NO_REBLOG_IDS
        self.DEF_REBLOG_IDS = DEF_REBLOG_IDS
        # overrides for tumblr blockquote weirdness
        self.FORCE_TRAIL_HACK_IDS = FORCE_TRAIL_HACK_IDS
        # tumblr api keys (4 strings per key)
        self.private_clients_api_keys = private_clients_api_keys
        self.dashboard_clients_api_keys = dashboard_clients_api_keys
        # host name of the bridge service used in clients we expect to be running on the same machine
        # (i.e. should be localhost under normal circumstances)
        self.bridge_service_host = bridge_service_host
        # port of the bridge service
        self.bridge_service_port = bridge_service_port
        # name of Google Cloud Storage bucket used to store models and data
        self.BUCKET_NAME = BUCKET_NAME
        # host name of the bridge service used in ML code
        # if the ML code is running remotely, this will differ from `bridge_service_host`
        self.BRIDGE_SERVICE_REMOTE_HOST = BRIDGE_SERVICE_REMOTE_HOST
        # don't interact or mention these users
        self.USER_AVOID_LIST = USER_AVOID_LIST
        # bot-written post tags are removed if they contain any of these (substring matches, case-insensitive)
        self.TAG_AVOID_LIST = TAG_AVOID_LIST
        # don't reblog from dash if tags contain these (substring matches)
        self.DASH_TAG_AVOID_LIST = DASH_TAG_AVOID_LIST
        # for frequent repliers who don't otherwise trigger "OK to respond to this reply" logic
        self.REPLY_USER_AUTO_ACCEPT_LIST = REPLY_USER_AUTO_ACCEPT_LIST
        # write draft instead of auto-publish when post/tags contain these substrings
        self.bad_strings = bad_strings
        # form elements of bad_strings from these surrounded by various whitespace/punctuation
        self.bad_strings_shortwords = bad_strings_shortwords
        # ignore items from `bad_strings` when they appear inside of these longer strings
        # e.g. if we wanted to filter "sex" without filtering "anne sexton"
        self.okay_superstrings = okay_superstrings
        # like bad_strings, but we attempt to detect these even if the user is trying to obscure them
        # with e.g. zero-width unicode or l33tsp34k
        self.likely_obscured_strings = likely_obscured_strings
        # like bad_strings, but only used in contexts where we're trying to keep the language rated PG
        self.profane_strings = profane_strings
        # force write draft instead of auto-publish on these strings, even if ML model accepts post
        self.hardstop_strings_review = hardstop_strings_review
        self.hardstop_strings_review.update(USER_AVOID_LIST)
        self.hardstop_strings_review.update(likely_obscured_strings)
        # force ignore post on these strings, even if ML model accepts post
        self.hardstop_strings_reject = hardstop_strings_reject
        # `LIMITED_USERS` allows limiting the rate at which we interact with certain users, e.g. bots who post extremely often or people who send huge numbers of asks
        #
        # `LIMITED_USERS` should be a dict with usernames as keys. the values are floats. a value of X means approximately "respond to this user at most once per X hours."
        self.LIMITED_USERS = LIMITED_USERS
        # like `LIMITED_USERS`, but triggers the limiting on the presence of a substring in the input, rather than the name of the user
        self.LIMITED_SUBSTRINGS = LIMITED_SUBSTRINGS
        # write draft instead of auto-publish when responding to these users
        self.SCREENED_USERS = SCREENED_USERS
        self.NO_SCRAPE_USERS = NO_SCRAPE_USERS
        self.ask_min_words = ask_min_words

    @staticmethod
    def load(path: str = "config.json") -> "BotSpecificConstants":
        """Build constants from a JSON config file.

        JSON has no set type, so the keys listed below arrive as lists and
        are converted to sets before the constructor is called.  Keys absent
        from the config are simply skipped (they fall back to the defaults).
        """
        with open(path, "r", encoding="utf-8") as f:
            constants = json.load(f)
        list_to_set_keys = {
            "NO_REBLOG_IDS",
            "FORCE_TRAIL_HACK_IDS",
            "USER_AVOID_LIST",
            "TAG_AVOID_LIST",
            "DASH_TAG_AVOID_LIST",
            "REPLY_USER_AUTO_ACCEPT_LIST",
            "bad_strings",
            "bad_strings_shortwords",
            "okay_superstrings",
            "likely_obscured_strings",
            "profane_strings",
            "hardstop_strings_review",
            "hardstop_strings_reject",
            "SCREENED_USERS",
            "NO_SCRAPE_USERS",
        }
        for list_to_set_key in list_to_set_keys:
            # Tolerate configs that omit optional keys instead of raising KeyError.
            if list_to_set_key in constants:
                constants[list_to_set_key] = set(constants[list_to_set_key])
        return BotSpecificConstants(**constants)

    @property
    def private_clients(self) -> List[RateLimitClient]:
        """Fresh rate-limited Tumblr clients for the main blog (one per key set)."""
        return [
            RateLimitClient.from_tumblr_rest_client(
                pytumblr.TumblrRestClient(*keys), self.blogName
            )
            for keys in self.private_clients_api_keys
        ]

    @property
    def dashboard_clients(self) -> List[RateLimitClient]:
        """Fresh rate-limited Tumblr clients for the dashboard blog (one per key set)."""
        return [
            RateLimitClient.from_tumblr_rest_client(
                pytumblr.TumblrRestClient(*keys), self.dash_blogName
            )
            for keys in self.dashboard_clients_api_keys
        ]

    @property
    def bridge_service_url(self):
        """'host:port' address of the local bridge service."""
        return self.bridge_service_host + ":" + str(self.bridge_service_port)

    def LIMITED_USERS_PROBS(self, EFFECTIVE_SLEEP_TIME) -> dict:
        """Convert per-user 'at most once per X hours' limits into per-wakeup
        response probabilities, given the loop sleep time in seconds."""
        LIMITED_USERS_MINUTES_LOWER_BOUNDS = {
            name: hours * 60 for name, hours in self.LIMITED_USERS.items()
        }
        LIMITED_USERS_PROBS = {
            name: EFFECTIVE_SLEEP_TIME / (60 * lb)
            for name, lb in LIMITED_USERS_MINUTES_LOWER_BOUNDS.items()
        }
        return LIMITED_USERS_PROBS
| StarcoderdataPython |
# Requires: pip install plotly_express
# (BUG FIX: the bare `pip install plotly_express` line was not valid Python
# and made the whole script a SyntaxError.)
# pip install category_encoders
import calendar
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt

# Show every column when frames are displayed.
pd.options.display.max_columns = None

# Trip data for March/April 2019 (pre-COVID) and March/April 2020 (COVID).
april20 = pd.read_csv('assets/2020_04.csv')
march20 = pd.read_csv('assets/2020_03.csv')
march19 = pd.read_csv('assets/2019_03.csv')
april19 = pd.read_csv('assets/2019_04.csv')

AM20 = pd.concat([april20, march20], ignore_index=True)
AM19 = pd.concat([march19, april19], ignore_index=True)

# Parse timestamps once so the .dt accessors below work.
for frame in (AM19, AM20):
    frame['starttime'] = pd.to_datetime(frame['starttime'])
    frame['stoptime'] = pd.to_datetime(frame['stoptime'])

# Trim tripduration outliers: values outside the 10th-90th percentile become NaN.
AM19['tripduration'] = AM19['tripduration'][AM19['tripduration'].between(
    AM19['tripduration'].quantile(.1), AM19['tripduration'].quantile(.9))]
AM20['tripduration'] = AM20['tripduration'][AM20['tripduration'].between(
    AM20['tripduration'].quantile(.1), AM20['tripduration'].quantile(.9))]

# Label each period so the combined frame can be split by COVID status.
AM20['covid'] = 'yes'
AM19['covid'] = 'no'
AM19and20 = pd.concat([AM19, AM20], ignore_index=True)

# Derived time features and readable gender labels.
AM19and20['starttimedayofweek'] = AM19and20['starttime'].dt.day_name()
AM19and20['gender'].replace(1, 'Male', inplace=True)
AM19and20['gender'].replace(2, 'Female', inplace=True)
AM19and20['gender'].replace(0, 'Unspecified', inplace=True)
AM19and20['starttimehour'] = AM19and20['starttime'].dt.hour

# BUG FIX: `df[col] = df[col].dropna()` is a no-op -- the dropped Series is
# re-aligned to the original index on assignment, re-inserting the NaNs.
# Drop the incomplete rows instead.
AM19and20 = AM19and20.dropna(subset=['start station id', 'end station id', 'birth year'])
assert AM19and20['start station id'].isnull().sum() == 0

train, test = train_test_split(AM19and20, test_size=0.30, random_state=42)
| StarcoderdataPython |
199704 | import functools
import tensorflow as tf
from gpflow import default_float
from gpflow.utilities import to_default_float
from gpflow.utilities.ops import square_distance, difference_matrix
def cached(variable):
    """Decorator factory: memoise a one-argument matrix computation on *self*.

    ``variable`` is the name of the attribute used as the cache slot.  The
    wrapped function is called with ``self.X`` (not ``self``), and its result
    is stored on the instance only when ``self._cache`` is truthy, so callers
    can opt out of caching per instance.  Setting the slot attribute to None
    invalidates the cache.
    """
    def cache(func):
        # Compile the computation as a TF graph; experimental_relax_shapes
        # reduces retracing when input shapes vary, experimental_compile
        # requests XLA compilation.
        func = tf.function(func, experimental_compile=True, experimental_relax_shapes=True)

        @functools.wraps(func)
        def caching_wrapper(self, *args, **kwargs):
            # Recompute when the slot is absent or has been cleared (None).
            if not hasattr(self, variable) or getattr(self, variable) is None:
                mat = func(self.X)
                if self._cache:
                    setattr(self, variable, mat)
            else:
                mat = getattr(self, variable)
            return mat

        return caching_wrapper

    return cache
class DistanceCache:
    """Lazily computes and (optionally) caches distance matrices of a fixed ``X``."""

    def __init__(self, X: tf.Tensor, cache=True):
        self.X = X
        # When False, the @cached properties recompute on every access
        # instead of storing the result on the instance.
        self._cache = cache

    @property
    @cached("_squaredEuclidean")
    def squaredEuclideanDistance(X):  # receives self.X via the @cached wrapper, not self
        return square_distance(to_default_float(X), None)

    @property
    @cached("_sumDiff")
    def sumOfDifferences(X):  # receives self.X via the @cached wrapper, not self
        # Pairwise differences summed over the feature axis.
        return tf.reduce_sum(difference_matrix(to_default_float(X), None), axis=-1)
| StarcoderdataPython |
3258198 | # coding: utf-8
'''
This script reads data from Scopus xlsx files to process and laod in MongoDB.
'''
import logging
import pyexcel
import models
import keycorrection
from accent_remover import *
logging.basicConfig(filename='logs/scopus_loader.info.txt', level=logging.INFO)
logger = logging.getLogger(__name__)
def scopus_loader(file_name, keycorrection):
    """Read a Scopus source-list xlsx, normalise each record, and load it
    into the ``Scopus`` MongoDB collection (which is dropped first).

    file_name: path of the xlsx file to read.
    keycorrection: ordered sequence of canonical column names; replaces the
        sheet's header names position by position.
    """
    models.Scopus.drop_collection()

    sheet = pyexcel.get_sheet(file_name=file_name, name_columns_by_row=0)

    # Key correction: overwrite the sheet's column names positionally.
    for i, k in enumerate(keycorrection):
        sheet.colnames[i] = k

    # Keep ISSNs as strings (leading zeros are significant).
    sheet.column.format('print_issn', str)
    sheet.column.format('e_issn', str)

    sheet_json = sheet.to_records()

    for rec in sheet_json:
        if type(rec['sourcerecord_id']) == str:
            rec['sourcerecord_id'] = int(rec['sourcerecord_id'])

        rec['country'] = rec['publishers_country']
        # Dedup/match key: accent-free lowercase title with '&' normalised
        # to 'and', joined with the lowercase publisher country.
        rec['title_country'] = '%s-%s' % (
            accent_remover(rec['title']).lower().replace(' & ', ' and ').replace('&', ' and '),
            rec['publishers_country'].lower())

        # Re-hyphenate the bare 8-digit ISSNs as NNNN-NNNN.
        rec['issn_list'] = []
        if rec['print_issn']:
            rec['issn_list'].append(rec['print_issn'][0:4] + '-' + rec['print_issn'][4:8])
        if rec['e_issn']:
            rec['issn_list'].append(rec['e_issn'][0:4] + '-' + rec['e_issn'][4:8])

        # remove empty keys (keep explicit zeros)
        rec = {k: v for k, v in rec.items() if v or v == 0}

        # Fold flat per-year metric columns into nested {year: {metric: value}}.
        for year in ['2014', '2015', '2016']:
            for k in ['citescore', 'sjr', 'snip']:
                if k + '_' + year in rec:
                    if year not in rec:
                        rec[year] = {}
                    rec[year].update({k: float(rec[k + '_' + year])})
                    del rec[k + '_' + year]

        # Codes ASJC.
        # BUG FIX: the original `for c in codes: if c == '': codes.pop()`
        # mutated the list while iterating over it and popped from the *end*,
        # discarding a valid code whenever an empty entry was not last.
        codes = rec['all_science_classification_codes_asjc'].replace(' ', '').split(';')
        rec['asjc_code_list'] = [c for c in codes if c]

        # Save data
        mdata = models.Scopus(**rec)
        mdata.save()

    num_posts = models.Scopus.objects().count()
    msg = u'Registred %d posts in Scopus collection' % num_posts
    logger.info(msg)
    print(msg)
def main():
    '''
    scopus_loader(file_name, keycorrection)
    file_name = xlsx path and file name
    keycorrection = dict name of keycorrection module
    '''
    source_file = 'data/scopus/ext_list_October_2017.xlsx'
    column_names = keycorrection.scopus_columns_names
    scopus_loader(source_file, column_names)
| StarcoderdataPython |
3321712 | <reponame>claudiocassiano/ClaudioParticular
# Greet the user, read two grades, and report whether their sum is below,
# equal to, or above 10.  All user-facing messages are in Portuguese.
print("Bem vindos ao nosso exercício!!!!\n")
n1 = float (input("Digite a primeira nota: "))
n2 = float (input("digite a segunda nota: "))
soma = n1 + n2
if (soma <10) :
    print(f"O valor da soma que é {soma} é menor que 10.")
elif (soma ==10) :
    print(f"O valor da soma que é {soma} é igual a 10.")
else:
    print(f"O valor da soma que é {soma} é maior que 10.")
| StarcoderdataPython |
1795271 | from PyQt5.QtCore import QObject
import socket
import time
import struct
import base64
import subprocess
import logging
from Module.Packages import ClassBroadcastFlag
class ClassBroadcast(QObject):
    """Listens on a UDP multicast group and dispatches remote-control
    commands (messages, shell commands, screen broadcast toggles, quit
    requests) to the parent object via Qt signals.
    """

    # Class-level defaults; real values are set in __init__.
    parent = None
    current_ip = None
    socket_ip = None
    socket_port = None
    socket_buffer_size = None
    socket_obj = None

    def __init__(self, parent, current_ip, socket_ip, socket_port, socket_buffer_size):
        super(ClassBroadcast, self).__init__(parent)
        self.parent = parent
        self.current_ip = current_ip       # local interface address used to join the group
        self.socket_ip = socket_ip         # multicast group address
        self.socket_port = socket_port
        self.socket_buffer_size = socket_buffer_size
        self.__init_socket_obj()

    def __init_socket_obj(self):
        # UDP socket joined to the multicast group `socket_ip` via the local
        # interface `current_ip`; SO_REUSEADDR lets several listeners share
        # the same port.
        self.socket_obj = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        self.socket_obj.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 255)
        self.socket_obj.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket_obj.bind(('', self.socket_port))
        self.socket_obj.setsockopt(
            socket.IPPROTO_IP,
            socket.IP_ADD_MEMBERSHIP,
            socket.inet_aton(self.socket_ip) + socket.inet_aton(self.current_ip)
        )

    @staticmethod
    def execute_remote_command(command):
        # SECURITY NOTE: executes an arbitrary network-supplied string
        # through the shell (shell=True) -- full remote code execution by
        # design; make sure the multicast group is trusted/isolated.
        subprocess.call(command, shell=True)

    def batch_send_decode(self, unpacked_data):
        """Return the payload if this host is in the packet's target list,
        else None.

        Packet layout: '!i' byte-length of the target section, then packed
        4-byte IPv4 addresses separated by b'\\x00', then the payload.
        NOTE(review): splitting packed addresses on b'\\x00' breaks for any
        IP containing a zero octet (e.g. 192.168.0.1) -- confirm the sender's
        encoding.
        """
        integer_length = struct.calcsize('!i')
        targets_length = struct.unpack('!i', unpacked_data[:integer_length])[0]
        targets = unpacked_data[integer_length:integer_length + targets_length].split(b'\x00')
        targets = [socket.inet_ntoa(item) for item in targets]
        if self.current_ip in targets:
            data = unpacked_data[integer_length + targets_length:]
            return data
        return None

    def start(self):
        """Blocking receive loop; returns only on RemoteQuit/ConsoleQuit."""
        # Every datagram is '!2i' (flag, payload length) followed by the payload.
        payload_size = self.socket_buffer_size - struct.calcsize('!2i')
        while True:
            try:
                socket_data, socket_addr = self.socket_obj.recvfrom(self.socket_buffer_size)
                unpacked_flag, unpacked_length, unpacked_data = struct.unpack(f'!2i{payload_size}s', socket_data)
                # Trim zero padding down to the declared payload length.
                unpacked_data = unpacked_data[:unpacked_length]
                if unpacked_flag in (
                        ClassBroadcastFlag.Message,
                        ClassBroadcastFlag.Command,
                        ClassBroadcastFlag.RemoteSpyStart,
                        ClassBroadcastFlag.RemoteQuit,
                        ClassBroadcastFlag.ClientFileRecieved
                ):
                    # These flags are targeted: drop the packet unless this
                    # host appears in the embedded target list.
                    data = self.batch_send_decode(unpacked_data)
                    if data is None:
                        continue
                    if unpacked_flag == ClassBroadcastFlag.Message:
                        message = base64.b64decode(data).decode('utf-8')
                        self.parent.message_recieved.emit(str(message))
                    elif unpacked_flag == ClassBroadcastFlag.Command:
                        message = base64.b64decode(data).decode('utf-8')
                        self.execute_remote_command(str(message))
                    elif unpacked_flag == ClassBroadcastFlag.RemoteSpyStart:
                        self.parent.start_remote_spy.emit()
                    elif unpacked_flag == ClassBroadcastFlag.RemoteQuit:
                        self.parent.quit_self.emit()
                        return
                    elif unpacked_flag == ClassBroadcastFlag.ClientFileRecieved:
                        self.parent.client_file_recieved.emit()
                # The remaining flags are broadcast to everyone (no target list).
                elif unpacked_flag == ClassBroadcastFlag.StartScreenBroadcast:
                    self.parent.toggle_screen_broadcats.emit(True)
                elif unpacked_flag == ClassBroadcastFlag.StopScreenBroadcast:
                    self.parent.toggle_screen_broadcats.emit(False)
                elif unpacked_flag == ClassBroadcastFlag.ConsoleQuit:
                    self.parent.reset_all.emit()
                    return
            except Exception as e:
                # Malformed packets are logged and skipped; the loop keeps running.
                logging.warning(f'Failed to decode socket data: {e}')
| StarcoderdataPython |
# Add a single Bingo 75 ticket to the disposal (utilization) list for the
# current draw, entering the barcode via the on-screen keyboard.
def test_add_one_barcode_bingo_75_current_draw_keyboard(app, fixture_barcode_bingo75):
    # Open the utilization page and select the Bingo 75 game.
    app.utiliz.open_page_utilization()
    app.utiliz.click_bingo_75()
    # Remember the draw number shown in the input, confirm it, and check it
    # is the one displayed in the utilization menu.
    get_value = app.utiliz.get_input_value()
    app.utiliz.modal_draw_ok()
    assert app.utiliz.show_draw_in_util_menu() == get_value
    # Type the ticket barcode with the on-screen keyboard and add it.
    app.utiliz.button_add()
    app.utiliz.modal_one_input_ticket_barcode_keyboard(fixture_barcode_bingo75)
    app.utiliz.modal_ticket_barcode_add()
    app.utiliz.barcode_in_util_menu()
    # The menu row shows "<barcode> <barcode> <count>".
    assert f'{fixture_barcode_bingo75} {fixture_barcode_bingo75} 1' in app.utiliz.barcode_in_util_menu()
    # Clean up: remove the barcode and return to the main page.
    app.utiliz.delete_barcode(fixture_barcode_bingo75)
    app.utiliz.comeback_main_page()
# Markdown templates rendered on the challenge pages.  The string contents
# are user-facing and must be kept verbatim.
overview = """
## Context
Manufacturing process feature selection and categorization
## Content
Abstract: Data from a semi-conductor manufacturing process
Data Set Characteristics: Multivariate
Number of Instances: 1567
Area: Computer
Attribute Characteristics: Real
Number of Attributes: 591
Date Donated: 2008-11-19
Associated Tasks: Classification, Causal-Discovery
Missing Values? Yes
A complex modern semi-conductor manufacturing process is normally under consistent
surveillance via the monitoring of signals/variables collected from sensors and or
process measurement points. However, not all of these signals are equally valuable
in a specific monitoring system. The measured signals contain a combination of
useful information, irrelevant information as well as noise. It is often the case
that useful information is buried in the latter two. Engineers typically have a
much larger number of signals than are actually required. If we consider each type
of signal as a feature, then feature selection may be applied to identify the most
relevant signals. The Process Engineers may then use these signals to determine key
factors contributing to yield excursions downstream in the process. This will
enable an increase in process throughput, decreased time to learning and reduce the
per unit production costs.
"""
# Instructions for downloading the feature data (targets are held back).
data = """
In order to get the data simply run the following command:
```python
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/secom/secom.data', sep=' ', header=None)
```
Please ask the admin in order to get the target and the random seed used for train/test split.
"""
# How submissions are scored.
evaluation = """
The predictions are evaluated according to the PR-AUC score.
You can get it using
```python
from sklearn.metrics import average_precision_score
```
More details [here](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html).
"""
| StarcoderdataPython |
1638744 | from redis import Redis, RedisError
# Connect to the Redis server reachable under the hostname "redis"
# (e.g. a docker-compose service name), using logical database 0.
redis = Redis(host='redis', db=0)
| StarcoderdataPython |
133790 | <filename>equilibrium_points/statistics_brief.py
import os
import warnings
import numpy as np
import random as rand
import matplotlib.pyplot as plt
import dynalysis.basics as bcs
import dynalysis.classes as clss
from itertools import combinations
from scipy.stats import pearsonr
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA, IncrementalPCA
from dynalysis.visualization import subplot
from sub_basics import *
# All non-empty index subsets of the four neurons {0,1,2,3}, ordered by
# subset size (4 singles + 6 pairs + 4 triples + 1 quadruple = 15 stimulus
# combinations).  NOTE: must be adjusted manually for a different neuron count.
combs=[p for p in combinations([0,1,2,3],1)]+[p for p in combinations([0,1,2,3],2)]+\
[p for p in combinations([0,1,2,3],3)]+[p for p in combinations([0,1,2,3],4)]

# HACK: silence every warning process-wide by monkey-patching warnings.warn
# with a no-op (e.g. sklearn/numpy runtime warnings during clustering).
def warn(*args, **kwargs):
    pass
warnings.warn = warn
def filter_data(l, n_segments=None, window=(40, 90), period=100):
    """Keep only the steady-state samples of each trial segment.

    The recording consists of consecutive segments of ``period`` samples,
    one per stimulus combination; from each segment only the samples in
    ``window`` (start inclusive, stop exclusive) are kept.

    l: flat list of samples (e.g. one neuron's firing-rate train).
    n_segments: number of segments to extract; defaults to ``len(combs)``
        (the module-level stimulus combinations), preserving the original
        hard-coded behaviour.
    window: (start, stop) offsets within each segment.
    period: length of one segment.
    returns: list with the selected samples of all segments concatenated.
    """
    if n_segments is None:
        n_segments = len(combs)
    start, stop = window
    res = []
    for seg in range(n_segments):
        res += l[start + period * seg: stop + period * seg]
    return res
def get_state_train(b, runnum, repeat=1):
    '''
    state_train is a list of coordinates (one 4-tuple of firing rates per time bin).
    b: branch/run descriptor object providing the data directory via b.pathlink.
    runnum: run identifier (unused here; kept for the caller's bookkeeping).
    repeat: number of repeated trials to read (files Frate.txt numbered 1..repeat).
    returns: 1) list, a list of all coordinates.
    2) list, a list containing sub-lists of coordinates, with each sub-list being a different trial.
    3) list, a list containing sub-lists of firing rates for each neuron
    '''
    # NOTE(review): chdir changes the working directory process-wide and is
    # never restored -- confirm callers do not depend on the previous cwd.
    os.chdir(b.pathlink)
    state_train_all, state_train_trucks, state_neurons = [], [], [[] for i in range(4)]
    for rp in range(1,repeat+1):
        #obtain firing rate trains (column 0 is time; columns 1.. are neurons)
        x = bcs.readcolumn(bcs.filename_generator('Frate.txt', rp), to_float=True)
        # Keep only the steady-state window of each stimulus segment.
        x = [filter_data(x[i]) for i in range(1,len(x))]
        #state train
        state_train = list(zip(x[0],x[1],x[2],x[3])) #for 4 neurons only!
        state_train_trucks.append(state_train)
        state_train_all += state_train
        for i in range(4): state_neurons[i] += x[i]
    return state_train_all, state_train_trucks, state_neurons
def EP(state_train, nmax=4, plot=False):
    '''
    Performs the elbow test (k-means for k = 1..nmax) and returns relevant
    info about the equilibrium points (EP).
    state_train: list of 4-dimensional firing-rate coordinates.
    nmax: largest number of clusters tried.
    plot: if True, saves the SSE-vs-k curve as 'elbow.png' in the cwd.
    returns: 1) int, number of EP, 2) list, coors of EP
    3) list, state_train labeled into clusters, 4) float, ratio of SSE
    '''
    all_sse, all_mean, all_labels = [], [], [] #SSE represents how well the data performs
    for k in range(1,nmax+1):
        SSE=0
        labels = KMeans(n_clusters=k, max_iter=400, precompute_distances = 'auto').fit_predict(state_train)
        all_labels.append(labels)
        # Collect the members of each cluster, then its centroid.
        mean_train, temp = [[] for c in range(k)], []
        for s in range(len(labels)):
            mean_train[labels[s]].append(list(state_train[s]))
        for i in range(k):
            # Empty clusters get a dummy 4-d origin centroid.
            if mean_train[i]==[]: temp.append([0,0,0,0])
            else: temp.append(list(np.mean(np.array(mean_train[i]), axis=0)))
        all_mean.append(temp)
        # Within-cluster sum of squared distances to the centroid.
        for j in range(k):
            for ss in mean_train[j]:
                SSE += vect_len(ss, all_mean[k-1][j])**2
        all_sse.append(SSE)
    # Successive SSE drops; an "elbow" is a drop much larger than the next one.
    diff = [all_sse[i]-all_sse[i+1] for i in range(nmax-1)]
    ratios = []
    for i in range(nmax-2):
        if diff[i+1]!=0:ratios.append(diff[i]/float(diff[i+1]))
        else: ratios.append(np.inf)
    #plot
    if plot:
        plt.plot([i for i in range(1,nmax+1)], all_sse, 'k', alpha=0.5)
        plt.scatter([i for i in range(1,nmax+1)], all_sse, s=20, c='b')
        plt.xticks([i for i in range(1, nmax+1)])
        plt.xlabel('k (number of clusters)', fontsize=14)
        plt.ylabel('SSE (Hz squared)', fontsize=14)
        plt.title('Ratios: '+', '.join(bcs.to_string(ratios)))
        plt.savefig('elbow.png', dpi=100)
        plt.close('all')
    #determine if elbow exists: first k whose SSE drop is >=7x the next drop
    for d in range(nmax-2):
        if much_larger_than(diff[d], diff[d+1], 7): return d+2, all_mean[d+1], all_labels[d+1], ratios[d]
    # No elbow found: report a single cluster with ratio 0.
    return 1, all_mean[0], all_labels[0], 0
def pca(fit_train, trans_train, dim=2):
    '''
    Performs pca on fit_train, and transforms trans_train using the results.
    fit_train: data the PCA basis is fitted on.
    trans_train: data projected onto that basis.
    dim: number of principal components kept.
    returns: 1) list, new_trans_train, i.e. a list of coordinates, under pca transformation.
    2) list, eigenvectors of the pca transformation (or 0 when fitting failed).
    '''
    pca = IncrementalPCA(n_components=dim)
    try:
        pca.fit(fit_train)
    # BUG FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
    # and SystemExit; only genuine fit failures (e.g. fewer samples than
    # components) should trigger the zero fallback.
    except Exception:
        # NOTE(review): the fallback is sized by fit_train, while the normal
        # path returns len(trans_train) rows -- confirm callers expect this.
        return [[0]*dim]*len(fit_train), 0
    comp = pca.components_
    new_trans_train = pca.transform(trans_train)
    return new_trans_train, comp
def sort_clusters(nclusters, sorted_train, bin_size=0.01):
    '''
    returns: storage, which stores the attractors as garages
    and len(time periods) that the system stays in said attractors items.
    nclusters: number of attractors (cluster labels are 0..nclusters-1).
    sorted_train: per-time-bin cluster labels, in temporal order.
    bin_size: duration of one time bin in seconds; dwell times are stored
        as (number of bins) * bin_size.
    '''
    #count transitions and store: every time the label changes, record how
    #long the system dwelled in the previous attractor.
    STRG = storage([i for i in range(nclusters)], ['list' for i in range(nclusters)])
    previous_n, previous_name=0, sorted_train[0]
    for n in range(len(sorted_train)):
        name = sorted_train[n]
        if name != previous_name:
            STRG.store_list(previous_name, (n-previous_n)*bin_size)
            previous_name=name
            previous_n=n
    # NOTE(review): the dwell time of the final segment (after the last
    # transition) is never flushed into STRG -- confirm this is intended.
    return STRG
def escape_rate(nclusters, STRG):
    '''
    returns: dict, with attractors as keys and the mean dwell time in that
    attractor as values (0 for attractors that were never left).
    nclusters: number of attractors; keys are 0..nclusters-1.
    STRG: storage object; STRG.retrieve_garage(nc) yields the list of dwell
        times recorded for attractor nc.
    '''
    escape_dic = {}
    for nc in range(nclusters):
        times = STRG.retrieve_garage(nc)
        # BUG FIX: the original wrapped np.mean in `except RuntimeWarning`,
        # but np.mean([]) does not raise -- it *warns* and returns nan (and
        # warnings.warn is monkey-patched to a no-op above), so empty
        # clusters silently got nan instead of the intended 0.
        escape_dic[nc] = np.mean(times) if len(times) else 0
    return escape_dic
def get_fano(state_neurons):
    '''
    returns: the fano factor averaged the trials for each neuron.
    '''
    # One Fano factor per neuron, in input order.
    return [bcs.fano(neuron) for neuron in state_neurons]
def print_commands(plot, plot_fr, plot_pca, plot_elbow, trans, runnum, only):
    """Echo the run configuration to stdout so the operator can sanity-check it."""
    print('CAUTION: combs must be adjusted manually.')
    print('***Just to be clear, these are the commands for this run:')
    print('The trial you are running is: run' + runnum)
    print('Trans code is: ' + trans + '\n')
    if plot:
        # Report each requested graph, in the same order as before.
        if plot_fr:
            print('The firing rate graph will be plotted.')
        if plot_pca:
            print('The pca graph will be plotted.')
        if plot_elbow:
            print('The elbow graph will be plotted.')
    else:
        print('Nothing will be plotted.')
    print('These actions will be done: ' + ', '.join(only))
def confidence(FFlist, ratio, esc_rate, harsh_criteria=10):
    """Grade confidence that a trial is a false positive ('90_FP' > '70_FP' > '30_FP').

    NOTE: `ratio` is currently unused (a ratio-based check existed but was
    disabled); it is kept in the signature for caller compatibility.
    """
    very_low_FF = lower_than(FFlist, harsh_criteria)  # whether FF is VERY low
    # whether every escape rate is < 0.05
    slow_escape = lower_than(bcs.to_float(esc_rate.split('_')), 0.05)
    if very_low_FF:
        return '90_FP' if slow_escape else '70_FP'
    return '30_FP'
def continuity(lists):
    """For each train in *lists*, return the mean length of its runs of
    consecutive non-zero entries (0 when no run was recorded)."""
    res=[]
    for l in lists:
        # count starts at 1 and is reset to 1 after each flushed run.
        previous_i, count, accumulate = 0, 1, []
        for j in range(len(l)):
            i=l[j]
            if j==(len(l)-2):
                # NOTE(review): flushes at the second-to-last index, so the
                # last element never extends or closes a run — this looks
                # like an off-by-one (len(l)-1 expected). Confirm intent
                # before changing; downstream thresholds may depend on it.
                accumulate.append(count)
            elif i !=0: count+=1
            elif previous_i!=0 and i==0:
                # Falling edge: a non-zero run just ended.
                accumulate.append(count)
                count=1
            previous_i=i
        if accumulate==[]: res.append(0)
        else: res.append(np.mean(accumulate))
    return res
def correlation(lists):
    """Pairwise Pearson correlation between the four trains in *lists*.

    returns: list of six 'ij_corr_pval' strings, one per unordered pair of
    indices drawn from 0-3 (order: 01, 02, 03, 12, 13, 23).
    """
    res = []
    for comb in combinations('0123', 2):
        A, B = lists[int(comb[0])], lists[int(comb[1])]
        try:
            corr, pval = pearsonr(A, B)
        except Exception:
            # BUG FIX: the original caught RuntimeWarning, but warnings are
            # not raised as exceptions, so it never fired. pearsonr signals
            # degenerate input (e.g. a constant train, length mismatch) with
            # real exceptions on some scipy versions; default those pairs to
            # an uncorrelated result, as originally intended.
            corr, pval = 0, 0
        res.append('_'.join([comb[0] + comb[1], str(corr), str(pval)]))
    return res
def determine_corr(num, FFlist, comb):
    """Classify the correlation value *num* for the neuron pair named by *comb*
    ('pos' / 'neg' / 'none'), taking the pair's Fano factors into account."""
    FF_a, FF_b = FFlist[int(comb[0])], FFlist[int(comb[1])]
    # Two silent (zero-FF) neurons cannot be meaningfully correlated.
    if FF_a == 0 and FF_b == 0:
        return 'none'
    if num > 0.5:
        return 'pos'
    if num < -0.5:
        return 'neg'
    # Ambiguous correlation (-0.5 <= num <= 0.5): only low-variability pairs
    # get classified, everything else is 'none'.
    both_low = FF_a < 5 and FF_b < 5
    if both_low and num < 0.5:
        return 'pos'
    if both_low and num > -0.5:
        # Reached only when num == 0.5 exactly (preserved original quirk).
        return 'neg'
    return 'none'
def determine_states(runnum, motherpath=os.getcwd()):
    """Read info.txt/corr.txt for run *runnum* and collapse parameter sets into
    qualitatively distinct states per motif.

    returns: dict {motif: [[numEP, (comb, relation), ...], ...]} where each
    inner list is one qualitatively distinct parameter-set signature.

    NOTE(review): the `motherpath=os.getcwd()` default is evaluated once at
    import time, so it depends on the cwd when this module was imported.
    """
    b_res=clss.branch('results_'+str(runnum), motherpath)
    corrlink=os.path.join(b_res.pathlink,'corr.txt')
    infolink=os.path.join(b_res.pathlink,'info.txt')
    info_entry, corr_entry=clss.entry([' 0', ' 0_6'], [' 1', ' 4']), clss.entry([' 0', ' 0_6'], [])
    infodata = info_entry.readdata_and_fix(infolink)
    corrdata = corr_entry.readdata_and_fix(corrlink, quick_format=True)
    motdic = {} #{motif: [[numEP, (comb, relation),..],..]}
    for key in corrdata.keys():
        #arrange each ps into [numEP, (comb, relation),..]
        numEP, motif, FF = infodata[key][0], key[1], bcs.to_float(infodata[key][1].split('_'))
        if motif not in motdic.keys(): motdic[motif]=[]
        temp = [numEP]
        for val in corrdata[key]:
            comb, crvalue, pvalue = val.split('_')
            relation = determine_corr(float(crvalue), FF, comb)
            temp.append((comb, relation))
        #Try to catch errors in states:
        # 3+ 'none'/'pos' pairs: snap the whole signature to all-none/all-pos.
        relations = [combo[1] for combo in temp[1:]]
        if relations.count('none')>=3: temp=[numEP,('01','none'),('02','none'),('03','none'),\
            ('12','none'),('13','none'),('23','none')]
        if relations.count('pos')>=3: temp=[numEP,('01','pos'),('02','pos'),('03','pos'),\
            ('12','pos'),('13','pos'),('23','pos')]
        #Determine if there is already a qualitatively similar parameter set in the motif
        to_append = True
        for pms in motdic[motif]: #[[numEP, (comb, relation),..],..]
            # NOTE(review): `exit` shadows the builtin of the same name.
            exit = True
            for item in temp: #[numEP, (comb, relation),..]
                if item not in pms:
                    exit = False
                    break
            if exit:
                to_append = False
                break
        if to_append: motdic[motif].append(temp)
    return motdic
#determine_states('3')
def main(runnum, plot=False, outfile='info.txt', trans='ffffssff', motherpath=os.getcwd(), remedy=False, **kwargs):
    '''
    Runs the full per-trial analysis for run <runnum>.
    1) Plots the elbow and the frate graphs, moves all of them to a folder called 'graphs'.
    2) Performs kmeans on the data to get EP-relevant info, and:
       [1] Returns the pms along with their corresponding coordinates of the EPs on 2-dimensions (determined by pca).
       [2] Plots the data of each param_set (PS) onto pca, labels the EPs, and moves it to folder 'graphs'.
       [3] Outputs PS, #EP, EP_coor, ratio, FF to file outfile.
    parameters:
        *plot: If any graph is plotted at all, it must be set to True.
        *only: Can select a few actions only (subset of
               {'esc','pca','FF','pw','corr','info','EP'}); [] means all.
        *remedy: if True, appends to existing output files instead of
                 clearing them first (resume an interrupted run).
    NOTE(review): the `only` flags are not fully independent — e.g. the
    'info' output uses FFall/pall produced by the 'FF'/'pw' steps, and the
    frate plot title uses FFall; disabling those steps while keeping 'info'
    or plot_fr would raise NameError. Confirm before relying on subsets.
    NOTE(review): `motherpath=os.getcwd()` is evaluated once at import time.
    '''
    #kwargs
    kw={'plot_fr':False, 'plot_pca':False, 'plot_elbow':False, 'corrfile':'corr.txt', 'only':[], 'EPfile':'EPcoor.txt'}
    kw.update(kwargs)
    plot_fr, plot_pca, plot_elbow = kw['plot_fr'], kw['plot_pca'], kw['plot_elbow']
    corrfile, only, EPfile = kw['corrfile'], kw['only'], kw['EPfile']
    print_commands(plot, plot_fr, plot_pca, plot_elbow, trans, runnum, only)
    #dir
    runpath=os.path.join(os.getcwd(),'run'+runnum)
    os.chdir(runpath)
    alldirs=[dr for dr in os.listdir(runpath) if os.path.isdir(os.path.join(runpath,dr))]
    allpms=[]
    #deters: which analysis steps to run (empty `only` enables everything)
    deter_esc = (only==[] or ('esc' in only))
    deter_pca = (only==[] or ('pca' in only))
    deter_FF = (only==[] or ('FF' in only))
    deter_pw = (only==[] or ('pw' in only))
    deter_corr = (only==[] or ('corr' in only))
    deter_info = (only==[] or ('info' in only))
    deter_EP = (only==[] or ('EP' in only))
    #result files and outputs
    b_res=clss.branch('results_'+str(runnum), motherpath)
    b_graphs=clss.branch('graphs',b_res.pathlink)
    # `done` holds trial names already present in outfile so reruns skip them.
    if os.path.exists(os.path.join(b_res.pathlink, outfile)):
        done=bcs.readcolumn(os.path.join(b_res.pathlink, outfile))[0]
    else:
        done=[]
    if plot: b_graphs.mkdir()
    else: b_res.mkdir()
    # Clear (truncate) outputs unless resuming via `remedy`.
    # NOTE(review): EPfile is never cleared here, so EP output accumulates
    # across runs — confirm that is intended.
    if deter_info and not remedy: bcs.output_clf(os.path.join(b_res.pathlink,outfile))
    if deter_corr and not remedy: bcs.output_clf(os.path.join(b_res.pathlink,corrfile))
    #analysis
    count=0
    for dr in alldirs:
        if dr not in done:
            count+=1
            print(str(count)+':'+dr)
            #specifications
            pm=parameter([],[])
            pm.assign_name(dr, trans_method=trans)
            b=clss.branch(dr,runpath)
            rp=10 if len(os.listdir(b.pathlink))>9 else 1 #due to flawed dataset
            #get EPs
            state_train_all, state_train_truck, state_neurons = get_state_train(b, runnum, repeat=rp)
            nclusters, EP_coors, label_train, ratio = EP(state_train_all, nmax=5, plot=(plot and plot_elbow))
            EP4 = ['_'.join(bcs.to_string(item)) for item in EP_coors]
            #calculate escape rate
            if deter_esc:
                # label_train is the concatenation of all per-trial trains;
                # slice it back apart and merge the per-trial dwell times.
                accumulation=0
                STRG = storage([nc for nc in range(nclusters)], ['list' for i in range(nclusters)])
                for state_train in state_train_truck:
                    lt=label_train[accumulation:len(state_train)+accumulation]
                    accumulation+=len(state_train)
                    new_STRG=sort_clusters(nclusters,lt)
                    for nc in range(nclusters): STRG.massive[nc]+=new_STRG.massive[nc]
                ed = escape_rate(nclusters, STRG)
            #pcaPS
            if deter_pca:
                # Transform the data plus the EP coordinates together so the
                # EPs can be highlighted as the trailing nclusters points.
                trans_train, comp = pca(state_train_all, state_train_all+EP_coors)
                x, y = [item[0] for item in trans_train], [item[1] for item in trans_train]
                pm.add_pair(('EP',EP_coors))
                allpms.append(pm)
                if plot and plot_pca:
                    plt.plot(x[:-(nclusters)], y[:-(nclusters)], 'k.', alpha=0.5)
                    plt.plot(x[-(nclusters):], y[-(nclusters):], 'b.')
                    plt.xlabel('dim1', fontsize=14)
                    plt.ylabel('dim2', fontsize=14)
                    plt.savefig(dr+'_pcaPS.png', dpi=100)
                    plt.close('all')
            #fano factor
            if deter_FF:
                FF=get_fano(state_neurons)
                FFall='_'.join(bcs.to_string(FF))
            #pulse width
            if deter_pw:
                pwidth=continuity(state_neurons)
                pall='_'.join(bcs.to_string(pwidth))
            #correlation
            if deter_corr:
                all_corrs=correlation(state_neurons)
            #move graphs and outputs
            if plot:
                if plot_elbow: os.rename('elbow.png',dr+'_elbow.png')
                if plot_fr: subplot(fname=os.path.join(b.pathlink, 'Frate.txt'), outputfigname=dr+'_frate.png', tstep=5,\
                    title='Fano Factors: '+FFall, tight=False, dpi=100)
                if plot_elbow: b_graphs.move_from(dr+'_elbow.png',b.pathlink)
                if plot_fr: b_graphs.move_from(dr+'_frate.png',b.pathlink)
                if plot_pca: b_graphs.move_from(dr+'_pcaPS.png',b.pathlink)
            if deter_info:
                vals=[ed[key] for key in sorted(ed.keys())]
                numEP, esc_rate, ratio= str(len(ed.keys())), '_'.join(bcs.to_string(vals)), str(ratio)
                bcs.output_line(os.path.join(b_res.pathlink,outfile),\
                    ' '.join([dr,numEP, esc_rate,ratio,FFall,pall]))
            if deter_corr:
                bcs.output_line(os.path.join(b_res.pathlink,corrfile),\
                    ' '.join([dr]+all_corrs))
            if deter_EP:
                bcs.output_line(os.path.join(b_res.pathlink,EPfile),\
                    ' '.join([dr]+EP4))
    os.chdir(motherpath)
    return allpms
def plot_pcaEPs(runnum, plot=False, motherpath=os.getcwd(), trans='fffsfss', feature='i'):
    '''
    Plots the coordinates of the EPs on 2-dimensions (determined by pca),
    one figure per motif, plus a colorbar figure keyed by *feature*.
    Writes the raw EP coordinates of each motif to 'pcaEP_<motID>.txt'.
    NOTE(review): the `trans` parameter is unused (trans_method is hard-coded
    to 'iiiiss' below) — confirm before removing it.
    '''
    b_res=clss.branch('results_'+str(runnum),motherpath)
    b_pcaEP=clss.branch('pcaEP',b_res.pathlink)
    b_pcaEP.mkdir()
    os.chdir(b_pcaEP.pathlink)
    #reevaluation
    newfile=os.path.join(b_res.pathlink, 'new-info.txt')
    EPfile=os.path.join(b_res.pathlink, 'EPcoor.txt')
    Ent=clss.entry(' 0', [' 6'])
    Ent2=clss.entry(' 0', [])
    data=Ent.readdata_and_fix(newfile)
    EPcoorss=Ent2.readdata_and_fix(EPfile, quick_format=True)
    #sort pms by motifs(ID)
    IDdic, coldic={}, {}
    for key in data.keys():
        pm=parameter([], [])
        pm.assign_name(key, trans_method='iiiiss')
        motID, col=pm.extract('ID'), pm.extract(feature)
        if motID not in IDdic.keys(): IDdic[motID]=[]
        if motID not in coldic.keys(): coldic[motID]=[]
        actual_coors=[bcs.to_float(cr.split('_')) for cr in EPcoorss[key]]
        #reevaluation: 'o' marks trials judged correct — keep all their EPs;
        # otherwise collapse the trial to the mean EP coordinate.
        if data[key][0]=='o':
            IDdic[motID]+=actual_coors
            coldic[motID]+=[int(col)]*len(actual_coors)
        else:
            IDdic[motID]+=[list(np.mean(actual_coors,axis=0))]
            coldic[motID]+=[int(col)]
    for motID in IDdic.keys():
        print(motID)
        EP_coors=IDdic[motID]
        #pca
        trans_train, vectors = pca(EP_coors, EP_coors)
        # NOTE(review): if pca() hit its failure path, `vectors` is the int 0
        # and this comprehension raises — confirm that path cannot occur here.
        vec_strs = [str(round(vec[0],2))+' '+str(round(vec[1],2)) for vec in vectors]
        #elbow then pca: cluster the EPs themselves, then project the centers
        nclusters, new_EP_coors, label_train, ratio = EP(EP_coors, nmax=6, plot=False)
        new_trans_train = pca(EP_coors, new_EP_coors)[0]
        if plot:
            plt.plot([item[0] for item in trans_train], [item[1] for item in trans_train], 'k.')
            plt.plot([item[0] for item in new_trans_train], [item[1] for item in new_trans_train], 'b.')
            plt.xlabel('dim1: '+vec_strs[0], fontsize=14)
            plt.ylabel('dim2: '+vec_strs[1], fontsize=14)
            plt.savefig('pcaEP_'+motID+'.png', dpi=200)
            plt.close('all')
        #try
        from dynalysis.data_visualization import plot_continuous_colorbar
        plot_continuous_colorbar([item[0] for item in trans_train], [item[1] for item in trans_train],\
            coldic[motID], 'dim1: '+vec_strs[0], 'dim2: '+vec_strs[1], feature,\
            svf='pcaEP_'+motID+'_'+feature+'.png')
        bcs.output_clf('pcaEP_'+motID+'.txt')
        bcs.output_double('pcaEP_'+motID+'.txt', EP_coors)
    os.chdir(motherpath)
    return 0
def evaluation_by_FF(runnum, infile='info.txt', FF_criteria=30, cp=False):
    '''
    Evaluates the number of clusters based off ratio, Fano Factor, correlation and ON/OFF.
    Reclassified false positives go to 're-'+infile, trials kept as-is go to
    'new-'+infile (with confidence flag 'o').
    parameters:
        *cp: also copy the frate graphs of reclassified trials into
             re-graphs-s / re-graphs-n for manual inspection.
    NOTE(review): returns the *input* `data` dict, not `new_data` — confirm
    whether callers expect the reclassified values.
    '''
    #dir
    file=os.path.join(os.getcwd(),'results_'+runnum, infile)
    ofile=os.path.join(os.getcwd(),'results_'+runnum, 're-'+infile)
    nfile=os.path.join(os.getcwd(),'results_'+runnum, 'new-'+infile)
    b_res_path=os.path.join(os.getcwd(),'results_'+runnum)
    b_graphs=clss.branch('graphs', b_res_path)
    if cp:
        b_regraphs_s=clss.branch('re-graphs-s', b_res_path)
        b_regraphs_n=clss.branch('re-graphs-n', b_res_path)
        b_regraphs_s.mkdir()
        b_regraphs_n.mkdir()
    bcs.output_clf(ofile)
    bcs.output_clf(nfile)
    Ent=clss.entry(' 0', [' 1', ' 2', ' 3', ' 4_0', ' 4_1', ' 4_2', ' 4_3', ' 5_0', ' 5_1', ' 5_2', ' 5_3'])
    data=Ent.readdata_and_fix(file)
    #main
    new_data={}
    for key in data:
        numEP, esc_rate, ratio, FF1, FF2, FF3, FF4, pw1, pw2, pw3, pw4 = data[key]
        FFlist=bcs.to_float([FF1, FF2, FF3, FF4])
        FFstring='_'.join([FF1, FF2, FF3, FF4])
        pwlist=bcs.to_float([pw1, pw2, pw3, pw4])
        pwstring='_'.join([pw1, pw2, pw3, pw4])
        deter1=(int(numEP)>1) #can only have FPs if numEP>1 by definition
        deter2=lower_than(FFlist, FF_criteria) #if FF is too low
        deter3=lower_than(bcs.to_float(esc_rate.split('_')),0.1) #whether all escape rate is < 0.1
        deter4=lower_than(pwlist,5) #whether all pulse width < 5
        if deter1 and deter2 and deter3: #identify False Positives type-s (saturation)
            conf=confidence(FFlist, ratio, esc_rate)
            new_data[key]=['1', '0', '0', FFstring, pwstring, conf]
            bcs.output_line(ofile,' '.join([key]+new_data[key]))
            if cp: b_graphs.cp_to(key+'_frate.png', b_regraphs_s.pathlink)
        elif deter1 and deter4: #identify False Positives type-n (noise-induced firing)
            new_data[key]=['1', '0', '0', FFstring, pwstring, 'n']
            bcs.output_line(ofile,' '.join([key]+new_data[key]))
            if cp: b_graphs.cp_to(key+'_frate.png', b_regraphs_n.pathlink)
        else: #False Negatives not implemented
            new_data[key]=[numEP, esc_rate, ratio, FFstring, pwstring, 'o'] #correct trials with confidence='o'
            bcs.output_line(nfile,' '.join([key]+new_data[key]))
    return data
def evaluation_by_FF_only(runnum, infile='info.txt', FF_criteria=30, cp=False):
    '''
    Evaluates the number of clusters based off Fano Factor only.
    Near-duplicate of evaluation_by_FF with the escape-rate (deter3) and
    pulse-width (deter4) checks dropped — keep the two in sync.
    NOTE(review): returns the *input* `data` dict, not `new_data`.
    '''
    #dir
    file=os.path.join(os.getcwd(),'results_'+runnum, infile)
    ofile=os.path.join(os.getcwd(),'results_'+runnum, 're-'+infile)
    nfile=os.path.join(os.getcwd(),'results_'+runnum, 'new-'+infile)
    b_res_path=os.path.join(os.getcwd(),'results_'+runnum)
    b_graphs=clss.branch('graphs', b_res_path)
    if cp:
        # b_regraphs_n is created for symmetry with evaluation_by_FF but is
        # never written to here (there is no type-n branch below).
        b_regraphs_s=clss.branch('re-graphs-s', b_res_path)
        b_regraphs_n=clss.branch('re-graphs-n', b_res_path)
        b_regraphs_s.mkdir()
        b_regraphs_n.mkdir()
    bcs.output_clf(ofile)
    bcs.output_clf(nfile)
    Ent=clss.entry(' 0', [' 1', ' 2', ' 3', ' 4_0', ' 4_1', ' 4_2', ' 4_3', ' 5_0', ' 5_1', ' 5_2', ' 5_3'])
    data=Ent.readdata_and_fix(file)
    #main
    new_data={}
    for key in data:
        numEP, esc_rate, ratio, FF1, FF2, FF3, FF4, pw1, pw2, pw3, pw4 = data[key]
        FFlist=bcs.to_float([FF1, FF2, FF3, FF4])
        FFstring='_'.join([FF1, FF2, FF3, FF4])
        pwlist=bcs.to_float([pw1, pw2, pw3, pw4])
        pwstring='_'.join([pw1, pw2, pw3, pw4])
        deter1=(int(numEP)>1) #can only have FPs if numEP>1 by definition
        deter2=lower_than(FFlist, FF_criteria) #if FF is too low
        if deter1 and deter2: #identify False Positives type-s (saturation)
            conf=confidence(FFlist, ratio, esc_rate)
            new_data[key]=['1', '0', '0', FFstring, pwstring, conf]
            bcs.output_line(ofile,' '.join([key]+new_data[key]))
            if cp: b_graphs.cp_to(key+'_frate.png', b_regraphs_s.pathlink)
        else: #False Negatives not implemented
            new_data[key]=[numEP, esc_rate, ratio, FFstring, pwstring, 'o'] #correct trials with confidence='o'
            bcs.output_line(nfile,' '.join([key]+new_data[key]))
    return data
def evaluation_by_mean(runnum, infile='new-info.txt', outfile='motif-numEP.txt'):
    """Aggregate per-motif EP counts from *infile* into *outfile*.

    Per motif, writes: total EPs, mean EPs, mean over trials with non-zero
    FF ('adjusted'), and the counts of trials with 0/1/2/3 EPs (c0 counts
    all-zero-FF trials and is subtracted from the 1-EP count).
    """
    #dir
    file=os.path.join(os.getcwd(),'results_'+runnum, infile)
    ofile=os.path.join(os.getcwd(),'results_'+runnum, outfile)
    bcs.output_clf(ofile)
    Ent=clss.entry(' 0_6', [' 1', ' 4_0', ' 4_1', ' 4_2', ' 4_3'])
    data=Ent.readdata(file)
    for key in data.keys():
        all_numEP = [int(num[0]) for num in data[key]]
        tot = np.sum(all_numEP)
        avg = np.mean(all_numEP)
        c1, c2, c3 = all_numEP.count(1), all_numEP.count(2), all_numEP.count(3)
        adj_avg_list, c0=[], 0
        for num in data[key]:
            # Trials whose four FFs are all zero are treated as dead (c0).
            if np.sum([float(FF) for FF in num[1:]])!=0: adj_avg_list.append(int(num[0]))
            else: c0+=1
        adj_avg = np.mean(adj_avg_list)
        to_output = [key, str(tot), str(avg), str(adj_avg), str(c0), str(c1-c0), str(c2), str(c3)]
        bcs.output_line(ofile, ' '.join(to_output))
    return 0
def evaluation_by_mean_set(setnum, infile='new-info.txt', ofile='motif-numEP', allnum=['1']):
    """Like evaluation_by_mean, but pools the runs in *allnum* over the
    parameter sets stored in 'set<setnum>.npy' and groups by motif string.

    NOTE(review): the '##'-marked lines hard-code the set tuple layout
    (e,i,n,comb,amp,bi,mot,ie,pb) and must be adjusted when the set format
    changes.
    """
    alls=np.load('set'+str(setnum)+'.npy')
    ofile=os.path.join(os.getcwd(), ofile+'-set'+str(setnum)+'.txt')
    bcs.output_clf(ofile)
    Ent, data=clss.entry(' 0', [' 1', ' 4_0', ' 4_1', ' 4_2', ' 4_3', ' 0_5']), {}
    for runnum in allnum:
        file=os.path.join(os.getcwd(),'results_'+runnum, infile)
        data.update(Ent.readdata(file))
    motifdic={}
    for sett in alls:
        e,i,n,comb,amp,bi,mot,ie,pb=sett ##
        pm=parameter([e,i,n,amp,bi,str(mot.typeID)+'-'+str(mot.ID),ie,pb],['e','i','noise','A','Bi','ID','gIE','pb']) ##
        if data[pm.name][0][5] not in motifdic: motifdic[data[pm.name][0][5]]=[]
        motifdic[data[pm.name][0][5]].append(data[pm.name][0][:5])
    for key in motifdic:
        all_numEP = [int(num[0]) for num in motifdic[key]]
        tot = np.sum(all_numEP)
        avg = np.mean(all_numEP)
        c1, c2, c3, c4 = all_numEP.count(1), all_numEP.count(2), all_numEP.count(3), all_numEP.count(4)
        adj_avg_list, c0=[], 0
        for num in motifdic[key]:
            # Trials whose FFs are all zero are treated as dead (c0).
            if np.sum([float(FF) for FF in num[1:]])!=0: adj_avg_list.append(int(num[0]))
            else: c0+=1
        adj_avg = np.mean(adj_avg_list)
        to_output = [key, str(tot), str(avg), str(adj_avg), str(c0), str(c1-c0), str(c2), str(c3), str(c4)]
        bcs.output_line(ofile, ' '.join(to_output))
def evaluation_by_ratioSSE(runnum, criteria, infile='info.txt', cp=False):
    """Re-evaluate the cluster count of each trial from its SSE ratios:
    if exactly one ratio exceeds *criteria*, rewrite numEP accordingly,
    otherwise fall back to 1. Results go to 'new2-'+infile.
    """
    #dir
    file=os.path.join(os.getcwd(),'results_'+runnum, infile)
    nfile=os.path.join(os.getcwd(),'results_'+runnum, 'new2-'+infile)
    b_res_path=os.path.join(os.getcwd(),'results_'+runnum)
    b_graphs=clss.branch('graphs', b_res_path)
    if cp:
        b_regraphs_R=clss.branch('re-graphs-R', b_res_path)
        b_regraphs_R.mkdir()
    bcs.output_clf(nfile)
    Ent=clss.entry(' 0', [' 3', ' 1'])
    data=Ent.readdata_and_fix(file)
    Ent=clss.entry(' 0', [])
    alldata=Ent.readdata_and_fix(file, quick_format=True)
    #main
    for key in data:
        ratios = bcs.to_float(data[key][0].split('_'))
        passed=[]
        for r in range(len(ratios)):
            if ratios[r] > criteria: passed.append(r)
            # NOTE(review): this copy runs on every loop iteration whose
            # index disagrees with the recorded numEP — likely intended to
            # run once per trial; confirm.
            if alldata[key][0] != str(r+1) and cp:
                b_graphs.cp_to(key+'_frate.png', b_regraphs_R.pathlink)
        # NOTE(review): `r` here is the *last* loop index (len(ratios)-1),
        # not the index that passed — str(r+1) is therefore constant per
        # trial. EP() reports d+2 clusters for ratio index d, so
        # str(passed[0]+2) was probably intended; confirm before changing.
        if len(passed)==1: bcs.output_line(nfile,' '.join([key, str(r+1)]+alldata[key][1:]))
        else: bcs.output_line(nfile,' '.join([key, str(1)]+alldata[key][1:]))
    return data
if __name__=='__main__':
    # Driver: run the full analysis for one run/set with all plots enabled,
    # resuming (remedy=True) rather than clearing previous outputs.
    # (Removed a leftover bare `pass` that preceded these statements; it had
    # no effect.)
    runnum='31'
    pms=main(runnum, plot=True, plot_fr=True, plot_pca=True, plot_elbow=True, remedy=True)
    evaluation_by_ratioSSE(runnum, 7, infile='info.txt', cp=True)
    data=evaluation_by_FF_only(runnum, cp=True, FF_criteria=37)
    plot_pcaEPs(runnum, plot=True, feature='i')
    setnum='31'
    evaluation_by_mean_set(setnum, allnum=[setnum])
| StarcoderdataPython |
from django.conf import settings
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from replies.models import Reply
class PublicManager(models.Manager):
    """Manager returning only non-hidden posts, newest first."""

    def get_queryset(self):
        base = super(PublicManager, self).get_queryset()
        return base.filter(hidden=False).order_by('-created_time')
class Post(models.Model):
    """A forum post. `objects` returns everything; `public` excludes hidden
    posts (see PublicManager)."""
    title = models.CharField("标题", max_length=255)
    body = models.TextField("正文", blank=True)
    # View counter is maintained by application code, not the admin.
    views = models.PositiveIntegerField("浏览量", default=0, editable=False)
    created_time = models.DateTimeField("创建时间", auto_now_add=True)
    modified_time = models.DateTimeField("修改时间", auto_now=True)
    pinned = models.BooleanField("置顶", default=False)
    highlighted = models.BooleanField("加精", default=False)
    hidden = models.BooleanField("隐藏", default=False)
    tags = models.ManyToManyField('tags.Tag', verbose_name="标签")
    # NOTE(review): no on_delete argument — fine on Django < 2.0 (defaults to
    # CASCADE) but required from Django 2.0 on; confirm the target version.
    author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name="作者")
    replies = GenericRelation(Reply, object_id_field='object_pk', content_type_field='content_type',
                              verbose_name="回复")
    objects = models.Manager()
    # Posts that are not hidden
    public = PublicManager()
    class Meta:
        verbose_name = "帖子"
        verbose_name_plural = "帖子"
    def __str__(self):
        return self.title
| StarcoderdataPython |
3310957 | <gh_stars>0
def metade(p = 0, form=False):
    """Return half of *p*; as a formatted currency string when *form* is True."""
    metadevalor = p / 2
    # `form is False` (not truthiness) is preserved from the original API.
    return metadevalor if form is False else moeda(metadevalor)
def dobro(p = 0, form=False):
    """Return double *p*; as a formatted currency string when *form* is True."""
    dobrado = p * 2
    # `form is False` (not truthiness) is preserved from the original API.
    return dobrado if form is False else moeda(dobrado)
def aumentar(p = 0, quant = 0, form=False):
    """Return *p* increased by *quant* percent; formatted when *form* is True."""
    novo = p + p * quant / 100
    # `form is False` (not truthiness) is preserved from the original API.
    return novo if form is False else moeda(novo)
def diminuir(p = 0, quant = 0, form=False):
    """Return *p* decreased by *quant* percent; formatted when *form* is True."""
    novo = p - p * quant / 100
    # `form is False` (not truthiness) is preserved from the original API.
    return novo if form is False else moeda(novo)
def moeda(p = 0, formatacao = 'R$'):
    """Format *p* as currency with a comma decimal separator, e.g. 'R$5,00'.

    Note: the replace runs over the whole string (prefix included), matching
    the original behavior.
    """
    bruto = f'{formatacao}{p:.2f}'
    return bruto.replace('.', ',')
def write(msg):
    """Print *msg* centered (width 30) between two dashed rules sized to the
    message. (The center width is fixed while the rule width varies —
    preserved from the original.)"""
    rule = '-' * (len(msg) + 20)
    print(rule)
    print(f'{msg}'.center(30))
    print(rule)
def resumo(p = 0, aumen = 0, redu = 0):
    """Print a summary of price *p*: double, half, and the *aumen*% increase
    and *redu*% reduction, all formatted as currency."""
    # FIX: the original last line had dataset residue ('| StarcoderdataPython')
    # fused onto it, which made the file a syntax error; removed.
    write('RESUMO VALOR')
    print(f' Preço analisado:\t{moeda(p)}')
    print(f' Dobro do preço:\t{dobro(p, True)}')
    print(f' Metade do preço:\t{metade(p, True)}')
    print(f' {aumen}% de aumento:\t{aumentar(p, aumen, True)}')
    print(f' {redu}% de redução:\t{diminuir(p, redu, True)}')
    print(32 * '-')
import requests
# Candidate HTTP proxies, tried from the end of the list.
lis = [
    {'http': '172.16.17.32:8888'},
    {'http': '192.168.3.11:3129'},
    {'http': '172.16.58.3:8181'},
    {'http': '172.16.31.10:8010'},
    {'http': '172.16.31.10:80'},
    {'http': '192.168.3.11:31773'},
]
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
}
url = 'http://www.baidu.com/s?ie=UTF-8&wd=ip'
# Try each proxy until one request succeeds.
r = None
while lis:
    proxy = lis.pop()
    try:
        r = requests.get(url=url, headers=headers, proxies=proxy)
        break
    except Exception as e:
        print(e)
# BUG FIX: the original used `r` unconditionally after the loop, which raised
# NameError when every proxy failed; bail out cleanly instead.
if r is None:
    raise SystemExit('All proxies failed; no response to save.')
print(r.content)
with open('pic/daili1.html', 'wb') as fp:
    fp.write(r.content)
# Open daili1.html:
# the page reports the request's IP/location (e.g. Xi'an, Shaanxi, Unicom),
# which proves the proxy was used, since the local IP is Beijing Unicom.
# Source: DrZlo77/Python_Lesson3
# Read the story text from the file 'Text_story'.
# NOTE(review): `file` shadows the Python 2 builtin and the handle is never
# closed — a `with` block would be preferable; confirm before changing.
file = open('Text_story','r', encoding= 'utf-8')
text_story = file.read()
#==================================================================================
# 1) Strip the punctuation from the text using string methods;
# Remove each punctuation mark individually
text_story_task_1 = text_story
text_story_task_1 = text_story_task_1.replace("."," ")
text_story_task_1 = text_story_task_1.replace(","," ")
text_story_task_1 = text_story_task_1.replace("!"," ")
text_story_task_1 = text_story_task_1.replace("?"," ")
text_story_task_1 = text_story_task_1.replace("—"," ")
text_story_task_1 = text_story_task_1.replace("«"," ")
text_story_task_1 = text_story_task_1.replace("»"," ")
text_story_task_1 = text_story_task_1.replace("("," ")
text_story_task_1 = text_story_task_1.replace(")"," ")
text_story_task_1 = text_story_task_1.replace(";"," ")
text_story_task_1 = text_story_task_1.replace(":"," ")
print(text_story_task_1)
# The same removal, done in a loop
# NOTE(review): unlike the list above, this tuple omits "?" — confirm.
text_story_task_1 = text_story
znaki = (".",",","!","—","«","»","(",")",";",":")
for znak in znaki:
    text_story_task_1 = text_story_task_1.replace(znak," ")
print(text_story_task_1)
#==================================================================================
# 2) Build a list of the words (split);
text_story_task_2 = text_story
list_text = text_story_task_2.split()
print(list_text)
#==================================================================================
# 3) Convert all words to lower case (map);
# Using string methods
text_story_task_3 = text_story
text_story_task_3 = text_story_task_3.lower()
print(text_story_task_3)
# Using map
text_story_task_3 = text_story
list_story_task_3 = list(map(lambda story_text: story_text.lower(), list_text ))
#==================================================================================
# (5) Extra task - lemmatization of the word list
import pymorphy2
pymorphy = pymorphy2.MorphAnalyzer()
morphy_list = []
for elem in list_story_task_3:
    morphy_list.append(pymorphy.parse(elem)[0].normal_form)
text_collect = ' '.join(morphy_list)
print(text_collect)
# text_collect = ''
# for elem in list_story_task_3:
#     text_collect = text_collect + elem
# print(text_collect)
# - Build from the list of step 3 a dict whose keys are the words and whose
#   values are how many times each word appears in the text;
dict_story_text = {}
for elem in list_story_task_3:
    counter = dict_story_text.get(elem,0)
    dict_story_text[elem] = counter + 1
print(dict_story_text)
#==================================================================================
# 4) Print the 5 most frequent words (sort); print the number of distinct words in the text (set);
list_story_text_elem_dict = list(dict_story_text.items()) # turn the dict into a list of (key, value) pairs
list_story_text_elem_dict.sort(key=lambda a: a[1]) # sort by the dict value within each pair
list_story_text_elem_dict.reverse()
print(list_story_text_elem_dict[:5])
# rest_server.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : <NAME> (<EMAIL>)
import os
import uvicorn
import argparse
if __name__ == '__main__':
    # CLI entry point: parse host/port/reload and launch the ASGI app.
    parser = argparse.ArgumentParser(description="args of rest service")
    parser.add_argument("--host", default='0.0.0.0')
    parser.add_argument("-p", "--port", type=int, default=9055)
    parser.add_argument("-r", "--reload", type=int, default=1)
    args = parser.parse_args()
    # --reload is taken as 0/1 on the command line; uvicorn expects a bool.
    uvicorn.run('rest_service.handlers:app', host=args.host, port=args.port, reload=args.reload == 1)
1677640 | """Utils to make train/test splits"""
import random
def flatten(l):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c].

    (Was a lambda assignment; PEP 8 (E731) prefers a def, which also gives
    the function a proper name in tracebacks.)
    """
    return [item for sublist in l for item in sublist]
def _train_test_keys_split(
grouped_keys, n_train, if_insufficient_data='only_train'
):
groups = list(grouped_keys)
if n_train > len(groups):
if if_insufficient_data == 'only_train':
return set(groups), set([])
else:
raise ValueError(
f"Don't know how to handle if_insufficient_data: {if_insufficient_data}"
)
else:
train_groups = random.sample(groups, n_train)
test_groups = set(groups) - set(train_groups)
return (
set(flatten([grouped_keys[g] for g in train_groups])),
set(flatten([grouped_keys[g] for g in test_groups])),
)
def train_test_keys_split(grouped_keys, train_prop=0.8):
    """Split grouped keys, holding out (1 - train_prop) of the groups for test."""
    n_train = int(len(grouped_keys) * train_prop)
    return _train_test_keys_split(grouped_keys, n_train)
def train_test_keys_leave_one_out_split(grouped_keys):
    """Leave-one-out split: train on all groups but one randomly chosen group."""
    n_train = len(grouped_keys) - 1
    return _train_test_keys_split(grouped_keys, n_train)
#
# def group_keys(self, group_keys, keys=None):
# """Make a util data structure that organizes the keys into separate universes.
#
# Args:
# keys: The keys to split
# *group_keys: The sequence of group_keys that define how to group the keys.
# Fundementally, a group_key is a function that takes a key and spits out a hashable value to group by
# But if the specified group key is a string or a tuple of strings,
# the function that groups by the those namedtuple attributes will be
#
# Returns: A nested dictionary of group_keys whose leaves are the lists of the subsets of the input keys that
# match the group_keys path.
#
# Example:
# >>> dacc = PreppedDacc()
# >>> t = dacc.group_keys(['pump_type', ('pump_serial_number', 'session'), len])
# >>> # groups the keys by pump_type, then by ('pump_serial_number', 'session'), then by the length.
# >>> def nested_depth(d, _depth_so_far=0):
# ... if not isinstance(d, dict):
# ... return _depth_so_far
# ... else:
# ... return nested_depth(next(iter(d.values())), _depth_so_far + 1)
# >>> nested_depth(t)
# 3
# """
# if keys is None:
# keys = list(self.filterd_prepped_data)
#
# _key_funcs = list()
# for group_key in group_keys:
# if isinstance(group_key, str):
# # _key_funcs.append(lambda k: sub_namedtuple(k, tuple([group_key])))
# _key_funcs.append(partial(sub_namedtuple, index=(group_key,)))
# elif not callable(group_key):
# _key_funcs.append(partial(sub_namedtuple, index=tuple(group_key)))
# else:
# _key_funcs.append(group_key)
#
# return regroupby(keys, *_key_funcs)
def random_train_test_split_keys(
    self,
    keys=None,
    test_size=0.2,
    group_key=('pump_type', 'test_phase'),
    cannot_be_separated='pump_serial_number',
    keep_phase_9_keys=False,
):
    """Split keys randomly in view of train/test testing.
    Args:
        keys: The keys to split (all of dacc.filterd_prepped_data by default);
            may also be a predicate used to filter them.
        test_size: A proportion (float) or absolute count (int) to assign to test.
        group_key: The key function of fields used to group by.
            This is usually used so as to separate training sets.
        cannot_be_separated: The field of fields that should not be separated in train and test:
            That is, keys agreeing on these fields should be either entirely in train or in test.
        keep_phase_9_keys: Unless True, keys with test_phase == 9 are dropped.
    Returns:
        If group_key is not None: a {group: {'train': key_set, 'test': key_set}, ...} dict;
        otherwise a single {'train': key_set, 'test': key_set} dict.
    """
    if keys is None:
        keys = list(self.filterd_prepped_data)
    elif callable(keys):
        keys_filter = keys
        keys = filter(keys_filter, self.filterd_prepped_data)
    if not keep_phase_9_keys:
        # Systematically remove phase 9 keys
        keys = list(keys)
        n_keys = len(keys)
        keys = list(filter(lambda x: x.test_phase != 9, keys))
        if len(keys) != n_keys:
            from warnings import warn
            warn(
                f'I removed {n_keys - len(keys)} test_phase==9 of the {n_keys} keys you specified'
            )
    if group_key is not None:
        groups = self.group_keys([group_key, cannot_be_separated], keys)
        n_groups = len(groups)
    else:
        n_groups = len(keys)
    # test_size: float -> proportion of groups, int -> absolute group count.
    if isinstance(test_size, float):
        n_train = int(n_groups * (1 - test_size))
    elif isinstance(test_size, int):
        n_train = n_groups - test_size
    else:
        raise TypeError(
            f"I don't recognize that type of test_size: {test_size}"
        )
    def _train_test_keys_split_output_as_dict(*args, **kwargs):
        # Repackage the (train, test) tuple as a dict for the public API.
        train, test = _train_test_keys_split(*args, **kwargs)
        return {'train': train, 'test': test}
    if group_key is not None:
        return {
            group_key: _train_test_keys_split_output_as_dict(
                grouped_keys, n_train
            )
            for group_key, grouped_keys in groups.items()
        }
    else:
        return _train_test_keys_split_output_as_dict(keys, n_train)
| StarcoderdataPython |
1789858 | <filename>GPflow/testing/test_config.py<gh_stars>10-100
import unittest
import os
import tensorflow as tf
import gpflow
from testing.gpflow_testcase import GPflowTestCase
class TestConfigParsing(GPflowTestCase):
    """Exercises gpflow._settings config-file reading and value parsing."""
    def setUp(self):
        # Load the test rc file that sits next to this test module.
        directory = os.path.dirname(os.path.realpath(__file__))
        f = os.path.join(directory, 'gpflowrc_test.txt')
        self.conf = gpflow._settings.read_config_file(f)
        self.settings = gpflow._settings.namedtuplify(self.conf._sections)
    def test(self):
        """Parsed sections expose typed attributes (bool/float/str/tf dtype)."""
        self.assertTrue(all([
            self.settings.first_section.a_bool is False,
            self.settings.first_section.a_float == 1e-3,
            self.settings.first_section.a_string == 'hello',
            self.settings.first_section.a_type is tf.float64,
            self.settings.second_section.a_bool is True,
            self.settings.second_section.another_bool is True,
            self.settings.second_section.yet_another_bool is False]))
    def test_config_not_found(self):
        """GPflow config cannot be found."""
        filename = "./config_not_found.txt"
        self.assertRaises(RuntimeError, gpflow._settings.read_config_file, filename)
    def test_parser(self):
        """parse() rejects non-strings and coerces bools, dtypes and numbers."""
        with self.assertRaises(ValueError):
            gpflow._settings.parse(None)
        with self.assertRaises(ValueError):
            gpflow._settings.parse(12)
        with self.assertRaises(ValueError):
            gpflow._settings.parse([])
        self.assertTrue(gpflow._settings.parse('false') is False)
        self.assertTrue(gpflow._settings.parse('False') is False)
        self.assertTrue(gpflow._settings.parse('true') is True)
        self.assertTrue(gpflow._settings.parse('True') is True)
        self.assertTrue(gpflow._settings.parse('int32') is tf.int32)
        # NOTE(review): `is 32` relies on CPython small-int caching; `== 32`
        # would be the robust check.
        self.assertTrue(gpflow._settings.parse('32') is 32)
        self.assertTrue(gpflow._settings.parse('32.') == 32.)
        self.assertTrue(gpflow._settings.parse('int') == 'int')
        self.assertTrue(gpflow._settings.parse('hello') == 'hello')
        self.assertTrue(gpflow._settings.parse('1E2') == 1e2)
        self.assertTrue(gpflow._settings.parse('1e-9') == 1e-9)
class TestSettingsManager(GPflowTestCase):
    """Behavior of the mutable gpflow.settings object and its context manager."""
    def testRaises(self):
        """Unknown settings raise AttributeError rather than returning None."""
        with self.assertRaises(AttributeError):
            gpflow.settings.undefined_setting_to_raise_error
    def testMutability(self):
        """Settings can be reassigned in place; restore the original at the end."""
        orig = gpflow.settings.verbosity.hmc_verb
        gpflow.settings.verbosity.hmc_verb = False
        self.assertTrue(gpflow.settings.verbosity.hmc_verb is False)
        gpflow.settings.verbosity.hmc_verb = True
        self.assertTrue(gpflow.settings.verbosity.hmc_verb is True)
        gpflow.settings.verbosity.hmc_verb = orig
    def testContextManager(self):
        """temp_settings applies a config only inside the with-block."""
        orig = gpflow.settings.verbosity.hmc_verb
        gpflow.settings.verbosity.hmc_verb = True
        config = gpflow.settings.get_settings()
        config.verbosity.hmc_verb = False
        # Mutating the copy must not touch the live settings...
        self.assertTrue(gpflow.settings.verbosity.hmc_verb is True)
        with gpflow.settings.temp_settings(config):
            # ...until it is activated by the context manager...
            self.assertTrue(gpflow.settings.verbosity.hmc_verb is False)
        # ...and the old value comes back on exit.
        self.assertTrue(gpflow.settings.verbosity.hmc_verb is True)
        gpflow.settings.verbosity.hmc_verb = orig
if __name__ == "__main__":
    # Allow running this test module directly, outside a test runner.
    unittest.main()
| StarcoderdataPython |
from unittest import TestCase
import datetime
from hamcrest import assert_that, is_, contains
from backdrop.core.timeseries import timeseries, HOUR, DAY, WEEK, MONTH, QUARTER, YEAR
from tests.support.test_helpers import d, d_tz
class TestTimeseries(TestCase):
    """timeseries() fills a date range with default buckets and overlays data."""
    def test_returns_a_full_timeseries(self):
        """With no data, every period in [start, end) gets the default value."""
        ts = timeseries(start=d_tz(2013, 4, 1),
                        end=d_tz(2013, 4, 15),
                        period=WEEK,
                        data=[],
                        default={"value": 0})
        assert_that(ts, contains(
            {"_start_at": d_tz(2013, 4, 1), "_end_at": d_tz(2013, 4, 8), "value": 0},
            {"_start_at": d_tz(2013, 4, 8), "_end_at": d_tz(2013, 4, 15), "value": 0},
        ))
    def test_adds_data_at_appropriate_places(self):
        """Supplied data points replace the default bucket for their period."""
        data = [
            {"_start_at": d_tz(2013, 4, 1), "_end_at": d_tz(2013, 4, 8), "value": 12}
        ]
        ts = timeseries(start=d_tz(2013, 4, 1),
                        end=d_tz(2013, 4, 15),
                        period=WEEK,
                        data=data,
                        default={"value": 0})
        assert_that(ts, contains(
            {"_start_at": d_tz(2013, 4, 1), "_end_at": d_tz(2013, 4, 8), "value": 12},
            {"_start_at": d_tz(2013, 4, 8), "_end_at": d_tz(2013, 4, 15), "value": 0},
        ))
    def test_start_and_end_are_expanded_to_week_limits(self):
        """Mid-week start/end dates are widened to whole Monday-to-Monday weeks."""
        data = [
            {"_start_at": d_tz(2013, 4, 8), "_end_at": d_tz(2013, 4, 15), "value": 12},
            {"_start_at": d_tz(2013, 4, 15), "_end_at": d_tz(2013, 4, 22), "value": 23}
        ]
        ts = timeseries(start=d_tz(2013, 4, 5),
                        end=d_tz(2013, 4, 25),
                        period=WEEK,
                        data=data,
                        default={"value": 0})
        assert_that(ts, contains(
            {"_start_at": d_tz(2013, 4, 1), "_end_at": d_tz(2013, 4, 8), "value": 0},
            {"_start_at": d_tz(2013, 4, 8), "_end_at": d_tz(2013, 4, 15), "value": 12},
            {"_start_at": d_tz(2013, 4, 15), "_end_at": d_tz(2013, 4, 22), "value": 23},
            {"_start_at": d_tz(2013, 4, 22), "_end_at": d_tz(2013, 4, 29), "value": 0},
        ))
class TestWeek_start(TestCase):
    """WEEK.start snaps any datetime back to the preceding Monday at midnight."""
    def test_that_it_returns_previous_monday_for_midweek(self):
        midweek = datetime.datetime(2013, 4, 9)
        assert_that(WEEK.start(midweek), is_(datetime.datetime(2013, 4, 8)))
    def test_that_it_truncates_the_time_part(self):
        midweek_evening = datetime.datetime(2013, 4, 9, 23, 12)
        assert_that(WEEK.start(midweek_evening), is_(datetime.datetime(2013, 4, 8)))
    def test_that_it_returns_the_same_day_for_monday(self):
        monday_evening = datetime.datetime(2013, 4, 8, 23, 12)
        assert_that(WEEK.start(monday_evening), is_(datetime.datetime(2013, 4, 8)))
    def test_that_it_returns_the_same_day_for_monday_midnight(self):
        monday_midnight = datetime.datetime(2013, 4, 8, 0, 0)
        assert_that(WEEK.start(monday_midnight), is_(datetime.datetime(2013, 4, 8)))
class TestWeek_end(TestCase):
    """WEEK.end rounds a datetime forward to the following Monday at midnight."""
    def test_that_it_returns_next_monday_for_midweek(self):
        midweek = datetime.datetime(2013, 4, 9)
        assert_that(WEEK.end(midweek), is_(datetime.datetime(2013, 4, 15)))
    def test_that_it_truncates_the_time_part(self):
        midweek_evening = datetime.datetime(2013, 4, 9, 23, 12)
        assert_that(WEEK.end(midweek_evening), is_(datetime.datetime(2013, 4, 15)))
    def test_that_it_returns_the_same_day_for_monday_midnight(self):
        # A datetime already on the boundary is not rounded forward.
        monday_midnight = datetime.datetime(2013, 4, 8, 0, 0)
        assert_that(WEEK.end(monday_midnight), is_(datetime.datetime(2013, 4, 8)))
    def test_that_it_returns_the_next_monday_for_monday_after_midnight(self):
        monday_evening = datetime.datetime(2013, 4, 8, 23, 12)
        assert_that(WEEK.end(monday_evening), is_(datetime.datetime(2013, 4, 15)))
class TestWeek_range(TestCase):
    """WEEK.range yields consecutive (start, end) pairs covering whole weeks."""
    def test_that_it_produces_a_sequence_of_weekly_time_periods(self):
        periods = list(WEEK.range(d_tz(2013, 4, 1), d_tz(2013, 4, 15)))
        assert_that(periods, contains(
            (d_tz(2013, 4, 1), d_tz(2013, 4, 8)),
            (d_tz(2013, 4, 8), d_tz(2013, 4, 15))
        ))
    def test_that_it_expands_the_limits_of_the_range_if_midweek(self):
        # Midweek bounds are widened outwards to full Monday-to-Monday weeks.
        periods = list(WEEK.range(d_tz(2013, 4, 3), d_tz(2013, 4, 19)))
        assert_that(periods, contains(
            (d_tz(2013, 4, 1), d_tz(2013, 4, 8)),
            (d_tz(2013, 4, 8), d_tz(2013, 4, 15)),
            (d_tz(2013, 4, 15), d_tz(2013, 4, 22))
        ))
class TestWeek_valid_start_at(TestCase):
    """WEEK.valid_start_at accepts only a Monday at exactly midnight."""
    def test_start_of_week_is_valid(self):
        monday_midnight = d(2013, 4, 8, 0, 0, 0)
        assert_that(WEEK.valid_start_at(monday_midnight), is_(True))
    def test_start_of_week_plus_second_is_invalid(self):
        one_second_in = d(2013, 4, 8, 0, 0, 1)
        assert_that(WEEK.valid_start_at(one_second_in), is_(False))
    def test_start_of_week_plus_minute_is_invalid(self):
        one_minute_in = d(2013, 4, 8, 0, 1, 0)
        assert_that(WEEK.valid_start_at(one_minute_in), is_(False))
    def test_start_of_week_plus_hour_is_invalid(self):
        one_hour_in = d(2013, 4, 8, 1, 0, 0)
        assert_that(WEEK.valid_start_at(one_hour_in), is_(False))
    def test_start_of_week_plus_day_is_invalid(self):
        tuesday_midnight = d(2013, 4, 9, 0, 0, 0)
        assert_that(WEEK.valid_start_at(tuesday_midnight), is_(False))
class TestMonth_start(TestCase):
    """MONTH.start snaps a datetime back to midnight on the 1st of its month."""
    def test_that_it_returns_first_of_current_month_for_midmonth(self):
        midmonth = d(2013, 4, 9)
        assert_that(MONTH.start(midmonth), is_(d(2013, 4, 1)))
    def test_that_it_truncates_the_time_part(self):
        afternoon = d(2013, 5, 7, 10, 12, 13)
        truncated = MONTH.start(afternoon)
        # Every sub-day component must be zeroed out.
        for component in (truncated.hour, truncated.minute,
                          truncated.second, truncated.microsecond):
            assert_that(component, is_(0))
    def test_that_it_returns_same_day_for_first_of_month(self):
        first_midday = d(2013, 12, 1, 12, 32, 34)
        assert_that(MONTH.start(first_midday), is_(d(2013, 12, 1)))
    def test_that_it_returns_same_day_for_first_of_month_midnight(self):
        first_midnight = datetime.datetime(
            year=2013, month=11, day=1, hour=0, minute=0, second=0,
            microsecond=0)
        assert_that(MONTH.start(first_midnight), is_(first_midnight))
class TestMonth_end(TestCase):
    """MONTH.end rounds a datetime forward to midnight on the 1st of the next month.

    NOTE: this class previously inherited from ``object``, so the unittest
    runner never collected it and every test here was silently skipped; it
    must extend ``TestCase`` like the sibling classes in this module.
    """
    def test_that_it_returns_the_end_of_the_current_month(self):
        some_datetime = d(2013, 10, 4, 10, 23, 43)
        some_other_datetime = d(2013, 10, 4)
        end = MONTH.end(some_datetime)
        other_end = MONTH.end(some_other_datetime)
        assert_that(end, is_(d(2013, 11, 1)))
        assert_that(other_end, is_(d(2013, 11, 1)))
    def test_that_it_truncates_the_time_part(self):
        some_datetime = d(2013, 4, 9, 23, 12)
        end = MONTH.end(some_datetime)
        assert_that(end, is_(d(2013, 5, 1)))
    def test_that_it_returns_the_same_month_for_month_boundary_midnight(self):
        # A datetime already on the boundary is not rounded forward.
        some_datetime = d(2013, 5, 1, 0, 0)
        end = MONTH.end(some_datetime)
        assert_that(end, is_(d(2013, 5, 1)))
    def test_that_it_returns_the_next_month_for_boundary_after_midnight(self):
        some_datetime = d(2013, 5, 1, 0, 12)
        end = MONTH.end(some_datetime)
        assert_that(end, is_(d(2013, 6, 1)))
class TestMonth_range(TestCase):
    """MONTH.range yields consecutive (start, end) pairs covering whole months."""
    def test_that_it_produces_a_sequence_of_monthly_time_periods(self):
        periods = list(MONTH.range(d_tz(2013, 4, 1), d_tz(2013, 6, 1)))
        assert_that(periods, contains(
            (d_tz(2013, 4, 1), d_tz(2013, 5, 1)),
            (d_tz(2013, 5, 1), d_tz(2013, 6, 1))
        ))
    def test_that_it_expands_the_limits_of_the_range_if_midmonth(self):
        # Midmonth bounds are widened outwards to whole calendar months.
        periods = list(MONTH.range(d_tz(2013, 4, 3), d_tz(2013, 5, 19)))
        assert_that(periods, contains(
            (d_tz(2013, 4, 1), d_tz(2013, 5, 1)),
            (d_tz(2013, 5, 1), d_tz(2013, 6, 1)),
        ))
class TestMonth_valid_start_at(TestCase):
    """MONTH.valid_start_at accepts only midnight on the 1st of a month."""
    def test_start_of_month_is_valid(self):
        first_midnight = d(2013, 4, 1, 0, 0, 0)
        assert_that(MONTH.valid_start_at(first_midnight), is_(True))
    def test_start_of_month_plus_second_is_invalid(self):
        one_second_in = d(2013, 4, 1, 0, 0, 1)
        assert_that(MONTH.valid_start_at(one_second_in), is_(False))
    def test_start_of_month_plus_minute_is_invalid(self):
        one_minute_in = d(2013, 4, 1, 0, 1, 0)
        assert_that(MONTH.valid_start_at(one_minute_in), is_(False))
    def test_start_of_month_plus_hour_is_invalid(self):
        one_hour_in = d(2013, 4, 1, 1, 0, 0)
        assert_that(MONTH.valid_start_at(one_hour_in), is_(False))
    def test_start_of_month_plus_day_is_invalid(self):
        second_of_month = d(2013, 4, 2, 0, 0, 0)
        assert_that(MONTH.valid_start_at(second_of_month), is_(False))
class TestDay(TestCase):
    """DAY period: start/end snap to midnight boundaries and range yields one
    (start, end) bucket per day."""
    def test_that_returns_the_beginning_of_the_current_day(self):
        some_datetime = d(2013, 10, 4, 10, 23, 43)
        start = DAY.start(some_datetime)
        assert_that(start, is_(d(2013, 10, 4, 0, 0, 0)))
    def test_that_midday_is_not_a_valid_start_at(self):
        naughty_starttime = d(2013, 10, 18, 12, 00)
        assert_that(DAY.valid_start_at(naughty_starttime), is_(False))
    def test_start_of_day_is_valid(self):
        assert_that(DAY.valid_start_at(d(2013, 10, 18, 0, 0, 0)), is_(True))
    def test_start_of_day_plus_second_is_invalid(self):
        assert_that(DAY.valid_start_at(d(2013, 10, 18, 0, 0, 1)), is_(False))
    def test_start_of_day_plus_minute_is_invalid(self):
        assert_that(DAY.valid_start_at(d(2013, 10, 18, 0, 1, 0)), is_(False))
    def test_start_of_day_plus_hour_is_invalid(self):
        assert_that(DAY.valid_start_at(d(2013, 10, 18, 1, 0, 0)), is_(False))
    def test_that_end_of_the_day_is_the_beginning_of_the_next_day(self):
        late_in_the_day = d(2013, 10, 18, 21, 00)
        assert_that(DAY.end(late_in_the_day), is_(d(2013, 10, 19, 00, 00)))
    def test_that_a_range_of_one_week_gives_us_seven_days(self):
        range = DAY.range(d_tz(2013, 4, 3), d_tz(2013, 4, 10))
        assert_that(list(range), contains(
            (d_tz(2013, 4, 3), d_tz(2013, 4, 4)),
            (d_tz(2013, 4, 4), d_tz(2013, 4, 5)),
            (d_tz(2013, 4, 5), d_tz(2013, 4, 6)),
            (d_tz(2013, 4, 6), d_tz(2013, 4, 7)),
            (d_tz(2013, 4, 7), d_tz(2013, 4, 8)),
            (d_tz(2013, 4, 8), d_tz(2013, 4, 9)),
            (d_tz(2013, 4, 9), d_tz(2013, 4, 10))
        ))
class TestHour(TestCase):
    """HOUR period: start/end snap to whole-hour boundaries and range yields
    one (start, end) bucket per hour."""
    def test_that_returns_the_beginning_of_the_current_hour(self):
        some_datetime = d(2013, 10, 4, 10, 23, 43)
        start = HOUR.start(some_datetime)
        assert_that(start, is_(d(2013, 10, 4, 10, 0, 0)))
    def test_that_middle_of_the_hour_is_not_a_valid_start_at(self):
        middle_of_the_hour = d(2013, 10, 18, 12, 31)
        assert_that(HOUR.valid_start_at(middle_of_the_hour), is_(False))
    def test_start_of_hour_is_valid(self):
        assert_that(HOUR.valid_start_at(d(2013, 10, 18, 12, 0, 0)), is_(True))
    def test_start_of_hour_plus_second_is_invalid(self):
        assert_that(HOUR.valid_start_at(d(2013, 10, 18, 12, 0, 1)), is_(False))
    def test_start_of_hour_plus_minute_is_invalid(self):
        assert_that(HOUR.valid_start_at(d(2013, 10, 18, 12, 1, 0)), is_(False))
    def test_that_returns_the_end_of_the_current_hour(self):
        some_datetime = d(2013, 10, 4, 10, 23, 43)
        end = HOUR.end(some_datetime)
        assert_that(end, is_(d(2013, 10, 4, 11, 0, 0)))
    def test_that_a_range_of_five_hours_gives_us_five_data_points(self):
        range = HOUR.range(d_tz(2013, 4, 3, 12), d_tz(2013, 4, 3, 17))
        assert_that(list(range), contains(
            (d_tz(2013, 4, 3, 12), d_tz(2013, 4, 3, 13)),
            (d_tz(2013, 4, 3, 13), d_tz(2013, 4, 3, 14)),
            (d_tz(2013, 4, 3, 14), d_tz(2013, 4, 3, 15)),
            (d_tz(2013, 4, 3, 15), d_tz(2013, 4, 3, 16)),
            (d_tz(2013, 4, 3, 16), d_tz(2013, 4, 3, 17))
        ))
class TestQuarter(TestCase):
    """QUARTER period: start snaps to Jan/Apr/Jul/Oct 1st at midnight; only
    those instants are valid starts; end rounds forward to the next quarter."""
    def test_that_returns_the_beginning_of_the_first_quarter(self):
        some_datetime = d(2013, 1, 20, 0, 23, 43)
        assert_that(QUARTER.start(some_datetime), is_(d(2013, 1, 1, 0, 0, 0)))
    def test_that_returns_the_beginning_of_the_second_quarter(self):
        some_datetime = d(2013, 5, 20, 0, 23, 43)
        assert_that(QUARTER.start(some_datetime), is_(d(2013, 4, 1, 0, 0, 0)))
    def test_that_returns_the_beginning_of_the_third_quarter(self):
        some_datetime = d(2013, 9, 20, 0, 23, 43)
        assert_that(QUARTER.start(some_datetime), is_(d(2013, 7, 1, 0, 0, 0)))
    def test_that_returns_the_beginning_of_the_fourth_quarter(self):
        some_datetime = d(2013, 12, 4, 10, 23, 43)
        assert_that(QUARTER.start(some_datetime), is_(d(2013, 10, 1, 0, 0, 0)))
    def test_start_of_quarters_are_valid(self):
        assert_that(QUARTER.valid_start_at(d(2013, 1, 1, 0, 0, 0)), is_(True))
        assert_that(QUARTER.valid_start_at(d(2013, 4, 1, 0, 0, 0)), is_(True))
        assert_that(QUARTER.valid_start_at(d(2013, 7, 1, 0, 0, 0)), is_(True))
        assert_that(QUARTER.valid_start_at(d(2013, 10, 1, 0, 0, 0)), is_(True))
    def test_start_of_quarters_plus_second_are_invalid(self):
        assert_that(QUARTER.valid_start_at(d(2013, 1, 1, 0, 0, 1)), is_(False))
        assert_that(QUARTER.valid_start_at(d(2013, 4, 1, 0, 0, 1)), is_(False))
        assert_that(QUARTER.valid_start_at(d(2013, 7, 1, 0, 0, 1)), is_(False))
        assert_that(QUARTER.valid_start_at(d(2013, 10, 1, 0, 0, 1)), is_(False))
    def test_start_of_quarters_plus_minute_are_invalid(self):
        assert_that(QUARTER.valid_start_at(d(2013, 1, 1, 0, 1, 0)), is_(False))
        assert_that(QUARTER.valid_start_at(d(2013, 4, 1, 0, 1, 0)), is_(False))
        assert_that(QUARTER.valid_start_at(d(2013, 7, 1, 0, 1, 0)), is_(False))
        assert_that(QUARTER.valid_start_at(d(2013, 10, 1, 0, 1, 0)), is_(False))
    def test_start_of_quarters_plus_hour_are_invalid(self):
        assert_that(QUARTER.valid_start_at(d(2013, 1, 1, 1, 0, 0)), is_(False))
        assert_that(QUARTER.valid_start_at(d(2013, 4, 1, 1, 0, 0)), is_(False))
        assert_that(QUARTER.valid_start_at(d(2013, 7, 1, 1, 0, 0)), is_(False))
        assert_that(QUARTER.valid_start_at(d(2013, 10, 1, 1, 0, 0)), is_(False))
    def test_start_of_quarters_plus_day_are_invalid(self):
        assert_that(QUARTER.valid_start_at(d(2013, 1, 2, 0, 0, 0)), is_(False))
        assert_that(QUARTER.valid_start_at(d(2013, 4, 2, 0, 0, 0)), is_(False))
        assert_that(QUARTER.valid_start_at(d(2013, 7, 2, 0, 0, 0)), is_(False))
        assert_that(QUARTER.valid_start_at(d(2013, 10, 2, 0, 0, 0)), is_(False))
    def test_start_of_quarters_plus_month_are_invalid(self):
        assert_that(QUARTER.valid_start_at(d(2013, 2, 1, 0, 0, 0)), is_(False))
        assert_that(QUARTER.valid_start_at(d(2013, 5, 1, 0, 0, 0)), is_(False))
        assert_that(QUARTER.valid_start_at(d(2013, 8, 1, 0, 0, 0)), is_(False))
        assert_that(QUARTER.valid_start_at(d(2013, 11, 1, 0, 0, 0)), is_(False))
    def test_that_middle_of_quarters_are_invalid(self):
        middle_first_quarter = d(2013, 1, 10, 0, 0, 0)
        middle_second_quarter = d(2013, 4, 15, 0, 0, 0)
        middle_third_quarter = d(2013, 7, 20, 0, 0, 0)
        middle_fourth_quarter = d(2013, 10, 13, 0, 0, 0)
        assert_that(QUARTER.valid_start_at(middle_first_quarter), is_(False))
        assert_that(QUARTER.valid_start_at(middle_second_quarter), is_(False))
        assert_that(QUARTER.valid_start_at(middle_third_quarter), is_(False))
        assert_that(QUARTER.valid_start_at(middle_fourth_quarter), is_(False))
    def test_end_of_quarter_is_beginning_of_next_quarter(self):
        first_quarter = d(2013, 1, 1, 0, 0, 0)
        second_quarter = d(2013, 4, 1, 0, 0, 0)
        third_quarter = d(2013, 7, 1, 0, 0, 0)
        fourth_quarter = d(2013, 10, 1, 0, 0, 0)
        first_quarter_2014 = d(2014, 1, 1, 0, 0, 0)
        # ``replace(hour=1)`` nudges past the boundary so the end rounds up.
        assert_that(QUARTER.end(first_quarter.replace(hour=1)), is_(second_quarter))
        assert_that(QUARTER.end(second_quarter.replace(hour=1)), is_(third_quarter))
        assert_that(QUARTER.end(third_quarter.replace(hour=1)), is_(fourth_quarter))
        assert_that(QUARTER.end(fourth_quarter.replace(hour=1)), is_(first_quarter_2014))
    def test_range_of_quarters(self):
        range = QUARTER.range(d_tz(2012, 10, 1), d_tz(2013, 12, 30))
        assert_that(list(range), contains(
            (d_tz(2012, 10, 1), d_tz(2013, 1, 1)),
            (d_tz(2013, 1, 1), d_tz(2013, 4, 1)),
            (d_tz(2013, 4, 1), d_tz(2013, 7, 1)),
            (d_tz(2013, 7, 1), d_tz(2013, 10, 1)),
            (d_tz(2013, 10, 1), d_tz(2014, 1, 1))
        ))
class TestYear(TestCase):
    """YEAR period: start snaps to Jan 1st at midnight; only that instant is a
    valid start; range yields one (start, end) bucket per calendar year."""
    def test_year_start_returns_the_beginning_of_the_given_year(self):
        some_datetime = d(2013, 1, 20, 0, 23, 43)
        assert_that(YEAR.start(some_datetime), is_(d(2013, 1, 1, 0, 0, 0)))
    def test_start_of_year_is_valid(self):
        assert_that(YEAR.valid_start_at(d(2013, 1, 1, 0, 0, 0)), is_(True))
    def test_start_of_year_plus_second_is_invalid(self):
        assert_that(YEAR.valid_start_at(d(2013, 1, 1, 0, 0, 1)), is_(False))
    def test_start_of_year_plus_minute_is_invalid(self):
        assert_that(YEAR.valid_start_at(d(2013, 1, 1, 0, 1, 0)), is_(False))
    def test_start_of_year_plus_hour_is_invalid(self):
        assert_that(YEAR.valid_start_at(d(2013, 1, 1, 1, 0, 0)), is_(False))
    def test_start_of_year_plus_day_is_invalid(self):
        assert_that(YEAR.valid_start_at(d(2013, 1, 2, 0, 0, 0)), is_(False))
    def test_start_of_year_plus_month_is_invalid(self):
        assert_that(YEAR.valid_start_at(d(2013, 2, 1, 0, 0, 0)), is_(False))
    def test_end_of_year_is_beginning_of_next_year(self):
        some_datetime = d(2013, 10, 4, 10, 23, 43)
        end = YEAR.end(some_datetime)
        assert_that(end, is_(d(2014, 1, 1, 0, 0, 0)))
    def test_that_a_range_of_five_years_gives_us_five_data_points(self):
        range = YEAR.range(d_tz(2010, 1, 1), d_tz(2015, 1, 1))
        assert_that(list(range), contains(
            (d_tz(2010, 1, 1), d_tz(2011, 1, 1)),
            (d_tz(2011, 1, 1), d_tz(2012, 1, 1)),
            (d_tz(2012, 1, 1), d_tz(2013, 1, 1)),
            (d_tz(2013, 1, 1), d_tz(2014, 1, 1)),
            (d_tz(2014, 1, 1), d_tz(2015, 1, 1)),
        ))
| StarcoderdataPython |
48 | <filename>garaged/src/garage/tf/regressors/gaussian_mlp_regressor_model.py
"""GaussianMLPRegressorModel."""
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from garage.experiment import deterministic
from garage.tf.models import GaussianMLPModel
class GaussianMLPRegressorModel(GaussianMLPModel):
    """GaussianMLPRegressor based on garage.tf.models.Model class.
    This class can be used to perform regression by fitting a Gaussian
    distribution to the outputs.
    Args:
        input_shape (tuple[int]): Input shape of the training data.
        output_dim (int): Output dimension of the model.
        name (str): Model name, also the variable scope.
        hidden_sizes (list[int]): Output dimension of dense layer(s) for
            the MLP for mean. For example, (32, 32) means the MLP consists
            of two hidden layers, each with 32 hidden units.
        hidden_nonlinearity (callable): Activation function for intermediate
            dense layer(s). It should return a tf.Tensor. Set it to
            None to maintain a linear activation.
        hidden_w_init (callable): Initializer function for the weight
            of intermediate dense layer(s). The function should return a
            tf.Tensor.
        hidden_b_init (callable): Initializer function for the bias
            of intermediate dense layer(s). The function should return a
            tf.Tensor.
        output_nonlinearity (callable): Activation function for output dense
            layer. It should return a tf.Tensor. Set it to None to
            maintain a linear activation.
        output_w_init (callable): Initializer function for the weight
            of output dense layer(s). The function should return a
            tf.Tensor.
        output_b_init (callable): Initializer function for the bias
            of output dense layer(s). The function should return a
            tf.Tensor.
        learn_std (bool): Is std trainable.
        init_std (float): Initial value for std.
        adaptive_std (bool): Is std a neural network. If False, it will be a
            parameter.
        std_share_network (bool): Boolean for whether mean and std share
            the same network.
        std_hidden_sizes (list[int]): Output dimension of dense layer(s) for
            the MLP for std. For example, (32, 32) means the MLP consists
            of two hidden layers, each with 32 hidden units.
        min_std (float): If not None, the std is at least the value of min_std,
            to avoid numerical issues.
        max_std (float): If not None, the std is at most the value of max_std,
            to avoid numerical issues.
        std_hidden_nonlinearity (callable): Nonlinearity for each hidden layer
            in the std network.
        std_hidden_w_init (callable): Initializer function for the weight
            of intermediate dense layer(s) in the std network.
        std_hidden_b_init (callable): Initializer function for the bias
            of intermediate dense layer(s) in the std network.
        std_output_nonlinearity (callable): Activation function for output
            dense layer in the std network. It should return a tf.Tensor. Set
            it to None to maintain a linear activation.
        std_output_w_init (callable): Initializer function for the weight
            of output dense layer(s) in the std network.
        std_parameterization (str): How the std should be parametrized. There
            are two options:
            - exp: the logarithm of the std will be stored, and applied a
                exponential transformation
            - softplus: the std will be computed as log(1+exp(x))
        layer_normalization (bool): Bool for using layer normalization or not.
    """
    def __init__(self,
                 input_shape,
                 output_dim,
                 name='GaussianMLPRegressorModel',
                 hidden_sizes=(32, 32),
                 hidden_nonlinearity=tf.nn.tanh,
                 hidden_w_init=tf.initializers.glorot_uniform(
                     seed=deterministic.get_tf_seed_stream()),
                 hidden_b_init=tf.zeros_initializer(),
                 output_nonlinearity=None,
                 output_w_init=tf.initializers.glorot_uniform(
                     seed=deterministic.get_tf_seed_stream()),
                 output_b_init=tf.zeros_initializer(),
                 learn_std=True,
                 adaptive_std=False,
                 std_share_network=False,
                 init_std=1.0,
                 min_std=1e-6,
                 max_std=None,
                 std_hidden_sizes=(32, 32),
                 std_hidden_nonlinearity=tf.nn.tanh,
                 std_hidden_w_init=tf.initializers.glorot_uniform(
                     seed=deterministic.get_tf_seed_stream()),
                 std_hidden_b_init=tf.zeros_initializer(),
                 std_output_nonlinearity=None,
                 std_output_w_init=tf.initializers.glorot_uniform(
                     seed=deterministic.get_tf_seed_stream()),
                 std_parameterization='exp',
                 layer_normalization=False):
        super().__init__(output_dim=output_dim,
                         name=name,
                         hidden_sizes=hidden_sizes,
                         hidden_nonlinearity=hidden_nonlinearity,
                         hidden_w_init=hidden_w_init,
                         hidden_b_init=hidden_b_init,
                         output_nonlinearity=output_nonlinearity,
                         output_w_init=output_w_init,
                         output_b_init=output_b_init,
                         learn_std=learn_std,
                         adaptive_std=adaptive_std,
                         std_share_network=std_share_network,
                         init_std=init_std,
                         min_std=min_std,
                         max_std=max_std,
                         std_hidden_sizes=std_hidden_sizes,
                         std_hidden_nonlinearity=std_hidden_nonlinearity,
                         # Bug fix: these three accepted arguments were
                         # previously never forwarded to the parent, so
                         # user-supplied std-network initializers were
                         # silently ignored (clone() read back the parent's
                         # defaults instead of the passed values).
                         std_hidden_w_init=std_hidden_w_init,
                         std_hidden_b_init=std_hidden_b_init,
                         std_output_nonlinearity=std_output_nonlinearity,
                         std_output_w_init=std_output_w_init,
                         std_parameterization=std_parameterization,
                         layer_normalization=layer_normalization)
        self._input_shape = input_shape
    def network_output_spec(self):
        """Network output spec.
        Return:
            list[str]: List of key(str) for the network outputs.
        """
        return [
            'normalized_dist', 'normalized_mean', 'normalized_log_std', 'dist',
            'mean', 'log_std', 'x_mean', 'x_std', 'y_mean', 'y_std'
        ]
    def _build(self, state_input, name=None):
        """Build model given input placeholder(s).
        Args:
            state_input (tf.Tensor): Place holder for state input.
            name (str): Inner model name, also the variable scope of the
                inner model, if exist. One example is
                garage.tf.models.Sequential.
        Return:
            tfp.distributions.MultivariateNormalDiag: Normlizaed distribution.
            tf.Tensor: Normalized mean.
            tf.Tensor: Normalized log_std.
            tfp.distributions.MultivariateNormalDiag: Vanilla distribution.
            tf.Tensor: Vanilla mean.
            tf.Tensor: Vanilla log_std.
            tf.Tensor: Mean for data.
            tf.Tensor: log_std for data.
            tf.Tensor: Mean for label.
            tf.Tensor: log_std for label.
        """
        # Non-trainable running statistics used to whiten inputs and labels.
        with tf.compat.v1.variable_scope('normalized_vars'):
            x_mean_var = tf.compat.v1.get_variable(
                name='x_mean',
                shape=(1, ) + self._input_shape,
                dtype=np.float32,
                initializer=tf.zeros_initializer(),
                trainable=False)
            x_std_var = tf.compat.v1.get_variable(
                name='x_std_var',
                shape=(1, ) + self._input_shape,
                dtype=np.float32,
                initializer=tf.ones_initializer(),
                trainable=False)
            y_mean_var = tf.compat.v1.get_variable(
                name='y_mean_var',
                shape=(1, self._output_dim),
                dtype=np.float32,
                initializer=tf.zeros_initializer(),
                trainable=False)
            y_std_var = tf.compat.v1.get_variable(
                name='y_std_var',
                shape=(1, self._output_dim),
                dtype=np.float32,
                initializer=tf.ones_initializer(),
                trainable=False)
        normalized_xs_var = (state_input - x_mean_var) / x_std_var
        _, normalized_dist_mean, normalized_dist_log_std = super()._build(
            normalized_xs_var)
        # Since regressor expects [N, *dims], we need to squeeze the extra
        # dimension
        normalized_dist_log_std = tf.squeeze(normalized_dist_log_std, 1)
        # De-normalize the network outputs back into label space.
        with tf.name_scope('mean_network'):
            means_var = normalized_dist_mean * y_std_var + y_mean_var
        with tf.name_scope('std_network'):
            log_stds_var = normalized_dist_log_std + tf.math.log(y_std_var)
        normalized_dist = tfp.distributions.MultivariateNormalDiag(
            loc=normalized_dist_mean,
            scale_diag=tf.exp(normalized_dist_log_std))
        vanilla_dist = tfp.distributions.MultivariateNormalDiag(
            loc=means_var, scale_diag=tf.exp(log_stds_var))
        return (normalized_dist, normalized_dist_mean, normalized_dist_log_std,
                vanilla_dist, means_var, log_stds_var, x_mean_var, x_std_var,
                y_mean_var, y_std_var)
    def clone(self, name):
        """Return a clone of the model.
        It copies the configuration and parameters of the primitive.
        Args:
            name (str): Name of the newly created model. It has to be
                different from source model if cloned under the same
                computational graph.
        Returns:
            garage.tf.policies.GaussianMLPModel: Newly cloned model.
        """
        new_regressor = self.__class__(
            name=name,
            input_shape=self._input_shape,
            output_dim=self._output_dim,
            hidden_sizes=self._hidden_sizes,
            hidden_nonlinearity=self._hidden_nonlinearity,
            hidden_w_init=self._hidden_w_init,
            hidden_b_init=self._hidden_b_init,
            output_nonlinearity=self._output_nonlinearity,
            output_w_init=self._output_w_init,
            output_b_init=self._output_b_init,
            learn_std=self._learn_std,
            adaptive_std=self._adaptive_std,
            std_share_network=self._std_share_network,
            init_std=self._init_std,
            min_std=self._min_std,
            max_std=self._max_std,
            std_hidden_sizes=self._std_hidden_sizes,
            std_hidden_nonlinearity=self._std_hidden_nonlinearity,
            std_hidden_w_init=self._std_hidden_w_init,
            std_hidden_b_init=self._std_hidden_b_init,
            std_output_nonlinearity=self._std_output_nonlinearity,
            std_output_w_init=self._std_output_w_init,
            std_parameterization=self._std_parameterization,
            layer_normalization=self._layer_normalization)
        new_regressor.parameters = self.parameters
        return new_regressor
| StarcoderdataPython |
1736217 | """
I/O for FLAC3D format.
"""
import logging
import struct
import time
import numpy
from ..__about__ import __version__ as version
from .._common import _pick_first_int_data
from .._exceptions import ReadError, WriteError
from .._files import open_file
from .._helpers import register
from .._mesh import Mesh
# meshio cell types accepted on export, mapped to the linear FLAC3D type they
# are written as (higher-order cells collapse to their corner-vertex form),
# keyed by record kind: 3D "zone" vs 2D "face".
meshio_only = {
    "zone": {
        "tetra": "tetra",
        "tetra10": "tetra",
        "pyramid": "pyramid",
        "pyramid13": "pyramid",
        "wedge": "wedge",
        "wedge12": "wedge",
        "wedge15": "wedge",
        "wedge18": "wedge",
        "hexahedron": "hexahedron",
        "hexahedron20": "hexahedron",
        "hexahedron24": "hexahedron",
        "hexahedron27": "hexahedron",
    },
    "face": {
        "triangle": "triangle",
        "triangle6": "triangle",
        "triangle7": "triangle",
        "quad": "quad",
        "quad8": "quad",
        "quad9": "quad",
    },
}
# Vertex count -> meshio cell type, per record kind (used while reading).
numnodes_to_meshio_type = {
    "zone": {4: "tetra", 5: "pyramid", 6: "wedge", 8: "hexahedron"},
    "face": {3: "triangle", 4: "quad"},
}
# meshio cell type -> FLAC3D type keyword emitted in ASCII output.
meshio_to_flac3d_type = {
    "triangle": "T3",
    "quad": "Q4",
    "tetra": "T4",
    "pyramid": "P5",
    "wedge": "W6",
    "hexahedron": "B8",
}
# Vertex index permutations between FLAC3D's ordering and meshio's.
flac3d_to_meshio_order = {
    "triangle": [0, 1, 2],
    "quad": [0, 1, 2, 3],
    "tetra": [0, 1, 2, 3],
    "pyramid": [0, 1, 4, 2, 3],
    "wedge": [0, 1, 3, 2, 4, 5],
    "hexahedron": [0, 1, 4, 2, 3, 6, 7, 5],
}
meshio_to_flac3d_order = {
    "triangle": [0, 1, 2],
    "quad": [0, 1, 2, 3],
    "tetra": [0, 1, 2, 3],
    "pyramid": [0, 1, 3, 4, 2],
    "wedge": [0, 1, 3, 2, 4, 5],
    "hexahedron": [0, 1, 3, 4, 2, 7, 5, 6],
}
# Alternative meshio -> FLAC3D ordering, presumably for zones whose corner
# ordering has the opposite handedness (used by _translate_zones, which
# checks scalar triple products) — TODO confirm.
meshio_to_flac3d_order_2 = {
    "tetra": [0, 2, 1, 3],
    "pyramid": [0, 3, 1, 4, 2],
    "wedge": [0, 2, 3, 1, 5, 4],
    "hexahedron": [0, 3, 1, 4, 2, 5, 7, 6],
}
# Topological dimension stored alongside each group in field_data.
flag_to_numdim = {
    "zone": 3,
    "face": 2,
}
def read(filename):
    """Read a FLAC3D f3grid grid file into a meshio Mesh.

    The file is sniffed first: a NUL byte within the first eight bytes marks
    the grid as binary, otherwise it is parsed as ASCII.
    See <http://code.activestate.com/recipes/173220/>.
    """
    with open_file(filename, "rb") as handle:
        is_binary = b"\x00" in handle.read(8)
    with open_file(filename, "rb" if is_binary else "r") as handle:
        return read_buffer(handle, is_binary)
def read_buffer(f, binary):
    """Read binary or ASCII file.

    Parses points, zone/face cells and group assignments from ``f`` and
    returns a Mesh; group membership (if any) ends up in the
    ``"flac3d:group"`` cell-data array.
    """
    # Leading keyword of an ASCII record -> record kind.
    flags = {
        "Z": "zone",
        "F": "face",
        "ZGROUP": "zone",
        "FGROUP": "face",
    }
    points = []
    point_ids = {}  # FLAC3D point ID -> index into ``points``
    cells = []
    field_data = {}
    # Zones and faces do not share the same cell ID pool in FLAC3D
    # i.e. a given cell ID can be assigned to a zone and a face concurrently
    # mapper value is [cell_index] and later grows to [cell_index, group_idx].
    mapper = {"zone": {}, "face": {}}
    slots = {"zone": set(), "face": set()}
    pidx = 0
    cidx = 0
    gidx = 0
    if binary:
        # Not sure what the first bytes represent, the format might be wrong
        # It does not seem to be useful anyway
        _ = struct.unpack("<2I", f.read(8))
        (num_nodes,) = struct.unpack("<I", f.read(4))
        for pidx in range(num_nodes):
            pid, point = _read_point(f, binary)
            points.append(point)
            point_ids[pid] = pidx
        # Binary layout: all zones + zone groups, then all faces + face groups.
        for flag in ["zone", "face"]:
            (num_cells,) = struct.unpack("<I", f.read(4))
            for _ in range(num_cells):
                cid, cell = _read_cell(f, point_ids, binary)
                cells = _update_cells(cells, cell, flag)
                mapper[flag][cid] = [cidx]
                cidx += 1
            (num_groups,) = struct.unpack("<I", f.read(4))
            for _ in range(num_groups):
                name, slot, data = _read_group(f, binary)
                field_data, mapper[flag] = _update_field_data(
                    field_data,
                    mapper[flag],
                    data,
                    name,
                    gidx + 1,
                    flag,
                )
                slots[flag] = _update_slots(slots[flag], slot)
                gidx += 1
    else:
        # ASCII layout: records may appear in any order, dispatch on keyword.
        line = f.readline().rstrip().split()
        while line:
            if line[0] == "G":
                pid, point = _read_point(line, binary)
                points.append(point)
                point_ids[pid] = pidx
                pidx += 1
            elif line[0] in {"Z", "F"}:
                flag = flags[line[0]]
                cid, cell = _read_cell(line, point_ids, binary)
                cells = _update_cells(cells, cell, flag)
                mapper[flag][cid] = [cidx]
                cidx += 1
            elif line[0] in {"ZGROUP", "FGROUP"}:
                flag = flags[line[0]]
                name, slot, data = _read_group(f, binary, line)
                field_data, mapper[flag] = _update_field_data(
                    field_data,
                    mapper[flag],
                    data,
                    name,
                    gidx + 1,
                    flag,
                )
                slots[flag] = _update_slots(slots[flag], slot)
                gidx += 1
            line = f.readline().rstrip().split()
    if field_data:
        # Assemble one group ID per cell, then split back into per-block arrays.
        num_cells = numpy.cumsum([len(c[1]) for c in cells])
        cell_data = numpy.zeros(num_cells[-1], dtype=int)
        for k, v in mapper.items():
            if not slots[k]:
                continue
            for cid, zid in v.values():
                cell_data[cid] = zid
        cell_data = {"flac3d:group": numpy.split(cell_data, num_cells[:-1])}
    else:
        cell_data = {}
    # Reorder each cell block from FLAC3D's vertex ordering to meshio's.
    return Mesh(
        points=numpy.array(points),
        cells=[(k, numpy.array(v)[:, flac3d_to_meshio_order[k]]) for k, v in cells],
        cell_data=cell_data,
        field_data=field_data,
    )
def _read_point(buf_or_line, binary):
"""Read point coordinates."""
if binary:
pid, x, y, z = struct.unpack("<I3d", buf_or_line.read(28))
point = [x, y, z]
else:
pid = int(buf_or_line[1])
point = [float(l) for l in buf_or_line[2:]]
return pid, point
def _read_cell(buf_or_line, point_ids, binary):
"""Read cell connectivity."""
if binary:
cid, num_verts = struct.unpack("<2I", buf_or_line.read(8))
cell = struct.unpack("<{}I".format(num_verts), buf_or_line.read(4 * num_verts))
is_b7 = num_verts == 7
else:
cid = int(buf_or_line[2])
cell = buf_or_line[3:]
is_b7 = buf_or_line[1] == "B7"
cell = [point_ids[int(l)] for l in cell]
if is_b7:
cell.append(cell[-1])
return cid, cell
def _read_group(buf_or_line, binary, line=None):
    """Read cell group.

    Returns ``(group_name, slot_name, cell_ids)``. In ASCII mode,
    ``line`` is the already-split ZGROUP/FGROUP header line and the cell
    IDs are gathered from the following lines using a tell/seek lookahead
    that stops at the next comment or group header.
    """
    if binary:
        # Group name
        (num_chars,) = struct.unpack("<H", buf_or_line.read(2))
        (name,) = struct.unpack("<{}s".format(num_chars), buf_or_line.read(num_chars))
        name = name.decode("utf-8")
        # Slot name
        (num_chars,) = struct.unpack("<H", buf_or_line.read(2))
        (slot,) = struct.unpack("<{}s".format(num_chars), buf_or_line.read(num_chars))
        slot = slot.decode("utf-8")
        # Zones
        (num_zones,) = struct.unpack("<I", buf_or_line.read(4))
        data = struct.unpack("<{}I".format(num_zones), buf_or_line.read(4 * num_zones))
    else:
        name = line[1].replace('"', "")
        data = []
        slot = "" if "SLOT" not in line else line[-1]
        # Remember the position before each line so the first non-member
        # line can be pushed back for the caller to re-read.
        i = buf_or_line.tell()
        line = buf_or_line.readline()
        while True:
            line = line.rstrip().split()
            if line and (line[0] not in {"*", "ZGROUP"}):
                data += [int(l) for l in line]
            else:
                buf_or_line.seek(i)
                break
            i = buf_or_line.tell()
            line = buf_or_line.readline()
    return name, slot, data
def _update_cells(cells, cell, flag):
    """Append ``cell`` to the running cell list.

    Consecutive cells of the same meshio type are merged into a single
    ``(type, connectivity_list)`` block; a new block is opened otherwise.
    """
    cell_type = numnodes_to_meshio_type[flag][len(cell)]
    if cells and cells[-1][0] == cell_type:
        cells[-1][1].append(cell)
    else:
        cells.append((cell_type, [cell]))
    return cells
def _update_field_data(field_data, mapper, data, name, gidx, flag):
    """Register group ``name`` (1-based index ``gidx``) and tag its cells.

    Each cell ID in ``data`` gets ``gidx`` appended to its ``mapper`` entry;
    ``field_data`` records the group as ``[index, topological_dimension]``.
    """
    for cell_id in data:
        mapper[cell_id].append(gidx)
    field_data[name] = numpy.array([gidx, flag_to_numdim[flag]])
    return field_data, mapper
def _update_slots(slots, slot):
"""Update slot set. Only one slot is supported."""
slots.add(slot)
if len(slots) > 1:
raise ReadError("Multiple slots are not supported")
return slots
def write(filename, mesh, float_fmt=".16e", binary=False):
    """Write FLAC3D f3grid grid file.

    Only 3D cell types are supported; at most one integer cell-data array
    is carried over as the zone/face group assignment.
    """
    if not any(c.type in meshio_only["zone"].keys() for c in mesh.cells):
        raise WriteError("FLAC3D format only supports 3D cells")
    # Pick out material
    material = None
    if mesh.cell_data:
        key, other = _pick_first_int_data(mesh.cell_data)
        if key:
            material = numpy.concatenate(mesh.cell_data[key])
        if other:
            logging.warning(
                "FLAC3D can only write one cell data array. "
                "Picking {}, skipping {}.".format(key, ", ".join(other))
            )
    mode = "wb" if binary else "w"
    with open_file(filename, mode) as f:
        if binary:
            f.write(
                struct.pack("<2I", 1375135718, 3)
            )  # Don't know what these values represent
        else:
            f.write("* FLAC3D grid produced by meshio v{}\n".format(version))
            f.write("* {}\n".format(time.ctime()))
        _write_points(f, mesh.points, binary, float_fmt)
        # Zones first, then faces — the binary layout requires this order.
        for flag in ["zone", "face"]:
            _write_cells(f, mesh.points, mesh.cells, flag, binary)
            _write_groups(f, mesh.cells, material, mesh.field_data, flag, binary)
def _write_points(f, points, binary, float_fmt=None):
"""Write points coordinates."""
if binary:
f.write(struct.pack("<I", len(points)))
for i, point in enumerate(points):
f.write(struct.pack("<I3d", i + 1, *point))
else:
f.write("* GRIDPOINTS\n")
for i, point in enumerate(points):
fmt = "G\t{:8}\t" + "\t".join(3 * ["{:" + float_fmt + "}"]) + "\n"
f.write(fmt.format(i + 1, *point))
def _write_cells(f, points, cells, flag, binary):
    """Write cells (zones or faces, depending on `flag`).

    Zone IDs start at 1; face IDs continue after the total zone count,
    hence the different initialization of `count`.
    """
    if flag == "zone":
        count = 0
        cells = _translate_zones(points, cells)
    else:
        # Faces are numbered after all zones: offset by the zone cell count.
        count = sum(len(c[1]) for c in cells if c.type in meshio_only["zone"])
        cells = _translate_faces(cells)

    if binary:
        # NOTE(review): after translation c[0] holds FLAC3D type names; the
        # membership test against meshio_only[flag] (meshio-keyed dict) looks
        # suspicious — confirm against the reader's key conventions.
        f.write(
            struct.pack(
                "<I", sum(len(c[1]) for c in cells if c[0] in meshio_only[flag])
            )
        )
        for _, cdata in cells:
            num_cells, num_verts = cdata.shape
            # Each record: (cell id, vertex count, 1-based vertex ids...).
            tmp = numpy.column_stack(
                (
                    numpy.arange(1, num_cells + 1) + count,
                    numpy.full(num_cells, num_verts),
                    cdata + 1,
                )
            )
            f.write(
                struct.pack("<{}I".format((num_verts + 2) * num_cells), *tmp.ravel())
            )
            count += num_cells
    else:
        flag_to_text = {
            "zone": ("ZONES", "Z"),
            "face": ("FACES", "F"),
        }
        f.write("* {}\n".format(flag_to_text[flag][0]))
        for ctype, cdata in cells:
            # One "Z/F <type> <id> <v1> <v2> ..." line per cell, 1-based ids.
            fmt = (
                "{} {{}} {{}} ".format(flag_to_text[flag][1])
                + " ".join(["{}"] * cdata.shape[1])
                + "\n"
            )
            for entry in cdata + 1:
                count += 1
                f.write(fmt.format(meshio_to_flac3d_type[ctype], count, *entry))
def _write_groups(f, cells, cell_data, field_data, flag, binary):
    """Write the group section for zones or faces.

    With no cell data, binary output still requires an explicit zero group
    count; ASCII output writes nothing.
    """
    if cell_data is not None:
        groups, labels = _translate_groups(cells, cell_data, field_data, flag)

        if binary:
            # All groups are written into the single slot "Default"
            # (7 characters, hence the fixed "7s" in the format below).
            slot = "Default".encode("utf-8")
            f.write(struct.pack("<I", len(groups)))
            for k in sorted(groups.keys()):
                num_chars, num_zones = len(labels[k]), len(groups[k])
                # Record: label length, label, slot length (7), slot,
                # member count, member cell IDs.
                fmt = "<H{}sH7sI{}I".format(num_chars, num_zones)
                tmp = [
                    num_chars,
                    labels[k].encode("utf-8"),
                    7,
                    slot,
                    num_zones,
                    *groups[k],
                ]
                f.write(struct.pack(fmt, *tmp))
        else:
            flag_to_text = {
                "zone": "ZGROUP",
                "face": "FGROUP",
            }
            f.write("* {} GROUPS\n".format(flag.upper()))
            for k in sorted(groups.keys()):
                f.write('{} "{}"\n'.format(flag_to_text[flag], labels[k]))
                _write_table(f, groups[k])
    else:
        if binary:
            f.write(struct.pack("<I", 0))
def _translate_zones(points, cells):
    """Reorder meshio cells to FLAC3D zones.

    The first four points must form a right-handed coordinate system
    (outward normal vectors), so corner points are reordered according to
    the sign of the scalar triple product of the first four vertices.
    """
    # See <https://stackoverflow.com/a/42386330/353337>
    def slicing_summing(a, b, c):
        # Row-wise scalar triple product a . (b x c) for stacks of vectors.
        c0 = b[:, 1] * c[:, 2] - b[:, 2] * c[:, 1]
        c1 = b[:, 2] * c[:, 0] - b[:, 0] * c[:, 2]
        c2 = b[:, 0] * c[:, 1] - b[:, 1] * c[:, 0]
        return a[:, 0] * c0 + a[:, 1] * c1 + a[:, 2] * c2

    zones = []
    for key, idx in cells:
        if key not in meshio_only["zone"].keys():
            continue

        # Compute scalar triple products of the first four reordered vertices
        key = meshio_only["zone"][key]
        tmp = points[idx[:, meshio_to_flac3d_order[key][:4]].T]
        det = slicing_summing(tmp[1] - tmp[0], tmp[2] - tmp[0], tmp[3] - tmp[0])

        # Positive determinant: primary ordering; otherwise the mirrored one.
        data = numpy.where(
            (det > 0)[:, None],
            idx[:, meshio_to_flac3d_order[key]],
            idx[:, meshio_to_flac3d_order_2[key]],
        )
        zones.append((key, data))

    return zones
def _translate_faces(cells):
    """Map meshio surface cells onto FLAC3D face blocks (reordered columns)."""
    faces = []
    for cell_type, cdata in cells:
        if cell_type not in meshio_only["face"]:
            continue
        flac_type = meshio_only["face"][cell_type]
        faces.append((flac_type, cdata[:, meshio_to_flac3d_order[flac_type]]))
    return faces
def _translate_groups(cells, cell_data, field_data, flag):
    """Convert meshio cell_data into FLAC3D group membership and labels.

    Returns (groups, labels): groups maps each data value to the 1-based IDs
    of the cells carrying it (restricted to the dimension selected by `flag`),
    labels maps each value to its display name.
    """
    # Topological dimension per cell: 2 for faces, 3 for zones.
    num_dims = numpy.concatenate(
        [
            numpy.full(len(block[1]), 2 if block[0] in meshio_only["face"] else 3)
            for block in cells
        ]
    )
    target_dim = flag_to_numdim[flag]

    groups = {}
    for value in numpy.unique(cell_data):
        ids = numpy.nonzero((cell_data == value) & (num_dims == target_dim))[0] + 1
        if ids.size:
            groups[value] = ids

    labels = {value: str(value) for value in groups}
    labels[0] = "None"
    if field_data:
        # Prefer user-supplied names from field_data for matching dimensions.
        labels.update(
            {v[0]: k for k, v in field_data.items() if v[1] == target_dim}
        )
    return groups, labels
def _write_table(f, data, ncol=20):
"""Write group data table."""
nrow = len(data) // ncol
lines = numpy.split(data, numpy.full(nrow, ncol).cumsum())
for line in lines:
if len(line):
f.write(" {}\n".format(" ".join([str(l) for l in line])))
register("flac3d", [".f3grid"], read, {"flac3d": write})
| StarcoderdataPython |
4813394 | # Generated by Django 2.0.8 on 2018-10-14 15:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the SurveyCover model and image/label fields on Survey.

    Auto-generated by Django 2.0.8; field definitions should not be edited
    by hand.
    """

    dependencies = [
        ('feedbacks', '0012_auto_20181013_1230'),
    ]
    operations = [
        migrations.CreateModel(
            name='SurveyCover',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.URLField(verbose_name='image')),
                ('label', models.CharField(blank=True, max_length=50, verbose_name='label')),
                ('label_it', models.CharField(blank=True, max_length=50, null=True, verbose_name='label')),
                ('label_en', models.CharField(blank=True, max_length=50, null=True, verbose_name='label')),
                ('active', models.BooleanField(default=False, verbose_name='active')),
            ],
            options={
                'verbose_name': 'Survey Cover',
                'verbose_name_plural': 'Survey Covers',
            },
        ),
        migrations.AddField(
            model_name='survey',
            name='image',
            field=models.URLField(default='', verbose_name='image'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='survey',
            name='label',
            field=models.CharField(blank=True, max_length=50, verbose_name='label'),
        ),
    ]
| StarcoderdataPython |
4810980 | <reponame>chenzhengda/tensorflow
import tensorflow as tf
from tensorflow.python import ipu
# Configure the IPU device.
# Two IPUs are requested: the pipeline below assigns its layers to stages
# 0 and 1, one stage per IPU.
config = ipu.config.IPUConfig()
config.auto_select_ipus = 2
config.configure_ipu_system()
# Create a dataset for the model.
def create_dataset():
  """Build an infinite, prefetched MNIST training pipeline (batch size 32)."""
  (x_train, y_train), (_, _) = tf.keras.datasets.mnist.load_data()
  x_train = x_train / 255.0  # scale pixels to [0, 1]

  ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))
  ds = ds.shuffle(10000).batch(32, drop_remainder=True)
  ds = ds.map(lambda d, l: (tf.cast(d, tf.float32), tf.cast(l, tf.int32)))
  return ds.repeat().prefetch(16)
dataset = create_dataset()

# Create a strategy for execution on the IPU.
strategy = ipu.ipu_strategy.IPUStrategy()
with strategy.scope():
  # Using standard keras Sequential model.
  model = tf.keras.Sequential([
      tf.keras.layers.Flatten(),
      tf.keras.layers.Dense(256, activation='relu'),
      tf.keras.layers.Dense(128, activation='relu'),
      tf.keras.layers.Dense(10)  # linear output layer: produces logits
  ])

  # IPU-specific arguments passed into separate configuration methods.
  model.set_pipeline_stage_assignment([0, 0, 1, 1])

  # Replication factor is 1 in this example.
  model.set_pipelining_options(gradient_accumulation_steps_per_replica=16)

  # steps_per_execution specified to improve performance.
  # Bug fix: the final Dense layer has no softmax, so the loss must be told
  # it receives logits (from_logits=True); the previous default treated the
  # raw outputs as probabilities, silently degrading training.
  model.compile(steps_per_execution=256,
                loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                optimizer=tf.keras.optimizers.RMSprop())
  model.fit(dataset, epochs=2, steps_per_epoch=128)
| StarcoderdataPython |
1735478 | <gh_stars>0
"""
Copyright 2016 University of Auckland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
from opencmiss.zinc.status import OK as ZINC_OK
from opencmiss.argon.argonerror import ArgonError
class ArgonMaterials(object):
    """Manages and serializes Zinc materials for an Argon document."""

    def __init__(self, zincContext):
        self._zincContext = zincContext
        self._materialsmodule = zincContext.getMaterialmodule()

    def getZincContext(self):
        """Return the Zinc context this object wraps."""
        return self._zincContext

    def deserialize(self, dictInput):
        """Load materials state from a settings dict.

        Raises ArgonError when the Zinc materials module rejects the data.
        """
        description = json.dumps(dictInput)
        if self._materialsmodule.readDescription(description) != ZINC_OK:
            raise ArgonError("Failed to read materials")

    def serialize(self):
        """Return the current materials state as a JSON-compatible dict."""
        return json.loads(self._materialsmodule.writeDescription())
| StarcoderdataPython |
3276765 | #!/usr/bin/env python
'''
Author: <NAME> @ RIKEN
Copyright (c) 2020 RIKEN
All Rights Reserved
See file LICENSE for details.
'''
import os,sys,glob
def init(args, version):
    """Set up the runtime environment and create the output directory.

    Records the package root in the module-global ``base`` and makes the
    bundled ``scripts`` directory importable.  When ``args.do_not_overwrite``
    is set and ``args.outdir`` already contains files, prints an error and
    exits.  ``version`` is accepted for interface compatibility; unused here.
    """
    # pythonpath: make the bundled helper scripts importable
    global base
    base = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
    sys.path.insert(0, os.path.join(base, 'scripts'))
    # make output dir (idiomatic truthiness instead of `is True`/`is False`)
    if args.do_not_overwrite and os.path.exists(args.outdir):
        # strip a single trailing slash so the glob pattern is well-formed
        dir = args.outdir[:-1] if args.outdir[-1] == '/' else args.outdir
        if glob.glob('%s/*' % dir):
            print('Error: %s already exists. Please specify another directory name.' % args.outdir, file=sys.stderr)
            exit(1)
    os.makedirs(args.outdir, exist_ok=True)
def init_geno(args, version):
    """Environment and output-directory setup for the genotyping entry point.

    Same contract as init(): sets the module-global ``base``, prepends the
    bundled ``scripts`` dir to sys.path, and refuses a non-empty output
    directory when ``args.do_not_overwrite`` is set.
    """
    global base
    base = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
    sys.path.insert(0, os.path.join(base, 'scripts'))

    if not args.do_not_overwrite:
        if not os.path.exists(args.outdir):
            os.makedirs(args.outdir, exist_ok=True)
        return
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir, exist_ok=True)
        return
    # Directory exists and overwriting is disallowed: bail out if non-empty.
    target = args.outdir[:-1] if args.outdir.endswith('/') else args.outdir
    if len(glob.glob('%s/*' % target)) >= 1:
        print('Error: %s already exists. Please specify another directory name.' % args.outdir, file=sys.stderr)
        exit(1)
def init_jointcall(args, version):
    """Environment and output-directory setup for the joint-calling entry point.

    Extends ``args.outdir`` with sub-directories derived from the run
    configuration (chunk merging, first/last sample of a chunk file,
    chromosome list), then creates it.  Exits with an error when
    ``args.do_not_overwrite`` is set and the directory is non-empty.
    """
    # pythonpath: make the bundled helper scripts importable
    global base
    base = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
    sys.path.insert(0, os.path.join(base, 'scripts'))
    # make output dir
    if args.chunk_vcf_list is not None:
        args.outdir = os.path.join(args.outdir, 'chunks_merged')
    if args.input_scaffold is not None and args.chunk_f is not None:
        # Name the directory after the first and last sample in the chunk file.
        with open(args.chunk_f) as infile:
            first_line = next(infile)
            first_sample = first_line.split()[0]
            # Bug fix: with a single-line chunk file the old loop variable was
            # never bound (NameError); default the last line to the first.
            last_line = first_line
            for last_line in infile:
                pass
            last_sample = last_line.split()[0]
        dir_name = '%s_to_%s' % (first_sample, last_sample)
        args.outdir = os.path.join(args.outdir, dir_name)
    if args.chr is not None:
        args.outdir = os.path.join(args.outdir, args.chr.replace(',', '_'))
    if args.do_not_overwrite and os.path.exists(args.outdir):
        # strip a single trailing slash so the glob pattern is well-formed
        dir = args.outdir[:-1] if args.outdir[-1] == '/' else args.outdir
        if glob.glob('%s/*' % dir):
            print('Error: %s already exists. Please specify another directory name.' % args.outdir, file=sys.stderr)
            exit(1)
    os.makedirs(args.outdir, exist_ok=True)
def init_reshape_vcf(args, version):
    """Environment and output-directory setup for the reshape-VCF entry point.

    Mirrors init(): records the package root in the module-global ``base``,
    makes the bundled ``scripts`` directory importable, and aborts when
    ``args.do_not_overwrite`` is set but ``args.outdir`` is non-empty.
    """
    global base
    here = os.path.dirname(os.path.realpath(__file__))
    base = os.path.abspath(here)
    sys.path.insert(0, os.path.join(base, 'scripts'))

    exists = os.path.exists(args.outdir)
    if args.do_not_overwrite and exists:
        target = args.outdir[:-1] if args.outdir.endswith('/') else args.outdir
        has_contents = len(glob.glob('%s/*' % target)) >= 1
        if has_contents:
            print('Error: %s already exists. Please specify another directory name.' % args.outdir, file=sys.stderr)
            exit(1)
    elif not exists:
        os.makedirs(args.outdir, exist_ok=True)
def init_build_kmer(args, version):
    """Environment setup for the k-mer building entry point.

    Unlike the other init_* helpers, this one never guards against an
    existing output directory: it always creates it if missing.
    """
    global base
    script_dir = os.path.dirname(os.path.realpath(__file__))
    base = os.path.abspath(script_dir)
    sys.path.insert(0, os.path.join(base, 'scripts'))
    os.makedirs(args.outdir, exist_ok=True)
| StarcoderdataPython |
76161 | """
n = 4
1
2 3
4 5 6
7 8 9 10
11 12 13 14 15
count: i + 1
f(i, j) = i (i + 1) / 2 + 1 + j
1 + 2 + 3 + 4 + ... + n = n (n + 1) / 2
"""
n = int(input())  # number of triangle rows to print

# Naive reference version: carries an explicit running counter k.
# k = 1
# for i in range(n):
#     for _ in range(i + 1):
#         print(k, end=' ')
#         k += 1
#     print()

"""
Time Complexity: O(n^2)
Space Complexity: O(1)
"""
# Closed form: row i starts at the (i*(i+1)/2 + 1)-th integer
# (1 + 2 + ... + i = i*(i+1)/2 numbers precede it).
for i in range(n):
    for j in range(i + 1):
        print(i * (i + 1) // 2 + 1 + j, end=' ')
    print()
| StarcoderdataPython |
116069 | import time
import multiprocessing
class SubprocessFunctionCaller(object):
    """Bridge that lets a subprocess invoke a function living in the host process.

    make_pair(func) returns (host_processor, cli_func): the client side calls
    cli_func(...) from the subprocess; the host side must pump
    host_processor.process_messages() periodically to execute queued calls
    and ship results back.
    """

    class CliFunction(object):
        """Callable proxy used on the client (subprocess) side."""

        def __init__(self, s2c, c2s, lock):
            self.s2c = s2c    # server -> client result queue
            self.c2s = c2s    # client -> server call queue
            self.lock = lock  # serializes concurrent callers

        def __call__(self, *args, **kwargs):
            # Hold the lock for the whole round trip so results cannot be
            # picked up by a different caller.
            self.lock.acquire()
            self.c2s.put ( {'args':args, 'kwargs':kwargs} )
            # Busy-wait (5 ms poll) until the host posts the result.
            while True:
                if not self.s2c.empty():
                    obj = self.s2c.get()
                    self.lock.release()
                    return obj
                time.sleep(0.005)

    class HostProcessor(object):
        """Host-side pump: executes queued calls and returns their results."""

        def __init__(self, s2c, c2s, func):
            self.s2c = s2c
            self.c2s = c2s
            self.func = func

        def process_messages(self):
            # Drain all pending calls; non-blocking when the queue is empty.
            while not self.c2s.empty():
                obj = self.c2s.get()
                result = self.func ( *obj['args'], **obj['kwargs'] )
                self.s2c.put (result)

    @staticmethod
    def make_pair( func ):
        """Create a connected (HostProcessor, CliFunction) pair for `func`."""
        s2c = multiprocessing.Queue()
        c2s = multiprocessing.Queue()
        lock = multiprocessing.Lock()

        host_processor = SubprocessFunctionCaller.HostProcessor (s2c, c2s, func)
        cli_func = SubprocessFunctionCaller.CliFunction (s2c, c2s, lock)
        return host_processor, cli_func
| StarcoderdataPython |
112514 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
This script just show the basic workflow to compute TF-IDF similarity matrix with Gensim
OUTPUT :
clemsos@miner $ python gensim_workflow.py
How to use Gensim to compute TF-IDF similarity step by step
----------
Let's start with a raw corpus :<type 'list'>
STEP 1 : Index and vectorize
----------
We create a dictionary, an index of all unique values: <class 'gensim.corpora.dictionary.Dictionary'>
Then convert convert tokenized documents to vectors: <type 'list'>
Save the vectorized corpus as a .mm file
STEP 2 : Transform and compute similarity between corpuses
----------
We load our dictionary : <class 'gensim.corpora.dictionary.Dictionary'>
We load our vector corpus : <class 'gensim.corpora.mmcorpus.MmCorpus'>
We initialize our TF-IDF transformation tool : <class 'gensim.models.tfidfmodel.TfidfModel'>
We convert our vectors corpus to TF-IDF space : <class 'gensim.interfaces.TransformedCorpus'>
STEP 3 : Create similarity matrix of all files
----------
We compute similarities from the TF-IDF corpus : <class 'gensim.similarities.docsim.MatrixSimilarity'>
We get a similarity matrix for all documents in the corpus <type 'numpy.ndarray'>
Done in 0.011s
'''
from gensim import corpora, models, similarities
from time import time
t0=time()

# Toy corpus: keywords have been extracted and stopwords removed already.
tweets=[['human', 'interface', 'computer'],
['survey', 'user', 'computer', 'system', 'response', 'time'],
['eps', 'user', 'interface', 'system'],
['system', 'human', 'system', 'eps'],
['user', 'response', 'time'],
['trees'],
['graph', 'trees'],
['graph', 'minors', 'trees'],
['graph', 'minors', 'survey']]

print("How to use Gensim to compute TF-IDF similarity step by step")
print('-'*10)
print("Let's start with a raw corpus :%s"%type(tweets))

# STEP 1 : Compile corpus and dictionary
print("STEP 1 : Index and vectorize")
print('-'*10)

# create dictionary: maps each unique token to an integer id
dictionary = corpora.Dictionary(tweets)
dictionary.save('/tmp/tweets.dict') # store the dictionary, for future reference
print("We create a dictionary, an index of all unique values: %s"%type(dictionary))

# compile corpus: each document becomes a bag-of-words vector of
# (token id, count) pairs
raw_corpus = [dictionary.doc2bow(t) for t in tweets]
print("Then convert convert tokenized documents to vectors: %s"% type(raw_corpus))
corpora.MmCorpus.serialize('/tmp/tweets.mm', raw_corpus) # store to disk
print("Save the vectorized corpus as a .mm file")

# STEP 2 : similarity between corpuses
print("STEP 2 : Transform and compute similarity between corpuses")
print('-'*10)

dictionary = corpora.Dictionary.load('/tmp/tweets.dict')
print("We load our dictionary : %s"% type(dictionary))

corpus = corpora.MmCorpus('/tmp/tweets.mm')
print("We load our vector corpus : %s "% type(corpus))

# Transform Text with TF-IDF: fit the weighting model on the raw counts
tfidf = models.TfidfModel(corpus) # step 1 -- initialize a model
print("We initialize our TF-IDF transformation tool : %s"%type(tfidf))

# corpus tf-idf: lazily re-weights each bag-of-words vector
corpus_tfidf = tfidf[corpus]
print("We convert our vectors corpus to TF-IDF space : %s"%type(corpus_tfidf))

# STEP 3 : Create similarity matrix of all files
print("STEP 3 : Create similarity matrix of all files")
print('-'*10)

index = similarities.MatrixSimilarity(tfidf[corpus])
print("We compute similarities from the TF-IDF corpus : %s"%type(index))
index.save('/tmp/deerwester.index')
index = similarities.MatrixSimilarity.load('/tmp/deerwester.index')

sims = index[corpus_tfidf]
print("We get a similarity matrix for all documents in the corpus %s"% type(sims))

print("Done in %.3fs"%(time()-t0))

# Optional: inspect / rank the pairwise similarities.
# print sims
# print list(enumerate(sims))
# sims = sorted(enumerate(sims), key=lambda item: item[1])
# print sims # print sorted (document number, similarity score) 2-tuples
| StarcoderdataPython |
3356520 | import numpy as np
import random
def supervised_model_cv_fit_predict(X_train_df, y_train, X_test_df, model, runs=5):
    """Fit `model` and predict on the test set `runs` times.

    The global `random` module is reseeded with the run index before each
    fit; returns the list of per-run prediction arrays.
    """
    predictions = []
    for run in range(runs):
        random.seed(run)
        model.fit(X_train_df, y_train)
        predictions.append(model.predict(X_test_df))
    return predictions
def pyod_fit_predict(X_train_df, X_test_df, y_train, model, semisupervised=False):
    """Fit a pyod-style detector and return its predictions on the test set.

    When `semisupervised` is truthy, the detector is trained only on rows
    whose label in `y_train` is 0 (inliers).  Idiom fix: boolean flag is
    tested by truthiness instead of `== True`.
    """
    if semisupervised:
        X_train_df = X_train_df[y_train == 0]
    model.fit(X_train_df)
    return model.predict(X_test_df)
def pyod_predict_scores(X_train_df, X_test_df, y_train, model, predict_on='test', semisupervised=False):
    """Fit a pyod-style detector and return anomaly scores.

    `predict_on='test'` scores the test set via `decision_function`;
    `predict_on='train'` returns the detector's stored training scores.
    Robustness fix: an unknown `predict_on` now raises ValueError instead of
    falling through to an unbound variable (NameError).
    """
    if semisupervised:
        X_train_df = X_train_df[y_train == 0]
    model.fit(X_train_df)
    if predict_on == 'test':
        return model.decision_function(X_test_df)
    if predict_on == 'train':
        return model.decision_scores_
    raise ValueError("predict_on must be 'test' or 'train', got {!r}".format(predict_on))
def contamination_to_threshold(contamination, prediction_scores):
    """Return the score cutoff above which the top `contamination` fraction lies."""
    return np.quantile(prediction_scores, 1 - contamination)
def predict_based_on_threshold(threshold, predicted_scores, formula='greater_or_equal'):
    """Binarize scores against `threshold` (1 = anomaly, 0 = normal).

    `formula` selects the comparison: 'greater_or_equal' (default) or
    'greater'.  Robustness fix: an unknown formula now raises ValueError
    instead of reaching an unbound variable (NameError).
    """
    if formula == 'greater_or_equal':
        return [1 if score >= threshold else 0 for score in predicted_scores]
    if formula == 'greater':
        return [1 if score > threshold else 0 for score in predicted_scores]
    raise ValueError("unknown formula: {!r}".format(formula))
def get_thresholds_for_all_contamination_levels(contamination_levels, predicted_scores):
    """Map each contamination level to its corresponding score threshold."""
    return {
        level: contamination_to_threshold(level, predicted_scores)
        for level in contamination_levels
    }
def pyod_per_contamination_level(X_train_df, X_test_df, y_train, contamination_levels, model, predict_on='test',
                                 semisupervised=False):
    """Score once with `model`, then binarize at every contamination level.

    Returns (predictions per level, raw anomaly scores).
    """
    scores = pyod_predict_scores(X_train_df, X_test_df, y_train, model, predict_on, semisupervised)
    thresholds = get_thresholds_for_all_contamination_levels(contamination_levels, scores)
    predictions_at_contamination_levels = {
        level: predict_based_on_threshold(thresh, scores)
        for level, thresh in thresholds.items()
    }
    return predictions_at_contamination_levels, scores
def batch_pyod_per_contamination_level(X_train_df, X_test_df, y_train, contamination_levels, model_dict, predict_on='test',
                                       semisupervised=False):
    """Run pyod_per_contamination_level for every model in `model_dict`.

    `model_dict` maps model names to detector instances; returns
    (per-model predictions, per-model raw scores).
    """
    predictions_by_model = dict(model_dict)
    scores_by_model = dict.fromkeys(model_dict)
    for model_name, model in model_dict.items():
        print('Starting model ', model_name)
        preds, scores = pyod_per_contamination_level(X_train_df, X_test_df, y_train,
                                                     contamination_levels, model, predict_on,
                                                     semisupervised)
        predictions_by_model[model_name] = preds
        scores_by_model[model_name] = scores
    return predictions_by_model, scores_by_model
def supervised_model_per_contamination_level(X_train_df, X_test_df, y_train, contamination_levels, model):
    """Fit a supervised model, score the test set, binarize at each level.

    Scores are the positive-class probabilities.  Level 0 uses a strict
    'greater' comparison so that nothing at exactly the max score is
    flagged; all other levels use 'greater_or_equal'.
    """
    model.fit(X_train_df, y_train)
    scores = model.predict_proba(X_test_df)[:, 1]
    thresholds = get_thresholds_for_all_contamination_levels(contamination_levels, scores)
    predictions_at_contamination_levels = {}
    for level, thresh in thresholds.items():
        formula = 'greater' if level == 0 else 'greater_or_equal'
        predictions_at_contamination_levels[level] = predict_based_on_threshold(
            thresh, scores, formula=formula)
    return predictions_at_contamination_levels, scores
| StarcoderdataPython |
4813757 | """Application base, containing global templates."""
default_app_config = 'pontoon.base.apps.BaseConfig'
| StarcoderdataPython |
1713528 | import numpy as np
class SistemaLinear:
def __init__(self, matriz = None , vetor_constante = None, dimensao = None, tipo = 'C'):
self.matriz = matriz #Recebe a matriz dos coeficientes
self.vetor_constante = vetor_constante # Recebe o vetor de constantates, tambem conhecido como vetor b
self.dimensao = matriz.shape #Recebe uma tupla-ordenada com as dimensões do sistema
self.tipo = tipo #Rece o tipo de sistema, se a instancia possui a matriz de coeficientes e o vetor constante (C), ou se estao numa mesma matriz ,que recebe o nome de matriz extendida (E)
if np.any(vetor_constante) == None and tipo != 'E': # Caso nao seja informado o vetor constante é criado um vetor com zeros no lugar
self.vetor_constante = np.zeros(shape=(self.matriz.shape[0],1))
if tipo == 'E':
self.matriz = matriz[:,:-1] #Recebe a matriz dos coeficientes
self.vetor_constante = matriz[:,-1].reshape((dimensao[0],1)) # Recebe o vetor de constantates, tambem conhecido como vetor b
    def cramer(self):
        """Solve the system by Cramer's rule.

        Returns the solution column vector, or None (after printing a
        message) when the coefficient determinant is zero.  Prints verbose
        progress output in Portuguese.
        """
        print(f'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Método: Cramer <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
        print()
        print(f'Sistema:')
        print(f'{self.matriz}')  # show the coefficient matrix
        print()
        if (np.linalg.det(self.matriz) != 0):
            # Work on a copy so the original matrix is left untouched.
            matriz_transicao = np.copy(self.matriz)
            # dets[0] is det(A); dets[i+1] will hold det(A with column i
            # replaced by b).
            dets = [np.linalg.det(matriz_transicao)]
            vetor_incognitas = np.zeros_like(self.vetor_constante)
            print(f'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Determinantes <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
            for i in range(0,self.dimensao[0],1):  # one determinant per unknown
                # Replace column i with b, take the determinant, then restore.
                matriz_transicao[:,i] = self.vetor_constante.reshape((self.dimensao[0],))
                dets.append(np.linalg.det(matriz_transicao))
                print()
                print(f'Dx[{i+1}]: {dets[i+1]}')
                print()
                matriz_transicao[:,i] = np.copy(self.matriz[:,i])
            print()
            print(f'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Soluções <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
            print()
            for s in range(0,self.dimensao[0],1):
                # Cramer: x_i = det(A_i) / det(A)
                vetor_incognitas[s] = dets[s+1]/dets[0]
                print()
                print(f'x[{s+1}]: {vetor_incognitas[s]}')
                print()
            print(f'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Fim do Método: Cramer <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
            return vetor_incognitas
        else:
            # Singular system: print() returns None, so the caller gets None.
            return print("determinante é igual a 0. Não há solução para o sistema, utilizando este método.")
    def triangular_matriz(self):
        """Forward Gaussian elimination, performed IN PLACE.

        Reduces self.matriz to upper-triangular form and applies the same
        row operations to self.vetor_constante.  No pivoting is done, so a
        zero on the diagonal causes a division by zero.
        """
        print(f'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Triangulando Sistema <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
        print()
        print(f'Sistema:')
        print(f'{self.matriz}')
        print()
        print(f'Vetor Solução:')
        print(f'{self.vetor_constante}')
        print()
        for k in range(0,self.matriz.shape[0]-1,1):  # pivot column
            #print(f'k: {k + 1}')
            for i in range(k + 1,self.matriz.shape[0],1):  # rows below the pivot
                # Multiplier for the row operation L_i = L_i - m * L_k.
                m = self.matriz[i,k]/self.matriz[k,k]
                self.matriz[i,k] = 0  # element below the diagonal becomes zero
                #print(f'i: {i+1}')
                for j in range(k + 1,self.matriz.shape[0],1):
                    # Update the remaining entries of row i.
                    self.matriz[i,j] = self.matriz[i,j] - (m*(self.matriz[k,j]))
                    #print(f'j: {j+1}')
                print(f'Triangulando.....')
                print(f'{self.matriz}')  # partially triangulated state
                print()
                # Apply the same operation to the right-hand side.
                self.vetor_constante[i,0] = self.vetor_constante[i,0] - (m*(self.vetor_constante[k,0]))
                print(f'Vetor Solução:')
                print(f'{self.vetor_constante}')
                print()
        print(f'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Fim da Triangulação <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
def eliminacao_gaussiana(self):
self.triangular_matriz() #É necessário triangular o sistema para utilizar esse método
print()
print()
print(f'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Método: Eliminação Gaussiana <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
print()
print(f'Sistema:')
print(f'{self.matriz}')#Exibe a matriz
print()
print(f'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Soluções <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
print()
x = np.zeros_like(self.vetor_constante)#Vetor incógnita, ou vetor x
x[-1 ,0] = self.vetor_constante[-1,0]/self.matriz[-1,-1] #O ultimao valor é xn = b_n/ann, a partir disso é possivel substituir o valor de xn nas outras linhas para obter x_(n-1) até x_1
for i in range(self.dimensao[0]-1,-1,-1): #Loop responsavel por armazenar x_(n-1) até x_1. Como x_1 é na verdade x_0, range possui intervalo aberto [a,b[ então é preciso ir até -1 para i ser igual a 0 e preencher x
soma = 0
for j in range(i+1,self.dimensao[0],1): #Loop responsavel por realizar os calculos de x
soma += self.matriz[i,j]*x[j,0] #Somando a_ij*x_j de i entre [n-1,-1[ com j = i +1 entre [i+1,n]
x[i] = (self.vetor_constante[i] - soma)/matriz[i,i] #Calulo de x, x_(n-1) = (b_(n-1) - soma)/a_(n-1,n-1)
print('vetor x:')
print(f'{x}') #Exibindo x
print(f'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Fim do Método <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
return x
def convergencia_linhas(self):
print(f'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Teste de convergencia: Critéro de linha <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
print()
alfas = np.zeros_like(self.vetor_constante)#Vetor que armazena os valores de alfa
print(f'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Alfas <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
print()
for i in range(self.dimensao[0]):#Loop para armazenar os valores de alfa
soma = 0
for j in range(self.dimensao[0]-1):#Loop responsavel pela soma dos elementos da linha i
if (i==0):
soma += np.abs(self.matriz[i,j+1])#Caso a estejamos na peimeira linha não poderemos utilizar seu primeira elemento, pois ele faz parte da diagonal principal. Entao somamos a partir do proximo elemento
elif (i == j):
soma += np.abs(self.matriz[i,j+1])#Caso o nº da linha coincidir com o nº da coluna entao, ou seja, caso seja elemento da diagonal principal, somamos o proximo elemento da linha
else:
soma += np.abs(self.matriz[i,j])#Para qualquer outro caso realize a soma do elementos da linha
alfas[i,0] = soma/np.abs(self.matriz[i,i])#Armazena o valor de alfa na linha i até n
print(f'alfa[{i+1}]: {alfas[i,0]}')#Exibe os valores do vetor alfa
print()
max_alpha = np.max(alfas) #Armazeno o maior valor de alfa em módulo
print(f'Alfa máximo: {max_alpha}') #Exibe o maior valor de alfa em módulo
if max_alpha < 1: #Se o maior valor de alfa em módulo for menor do que 1 ele converge, e então, o sistema pode ser resolvido com o método de Gauss-Jacobi
print("O método de Gauss-Jacobi pode ser usado neste Sistema Linear")
return True
else: #Caso dele nao convergir o método de Gauss-Jacobi nao convirgirá para as soluções desse sistema
print()
print("O método não converge para este sistema")
print("O método de Gauss-Jacobi pode ser usado neste Sistema Linear")
print()
    def gauss_jacobi(self,tol = 10**(-3),iteracoes=30):
        """Iterative Gauss-Jacobi solver.

        Runs only when convergencia_linhas() returns True.  Starts from the
        initial guess x_i = b_i / a_ii and iterates until the max absolute
        change between successive approximations drops below `tol` or
        `iteracoes` iterations have been performed.  Prints verbose progress
        in Portuguese; does not return the solution (it is only printed).

        NOTE(review): the non-convergent branch prints a contradictory
        "pode ser usado" message — looks like a copy-paste slip.
        """
        if self.convergencia_linhas() == True:  # solve only when the row criterion holds
            iteracao = 1
            print(f'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Método: Gauss-Jacobi <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
            print()
            print(f'Sistema:')
            print(f'{self.matriz}')
            print()
            x = np.zeros_like(self.vetor_constante)  # current approximation x^(k)
            # Initial guess: x_i = b_i / a_ii.
            for i in range(0,self.dimensao[0],1):
                x[i,0] = self.vetor_constante[i,0]/self.matriz[i,i]
            x_next = np.zeros(shape = x.shape)  # next approximation x^(k+1)
            while iteracao <= iteracoes:
                print(f'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> {iteracao}ª iteração. <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
                print()
                print(f'Nº máximo de iterações {iteracoes}')
                print(f'{iteracao}ª iteração de x')
                print(f'{x}')
                print()
                print()
                print(f'Nova aproximação:')
                print()
                for i in range(self.matriz.shape[0]):
                    # soma accumulates b_i - sum_{j != i} a_ij * x^(k)_j.
                    soma = self.vetor_constante[i,0]
                    for j in range(self.matriz.shape[1]):
                        if (i != j):
                            soma -= self.matriz[i,j]*x[j,0]
                    # Jacobi update: x^(k+1)_i = soma / a_ii.
                    x_next[i,0] = soma/self.matriz[i,i]
                    print(f'x_next[{i+1}] = {x_next[i,0]}')
                # Stopping criterion: max absolute componentwise change.
                erro = np.max(np.abs(x_next-x))
                print()
                print(f'Dados sobre a iteração ')
                print(f'erro: {erro}')
                print()
                print(f'x_next:')
                print(x_next)
                print()
                if erro < tol:
                    # Converged: x^(k+1) is accepted as the solution.
                    print()
                    print('Solunção final')
                    print(x_next)
                    print(f'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Fim desta iteração <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
                    print()
                    print(f'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Fim do Método <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
                    break
                else:
                    # Not converged yet: x^(k) takes the values of x^(k+1).
                    x = np.copy(x_next)
                    print(f'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Fim desta iteração <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
                    print()
                iteracao += 1  # advance the iteration counter
        else:
            print()
            print("O método não converge para este sistema")
            print("O método de Gauss-Jacobi pode ser usado neste Sistema Linear")
            print()
    def convergencia_sassenfeld(self):
        """Sassenfeld convergence criterion for Gauss-Seidel.

        Computes the beta coefficients of the coefficient matrix and returns
        True when max(beta) < 1 (convergence guaranteed).  NOTE(review): the
        non-convergent branch has no explicit return, so it yields None; the
        caller compares with `== True`, which happens to work.
        """
        print(f'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Teste de convergencia: Critério de Sassenfeld <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
        print()
        betas = np.zeros_like(self.vetor_constante)  # column vector that accumulates the beta_i values
        print(f'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Betas <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
        print()
        for i in range(self.dimensao[0]):  # one beta per row of the system
            soma = 0
            for j in range(self.dimensao[1]):  # accumulate the weighted row sum
                # NOTE(review): precedence is ((i != j) and (i == 0)) or (i < j),
                # i.e. raw |a_ij| is summed for the whole first row and for every j > i ...
                if (i != j ) and (i == 0) or (i<j):
                    soma += (np.abs(self.matriz[i,j])) # add |a_ij|
                elif (i!=j) and (i != 0):
                    soma += (np.abs(self.matriz[i,j]))*betas[j]  # ... while columns j < i are weighted by the already-known beta_j
            betas[i,0] = soma/(np.abs(self.matriz[i,i]))  # beta_i = row sum / |a_ii|
            print(f'beta[{i+1}]: {betas[i,0]}')  # show beta_i
        print()
        max_beta = np.max(betas)
        print(f'Beta máximo: {max_beta}')  # largest beta decides convergence
        print()
        if max_beta < 1: # max(beta) < 1 => Gauss-Seidel converges for this system
            print("O método de Gauss-Seidel pode ser usado neste Sistema Linear")
            print()
            print(f'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Fim do Método <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
            return True
        else:
            print()
            print("O método não converge para este sistema")
            # NOTE(review): the next message says the method CAN be used although the
            # test just failed — almost certainly a copy/paste error ("não pode ser usado").
            print("O método de Gauss-Seidel pode ser usado neste Sistema Linear")
            print()
            print(f'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Fim do Método <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
            print()
    def gauss_seidel(self,iteracoes=30,tol = 10**(-3)):
        """Solve the linear system iteratively with Gauss-Seidel.

        Runs only if the Sassenfeld criterion passes.  Starts from the
        initial guess x_i = b_i / a_ii and iterates until the max-norm
        difference between successive iterates drops below `tol` or
        `iteracoes` iterations are reached.  Results are printed, not returned.
        """
        if self.convergencia_sassenfeld() == True: # only proceed when convergence is guaranteed
            print(f'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Método: Gauss-Seidel <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
            print()
            print(f'Sistema:')
            print(f'{self.matriz}')  # show the coefficient matrix
            print()
            iteracao = 1 # iteration counter used by the while loop below
            x = np.zeros_like(self.vetor_constante)  # current approximation x^(k)
            for i in range(0,self.matriz.shape[0],1):  # initial guess: x_i = b_i / a_ii
                x[i,0] = self.vetor_constante[i,0]/self.matriz[i,i] # x_i = b_i/a_ii
            x_next = np.zeros_like(x)  # next approximation x^(k+1)
            while iteracao <= iteracoes: # iterate up to the caller-supplied (or default) limit
                print(f'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> {iteracao}ª iteração. <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
                print()
                print(f'Nº máximo de iterações {iteracoes}') # show the iteration cap
                print(f'{iteracao}ª iteração de x')  # which iterate of x we are at
                print(f'{x}') # current x
                print()
                print()
                print(f'Nova aproximação:')
                print()
                for i in range(self.matriz.shape[0]):  # build each component of x^(k+1)
                    soma = self.vetor_constante[i,0] # start from b_i
                    for j in range(self.matriz.shape[1]): # subtract the off-diagonal contributions
                        if (i > j): # columns already updated this sweep use x^(k+1)
                            soma -= self.matriz[i,j]*x_next[j,0] # subtract a_ij * x^(k+1)_j (Gauss-Seidel uses fresh values)
                        elif (i < j): # columns not yet updated still use x^(k)
                            soma -= self.matriz[i,j]*x[j,0]  # subtract a_ij * x^(k)_j
                    x_next[i,0] = soma/self.matriz[i,i] # x^(k+1)_i = (b_i - sums) / a_ii
                    print(f'x_next[{i+1}] = {x_next[i,0]}')  # show the new component
                erro = np.max(np.abs(x_next - x))  # max-norm distance between successive iterates
                print()
                print(f'Dados sobre a iteração ')
                print(f'erro: {erro}')
                print()
                print(f'x_next:')
                print(x_next)
                print()
                if erro > tol:  # not converged yet: promote x^(k+1) to x^(k) and continue
                    x = np.copy(x_next)  # copy so later sweeps do not alias x_next
                    print(f'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Fim desta iteração <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
                    print()
                    iteracao += 1
                else:  # converged: x^(k+1) is the solution
                    print(f'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Solução <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
                    print(x_next)  # final answer
                    print()
                    print(f'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Fim do Método <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
                    break
| StarcoderdataPython |
175777 | import os
from java.awt import Color, GridLayout
from javax.swing import JPanel, JComboBox, JLabel, JFrame, JScrollPane, JColorChooser, JButton, JSeparator, SwingConstants, SpinnerNumberModel, JSpinner, BorderFactory, JCheckBox
from net.miginfocom.swing import MigLayout
from ij import IJ, WindowManager, ImagePlus, ImageStack, VirtualStack
from ij.process import Blitter
from script.imglib.math import Multiply, Difference, Subtract, Xor, Add, Or, Min
from script.imglib.color import Red, Green, Blue, RGBA
from mpicbg.imglib.image.display.imagej import ImageJFunctions as IJF
def AWTColorToArray(color):
    """Return the [red, green, blue] channel values of a java.awt.Color."""
    return [channel() for channel in (color.getRed, color.getGreen, color.getBlue)]
class StackOverlay:
    """Jython/ImageJ plugin dialog: composites one open image stack over another.

    Building the object immediately shows the Swing dialog; runs under Jython 2
    (note the Python-2 print statements).
    """
    def __init__(self):
        self.frame = None
        self.overlayColorPreviewLabel = None
        self.showStackOverlayWindow()
        # NOTE(review): assigned after the call above, which itself sets
        # self.overlayColor to Color.red — this final None assignment clobbers it.
        self.overlayColor = None
    def onQuit(self, e):
        # Cancel button handler: tear the window down.
        print "Exiting..."
        self.frame.dispose()
    def showColorChooser(self, e):
        # "Select color..." handler: ask for a color and update the preview swatch.
        colorChooser = JColorChooser()
        self.overlayColor = colorChooser.showDialog(self.frame, "Choose color", Color.red)
        self.overlayColorPreviewLabel.setBackground(self.overlayColor)
    def showStackOverlayWindow(self):
        # Build and show the whole dialog: image pickers, style options, buttons.
        all = JPanel()
        all.setLayout(MigLayout())
        self.imageIDs = WindowManager.getIDList()
        self.imageNames = []
        if self.imageIDs is None:
            # Nothing open in ImageJ — bail out before building any widgets.
            IJ.error("No open images", "Stack Overlay requires at least one image to be already open.")
            return
        for i in self.imageIDs:
            self.imageNames.append(WindowManager.getImage(i).getTitle())
        self.baseImageBox = JComboBox(self.imageNames)
        baseImageBoxLabel = JLabel("Base image")
        self.baseImageBox.setSelectedIndex(0)
        all.add(baseImageBoxLabel)
        all.add(self.baseImageBox, "wrap")
        self.overlayImageBox = JComboBox(self.imageNames)
        overlayImageBoxLabel = JLabel("Overlay image")
        if len(self.imageNames) > 1:
            # Default the overlay picker to the second open image when available.
            self.overlayImageBox.setSelectedIndex(1)
        all.add(overlayImageBoxLabel)
        all.add(self.overlayImageBox, "wrap")
        all.add(JSeparator(SwingConstants.HORIZONTAL), "span, wrap")
        overlayStyleFrame = JPanel()
        overlayStyleFrame.setLayout(MigLayout())
        overlayStyleFrame.setBorder(BorderFactory.createCompoundBorder(BorderFactory.createTitledBorder("Overlay Style"), BorderFactory.createEmptyBorder(5,5,5,5)))
        colorLabel = JLabel("Overlay color")
        self.overlayColorPreviewLabel = JLabel("    ")
        self.overlayColorPreviewLabel.setBorder(BorderFactory.createEmptyBorder(0,0,1,0))
        self.overlayColorPreviewLabel.setOpaque(True)
        self.overlayColorPreviewLabel.setBackground(Color.red)
        self.overlayColor = Color.red
        colorPicker = JColorChooser()
        colorPicker.setPreviewPanel(self.overlayColorPreviewLabel)
        colorButton = JButton("Select color...", actionPerformed=self.showColorChooser)
        opacityLabel = JLabel("Overlay opacity (%)")
        opacitySpinnerModel = SpinnerNumberModel(100, 0, 100, 1)  # percent: initial 100, range 0-100, step 1
        self.opacitySpinner = JSpinner(opacitySpinnerModel)
        overlayStyleFrame.add(colorLabel)
        overlayStyleFrame.add(self.overlayColorPreviewLabel)
        overlayStyleFrame.add(colorButton, "wrap")
        overlayStyleFrame.add(opacityLabel)
        overlayStyleFrame.add(self.opacitySpinner, "wrap")
        all.add(overlayStyleFrame, "span, wrap")
        self.virtualStackCheckbox = JCheckBox("Use Virtual Stack", True)
        all.add(self.virtualStackCheckbox, "span, wrap")
        # TODO: add non-thermonuclear cancel button functionality
        overlayCancelButton = JButton("Cancel", actionPerformed=self.onQuit)
        overlayStartButton = JButton("Overlay images", actionPerformed=self.overlayImages)
        all.add(overlayCancelButton, "gapleft push")
        all.add(overlayStartButton, "gapleft push")
        self.frame = JFrame("Stack Overlay")
        self.frame.getContentPane().add(JScrollPane(all))
        self.frame.pack()
        self.frame.setLocationRelativeTo(None)
        self.frame.setVisible(True)
    def overlayImages(self, e):
        # "Overlay images" handler: wire the chosen stacks into an OverlayVirtualStack.
        impBase = WindowManager.getImage(self.imageIDs[self.baseImageBox.getSelectedIndex()])
        refBase = impBase.getStack().getProcessor(1)
        impOverlay = WindowManager.getImage(self.imageIDs[self.overlayImageBox.getSelectedIndex()])
        refOverlay = impOverlay.getStack().getProcessor(1)
        print "Overlaying for stack sizes " + str(impBase.getStackSize()) + "/" + str(impOverlay.getStackSize()) + "..."
        stack = None
        if self.virtualStackCheckbox.isSelected():
            stack = OverlayVirtualStack()
            stack.overlayOpacity = float(self.opacitySpinner.getValue())/100.0  # percent -> [0.0, 1.0]
            stack.overlayColor = AWTColorToArray(self.overlayColorPreviewLabel.getBackground())
            stack.base = impBase
            stack.overlay = impOverlay
            ImagePlus("Stack Overlay from " + self.imageNames[self.baseImageBox.getSelectedIndex()] + " and " + self.imageNames[self.overlayImageBox.getSelectedIndex()], stack).show()
        else:
            IJ.error("Not implemented yet", "Using normal stacks is not implemented yet. Please use the Virtual Stack option.")
def blendImages(base, overlay, mode="screen"):
print type(base), type(overlay)
if mode == "screen":
image = None
if mode == "multiply":
image = Multiply(base, overlay)
if mode == "add":
image = Add(base, overlay)
if mode == "difference":
image = Difference(base, overlay)
return image
class OverlayVirtualStack(VirtualStack):
def __init__(self):
self.last = None
self.overlayColor = [255, 255, 255]
self.overlayOpacity = 1.0
self.blendMode = "screen"
self.base = None
self.overlay = None
def getProcessor(self, i):
overlay = IJF.wrap(ImagePlus("", self.overlay.getStack().getProcessor(i)))
base = self.base.getStack().getProcessor(i).convertToRGB()
R = Min(overlay, self.overlayColor[0])
G = Min(overlay, self.overlayColor[1])
B = Min(overlay, self.overlayColor[2])
print "Opacity is " + str(self.overlayOpacity)
overlayrgb = IJF.copyToImagePlus(RGBA(R, G, B, self.overlayOpacity).asImage())
base.copyBits(overlayrgb.getProcessor(), 0, 0, Blitter.COPY_ZERO_TRANSPARENT)
baseImage = IJF.wrap(ImagePlus("", base))
self.last = IJF.displayAsVirtualStack(baseImage).getProcessor()
return self.last
def getSize(self):
return self.base.getStackSize()
def getSliceLabel(self, i):
return str(i)
def getWidth(self):
self.last.getWidth()
def getHeight(self):
self.last.getHeight()
def getPixels(self, i):
return self.getProcessor(i).getPixels()
def setPixels(self, pixels, i):
pass
# Instantiating the plugin shows the dialog immediately as a side effect.
stackOverlay = StackOverlay()
print "Done."
| StarcoderdataPython |
3231302 | # TestSwiftRegex.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""
Test Swift's regex support
"""
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
class TestSwiftRegex(TestBase):
    """lldb test: formatting and expression evaluation of Swift Regex values."""
    mydir = TestBase.compute_mydir(__file__)
    def setUp(self):
        # Resolve main.swift once so every test can set the same breakpoint.
        TestBase.setUp(self)
        self.main_source = "main.swift"
        self.main_source_spec = lldb.SBFileSpec(self.main_source)
    @swiftTest
    def test_swift_regex(self):
        """Test Swift's regex support"""
        self.build()
        lldbutil.run_to_source_breakpoint(
            self, 'Set breakpoint here', self.main_source_spec)
        # Literal regex with three captures: check both the variable formatter (v)
        # and the expression/object printer (po) output.
        self.expect('v regex',
                    substrs=['_StringProcessing.Regex<(Substring, Substring, Substring, Substring)>) regex = {'])
        self.expect('po regex',
                    substrs=['Regex<(Substring, Substring, Substring, Substring)>'])
        # RegexBuilder DSL value with a single whole-match Substring.
        self.expect('v dslRegex',
                    substrs=['(_StringProcessing.Regex<Substring>) dslRegex = {'])
        self.expect('po dslRegex',
                    substrs=['Regex<Substring>'])
    @swiftTest
    # Expected to fail because of availability checking while Regex support isn't stabilized
    @expectedFailureDarwin
    def test_swift_regex_in_exp(self):
        """Test Swift's regex support"""
        self.build()
        lldbutil.run_to_source_breakpoint(
            self, 'Set breakpoint here', self.main_source_spec)
        # Bare-slash regex literals need an experimental setting before `e -- /.../` parses.
        self.runCmd(
            "settings set target.experimental.swift-enable-bare-slash-regex true")
        self.expect('e -- /Order from <(.*)>, type: (.*), count in dozen: ([0-9]+)/',
                    substrs=['_StringProcessing.Regex<(Substring, Substring, Substring, Substring)>'])
| StarcoderdataPython |
137967 | <reponame>arthurtibame/cvat<filename>serverless/openvino/omz/public/faster_rcnn_inception_v2_coco/nuclio/model_handler.py
# Copyright (C) 2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
import os
from model_loader import ModelLoader
class ModelHandler:
    """Wraps the OpenVINO Faster R-CNN model and converts raw detections to
    CVAT-style rectangle annotations."""

    def __init__(self, labels):
        """Load the network from MODEL_PATH (or the default OMZ FP32 location).

        :param labels: mapping of class index -> label string.
        """
        model_dir = os.environ.get(
            "MODEL_PATH",
            "/opt/nuclio/open_model_zoo/public/faster_rcnn_inception_v2_coco/FP32")
        xml_path = os.path.join(model_dir, "faster_rcnn_inception_v2_coco.xml")
        bin_path = os.path.join(model_dir, "faster_rcnn_inception_v2_coco.bin")
        self.model = ModelLoader(xml_path, bin_path)
        self.labels = labels

    def infer(self, image, threshold):
        """Run detection on *image* and keep boxes scoring at least *threshold*.

        Each raw detection row is [_, class_id, score, x1, y1, x2, y2] with
        coordinates normalised to [0, 1]; they are scaled to pixel units here.
        """
        detections = self.model.infer(image)[0][0]
        results = []
        for det in detections:
            score = det[2]
            if score < threshold:
                continue
            results.append({
                "confidence": str(score),
                "label": self.labels.get(int(det[1]), "unknown"),
                "points": [det[3] * image.width, det[4] * image.height,
                           det[5] * image.width, det[6] * image.height],
                "type": "rectangle",
            })
        return results
3387563 | <reponame>defianceblack/PyPattyrn<gh_stars>1000+
from abc import ABCMeta, abstractmethod
from unittest import TestCase
from pypattyrn.structural.composite import Composite
class CompositeTestCase(TestCase):
    """
    Unit testing class for the Composite class.
    """
    def setUp(self):
        """
        Initialize testing data.
        """
        # Minimal component hierarchy: an abstract Component interface and a
        # Leaf that records whether do_something() was invoked.
        class Component(object, metaclass=ABCMeta):
            @abstractmethod
            def do_something(self):
                pass
        class Leaf(Component):
            def __init__(self):
                self.did_something = False
            def do_something(self):
                self.did_something = True
        self.component_class = Component
        self.leaf_one = Leaf()
        self.leaf_two = Leaf()
        self.leaf_three = Leaf()
    def test_add_component(self):
        """
        Test the add_component method.
        @raise AssertionError: If the test fails.
        """
        composite = Composite(self.component_class)
        composite.add_component(self.leaf_one)
        composite_two = Composite(self.component_class)
        composite_two.add_component(self.leaf_two)
        composite_three = Composite(self.component_class)
        composite_three.add_component(self.leaf_three)
        composite_two.add_component(composite_three)
        composite.add_component(composite_two)
        # NOTE(review): bare `except:` hides the real failure reason; catching
        # Exception and chaining it would make diagnosis easier.
        try:
            composite.add_component(composite_two)  # re-adding must be a no-op, not an error
            composite_two.add_component(composite_three)
        except:
            raise AssertionError()
        else:
            self.assertSetEqual({self.leaf_one, composite_two}, composite.components)
            self.assertSetEqual({self.leaf_two, composite_three}, composite_two.components)
            self.assertSetEqual({self.leaf_three}, composite_three.components)
    def test_remove_component(self):
        """
        Test the remove_component method.
        @raise AssertionError: If the test fails.
        """
        composite = Composite(self.component_class)
        composite_two = Composite(self.component_class)
        composite_two.add_component(self.leaf_one)
        composite_two.add_component(self.leaf_two)
        composite.add_component(self.leaf_one)
        composite.add_component(self.leaf_two)
        composite.add_component(composite_two)
        composite.remove_component(self.leaf_one)
        composite.remove_component(self.leaf_two)
        composite.remove_component(composite_two)
        # Removing an already-removed component must not raise.
        try:
            composite.remove_component(composite_two)
        except:
            raise AssertionError
        else:
            self.assertSetEqual(set(), composite.components)
    def test_delegate(self):
        """
        Test the delegate method.
        @raise AssertionError: If the test fails
        """
        composite = Composite(self.component_class)
        composite_two = Composite(self.component_class)
        composite_three = Composite(self.component_class)
        composite.add_component(self.leaf_one)
        composite_two.add_component(self.leaf_two)
        composite_three.add_component(self.leaf_three)
        composite_two.add_component(composite_three)
        composite.add_component(composite_two)
        # _delegate must recurse through nested composites down to every leaf.
        composite._delegate('do_something')
        self.assertTrue(self.leaf_one.did_something)
        self.assertTrue(self.leaf_two.did_something)
        self.assertTrue(self.leaf_three.did_something)
        # Reset shared leaves for any later assertions in this test run.
        self.leaf_one.did_something = False
        self.leaf_two.did_something = False
        self.leaf_three.did_something = False
    def test_getattr(self):
        """
        Test the getattr method.
        @raise AssertionError: If the test fails.
        """
        composite = Composite(self.component_class)
        composite_two = Composite(self.component_class)
        composite_three = Composite(self.component_class)
        composite.add_component(self.leaf_one)
        composite_two.add_component(self.leaf_two)
        composite_three.add_component(self.leaf_three)
        composite_two.add_component(composite_three)
        composite.add_component(composite_two)
        # Calling the method directly on the composite should route through
        # __getattr__ and behave exactly like _delegate.
        composite.do_something()
        self.assertTrue(self.leaf_one.did_something)
        self.assertTrue(self.leaf_two.did_something)
        self.assertTrue(self.leaf_three.did_something)
        self.leaf_one.did_something = False
        self.leaf_two.did_something = False
        self.leaf_three.did_something = False
    def test_invalid_getattr(self):
        """
        Test the getattr method with an invalid attribute.
        @raise AssertionError: If the test fails.
        """
        composite = Composite(self.component_class)
        composite_two = Composite(self.component_class)
        composite_three = Composite(self.component_class)
        composite.add_component(self.leaf_one)
        composite_two.add_component(self.leaf_two)
        composite_three.add_component(self.leaf_three)
        composite_two.add_component(composite_three)
        composite.add_component(composite_two)
        # NOTE(review): the second call is dead code — composite.foo() raises
        # inside the context manager, so composite.did_something() never runs.
        with self.assertRaises(AttributeError):
            composite.foo()
            composite.did_something()
    def test_interface(self):
        """
        Test the interface functionality.
        @raise AssertionError: If the test fails.
        """
        # BadComponent is NOT an ABC and BadLeaf does not implement Component,
        # so cross-interface additions must be rejected with AttributeError.
        class BadComponent(object):
            def foo(self):
                raise NotImplementedError()
        class BadLeaf(BadComponent):
            def __init__(self):
                pass
            def foo(self):
                pass
        composite = Composite(self.component_class)
        composite_two = Composite(BadComponent)
        composite_two.add_component(BadLeaf())
        self.assertRaises(AttributeError, composite_two.add_component, self.leaf_one)
        self.assertRaises(AttributeError, composite.add_component, composite_two)
        self.assertRaises(AttributeError, composite.add_component, BadLeaf())
| StarcoderdataPython |
21836 | <gh_stars>0
from django.conf import settings
from django.urls.conf import include, path
from rest_framework.routers import DefaultRouter, SimpleRouter
# DefaultRouter also serves a browsable API root view — handy locally, so it
# is enabled only when DEBUG is on; production uses the lighter SimpleRouter.
if settings.DEBUG:
    router = DefaultRouter()
else:
    router = SimpleRouter()
# Namespace for reverse() lookups, e.g. reverse("api:user-detail").
app_name = "api"
urlpatterns = [
    path("", include("summers_api.users.api.urls")),
]
# Routes registered on the router (none yet at module level) come last.
urlpatterns += router.urls
| StarcoderdataPython |
3348188 | <reponame>mmmmlz/VQA_tencent
"""Test Demo for Quality Assessment of In-the-Wild Videos, ACM MM 2019"""
#
# Author: <NAME>
# Email: <EMAIL> AT <EMAIL> DOT edu DOT cn
# Date: 2018/3/27
#
import torch
from torchvision import transforms
import skvideo
#skvideo.setFFmpegPath(r'D:\apps\ffmpeg-N-102166-g1ab74bc193-win64-gpl\bin')
import os
import skvideo.io
from PIL import Image
import numpy as np
from VSFA import VSFA
from CNNfeatures import get_features
from argparse import ArgumentParser
import time
# Score every labelled video with the pretrained VSFA model and append the
# per-class results (plus the class mean) to resultA.txt / resultC.txt.
with open("video_info.txt", "r") as f:
    all_data = f.readlines()

video_dir = "/cfs/cfs-3cab91f9f/liuzhang/video_data/video_clarity_vid"
model_path = "./models/VSFA.pt"  # BUG FIX: a stray 'numpy' token made this line a SyntaxError
print(skvideo.getFFmpegPath())
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
video_list = os.listdir(video_dir)

model = VSFA()
model.load_state_dict(torch.load(model_path))
model.to(device)
model.eval()


def _write_scores(path, scores):
    """Append '<video> <score>' lines plus the mean score to *path*."""
    with open(path, "a") as out:
        total = 0
        for video_name, score in scores.items():
            out.write("{} {}\n".format(video_name, score))
            total += float(score)
        if scores:  # guard: avoid ZeroDivisionError when a class has no videos
            out.write(str(total / len(scores)))


A = {}  # predicted scores for label-"A" videos
C = {}  # predicted scores for label-"C" videos
# ImageNet normalisation expected by the CNN feature extractor; the transform
# is loop-invariant, so build it once instead of once per video.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
for data in all_data:
    # Each line starts with '<basename>_<label>'; drop the 3-char suffix to get
    # the mp4 name and pull the single-letter quality label out of it.
    name = data.split(" ")[0][:-3] + ".mp4"
    label = data.split(" ")[0][-2:-1]
    if name not in video_list:
        continue
    video_path = os.path.join(video_dir, name)
    video_data = skvideo.io.vread(video_path)  # (frames, H, W, C)
    print(video_data.shape)
    video_length = video_data.shape[0]
    video_channel = video_data.shape[3]
    video_height = video_data.shape[1]
    video_width = video_data.shape[2]
    transformed_video = torch.zeros([video_length, video_channel, video_height, video_width])
    for frame_idx in range(video_length):
        frame = Image.fromarray(video_data[frame_idx])
        transformed_video[frame_idx] = transform(frame)
    print('Video length: {}'.format(transformed_video.shape[0]))
    # Frame-level deep features, then VSFA pools them temporally into one score.
    features = get_features(transformed_video, frame_batch_size=32, device=device)
    features = torch.unsqueeze(features, 0)  # batch size 1
    with torch.no_grad():
        input_length = features.shape[1] * torch.ones(1, 1)
        outputs = model(features, input_length)
        y_pred = outputs[0][0].to('cpu').numpy()
    print("Predicted quality: {}".format(y_pred))
    print("labeled qulity:{}".format(label))
    if label == "A":
        A[name] = y_pred
    elif label == "C":
        C[name] = y_pred

_write_scores("resultA.txt", A)
_write_scores("resultC.txt", C)
| StarcoderdataPython |
1787855 | <filename>qcelemental/models/common_models.py
from enum import Enum
from typing import Any
import numpy as np
from pydantic import BaseModel, Extra
ndarray_encoder = {np.ndarray: lambda v: v.flatten().tolist()}
class Provenance(BaseModel):
    """Who/what generated a result: creator program, its version and routine."""
    creator: str
    version: str = None  # NOTE(review): plain `str` with None default relies on pydantic's implicit Optional
    routine: str = None
    class Config:
        extra = Extra.allow  # unknown provenance fields are accepted and kept
class Model(BaseModel):
    """Quantum-chemistry model specification: the method and (optionally) basis."""
    method: str
    basis: str = None
    # basis_spec: BasisSpec = None # This should be exclusive with basis, but for now will be omitted
    class Config:
        allow_mutation = False  # immutable once constructed
        extra = Extra.allow
class DriverEnum(str, Enum):
    """Allowed computation drivers, i.e. which quantity a run should produce."""
    energy = 'energy'
    gradient = 'gradient'
    hessian = 'hessian'
class ComputeError(BaseModel):
    """The type of error message raised"""
    error_type: str  # Error enumeration not yet strict
    error_message: str
    class Config:
        extra = Extra.forbid  # errors are a closed schema: reject unknown fields
class FailedOperation(BaseModel):
    """Wrapper for a failed computation: the original input plus the error."""
    id: str = None
    input_data: Any = None  # the schema-typed input that produced the failure
    success: bool = False   # always False by construction
    error: ComputeError
    class Config:
        extra = Extra.allow
        allow_mutation = False
        json_encoders = {**ndarray_encoder}  # numpy arrays in input_data serialize as lists
# Canonical schema-name strings used to tag each qcschema document type.
qcschema_input_default = "qcschema_input"
qcschema_output_default = "qcschema_output"
qcschema_optimization_input_default = "qcschema_optimization_input"
qcschema_optimization_output_default = "qcschema_optimization_output"
| StarcoderdataPython |
3306135 | import socket
import select
import struct
from threading import Thread
import time
import rsparse
import doctest
import base64
def make_sensor_list(lis):
    """
    Pair up a flat [name, value, name, value, ...] list into (name, value) tuples.

    >>> make_sensor_list(['a', 1, 'b', 2])
    [('a', 1), ('b', 2)]
    """
    # BUG FIX: `range(len(lis)/2)` used Python-2 integer division; under
    # Python 3 it produces a float and range() raises TypeError.  Zipping the
    # even/odd slices is equivalent (a trailing unpaired element is dropped,
    # exactly as before) and works on both interpreters.
    return list(zip(lis[0::2], lis[1::2]))
def make_header(message):
    """Build the 4-byte big-endian length prefix used by the Scratch mesh protocol."""
    length = len(message)
    # Most-significant byte first; each byte is masked into the 0-255 range.
    octets = [(length >> shift) % 256 for shift in (24, 16, 8, 0)]
    return struct.pack("BBBB", *octets)
class RemoteSensorServer:
    """TCP server speaking the Scratch 1.x "remote sensor" mesh protocol.

    Messages are a 4-byte big-endian length header followed by a UTF-8
    payload ('sensor-update ...' or 'broadcast ...').  Runs the accept/read
    loop on a daemon thread.  NOTE(review): uses Python-2-only builtins
    (`unicode`, `raw_input`) and str/bytes interchangeably — py2 code.
    """
    def __init__(self, host='', port=42001):
        # 42001 is the fixed Scratch mesh port.
        self.host = host
        self.port = port
        self.sensors = {}      # last value sent per sensor name (for change detection)
        self.socket = None     # listening socket; None means "not running"
        self.controller = None # callback object receiving sensor_update/broadcast
        self.clients = {}      # replaced by a set (listening socket + client conns) in start()
    def set_controller(self, controller):
        # Register the object whose sensor_update()/broadcast() get called on incoming messages.
        self.controller = controller
    def start(self):
        # Idempotent: create the listening socket and spawn the server thread.
        if self.socket is not None:
            return
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.clients = set([self.socket])
        self.thread = Thread(target=self._server_thread)
        self.thread.daemon = True  # don't keep the process alive for this thread
        self.thread.start()
    def _server_thread(self):
        # Accept/read loop; select() with a 1s timeout so stop() is noticed.
        print("server_thread")
        backlog = 10
        bufsize = 4096
        try:
            self.socket.bind((self.host, self.port))
            self.socket.listen(backlog)
            print("Start select")
            while self.socket is not None:
                rready, wready, xready = select.select(self.clients, [], [], 1)
                for sock in rready:
                    if sock is self.socket:
                        # New client: accept, disable Nagle, send a full sensor snapshot.
                        conn, address = self.socket.accept()
                        conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                        self.clients.add(conn)
                        message = self._make_sensor_update(self.sensors, True)
                        if message != "":
                            conn.send(message)
                    else:
                        # Existing client: read the 4-byte header, then the payload.
                        msg = sock.recv(4)
                        if len(msg) == 0:
                            self._detach_client(sock)
                        else:
                            a = struct.unpack("B"*4, msg)
                            sz = a[3]+(a[2]<<8)+(a[1]<<16)+(a[0]<<24)  # big-endian payload size
                            print(sz)
                            # NOTE(review): a single recv may return fewer than sz bytes.
                            msg = sock.recv(sz)
                            if len(msg) == 0:
                                self._detach_client(sock)
                            else:
                                print(msg+"!")
                                self._dispatch_message(msg)
        finally:
            self.stop()
    def _detach_client(self, socket):
        # Close and forget a disconnected client (parameter shadows the socket module).
        print("detach_client")
        socket.close()
        self.clients.remove(socket)
    def stop(self):
        # Setting self.socket to None also terminates the server-thread loop.
        print("RemoteSensorServer stop")
        if self.socket is None:
            return
        self.socket = None
        for socket in self.clients:
            socket.close()
        self.clients = set()
    def is_running(self):
        return self.socket is not None
    def _dispatch_message(self, message):
        # Decode one protocol payload and forward it to the controller.
        if self.controller is None:
            return
        text = unicode(message, 'utf-8')  # py2-only builtin
        command, lis = rsparse.parse_message(text)
        if command == 'sensor-update':
            dic = make_sensor_list(lis)
            self.controller.sensor_update(dic)
        elif command == 'broadcast':
            self.controller.broadcast(lis[0])
    def _make_sensor_update(self, dic, forceAll=False):
        # Serialize only sensors whose value changed (or all, when forceAll).
        # Returns "" when nothing needs sending.
        message = ""
        for x in dic:
            v = dic[x]
            if not (x in self.sensors) or (v != self.sensors[x]) or forceAll:
                if type(v) is str:
                    v = '"'+v+'"'  # string values are quoted per protocol
                message += ' '+x+' '+str(v)+''
                self.sensors[x] = v  # remember what was sent, for future change detection
        if message == "":
            return ""
        message = "sensor-update"+message
        message = message.encode('utf-8')
        return make_header(message)+message
    def _make_broadcast(self, str):
        # Frame a broadcast message (parameter name shadows the str builtin).
        if str == "":
            return ""
        message = "broadcast "+str
        message = message.encode('utf-8')
        return make_header(message)+message
    def _make_image(self, image, image_format):
        # Frame an image payload: '<format> <base64 data>'.
        message = image_format+" "+base64.b64encode(image)
        message = message.encode('utf-8')
        return make_header(message)+message
    def _send(self, message):
        # Fan a framed message out to every connected client (not the listener).
        if message != "":
            for socket in self.clients:
                if socket is not self.socket:
                    socket.send(message)
    def send_sensor_update(self, dic):
        message = self._make_sensor_update(dic)
        self._send(message)
    def send_broadcast(self, str):
        message = self._make_broadcast(str)
        self._send(message)
    def send_image(self, image, image_format):
        message = self._make_image(image, image_format)
        self._send(message)
    def test(self):
        # To test, enter following code in python shell.
        # % python
        # >>> import rsserver
        # >>> s = rsserver.RemoteSensorServer('')
        # >>> s.test()
        # Then start mesh-enabled Scratch and connect to this machine
        # using Share-Join Mesh menu.
        self.start()
        ret = ""
        while ret != "q":
            ret = raw_input("'q' to quit>")  # py2-only builtin
        self.stop()
        quit()  # NOTE(review): quit() kills the interpreter, not just the server
# Run the module doctests (e.g. make_sensor_list) when executed directly.
if __name__ == "__main__":
    doctest.testmod()
| StarcoderdataPython |
92084 | <reponame>grantps/superhelp
from textwrap import dedent
from tests import check_as_expected
# Message-id prefix shared by every expected-count key below.
ROOT = 'superhelp.helpers.packing_help.'
def test_misc():
    """Data-driven check of the packing/unpacking helper.

    Each entry pairs a code snippet with the expected number of times each
    helper message fires; checked both with and without executing the snippet.
    """
    test_conf = [
        (
            dedent("""\
            pet = 'cat'
            """),
            {
                ROOT + 'unpacking': 0,
                ROOT + 'unpacking_opportunity': 0,
            }
        ),
        (
            dedent("""\
            x, y = coord
            """),
            {
                ROOT + 'unpacking': 1,
                ROOT + 'unpacking_opportunity': 0,
            }
        ),
        (
            dedent("""\
            x, y = coord1
            x, y = coord2
            """),
            {
                ROOT + 'unpacking': 2,
                ROOT + 'unpacking_opportunity': 0,
            }
        ),
        (
            dedent("""\
            for i in range(2):
                x, y = coord1
                x, y = coord2
            """),
            {
                ROOT + 'unpacking': 1,  ## in one block so one message
                ROOT + 'unpacking_opportunity': 0,
            }
        ),
        (
            dedent("""\
            x = coord[0]
            y = coord[1]
            """),
            {
                ROOT + 'unpacking': 0,
                ROOT + 'unpacking_opportunity': 1,
            }
        ),
        (
            dedent("""\
            for i in range(2):
                x = coord[0]
                y = coord[1]
            """),
            {
                ROOT + 'unpacking': 0,
                ROOT + 'unpacking_opportunity': 1,
            }
        ),
        (
            dedent("""\
            for i in range(2):
                x = coord1[0]
                y = coord1[1]
            for i in range(2):
                x = coord2[0]
                y = coord2[1]
            """),
            {
                ROOT + 'unpacking': 0,
                ROOT + 'unpacking_opportunity': 1,  ## snippet-level message so only the one
            }
        ),
        (
            dedent("""\
            nz_capital = capitals['NZ']
            aus_capital = capitals['Australia']
            """),
            {
                ROOT + 'unpacking': 0,
                ROOT + 'unpacking_opportunity': 0,  ## snippet-level message so only the one
            }
        ),
    ]
    check_as_expected(test_conf, execute_code=True)
    check_as_expected(test_conf, execute_code=False)
check_as_expected(test_conf, execute_code=False)
# test_misc()
| StarcoderdataPython |
1751533 | import os, re
def tex_escape(text):
    """Return text with problematic escape sequences parsed for Latex use.

    Note: This function was copied from the following StackOverflow answer,
    <https://stackoverflow.com/a/25875504/10134974>

    Parameters
    ----------
    text : str
        a plain text message

    Returns
    -------
    the message escaped to appear correctly in LaTeX
    """
    replacements = {
        '&': r'\&', '%': r'\%', '$': r'\$', '#': r'\#', '_': r'\_',
        '{': r'\{', '}': r'\}',
        '~': r'\textasciitilde{}',
        '^': r'\^{}',
        '\\': r'\textbackslash{}',
        '<': r'\textless{}',
        '>': r'\textgreater{}',
    }
    # Longest keys first so multi-character sequences would win over prefixes.
    ordered_keys = sorted(replacements, key=len, reverse=True)
    pattern = re.compile('|'.join(re.escape(key) for key in ordered_keys))
    return pattern.sub(lambda match: replacements[match.group()], text)
def append_before_extension(filename: str, text_to_append: str) -> str:
    """Take a filename and add a string to the end before the extension.

    Note that an underscore is inserted in between.

    Parameters
    ----------
    filename : str
    text_to_append : str

    Returns
    -------
    str
        filename with the text appended before the extension
    """
    root, extension = os.path.splitext(filename)
    return root + "_" + text_to_append + extension
return "{0}_{2}{1}".format(*os.path.splitext(filename) + (text_to_append,)) | StarcoderdataPython |
1793855 | # Apply STG on XOR dataset
from stg import STG
import numpy as np
import scipy.stats # for creating a simple dataset
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
def generate_XOR_labels(X):
    """Label each sample by a soft-XOR rule on its first two features.

    The class-0 probability is sigmoid-like in x0*x1: rows where
    exp(x0*x1)/(1+exp(x0*x1)) dominates get label 0, otherwise 1
    (ties, i.e. x0*x1 == 0, fall to label 1).

    Returns an (n, 1) integer array of 0/1 labels.
    """
    interaction = np.exp(X[:, 0] * X[:, 1])
    p_class0 = (interaction / (1.0 + interaction))[:, np.newaxis]
    p_class1 = (1.0 / (1.0 + interaction))[:, np.newaxis]
    return np.where(p_class0 > p_class1, 0, 1)
# Build a 100x10 Gaussian dataset; only features 0 and 1 determine the label,
# so STG's feature-selection gates should open for those two only.
X_data = np.random.randn(100, 10)
y_i = generate_XOR_labels(X_data)
y_data = y_i.flatten()
print(X_data.shape)
print(y_i)
print(y_data.shape)
# 30% train split, then carve a validation set out of the training portion.
X_train, X_test, y_train, y_test = train_test_split(X_data, y_data, train_size=0.3)
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, train_size=0.8)
# Full-batch SGD; sigma/lam control the stochastic-gate noise and sparsity penalty.
model = STG(task_type='classification',input_dim=X_train.shape[1], output_dim=2, hidden_dims=[60, 20], activation='tanh',
    optimizer='SGD', learning_rate=0.1, batch_size=X_train.shape[0], feature_selection=True, sigma=0.5, lam=0.5, random_state=1, device="cpu")
model.fit(X_train, y_train, nr_epochs=6000, valid_X=X_valid, valid_y=y_valid, print_interval=1000)
# Gate activations: probabilities and raw (pre-clamp) values per feature.
print(model.get_gates(mode='prob'))
print(model.get_gates(mode='raw'))
y_pred=model.predict(X_test)
print(accuracy_score(y_pred,y_test)) #
3214215 | a = 'Hello Python'
print(a)
# len() returns the number of characters
print(len(a))
# indexing (s[i]) and slicing (s[i:j]) of strings
print(a[0])
print(a[1])
print(a[2:5])
# sequence repetition: duplicate the string (with a trailing newline) twice
print((a+'\n')*2)
3385731 | from flask import Flask,request,Response,jsonify, make_response
from flask_cors import CORS, cross_origin
from recognizer import Recognizer;
import cv2
import base64
import numpy as np
app = Flask(__name__)
@app.route('/')
def application():
    """Root endpoint: return the static landing page markup."""
    landing_page = "<center><h1>You are Hacked!!</h1></center>"
    return landing_page
@app.route('/recognizer', methods=['POST'])
@cross_origin()
def recognizer():
    """Recognize a face in an uploaded image.

    Expects a multipart POST with an 'image' file field.  Returns JSON with a
    status flag, a message, and — when exactly one face is found — the
    annotated image (base64 JPEG), the matched user id and the confidence.
    """
    try:
        file = request.files['image']
        # BUG FIX: np.fromstring and ndarray.tostring were deprecated and are
        # removed in NumPy 2.0; frombuffer/tobytes produce identical data.
        image = cv2.imdecode(np.frombuffer(file.read(), np.uint8), cv2.IMREAD_UNCHANGED)
        recognizer_a = Recognizer()
        faces = recognizer_a.detect_faces(image)
        if len(faces) == 1:
            image, userID, confidence = recognizer_a.recognize_uploaded_pic(image, faces)
            image_content = cv2.imencode('.jpg', image)[1].tobytes()
            encoded_image = base64.b64encode(image_content).decode('ascii')
            data = {"user_id": userID, "image": encoded_image, "confidence": confidence}
            # NOTE(review): 'Unkown' typo kept verbatim — clients may match on it.
            if userID == "unknown":
                response = jsonify(message="Unkown Face Detected.", status=False, data=data)
            else:
                response = jsonify(message="Face Recognized.", status=True, data=data)
        elif len(faces) == 0:
            response = jsonify(message="No face detected.", status=False)
        else:  # more than one face detected
            response = jsonify(message="More than one Faces are detected.", status=False)
        return response
    except Exception as e:
        # API boundary: report the failure as JSON instead of a 500 HTML page.
        print(str(e))
        return {"message": "Falied", "status": False, "error": str(e)}
1668253 | #
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2008-2022 <NAME> r.c.i.mackenzie at googlemail.com
#
# https://www.gpvdm.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
## @package qe
# The quantum efficiency window
#
import os
from tab import tab_class
from icon_lib import icon_get
#qt
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtWidgets import QWidget,QVBoxLayout,QToolBar,QSizePolicy,QAction,QTabWidget
from PyQt5.QtGui import QPainter,QIcon
#python modules
import webbrowser
from help import help_window
from QWidgetSavePos import QWidgetSavePos
from css import css_apply
from gpvdm_json import gpvdm_data
import i18n
_ = i18n.language.gettext
from experiment import experiment
class window_eqe(experiment):
    """Quantum-efficiency (EQE) editor window.

    Thin specialisation of the generic ``experiment`` window, wired to the
    ``gpvdm_data().eqe`` configuration section.
    """

    def __init__(self):
        super().__init__("tab_eqe",
                         window_save_name="eqe",
                         window_title=_("Quantum efficiency editor"),
                         json_search_path="gpvdm_data().eqe",
                         icon="qe")
        # Refresh whenever the user switches tabs, and once at start-up.
        self.notebook.currentChanged.connect(self.switch_page)
        self.switch_page()

    def switch_page(self):
        """Handle a tab switch; currently only fetches the active tab widget."""
        tab = self.notebook.currentWidget()
| StarcoderdataPython |
1685835 | import argparse
import os
import re
class ShowUsageException(Exception):
    """Raised for CLI argument problems that should trigger the usage text."""
def dir_path(s):
    """Return *s* unchanged if it names an existing directory.

    Raises ShowUsageException otherwise, so argparse can surface the
    problem together with the usage text.
    """
    if not os.path.isdir(s):
        raise ShowUsageException(f'"{s}" is not a directory')
    return s
def origin_directory_pair(s):
    """Parse an ``origin:path`` command-line argument.

    The origin must be 36 characters drawn from ``[0-9a-f-]`` (UUID-like).
    Returns ``{'origin': ..., 'path': ...}`` on success and raises
    ``argparse.ArgumentTypeError`` on any malformed input.
    """
    try:
        # Split on the first colon only, so the path part may itself
        # contain colons (e.g. Windows drive letters).
        origin, path = s.split(':', 1)
    except ValueError:
        raise argparse.ArgumentTypeError(
            f'Expected colon-delimited pair, not "{s}"')

    expected_format = r'[0-9a-f-]{36}'
    # fullmatch, not match: re.match only anchors at the start, which let
    # origins with trailing garbage slip through validation.
    if not re.fullmatch(expected_format, origin):
        raise argparse.ArgumentTypeError(
            f'Origin format wrong; expected {expected_format}')

    return {
        'origin': origin,
        'path': path
    }
| StarcoderdataPython |
11772 | # pylint: disable=missing-module-docstring
#
# Copyright (C) 2020 by UsergeTeam@Github, < https://github.com/UsergeTeam >.
#
# This file is part of < https://github.com/UsergeTeam/Userge > project,
# and is released under the "GNU v3.0 License Agreement".
# Please see < https://github.com/uaudith/Userge/blob/master/LICENSE >
#
# All rights reserved.
__all__ = ['OnFilters']
from pyrogram.filters import Filter as RawFilter
from ... import types
from . import RawDecorator
class OnFilters(RawDecorator):  # pylint: disable=missing-class-docstring
    def on_filters(self,  # pylint: disable=arguments-differ
                   filters: RawFilter,
                   group: int = 0,
                   allow_private: bool = True,
                   allow_bots: bool = True,
                   allow_groups: bool = True,
                   allow_channels: bool = True,
                   only_admins: bool = False,
                   allow_via_bot: bool = True,
                   check_client: bool = True,
                   check_downpath: bool = False,
                   check_change_info_perm: bool = False,
                   check_edit_perm: bool = False,
                   check_delete_perm: bool = False,
                   check_restrict_perm: bool = False,
                   check_promote_perm: bool = False,
                   check_invite_perm: bool = False,
                   check_pin_perm: bool = False) -> RawDecorator._PYRORETTYPE:
        """\nDecorator for handling filtered updates

        Parameters:
            filters (:obj:`~pyrogram.filters`):
                One or more filters restricting which messages reach the
                decorated function.

            group (``int``, *optional*):
                The handler group identifier, defaults to 0.

            allow_private / allow_bots / allow_groups / allow_channels
            (``bool``, *optional*):
                Chat kinds the handler accepts; set any to ``False`` to
                prohibit that kind.  Each defaults to True.

            only_admins (``bool``, *optional*):
                If ``True``, the client should be an admin, defaults to False.

            allow_via_bot (``bool``, *optional*):
                If ``True``, allow execution via your bot, defaults to True.

            check_client (``bool``, *optional*):
                If ``True``, check whether the client is a bot before
                executing, defaults to True.

            check_downpath (``bool``, *optional*):
                If ``True``, check the download path and create it if it
                does not exist, defaults to False.

            check_change_info_perm / check_edit_perm / check_delete_perm /
            check_restrict_perm / check_promote_perm / check_invite_perm /
            check_pin_perm (``bool``, *optional*):
                Each ``check_<right>_perm`` flag requires the user to hold
                the corresponding admin right (change_info, edit, delete,
                restrict, promote, invite, pin) before executing.
                All default to False.
        """
        # Gather every option into one mapping so the Filter.parse call
        # below stays readable and provably exhaustive.
        parse_options = dict(client=self,
                             filters=filters,
                             group=group,
                             allow_private=allow_private,
                             allow_bots=allow_bots,
                             allow_groups=allow_groups,
                             allow_channels=allow_channels,
                             only_admins=only_admins,
                             allow_via_bot=allow_via_bot,
                             check_client=check_client,
                             check_downpath=check_downpath,
                             check_change_info_perm=check_change_info_perm,
                             check_edit_perm=check_edit_perm,
                             check_delete_perm=check_delete_perm,
                             check_restrict_perm=check_restrict_perm,
                             check_promote_perm=check_promote_perm,
                             check_invite_perm=check_invite_perm,
                             check_pin_perm=check_pin_perm)
        return self._build_decorator(types.raw.Filter.parse(**parse_options))
| StarcoderdataPython |
3314659 | """
@Author : Ailitonia
@Date : 2021/08/15 1:19
@FileName : __init__.py.py
@Project : nonebot2_miya
@Description :
@GitHub : https://github.com/Ailitonia
@Software : PyCharm
"""
from datetime import datetime
from nonebot import on_command, logger
from nonebot.plugin.export import export
from nonebot.typing import T_State
from nonebot.rule import to_me
from nonebot.permission import SUPERUSER
from nonebot.adapters.cqhttp.bot import Bot
from nonebot.adapters.cqhttp.event import MessageEvent, GroupMessageEvent, PrivateMessageEvent
from nonebot.adapters.cqhttp.permission import GROUP_ADMIN, GROUP_OWNER, PRIVATE_FRIEND
from nonebot.adapters.cqhttp.message import MessageSegment
from omega_miya.utils.omega_plugin_utils import init_export, init_processor_state, PicEncoder
from omega_miya.database import DBStatistic
from .utils import draw_statistics
# Custom plugin usage text
__plugin_custom_name__ = '统计信息'
__plugin_usage__ = r'''【Omega 插件使用统计】
查询插件使用统计信息
**Permission**
Friend Private
Command & Lv.10
or AuthNode
**AuthNode**
basic
**Usage**
/统计信息 [条件]'''
# Init plugin export
init_export(export(), __plugin_custom_name__, __plugin_usage__)
# Register the event responder for per-chat statistics queries.
statistic = on_command(
    '统计信息',
    # Permission gating is intercepted by a run_preprocessor; the required
    # permission level is initialised in the default state below.
    state=init_processor_state(
        name='statistic',
        command=True,
        level=10),
    aliases={'插件统计', 'statistic'},
    permission=SUPERUSER | GROUP_ADMIN | GROUP_OWNER | PRIVATE_FRIEND,
    priority=10,
    block=True)
# Override the default argument parsing for the statistic command.
@statistic.args_parser
async def parse(bot: Bot, event: MessageEvent, state: T_State):
    """Validate follow-up input; reject empty input and honour '取消' (cancel)."""
    args = str(event.get_plaintext()).strip().lower().split()
    if not args:
        # reject() raises, prompting the user to resend.
        await statistic.reject('你似乎没有发送有效的参数呢QAQ, 请重新发送:')
    state[state["_current_key"]] = args[0]
    if state[state["_current_key"]] == '取消':
        await statistic.finish('操作已取消')
@statistic.handle()
async def handle_first_receive(bot: Bot, event: MessageEvent, state: T_State):
    """Seed the query condition from the command arguments.

    No argument defaults to '本月' (this month); more than one argument
    aborts the session with an error message.
    """
    args = str(event.get_plaintext()).strip().lower().split()
    if len(args) > 1:
        # finish() raises and ends the session here.
        await statistic.finish('参数错误QAQ')
    state['condition'] = args[0] if args else '本月'
@statistic.got('condition', prompt='请输入查询条件:\n【全部/本月/本年】')
async def handle_statistic(bot: Bot, event: MessageEvent, state: T_State):
    """Render and send a plugin-usage statistics chart for the current chat.

    The condition selects the time window: '本月' (this month), '本年'
    (this year); anything else falls back to '全部' (all time).
    """
    condition = state['condition']
    self_id = event.self_id
    now = datetime.now()
    # Map the condition onto a start timestamp; None means no lower bound.
    if condition == '本月':
        start_time = datetime(year=now.year, month=now.month, day=1)
    elif condition == '本年':
        start_time = datetime(year=now.year, month=1, day=1)
    else:
        condition = '全部'
        start_time = None
    # Group chats aggregate per-group usage; private chats per-user usage.
    if isinstance(event, GroupMessageEvent):
        title = f'本群{condition}插件使用统计'
        group_id = event.group_id
        statistic_result = await DBStatistic(
            self_bot_id=self_id).get_group_statistic(group_id=group_id, start_time=start_time)
    elif isinstance(event, PrivateMessageEvent):
        title = f'用户{condition}插件使用统计'
        user_id = event.user_id
        statistic_result = await DBStatistic(
            self_bot_id=self_id).get_user_statistic(user_id=user_id, start_time=start_time)
    else:
        # Unsupported event type: silently ignore.
        return
    if statistic_result.error:
        logger.error(f'查询统计信息失败, error: {statistic_result.info}')
        await statistic.finish('查询统计信息失败QAQ')
    # Draw the chart, persist it to a file, and reply with the image.
    draw_bytes = await draw_statistics(data=statistic_result.result, title=title)
    img_result = await PicEncoder.bytes_to_file(image=draw_bytes, folder_flag='statistic')
    if img_result.error:
        logger.error(f'生成统计图表失败, error: {img_result.info}')
        await statistic.finish('生成统计图表失败QAQ')
    await statistic.finish(MessageSegment.image(img_result.result))
# Superuser-only responder for bot-wide statistics (must mention the bot).
admin_statistic = on_command(
    '全局统计信息',
    # Permission gating is intercepted by a run_preprocessor; the required
    # permission level is initialised in the default state below.
    rule=to_me(),
    state=init_processor_state(
        name='admin_statistic',
        command=True,
        level=10),
    aliases={'全局插件统计', 'total_stat'},
    permission=SUPERUSER,
    priority=10,
    block=True)
@admin_statistic.handle()
async def handle_admin_statistic(bot: Bot, event: MessageEvent, state: T_State):
    """Render and send the bot-wide plugin-usage statistics chart.

    Consistency fix: replies now go through ``admin_statistic`` — this
    handler's own matcher — instead of the sibling ``statistic`` matcher,
    matching how every other handler in this module finishes.
    """
    self_id = event.self_id
    statistic_result = await DBStatistic(self_bot_id=self_id).get_bot_statistic()
    if statistic_result.error:
        logger.error(f'查询全局统计信息失败, error: {statistic_result.info}')
        await admin_statistic.finish('查询全局统计信息失败QAQ')
    # Draw the chart, persist it to a file, and reply with the image.
    title = f'Bot:{self_id} 全局插件使用统计'
    draw_bytes = await draw_statistics(data=statistic_result.result, title=title)
    img_result = await PicEncoder.bytes_to_file(image=draw_bytes, folder_flag='statistic')
    if img_result.error:
        logger.error(f'生成全局统计图表失败, error: {img_result.info}')
        await admin_statistic.finish('生成全局统计图表失败QAQ')
    await admin_statistic.finish(MessageSegment.image(img_result.result))
| StarcoderdataPython |
124719 | import math
try:
    # On MicroPython, ulab provides scipy/numpy-compatible modules.
    from ulab import scipy, numpy as np
except ImportError:
    # On CPython, fall back to the real packages.
    import scipy
    import numpy as np

# 4x4 system matrix and right-hand side shared by both solves.
A = np.array([[3, 0, 2, 6], [2, 1, 0, 1], [1, 0, 1, 4], [1, 2, 1, 8]])
b = np.array([4, 2, 4, 2])

# Forward substitution: solve L x = b using the lower triangle of A.
result = scipy.linalg.solve_triangular(A, b, lower=True)
ref_result = np.array([1.333333333, -0.666666666, 2.666666666, -0.083333333])
for got, want in zip(result, ref_result):
    print(math.isclose(got, want, rel_tol=1E-6, abs_tol=1E-6))

# Backward substitution: solve U x = b using the upper triangle of A.
result = scipy.linalg.solve_triangular(A, b, lower=False)
ref_result = np.array([-1.166666666, 1.75, 3.0, 0.25])
for got, want in zip(result, ref_result):
    print(math.isclose(got, want, rel_tol=1E-6, abs_tol=1E-6))
| StarcoderdataPython |
4837301 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'symmGui.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(536, 421)
MainWindow.setMinimumSize(QtCore.QSize(493, 404))
MainWindow.setMaximumSize(QtCore.QSize(1000, 1000))
MainWindow.setBaseSize(QtCore.QSize(493, 404))
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.lbl_equations = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Arial Unicode MS")
font.setBold(True)
font.setWeight(75)
self.lbl_equations.setFont(font)
self.lbl_equations.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.lbl_equations.setTextFormat(QtCore.Qt.AutoText)
self.lbl_equations.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.lbl_equations.setObjectName("lbl_equations")
self.verticalLayout_2.addWidget(self.lbl_equations)
self.lbl_units = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Arial Unicode MS")
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.lbl_units.setFont(font)
self.lbl_units.setObjectName("lbl_units")
self.verticalLayout_2.addWidget(self.lbl_units)
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.lbl_phaseBMag = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(12)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.lbl_phaseBMag.setFont(font)
self.lbl_phaseBMag.setObjectName("lbl_phaseBMag")
self.gridLayout.addWidget(self.lbl_phaseBMag, 1, 0, 1, 1)
self.ledt_phaseAOmega = QtWidgets.QLineEdit(self.centralwidget)
self.ledt_phaseAOmega.setObjectName("ledt_phaseAOmega")
self.gridLayout.addWidget(self.ledt_phaseAOmega, 0, 3, 1, 1)
self.lbl_phaseAPhi = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(12)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.lbl_phaseAPhi.setFont(font)
self.lbl_phaseAPhi.setObjectName("lbl_phaseAPhi")
self.gridLayout.addWidget(self.lbl_phaseAPhi, 0, 4, 1, 1)
self.lbl_phaseAMag = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(12)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.lbl_phaseAMag.setFont(font)
self.lbl_phaseAMag.setObjectName("lbl_phaseAMag")
self.gridLayout.addWidget(self.lbl_phaseAMag, 0, 0, 1, 1)
self.ledt_phaseAMag = QtWidgets.QLineEdit(self.centralwidget)
self.ledt_phaseAMag.setObjectName("ledt_phaseAMag")
self.gridLayout.addWidget(self.ledt_phaseAMag, 0, 1, 1, 1)
self.ledt_phaseAPhi = QtWidgets.QLineEdit(self.centralwidget)
self.ledt_phaseAPhi.setObjectName("ledt_phaseAPhi")
self.gridLayout.addWidget(self.ledt_phaseAPhi, 0, 5, 1, 1)
self.ledt_phaseCMag = QtWidgets.QLineEdit(self.centralwidget)
self.ledt_phaseCMag.setObjectName("ledt_phaseCMag")
self.gridLayout.addWidget(self.ledt_phaseCMag, 2, 1, 1, 1)
self.ledt_phaseBPhi = QtWidgets.QLineEdit(self.centralwidget)
self.ledt_phaseBPhi.setObjectName("ledt_phaseBPhi")
self.gridLayout.addWidget(self.ledt_phaseBPhi, 1, 5, 1, 1)
self.lbl_phaseCMag = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(12)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.lbl_phaseCMag.setFont(font)
self.lbl_phaseCMag.setObjectName("lbl_phaseCMag")
self.gridLayout.addWidget(self.lbl_phaseCMag, 2, 0, 1, 1)
self.ledt_phaseCOmega = QtWidgets.QLineEdit(self.centralwidget)
self.ledt_phaseCOmega.setObjectName("ledt_phaseCOmega")
self.gridLayout.addWidget(self.ledt_phaseCOmega, 2, 3, 1, 1)
self.lbl_phaseCOmega = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(12)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.lbl_phaseCOmega.setFont(font)
self.lbl_phaseCOmega.setObjectName("lbl_phaseCOmega")
self.gridLayout.addWidget(self.lbl_phaseCOmega, 2, 2, 1, 1)
self.ledt_pllOmega = QtWidgets.QLineEdit(self.centralwidget)
self.ledt_pllOmega.setObjectName("ledt_pllOmega")
self.gridLayout.addWidget(self.ledt_pllOmega, 3, 1, 1, 1)
self.lbl_pllOmega = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(12)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.lbl_pllOmega.setFont(font)
self.lbl_pllOmega.setObjectName("lbl_pllOmega")
self.gridLayout.addWidget(self.lbl_pllOmega, 3, 0, 1, 1)
self.ledt_phaseCPhi = QtWidgets.QLineEdit(self.centralwidget)
self.ledt_phaseCPhi.setObjectName("ledt_phaseCPhi")
self.gridLayout.addWidget(self.ledt_phaseCPhi, 2, 5, 1, 1)
self.lbl_phaseCPhi = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(12)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.lbl_phaseCPhi.setFont(font)
self.lbl_phaseCPhi.setObjectName("lbl_phaseCPhi")
self.gridLayout.addWidget(self.lbl_phaseCPhi, 2, 4, 1, 1)
self.ledt_pllPhi = QtWidgets.QLineEdit(self.centralwidget)
self.ledt_pllPhi.setObjectName("ledt_pllPhi")
self.gridLayout.addWidget(self.ledt_pllPhi, 3, 3, 1, 1)
self.ledt_time = QtWidgets.QLineEdit(self.centralwidget)
self.ledt_time.setObjectName("ledt_time")
self.gridLayout.addWidget(self.ledt_time, 4, 1, 1, 1)
self.lbl_time = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(12)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.lbl_time.setFont(font)
self.lbl_time.setObjectName("lbl_time")
self.gridLayout.addWidget(self.lbl_time, 4, 0, 1, 1)
self.ledt_phaseBOmega = QtWidgets.QLineEdit(self.centralwidget)
self.ledt_phaseBOmega.setObjectName("ledt_phaseBOmega")
self.gridLayout.addWidget(self.ledt_phaseBOmega, 1, 3, 1, 1)
self.lbl_phaseBPhi = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(12)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.lbl_phaseBPhi.setFont(font)
self.lbl_phaseBPhi.setObjectName("lbl_phaseBPhi")
self.gridLayout.addWidget(self.lbl_phaseBPhi, 1, 4, 1, 1)
self.lbl_pllPhase = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(12)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.lbl_pllPhase.setFont(font)
self.lbl_pllPhase.setObjectName("lbl_pllPhase")
self.gridLayout.addWidget(self.lbl_pllPhase, 3, 2, 1, 1)
self.lbl_phaseBOmega = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(12)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.lbl_phaseBOmega.setFont(font)
self.lbl_phaseBOmega.setObjectName("lbl_phaseBOmega")
self.gridLayout.addWidget(self.lbl_phaseBOmega, 1, 2, 1, 1)
self.ledt_phaseBMag = QtWidgets.QLineEdit(self.centralwidget)
self.ledt_phaseBMag.setObjectName("ledt_phaseBMag")
self.gridLayout.addWidget(self.ledt_phaseBMag, 1, 1, 1, 1)
self.lbl_phaseAOmega = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(12)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.lbl_phaseAOmega.setFont(font)
self.lbl_phaseAOmega.setObjectName("lbl_phaseAOmega")
self.gridLayout.addWidget(self.lbl_phaseAOmega, 0, 2, 1, 1)
self.btn_update = QtWidgets.QPushButton(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Arial Unicode MS")
font.setBold(True)
font.setWeight(75)
self.btn_update.setFont(font)
self.btn_update.setObjectName("btn_update")
self.gridLayout.addWidget(self.btn_update, 4, 5, 1, 1)
self.verticalLayout_2.addLayout(self.gridLayout)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 536, 26))
self.menubar.setObjectName("menubar")
self.menu1 = QtWidgets.QMenu(self.menubar)
self.menu1.setObjectName("menu1")
self.menuHelp = QtWidgets.QMenu(self.menubar)
self.menuHelp.setObjectName("menuHelp")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.file_saveData = QtWidgets.QAction(MainWindow)
self.file_saveData.setObjectName("file_saveData")
self.help_Documentation = QtWidgets.QAction(MainWindow)
self.help_Documentation.setObjectName("help_Documentation")
self.help_About = QtWidgets.QAction(MainWindow)
self.help_About.setObjectName("help_About")
self.file_saveSetting = QtWidgets.QAction(MainWindow)
self.file_saveSetting.setObjectName("file_saveSetting")
self.menu1.addSeparator()
self.menu1.addAction(self.file_saveData)
self.menu1.addAction(self.file_saveSetting)
self.menuHelp.addAction(self.help_Documentation)
self.menuHelp.addSeparator()
self.menuHelp.addAction(self.help_About)
self.menubar.addAction(self.menu1.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.retranslateUi(MainWindow)
self.file_saveData.triggered.connect(self.test)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.ledt_phaseAMag, self.ledt_phaseAOmega)
MainWindow.setTabOrder(self.ledt_phaseAOmega, self.ledt_phaseAPhi)
MainWindow.setTabOrder(self.ledt_phaseAPhi, self.ledt_phaseBMag)
MainWindow.setTabOrder(self.ledt_phaseBMag, self.ledt_phaseBOmega)
MainWindow.setTabOrder(self.ledt_phaseBOmega, self.ledt_phaseBPhi)
MainWindow.setTabOrder(self.ledt_phaseBPhi, self.ledt_phaseCMag)
MainWindow.setTabOrder(self.ledt_phaseCMag, self.ledt_phaseCOmega)
MainWindow.setTabOrder(self.ledt_phaseCOmega, self.ledt_phaseCPhi)
MainWindow.setTabOrder(self.ledt_phaseCPhi, self.ledt_pllOmega)
MainWindow.setTabOrder(self.ledt_pllOmega, self.ledt_pllPhi)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.lbl_equations.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:12pt; font-weight:600;\">Equations:</span></p><p><img src=\":/equations_/equations_all_inputs_150.png\"/></p></body></html>"))
self.lbl_units.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" color:#ff0000;\">Units : angular freq. are in rad/s; phases are in rad, time in seconds</span></p></body></html>"))
self.lbl_phaseBMag.setText(_translate("MainWindow", "<html><head/><body><p>Mag<span style=\" vertical-align:sub;\">b</span><span style=\" font-style:normal;\"> : </span></p></body></html>"))
self.ledt_phaseAOmega.setToolTip(_translate("MainWindow", "The angular frequency of Phase-A input"))
self.lbl_phaseAPhi.setText(_translate("MainWindow", "<html><head/><body><p>φ<span style=\" vertical-align:sub;\">a</span><span style=\" font-style:normal;\"> :</span></p></body></html>"))
self.lbl_phaseAMag.setText(_translate("MainWindow", "<html><head/><body><p>Mag<span style=\" vertical-align:sub;\">a</span><span style=\" font-style:normal;\"> : </span></p></body></html>"))
self.ledt_phaseAMag.setToolTip(_translate("MainWindow", "The magnitude of Phase-A input"))
self.ledt_phaseAPhi.setToolTip(_translate("MainWindow", "The initial phase of Phase-A input"))
self.ledt_phaseCMag.setToolTip(_translate("MainWindow", "The magnitude of Phase-C input"))
self.ledt_phaseBPhi.setToolTip(_translate("MainWindow", "The initial phase of Phase-B input"))
self.lbl_phaseCMag.setText(_translate("MainWindow", "<html><head/><body><p>Mag<span style=\" vertical-align:sub;\">c</span><span style=\" font-style:normal;\"> : </span></p></body></html>"))
self.ledt_phaseCOmega.setToolTip(_translate("MainWindow", "The angular frequency of Phase-C input"))
self.lbl_phaseCOmega.setText(_translate("MainWindow", "<html><head/><body><p>ω<span style=\" vertical-align:sub;\">c</span><span style=\" font-style:normal;\"> :</span></p></body></html>"))
self.ledt_pllOmega.setToolTip(_translate("MainWindow", "The angular frequency of the PLL"))
self.lbl_pllOmega.setText(_translate("MainWindow", "<html><head/><body><p>ω<span style=\" vertical-align:sub;\">PLL</span><span style=\" font-style:normal;\"> :</span></p></body></html>"))
self.ledt_phaseCPhi.setToolTip(_translate("MainWindow", "The initial phase of Phase-C input"))
self.lbl_phaseCPhi.setText(_translate("MainWindow", "<html><head/><body><p>φ<span style=\" vertical-align:sub;\">c</span><span style=\" font-style:normal;\"> :</span></p></body></html>"))
self.ledt_pllPhi.setToolTip(_translate("MainWindow", "The initial phase of the PLL"))
self.ledt_time.setToolTip(_translate("MainWindow", "The total time"))
self.lbl_time.setText(_translate("MainWindow", "<html><head/><body><p>t<span style=\" font-style:normal;\"> : </span></p></body></html>"))
self.ledt_phaseBOmega.setToolTip(_translate("MainWindow", "The angular frequency of Phase-B input"))
self.lbl_phaseBPhi.setText(_translate("MainWindow", "<html><head/><body><p>φ<span style=\" vertical-align:sub;\">b</span><span style=\" font-style:normal;\"> :</span></p></body></html>"))
self.lbl_pllPhase.setText(_translate("MainWindow", "<html><head/><body><p>φ<span style=\" vertical-align:sub;\">PLL</span><span style=\" font-style:normal;\"> :</span></p></body></html>"))
self.lbl_phaseBOmega.setText(_translate("MainWindow", "<html><head/><body><p>ω<span style=\" vertical-align:sub;\">b</span><span style=\" font-style:normal;\"> :</span></p></body></html>"))
self.ledt_phaseBMag.setToolTip(_translate("MainWindow", "The magnitude of Phase-B input"))
self.lbl_phaseAOmega.setText(_translate("MainWindow", "<html><head/><body><p>ω<span style=\" vertical-align:sub;\">a</span><span style=\" font-style:normal;\"> :</span></p></body></html>"))
self.btn_update.setText(_translate("MainWindow", "Update"))
self.menu1.setTitle(_translate("MainWindow", "File"))
self.menuHelp.setTitle(_translate("MainWindow", "Help"))
self.file_saveData.setText(_translate("MainWindow", "Save Data"))
self.help_Documentation.setText(_translate("MainWindow", "Documentation"))
self.help_About.setText(_translate("MainWindow", "About"))
self.file_saveSetting.setText(_translate("MainWindow", "Save Setting"))
def test(self):
print('You clicked!')
import equations
if __name__ == "__main__":
    import sys

    # Stand-alone preview: host the generated UI inside a bare main window.
    app = QtWidgets.QApplication(sys.argv)
    main_window = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(main_window)
    main_window.show()
    sys.exit(app.exec_())
| StarcoderdataPython |
1696392 | <reponame>ryosuke0825/atcoder_python
# Read the number of switches and lights, then each light's connection
# spec (count followed by 1-indexed switch ids), then the target parities.
switch, light = map(int, input().split())
K = [list(map(int, input().split())) for _ in range(light)]
P = list(map(int, input().split()))

ans = 0
# Exhaustive search: try every on/off pattern of the switches as a bitmask.
for pattern in range(1 << switch):
    # For each light, count how many of its connected switches are on.
    on_counts = [
        sum(1 for s in spec[1:spec[0] + 1] if pattern >> (s - 1) & 1)
        for spec in K
    ]
    # Light k is lit iff its on-count parity equals P[k]; the pattern is
    # valid when every light is lit.
    if all(c % 2 == p for c, p in zip(on_counts, P)):
        ans += 1
print(ans)
| StarcoderdataPython |
3331474 | # converts yaml configuration file to json file
# usage: python <yaml_input_filename> <json_output_filename>
# called by build_resources.sh script
import json
import sys
import yaml
from yaml_tools import Loader
# Source YAML path and destination JSON path from the command line.
yaml_file = sys.argv[1]
json_file = sys.argv[2]

# Register the custom '!include' tag handler before parsing.
Loader.add_constructor('!include', Loader.include)

with open(yaml_file, 'r') as yaml_in, open(json_file, "w") as json_out:
    # Parse the YAML document and re-serialize it as JSON.
    json.dump(yaml.load(yaml_in, Loader=Loader), json_out)
| StarcoderdataPython |
3253084 | from .controller import Controller
from .view import View
from .module import Module
from abc import ABC
class Options(ABC):
    """Abstract bundle of the framework's core building blocks.

    Subclasses may override any of these aliases to swap in custom
    Controller / View / Module implementations.
    """

    Module = Module
    View = View
    Controller = Controller
| StarcoderdataPython |
1617796 | <reponame>collaborative-robotics/ABT
#!/usr/bin/python
#
#
# Revised to match fig BT-01164_Huge.png
# from BT-Hmm proposal May 18
import os as os
# b3 class modified by BH, local version in current dir
import b3 as b3 # behavior trees
import random as random
import math as m
import numpy as np
#import abt_constants
from abtclass import *
# BT and HMM parameters here
from model01 import *
def ABTtree(mod):
####################################################################################
##
# ABT for Peg-In-Hole Task
#
#
# Returns an ABT for the task to be modeled
#
demo_bt = b3.BehaviorTree()
LeafDebug = False
SolverDebug = False
#print outputs
#quit()
leafs = []
######## Step 1 Position Left Grasper over block
l1 = aug_leaf(1.0)
l1.Name = 'l1'
leafs.append(l1)
######## Step 2 Insert and Grasp block
# try 1
l2a1 = aug_leaf(0.9)
l2a1.Name = 'l2a1'
leafs.append(l2a1)
l2b1 = aug_leaf(0.95)
l2b1.Name = 'l2b1'
leafs.append(l2b1)
node_21 = b3.Sequence([l2a1,l2b1])
node_21.Name = 'Node 21'
# try 2
l2a2 = aug_leaf(0.9)
l2a2.Name = 'l2a2'
leafs.append(l2a2)
l2b2 = aug_leaf(0.95)
l2b2.Name = 'l2b2'
leafs.append(l2b2)
node_22 = b3.Sequence([l2a2,l2b2])
node_22.Name = 'Node 22'
node_2 = b3.Priority([node_21,node_22])
node_2.Name = 'Node 2'
########## Steps 3-5 Lift clear / reorient / move
l345 = aug_leaf(1.0)
l345.Name = 'l345'
leafs.append(l345)
########## Step 6 Insert Right grasper / grasp
# try 1
l6a1 = aug_leaf(0.6)
l6a1.Name = 'l6a1'
leafs.append(l6a1)
l6b1 = aug_leaf(0.75)
l6b1.Name = 'l6b1'
leafs.append(l6b1)
# try 2
l6a2 = aug_leaf(0.6)
l6a2.Name = 'l6a2'
leafs.append(l6a2)
l6b2 = aug_leaf(0.75)
l6b2.Name = 'l6b2'
leafs.append(l6b2)
node_61 = b3.Sequence([l6a1,l6b1])
node_62 = b3.Sequence([l6a2,l6b2])
node_6 = b3.Priority([node_61,node_62])
node_6.Name = "node 6"
######## Steps 7-9 Release Left / Reorient / Position
l789 = aug_leaf(1.0)
l789.Name = 'l789'
leafs.append(l789)
######## Step 10 Place on peg / Release / Clear
l10a1 = aug_leaf(0.9)
l10a1.Name = 'l10a1'
leafs.append(l10a1)
l10b1 = aug_leaf(0.95)
l10b1.Name = 'l10b1'
leafs.append(l10b1)
l10c1 = aug_leaf(0.8)
l10c1.Name = 'l10c1'
leafs.append(l10c1)
node_10 = b3.Sequence([l10a1,l10b1,l10c1])
node_10.Name = 'Node 10: Position/Release'
###### Top level sequence node
N1 = b3.Sequence([l1, node_2, l345, node_6, l789, node_10])
N1.Name = 'Sequencer Node'
N1.BHdebug = F
# make fake leafs for OutS and OutF
OS = aug_leaf(1.0)
OS.Name = 'OutS'
leafs.append(OS)
OF = aug_leaf(1.0)
OF.Name = 'OutF'
leafs.append(OF)
demo_bt.root = N1
demo_bt.root.ifroot = True # Implementation could be better
bb = b3.Blackboard()
##################################################################################################
## Set leaf params
# demo_bt.HMM_create() # Automated creation of ABT
# set up leaf probabilities
for l in leafs:
# output observeation mu, sigma
#print 'Setting Pobs for {:s} to ({:.2f},{:.2f})'.format(l.Name,outputs[l.Name],sig)
l.set_Obs_Density(mod.outputs[l.Name],sig)
# set up the Ps
#print 'setting PS for:', l.Name, PS[statenos[l.Name]]
l.set_Ps(mod.PS[mod.statenos[l.Name]])
#print ''
return [demo_bt, bb, leafs]
# demo_bt,bb = ABTtree()
# np.save("Original",A)
# np.save("Generated",demo_bt.htm)
| StarcoderdataPython |
125291 | import random
import re
import math
import numpy as np
from src import constants
from src.multi_agent.elements.camera import Camera, CameraRepresentation
from src.my_utils import constant_class
from src.my_utils.my_math.bound import bound_angle_btw_minus_pi_plus_pi, bound
from src.my_utils.my_math.line import distance_btw_two_point, Line
from src.my_utils.string_operations import parse_list
class MobileCameraType:
    """
    Enumeration of camera mobility levels, ordered by increasing
    degrees of freedom (dof).

    1) FIX      -- 1 dof: beta (zoom only)
    2) ROTATIVE -- 2 dof: beta, alpha
    3) RAIL     -- 3 dof: beta, alpha, (x, y) = f(s) constrained to a rail
    4) FREE     -- 4 dof: beta, alpha, x, y
    """
    FIX = 0       # zoom only
    ROTATIVE = 1  # zoom + rotation
    RAIL = 2      # zoom + rotation + 1D translation along a rail
    FREE = 3      # zoom + rotation + free 2D translation
class MobileCameraRepresentation(CameraRepresentation):
    """
    Class MobileCameraRepresentation.

    Description :
        Lightweight representation of a MobileCamera (adds the camera type
        and, for RAIL cameras, the rail trajectory to CameraRepresentation).

    :param
        8. (MobileCameraType) camera_type -- describes what features the camera has
        9. (Trajectory) trajectory        -- only used for RAIL cameras
    :attributes
        8. (MobileCameraType) camera_type -- describes what features the camera has
        9. (Trajectory) trajectory        -- only used for RAIL cameras
    """
    def __init__(self, id=None, xc=None, yc=None, alpha=None, beta=None, field_depth=None, type=None, color=None):
        # Explicit base-class call (not super()) because this class is also
        # used as a mixin base of MobileCamera's multiple inheritance.
        CameraRepresentation.__init__(self, id, xc, yc, alpha, beta, field_depth, color)
        self.camera_type = type
        # Default to an empty rail; overwritten by update_from_camera.
        self.trajectory = TrajectoryPlaner([])
    def update_from_camera(self, camera):
        # Copy the mobile-specific state (type + trajectory) from a real camera.
        super().update_from_camera(camera)
        self.camera_type = camera.camera_type
        # Re-wrap the via points so this representation owns its own planner.
        self.trajectory = TrajectoryPlaner(camera.trajectory.trajectory)
class MobileCamera(Camera, MobileCameraRepresentation):
    """
    Class MobileCamera.

    Description :
        A camera that, depending on its MobileCameraType, can zoom (beta),
        rotate (alpha) and translate (xc, yc) in the room. Speed limits and
        angular bounds are enforced by the zoom/rotate/move methods.

    :param
        (shared parameters: see Camera and MobileCameraRepresentation)
        11. (list[(float,float)]) trajectory   -- via points of the rail (RAIL type only)
        12. (float) vx_vy_min / vx_vy_max      -- [m/s] min/max translation speed
        13. (float) v_alpha_min / v_alpha_max  -- [rad/s] min/max rotation speed
        14. (float) delta_beta                 -- [rad] allowed deviation of beta around its default
        15. (float) v_beta_min / v_beta_max    -- [rad/s] min/max zoom speed
    """

    def __init__(self, id=None, xc=None, yc=None, alpha=None, beta=None, trajectory=None, field_depth=None, color=None,
                 t_add=None, t_del=None, type=None, vx_vy_min=None, vx_vy_max=None, v_alpha_min=None, v_alpha_max=None,
                 delta_beta=None, v_beta_min=None, v_beta_max=None):
        Camera.__init__(self, id, xc, yc, alpha, beta, field_depth, color, t_add, t_del)
        # MobileCameraRepresentation.__init__ rebuilds attributes_not_to_txt, so
        # the entries contributed by Camera.__init__ are saved and merged back.
        camera_attributes_not_to_txt = self.attributes_not_to_txt
        MobileCameraRepresentation.__init__(self, id, xc, yc, alpha, beta, field_depth, type, color)
        self.attributes_not_to_txt += [elem for elem in camera_attributes_not_to_txt if
                                       elem not in self.attributes_not_to_txt]
        self.attributes_not_to_txt += ["coeff_field", "coeff_std_position", "coeff_std_speed", "coeff_std_acc",
                                       "swipe_angle_direction", "swipe_delta_alpha", "last_swipe_direction_change",
                                       "dt_next_swipe_direction_change", "last_swipe_configuration",
                                       "last_swipe_position_change", "beta_min", "beta_max"]
        """Limit the variation"""
        self.vx_vy_min = vx_vy_min
        self.vx_vy_max = vx_vy_max
        self.v_alpha_min = v_alpha_min
        self.v_alpha_max = v_alpha_max
        self.v_beta_min = v_beta_min
        self.v_beta_max = v_beta_max
        self.delta_beta = delta_beta
        """Zoom: coefficients linking a beta variation to the field depth and
           to the measurement errors (see zoom())."""
        self.coeff_field = constants.COEFF_VARIATION_FROM_FIELD_DEPTH
        self.coeff_std_position = constants.COEFF_STD_VARIATION_MEASURMENT_ERROR_POSITION
        self.coeff_std_speed = constants.COEFF_STD_VARIATION_MEASURMENT_ERROR_SPEED
        self.coeff_std_acc = constants.COEFF_STD_VARIATION_MEASURMENT_ERROR_ACCELERATION
        """Trajectory (only meaningful for RAIL cameras, see default_parameters)"""
        self.trajectory = TrajectoryPlaner(trajectory)
        """Variables for the swipe"""
        self.swipe_angle_direction = 1
        self.swipe_delta_alpha = 0.2
        self.last_swipe_direction_change = constants.get_time()
        self.dt_next_swipe_direction_change = -10
        self.last_swipe_position_change = -10
        # Local import to avoid a circular dependency with the configuration module.
        from src.multi_agent.tools.configuration import Configuration
        self.last_swipe_configuration = Configuration(None, None, random.uniform(0, constants.ROOM_DIMENSION_X),
                                                      random.uniform(0, constants.ROOM_DIMENSION_Y), 1, 1,
                                                      self.field_depth,
                                                      False)
        self.default_parameters()

    def default_parameters(self):
        """
        :description
            Enforce the constraints implied by the camera type and
            pre-compute the zoom bounds [beta_min, beta_max].
        """
        if not self.camera_type == MobileCameraType.RAIL:
            # Only RAIL cameras follow a trajectory.
            self.trajectory = TrajectoryPlaner([])
        if self.camera_type == MobileCameraType.FIX or self.camera_type == MobileCameraType.ROTATIVE:
            # FIX and ROTATIVE cameras cannot translate.
            self.vx_vy_min = 0
            self.vx_vy_max = 0
        if self.camera_type == MobileCameraType.FIX:
            # FIX cameras cannot rotate either.
            self.v_alpha_min = 0
            self.v_alpha_max = 0
        if self.delta_beta is not None and self.beta is not None:
            self.beta_min = bound_angle_btw_minus_pi_plus_pi(self.beta - self.delta_beta)
            self.beta_max = bound_angle_btw_minus_pi_plus_pi(self.beta + self.delta_beta)
        else:
            # Zoom is unconstrained / undefined until beta and delta_beta are set.
            self.beta_min = None
            self.beta_max = None

    def angle_degToRad(self):
        """
        :description
            Transforms the angle attributes to radians, supposing they are in degrees.
        """
        super().angle_degToRad()
        if self.delta_beta is not None:
            self.delta_beta = math.radians(self.delta_beta)
        if self.beta_min is not None:
            self.beta_min = math.radians(self.beta_min)
        if self.beta_max is not None:
            self.beta_max = math.radians(self.beta_max)
        self.v_alpha_min = math.radians(self.v_alpha_min)
        self.v_alpha_max = math.radians(self.v_alpha_max)
        self.v_beta_min = math.radians(self.v_beta_min)
        self.v_beta_max = math.radians(self.v_beta_max)

    def angle_radToDeg(self):
        """
        :description
            Transforms the angle attributes to degrees, supposing they are in radians.
        """
        super().angle_radToDeg()
        if self.delta_beta is not None:
            self.delta_beta = math.degrees(self.delta_beta)
        if self.beta_min is not None:
            self.beta_min = math.degrees(self.beta_min)
        if self.beta_max is not None:
            self.beta_max = math.degrees(self.beta_max)
        self.v_alpha_min = math.degrees(self.v_alpha_min)
        self.v_alpha_max = math.degrees(self.v_alpha_max)
        self.v_beta_min = math.degrees(self.v_beta_min)
        self.v_beta_max = math.degrees(self.v_beta_max)

    def load_from_save_to_txt(self, s):
        """
        :description
            Load attributes from a txt string representation.
        :param
            1. (string) s -- string description of the object, see method save_to_txt.
        """
        super().load_from_save_to_txt(s)
        # The parent loader leaves `trajectory` as a raw via-point list; wrap it.
        self.trajectory = TrajectoryPlaner(self.trajectory)
        self.default_parameters()

    def my_rand(self, bound):
        """
        :description
            Random helper used in randomize.
        :param
            1. ((float,float)) bound -- limits of the random variable that is created.
        :return
            1. (float) random value btw bound[0] and bound[1]
        """
        return random.uniform(bound[0], bound[1])

    def randomize(self, camera_type, beta_bound, delta_beta_bound, field_bound, v_xy_min_bound, v_xy_max_bound,
                  v_alpha_min_bound, v_alpha_max_bound, v_beta_min_bound, v_beta_max_bound):
        """
        :description
            Set several attributes to random values bounded by the parameters.
        :param
            1. (MobileCameraType) camera_type            -- camera type
            2. ((int,int)) beta_bound - [degree]         -- random bound of beta
            3. ((int,int)) delta_beta_bound - [degree]   -- random bound of delta_beta
            4. ((int,int)) field_bound - [m]             -- random bound of field_depth
            5. ((int,int)) v_xy_min_bound - [m/s]        -- random bound of v_min on x and y axis
            6. ((int,int)) v_xy_max_bound - [m/s]        -- random bound of v_max on x and y axis
            7. ((int,int)) v_alpha_min_bound - [degree/s] -- random bound of v_alpha_min
            8. ((int,int)) v_alpha_max_bound - [degree/s] -- random bound of v_alpha_max
            9. ((int,int)) v_beta_min_bound - [degree/s]  -- random bound of v_beta_min
            10.((int,int)) v_beta_max_bound - [degree/s]  -- random bound of v_beta_max
        """
        self.xc = self.my_rand((0, constants.ROOM_DIMENSION_X))
        self.yc = self.my_rand((0, constants.ROOM_DIMENSION_Y))
        self.alpha = bound_angle_btw_minus_pi_plus_pi(self.my_rand((-math.pi, math.pi)))
        self.beta = self.my_rand(beta_bound)
        self.delta_beta = self.my_rand(delta_beta_bound)
        self.field_depth = self.my_rand(field_bound)
        self.t_add = [0]
        self.t_del = [1000]
        self.vx_vy_min = self.my_rand(v_xy_min_bound)
        self.vx_vy_max = self.my_rand(v_xy_max_bound)
        self.v_alpha_min = self.my_rand(v_alpha_min_bound)
        self.v_alpha_max = self.my_rand(v_alpha_max_bound)
        self.v_beta_min = self.my_rand(v_beta_min_bound)
        self.v_beta_max = self.my_rand(v_beta_max_bound)
        self.trajectory = TrajectoryPlaner([])
        self.camera_type = camera_type
        """Default values"""
        self.set_default_values(xc=self.xc, yc=self.yc, alpha=self.alpha, beta=self.beta, field_depth=self.field_depth)
        # Angles are still in degrees here; converted just below.
        self.beta_min = self.beta - self.delta_beta
        self.beta_max = self.beta + self.delta_beta
        self.angle_degToRad()

    def compute_field_depth_variation_for_a_new_beta(self, new_beta):
        """
        :description
            The field depth is inversely proportional to beta.
        :param
            1. (float) new_beta - [radians] -- new aperture angle of the camera
        :return
            1. (float) field_depth - [m] -- field depth corresponding to the new beta,
               bounded relative to the default field depth
        """
        delta = new_beta - self.beta
        field_depth = self.field_depth - delta * self.coeff_field
        field_depth = bound(field_depth, constants.AGENT_CAMERA_FIELD_MIN * self.default_field_depth,
                            constants.AGENT_CAMERA_FIELD_MAX * self.default_field_depth)
        return field_depth

    def zoom(self, speed, dt):
        """
        :description
            Models the zoom of a camera (modifies beta and field_depth).
            effects :
            zoom in / zoom out
            1) on the field geometry:
                a. Increase/decrease beta
                b. Decrease/increase the field depth
            2) on the precision
                c. Decrease/increase the std on the measure
            self.coeff_field -- value > 0, defines the proportionality btw a. and b.
            self.coeff_std_* -- value > 0, defines the proportionality btw a. and c.
        :param
            1. (float) speed -- going from -1 to 1, + to zoom out, - to zoom in
            2. (float) dt -- time step
        """
        sign = np.sign(speed)
        if self.beta_min <= self.beta <= self.beta_max:
            if speed == 0:
                delta = 0
            else:
                delta = sign * dt * (self.v_beta_min + math.fabs(speed) * (self.v_beta_max - self.v_beta_min))
        elif self.beta < self.beta_min or self.beta > self.beta_max:
            # Bug fix: the condition previously read `self.beta_max > 0`,
            # so an out-of-range beta above beta_max was never clamped.
            self.beta = bound(self.beta, self.beta_min, self.beta_max)
            delta = 0
        else:
            # Defensive branch (only reachable with NaN angles).
            delta = 0
            print("problem in beta target")
        self.field_depth = self.compute_field_depth_variation_for_a_new_beta(self.beta + delta)
        self.beta += delta
        if constants.ERROR_VARIATION_ZOOM:
            # Zooming in (delta < 0) increases precision, zooming out decreases it.
            self.std_measurement_error_position -= delta * self.coeff_std_position
            self.std_measurement_error_speed -= delta * self.coeff_std_speed
            self.std_measurement_error_acceleration -= delta * self.coeff_std_acc
            self.std_measurement_error_position = bound(self.std_measurement_error_position, 0,
                                                        self.std_measurement_error_position * 10)
            self.std_measurement_error_speed = bound(self.std_measurement_error_speed, 0,
                                                     self.std_measurement_error_speed * 10)
            self.std_measurement_error_acceleration = bound(self.std_measurement_error_acceleration, 0,
                                                            self.std_measurement_error_acceleration * 10)

    def rotate(self, speed, dt):
        """
        :description
            Rotate the camera in the room (modifies angle alpha).
        :param
            1. (float) speed -- going from -1 to 1
            2. (float) dt -- time step
        """
        if not self.camera_type == MobileCameraType.FIX:
            sign = np.sign(speed)
            if speed == 0:
                delta = 0
            else:
                delta = sign * dt * (self.v_alpha_min + math.fabs(speed) * (self.v_alpha_max - self.v_alpha_min))
            self.alpha += delta
            self.alpha = bound_angle_btw_minus_pi_plus_pi(self.alpha)

    def move(self, speed_x, speed_y, dt):
        """
        :description
            Move the camera in the room (modifies xc and yc).
            For RAIL cameras only speed_x is used (1D motion along the rail).
        :param
            1. (float) speed_x -- going from -1 to 1
            2. (float) speed_y -- going from -1 to 1
            3. (float) dt -- time step
        """
        sign_x = np.sign(speed_x)
        sign_y = np.sign(speed_y)
        if speed_x == 0:
            delta_x = 0
        else:
            delta_x = sign_x * dt * (self.vx_vy_min + math.fabs(speed_x) * (self.vx_vy_max - self.vx_vy_min))
        if speed_y == 0:
            delta_y = 0
        else:
            delta_y = sign_y * dt * (self.vx_vy_min + math.fabs(speed_y) * (self.vx_vy_max - self.vx_vy_min))
        if self.camera_type == MobileCameraType.RAIL:
            "On the rail it is only 1 dimension"
            delta = delta_x
            x_new, y_new = self.trajectory.move_on_trajectory(self.xc, self.yc, delta)
            self.xc = x_new
            self.yc = y_new
        elif self.camera_type == MobileCameraType.FREE:
            self.xc += delta_x
            self.yc += delta_y
            self.xc = bound(self.xc, self.xc_min, self.xc_max)
            self.yc = bound(self.yc, self.yc_min, self.yc_max)

    def set_configuration(self, configuration):
        """
        :description
            Set the camera pose from a Configuration object.
        :param
            1. (Configuration) configuration -- groups several parameters
        """
        self.xc = configuration.x
        self.yc = configuration.y
        self.alpha = configuration.alpha
        self.beta = configuration.beta
        self.field_depth = configuration.field_depth

    def get_edge_points_world_frame(self):
        """
        :description
            Compute the two extreme points of the field of view (at field-depth
            distance, at angles +/- beta/2 from the optical axis) in world frame.
        :return
            1. ((float,float)) min_edge_world_frame -- edge at -beta/2
            2. ((float,float)) max_edge_world_frame -- edge at +beta/2
        """
        # angles of edge of field of view in cam frame
        angle_min, angle_max = -self.beta / 2, self.beta / 2
        # distance of depth field along these angles
        min_edge = (self.field_depth * math.cos(angle_min), self.field_depth * math.sin(angle_min))
        # Bug fix: the x component of max_edge previously used sin instead of cos.
        max_edge = (self.field_depth * math.cos(angle_max), self.field_depth * math.sin(angle_max))
        min_edge_world_frame = self.coordinate_change_from_camera_frame_to_world_frame(min_edge[0], min_edge[1])
        max_edge_world_frame = self.coordinate_change_from_camera_frame_to_world_frame(max_edge[0], max_edge[1])
        return min_edge_world_frame, max_edge_world_frame
class TrajectoryPlaner:
    """
    Class TrajectoryPlaner.

    Description :
        Models the displacement of a camera along a rail defined by a
        polyline of via points.

    :param
        1. (list[(float,float)]) trajectory - [m] -- list of the via points
    :attributes
        1. (list[(float,float)]) trajectory - [m] -- list of the via points
        2. (int) trajectory_index -- index of the segment the camera is currently on
        3. (float) distance - [m] -- curvilinear distance travelled from the start
                                     of the rail (going forward increases it,
                                     going backwards decreases it => [0, length])
    """

    def __init__(self, trajectory):
        self.trajectory = trajectory
        self.trajectory_index = 0
        self.distance = 0

    def move_on_trajectory(self, x, y, delta):
        """
        :description
            Move a point that lies ON the trajectory by a signed distance
            delta along the rail, crossing segment boundaries if needed.
        :param
            1. (float) x - [m] -- x coordinate of the point in world frame
            2. (float) y - [m] -- y coordinate of the point in world frame
            3. (float) delta - [m] -- signed distance to travel
        :return
            1. (float) x - [m] -- new x coordinate of the point in world frame
            2. (float) y - [m] -- new y coordinate of the point in world frame
        """
        if len(self.trajectory) > 1:
            (xi, yi) = self.trajectory[self.trajectory_index]
            (xf, yf) = self.trajectory[self.trajectory_index + 1]
            (x_trajectory_frame, y_trajectory_frame) = self.from_world_frame_to_trajectory_frame(x, y)
            (xf_trajectory_frame, yf_trajectory_frame) = self.from_world_frame_to_trajectory_frame(xf, yf)
            "Sanity check: the point lies on the segment, so y should be ~0 in the segment frame"
            if abs(y_trajectory_frame) > 0.0001:
                # Bug fix: the message is now actually formatted (print was
                # given a %-style format string without the % operator).
                print("problème in move_on_trajectory y = %.2f" % y_trajectory_frame)
            "Variation"
            x_old = x_trajectory_frame
            x_trajectory_frame += delta
            if x_trajectory_frame > xf_trajectory_frame:
                "On the next segment"
                if self.trajectory_index < len(self.trajectory) - 2:
                    "Changing to next segment"
                    # Bug fix: only the part of delta covered on the current
                    # segment is added here; the recursive call accounts for
                    # the remainder (previously delta was counted twice).
                    self.distance += xf_trajectory_frame - x_old
                    self.trajectory_index += 1
                    delta_new = (x_trajectory_frame - xf_trajectory_frame)
                    return self.move_on_trajectory(xf, yf, delta_new)
                else:
                    "Reaching the end point"
                    (self.distance, y) = self.compute_distance_for_point_x_y(xf, yf, self.trajectory_index)
                    return (xf, yf)
            elif x_trajectory_frame < 0:
                "On the previous segment"
                if self.trajectory_index > 0:
                    "Changing to previous segment"
                    # Same double-counting fix for backward motion: x_old is
                    # the distance covered back to the segment start.
                    self.distance -= x_old
                    self.trajectory_index -= 1
                    delta_new = x_trajectory_frame
                    return self.move_on_trajectory(xi, yi, delta_new)
                else:
                    "Reaching start point"
                    self.distance = 0
                    return (xi, yi)
            else:
                "The delta is on the same segment"
                self.distance += delta
                return self.from_trajectory_frame_to_world_frame(x_trajectory_frame, y_trajectory_frame)
        else:
            return x, y

    def find_all_intersection(self, line):
        """
        :description
            Return every (x, y, segment_index) where `line` crosses one of
            the trajectory segments.
        """
        all_possible_intersection = []
        for index in range(len(self.trajectory) - 1):
            (xi, yi) = self.trajectory[index]
            (xf, yf) = self.trajectory[index + 1]
            segment = Line(xi, yi, xf, yf)
            x_intersection, y_intersection = segment.find_intersection_btw_two_line(line)
            x_intersection_in_trajectory_frame, y_intersection_in_trajectory_frame = self.from_world_frame_to_trajectory_frame_for_a_given_segment(
                x_intersection, y_intersection, index)
            xf_in_trajectory_frame, yf_in_trajectory_frame = self.from_world_frame_to_trajectory_frame_for_a_given_segment(
                xf, yf, index)
            if y_intersection_in_trajectory_frame > 0.001:
                print(y_intersection)
                print("problème")
            elif 0 < x_intersection_in_trajectory_frame < xf_in_trajectory_frame:
                # The intersection falls inside the segment bounds.
                all_possible_intersection.append((x_intersection, y_intersection, index))
        return all_possible_intersection

    def find_closest_intersection(self, line, index):
        """
        :description
            Recursively search, starting at segment `index`, for the segment
            that actually contains the intersection with `line`.
        :return
            (x, y, segment_index) of the intersection, clamped to the
            trajectory endpoints when the search runs off either end.
        """
        if index < 0:
            return (0, 0, 0)
        elif index >= len(self.trajectory) - 1:
            return (self.trajectory[-1][0], self.trajectory[-1][1], len(self.trajectory) - 1)
        else:
            (xi, yi) = self.trajectory[index]
            (xf, yf) = self.trajectory[index + 1]
            segment = Line(xi, yi, xf, yf)
            x_intersection, y_intersection = segment.find_intersection_btw_two_line(line)
            x_intersection_in_trajectory_frame, y_intersection_in_trajectory_frame = self.from_world_frame_to_trajectory_frame_for_a_given_segment(
                x_intersection, y_intersection, index)
            xf_in_trajectory_frame, yf_in_trajectory_frame = self.from_world_frame_to_trajectory_frame_for_a_given_segment(
                xf, yf, index)
            if y_intersection_in_trajectory_frame > 0.001:
                print("problème in find closest intersection")
                return (None, None, None)
            elif x_intersection_in_trajectory_frame > xf_in_trajectory_frame or x_intersection is None:
                return self.find_closest_intersection(line, index + 1)
            elif x_intersection_in_trajectory_frame < xi:
                # NOTE(review): this compares a trajectory-frame abscissa with
                # the world x of the segment start; 0 looks intended -- confirm.
                return self.find_closest_intersection(line, index - 1)
            else:
                return (x_intersection, y_intersection, index)

    def get_angle(self):
        """Return the orientation [rad] of the current trajectory segment."""
        (xi, yi) = self.trajectory[self.trajectory_index]
        (xf, yf) = self.trajectory[self.trajectory_index + 1]
        return math.atan2(yf - yi, xf - xi)

    def rotate_angle(self, angle, x, y):
        """Rotate (x, y) by -angle (world -> segment frame rotation)."""
        x_rotate = math.cos(angle) * x + math.sin(angle) * y
        y_rotate = -math.sin(angle) * x + math.cos(angle) * y
        return (x_rotate, y_rotate)

    def from_world_frame_to_trajectory_frame(self, x, y):
        """Express (x, y) in the frame attached to the current segment."""
        (xi, yi) = self.trajectory[self.trajectory_index]
        angle = self.get_angle()
        x_no_offset = x - xi
        y_no_offset = y - yi
        return self.rotate_angle(angle, x_no_offset, y_no_offset)

    def compute_distance_for_point_x_y(self, x, y, i_index):
        """
        :description
            Curvilinear distance from the start of the rail to the point
            (x, y) lying on segment `i_index`.
        :return
            (float, 0) -- the distance and a 0 placeholder (y in segment frame)
        """
        total = 0
        for n in range(i_index):
            (xi, yi) = self.trajectory[n]
            (xf, yf) = self.trajectory[n + 1]
            total += distance_btw_two_point(xi, yi, xf, yf)
        (xi, yi) = self.trajectory[i_index]
        total += distance_btw_two_point(xi, yi, x, y)
        return total, 0

    def from_world_frame_to_trajectory_frame_for_a_given_segment(self, x, y, index):
        """Express (x, y) in the frame attached to segment `index`."""
        (xi, yi) = self.trajectory[index]
        # Bug fix: the segment end must be relative to `index`,
        # not to the current trajectory_index.
        (xf, yf) = self.trajectory[index + 1]
        angle = math.atan2(yf - yi, xf - xi)
        x_no_offset = x - xi
        y_no_offset = y - yi
        return self.rotate_angle(angle, x_no_offset, y_no_offset)

    def from_trajectory_frame_to_world_frame(self, x, y):
        """Inverse of from_world_frame_to_trajectory_frame (current segment)."""
        (xi, yi) = self.trajectory[self.trajectory_index]
        angle = self.get_angle()
        (x_rotate, y_rotate) = self.rotate_angle(-angle, x, y)
        return (x_rotate + xi, y_rotate + yi)

    def __str__(self):
        return str(self.trajectory)
if __name__ == "__main__":
    # Quick manual smoke test of the representation / camera classes.
    # NOTE(review): TrajectoryPlaner([]) below is passed as the `color`
    # argument of MobileCameraRepresentation.__init__ -- confirm intended.
    camera = MobileCameraRepresentation(0, 1, 1, 1, 1, 5, MobileCameraType.FIX, TrajectoryPlaner([]))
    print(camera.attributes_to_string())
    print(camera.save_to_txt())
    # Camera with every other parameter left to None (defaults).
    camera = MobileCamera(delta_beta=20)
    print(camera.attributes_to_string())
    s = camera.save_to_txt()
    print(s)
| StarcoderdataPython |
1671687 | from keras.layers import Dense, LeakyReLU, Reshape, Conv2DTranspose, Conv2D, Dropout, Flatten
from keras.models import Sequential
from mido import MidiFile, MidiTrack, Message
from keras.optimizers import Adam
from tensorflow.python.ops.init_ops import RandomNormal
from scripts.DataLoader import DataLoader
from scripts.GAN import GAN
import numpy as np
import datetime
class SimpleCnnGAN(GAN):
    """GAN with a dense + transposed-convolution generator and a CNN
    discriminator. The generator maps a latent vector to a 128x128x1 matrix
    (a piano-roll-like grid) which generate_sample_to converts to MIDI.
    """

    def __init__(self, dataloader: DataLoader = None, g_lr=0.001, g_beta=0.999, d_lr=0.001, d_beta=0.999, latent_dim=256,
                 content_shape=(128, 128, 1)):
        GAN.__init__(self=self, data_generator=dataloader, name="simple-cnn-dcnn-GAN", latent_dim=latent_dim,
                     content_shape=content_shape)
        self.generator = self.build_generator()
        self.discriminator = self.build_discriminator(lr=d_lr, beta=d_beta)
        self.combined = self.combined_model(lr=g_lr, beta=g_beta)

    def build_generator(self):
        """Build the generator: latent vector -> 8x8x128 feature map,
        upsampled x2 four times to a 128x128x1 sigmoid output."""
        model = Sequential()
        # foundation for 8x8 image
        n_nodes = 128 * 8 * 8
        model.add(Dense(n_nodes, input_dim=self.latent_dim, kernel_initializer=RandomNormal(stddev=0.5)))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Reshape((8, 8, 128)))
        # upsample to 16X16
        model.add(
            Conv2DTranspose(64, (4, 4), strides=(2, 2), padding='same', kernel_initializer=RandomNormal(stddev=0.5)))
        model.add(LeakyReLU(alpha=0.2))
        # upsample to 32x32
        model.add(
            Conv2DTranspose(64, (4, 4), strides=(2, 2), padding='same', kernel_initializer=RandomNormal(stddev=0.5)))
        model.add(LeakyReLU(alpha=0.2))
        # upsample to 64x64
        model.add(
            Conv2DTranspose(64, (4, 4), strides=(2, 2), padding='same', kernel_initializer=RandomNormal(stddev=0.5)))
        model.add(LeakyReLU(alpha=0.2))
        # upsample to 128x128
        model.add(
            Conv2DTranspose(64, (4, 4), strides=(2, 2), padding='same', kernel_initializer=RandomNormal(stddev=0.5)))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Conv2D(1, (7, 7), activation='sigmoid', padding='same', kernel_initializer=RandomNormal(stddev=0.5)))
        return model

    def build_discriminator(self, lr=0.001, beta=0.999):
        """Build and compile the discriminator (3 strided conv blocks with
        dropout, then a sigmoid real/fake output)."""
        model = Sequential()
        model.add(Conv2D(128, (5, 5), strides=(2, 2), padding='same', input_shape=self.content_shape,
                         kernel_initializer=RandomNormal(stddev=0.5)))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.4))
        model.add(Conv2D(64, (3, 3), strides=(2, 2), padding='same', kernel_initializer=RandomNormal(stddev=0.5)))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.4))
        model.add(Conv2D(32, (3, 3), strides=(2, 2), padding='same', kernel_initializer=RandomNormal(stddev=0.5)))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.4))
        model.add(Flatten())
        model.add(Dense(1, activation='sigmoid', kernel_initializer=RandomNormal(stddev=0.5)))
        # compile model
        # Bug fix: beta_1 was previously set to `lr`, silently ignoring the
        # `beta` parameter.
        opt = Adam(lr=lr, beta_1=beta)
        model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
        return model

    def combined_model(self, lr=0.001, beta=0.999):
        """Stack generator + frozen discriminator for the generator update."""
        self.discriminator.trainable = False
        model = Sequential()
        model.add(self.generator)
        model.add(self.discriminator)
        opt = Adam(lr=lr, beta_1=beta)
        model.compile(loss='binary_crossentropy', optimizer=opt)
        return model

    def generate_sample(self, epoch):
        """Generate one MIDI sample into ../samples/ with a timestamped name."""
        path = "../samples/%s_%s_epoch_%d.mid" % (datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S"), self.name, epoch)
        self.generate_sample_to(path=path)

    def generate_sample_to(self, path):
        """Sample the generator and write the result to `path` as MIDI:
        each of the 128 rows becomes one note_on (the row's argmax pitch)."""
        generated = self.generator.predict(np.random.randn(1, self.latent_dim))
        generated = generated.reshape(128, 128)
        mid = MidiFile()
        track = MidiTrack()
        t = 0
        for note in generated:
            # int(): np.argmax returns a numpy integer; mido expects plain ints.
            max_index = int(np.argmax(note))
            msg = Message('note_on', note=max_index)
            t = t + 1
            msg.time = t
            msg.velocity = 67
            track.append(msg)
        mid.tracks.append(track)
        mid.save(path)
4803039 | <reponame>hotpxl/minpy-jit<filename>minpy/segment.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import ast
import types
import inspect
from collections import OrderedDict
from functools import wraps, reduce
from mxnet import nd
from . import core
# Counter used to give each generated fused function a unique name.
_segment_cnt = 0
# All callables exposed by mxnet.nd; membership here marks a function as atomic.
_ndarray_funcs = nd.__dict__.values()
def segment_reform(function_ast, print_new_segment):
# CR(haoran): I feel this class definition is largly
# unnecessary. The functionality is quite specific and doesn't
# offer much generalization. Besides, try use `map` and `reduce`
# to generalize on functions instead of data structure, i.e. try
# to write in a functional fashion.
# XCR(yutian): The main reason to abstract this class out is I
# think it may be helpful when the program needs to walk through
# the ast nodes for collecting some information/doing
# computationsm, which relies on its children's result. I think
# this scenario may be common.
# XCR(haoran): "it may be helpful when the program needs to walk
# through the ast nodes" -> that's why we have ast.NodeVisitor and
# ast.NodeTransformer
# if you look at it more closely, you will find that it is not at
# all generic. InfoCollector's visit functions are not uniform;
# they depend on the exact type of node that is being visited. and
# this logic is particular to segmentation logic. a good rule of
# thumb is DRY (take it with a grain of salt though)
# this is why i propose the separation of rules (when can a node
# be fused)
# WHILE you are separating the logic of aggregation of rules but
# not the rules itself
# i think it also deals with problems mentioned below (ln 120 and
# 133). i'm still working on it. i'm trying to work from your
# existing rules and see if i can come up with a SOUND (but not
# COMPLETE) version. you might go ahead working on the codegen
# part with minjie in the mean time. at least the code runs
# smoothly now
# XCR(yutian): the last part of my comment "for collecting some
# information/doing computationsm, which relies on its children's
# result" -> this is the purpose for infohelper, which is originally
# written within visit_Node function.
# Its genericness/versatility is heavily related to the type-tracing
# result.
# And you're right that it seems poor given current type info.
# We would have better judgement on that once your changes are done.
# If still poor, I would remove it.
# For the point of "not the rules itself", I think it's possible
# to add more rules by making classes like 'NodeRewriterRuleA',
# 'NodeRewriterRuleB'.
# I agree the elegant solution is to support incremental fusion.
# Will think about that.
# XCR(haoran): it's genericness is NOT related to type-tracing
# results. this is why: the segment function consists of two
# parts. 1. marking expressions (and subexpressions) as
# fusable. 2. actually fuse statements together mind 1 only
# involves expressions and 2 only statements
# part 2 is your "fusing consecutive assignments" currently.
# now for part 1, we have to investigate expressions. there are
# two steps in this: determine the type of expr(call or attr or
# binop), and then do stuff (if it's a call then check for
# atomicity if it's binop then blablabla). two steps together is
# called a rule. there are many rules: BinOp says either one is
# ndarary, call says it must be atomic and so on. (i'm approaching
# the problem with this dimension).
# the problem is InfoHelper is doing step 2 and InfoCollector is
# doing step 1. that's why i'm saying you are separating logic at
# the wrong place/dimension. as a result, you still have to write
# visit_BLABLA for every expression in InfoCollector, but this is
# already done by ast.NodeVisitor
# anyways i find this discussion very helpful. none of this
# thinking was formed before
class InfoHelper():
def __init__(self,
name,
init_value,
get_default_value,
update_func=None,
rewrite_cond=None):
self._name = name
self.init_value = init_value
self._get_default_value = get_default_value
self._update_func = update_func
self._rewrite_cond = rewrite_cond
def set(self, node, value):
setattr(node, self._name, value)
def get(self, node):
return getattr(node, self._name, self._get_default_value)
def do_rewrite(self, node):
return self._rewrite_cond(self.get(node))
def update(self, *values):
return self._update_func(*values)
class InfoCollector(ast.NodeTransformer):
def __init__(self, info_helper, funcs=[]):
super(InfoCollector, self).__init__()
self._info_helper = info_helper
self._funcs = {func.__name__: func for func in funcs}
def _collect_info(self, node, attrs=[], funcs=[]):
self.generic_visit(node)
info = self._info_helper.init_value
for name in attrs:
child = getattr(node, name)
if isinstance(child, list):
info = reduce(
self._info_helper.update,
[info] + list(map(self._info_helper.get, child)))
else:
info = self._info_helper.update(
info, self._info_helper.get(child))
info = reduce(
self._info_helper.update, [info] +
list(map(lambda name: self._funcs[name](node), funcs)))
self._info_helper.set(node, info)
return node
def visit_FunctionDef(self, node):
self.generic_visit(node)
return node
def visit_If(self, node):
self.generic_visit(node)
return node
def visit_Assign(self, node):
return self._collect_info(node, attrs=['value'])
def visit_Expr(self, node):
return self._collect_info(node, attrs=['value'])
def visit_Call(self, node):
# CR(haoran): atomic functions could also take lists or
# dictionaries of ndarrays, or read-only objects. how do
# you deal with that?
# On the other hand, prevent stuff like `atomic(3 if
# some_flag else 2, ...)` from fusing
# I don't have a solution but i feel there is a simple
# solution
#
# XCR(yutian): It doesn't check the input list yet.
#
# I don't have a simple solution yet.
#
# List several questions come to my mind:
# - how to get the elements, and their types, of dict/list?
# - how to figure which elements of the list/dict
# are created inside the function?
# - let's assume we could get the function definition,
# how to handle the recursive function call?
# - how to do above things in a simple way?
return self._collect_info(
node, attrs=['args'], funcs=['is_atomic_func'])
def visit_BinOp(self, node):
# CR(haoran): incorrect? numpy.ndarray + integer_literal
# is also a valid fusable operation
# XCR(yutian): fixed
# XCR(haoran): this is incorrect either! The correct
# condition is: either or both sides is NDArray. Not
# including the case where both sides are numbers
# XCR(yutian): Take a = b + (c + d), where b is NDArray
# and c,d are numeric.
# For Binary Op, we might allow both-numeric-value case
# and add the NDArray checking at the very end, e.g.
# the type of right operand of assignment operation
# in this case.
# This final checkingis missing at present. I'll work
# on this.
return self._collect_info(
node, attrs=['left', 'right'], funcs=['is_ndarray_or_numeric'])
def visit_Name(self, node):
return self._collect_info(node, funcs=['is_ndarray_or_numeric'])
def visit_Num(self, node):
return self._collect_info(node)
def visit_Attribute(self, node):
# Treat an attribute expr as a whole
return self._collect_info(node, funcs=['is_ndarray_or_numeric'])
def visit_Subscript(self, node):
# Treat a subscript expr as a whole
return self._collect_info(node, funcs=['is_ndarray_or_numeric'])
class NodeRewriter(ast.NodeTransformer):
def __init__(self, info_helper):
super(NodeRewriter, self).__init__()
self._info_helper = info_helper
def fuse_consecutive_assign_and_expr(self, stmts):
def make_ast_call(func_name, ins, outs):
return ast.Assign(
targets=[
ast.Tuple(
elts=[
ast.Name(id=e, ctx=ast.Store()) for e in outs
],
ctx=ast.Store())
],
value=ast.Call(
func=ast.Name(id=func_name, ctx=ast.Load()),
args=[ast.Name(id=e, ctx=ast.Load()) for e in ins],
keywords=[]))
def make_ast_function_def(func_name, stmts, ins, outs):
return ast.FunctionDef(
name=func_name,
args=ast.arguments(
args=[ast.arg(arg=e, annotation=None) for e in ins],
vararg=None,
kwonlyargs=[],
kw_defaults=[],
kwarg=None,
defaults=[]),
body=[
*stmts,
ast.Return(value=ast.Tuple(
elts=[
ast.Name(id=e, ctx=ast.Load()) for e in outs
],
ctx=ast.Load()))
],
decorator_list=[],
returns=None)
def fuse(nodes):
ins, outs = infer_inputs_and_outputs_given_nodes(nodes)
global _segment_cnt
if print_new_segment:
print('Segment {} info: '.format(_segment_cnt))
print('\tinput list: ', ins)
print('\toutput list: ', outs)
for i, e in enumerate(nodes):
print('\t ast node {} {}'.format(i, type(e).__name__))
print('\n')
func_name = '_fuse_func_{}'.format(_segment_cnt)
_segment_cnt += 1
func_def = make_ast_function_def(func_name, nodes, ins, outs)
call_node = make_ast_call(func_name, ins, outs)
new_funcdefs.append(func_def)
return call_node
def get_consecutive_assign_and_expr(stmts):
pos, leng = (0, 0)
while pos < len(stmts):
if (isinstance(stmts[pos], ast.Assign)
or isinstance(stmts[pos], ast.Expr)
) and self._info_helper.do_rewrite(stmts[pos]):
leng += 1
else:
if leng > 0:
yield (pos - leng, leng)
leng = 0
pos += 1
if leng > 0:
yield (pos - leng, leng)
removed_num = 0
for (st, leng) in get_consecutive_assign_and_expr(stmts):
st -= removed_num
stmts[st] = fuse(stmts[st:st + leng])
removed_num += leng - 1
del stmts[st + 1:st + leng]
def visit_FunctionDef(self, node):
if not jit_helper.get(node):
return node
self.generic_visit(node)
self.fuse_consecutive_assign_and_expr(node.body)
return node
def visit_If(self, node):
self.generic_visit(node)
self.fuse_consecutive_assign_and_expr(node.body)
self.fuse_consecutive_assign_and_expr(node.orelse)
return node
def is_ndarray_or_numeric(node):
return hasattr(node, 'type') and issubclass(node.type,
(nd.NDArray, int, float))
def is_atomic_func(node):
    """True when *node* resolves to a function marked atomic via the
    __minpy_atomic flag, or to one of the known ndarray functions."""
    if hasattr(node, 'ref') and hasattr(node.ref, '__dict__'):
        return node.ref.__dict__.get('__minpy_atomic',
                                     False) or node.ref in _ndarray_funcs
    else:
        return False
# Collect fusability info, then rewrite the function AST in place.
fuse_helper = InfoHelper('fuse_as_whole', True, False, lambda x, y: x & y,
                         lambda x: x)
jit_helper = InfoHelper('jit_func', True, False)
# CR(haoran): to my understanding, you need two kinds of information
# 1. recursive call of "fusability". this is basically what
#    "fuse_helper" is doing right now. this acts on expressions,
#    because only expressions can nest in itself
# 2. gather input and output arguments. this is what Rewriter is doing IIRC.
#    one implication is, this information is only gathered from
#    statements. BECAUSE, only assign statements can change the
#    environment/scope/$$\Gamma$$
#
# of course after two steps you then need to find consecutive
# STATEMENTS and merge them together (ps here is bug, i mentioned
# down in the old code)
#
# So my proposition is, instead of abstracting data structure into
# InfoCollector and Helper, write the NodeTransformer or Visitor
# as is and just hard code the logic. NodeTransformer/Visitor
# itself is already using visitor pattern so should largely handle
# the plumbing
collector = InfoCollector(
    fuse_helper, funcs=[is_ndarray_or_numeric, is_atomic_func])
collector.generic_visit(function_ast)
rewriter = NodeRewriter(fuse_helper)
new_funcdefs = []
# Mark the outermost function as jit so the rewriter processes it.
jit_helper.set(function_ast.body[0], jit_helper.init_value)
rewriter.generic_visit(function_ast)
# Prepend the generated segment functions to the rewritten body.
function_ast.body[0].body[0:0] = new_funcdefs
return function_ast
def segment(function_ast, print_new_segment):
    """Segment a function ast given its information collected in the runtime

    Parameters
    ------
    function_ast: ast.Module
        Typed AST wrapping the function to segment (function at body[0]).
    print_new_segment: bool
        Print each new segment's inputs, outputs and nodes if True.
    """
class AstTypeHelper:
    """The helper class that categorizes the AST classes by purposes"""
    # Node types that always terminate a segment: definitions, control
    # flow, imports and other statements that can never be fused.
    always_segment_types = (
        # Module, Function, Class Related
        ast.Module,
        ast.FunctionDef,
        ast.AsyncFunctionDef,
        ast.Lambda,
        ast.arguments,
        ast.ClassDef,
        # Control Flow
        ast.IfExp,
        ast.Return,
        ast.Delete,
        ast.For,
        ast.AsyncFor,
        ast.While,
        ast.If,
        # Special Ops
        ast.With,
        ast.AsyncWith,
        ast.Raise,
        ast.Try,
        ast.Assert,
        ast.Import,
        ast.ImportFrom,
        ast.keyword,
        # More Special Ops
        ast.Global,
        ast.Nonlocal,
        ast.Expr,
        ast.Pass,
        ast.Break,
        ast.Continue,
        ast.Str)
    # Check its or the computed result's type
    maybe_segment_types = (ast.Name, ast.BinOp, ast.UnaryOp, ast.Compare,
                           ast.BoolOp, ast.Attribute, ast.Subscript)
    # Types that are not doing any checking:
    never_segment_types = (
        # Assignment
        ast.Assign,
        ast.AugAssign,
        # Basic Data Structure
        ast.List,
        ast.Tuple,
        ast.Dict,
        ast.Set,
        ast.Num,
        # Context Related Function
        ast.Load,
        ast.Store,
        # Operators that are covered by BinOp and UnaryOp
        ast.operator,
        ast.boolop,
        ast.unaryop,
        ast.cmpop,
        # arg
        ast.arg)
    # TODO: handle fuse of expression
    never_fuse_types = (ast.arg, ast.Name, ast.expr, ast.expr_context,
                        ast.operator, ast.cmpop)

    @classmethod
    def fuse_check(cls, node):
        """Return True when *node* itself may live inside a fused segment."""
        if getattr(node, 'fused', False):
            return False
        if isinstance(node, cls.always_segment_types):
            return False
        if isinstance(node, ast.Call):
            # Calls are fusable only when they resolve to atomic functions.
            return is_atomic_func(node)
        if isinstance(node, cls.maybe_segment_types):
            # Expressions are fusable only when their inferred type is NDArray.
            return is_ndarray_type(node)
        if isinstance(node, cls.never_segment_types):
            return True
        raise TypeError('Type {} not handled yet in fuse check'.format(
            type(node).__name__))
def is_ndarray_type(node):
    """True when type inference tagged *node* as an NDArray."""
    return hasattr(node, 'type') and issubclass(node.type, nd.NDArray)
def is_atomic_func(node):
    """True when *node* resolves to a function marked atomic via the
    __minpy_atomic flag, or to any function exported by nd."""
    if hasattr(node, 'ref') and hasattr(node.ref, '__dict__'):
        return node.ref.__dict__.get(
            '__minpy_atomic', False) or node.ref in nd.__dict__.values()
    else:
        return False
def make_fuse_func_def(func_name, statements, ins, outs):
    """Wrap *statements* in a new FunctionDef called *func_name* that takes
    the names in *ins* as parameters and returns the names in *outs* as a
    tuple."""
    signature = ast.arguments(
        args=[ast.arg(arg=var, annotation=None) for var in ins],
        vararg=None,
        kwonlyargs=[],
        kw_defaults=[],
        kwarg=None,
        defaults=[])
    tuple_return = ast.Return(value=ast.Tuple(
        elts=[ast.Name(id=var, ctx=ast.Load()) for var in outs],
        ctx=ast.Load()))
    return ast.FunctionDef(
        name=func_name,
        args=signature,
        body=list(statements) + [tuple_return],
        decorator_list=[],
        returns=None)
def make_call(func_name, ins, outs):
    """Build the replacement statement `out1, ... = func_name(in1, ...)`
    that invokes a fused segment function."""
    target_tuple = ast.Tuple(
        elts=[ast.Name(id=var, ctx=ast.Store()) for var in outs],
        ctx=ast.Store())
    invocation = ast.Call(
        func=ast.Name(id=func_name, ctx=ast.Load()),
        args=[ast.Name(id=var, ctx=ast.Load()) for var in ins],
        keywords=[])
    return ast.Assign(targets=[target_tuple], value=invocation)
# Fused-function definitions collected here; inserted into the body later.
new_funcdefs = []
def fuse(nodes):
    """Fuse the node or the list of nodes

    Parameters
    ------
    nodes: the list of ast nodes
        The expression could be re-writen to 'run_segment(inputs)'
        The assignment statement should kept its outputs 'outputs = run_segments(inputs)'
    """
    # Do nothing on unit op
    if len(nodes) == 1 and isinstance(nodes[0],
                                      AstTypeHelper.never_fuse_types):
        return nodes[0]
    ins, outs = infer_inputs_and_outputs_given_nodes(nodes)
    global _segment_cnt
    if print_new_segment:
        print('Segment {} info: '.format(_segment_cnt))
        print('\tinput list: ', ins)
        print('\toutput list: ', outs)
        for i, e in enumerate(nodes):
            print('\t ast node {} {}'.format(i, type(e).__name__))
        print('\n')
    func_name = '_fuse_func_{}'.format(_segment_cnt)
    _segment_cnt += 1
    # TODO: handle subscript and attribute opertion
    func_def = make_fuse_func_def(func_name, nodes, ins, outs)
    call_node = make_call(func_name, ins, outs)
    # Mark both generated nodes so later passes never re-fuse them.
    func_def.fused = True
    call_node.fused = True
    new_funcdefs.append(func_def)
    return call_node
def get_consecutive_assignments(values, signs):
    """Yield (start, length) for each maximal run of fusable assignments:
    consecutive positions where the node is an ast.Assign and its sign
    flag is truthy."""
    run_start = None
    for idx, node in enumerate(values):
        if isinstance(node, ast.Assign) and signs[idx]:
            if run_start is None:
                run_start = idx
        elif run_start is not None:
            yield (run_start, idx - run_start)
            run_start = None
    # A run may extend to the end of the list.
    if run_start is not None:
        yield (run_start, len(values) - run_start)
def iterate_and_fuse(node):
    """
    Iterate over the AST by DFS and fuse the expr/stmt

    Parameters
    ------
    node
        The ast node

    Returns
    ------
    bool
        True, if all the children nodes can be fused. And fusion is done by some of its ancestor nodes
        False, otherwise
    """
    if getattr(node, 'fused', False):
        return False
    # atom_signs mirrors node's fields; True marks a fully-fusable child.
    atom_signs = {}
    fuse_entire_node = True
    for name, value in ast.iter_fields(node):
        if isinstance(value, ast.AST):
            # ad-hoc: skip func attr of ast.Call,
            # which could be an ast.Name with the function type
            if isinstance(node, ast.Call) and name == 'func':
                atom_signs[name] = False
                continue
            atom_signs[name] = iterate_and_fuse(value)
            fuse_entire_node &= atom_signs[name]
        elif isinstance(value, list):
            atom_signs[name] = []
            # ad-hoc: left operand of assign has no type
            if isinstance(node, ast.Assign) and name == 'targets':
                atom_signs[name] = [False] * len(value)
                continue
            for i, e in enumerate(value):
                if isinstance(e, ast.AST):
                    atom_signs[name].append(iterate_and_fuse(e))
                    fuse_entire_node &= atom_signs[name][i]
    if fuse_entire_node:
        fuse_entire_node &= AstTypeHelper.fuse_check(node)
    # If all child nodes are atomic and the operation itself is good, then
    # leave it to its parent
    if fuse_entire_node:
        return True
    # Rule 1: fuse consecutive atomic asssign statements in the body
    for attr in ['body', 'orelse']:
        if hasattr(node, attr):
            values = getattr(node, attr)
            signs = atom_signs[attr]
            removed_num = 0
            for (st, leng) in get_consecutive_assignments(values, signs):
                st -= removed_num
                values[st] = fuse(values[st:st + leng])
                signs[st] = False
                removed_num += leng - 1
                del values[st + 1:st + leng]
                del signs[st + 1:st + leng]
    # CR(haoran): seems you are compiling atomic functions
    # individually. Consider this case:
    #
    # a = a + 1 assignment
    # atomic_fn(a) atomic call
    # a = a + 1 assignment
    #
    # Would you be able to fuse all three together?
    # XCR(yutian): i thnk atomic call could be handled if the definition of rule 1 is extended,
    # i.e. fuse consecutive atomic assignments/expressions
    # XCR(haoran): tested. doesn't work.
    # i feel like you could separate treatment of stmt and expr
    # refer to the AST documentation page of difference between two
    # expr is almost always a nested structure and can be dealt with easily using recursion
    # stmt is only used at top-level (since we are only considering one level of function definition)
    # write function that handle a couple of cases of stmt, and then write another one to
    # handle expr cases
    # XCR(yutian): Missing visit_Expr caused the bug. Already fixed.
    # Rule 2: fuse each remaining fully-fusable child expression on its own.
    for name, value in ast.iter_fields(node):
        if isinstance(value, ast.AST) and (atom_signs[name]):
            new_value = fuse([value])
            setattr(node, name, new_value)
        elif isinstance(value, list):
            new_values = []
            for i, e in enumerate(value):
                if isinstance(e, ast.AST) and atom_signs[name][i]:
                    e = fuse([e])
                new_values.append(e)
            value[:] = new_values
    return False
# If the whole tree turned out fusable, wrap it as one final segment.
if iterate_and_fuse(function_ast):
    function_ast = fuse([function_ast])
# Insert new fuse function definitions to function body
function_ast.body[0].body[0:0] = new_funcdefs
return function_ast
def infer_inputs_and_outputs_given_nodes(nodes):
    """Given a/a list of ast-node, infer the input and output variables

    Parameters
    ----------
    nodes: a single node or a lsit of nodes

    Returns
    -------
    ins: the input variable names (list for list input, set otherwise)
    outs: the output variable names (list for list input, set otherwise)
    """

    def infer_inputs_and_outputs_given_node(node):
        """Given a ast-node, infer the input and output variables

        The node should be either assign-statement or expression

        Parameters
        ----------
        node: a single node

        Returns
        -------
        ins: the input variable names
        outs: the output variable names
        """
        if isinstance(node, ast.Assign):
            # get inputs from its value expression
            ins, _ = infer_inputs_and_outputs_given_node(node.value)
            # treat all the targets as outputs
            outs = collect_names_given_exprs(node.targets)
            return ins, outs
        elif isinstance(node, (ast.expr, ast.Expr)):
            # a bare expression reads names but defines none
            return collect_names_given_exprs(node), set([])
        else:
            raise TypeError(
                'Type {} not handled yet in inputs and outputs inference'.
                format(type(node).__name__))

    def collect_names_given_exprs(expr):
        """Given a ast-node, infer the input variables

        As expression cannot define a new variable, output is not inferred

        Parameters
        ----------
        expr: an expression

        Returns
        -------
        ins: the input variable names

        TODO:
        - handle the slice object
        - need to know the actual type of the left operand of attribute
        - if it's module or class, then return []
        """
        if isinstance(expr, list):
            return set.union(*map(collect_names_given_exprs,
                                  expr)) if expr else set()
        elif isinstance(expr, ast.Call):
            return collect_names_given_exprs(expr.args)
        elif isinstance(expr, ast.Expr):
            return collect_names_given_exprs(expr.value)
        elif isinstance(expr, ast.BinOp):
            return collect_names_given_exprs([expr.left, expr.right])
        elif isinstance(expr, ast.UnaryOp):
            return collect_names_given_exprs(expr.operand)
        elif isinstance(expr, ast.Tuple):
            return collect_names_given_exprs(expr.elts)
        elif isinstance(expr, ast.Attribute):
            # BUG FIX: ast.Attribute stores its object in the .value field;
            # there is no .expr attribute, so the old code always raised.
            # Assumption: left operand is a Name
            assert isinstance(expr.value, ast.Name)
            return set([expr.value.id + "." + expr.attr])
        elif isinstance(expr, ast.Subscript):
            # BUG FIX: ast.Subscript also stores its object in .value.
            # Assumption: left operand is a Name
            assert isinstance(expr.value, ast.Name)
            return set([expr.value.id + "_subscript_"])
        elif isinstance(expr, ast.Name):
            return set([expr.id])
        elif isinstance(expr, ast.Constant):
            # Python 3.8+ parses all literals to ast.Constant.
            return set()
        elif isinstance(expr, (ast.Num, ast.Str, ast.Bytes)):
            # Pre-3.8 literal node types, kept for compatibility.
            return set()
        raise TypeError('{} not handled yet in inference of inputs'.format(
            type(expr).__name__))

    if isinstance(nodes, list):
        ins = []
        outs = []
        for node in nodes:
            sub_ins, sub_outs = infer_inputs_and_outputs_given_node(node)
            # keep only inputs not produced by an earlier node in the list
            ins += [x for x in sub_ins if x not in outs]
            outs += sub_outs
        return [e for e in (set(ins))], [e for e in set(outs)]
    else:
        return infer_inputs_and_outputs_given_node(nodes)
| StarcoderdataPython |
149239 | <reponame>ma-compbio/MATCHA<gh_stars>10-100
import os
import time
import numpy as np
import networkx as nx
import random
from tqdm import tqdm
import torch
from concurrent.futures import as_completed, ProcessPoolExecutor
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device_ids = [0, 1]
class Graph():
    """Weighted-graph wrapper holding precomputed neighbor lists and
    weighted degrees, consumed by the node2vec transition-probability
    code below (via the module-global sG)."""

    def __init__(self, nx_G, p, q, is_directed=False):
        # p: node2vec return parameter; q: in-out parameter.
        self.G = nx_G
        self.is_directed = is_directed
        self.p = p
        self.q = q
        self.neighbors = []
        print("initialization")
        for i in range(len(nx_G.nodes())
                       ):  # actually nx_G.nodes() is already increasing order
            self.neighbors.append(sorted(nx_G.neighbors(i)))
        # degree[i] = sum of weights on edges incident to node i.
        self.degree = np.zeros((len(nx_G.nodes())))
        for i in range(len(nx_G.nodes())):
            self.degree[i] = np.sum([nx_G[i][nbr]['weight']
                                     for nbr in self.neighbors[i]])
        print(self.degree)
def get_alias_edge(src, dst):
    '''
    Get the alias edge setup lists for a given edge.

    Computes node2vec second-order transition probabilities for a walk
    arriving at *dst* from *src*, each damped by sqrt(degree) of the
    candidate next node, and compiles them into alias tables.
    Reads the shared graph from the module-global sG (set by
    preprocess_transition_probs) so it can run in worker processes.
    '''
    global sG
    G = sG.G
    p = sG.p
    q = sG.q
    unnormalized_probs = []
    for dst_nbr in sG.neighbors[dst]:
        if dst_nbr == src:
            # Stepping back to the previous node: weight divided by p.
            unnormalized_probs.append(
                (G[dst][dst_nbr]['weight'] / p) / np.sqrt(sG.degree[dst_nbr]))
            # unnormalized_probs.append((G[dst][dst_nbr]['weight'] / p))
        elif G.has_edge(dst_nbr, src):
            # Neighbor at distance 1 from src: unbiased weight.
            unnormalized_probs.append(
                (G[dst][dst_nbr]['weight']) /
                np.sqrt(
                    sG.degree[dst_nbr]))
            # unnormalized_probs.append((G[dst][dst_nbr]['weight']))
        else:
            # Moving outward (distance 2): weight divided by q.
            unnormalized_probs.append(
                (G[dst][dst_nbr]['weight'] / q) / np.sqrt(sG.degree[dst_nbr]))
            # unnormalized_probs.append((G[dst][dst_nbr]['weight'] / q))
    norm_const = sum(unnormalized_probs)
    normalized_probs = [
        float(u_prob) /
        norm_const for u_prob in unnormalized_probs]
    return alias_setup(normalized_probs)
def alias_some_edges(edges):
    """Build alias tables for a batch of edges, in both directions.

    Runs in worker processes; the graph is read from the global sG."""
    alias_edges = {}
    for edge in tqdm(edges):
        alias_edges[(edge[0], edge[1])] = get_alias_edge(edge[0], edge[1])
        alias_edges[(edge[1], edge[0])] = get_alias_edge(edge[1], edge[0])
    return alias_edges
def preprocess_transition_probs(sg):
    '''
    Preprocessing of transition probabilities for guiding the random walks.

    Fills sg.alias_nodes (first-step tables, one per node) and
    sg.alias_edges (second-order tables, one per directed edge), fanning
    the per-edge work out to a process pool.
    '''
    global sG
    sG = sg
    G = sG.G
    is_directed = sG.is_directed
    print("transition probs: ")
    alias_nodes = {}
    for node in tqdm(G.nodes()):
        # First-step probabilities: edge weight damped by sqrt(degree).
        unnormalized_probs = [
            G[node][nbr]['weight'] /
            np.sqrt(
                sG.degree[nbr]) for nbr in sG.neighbors[node]]
        # unnormalized_probs = [G[node][nbr]['weight'] for nbr in sG.neighbors[node]]
        norm_const = sum(unnormalized_probs)
        normalized_probs = [float(u_prob) /
                            norm_const for u_prob in unnormalized_probs]
        alias_nodes[node] = alias_setup(normalized_probs)
    # NOTE(review): triads is never populated or read — dead variable.
    triads = {}
    # Parallel alias edges
    print("alias edges: ")
    edges = G.edges()
    threads_num = 100
    pool = ProcessPoolExecutor(max_workers=threads_num)
    process_list = []
    # NOTE(review): np.array_split on a networkx EdgeView — confirm this
    # works on the installed networkx/numpy versions (a list() conversion
    # may be required).
    edges = np.array_split(edges, threads_num * 2)
    for e in edges:
        process_list.append(pool.submit(alias_some_edges, e))
    alias_edges = {}
    for p in as_completed(process_list):
        alias_t = p.result()
        alias_edges.update(alias_t)
    pool.shutdown(wait=True)
    sG.alias_nodes = alias_nodes
    sG.alias_edges = alias_edges
def alias_setup(probs):
    '''
    Compute utility lists for non-uniform sampling from discrete distributions.
    Refer to https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
    for details

    Parameters
    ----------
    probs: sequence of float
        Normalized probabilities (summing to 1).

    Returns
    -------
    (J, q): alias index table and acceptance-probability table, to be
    consumed by alias_draw.
    '''
    K = len(probs)
    q = np.zeros(K)
    # BUG FIX: np.int was removed in NumPy 1.24; it was an alias of the
    # builtin int, so dtype=int preserves the exact original behavior.
    J = np.zeros(K, dtype=int)
    # Scale each probability by K and partition columns into those that
    # under-fill and over-fill a unit bucket.
    smaller = []
    larger = []
    for kk, prob in enumerate(probs):
        q[kk] = K * prob
        if q[kk] < 1.0:
            smaller.append(kk)
        else:
            larger.append(kk)
    # Repeatedly top up an under-full bucket from an over-full one,
    # recording the donor as the alias.
    while len(smaller) > 0 and len(larger) > 0:
        small = smaller.pop()
        large = larger.pop()
        J[small] = large
        q[large] = q[large] + q[small] - 1.0
        if q[large] < 1.0:
            smaller.append(large)
        else:
            larger.append(large)
    return J, q
def alias_draw(J, q):
    '''
    Draw sample from a non-uniform discrete distribution using alias sampling.

    (J, q) are the tables produced by alias_setup.
    '''
    n = len(J)
    # Pick a uniformly random column.
    idx = int(np.floor(np.random.rand() * n))
    # Accept the column itself with probability q[idx]; otherwise take
    # its alias.
    return idx if np.random.rand() < q[idx] else J[idx]
def add_weight(G, u, v):
    """Increment the 'weight' attribute of edge (u, v), starting at 1
    when the edge has no weight yet."""
    edge_data = G[u][v]
    edge_data['weight'] = edge_data.get('weight', 0) + 1
def node2vec_walk(sG, walk_length, start_node):
    '''
    Simulate a random walk starting from start node.

    The first step samples from the per-node alias table; later steps
    sample from the (prev, cur) edge table.  A node with no neighbors
    repeats itself until the walk reaches walk_length.
    '''
    alias_nodes = sG.alias_nodes
    alias_edges = sG.alias_edges
    walk = [start_node]
    while len(walk) < walk_length:
        cur = walk[-1]
        cur_nbrs = sG.neighbors[cur]
        if len(cur_nbrs) > 0:
            if len(walk) == 1:
                walk.append(cur_nbrs[alias_draw(
                    alias_nodes[cur][0], alias_nodes[cur][1])])
            else:
                prev = walk[-2]
                next_n = cur_nbrs[alias_draw(alias_edges[(prev, cur)][0],
                                             alias_edges[(prev, cur)][1])]
                walk.append(next_n)
        else:
            # Dead end: pad the walk with the current node.
            walk.append(cur)
            continue
    return walk
def simulate_walks(sG, num_walks, walk_length):
    '''
    Repeatedly simulate random walks from each node.

    Returns a shuffled list of walks; any walk not exactly walk_length
    long is dropped (node2vec_walk pads, so normally none are).
    '''
    print("sample walks:")
    walks = []
    nodes = sG.G.nodes()
    for node in tqdm(nodes):
        for walk_iter in range(num_walks):
            temp = node2vec_walk(sG, walk_length, node)
            if len(temp) == walk_length:
                walks.append(temp)
    random.shuffle(walks)
    return walks
def read_graph(num, hyperedge_list):
    '''
    Transfer the hyperedge to pairwise edge & Reads the input network in networkx.

    Expands every hyperedge into a clique of pairwise edges, then counts
    each pair's hyperedge co-occurrences into the 'weight' attribute.
    *num* holds per-type node counts; sum(num) is the total node count.
    '''
    G = nx.Graph()
    tot = sum(num)
    G.add_nodes_from(range(tot))
    for ee in tqdm(hyperedge_list):
        e = ee
        edges_to_add = []
        for i in range(len(e)):
            for j in range(i + 1, len(e)):
                edges_to_add.append((e[i], e[j]))
        G.add_edges_from(edges_to_add)
        # Second pass: count this hyperedge's contribution to each pair.
        for i in range(len(e)):
            for j in range(i + 1, len(e)):
                add_weight(G, e[i], e[j])
    G = G.to_undirected()
    return G
def toint(hyperedge_list):
    """Cast every hyperedge's node ids to integers (truncating floats)
    and stack the result into a numpy array."""
    converted = [hyperedge.astype('int') for hyperedge in hyperedge_list]
    return np.array(converted)
def random_walk(args, num, hyperedge_list):
    '''
    Learn embeddings by optimizing the Skipgram objective using SGD.

    Expands hyperedges into a weighted pairwise graph, runs node2vec
    walks over it and caches them under ../walks/<data>/.  Returns the
    path of the walks file; an existing cache is reused unless args.TRY
    is set.
    '''
    # p, q = 1, 1
    # num_walks, walk_length, window_size = 10, 80, 10
    hyperedge_list = toint(hyperedge_list)
    p, q = args.p, args.q
    num_walks, walk_length, window_size = args.num_walks, args.walk_length, args.window_size
    # emb_save_path = '../embs/{}/p{}_q{}_r{}_l{}_k{}_i{}.embs'.format(args.data, p, q, num_walks, walk_length, window_size, iteration)
    if not os.path.exists("../walks/{}/".format(args.data)):
        os.mkdir("../walks/{}/".format(args.data))
    walks_save_path = '../walks/{}/p{}_q{}_r{}_l{}_walks.txt'.format(
        args.data, p, q, num_walks, walk_length)
    start = time.time()
    if not args.TRY and os.path.exists(walks_save_path):
        return walks_save_path
    else:
        # num is a torch tensor here (hence .numpy()).
        nx_G = read_graph(num.numpy(), hyperedge_list)
        G = Graph(nx_G, p, q)
        preprocess_transition_probs(G)
        walks = simulate_walks(G, num_walks, walk_length)
        walks = np.array(walks)
        print(walks.shape)
        np.savetxt(walks_save_path, walks, fmt="%d", delimiter=" ")
        #np.save(walks_save_path, walks)
    print("RandomWalk running time: %.2lf" % (time.time() - start))
    return walks_save_path
| StarcoderdataPython |
54388 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 19 08:43:36 2020
@author: HP
"""
#dataFrameName[column_name].dtype
import statistics
import pandas as pd
import numpy as np
import random
#dataset=pd.read_csv(r"C:\Users\HP\Desktop\pp.csv")
def missing(input_file):
    """Fill missing values in the CSV at *input_file* and print the result.

    Object (string) columns are filled with their mode, or with a random
    observed value when the frequency counts are all tied; numeric columns
    are filled with their mean (rounded to 1 decimal).  The DataFrame is
    modified locally and printed — nothing is returned or written back.
    """
    dataset=pd.read_csv(input_file)
    #row_count=dataset.shape[0]
    column_count=dataset.shape[1]
    column_type=dataset.dtypes
    print(dataset.head(10))
    #print(column_type)
    for i in range(0,column_count) :
        # Check if the dataset has object type column
        if(column_type[i]=='object'):
            # Check if the column contains any nulls at all
            if(dataset.iloc[:,i].isnull().sum()):
                list1=dataset.iloc[:,i].value_counts()
                # Check if the freq counts single out a mode (not all tied)
                if(len(set(list1))!=1):
                    # NOTE(review): statistics.mode raises StatisticsError
                    # on multimodal data before Python 3.8 — confirm the
                    # target Python version.
                    mode=statistics.mode(dataset.iloc[:,i])
                    dataset.iloc[:,i]=dataset.iloc[:,i].fillna(mode)
                # If the freq count has no mode then place a random value
                else:
                    # candidates are the column's non-null values only
                    data_list = list(dataset.iloc[:,i])
                    data_list = [x for x in data_list if pd.notnull(x)]
                    random_value=random.choice(data_list)
                    dataset.iloc[:,i]=dataset.iloc[:,i].fillna(random_value)
        else:
            # numeric column: impute the mean, then round to 1 decimal
            mean=np.mean(dataset.iloc[:,i])
            dataset.iloc[:,i]=dataset.iloc[:,i].fillna(mean)
            dataset.iloc[:,i]=round(dataset.iloc[:,i],1)
    print(dataset.head(10))
    print("missing data has been settled")
import sys


def main():
    """Command-line entry point: fill missing values in the CSV at argv[1]."""
    # BUG FIX: missing() expects a CSV *path* (it calls pd.read_csv
    # itself); the original passed pd.read_csv(...).values — a numpy
    # array — which pd.read_csv cannot open.
    missing(sys.argv[1])


if __name__ == "__main__":
    main()
1798456 | <gh_stars>0
from yql import YQL
from yql import Filter

# Sample filters over stock symbols and a historical date range.
f1 = Filter('symbol', 'in', ['YHOO', 'GOOG', 'AAPL'])
f2 = Filter('symbol', 'eq', 'YHOO')
f3 = Filter('startDate', 'eq', '2014-02-11')
f4 = Filter('endDate', 'eq', '2014-02-18')

f1_expected = 'symbol IN ("YHOO", "GOOG", "AAPL")'
f2_expected = 'symbol = "YHOO"'
f3_expected = 'startDate = "2014-02-11"'
f4_expected = 'endDate = "2014-02-18"'

# Single-filter string rendering.
assert str(f1) == f1_expected
assert str(f2) == f2_expected
# Filter composition: `+` combines with AND, `*` combines with OR.
assert str(f1 + f2) == '{} and {}'.format(f1_expected, f2_expected)
assert str(f1 * f2) == '{} or {}'.format(f1_expected, f2_expected)
assert str(f1 + f2 + f1 + f2) == '{} and {} and {} and {}'.format(
    f1_expected, f2_expected, f1_expected, f2_expected)
assert str(f1 + f2 * f1) == '{} and {} or {}'.format(
    f1_expected, f2_expected, f1_expected)
assert str(f2 + f3 + f4) == '{} and {} and {}'.format(
    f2_expected, f3_expected, f4_expected)

# Live queries against YQL (network access required).
resp1 = YQL('yahoo.finance.quote').select().where(f1).run()
resp2 = YQL('yahoo.finance.historicaldata').select().where(str(f2+f3+f4)).run()
assert resp1['query']['count'] == 3
assert resp2['query']['count'] == 5
| StarcoderdataPython |
145886 | # SdsViewProperty.py
#
# Copyright (C) 2018 OSIsoft, LLC. All rights reserved.
#
# THIS SOFTWARE CONTAINS CONFIDENTIAL INFORMATION AND TRADE SECRETS OF
# OSIsoft, LLC. USE, DISCLOSURE, OR REPRODUCTION IS PROHIBITED WITHOUT
# THE PRIOR EXPRESS WRITTEN PERMISSION OF OSIsoft, LLC.
#
# RESTRICTED RIGHTS LEGEND
# Use, duplication, or disclosure by the Government is subject to restrictions
# as set forth in subparagraph (c)(1)(ii) of the Rights in Technical Data and
# Computer Software clause at DFARS 252.227.7013
#
# OSIsoft, LLC
# 1600 Alvarado St, San Leandro, CA 94577
import json
import SdsView
class SdsViewProperty(object):
    """Sds View Property definition: maps a source property id to a
    target property id, optionally carrying a nested SdsView."""

    @property
    def SourceId(self):
        """Id of the source property."""
        return self.__sourceId

    @SourceId.setter
    def SourceId(self, source_id):
        # FIX: parameter renamed from `id` (shadowed the builtin and was
        # misleading); behavior unchanged.
        self.__sourceId = source_id

    @property
    def TargetId(self):
        """Id of the target property."""
        return self.__targetId

    @TargetId.setter
    def TargetId(self, target_id):
        # FIX: parameter renamed from `name`, which did not describe the value.
        self.__targetId = target_id

    @property
    def SdsView(self):
        """Optional nested SdsView describing the mapped property."""
        return self.__sdsView

    @SdsView.setter
    def SdsView(self, sds_view):
        # FIX: parameter renamed from `description`, which did not
        # describe the value.
        self.__sdsView = sds_view

    def toJson(self):
        """Serialize this property to a JSON string."""
        return json.dumps(self.toDictionary())

    def toDictionary(self):
        """Serialize to a dict; SdsView is included only when it was set."""
        # required properties
        dictionary = {'SourceId': self.SourceId, 'TargetId': self.TargetId}
        # hasattr triggers the property getter, which raises (-> False)
        # until the setter has stored the private attribute.
        if hasattr(self, 'SdsView'):
            dictionary['SdsView'] = self.SdsView.toDictionary()
        return dictionary

    @staticmethod
    def fromJson(jsonObj):
        """Build an SdsViewProperty from an already-parsed JSON dict
        (despite the name, this expects a dict, not a JSON string)."""
        return SdsViewProperty.fromDictionary(jsonObj)

    @staticmethod
    def fromDictionary(content):
        """Build an SdsViewProperty from a dict; unknown keys are ignored
        and an empty dict yields an empty instance."""
        viewProperty = SdsViewProperty()
        if len(content) == 0:
            return viewProperty
        if 'SourceId' in content:
            viewProperty.SourceId = content['SourceId']
        if 'TargetId' in content:
            viewProperty.TargetId = content['TargetId']
        if 'SdsView' in content:
            # NOTE(review): the module was imported as `import SdsView`,
            # so this calls a module-level fromDictionary — confirm it
            # exists (it may need to be SdsView.SdsView.fromDictionary).
            viewProperty.SdsView = SdsView.fromDictionary(content['SdsView'])
        return viewProperty
| StarcoderdataPython |
18833 | <filename>src/fireo/utils/utils.py<gh_stars>0
import re
from google.cloud import firestore
def collection_name(model):
    """Convert a CamelCase model name to its snake_case collection name."""
    snake = re.sub('(?!^)([A-Z]+)', r'_\1', model)
    return snake.lower()
def ref_path(key):
    """Split a document key like 'col/doc/sub' into its path segments."""
    segments = key.split('/')
    return segments
def collection_path(key):
    """Drop the final segment of *key*, giving the parent collection path."""
    parts = key.split('/')
    return '/'.join(parts[:-1])
def get_parent(key):
    """Return the parent collection path of *key*: everything before the
    last '/'-separated segment."""
    return '/'.join(key.split('/')[:-1])
def get_parent_doc(key):
    """Return the grandparent document path of *key*: drops the last two
    segments (the document id and its collection)."""
    segments = key.split('/')
    return '/'.join(segments[:-2])
def get_id(key):
    """Return the final path segment (the document id) of *key*, or None
    when *key* has no split method (e.g. it is None)."""
    try:
        segments = key.split('/')
    except AttributeError:
        return None
    return segments[-1]
def GeoPoint(latitude: float, longitude: float):
    """Construct a Firestore GeoPoint from latitude/longitude in degrees."""
    return firestore.GeoPoint(latitude, longitude)
def get_nested(dict, *args):
    """Walk *dict* through the key path *args* and return the value found,
    or None when any step is missing or there are no keys.

    Example: get_nested(doc, 'a', 'b') -> doc['a']['b'] (or None).

    Note: the parameter shadows the builtin `dict`; the name is kept for
    interface compatibility.
    """
    if args and dict:
        # BUG FIX: the original tested the key's truthiness and skipped
        # lookup for falsy keys (0, '', False), wrongly returning None
        # for perfectly valid keys.
        value = dict.get(args[0])
        return value if len(args) == 1 else get_nested(value, *args[1:])
1793255 | # Script written by <NAME>
# Last Update: November 23, 2020
# License: MIT
from pathlib import Path
import logging
import os
import subprocess
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import requests
log = logging.getLogger()
esp32 = "http://board_ip"
# Original Source: https://www.peterbe.com/plog/best-practice-with-retries-with-requests
# Pulled function to handle connection errors/timeouts
# Original Source: https://www.peterbe.com/plog/best-practice-with-retries-with-requests
# Pulled function to handle connection errors/timeouts
def requests_retry_session(
        retries=3,
        backoff_factor=0.3,
        status_forcelist=(500, 502, 504),
        session=None,
):
    """Return a requests.Session that retries transient failures.

    Retries up to *retries* times with exponential backoff on connection
    errors, read errors and the HTTP statuses in *status_forcelist*,
    for both http and https.  An existing *session* may be passed in to
    have the retry adapters mounted on it.
    """
    session = session or requests.Session()
    retry = Retry(
        total=retries,
        read=retries,
        connect=retries,
        backoff_factor=backoff_factor,
        status_forcelist=status_forcelist,
    )
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    return session
# Get the matching process: a running zoom.us process whose args carry a
# 10-digit -key value indicates an active meeting.
p1 = subprocess.Popen(['ps', 'x'], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "-i", "zoom.us"], stdin=p1.stdout, stdout=subprocess.PIPE)
p3 = subprocess.Popen(
    ["grep", "-E", "\-key [0-9]{10,10}"], stdin=p2.stdout, stdout=subprocess.PIPE)
p1.stdout.close()
p2.stdout.close()
output = p3.communicate()[0]
if output:
    args = output.decode().split()
    for item in args:
        if item == "-pid":
            # index(item) - len(args) + 1 is the negative index of the
            # token AFTER "-pid", i.e. the pid value itself.
            pid = args[args.index(item)-len(args) + 1]
    # Attempt to open the crashlog corresponding to the PID of the process
    logfile = Path.joinpath(Path.home(), "Library",
                            "Logs", "zoom.us", "crashlog", f"{pid}.log")
    if os.path.exists(logfile):
        meeting_id = "unknown"
        logdata = open(logfile, 'r').readlines()
        logdata.reverse()
        # Parse through the log and find the most recent meeting-id
        for line in logdata:
            try:
                key, value = line.split(":", 1)
                if key == "meeting-id":
                    meeting_id = value
                    break
            except ValueError:
                pass
        # NOTE(review): this prints `value` (whatever the last split
        # produced) rather than `meeting_id` — confirm intent; it also
        # raises NameError if the log is empty.
        print(f"Zoom Meeting # {value}")
    else:
        # If the log doesn't exist, just use the key
        code = output.split()[-1].decode()
        print("Zoom Meeting # ", str(code))
    # Push "meeting on" to the ESP32 status board (best effort, logged).
    try:
        requests_retry_session().get(f"{esp32}/meeting/on")
    except Exception as e:
        log.error(f"Unable to set meeting status as on. Reason was {e}")
else:
    # No matching process: mark the board as available.
    try:
        requests_retry_session().get(f"{esp32}/meeting/off")
    except Exception as e:
        log.error(f"Unable to set meeting status as off. Reason was {e}")
    print("Avail.")
1683620 | """
3D plotting
============
Demo 3D plotting with matplotlib and style the figure.
"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
ax = plt.gca(projection='3d')
X, Y, Z = axes3d.get_test_data(0.05)
cset = ax.contourf(X, Y, Z)
ax.clabel(cset, fontsize=9, inline=1)
plt.xticks(())
plt.yticks(())
ax.set_zticks(())
ax.text2D(-0.05, 1.05, " 3D plots \n\n",
horizontalalignment='left',
verticalalignment='top',
family='Lint McCree Intl BB',
size='x-large',
bbox=dict(facecolor='white', alpha=1.0, width=350,height=60),
transform=plt.gca().transAxes)
ax.text2D(-0.05, .975, " Plot 2D or 3D data",
horizontalalignment='left',
verticalalignment='top',
family='Lint McCree Intl BB',
size='medium',
transform=plt.gca().transAxes)
plt.show()
| StarcoderdataPython |
1628646 | import pymysql
from threading import Thread
from sys import exit
class Database(object):
    """Thin pymysql wrapper around the crawler's queue/keywords tables."""

    def __init__(self, user, password, database):
        """Connect to MySQL at 127.0.0.1:3306 and open a cursor.

        Exits the process when the connection cannot be established.
        """
        try:
            self.db = pymysql.connect(
                host="127.0.0.1",
                port=3306,
                user=user,
                password=password,
                db=database
            )
            self.cursor = self.db.cursor()
        except Exception as e:
            print("Error 0x01:")
            print(e)
            exit()

    def fetch(self, number):
        """Return up to *number* visited-but-unindexed queue rows (ID, url)."""
        # SECURITY FIX: never splice raw values into SQL.  LIMIT cannot be
        # bound as a quoted parameter in MySQL, so coerce it to int instead.
        self.cursor.execute(
            "SELECT ID, url FROM queue WHERE visited = 1 AND indexed = 0 "
            "ORDER BY ID DESC LIMIT %d;" % int(number))
        return self.cursor.fetchall()

    def update(self, id):
        """Mark the queue row *id* as indexed."""
        try:
            # SECURITY FIX: parameterized query instead of concatenation.
            self.cursor.execute(
                "UPDATE queue SET indexed = '1' WHERE ID = %s;", (id,))
        except Exception as e:
            print("Error 0x02:")
            print(e)

    def addKeywords(self, id_url, keywords):
        """Insert one keywords row per keyword for page *id_url*, then commit."""
        try:
            for keyword in keywords:
                print(keyword)
                # SECURITY FIX: parameterization prevents SQL injection
                # through scraped keyword text.
                self.cursor.execute(
                    "INSERT INTO keywords (id_url, keywords) values (%s, %s);",
                    (id_url, keyword))
            self.db.commit()
        except Exception as e:
            print("Error 0x03:")
            print(e)

    def setQueue(self, obj):
        """Write each url in *obj* to the DB on its own daemon thread."""
        for url in obj:
            # NOTE(review): writeToDb is not defined on this class, and it
            # is *called* here (not passed as a callable), so the Thread
            # target is its return value — confirm intent.
            t = Thread(target=self.writeToDb(url))
            t.daemon = True
            t.start()
        return True

    def execute(self, command):
        """Run an arbitrary SQL statement and commit."""
        self.cursor.execute(command)
        self.db.commit()

    def close(self):
        """Close the underlying connection."""
        self.db.close()
| StarcoderdataPython |
59714 | <gh_stars>0
import xadmin
from .models import Course, UserAsk, UserCourse, UserFavorite, UserMessage
class CourseAdmin(object):
    """xadmin options for Course (defaults only)."""
    pass


class UserAskAdmin(object):
    """xadmin options for UserAsk (defaults only)."""
    pass


class UserCourseAdmin(object):
    """xadmin options for UserCourse (defaults only)."""
    pass


class UserFavoriteAdmin(object):
    """xadmin options for UserFavorite (defaults only)."""
    pass


class UserMessageAdmin(object):
    """xadmin options for UserMessage (defaults only)."""
    pass


# Register every model with its (empty) admin options class.
xadmin.site.register(Course, CourseAdmin)
xadmin.site.register(UserAsk, UserAskAdmin)
xadmin.site.register(UserCourse, UserCourseAdmin)
xadmin.site.register(UserFavorite, UserFavoriteAdmin)
xadmin.site.register(UserMessage, UserMessageAdmin)
1729905 | <reponame>imasnyper/ltd-priority-list
import random
from list.models import Job
def create_jobs(num_jobs, machine, customers):
    """Create *num_jobs* random Job rows for *machine*.

    Each job gets a random 4-digit job number, a random customer drawn
    from *customers*, and a random add_tools flag.
    """
    # FIX: the original iterated `for x, i in enumerate(range(num_jobs))`,
    # leaving x unused and the names confusing; a plain range suffices.
    for i in range(num_jobs):
        Job.objects.create(
            job_number=random.randrange(1000, 9999),
            machine=machine,
            customer=random.choice(customers),
            add_tools=random.choice([True, False]),
            description=f"{machine.name} job {i}",
        )
1603240 | <gh_stars>0
import sys
sys.path.append("..")
from preprocesspack import Attribute,DataSet,graphics,utils
def test_all():
    """Smoke-test the preprocesspack API end to end: Attribute creation,
    discretization, normalization, entropy/variance, DataSet operations,
    ROC/AUC, correlation, and save/load round-tripping.  Results are
    printed for visual inspection; nothing is asserted."""
    ##ATTRIBUTE
    ##Numeric Attribute
    attr=Attribute.Attribute(name="age",vector=[34,16,78,90,12])
    attrContinuous=Attribute.Attribute(name="age",vector=[1.2,3.4,6.7,8.9,4.7])
    ##Categorized Attribute (EF = equal frequency, EW = equal width)
    #attrCat=Attribute.Attribute(name="age",vector=[34,16,78,90,12])
    attrCatEF=attr.discretize(3,"EF")
    attrCatEW=attr.discretize(3,"EW")
    print (attrCatEF.vector)
    print (attrCatEW.vector)
    print (attr.vector)
    ##Normalized Attribute
    attrNorm=attr.normalize()
    attrStd=attr.standardize()
    attrCatStd=attrCatEW.standardize()
    attrCatNorm=attrCatEF.standardize()
    print(attrNorm.vector)
    print(attrStd.vector)
    print(attr.vector)
    print(attrCatStd.vector)
    print(attrCatNorm.vector)
    ##Attribute entropy
    print(attrCatEF.entropy())
    print (attr.entropy())
    print(attrContinuous.entropy())
    ##Attribute variance
    print (attrStd.variance())
    print (attr.variance())
    print (attrCatEF.variance())
    ##GetName and getVector
    print(attr.discretize(4,"EF").getVector())
    print(attr.discretize(4,"EF").getName())
    ##DATASET
    dataset=DataSet.DataSet([],name="students")
    print(dataset.data)
    print(dataset.name)
    dataset.addAttribute(attr)
    dataset.addAttribute(attrContinuous)
    dataset.addAttribute(attrCatEW)
    dataset.addAttribute([0,1,0,1,1])
    dataset.printDataSet()
    ##NORMALIZE DATASET
    dsNorm=dataset.normalize()
    dsNorm.printDataSet()
    ##STANDARDIZE DATASET
    dsStd=dataset.standardize()
    dsStd.printDataSet()
    ## VARIANCE
    print (dsStd.variance())
    ## ENTROPY
    print (dsStd.entropy())
    ##DISCRETIZATION
    print ("Discretization")
    dsDisc=dataset.discretize(3,"EW",[0,1,2,3])
    dsDisc.printDataSet()
    ##ROC AUC
    dataset.printDataSet()
    print(dataset.rocAuc(1,3))
    ##GET NAMES
    print(attr.isCategorical())
    print(dataset.getNames())
    ##CORRELATION MATRIX
    #graphics.correlationMatrix(dataset)
    ##ENTROPY PLOT
    #graphics.entropyPlot(dataset)
    ##ROC CURVE PLOT
    #graphics.rocPlot(dataset,1,3)
    ##LOAD DATASET (requires datos.csv in the working directory)
    ds=DataSet.loadDataSet("datos.csv")
    ds.printDataSet()
    attr.printAttribute()
    dataset.printDataSet()
    #graphics.correlationPlot(dataset)
    ##SAVE + RELOAD round trip
    DataSet.saveDataSet(dataset,"pruebaGuardar.csv")
    loadedDS=DataSet.loadDataSet("pruebaGuardar.csv")
    print (loadedDS)
    print (Attribute.computeCorrelation(attrNorm,attrCatEW))
    print (Attribute.computeCorrelation(attrCatEF,attrCatEW))
    print (Attribute.computeCorrelation(attrNorm,attrStd))
    dataset.printDataSet()
    loadedDS.printDataSet()
    attr.printAttribute()


# Run the smoke test on import/execution of this module.
test_all()
1642558 | from django.apps import AppConfig
class ReversionDynamoDBBackend(AppConfig):
    """Django app configuration for the DynamoDB reversion backend."""
    # Dotted module path of the app.
    name = 'reversion.backends.dynamodb'
    # Unique app label (labels must not contain dots).
    label = 'reversion_backends_dynamodb'
| StarcoderdataPython |
86248 | import os
import xml.etree.ElementTree as et
import argparse
import pkgutil
import shutil
from subprocess import call
import time
from common import get_host_ip, get_unoccupied_port, is_port_in_use, get_pid_by_name
from client import Client
import re
from colorama import init, Fore, Back, Style
from logger import setup_logger
from git import Repo
import git
init()
_REMOTE_RESOURCE = 'pypeep_resource'
_VIRTUAL_ENV_NAME = 'virtual_decade'
_LOGGER = setup_logger('Main', color=Fore.BLUE)
def parse_args():
    """Parse the pypeep command-line options and return the namespace."""
    parser = argparse.ArgumentParser()
    # Table of (flag, add_argument keyword arguments); order matters for --help.
    option_specs = [
        ("--remote-path",
         dict(help="project path on remote client")),
        ("--entry",
         dict(help="the entry python file of source code, or a executable file in the remote")),
        ("--server-name",
         dict(default='pypeep', help="IDE server name (optional, default pypeep)")),
        ("--hostname",
         dict(help="remote client hostname or docker container id")),
        ("--ssh-user",
         dict(help="remote client ssh user, do not set if is docker container")),
        ("--ssh-password",
         dict(help="remote client ssh password, do not set if is docker container")),
        ("--ssh-port",
         dict(help="remote client ssh port (optional, default 22)", type=int, default=22)),
        ("--local-path",
         dict(help="project path on local server, will download from remote if not exist")),
        ("--venv",
         dict(help="specify virtualenv in local for package mapping, if not set, will use current python env")),
    ]
    for flag, spec in option_specs:
        parser.add_argument(flag, **spec)
    return parser.parse_args()
# setup virtualenv
def setup_virtualenv(client, local_project_path, src_entry, remote_path):
    """Create and activate a virtualenv on the remote client and install deps.

    Installs pydevd (needed for remote debugging), then looks for the
    nearest ``requirements.txt`` starting at the entry file's folder and
    walking up to the project root, and installs it remotely if found.

    FIXES over the original version:
      * requirements.txt was only installed when one existed at the project
        root, contradicting the "check the entry folder first" intent; now
        the first one found anywhere on the path from entry dir to root wins.
      * the recursive lookup could recurse forever (or fail an assert) when
        no requirements.txt existed at all; the walk is now bounded.
    """
    client.execute('virtualenv {0}'.format(_VIRTUAL_ENV_NAME))
    client.execute('source ./{0}/bin/activate'.format(_VIRTUAL_ENV_NAME))
    # Install pydevd in remote so the PyCharm debug server can attach.
    client.execute('pip install pydevd')

    def _find_requirements_until_root(root_dir, sub_dir):
        # Walk upward from sub_dir toward root_dir; return the first
        # requirements.txt found as a path relative to root_dir, else None.
        while root_dir in sub_dir:
            candidate = os.path.join(sub_dir, 'requirements.txt')
            if os.path.exists(candidate):
                return os.path.join(sub_dir.replace(root_dir, ''), 'requirements.txt')
            parent = os.path.dirname(sub_dir)
            if parent == sub_dir:  # reached the filesystem root
                break
            sub_dir = parent
        return None

    # Absolute local path of the entry file (src_entry may use / or \\).
    src_path = os.path.join(local_project_path, *re.split(r'[/\\]*', src_entry))
    rel_requirements = _find_requirements_until_root(local_project_path, src_path)
    if rel_requirements is not None:
        client.execute('pip install -r {0}'.format(
            os.path.join(remote_path, rel_requirements)))
def inject_sitecustomize(commands, client, local_ip, local_port):
    """Queue the shell commands that prepare the remote debug environment.

    Appends two commands to *commands*: sourcing the export-hijack helper
    shipped in the pypeep resource bundle, then exporting the IP/port of
    the local debug server for the remote interpreter to connect back to.
    """
    hijack_cmd = 'source /tmp/{0}/hijack_export.sh'.format(_REMOTE_RESOURCE)
    env_cmd = 'export PYPEEP_IP={0};export PYPEEP_PORT={1}'.format(local_ip, local_port)
    commands.extend([hijack_cmd, env_cmd])
def edit_config_files(f, file_location, local_path, args_list):
    """Patch attributes in an XML template and write it to ``<local_path>/.idea/<f>``.

    ``args_list`` is a list of ``{'tag': ..., 'attrib': {...}}`` entries; for
    every element matching ``tag`` anywhere in the tree, the given attributes
    are set (overwriting existing values).
    """
    tree = et.parse(file_location)
    for spec in args_list:
        for element in tree.getroot().iter(spec['tag']):
            for attr_name, attr_value in spec['attrib'].items():
                element.set(attr_name, attr_value)
    tree.write(os.path.join(local_path, '.idea', f))
def git_check_version(local_project_path):
    """Force the local checkout to match its remote branch.

    Drops unpushed local commits, stashes uncommitted changes, then fetches
    and pulls from the remote.

    NOTE(review): unpushed commits are reset away and the stash is never
    popped afterwards, so local work is effectively discarded -- confirm
    this is the intended behaviour before relying on it.
    """
    # Not a git checkout at the top level -> nothing to sync.
    if '.git' not in os.listdir(local_project_path):
        return
    repo = Repo(local_project_path)
    # Commits present locally but missing on the matching remote branch.
    commits_ahead = repo.iter_commits('origin/' + repo.active_branch.name + '..' + repo.active_branch.name)
    count_ahead = sum([1 for c in commits_ahead])
    if count_ahead:
        # Move HEAD back past the unpushed commits.
        current_head = git.refs.head.HEAD(repo, path='HEAD')
        git.refs.head.HEAD.reset(current_head, commit='HEAD~' + str(count_ahead))
    origin = repo.remote()
    if repo.is_dirty():
        # Park uncommitted modifications so the pull below cannot conflict.
        repo.git.stash('save')
    origin.fetch()
    origin.pull()
def config_IDE(server_name, remote_path, project_name, local_path, local_ip, local_port, ssh_port):
    """Generate a PyCharm ``.idea`` project configured for remote debugging.

    Rewrites the XML templates shipped with pypeep (deployment mapping,
    web-server SSH port, remote path mappings, run configuration) and
    writes the results into ``<local_path>/.idea``.
    """
    # Per-template list of XML elements to patch: tag -> attributes to set.
    ide_config = {
        "deployment": [
            {'tag': 'component', 'attrib': {'serverName': server_name}},
            {'tag': 'paths', 'attrib': {'name': server_name}},
            {'tag': 'mapping', 'attrib': {'deploy': remote_path, 'local': '$PROJECT_DIR$' + remote_path}}
        ],
        "misc": [
        ],
        "remote-mappings": [
            {'tag': 'mapping', 'attrib': {'local-root': '$PROJECT_DIR$' + remote_path, 'remote-root': remote_path}},
        ],
        "webServers": [
        ],
    }
    # Start from a clean .idea directory.
    local_idea_path = os.path.join(local_path, '.idea')
    if os.path.exists(local_idea_path):
        shutil.rmtree(local_idea_path)
    os.mkdir(local_idea_path)
    # Directory of the installed pypeep package (Python 2 loader API).
    script_path = pkgutil.get_loader("pypeep").filename
    # NOTE(review): debug leftover -- consider removing this print.
    print script_path
    # Simple templates: apply the element patches from ide_config above.
    pycharm_config_dir = os.path.join(script_path, 'pycharm_config')
    raw_files = os.listdir(pycharm_config_dir)
    for f in raw_files:
        file_location = os.path.join(pycharm_config_dir, f)
        file_name = os.path.splitext(f)[0]
        # workspace / webServer / try templates get special handling below.
        if file_name == 'workspace' or file_name == 'webServer' or file_name == 'try':
            continue
        config_list = ide_config[file_name]
        edit_config_files(f, file_location, local_path, config_list)
    # webServers.xml: patch the SSH port in place in the template, then copy
    # it into .idea via edit_config_files (with no further element edits).
    webservers_config = et.parse(os.path.join(pycharm_config_dir, 'webServers.xml'))
    webservers_root = webservers_config.getroot()
    for ele in webservers_root.iter('option'):
        if 'name' in ele.attrib.keys() and ele.get('name') == "port":
            ele.attrib['value'] = str(ssh_port)
    webservers_config.write(os.path.join(pycharm_config_dir, 'webServers.xml'))
    edit_config_files('webServers.xml', os.path.join(pycharm_config_dir, 'webServers.xml'), local_path,
                      ide_config['webServers'])
    # workspace.xml: point the bundled "Python Remote Debug" run configuration
    # at the local debug server (host/port) and map $PROJECT_DIR$ remotely.
    workspace_config = et.parse(os.path.join(pycharm_config_dir, 'workspace.xml'))
    workspace_root = workspace_config.getroot()
    for ele in workspace_root.iter('option'):
        if 'name' in ele.attrib.keys() and ele.get('name') == "myItemId":
            ele.attrib['value'] = project_name
    for ele in workspace_root.iter('component'):
        if 'name' in ele.attrib.keys() and ele.get('name') == "RunManager":
            ele.attrib['selected'] = 'Python Remote Debug.debug1'
            debugger_list = ele.findall('configuration')
            for debugger in debugger_list:
                if 'type' in debugger.attrib.keys() and debugger.get('type') == "PyRemoteDebugConfigurationType":
                    debugger.set('name', 'debug1')
                    for option in debugger.iter('option'):
                        if 'name' in option.attrib.keys() and option.get('name') == 'PORT':
                            option.set('value', str(local_port))
                        if 'name' in option.attrib.keys() and option.get('name') == 'HOST':
                            option.set('value', local_ip)
                        if 'name' in option.attrib.keys() and option.get('name') == 'pathMappings':
                            for mapping in option.iter('mapping'):
                                mapping.set('local-root', '$PROJECT_DIR$')
                                mapping.set('remote-root', remote_path)
    workspace_config.write(os.path.join(local_path, '.idea', 'workspace.xml'))
    # Module file (.iml) named after the project so PyCharm recognises it.
    shutil.copyfile(os.path.join(pycharm_config_dir, 'try.iml'),
                    os.path.join(local_path, '.idea', project_name + '.iml'))
def main():
    """Drive the full pypeep flow: fetch the remote project, configure
    PyCharm for remote debugging, wait for the debug server, then launch
    the entry point on the remote client."""
    args = parse_args()
    # CLI flags may fall back to environment variables.
    remote_path = args.remote_path or os.environ.get('PYPEEP_REMOTE_PATH')
    assert remote_path
    server_name = args.server_name
    ssh_port = args.ssh_port
    local_path = args.local_path or os.environ.get('PYPEEP_LOCAL_PATH')
    assert local_path
    assert os.path.isdir(local_path), "local project path is not a directory."
    # Address/port the remote debugger will connect back to.
    local_ip = get_host_ip()
    local_port = get_unoccupied_port()
    project_name = os.path.basename(remote_path)
    client = Client(args.hostname, args.ssh_user, args.ssh_password, args.ssh_port)
    # Ship the helper scripts (hijack_export.sh etc.) to the remote /tmp.
    client.send_files(os.path.join(pkgutil.get_loader("pypeep").filename, _REMOTE_RESOURCE),
                      os.path.join('/tmp', _REMOTE_RESOURCE))
    # remote project is placed in the local project path. Modify this for consistency
    # local project path is empty
    local_project_path = os.path.join(local_path, project_name)
    if not os.path.exists(local_project_path):
        client.fetch_files(remote_path, local_project_path)
    # Make sure the code is the latest?
    # NOTE(review): this syncs local_path, not local_project_path -- confirm
    # which directory is the intended git checkout.
    git_check_version(local_path)
    config_IDE(server_name, remote_path, project_name, local_project_path, local_ip, local_port, ssh_port)
    commands = []
    inject_sitecustomize(commands, client, local_ip, local_port)
    # setup_virtualenv(client, local_project_path, args.entry, remote_path)
    # macOS-specific: open the fetched project in PyCharm.
    call(['open', '-a', 'PyCharm', local_project_path])
    _LOGGER.info('>> Please start the debug server in the PyCharm to continue <<')
    # use a loop to check if the debugger started(if port is occupied).
    while 1:
        port_open = False
        pid_list = get_pid_by_name('pycharm')
        for pid in pid_list:
            port_open = port_open or is_port_in_use(pid, local_port)
        if port_open:
            break
        _LOGGER.info('Still waiting...')
        time.sleep(10)
    _LOGGER.info('Detect the debugging port is open, ready to start')
    # Run the entry point remotely: python for .py files, source otherwise.
    if args.entry.endswith('.py'):
        commands.append('python {0}'.format(args.entry))
    else:
        commands.append('source {0}'.format(args.entry))
    client.execute('\n'.join(commands))


if __name__ == "__main__":
    main()
| StarcoderdataPython |
1638180 | <reponame>mcjczapiewski/work<filename>check_photo_dpi.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# import bibliotek
from PIL import Image
import os
import datetime
import codecs
from natsort import natsort_keygen
# Natural-order sort key so folder "10" sorts after "9", not after "1".
nkey = natsort_keygen()

# Disable PIL's decompression-bomb guard: scanned pages can be very large.
Image.MAX_IMAGE_PIXELS = None

# Counter of scanned folders and the column separator for console output.
countope = 1
separ = "\t"

# Timestamp of the run; also embedded in the error-report file name.
czasstart = datetime.datetime.now()
print("~~~~~~START~~~~~~\t" + str(czasstart).split(".")[0])

# Ask for the folder to scan and where to put the error report.
print("\nPodaj dokładną ścieżkę folderu, z którego chcesz sprawdzać DPI:")
liczenie = input()
print("\nPodaj ścieżkę dla pliku wynikowego:")
sciezka = input()
wynikowy = os.path.basename(os.path.normpath(sciezka))
bledny = (
    sciezka
    + "\\"
    + wynikowy
    + "_BLEDY_"
    + czasstart.strftime("%Y-%m-%d")
    + ".txt"
)
print("\nPlik zostanie umieszczony w:\n" + bledny)
input("\nWciśnij ENTER aby kontynuować...")

# Main loop: walk the tree and verify every JPEG's DPI metadata.
for subdir, dirs, files in os.walk(liczenie):
    dirs.sort(key=nkey)
    # Skip folders with no JPEGs at all.
    if not any(
        fname.upper().endswith((".JPG", ".JPEG"))
        for fname in os.listdir(subdir)
    ):
        continue
    # The last path component is the record ("operat") number.
    nrope = os.path.basename(os.path.normpath(subdir))
    print(countope, separ, nrope)
    countope += 1
    for fname in sorted(files):
        if not fname.upper().endswith((".JPG", ".JPEG")):
            continue
        filename = os.path.join(subdir, fname)
        try:
            img = Image.open(filename)
            if img.info.get("dpi"):
                xdpi, ydpi = img.info["dpi"]
                # BUG FIX: this threshold check used to run outside the
                # try/if, so images without DPI (or that failed to open)
                # were judged with stale values from the previous image --
                # and the very first failure raised a NameError.
                if xdpi < 300 or ydpi < 300:
                    with codecs.open(bledny, "a", "utf-8") as bl:
                        bl.write(
                            str(xdpi) + "\t" + str(ydpi) + "\t" + filename + "\r\n"
                        )
            else:
                # Image carries no DPI metadata at all.
                with open(bledny, "a") as bl:
                    bl.write("Zdjęcie nie ma DPI: " + filename + "\r\n")
        except Exception:
            # Unreadable/corrupt image file.
            with open(bledny, "a") as bl:
                bl.write(
                    "Nie udało się otworzyć zdjęcia: " + filename + "\r\n"
                )

# Report total runtime in minutes.
czaskoniec = datetime.datetime.now()
roznicaczas = czaskoniec - czasstart
czastrwania = roznicaczas.total_seconds() / 60
print("\nCałość zajęła (minuty):")
print("%.2f" % czastrwania)
print("\n~~~~~~KONIEC~~~~~~\t" + str(czaskoniec).split(".")[0])
input("Wciśnij ENTER aby zamknąć.")
| StarcoderdataPython |
1702057 | <reponame>knp19i/story-prompt
import sys
import lib
from collections import defaultdict
# Get number of invalid prompts
def get_num_errors():
    """Return the number of invalid prompts recorded in the errors file.

    A missing errors file means no errors yet (returns 0); any other I/O
    problem aborts the program with a diagnostic message.
    """
    try:
        # FIX: use a context manager so the handle is closed even if
        # readlines() raises (the original leaked it on that path).
        with open(lib.ERRORS_FILE, 'r') as errors_file:
            return len(errors_file.readlines())
    except FileNotFoundError:
        return 0
    except Exception as e:
        sys.exit("Error opening errors file: {}".format(e))
if __name__ == '__main__':
    num_errors = get_num_errors()
    try:
        # NOTE(review): not context-managed; closed manually further below.
        prompts_file = open(lib.PROMPTS_FILE, 'r')
    except FileNotFoundError:
        # This likely means that prompt.py has not been run yet with a valid input
        print("Statistics for past prompts\n")
        print("Total number of prompts is {}".format(num_errors))
        print("Total number of valid prompts is 0")
        print("Total number of prompts with errors is {}".format(num_errors))
        exit()
    except Exception as general_exception:
        sys.exit("Error opening prompts file: {}".format(general_exception))
    prompt_lines = prompts_file.readlines()
    num_valid_prompts = len(prompt_lines)
    # To track min and max numerical value encountered across prompts
    min_number = float('inf')
    max_number = float('-inf')
    # To track frequency of each prompt key
    number_frequency = defaultdict(int)
    unit_of_measure_frequency = defaultdict(int)
    place_frequency = defaultdict(int)
    adjective_frequency = defaultdict(int)
    noun_frequency = defaultdict(int)
    # Gather statistics from prompts file
    for line in prompt_lines:
        # Only valid prompts are stored, so it is not needed to validate them again
        prompt = lib.convert_json_to_object(line)
        min_number = min(min_number, prompt.number)
        max_number = max(max_number, prompt.number)
        number_frequency[prompt.number] += 1
        unit_of_measure_frequency[prompt.unit_of_measure] += 1
        place_frequency[prompt.place] += 1
        adjective_frequency[prompt.adjective] += 1
        noun_frequency[prompt.noun] += 1
    prompts_file.close()
    # Report the collected statistics; max(d, key=d.get) picks the mode.
    print("Statistics for past prompts\n")
    print("Total number of prompts is {}".format(num_valid_prompts + num_errors))
    print("Total number of valid prompts is {}".format(num_valid_prompts))
    print("Total number of prompts with errors is {}".format(num_errors))
    print()
    print("Minimum value for the numerical input is {}".format(min_number))
    print("Maximum value for the numerical input is {}".format(max_number))
    print("Most common value for number is {}".format(max(number_frequency, key=number_frequency.get)))
    print("Most common value for unit of measure is {}".format(
        max(unit_of_measure_frequency, key=unit_of_measure_frequency.get)))
    print("Most common value for place is {}".format(max(place_frequency, key=place_frequency.get)))
    print("Most common value for adjective is {}".format(max(adjective_frequency, key=adjective_frequency.get)))
    print("Most common value for noun is {}".format(max(noun_frequency, key=noun_frequency.get)))
| StarcoderdataPython |
3298092 | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt
import os
import numpy as np
# mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
# print "basic information of mnist dataset"
# print "mnist training data size: ", mnist.train.num_examples
# print "mnist validating data size: ", mnist.validation.num_examples
# print "mnist testing data size: ", mnist.test.num_examples
# print "mnist example training data: ", mnist.train.images[0]
# print "mnist example training data label", mnist.train.labels[0]
# define input and output data size
INPUT_NODE = 784    # 28x28 flattened MNIST image
OUTPUT_NODE = 10    # one logit per digit class
# params for neural network
LAYER1_NODE = 500   # hidden-layer width
BATCH_SIZE = 1000   # examples per training step
LEARNING_RATE_BASE = 0.8      # initial learning rate before decay
LEARNING_RATE_DECAY = 0.999   # multiplicative decay factor
REGULARIZATION_RATE = 0.0001  # L2 penalty weight
TRAINING_STEPS = 100000       # total optimisation steps
MOVING_AVERAGE_DECAY = 0.99   # EMA decay for shadow variables
# calc the result of forward propagation,
# ***original method***
def inference(input_tensor, avg_class, weights1, biases1, weights2, biases2):
    """Forward pass of the two-layer network (hidden ReLU layer + linear output).

    When ``avg_class`` is given, every parameter is replaced by its
    exponential-moving-average shadow value before being applied.
    """
    if avg_class is None:
        w1, b1, w2, b2 = weights1, biases1, weights2, biases2
    else:
        w1 = avg_class.average(weights1)
        b1 = avg_class.average(biases1)
        w2 = avg_class.average(weights2)
        b2 = avg_class.average(biases2)
    hidden = tf.nn.relu(tf.matmul(input_tensor, w1) + b1)
    return tf.matmul(hidden, w2) + b2
# training process
def train(mnist):
    """Build the two-layer MNIST classifier, train it, and plot accuracy.

    Records validation accuracy every 1000 steps, then renders an accuracy
    curve (saved to images/) and shows a sample digit with matplotlib.
    NOTE: Python 2 code -- print statements and integer division below.
    """
    x = tf.placeholder(tf.float32, [None, INPUT_NODE], name="x-input")
    y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name="y-input")
    # generate hidden layer params
    weight1 = tf.Variable(tf.truncated_normal([INPUT_NODE, LAYER1_NODE], stddev=0.1))
    biases1 = tf.Variable(tf.constant(0.1, shape=[LAYER1_NODE]))
    # generate output layer params
    weight2 = tf.Variable(tf.truncated_normal([LAYER1_NODE, OUTPUT_NODE], stddev=0.1))
    biases2 = tf.Variable(tf.constant(0.1, shape=[OUTPUT_NODE]))
    # Forward propagation on the raw (current) parameters.
    y = inference(x, None, weight1, biases1, weight2, biases2)
    # Non-trainable counter of training cycles, advanced by the optimizer.
    global_step = tf.Variable(0, trainable=False)
    # EMA shadow variables increase robustness at prediction time.
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    # Forward propagation using the moving-average parameters (for eval).
    average_y = inference(x, variable_averages, weight1, biases1, weight2, biases2)
    # Loss is computed on the raw logits against the integer class labels.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.arg_max(y_, 1))
    # calc cross_entropy mean for current batch
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    # calc L2 regularization loss function
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    regularization = regularizer(weight1) + regularizer(weight2)
    loss = cross_entropy_mean + regularization
    # learning rate = learning rate * LEARNING_RATE_DECAY ^ (global_step / decay_step)
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step, mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    # One op that runs backward propagation AND the EMA update together.
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name="train")
    # Accuracy is evaluated on the EMA-smoothed predictions.
    correct_prediction = tf.equal(tf.arg_max(average_y, 1), tf.arg_max(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # prepare validation dataset to stop optimization
        validation_feed = {x: mnist.validation.images,
                           y_: mnist.validation.labels}
        # define test dataset for final evaluation
        test_feed = {x: mnist.test.images,
                     y_: mnist.test.labels}
        # One slot per 1000-step checkpoint (Python 2 integer division).
        validation_result = range(TRAINING_STEPS / 1000)
        test_result = range(TRAINING_STEPS / 1000)
        for i in range(TRAINING_STEPS):
            if i % 1000 == 0:
                validate_acc = sess.run(accuracy, feed_dict=validation_feed)
                validation_result[i / 1000] = validate_acc
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            sess.run(train_op, feed_dict={x: xs, y_: ys})
            # NOTE(review): test accuracy is recomputed on EVERY step and the
            # checkpoint slot is overwritten until the next multiple of 1000
            # -- very expensive; consider moving inside the `if` above.
            test_acc = sess.run(accuracy, feed_dict=test_feed)
            test_result[i / 1000] = test_acc
        print validation_result
        print test_result
        # draw a graph of accuracy using matplotlib
        iteration_count = range(0, TRAINING_STEPS, 1000)
        plt.figure(num=1, figsize=(15, 8))
        plt.title("Plot accuracy", size=20)
        plt.xlabel("iteration count", size=14)
        plt.ylabel("accuracy/%", size=14)
        # Annotate the final (last-checkpoint) accuracies on the curve.
        validation_note = [TRAINING_STEPS - 1000, validation_result[TRAINING_STEPS / 1000 - 1]]
        test_note = [TRAINING_STEPS - 1000, test_result[TRAINING_STEPS / 1000 - 1]]
        plt.annotate('validate-' + str(validation_note), xy=(test_note[0], test_note[1]),
                     xytext=(test_note[0] - 1000, test_note[1] - 0.1), arrowprops=dict(facecolor='black', shrink=0.05))
        plt.annotate('test-' + str(test_note), xy=(test_note[0], test_note[1]),
                     xytext=(test_note[0] + 1000, test_note[1] - 0.07), arrowprops=dict(facecolor='black', shrink=0.05))
        plt.grid(True)
        plt.plot(iteration_count, validation_result, color='b', linestyle='-', marker='o', label='validation data')
        plt.plot(iteration_count, test_result, linestyle='-.', marker='X', label='test data')
        plt.legend(loc="upper left")
        try:
            os.mkdir('images/')
        except:
            print("directory already exist")
        plt.savefig('images/mnist_accuracy_evaluation.png', format='png')
        # Reshape one flattened training image back to 28x28 for display.
        img_vector = mnist.train.images[5]
        img_length = int(np.sqrt(INPUT_NODE))
        img = np.ndarray([img_length, img_length])
        for c in range(INPUT_NODE):
            img[c / img_length][c % img_length] = img_vector[c]
        plt.figure(num=2, figsize=(15, 8))
        plt.imshow(img)
        plt.show()
def main(argv=None):
    """Entry point: load MNIST, print dataset sizes, run training (Python 2)."""
    mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
    print "basic information of mnist dataset"
    print "mnist training data size: ", mnist.train.num_examples
    print "mnist validating data size: ", mnist.validation.num_examples
    print "mnist testing data size: ", mnist.test.num_examples
    train(mnist)


if __name__ == '__main__':
    # tf.app.run parses flags and invokes main().
    tf.app.run()
| StarcoderdataPython |
# Echo one line of standard input to standard output, then exit with an
# intentional error (exercises stdin/stdout/stderr of a runner harness).
# FIX: the first line was garbled by extraction junk; reconstructed.
data = input("STDIN: ")
print("STDOUT: " + data)
raise ValueError("This is an error")
| StarcoderdataPython |
3288031 | # copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
from paddle import ParamAttr
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.regularizer import L2Decay
from paddle.nn import Conv2D, BatchNorm
from paddle.nn import MaxPool2D, AvgPool2D
from ..registry import BACKBONES
from ..weight_init import weight_init_
from ...utils import load_ckpt
__all__ = ["ResNetTSN_MRI"]
class ConvBNLayer(nn.Layer):
    """Conv2D followed by BatchNorm, with an optional leading 2x2 average
    pool ("tweaks" mode, the ResNet-D downsampling variant).

    Parameter and BN names are derived from ``name`` so that weights line
    up with pretrained checkpoints -- do not change the naming scheme.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 groups=1,
                 is_tweaks_mode=False,
                 act=None,
                 lr_mult=1.0,
                 name=None):
        super(ConvBNLayer, self).__init__()
        self.is_tweaks_mode = is_tweaks_mode
        # Average pool applied before the convolution when tweaks mode is on.
        self._pool2d_avg = AvgPool2D(kernel_size=2,
                                     stride=2,
                                     padding=0,
                                     ceil_mode=True)
        self._conv = Conv2D(in_channels=in_channels,
                            out_channels=out_channels,
                            kernel_size=kernel_size,
                            stride=stride,
                            padding=(kernel_size - 1) // 2,
                            groups=groups,
                            weight_attr=ParamAttr(name=name + "_weights",
                                                  learning_rate=lr_mult),
                            bias_attr=False)
        # Checkpoint naming convention: "conv1" -> "bn_conv1",
        # "res<stage><block>..." -> "bn<stage><block>..." (drops "res").
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        self._batch_norm = BatchNorm(
            out_channels,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale',
                                 learning_rate=lr_mult,
                                 regularizer=L2Decay(0.0)),
            bias_attr=ParamAttr(bn_name + '_offset',
                                learning_rate=lr_mult,
                                regularizer=L2Decay(0.0)),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

    def forward(self, inputs):
        # Optional downsampling pool, then conv + BN (activation inside BN).
        if self.is_tweaks_mode:
            inputs = self._pool2d_avg(inputs)
        y = self._conv(inputs)
        y = self._batch_norm(y)
        return y
class BottleneckBlock(nn.Layer):
    """ResNet bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand
    (4x channels), plus an identity or projection shortcut."""
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 shortcut=True,
                 if_first=False,
                 lr_mult=1.0,
                 name=None):
        super(BottleneckBlock, self).__init__()
        self.conv0 = ConvBNLayer(in_channels=in_channels,
                                 out_channels=out_channels,
                                 kernel_size=1,
                                 act='relu',
                                 lr_mult=lr_mult,
                                 name=name + "_branch2a")
        self.conv1 = ConvBNLayer(in_channels=out_channels,
                                 out_channels=out_channels,
                                 kernel_size=3,
                                 stride=stride,
                                 act='relu',
                                 lr_mult=lr_mult,
                                 name=name + "_branch2b")
        self.conv2 = ConvBNLayer(in_channels=out_channels,
                                 out_channels=out_channels * 4,
                                 kernel_size=1,
                                 act=None,
                                 lr_mult=lr_mult,
                                 name=name + "_branch2c")
        if not shortcut:
            # Projection shortcut; uses avg-pool downsampling (tweaks mode)
            # except in the very first block after the stem.
            self.short = ConvBNLayer(in_channels=in_channels,
                                     out_channels=out_channels * 4,
                                     kernel_size=1,
                                     stride=1,
                                     is_tweaks_mode=False if if_first else True,
                                     lr_mult=lr_mult,
                                     name=name + "_branch1")
        self.shortcut = shortcut

    def forward(self, inputs):
        # Main branch: 1x1 -> 3x3 -> 1x1.
        y = self.conv0(inputs)
        conv1 = self.conv1(y)
        conv2 = self.conv2(conv1)
        # Identity shortcut when shapes match, projection otherwise.
        if self.shortcut:
            short = inputs
        else:
            short = self.short(inputs)
        y = paddle.add(x=short, y=conv2)
        y = F.relu(y)
        return y
class BasicBlock(nn.Layer):
    """ResNet basic residual block (two 3x3 convs) for the 18/34-layer
    variants, with an identity or projection shortcut."""
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 shortcut=True,
                 if_first=False,
                 lr_mult=1.0,
                 name=None):
        super(BasicBlock, self).__init__()
        self.stride = stride
        self.conv0 = ConvBNLayer(in_channels=in_channels,
                                 out_channels=out_channels,
                                 kernel_size=3,
                                 stride=stride,
                                 act='relu',
                                 lr_mult=lr_mult,
                                 name=name + "_branch2a")
        self.conv1 = ConvBNLayer(in_channels=out_channels,
                                 out_channels=out_channels,
                                 kernel_size=3,
                                 act=None,
                                 lr_mult=lr_mult,
                                 name=name + "_branch2b")
        if not shortcut:
            # Projection shortcut; avg-pool downsampling (tweaks mode) except
            # in the very first block after the stem.
            self.short = ConvBNLayer(in_channels=in_channels,
                                     out_channels=out_channels,
                                     kernel_size=1,
                                     stride=1,
                                     is_tweaks_mode=False if if_first else True,
                                     lr_mult=lr_mult,
                                     name=name + "_branch1")
        self.shortcut = shortcut

    def forward(self, inputs):
        # Main branch: two 3x3 convs (second one without activation).
        y = self.conv0(inputs)
        conv1 = self.conv1(y)
        # Identity shortcut when shapes match, projection otherwise.
        if self.shortcut:
            short = inputs
        else:
            short = self.short(inputs)
        y = paddle.add(x=short, y=conv1)
        y = F.relu(y)
        return y
@BACKBONES.register()
class ResNetTSN_MRI(nn.Layer):
    """ResNetTweaksTSN backbone.
    Args:
        depth (int): Depth of resnet model.
        pretrained (str): pretrained model. Default: None.
    """
    def __init__(self,
                 layers=50,
                 pretrained=None,
                 lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0],
                 in_channels=1):
        # NOTE(review): mutable default for lr_mult_list; it is only read
        # here, so the shared-default pitfall is not triggered in practice.
        super(ResNetTSN_MRI, self).__init__()
        self.pretrained = pretrained
        self.layers = layers
        supported_layers = [18, 34, 50, 101, 152, 200]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(
                supported_layers, layers)
        # One learning-rate multiplier for the stem + one per residual stage.
        self.lr_mult_list = lr_mult_list
        # Input channel count; defaults to 1 for single-channel MRI slices.
        self.in_channels = in_channels
        assert isinstance(
            self.lr_mult_list,
            (list, tuple
             )), "lr_mult_list should be in (list, tuple) but got {}".format(
                type(self.lr_mult_list))
        # NOTE(review): duplicated word "should" in this message.
        assert len(
            self.lr_mult_list
        ) == 5, "lr_mult_list length should should be 5 but got {}".format(
            len(self.lr_mult_list))
        # Number of residual blocks per stage for each depth variant.
        if layers == 18:
            depth = [2, 2, 2, 2]
        elif layers == 34 or layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        elif layers == 200:
            depth = [3, 12, 48, 3]
        # Input channels of each stage (bottleneck variants expand 4x).
        num_channels = [64, 256, 512, 1024
                        ] if layers >= 50 else [64, 64, 128, 256]
        num_filters = [64, 128, 256, 512]
        # Deep stem: three 3x3 convs replace the classic single 7x7 conv.
        self.conv1_1 = ConvBNLayer(in_channels=self.in_channels,
                                   out_channels=32,
                                   kernel_size=3,
                                   stride=2,
                                   act='relu',
                                   lr_mult=self.lr_mult_list[0],
                                   name="conv1_1")
        self.conv1_2 = ConvBNLayer(in_channels=32,
                                   out_channels=32,
                                   kernel_size=3,
                                   stride=1,
                                   act='relu',
                                   lr_mult=self.lr_mult_list[0],
                                   name="conv1_2")
        self.conv1_3 = ConvBNLayer(in_channels=32,
                                   out_channels=64,
                                   kernel_size=3,
                                   stride=1,
                                   act='relu',
                                   lr_mult=self.lr_mult_list[0],
                                   name="conv1_3")
        self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1)
        self.block_list = []
        if layers >= 50:
            # Bottleneck blocks for ResNet-50/101/152/200.
            for block in range(len(depth)):
                shortcut = False
                for i in range(depth[block]):
                    # Checkpoint naming: very deep nets use "b<i>" suffixes
                    # in stage 3, otherwise letters a, b, c, ...
                    if layers in [101, 152, 200] and block == 2:
                        if i == 0:
                            conv_name = "res" + str(block + 2) + "a"
                        else:
                            conv_name = "res" + str(block + 2) + "b" + str(i)
                    else:
                        conv_name = "res" + str(block + 2) + chr(97 + i)
                    bottleneck_block = self.add_sublayer(
                        'bb_%d_%d' % (block, i),
                        BottleneckBlock(
                            in_channels=num_channels[block]
                            if i == 0 else num_filters[block] * 4,
                            out_channels=num_filters[block],
                            stride=2 if i == 0 and block != 0 else 1,
                            shortcut=shortcut,
                            if_first=block == i == 0,
                            lr_mult=self.lr_mult_list[block + 1],
                            name=conv_name))
                    self.block_list.append(bottleneck_block)
                    shortcut = True
        else:
            # Basic blocks for ResNet-18/34.
            for block in range(len(depth)):
                shortcut = False
                for i in range(depth[block]):
                    conv_name = "res" + str(block + 2) + chr(97 + i)
                    basic_block = self.add_sublayer(
                        'bb_%d_%d' % (block, i),
                        BasicBlock(in_channels=num_channels[block]
                                   if i == 0 else num_filters[block],
                                   out_channels=num_filters[block],
                                   stride=2 if i == 0 and block != 0 else 1,
                                   shortcut=shortcut,
                                   if_first=block == i == 0,
                                   name=conv_name,
                                   lr_mult=self.lr_mult_list[block + 1]))
                    self.block_list.append(basic_block)
                    shortcut = True

    def init_weights(self):
        """Initiate the parameters.
        Note:
            1. when indicate pretrained loading path, will load it to initiate backbone.
            2. when not indicating pretrained loading path, will follow specific initialization initiate backbone. Always, Conv2D layer will be
            initiated by KaimingNormal function, and BatchNorm2d will be initiated by Constant function.
            Please refer to https://www.paddlepaddle.org.cn/documentation/docs/en/develop/api/paddle/nn/initializer/kaiming/KaimingNormal_en.html
        """
        # XXX: check bias!!! check pretrained!!!
        if isinstance(self.pretrained, str) and self.pretrained.strip() != "":
            load_ckpt(self, self.pretrained)
        elif self.pretrained is None or self.pretrained.strip() == "":
            # No checkpoint: KaimingNormal for convs, constant 1 for BN.
            for layer in self.sublayers():
                if isinstance(layer, nn.Conv2D):
                    # XXX: no bias
                    weight_init_(layer, 'KaimingNormal')
                elif isinstance(layer, nn.BatchNorm2D):
                    weight_init_(layer, 'Constant', value=1)

    def forward(self, inputs):
        # Deep stem, max pool, then all residual stages in sequence.
        y = self.conv1_1(inputs)
        y = self.conv1_2(y)
        y = self.conv1_3(y)
        y = self.pool2d_max(y)
        for block in self.block_list:
            y = block(y)
        return y
| StarcoderdataPython |
4807180 | import sys
import math
# Auto-generated code below aims at helping you parse
# the standard input according to the problem statement.
# Classic activity selection: maximise the number of experiments that can
# run without overlapping in time.
experiments = []
result = 0
last_expr = 0
for i in range(int(input())):
    j, d = [int(j) for j in input().split()]
    # Store each experiment as a (start, end) pair; end = start + duration.
    experiments.append((j, j + d))
    last_expr = max(last_expr, experiments[i][1])
# Greedy: consider experiments ordered by earliest finishing time.
experiments.sort(key=lambda exp: exp[1])
# One slot per time unit; 1 marks time already taken by a chosen experiment.
timeline = [0] * last_expr
for c in experiments:
    # Accept the experiment only if its whole interval is still free.
    if 1 not in timeline[c[0]:c[1]]:
        result += 1
        # Mark the interval [start, end) as occupied.
        timeline[c[0]:c[1]] = [1] * (c[1] - c[0])
print(result)
# Write an answer using print
# To debug: print("Debug messages...", file=sys.stderr, flush=True)
| StarcoderdataPython |
1631546 | <filename>tests/testutils.py
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Files download/upload REST API similar to S3 for Invenio."""
import sys
from io import BytesIO
def login_user(client, user):
    """Mark *user* as the authenticated, freshly-logged-in user in the
    test client's session (pass None/falsy to clear the user id)."""
    uid = user.id if user else None
    with client.session_transaction() as session_data:
        session_data.update({'user_id': uid, '_fresh': True})
class BadBytesIO(BytesIO):
    """BytesIO that abruptly closes itself on the second read.

    Simulates an upload stream dying mid-transfer: the first ``read``
    succeeds, every later one raises ``ValueError`` because the
    underlying buffer has been closed.
    """

    def __init__(self, *args, **kwargs):
        """Initialize; ``called`` tracks whether a read happened yet."""
        # FIX: the original returned the (None) result of the super call
        # from __init__, which is an anti-idiom; __init__ must return None.
        self.called = False
        super(BadBytesIO, self).__init__(*args, **kwargs)

    def read(self, *args, **kwargs):
        """Fail on second read by closing the stream first."""
        if self.called:
            # Second (or later) read: closing makes super().read raise
            # ValueError ("I/O operation on closed file").
            self.close()
        self.called = True
        return super(BadBytesIO, self).read(*args, **kwargs)
| StarcoderdataPython |
3345965 | from datetime import datetime
from decimal import Decimal
from unittest import mock
import pytest
from django.conf import settings
from django_countries.fields import Country
from django_scopes import scopes_disabled
from pytz import UTC
from pretix.base.models import (
Event, InvoiceAddress, Order, OrderPosition, SeatingPlan,
)
from pretix.base.models.orders import OrderFee
@pytest.fixture
def variations(item):
    """Create and return two variations ("ChildA1", "ChildA2") of ``item``."""
    return [item.variations.create(value=val) for val in ("ChildA1", "ChildA2")]
@pytest.fixture
def order(event, item, taxrule):
    """Pending order with one payment fee and an invoice address.

    django.utils.timezone.now is patched to a fixed time -- presumably so
    auto-now timestamp fields get a deterministic value; confirm against
    the Order model.
    """
    testtime = datetime(2017, 12, 1, 10, 0, 0, tzinfo=UTC)
    with mock.patch('django.utils.timezone.now') as mock_now:
        mock_now.return_value = testtime
        o = Order.objects.create(
            code='FOO', event=event, email='<EMAIL>',
            status=Order.STATUS_PENDING, secret="k24fiuwvu8kxz3y1",
            datetime=datetime(2017, 12, 1, 10, 0, 0, tzinfo=UTC),
            expires=datetime(2017, 12, 10, 10, 0, 0, tzinfo=UTC),
            total=23, locale='en'
        )
        # Payment fee of 0.25 at 19% tax.
        o.fees.create(fee_type=OrderFee.FEE_TYPE_PAYMENT, value=Decimal('0.25'), tax_rate=Decimal('19.00'),
                      tax_value=Decimal('0.05'), tax_rule=taxrule)
        InvoiceAddress.objects.create(order=o, company="Sample company", country=Country('NZ'))
    return o
@pytest.fixture
def order_position(item, order, taxrule, variations):
    """Single position on ``order``: the item in its first variation at 23.00."""
    op = OrderPosition.objects.create(
        order=order,
        item=item,
        variation=variations[0],
        tax_rule=taxrule,
        tax_rate=taxrule.rate,
        tax_value=Decimal("3"),
        price=Decimal("23"),
        attendee_name_parts={'full_name': "Peter"},
        secret="z3fsn8jyufm5kpk768q69gkbyr5f4h6w"
    )
    return op
# Expected serialized representation of the ``event`` fixture as returned by
# the events REST endpoint; compared verbatim in the list/detail tests.
TEST_EVENT_RES = {
    "name": {"en": "Dummy"},
    "live": False,
    "testmode": False,
    "currency": "EUR",
    "date_from": "2017-12-27T10:00:00Z",
    "date_to": None,
    "date_admission": None,
    "is_public": True,
    "presale_start": None,
    "presale_end": None,
    "location": None,
    "slug": "dummy",
    "has_subevents": False,
    "seating_plan": None,
    "seat_category_mapping": {},
    "meta_data": {"type": "Conference"},
    'plugins': [
        'pretix.plugins.banktransfer',
        'pretix.plugins.ticketoutputpdf'
    ]
}
@pytest.fixture
def item(event):
    """A paid product ("Budget Ticket") priced at 23."""
    ticket = event.items.create(name="Budget Ticket", default_price=23)
    return ticket
@pytest.fixture
def free_item(event):
    """A zero-priced product ("Free Ticket")."""
    ticket = event.items.create(name="Free Ticket", default_price=0)
    return ticket
@pytest.fixture
def free_quota(event, free_item):
    """A quota of 200 covering the free product."""
    quota = event.quotas.create(name="Budget Quota", size=200)
    quota.items.add(free_item)
    return quota
@pytest.mark.django_db
def test_event_list(token_client, organizer, event):
    """List endpoint returns the event and honours boolean/date filters."""
    url = '/api/v1/organizers/{}/events/'.format(organizer.slug)
    resp = token_client.get(url)
    assert resp.status_code == 200
    assert TEST_EVENT_RES == resp.data['results'][0]
    # (query string, expected result list) — the fixture event is not live,
    # is public, has no subevents and starts 2017-12-27T10:00:00Z.
    filter_cases = [
        ('live=true', []),
        ('live=false', [TEST_EVENT_RES]),
        ('is_public=false', []),
        ('is_public=true', [TEST_EVENT_RES]),
        ('has_subevents=true', []),
        ('has_subevents=false', [TEST_EVENT_RES]),
        ('ends_after=2017-12-27T10:01:00Z', []),
        ('ends_after=2017-12-27T09:59:59Z', [TEST_EVENT_RES]),
    ]
    for query, expected in filter_cases:
        resp = token_client.get(url + '?' + query)
        assert resp.status_code == 200
        assert expected == resp.data['results']
@pytest.mark.django_db
def test_event_get(token_client, organizer, event):
    """Detail endpoint returns the full serialized event."""
    url = '/api/v1/organizers/{}/events/{}/'.format(organizer.slug, event.slug)
    resp = token_client.get(url)
    assert resp.status_code == 200
    assert TEST_EVENT_RES == resp.data
@pytest.mark.django_db
def test_event_create(token_client, organizer, event, meta_prop):
    """Event creation: happy path plus the three rejection cases
    (unknown meta property, duplicate slug, live-on-creation)."""
    url = '/api/v1/organizers/{}/events/'.format(organizer.slug)
    # Fields shared by every request; each case overrides slug/meta/live.
    base_payload = {
        "name": {
            "de": "Demo Konference 2020 Test",
            "en": "Demo Conference 2020 Test"
        },
        "live": False,
        "currency": "EUR",
        "date_from": "2017-12-27T10:00:00Z",
        "date_to": "2017-12-28T10:00:00Z",
        "date_admission": None,
        "is_public": False,
        "presale_start": None,
        "presale_end": None,
        "location": None,
    }

    # Valid creation: testmode defaults off, meta data stored, default plugins.
    resp = token_client.post(
        url,
        dict(base_payload, slug="2030", meta_data={meta_prop.name: "Conference"}),
        format='json'
    )
    assert resp.status_code == 201
    with scopes_disabled():
        assert not organizer.events.get(slug="2030").testmode
        assert organizer.events.get(slug="2030").meta_values.filter(
            property__name=meta_prop.name, value="Conference"
        ).exists()
        assert organizer.events.get(slug="2030").plugins == settings.PRETIX_PLUGINS_DEFAULT

    # Unknown meta data property is rejected.
    resp = token_client.post(
        url,
        dict(base_payload, slug="2020", meta_data={"foo": "bar"}),
        format='json'
    )
    assert resp.status_code == 400
    assert resp.content.decode() == '{"meta_data":["Meta data property \'foo\' does not exist."]}'

    # Slug collision with an existing event is rejected.
    resp = token_client.post(
        url,
        dict(base_payload, slug=event.slug, meta_data={"type": "Conference"}),
        format='json'
    )
    assert resp.status_code == 400
    assert resp.content.decode() == '{"slug":["This slug has already been used for a different event."]}'

    # Creating an event directly as live is rejected.
    resp = token_client.post(
        url,
        dict(base_payload, live=True, slug="2031", meta_data={"type": "Conference"}),
        format='json'
    )
    assert resp.status_code == 400
    assert resp.content.decode() == ('{"live":["Events cannot be created as \'live\'. Quotas and payment must be added '
                                     'to the event before sales can go live."]}')
@pytest.mark.django_db
def test_event_create_with_clone(token_client, organizer, event, meta_prop):
    """Cloning: explicit plugins/testmode are honoured, omitted fields are
    inherited from the source event, and an empty plugin list clears plugins."""
    url = '/api/v1/organizers/{}/events/{}/clone/'.format(organizer.slug, event.slug)
    base_payload = {
        "name": {
            "de": "Demo Konference 2020 Test",
            "en": "Demo Conference 2020 Test"
        },
        "live": False,
        "currency": "EUR",
        "date_from": "2018-12-27T10:00:00Z",
        "date_to": "2018-12-28T10:00:00Z",
        "date_admission": None,
        "presale_start": None,
        "presale_end": None,
        "location": None,
    }

    # Explicit testmode/is_public/plugins override the source event's values.
    resp = token_client.post(
        url,
        dict(base_payload, testmode=True, is_public=False, slug="2030",
             meta_data={"type": "Conference"},
             plugins=["pretix.plugins.ticketoutputpdf"]),
        format='json'
    )
    assert resp.status_code == 201
    with scopes_disabled():
        cloned_event = Event.objects.get(organizer=organizer.pk, slug='2030')
        assert cloned_event.plugins == 'pretix.plugins.ticketoutputpdf'
        assert cloned_event.is_public is False
        assert cloned_event.testmode
        assert organizer.events.get(slug="2030").meta_values.filter(
            property__name=meta_prop.name, value="Conference"
        ).exists()

    # Omitting is_public/plugins inherits them from the source event.
    resp = token_client.post(
        url,
        dict(base_payload, slug="2031", meta_data={"type": "Conference"}),
        format='json'
    )
    assert resp.status_code == 201
    with scopes_disabled():
        cloned_event = Event.objects.get(organizer=organizer.pk, slug='2031')
        assert cloned_event.plugins == "pretix.plugins.banktransfer,pretix.plugins.ticketoutputpdf"
        assert cloned_event.is_public is True
        assert organizer.events.get(slug="2031").meta_values.filter(
            property__name=meta_prop.name, value="Conference"
        ).exists()

    # An explicitly empty plugin list clears all plugins on the clone.
    resp = token_client.post(
        url,
        dict(base_payload, slug="2032", plugins=[]),
        format='json'
    )
    assert resp.status_code == 201
    with scopes_disabled():
        cloned_event = Event.objects.get(organizer=organizer.pk, slug='2032')
        assert cloned_event.plugins == ""
@pytest.mark.django_db
def test_event_put_with_clone(token_client, organizer, event, meta_prop):
    """PUT is not an allowed method on the clone endpoint."""
    url = '/api/v1/organizers/{}/events/{}/clone/'.format(organizer.slug, event.slug)
    resp = token_client.put(url, {}, format='json')
    assert resp.status_code == 405
@pytest.mark.django_db
def test_event_patch_with_clone(token_client, organizer, event, meta_prop):
    """PATCH is not an allowed method on the clone endpoint."""
    url = '/api/v1/organizers/{}/events/{}/clone/'.format(organizer.slug, event.slug)
    resp = token_client.patch(url, {}, format='json')
    assert resp.status_code == 405
@pytest.mark.django_db
def test_event_delete_with_clone(token_client, organizer, event, meta_prop):
    """DELETE is not an allowed method on the clone endpoint."""
    url = '/api/v1/organizers/{}/events/{}/clone/'.format(organizer.slug, event.slug)
    resp = token_client.delete(url, {}, format='json')
    assert resp.status_code == 405
@pytest.mark.django_db
def test_event_update(token_client, organizer, event, item, meta_prop):
    """PATCH endpoint: valid updates apply; invalid date order, slug change,
    subevent flip and unknown meta properties are rejected."""
    url = '/api/v1/organizers/{}/events/{}/'.format(organizer.slug, event.slug)

    def patch(payload):
        # All requests in this test target the same event.
        return token_client.patch(url, payload, format='json')

    # Valid date/currency update; existing meta data is untouched.
    resp = patch({
        "date_from": "2018-12-27T10:00:00Z",
        "date_to": "2018-12-28T10:00:00Z",
        "currency": "DKK",
    })
    assert resp.status_code == 200
    with scopes_disabled():
        event = Event.objects.get(organizer=organizer.pk, slug=resp.data['slug'])
        assert event.currency == "DKK"
        assert organizer.events.get(slug=resp.data['slug']).meta_values.filter(
            property__name=meta_prop.name, value="Conference"
        ).exists()

    # Event may not end before it starts.
    resp = patch({
        "date_from": "2017-12-27T10:00:00Z",
        "date_to": "2017-12-26T10:00:00Z"
    })
    assert resp.status_code == 400
    assert resp.content.decode() == '{"non_field_errors":["The event cannot end before it starts."]}'

    # Presale may not end before it starts.
    resp = patch({
        "presale_start": "2017-12-27T10:00:00Z",
        "presale_end": "2017-12-26T10:00:00Z"
    })
    assert resp.status_code == 400
    assert resp.content.decode() == '{"non_field_errors":["The event\'s presale cannot end before it starts."]}'

    # The slug is immutable.
    resp = patch({"slug": "testing"})
    assert resp.status_code == 400
    assert resp.content.decode() == '{"slug":["The event slug cannot be changed."]}'

    # A single event cannot be turned into a series.
    resp = patch({"has_subevents": True})
    assert resp.status_code == 400
    assert resp.content.decode() == ('{"has_subevents":["Once created an event cannot change between an series and a '
                                     'single event."]}')

    # Meta data can be replaced...
    resp = patch({"meta_data": {meta_prop.name: "Workshop"}})
    assert resp.status_code == 200
    with scopes_disabled():
        assert organizer.events.get(slug=resp.data['slug']).meta_values.filter(
            property__name=meta_prop.name, value="Workshop"
        ).exists()

    # ...or cleared entirely...
    resp = patch({"meta_data": {}})
    assert resp.status_code == 200
    with scopes_disabled():
        assert not organizer.events.get(slug=resp.data['slug']).meta_values.filter(
            property__name=meta_prop.name
        ).exists()

    # ...but unknown properties are rejected.
    resp = patch({"meta_data": {"test": "test"}})
    assert resp.status_code == 400
    assert resp.content.decode() == '{"meta_data":["Meta data property \'test\' does not exist."]}'
@pytest.mark.django_db
def test_event_test_mode(token_client, organizer, event):
    """testmode can be toggled on and off via PATCH."""
    url = '/api/v1/organizers/{}/events/{}/'.format(organizer.slug, event.slug)
    for flag in (True, False):
        resp = token_client.patch(url, {"testmode": flag}, format='json')
        assert resp.status_code == 200
        event.refresh_from_db()
        assert event.testmode == flag
@pytest.mark.django_db
def test_event_update_live_no_product(token_client, organizer, event):
    """Going live without any quota configured is rejected."""
    url = '/api/v1/organizers/{}/events/{}/'.format(organizer.slug, event.slug)
    resp = token_client.patch(url, {"live": True}, format='json')
    assert resp.status_code == 400
    assert resp.content.decode() == '{"live":["You need to configure at least one quota to sell anything."]}'
@pytest.mark.django_db
def test_event_update_live_no_payment_method(token_client, organizer, event, item, free_quota):
    """Going live with a paid product but no payment method is rejected."""
    url = '/api/v1/organizers/{}/events/{}/'.format(organizer.slug, event.slug)
    resp = token_client.patch(url, {"live": True}, format='json')
    assert resp.status_code == 400
    assert resp.content.decode() == ('{"live":["You have configured at least one paid product but have not enabled any '
                                     'payment methods."]}')
@pytest.mark.django_db
def test_event_update_live_free_product(token_client, organizer, event, free_item, free_quota):
    """Going live succeeds when only free products (with a quota) exist."""
    url = '/api/v1/organizers/{}/events/{}/'.format(organizer.slug, event.slug)
    resp = token_client.patch(url, {"live": True}, format='json')
    assert resp.status_code == 200
@pytest.mark.django_db
def test_event_update_plugins(token_client, organizer, event, free_item, free_quota):
    """The plugin list can be replaced via PATCH; unknown plugins are rejected."""
    url = '/api/v1/organizers/{}/events/{}/'.format(organizer.slug, event.slug)

    resp = token_client.patch(
        url,
        {"plugins": ["pretix.plugins.ticketoutputpdf", "pretix.plugins.pretixdroid"]},
        format='json'
    )
    assert resp.status_code == 200
    assert set(resp.data.get('plugins')) == {
        "pretix.plugins.ticketoutputpdf",
        "pretix.plugins.pretixdroid"
    }

    # A set payload is accepted as well (DRF's JSON encoder serializes sets).
    resp = token_client.patch(
        url,
        {"plugins": {"pretix.plugins.banktransfer"}},
        format='json'
    )
    assert resp.status_code == 200
    assert resp.data.get('plugins') == ["pretix.plugins.banktransfer"]

    resp = token_client.patch(
        url,
        {"plugins": {"pretix.plugins.test"}},
        format='json'
    )
    assert resp.status_code == 400
    assert resp.content.decode() == '{"plugins":["Unknown plugin: \'pretix.plugins.test\'."]}'
@pytest.mark.django_db
def test_event_detail(token_client, organizer, event, team):
    """Detail is visible to a team granted access to all events."""
    team.all_events = True
    team.save()
    url = '/api/v1/organizers/{}/events/{}/'.format(organizer.slug, event.slug)
    resp = token_client.get(url)
    assert resp.status_code == 200
    assert TEST_EVENT_RES == resp.data
@pytest.mark.django_db
def test_event_delete(token_client, organizer, event):
    """An order-free event can be deleted."""
    url = '/api/v1/organizers/{}/events/{}/'.format(organizer.slug, event.slug)
    resp = token_client.delete(url)
    assert resp.status_code == 204
    with scopes_disabled():
        assert not organizer.events.filter(pk=event.id).exists()
@pytest.mark.django_db
def test_event_with_order_position_not_delete(token_client, organizer, event, item, order_position):
    """Deletion is refused (403) once the event contains orders."""
    url = '/api/v1/organizers/{}/events/{}/'.format(organizer.slug, event.slug)
    resp = token_client.delete(url)
    assert resp.status_code == 403
    assert resp.content.decode() == ('{"detail":"The event can not be deleted as it already contains orders. Please '
                                     'set \'live\' to false to hide the event and take the shop offline instead."}')
    with scopes_disabled():
        assert organizer.events.filter(pk=event.id).exists()
@pytest.fixture
def seatingplan(event, organizer, item):
    """A 3-seat, single-row seating plan with one category ("Stalls").

    The layout string is the raw JSON format expected by SeatingPlan; seat
    guids are "0-0", "0-1" and "0-2" — the removal tests below rely on them.
    """
    return SeatingPlan.objects.create(
        name="Plan", organizer=organizer, layout="""{
  "name": "<NAME>",
  "categories": [
    {
      "name": "Stalls",
      "color": "red"
    }
  ],
  "zones": [
    {
      "name": "Main Area",
      "position": {
        "x": 0,
        "y": 0
      },
      "rows": [
        {
          "row_number": "0",
          "seats": [
            {
              "seat_guid": "0-0",
              "seat_number": "0-0",
              "position": {
                "x": 0,
                "y": 0
              },
              "category": "Stalls"
            },
            {
              "seat_guid": "0-1",
              "seat_number": "0-1",
              "position": {
                "x": 33,
                "y": 0
              },
              "category": "Stalls"
            },
            {
              "seat_guid": "0-2",
              "seat_number": "0-2",
              "position": {
                "x": 66,
                "y": 0
              },
              "category": "Stalls"
            }
          ],
          "position": {
            "x": 0,
            "y": 0
          }
        }
      ]
    }
  ],
  "size": {
    "width": 600,
    "height": 400
  }
}"""
    )
@pytest.mark.django_db
def test_event_update_seating(token_client, organizer, event, item, seatingplan):
    """Assigning a seating plan creates seats and the category mapping."""
    url = '/api/v1/organizers/{}/events/{}/'.format(organizer.slug, event.slug)
    payload = {
        "seating_plan": seatingplan.pk,
        "seat_category_mapping": {"Stalls": item.pk},
    }
    resp = token_client.patch(url, payload, format='json')
    assert resp.status_code == 200
    event.refresh_from_db()
    assert event.seating_plan == seatingplan
    with scopes_disabled():
        assert event.seats.count() == 3
        assert event.seats.filter(product=item).count() == 3
        mapping = event.seat_category_mappings.get()
        assert mapping.layout_category == 'Stalls'
        assert mapping.product == item
@pytest.mark.django_db
def test_event_update_seating_invalid_product(token_client, organizer, event, item, seatingplan):
    """Mapping a category to a nonexistent item id is a validation error."""
    url = '/api/v1/organizers/{}/events/{}/'.format(organizer.slug, event.slug)
    missing_pk = item.pk + 2
    resp = token_client.patch(
        url,
        {
            "seating_plan": seatingplan.pk,
            "seat_category_mapping": {"Stalls": missing_pk},
        },
        format='json'
    )
    assert resp.status_code == 400
    assert resp.content.decode() == '{"seat_category_mapping":["Item \'%d\' does not exist."]}' % missing_pk
@pytest.mark.django_db
def test_event_update_seating_change_mapping(token_client, organizer, event, item, seatingplan):
    """Re-mapping to a category unknown to the plan keeps the seats but
    detaches their product."""
    url = '/api/v1/organizers/{}/events/{}/'.format(organizer.slug, event.slug)

    resp = token_client.patch(
        url,
        {
            "seating_plan": seatingplan.pk,
            "seat_category_mapping": {"Stalls": item.pk},
        },
        format='json'
    )
    assert resp.status_code == 200
    event.refresh_from_db()
    assert event.seating_plan == seatingplan
    with scopes_disabled():
        assert event.seats.count() == 3
        assert event.seats.filter(product=item).count() == 3
        mapping = event.seat_category_mappings.get()
        assert mapping.layout_category == 'Stalls'
        assert mapping.product == item

    resp = token_client.patch(
        url,
        {"seat_category_mapping": {"VIP": item.pk}},
        format='json'
    )
    assert resp.status_code == 200
    event.refresh_from_db()
    assert event.seating_plan == seatingplan
    with scopes_disabled():
        assert event.seats.count() == 3
        mapping = event.seat_category_mappings.get()
        # "VIP" is not a category of the plan, so no seat keeps a product.
        assert event.seats.filter(product=None).count() == 3
        assert mapping.layout_category == 'VIP'
        assert mapping.product == item
@pytest.mark.django_db
def test_remove_seating(token_client, organizer, event, item, seatingplan):
    """Setting seating_plan to None removes all seats and mappings."""
    url = '/api/v1/organizers/{}/events/{}/'.format(organizer.slug, event.slug)

    resp = token_client.patch(
        url,
        {
            "seating_plan": seatingplan.pk,
            "seat_category_mapping": {"Stalls": item.pk},
        },
        format='json'
    )
    assert resp.status_code == 200
    event.refresh_from_db()
    assert event.seating_plan == seatingplan
    with scopes_disabled():
        assert event.seats.count() == 3
        assert event.seat_category_mappings.count() == 1

    resp = token_client.patch(url, {"seating_plan": None}, format='json')
    assert resp.status_code == 200
    event.refresh_from_db()
    assert event.seating_plan is None
    with scopes_disabled():
        assert event.seats.count() == 0
        assert event.seat_category_mappings.count() == 0
@pytest.mark.django_db
def test_remove_seating_forbidden(token_client, organizer, event, item, seatingplan, order_position):
    """The plan cannot be removed while one of its seats is already sold."""
    url = '/api/v1/organizers/{}/events/{}/'.format(organizer.slug, event.slug)

    resp = token_client.patch(
        url,
        {
            "seating_plan": seatingplan.pk,
            "seat_category_mapping": {"Stalls": item.pk},
        },
        format='json'
    )
    assert resp.status_code == 200
    event.refresh_from_db()
    assert event.seating_plan == seatingplan
    with scopes_disabled():
        assert event.seats.count() == 3
        assert event.seat_category_mappings.count() == 1
        # Sell one seat so removal becomes impossible.
        order_position.seat = event.seats.first()
        order_position.save()

    resp = token_client.patch(url, {"seating_plan": None}, format='json')
    assert resp.status_code == 400
    assert resp.content.decode() == ('{"seating_plan":["You can not change the plan since seat \\"0-0\\" is not '
                                     'present in the new plan and is already sold."]}')
@pytest.mark.django_db
def test_no_seating_for_series(token_client, organizer, event, item, seatingplan, order_position):
    """An event series may not be assigned a seating plan directly."""
    event.has_subevents = True
    event.save()
    url = '/api/v1/organizers/{}/events/{}/'.format(organizer.slug, event.slug)
    resp = token_client.patch(
        url,
        {
            "seating_plan": seatingplan.pk,
            "seat_category_mapping": {"Stalls": item.pk},
        },
        format='json'
    )
    assert resp.status_code == 400
    assert resp.content.decode() == '{"non_field_errors":["Event series should not directly be assigned a seating plan."]}'
@pytest.mark.django_db
def test_event_create_with_seating(token_client, organizer, event, meta_prop, seatingplan):
    """Creating an event with a seating plan creates its seats (no mappings)."""
    payload = {
        "name": {
            "de": "Demo Konference 2020 Test",
            "en": "Demo Conference 2020 Test"
        },
        "live": False,
        "currency": "EUR",
        "date_from": "2017-12-27T10:00:00Z",
        "date_to": "2017-12-28T10:00:00Z",
        "date_admission": None,
        "is_public": False,
        "presale_start": None,
        "presale_end": None,
        "location": None,
        "slug": "2030",
        "seating_plan": seatingplan.pk,
        "meta_data": {meta_prop.name: "Conference"},
    }
    resp = token_client.post(
        '/api/v1/organizers/{}/events/'.format(organizer.slug), payload, format='json'
    )
    assert resp.status_code == 201
    with scopes_disabled():
        event = Event.objects.get(slug=resp.data['slug'])
        assert event.seating_plan == seatingplan
        assert event.seats.count() == 3
        assert event.seat_category_mappings.count() == 0
@pytest.mark.django_db
def test_event_create_with_seating_maps(token_client, organizer, event, meta_prop, seatingplan):
    """seat_category_mapping may not be supplied at event creation time."""
    payload = {
        "name": {
            "de": "Demo Konference 2020 Test",
            "en": "Demo Conference 2020 Test"
        },
        "live": False,
        "currency": "EUR",
        "date_from": "2017-12-27T10:00:00Z",
        "date_to": "2017-12-28T10:00:00Z",
        "date_admission": None,
        "is_public": False,
        "presale_start": None,
        "presale_end": None,
        "location": None,
        "slug": "2030",
        "seating_plan": seatingplan.pk,
        "seat_category_mapping": {"Foo": 1},
        "meta_data": {meta_prop.name: "Conference"},
    }
    resp = token_client.post(
        '/api/v1/organizers/{}/events/'.format(organizer.slug), payload, format='json'
    )
    assert resp.status_code == 400
    assert resp.content.decode() == '{"seat_category_mapping":["You cannot specify seat category mappings on event creation."]}'
# --- dataset file boundary (StarcoderdataPython) ---
# my_code.py
from py_extras import change_stmt
def make_change(cost, amount_given):
    """Break the change due (``amount_given - cost``) into US denominations.

    Works in integer pennies throughout so repeated float subtraction cannot
    drift; the original counted a penny for any residue >= 0.5 cent, which
    mis-rounds sub-cent float noise.

    Args:
        cost: total cost of the purchase (dollars).
        amount_given: amount tendered (dollars); assumed >= cost.

    Returns:
        The string produced by ``change_stmt`` describing the bills and coins.
    """
    # Convert once to integer pennies; round() absorbs float representation
    # noise such as 0.1 + 0.2 != 0.3.
    change = int(round((amount_given - cost) * 100))
    print("Your change is: " + str(change) + " cents")
    # Denomination values in pennies, largest first: $20, $10, $5, $1,
    # quarter, dime, nickel, penny.
    counts = []
    for denom in (2000, 1000, 500, 100, 25, 10, 5, 1):
        count, change = divmod(change, denom)
        counts.append(count)
    twenties, tens, fives, ones, quarters, dimes, nickels, pennies = counts
    # (The original also printed the leftover change here — always 0 — which
    # was leftover debug output and has been removed.)
    return change_stmt(twenties, tens, fives, ones, quarters, dimes, nickels, pennies)
if __name__ == '__main__':
    # Smoke test with fixed values; switch to the input() calls below for
    # interactive use.
    cost = 60
    paid = 117.24
    #cost = float(input("What is the total cost? "))
    #paid = float(input("How much did you pay? "))
    s = make_change(cost, paid)
    print(s)
    # After you are satisfied with your results, use input() calls to prompt the user for values:
    # cost = float(input("Cost of the items: "))
    # paid = float(input("Amount paid: "))
# print(make_change(cost, paid))
# -*- coding: utf-8 -*-
"""
Created on 19-4-24 下午9:22
IDE PyCharm
@author: <NAME>
"""
from torch.utils.data import Dataset
import os
import h5py as hf
import torch
from config import config
import numpy as np
class macaque_h5(Dataset):
    """HDF5-backed dataset yielding (image, density, prob, voronoi, cluster, filename).

    Each sample file (one per line of *list_path*, with ``config.DATA.SUFFIX``
    appended) must contain the datasets ``image``, ``density``, ``prob``,
    ``voronoi`` and ``cluster``.

    Fixes over the original:
      * ``np.long`` was removed in NumPy 1.24 — labels are now cast with
        ``np.int64`` (the same concrete dtype on 64-bit platforms).
      * the h5py file handle and the list file were never closed; both are now
        opened via context managers.
    """

    def __init__(self, data_path, list_path, mode):
        self.data_path = data_path
        # One sample per line; the list file holds basenames without suffix.
        with open(list_path) as list_file:
            self.data_files = [os.path.join(data_path, line.rstrip() + config.DATA.SUFFIX)
                               for line in list_file]
        self.num_samples = len(self.data_files)
        self.mode = mode  # 'train' / 'valid' / anything else = test
        self.gt_factor = config.DATA.LOG_PARA
        self.peak_alpha = config.DATA.PEAK_ALPHA

    def __getitem__(self, index):
        fname = self.data_files[index]
        # Train/valid and test may use different input resolutions.
        if self.mode == 'train' or self.mode == 'valid':
            input_size = config.TRAIN.IMAGE_SIZE
        else:
            input_size = config.TEST.IMAGE_SIZE
        img, gt_den, gt_cls, gt_vor, gt_clu, fname = self.read_image_and_gt(fname, input_size)
        img = self.img_transform(img)
        gt_den, gt_cls, gt_vor, gt_clu = self.gt_transform(gt_den, gt_cls, gt_vor, gt_clu)
        return img, gt_den, gt_cls, gt_vor, gt_clu, fname

    def __len__(self):
        return self.num_samples

    def read_image_and_gt(self, fname, input_size):
        """Load one HDF5 sample and encode the voronoi/cluster color labels.

        Returns (img, den, cls, vor, clu, fname); vor/clu are encoded as
        0 = background, 1 = nuclei, 2 = ignored.
        """
        # Context manager closes the handle even on error (original leaked it).
        with hf.File(os.path.join(self.data_path, fname), 'r') as h5f:
            img = h5f['image'][()]
            den = h5f['density'][()]
            cls = h5f['prob'][()]
            vor = h5f['voronoi'][()]
            clu = h5f['cluster'][()]
        if len(img.shape) == 3:
            assert (list(img.shape) == input_size) & (img.shape == den.shape) & (img.shape == cls.shape),\
                'the shape of image or density map does not match the input size'
            img = img[np.newaxis, :]
        elif len(img.shape) == 4:
            assert (list((img.shape[0], img.shape[2], img.shape[3])) == input_size) & ((img.shape[0], img.shape[2], img.shape[3]) == den.shape) \
                & ((img.shape[0], img.shape[2], img.shape[3]) == cls.shape), \
                'the shape of image or density map does not match the input size'
            # Move the channel axis first: (ch, s, h, w).
            img = np.transpose(img, (1, 0, 2, 3))
        den = den[np.newaxis, :]
        cls = cls[np.newaxis, :]  # ch, s, h, w
        # Encode voronoi labels from the color channels: start as "ignored".
        new_label = np.ones((vor.shape[0], vor.shape[1], vor.shape[2]), dtype=np.uint8) * 2  # ignored
        new_label[vor[:, :, :, 0] == 255] = 0  # background
        new_label[vor[:, :, :, 1] == 255] = 1  # nuclei
        vor = new_label[:, :, :]  # s, h, w
        # Same encoding for the cluster labels.
        new_label = np.ones((clu.shape[0], clu.shape[1], clu.shape[2]), dtype=np.uint8) * 2  # ignored
        new_label[clu[:, :, :, 0] == 255] = 0  # background
        new_label[clu[:, :, :, 1] == 255] = 1  # nuclei
        clu = new_label[:, :, :]  # s, h, w
        return img, den, cls, vor, clu, fname

    def get_num_samples(self):
        return self.num_samples

    def img_transform(self, img):
        """Normalize the image in place with the configured per-channel mean/var."""
        img = img.astype(np.float32)
        if img.shape[0] == 1:
            avg = config.DATA.MEAN
            var = config.DATA.VAR
            img = (img - avg[0]) / var[0]
        elif img.shape[0] == 2:
            avg = config.DATA.MEAN
            var = config.DATA.VAR
            img[0, :] = (img[0, :] - avg[0]) / var[0]
            img[1, :] = (img[1, :] - avg[1]) / var[1]
        return img

    def gt_transform(self, den, cls, vor=None, clu=None):
        """Turn the density/probability maps into 2-channel soft targets.

        The density is inverted via 1/(1 + peak_alpha*den), peak-normalized,
        and stacked as (background, foreground). vor/clu (if given) are cast
        to int64 class labels.
        """
        den = den.astype(np.float32)
        den = 1. / (1. + self.peak_alpha * den)
        den = den / np.max(den)
        den = np.concatenate((1. - den, den), axis=0)
        cls = cls.astype(np.float32)
        cls = np.concatenate((1. - cls, cls), axis=0)
        if vor is not None:
            # np.int64 replaces the removed np.long alias (same dtype on LP64).
            vor = vor.astype(np.int64)
            clu = clu.astype(np.int64)
            return den, cls, vor, clu
        return den, cls
# --- dataset file boundary (StarcoderdataPython) ---
# deploy/python/downloader/SAP_Scenarios.py
#!/usr/bin/env python3
#
# SMP Downloader
#
# License: GNU General Public License (GPL)
# (c) 2019 Microsoft Corp.
#
class Package(object):
    """One downloadable SAP archive: what to fetch, where to put it, and how
    to pick a build from the SMP search results.

    ``selector_*`` are expressions evaluated against a ``results`` list of
    result dicts to pick an index by release date.
    """
    selector_newest = 'max(range(len(results)), key=lambda index: results[index]["ReleaseDate"])'
    selector_oldest = 'min(range(len(results)), key=lambda index: results[index]["ReleaseDate"])'
    dir_db = "DB"
    dir_app = "APP"
    dir_rti = "RTI"
    os_linux = "LINUX_X64"
    os_windows = "NT_X64"
    os_indep = "OSINDEP"

    def __init__(self, name=None, target_dir=None, retr_params=None, condition=None, filter=None, os_avail=None, os_var=None, selector=None):
        # `x or default` substitutes falsy/omitted arguments; a fresh list is
        # created per instance so no mutable default is shared.
        self.name = name or ""
        self.target_dir = target_dir or ""
        self.retr_params = retr_params or ""
        self.condition = condition or []
        self.filter = filter or []
        self.os_avail = os_avail or []
        self.os_var = os_var or ""
        self.selector = selector or ""
class Scenario(object):
    """A named installation scenario: required parameters plus its packages."""

    def __init__(self, name=None, required_params=None, packages=None):
        # Note: `name` deliberately keeps None when omitted (original behavior).
        self.name = name
        self.required_params = required_params or []
        self.packages = packages or []
class DBScenario(Scenario):
    """Scenario specialization for database products.

    Bug fix: the original called ``super(DBComponent, self)`` — ``DBComponent``
    is not defined anywhere, so every instantiation raised NameError. The
    correct first argument is this class itself.
    """

    def __init__(self, **kwargs):
        super(DBScenario, self).__init__(**kwargs)
# S/4HANA application scenario: currently an empty placeholder (no packages
# defined yet).
S4 = Scenario(
    required_params = [],
    packages = [
    ],
)
# Runtime/installer tooling scenario: only SAPCAR (newest build), available
# for both Linux and Windows; the actual OS is resolved from `rti.os_type`.
RTI = Scenario(
    required_params = [],
    packages = [
        Package(
            name = "SAPCAR",
            target_dir = Package.dir_rti,
            retr_params = {"ENR": "67838200100200019185"},
            os_avail = [Package.os_linux, Package.os_windows],
            os_var = 'rti.os_type',
            selector = Package.selector_newest,
        ),
    ],
)
# SAP HANA scenario: platform/database/client/studio packages for HANA 1.0
# and 2.0, plus the XS Advanced stack. Each package's `condition` strings are
# evaluated against the runtime config (db/app objects) to decide inclusion;
# `filter` strings narrow the SMP search results; `selector` picks one result.
HDB = Scenario(
    required_params = [
        "product_version",
    ],
    packages = [
        #  _    _          _   _          __   ___
        # | |  | |   /\   | \ | |   /\   /_ | / _ \
        # | |__| |  /  \  |  \| |  /  \   | || | | |
        # |  __  | / /\ \ | . ` | / /\ \  | || | | |
        # | |  | |/ ____ \| |\  |/ ____ \ | || |_| |
        # |_|  |_/_/    \_\_| \_/_/    \_\|_(_)___/
        #
        #############################################################
        # HANA Platform 1.0
        Package(
            name = "IMDB_PLATFORM100",
            target_dir = Package.dir_db,
            retr_params = {"ENR": "01200314690900003484", "SWTYPSC": "N", "PECCLSC": "NONE", "V": "INST", "TA": "ACTUAL"},
            condition = ['db.product_version == "1.0"', '"PLATFORM" in db.components'],
            filter = ['"1.0" in r["Description"]', '"SAP HANA Platf" in r["Description"]','"ZIP" in r["Infotype"]'],
            os_avail = [Package.os_indep],
            os_var = 'Package.os_indep',
            selector = Package.selector_newest,
        ),
        # HANA Database 1.0 (Linux only)
        Package(
            name = "IMDB_SERVER100",
            target_dir = Package.dir_db,
            retr_params = {"ENR": "01200615320200017790"},
            condition = ['db.product_version == "1.0"', '"DATABASE" in db.components'],
            filter = ['"Maintenance Revision" in r["Description"]'],
            os_avail = [Package.os_linux],
            os_var = 'Package.os_linux',
            selector = Package.selector_newest,
        ),
        #############################################################
        # HANA Client for HANA 1.0 (Windows/Linux)
        Package(
            name = "IMDB_CLIENT100",
            target_dir = Package.dir_app,
            retr_params = {"ENR": "01200615320200017866"},
            condition = ['db.product_version == "1.0"', '"CLIENT" in db.components'],
            filter = None,
            os_avail = [Package.os_linux, Package.os_windows],
            os_var = 'app.os_type',
            selector = Package.selector_newest,
        ),
        #############################################################
        # HANA Studio for HANA 1.0 (Windows/Linux)
        Package(
            name = "IMC_STUDIO1",
            target_dir = Package.dir_db,
            retr_params = {"ENR": "73554900100200000585"},
            condition = ['db.product_version == "1.0"', '"STUDIO" in db.components'],
            filter = ['"Revision 1" in r["Description"]'],
            os_avail = [Package.os_linux, Package.os_windows],
            os_var = 'Config.bastion_os',
            selector = Package.selector_newest,
        ),
        #  _    _          _   _          ___   ___
        # | |  | |   /\   | \ | |   /\   |__ \ / _ \
        # | |__| |  /  \  |  \| |  /  \     ) || | | |
        # |  __  | / /\ \ | . ` | / /\ \   / / | | | |
        # | |  | |/ ____ \| |\  |/ ____ \ / /_ | |_| |
        # |_|  |_/_/    \_\_| \_/_/    \_\|____(_)___/
        #
        #############################################################
        # HANA Platform 2.0
        Package(
            name = "IMDB_PLATFORM200",
            target_dir = Package.dir_db,
            retr_params = {"ENR": "73554900100900001301", "SWTYPSC": "N", "PECCLSC": "NONE", "V": "INST", "TA": "ACTUAL"},
            condition = ['db.product_version == "2.0"', '"PLATFORM" in db.components'],
            filter = ['"2.0" in r["Description"]', '"86_64" in r["Description"]','"ZIP" in r["Infotype"]'],
            os_avail = [Package.os_indep],
            os_var = 'Package.os_indep',
            selector = Package.selector_newest,
        ),
        # HANA Database 2.0 (Linux only)
        Package(
            name = "IMDB_SERVER200",
            target_dir = Package.dir_db,
            retr_params = {"ENR": "73554900100200005327"},
            condition = ['db.product_version == "2.0"', '"DATABASE" in db.components'],
            filter = ['"Revision" in r["Description"]'],
            os_avail = [Package.os_linux],
            os_var = 'Package.os_linux',
            selector = Package.selector_newest,
        ),
        #############################################################
        # HANA Client for HANA 2.0 (Windows/Linux)
        Package(
            name = "IMDB_CLIENT20-NT_X64",
            target_dir = Package.dir_app,
            retr_params = {"ENR": "73554900100200005390"},
            condition = ['db.product_version == "2.0"', '"CLIENT" in db.components'],
            filter = None,
            os_avail = [Package.os_linux, Package.os_windows],
            os_var = 'app.os_type',
            selector = Package.selector_newest,
        ),
        #############################################################
        # HANA Studio for HANA 2.0 (Windows/Linux)
        Package(
            name = "IMC_STUDIO2-NT_X64",
            target_dir = Package.dir_db,
            retr_params = {"ENR": "73554900100200000585"},
            condition = ['db.product_version == "2.0"', '"STUDIO" in db.components'],
            filter = ['"Revision 2" in r["Description"]'],
            os_avail = [Package.os_linux, Package.os_windows],
            os_var = 'Config.bastion_os',
            selector = Package.selector_newest,
        ),
        #  __   __ _____         
        #  \ \ / // ____|   /\   
        #   \ V /| (___    /  \  
        #    > <  \___ \  / /\ \ 
        #   / . \ ____) / ____ \ 
        #  /_/ \_\_____/_/    \_\
        #
        #############################################################
        # XS Advanced Runtime (SAP Extended App Services)
        Package(
            name = "EXTAPPSER00P",
            target_dir = Package.dir_db,
            retr_params = {"ENR": "73555000100200004274"},
            condition = ['"XSA" in db.components'],
            filter = None,
            os_avail = [Package.os_linux],
            os_var = 'Package.os_linux',
            selector = Package.selector_newest,
        ),
        # DI Core
        Package(
            name = "XSACDEVXDI",
            target_dir = Package.dir_db,
            retr_params = {
                "ENR"            : "73554900100200003056",
                "PECCLSC"        : "PLTFRM",
                "INCL_PECCLSC2"  : "DB",
                "PECGRSC2"       : "HDB"
            },
            condition = ['"XSA" in db.components'],
            filter = None,
            os_avail = [Package.os_indep],
            os_var = 'Package.os_indep',
            selector = Package.selector_newest,
        ),
        # SAPUI5 FESV4
        Package(
            name = "XSACUI5FESV4",
            target_dir = Package.dir_db,
            retr_params = {"ENR": "73554900100200006811"},
            condition = ['"XSA" in db.components'],
            filter = None,
            os_avail = [Package.os_indep],
            os_var = 'Package.os_indep',
            selector = Package.selector_newest,
        ),
    ]
)
# Lookup tables mapping configuration keys to scenario objects.
avail_apps = {"S4": S4}
avail_dbs = {"HANA": HDB}
avail_rtis = {"RTI": RTI}
# --- dataset file boundary (StarcoderdataPython) ---
from . import _ST
from . import pyFunctions as __pf
from . import __plotPatterns__ as __pp
# Attach the Python-side helper implementations onto the extension-module
# classes so instances expose them as ordinary methods/attributes.
_ST.SpikingTempotron.getVoltageTrace = __pf.__SpikingTempotron_getVoltageTrace
_ST.SpikingTempotron.getVoltageTraceFromInputLayer = __pf.__SpikingTempotron_getVoltageTrace_1
_ST.Tempotron.w = __pf.__tempotron__w
_ST.SpikeTrain.plot = __pp._st_plot
# Re-export the extension module's public names plus the conversion helper.
from ._ST import *
from .pyFunctions import times2SpikeTrain
| StarcoderdataPython |
1781448 | from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a boolean flag on Student recording whether the supervision
    # attestation document has been received (defaults to False).
    dependencies = [
        ('stages', '0014_added_supervisionbill_model'),
    ]
    operations = [
        migrations.AddField(
            model_name='student',
            name='supervision_attest_received',
            field=models.BooleanField(default=False, verbose_name='Attest. supervision reçue'),
        ),
    ]
| StarcoderdataPython |
3277241 | """
Pre-train classifiers
Author: <NAME>
Date: 04/19/2016
"""
import os
import re
import Porter_stemming as ps
import math
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.externals import joblib
# Helper function
# Contraction substrings and the word pairs they expand to.  Order matters:
# the first key found inside a token wins (mirrors the original elif chain).
# NOTE(review): "i'm" expands to uppercase "I", which a lowercase stopword
# list will not match -- kept as-is to preserve existing behaviour.
_CONTRACTIONS = [
    ("we're", ("we", "are")),
    ("i'm", ("I", "am")),
    ("isn't", ("is", "not")),
    ("doesn't", ("does", "not")),
    ("don't", ("do", "not")),
    ("can't", ("can", "not")),
    ("we've", ("we", "have")),
    ("it's", ("it", "is")),
]


def tokenize_text(input_str):
    """Split *input_str* into word tokens, expanding common contractions.

    Tokens are maximal runs of lowercase letters, apostrophes and hyphens.
    A token containing a known contraction is dropped from its position and
    its expansion words are appended to the end of the token list.

    :param input_str: text to tokenize (callers lowercase it beforehand).
    :return: list of token strings.
    """
    tokens = re.findall(r"\b[a-z'-]+\b", input_str)
    expanded = list(tokens)
    for index, item in enumerate(tokens):
        for key, words in _CONTRACTIONS:
            if key in item:
                # Blank the original token; its expansion goes to the end.
                expanded[index] = ""
                expanded.extend(words)
                break
    # Drop the placeholders left behind by expanded contractions.
    return [word for word in expanded if word != ""]
def remove_stopwords(tokenizeText_list, stopwords_dict):
    """Return a new list with every stopword filtered out.

    :param tokenizeText_list: token list (not modified).
    :param stopwords_dict: dict whose keys are the stopwords.
    :return: surviving tokens in their original order.
    """
    # Single O(n) pass instead of the original copy-and-remove loop,
    # which was O(n^2) because list.remove rescans from the front.
    return [word for word in tokenizeText_list if word not in stopwords_dict]
def stem_words(removeStopwords_list):
    """Stem each word with the Porter stemmer, re-stemming to a fixed point."""
    stemmer = ps.PorterStemmer()

    def _stem_once(w):
        # Porter_stemming's API takes start and inclusive end indices.
        return stemmer.stem(w, 0, len(w) - 1)

    stemmed = []
    for word in removeStopwords_list:
        current = _stem_once(word)
        again = _stem_once(current)
        # Keep stemming until the output stops changing.
        while again != current:
            current, again = again, _stem_once(again)
        stemmed.append(again)
    return stemmed
def train_naive_bayes(X_train, t_train):
    """Train a two-class multinomial naive Bayes model with add-one smoothing.

    :param X_train: list of documents, each a list of tokens.
    :param t_train: parallel list of labels (1 = "free", 0 = "not").
    :return: tuple (prior, likelihood, num) where
        prior = [P(not), P(free)],
        likelihood = [not_dict, free_dict] of Laplace-smoothed P(word|class),
        num = [free_num, not_num, voc_num] word totals needed at predict
        time to smooth unseen words.
    """
    from collections import Counter

    n_train = len(t_train)
    free_words = []
    not_words = []
    num_free = 0  # number of documents labelled "free"
    for doc, label in zip(X_train, t_train):
        if label == 0:
            not_words += doc
        else:
            num_free += 1
            free_words += doc
    # Vocabulary spans both classes; smoothing denominators depend on it.
    voc_num = len(set(free_words) | set(not_words))
    free_num = len(free_words)  # total word occurrences in "free" docs
    not_num = len(not_words)
    num = [free_num, not_num, voc_num]
    p_free = 1.0 * num_free / n_train
    prior = [1 - p_free, p_free]

    def _smoothed(words, total):
        # Laplace (add-one) smoothing: (count + 1) / (class_total + |V|).
        return {
            w: (c + 1.0) / (total + voc_num)
            for w, c in Counter(words).items()
        }

    likelihood = [_smoothed(not_words, not_num), _smoothed(free_words, free_num)]
    return prior, likelihood, num
def train():
    """Train and persist all classifiers (naive Bayes, SVM, kNN, decision tree).

    Reads 'stopwords.txt' and every document under 'trainingdata/'.
    Documents whose path contains 'not' are labelled 0, all others 1.
    Models, plus the vocabulary/IDF dictionary, are serialised with joblib
    into the current directory ('nb.pkl', 'svm.pkl', 'knn.pkl', 'dt.pkl',
    'word_dict.pkl').
    """
    # Load stopwords list; 'with' guarantees the handle is closed.
    stopwords_filename = 'stopwords.txt'
    with open(stopwords_filename) as infile:
        stopwords = infile.read()
    stopwords = re.split(r'\s+', stopwords)
    stopwords_dict = {word: index for index, word in enumerate(stopwords)}
    # Load training data
    file_list = os.listdir('trainingdata/')
    if '.DS_Store' in file_list:
        file_list.remove('.DS_Store')
    voc = []  # vocabulary (all tokens, with duplicates)
    X = []    # documents as token lists
    t = []    # true labels
    for entry in file_list:
        file_name = 'trainingdata/' + entry
        with open(file_name) as infile:
            input_str = infile.read().lower()
        text = tokenize_text(input_str)
        # You can change the pre-processing methods here if you want
        text = remove_stopwords(text, stopwords_dict)
        text = stem_words(text)
        voc += text
        X.append(text)
        # Label comes from the file path, matching the original convention.
        if 'not' in file_name:
            t.append(0)
        else:
            t.append(1)
    # Calculate TF-IDF to construct document vectors
    voc = set(voc)
    word_dict = {}
    doc_num = len(t)
    for idx, word in enumerate(voc):
        word_dict[word] = [idx, 0]  # [index in vector, document frequency]
    # Document Frequency
    for x in X:
        for word in set(x):
            word_dict[word][1] += 1
    # Term Frequency scaled once by IDF.
    X_vectors = []
    for x in X:
        vector = [0] * len(voc)
        for word in x:
            vector[word_dict[word][0]] += 1
        # BUG FIX: iterate distinct words so each entry is multiplied by its
        # IDF exactly once; the original multiplied per occurrence, yielding
        # tf * idf**tf instead of tf * idf.
        for word in set(x):
            vector[word_dict[word][0]] *= math.log10(1.0 * doc_num / word_dict[word][1])
        X_vectors.append(vector)
    # Naive Bayes (custom implementation above).
    prior, likelihood, num = train_naive_bayes(X, t)
    nb_clf = [prior, likelihood, num]
    joblib.dump(nb_clf, 'nb.pkl')
    # SVM, kNN and decision tree all train on the TF-IDF vectors.
    svm_clf = SVC()
    svm_clf.fit(X_vectors, t)
    joblib.dump(svm_clf, 'svm.pkl')
    knn_clf = KNeighborsClassifier()
    knn_clf.fit(X_vectors, t)
    joblib.dump(knn_clf, 'knn.pkl')
    dt_clf = DecisionTreeClassifier()
    dt_clf.fit(X_vectors, t)
    joblib.dump(dt_clf, 'dt.pkl')
    joblib.dump(word_dict, 'word_dict.pkl')
train() | StarcoderdataPython |
1768288 | <gh_stars>100-1000
'''
Helper functions for writing out basis set references in various formats
'''
from .. import api
# Citation keys that every user of the library itself should cite.
_lib_refs = ["pritchard2019a", "feller1996a", "schuchardt2007a"]
# Preamble shown alongside those citations (embedded newlines intentional).
_lib_refs_desc = 'If you downloaded data from the basis set\nexchange or used the basis set exchange python library, please cite:\n'
def get_library_citation():
    """Return a descriptive string and reference data for what users of the library should cite"""
    reference_data = api.get_reference_data()
    selected = {}
    for key in _lib_refs:
        selected[key] = reference_data[key]
    return (_lib_refs_desc, selected)
| StarcoderdataPython |
158262 | from flask import request, jsonify
from . import api
from app.models import Comment, MessageBoard, PostView, History, Post
import datetime
from sqlalchemy import func
from collections import OrderedDict
def get_certain_day_sum_visit_count(visit_date):
    """Total post views recorded on *visit_date*, or 0 when no rows exist."""
    day_key = visit_date.strftime("%Y-%m-%d")
    query = PostView.query.filter_by(visit_date=day_key).with_entities(
        func.sum(PostView.views).label("certain_day_visit_count"))
    total = query.all()[0].certain_day_visit_count
    # SUM() over an empty set yields NULL/None, which we report as zero.
    if total is None:
        return 0
    return total
def get_comment_count():
    """Number of comments stored in the database."""
    return Comment.query.count()
def get_message_board_count():
    """Number of message-board entries stored in the database."""
    return MessageBoard.query.count()
def get_today_visit_count():
    """Total post views recorded today."""
    today = datetime.datetime.today()
    return get_certain_day_sum_visit_count(today)
def get_sum_visit_count():
    """All-time total post views, or 0 when no rows exist."""
    rows = PostView.query.with_entities(
        func.sum(PostView.views).label("sum_visit_count")).all()
    total = rows[0].sum_visit_count
    # SUM() over an empty table yields NULL/None.
    if total is None:
        return 0
    return total
def get_init_today_visit_data_dict():
    """Return an OrderedDict mapping each hour of today to an initial count.

    Keys look like "YYYY-MM-DD HH:00" in chronological order; every value
    starts at 0 and is later overwritten with real counts.
    """
    key_prefix = datetime.datetime.today().strftime("%Y-%m-%d")
    today_visit_data_dict = OrderedDict()
    for hour in range(24):
        # Zero-pad via the format spec instead of branching on hour < 10.
        key = "{} {:02d}:00".format(key_prefix, hour)
        today_visit_data_dict[key] = 0
    return today_visit_data_dict
def get_today_visit_chart():
    """Build per-hour visit counts for today as {"xAxis": [...], "series": [...]}."""
    # Match rows whose visit_time string starts with today's date.
    visit_time_like = "{}%".format(
        datetime.datetime.today().strftime("%Y-%m-%d"))
    # Group visits by hour.  NOTE(review): func.date_format is MySQL-specific;
    # confirm the deployment database before porting.
    rows = History.query.filter(History.visit_time.like(visit_time_like)).with_entities(func.count(History.id).label(
        "count"), func.date_format(History.visit_time, "%Y-%m-%d %H").label("today_time")).group_by(func.date_format(History.visit_time, "%Y-%m-%d %H"))
    # Start from a zero-filled 24-hour template so hours with no traffic
    # still appear on the chart.
    today_visit_data_dict = get_init_today_visit_data_dict()
    for row in rows:
        key = "{}:00".format(row.today_time)
        today_visit_data_dict[key] = row.count
    today_visit_chart_data_dict = {}
    today_visit_chart_data_dict["xAxis"] = list(today_visit_data_dict.keys())
    today_visit_chart_data_dict["series"] = list(
        today_visit_data_dict.values())
    return today_visit_chart_data_dict
def get_top_ten_posts():
    """Return [[post_id, title], ...] for the ten most-viewed posts."""
    rows = Post.query.join(PostView, Post.id == PostView.post_id).with_entities(func.sum(PostView.views).label(
        "sum_views"), Post.id, Post.title).group_by(PostView.post_id).order_by(func.sum(PostView.views).desc()).limit(10)
    return [[row.id, row.title] for row in rows]
def get_sum_seven_day_visit_chart():
    """Build visit totals for the last seven days (oldest first, ending today).

    Returns {"xAxis": ["YYYY-MM-DD", ...], "series": [count, ...]}.
    """
    # Read the clock once: calling now() inside the loop (as before) could
    # cross midnight mid-loop and skew the axis; the old `today` variable
    # was also assigned but never used.
    now = datetime.datetime.now()
    x_axis = []
    series = []
    for offset in range(-6, 1):
        day = now + datetime.timedelta(days=offset)
        x_axis.append(day.strftime("%Y-%m-%d"))
        series.append(get_certain_day_sum_visit_count(day))
    return {"xAxis": x_axis, "series": series}
def get_sum_device_visit_chart():
    """Aggregate visit counts per browser as [{"name": ..., "value": ...}]."""
    rows = History.query.with_entities(func.count(History.id).label(
        "device_count"), History.browser).group_by(History.browser)
    return [{"name": row.browser, "value": row.device_count} for row in rows]
@api.route("/dashboard", methods=["GET"])
def dashboard():
# https://github.com/pallets/flask/issues/835
return jsonify({"comment_count": get_comment_count(), "message_board_count": get_message_board_count(), "today_visit_count": get_today_visit_count(), "sum_visit_count": get_sum_visit_count(), "today_visit_chart": get_today_visit_chart(), "top_ten_posts": get_top_ten_posts(), "sum_seven_day_visit_chart": get_sum_seven_day_visit_chart(), "sum_device_visit": get_sum_device_visit_chart()})
| StarcoderdataPython |
1738457 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
import joblib
import tqdm
import glob
import imageio
import copy
import numpy as np
from typing import ClassVar, Dict, List
from collections import defaultdict
import torch
import habitat
from habitat import Config, logger
from habitat.utils.visualizations import maps
from pointnav_vo.utils.tensorboard_utils import TensorboardWriter
from pointnav_vo.vis.utils import resize_top_down_map
from pointnav_vo.vo.common.common_vars import *
EPSILON = 1e-8
class BaseTrainer:
    r"""Generic trainer class that serves as a base template for more
    specific trainer classes like RL trainer, SLAM or imitation learner.
    Includes only the most basic functionality.
    """
    # Task names the concrete trainer supports; set by subclasses.
    supported_tasks: ClassVar[List[str]]
    def train(self) -> None:
        """Run training; must be implemented by subclasses."""
        raise NotImplementedError
    def eval(self) -> None:
        """Run evaluation; must be implemented by subclasses."""
        raise NotImplementedError
    def save_checkpoint(self, file_name) -> None:
        """Persist trainer state under *file_name*; implemented by subclasses."""
        raise NotImplementedError
    def load_checkpoint(self, checkpoint_path, *args, **kwargs) -> Dict:
        """Load and return a checkpoint dict; implemented by subclasses."""
        raise NotImplementedError
class BaseRLTrainer(BaseTrainer):
    r"""Base trainer class for RL trainers. Future RL-specific
    methods should be hosted here.
    """
    device: torch.device
    config: Config
    video_option: List[str]
    _flush_secs: int
    def __init__(self, config: Config):
        """Store the config and read the visual-corruption settings from it."""
        super().__init__()
        assert config is not None, "needs config file to initialize trainer"
        self.config = config
        # Tensorboard event-flush interval in seconds.
        self._flush_secs = 30
        # Define Corruptions (applied to RGB and depth observations).
        self.corruptions_sequence = (
            config.TASK_CONFIG.SIMULATOR.CORRUPTIONS.CORRUPTIONS_SEQUENCE
        )
        self.severity_sequence = (
            config.TASK_CONFIG.SIMULATOR.CORRUPTIONS.SEVERITY_SEQUENCE
        )
        self.corruptions_sequence_depth= (
            config.TASK_CONFIG.SIMULATOR.CORRUPTIONS.CORRUPTIONS_SEQUENCE_DEPTH
        )
        self.severity_sequence_depth = (
            config.TASK_CONFIG.SIMULATOR.CORRUPTIONS.SEVERITY_SEQUENCE_DEPTH
        )
    @property
    def flush_secs(self):
        """Interval (seconds) at which the TensorboardWriter flushes events."""
        return self._flush_secs
    @flush_secs.setter
    def flush_secs(self, value: int):
        self._flush_secs = value
    def train(self) -> None:
        """Run training; must be implemented by subclasses."""
        raise NotImplementedError
    def eval(self) -> None:
        r"""Main method of trainer evaluation. Calls _eval_checkpoint() that
        is specified in Trainer class that inherits from BaseRLTrainer
        Returns:
            None
        """
        self.device = (
            torch.device("cuda", self.config.TORCH_GPU_ID)
            if torch.cuda.is_available()
            else torch.device("cpu")
        )
        if "tensorboard" in self.config.VIDEO_OPTION:
            assert (
                len(self.config.TENSORBOARD_DIR) > 0
            ), "Must specify a tensorboard directory for video display"
        if "disk" in self.config.VIDEO_OPTION:
            assert (
                len(self.config.VIDEO_DIR) > 0
            ), "Must specify a directory for storing videos on disk"
        with TensorboardWriter(
            self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs
        ) as writer:
            if os.path.isfile(self.config.EVAL.EVAL_CKPT_PATH):
                # evaluate singe checkpoint
                eval_f_list = [self.config.EVAL.EVAL_CKPT_PATH]
            else:
                # evaluate multiple checkpoints in order
                eval_f_list = list(
                    glob.glob(
                        os.path.join(self.config.EVAL.EVAL_CKPT_PATH, "*.pth")
                    )
                )
                # Sort by modification time so checkpoints are evaluated in
                # the order they were produced during training.
                eval_f_list = sorted(
                    eval_f_list, key=lambda x: os.stat(x).st_mtime
                )
            for ckpt_id, current_ckpt in tqdm.tqdm(enumerate(eval_f_list)):
                logger.info(f"======= current_ckpt: {current_ckpt} =======\n")
                (
                    current_episode_result,
                    current_overall_result,
                ) = self._eval_checkpoint(
                    checkpoint_path=current_ckpt,
                    writer=writer,
                    checkpoint_index=ckpt_id,
                )
                try:
                    # assume the file name is ckpt_XX.update_XX.frames_XX.pth
                    current_ckpt_filename = os.path.basename(current_ckpt)
                    current_frame = int(
                        current_ckpt_filename.split("frames_")[1].split(".")[0]
                    )
                    current_overall_result["frames"] = [current_frame]
                except:
                    # NOTE(review): if os.path.basename itself raised,
                    # current_ckpt_filename would be unbound below -- in
                    # practice only the frames_ parse fails here.
                    current_overall_result["frames"] = [None]
                # Per-episode details go to a checkpoint-specific file ...
                self._save_info_dict(
                    current_episode_result,
                    os.path.join(
                        self.config.INFO_DIR,
                        "{}.infos.p".format(
                            current_ckpt_filename.split(".pth")[0]
                        ),
                    ),
                )
                # ... while aggregate metrics accumulate in eval_infos.p.
                self._save_info_dict(
                    current_overall_result,
                    os.path.join(self.config.INFO_DIR, "eval_infos.p"),
                )
                if (
                    self.config.EVAL.SAVE_RANKED_IMGS
                    and self.config.VO.use_vo_model
                ):
                    logger.info("Start post processing ...\n")
                    self._eval_ckpt_post_process(current_episode_result)
                    logger.info("... post processing done.\n")
    def _eval_ckpt_post_process(self, ckpt_eval_result):
        """Rank evaluation steps by visual-odometry prediction error and save
        composite images for the top-k worst steps of every
        (delta type, action, abs/rel) combination, per scene.
        """
        cur_config = ckpt_eval_result["config"]
        top_k = cur_config.EVAL.RANK_TOP_K
        # Every key except "config" is a scene; its value maps episodes to
        # trajectory/map info.
        for k in tqdm.tqdm(ckpt_eval_result):
            if k != "config":
                # delta_type -> action -> {"abs": [...], "rel": [...]}.
                delta_type_dict = {
                    "dx": defaultdict(lambda: defaultdict(list)),
                    "dz": defaultdict(lambda: defaultdict(list)),
                    "dyaw": defaultdict(lambda: defaultdict(list)),
                }
                # sort all steps in this scene
                for episode_info in tqdm.tqdm(ckpt_eval_result[k].values()):
                    cur_map_info = episode_info["map"]
                    for tmp in episode_info["traj"]:
                        # Deep-copy so annotating one step cannot alias others.
                        step_info = copy.deepcopy(tmp)
                        step_info["map"] = cur_map_info
                        act = ACT_IDX2NAME[step_info["action"]]
                        for i, d_type in enumerate(["dx", "dz", "dyaw"]):
                            # Absolute and relative error between ground-truth
                            # and predicted deltas; EPSILON avoids div-by-zero.
                            step_info[f"{d_type}_abs"] = np.abs(
                                step_info["gt_delta"][i]
                                - step_info["pred_delta"][i]
                            )
                            step_info[f"{d_type}_rel"] = np.abs(
                                step_info["gt_delta"][i]
                                - step_info["pred_delta"][i]
                            ) / (np.abs(step_info["gt_delta"][i]) + EPSILON)
                            delta_type_dict[d_type][act][f"abs"].append(
                                step_info
                            )
                            delta_type_dict[d_type][act][f"rel"].append(
                                step_info
                            )
                # Keep only the top-k worst steps for each bucket.
                for d_type in ["dx", "dz", "dyaw"]:
                    for act in delta_type_dict[d_type]:
                        ranked_list_abs = delta_type_dict[d_type][act][
                            f"abs"
                        ]
                        ranked_list_abs = sorted(
                            ranked_list_abs,
                            key=lambda x: x[f"{d_type}_abs"],
                            reverse=True,
                        )
                        delta_type_dict[d_type][act][
                            "abs"
                        ] = ranked_list_abs[:top_k]
                        ranked_list_rel = delta_type_dict[d_type][act][
                            "rel"
                        ]
                        ranked_list_rel = sorted(
                            ranked_list_rel,
                            key=lambda x: x[f"{d_type}_rel"],
                            reverse=True,
                        )
                        delta_type_dict[d_type][act][
                            "rel"
                        ] = ranked_list_rel[:top_k]
                # plot figures
                cur_scene = os.path.basename(k).split(".")[0]
                cur_scene_dir = os.path.join(self.config.VIDEO_DIR, cur_scene)
                os.makedirs(cur_scene_dir)
                # Restrict the env to this scene and enable top-down maps.
                cur_config.defrost()
                cur_config.TASK_CONFIG.DATASET.CONTENT_SCENES = [cur_scene]
                cur_config.TASK_CONFIG.TASK.TOP_DOWN_MAP.TYPE = "TopDownMap"
                cur_config.freeze()
                with habitat.Env(config=cur_config.TASK_CONFIG) as env:
                    for i, d_type in enumerate(["dx", "dz", "dyaw"]):
                        for compare_type in ["abs", "rel"]:
                            cur_d_dir = os.path.join(
                                cur_scene_dir, f"{d_type}_{compare_type}"
                            )
                            os.makedirs(cur_d_dir, exist_ok=False)
                            for act in delta_type_dict[d_type]:
                                ranked_list = delta_type_dict[d_type][act][
                                    compare_type
                                ]
                                assert len(ranked_list) == top_k
                                for j, step_info in enumerate(ranked_list):
                                    # obtain observation
                                    prev_obs = env._sim.get_observations_at(
                                        position=step_info["prev_agent_state"][
                                            "position"
                                        ],
                                        rotation=step_info["prev_agent_state"][
                                            "rotation"
                                        ],
                                        keep_agent_at_new_pose=False,
                                    )
                                    cur_obs = env._sim.get_observations_at(
                                        position=step_info["cur_agent_state"][
                                            "position"
                                        ],
                                        rotation=step_info["cur_agent_state"][
                                            "rotation"
                                        ],
                                        keep_agent_at_new_pose=False,
                                    )
                                    prev_rgb = prev_obs["rgb"].astype(np.uint8)
                                    cur_rgb = cur_obs["rgb"].astype(np.uint8)
                                    # Tile single-channel depth to 3 channels
                                    # and scale to 0-255 for image output.
                                    prev_depth = (
                                        np.repeat(prev_obs["depth"], 3, axis=2)
                                        * 255.0
                                    ).astype(np.uint8)
                                    cur_depth = (
                                        np.repeat(cur_obs["depth"], 3, axis=2)
                                        * 255.0
                                    ).astype(np.uint8)
                                    # plot map
                                    prev_top_down_map = self._get_top_down_map(
                                        step_info, "prev", cur_rgb.shape[0]
                                    )
                                    cur_top_down_map = self._get_top_down_map(
                                        step_info, "cur", cur_rgb.shape[0]
                                    )
                                    # set layout of the image:
                                    # row 1 = before-step, row 2 = after-step.
                                    first_row = np.concatenate(
                                        (
                                            prev_top_down_map,
                                            prev_rgb,
                                            prev_depth,
                                        ),
                                        axis=1,
                                    )
                                    second_row = np.concatenate(
                                        (cur_top_down_map, cur_rgb, cur_depth),
                                        axis=1,
                                    )
                                    out_img = np.concatenate(
                                        (first_row, second_row), axis=0,
                                    )
                                    tmp_k = f"{d_type}_{compare_type}"
                                    out_f = os.path.join(
                                        cur_d_dir,
                                        f"{act}-rank_{j:02d}-gt_{step_info['gt_delta'][i]:.3f}-"
                                        f"pred_{step_info['pred_delta'][i]:.3f}-"
                                        f"{compare_type}_{step_info[tmp_k]:.3f}-"
                                        f"collision_{step_info['collision']}.png",
                                    )
                                    imageio.imsave(out_f, out_img)
    def _get_top_down_map(self, step_info, state_k, target_size):
        """Render the agent ("prev" or "cur" state per *state_k*) on the
        scene's cached top-down map, optionally resized to *target_size*."""
        map_info = step_info["map"]
        top_down_map = map_info["blank_top_down_map"]
        top_down_map = maps.colorize_topdown_map(top_down_map)
        # World (x, z) -> map grid coordinates.
        map_agent_x, map_agent_y = maps.to_grid(
            step_info[f"{state_k}_agent_state"]["position"][0], # x
            step_info[f"{state_k}_agent_state"]["position"][2], # z
            map_info["coordinate_min"],
            map_info["coordinate_max"],
            map_info["map_resolution"],
        )
        # Shift into the cropped map's frame.
        agent_map_coord = (
            map_agent_x - (map_info["ind_x_min"] - map_info["grid_delta"]),
            map_agent_y - (map_info["ind_y_min"] - map_info["grid_delta"]),
        )
        if self.config.EVAL.RESIZE_TOPDOWN_MAP:
            top_down_map = resize_top_down_map(
                top_down_map,
                [[agent_map_coord, step_info[f"{state_k}_agent_angle"]]],
                target_size,
            )
        return top_down_map
    def _setup_eval_config(self, checkpoint_config: Config) -> Config:
        r"""Sets up and returns a merged config for evaluation. Config
        object saved from checkpoint is merged into config file specified
        at evaluation time with the following overwrite priority:
            eval_opts > ckpt_opts > eval_cfg > ckpt_cfg
        If the saved config is outdated, only the eval config is returned.
        Args:
            checkpoint_config: saved config from checkpoint.
        Returns:
            Config: merged config for eval.
        """
        config = self.config.clone()
        ckpt_cmd_opts = checkpoint_config.CMD_TRAILING_OPTS
        eval_cmd_opts = config.CMD_TRAILING_OPTS
        try:
            config.merge_from_other_cfg(checkpoint_config)
            config.merge_from_other_cfg(self.config)
            config.merge_from_list(ckpt_cmd_opts)
            config.merge_from_list(eval_cmd_opts)
        except KeyError:
            logger.info("Saved config is outdated, using solely eval config")
            config = self.config.clone()
            config.merge_from_list(eval_cmd_opts)
        # Never evaluate on the training split.
        if config.TASK_CONFIG.DATASET.SPLIT == "train":
            config.TASK_CONFIG.defrost()
            config.TASK_CONFIG.DATASET.SPLIT = "val"
            config.TASK_CONFIG.freeze()
        config.TASK_CONFIG.defrost()
        config.TASK_CONFIG.SIMULATOR.AGENT_0.SENSORS = self.config.SENSORS
        config.freeze()
        return config
    def _eval_checkpoint(
        self,
        checkpoint_path: str,
        writer: TensorboardWriter,
        checkpoint_index: int = 0,
    ) -> None:
        r"""Evaluates a single checkpoint. Trainer algorithms should
        implement this.
        Args:
            checkpoint_path: path of checkpoint
            writer: tensorboard writer object for logging to tensorboard
            checkpoint_index: index of cur checkpoint for logging
        Returns:
            None
        """
        raise NotImplementedError
    def save_checkpoint(self, file_name) -> None:
        """Persist trainer state under *file_name*; implemented by subclasses."""
        raise NotImplementedError
    def load_checkpoint(self, checkpoint_path, *args, **kwargs) -> Dict:
        """Load and return a checkpoint dict; implemented by subclasses."""
        raise NotImplementedError
    @staticmethod
    def _pause_envs(
        envs_to_pause,
        envs,
        test_recurrent_hidden_states,
        not_done_masks,
        current_episode_reward,
        prev_actions,
        batch,
        rgb_frames,
    ):
        """Pause finished vectorized envs and drop their slots from every
        batched tensor/list so remaining envs keep consistent indices."""
        # pausing self.envs with no new episode
        if len(envs_to_pause) > 0:
            state_index = list(range(envs.num_envs))
            # Reverse order so earlier pops do not shift later indices.
            for idx in reversed(envs_to_pause):
                state_index.pop(idx)
                envs.pause_at(idx)
            # indexing along the batch dimensions
            test_recurrent_hidden_states = test_recurrent_hidden_states[
                :, state_index
            ]
            not_done_masks = not_done_masks[state_index]
            current_episode_reward = current_episode_reward[state_index]
            prev_actions = prev_actions[state_index]
            for k, v in batch.items():
                try:
                    batch[k] = v[state_index]
                except:
                    print(
                        f"\nin base_trainer.py _pause_envs(): {k}, {len(v)}, {state_index}, {envs_to_pause}\n"
                    )
            rgb_frames = [rgb_frames[i] for i in state_index]
        return (
            envs,
            test_recurrent_hidden_states,
            not_done_masks,
            current_episode_reward,
            prev_actions,
            batch,
            rgb_frames,
        )
    def _save_info_dict(self, save_dict: Dict[str, List], f_path: str):
        """Merge *save_dict* into the joblib pickle at *f_path*, extending
        existing per-key lists rather than overwriting them."""
        if not os.path.isfile(f_path):
            tmp_dict = save_dict
        else:
            with open(f_path, "rb") as f:
                tmp_dict = joblib.load(f)
            for k, v in save_dict.items():
                if k in tmp_dict:
                    tmp_dict[k].extend(v)
                else:
                    tmp_dict[k] = v
        with open(f_path, "wb") as f:
            joblib.dump(tmp_dict, f, compress="lz4")
| StarcoderdataPython |
3313411 | <gh_stars>0
"""A show queue which can will be played sequentially."""
from collections import deque
from typing import Tuple
from mpf.assets.show import Show, RunningShow, ShowConfig
from mpf.core.system_wide_device import SystemWideDevice
class ShowQueue(SystemWideDevice):
    """Represents a show queue."""
    config_section = 'show_queues'
    collection = 'show_queues'
    class_label = 'show_queue'
    __slots__ = ["shows_queue", "_current_show"]
    def __init__(self, machine, name):
        """Initialise show queue."""
        super().__init__(machine, name)
        # Pending (show_config, start_step) pairs, played in FIFO order.
        self.shows_queue = deque()  # type: Deque[Tuple[ShowConfig, int]]
        # Show currently playing, or None when the queue is idle.
        self._current_show = None  # type: RunningShow
    def enqueue_show(self, show_config: ShowConfig, start_step: int):
        """Add a show to the end of the queue."""
        self.shows_queue.append((show_config, start_step))
        # Kick off playback immediately when nothing is running.
        if not self._current_show:
            self._play_next_show()
    def _play_next_show(self):
        """Play the next show."""
        if not self.shows_queue:
            # no show queued
            self._current_show = None
            return
        show_config, start_step = self.shows_queue.popleft()
        # stop_callback chains playback: when this show ends, play the next.
        self._current_show = self.machine.show_controller.replace_or_advance_show(self._current_show, show_config,
                                                                                  start_step=start_step,
                                                                                  stop_callback=self._play_next_show)
| StarcoderdataPython |
1626052 | <reponame>overflowin-st-hackers/YouGotAppoint
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from .models import Doctor
from .serializers import DoctorSerializer, UserSerializer, CurrentUserSerializer, AppointmentSerializer
from django.shortcuts import render
from django.contrib.auth.models import User
@api_view(['GET'])
def get_doctors(request):
    """List every doctor."""
    if request.method != 'GET':
        # Unreachable in practice: @api_view(['GET']) rejects other verbs.
        return Response({})
    queryset = Doctor.objects.all()
    return Response(DoctorSerializer(queryset, many=True).data)
@api_view(['GET'])
def get_doctor(request, pk):
    """Retrieve a single doctor by primary key (404 when absent)."""
    try:
        doc = Doctor.objects.get(pk=pk)
    except Doctor.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method != 'GET':
        # Unreachable in practice: @api_view(['GET']) rejects other verbs.
        return Response({})
    return Response(DoctorSerializer(doc).data)
@api_view(['GET'])
def get_user(request, pk):
    """Return a user's data, but only to that user themselves."""
    try:
        target = User.objects.get(pk=pk)
    except User.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    # Only the account owner may read their own record; everyone else
    # (and non-GET verbs) gets an empty payload, as before.
    if request.method == 'GET' and request.user == target:
        return Response(UserSerializer(target).data)
    return Response({})
@api_view(['GET'])
def get_current_user(request):
    """Return the serialised currently-authenticated user."""
    return Response(CurrentUserSerializer(request.user).data)
def home(request):
    # Serve the single-page frontend; client-side routing takes over from here.
    return render(request, 'frontend/index.html')
@api_view(['POST'])
def create_appointment(request):
    """Create an appointment between the authenticated user and a doctor.

    Expects 'time', 'date', 'duration' and a doctor pk in the POST body.
    Returns 201 with the created appointment or 400 on validation errors.
    """
    data = {
        'time': request.data.get('time'),
        'date': request.data.get('date'),
        'duration': request.data.get('duration'),
        'doctor': Doctor.objects.get(pk=request.data.get('doctor')),
        'patient': request.user,
    }
    # BUG FIX: the payload must be passed via the data= keyword; passing it
    # positionally makes DRF treat it as a model *instance*, and .is_valid()
    # then raises an AssertionError instead of validating.
    # NOTE(review): if AppointmentSerializer declares 'doctor'/'patient' as
    # PrimaryKeyRelatedFields, raw pks may be expected here -- confirm.
    serializer = AppointmentSerializer(data=data)
    if serializer.is_valid():
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
    return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| StarcoderdataPython |
82152 | <filename>verticapy/learn/pipeline.py
# (c) Copyright [2018-2022] Micro Focus or one of its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# |_ |~) _ _| _ /~\ _ |.
# |_)\/ |_)(_|(_|| \_/|_|(_|||
# /
# ____________ ______
# / __ `\ / /
# | \/ / / /
# |______ / / /
# |____/ / /
# _____________ / /
# \ / / /
# \ / / /
# \_______/ / /
# ______ / /
# \ / / /
# \ / / /
# \/ / /
# / /
# / /
# \ /
# \ /
# \/
# _
# \ / _ __|_. _ _ |_)
# \/ (/_| | |(_(_|| \/
# /
# VerticaPy is a Python library with scikit-like functionality for conducting
# data science projects on data stored in Vertica, taking advantage Vertica’s
# speed and built-in analytics and machine learning features. It supports the
# entire data science life cycle, uses a ‘pipeline’ mechanism to sequentialize
# data transformation operations, and offers beautiful graphical options.
#
# VerticaPy aims to do all of the above. The idea is simple: instead of moving
# data around for processing, VerticaPy brings the logic to the data.
#
#
# Modules
#
# VerticaPy Modules
from verticapy import vDataFrame
from verticapy.utilities import *
from verticapy.toolbox import *
from verticapy.errors import *
from verticapy.learn.vmodel import *
# Standard Python Modules
from typing import Union
# ---#
class Pipeline:
"""
---------------------------------------------------------------------------
Creates a Pipeline object. Sequentially apply a list of transforms and a
final estimator. The intermediate steps must implement a transform method.
Parameters
----------
steps: list
List of (name, transform) tuples (implementing fit/transform) that are chained,
in the order in which they are chained, with the last object an estimator.
"""
def __init__(self, steps: list):
check_types([("steps", steps, [list])])
self.type = "Pipeline"
self.steps = []
for idx, elem in enumerate(steps):
if len(elem) != 2:
raise ParameterError(
"The steps of the Pipeline must be composed of 2 elements "
"(name, transform). Found {}.".format(len(elem))
)
elif not (isinstance(elem[0], str)):
raise ParameterError(
"The steps 'name' of the Pipeline must be of "
"type str. Found {}.".format(type(elem[0]))
)
else:
try:
if idx < len(steps) - 1:
elem[1].transform
elem[1].fit
except:
if idx < len(steps) - 1:
raise ParameterError(
"The estimators of the Pipeline must have a "
"'transform' and a 'fit' method."
)
else:
raise ParameterError(
"The last estimator of the Pipeline must have a "
"'fit' method."
)
self.steps += [elem]
# ---#
def __getitem__(self, index):
if isinstance(index, slice):
return self.steps[index]
elif isinstance(index, int):
return self.steps[index][1]
else:
return getattr(self, index)
# ---#
def drop(self):
"""
---------------------------------------------------------------------------
Drops the model from the Vertica database.
"""
for step in self.steps:
step[1].drop()
# ---#
def fit(
self,
input_relation: Union[str, vDataFrame],
X: list,
y: str = "",
test_relation: Union[str, vDataFrame] = "",
):
"""
---------------------------------------------------------------------------
Trains the model.
Parameters
----------
input_relation: str/vDataFrame
Training relation.
X: list
List of the predictors.
y: str, optional
Response column.
test_relation: str/vDataFrame, optional
Relation used to test the model.
Returns
-------
object
model
"""
if isinstance(X, str):
X = [X]
if isinstance(input_relation, str):
vdf = vDataFrameSQL(relation=input_relation)
else:
vdf = input_relation
if verticapy.options["overwrite_model"]:
self.drop()
else:
does_model_exist(name=self.name, raise_error=True)
X_new = [elem for elem in X]
current_vdf = vdf
for idx, step in enumerate(self.steps):
if (idx == len(self.steps) - 1) and (y):
step[1].fit(current_vdf, X_new, y, test_relation)
else:
step[1].fit(current_vdf, X_new)
if idx < len(self.steps) - 1:
current_vdf = step[1].transform(current_vdf, X_new)
X_new = step[1].get_names(X=X)
self.input_relation = self.steps[0][1].input_relation
self.X = [column for column in self.steps[0][1].X]
try:
self.y = self.steps[-1][1].y
self.test_relation = self.steps[-1][1].test_relation
except:
pass
return self
# ---#
def get_params(self):
"""
---------------------------------------------------------------------------
Returns the models Parameters.
Returns
-------
dict
models parameters
"""
params = {}
for step in self.steps:
params[step[0]] = step[1].get_params()
return params
# ---#
def predict(
self, vdf: Union[str, vDataFrame] = None, X: list = [], name: str = "estimator"
):
"""
---------------------------------------------------------------------------
Applies the model on a vDataFrame.
Parameters
----------
vdf: str/vDataFrame, optional
Input vDataFrame. You can also specify a customized relation,
but you must enclose it with an alias. For example "(SELECT 1) x" is
correct whereas "(SELECT 1)" and "SELECT 1" are incorrect.
X: list, optional
List of the input vcolumns.
name: str, optional
Name of the added vcolumn.
Returns
-------
vDataFrame
object result of the model transformation.
"""
if isinstance(X, str):
X = [X]
try:
self.steps[-1][1].predict
except:
raise ModelError(
"The last estimator of the Pipeline has no 'predict' method."
)
if not (vdf):
vdf = self.input_relation
if isinstance(vdf, str):
vdf = vDataFrameSQL(relation=vdf)
X_new, X_all = [elem for elem in X], []
current_vdf = vdf
for idx, step in enumerate(self.steps):
if idx == len(self.steps) - 1:
try:
current_vdf = step[1].predict(
current_vdf, X_new, name=name, inplace=False
)
except:
current_vdf = step[1].predict(current_vdf, X_new, name=name)
else:
current_vdf = step[1].transform(current_vdf, X_new)
X_new = step[1].get_names(X=X)
X_all += X_new
return current_vdf[vdf.get_columns() + [name]]
# ---#
def report(self):
"""
---------------------------------------------------------------------------
Computes a regression/classification report using multiple metrics to evaluate
the model depending on its type.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
if isinstance(self.steps[-1][1], Regressor):
return self.steps[-1][1].regression_report()
else:
return self.steps[-1][1].classification_report()
# ---#
def score(self, method: str = ""):
"""
---------------------------------------------------------------------------
Computes the model score.
Parameters
----------
method: str, optional
The method to use to compute the score.
Depends on the final estimator type (classification or regression).
Returns
-------
float
score
"""
if not (method):
if isinstance(self.steps[-1][1], Regressor):
method = "r2"
else:
method = "accuracy"
return self.steps[-1][1].score(method)
# ---#
def transform(self, vdf: Union[str, vDataFrame] = None, X: list = []):
"""
---------------------------------------------------------------------------
Applies the model on a vDataFrame.
Parameters
----------
vdf: str/vDataFrame, optional
Input vDataFrame. You can also specify a customized relation,
but you must enclose it with an alias. For example "(SELECT 1) x" is
correct whereas "(SELECT 1)" and "SELECT 1" are incorrect.
X: list, optional
List of the input vcolumns.
Returns
-------
vDataFrame
object result of the model transformation.
"""
if isinstance(X, str):
X = [X]
try:
self.steps[-1][1].transform
except:
raise ModelError(
"The last estimator of the Pipeline has no 'transform' method."
)
if not (vdf):
vdf = self.input_relation
if isinstance(vdf, str):
vdf = vDataFrameSQL(relation=vdf)
X_new, X_all = [elem for elem in X], []
current_vdf = vdf
for idx, step in enumerate(self.steps):
current_vdf = step[1].transform(current_vdf, X_new)
X_new = step[1].get_names(X=X)
X_all += X_new
return current_vdf
# ---#
def inverse_transform(self, vdf: Union[str, vDataFrame] = None, X: list = []):
"""
---------------------------------------------------------------------------
Applies the inverse model transformation on a vDataFrame.
Parameters
----------
vdf: str/vDataFrame, optional
Input vDataFrame. You can also specify a customized relation,
but you must enclose it with an alias. For example "(SELECT 1) x" is
correct whereas "(SELECT 1)" and "SELECT 1" are incorrect.
X: list, optional
List of the input vcolumns.
Returns
-------
vDataFrame
object result of the model inverse transformation.
"""
if isinstance(X, str):
X = [X]
try:
for idx in range(len(self.steps)):
self.steps[idx][1].inverse_transform
except:
raise ModelError(
f"The estimator [{idx}] of the Pipeline has "
"no 'inverse_transform' method."
)
if not (vdf):
vdf = self.input_relation
if isinstance(vdf, str):
vdf = vDataFrameSQL(relation=vdf)
X_new, X_all = [elem for elem in X], []
current_vdf = vdf
for idx in range(1, len(self.steps) + 1):
step = self.steps[-idx]
current_vdf = step[1].inverse_transform(current_vdf, X_new)
X_new = step[1].get_names(inverse=True, X=X)
X_all += X_new
return current_vdf
# ---#
def set_params(self, parameters: dict = {}):
"""
---------------------------------------------------------------------------
Sets the parameters of the model.
Parameters
----------
parameters: dict, optional
New parameters. It must be a dictionary with as keys the Pipeline
names and as value the parameters dictionary.
"""
for param in parameters:
for step in self.steps:
if param.lower() == step[0].lower():
step[1].set_params(parameters[param])
# ---#
    def to_python(
        self,
        name: str = "predict",
        return_proba: bool = False,
        return_distance_clusters: bool = False,
        return_str: bool = False,
    ):
        """
        ---------------------------------------------------------------------------
        Returns the Python code needed to deploy the pipeline without using
        built-in Vertica functions.
        Parameters
        ----------
        name: str, optional
            Function Name.
        return_proba: bool, optional
            If set to True and the model is a classifier, the function will return
            the model probabilities.
        return_distance_clusters: bool, optional
            If set to True and the model type is KMeans or NearestCentroids, the
            function will return the model clusters distances.
        return_str: bool, optional
            If set to True, the function str will be returned.
        Returns
        -------
        str / func
            Python function
        """
        if not (return_str):
            # Build the source string once (recursive call with
            # return_str=True), then exec it so the caller receives a real
            # callable bound to ``name`` rather than its source code.
            func = self.to_python(
                name=name,
                return_proba=return_proba,
                return_distance_clusters=return_distance_clusters,
                return_str=True,
            )
            _locals = locals()
            exec(func, globals(), _locals)
            return _locals[name]
        str_representation = "def {}(X):\n".format(name)
        final_function = "X"
        for idx, step in enumerate(self.steps):
            # Each step contributes its own nested function definition,
            # shifted one tab level inside the generated wrapper.
            str_representation += (
                "\t"
                + step[1]
                .to_python(
                    name=step[0],
                    return_proba=return_proba,
                    return_distance_clusters=return_distance_clusters,
                    return_str=True,
                )
                .replace("\n", "\n\t")
                + "\n"
            )
            # Compose the calls inside-out: stepN(...(step1(X))...).
            final_function = step[0] + "({})".format(final_function)
        str_representation += "\treturn {}".format(final_function)
        return str_representation
| StarcoderdataPython |
3365877 | '''OpenGL extension OES.fbo_render_mipmap
This module customises the behaviour of the
OpenGL.raw.GLES1.OES.fbo_render_mipmap to provide a more
Python-friendly API
Overview (from the spec)
OES_framebuffer_object allows rendering to the base level of a
texture only. This extension removes this limitation by
allowing implementations to support rendering to any mip-level
of a texture(s) that is attached to a framebuffer object(s).
If this extension is supported, FramebufferTexture2DOES, and
FramebufferTexture3DOES can be used to render directly into
any mip level of a texture image
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/OES/fbo_render_mipmap.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.OES.fbo_render_mipmap import *
from OpenGL.raw.GLES1.OES.fbo_render_mipmap import _EXTENSION_NAME
def glInitFboRenderMipmapOES():
    '''Return boolean indicating whether this extension is available'''
    # ``extensions`` is already imported at module level; the generated
    # function-local re-import was redundant and has been removed.
    return extensions.hasGLExtension(_EXTENSION_NAME)
### END AUTOGENERATED SECTION | StarcoderdataPython |
3328243 | import json
class ActorInfo:
    """An actor scraped from a page, plus the titles they appeared in."""

    def __init__(self, name, id, html):
        self.name = name
        self.id = id
        # Mapping of title id -> title object; ``None`` until titles are known.
        self.titles = None
        self.html = html

    def set_titles(self, titles):
        """Replace any known titles with *titles* (keyed by their id)."""
        self.titles = dict((entry.id, entry) for entry in titles)

    def add_titles(self, titles):
        """Merge every title in *titles* into the known titles."""
        if self.titles is None:
            self.titles = {}
        for entry in titles:
            self.add_title(entry)

    def add_title(self, title):
        """Record a single title, creating the mapping lazily."""
        if self.titles is None:
            self.titles = {}
        self.titles[title.id] = title

    def __str__(self):
        # Serialize the whole object graph via each object's __dict__.
        return json.dumps(self, default=lambda obj: obj.__dict__, sort_keys=True, indent=4)
class TitleInfo:
    """A title (film/show) with its release year and known cast."""

    def __init__(self, name, id, year):
        self.name = name
        self.id = id
        self.year = year
        # Mapping of actor id -> actor object; ``None`` until actors are known.
        self.actors = None
        self.html = None

    def set_actors(self, actors):
        """Replace any known actors with *actors* (keyed by their id)."""
        self.actors = dict((entry.id, entry) for entry in actors)

    def add_actors(self, actors):
        """Merge every actor in *actors* into the known cast."""
        if self.actors is None:
            self.actors = {}
        for entry in actors:
            self.add_actor(entry)

    def add_actor(self, actor_info):
        """Record a single actor, creating the mapping lazily."""
        if self.actors is None:
            self.actors = {}
        self.actors[actor_info.id] = actor_info

    def __str__(self):
        # Serialize the whole object graph via each object's __dict__.
        return json.dumps(self, default=lambda obj: obj.__dict__, sort_keys=True, indent=4)
class ActorTitleActorRelationship:
    """A from-actor -> title -> to-actor link found at a given search depth."""

    def __init__(self, from_actor, to_actor, via_title, depth):
        self.from_actor = from_actor
        self.to_actor = to_actor
        self.via_title = via_title
        self.depth = depth
        # Unique identifier for this edge, e.g. "(2) a1.t1.a2".
        self.key = '({}) {}.{}.{}'.format(
            depth, from_actor.id, via_title.id, to_actor.id)

    def __str__(self):
        return '({}): {} ==> {} ==> {}'.format(
            self.depth, self.from_actor.name, self.via_title.name, self.to_actor.name)
| StarcoderdataPython |
3397874 | # Generated by Django 2.2.19 on 2021-06-07 20:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter DigitizedWork.source: a 2-char CharField whose choices are
    HathiTrust ("HT", the default), Gale ("G"), and Other ("O")."""

    dependencies = [
        ("archive", "0013_revise_protected_field"),
    ]

    operations = [
        migrations.AlterField(
            model_name="digitizedwork",
            name="source",
            field=models.CharField(
                choices=[("HT", "HathiTrust"), ("G", "Gale"), ("O", "Other")],
                default="HT",
                help_text="Source of the record.",
                max_length=2,
            ),
        ),
    ]
| StarcoderdataPython |
1763582 | from datetime import datetime
import tweepy
import logging
import requests
from api import create_api
# Show INFO-level progress messages on the console.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()  # root logger shared by this module's functions
def get_random_quote(quotes_api_url, quotes_api_key):
    """Fetch one random quote from the quotes API and return the parsed JSON."""
    auth_header = {"Authorization": "Bearer {}".format(quotes_api_key)}
    response = requests.get(quotes_api_url + "/quotes/random", headers=auth_header)
    return response.json()
def format_random_quote(quote):
    """Build the tweet text: quoted content, an em-dash author line, hashtags."""
    body = '"{}"'.format(quote["quote_text"])
    # Some author names carry a trailing comma from the source data.
    author_line = "\u2014 {}".format(quote["author_name"].strip(","))
    tag_line = " ".join("#" + tag for tag in quote["tags"])
    return "{}\n{}\n{}".format(body, author_line, tag_line)
def tweet_quote_of_the_day(api, quotes_api_url, quotes_api_key):
    """Tweet a quote-of-the-day status on the bot account.

    Keeps fetching random quotes until one fits within Twitter's 280
    character limit, then posts it. Errors are logged and re-raised.
    """
    tweet_max_length = 280  # fixes the previous 'tweet_max_lenght' typo
    try:
        logger.info("Tweeting quote of the day. Time is {}".format(datetime.now()))
        # A loop replaces the previous recursive retry, which could grow the
        # call stack without bound when many quotes were too long.
        while True:
            quote = get_random_quote(quotes_api_url, quotes_api_key)
            status = format_random_quote(quote)
            if len(status) < tweet_max_length:
                api.update_status(status=status)
                break
            logger.info("Tweet exceeds max length. Getting new quote.")
    except Exception:
        logger.error("Error on quote of the day tweet", exc_info=True)
        # Bare re-raise preserves the original traceback (was ``raise e``).
        raise
| StarcoderdataPython |
1711970 | <reponame>uva-slpl/embedalign
import numpy as np
import sys
from collections import defaultdict
# Aggregate "metric=NAME ... value=V" lines from stdin into per-metric lists,
# then print summary statistics for each metric.
data = defaultdict(list)
metric = None
for line in sys.stdin:
    # Strip first: the previous ``if line:`` never skipped blank lines
    # ("\n" is truthy) and then crashed on the k, v unpack below.
    line = line.strip()
    if not line:
        continue
    for part in line.split(' '):
        k, v = part.split('=')
        if k == 'metric':
            metric = v
        elif k == 'value':
            data[metric].append(float(v))

for metric, values in data.items():
    print('metric %s' % metric)
    print('samples %d' % len(values))
    print('mean %f' % np.mean(values))
    print('std %f' % np.std(values))
    print('min %f' % np.min(values))
    print('max %f' % np.max(values))
    print('')
| StarcoderdataPython |
3367700 | <gh_stars>0
from flask import Flask
from config import Config
import os
from flask_mail import Mail, Message
# Application setup: create the Flask app, load configuration, then wire
# up the mail extension exactly once.
app = Flask(__name__,
            template_folder='../template',
            static_folder='../static')

# Load base configuration (includes the secret key). The previous version
# called from_object(Config) twice; once is enough.
app.config.from_object(Config)

# Gmail SMTP settings; the account password comes from the environment.
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USERNAME'] = '<EMAIL>'
app.config['MAIL_PASSWORD'] = os.getenv('chef_pass')
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True

# Instantiate the mail extension after all mail settings are in place.
# The previous version also created a throwaway Mail(app) before the
# configuration above, which was redundant.
mail = Mail(app)

from app import main
1690122 | <reponame>zipated/src
#!/usr/bin/python
# Copyright 2016 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# gen_format_map.py:
# Code generation for GL format map. The format map matches between
# {format,type} and internal format.
from datetime import date
import sys
sys.path.append('renderer')
import angle_format
# Template for the generated C++ file; {placeholders} are filled via
# str.format, so literal braces in the emitted C++ appear doubled ({{ }}).
template_cpp = """// GENERATED FILE - DO NOT EDIT.
// Generated by {script_name} using data from {data_source_name}.
// ES3 format info from {es3_data_source_name}.
//
// Copyright {copyright_year} The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// format_map:
// Determining the sized internal format from a (format,type) pair.
// Also check es3 format combinations for validity.
#include "angle_gl.h"
#include "common/debug.h"
namespace gl
{{
GLenum GetSizedFormatInternal(GLenum format, GLenum type)
{{
switch (format)
{{
{format_cases} case GL_NONE:
return GL_NONE;
default:
break;
}}
return GL_NONE;
}}
bool ValidES3Format(GLenum format)
{{
switch (format)
{{
{es3_format_cases} return true;
default:
return false;
}}
}}
bool ValidES3Type(GLenum type)
{{
switch (type)
{{
{es3_type_cases} return true;
default:
return false;
}}
}}
bool ValidES3FormatCombination(GLenum format, GLenum type, GLenum internalFormat)
{{
ASSERT(ValidES3Format(format) && ValidES3Type(type));
switch (format)
{{
{es3_combo_cases} default:
UNREACHABLE();
break;
}}
return false;
}}
}} // namespace gl
"""

# One outer ``case <format>`` with a nested switch over the type.
template_format_case = """ case {format}:
switch (type)
{{
{type_cases} default:
break;
}}
break;
"""

# A single ``case <key>: return <result>;`` entry.
template_simple_case = """ case {key}:
return {result};
"""

# ES3 combo check: for one type, a nested switch over valid internal formats.
template_es3_combo_type_case = """ case {type}:
{{
switch (internalFormat)
{{
{internal_format_cases} return true;
default:
break;
}}
break;
}}
"""
def parse_type_case(type, result):
    """Render one inner switch case returning *result* for *type*."""
    return template_simple_case.format(key=type, result=result)
def parse_format_case(format, type_map):
    """Render the outer case for *format*, one inner case per (type, internal)."""
    rendered_types = "".join(
        parse_type_case(gl_type, internal_format)
        for gl_type, internal_format in sorted(type_map.iteritems()))
    return template_format_case.format(format=format, type_cases=rendered_types)
# Build the (format, type) -> sized internal format switch cases.
input_script = 'format_map_data.json'
format_map = angle_format.load_json(input_script)

format_cases = ""
for format, type_map in sorted(format_map.iteritems()):
    format_cases += parse_format_case(format, type_map)

# Load the valid ES3 (internal_format, format, type) combinations and
# flatten the per-category lists into one list of triples.
combo_data_file = 'es3_format_type_combinations.json'
es3_combo_data = angle_format.load_json(combo_data_file)
combo_data = [combo for sublist in es3_combo_data.values() for combo in sublist]

# Index the triples: all valid types, all valid formats, and a nested
# format -> type -> [internal formats] mapping.
types = set()
formats = set()
combos = {}
for internal_format, format, type in combo_data:
    types.update([type])
    formats.update([format])
    if format not in combos:
        combos[format] = {}
    if type not in combos[format]:
        combos[format][type] = [internal_format]
    else:
        combos[format][type] += [internal_format]

es3_format_cases = ""
for format in sorted(formats):
    es3_format_cases += " case " + format + ":\n"

es3_type_cases = ""
for type in sorted(types):
    es3_type_cases += " case " + type + ":\n"

es3_combo_cases = ""
for format, type_combos in combos.iteritems():
    this_type_cases = ""
    # NOTE(review): the inner loop rebinds the name ``combos`` (shadowing the
    # outer dict). Harmless here because the outer iterator already exists,
    # but worth renaming if this script is ever touched again.
    for type, combos in type_combos.iteritems():
        internal_format_cases = ""
        for internal_format in combos:
            internal_format_cases += " case " + internal_format + ":\n"
        this_type_cases += template_es3_combo_type_case.format(
            type = type, internal_format_cases = internal_format_cases)
    es3_combo_cases += template_format_case.format(
        format = format, type_cases = this_type_cases)

# Fill the C++ template and write the generated file.
with open('format_map_autogen.cpp', 'wt') as out_file:
    output_cpp = template_cpp.format(
        script_name = sys.argv[0],
        data_source_name = input_script,
        es3_data_source_name = combo_data_file,
        copyright_year = date.today().year,
        format_cases = format_cases,
        es3_format_cases = es3_format_cases,
        es3_type_cases = es3_type_cases,
        es3_combo_cases = es3_combo_cases)
    out_file.write(output_cpp)
| StarcoderdataPython |
3362254 |
from .linear import Linear
from .rnn import LSTM, GRU, LSTMCell, RNNCell, GRUCell
# Public API of this package: the layer and cell implementations
# re-exported from .linear and .rnn above.
__all__ = [
    'Linear',
    'LSTM',
    'GRU',
    'LSTMCell',
    'RNNCell',
    'GRUCell',
]
| StarcoderdataPython |
120336 | import csv
import requests
from collections import Counter
from pprint import pprint as pp
CSV_URL = 'https://bit.ly/2HiD2i8'
def get_csv():
    """Download the CSV from CSV_URL and return its decoded text content."""
    response = requests.get(CSV_URL)
    # Raise for 4xx/5xx so callers never parse an error page.
    response.raise_for_status()
    return response.text
def create_user_bar_chart(content):
    """Receives csv file (decoded) content and prints a table of timezones
    and their corresponding member counts in pluses.

    Parses with the csv module (imported at the top of the file but
    previously unused) instead of whitespace splitting, so fields that
    contain spaces are handled correctly.
    """
    rows = csv.reader(content.splitlines())
    next(rows, None)  # skip the header row
    counts = Counter(row[-1] for row in rows if row)
    chart = '\n'.join(
        '{:20} | {}'.format(tz, '+' * counts[tz]) for tz in sorted(counts))
    print(chart)
if __name__ == "__main__":
    # Script entry point: download the CSV and print the timezone bar chart.
    create_user_bar_chart(get_csv())
| StarcoderdataPython |
1795306 | <filename>Backup/backup_190324/utils.py<gh_stars>1-10
import torch
import torch.nn as nn
import torch.nn.functional as F
import random
import os
import wave as we
import numpy as np
import mir_eval
import csv
import re
def melody_eval(ref, est):
    """Score an estimated melody against a reference with mir_eval.

    Both arguments are (N, 2) arrays whose columns are [time, frequency].
    Returns a numpy array [VR, VFA, RPA, RCA, OA], each scaled to percent.
    """
    scores = mir_eval.melody.evaluate(ref[:, 0], ref[:, 1], est[:, 0], est[:, 1])
    return np.array([
        scores['Voicing Recall'] * 100.0,
        scores['Voicing False Alarm'] * 100.0,
        scores['Raw Pitch Accuracy'] * 100.0,
        scores['Raw Chroma Accuracy'] * 100.0,
        scores['Overall Accuracy'] * 100.0,
    ])
def est(output, CenFreq, time_arr):
    """Decode a salience map into a (frames, 2) [time, frequency] array.

    output : array shaped (1, 1, n_bins, n_frames); the per-frame argmax
        over the bin axis selects the melody bin.
    CenFreq : sequence of bin center frequencies; bin 0 is treated as the
        "unvoiced" bin and mapped to 0 Hz.
    time_arr : per-frame timestamps.

    Fixes over the previous version: frequencies are gathered into a float
    array (the argmax result was an int array, so assigning center
    frequencies into it silently truncated them), and the caller's CenFreq
    is no longer mutated in place.
    """
    center_freqs = np.array(CenFreq, dtype=float)  # local copy
    center_freqs[0] = 0.0  # bin 0 encodes "no melody"
    salience = output[0, 0, :, :]
    est_freq = center_freqs[np.argmax(salience, axis=0)]
    est_time = np.asarray(time_arr, dtype=float)
    return np.concatenate((est_time[:, None], est_freq[:, None]), axis=1)
def seg(data, label, seg_frames_length=3120):
    """Cut (…, frames) data and its per-frame label into fixed-length chunks.

    Returns (data_chunks, label_chunks, n_chunks). When frames is not a
    multiple of seg_frames_length, the final chunk holds the remainder.
    Data is sliced on its last axis (assumes a 3-D array — TODO confirm
    against callers); label is sliced on its only axis.
    """
    frames = data.shape[-1]
    n_full = frames // seg_frames_length  # integer division (was float-then-int)
    xlist, ylist = [], []
    for i in range(n_full):
        start = i * seg_frames_length
        stop = start + seg_frames_length
        xlist.append(data[:, :, start:stop])
        ylist.append(label[start:stop])
    if frames % seg_frames_length != 0:
        # Remainder chunk; the unused ``remain`` variable was removed.
        xlist.append(data[:, :, n_full * seg_frames_length:])
        ylist.append(label[n_full * seg_frames_length:])
    return xlist, ylist, len(xlist)
def iseg(data, seg_frames_length=256):
    """Concatenate chunks produced by seg() back along the frame axis.

    A single np.concatenate call replaces the previous loop of pairwise
    concatenations (which copied the accumulated array on every step).
    seg_frames_length was never used; it is kept only so existing callers
    that pass it keep working.
    """
    return np.concatenate(list(data), axis=-1)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.