id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
6644003 | """
Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/env python3
# -*- coding:utf8 -*-
import sys
import subprocess
import time
import json
import click
import shutil
from paddleflow.cli.output import print_output, OutputFormat
from paddleflow.utils.format_help import command_required_option_from_option
@click.group()
def pipeline():
    """manage pipeline resources"""
    pass
@pipeline.command(context_settings=dict(max_content_width=2000), cls=command_required_option_from_option())
@click.argument('fsname')
@click.argument('yamlpath')
@click.option('-n', '--name', help="Custom pipeline name.")
@click.option('-u', '--username', help="Only the root user can specify other users.")
@click.pass_context
def create(ctx, fsname, yamlpath, name=None, username=None):
    """ create pipeline.\n
    FSNAME: specified name.
    YAMLPATH: relative path of yaml file under storage volume.
    """
    client = ctx.obj['client']
    # Both arguments are required; the original message said "pipelinecreate
    # must provide fsname or yamlpath ." (run-on name, misleading "or",
    # stray space). Fixed wording below.
    if not fsname or not yamlpath:
        click.echo('pipeline create must provide fsname and yamlpath.', err=True)
        sys.exit(1)
    valid, response, id = client.create_pipeline(fsname, yamlpath, name, username)
    if valid:
        click.echo("pipeline[%s] create success, id[%s]" % (response, id))
    else:
        click.echo("pipeline create failed with message[%s]" % response)
        sys.exit(1)
@pipeline.command()
@click.option('-u', '--userfilter', help="List the pipeline by user.")
@click.option('-f', '--fsfilter', help="List the pipeline by fs.")
@click.option('-n', '--namefilter', help="List the pipeline by name.")
@click.option('-m', '--maxkeys', help="Max size of the listed pipeline.")
@click.option('-mk', '--marker', help="Next page ")
@click.pass_context
# NOTE: the function name shadows the builtin `list`, but click derives the
# CLI command name from the function name, so renaming it would rename the
# user-visible command.
def list(ctx, userfilter=None, fsfilter=None, namefilter=None, maxkeys=None, marker=None):
    """list pipeline. \n"""
    client = ctx.obj['client']
    output_format = ctx.obj['output']
    # nextmarker is the pagination token for the next page (printed for the user)
    valid, response, nextmarker = client.list_pipeline(userfilter, fsfilter, namefilter, maxkeys, marker)
    if valid:
        if len(response):
            _print_pipeline(response, output_format)
            click.echo('marker: {}'.format(nextmarker))
        else:
            msg = "no pipeline found "
            click.echo(msg)
    else:
        click.echo("pipeline list failed with message[%s]" % response)
        sys.exit(1)
@pipeline.command()
@click.argument('pipelineid')
@click.pass_context
def show(ctx, pipelineid):
    """ show pipeline info.\n
    PIPELINEID: the id of pipeline.
    """
    client = ctx.obj['client']
    output_format = ctx.obj['output']
    # Fixed ungrammatical user-facing message ("pipeline show must pipeline id.").
    if not pipelineid:
        click.echo('pipeline show must provide pipeline id.', err=True)
        sys.exit(1)
    valid, response = client.show_pipeline(pipelineid)
    if valid:
        _print_pipeline_info(response, output_format)
    else:
        click.echo("pipeline show failed with message[%s]" % response)
        sys.exit(1)
@pipeline.command()
@click.argument('pipelineid')
@click.pass_context
def delete(ctx, pipelineid):
    """ delete pipeline. \n
    PIPELINEID: the id of pipeline.
    """
    client = ctx.obj['client']
    if not pipelineid:
        click.echo('delete must provide pipelineid.', err=True)
        sys.exit(1)
    valid, response = client.delete_pipeline(pipelineid)
    # Report the failure path first; success falls through to the final echo.
    if not valid:
        click.echo("pipeline delete failed with message[%s]" % response)
        sys.exit(1)
    click.echo('pipelineid[%s] delete success' % pipelineid)
def _print_pipeline(pipelines, out_format):
    """Render a collection of pipelines as a grid table."""
    headers = [
        'pipeline id', 'name', 'fsname', 'username',
        'pipeline md5', 'create time', 'update time'
    ]
    data = [
        [p.pipelineid, p.name, p.fsname, p.username,
         p.pipelinemd5, p.createtime, p.updatetime]
        for p in pipelines
    ]
    print_output(data, headers, out_format, table_format='grid')
def _print_pipeline_info(pipeline, out_format):
    """Render one pipeline's metadata table, then its YAML in a second table."""
    headers = [
        'pipeline id', 'name', 'fsname', 'username',
        'pipeline md5', 'create time', 'update time'
    ]
    row = [pipeline.pipelineid, pipeline.name, pipeline.fsname,
           pipeline.username, pipeline.pipelinemd5, pipeline.createtime,
           pipeline.updatetime]
    print_output([row], headers, out_format, table_format='grid')
    print_output([[pipeline.pipelineyaml]], ['pipeline yaml'], out_format,
                 table_format='grid')
| StarcoderdataPython |
1910037 | import logging
import sys
# Configure the root logger to emit all DEBUG-and-above records to stderr
# with a fixed-width level prefix.
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stderr)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
| StarcoderdataPython |
3468045 | <filename>migrations/versions/3361a5e3e546_.py
"""empty message
Revision ID: 3<PASSWORD>
Revises: <PASSWORD>
Create Date: 2020-04-05 20:28:21.986854
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
# NOTE(review): the revision ids were redacted to '<PASSWORD>' in this copy
# of the file; the real Alembic revision hashes must be restored before this
# migration can run.
revision = '<PASSWORD>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Replace departments/roles/student with subjects/tasks/students."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('subjects',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=60), nullable=True),
        sa.Column('description', sa.String(length=200), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name')
    )
    op.create_table('tasks',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=60), nullable=True),
        sa.Column('description', sa.String(length=200), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name')
    )
    op.create_table('students',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('email', sa.String(length=60), nullable=True),
        sa.Column('username', sa.String(length=60), nullable=True),
        sa.Column('first_name', sa.String(length=60), nullable=True),
        sa.Column('last_name', sa.String(length=60), nullable=True),
        sa.Column('password_hash', sa.String(length=128), nullable=True),
        sa.Column('subject_id', sa.Integer(), nullable=True),
        sa.Column('task_id', sa.Integer(), nullable=True),
        sa.Column('is_admin', sa.Boolean(), nullable=True),
        sa.ForeignKeyConstraint(['subject_id'], ['subjects.id'], ),
        sa.ForeignKeyConstraint(['task_id'], ['tasks.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_students_email'), 'students', ['email'], unique=True)
    op.create_index(op.f('ix_students_first_name'), 'students', ['first_name'], unique=False)
    op.create_index(op.f('ix_students_last_name'), 'students', ['last_name'], unique=False)
    op.create_index(op.f('ix_students_username'), 'students', ['username'], unique=True)
    # Drop the superseded tables; indexes must go before their tables.
    # NOTE: this migration does not copy rows from `student` to `students`,
    # so existing data is lost on upgrade.
    op.drop_index('name', table_name='departments')
    op.drop_table('departments')
    op.drop_index('name', table_name='roles')
    op.drop_table('roles')
    op.drop_index('ix_student_email', table_name='student')
    op.drop_index('ix_student_first_name', table_name='student')
    op.drop_index('ix_student_last_name', table_name='student')
    op.drop_index('ix_student_username', table_name='student')
    op.drop_table('student')
    # ### end Alembic commands ###
def downgrade():
    """Restore the previous student/roles/departments schema (MySQL dialect)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('student',
        sa.Column('id', mysql.INTEGER(display_width=11), autoincrement=True, nullable=False),
        sa.Column('email', mysql.VARCHAR(length=60), nullable=True),
        sa.Column('username', mysql.VARCHAR(length=60), nullable=True),
        sa.Column('first_name', mysql.VARCHAR(length=60), nullable=True),
        sa.Column('last_name', mysql.VARCHAR(length=60), nullable=True),
        sa.Column('password_hash', mysql.VARCHAR(length=128), nullable=True),
        sa.Column('department_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
        sa.Column('role_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
        sa.Column('is_admin', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True),
        sa.ForeignKeyConstraint(['department_id'], ['departments.id'], name='student_ibfk_1'),
        sa.ForeignKeyConstraint(['role_id'], ['roles.id'], name='student_ibfk_2'),
        sa.PrimaryKeyConstraint('id'),
        mysql_default_charset='latin1',
        mysql_engine='InnoDB'
    )
    op.create_index('ix_student_username', 'student', ['username'], unique=True)
    op.create_index('ix_student_last_name', 'student', ['last_name'], unique=False)
    op.create_index('ix_student_first_name', 'student', ['first_name'], unique=False)
    op.create_index('ix_student_email', 'student', ['email'], unique=True)
    op.create_table('roles',
        sa.Column('id', mysql.INTEGER(display_width=11), autoincrement=True, nullable=False),
        sa.Column('name', mysql.VARCHAR(length=60), nullable=True),
        sa.Column('description', mysql.VARCHAR(length=200), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        mysql_default_charset='latin1',
        mysql_engine='InnoDB'
    )
    op.create_index('name', 'roles', ['name'], unique=True)
    op.create_table('departments',
        sa.Column('id', mysql.INTEGER(display_width=11), autoincrement=True, nullable=False),
        sa.Column('name', mysql.VARCHAR(length=60), nullable=True),
        sa.Column('description', mysql.VARCHAR(length=200), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        mysql_default_charset='latin1',
        mysql_engine='InnoDB'
    )
    op.create_index('name', 'departments', ['name'], unique=True)
    # Remove the tables added by upgrade(); again, no data is migrated back.
    op.drop_index(op.f('ix_students_username'), table_name='students')
    op.drop_index(op.f('ix_students_last_name'), table_name='students')
    op.drop_index(op.f('ix_students_first_name'), table_name='students')
    op.drop_index(op.f('ix_students_email'), table_name='students')
    op.drop_table('students')
    op.drop_table('tasks')
    op.drop_table('subjects')
    # ### end Alembic commands ###
| StarcoderdataPython |
11263920 | <reponame>ahriknow/ahriknow
# Generated by Django 3.0.4 on 2020-04-10 06:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the Tag model, default ordering for Book, and a Book.tags M2M."""

    dependencies = [
        ('notebook', '0006_auto_20200409_0940'),
    ]

    operations = [
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.BigAutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=100)),
                ('date', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'db_table': 'notebook_tag',
            },
        ),
        migrations.AlterModelOptions(
            name='book',
            options={'ordering': ['id']},
        ),
        migrations.AddField(
            model_name='book',
            name='tags',
            field=models.ManyToManyField(related_name='tags', to='notebook.Tag'),
        ),
    ]
| StarcoderdataPython |
4808333 | <filename>coursebuilder/third_party/gae_mini_profiler/templatetags.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# use json in Python 2.7, fallback to simplejson for Python 2.5
# (kept for legacy GAE runtimes; on any modern Python the first import wins)
try:
    import json
except ImportError:
    import simplejson as json
import profiler
def profiler_includes_request_id(request_id, show_immediately=False):
    """Return the HTML snippet that wires the mini-profiler UI to *request_id*.

    Returns an empty string when there is no request id to attach to.
    """
    if not request_id:
        return ""
    static_base = "/gae_mini_profiler/static"
    markup = """
<link rel="stylesheet" type="text/css" href="%s" />
<script type="text/javascript" src="%s"></script>
<script type="text/javascript">GaeMiniProfiler.init("%s", %s)</script>
"""
    return markup % ("%s/css/profiler.css" % static_base,
                     "%s/js/profiler.js" % static_base,
                     request_id,
                     json.dumps(show_immediately))
def profiler_includes():
    """Return the profiler includes bound to the current request's id."""
    return profiler_includes_request_id(profiler.CurrentRequestId.get())
| StarcoderdataPython |
5158474 | <filename>data/transcoder_evaluation_gfg/python/COUNT_PAIRS_TWO_SORTED_ARRAYS_WHOSE_SUM_EQUAL_GIVEN_VALUE_X_1.py
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold(arr1, arr2, m, n, x):
    """Count indices j in [0, n) for which x - arr2[j] occurs in arr1[:m]."""
    seen = {arr1[i] for i in range(m)}
    return sum(1 for j in range(n) if x - arr2[j] in seen)
#TOFILL

if __name__ == '__main__':
    # Fixed TransCoder evaluation cases; each tuple is (arr1, arr2, m, n, x).
    # The harness substitutes a candidate implementation for the #TOFILL
    # marker as `f_filled` and compares it against f_gold on every case.
    param = [
    ([1, 2, 5, 5, 9, 11, 12, 14, 16, 18, 35, 36, 39, 44, 50, 52, 52, 59, 69, 81, 82, 84, 85, 87, 87, 87, 88, 88, 89, 90, 90, 92, 97],[5, 5, 8, 20, 20, 24, 25, 29, 34, 37, 43, 45, 48, 49, 59, 60, 68, 70, 70, 72, 72, 75, 76, 77, 79, 81, 84, 85, 86, 88, 95, 96, 96],17,29,32,),
    ([52, 28, -38, 78, -86, 78, -48, -70, -80, 28, -8, 60, -28, 90, 6, 76, 32, -54, 30, 30, -32, -24, -36, 62, 36, -66, 56, 92, -20, 90, 32],[-88, -32, 30, 32, -46, 62, -92, -90, -18, -18, 10, 16, 60, -40, 32, -88, 60, -82, 76, 50, 86, -82, -48, -68, -42, 34, 4, 0, 98, 92, -78],30,27,17,),
    ([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],13,11,8,),
    ([91, 95, 13, 13, 76, 18, 36, 86, 26, 13, 17, 68, 58, 42, 38, 9, 42, 90, 14, 74, 38, 64, 15],[16, 96, 8, 35, 12, 27, 81, 21, 32, 82, 95, 81, 53, 76, 72, 16, 9, 16, 61, 1, 36, 71, 28],11,12,15,),
    ([-96, -94, -94, -92, -74, -70, -66, -54, -48, -20, -18, -10, -6, -2, 2, 18, 36, 48, 52, 58, 68, 74, 88, 90, 94],[-92, -72, -72, -64, -58, -52, -30, -28, -24, -24, -16, -10, -2, 4, 12, 22, 30, 38, 44, 62, 64, 68, 86, 88, 90],19,14,21,),
    ([1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0],[1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0],18,19,29,),
    ([7, 18, 19, 20, 24, 25, 25, 27, 30, 35, 39, 42, 58, 59, 63, 64, 64, 66, 66, 68, 69, 77, 86, 93],[2, 2, 18, 20, 22, 22, 31, 35, 36, 40, 41, 41, 41, 42, 42, 43, 45, 61, 79, 83, 87, 91, 95, 96],22,18,18,),
    ([86, 44, 10, 80, 12, 52, -92, 2, 42, -32, -14, 2, -42, 40, 96, 22, 58, -90, -20, 22, 96, 10, -92, -28, -28, 80, 36, 72, -2, 32, -46, 62, -58, 20, 22, 32, -98, -2, -42, -90, 10, 70, 54, -32],[-4, -76, -98, 14, 30, -10, -10, 62, 88, -94, -74, -82, 84, 44, 58, 8, -42, -66, -18, 68, -78, 42, -32, 38, -98, 38, -78, 42, 86, -38, -6, -72, -44, 8, -6, -48, -62, 82, 94, -92, -56, 28, -54, 34],26,36,31,),
    ([0, 0, 1, 1, 1, 1],[0, 0, 1, 1, 1, 1],5,3,5,),
    ([43, 2, 4, 99, 45, 80, 27, 8, 64, 77, 57, 55, 71, 67, 51, 42, 58, 70, 5, 62, 55, 20, 61, 47, 66, 80, 70, 24, 56, 22, 58, 63, 61, 41, 20, 97, 47],[11, 66, 41, 17, 93, 25, 24, 17, 12, 33, 62, 86, 48, 68, 36, 36, 39, 82, 7, 66, 5, 48, 27, 9, 56, 6, 61, 91, 98, 74, 61, 63, 98, 96, 57, 63, 85],24,29,21,)
    ]
    n_success = 0
    for i, parameters_set in enumerate(param):
        if f_filled(*parameters_set) == f_gold(*parameters_set):
            n_success+=1
    print("#Results: %i, %i" % (n_success, len(param)))
3533880 | import numpy as np
import pandas as pd
from regression import (logreg, utils)
from sklearn.preprocessing import StandardScaler
def main():
    """Train a logistic regression model on the NSCLC feature subset.

    Loads the dataset via the project's utils helper, standardizes features
    (fit on train, transform val), then trains and plots the loss history.
    """
    # load data with default settings
    X_train, X_val, y_train, y_val = utils.loadDataset(features=['Penicillin V Potassium 500 MG', 'Computed tomography of chest and abdomen',
                                                                 'Plain chest X-ray (procedure)', 'Low Density Lipoprotein Cholesterol',
                                                                 'Creatinine', 'AGE_DIAGNOSIS'], split_percent=0.8, split_state=42)
    # scale data since values vary across features
    sc = StandardScaler()
    X_train = sc.fit_transform(X_train)
    X_val = sc.transform(X_val)
    print(X_train.shape, X_val.shape, y_val.shape, y_train.shape)
    """
    # for testing purposes once you've added your code
    # CAUTION: hyperparameters have not been optimized
    """
    # Left-over grid search over (max_iter, learning_rate, batch_size),
    # kept for reference:
    # log_model = logreg.LogisticRegression(num_feats=6, max_iter=10, tol=0.01, learning_rate=0.00001, batch_size=12)
    # A = [100, 200, 300, 500, 1000]
    # B = [0.0001, 0.001, 0.1]
    # C = [10, 20, 30, 40, 45, 50, 55]
    # for a in A:
    #     for b in B:
    #         for c in C:
    #             log_model = logreg.LogisticRegression(num_feats=6, max_iter=a, tol=0.01, learning_rate=b, batch_size=c)
    #             log_model.train_model(X_train, y_train, X_val, y_val)
    #             print("a: ", a, "b:", b, "c: ", c)
    #             log_model.plot_loss_history()
    log_model = logreg.LogisticRegression(num_feats=6, max_iter=10000, tol=0.000001, learning_rate=0.001, batch_size=55)
    log_model.train_model(X_train, y_train, X_val, y_val)
    log_model.plot_loss_history()


if __name__ == "__main__":
    main()
| StarcoderdataPython |
11270270 | <gh_stars>1-10
import numpy as np
# ========================================================================
def p0_printer(par):
    """Return a print-like function that only emits output on rank 0.

    `par` is expected to expose a `rank` attribute (e.g. an MPI communicator
    wrapper); all other ranks get a silent no-op printer.
    """
    rank = par.rank

    def printer(*args, **kwargs):
        if rank != 0:
            return
        print(*args, **kwargs)

    return printer
# ========================================================================
def hill(x):
    """Return the hill surface height profile y(x), normalized by hill height.

    `x` is a numpy array of streamwise positions in hill-height units.
    The profile is a piecewise cubic polynomial defined on x* = 28 x, with
    the domain mirrored about x* = 126 (x* > 128 is folded via 252 - x*),
    so the geometry is symmetric front/back.
    NOTE(review): the coefficients match a periodic-hill benchmark geometry
    (hill height h = 28) -- confirm against the reference definition.
    """
    h = 28.0
    xstar = x * h
    # Fold the far half of the domain back onto the front hill shape.
    xstar[xstar > 128] = 252 - xstar[xstar > 128]
    ystar = np.zeros(x.shape)
    # Segment 1: 0 <= x* < 9, clipped from above at the hill crest (28).
    idx = (0.0 <= xstar) & (xstar < 9.0)
    ystar[idx] = np.minimum(
        28 * np.ones(x[idx].shape),
        2.800000000000e01
        + 0.000000000000e00 * xstar[idx]
        + 6.775070969851e-03 * xstar[idx] ** 2
        - 2.124527775800e-03 * xstar[idx] ** 3,
    )
    # Segment 2: 9 <= x* < 14.
    idx = (9.0 <= xstar) & (xstar < 14.0)
    ystar[idx] = (
        2.507355893131e01
        + 9.754803562315e-01 * xstar[idx]
        - 1.016116352781e-01 * xstar[idx] ** 2
        + 1.889794677828e-03 * xstar[idx] ** 3
    )
    # Segment 3: 14 <= x* < 20.
    idx = (14.0 <= xstar) & (xstar < 20.0)
    ystar[idx] = (
        2.579601052357e01
        + 8.206693007457e-01 * xstar[idx]
        - 9.055370274339e-02 * xstar[idx] ** 2
        + 1.626510569859e-03 * xstar[idx] ** 3
    )
    # Segment 4: 20 <= x* < 30.
    idx = (20.0 <= xstar) & (xstar < 30.0)
    ystar[idx] = (
        4.046435022819e01
        - 1.379581654948e00 * xstar[idx]
        + 1.945884504128e-02 * xstar[idx] ** 2
        - 2.070318932190e-04 * xstar[idx] ** 3
    )
    # Segment 5: 30 <= x* < 40.
    idx = (30.0 <= xstar) & (xstar < 40.0)
    ystar[idx] = (
        1.792461334664e01
        + 8.743920332081e-01 * xstar[idx]
        - 5.567361123058e-02 * xstar[idx] ** 2
        + 6.277731764683e-04 * xstar[idx] ** 3
    )
    # Segment 6: 40 <= x* < 50, clipped from below at the channel floor (0).
    idx = (40.0 <= xstar) & (xstar < 50.0)
    ystar[idx] = np.maximum(
        np.zeros(x[idx].shape),
        5.639011190988e01
        - 2.010520359035e00 * xstar[idx]
        + 1.644919857549e-02 * xstar[idx] ** 2
        + 2.674976141766e-05 * xstar[idx] ** 3,
    )
    # Normalize back to hill-height units.
    return ystar / h
# ========================================================================
def xplanes():
    """Return the x-station locations (in hill heights) used for profile extraction."""
    stations = (0.05, 0.5, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0)
    return list(stations)
| StarcoderdataPython |
8013462 | <reponame>Nullius-2020/SSL-Competitioin-Top-10-solution
# coding: utf8
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
import os.path as osp
import sys
import argparse
from PIL import Image
from tqdm import tqdm
import imghdr
import logging
import pickle
import cv2
#import gdal
def parse_args():
    """Parse CLI arguments; print help and exit(1) when invoked with no args."""
    parser = argparse.ArgumentParser(
        description='Data analyse and data check before training.')
    parser.add_argument(
        '--data_dir',
        dest='data_dir',
        help='Dataset directory',
        default=None,
        type=str)
    parser.add_argument(
        '--num_classes',
        dest='num_classes',
        help='Number of classes',
        default=None,
        type=int)
    parser.add_argument(
        '--separator',
        dest='separator',
        help='file list separator',
        default="\t",
        type=str)
    parser.add_argument(
        '--ignore_index',
        dest='ignore_index',
        help='Ignored class index',
        default=255,
        type=int)
    # With no arguments at all, show usage instead of running with all-None.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def read_img(img_path):
    """Load an image as a numpy array (.png/.jpg via OpenCV, .npy via numpy).

    Raises:
        Exception: if the file cannot be decoded or the extension is unsupported.
    """
    name, ext = osp.splitext(img_path)
    if ext == '.png' or ext == '.jpg':
        dataset = cv2_imread(img_path, cv2.IMREAD_UNCHANGED)
        # BUG FIX: cv2.imdecode returns None on failure, but on success it
        # returns an ndarray, for which `== None` is an elementwise comparison
        # and truth-testing it raises. Use an identity check instead.
        if dataset is None:
            raise Exception('Can not open', img_path)
        im_data = np.asarray(dataset)
        # NOTE(review): this transpose assumes (C, H, W) input, while OpenCV
        # decodes to (H, W, C); kept unchanged to preserve existing behavior
        # for current callers -- confirm intended layout.
        return im_data.transpose((1, 2, 0))
    elif ext == '.npy':
        return np.load(img_path)
    else:
        raise Exception('Not support {} image format!'.format(ext))
def img_pixel_statistics(img, img_value_num, img_min_value, img_max_value):
    """Accumulate per-channel pixel statistics for one (H, W, C) image.

    Args:
        img: image array indexed as (H, W, C); pixel values must be integers
            since they are used as histogram indices.
        img_value_num: per-channel running value histograms (list of lists),
            grown and updated in place.
        img_min_value / img_max_value: per-channel running min / max lists,
            updated in place.

    Returns:
        (means, stds, img_min_value, img_max_value, img_value_num) where
        means/stds are this image's per-channel mean and std.
    """
    # BUG FIX: the original called img.transpose((1, 2, 0)) and discarded the
    # result (a no-op); the code below already indexes img as (H, W, C).
    # Also removed `add_num`, which was always empty and only dead-appended.
    channel = img.shape[2]
    means = np.zeros(channel)
    stds = np.zeros(channel)
    for k in range(channel):
        img_k = img[:, :, k]
        # count mean, std
        means[k] = np.mean(img_k)
        stds[k] = np.std(img_k)
        # count min, max
        min_value = np.min(img_k)
        max_value = np.max(img_k)
        if img_max_value[k] < max_value:
            img_max_value[k] = max_value
        if img_min_value[k] > min_value:
            img_min_value[k] = min_value
        # Grow the histogram so every observed value has a slot, then add
        # this image's counts to the running totals.
        unique, counts = np.unique(img_k, return_counts=True)
        max_unique = np.max(unique)
        add_len = max_unique - len(img_value_num[k]) + 1
        if add_len > 0:
            img_value_num[k] += ([0] * add_len)
        for i in range(len(unique)):
            value = unique[i]
            img_value_num[k][value] += counts[i]
    return means, stds, img_min_value, img_max_value, img_value_num
def data_distribution_statistics(data_dir, img_value_num, logger):
    """Log dataset-wide pixel statistics and pickle per-channel value ratios.

    Writes [total_ratio, img_value_num] to <data_dir>/img_pixel_statistics.pkl.
    Does nothing beyond the banner when no histograms were accumulated.
    """
    logger.info(
        "\n-----------------------------\nThe whole dataset statistics...")
    if not img_value_num:
        return
    logger.info("\nImage pixel statistics:")
    total_ratio = []
    for counts in img_value_num:
        total = sum(counts)
        total_ratio.append(np.around([c / total for c in counts], decimals=4))
    out_path = os.path.join(data_dir, 'img_pixel_statistics.pkl')
    with open(out_path, 'wb') as f:
        pickle.dump([total_ratio, img_value_num], f)
def data_range_statistics(img_min_value, img_max_value, logger):
    """Log the per-channel min and max pixel values seen over the dataset."""
    msg = "value range: \nimg_min_value = {} \nimg_max_value = {}".format(
        img_min_value, img_max_value)
    logger.info(msg)
def cal_normalize_coefficient(total_means, total_stds, total_img_num, logger):
    """Log per-channel normalization coefficients (summed stats / image count)."""
    mean = total_means / total_img_num
    std = total_stds / total_img_num
    msg = ("\nCount the channel-by-channel mean and std of the image:\n"
           "mean = {}\nstd = {}".format(mean, std))
    logger.info(msg)
def error_print(msg):
    """Prefix *msg* with the "NOT PASS" marker used in check-result logging."""
    # Renamed the parameter from `str`, which shadowed the builtin; all call
    # sites in this file pass it positionally.
    return "\nNOT PASS " + msg
def correct_print(msg):
    """Prefix *msg* with the "PASS" marker used in check-result logging."""
    # Renamed the parameter from `str`, which shadowed the builtin; all call
    # sites in this file pass it positionally.
    return "\nPASS " + msg
def cv2_imread(file_path, flag=cv2.IMREAD_COLOR):
    """
    Read an image with OpenCV, working around cv2.imread failing on
    non-ASCII (e.g. Chinese) file paths on Windows by decoding the raw
    bytes loaded via numpy instead.
    """
    return cv2.imdecode(np.fromfile(file_path, dtype=np.uint8), flag)
def pil_imread(file_path):
    """Read a pseudo-color label image as a single-channel grayscale numpy array."""
    im = Image.open(file_path).convert('L')
    return np.asarray(im)
def get_img_shape_range(img, max_width, max_height, min_width, min_height):
    """Fold this image's (height, width) into the running shape extrema.

    Returns the updated (max_width, max_height, min_width, min_height).
    """
    height, width = img.shape[0], img.shape[1]
    return (max(width, max_width),
            max(height, max_height),
            min(width, min_width),
            min(height, min_height))
def get_img_channel_num(img, img_channels):
    """Record this image's channel count in img_channels (mutated and returned).

    NOTE(review): for a 2-D grayscale array, shape[-1] is the width rather
    than a channel count; callers appear to always pass H x W x C images --
    confirm.
    """
    last_dim = img.shape[-1]
    if last_dim not in img_channels:
        img_channels.append(last_dim)
    return img_channels
def is_label_single_channel(label):
    """Return True when the label array is 2-D, i.e. a single-channel image."""
    return label.ndim == 2
def image_label_shape_check(img, label):
    """Return True when the image and its label agree on height and width."""
    return img.shape[:2] == label.shape[:2]
def ground_truth_check(label, label_path):
    """
    Validate the label image format and count its classes.

    Args:
        label: label array (already loaded).
        label_path: path of the label file, used only for format sniffing.

    Returns:
        png_format: whether the on-disk file is a PNG.
        unique: sorted array of class values present in the label.
        counts: pixel count for each value in `unique`.
    """
    # NOTE(review): imghdr is deprecated and removed in Python 3.13; a magic-
    # byte check or Pillow would be a drop-in replacement -- confirm runtime.
    if imghdr.what(label_path) == "png":
        png_format = True
    else:
        png_format = False
    unique, counts = np.unique(label, return_counts=True)
    return png_format, unique, counts
def sum_label_check(label_classes, num_of_each_class, ignore_index, num_classes,
                    total_label_classes, total_num_of_each_class):
    """
    Merge one label's class/pixel counts into the dataset-wide totals.

    Args:
        label_classes: class values present in this label.
        num_of_each_class: pixel count per class value in this label.
        ignore_index: class value excluded from the range check.
        num_classes: expected number of classes.
        total_label_classes / total_num_of_each_class: running dataset totals,
            mutated in place and also returned.

    Returns:
        (is_label_correct, total_num_of_each_class, total_label_classes);
        is_label_correct is False when any non-ignored value falls outside
        [0, num_classes - 1].
    """
    is_label_correct = True
    # Drop ignore_index before range checking.
    if ignore_index in label_classes:
        label_classes2 = np.delete(label_classes,
                                   np.where(label_classes == ignore_index))
    else:
        label_classes2 = label_classes
    if min(label_classes2) < 0 or max(label_classes2) > num_classes - 1:
        is_label_correct = False
    # Accumulate counts for classes already seen; collect new classes to
    # append at the end so indices stay stable during the loop.
    add_class = []
    add_num = []
    for i in range(len(label_classes)):
        gi = label_classes[i]
        if gi in total_label_classes:
            j = total_label_classes.index(gi)
            total_num_of_each_class[j] += num_of_each_class[i]
        else:
            add_class.append(gi)
            add_num.append(num_of_each_class[i])
    total_num_of_each_class += add_num
    total_label_classes += add_class
    #print(total_num_of_each_class,total_label_classes)
    if len(total_label_classes) > 0 and len(total_num_of_each_class) > 0:
        return is_label_correct, total_num_of_each_class, total_label_classes
    else:
        # NOTE(review): sentinel [0.0001] lists are returned when nothing was
        # accumulated, presumably to avoid a later divide-by-zero in the
        # ratio computation -- confirm this is intentional.
        return is_label_correct, [0.0001], [0.0001]
def label_class_check(num_classes, total_label_classes, total_num_of_each_class,
                      wrong_labels, logger):
    """
    Check that observed label classes match `num_classes` / `ignore_index`.

    Label values must lie in [0, num_classes - 1] (or equal ignore_index),
    and should start at 0, otherwise accuracy may be affected.

    Returns:
        total_nc: list of (class value, ratio, pixel count) sorted by class.
    """
    # BUG FIX: total_num_of_each_class is a plain Python list, and
    # `list / scalar` raises TypeError; convert to an ndarray so the
    # elementwise ratio computation works.
    total_num_arr = np.asarray(total_num_of_each_class, dtype=np.float64)
    total_ratio = total_num_arr / total_num_arr.sum()
    total_ratio = np.around(total_ratio, decimals=4)
    total_nc = sorted(
        zip(total_label_classes, total_ratio, total_num_of_each_class))
    # Passes only when no out-of-range labels were seen and classes start at 0.
    if len(wrong_labels) == 0 and not total_nc[0][0]:
        logger.info(correct_print("label class check!"))
    else:
        logger.info(error_print("label class check!"))
    if total_nc[0][0]:
        logger.info("Warning: label classes should start from 0")
    if len(wrong_labels) > 0:
        logger.info(
            "fatal error: label class is out of range [0, {}]".format(
                num_classes - 1))
        for i in wrong_labels:
            logger.debug(i)
    return total_nc
def label_class_statistics(total_nc, logger):
    """Log the (class, ratio, pixel count) triples collected for the dataset."""
    msg = ("\nLabel class statistics:\n"
           "(label class, percentage, total pixel number) = {} ".format(total_nc))
    logger.info(msg)
def shape_check(shape_unequal_image, logger):
    """Log whether every image matched its label's height and width."""
    if not shape_unequal_image:
        logger.info(correct_print("shape check"))
        logger.info("All images are the same shape as the labels")
        return
    logger.info(error_print("shape check"))
    logger.info(
        "Some images are not the same shape as the labels as follow: ")
    for entry in shape_unequal_image:
        logger.debug(entry)
def separator_check(wrong_lines, file_list, separator, logger):
    """Log whether every line of `file_list` used the expected separator."""
    check_name = file_list.split(os.sep)[-1] + " DATASET.separator check"
    if not wrong_lines:
        logger.info(correct_print(check_name))
        return
    logger.info(error_print(check_name))
    logger.info(
        "The following list is not separated by {}".format(separator))
    for bad in wrong_lines:
        logger.debug(bad)
def imread_check(imread_failed, logger):
    """Log whether every image in the dataset could be read successfully."""
    if not imread_failed:
        logger.info(correct_print("dataset reading check"))
        logger.info("All images can be read successfully")
        return
    logger.info(error_print("dataset reading check"))
    logger.info("Failed to read {} images".format(len(imread_failed)))
    for failure in imread_failed:
        logger.debug(failure)
def single_channel_label_check(label_not_single_channel, logger):
    """Log whether every label image is single-channel (grayscale)."""
    if not label_not_single_channel:
        logger.info(correct_print("label single_channel check"))
        logger.info("All label images are single_channel")
        return
    logger.info(error_print("label single_channel check"))
    logger.info(
        "{} label images are not single_channel\nLabel pixel statistics may be insignificant"
        .format(len(label_not_single_channel)))
    for entry in label_not_single_channel:
        logger.debug(entry)
def img_shape_range_statistics(max_width, min_width, max_height, min_height,
                               logger):
    """Log the extreme image dimensions observed across the dataset."""
    logger.info("\nImage size statistics:")
    msg = ("max width = {} min width = {} max height = {} min height = {}".
           format(max_width, min_width, max_height, min_height))
    logger.info(msg)
def img_channels_statistics(img_channels, logger):
    """Log the distinct channel counts observed across the dataset."""
    unique_channels = np.unique(img_channels)
    logger.info("\nImage channels statistics\nImage channels = {}".format(
        unique_channels))
def data_analyse_and_check(data_dir, num_classes, separator, ignore_index,
                           logger):
    """Validate and analyse the train/val/test file lists under data_dir.

    For each of train.txt / val.txt / test.txt: every line is either
    "image" or "image<separator>label". Checks separators, readability,
    label channel count, image/label shape agreement and label class range,
    while accumulating per-channel pixel statistics over ALL images.
    Dataset-wide statistics are reported after the loop.
    """
    train_file_list = osp.join(data_dir, 'train.txt')
    val_file_list = osp.join(data_dir, 'val.txt')
    test_file_list = osp.join(data_dir, 'test.txt')
    total_img_num = 0
    has_label = False
    for file_list in [train_file_list, val_file_list, test_file_list]:
        # initialization (per file-list accumulators)
        imread_failed = []
        max_width = 0
        max_height = 0
        min_width = sys.float_info.max
        min_height = sys.float_info.max
        label_not_single_channel = []
        shape_unequal_image = []
        wrong_labels = []
        wrong_lines = []
        total_label_classes = []
        total_num_of_each_class = []
        img_channels = []
        with open(file_list, 'r') as fid:
            logger.info("\n-----------------------------\nCheck {}...".format(
                file_list))
            lines = fid.readlines()
            if not lines:
                logger.info("File list is empty!")
                continue
            for line in tqdm(lines):
                line = line.strip()
                parts = line.split(separator)
                if len(parts) == 1:
                    # image-only line: allowed for the test list only
                    has_label = False
                    if file_list == train_file_list or file_list == val_file_list:
                        logger.info("Train or val list must have labels!")
                        break
                    img_name = parts
                    img_path = img_name[0]
                    try:
                        img = cv2_imread(img_path)
                    except Exception as e:
                        imread_failed.append((line, str(e)))
                        continue
                elif len(parts) == 2:
                    # image + label line
                    has_label = True
                    img_name, label_name = parts[0], parts[1]
                    img_path = img_name
                    label_path = label_name
                    try:
                        img = cv2_imread(img_path)
                        label = pil_imread(label_path)
                    except Exception as e:
                        imread_failed.append((line, str(e)))
                        continue
                    is_single_channel = is_label_single_channel(label)
                    if not is_single_channel:
                        label_not_single_channel.append(line)
                        continue
                    is_equal_img_label_shape = image_label_shape_check(
                        img, label)
                    if not is_equal_img_label_shape:
                        shape_unequal_image.append(line)
                    png_format, label_classes, num_of_each_class = ground_truth_check(
                        label, label_path)
                    is_label_correct, total_num_of_each_class, total_label_classes = sum_label_check(
                        label_classes, num_of_each_class, ignore_index,
                        num_classes, total_label_classes,
                        total_num_of_each_class)
                    if not is_label_correct:
                        wrong_labels.append(line)
                else:
                    # wrong number of fields: separator problem
                    # NOTE(review): appends `lines` (the whole file) rather
                    # than `line` -- looks like a typo; confirm before fixing.
                    wrong_lines.append(lines)
                    continue
                if total_img_num == 0:
                    # lazily size the dataset-wide accumulators from the
                    # first successfully-read image's channel count
                    channel = img.shape[2]
                    total_means = np.zeros(channel)
                    total_stds = np.zeros(channel)
                    img_min_value = [sys.float_info.max] * channel
                    img_max_value = [0] * channel
                    img_value_num = []
                    [img_value_num.append([]) for i in range(channel)]
                means, stds, img_min_value, img_max_value, img_value_num = img_pixel_statistics(
                    img, img_value_num, img_min_value, img_max_value)
                total_means += means
                total_stds += stds
                max_width, max_height, min_width, min_height = get_img_shape_range(
                    img, max_width, max_height, min_width, min_height)
                img_channels = get_img_channel_num(img, img_channels)
                total_img_num += 1
            # data check
            separator_check(wrong_lines, file_list, separator, logger)
            imread_check(imread_failed, logger)
            if has_label:
                single_channel_label_check(label_not_single_channel, logger)
                shape_check(shape_unequal_image, logger)
                total_nc = label_class_check(num_classes, total_label_classes,
                                             total_num_of_each_class,
                                             wrong_labels, logger)
            # data analyse on train, validation, test set.
            img_channels_statistics(img_channels, logger)
            img_shape_range_statistics(max_width, min_width, max_height,
                                       min_height, logger)
            if has_label:
                label_class_statistics(total_nc, logger)
    # data analyse on the whole dataset.
    # NOTE(review): these accumulators are only bound once at least one image
    # was read; with three empty lists this raises NameError -- confirm.
    data_range_statistics(img_min_value, img_max_value, logger)
    data_distribution_statistics(data_dir, img_value_num, logger)
    cal_normalize_coefficient(total_means, total_stds, total_img_num, logger)
def main():
    """Entry point: parse CLI args, configure logging, run the data check.

    Sends INFO+ messages to the console and everything (DEBUG+) to
    data_analyse_and_check.log inside the dataset directory.
    """
    args = parse_args()
    data_dir = args.data_dir
    ignore_index = args.ignore_index
    num_classes = args.num_classes
    separator = args.separator

    log_path = os.path.join(data_dir, 'data_analyse_and_check.log')
    logger = logging.getLogger()
    logger.setLevel('DEBUG')
    formatter = logging.Formatter("%(message)s")

    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    stream_handler.setLevel('INFO')
    file_handler = logging.FileHandler(log_path, 'w')
    file_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)
    logger.addHandler(file_handler)

    data_analyse_and_check(data_dir, num_classes, separator, ignore_index,
                           logger)
    print("\nDetailed error information can be viewed in {}.".format(log_path))
# Run the checker only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| StarcoderdataPython |
5090598 | import pytest
# from pytest import approx # approx wasn't working on numpy arrays, use the builtin numpy testing functions
from HSTPBin import PyPeekXTF
from HSTB.drivers import HDCSio
from HSTB.drivers import hipsio
import numpy
# @pytest.fixture # creates an object used in the parameters of a function
# @pytest.fixture(scope="module", params=["smtp.gmail.com", "mail.python.org"]) #create only once for all tests in the module -- also can paramterize for multiple runs (use the request fixture that is always available)
# def smtp(request):
# return smtplib.SMTP(request.param, port=587, timeout=5)
# Machine-local HDCS demo-data line directories used as fixtures below:
# multibeam (mb), single-beam (sb) and side-scan sonar (sss) survey lines.
mb_path = r"C:\PydroTrunk\DocsAndDemoData\HDCS_Data\E350_H11529_G\RU_MB_2006\Caris2006-289\311_1835"
sb_path = r"C:\PydroTrunk\DocsAndDemoData\HDCS_Data\E350_H11529_G\RU_SB_2006\2006-226\100_1600"
sss_path = r"C:\PydroTrunk\DocsAndDemoData\HDCS_Data\A910_Buffer\WH14\2000-310\800_2108"
# Fixed seven-decimal float formatting so numpy array reprs are reproducible.
numpy.set_printoptions(formatter={'float': '{: 0.7f}'.format})
@pytest.fixture(scope="module", params=[HDCSio, hipsio])
def CarisIO(request):
    """Module-scoped fixture yielding each Caris I/O backend in turn.

    The HDCSio backend needs its license initialized before first use.
    """
    backend = request.param
    if backend == HDCSio:
        HDCSio.InitLicenseHDCS()
    yield backend
@pytest.mark.parametrize("year,doy,expected", [
    (1980, 1, 19),
    (2012, 200, 35),
    # (2012, 200, 36), # an intentional error
])
def test_leap_seconds(year, doy, expected):
    """TAI-UTC leap-second offset lookup for a given year and day-of-year."""
    offset = PyPeekXTF.TmGetTAIUTCOffset(year, doy)
    assert offset == expected
# Expected [time, value, status] triples for the first and last record of
# each attitude sensor stream on the multibeam demo line.
@pytest.mark.parametrize("sensor_type,hdcspath,indices,values", [
    ("Tide", mb_path, [0, -1], [[845490947.8760000, 0.4090000, 0.0000000], [845491113.3789999, 0.4250000, 0.0000000]]),
    ("Roll", mb_path, [0, -1], [[845490947.8760000, -0.0275161, 0.0000000], [845491113.3789999, -0.0313671, 0.0000000]]),
    ("Pitch", mb_path, [0, -1], [[845490947.8760000, 0.0102635, 0.0000000], [845491113.3789999, 0.0113249, 0.0000000]]),
    ("Gyro", mb_path, [0, -1], [[845490947.8760000, 1.4602781, 0.0000000], [845491113.3789999, 1.4487763, 0.0000000]]),
    ("SSSGyro", mb_path, [0, -1], [[845490947.6059999, 1.4603257, 0.0000000], [845491113.1610000, 1.4490309, 0.0000000]]),
    ("GPSHeight", mb_path, [0, -1], [[845490947.8760000, -39.0230000, 0.0000000], [845491113.3789999, -37.9820000, 0.0000000]]),
    ("Heave", mb_path, [0, -1], [[845490947.8760000, -0.0840000, 0.0000000], [845491113.3789999, 0.0040000, 0.0000000]]),
])
def test_attitude(CarisIO, sensor_type, hdcspath, indices, values):
    """Attitude time series matches the known first/last records."""
    att = CarisIO.HDCSAttitude(sensor_type).ReadTimeSeries(hdcspath)
    numpy.testing.assert_almost_equal(att[indices], values)
    # assert (att[indices] == approx(numpy.array(values))).all()
def test_write_tide(CarisIO):
    """Round-trip: bump tide values by 1, write a copy, read it back."""
    t = CarisIO.HDCSAttitude("Tide")
    o = t.ReadTimeSeries(mb_path, bVerbose=True)
    # Fixed: Python 2 'print o' statement -> print() call, so the file also
    # parses under Python 3 (identical output for a single argument in Py2).
    print(o)
    v = o[0]
    v[:, 1] += 1
    t.WriteTimeSeries(mb_path + "_o", v, o[1])
    o2 = t.ReadTimeSeries(mb_path + "_o", bVerbose=True)
    print(o2)
    numpy.testing.assert_almost_equal(v[:, 1], o2[0][:, 1])
def test_write_nav(CarisIO):
    """Round-trip: write a small fake navigation series and read it back."""
    n = CarisIO.HDCSNav("Navigation")
    o = n.ReadTimeSeries(mb_path, bVerbose=True)
    # Fixed: Python 2 'print o' statement -> print() call for Py3 parseability.
    print(o)
    v = o[0]
    v[:, 0] += 1.0
    # The modified real data is discarded and replaced by a two-record fake.
    fake = numpy.array([[0., 0., -0., 0., 0.], [1., 0., -0., 0., 0.]])
    v = fake
    n.WriteTimeSeries(mb_path + "_o", v, o[1])
    o2 = n.ReadTimeSeries(mb_path + "_o", bVerbose=True)
    print(o2)
    numpy.testing.assert_almost_equal(v[:, 0], o2[0][:, 0])
# Expected first/last bathymetry records per sensor; six columns per record
# (time first; remaining column semantics depend on the sensor -- confirm
# against the HDCSio driver documentation).
@pytest.mark.parametrize("sensor_type,hdcspath,indices,values", [
    ("SLRange", mb_path, [0, -1], [[845490947.6059999, 0.0000000, 0.0164830, -1.0428000, 0.0000000, 0.0000000],
                                   [845491113.1610000, 0.0000000, 0.0140960, 1.0428000, 0.0000000, 0.0000000]]),
    ("ObservedDepths", mb_path, [0, -1], [[845490947.6860000, -0.1230000, -21.8550000, 13.7640000, 0.0000000, 201326592.0000000],
                                          [845491113.2410001, -0.1280000, 18.0570000, 13.0430000, 0.0000000, 201326592.0000000]]),
    ("ProcessedDepths", mb_path, [0, -1], [[845490947.6860000, 0.6528447, -1.3276895, 14.0250000, 0.0000000, 12582912.0000000],
                                           [845491113.2410001, 0.6528390, -1.3275605, 12.2440000, 0.0000000, 12582912.0000000]]),
])
def test_bathy(sensor_type, hdcspath, indices, values):
    """Bathymetry time series matches the known first/last records."""
    att = HDCSio.HDCSBathy(sensor_type).ReadTimeSeries(hdcspath)
    numpy.testing.assert_almost_equal(att[indices], values)
# Expected first/last navigation records ([time, lat, lon, ...] in radians,
# presumably -- confirm) for the multibeam and side-scan demo lines.
@pytest.mark.parametrize("sensor_type,hdcspath,indices,values", [
    ("Navigation", mb_path, [0, -1], [[845490947.8760000, 0.6528409, -1.3276877, 0.0000000, 0.0000000], [845491113.3789999, 0.6528415, -1.3275597, 0.0000000, 0.0000000]]),
    ("SSSNavigation", sss_path, [0, -1], [[657925744.6600000, 0.7518598, -1.2342281, 0.0000000, 3221225472.0000000], [657926131.6760000, 0.7517919, -1.2339690, 0.0000000, 0.0000000]]),
])
def test_navigation(CarisIO, sensor_type, hdcspath, indices, values):
    """Navigation time series matches the known first/last records."""
    att = CarisIO.HDCSNav(sensor_type).ReadTimeSeries(hdcspath)
    numpy.testing.assert_almost_equal(att[indices], values)
def test_tideerror():
    """The tide-error file holds 7 records whose error values are zero."""
    tef = PyPeekXTF.TideErrorFile(mb_path)
    assert tef.getNumberOfRecords() == 7
    assert tef.read(1) == [0, 845490947.876, 0.0, 0]
    assert tef.read(7) == [0, 845491113.379, 0.0, 0]
"""
>>> HDCSio.InitLicenseHDCS()
(True, '')
>>> PyPeekXTF.InitLicense()
'c2c98f39ba82acc965'
>>> PyPeekXTF.IsLicensed()
True
>>> PyPeekXTF.HDCSInit()
True
>>> HDCSio.InitLicenseHDCS()
(True, '')
>>> PyPeekXTF.TmGetTAIUTCOffset(1980, 1)
19
>>> PyPeekXTF.TmGetTAIUTCOffset(2012, 185)
35
>>> hdcsdatapath = r"C:\PydroTrunk\DocsAndDemoData\HDCS_Data"
>>> PyPeekXTF.SetEnvironment('HDCS_DATA_PATH', hdcsdatapath)
True
>>> PyPeekXTF.HDCSInit()
True
>>> pathToPVDL = r"C:\PydroTrunk\DocsAndDemoData\HDCS_Data\E350_H11529_G\RU_MB_2006\Caris2006-289\311_1835"
>>> attitude, bOK = PyPeekXTF.TideOpenDir(pathToPVDL, "query")
>>> attitude, bOK
(<Swig Object of type 'HDCS_ProcessedDepths' at 0xd98ee30>, 0)
>>> (rcodeCaris,numLineSegments, numRecords,minTime, maxTime,minSensor, maxSensor,summaryStatus) = PyPeekXTF.TideSummary(attitude)
>>> (rcodeCaris,numLineSegments, numRecords,minTime, maxTime,minSensor, maxSensor,summaryStatus)
(0, 1, 7, 845490947.876, 845491113.379, 0.342, 0.425, 0)
>>> PyPeekXTF.TideReadSeq(attitude)
[0, 845490947.876, 0.409, 0]
>>> PyPeekXTF.TideReadSeq(attitude)
[0, 845490977.877, 0.41, 0]
>>> PyPeekXTF.TideReadSeq(attitude)
[0, 845491007.927, 0.411, 0]
>>> PyPeekXTF.TideReadSeq(attitude)
[0, 845491037.977, 0.342, 0]
>>> PyPeekXTF.TideReadSeq(attitude)
[0, 845491067.978, 0.422, 0]
>>> PyPeekXTF.TideReadSeq(attitude)
[0, 845491098.028, 0.424, 0]
>>> PyPeekXTF.TideReadSeq(attitude)
[0, 845491113.379, 0.425, 0]
>>> PyPeekXTF.TideReadSeq(attitude)
[5001225, -9.255963134931783e+61, -9.255963134931783e+61, 3435973836L]
>>> PyPeekXTF.TideClose(attitude)
0
>>> tide = HDCSio.HDCSAttitude("Tide")
>>> tide
<HSTB.drivers.HDCSio.HDCSAttitude instance at 0x000000000E63ACC8>
>>> numpy.set_printoptions(formatter={'float': '{: 0.7f}'.format})
>>> sss_path = r"C:\PydroTrunk\DocsAndDemoData\HDCS_Data\A910_Buffer\WH14\2000-310\800_2108"
>>> sb_path = r"C:\PydroTrunk\DocsAndDemoData\HDCS_Data\E350_H11529_G\RU_SB_2006\2006-226\100_1600"
>>> mb_path = r"C:\PydroTrunk\DocsAndDemoData\HDCS_Data\E350_H11529_G\RU_MB_2006\Caris2006-289\311_1835"
>>> numpy.set_printoptions(formatter={'float': '{: 0.7f}'.format})
>>> HDCSio.HDCSAttitude("Tide").ReadTimeSeries(mb_path)
array([[ 845490947.8760000, 0.4090000, 0.0000000],
[ 845490977.8770000, 0.4100000, 0.0000000],
[ 845491007.9270000, 0.4110000, 0.0000000],
[ 845491037.9770000, 0.3420000, 0.0000000],
[ 845491067.9780000, 0.4220000, 0.0000000],
[ 845491098.0280000, 0.4240000, 0.0000000],
[ 845491113.3789999, 0.4250000, 0.0000000]])
>>> HDCSio.HDCSAttitude("Roll").ReadTimeSeries(mb_path)
array([[ 845490947.8760000, -0.0275161, 0.0000000],
[ 845490947.9260000, -0.0274741, 0.0000000],
[ 845490947.9760000, -0.0274360, 0.0000000],
...,
[ 845491113.2790000, -0.0308430, 0.0000000],
[ 845491113.3290000, -0.0311052, 0.0000000],
[ 845491113.3789999, -0.0313671, 0.0000000]])
>>> HDCSio.HDCSAttitude("Pitch").ReadTimeSeries(mb_path)
array([[ 845490947.8760000, 0.0102635, 0.0000000],
[ 845490947.9260000, 0.0103094, 0.0000000],
[ 845490947.9760000, 0.0103478, 0.0000000],
...,
[ 845491113.2790000, 0.0113203, 0.0000000],
[ 845491113.3290000, 0.0113225, 0.0000000],
[ 845491113.3789999, 0.0113249, 0.0000000]])
>>> HDCSio.HDCSAttitude("Gyro").ReadTimeSeries(mb_path)
array([[ 845490947.8760000, 1.4602781, 0.0000000],
[ 845490947.9260000, 1.4602304, 0.0000000],
[ 845490947.9760000, 1.4601867, 0.0000000],
...,
[ 845491113.2790000, 1.4493045, 0.0000000],
[ 845491113.3290000, 1.4490309, 0.0000000],
[ 845491113.3789999, 1.4487763, 0.0000000]])
>>> HDCSio.HDCSAttitude("SSSGyro").ReadTimeSeries(mb_path)
array([[ 845490947.6059999, 1.4603257, 0.0000000],
[ 845490947.6920000, 1.4602304, 0.0000000],
[ 845490947.7780000, 1.4601867, 0.0000000],
...,
[ 845491113.0150000, 1.4498329, 0.0000000],
[ 845491113.0870000, 1.4493045, 0.0000000],
[ 845491113.1610000, 1.4490309, 0.0000000]])
>>> HDCSio.HDCSAttitude("GPSHeight").ReadTimeSeries(mb_path)
array([[ 845490947.8760000, -39.0230000, 0.0000000],
[ 845490947.9260000, -39.0240000, 0.0000000],
[ 845490947.9760000, -39.0240000, 0.0000000],
...,
[ 845491113.2790000, -37.9700000, 0.0000000],
[ 845491113.3290000, -37.9760000, 0.0000000],
[ 845491113.3789999, -37.9820000, 0.0000000]])
>>> HDCSio.HDCSAttitude("Heave").ReadTimeSeries(mb_path)
array([[ 845490947.8760000, -0.0840000, 0.0000000],
[ 845490947.9260000, -0.0840000, 0.0000000],
[ 845490947.9760000, -0.0850000, 0.0000000],
...,
[ 845491113.2790000, 0.0040000, 0.0000000],
[ 845491113.3290000, 0.0040000, 0.0000000],
[ 845491113.3789999, 0.0040000, 0.0000000]])
>>> HDCSio.HDCSBathy("SLRange").ReadTimeSeries(mb_path)
array([[ 845490947.6059999, 0.0000000, 0.0164830, -1.0428000, 0.0000000,
0.0000000],
[ 845490947.6059999, 0.0000000, 0.0162190, -1.0341000, 0.0000000,
0.0000000],
[ 845490947.6059999, 0.0000000, 0.0159950, -1.0253000, 0.0000000,
0.0000000],
...,
[ 845491113.1610000, 0.0000000, 0.0137620, 1.0253000, 0.0000000,
0.0000000],
[ 845491113.1610000, 0.0000000, 0.0139860, 1.0341000, 0.0000000,
0.0000000],
[ 845491113.1610000, 0.0000000, 0.0140960, 1.0428000, 0.0000000,
0.0000000]])
>>> HDCSio.HDCSBathy("ObservedDepths").ReadTimeSeries(mb_path)
array([[ 845490947.6860000, -0.1230000, -21.8550000, 13.7640000,
0.0000000, 201326592.0000000],
[ 845490947.6860000, -0.1230000, -21.4020000, 13.7650000,
0.0000000, 201326592.0000000],
[ 845490947.6860000, -0.1230000, -21.0020000, 13.7900000,
0.0000000, 201326592.0000000],
...,
[ 845491113.2410001, -0.1280000, 17.4350000, 13.0890000,
0.0000000, 201326592.0000000],
[ 845491113.2410001, -0.1290000, 17.8190000, 13.1130000,
0.0000000, 201326592.0000000],
[ 845491113.2410001, -0.1280000, 18.0570000, 13.0430000,
0.0000000, 201326592.0000000]])
>>> HDCSio.HDCSBathy("ProcessedDepths").ReadTimeSeries(mb_path)
array([[ 845490947.6860000, 0.6528447, -1.3276895, 14.0250000,
0.0000000, 12582912.0000000],
[ 845490947.6860000, 0.6528446, -1.3276895, 14.0120000,
0.0000000, 12582912.0000000],
[ 845490947.6860000, 0.6528446, -1.3276894, 14.0240000,
0.0000000, 12582912.0000000],
...,
[ 845491113.2410001, 0.6528391, -1.3275605, 12.3090000,
0.0000000, 12582912.0000000],
[ 845491113.2410001, 0.6528391, -1.3275605, 12.3210000,
0.0000000, 12582912.0000000],
[ 845491113.2410001, 0.6528390, -1.3275605, 12.2440000,
0.0000000, 12582912.0000000]])
>>> HDCSio.HDCSNav("Navigation").ReadTimeSeries(mb_path)
array([[ 845490947.8760000, 0.6528409, -1.3276877, 0.0000000, 0.0000000],
[ 845490947.9260000, 0.6528409, -1.3276876, 0.0000000, 0.0000000],
[ 845490947.9760000, 0.6528409, -1.3276876, 0.0000000, 0.0000000],
...,
[ 845491113.2790000, 0.6528415, -1.3275598, 0.0000000, 0.0000000],
[ 845491113.3290000, 0.6528415, -1.3275598, 0.0000000, 0.0000000],
[ 845491113.3789999, 0.6528415, -1.3275597, 0.0000000, 0.0000000]])
>>> HDCSio.HDCSNav("SSSNavigation").ReadTimeSeries(sss_path)
array([[ 657925744.6600000, 0.7518598, -1.2342281, 0.0000000,
3221225472.0000000],
[ 657925744.9800000, 0.7518599, -1.2342277, 0.0000000,
3221225472.0000000],
[ 657925745.5410000, 0.7518599, -1.2342273, 0.0000000,
3221225472.0000000],
...,
[ 657926130.6350000, 0.7517925, -1.2339694, 0.0000000, 0.0000000],
[ 657926131.1460000, 0.7517922, -1.2339692, 0.0000000, 0.0000000],
[ 657926131.6760000, 0.7517919, -1.2339690, 0.0000000, 0.0000000]])
>>> tef = PyPeekXTF.TideErrorFile(r"C:\PydroTrunk\DocsAndDemoData\HDCS_Data\E350_H11529_G\RU_MB_2006\Caris2006-289\311_1835")
>>> tef.getNumberOfRecords()
7
>>> tef.read(0)
[5000064, -9.255963134931783e+61, -9.255963134931783e+61, 3435973836L]
>>> tef.read(1\)
Traceback ( File "<interactive input>", line 1
tef.read(1\)
^
SyntaxError: unexpected character after line continuation character
>>> tef.read(1)
[0, 845490947.876, 0.0, 0]
>>> tef.read(2)
[0, 845490977.877, 0.0, 0]
>>> tef.read(3)
[0, 845491007.927, 0.0, 0]
>>> tef.read(4)
[0, 845491037.977, 0.0, 0]
>>> tef.read(5)
[0, 845491067.978, 0.0, 0]
>>> tef.read(6)
[0, 845491098.028, 0.0, 0]
>>> tef.read(7)
[0, 845491113.379, 0.0, 0]
>>> tef.read(8)
[5000064, -9.255963134931783e+61, -9.255963134931783e+61, 3435973836L]
"""
| StarcoderdataPython |
206939 | from setuptools import setup, find_packages
# Long PyPI description rendered on the package page (Markdown).
long_description = '''
Headliner is a sequence modeling library that eases the training and
**in particular, the deployment of custom sequence models** for both researchers and developers.
You can very easily deploy your models in a few lines of code. It was originally
built for our own research to generate headlines from news articles.
That's why we chose the name, Headliner. Although this library was created internally to
generate headlines, you can also use it for other tasks like machine translations,
text summarization and many more.
Read the documentation at: https://as-ideas.github.io/headliner/
Headliner is compatible with Python 3.6 and is distributed under the MIT license.
'''
# setuptools package metadata / build configuration.
setup(
    name='headliner',
    version='0.0.18',
    author='<NAME>',
    author_email='<EMAIL>',
    description='Easy training and deployment of seq2seq models.',
    long_description=long_description,
    license='MIT',
    install_requires=['scikit-learn', 'nltk', 'pyyaml'],
    # Optional dependency groups: pip install headliner[tests] / [docs] / [dev]
    extras_require={
        'tests': ['pytest', 'pytest-cov', 'codecov', 'tensorflow~=2.0.0'],
        'docs': ['mkdocs', 'mkdocs-material'],
        'dev': ['bumpversion']
    },
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    packages=find_packages(exclude=('tests',)),
)
9719911 | # -*- coding:utf8 -*-
# File : desc_discogan_edges2shoes_cnn.py
# Author : <NAME>
# Email : <EMAIL>
# Date : 4/2/17
#
# This file is part of TensorArtist.
import re
from tartist.app import gan
from tartist.app.gan import GANGraphKeys
from tartist.core import get_env, get_logger
from tartist.core.utils.naming import get_dump_directory, get_data_directory
from tartist.nn import opr as O, optimizer, summary
logger = get_logger(__file__)
# Experiment configuration consumed by the TensorArtist framework.
# NOTE(review): the dataset name says 'edges2shoes' but the data directory
# ('Pix2Pix/edges2faces') and db_b ('train_face_db') point at a faces DB --
# confirm which dataset this descriptor is actually meant to train on.
__envs__ = {
    'dir': {
        'root': get_dump_directory(__file__),
        'data': get_data_directory('Pix2Pix/edges2faces')
    },
    'dataset': {
        'name': 'edges2shoes',
        'db_a': 'train_edges_db',
        'db_b': 'train_face_db',
    },
    # Training hyperparameters.
    'trainer': {
        'learning_rate': 2e-4,
        'batch_size': 64,
        'epoch_size': 1000,
        'nr_epochs': 200,
        'nr_g_per_iter': 1,
        'nr_d_per_iter': 1,
    }
}
# Trainer classes the framework instantiates for this experiment.
__trainer_cls__ = gan.GANTrainer
__trainer_env_cls__ = gan.GANTrainerEnv
def make_network(env):
    """Build the DiscoGAN graph.

    Two generators (atob, btoa) share an encoder/decoder architecture; two
    discriminators (a, b) reuse the encoder plus a linear head. The generator
    loss mixes adversarial and cycle-reconstruction terms; training-phase
    scalars/images are registered as summaries.
    """
    with env.create_network() as net:
        h, w, c = 64, 64, 3
        def bn_leaky_relu(x, name='bn_leaky_relu'):
            with env.name_scope(name):
                return O.leaky_relu(O.bn_nonlin(x))
        dpc = env.create_dpcontroller()
        with dpc.activate():
            def inputs():
                img_a = O.placeholder('img_a', shape=(None, h, w, c))
                img_b = O.placeholder('img_b', shape=(None, h, w, c))
                return [img_a, img_b]
            def encoder(x, nonlin):
                # 4 strided convs: 64x64xc -> 4x4x512 latent feature map.
                w_init = O.truncated_normal_initializer(stddev=0.02)
                with O.argscope(O.conv2d, O.deconv2d, kernel=4, stride=2, W=w_init),\
                        O.argscope(O.leaky_relu, alpha=0.2):
                    _ = x
                    _ = O.conv2d('conv1', _, 64, nonlin=O.leaky_relu)
                    _ = O.conv2d('conv2', _, 128, nonlin=nonlin, use_bias=False)
                    _ = O.conv2d('conv3', _, 256, nonlin=nonlin, use_bias=False)
                    _ = O.conv2d('conv4', _, 512, nonlin=nonlin, use_bias=False)
                    z = _
                return z
            def decoder(z):
                # Mirror of the encoder; sigmoid keeps the output in [0, 1].
                w_init = O.truncated_normal_initializer(stddev=0.02)
                with O.argscope(O.conv2d, O.deconv2d, kernel=4, stride=2, W=w_init),\
                        O.argscope(O.fc, W=w_init):
                    _ = z
                    _ = O.deconv2d('deconv1', _, 256, nonlin=O.bn_relu)
                    _ = O.deconv2d('deconv2', _, 128, nonlin=O.bn_relu)
                    _ = O.deconv2d('deconv3', _, 64, nonlin=O.bn_relu)
                    _ = O.deconv2d('deconv4', _, c)
                    _ = O.sigmoid(_, name='out')
                    x = _
                return x
            def generator(x, name, reuse):
                with env.variable_scope(GANGraphKeys.GENERATOR_VARIABLES, reuse=reuse):
                    with env.variable_scope(name):
                        z = encoder(x, nonlin=O.bn_relu)
                        y = decoder(z)
                return y
            def discriminator(x, name, reuse):
                with env.variable_scope(GANGraphKeys.DISCRIMINATOR_VARIABLES, reuse=reuse):
                    with env.variable_scope(name):
                        z = encoder(x, nonlin=bn_leaky_relu)
                        logit = O.fc('fccls', z, 1)
                return logit
            def forward(img_a, img_b):
                # Scale uint8 images into [0, 1].
                img_a /= 255.
                img_b /= 255.
                # Cross-domain translations and cycle reconstructions.
                img_ab = generator(img_a, name='atob', reuse=False)
                img_ba = generator(img_b, name='btoa', reuse=False)
                img_aba = generator(img_ab, name='btoa', reuse=True)
                img_bab = generator(img_ba, name='atob', reuse=True)
                logit_fake_a = discriminator(img_ba, name='a', reuse=False)
                logit_fake_b = discriminator(img_ab, name='b', reuse=False)
                score_fake_a = O.sigmoid(logit_fake_a)
                score_fake_b = O.sigmoid(logit_fake_b)
                for name in ['img_a', 'img_b', 'img_ab', 'img_ba', 'img_aba', 'img_bab', 'score_fake_a', 'score_fake_b']:
                    dpc.add_output(locals()[name], name=name)
                if env.phase is env.Phase.TRAIN:
                    logit_real_a = discriminator(img_a, name='a', reuse=True)
                    logit_real_b = discriminator(img_b, name='b', reuse=True)
                    score_real_a = O.sigmoid(logit_real_a)
                    score_real_b = O.sigmoid(logit_real_b)
                    all_g_loss = 0.
                    all_d_loss = 0.
                    # Weight of the reconstruction term in the generator loss.
                    r_loss_ratio = 0.9
                    for pair_name, (real, fake), (logit_real, logit_fake), (score_real, score_fake) in zip(
                            ['lossa', 'lossb'],
                            [(img_a, img_aba), (img_b, img_bab)],
                            [(logit_real_a, logit_fake_a), (logit_real_b, logit_fake_b)],
                            [(score_real_a, score_fake_a), (score_real_b, score_fake_b)]):
                        with env.name_scope(pair_name):
                            d_loss_real = O.sigmoid_cross_entropy_with_logits(logits=logit_real, labels=O.ones_like(logit_real)).mean(name='d_loss_real')
                            d_loss_fake = O.sigmoid_cross_entropy_with_logits(logits=logit_fake, labels=O.zeros_like(logit_fake)).mean(name='d_loss_fake')
                            g_loss = O.sigmoid_cross_entropy_with_logits(logits=logit_fake, labels=O.ones_like(logit_fake)).mean(name='g_loss')
                            d_acc_real = (score_real > 0.5).astype('float32').mean(name='d_acc_real')
                            d_acc_fake = (score_fake < 0.5).astype('float32').mean(name='d_acc_fake')
                            g_accuracy = (score_fake > 0.5).astype('float32').mean(name='g_accuracy')
                            d_accuracy = O.identity(.5 * (d_acc_real + d_acc_fake), name='d_accuracy')
                            d_loss = O.identity(.5 * (d_loss_real + d_loss_fake), name='d_loss')
                            # r_loss = O.raw_l2_loss('raw_r_loss', real, fake).flatten2().sum(axis=1).mean(name='r_loss')
                            r_loss = O.raw_l2_loss('raw_r_loss', real, fake).mean(name='r_loss')
                            # r_loss = O.raw_cross_entropy_prob('raw_r_loss', real, fake).flatten2().sum(axis=1).mean(name='r_loss')
                            # all_g_loss += g_loss + r_loss
                            all_g_loss += (1 - r_loss_ratio) * g_loss + r_loss_ratio * r_loss
                            all_d_loss += d_loss
                            for v in [d_loss_real, d_loss_fake, g_loss, d_acc_real, d_acc_fake, g_accuracy, d_accuracy, d_loss, r_loss]:
                                # Fixed: raw string for the regex -- '\d' in a
                                # non-raw literal is an invalid escape sequence
                                # (DeprecationWarning, later SyntaxWarning).
                                # [:-2] strips the ':0' tensor-output suffix.
                                dpc.add_output(v, name=re.sub(r'^tower/\d+/', '', v.name)[:-2], reduce_method='sum')
                    dpc.add_output(all_g_loss, name='g_loss', reduce_method='sum')
                    dpc.add_output(all_d_loss, name='d_loss', reduce_method='sum')
            dpc.set_input_maker(inputs).set_forward_func(forward)
        if env.phase is env.Phase.TRAIN:
            for p in ['lossa', 'lossb']:
                for v in ['d_loss_real', 'd_loss_fake', 'd_acc_real', 'd_acc_fake', 'd_accuracy', 'd_loss']:
                    name = p + '/' + v
                    summary.scalar(name, dpc.outputs[name], collections=[GANGraphKeys.DISCRIMINATOR_SUMMARIES])
                for v in ['g_loss', 'g_accuracy', 'r_loss']:
                    name = p + '/' + v
                    summary.scalar(name, dpc.outputs[name], collections=[GANGraphKeys.GENERATOR_SUMMARIES])
            for name in ['img_a', 'img_b', 'img_ab', 'img_ba', 'img_aba', 'img_bab']:
                summary.image(name, dpc.outputs[name], collections=[GANGraphKeys.GENERATOR_SUMMARIES])
        net.add_all_dpc_outputs(dpc)
def make_optimizer(env):
    """Install a shared Adam optimizer (bias lr x2) for both G and D."""
    learning_rate = optimizer.base.make_optimizer_variable(
        'learning_rate', get_env('trainer.learning_rate'))
    opt_wrapper = optimizer.OptimizerWrapper()
    opt_wrapper.set_base_optimizer(
        optimizer.base.AdamOptimizer(learning_rate, beta1=0.5, epsilon=1e-3))
    # Biases ('*/b') train with twice the base learning rate.
    opt_wrapper.append_grad_modifier(optimizer.grad_modifier.LearningRateMultiplier([
        ('*/b', 2.0),
    ]))
    env.set_g_optimizer(opt_wrapper)
    env.set_d_optimizer(opt_wrapper)
from data_provider_discogan import *
def main_train(trainer):
    """Attach training plugins (summaries, progress bar, snapshots) and run."""
    # NOTE: this local import deliberately shadows the module-level `summary`
    # (from tartist.nn) inside this function.
    from tartist.plugins.trainer_enhancer import summary
    summary.enable_summary_history(trainer)
    summary.enable_echo_summary_scalar(trainer)
    from tartist.plugins.trainer_enhancer import progress
    progress.enable_epoch_progress(trainer)
    from tartist.plugins.trainer_enhancer import snapshot
    snapshot.enable_snapshot_saver(trainer)
    trainer.train()
| StarcoderdataPython |
6656598 | import json
import datetime
from copy import deepcopy
from inflection import underscore, pluralize
def set_root_name(object, root_name=None):
    """Return root_name if given, else derive a name from object's type.

    Lists map to the pluralized, underscored name of their first element's
    class; anything else maps to its own underscored class name. Raises
    KeyError when neither root_name nor a truthy object is available.
    """
    if root_name:
        return root_name
    if not object:
        raise KeyError("Cannot automatically determine type, "
                       "please provide 'root_name' explicitly.")
    if type(object) is list:
        return pluralize(underscore(type(object[0]).__name__))
    return underscore(type(object).__name__)
def format_droid_value(droid_value, datetime_format=None):
    """Normalize a single attribute value for serialization.

    dicts become JSON strings; datetimes are formatted when datetime_format
    is truthy (otherwise passed through); all other values pass through
    unchanged. Exact types are checked (subclasses keep the default path).
    """
    kind = type(droid_value)
    if kind is dict:
        return json.dumps(droid_value)
    if kind is datetime.datetime:
        return (droid_value.strftime(datetime_format)
                if datetime_format else droid_value)
    return droid_value
def make_json(root, object, root_name, value):
    """Wrap value under a root key when root is truthy, else return it as-is.

    Lists additionally get a 'total' entry carrying the element count.
    """
    if not root:
        return value
    root_name = set_root_name(object, root_name)
    if type(object) is list:
        return {root_name: value, 'total': len(value)}
    return {root_name: value}
class StarFleetsHelper():
    """Serializes star-fleet model instances into JSON-ready structures."""

    @classmethod
    def serialize(self, star_fleet_instance, *droid_names, **config):
        """Serialize one instance, keeping only the requested droid_names.

        Recognized config keys: datetime_format, root (default True),
        root_name.
        """
        datetime_format = config.get('datetime_format')
        star_fleet_json = {
            droid_name: format_droid_value(
                getattr(star_fleet_instance, droid_name), datetime_format)
            for droid_name in droid_names
            if droid_name and hasattr(star_fleet_instance, droid_name)
        }
        return make_json(config.get('root', True), star_fleet_instance,
                         config.get('root_name'), star_fleet_json)

    @classmethod
    def bulk_serialize(self, star_fleet_instances, *droid_names, **config):
        """Serialize a collection of instances under one shared root."""
        star_fleet_instances = star_fleet_instances or []
        # Per-instance serialization must not add its own root wrapper.
        instance_config = deepcopy(config)
        instance_config['root'] = False
        star_fleets = [
            self.serialize(instance, *droid_names, **instance_config)
            for instance in star_fleet_instances
        ]
        return make_json(config.get('root', True), star_fleet_instances,
                         config.get('root_name'), star_fleets)
| StarcoderdataPython |
3485307 | <filename>meiduo_mall/meiduo_mall/apps/oauth/migrations/0002_remove_oauthqquser_is_delete.py
# Generated by Django 2.1.8 on 2019-05-30 12:51
from django.db import migrations
class Migration(migrations.Migration):
    """Schema migration 0002 for the oauth app."""
    dependencies = [
        ('oauth', '0001_initial'),
    ]
    # NOTE(review): the migration filename mentions removing
    # OAuthQQUser.is_delete, yet the operations list is empty -- presumably
    # the RemoveField was optimized away or reverted before commit; confirm
    # against the current model state.
    operations = [
    ]
| StarcoderdataPython |
8055520 | import numpy as np
import matplotlib.pyplot as plt
import MatplotlibSettings
# Half-width of the inner square in the (x, x') plane.
xi = 0.5
# Outer unit-square/diamond boundary polygon and three auxiliary polygons
# (x3/y3, the central vertical band, is the only one filled below).
x, y = [-1, 1, xi, xi, 1, -1, -xi, -xi, -1], [1, 1, xi, -xi, -1, -1, -xi, xi, 1]
x1, y1 = [-xi, 0, -xi], [xi, 0, -xi]
x2, y2 = [xi, 0, xi], [xi, 0, -xi]
x3, y3 = [-xi, xi, xi, -xi], [1, 1, -1, -1]
# Frame is off, so draw the coordinate axes manually with arrowheads.
plt.axis("off")
plt.arrow(-1.2, 0, 2.3, 0, lw = 2, head_width = 0.05, head_length = 0.04, fc = "k")
plt.arrow(0, -1.2, 0, 2.3, lw = 2, head_width = 0.03, head_length = 0.05, fc = "k")
plt.plot([1, 1], [-0.02, 0.02], lw = 1, c = "k")
plt.plot([-1, -1], [-0.02, 0.02], lw = 1, c = "k")
plt.xlim(-1.2, 1.2)
plt.ylim(-1.2, 1.2)
# Axis and tick labels.
plt.text(0.05, 1.1, r"$x'$")
plt.text(1.1, -0.15, r"$x$")
plt.text(0.98, 0.07, r"$1$")
plt.text(-1.05, 0.07, r"$-1$")
plt.text(-0.06, 0.88, r"$1$")
plt.text(-0.15, -0.98, r"$-1$")
plt.text(xi + 0.02, 0.07, r"$|\xi|$")
plt.text(- xi - 0.22, 0.07, r"$-|\xi|$")
# Region labels A/B/C and their conjugates, one per sub-region.
plt.text(xi+(1-xi-0.33)/2, xi+(1-xi)/2, r"$A$", color = "blue")
plt.text(-xi-(1-xi-0.13)/2, xi+(1-xi)/2, r"$\overline{A}$", color = "blue")
plt.text(xi+(1-xi-0.33)/2, -xi-(1-xi+0.2)/2, r"$\overline{A}$", color = "blue")
plt.text(-xi-(1-xi-0.13)/2, -xi-(1-xi+0.2)/2, r"$A$", color = "blue")
plt.text((xi-0.13)/2, xi+(1-xi-0.33)/2, r"$B$", color = "red")
plt.text(-(xi+0.07)/2, xi+(1-xi-0.33)/2, r"$\overline{B}$", color = "red")
plt.text((xi-0.13)/2, -xi-(1-xi-0.13)/2, r"$\overline{B}$", color = "red")
plt.text(-(xi+0.07)/2, -xi-(1-xi-0.13)/2, r"$B$", color = "red")
plt.text(xi/2, (xi - 0.33)/2, r"$C$", color = "red")
plt.text(-(xi+0.2)/2, (xi - 0.33)/2, r"$\overline{C}$", color = "red")
plt.text(-(xi+0.2)/2, -(xi - 0.13)/2, r"$C$", color = "red")
plt.text(xi/2, -(xi - 0.13)/2, r"$\overline{C}$", color = "red")
# Dashed separatrices: verticals at +-|xi| and the two diagonals.
# (The comprehension variable `y` below does not leak into or clobber the
# outer polygon list `y` under Python 3 comprehension scoping.)
t = np.linspace(-1, 1, 100)
#plt.plot(t, t, "k--", lw = 1.5)
#plt.plot(t, -t, "k--", lw = 1.5)
plt.plot([xi for y in t], t, "k--", lw = 1.5)
plt.plot([-xi for y in t], t, "k--", lw = 1.5)
plt.plot(t, t, "k--", lw = 1.5)
plt.plot(t, -t, "k--", lw = 1.5)
plt.plot(x, y, c = "k", lw = 1.5)
plt.fill(x, y, alpha = 0.3)
#plt.fill(x1, y1, alpha = 0.3, c = "r")
#plt.fill(x2, y2, alpha = 0.3, c = "r")
plt.fill(x3, y3, alpha = 0.3, c = "r")
plt.savefig('GPDIntDomain.pdf')
#plt.show()
| StarcoderdataPython |
6419496 | <reponame>christophesaintjean/IntroProgS1_2020
lettres = 'abcd'

# Unpack the (index, letter) pairs produced by enumerate.
for position, lettre in enumerate(lettres):
    print(position, lettre)

# Without unpacking, each item is the (index, letter) tuple itself.
for paire in enumerate(lettres):
    print(paire)
8158706 | <reponame>steel-a/python-pandas-tutorial<gh_stars>0
def rootPackageFunction():
    """Print a marker identifying the package's root-level function."""
    # Fixed typo in the emitted message: 'rootPachageFunction' -> 'rootPackageFunction'.
    print('This is the rootPackageFunction')
6415872 | from dataclasses import dataclass
from quran.domain.entity import Entity
@dataclass
class Audio(Entity):
    """Audio recording attached to a single Quran ayah."""
    id: str  # unique identifier of this audio record
    ayah_id: str  # id of the ayah this audio belongs to
    ayah_number: int  # ordinal of the ayah (within its surah, presumably -- confirm)
    edition_id: str  # id of the edition (reciter/translation) it comes from
    type: str  # Translation or Arabic
    audio: str  # audio payload reference (URL or file path -- confirm)
| StarcoderdataPython |
3426380 | # SPDX-License-Identifier: MIT
# Copyright © 2020 <NAME>
# ==============================================================================
"""High-level converter functions and CLI entry point"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from typing import Optional
import argparse
import os
import re
import sys
import time
import tensorflow as tf
import tensorflowjs as tfjs
import tfjs_graph_converter.api as api
import tfjs_graph_converter.common as common
import tfjs_graph_converter.version as version
class SplitCommaSeparatedValues(argparse.Action):
    """argparse action that stores a comma-separated option as a list."""
    def __call__(self, parser, namespace, values, option_string=None):
        items = values.split(',')
        setattr(namespace, self.dest, items)
class SplitCommaSeparatedTuples(argparse.Action):
    """argparse action that stores NAME:KEY[,NAME:KEY...] as (name, key) tuples."""
    def __call__(self, parser, namespace, values, option_string=None):
        # Validate the overall shape before splitting into pairs.
        if not re.search('^[^:,]+:[^:,]+(,[^:,]+:[^:,]+)*$', values):
            raise argparse.ArgumentError(self,
                                         'Argument must be comma-separated '
                                         'pairs of name:key')
        mapping = [tuple(pair.split(':')) for pair in values.split(',')]
        setattr(namespace, self.dest, mapping)
def get_arg_parser():
    """Create the argument parser for the converter binary.

    Positional input/output paths are optional at parse time (nargs='?')
    because convert() validates them itself so that --version can run
    without paths.
    """
    parser = argparse.ArgumentParser(
        description='TensorFlow.js Graph Model converter.')
    parser.add_argument(
        common.CLI_INPUT_PATH,
        nargs='?',
        type=str,
        help='Path to the TFJS Graph Model directory containing the '
             'model.json')
    parser.add_argument(
        common.CLI_OUTPUT_PATH,
        nargs='?',
        type=str,
        help=f'For output format "{common.CLI_SAVED_MODEL}", '
             'a SavedModel target directory. '
             f'For output format "{common.CLI_FROZEN_MODEL}", '
             'a frozen model file.'
    )
    parser.add_argument(
        '--' + common.CLI_OUTPUT_FORMAT,
        type=str,
        default=common.CLI_FROZEN_MODEL,
        choices=set([common.CLI_SAVED_MODEL, common.CLI_FROZEN_MODEL]),
        help=f'Output format. Default: "{common.CLI_FROZEN_MODEL}".'
    )
    parser.add_argument(
        '--' + common.CLI_COMPATIBLE,
        '-c',
        dest='compat_mode',
        action='store_true',
        help='Keep the input types compatible with TFJS <=2.4.x'
    )
    # Options below only make sense for SavedModel export; convert() enforces
    # their interdependencies (e.g. --outputs being required).
    group = parser.add_argument_group(f'{common.CLI_SAVED_MODEL} specific',
                                      'Arguments that apply to SavedModel '
                                      'export only.')
    group.add_argument(
        '--' + common.CLI_SAVED_MODEL_TAGS,
        action=SplitCommaSeparatedValues,
        type=str,
        help='Tags of the MetaGraphDef to save, in comma separated string '
             f'format. Defaults to "{tf.saved_model.SERVING}".',
        metavar='TAG[,TAG1[,...]]'
    )
    group.add_argument(
        '--' + common.CLI_OUTPUTS,
        action=SplitCommaSeparatedValues,
        type=str,
        help='Outputs of the model to add to the signature separated by comma',
        metavar='OUTPUT[,OUTPUT1[,...]]'
    )
    group.add_argument(
        '--' + common.CLI_SIGNATURE_KEY,
        type=str,
        help='Specifies the signature key to be used in the MetaGraphDef. '
             f' REQUIRES "--{common.CLI_OUTPUTS}" to be set if specified. '
    )
    group.add_argument(
        '--' + common.CLI_METHOD_NAME,
        type=str,
        help='Specifies the signature method name used in the MetaGraphDef. '
             f' REQUIRES "--{common.CLI_OUTPUTS}" to be set if specified. '
    )
    group.add_argument(
        '--' + common.CLI_RENAME,
        action=SplitCommaSeparatedTuples,
        type=str,
        help='Specifies keys for inputs and outputs in the model signature. '
             'Mappings are specified using name:key with multiple values '
             'separated by comma.'
             f' REQUIRES "--{common.CLI_OUTPUTS}" to be set if specified. ',
        metavar='NAME:KEY[,NAME1:KEY1[,...]]'
    )
    parser.add_argument(
        '--' + common.CLI_VERSION,
        '-v',
        dest='show_version',
        action='store_true',
        help='Show versions of the converter and its dependencies'
    )
    parser.add_argument(
        '--' + common.CLI_SILENT_MODE,
        '-s',
        dest='silence',
        action='store_true',
        help='Suppress any output besides error messages'
    )
    return parser
def _get_signature(namespace: argparse.Namespace) -> Optional[dict]:
return {namespace.signature_key: {
api.SIGNATURE_OUTPUTS: namespace.outputs,
api.SIGNATURE_METHOD: namespace.method_name
}} if namespace.outputs is not None else None
def _get_signature_keys(namespace: argparse.Namespace
                        ) -> Optional[api.RenameMap]:
    """Return the RenameMap for --rename, or None when not requested."""
    if namespace.rename is None:
        return None
    return api.RenameMap(namespace.rename)
def convert(arguments):
    """
    Convert a TensorflowJS-model to a TensorFlow-model.

    Args:
        arguments: List of command-line arguments

    Raises:
        ValueError: missing paths, inconsistent SavedModel options, or an
            unsupported output format.
        api.ModelFormatError: re-raised with input/output paths attached.
    """
    args = get_arg_parser().parse_args(arguments)
    if args.show_version:
        print(f"\ntfjs_graph_converter {version.VERSION}\n")
        print("Dependency versions:")
        print(f"  tensorflow {tf.version.VERSION}")
        print(f"  tensorflowjs {tfjs.__version__}")
        return
    # Local helper: progress output that respects --silent.
    def info(message, end=None):
        if not args.silence:
            print(message, end=end, flush=True)
    if not args.input_path:
        raise ValueError(
            "Missing input_path argument. For usage, use the --help flag.")
    if not args.output_path:
        raise ValueError(
            "Missing output_path argument. For usage, use the --help flag.")
    # The signature-related flags are only meaningful together with --outputs.
    if args.output_format == common.CLI_SAVED_MODEL:
        if args.signature_key is not None and args.outputs is None:
            raise ValueError(f'--{common.CLI_SIGNATURE_KEY} requires '
                             f'--{common.CLI_OUTPUTS} to be specified')
        if args.method_name is not None and args.outputs is None:
            raise ValueError(f'--{common.CLI_METHOD_NAME} requires '
                             f'--{common.CLI_OUTPUTS} to be specified')
        if args.rename is not None and args.outputs is None:
            raise ValueError(f'--{common.CLI_RENAME} requires '
                             f'--{common.CLI_OUTPUTS} to be specified')
    info("TensorFlow.js Graph Model Converter\n")
    info(f"Graph model: {args.input_path}")
    info(f"Output: {args.output_path}")
    info(f"Target format: {args.output_format}")
    info("\nConverting....", end=" ")
    start_time = time.perf_counter()
    try:
        if args.output_format == common.CLI_FROZEN_MODEL:
            api.graph_model_to_frozen_graph(args.input_path, args.output_path,
                                            args.compat_mode)
        elif args.output_format == common.CLI_SAVED_MODEL:
            api.graph_model_to_saved_model(
                args.input_path, args.output_path,
                tags=args.saved_model_tags,
                signature_def_map=_get_signature(args),
                signature_key_map=_get_signature_keys(args),
                compat_mode=args.compat_mode)
        else:
            raise ValueError(
                f"Unsupported output format: {args.output_format}")
    except api.ModelFormatError as ex:
        # Attach the offending paths for a more helpful error message.
        ex.input = args.input_path
        ex.output = args.output_path
        raise ex
    end_time = time.perf_counter()
    info("Done.")
    info(f"Conversion took {end_time-start_time:.3f}s")
def pip_main():
    """Entry point for pip-packaged binary

    Required because the pip-packaged binary calls the entry method
    without arguments
    """
    # NOTE(review): joining argv with spaces loses the boundaries of any
    # argument that itself contains a space; main() re-splits on ' '.
    # Confirm callers never pass paths with spaces, or quote them upstream.
    main([' '.join(sys.argv[1:])])
def main(argv):
    """
    Entry point for debugging and running the script directly

    Args:
        argv: Command-line arguments as a single, space-separated string
    """
    try:
        convert(argv[0].split(' '))
    except ValueError as ex:
        # Show just the message, not a traceback.
        message = ex.args[0] if ex.args else ex
        print(f'Error: {message}')
    except api.ModelFormatError as ex:
        layers_format = tfjs.converters.common.TFJS_LAYERS_MODEL_FORMAT
        if ex.format == layers_format:
            source = ex.input
            target = (ex.output if os.path.isdir(ex.output)
                      else os.path.dirname(ex.output))
            print('Error: The model is a KERAS layers-model.')
            print('This converter only handles GRAPH models.')
            print('You can load and convert Keras models directly ' +
                  '(using Python):\n')
            print('\timport tensorflowjs as tfjs\n')
            print(f'\tmodel = tfjs.converters.load_keras_model("{source}")')
            print(f'\tmodel.save("{target}")')
        elif ex.format:
            print(f'Unknown model format: "{ex.format}"')
        else:
            print('The provided model is not a TensorFlow.js model.')
if __name__ == '__main__':
    # Forward a single space-joined argument string; main() re-splits it.
    tf.compat.v1.app.run(main=main, argv=[' '.join(sys.argv[1:])])
import torch
import torch.nn as nn
import torchvision.models as models
class EncoderCNN(nn.Module):
    """Image encoder: frozen pretrained ResNet-50 plus a trainable projection.

    Maps a batch of images to fixed-size embedding vectors that seed the
    caption decoder.
    """

    def __init__(self, embed_size):
        super(EncoderCNN, self).__init__()
        backbone = models.resnet50(pretrained=True)
        # Freeze the pretrained weights; only the projection layer trains.
        for weight in backbone.parameters():
            weight.requires_grad_(False)
        # Drop the final fc classifier; keep everything up to global pooling.
        self.resnet = nn.Sequential(*list(backbone.children())[:-1])
        self.embed = nn.Linear(backbone.fc.in_features, embed_size)

    def forward(self, images):
        """Return a (batch, embed_size) feature tensor for a batch of images."""
        pooled = self.resnet(images)
        flat = pooled.view(pooled.size(0), -1)
        return self.embed(flat)
class DecoderRNN(nn.Module):
    """LSTM caption decoder.

    Consumes an image feature vector as the first timestep, followed by the
    embedded caption tokens, and predicts the next token at every step.
    """

    def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):
        super(DecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.num_layers = num_layers
        self.embed_size = embed_size
        self.word_embeddings = nn.Embedding(vocab_size, embed_size)
        self.linear = nn.Linear(in_features=hidden_size, out_features=vocab_size)
        self.lstm = nn.LSTM(input_size=embed_size,
                            hidden_size=hidden_size,
                            num_layers=num_layers,
                            batch_first=True)
        # Initialize weights.
        self.init_weights()

    def init_weights(self):
        """Xavier-initialize the embedding and the output projection."""
        torch.nn.init.xavier_uniform_(self.linear.weight)
        torch.nn.init.xavier_uniform_(self.word_embeddings.weight)

    def init_hidden_weights(self, batch_size):
        """Return zeroed (h0, c0) on the model's compute device.

        Bug fix: the first dimension must be num_layers (the previous
        hard-coded 1 broke any model constructed with num_layers > 1).
        """
        device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
        shape = (self.num_layers, batch_size, self.hidden_size)
        return torch.zeros(*shape).to(device), torch.zeros(*shape).to(device)

    def forward(self, features, captions):
        """Teacher-forced decoding.

        Args:
            features: (batch, embed_size) image features.
            captions: (batch, seq_len) token ids; the last token is dropped
                because it is never used as an input.

        Returns:
            (batch, seq_len, vocab_size) unnormalized token scores.
        """
        captions = captions[:, :-1]
        embeds = self.word_embeddings(captions)
        self.batch_size = features.shape[0]
        self.hidden = self.init_hidden_weights(self.batch_size)
        # Prepend the image features as the first timestep of the sequence.
        inputs = torch.cat((features.unsqueeze(1), embeds), dim=1)
        lstm_out, self.hidden = self.lstm(inputs, self.hidden)
        return self.linear(lstm_out)

    def sample(self, inputs, states=None, max_len=20, end_idx=1):
        """Accepts a pre-processed image tensor (inputs) and returns the
        predicted sentence (list of token ids, at most max_len long).

        Args:
            inputs: (1, 1, embed_size) image feature tensor.
            states: optional initial LSTM state.
            max_len: maximum number of tokens to emit.
            end_idx: token id that terminates decoding (default 1, the
                conventional <end> index; now parameterized).
        """
        preds = []
        count = 0
        word_item = None
        while count < max_len and word_item != end_idx:
            # Predict the next token greedily.
            lstm_out, states = self.lstm(inputs, states)
            output = self.linear(lstm_out)
            _, word = output.max(2)
            word_item = word.item()
            preds.append(word_item)
            # Feed the prediction back in as the next input.
            inputs = self.word_embeddings(word)
            count += 1
        return preds
# core/domain/exp_domain.py
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects for an exploration, its states, and their constituents.
Domain objects capture domain-specific logic and are agnostic of how the
objects they represent are stored. All methods and properties in this file
should therefore be independent of the specific storage models used."""
__author__ = '<NAME>'
import copy
import logging
import re
import string
from core.domain import fs_domain
from core.domain import html_cleaner
from core.domain import param_domain
from core.domain import rule_domain
from core.domain import widget_registry
import feconf
import jinja_utils
import utils
class ExplorationChange(object):
    """Domain object class for an exploration change.

    IMPORTANT: Ensure that all changes to this class (and how these cmds are
    interpreted in general) preserve backward-compatibility with the
    exploration snapshots in the datastore. Do not modify the definitions of
    cmd keys that already exist.
    """

    STATE_PROPERTIES = (
        'param_changes', 'content', 'widget_id',
        'widget_customization_args', 'widget_sticky', 'widget_handlers')

    EXPLORATION_PROPERTIES = (
        'title', 'category', 'param_specs', 'param_changes')

    def __init__(self, change_dict):
        """Initializes an ExplorationChange object from a dict.

        change_dict represents a command; it must have a 'cmd' key and one
        or more other keys depending on the value of 'cmd':

        - 'add_state' (with state_name)
        - 'rename_state' (with old_state_name and new_state_name)
        - 'delete_state' (with state_name)
        - 'edit_state_property' (with state_name, property_name, new_value
          and, optionally, old_value)
        - 'edit_exploration_property' (with property_name, new_value and,
          optionally, old_value)

        For a state, property_name must be one of STATE_PROPERTIES. For an
        exploration, property_name must be one of EXPLORATION_PROPERTIES.
        """
        if 'cmd' not in change_dict:
            raise Exception('Invalid change_dict: %s' % change_dict)
        cmd = change_dict['cmd']
        self.cmd = cmd
        # add_state and delete_state carry an identical payload.
        if cmd in ('add_state', 'delete_state'):
            self.state_name = change_dict['state_name']
        elif cmd == 'rename_state':
            self.old_state_name = change_dict['old_state_name']
            self.new_state_name = change_dict['new_state_name']
        elif cmd == 'edit_state_property':
            if change_dict['property_name'] not in self.STATE_PROPERTIES:
                raise Exception('Invalid change_dict: %s' % change_dict)
            self.state_name = change_dict['state_name']
            self.property_name = change_dict['property_name']
            self.new_value = change_dict['new_value']
            self.old_value = change_dict.get('old_value')
        elif cmd == 'edit_exploration_property':
            if (change_dict['property_name'] not in
                    self.EXPLORATION_PROPERTIES):
                raise Exception('Invalid change_dict: %s' % change_dict)
            self.property_name = change_dict['property_name']
            self.new_value = change_dict['new_value']
            self.old_value = change_dict.get('old_value')
        else:
            raise Exception('Invalid change_dict: %s' % change_dict)
class Content(object):
    """Value object representing non-interactive content."""

    def __init__(self, content_type, value=''):
        self.type = content_type
        self.value = value
        self.validate()

    def to_dict(self):
        """Serialize to a plain dict."""
        return {'type': self.type, 'value': self.value}

    @classmethod
    def from_dict(cls, content_dict):
        """Build (and validate) a Content object from its dict form."""
        return cls(content_dict['type'], content_dict['value'])

    def validate(self):
        # TODO(sll): Add HTML sanitization checking.
        if self.type != 'text':
            raise utils.ValidationError('Invalid content type: %s' % self.type)
        if not isinstance(self.value, basestring):
            raise utils.ValidationError(
                'Invalid content value: %s' % self.value)

    def to_html(self, params):
        """Exports this content object to an HTML string.

        The content object is parameterized using the parameters in `params`.
        """
        if not isinstance(params, dict):
            raise Exception(
                'Expected context params for parsing content to be a dict, '
                'received %s' % params)
        return '<div>%s</div>' % jinja_utils.parse_string(self.value, params)
class RuleSpec(object):
    """Value object representing a rule specification."""
    def to_dict(self):
        """Serialize to a dict with JSON-friendly values."""
        return {
            'definition': self.definition,
            'dest': self.dest,
            'feedback': self.feedback,
            'param_changes': [param_change.to_dict()
                              for param_change in self.param_changes]
        }
    @classmethod
    def from_dict(cls, rulespec_dict):
        """Reconstruct a RuleSpec from its to_dict() form."""
        return cls(
            rulespec_dict['definition'],
            rulespec_dict['dest'],
            rulespec_dict['feedback'],
            [param_domain.ParamChange(
                param_change['name'], param_change['generator_id'],
                param_change['customization_args'])
             for param_change in rulespec_dict['param_changes']],
        )
    def __init__(self, definition, dest, feedback, param_changes):
        # A dict specifying the rule definition. E.g.
        #
        # {'rule_type': 'default'}
        #
        # or
        #
        # {
        #     'rule_type': 'atomic',
        #     'name': 'LessThan',
        #     'subject': 'answer',
        #     'inputs': {'x': 5}}
        # }
        #
        self.definition = definition
        # Id of the destination state.
        # TODO(sll): Check that this state is END_DEST or actually exists.
        self.dest = dest
        # Feedback to give the reader if this rule is triggered.
        self.feedback = feedback or []
        # Exploration-level parameter changes to make if this rule is
        # triggered.
        self.param_changes = param_changes or []
    @property
    def is_default(self):
        """Returns True if this spec corresponds to the default rule."""
        return self.definition['rule_type'] == 'default'
    def get_feedback_string(self):
        """Returns a (possibly empty) string with feedback for this rule."""
        return utils.get_random_choice(self.feedback) if self.feedback else ''
    def __str__(self):
        """Returns a string representation of a rule (for the stats log)."""
        if self.definition['rule_type'] == rule_domain.DEFAULT_RULE_TYPE:
            return 'Default'
        else:
            # TODO(sll): Treat non-atomic rules too.
            param_list = [utils.to_ascii(val) for
                          (key, val) in self.definition['inputs'].iteritems()]
            return '%s(%s)' % (self.definition['name'], ','.join(param_list))
    @classmethod
    def get_default_rule_spec(cls, state_name):
        """A default-typed rule spec pointing at state_name, no feedback."""
        return RuleSpec({'rule_type': 'default'}, state_name, [], [])
    def validate(self):
        """Check structural types of this spec's fields.

        Raises:
            utils.ValidationError: on the first structural problem found.
        """
        if not isinstance(self.definition, dict):
            raise utils.ValidationError(
                'Expected rulespec definition to be a dict, received %s'
                % self.definition)
        if not isinstance(self.dest, basestring):
            raise utils.ValidationError(
                'Expected rulespec dest to be a string, received %s'
                % self.dest)
        if not self.dest:
            raise utils.ValidationError(
                'Every rulespec should have a destination.')
        if not isinstance(self.feedback, list):
            raise utils.ValidationError(
                'Expected rulespec feedback to be a list, received %s'
                % self.feedback)
        for feedback_item in self.feedback:
            if not isinstance(feedback_item, basestring):
                raise utils.ValidationError(
                    'Expected rulespec feedback item to be a string, received '
                    '%s' % feedback_item)
        if not isinstance(self.param_changes, list):
            raise utils.ValidationError(
                'Expected rulespec param_changes to be a list, received %s'
                % self.param_changes)
        for param_change in self.param_changes:
            param_change.validate()
    @classmethod
    def validate_rule_definition(cls, rule_definition, exp_param_specs):
        """Recursively validate a rule definition dict against its schema.

        Args:
            rule_definition: the dict to validate (atomic, composite or
                default; composite definitions are validated recursively).
            exp_param_specs: dict of exploration-level param specs, used to
                check that an atomic rule's 'subject' refers to a known
                parameter (or the literal 'answer').

        Raises:
            utils.ValidationError: if the definition does not match the
                schema for its rule_type.
        """
        ATOMIC_RULE_DEFINITION_SCHEMA = [
            ('inputs', dict), ('name', basestring), ('rule_type', basestring),
            ('subject', basestring)]
        COMPOSITE_RULE_DEFINITION_SCHEMA = [
            ('children', list), ('rule_type', basestring)]
        DEFAULT_RULE_DEFINITION_SCHEMA = [('rule_type', basestring)]
        ALLOWED_COMPOSITE_RULE_TYPES = [
            rule_domain.AND_RULE_TYPE, rule_domain.OR_RULE_TYPE,
            rule_domain.NOT_RULE_TYPE]
        if 'rule_type' not in rule_definition:
            raise utils.ValidationError(
                'Rule definition %s contains no rule type.' % rule_definition)
        rule_type = rule_definition['rule_type']
        if rule_type == rule_domain.DEFAULT_RULE_TYPE:
            utils.verify_dict_keys_and_types(
                rule_definition, DEFAULT_RULE_DEFINITION_SCHEMA)
        elif rule_type == rule_domain.ATOMIC_RULE_TYPE:
            utils.verify_dict_keys_and_types(
                rule_definition, ATOMIC_RULE_DEFINITION_SCHEMA)
            if (rule_definition['subject'] not in exp_param_specs
                    and rule_definition['subject'] != 'answer'):
                raise utils.ValidationError(
                    'Unrecognized rule subject: %s' %
                    rule_definition['subject'])
        else:
            if rule_type not in ALLOWED_COMPOSITE_RULE_TYPES:
                raise utils.ValidationError(
                    'Unsupported rule type %s.' % rule_type)
            utils.verify_dict_keys_and_types(
                rule_definition, COMPOSITE_RULE_DEFINITION_SCHEMA)
            for child_rule in rule_definition['children']:
                cls.validate_rule_definition(child_rule, exp_param_specs)
# Module-level default rule spec (destination END) and its string form,
# shared as sentinels by the rest of the codebase.
DEFAULT_RULESPEC = RuleSpec.get_default_rule_spec(feconf.END_DEST)
DEFAULT_RULESPEC_STR = str(DEFAULT_RULESPEC)
class AnswerHandlerInstance(object):
    """Value object for an answer event stream (submit, click, drag, etc.)."""

    def __init__(self, name, rule_specs=None):
        if rule_specs is None:
            rule_specs = []
        self.name = name
        # Defensive copy: rebuild each RuleSpec so callers cannot mutate our
        # rule list through their own references.
        self.rule_specs = [RuleSpec(
            rule_spec.definition, rule_spec.dest, rule_spec.feedback,
            rule_spec.param_changes
        ) for rule_spec in rule_specs]

    def to_dict(self):
        """Serialize to a dict with JSON-friendly values."""
        return {
            'name': self.name,
            'rule_specs': [rule_spec.to_dict()
                           for rule_spec in self.rule_specs]
        }

    @classmethod
    def from_dict(cls, handler_dict):
        """Reconstruct an instance from its to_dict() form."""
        return cls(
            handler_dict['name'],
            [RuleSpec.from_dict(rs) for rs in handler_dict['rule_specs']],
        )

    @property
    def default_rule_spec(self):
        """The default rule spec (by convention, the last rule in the list)."""
        assert self.rule_specs[-1].is_default
        return self.rule_specs[-1]

    @classmethod
    def get_default_handler(cls, state_name):
        """A 'submit' handler containing only the default rule."""
        return cls('submit', [RuleSpec.get_default_rule_spec(state_name)])

    def validate(self):
        """Check the handler name and delegate to each rule spec.

        Raises:
            utils.ValidationError: on any structural problem.
        """
        if self.name != 'submit':
            raise utils.ValidationError(
                'Unexpected answer handler name: %s' % self.name)
        if not isinstance(self.rule_specs, list):
            raise utils.ValidationError(
                'Expected answer handler rule specs to be a list, received %s'
                % self.rule_specs)
        if len(self.rule_specs) < 1:
            # Bug fix: the original message had no %s placeholder, so the
            # '%' formatting itself raised a TypeError instead of the
            # intended ValidationError.
            raise utils.ValidationError(
                'There must be at least one rule spec for each answer '
                'handler. Received: %s' % self.rule_specs)
        for rule_spec in self.rule_specs:
            rule_spec.validate()
class WidgetInstance(object):
    """Value object for a widget instance."""
    def to_dict(self):
        """Serialize to a dict with JSON-friendly values."""
        return {
            'widget_id': self.widget_id,
            'customization_args': self.customization_args,
            'handlers': [handler.to_dict() for handler in self.handlers],
            'sticky': self.sticky
        }
    @classmethod
    def from_dict(cls, widget_dict):
        """Reconstruct a WidgetInstance from its to_dict() form."""
        return cls(
            widget_dict['widget_id'],
            widget_dict['customization_args'],
            [AnswerHandlerInstance.from_dict(h)
             for h in widget_dict['handlers']],
            widget_dict['sticky'],
        )
    def __init__(self, widget_id, customization_args, handlers, sticky=False):
        self.widget_id = widget_id
        # Customization args for the interactive widget view. Parts of these
        # args may be Jinja templates that refer to state parameters.
        self.customization_args = customization_args
        # Answer handlers and rule specs. Rebuilt defensively so callers
        # cannot mutate our handler list through their own references.
        self.handlers = [AnswerHandlerInstance(h.name, h.rule_specs)
                         for h in handlers]
        # If true, keep the widget instance from the previous state if both are
        # of the same type.
        self.sticky = sticky
    def validate(self):
        """Validate the widget id, customization args and handlers.

        Note: this method MUTATES self.customization_args — args not known
        to the registered widget are logged and deleted.

        Raises:
            utils.ValidationError: on any structural problem.
        """
        if not isinstance(self.widget_id, basestring):
            raise utils.ValidationError(
                'Expected widget id to be a string, received %s'
                % self.widget_id)
        try:
            widget = widget_registry.Registry.get_widget_by_id(
                feconf.INTERACTIVE_PREFIX, self.widget_id)
        except KeyError:
            raise utils.ValidationError(
                'Invalid widget name: %s' % self.widget_id)
        widget_customization_arg_names = [wp.name for wp in widget.params]
        if not isinstance(self.customization_args, dict):
            raise utils.ValidationError(
                'Expected widget customization args to be a dict, received %s'
                % self.customization_args)
        # Validate and clean up the customization args. Unknown args are
        # collected first and deleted after the loop, since the dict cannot
        # be mutated while iterating it.
        extra_args = []
        for (arg_name, arg_value) in self.customization_args.iteritems():
            if not isinstance(arg_name, basestring):
                raise utils.ValidationError(
                    'Invalid widget customization arg name: %s' % arg_name)
            if arg_name not in widget_customization_arg_names:
                extra_args.append(arg_name)
                logging.error(
                    'Parameter %s for widget %s is invalid.'
                    % (arg_name, self.widget_id))
        # TODO(sll): Find a way to verify that the arg_values have the
        # correct type. Can we get sample values for the state context
        # parameters?
        for extra_arg in extra_args:
            del self.customization_args[extra_arg]
        # TODO(sll): Shouldn't this be a dict?
        if not isinstance(self.handlers, list):
            raise utils.ValidationError(
                'Expected widget answer handlers to be a list, received %s'
                % self.handlers)
        if len(self.handlers) < 1:
            raise utils.ValidationError(
                'At least one answer handler must be specified for each '
                'state widget instance.')
        for handler in self.handlers:
            handler.validate()
        if not isinstance(self.sticky, bool):
            raise utils.ValidationError(
                'Expected widget sticky flag to be a boolean, received %s'
                % self.sticky)
    @classmethod
    def create_default_widget(cls, default_dest_state_name):
        """A default widget whose single handler points at the given state."""
        return cls(
            feconf.DEFAULT_WIDGET_ID,
            {},
            [AnswerHandlerInstance.get_default_handler(
                default_dest_state_name)]
        )
class State(object):
    """Domain object for a state."""
    def __init__(self, content, param_changes, widget):
        # The content displayed to the reader in this state. Each item is
        # copied into a fresh Content object so callers cannot mutate it.
        self.content = [Content(item.type, item.value) for item in content]
        # Parameter changes associated with this state.
        self.param_changes = [param_domain.ParamChange(
            param_change.name, param_change.generator.id,
            param_change.customization_args)
            for param_change in param_changes]
        # The interactive widget instance associated with this state.
        self.widget = WidgetInstance(
            widget.widget_id, widget.customization_args, widget.handlers,
            widget.sticky)
    def validate(self):
        """Validate content, param changes and the widget instance.

        Raises:
            utils.ValidationError: on any structural problem.
        """
        if not isinstance(self.content, list):
            raise utils.ValidationError(
                'Expected state content to be a list, received %s'
                % self.content)
        if len(self.content) != 1:
            raise utils.ValidationError(
                'The state content list must have exactly one element. '
                'Received %s' % self.content)
        self.content[0].validate()
        if not isinstance(self.param_changes, list):
            raise utils.ValidationError(
                'Expected state param_changes to be a list, received %s'
                % self.param_changes)
        for param_change in self.param_changes:
            param_change.validate()
        self.widget.validate()
    def update_content(self, content_list):
        # TODO(sll): Must sanitize all content in noninteractive widget attrs.
        self.content = [Content.from_dict(content_list[0])]
    def update_param_changes(self, param_change_dicts):
        """Replace this state's param changes from a list of dicts."""
        self.param_changes = [
            param_domain.ParamChange.from_dict(param_change_dict)
            for param_change_dict in param_change_dicts]
    def update_widget_id(self, widget_id):
        """Replace the widget id only; other widget fields are untouched."""
        self.widget.widget_id = widget_id
        # TODO(sll): This should also clear widget.handlers (except for the
        # default rule).
    def update_widget_customization_args(self, widget_customization_args):
        self.widget.customization_args = widget_customization_args
    def update_widget_sticky(self, widget_sticky):
        self.widget.sticky = widget_sticky
    def update_widget_handlers(self, widget_handlers_dict):
        """Rebuild the widget's 'submit' handler from a ruleset dict.

        Expects widget_handlers_dict['submit'] to be a list of rule dicts
        in which only the final rule is a default rule. Feedback strings
        are HTML-cleaned and rule inputs are normalized to the parameter
        types declared by the widget's rule.
        """
        if not isinstance(widget_handlers_dict, dict):
            raise Exception(
                'Expected widget_handlers to be a dictionary, received %s'
                % widget_handlers_dict)
        ruleset = widget_handlers_dict['submit']
        if not isinstance(ruleset, list):
            raise Exception(
                'Expected widget_handlers[submit] to be a list, received %s'
                % ruleset)
        widget_handlers = [AnswerHandlerInstance('submit', [])]
        generic_widget = widget_registry.Registry.get_widget_by_id(
            'interactive', self.widget.widget_id)
        # TODO(yanamal): Do additional calculations here to get the
        # parameter changes, if necessary.
        for rule_ind in range(len(ruleset)):
            rule_dict = ruleset[rule_ind]
            rule_dict['feedback'] = [html_cleaner.clean(feedback)
                                     for feedback in rule_dict['feedback']]
            if 'param_changes' not in rule_dict:
                rule_dict['param_changes'] = []
            rule_spec = RuleSpec.from_dict(rule_dict)
            rule_type = rule_spec.definition['rule_type']
            if rule_ind == len(ruleset) - 1:
                if rule_type != rule_domain.DEFAULT_RULE_TYPE:
                    raise ValueError(
                        'Invalid ruleset %s: the last rule should be a '
                        'default rule' % rule_dict)
            else:
                if rule_type == rule_domain.DEFAULT_RULE_TYPE:
                    raise ValueError(
                        'Invalid ruleset %s: rules other than the '
                        'last one should not be default rules.' % rule_dict)
            # TODO(sll): Generalize this to Boolean combinations of rules.
            # NOTE(review): this lookup reads definition['name'] (and, below,
            # definition['inputs']) for every rule, including the trailing
            # default rule — presumably the frontend always includes those
            # keys even on default rules; confirm against callers.
            matched_rule = generic_widget.get_rule_by_name(
                'submit', rule_spec.definition['name'])
            # Normalize and store the rule params.
            # TODO(sll): Generalize this to Boolean combinations of rules.
            rule_inputs = rule_spec.definition['inputs']
            if not isinstance(rule_inputs, dict):
                raise Exception(
                    'Expected rule_inputs to be a dict, received %s'
                    % rule_inputs)
            for param_name, value in rule_inputs.iteritems():
                param_type = rule_domain.get_obj_type_for_param_name(
                    matched_rule, param_name)
                if (isinstance(value, basestring) and
                        '{{' in value and '}}' in value):
                    # TODO(jacobdavis11): Create checks that all parameters
                    # referred to exist and have the correct types
                    normalized_param = value
                else:
                    try:
                        normalized_param = param_type.normalize(value)
                    except TypeError:
                        raise Exception(
                            '%s has the wrong type. It should be a %s.' %
                            (value, param_type.__name__))
                rule_inputs[param_name] = normalized_param
            widget_handlers[0].rule_specs.append(rule_spec)
        self.widget.handlers = widget_handlers
    def to_dict(self):
        """Serialize to a dict with JSON-friendly values."""
        return {
            'content': [item.to_dict() for item in self.content],
            'param_changes': [param_change.to_dict()
                              for param_change in self.param_changes],
            'widget': self.widget.to_dict()
        }
    @classmethod
    def from_dict(cls, state_dict):
        """Reconstruct a State from its to_dict() form."""
        widget = WidgetInstance.from_dict(state_dict['widget'])
        return cls(
            [Content.from_dict(item) for item in state_dict['content']],
            [param_domain.ParamChange.from_dict(param)
             for param in state_dict['param_changes']],
            widget
        )
    @classmethod
    def create_default_state(cls, default_dest_state_name):
        """An empty-text state whose default widget points at the given state."""
        return cls(
            [Content('text', '')], [],
            WidgetInstance.create_default_widget(default_dest_state_name))
class Exploration(object):
"""Domain object for an Oppia exploration."""
    def __init__(self, exploration_id, title, category, default_skin,
                 init_state_name, states_dict, param_specs_dict,
                 param_changes_list, version):
        """Construct an Exploration from already-serialized pieces.

        states_dict, param_specs_dict and param_changes_list are the dict
        forms produced by the corresponding to_dict() methods; they are
        deserialized into domain objects here.
        """
        self.id = exploration_id
        self.title = title
        self.category = category
        self.default_skin = default_skin
        self.init_state_name = init_state_name
        self.states = {}
        for (state_name, state_dict) in states_dict.iteritems():
            self.states[state_name] = State.from_dict(state_dict)
        self.param_specs = {
            ps_name: param_domain.ParamSpec.from_dict(ps_val)
            for (ps_name, ps_val) in param_specs_dict.iteritems()
        }
        self.param_changes = [
            param_domain.ParamChange.from_dict(param_change_dict)
            for param_change_dict in param_changes_list]
        self.version = version
    @classmethod
    def create_default_exploration(cls, exploration_id, title, category):
        """A version-0 exploration with a single default state and no params."""
        init_state_dict = State.create_default_state(
            feconf.DEFAULT_STATE_NAME).to_dict()
        states_dict = {
            feconf.DEFAULT_STATE_NAME: init_state_dict
        }
        return cls(
            exploration_id, title, category, 'conversation_v1',
            feconf.DEFAULT_STATE_NAME, states_dict, {}, [], 0)
@classmethod
def _require_valid_name(cls, name, name_type):
"""Generic name validation.
Args:
name: the name to validate.
name_type: a human-readable string, like 'the exploration title' or
'a state name'. This will be shown in error messages.
"""
# This check is needed because state names are used in URLs and as ids
# for statistics, so the name length should be bounded above.
if len(name) > 50 or len(name) < 1:
raise utils.ValidationError(
'The length of %s should be between 1 and 50 '
'characters; received %s' % (name_type, name))
if name[0] in string.whitespace or name[-1] in string.whitespace:
raise utils.ValidationError(
'Names should not start or end with whitespace.')
if re.search('\s\s+', name):
raise utils.ValidationError(
'Adjacent whitespace in %s should be collapsed.' % name_type)
for c in feconf.INVALID_NAME_CHARS:
if c in name:
raise utils.ValidationError(
'Invalid character %s in %s: %s' % (c, name_type, name))
    @classmethod
    def _require_valid_state_name(cls, name):
        """Validate a state name; the reserved END name is rejected."""
        cls._require_valid_name(name, 'a state name')
        if name.lower() == feconf.END_DEST.lower():
            raise utils.ValidationError(
                'Invalid state name: %s' % feconf.END_DEST)
def validate(self, strict=False):
"""Validates the exploration before it is committed to storage.
If strict is True, performs advanced validation.
"""
if not isinstance(self.title, basestring):
raise utils.ValidationError(
'Expected title to be a string, received %s' % self.title)
self._require_valid_name(self.title, 'the exploration title')
if not isinstance(self.category, basestring):
raise utils.ValidationError(
'Expected category to be a string, received %s'
% self.category)
self._require_valid_name(self.category, 'the exploration category')
if not isinstance(self.default_skin, basestring):
raise utils.ValidationError(
'Expected default_skin to be a string, received %s (%s).'
% self.default_skin, type(self.default_skin))
# TODO(sll): Check that the skin name corresponds to a valid skin.
if not self.default_skin:
raise utils.ValidationError(
'Expected a default_skin to be specified.')
if not isinstance(self.states, dict):
raise utils.ValidationError(
'Expected states to be a dict, received %s' % self.states)
if not self.states:
raise utils.ValidationError('This exploration has no states.')
for state_name in self.states:
self._require_valid_state_name(state_name)
self.states[state_name].validate()
if not self.init_state_name:
raise utils.ValidationError(
'This exploration has no initial state name specified.')
if self.init_state_name not in self.states:
raise utils.ValidationError(
'There is no state corresponding to the exploration\'s '
'initial state name.')
if not isinstance(self.param_specs, dict):
raise utils.ValidationError(
'Expected param_specs to be a dict, received %s'
% self.param_specs)
for param_name in self.param_specs:
if not isinstance(param_name, basestring):
raise utils.ValidationError(
'Expected parameter name to be a string, received %s (%s).'
% param_name, type(param_name))
if not re.match(feconf.ALPHANUMERIC_REGEX, param_name):
raise utils.ValidationError(
'Only parameter names with characters in [a-zA-Z0-9] are '
'accepted.')
self.param_specs[param_name].validate()
if not isinstance(self.param_changes, list):
raise utils.ValidationError(
'Expected param_changes to be a list, received %s'
% self.param_changes)
for param_change in self.param_changes:
param_change.validate()
if param_change.name not in self.param_specs:
raise utils.ValidationError(
'No parameter named %s exists in this exploration'
% param_change.name)
# TODO(sll): Find a way to verify the param change customization args
# when they depend on exploration/state parameters (e.g. the generated
# values must have the correct obj_type). Can we get sample values for
# the reader's answer and these parameters by looking at states that
# link to this one?
# Check that all state param changes are valid.
for state in self.states.values():
for param_change in state.param_changes:
param_change.validate()
if param_change.name not in self.param_specs:
raise utils.ValidationError(
'The parameter %s was used in a state, but it does '
'not exist in this exploration.' % param_change.name)
# Check that all rule definitions, destinations and param changes are
# valid.
all_state_names = self.states.keys() + [feconf.END_DEST]
for state in self.states.values():
for handler in state.widget.handlers:
for rule_spec in handler.rule_specs:
RuleSpec.validate_rule_definition(
rule_spec.definition, self.param_specs)
if rule_spec.dest not in all_state_names:
raise utils.ValidationError(
'The destination %s is not a valid state.'
% rule_spec.dest)
for param_change in rule_spec.param_changes:
if param_change.name not in self.param_specs:
raise utils.ValidationError(
'The parameter %s was used in a rule, but it '
'does not exist in this exploration'
% param_change.name)
if strict:
warnings_list = []
try:
self._verify_no_self_loops()
except utils.ValidationError as e:
warnings_list.append(unicode(e))
try:
self._verify_all_states_reachable()
except utils.ValidationError as e:
warnings_list.append(unicode(e))
try:
self._verify_no_dead_ends()
except utils.ValidationError as e:
warnings_list.append(unicode(e))
if len(warnings_list) > 0:
warning_str = ''
for ind, warning in enumerate(warnings_list):
warning_str += '%s. %s ' % (ind + 1, warning)
raise utils.ValidationError(
'Please fix the following issues before saving this '
'exploration: %s' % warning_str)
    def _verify_no_self_loops(self):
        """Verify that there are no self-loops.

        Raises:
            utils.ValidationError: if a non-sticky state has a rule that
                loops back to itself with no feedback.
        """
        for (state_name, state) in self.states.iteritems():
            for handler in state.widget.handlers:
                for rule in handler.rule_specs:
                    # Check that there are no feedback-less self-loops.
                    # NB: Sometimes it makes sense for a self-loop to not have
                    # feedback, such as unreachable rules in a ruleset for
                    # multiple-choice questions. This should be handled in the
                    # frontend so that a valid dict with feedback for every
                    # self-loop is always saved to the backend.
                    if (rule.dest == state_name and not rule.feedback
                            and not state.widget.sticky):
                        raise utils.ValidationError(
                            'State "%s" has a self-loop with no feedback. '
                            'This is likely to frustrate the reader.' %
                            state_name)
    def _verify_all_states_reachable(self):
        """Verifies that all states are reachable from the initial state.

        Performs a breadth-first traversal of rule destinations starting at
        the initial state.

        Raises:
            utils.ValidationError: listing every unreachable state.
        """
        # This queue stores state names.
        processed_queue = []
        curr_queue = [self.init_state_name]
        while curr_queue:
            curr_state_name = curr_queue[0]
            curr_queue = curr_queue[1:]
            if curr_state_name in processed_queue:
                continue
            processed_queue.append(curr_state_name)
            curr_state = self.states[curr_state_name]
            for handler in curr_state.widget.handlers:
                for rule in handler.rule_specs:
                    dest_state = rule.dest
                    # END is a pseudo-state and is never enqueued.
                    if (dest_state not in curr_queue and
                            dest_state not in processed_queue and
                            dest_state != feconf.END_DEST):
                        curr_queue.append(dest_state)
        if len(self.states) != len(processed_queue):
            unseen_states = list(
                set(self.states.keys()) - set(processed_queue))
            raise utils.ValidationError(
                'The following states are not reachable from the initial '
                'state: %s' % ', '.join(unseen_states))
    def _verify_no_dead_ends(self):
        """Verifies that the END state is reachable from all states.

        Runs a reverse reachability search: starting from END_DEST, any
        state with a rule leading into the current frontier is added,
        until a fixed point is reached.

        Raises:
            utils.ValidationError: if some state can never reach END.
        """
        # This queue stores state names.
        processed_queue = []
        curr_queue = [feconf.END_DEST]
        while curr_queue:
            curr_state_name = curr_queue[0]
            curr_queue = curr_queue[1:]
            if curr_state_name in processed_queue:
                continue
            # END_DEST is a sentinel, not a real state, so it is excluded
            # from the processed count compared against len(self.states).
            if curr_state_name != feconf.END_DEST:
                processed_queue.append(curr_state_name)
            for (state_name, state) in self.states.iteritems():
                if (state_name not in curr_queue
                        and state_name not in processed_queue):
                    for handler in state.widget.handlers:
                        for rule_spec in handler.rule_specs:
                            if rule_spec.dest == curr_state_name:
                                curr_queue.append(state_name)
                                break
        if len(self.states) != len(processed_queue):
            dead_end_states = list(
                set(self.states.keys()) - set(processed_queue))
            raise utils.ValidationError(
                'The END state is not reachable from the following states: %s'
                % ', '.join(dead_end_states))
    # Derived attributes of an exploration.
    @property
    def init_state(self):
        """The state which forms the start of this exploration."""
        return self.states[self.init_state_name]
    @property
    def param_specs_dict(self):
        """A dict of param specs, each represented as a Python dict."""
        return {ps_name: ps_val.to_dict()
                for (ps_name, ps_val) in self.param_specs.iteritems()}
@property
def param_change_dicts(self):
"""A list of param changes, represented as JSONifiable Python dicts."""
return [param_change.to_dict() for param_change in self.param_changes]
    @classmethod
    def is_demo_exploration_id(cls, exploration_id):
        """Whether the exploration id is that of a demo exploration."""
        # Demo explorations use purely numeric ids that index into
        # feconf.DEMO_EXPLORATIONS.
        return exploration_id.isdigit() and (
            0 <= int(exploration_id) < len(feconf.DEMO_EXPLORATIONS))
    @property
    def is_demo(self):
        """Whether the exploration is one of the demo explorations."""
        return self.is_demo_exploration_id(self.id)
    def update_title(self, title):
        """Sets this exploration's title."""
        self.title = title
    def update_category(self, category):
        """Sets this exploration's category."""
        self.category = category
    def update_param_specs(self, param_specs_dict):
        """Replaces this exploration's param specs.

        Args:
            param_specs_dict: dict mapping param names to ParamSpec dicts.
        """
        self.param_specs = {
            ps_name: param_domain.ParamSpec.from_dict(ps_val)
            for (ps_name, ps_val) in param_specs_dict.iteritems()
        }
    def update_param_changes(self, param_changes_list):
        """Replaces this exploration's param changes.

        Args:
            param_changes_list: ordered list of ParamChange dicts.
        """
        self.param_changes = [
            param_domain.ParamChange.from_dict(param_change)
            for param_change in param_changes_list
        ]
# Methods relating to parameters.
def get_obj_type_for_param(self, param_name):
"""Returns the obj_type for the given parameter."""
try:
return self.param_specs[param_name].obj_type
except:
raise Exception('Exploration %s has no parameter named %s' %
(self.title, param_name))
    def _get_updated_param_dict(self, param_dict, param_changes):
        """Updates a param dict using the given list of param_changes.

        Note that the list of parameter changes is ordered. Parameter
        changes later in the list may depend on parameter changes that have
        been set earlier in the same list.

        Args:
            param_dict: dict of current parameter values (not mutated).
            param_changes: ordered list of ParamChange objects to apply.

        Returns:
            dict. A new dict with all changes applied in sequence.
        """
        # Deep-copy so the caller's dict is never mutated.
        new_param_dict = copy.deepcopy(param_dict)
        for pc in param_changes:
            obj_type = self.get_obj_type_for_param(pc.name)
            new_param_dict[pc.name] = pc.get_normalized_value(
                obj_type, new_param_dict)
        return new_param_dict
    def get_init_params(self):
        """Returns an initial set of exploration parameters for a reader."""
        # Start from an empty context and apply the exploration-level
        # param changes in order.
        return self._get_updated_param_dict({}, self.param_changes)
    def update_with_state_params(self, state_name, param_dict):
        """Updates a reader's params using the params for the given state.

        Args:
            - state_name: str. The name of the state.
            - param_dict: dict. A dict containing parameter names and their
                values. This dict represents the current context which is to
                be updated.

        Returns:
            dict. An updated param dict after the changes in the state's
            param_changes list have been applied in sequence.
        """
        state = self.states[state_name]
        return self._get_updated_param_dict(param_dict, state.param_changes)
# Methods relating to states.
def add_states(self, state_names):
"""Adds multiple states to the exploration."""
for state_name in state_names:
if state_name in self.states:
raise ValueError('Duplicate state name %s' % state_name)
for state_name in state_names:
self.states[state_name] = State.create_default_state(state_name)
    def rename_state(self, old_state_name, new_state_name):
        """Renames the given state.

        Also retargets every rule destination that pointed at the old
        name, and updates init_state_name if the initial state is renamed.

        Raises:
            ValueError: if old_state_name does not exist, or if the new
                name collides with a different existing state.
        """
        if old_state_name not in self.states:
            raise ValueError('State %s does not exist' % old_state_name)
        if (old_state_name != new_state_name and
                new_state_name in self.states):
            raise ValueError('Duplicate state name: %s' % new_state_name)
        # Renaming a state to itself is a no-op.
        if old_state_name == new_state_name:
            return
        self._require_valid_state_name(new_state_name)
        if self.init_state_name == old_state_name:
            self.init_state_name = new_state_name
        self.states[new_state_name] = copy.deepcopy(
            self.states[old_state_name])
        del self.states[old_state_name]
        # Find all destinations in the exploration which equal the renamed
        # state, and change the name appropriately.
        for other_state_name in self.states:
            other_state = self.states[other_state_name]
            for handler in other_state.widget.handlers:
                for rule in handler.rule_specs:
                    if rule.dest == old_state_name:
                        rule.dest = new_state_name
    def delete_state(self, state_name):
        """Deletes the given state.

        Rules that pointed at the deleted state are rewritten to loop back
        to their own containing state, so no dangling destinations remain.

        Raises:
            ValueError: if the state does not exist, or is the initial
                state (which may not be deleted).
        """
        if state_name not in self.states:
            raise ValueError('State %s does not exist' % state_name)
        # Do not allow deletion of initial states.
        if self.init_state_name == state_name:
            raise ValueError('Cannot delete initial state of an exploration.')
        # Find all destinations in the exploration which equal the deleted
        # state, and change them to loop back to their containing state.
        for other_state_name in self.states:
            other_state = self.states[other_state_name]
            for handler in other_state.widget.handlers:
                for rule in handler.rule_specs:
                    if rule.dest == state_name:
                        rule.dest = other_state_name
        del self.states[state_name]
def export_state_to_frontend_dict(self, state_name):
"""Gets a state dict with rule descriptions."""
state_dict = self.states[state_name].to_dict()
for handler in state_dict['widget']['handlers']:
for rule_spec in handler['rule_specs']:
widget = widget_registry.Registry.get_widget_by_id(
feconf.INTERACTIVE_PREFIX,
state_dict['widget']['widget_id']
)
rule_spec['description'] = rule_domain.get_rule_description(
rule_spec['definition'],
self.param_specs,
widget.get_handler_by_name(handler['name']).obj_type
)
return state_dict
    def classify(self, state_name, handler_name, answer, params):
        """Return the first rule that is satisfied by a reader's answer.

        Args:
            state_name: str. The state the reader is in.
            handler_name: str. The answer handler to classify against.
            answer: the reader's raw answer.
            params: dict of current parameter values.

        Returns:
            The first matching rule spec, in ruleset order.

        Raises:
            Exception: if no rule in the handler matches the answer.
        """
        state = self.states[state_name]
        # Get the widget to determine the input type.
        generic_handler = widget_registry.Registry.get_widget_by_id(
            feconf.INTERACTIVE_PREFIX, state.widget.widget_id
        ).get_handler_by_name(handler_name)
        handler = next(
            h for h in state.widget.handlers if h.name == handler_name)
        # Rules may reference uploaded files, so give the evaluator access
        # to this exploration's file system.
        fs = fs_domain.AbstractFileSystem(
            fs_domain.ExplorationFileSystem(self.id))
        for rule_spec in handler.rule_specs:
            if rule_domain.evaluate_rule(
                    rule_spec.definition, self.param_specs,
                    generic_handler.obj_type, params, answer, fs):
                return rule_spec
        raise Exception(
            'No matching rule found for handler %s. Rule specs are %s.' % (
                handler.name,
                [rule_spec.to_dict() for rule_spec in handler.rule_specs]
            )
        )
# The current version of the exploration schema. If any backward-
# incompatible changes are made to the exploration schema in the YAML
# definitions, this version number must be changed and a migration process
# put in place.
CURRENT_EXPLORATION_SCHEMA_VERSION = 2
@classmethod
def _convert_v1_dict_to_v2_dict(cls, exploration_dict):
"""Converts a v1 exploration dict into a v2 exploration dict."""
exploration_dict['schema_version'] = 2
exploration_dict['init_state_name'] = (
exploration_dict['states'][0]['name'])
states_dict = {}
for state in exploration_dict['states']:
states_dict[state['name']] = state
del states_dict[state['name']]['name']
exploration_dict['states'] = states_dict
return exploration_dict
    @classmethod
    def from_yaml(cls, exploration_id, title, category, yaml_content):
        """Creates and returns an exploration from a YAML text string.

        v1 dicts are migrated to v2 before processing. All HTML content
        and feedback strings are sanitized via html_cleaner.

        Raises:
            Exception: if the YAML has no schema version, an unsupported
                schema version, or a state uses an undeclared parameter.
        """
        exploration_dict = utils.dict_from_yaml(yaml_content)
        exploration_schema_version = exploration_dict.get('schema_version')
        if exploration_schema_version is None:
            raise Exception('Invalid YAML file: no schema version specified.')
        if not (1 <= exploration_schema_version
                <= cls.CURRENT_EXPLORATION_SCHEMA_VERSION):
            raise Exception(
                'Sorry, we can only process v1 and v2 YAML files at present.')
        if exploration_schema_version == 1:
            exploration_dict = cls._convert_v1_dict_to_v2_dict(
                exploration_dict)
        exploration = cls.create_default_exploration(
            exploration_id, title, category)
        exploration.param_specs = {
            ps_name: param_domain.ParamSpec.from_dict(ps_val) for
            (ps_name, ps_val) in exploration_dict['param_specs'].iteritems()
        }
        # The default exploration already has an initial state; rename it
        # to match the YAML, then add the remaining states.
        init_state_name = exploration_dict['init_state_name']
        exploration.rename_state(exploration.init_state_name, init_state_name)
        exploration.add_states([
            state_name for state_name in exploration_dict['states']
            if state_name != init_state_name])
        for (state_name, sdict) in exploration_dict['states'].iteritems():
            state = exploration.states[state_name]
            state.content = [
                Content(item['type'], html_cleaner.clean(item['value']))
                for item in sdict['content']
            ]
            state.param_changes = [param_domain.ParamChange(
                pc['name'], pc['generator_id'], pc['customization_args']
            ) for pc in sdict['param_changes']]
            # Every parameter used in a state must be declared up front.
            for pc in state.param_changes:
                if pc.name not in exploration.param_specs:
                    raise Exception('Parameter %s was used in a state but not '
                                    'declared in the exploration param_specs.'
                                    % pc.name)
            wdict = sdict['widget']
            widget_handlers = [AnswerHandlerInstance.from_dict({
                'name': handler['name'],
                'rule_specs': [{
                    'definition': rule_spec['definition'],
                    'dest': rule_spec['dest'],
                    'feedback': [html_cleaner.clean(feedback)
                                 for feedback in rule_spec['feedback']],
                    # param_changes on rules are optional in the YAML.
                    'param_changes': rule_spec.get('param_changes', []),
                } for rule_spec in handler['rule_specs']],
            }) for handler in wdict['handlers']]
            state.widget = WidgetInstance(
                wdict['widget_id'], wdict['customization_args'],
                widget_handlers, wdict['sticky'])
            exploration.states[state_name] = state
        exploration.default_skin = exploration_dict['default_skin']
        exploration.param_changes = [
            param_domain.ParamChange.from_dict(pc)
            for pc in exploration_dict['param_changes']]
        return exploration
    def to_yaml(self):
        """Serializes this exploration to a YAML string (current schema)."""
        return utils.yaml_from_dict({
            'default_skin': self.default_skin,
            'init_state_name': self.init_state_name,
            'param_changes': self.param_change_dicts,
            'param_specs': self.param_specs_dict,
            'states': {state_name: state.to_dict()
                       for (state_name, state) in self.states.iteritems()},
            'schema_version': self.CURRENT_EXPLORATION_SCHEMA_VERSION
        })
def get_interactive_widget_ids(self):
"""Get all interactive widget ids used in this exploration."""
result = set([])
for (state_name, state) in self.states.iteritems():
result.add(state.widget.widget_id)
return list(result)
| StarcoderdataPython |
1667313 | import base64
import json
import logging
import os
import subprocess
import tempfile
import zipfile
from datetime import datetime
import jwt
import requests
from celery import shared_task
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from django.apps import apps
from django.conf import settings
from django.core import files
from django.db.transaction import on_commit
from grandchallenge.algorithms.models import Algorithm
from grandchallenge.codebuild.tasks import create_codebuild_build
from grandchallenge.github.utils import CloneStatusChoices
logger = logging.getLogger(__name__)
def get_repo_url(payload):
    """Return the repo's HTTPS clone URL with an installation token embedded.

    Authenticates as the GitHub App (a JWT signed with the app's RSA
    private key), exchanges the JWT for an installation access token, and
    splices that token into the repository's html_url so `git clone` can
    authenticate.
    """
    installation_id = payload["installation"]["id"]
    b64_key = settings.GITHUB_PRIVATE_KEY_BASE64
    b64_bytes = b64_key.encode("ascii")
    key_bytes = base64.b64decode(b64_bytes)
    # The key is stored unencrypted, so no passphrase is needed. (The
    # original contained a redacted "<PASSWORD>" placeholder here, which
    # is not valid Python.)
    private_key = serialization.load_pem_private_key(
        key_bytes, password=None, backend=default_backend()
    )
    now = datetime.now()
    msg = {
        # Backdate iat by 60s to tolerate clock drift; GitHub caps app
        # JWT lifetime at 10 minutes, so 5 minutes is comfortably valid.
        "iat": int(now.timestamp()) - 60,
        "exp": int(now.timestamp()) + 60 * 5,
        "iss": settings.GITHUB_APP_ID,
    }
    token = jwt.encode(msg, private_key, algorithm="RS256")
    headers = {
        "Authorization": f"Bearer {token}",
        "Accept": "application/vnd.github.v3+json",
    }
    resp = requests.post(
        f"https://api.github.com/app/installations/{installation_id}/access_tokens",
        headers=headers,
        timeout=10,
    )
    access_token = resp.json()["token"]
    repo_url = payload["repository"]["html_url"]
    return repo_url.replace("//", f"//x-access-token:{access_token}@")
def install_lfs():
    """Run `git lfs install`, returning its combined stdout/stderr output."""
    return subprocess.check_output(
        ["git", "lfs", "install"], stderr=subprocess.STDOUT
    )
def fetch_repo(payload, repo_url, tmpdirname, recurse_submodules):
    """Shallow-clone the pushed ref into *tmpdirname*; return git's output.

    Args:
        payload: GitHub webhook payload; payload["ref"] names the branch/tag.
        repo_url: authenticated clone URL (see get_repo_url).
        tmpdirname: destination directory for the clone.
        recurse_submodules: clone submodules too when True.
    """
    cmd = ["git", "clone"]
    if recurse_submodules:
        cmd.append("--recurse-submodules")
    cmd += [
        "--branch",
        payload["ref"],
        "--depth",
        "1",
        repo_url,
        tmpdirname,
    ]
    return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
def check_license(tmpdirname):
    """Detect the repo's license with `licensee`; return the parsed JSON.

    Uses subprocess.run instead of Popen + wait-then-read: calling wait()
    before draining a PIPE can deadlock once the child fills the pipe
    buffer. Exit status is intentionally not checked, matching the
    original behavior (licensee's JSON is parsed regardless).
    """
    result = subprocess.run(
        ["licensee", "detect", tmpdirname, "--json", "--no-remote"],
        stdout=subprocess.PIPE,
    )
    return json.loads(result.stdout.decode("utf-8"))
def save_zipfile(ghwm, tmpdirname):
    """Zip the cloned working tree and return it as a Django File.

    Archive members are stored relative to *tmpdirname*. The temp file
    backing the zip stays open so the returned File remains readable.
    """
    zip_name = f"{ghwm.repo_name}-{ghwm.tag}.zip"
    tmp_zip = tempfile.NamedTemporaryFile()
    with zipfile.ZipFile(tmp_zip.name, "w") as zipf:
        for foldername, _subfolders, filenames in os.walk(tmpdirname):
            for filename in filenames:
                file_path = os.path.join(foldername, filename)
                # relpath is safer than str.replace, which would also
                # mangle any other occurrence of the prefix in the path.
                zipf.write(file_path, os.path.relpath(file_path, tmpdirname))
    temp_file = files.File(tmp_zip, name=zip_name)
    return temp_file
def build_repo(ghwm_pk):
    """Queue a CodeBuild build for the given webhook message.

    Deferred via on_commit so the celery task cannot run before the
    current DB transaction (and the saved zipfile) is committed.
    """
    on_commit(
        lambda: create_codebuild_build.apply_async(kwargs={"pk": ghwm_pk})
    )
@shared_task(**settings.CELERY_TASK_DECORATOR_KWARGS["acks-late-2xlarge"])
def get_zipfile(*, pk):
    """Clone the repo referenced by a GitHubWebhookMessage and archive it.

    Steps: resolve the message, clone the pushed ref (optionally with
    submodules), run a license check, attach a zip of the working tree,
    then queue a CodeBuild build. Failures are recorded on the message
    and re-raised unless flagged as user errors.
    """
    # Late lookup via the app registry avoids circular imports at load time.
    GitHubWebhookMessage = apps.get_model( # noqa: N806
        app_label="github", model_name="GitHubWebhookMessage"
    )
    ghwm = GitHubWebhookMessage.objects.get(pk=pk)
    # Only process messages not yet picked up (idempotency on retries).
    if ghwm.clone_status == CloneStatusChoices.PENDING:
        payload = ghwm.payload
        repo_url = get_repo_url(payload)
        ghwm.clone_status = CloneStatusChoices.STARTED
        ghwm.save()
        # Submodule cloning is opt-in per linked Algorithm.
        try:
            recurse_submodules = Algorithm.objects.get(
                repo_name=ghwm.payload["repository"]["full_name"]
            ).recurse_submodules
        except Algorithm.DoesNotExist:
            recurse_submodules = False
        with tempfile.TemporaryDirectory() as tmpdirname:
            try:
                # Run git lfs install here, doing it in the dockerfile does not
                # seem to work
                install_lfs()
                fetch_repo(payload, repo_url, tmpdirname, recurse_submodules)
                license_check_result = check_license(tmpdirname)
                temp_file = save_zipfile(ghwm, tmpdirname)
                # update GithubWebhook object
                ghwm.zipfile = temp_file
                ghwm.license_check_result = license_check_result
                ghwm.clone_status = CloneStatusChoices.SUCCESS
                ghwm.save()
                build_repo(ghwm.pk)
            except Exception as e:
                # Capture the child process's output (CalledProcessError
                # carries stdout/stderr) for debugging, then re-raise
                # unless the failure was caused by the user's repo.
                ghwm.stdout = str(getattr(e, "stdout", ""))
                ghwm.stderr = str(getattr(e, "stderr", ""))
                ghwm.clone_status = CloneStatusChoices.FAILURE
                ghwm.save()
                if not ghwm.user_error:
                    raise
@shared_task
def unlink_algorithm(*, pk):
    """Clear repo links for all repositories named in an uninstall webhook.

    Runs when the GitHub App loses access to repositories: every Algorithm
    pointing at one of them gets its repo_name blanked.
    """
    GitHubWebhookMessage = apps.get_model( # noqa: N806
        app_label="github", model_name="GitHubWebhookMessage"
    )
    ghwm = GitHubWebhookMessage.objects.get(pk=pk)
    for repo in ghwm.payload["repositories"]:
        Algorithm.objects.filter(repo_name=repo["full_name"]).update(
            repo_name=""
        )
| StarcoderdataPython |
9787827 | <gh_stars>0
import streamlit as st
import json
import requests
import pandas as pd
from pygooglenews import GoogleNews
import nltk
# NOTE(review): appending './nltk.txt' as an NLTK data *path* looks odd --
# data paths are normally directories; confirm this is intentional.
nltk.data.path.append('./nltk.txt')
# VADER lexicon is required by SentimentIntensityAnalyzer below.
nltk.download('vader_lexicon')
import plotly.graph_objects as go
st.set_page_config(page_title="News Classifier by Akash",page_icon='📰',layout="wide")
def segment(df):
    """Build a sentiment pie chart plus positive/negative headline lists.

    Args:
        df: DataFrame with 'headline' and 'label' columns, where label is
            -1 (negative), 0 (neutral) or 1 (positive).

    Returns:
        (plotly figure, positive headlines list, negative headlines list)
    """
    ## Percentage share of each sentiment label in the "label" column ##
    counts = df.label.value_counts(normalize=True) * 100
    ## Convert pandas series to a dataframe ##
    counts = counts.to_frame()
    # Reindex to the fixed order [-1, 0, 1] so the pie values always line
    # up with the ['Negative', 'Neutral', 'Positive'] labels, even when a
    # sentiment class is absent from the data. (The original sort-based
    # approach mislabelled slices whenever a class was missing.)
    counts = counts.reindex([-1, 0, 1], fill_value=0)
    ## Build the pie chart with plotly graph objects ##
    fig = go.Figure(data=[go.Pie(labels=['Negative', 'Neutral', 'Positive'],
                                 values=counts['label'])])
    fig.update_layout(margin=dict(t=0, b=0, l=0, r=0))
    ## Split out the positive and negative headlines ##
    positive = list(df[df['label'] == 1].headline)
    negative = list(df[df['label'] == -1].headline)
    return (fig, positive, negative)
def sentiment(headlines):
    """Score each headline with VADER and bucket into -1/0/+1 labels.

    The compound score (range [-1, 1]) is thresholded at +/-0.17 to decide
    positive/negative; everything in between is neutral. Delegates chart
    and list building to segment().
    """
    from nltk.sentiment.vader import SentimentIntensityAnalyzer as SIA
    sia = SIA()
    results = []
    for line in headlines:
        pol_score = sia.polarity_scores(line)
        pol_score['headline'] = line
        results.append(pol_score)
    df = pd.DataFrame.from_records(results)
    df['label'] = 0
    # +/-0.17 on the compound score is the neutrality band.
    df.loc[df['compound'] > 0.17, 'label'] = 1
    df.loc[df['compound'] < -0.17, 'label'] = -1
    return(segment(df))
def get_news(news_p):
    """Collect headlines about *news_p* from three sources and classify them.

    Sources: pygooglenews (Google News RSS), the GNews API, and NewsAPI;
    the latter two are best-effort. Returns (sentiment(...) tuple,
    country name).
    """
    p=list()
    ## get news from PYGOOGLE ###
    gn = GoogleNews()
    search = gn.search(news_p)
    search = search['entries']
    df_pygoogle = pd.DataFrame(search)
    for i in range(len(df_pygoogle['title'])):
        p.append(df_pygoogle['title'][i])
    ##### GET NEWS FROM GNEWS MODULE ######
    # NOTE(review): the API token is hardcoded in the URL below; it should
    # be loaded from configuration / a secret store instead.
    try:
        url=('https://gnews.io/api/v3/search?country=in&q='+news_p+'&max=100&token=5d0f3e456daf637b39a5a88d09cf32f8')
        response=requests.get(url)
        news=response.text
        jsondata=json.loads(news)
        df=pd.DataFrame(jsondata)
        for i in range(len(df)):
            p.append(df['articles'][i]['description'])
    except:
        # NOTE(review): bare except silently swallows all errors (even
        # KeyboardInterrupt); narrow to requests/json/KeyError if possible.
        pass
    try:
        ######### go to https://newsapi.org/ and grab your api key ##########
        ## q will be provided by user search string ######
        url=('https://newsapi.org/v2/everything?q='+news_p+'&apiKey=<KEY>')
        response = requests.get(url)
        y=response.text
        jsonData = json.loads(y)
        z=jsonData['articles']
        z=pd.DataFrame(z)
        f=z['description']
        f=f.dropna(how='all')
        f=f.reset_index(drop=True)
        for i in range(len(f)):
            p.append(f[i])
    except:
        # NOTE(review): same bare-except concern as above.
        pass
    ## form a list that consists of news extracted from NewsAPI ##
    ## Go to the 'sentiment' function for sentiment classification ##
    #select country from country dropdown
    return(sentiment(p),'India')
# Streamlit page body: take a search query, fetch + classify news, and
# render positive headlines, a sentiment pie chart, and negative headlines.
input_from_user = st.text_input("Enter your Search")
if st.button('Enter'):
    x=get_news(input_from_user)
    # NOTE(review): debug print left in; remove or switch to logging.
    print(x)
    ## x is ((figure, positive news, negative news), country name) ##
    fig=x[0][0]
    pos=x[0][1]
    neg=x[0][2]
    country_name=x[1]
    st.header('The Positive news about ' + input_from_user +' are')
    for i in pos:
        st.text(i)
    st.header('Pie Chart Visualisation based on query search')
    st.plotly_chart(fig)
    st.header('The Negative news about ' + input_from_user +' are')
    for i in neg:
        st.text(i)
| StarcoderdataPython |
1999610 | # Generated by Django 3.2.4 on 2021-07-05 15:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: order trainers by recency and tighten the code and
    team field definitions. Do not hand-edit applied migrations."""
    dependencies = [
        ('pokefriend', '0003_alter_trainer_last_modified'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='trainer',
            options={'ordering': ['-last_modified']},
        ),
        migrations.AlterField(
            model_name='trainer',
            name='code',
            field=models.CharField(max_length=14),
        ),
        migrations.AlterField(
            model_name='trainer',
            name='team',
            field=models.CharField(choices=[('ANY', 'Any'), ('MYSTIC', 'Mystic'), ('VALOR', 'Valor'), ('INSTINCT', 'Instinct')], default='ANY', max_length=10),
        ),
    ]
| StarcoderdataPython |
5086263 | import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import preprocessing
matplotlib.use("Agg")
import datetime
from finrl.apps import config
from finrl.neo_finrl.preprocessor.yahoodownloader import YahooDownloader
from finrl.neo_finrl.preprocessor.preprocessors import FeatureEngineer, data_split
from finrl.neo_finrl.env_stock_trading.env_stocktrading import StockTradingEnv
from finrl.drl_agents.stablebaselines3.models import DRLAgent
from finrl.plot import backtest_stats, backtest_plot, get_daily_return, get_baseline
import itertools
def train_stock_trading():
    """
    End-to-end FinRL pipeline: download DOW-30 data from Yahoo, engineer
    features, train an SAC agent, run an out-of-sample trade, and write
    account values / actions / backtest stats under the results dir.
    """
    print("==============Start Fetching Data===========")
    df = YahooDownloader(
        start_date=config.START_DATE,
        end_date=config.END_DATE,
        ticker_list=config.DOW_30_TICKER,
    ).fetch_data()
    print("==============Start Feature Engineering===========")
    fe = FeatureEngineer(
        use_technical_indicator=True,
        tech_indicator_list=config.TECHNICAL_INDICATORS_LIST,
        use_turbulence=True,
        user_defined_feature=False,
    )
    processed = fe.preprocess_data(df)
    # Expand to the full (date x ticker) grid so every ticker has a row
    # for every trading date, filling gaps with 0.
    list_ticker = processed["tic"].unique().tolist()
    list_date = list(pd.date_range(processed['date'].min(),processed['date'].max()).astype(str))
    combination = list(itertools.product(list_date,list_ticker))
    processed_full = pd.DataFrame(combination,columns=["date","tic"]).merge(processed,on=["date","tic"],how="left")
    processed_full = processed_full[processed_full['date'].isin(processed['date'])]
    processed_full = processed_full.sort_values(['date','tic'])
    processed_full = processed_full.fillna(0)
    # Training & Trading data split
    train = data_split(processed_full, config.START_DATE, config.START_TRADE_DATE)
    trade = data_split(processed_full, config.START_TRADE_DATE, config.END_DATE)
    # calculate state action space:
    # cash + (price, holdings) per stock + indicators per stock
    stock_dimension = len(train.tic.unique())
    state_space = (
        1
        + 2 * stock_dimension
        + len(config.TECHNICAL_INDICATORS_LIST) * stock_dimension
    )
    env_kwargs = {
        "hmax": 100,
        "initial_amount": 1000000,
        "buy_cost_pct": 0.001,
        "sell_cost_pct": 0.001,
        "state_space": state_space,
        "stock_dim": stock_dimension,
        "tech_indicator_list": config.TECHNICAL_INDICATORS_LIST,
        "action_space": stock_dimension,
        "reward_scaling": 1e-4
    }
    e_train_gym = StockTradingEnv(df=train, **env_kwargs)
    env_train, _ = e_train_gym.get_sb_env()
    agent = DRLAgent(env=env_train)
    print("==============Model Training===========")
    now = datetime.datetime.now().strftime("%Y%m%d-%Hh%M")
    model_sac = agent.get_model("sac")
    trained_sac = agent.train_model(
        model=model_sac, tb_log_name="sac", total_timesteps=80000
    )
    print("==============Start Trading===========")
    # turbulence_threshold caps trading during volatile market regimes.
    e_trade_gym = StockTradingEnv(df=trade, turbulence_threshold=250, **env_kwargs)
    df_account_value, df_actions = DRLAgent.DRL_prediction(
        model=trained_sac, environment = e_trade_gym
    )
    df_account_value.to_csv(
        "./" + config.RESULTS_DIR + "/df_account_value_" + now + ".csv"
    )
    df_actions.to_csv("./" + config.RESULTS_DIR + "/df_actions_" + now + ".csv")
    print("==============Get Backtest Results===========")
    perf_stats_all = backtest_stats(df_account_value)
    perf_stats_all = pd.DataFrame(perf_stats_all)
    perf_stats_all.to_csv("./" + config.RESULTS_DIR + "/perf_stats_all_" + now + ".csv")
#def train_portfolio_allocation():
| StarcoderdataPython |
6571269 |
# =========================================
# IMPORT / EXPORTs
# --------------------------------------
import rootpath
# Make the project root importable before project-local imports.
rootpath.append()
# NOTE(review): wildcard import obscures which names this module uses;
# prefer importing the specific collections members needed.
from collections import *
| StarcoderdataPython |
1726766 | from base import BaseDataLoader
import data_loader.polyphonic_dataloader as poly
from data_loader.seq_util import seq_collate_fn
class PolyMusicDataLoader(BaseDataLoader):
    """Data loader for polyphonic-music sequence datasets.

    Currently supports only the JSB chorales dataset ('jsb'), which comes
    pre-split into train/valid/test.
    """
    def __init__(self,
                 batch_size,
                 data_dir='jsb',
                 split='train',
                 shuffle=True,
                 collate_fn=seq_collate_fn,
                 num_workers=1):
        assert data_dir in ['jsb']
        assert split in ['train', 'valid', 'test']
        if data_dir == 'jsb':
            self.dataset = poly.PolyDataset(poly.JSB_CHORALES, split)
        self.data_dir = data_dir
        self.split = split
        # Pass the caller-supplied collate_fn through (the original
        # ignored this parameter and always used seq_collate_fn; default
        # behavior is unchanged). 0.0 is the validation split -- the
        # dataset is already pre-split.
        super().__init__(self.dataset,
                         batch_size,
                         shuffle,
                         0.0,
                         num_workers,
                         collate_fn)
| StarcoderdataPython |
4903794 | import os
import json
# Channel name -> per-channel settings, flattened from the JSON list of
# single-entry dicts stored in the LEDGER_CHANNELS env var.
LEDGER_CHANNELS = {
    channel: settings
    for channels in json.loads(os.getenv('LEDGER_CHANNELS'))
    for channel, settings in channels.items()
}
# Identity of this node on the Hyperledger Fabric network.
LEDGER_MSP_ID = os.getenv('LEDGER_MSP_ID')
LEDGER_USER_NAME = os.getenv('LEDGER_USER_NAME')
# Peer endpoint; int()/json.loads above fail fast at import time if a
# required env var is missing -- these settings are mandatory.
LEDGER_PEER_HOST = os.getenv('LEDGER_PEER_HOST')
LEDGER_PEER_PORT = int(os.getenv('LEDGER_PEER_PORT'))
LEDGER_PEER_NAME = 'peer'
# TLS / MSP material is mounted into the container at fixed paths.
LEDGER_PEER_TLS_CA_CERTS = '/var/hyperledger/ca/cacert.pem'
LEDGER_PEER_TLS_CLIENT_KEY = '/var/hyperledger/tls/client/pair/tls.key'
LEDGER_PEER_TLS_CLIENT_CERT = '/var/hyperledger/tls/client/pair/tls.crt'
LEDGER_CLIENT_STATE_STORE = '/var/substra/hfc-cvs'
LEDGER_CLIENT_KEY_PATH = '/var/hyperledger/msp/keystore/*'
LEDGER_CLIENT_CERT_PATH = '/var/hyperledger/msp/signcerts/cert.pem'
LEDGER_SYNC_ENABLED = True
LEDGER_CALL_RETRY = True
LEDGER_WAIT_FOR_EVENT_TIMEOUT_SECONDS = int(os.getenv('LEDGER_WAIT_FOR_EVENT_TIMEOUT_SECONDS'))
LEDGER_INVOKE_STRATEGY = os.getenv('LEDGER_INVOKE_STRATEGY')
LEDGER_QUERY_STRATEGY = os.getenv('LEDGER_QUERY_STRATEGY')
# gRPC tuning: -1 disables the message size limits; keepalive settings
# keep long-lived peer connections from being dropped by intermediaries.
LEDGER_GRPC_MAX_SEND_MESSAGE_LENGTH = -1
LEDGER_GRPC_MAX_RECEIVE_MESSAGE_LENGTH = -1
LEDGER_GRPC_KEEPALIVE_TIMEOUT_MS = 20000
LEDGER_GRPC_HTTP2_MAX_PINGS_WITHOUT_DATA = 0
LEDGER_GRPC_KEEPALIVE_PERMIT_WITHOUT_CALLS = 1
LEDGER_GRPC_KEEPALIVE_TIME_MS = int(os.getenv('LEDGER_GRPC_KEEPALIVE_TIME_MS'))
LEDGER_GRPC_HTTP2_MIN_TIME_BETWEEN_PINGS_MS = int(os.getenv('LEDGER_GRPC_HTTP2_MIN_TIME_BETWEEN_PINGS_MS'))
| StarcoderdataPython |
4816022 | <gh_stars>1-10
"""
Data and methods to retrieve app specific configuration
"""
import json
import requests
# Friendly name -> Chromecast receiver app id. "HOME" is the idle/backdrop
# receiver; some apps use plain string ids, others registered UUID-style ids.
APP_ID = {
    "HOME": "00000000-0000-0000-0000-000000000000",
    "YOUTUBE": "YouTube",
    "NETFLIX": "Netflix",
    "TICTACTOE": "TicTacToe",
    "GOOGLE_MUSIC": "GoogleMusic",
    "PLAY_MOVIES": "PlayMovies",
    "HULU_PLUS": "Hulu_Plus",
    "HBO": "HBO_App",
    "PANDORA": "Pandora_App",
    "REDBULLTV": "edaded98-5119-4c8a-afc1-de722da03562",
    "VIKI": "1812335e-441c-4e1e-a61a-312ca1ead90e",
    "PLEX_QA": "06ee44ee-e7e3-4249-83b6-f5d0b6f07f34",
    "PLEX": "06ee44ee-e7e3-4249-83b6-f5d0b6f07f34_1",
    "VEVO": "2be788b9-b7e0-4743-9069-ea876d97ac20",
    "AVIA": "aa35235e-a960-4402-a87e-807ae8b2ac79",
    "REVISION3": "Revision3_App",
    "SONGZA": "Songza_App",
    "REALPLAYER_CLOUD": "a7f3283b-8034-4506-83e8-4e79ab1ad794_2",
    "BEYONDPOD": "18a8aeaa-8e3d-4c24-b05d-da68394a3476_1",
    "WASHINGTON_POST": "Post_TV_App",
    "DEFAULT_MEDIA_RECEIVER": "CC1AD845",
}
def get_possible_app_ids():
    """Returns all possible app ids, or [] if the request/parse fails."""
    try:
        req = requests.get(
            "https://clients3.google.com/cast/chromecast/device/baseconfig")
        # Mirror get_app_config: treat non-200 responses as "no data"
        # instead of attempting to parse an error body.
        if req.status_code != 200:
            return []
        # The payload carries a 4-character anti-JSON-hijacking prefix.
        data = json.loads(req.text[4:])
        return [app['app_id'] for app in data['applications']] + \
            data["enabled_app_ids"]
    except ValueError:
        # If json fails to parse
        return []
def get_app_config(app_id):
    """Get specific configuration for 'app_id'; {} on failure."""
    url = ("https://clients3.google.com/"
           "cast/chromecast/device/app?a={}").format(app_id)
    try:
        req = requests.get(url)
        if req.status_code != 200:
            return {}
        # Strip the 4-character anti-JSON-hijacking prefix before parsing.
        return json.loads(req.text[4:])
    except ValueError:
        # Malformed JSON payload.
        return {}
| StarcoderdataPython |
3472488 | #! /usr/bin/env python
import time
from context import RRT, utils
from RRT import RRT
import utils
from utils import adjustable_random_sampler as sampler
from utils import los_optimizer as path_optimizer
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # List of obstacles, each a list of polygon vertex points
    general_obstacle_list = [
        [ (8, 5), (7, 8), (2, 9), (3, 5) ],
        [ (3, 3), (3, 5), (5, 5), (5, 3) ],
    ]
    obstacle_list = general_obstacle_list
    # Instantiate rrt planner object
    my_tree = RRT(sample_area=(-5, 15), sampler=sampler, expand_dis=0.1)
    # Plan a path from (1, 1) to (10, 10) while timing
    print('\n ' + '-'*30 + "\n> Starting operation ...\n " + '-'*30 + '\n')
    start_time = time.time()
    path, node_list = my_tree((1, 1), (10, 10), obstacle_list)
    print("Path planned.")
    print('\n ' + '-'*30 + "\n> Time taken: {:.4} seconds.\n ".format(time.time() - start_time) + '-'*30 + '\n')
    # Visualize tree
    RRT.visualize_tree(node_list, obstacle_list)
    # Testing line-of-sight path optimizer (shortcuts redundant waypoints)
    print('\n ' + '-'*30 + "\n> Starting operation ...\n " + '-'*30 + '\n')
    start_time = time.time()
    optimized_path = path_optimizer(path, obstacle_list)
    print("Path optimized.")
    print('\n ' + '-'*30 + "\n> Time taken: {:.4} seconds.\n ".format(time.time() - start_time) + '-'*30 + '\n')
    # Visualize path
    utils.visualize_path(optimized_path, obstacle_list)
| StarcoderdataPython |
1637266 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 17 16:06:20 2018
@author: adam.rees
"""
import unittest
import DBEISCarbonFactors
class test_InstanceNames(unittest.TestCase):
    """Validates CarbonFactors construction across supported years/file
    formats, rejection of invalid years, and urlCheck's bad-URL handling."""
    def setUp(self):
        # No shared fixtures required.
        pass
    def test_new_flat_class_creation(self):
        try:
            # Create a class for a year which has a flat file format.
            DBEISCarbonFactors.CarbonFactors(2018)
        except Exception as e:
            self.fail("\nCannot create CarbonFactors instance with "
                      f"modern flat file format. Error: {e}")
    def test_old_flat_class_creation(self):
        try:
            # Create a class for a year which has an older flat file format.
            DBEISCarbonFactors.CarbonFactors(2014)
        except Exception as e:
            self.fail("\nCannot create CarbonFactors instance with "
                      f"older flat file format. Error: {e}")
    def test_non_flat_file_class_creation(self):
        # Now we are going to create a class with an advanced file format,
        # which is unsupported and must raise.
        with self.assertRaises(ValueError):
            DBEISCarbonFactors.CarbonFactors(2012)
    def test_letters_for_instance_name(self):
        with self.assertRaises(ValueError):
            DBEISCarbonFactors.CarbonFactors("xxxyyyxxxyyyxxy")
    def test_old_year(self):
        with self.assertRaises(ValueError):
            DBEISCarbonFactors.CarbonFactors(1960)
    def test_future_year(self):
        with self.assertRaises(ValueError):
            DBEISCarbonFactors.CarbonFactors(2050)
    def test_urlCheckResponse(self):
        pageurl = "http://google.co.uk/dfhsdkfnksldf"
        try:
            # NOTE(review): urlCheck is called unbound with the TestCase
            # instance as 'self'; confirm urlCheck never uses self.
            response = DBEISCarbonFactors.CarbonFactors.urlCheck(self, pageurl)
            if response is False:
                pass
            elif response is True:
                self.fail("\nReturned a True value")
        except Exception as e:
            self.fail("\nCannot handle response other than 200. "
                      f"Error: {e}")
if __name__=='__main__':
unittest.main() | StarcoderdataPython |
3518760 | <reponame>LC231/csws-week1<gh_stars>0
age = 12

# Pick the admission price from the first matching age bracket
# (under 4: free, under 18: $25, adults: $40).
if age >= 18:
    price = 40
elif age >= 4:
    price = 25
else:
    price = 0

print(f"Your admission cost is ${price}.")
1852727 | <reponame>kafi2016/chainer-kafi<filename>chainer/functions/evaluation/accuracy.py
import numpy
import six
from chainer import cuda
from chainer import function
from chainer.utils import type_check
class Accuracy(function.Function):
    """Forward-only function computing classification accuracy.

    Optionally excludes occurrences of ignore_label from both numerator
    and denominator.
    """
    def __init__(self, ignore_label=None):
        # Label value to exclude from the computation, or None for none.
        self.ignore_label = ignore_label
    def check_type_forward(self, in_types):
        type_check.expect(in_types.size() == 2)
        x_type, t_type = in_types
        # Scores must be floating point; labels must be int32.
        type_check.expect(
            x_type.dtype.kind == 'f',
            t_type.dtype == numpy.int32
        )
        t_ndim = t_type.ndim.eval()
        # x is (batch, classes, *t.shape[1:]); any extra trailing axes of
        # x beyond the label shape must be singleton.
        type_check.expect(
            x_type.ndim >= t_type.ndim,
            x_type.shape[0] == t_type.shape[0],
            x_type.shape[2: t_ndim + 1] == t_type.shape[1:]
        )
        for i in six.moves.range(t_ndim + 1, x_type.ndim.eval()):
            type_check.expect(x_type.shape[i] == 1)
    def forward(self, inputs):
        # Works for both numpy and cupy inputs.
        xp = cuda.get_array_module(*inputs)
        y, t = inputs
        if self.ignore_label is not None:
            mask = (t == self.ignore_label)
            ignore_cnt = mask.sum()
            # will always be true when the true label is ignore_label
            # TODO(henry0312)
            # If cupy.where returns indexes, we could make the code better.
            # Also, we would need Advanced Indexing.
            pred = xp.where(mask, self.ignore_label,
                            y.argmax(axis=1).reshape(t.shape))
            count = (pred == t).sum() - ignore_cnt
            total = t.size - ignore_cnt
            if total == 0:
                # Every label was ignored: define accuracy as 0.
                return xp.asarray(0.0, dtype=y.dtype),
            else:
                return xp.asarray(float(count) / total, dtype=y.dtype),
        else:
            pred = y.argmax(axis=1).reshape(t.shape)
            return xp.asarray((pred == t).mean(dtype=y.dtype)),
def accuracy(y, t, ignore_label=None):
    """Computes multiclass classification accuracy of the minibatch.

    Args:
        y (Variable): Variable holding a matrix whose (i, j)-th element
            indicates the score of the class j at the i-th example.
        t (Variable): Variable holding an int32 vector of ground truth labels.
        ignore_label (int or None): Skip calculating accuracy
            if the true label is ``ignore_label``.

    Returns:
        Variable: A variable holding a scalar array of the accuracy.

    .. note:: This function is non-differentiable.

    """
    return Accuracy(ignore_label=ignore_label)(y, t)
| StarcoderdataPython |
4947356 |
import configparser
import json
import os
import pprint
import pytest
from src.common import kubeclient
# (name, default value, conversion factory) triples; each becomes an
# EnvConfig attribute, overridable via the upper-cased env var of the
# same name.
env_config_data = [
    ("wait_for_res_to_appear_num_attempts", 15, int),
    ("wait_for_res_to_appear_interval_secs", 1, int),
    ("wait_for_pod_to_be_done_num_attempts", 100, int),
    ("wait_for_pod_to_be_done_interval_secs", 3, int)
]
# This class encapsulates all the parameters that can be controlled
# using env variables.
class EnvConfig:
    """For each entry in env_config_data, reads the upper-cased env var of
    the same name (falling back to the default) and stores the converted
    value as an instance attribute."""

    def __init__(self):
        for name, default_val, factory in env_config_data:
            raw = os.environ.get(name.upper(), default_val)
            setattr(self, name, factory(raw))
class GlobalConfig:
    """Aggregates session-wide test configuration: env-driven settings,
    pytest.ini, the optional testenv.json, and the kubedr API helpers."""

    def __init__(self, envconfig):
        self.envconfig = envconfig
        # NOTE(review): "<PASSWORD>" looks like a scrubbed placeholder for the
        # restic repository password — confirm the real value source.
        self.restic_password = "<PASSWORD>"
        self.rootdir = os.environ['TESTS_ROOTDIR']
        iniconfig = configparser.ConfigParser()
        # pytest has a way of finding the path of "pytest.ini" using "config"
        # object but it is not very well documented. So for now, directly
        # construct the path.
        iniconfig.read(os.path.join(self.rootdir, "pytest.ini"))
        self.iniconfig = iniconfig
        self.configdir = os.path.join(self.rootdir, "config")
        # Optional per-environment overrides; stays None when the file is absent.
        self.testenv = None
        testenv_f = os.path.join(self.configdir, "testenv.json")
        if os.path.exists(testenv_f):
            self.testenv = json.load(open(testenv_f))
        self._init_apis()

    def _init_apis(self):
        # One API helper per kubedr resource kind, all scoped to the operator
        # namespace; PersistentVolumes are cluster-scoped and take no namespace.
        self.namespace = "kubedr-system"
        self.pod_api = kubeclient.PodAPI(self.namespace)
        self.backuploc_api = kubeclient.BackupLocationAPI(self.namespace)
        self.mbp_api = kubeclient.MetadataBackupPolicyAPI(self.namespace)
        self.mr_api = kubeclient.MetadataRestoreAPI(self.namespace)
        self.secret_api = kubeclient.SecretAPI(self.namespace)
        self.pvc_api = kubeclient.PersistentVolumeClaimAPI(self.namespace)
        self.pv_api = kubeclient.PersistentVolumeAPI()
# This is being set as a global variable so that library code
# such as "kubeclient" can easily access the configuration set
# through env variables (it is read at import time).
envconfig = EnvConfig()
@pytest.fixture(scope = "session")
def globalconfig():
    """Session-scoped fixture: initialise the kube client once, log the
    effective env config, and expose the shared GlobalConfig instance."""
    kubeclient.init()
    pprint.pprint(envconfig.__dict__)
    return GlobalConfig(envconfig)
| StarcoderdataPython |
6433585 | <gh_stars>1-10
#!/usr/bin/python
##
## THmain.py
##
## Written by <NAME> - January 2016
##
## The main function for the CL version of TourneyHelper
from THAssistant import THAssistant
def main():
    """Entry point (Python 2): show the menu and dispatch to the chosen tool."""
    print "Welcome to TourneyHelper"
    print "What would you like to do?"
    print "\t1 - Calculate Prize Pool Distribution"
    print "\t2 - Calculate Blinds Structure"
    print "\t3 - Initial Stack Size Calculator"
    print "\t4 - Initial Chip Distribution"
    command = int(raw_input())
    # Dispatch table; raises KeyError for any input other than 1-4.
    {1:ppDist, 2:blindsStruct, 3:stackSize, 4:chipDist}[command]()
def ppDist():
    """Prompt for tournament parameters and print the prize pool split."""
    players = int(raw_input("How many players are there: "))
    prizepool = int(raw_input("What is the prizepool: "))
    paidPos = int(raw_input("How many paid positions would you like: "))
    print "What distribution would you like?"
    print "\t1 - Uniform"
    print "\t2 - Geometric"
    print "\t3 - Log-Normal"
    print "\t4 - Exponential"
    distType = int(raw_input())
    director = THAssistant(players, prizepool, paidPos)
    total = 0
    print ""
    print "Prize Pool Distribution:"
    # Positions are printed 1-based; the running total is a sanity check
    # that the distribution sums back to the prize pool.
    for e, pos in enumerate(director.prizeDist({1:"uniform", 2:"geometric", 3:"lognormal", 4:"exponential"}[distType])):
        print e+1, ":", pos
        total += pos
    print "Total:", total
def blindsStruct():
    """Prompt for stack/duration and print the suggested blinds structure."""
    startingStack = int(raw_input("What is your starting stack: "))
    hours = int(raw_input("How long do you want the game to go for in hours? Enter 0 if unsure: "))
    director = THAssistant()
    print ""
    print "Blinds Structure"
    # structure: ordered blind levels; period: minutes per level.
    structure, period = director.blindsStructure(startingStack, hours)
    for e, blinds in enumerate(structure):
        print e+1, ":", blinds
    print "Each blind period should go for", period, "minutes."
def stackSize():
    """Prompt for big blind and chip values; print the suggested stack size."""
    bigBlind = int(raw_input("What is the initial big blind: "))
    chipValueArr = map(int, raw_input("What are your chip values? Separated by spaces: ").split(" "))
    director = THAssistant()
    print ""
    print "Initial Stack Size Calculator"
    stackSize = director.stackCount(bigBlind, chipValueArr)
    print "With a starting BB of", bigBlind, "the stack should be", stackSize
def chipDist():
    """Prompt for stack size and chip values; print chips per denomination."""
    stackSize = int(raw_input("What is the stack size: "))
    chipValueArr = map(int, raw_input("What are your chip values? Separated by spaces: ").split(" "))
    director = THAssistant()
    print ""
    print "Initial Chip Distribution"
    for (val, count) in director.chipDist(stackSize, chipValueArr):
        print val, ":", count
if __name__ == "__main__": main() | StarcoderdataPython |
108040 | <filename>modules/pretrain_options.py
from collections import OrderedDict
# Hyper-parameters and paths for pre-training RT-MDNet.
pretrain_opts = OrderedDict()
pretrain_opts['use_gpu'] = True
# Initial backbone weights (ImageNet VGG-M .mat) and output checkpoint path.
pretrain_opts['init_model_path'] = './models/imagenet-vgg-m.mat'
pretrain_opts['model_path'] = './models/rt_mdnet.pth'
pretrain_opts['log_dir'] = './log'
# Mini-batch composition: frames per batch, positive / negative samples.
pretrain_opts['batch_frames'] = 8
pretrain_opts['batch_pos'] = 64
pretrain_opts['batch_neg'] = 196
# Overlap ranges for labelling sampled boxes as positive / negative
# (presumably IoU with the ground-truth box — confirm against the sampler).
pretrain_opts['overlap_pos'] = [0.7, 1]
pretrain_opts['overlap_neg'] = [0, 0.5]
pretrain_opts['img_size'] = 107
# Optimiser settings.
pretrain_opts['lr'] = 0.0001
pretrain_opts['w_decay'] = 0.0005
pretrain_opts['momentum'] = 0.9
pretrain_opts['grad_clip'] = 10
# Layers to fine-tune, with a per-layer learning-rate multiplier.
pretrain_opts['ft_layers'] = ['conv', 'fc']
pretrain_opts['lr_mult'] = {'fc': 1}
pretrain_opts['n_cycles'] = 1000
##################################### from RCNN #############################################
pretrain_opts['padding'] = 1.2
pretrain_opts['padding_ratio'] = 5.
pretrain_opts['padded_img_size'] = pretrain_opts['img_size'] * int(pretrain_opts['padding_ratio'])
pretrain_opts['frame_interval'] = 2
| StarcoderdataPython |
1763998 | <reponame>BeryJu/passbook
# Generated by Django 3.1 on 2020-08-23 22:46
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional TLS certificate reference to ProxyProvider."""

    dependencies = [
        ("authentik_crypto", "0002_create_self_signed_kp"),
        ("authentik_providers_proxy", "0002_proxyprovider_cookie_secret"),
    ]

    operations = [
        migrations.AddField(
            model_name="proxyprovider",
            name="certificate",
            field=models.ForeignKey(
                null=True,
                # Keep the provider row when its keypair is deleted.
                on_delete=django.db.models.deletion.SET_NULL,
                to="authentik_crypto.certificatekeypair",
            ),
        ),
    ]
| StarcoderdataPython |
6457356 | <filename>mypyapp/imagemanager.py
import os
import torch
from flask import current_app
from PIL import Image
from torchvision import models, transforms
from werkzeug.utils import secure_filename
from . import db
from .models import TrainingFile
class ImageManager:
    """Stores uploaded image files and classifies them with a pretrained AlexNet."""

    def __init__(self, file, label):
        # file: the uploaded file object (werkzeug FileStorage-like);
        # label: the training label supplied with the upload.
        self.file = file
        self.label = label

    def store_image(self):
        """Save the upload under UPLOAD_FOLDER and record a TrainingFile row.

        Returns:
            The primary key of the newly created TrainingFile.
        """
        filename = secure_filename(self.file.filename)
        path = os.path.join(current_app.config["UPLOAD_FOLDER"], filename)
        self.file.save(path)
        tf = TrainingFile(filePath=path, label=self.label)
        db.session.add(tf)
        db.session.commit()
        return tf.id

    def predict(self):
        """Classify the uploaded image and return the top-1 class label.

        Loads class names from the file at IMAGE_CLASSES_URI (one per line).
        """
        image = Image.open(self.file)
        transformed_image = transform(image)
        batch_t = torch.unsqueeze(transformed_image, 0)
        alexnet = models.alexnet(pretrained=True)
        alexnet.eval()
        out = alexnet(batch_t)
        with open(current_app.config["IMAGE_CLASSES_URI"]) as f:
            classes = [line.strip() for line in f.readlines()]
        _, indices = torch.sort(out, descending=True)
        # Fix: removed a softmax/top-5 computation whose result was built and
        # then discarded — it had no effect on the returned value.
        return classes[indices[0][0]]
# Standard torchvision ImageNet preprocessing: resize, centre-crop to the
# 224x224 input expected by the pretrained models, convert to a tensor, and
# normalise with the documented ImageNet channel mean/std.
transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]
    )])
| StarcoderdataPython |
4920663 | # Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from itertools import chain
import re
from alembic import op
from oslo.serialization import jsonutils
import six
import sqlalchemy as sa
from sqlalchemy.sql import text
from nailgun import consts
from nailgun.settings import settings
def upgrade_enum(table, column_name, enum_name, old_options, new_options):
    """Replace a PostgreSQL ENUM type in place.

    Creates a temporary ENUM with the new options, converts the column to it
    via text, drops the old type, recreates it with the new options, converts
    back, and drops the temporary type.
    """
    old_type = sa.Enum(*old_options, name=enum_name)
    new_type = sa.Enum(*new_options, name=enum_name)
    tmp_type = sa.Enum(*new_options, name="_" + enum_name)
    # Create a temporary type, convert and drop the "old" type
    tmp_type.create(op.get_bind(), checkfirst=False)
    op.execute(
        u'ALTER TABLE {0} ALTER COLUMN {1} TYPE _{2}'
        u' USING {1}::text::_{2}'.format(
            table,
            column_name,
            enum_name
        )
    )
    old_type.drop(op.get_bind(), checkfirst=False)
    # Create and convert to the "new" type
    new_type.create(op.get_bind(), checkfirst=False)
    op.execute(
        u'ALTER TABLE {0} ALTER COLUMN {1} TYPE {2}'
        u' USING {1}::text::{2}'.format(
            table,
            column_name,
            enum_name
        )
    )
    tmp_type.drop(op.get_bind(), checkfirst=False)
def drop_enum(name):
    """Drop the PostgreSQL ENUM type called *name*."""
    op.execute(
        u'DROP TYPE {0}'.format(name)
    )
def convert_condition_value(val):
    """Render a condition operand as an expression literal: strings are
    single-quoted, everything else is stringified and lower-cased."""
    if not isinstance(val, six.string_types):
        return str(val).lower()
    return "'{0}'".format(val)
def negate_condition(condition):
    """Wrap *condition* in a logical negation."""
    return "not (" + condition + ")"
def remove_question_operator(expression):
    """Strip the '?' operator (deprecated in 6.0) from attribute references
    such as ``cluster:mode?``."""
    pattern = re.compile(r'(:[\w\.\-]+)\?')
    return pattern.sub(r'\1', expression)
def upgrade_release_attributes_50_to_51(attrs_meta):
    """Convert legacy 'depends'/'conflicts' attribute entries into the 5.1
    'restrictions' expression list (mutates and returns *attrs_meta*).

    NOTE: ``cond.keys()[0]`` is Python 2 only (dict views are not indexable
    in Python 3).
    """
    if not attrs_meta.get('editable'):
        return attrs_meta
    def depends_to_restrictions(depends, restrictions):
        # A dependency {expr: value} becomes the restriction "expr != value".
        for cond in depends:
            expr = cond.keys()[0]
            restrictions.append(
                expr + " != " + convert_condition_value(cond[expr]))
    def conflicts_to_restrictions(conflicts, restrictions):
        # A conflict {expr: value} becomes the restriction "expr == value".
        for cond in conflicts:
            expr = cond.keys()[0]
            restrictions.append(
                expr + " == " + convert_condition_value(cond[expr]))
    for _, group in six.iteritems(attrs_meta.get('editable')):
        for _, attr in six.iteritems(group):
            restrictions = []
            if attr.get('depends'):
                depends_to_restrictions(attr['depends'], restrictions)
                attr.pop('depends')
            if attr.get('conflicts'):
                conflicts_to_restrictions(attr['conflicts'], restrictions)
                attr.pop('conflicts')
            if restrictions:
                attr['restrictions'] = restrictions
    return attrs_meta
def upgrade_release_attributes_51_to_60(attrs_meta):
    """Remove the deprecated '?' operator from every restriction expression
    in the editable attributes (mutates and returns *attrs_meta*).

    NOTE: uses ``basestring`` — Python 2 only.
    """
    if not attrs_meta.get('editable'):
        return attrs_meta
    def convert_restrictions(restrictions):
        # Restrictions are either bare expression strings or dicts with a
        # 'condition' key; both forms are cleaned.
        result = []
        for restriction in restrictions:
            if isinstance(restriction, basestring):
                restriction = remove_question_operator(restriction)
            else:
                restriction['condition'] = remove_question_operator(
                    restriction['condition'])
            result.append(restriction)
        return result
    for _, group in six.iteritems(attrs_meta.get('editable')):
        for _, attr in six.iteritems(group):
            if 'restrictions' in attr:
                attr['restrictions'] = convert_restrictions(
                    attr['restrictions'])
            if 'values' in attr:
                for value in attr['values']:
                    if 'restrictions' in value:
                        value['restrictions'] = convert_restrictions(
                            value['restrictions'])
    return attrs_meta
def upgrade_release_roles_50_to_51(roles_meta):
    """Normalize role 'depends' conditions from dict form ({expr: value})
    to expression strings (mutates and returns *roles_meta*).

    NOTE: ``cond.keys()[0]`` is Python 2 only.
    """
    for _, role in six.iteritems(roles_meta):
        if role.get('depends'):
            for depend in role['depends']:
                cond = depend.get('condition')
                if isinstance(cond, dict):
                    expr = cond.keys()[0]
                    depend['condition'] = \
                        expr + " == " + convert_condition_value(cond[expr])
    return roles_meta
def upgrade_release_roles_51_to_60(roles_meta, add_meta=None):
    """Convert all role_metadata.depends values into
    roles_metadata.restrictions.

    Each dependency condition is negated (a dependency becomes a restriction
    on its absence) and optional 'warning' text becomes the restriction
    message. Extra per-role metadata from *add_meta* is merged in last.
    """
    add_meta = add_meta or {}
    for role_name, role in six.iteritems(roles_meta):
        for depend in role.get('depends', []):
            cond = depend.get('condition')
            new_restriction = {
                'condition': remove_question_operator(negate_condition(cond))
            }
            if 'warning' in depend:
                new_restriction['message'] = depend['warning']
            role.setdefault('restrictions', [])
            role['restrictions'].append(new_restriction)
        if 'depends' in role:
            del role['depends']
        if role_name in add_meta:
            role.update(add_meta[role_name])
    return roles_meta
def upgrade_clusters_replaced_info(connection):
    """Move per-node replaced provisioning/deployment info from the cluster
    row down to the individual node rows.

    For every cluster: each node gets the subset of the cluster's serialized
    info whose 'uid' matches that node; the cluster keeps only the
    provisioning info without its 'nodes' section.
    """
    select = text(
        """SELECT id, replaced_provisioning_info, replaced_deployment_info
        FROM clusters""")
    clusters = connection.execute(select)
    for cluster in clusters:
        nodes_select = text(
            """SELECT id FROM nodes WHERE cluster_id=:id""")
        nodes = connection.execute(
            nodes_select,
            id=cluster[0])
        provisioning_info = jsonutils.loads(cluster[1])
        deployment_nodes = jsonutils.loads(cluster[2])
        # 'nodes' is removed from the cluster-level info and redistributed.
        provisioning_nodes = provisioning_info.pop('nodes', [])
        for node in nodes:
            node_deploy = [d for d in deployment_nodes
                           if d['uid'] == str(node[0])]
            node_provision = next((d for d in provisioning_nodes
                                   if d['uid'] == str(node[0])), {})
            update_node = text(
                """UPDATE nodes
                SET replaced_deployment_info = :deploy,
                replaced_provisioning_info = :provision
                WHERE id = :id""")
            connection.execute(
                update_node,
                deploy=jsonutils.dumps(node_deploy),
                provision=jsonutils.dumps(node_provision),
                id=node[0])
        update_cluster = text(
            """UPDATE clusters
            SET replaced_deployment_info = :deploy,
            replaced_provisioning_info = :provision
            WHERE id = :id""")
        connection.execute(
            update_cluster,
            deploy=jsonutils.dumps({}),
            provision=jsonutils.dumps(provisioning_info),
            id=cluster[0])
def upgrade_release_set_deployable_false(connection, versions):
    """Set deployable=False for a given versions list.

    :param connection: a database connection
    :param versions: a list of release versions to mark as non-deployable
    """
    update_query = text(
        "UPDATE releases SET is_deployable = 'false' "
        " WHERE version IN :versions")
    # IN-clause expansion needs a tuple, not a list.
    connection.execute(update_query, versions=tuple(versions))
def upgrade_release_fill_orchestrator_data(connection, versions):
    """Fill release_orchestrator_data if it's not filled yet.

    :param connection: a database connection
    :param versions: release version patterns (SQL LIKE) to backfill
    """
    for version in versions:
        # Only releases matching the version pattern that have no
        # orchestrator data row yet.
        select_query = text(
            "SELECT id, operating_system FROM releases "
            " WHERE version LIKE :version AND id NOT IN ("
            " SELECT release_id FROM release_orchestrator_data "
            " )")
        releases = connection.execute(select_query, version=version)
        for release in releases:
            insert_query = text(
                "INSERT INTO release_orchestrator_data ("
                " release_id, repo_metadata, puppet_manifests_source, "
                " puppet_modules_source)"
                " VALUES ("
                " :release_id, "
                " :repo_metadata, "
                " :puppet_manifests_source, "
                " :puppet_modules_source)")
            # if release_orchestrator_data isn't filled then releases'
            # repos stores in unversioned directory with "fuelweb" word
            repo_path = 'http://{MASTER_IP}:8080/{OS}/fuelweb/x86_64'.format(
                MASTER_IP=settings.MASTER_IP, OS=release[1].lower())
            # for ubuntu we need to add 'trusty main'
            if release[1].lower() == 'ubuntu':
                repo_path += ' trusty main'
            connection.execute(
                insert_query,
                release_id=release[0],
                repo_metadata=(
                    '{{ "nailgun": "{0}" }}'.format(repo_path)),
                puppet_manifests_source=(
                    'rsync://{MASTER_IP}:/puppet/manifests/'.format(
                        MASTER_IP=settings.MASTER_IP)),
                puppet_modules_source=(
                    'rsync://{MASTER_IP}:/puppet/modules/'.format(
                        MASTER_IP=settings.MASTER_IP)),
            )
def move_orchestrator_data_to_attributes(connection):
    """Moving data from orchestrator data db table to cluster attributes.

    For each release_orchestrator_data row, builds a 'repo_setup' section
    (from the repo metadata, OS-dependent format) and a 'puppet' section,
    then merges them into the release's attributes_metadata and into every
    cluster's attributes for that release.

    :param connection: a database connection
    """
    select_query = text(
        "SELECT "
        "id, "
        "release_id, "
        "repo_metadata, "
        "puppet_manifests_source, "
        "puppet_modules_source "
        "FROM release_orchestrator_data")
    for odata in connection.execute(select_query):
        select_query = text(
            "SELECT id, attributes_metadata, operating_system "
            " FROM releases WHERE id = :release_id")
        for release in connection.execute(select_query, release_id=odata[1]):
            repo_setup = {
                'metadata': {
                    # old releases shouldn't be able to edit
                    # repos
                    'restrictions': [{
                        'condition': 'true',
                        'action': 'hide',
                    }],
                    'label': 'Repositories',
                    'weight': 50,
                },
                'repos': {
                    'type': 'custom_repo_configuration',
                    'value': [],
                }}
            puppet = {
                'manifests': odata[3],
                'modules': odata[4],
            }
            # Repo metadata format differs per OS: deb repos are
            # "<uri> <suite> <section>" strings, rpm repos are bare URIs.
            if release[2].lower() == 'ubuntu':
                for name, repo in six.iteritems(jsonutils.loads(odata[2])):
                    uri, suite, section = repo.split()
                    repo_setup['repos']['value'].append({
                        'type': 'deb',
                        'name': name,
                        'uri': uri,
                        'suite': suite,
                        'section': section,
                        'priority': 1001
                    })
            elif release[2].lower() == 'centos':
                for name, repo in six.iteritems(jsonutils.loads(odata[2])):
                    repo_setup['repos']['value'].append({
                        'type': 'rpm',
                        'name': name,
                        'uri': repo,
                        'priority': 1
                    })
            # update releases
            attributes_metadata = jsonutils.loads(release[1])
            attributes_metadata['editable'].update({'repo_setup': repo_setup})
            attributes_metadata['generated'].update({'puppet': puppet})
            update_query = text(
                "UPDATE releases "
                " SET attributes_metadata = :attributes_metadata "
                " WHERE id = :release_id")
            connection.execute(
                update_query,
                attributes_metadata=jsonutils.dumps(attributes_metadata),
                release_id=odata[1])
            # update cluster attributes
            select_query = text(
                "SELECT a.id, a.editable, a.generated "
                " FROM attributes as a INNER JOIN clusters as c "
                " ON a.cluster_id = c.id "
                " WHERE c.release_id = :release_id")
            for attr in connection.execute(select_query, release_id=odata[1]):
                editable = jsonutils.loads(attr[1])
                generated = jsonutils.loads(attr[2])
                editable.update({'repo_setup': repo_setup})
                generated.update({'puppet': puppet})
                connection.execute(
                    text(
                        "UPDATE attributes "
                        " SET editable = :editable, generated = :generated "
                        " WHERE id = :attr_id"),
                    editable=jsonutils.dumps(editable),
                    generated=jsonutils.dumps(generated),
                    attr_id=attr[0])
def upgrade_attributes_metadata_6_0_to_6_1(attributes_meta):
    """Update editable attributes for 6.1: new Cinder LVM description and a
    hidden 'use_vcenter' flag (mutates and returns *attributes_meta*)."""
    editable = attributes_meta['editable']
    editable['storage']['volumes_lvm']['description'] = (
        'It is recommended to have at least one Storage - Cinder LVM node.')
    editable['common']['use_vcenter'] = {
        "value": False,
        "weight": 30,
        "type": "hidden",
    }
    return attributes_meta
def upgrade_master_node_settings_6_0_to_6_1(master_node_settings):
    """Hide the legacy statistics fields (clearing their regex/restrictions)
    and add the 'tracking' credentials section (mutates and returns the
    settings dict)."""
    statistics = master_node_settings['statistics']
    for field in ('name', 'email', 'company'):
        statistics[field]['type'] = 'hidden'
        statistics[field]['regex'] = {}
        statistics[field]['restrictions'] = {}
    master_node_settings['tracking'] = {
        "email": {
            "type": "text",
            "value": "",
            "label": "Mirantis Account Email",
            "weight": 10,
            "regex": {
                "source": "^\\S+@\\S+\.\\S+$",
                "error": "Invalid email"
            }
        },
        "password": {
            "type": "password",
            "value": "",
            "label": "Password",
            "weight": 20,
            "regex": {
                "source": "\\S",
                "error": "Password cannot be empty"
            }
        }
    }
    return master_node_settings
def upgrade_role_limits_6_0_to_6_1(roles_meta, _limits_to_update):
    """Overwrite each role's 'limits' with the replacement from
    *_limits_to_update*, when one is provided (mutates and returns
    *roles_meta*)."""
    for role_name in roles_meta:
        if role_name in _limits_to_update:
            roles_meta[role_name]['limits'] = _limits_to_update[role_name]
    return roles_meta
def upgrade_role_restrictions_6_0_to_6_1(roles_meta, _new_role_restrictions):
    """Overwrite each role's 'restrictions' with the replacement from
    *_new_role_restrictions*, when one is provided (mutates and returns
    *roles_meta*)."""
    for role_name in roles_meta:
        if role_name in _new_role_restrictions:
            roles_meta[role_name]['restrictions'] = _new_role_restrictions[role_name]
    return roles_meta
def upgrade_vip_types_6_0_to_6_1(connection):
    """Tag all node-less VIP addresses with the default 'haproxy' vip_type."""
    update_query_node_null = text(
        "UPDATE ip_addrs SET vip_type = :haproxy WHERE node IS NULL")
    connection.execute(update_query_node_null,
                       haproxy=consts.NETWORK_VIP_TYPES.haproxy)
def downgrade_vip_types_6_1_to_6_0(connection):
    """Drop node-less VIP addresses of any type other than 'haproxy'."""
    delete_query = text(
        "DELETE FROM ip_addrs WHERE vip_type != :haproxy AND node IS NULL")
    connection.execute(delete_query, haproxy=consts.NETWORK_VIP_TYPES.haproxy)
def upgrade_6_0_to_6_1_plugins_cluster_attrs_use_ids_mapping(connection):
    """In Fuel 6.0 we had plugin version in cluster attributes
    to identify which plugin should be enabled or disabled.
    In 6.1 release we have plugins updates feature, it means
    that a single plugin can be updated/overwritten with newer
    version. For example 1.0.0 can be replaced with 1.0.1.
    As result we cannot rely on versions anymore, here we
    convert version mapping to plugin ids.
    See blueprint:
    https://blueprints.launchpad.net/fuel/+spec/plugins-security-fixes-delivery
    """
    select_attrs = text("""SELECT id, editable FROM attributes""")
    select_plugins = text(
        """SELECT id FROM plugins
        WHERE name = :plugin_name AND
        version = :plugin_version""")
    update_attrs = text(
        """UPDATE attributes
        SET editable = :editable
        WHERE id = :id""")
    attrs_list = connection.execute(select_attrs)
    for raw_attrs in attrs_list:
        attr_id = raw_attrs[0]
        attrs = jsonutils.loads(raw_attrs[1])
        # Attribute groups are keyed by plugin name; only those carrying a
        # 'plugin_version' in their metadata belong to plugins.
        for key, attr in six.iteritems(attrs):
            metadata = attr.get('metadata', {})
            plugin_version = metadata.get('plugin_version')
            if not plugin_version:
                continue
            plugin_name = key
            # If there is no plugin with such version
            # and name, it means that something was wrong
            # and somebody deleted the plugin from database
            # we must not fail migration in this case
            plugin_id = None
            plugins = list(connection.execute(
                select_plugins,
                plugin_name=plugin_name,
                plugin_version=plugin_version))
            if plugins:
                plugin_id = plugins[0][0]
            del attr['metadata']['plugin_version']
            attr['metadata']['plugin_id'] = plugin_id
        connection.execute(
            update_attrs,
            editable=jsonutils.dumps(attrs),
            id=attr_id)
def upgrade_networks_metadata_to_6_1(networks_meta, _bonding_metadata):
    """Attach bonding metadata and convert each network's legacy 'assign_vip'
    flag into an explicit 'vips' list (mutates and returns *networks_meta*)."""
    networks_meta['bonding'] = _bonding_metadata
    nets = [k for k, v in six.iteritems(networks_meta) if v.get('networks')]
    for network in chain(*[networks_meta[net]['networks'] for net in nets]):
        # create_default_vips mutates the dict in place; the rebinding of the
        # loop variable has no additional effect.
        network = create_default_vips(network)
    return networks_meta
def upgrade_network_groups_metadata_6_0_to_6_1(connection):
    """Rewrite every network group's serialized meta, converting the legacy
    'assign_vip' flag into an explicit default 'vips' list."""
    select_query = text("SELECT id, meta FROM network_groups")
    update_query = text("UPDATE network_groups SET meta = :meta "
                        "WHERE id = :id")
    net_groups = connection.execute(select_query)
    for ng_id, ng_meta in net_groups:
        updated_meta = create_default_vips(jsonutils.loads(ng_meta))
        connection.execute(
            update_query,
            id=ng_id,
            meta=jsonutils.dumps(updated_meta)
        )
def create_default_vips(network):
    """Replace the legacy 'assign_vip' flag with an explicit 'vips' list
    (default haproxy VIP); mutates and returns *network*."""
    if "assign_vip" not in network:
        return network
    if network.pop("assign_vip"):
        network["vips"] = [consts.NETWORK_VIP_TYPES.haproxy]
    return network
def upgrade_ubuntu_cobbler_profile_6_0_to_6_1(connection):
    """Switch cobbler profiles from Ubuntu 12.04 to 14.04 in both cluster
    attributes and release attributes_metadata."""
    select_query = text("SELECT id, generated FROM attributes")
    update_query = text(
        "UPDATE attributes SET generated = :generated WHERE id = :attr_id")
    for attr_id, generated in connection.execute(select_query):
        attrs = jsonutils.loads(generated)
        if attrs['cobbler']['profile'] == 'ubuntu_1204_x86_64':
            attrs['cobbler']['profile'] = 'ubuntu_1404_x86_64'
        connection.execute(
            update_query,
            generated=jsonutils.dumps(attrs),
            attr_id=attr_id)
    select_query = text("SELECT id, attributes_metadata FROM releases")
    update_query = text(
        "UPDATE releases SET attributes_metadata = :attrs_meta"
        " WHERE id = :release_id")
    for release_id, attributes_metadata in connection.execute(select_query):
        attrs = jsonutils.loads(attributes_metadata)
        if attrs['generated']['cobbler']['profile']['generator_arg'] == \
                'ubuntu_1204_x86_64':
            attrs['generated']['cobbler']['profile']['generator_arg'] = \
                'ubuntu_1404_x86_64'
        connection.execute(
            update_query,
            attrs_meta=jsonutils.dumps(attrs),
            release_id=release_id)
def upgrade_cluster_attributes_6_0_to_6_1(connection):
    """Add the hidden 'use_vcenter' flag to every cluster's editable
    attributes (mirrors the release-level attribute added for 6.1)."""
    select_query = text("""SELECT id, editable FROM attributes""")
    update_query = text(
        """UPDATE attributes SET editable = :editable WHERE id = :attr_id""")
    for attr_id, editable in connection.execute(select_query):
        attributes = jsonutils.loads(editable)
        attributes['common']['use_vcenter'] = {
            "value": False,
            "weight": 30,
            "type": "hidden"
        }
        connection.execute(
            update_query,
            editable=jsonutils.dumps(attributes),
            attr_id=attr_id)
| StarcoderdataPython |
73793 |
from typing import List
from models.metrics import MetricBase
# from models.modeltrainer import ModelTrainerBase
class CallbackBase(object):
    """Abstract training callback interface: subclasses hook into training
    start and epoch end."""

    def __call__(self, *args, **kwargs):
        raise NotImplementedError

    def on_train_begin(self, *args, **kwargs):
        """Invoked once before training starts."""
        raise NotImplementedError

    def on_epoch_end(self, *args, **kwargs):
        """Invoked after every training epoch."""
        raise NotImplementedError
class RecordLossHistoryBase(CallbackBase):
    """Writes a plain-text history file: a header row at train start, then
    one space-separated row of metric values per epoch."""

    def __init__(self,
                 loss_filename: str,
                 list_metrics: List[MetricBase] = None,
                 is_hist_validation: bool = True
                 ) -> None:
        self._loss_filename = loss_filename
        fields = ['loss']
        if list_metrics:
            fields += [imetric._name_fun_out for imetric in list_metrics]
        if is_hist_validation:
            # Interleave each field with its validation counterpart.
            interleaved = []
            for field in fields:
                interleaved += [field, 'val_%s' % (field)]
            fields = interleaved
        self._names_hist_fields = fields

    def on_train_begin(self) -> None:
        header_cols = ['/epoch/'] + ['/%s/' % (elem) for elem in self._names_hist_fields]
        with open(self._loss_filename, 'w') as fout:
            fout.write(' '.join(header_cols) + '\n')

    def on_epoch_end(self, epoch: int, data_output: List[float]) -> None:
        row_cols = ['%d' % (epoch + 1)] + ['%0.6f' % (elem) for elem in data_output]
        with open(self._loss_filename, 'a') as fout:
            fout.write(' '.join(row_cols) + '\n')
class EarlyStoppingBase(CallbackBase):
    """Tracks the best validation loss and counts epochs without a
    sufficiently large improvement."""

    def __init__(self,
                 delta: float = 0.005,
                 patience: int = 10
                 ) -> None:
        # An epoch counts as an improvement only when the new validation loss
        # drops below (1 - delta) times the best loss seen so far.
        self._threshold = (1.0 - delta)
        self._patience = patience

    def on_train_begin(self) -> None:
        self._best_epoch = 0
        self._best_valid_loss = 1.0e+03
        self._waiting = -1.0e+03

    def on_epoch_end(self, epoch: int, valid_loss: float) -> None:
        improved = valid_loss < self._threshold * self._best_valid_loss
        if improved:
            self._best_epoch = epoch
            self._best_valid_loss = valid_loss
            self._waiting = 0
        else:
            self._waiting += 1
class ModelCheckpointBase(CallbackBase):
    """Periodically asks the model trainer to persist the model (weights only
    or the full model), optionally embedding the epoch number in the name."""

    def __init__(self,
                 model_filename: str,
                 model_trainer,
                 freq_save_model: int = 1,
                 type_save_model: str = 'full_model',
                 update_filename_epoch: bool = False
                 ) -> None:
        super(ModelCheckpointBase, self).__init__()
        self._model_filename = model_filename
        self._model_trainer = model_trainer
        self._freq_save_model = freq_save_model
        self._type_save_model = type_save_model
        self._update_filename_epoch = update_filename_epoch

    def on_train_begin(self) -> None:
        # Nothing to initialise before training starts.
        pass

    def on_epoch_end(self, epoch: int) -> None:
        if (epoch % self._freq_save_model) != 0:
            return
        if self._update_filename_epoch:
            # The filename template receives the 1-based epoch number.
            filename = self._model_filename % (epoch + 1)
        else:
            filename = self._model_filename
        if self._type_save_model == 'only_weights':
            self._model_trainer.save_model_only_weights(filename)
        elif self._type_save_model == 'full_model':
            self._model_trainer.save_model_full(filename)
| StarcoderdataPython |
1640580 | from django.contrib.auth.decorators import permission_required
from django.contrib.auth.models import User
from django.core.exceptions import SuspiciousOperation
from django.db.models import Q
from django.forms import model_to_dict
from django.shortcuts import render
from endportal import utils
from logs.models import Log
def log_to_dict(log):
    """Convert a Log model instance to a dict, adding a ``src_name`` entry
    that resolves the source user (or a placeholder for anonymous users).

    :param log: Log object.
    :return: Dictionary form of the given log.
    :rtype: dict
    """
    data = model_to_dict(log)
    if data['src_user'] != 0:
        data['src_name'] = User.objects.get(id=data['src_user'])
    else:
        data['src_name'] = '未登录用户'
    return data
@permission_required('logs.view_log', raise_exception=True)
def logs(request):
    """
    Log page: render logs according to certain searching criteria. The current
    user must have permission to view logs.
    """
    query_set, search = Log.objects.all().order_by('-src_time'), dict()
    try:
        # Every filter is optional and applied only when its query parameter
        # is non-empty; "search" echoes the active criteria to the template.
        src_time_s = request.GET.get('src_time_s', '')
        src_time_e = request.GET.get('src_time_e', '')
        src_user = request.GET.get('src_user', '')
        src_addr = request.GET.get('src_addr', '')
        keyword = request.GET.get('keyword', '')
        if src_time_s != '':
            search['src_time_s'] = src_time_s
            query_set = query_set.filter(src_time__gte=src_time_s)
        if src_time_e != '':
            search['src_time_e'] = src_time_e
            query_set = query_set.filter(src_time__lte=src_time_e)
        if src_user != '':
            search['src_user'] = src_user
            query_set = query_set.filter(src_user=int(src_user))
        if src_addr != '':
            search['src_addr'] = src_addr
            query_set = query_set.filter(src_addr=src_addr)
        if keyword != '':
            search['keyword'] = keyword
            # Keyword matches any of category / behavior / detailed,
            # case-insensitively.
            query_set = query_set.filter(Q(category__icontains=keyword) |
                                         Q(behavior__icontains=keyword) |
                                         Q(detailed__icontains=keyword))
    except ValueError:
        # Malformed numeric input (e.g. a non-integer src_user) is treated as
        # a suspicious request (HTTP 400).
        raise SuspiciousOperation()
    context = dict()
    context['page'], context['plim'], context['pcnt'], context['logs'] = utils.paginate(request, 50, query_set)
    context['logs'] = [log_to_dict(log) for log in context['logs']]
    context['search'] = search
    return render(request, 'logs.html', context)
| StarcoderdataPython |
12860605 | import re
import copy
from collections import defaultdict
from string import Template
# initialize the dictionary for the methods with checked exceptions such as
# {fake method: real method}. The "_new" suffix marks wrapper variants of the
# Android API methods that declare checked exceptions.
method_dict_checked = {'deleteRecord' : 'delete', \
                       'editText' : 'setText_new', \
                       'insertData' : 'insert_new', \
                       'setLayout' : 'setContentView_new', \
                       'findViewId' : 'findViewById_new', \
                       'changeTextColor' : 'setTextColor_new', \
                       'getCursorString' : 'getString', \
                       'queryData' : 'query_new', \
                       'updateRecord' : 'update', \
                       'drawTxt' : 'drawText_new'}
# initialize the dictionary for the methods with unchecked exceptions such as
# {fake method: real method} — these map to the plain Android API names.
method_dict_unchecked = {'deleteRecord' : 'delete', \
                         'editText' : 'setText', \
                         'insertData' : 'insert', \
                         'setLayout' : 'setContentView', \
                         'findViewId' : 'findViewById', \
                         'changeTextColor' : 'setTextColor', \
                         'getCursorString' : 'getString', \
                         'queryData' : 'query', \
                         'updateRecord' : 'update', \
                         'drawTxt' : 'drawText'}
# answer_block is a dict of user's answers,
# i.e. answer_block = {'answer_1' : fake_answer}
# survey type refers to the different surveys
# (methods with checked exceptions Vs. methods with unchecked exceptions--documented and undocumented)
def glue_answer(filepath, answers, survey_type, email):
    """Substitute the user's answers into the template at *filepath*.

    Writes the answers as-submitted to ``static/<email>-NoteEditor.java`` and
    returns the template rendered with the answers rewritten to use the real
    Android API method names.
    """
    method_dict = set_dict(survey_type)
    # Fix: the template file was previously opened and never closed.
    with open(filepath) as filein:
        src = Template(filein.read())
    result = src.substitute(answers)
    with open('static/%s-NoteEditor.java' % (email), 'w') as f:
        f.write("%s" % result)
    # dictionary for answers with real Android's API methods
    real_answers = bind_method(answers, method_dict)
    # do the substitution
    result = src.substitute(real_answers)
    return result
# Bind the answers' methods to the real Android's API methods.
# answers is a dict, i.e. answers = {'answer_1' : fake_answer};
# returns a dict of answers with real Android API methods,
# i.e. real_answers = {'answer_1' : real_answer}.
def bind_method(answers, method_dict):
    """Replace the first fake method found in each answer with its real
    Android API counterpart; answers without any fake method pass through
    unchanged."""
    real_answers = {}
    # First pass: for each answer, substitute the first fake method whose
    # name occurs anywhere in the answer text.
    for answer_key, answer_text in answers.items():
        for fake, real in method_dict.items():
            if re.search(fake, answer_text):
                if answer_key not in real_answers:
                    real_answers[answer_key] = re.sub(
                        fake + '\\(', real + '(', answer_text)
                break
    # Second pass: carry over any answer that matched no fake method.
    for answer_key in answers:
        if answer_key not in real_answers:
            real_answers[answer_key] = answers[answer_key]
    return real_answers
def replace_methods(compiler_output, survey_type):
    """Rewrite real Android API method names in *compiler_output* back to the
    fake survey method names, so compiler messages match what the participant
    wrote."""
    method_dict = set_dict(survey_type)
    for fake, real in method_dict.items():
        compiler_output = re.sub(real, fake, compiler_output)
    # Fix: the original used "\b..." in non-raw strings, so "\b" was a literal
    # backspace character and this branch could never match; it also assigned
    # from an undefined name ``replace_output`` (a NameError had it matched).
    if re.search(r"\bsetTextColor\b\(\bcolors\b\)", compiler_output):
        compiler_output = re.sub(r"\bsetTextColor\b\(\bcolors\b\)",
                                 "changeTextColor(colors)", compiler_output)
    # check for line numbers
    #comp_output = remove_line_numbers(compiler_output)
    return compiler_output
# dict depending on the survey type
def set_dict(survey_type):
    """Return the fake-to-real method mapping for *survey_type*
    ('checked' or 'unchecked'); any other value yields None."""
    if survey_type == 'checked':
        return method_dict_checked
    elif survey_type == 'unchecked':
        return method_dict_unchecked
# strip line numbers from compiler output
def remove_line_numbers(output):
    """Remove ``:<digits>`` sequences (e.g. ``NoteEditor.java:118``) from
    *output* and return the cleaned string.

    NOTE(review): the two prints look like leftover debug output — kept to
    preserve observable behaviour.
    """
    print("Here is the output.")
    print(output)
    return re.sub(':[0-9]+', '', output)
# vim: tabstop=8 noexpandtab shiftwidth=8 softtabstop=0
| StarcoderdataPython |
4909396 | <gh_stars>1-10
import os
import mozumder
from ...models.development import *
from .models import *
from .templates import *
from .views import *
from .urls import *
from .admin import *
from ..utilities.name_case import *
from shutil import copyfile
def write_app(app_obj):
    """Generate a new Django-style app directory tree for *app_obj*.

    Copies the bundled ``app_template`` skeleton into ``./<app_name>``,
    evaluating ``*-tpl`` files as f-string templates in this scope, then
    generates the models/views/admin packages, URL config and Django
    templates from the tracked-model metadata.

    Args:
        app_obj: application record; ``app_obj.name`` is read directly and
            the ``TrackedModel``/``TrackedView`` rows owned by it drive code
            generation.

    Fix vs. original: all ``open``/``close`` pairs replaced with ``with``
    blocks so file handles are released even when an exception fires.

    NOTE(review): the original source had its indentation mangled; the loop
    boundaries below (per-model template writes inside the model loop,
    ``ModelsBlock`` and the package ``__init__.py`` writes after it) were
    reconstructed from the code's semantics — confirm against upstream.
    """
    app_name = app_obj.name
    access_rights = 0o755
    # mozumder.__file__ ends in '/__init__.py' (12 chars); stripping it
    # yields the package directory that holds the bundled template tree.
    source_root = os.path.join(mozumder.__file__[:-12], 'include', 'app_template')
    source_root_length = len(source_root)
    target_root = os.path.join(os.getcwd(), app_name)
    try:
        os.mkdir(target_root, access_rights)
    except OSError:
        print(f"Creation of app directory {target_root} failed")
    else:
        print(f"Created app directory {target_root}")
    for root, dirs, files in os.walk(source_root):
        # Mirror the template tree, renaming 'app_name' path components.
        sub_dir = root[source_root_length + 1:].replace('app_name', app_name)
        target_path = os.path.join(target_root, sub_dir)
        for name in dirs:
            if name == 'app_name':
                name = app_name
            path = os.path.join(target_path, name)
            try:
                os.mkdir(path, mode=0o755)
            except OSError:
                print(f"Creation of the directory {path} failed")
        for name in files:
            source_filename = os.path.join(root, name)
            if name[-4:] == '-tpl':
                # Template file: wrap its contents in an f-string literal and
                # evaluate it here so it can reference app_name, app_obj, ...
                with open(source_filename, "r") as f:
                    fstring_from_file = 'f"""' + f.read() + '"""'
                # SECURITY NOTE: eval() executes arbitrary expressions
                # embedded in the template; template files must be trusted.
                compiled_fstring = compile(fstring_from_file, source_filename, 'eval')
                formatted_output = eval(compiled_fstring)
                name = name[:-4]
                target_filename = os.path.join(target_path, name)
                with open(target_filename, "w") as f:
                    f.write(formatted_output)
                # Preserve the template's permission bits on the output file.
                status = os.stat(source_filename).st_mode & 0o777
                os.chmod(target_filename, status)
            else:
                target_filename = os.path.join(target_path, name)
                copyfile(source_filename, target_filename)
    # Write models.py, templates, and views.py from tracked-model metadata.
    context = {}
    context['app'] = app_obj
    template_dir = os.path.join(os.getcwd(), app_name, 'templates', app_name)
    try:
        os.mkdir(template_dir, access_rights)
    except OSError:
        print(f"Creation of template directory {template_dir} failed")
    else:
        print(f"Created template directory {template_dir}")
    models_imports = ''
    views_imports = ''
    admin_imports = ''
    # Abstract models go into a shared .meta module.
    model_objs = TrackedModel.objects.filter(owner=app_obj, abstract=True)
    if model_objs:
        context['models'] = model_objs
        MetaModelsWriter().write(context)
        models_imports += "from .meta import *\n"
    # Concrete models each get their own module plus views/urls/admin/templates.
    model_objs = TrackedModel.objects.filter(owner=app_obj, abstract=False)
    for model_obj in model_objs:
        context['model'] = model_obj
        context['model_code_name'] = CamelCase_to_snake_case(model_obj.name)
        # Write models.py as part of module
        ModelWriter().write(context)
        models_imports += f"from .{context['model_code_name']} import {model_obj.name}\n"
        # Write views.py as part of module
        ViewWriter().write(context)
        view_objs = TrackedView.objects.filter(model=model_obj)
        views_imports_list = ', '.join([str(view_obj.name) for view_obj in view_objs])
        views_imports += f"from .{context['model_code_name']} import {views_imports_list}\n"
        # Write URLs for views
        URLsWriter().update(context)
        APIURLsWriter().update(context)
        # Write Admin
        AdminWriter().write(context)
        admin_imports += f"from .{context['model_code_name']} import {model_obj.name}Admin\n"
        # Write Django templates
        ModelListBlock().write(context)
        ModelListPage().write(context)
        ModelDetailBlock().write(context)
        ModelDetailPage().write(context)
        UpdateModelsListBlock().write(context)
        CreateFormBlock().write(context)
        CreateFormPage().write(context)
        UpdateFormBlock().write(context)
        UpdateFormPage().write(context)
        CopyFormBlock().write(context)
        CopyFormPage().write(context)
        DeleteFormBlock().write(context)
        DeleteFormPage().write(context)
    # Write apps models
    ModelsBlock().write(context)
    # Write models/__init__.py for python module
    model_package_file = os.path.join(target_root, 'models', '__init__.py')
    with open(model_package_file, "w") as f:
        f.write(models_imports)
    # Write views/__init__.py for python module
    views_package_file = os.path.join(target_root, 'views', '__init__.py')
    with open(views_package_file, "w") as f:
        f.write(views_imports)
    # Write admin/__init__.py for python module
    admin_package_file = os.path.join(target_root, 'admin', '__init__.py')
    with open(admin_package_file, "w") as f:
        f.write(admin_imports)
| StarcoderdataPython |
1861576 | <filename>fetch_cord/run_command.py
#from __future__ import annotations
from typing import List
import subprocess
def run_command(command: List[str], shell: bool = False):
    """Run *command* and return its captured stdout, decoded as UTF-8."""
    completed = subprocess.run(
        command,
        encoding="utf-8",
        stdout=subprocess.PIPE,
        shell=shell,
    )
    return completed.stdout
class BashError(Exception):
    """Raised by exec_bash when a bash command fails or bash is unavailable."""
    pass
def exec_bash(command: str):
    """Run *command* under ``bash -c`` and return its stripped combined output.

    stderr is merged into stdout. Raises BashError if the command exits
    non-zero or if bash itself is not installed.
    """
    try:
        raw = subprocess.check_output(
            ["bash", "-c", command], stderr=subprocess.STDOUT
        )
    except subprocess.CalledProcessError as e:
        out = e.stdout.decode("utf8")
        raise BashError("Failed to execute '%s' :\n%s" % (command, out))
    except FileNotFoundError:
        raise BashError("BASH not installed on your computer...")
    return raw.decode("utf8").strip()
| StarcoderdataPython |
5041313 | <reponame>RaymondDashWu/generative-structures-dapp
import hashlib
from typing import Union
from eth_typing import Hash32
def hash_eth2(data: Union[bytes, bytearray]) -> Hash32:
    """
    Return the SHA-256 digest of *data* wrapped as a ``Hash32``.

    Note: this API is currently under active research/development so is
    subject to change without a major version bump.

    Note: it's a placeholder and we aim to migrate to a S[T/N]ARK-friendly
    hash function in a future Ethereum 2.0 deployment phase.
    """
    digest = hashlib.sha256(data).digest()
    return Hash32(digest)
| StarcoderdataPython |
263754 | <reponame>SamLubbers/rebel_backup<filename>generate_key.py
#!/usr/bin/env python3
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
if __name__ == '__main__':
    # Generate a 2048-bit RSA key pair and write both halves out as PEM files.
    key = rsa.generate_private_key(
        public_exponent=65537,
        key_size=2048,
        backend=default_backend()
    )
    # Private half: unencrypted PKCS#8 PEM.
    private_pem = key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption()
    )
    with open('private_key.pem', 'wb') as key_file:
        key_file.write(private_pem)
    # Public half: SubjectPublicKeyInfo PEM.
    public_pem = key.public_key().public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo
    )
    with open('public_key.pem', 'wb') as key_file:
        key_file.write(public_pem)
| StarcoderdataPython |
3203915 | from . import e1
from . import e2
from . import e3
from . import destijl
| StarcoderdataPython |
1801191 | #**********************************************************
#* CATEGORY JARVIS HOME AUTOMTION
#* GROUP SPEECH TO TEXT
#* AUTHOR <NAME> <<EMAIL>>
#**********************************************************
#Jarvis Home Automation
#Copyright (C) 2017 Haynie Research & Development
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License along
#with this program; if not, write to the Free Software Foundation, Inc.,
#51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import json
import apiai
import re
from core import plugin
class AiPlugin(plugin.SpeechHandlerPlugin):
    """Catch-all speech handler that forwards unmatched utterances to the
    api.ai (Dialogflow) agent and speaks the agent's reply."""
    def get_phrases(self):
        # No trigger phrases: this plugin accepts everything (see is_valid).
        return []
    def handle(self, text, mic):
        """Send *text* to api.ai and speak the agent's response through *mic*.

        Reads the client access token from profile['keys']['apiai'].
        """
        CLIENT_ACCESS_TOKEN = self.profile['keys']['apiai']
        ai = apiai.ApiAI(CLIENT_ACCESS_TOKEN)
        request = ai.text_request()
        # Strip the {"text": "..."} wrapper with plain string surgery.
        # NOTE(review): assumes the input is exactly that wrapper shape;
        # json.loads would be more robust — confirm the caller's format.
        cleanText = re.sub('"}', "", re.sub('{"text": "', "", text))
        request.query = cleanText
        response = request.getresponse()
        responseData = json.loads(response.read())
        responseDataText = responseData["result"]["fulfillment"]["speech"]
        mic.say(responseDataText)
    def is_valid(self, text):
        # Always claim the utterance so the AI backend handles anything the
        # other plugins did not match.
        return True
11367572 | """CommitLimit class used to determine whether a commit is out of the desired range."""
from datetime import datetime
from typing import Optional
from git import Commit
class CommitLimit(object):
    """Cut-off point (a date or a commit sha) at which commit analysis of an
    evergreen project's repository should stop."""

    def __init__(
        self, stop_at_date: Optional[datetime] = None, stop_at_commit_sha: Optional[str] = None
    ):
        """
        Create a CommitLimit.

        :param stop_at_date: Date at which to stop walking back through commits.
        :param stop_at_commit_sha: Commit sha at which to stop walking back.
        """
        self.stop_at_date = stop_at_date
        self.stop_at_commit_sha = stop_at_commit_sha

    def __repr__(self) -> str:
        """Return the object representation of this CommitLimit."""
        return f"CommitLimit({self.stop_at_date}, {self.stop_at_commit_sha})"

    def check_commit_before_limit(self, commit: Commit) -> bool:
        """
        Report whether *commit* falls before the configured limit.

        When a date limit is set, a commit older than that date is "before";
        otherwise a commit matching the configured sha is "before".

        :param commit: The commit to compare against.
        :return: True if the commit comes before the limit.
        """
        if self.stop_at_date is not None:
            return commit.committed_datetime < self.stop_at_date
        return commit.hexsha == self.stop_at_commit_sha
| StarcoderdataPython |
# -*- coding:utf-8 -*-
# Packaging script for the polyhymnia distribution.
from os.path import abspath, join, dirname
from setuptools import find_packages, setup
# Runtime dependencies: one requirement per line in requirements.txt.
with open('requirements.txt') as f:
    required = f.read().splitlines()
this_dir = abspath(dirname(__file__))
# Long description for PyPI comes straight from the README.
with open(join(this_dir, 'README.md'), encoding='utf-8') as file:
    long_description = file.read()
# Read __version__ from polyhymnia/version.py without importing the package.
version = {}
with open(join(this_dir, "polyhymnia", "version.py")) as fp:
    exec(fp.read(), version)
setup(
    name='polyhymnia',
    version=version['__version__'],
    description='Polyhymnia: Natual Chinese Data Augmentation',
    long_description=long_description,
    long_description_content_type="text/markdown",
    url='https://github.com/luoy2/polyhymnia.git',
    author='yikang',
    author_email='<EMAIL>',
    license='Apache License 2.0',
    keywords='corpus,NLU,NLP',
    packages=find_packages(exclude=['docs', 'tests*']),
    include_package_data=True,
    # Bundle the data files (including stopword lists) inside the wheel.
    package_data={'': ['data/*', 'data/stopwords/*']},
    install_requires=required,
    python_requires='>=3.6',
) | StarcoderdataPython |
4964267 | from __future__ import print_function, absolute_import, division
import six
import jinja2
class FileConfiguration(object):
    """Configuration loaded for a single file.

    Merges global variables with local ones, rendering each local value as a
    Jinja2 template that may reference ``GLOBALS``.
    """

    def __init__(self, filepath, local_variables, global_variables=None):
        self.config = {}
        if global_variables:
            self.config.update(global_variables)
        # DebugUndefined leaves unknown placeholders in place instead of
        # failing, so partially-resolvable templates still render.
        env = jinja2.Environment(undefined=jinja2.DebugUndefined)
        rendered = {}
        for key, value in local_variables.items():
            if six.PY2:
                value = value.decode('utf8')
            rendered[key] = env.from_string(value).render(GLOBALS=global_variables)
        self.config.update(rendered)
        self.path = filepath

    @classmethod
    def from_dict(cls, dic, global_variables):
        """Build a FileConfiguration from a dict carrying a 'path' key."""
        return cls(dic['path'], dic, global_variables)
| StarcoderdataPython |
3230585 | <filename>erroranalysis/erroranalysis/analyzer/__init__.py<gh_stars>100-1000
# Copyright (c) Microsoft Corporation
# Licensed under the MIT License.
"""Module for defining the analyzers."""
from .error_analyzer import ModelAnalyzer, PredictionsAnalyzer
__all__ = ["PredictionsAnalyzer", "ModelAnalyzer"]
| StarcoderdataPython |
4880192 | <gh_stars>0
"""Define the units/scaling tests."""
import unittest
from openmdao.utils.testing_utils import use_tempdirs
from openmdao.core.tests.test_scaling_report import TestDriverScalingReport
class TestDriverScalingReportMPI(TestDriverScalingReport):
N_PROCS = 2
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
1889923 | <reponame>yuxuan-lou/ColossalAI-Examples
from typing import Callable
import torch
from colossalai import nn as col_nn
from colossalai.registry import MODELS
from torch import dtype, nn
from model_zoo.vit.vit import ViTBlock, ViTEmbedding
from utils import heads, objectives
import torch.nn.functional as F
from colossalai.nn.layer.colossalai_layer import LayerNorm
from transformers.models.bert.modeling_bert import BertConfig, BertEmbeddings
@MODELS.register_module
class ViLT(nn.Module):
    """Vision-and-Language Transformer built from Colossal-AI ViT blocks.

    Embeds an image (patch embedding) and text (BERT embeddings), concatenates
    them along the sequence dimension, runs them through a shared transformer
    stack, and computes a masked-language-modeling (MLM) loss.

    NOTE(review): many constructor arguments (num_classes, dropout_prob,
    representation_size, convert_fp16_to_fp32_in_softmax, start_idx, end_idx)
    are accepted but never used below — confirm whether they are reserved for
    pipeline-parallel variants.
    """
    def __init__(
        self,
        config,
        img_size: int = 384,
        patch_size: int = 16,
        in_chans: int = 3,
        num_classes: int = 1000,
        depth: int = 12,
        num_heads: int = 12,
        dim: int = 768,
        mlp_ratio: int = 4,
        attention_dropout: float = 0.,
        dropout: float = 0.1,
        dropout_prob=0.1,
        drop_path: float = 0.,
        init_std=0.02,
        layernorm_epsilon: float = 1e-6,
        activation: Callable = nn.functional.gelu,
        representation_size: int = None,
        convert_fp16_to_fp32_in_softmax=False,
        dtype: dtype = None,
        bias: bool = True,
        checkpoint: bool = False,
        init_method: str = 'torch',
        first_stage=True,
        last_stage=True,
        start_idx=0,
        end_idx=None,):
        super().__init__()
        # Model hyperparameters read from the experiment config dict.
        max_sequence_length = config["max_text_len"]
        num_layers = config["num_layers"]
        vocab_size = config["vocab_size"]
        self.vocab_size = vocab_size
        hidden_size = config["hidden_size"]
        self.first_stage = first_stage
        self.last_stage = last_stage
        self.init_std = init_std
        self.num_layers = num_layers
        # BERT config drives both the text embeddings and the MLM head.
        bert_config = BertConfig(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_layers,
            num_attention_heads=num_heads,
            intermediate_size=hidden_size * mlp_ratio,
            max_position_embeddings=max_sequence_length,
            hidden_dropout_prob=dropout,
            attention_probs_dropout_prob=dropout,
        )
        self.pooler = heads.Pooler(hidden_size)
        # Two token types: text vs. image.
        # NOTE(review): initialized but not added to the embeddings in
        # infer() below — confirm intended.
        self.token_type_embeddings = nn.Embedding(2, hidden_size)
        self.token_type_embeddings.apply(objectives.init_weights)
        self.text_embedding = BertEmbeddings(bert_config)
        self.vis_embedding = ViTEmbedding(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            embedding_dim=dim,
            dropout=dropout,
            dtype=dtype,
            init_method=init_method)
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, drop_path, depth)]
        blocks = [
            ViTBlock(
                dim=dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                attention_dropout=attention_dropout,
                dropout=dropout,
                drop_path=dpr[i],
                activation=activation,
                dtype=dtype,
                bias=bias,
                checkpoint=checkpoint,
                init_method=init_method,
            ) for i in range(depth)
        ]
        norm = col_nn.LayerNorm(normalized_shape=dim, eps=layernorm_epsilon, dtype=dtype)
        # MLM head only exists on the last pipeline stage.
        if self.last_stage:
            self.mlm_score = heads.MLMHead(bert_config)
            self.mlm_score.apply(objectives.init_weights)
            self.layer_norm = LayerNorm(hidden_size)
        # Transformer stack: depth ViT blocks followed by a final LayerNorm.
        layers = []
        layers.extend(blocks)
        layers.extend([norm])
        self.layers = nn.Sequential(
            *layers
        )
        # self.layers = build_pipeline_model(self.layers, num_chunks=1, verbose=True)
    def infer(self, x, image_token_type_idx=1):
        """Run a joint text+image forward pass over batch dict *x*.

        Reads the image under 'image' (or 'image_<idx-1>' if present) and the
        masked text under 'text_ids_mlm'/'text_labels_mlm'; returns a dict of
        text/image/cls features plus the labels and ids passed through.
        """
        do_mlm = "_mlm"
        if f"image_{image_token_type_idx - 1}" in x:
            imgkey = f"image_{image_token_type_idx - 1}"
        else:
            imgkey = "image"
        img = x[imgkey]
        text_ids = x[f"text_ids{do_mlm}"]
        text_labels = x[f"text_labels{do_mlm}"]
        image_embeds = self.vis_embedding(img)
        text_embeds = self.text_embedding(text_ids)
        # Concatenate text then image tokens along the sequence dimension.
        co_embeds = torch.cat([text_embeds, image_embeds], dim=1)
        x = co_embeds
        x = self.layers(x)
        # Split the joint sequence back into text / image halves.
        text_feats, image_feats = (
            x[:, : text_embeds.shape[1]],
            x[:, text_embeds.shape[1] :],
        )
        cls_feats = self.pooler(x)
        ret = {
            "text_feats": text_feats,
            "image_feats": image_feats,
            "cls_feats": cls_feats,
            "raw_cls_feats": x[:, 0],
            "text_labels": text_labels,
            "text_ids": text_ids,
        }
        return ret
    def forward(self, x):
        """Compute the model outputs (currently only the MLM objective)."""
        ret = dict()
        ret.update(self.compute_mlm(x))
        return ret
    def compute_mlm(self, batch):
        """Compute the masked-language-modeling loss and logits for *batch*.

        Labels equal to -100 are ignored (standard HF MLM convention).
        """
        infer = self.infer(batch)
        mlm_logits = self.mlm_score(infer["text_feats"])
        mlm_labels = infer["text_labels"]
        mlm_loss = F.cross_entropy(
            mlm_logits.view(-1, self.vocab_size),
            mlm_labels.view(-1),
            ignore_index=-100,
        )
        ret = {
            "mlm_loss": mlm_loss,
            "mlm_logits": mlm_logits,
            "mlm_labels": mlm_labels,
            "mlm_ids": infer["text_ids"],
        }
        return ret
def get_current_device():
    """Return the index of the currently selected CUDA device, or 'cpu'
    when CUDA is unavailable."""
    if torch.cuda.is_available():
        return torch.cuda.current_device()
    return 'cpu'
| StarcoderdataPython |
3586583 | <gh_stars>1-10
import getopt
import socket
import sys
import threading
import traceback
import os
from queue import Queue
from github.GithubException import RateLimitExceededException
from github.GithubException import GithubException
from github.GithubException import BadCredentialsException
from github.GithubException import UnknownObjectException
from githubutils.BaseGithubThreadedExtractor import BaseGitHubThreadedExtractor
from githubutils.Tokens import Tokens
from githubutils.NoAvailableTokenException import NoAvailableTokenException
from loggingcfg import initialize_logger
import numpy as np
from numba import jit, prange
import pandas as pd
import loggingcfg
logger = loggingcfg.initialize_logger('SZZ-EXTRACTOR')
class IssuesAndCommentExtractor(BaseGitHubThreadedExtractor):
    """Fetches closed issues and their comments for a GitHub repository and
    dumps them to two CSV files (<slug>_issues.csv, <slug>_comments.csv),
    rotating API tokens and retrying on rate limits/timeouts."""
    # Column layout of the issues CSV.
    __ISSUES_COLUMN_NAMES = ["SLUG", "ID", "NUMBER", "STATE", "CREATED_AT", "CLOSED_AT", "CREATED_BY_LOGIN",
                             "CLOSED_BY_LOGIN", "ASSIGNEE_LOGIN", "TITLE", "NUM_COMMENTS", "LABELS", "IS_PL"]
    # Column layout of the comments CSV.
    __COMMENTS_COLUMN_NAMES = ["SLUG", "ISSUE_ID", "ISSUE_NUMBER", "COMMENT_ID", "BODY", "CREATED_AT", "UPDATED_AT",
                               "USER_LOGIN", "USER_ID"]
    # NOTE(review): numba cannot compile PyGithub calls; @jit(parallel=True)
    # here presumably falls back to object mode (or is a no-op with
    # overhead) — confirm it was intentional.
    @jit(parallel=True)
    def __to_df(self, issues, slug, g, issue_data, comment_data):
        """Fetch each issue number in *issues* from *slug*, appending parsed
        rows to issue_data and comment_data in place."""
        repo = g.get_repo(slug)
        for i in prange(0, np.size(issues)):
            logger.debug("Looking for issue number %d", issues[i])
            issue = repo.get_issue(int(issues[i]))
            issue_data.append(self.__parse_issue(slug, issue))
            comment_data += IssuesAndCommentExtractor.__parse_comments(self, slug, issue)
    @jit
    def __parse_issue(self, slug, issue):
        """Flatten one GitHub issue into a list matching __ISSUES_COLUMN_NAMES."""
        issue_id = issue.id  # int
        issue_number = issue.number  # int
        state = issue.state  # string
        created_at = str(issue.created_at)  # datetime
        closed_at = str(issue.closed_at)  # datetime
        created_by = issue.user  # NamedUser
        created_by_login = None
        if created_by is not None:
            created_by_login = created_by.login
        closed_by = issue.closed_by  # NamedUser
        closed_by_login = None
        if closed_by is not None:
            closed_by_login = closed_by.login
        assignee = issue.assignee  # NamedUser
        assignee_login = None
        if assignee is not None:
            assignee_login = assignee.login
        # Titles are flattened to a single line for CSV output.
        title = issue.title.strip().replace("\n", "").replace("\r", "")  # string
        num_comments = issue.comments  # int
        labels = ';'.join([l.name for l in issue.labels])  # [Label]
        # True when the "issue" is actually a pull request.
        is_pl = issue.pull_request is not None
        return [slug, issue_id, issue_number, state, created_at, closed_at, created_by_login,
                closed_by_login, assignee_login, title, num_comments, labels, is_pl]
    def __parse_github_pages(self, issues, slug, g, issue_data=None, comment_data=None):
        """Fetch issue details for *issues*, retrying on timeouts/rate limits,
        and return (issues DataFrame, comments DataFrame)."""
        if issue_data is None:
            issue_data = []
        if comment_data is None:
            comment_data = []
        logger.info("Issue detail to fetch: %d" % np.size(issues))
        try:
            self.__to_df(issues, slug, g, issue_data, comment_data)
        # NOTE(review): `except A or B` evaluates to `except socket.timeout`
        # only; RateLimitExceededException is caught by the next clause.
        except socket.timeout or RateLimitExceededException as ste:
            logger.error("Socket timeout parsing issue", ste)
            # NOTE(review): the DataFrames returned by the retry are
            # discarded — they are rebuilt unconditionally below from the
            # shared issue_data/comment_data lists.
            df_issue, df_comments = self.__manage_parsing_exception(issues, slug, g, issue_data, comment_data)
        except RateLimitExceededException:
            logger.warn("Rate limit parsing issue")
            df_issue, df_comments = self.__manage_parsing_exception(issues, slug, g, issue_data, comment_data)
        except GithubException as exc:
            logger.warn("Generic exception", exc)
            df_issue, df_comments = self.__manage_parsing_exception(issues, slug, g, issue_data, comment_data)
        df_issue = pd.DataFrame(issue_data, columns=self.__ISSUES_COLUMN_NAMES)
        df_comments = pd.DataFrame(comment_data, columns=self.__COMMENTS_COLUMN_NAMES)
        return df_issue, df_comments
    def __manage_parsing_exception(self, issues, slug, g, issue_data, comment_data):
        """Swap in a fresh GitHub client and resume fetching only the issue
        numbers not yet present in issue_data."""
        pid = threading.get_ident()
        g = self._get_github_instance(pid, g)
        # issue_data rows store the issue number at index 2.
        processed = np.array([x[2] for x in issue_data])
        logger.info("Processed: {0}".format(processed))
        remaining_issues = np.setdiff1d(issues, processed)
        logger.info("Issue size: {0}; remaining: {1}".format(np.size(issues), np.size(remaining_issues)))
        return self.__parse_github_pages(remaining_issues, slug, g, issue_data, comment_data)
    def issues_to_csv(self, slug: str, out_dir: str):
        """Fetch all closed issues of *slug* and write the issue and comment
        CSVs into *out_dir* (slashes in the slug become underscores)."""
        df_issue: pd.DataFrame = None
        df_comments: pd.DataFrame = None
        pid = threading.get_ident()
        logger.info('[tid: {0}] Processing {1}'.format(pid, slug))
        try:
            g = self._get_github_instance(pid)
            repo = g.get_repo(slug)
            if repo:  # and repo.has_issues: sometimes returns False even when there are some
                issues = np.array([issue.number for issue in repo.get_issues(state="closed")], dtype=int)
                logger.info("Fetching {0} issues from repo {1}".format(np.size(issues), slug))
                df_issue, df_comments = self.__parse_github_pages(issues, slug, g)
        except BadCredentialsException:
            logger.warning("Repository %s seems to be private (raised 401 error)" % slug)
        except UnknownObjectException as e:
            logger.warning(e)
        except GithubException as ghe:
            logger.warning("Error for repository {0}, most likely there is no tab Issues in the repo".format(slug))
            traceback.print_exc(ghe)
        except NoAvailableTokenException as e:
            logger.fatal("No available tokens with sufficient valid rate limit.")
        except Exception as e:
            traceback.print_exc(e)
        finally:
            # Write out whatever was collected, even after an error.
            slug = slug.replace("/", "_")
            if df_issue is not None:
                df_issue.to_csv(os.path.join(out_dir, slug + "_issues.csv"), index=False)
            if df_comments is not None:
                df_comments.to_csv(os.path.join(out_dir, slug + "_comments.csv"), index=False)
    # NOTE(review): declared @staticmethod yet takes an explicit `self`; it
    # is invoked as IssuesAndCommentExtractor.__parse_comments(self, ...) in
    # __to_df, which works but is unconventional.
    @staticmethod
    def __parse_comments(self, slug, issue):
        """Collect issue comments (and, for PRs, review comments) as rows
        matching __COMMENTS_COLUMN_NAMES."""
        comments = []
        comments_pglist = issue.get_comments()
        for comment in comments_pglist:
            comment_id = comment.id
            body = comment.body.strip()
            created_at = comment.created_at
            updated_at = comment.updated_at
            user_login = comment.user.login
            user_gh_id = comment.user.id
            comments.append(
                [slug, issue.id, issue.number, comment_id, body, created_at, updated_at, user_login, user_gh_id])
        if issue.pull_request is not None:  # is an actual issue: # is a PR
            # Pull requests additionally carry code-review comments.
            pr = issue.repository.get_pull(issue.number)
            comments_pglist = pr.get_review_comments()
            for comment in comments_pglist:
                comment_id = comment.id
                created_at = comment.created_at
                updated_at = comment.updated_at
                body = comment.body.strip()
                try:
                    # comment.user can be None (deleted account) -> AttributeError.
                    user_login = comment.user.login
                    user_gh_id = comment.user.id
                    comments.append(
                        [slug, pr.id, pr.number, comment_id, body, created_at, updated_at, user_login, user_gh_id])
                except AttributeError:
                    logger.error("Skipped comment {0} in project {1} with None as user".format(comment_id, slug))
                    continue
        return comments
if __name__ == '__main__':
    # Command-line driver: parse -s/-t/-o options, build the token pool and
    # run the extractor for a single repository slug.
    help_message = 'Usage:\n IssuesAndCommentsProcessor.py -s|--slug=<slug> -t|--tokens=<tokens> -o|--output=<output_dir>'
    slug = None
    out_dir = None
    tokens_file = None
    logger = initialize_logger(name="SZZ:ISSUES_COMMENTS")
    try:
        if not sys.argv[1:]:
            raise getopt.GetoptError('No arguments passed from the command line. See help instructions.')
        # NOTE(review): the optstring declares "H" but the loop checks "-h";
        # likewise "tokens" lacks the trailing "=" although -t takes an
        # argument — confirm the intended option spec.
        opts, args = getopt.getopt(sys.argv[1:], "s:t:o:H", ["slug=", "output=", "tokens", "help"])
        for opt, arg in opts:
            if opt in ("-h", "--help"):
                print(help_message)
                sys.exit(0)
            elif opt in ("-o", "--output"):
                out_dir = arg
            elif opt in ("-t", "--tokens"):
                tokens_file = arg
            elif opt in ("-s", "--slug"):
                slug = arg
            else:
                assert False, "unhandled option"
    except getopt.GetoptError as err:
        # print help information and exit:
        logger.error(err)  # will print something like "option -a not recognized"
        print(help_message)
        sys.exit(1)
    # Build the GitHub token pool (from file when given, else defaults).
    if tokens_file is not None:
        tokens = Tokens(tokens_file)
    else:
        tokens = Tokens()
    tokens_iter = tokens.iterator()
    tokens_queue = Queue()
    for token in tokens_iter:
        tokens_queue.put(token)
    tokens_map = dict()
    try:
        extractor = IssuesAndCommentExtractor(tokens, tokens_queue, tokens_map)
        logger.info("Beginning data extraction.")
        extractor.issues_to_csv(slug, out_dir)
        logger.info("Done.")
        exit(0)
    except KeyboardInterrupt:
        logger.error("Received Ctrl-C or another break signal. Exiting.")
| StarcoderdataPython |
#ABC052b
# Competitive-programming solution: track the running balance of a string of
# 'I' (+1) and other characters (-1) and report the maximum balance reached.
import sys
input = sys.stdin.readline  # shadow builtin input() with the faster readline
sys.setrecursionlimit(10**6)
n = int(input())  # declared length of the string (not used below)
s = input()  # NOTE(review): readline keeps the trailing '\n', which takes the
# else branch once at the end — harmless since maX is updated before it.
x = 0  # running balance
maX = 0  # maximum balance seen so far (the answer)
for i in s:
    if (i == "I"):
        x += 1
        maX = max(maX, x)
    else:
        x -= 1
print(maX) | StarcoderdataPython |
6558733 | <filename>meerkat/datapanel.py
"""DataPanel class."""
from __future__ import annotations
import logging
import os
import pathlib
from contextlib import contextmanager
from copy import copy, deepcopy
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import cytoolz as tz
import datasets
import dill
import numpy as np
import pandas as pd
import torch
import ujson as json
import yaml
from datasets import DatasetInfo, NamedSplit
from datasets.arrow_dataset import DatasetInfoMixin
from jsonlines import jsonlines
import meerkat
from meerkat.columns.abstract import AbstractColumn
from meerkat.columns.cell_column import CellColumn
from meerkat.mixins.cloneable import CloneableMixin
from meerkat.mixins.copying import DataPanelCopyMixin
from meerkat.mixins.inspect_fn import FunctionInspectorMixin
from meerkat.mixins.mapping import MappableMixin
from meerkat.mixins.materialize import MaterializationMixin
from meerkat.mixins.state import StateDictMixin
from meerkat.provenance import ProvenanceMixin, capture_provenance
from meerkat.tools.identifier import Identifier
from meerkat.tools.utils import convert_to_batch_fn, recmerge
logger = logging.getLogger(__name__)
Example = Dict
Batch = Dict[str, Union[List, AbstractColumn]]
BatchOrDataset = Union[Batch, "DataPanel"]
class DataPanel(
CloneableMixin,
DataPanelCopyMixin,
FunctionInspectorMixin,
MappableMixin,
MaterializationMixin,
ProvenanceMixin,
StateDictMixin,
DatasetInfoMixin, # this should be the last in order of mixins
):
"""Meerkat DataPanel class."""
# Path to a log directory
logdir: pathlib.Path = pathlib.Path.home() / "meerkat/"
# Create a directory
logdir.mkdir(parents=True, exist_ok=True)
def __init__(
self,
*args,
identifier: Identifier = None,
column_names: List[str] = None,
info: DatasetInfo = None,
split: Optional[NamedSplit] = None,
**kwargs,
):
super(DataPanel, self).__init__(
info=info,
split=split,
*args,
**kwargs,
)
# TODO(karan, sabri): copy columns when they're passed in and prevent users
# from setting visible_rows inside columns that belong to a datapanel
logger.debug("Creating DataPanel.")
# Data is a dictionary of columns
self._data = {}
# Single argument
if len(args) == 1:
assert column_names is None, "Don't pass in column_names."
# The data is passed in
data = args[0]
# `data` is a dictionary
if isinstance(data, dict) and len(data):
data = self._create_columns(data)
self._assert_columns_all_equal_length(data)
self._data = data
# `data` is a list
elif isinstance(data, list) and len(data):
# Transpose the list of dicts to a dict of lists i.e. a batch
data = tz.merge_with(list, *data)
# Assert all columns are the same length
data = self._create_columns(data)
self._assert_columns_all_equal_length(data)
self._data = data
# `data` is a datasets.Dataset
elif isinstance(data, datasets.Dataset):
self._data = self._create_columns(data[:])
info, split = data.info, data.split
# No argument
elif len(args) == 0:
# Use column_names to setup the data dictionary
if column_names:
self._check_columns_unique(column_names)
self._data = {k: [] for k in column_names}
# Setup the DatasetInfo
info = info.copy() if info is not None else DatasetInfo()
DatasetInfoMixin.__init__(self, info=info, split=split)
# Create attributes for all columns and visible columns
self.all_columns = list(self._data.keys())
self._visible_columns = None
# Create an identifier
# TODO(Sabri): make _autobuild_identifier more informative
self._identifier = Identifier(
self._autobuild_identifier() if not identifier else identifier
)
# Create logging directory
self._create_logdir()
self._initialize_state()
# TODO(Sabri): fix add_index for new datset
# Add an index to the dataset
if not self.has_index:
self._add_index()
@classmethod
def _create_columns(cls, name_to_data: Dict[str, AbstractColumn.Columnable]):
new_data = {}
for column_name, data in name_to_data.items():
new_data[column_name] = AbstractColumn.from_data(data=data)
return new_data
def _repr_pandas_(self):
return pd.DataFrame(
{
f"{k} ({v.__class__.__name__})": v._repr_pandas_()
for k, v in self.items()
}
)
def _repr_html_(self):
return self._repr_pandas_()._repr_html_()
def streamlit(self):
return self._repr_pandas_()
def __repr__(self):
return f"{self.__class__.__name__}" f"(num_rows: {self.num_rows})"
def __len__(self):
# If only a subset of rows are visible
if len(self.visible_columns) == 0:
return 0
return len(self[self.visible_columns[0]])
def __contains__(self, item):
return item in self.visible_columns
def full_length(self):
# If there are columns, full_length of any column, since they must be same size
if self.column_names:
return self._data[self.column_names[0]].full_length()
return 0
@property
def column_names(self):
"""Column names in the dataset."""
return self.visible_columns
@property
def columns(self):
"""Column names in the dataset."""
return self.visible_columns
@property
def num_rows(self):
"""Number of rows in the dataset."""
return len(self)
@property
def shape(self):
"""Shape of the dataset (num_rows, num_columns)."""
return self.num_rows, len(self.columns)
@classmethod
def _assert_columns_all_equal_length(cls, batch: Batch):
"""Check that all columns have the same length so that the data is
tabular."""
assert cls._columns_all_equal_length(
batch
), "All columns must have equal length."
@classmethod
def _columns_all_equal_length(cls, batch: Batch):
"""Check that all columns have the same length so that the data is
tabular."""
if len(set([len(v) for k, v in batch.items()])) == 1:
return True
return False
def _check_columns_exist(self, columns: List[str]):
"""Check that every column in `columns` exists."""
for col in columns:
assert col in self.all_columns, f"{col} is not a valid column."
def _check_columns_unique(self, columns: List[str]):
"""Checks that all columns are unique."""
assert len(columns) == len(set(columns))
def _initialize_state(self):
"""Dataset state initialization."""
# Show all columns by default
self.visible_columns = copy(self.all_columns)
# Set the features
self._set_features()
@property
def visible_columns(self):
return self._visible_columns
@visible_columns.setter
def visible_columns(self, columns: Optional[Sequence[str]] = None):
if columns is None:
# do nothing, keep old visible columns
return
for c in columns:
if c not in self.all_columns:
raise ValueError(f"Trying to set nonexistant column {c} to visible.")
self._visible_columns = copy(columns)
if "index" not in self._visible_columns and "index" in self.all_columns:
self._visible_columns.append("index")
@contextmanager
def format(self, columns: List[str] = None):
"""Context where only `columns` will be visible."""
# Get the current format
current_format = self.get_format()
if columns:
# View only `columns`
self.set_format(columns)
else:
# Use all columns
self.set_format(self.column_names)
try:
yield
finally:
# Reset the format back
self.set_format(current_format)
def get_format(self) -> List[str]:
"""Get the dataset format."""
return self.visible_columns
def set_format(self, columns: List[str]):
"""Set the dataset format.
Only `columns` are visible after set_format is invoked.
"""
# Check that the columns exist
self._check_columns_exist(columns)
# Set visible columns
self.visible_columns = columns
def reset_format(self):
"""Reset the dataset format.
All columns are visible.
"""
# All columns are visible
self.visible_columns = self.all_columns
def _example_or_batch_to_batch(
self, example_or_batch: Union[Example, Batch]
) -> Batch:
# Check if example_or_batch is a batch
is_batch = all(
[isinstance(v, List) for v in example_or_batch.values()]
) and self._columns_all_equal_length(example_or_batch)
# Convert to a batch if not
if not is_batch:
batch = {k: [v] for k, v in example_or_batch.items()}
else:
batch = example_or_batch
return batch
@classmethod
def _merge_batch_and_output(cls, batch: Batch, output: Batch):
"""Merge an output during .map() into a batch."""
combined = batch
for k in output.keys():
if k not in batch:
combined[k] = output[k]
else:
if isinstance(batch[k][0], dict) and isinstance(output[k][0], dict):
combined[k] = [
recmerge(b_i, o_i) for b_i, o_i in zip(batch[k], output[k])
]
else:
combined[k] = output[k]
return combined
@classmethod
def _mask_batch(cls, batch: Batch, boolean_mask: List[bool]):
"""Remove elements in `batch` that are masked by `boolean_mask`."""
return {
k: [e for i, e in enumerate(v) if boolean_mask[i]] for k, v in batch.items()
}
    @property
    def identifier(self):
        """Identifier: read-only handle to this DataPanel's `_identifier`."""
        return self._identifier
    def _set_features(self):
        """Set the features of the dataset.

        Feature inference is currently disabled: `info.features` is reset
        to None. The commented-out code shows the intended Arrow-schema
        based inference and is kept for reference.
        """
        with self.format():
            self.info.features = None  # Features.from_arrow_schema(
            # pa.Table.from_pydict(
            #     self[:1],
            # ).schema
            # )
    def add_column(
        self, name: str, data: AbstractColumn.Columnable, overwrite=False
    ) -> None:
        """Add a column to the dataset.

        Args:
            name: new column name; must be a `str` and must not collide
                with an existing column unless `overwrite=True`.
            data: anything accepted by `AbstractColumn.from_data`; the
                resulting column must match the dataset length.
            overwrite: allow replacing an existing column of the same name.
        """
        assert isinstance(
            name, str
        ), f"Column name must of type `str`, not `{type(name)}`."
        assert (name not in self.all_columns) or overwrite, (
            f"Column with name `{name}` already exists, "
            f"set `overwrite=True` to overwrite."
        )
        column = AbstractColumn.from_data(data)
        assert len(column) == len(self), (
            f"`add_column` failed. "
            f"Values length {len(column)} != dataset length {len(self)}."
        )
        # Add the column
        self._data[name] = column
        if name not in self.all_columns:
            # Brand-new column: register it in the schema and make it visible
            self.all_columns.append(name)
            self.visible_columns.append(name)
        # Set features
        self._set_features()
        logger.info(f"Added column `{name}` with length `{len(column)}`.")
    def remove_column(self, column: str) -> None:
        """Remove a column from the dataset.

        Drops the column data and removes the name from both the schema
        (`all_columns`) and the visible set. Asserts if absent.
        """
        assert column in self.all_columns, f"Column `{column}` does not exist."
        # Remove the column
        del self._data[column]
        self.all_columns = [col for col in self.all_columns if col != column]
        self.visible_columns = [col for col in self.visible_columns if col != column]
        # Set features
        self._set_features()
        logger.info(f"Removed column `{column}`.")
    def select_columns(self, columns: List[str]) -> Batch:
        """Select a subset of columns.

        Returns the raw name -> column mapping (not a new DataPanel).
        Membership is checked against `_data`, so hidden columns can also
        be selected. `tz` is presumably toolz, imported at file top.
        """
        for col in columns:
            assert col in self._data
        return tz.keyfilter(lambda k: k in columns, self._data)
@capture_provenance(capture_args=["axis"])
def append(
self,
dp: DataPanel,
axis: Union[str, int] = "rows",
suffixes: Tuple[str] = None,
overwrite: bool = False,
) -> DataPanel:
"""Append a batch of data to the dataset.
`example_or_batch` must have the same columns as the dataset
(regardless of what columns are visible).
"""
if axis == 0 or axis == "rows":
# append new rows
return meerkat.concat([self, dp], axis="rows")
elif axis == 1 or axis == "columns":
# append new columns
if len(dp) != len(self):
raise ValueError(
"Can only append DataPanels along axis 1 (columns) if they have the"
f"same length. {len(self)} != {len(dp)}"
)
shared = set(dp.visible_columns).intersection(set(self.visible_columns))
if not overwrite and shared:
if suffixes is None:
raise ValueError()
left_suf, right_suf = suffixes
data = {
**{k + left_suf if k in shared else k: v for k, v in self.items()},
**{k + right_suf if k in shared else k: v for k, v in dp.items()},
}
else:
data = {**dict(self.items()), **dict(dp.items())}
return self._clone(data=data)
else:
raise ValueError("DataPanel `axis` must be either 0 or 1.")
    def _add_index(self):
        """Add an index to the dataset: stringified row positions 0..n-1."""
        self.add_column("index", [str(i) for i in range(len(self))])
    def head(self, n: int = 5) -> DataPanel:
        """Get the first `n` examples of the DataPanel."""
        # Uses the `.lz` accessor (non-materializing view, defined elsewhere)
        return self.lz[:n]
    def tail(self, n: int = 5) -> DataPanel:
        """Get the last `n` examples of the DataPanel."""
        # Uses the `.lz` accessor (non-materializing view, defined elsewhere)
        return self.lz[-n:]
def _create_logdir(self):
"""Create and assign a directory for logging this dataset's files."""
if self.identifier.name == "RGDataset":
# TODO(karan): handle temporarily constructed datasets differently
self.logdir /= str(self.identifier)
self.logdir.mkdir(parents=True, exist_ok=True)
else:
self.logdir /= str(self.identifier)
self.logdir.mkdir(parents=True, exist_ok=True)
    def _autobuild_identifier(self) -> Identifier:
        """Automatically build an identifier for the dataset using available
        information (builder name, split, version)."""
        # Look for a name, otherwise assign a default
        _name = self.info.builder_name if self.info.builder_name else "RGDataset"
        # Check for split, version information
        split = str(self.split) if self.split else None
        version = str(self.version) if self.version else None
        # Add all available information to kwargs dict
        kwargs = {}
        if split:
            kwargs["split"] = split
        if version:
            kwargs["version"] = version
        # Create identifier
        return Identifier(_name=_name, **kwargs)
def _get(self, index, materialize: bool = False):
if isinstance(index, int):
# int index => single row (dict)
return {
k: self._data[k]._get(index, materialize=materialize)
for k in self.visible_columns
}
elif isinstance(index, str):
# str index => column selection (AbstractColumn)
if index in self.column_names:
return self._data[index]
raise AttributeError(f"Column {index} does not exist.")
# cases where `index` returns a datapanel
elif isinstance(index, slice):
# slice index => multiple row selection (DataPanel)
return self._clone(
data={
k: self._data[k]._get(index, materialize=materialize)
for k in self.visible_columns
}
)
elif (isinstance(index, tuple) or isinstance(index, list)) and len(index):
# tuple or list index => multiple row selection (DataPanel)
if isinstance(index[0], str):
if not set(index).issubset(self.visible_columns):
missing_cols = set(self.visible_columns) - set(index)
raise ValueError(f"DataPanel does not have columns {missing_cols}")
dp = self.view()
dp.visible_columns = index
return dp
return self._clone(
data={
k: self._data[k]._get(index, materialize=materialize)
for k in self.visible_columns
}
)
elif isinstance(index, np.ndarray):
if len(index.shape) != 1:
raise ValueError(
"Index must have 1 axis, not {}".format(len(index.shape))
)
# numpy array index => multiple row selection (DataPanel)
return self._clone(
data={
k: self._data[k]._get(index, materialize=materialize)
for k in self.visible_columns
}
)
elif isinstance(index, AbstractColumn):
# column index => multiple row selection (DataPanel)
return self._clone(
data={
k: self._data[k]._get(index, materialize=materialize)
for k in self.visible_columns
}
)
else:
raise TypeError("Invalid index type: {}".format(type(index)))
    # @capture_provenance(capture_args=[])
    def __getitem__(self, index):
        """Index rows/columns with materialization enabled (see `_get`)."""
        return self._get(index, materialize=True)
def get(self, column, value=None):
if column in self:
return self[column]
return value
    def __setitem__(self, index, value):
        # `dp[name] = data` adds (or silently replaces) a column
        self.add_column(name=index, data=value, overwrite=True)
    @property
    def has_index(self) -> bool:
        """Check if the dataset has an index column."""
        if self.column_names:
            return "index" in self.column_names
        # Just return True if the dataset is empty
        return True
    @classmethod
    def uncached_batch(cls, batch: Batch, copy=True) -> Batch:
        """Return batch with the "cache" and "slices" columns removed.

        With `copy=True` (default) the input batch is deep-copied first,
        so the caller's batch is untouched. `tz` is presumably toolz.
        """
        return tz.keyfilter(
            lambda k: k not in ["cache", "slices"], deepcopy(batch) if copy else batch
        )
    @classmethod
    def uncached_example(cls, example: Dict, copy=True) -> Dict:
        """Return example with the "cache" and "slices" columns removed.

        With `copy=True` (default) the example is deep-copied first.
        """
        return tz.keyfilter(
            lambda k: k not in ["cache", "slices"],
            deepcopy(example) if copy else example,
        )
@classmethod
def from_huggingface(cls, *args, **kwargs):
"""Load a Huggingface dataset as a DataPanel.
Use this to replace `datasets.load_dataset`, so
>>> dict_of_datasets = datasets.load_dataset('boolq')
becomes
>>> dict_of_datapanels = DataPanel.from_huggingface('boolq')
"""
# Load the dataset
dataset = datasets.load_dataset(*args, **kwargs)
if isinstance(dataset, dict):
return dict(
map(
lambda t: (t[0], cls(t[1])),
dataset.items(),
)
)
else:
return cls(dataset)
    @classmethod
    @capture_provenance()
    def from_columns(
        cls,
        columns: Dict[str, AbstractColumn],
        identifier: Identifier = None,
    ) -> DataPanel:
        """Create a Dataset from a dict of columns (name -> AbstractColumn)."""
        return cls(
            columns,
            identifier=identifier,
        )
    @classmethod
    @capture_provenance()
    def from_jsonl(
        cls,
        json_path: str,
        identifier: Identifier = None,
    ) -> DataPanel:
        """Load a dataset from a .jsonl file on disk, where each line of the
        json file consists of a single example.

        The first line fixes the column set; every subsequent line must
        contain (at least) those keys. The file is opened twice: once to
        read the header keys, once to collect the values.
        """
        with open(json_path) as f:
            data = {k: [] for k in json.loads(f.readline())}
        # Load the .jsonl file
        with open(json_path) as f:
            for line in f:
                line = json.loads(line)
                for k in data:
                    data[k].append(line[k])
        return cls(
            data,
            identifier=identifier
            if identifier
            else Identifier("Jsonl", jsonl=json_path),
        )
    @classmethod
    # @capture_provenance()
    def from_batch(
        cls,
        batch: Batch,
        identifier: Identifier = None,
    ) -> DataPanel:
        """Convert a batch (dict of equal-length columns) to a Dataset."""
        return cls(batch, identifier=identifier)
    @classmethod
    @capture_provenance()
    def from_batches(
        cls,
        batches: Sequence[Batch],
        identifier: Identifier = None,
    ) -> DataPanel:
        """Convert a list of batches to a dataset.

        Batches are merged key-wise, concatenating the per-key lists
        (`tz` is presumably toolz, imported at file top).
        """
        return cls.from_batch(
            tz.merge_with(
                tz.compose(list, tz.concat),
                *batches,
            ),
            identifier=identifier,
        )
    @classmethod
    @capture_provenance()
    def from_dict(
        cls,
        d: Dict,
        identifier: Identifier = None,
    ) -> DataPanel:
        """Convert a dictionary to a dataset.
        Alias for Dataset.from_batch(..).
        """
        return cls.from_batch(
            batch=d,
            identifier=identifier,
        )
    @classmethod
    @capture_provenance()
    def from_pandas(
        cls,
        df: pd.DataFrame,
        identifier: Identifier = None,
    ):
        """Create a Dataset from a pandas DataFrame (one column per Series)."""
        return cls.from_batch(
            df.to_dict("series"),
            identifier=identifier,
        )
    @classmethod
    @capture_provenance(capture_args=["filepath"])
    def from_csv(cls, filepath: str, *args, **kwargs):
        """Create a Dataset from a csv file.
        Args:
            filepath (str): The file path or buffer to load from.
                Same as :func:`pandas.read_csv`.
            *args: Argument list for :func:`pandas.read_csv`.
            **kwargs: Keyword arguments for :func:`pandas.read_csv`.
        Returns:
            DataPanel: The constructed datapanel.
        """
        # Delegate parsing to pandas, then wrap the frame
        return cls.from_pandas(pd.read_csv(filepath, *args, **kwargs))
    @classmethod
    @capture_provenance()
    def from_feather(
        cls,
        path: str,
        identifier: Identifier = None,
    ):
        """Create a Dataset from a feather file on disk."""
        return cls.from_batch(
            pd.read_feather(path).to_dict("list"),
            identifier=Identifier("Feather", path=path)
            if not identifier
            else identifier,
        )
    @capture_provenance()
    def to_pandas(self) -> pd.DataFrame:
        """Convert a Dataset to a pandas DataFrame (visible columns only,
        since `items()` iterates the visible columns)."""
        return pd.DataFrame({name: column.to_pandas() for name, column in self.items()})
    def to_jsonl(self, path: str) -> None:
        """Save a Dataset to a jsonl file, one example (row dict) per line."""
        with jsonlines.open(path, mode="w") as writer:
            for example in self:
                writer.write(example)
def _get_collate_fns(self, columns: Iterable[str] = None):
columns = self._data.keys() if columns is None else columns
return {name: self._data[name].collate for name in columns}
    def _collate(self, batch: List):
        """Collate a list of example dicts into a single DataPanel.

        Transposes list-of-dicts into dict-of-lists, then applies each
        column's own collate function. `tz` is presumably toolz.
        """
        batch = tz.merge_with(list, *batch)
        column_to_collate = self._get_collate_fns(batch.keys())
        new_batch = {}
        for name, values in batch.items():
            new_batch[name] = column_to_collate[name](values)
        dp = self._clone(data=new_batch)
        return dp
    @staticmethod
    def _convert_to_batch_fn(
        function: Callable, with_indices: bool, materialize: bool = True
    ) -> callable:
        """Wrap a per-example `function` as a batched function; delegates
        to the module-level `convert_to_batch_fn` (defined elsewhere)."""
        return convert_to_batch_fn(
            function=function, with_indices=with_indices, materialize=materialize
        )
    def batch(
        self,
        batch_size: int = 1,
        drop_last_batch: bool = False,
        num_workers: int = 0,
        materialize: bool = True,
        *args,
        **kwargs,
    ):
        """Batch the dataset, yielding DataPanels of up to `batch_size` rows.

        Cell columns and non-cell ("batch") columns are loaded through two
        separate torch DataLoaders and zipped back together.

        Args:
            batch_size: integer batch size
            drop_last_batch: drop the last batch if its smaller than batch_size
            num_workers: DataLoader worker count
            materialize: load column contents eagerly (vs. the lazy `.lz` view)
        Returns:
            batches of data
        """
        # Split visible columns by whether they need per-cell collation
        cell_columns, batch_columns = [], []
        for name, column in self.items():
            if isinstance(column, CellColumn):
                cell_columns.append(name)
            else:
                batch_columns.append(name)
        if batch_columns:
            # Pre-compute contiguous index chunks and hand them to the
            # DataLoader as a sampler (batch_size=None disables torch's
            # own batching).
            batch_indices = []
            indices = np.arange(len(self))
            for i in range(0, len(self), batch_size):
                if drop_last_batch and i + batch_size > len(self):
                    continue
                batch_indices.append(indices[i : i + batch_size])
            batch_dl = torch.utils.data.DataLoader(
                self[batch_columns] if materialize else self[batch_columns].lz,
                sampler=batch_indices,
                batch_size=None,
                batch_sampler=None,
                drop_last=drop_last_batch,
                num_workers=num_workers,
                *args,
                **kwargs,
            )
        if cell_columns:
            cell_dl = torch.utils.data.DataLoader(
                self[cell_columns] if materialize else self[cell_columns].lz,
                batch_size=batch_size,
                collate_fn=self._collate,
                drop_last=drop_last_batch,
                num_workers=num_workers,
                *args,
                **kwargs,
            )
        # Zip the two loaders back together (or yield from whichever exists)
        if batch_columns and cell_columns:
            for cell_batch, batch_batch in zip(cell_dl, batch_dl):
                yield self._clone(data={**cell_batch._data, **batch_batch._data})
        elif batch_columns:
            for batch_batch in batch_dl:
                yield batch_batch
        elif cell_columns:
            for cell_batch in cell_dl:
                yield cell_batch
    @capture_provenance(capture_args=["with_indices"])
    def update(
        self,
        function: Optional[Callable] = None,
        with_indices: bool = False,
        input_columns: Optional[Union[str, List[str]]] = None,
        is_batched_fn: bool = False,
        batch_size: Optional[int] = 1,
        remove_columns: Optional[List[str]] = None,
        num_workers: int = 0,
        mmap: bool = False,
        materialize: bool = True,
        pbar: bool = False,
        **kwargs,
    ) -> DataPanel:
        """Update the columns of the dataset.

        Runs `function` (which must return a dict) over the dataset via
        `.map()` and merges the resulting columns into a fresh view.
        `self` is not modified; a new DataPanel is returned.

        Args:
            function: per-example (or per-batch) callable returning a dict
                of new/updated column values.
            with_indices: pass example indices to `function`.
            input_columns: restrict visible columns while mapping.
            is_batched_fn: `function` already operates on batches.
            remove_columns: columns to drop from the result.
        """
        # TODO(karan): make this fn go faster
        # most of the time is spent on the merge, speed it up further
        # Return if the function is None
        if function is None:
            # NOTE(review): log text says "returning None" but `self` is
            # returned unchanged (here and in the empty case) — confirm intent.
            logger.info("`function` None, returning None.")
            return self
        # Return if `self` has no examples
        if not len(self):
            logger.info("Dataset empty, returning None.")
            return self
        # Get some information about the function
        with self.format(input_columns):
            function_properties = self._inspect_function(
                function, with_indices, is_batched_fn, materialize=materialize
            )
            assert (
                function_properties.dict_output
            ), f"`function` {function} must return dict."
        if not is_batched_fn:
            # Convert to a batch function
            function = convert_to_batch_fn(
                function, with_indices=with_indices, materialize=materialize
            )
            logger.info(f"Converting `function` {function} to batched function.")
        # Update always returns a new dataset
        logger.info("Running update, a new dataset will be returned.")
        # Copy the ._data dict with a reference to the actual columns
        new_dp = self.view()
        # Calculate the values for the new columns using a .map()
        output = new_dp.map(
            function=function,
            with_indices=with_indices,
            is_batched_fn=True,
            batch_size=batch_size,
            num_workers=num_workers,
            input_columns=input_columns,
            mmap=mmap,
            materialize=materialize,
            pbar=pbar,
        )
        # Add new columns for the update (the index column is preserved)
        for col, vals in output._data.items():
            if col == "index":
                continue
            new_dp.add_column(col, vals, overwrite=True)
        # Remove columns
        if remove_columns:
            for col in remove_columns:
                new_dp.remove_column(col)
            logger.info(f"Removed columns {remove_columns}.")
        return new_dp
    def map(
        self,
        function: Optional[Callable] = None,
        with_indices: bool = False,
        input_columns: Optional[Union[str, List[str]]] = None,
        is_batched_fn: bool = False,
        batch_size: Optional[int] = 1,
        drop_last_batch: bool = False,
        num_workers: int = 0,
        output_type: type = None,
        mmap: bool = False,
        materialize: bool = True,
        pbar: bool = False,
        **kwargs,
    ) -> Optional[Union[Dict, List, AbstractColumn]]:
        """Map `function` over the dataset.

        Restricts visibility to `input_columns` (defaulting to the
        currently visible columns) for the duration of the map, then
        delegates to the parent-class implementation.
        """
        input_columns = self.visible_columns if input_columns is None else input_columns
        with self.format(input_columns):
            return super().map(
                function=function,
                with_indices=with_indices,
                is_batched_fn=is_batched_fn,
                batch_size=batch_size,
                drop_last_batch=drop_last_batch,
                num_workers=num_workers,
                output_type=output_type,
                mmap=mmap,
                materialize=materialize,
                pbar=pbar,
                **kwargs,
            )
    @capture_provenance(capture_args=["function"])
    def filter(
        self,
        function: Optional[Callable] = None,
        with_indices=False,
        input_columns: Optional[Union[str, List[str]]] = None,
        is_batched_fn: bool = False,
        batch_size: Optional[int] = 1,
        drop_last_batch: bool = False,
        num_workers: int = 0,
        materialize: bool = True,
        pbar: bool = False,
        **kwargs,
    ) -> Optional[DataPanel]:
        """Filter operation on the DataPanel.

        Maps the boolean-valued `function` over the dataset and returns a
        new DataPanel restricted to the rows where it was True. Returns
        None when `function` is None or the DataPanel is empty.
        """
        # Just return if the function is None
        if function is None:
            logger.info("`function` None, returning None.")
            return None
        # Return if `self` has no examples
        if not len(self):
            logger.info("DataPanel empty, returning None.")
            return None
        # Get some information about the function
        with self.format(input_columns):
            function_properties = self._inspect_function(
                function,
                with_indices,
                is_batched_fn=is_batched_fn,
                materialize=materialize,
            )
            assert function_properties.bool_output, "function must return boolean."
        # Map to get the boolean outputs and indices
        logger.info("Running `filter`, a new DataPanel will be returned.")
        outputs = self.map(
            function=function,
            with_indices=with_indices,
            input_columns=input_columns,
            is_batched_fn=is_batched_fn,
            batch_size=batch_size,
            drop_last_batch=drop_last_batch,
            num_workers=num_workers,
            materialize=materialize,
            pbar=pbar,
        )
        indices = np.where(outputs)[0]
        # filter returns a new datapanel: a view whose columns are
        # restricted to the selected rows via `visible_rows`
        new_datapanel = self.view()
        for column in new_datapanel._data.values():
            column.visible_rows = indices
        return new_datapanel
    def merge(
        self,
        right: meerkat.DataPanel,
        how: str = "inner",
        on: Union[str, List[str]] = None,
        left_on: Union[str, List[str]] = None,
        right_on: Union[str, List[str]] = None,
        sort: bool = False,
        suffixes: Sequence[str] = ("_x", "_y"),
        validate=None,
        keep_indexes: bool = False,
    ):
        """Merge this DataPanel with `right` (pandas-style join).

        Thin wrapper delegating to `meerkat.merge`; see that function for
        the parameter semantics.
        """
        from meerkat import merge
        return merge(
            self,
            right,
            how=how,
            on=on,
            left_on=left_on,
            right_on=right_on,
            sort=sort,
            suffixes=suffixes,
            validate=validate,
            keep_indexes=keep_indexes,
        )
def items(self):
for name in self.visible_columns:
yield name, self._data[name]
    def keys(self):
        """Return the visible column names (dict-like `keys`)."""
        return self.visible_columns
    def values(self):
        """Yield each visible column (dict-like `values`)."""
        for name in self.visible_columns:
            yield self._data[name]
@classmethod
def read(
cls,
path: str,
*args,
**kwargs,
) -> DataPanel:
"""Load a DataPanel stored on disk."""
# Load the metadata
metadata = dict(
yaml.load(open(os.path.join(path, "meta.yaml")), Loader=yaml.FullLoader)
)
state = dill.load(open(os.path.join(path, "state.dill"), "rb"))
# Load the columns
if not metadata["write_together"]:
data = {
name: dtype.read(os.path.join(path, "columns", name), *args, **kwargs)
for name, dtype in metadata["column_dtypes"].items()
}
state["_data"] = data
# Create a DataPanel from the loaded state
datapanel = cls.from_state(state)
return datapanel
def write(
self,
path: str,
write_together: bool = False,
) -> None:
"""Save a DataPanel to disk."""
# Make all the directories to the path
os.makedirs(path, exist_ok=True)
# Get the DataPanel state
state = self.get_state()
# Get the metadata
metadata = {
"dtype": type(self),
"column_dtypes": {name: type(col) for name, col in self._data.items()},
"len": len(self),
"write_together": write_together,
}
if not write_together:
if "_data" not in state:
raise ValueError(
"DataPanel's state must include `_data` when using "
"`write_together=False`."
)
del state["_data"]
# Create a directory for the columns at `path`
columns_path = os.path.join(path, "columns")
os.makedirs(columns_path, exist_ok=True)
# Save each column in the DataPanel separately
for name, column in self._data.items():
column.write(os.path.join(columns_path, name))
# Write the state
state_path = os.path.join(path, "state.dill")
dill.dump(state, open(state_path, "wb"))
# Save the metadata as a yaml file
metadata_path = os.path.join(path, "meta.yaml")
yaml.dump(metadata, open(metadata_path, "w"))
    @classmethod
    def from_state(cls, state: Dict, *args, **kwargs) -> DataPanel:
        """Rebuild a DataPanel from a state dict (keys per `_state_keys`)."""
        datapanel = super(DataPanel, cls).from_state(state, *args, **kwargs)
        # Recreate derived, non-pickled artifacts
        datapanel._create_logdir()
        datapanel._set_features()
        return datapanel
    @classmethod
    def _state_keys(cls) -> set:
        """List of attributes that describe the state of the object.

        These are the attributes pickled by `write` / restored by `read`.
        """
        return {
            "_identifier",
            "_data",
            "all_columns",
            "_visible_columns",
            "_info",
            "_split",
        }
    def _clone_kwargs(self) -> Dict[str, Any]:
        """Returns __init__ kwargs for instantiating new object.
        This function returns the default parameters that should be plumbed
        from the current instance to the new instance.
        This is the API that should be used by subclasses of :class:`DataPanel`.
        Returns:
            Dict[str, Any]: The keyword arguments for initialization
        """
        # identifier, info, and split are not passed by default because they have
        # not been plumbed so far.
        return {}
def _clone(self, data=None, **kwargs):
default_kwargs = self._clone_kwargs()
if data is None:
data = kwargs.pop("data", self.data)
if kwargs:
default_kwargs.update(kwargs)
return self.__class__(data, **default_kwargs)
| StarcoderdataPython |
237242 | <reponame>tgbugs/mlab
#!/usr/bin/env python3.3
"""Main file for database stuff
Usage:
main.py [(-e | --echo) (-p | --pgsql) (-w | --wipe) (-s | --setup) (-t | --test) (-i | --ipython)]
main.py (-h | --help )
Options:
-h --help show this
-e --echo enable echo
-p --pgsql use postgres
-w --wipe wipe the database
-s --setup setupDB
-t --test run tests
-i --ipython drop into ipython after everything else is done
"""
from docopt import docopt
from datetime import datetime
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from sqlalchemy.orm import aliased
from sqlalchemy.engine import Engine
from database.models import *
from database.models.base import initDBScience
from database.engines import sqliteMem, pgEng, pgTest
from database.setupDB import populateConstraints, populateTables
from database.TESTS import run_tests
from database.table_logic import logic_StepEdge
# Optional remote debugger; keep running without it if absent.
try:
    import rpdb2
except ImportError:  # previously a bare `except:` that hid every error
    pass
# Project debugging helpers (printD etc. are used throughout this module)
from debug import TDB, ploc
tdb = TDB()
printD = tdb.printD
printFD = tdb.printFuncDict
tdboff = tdb.tdbOff
###-------------
### print stuff
###-------------
def printStuff(cons=True,mice=True,data=True,notes=True):
    """Dump selected tables to stdout for inspection.

    Flags toggle each section. NOTE(review): relies on a module-global
    `session` (assigned in the __main__ block); only safe to call with a
    truthy flag after that assignment — confirm call sites.
    """
    if cons:
        print('\n###***constraints***')
        [printD(c,'\n') for c in session.query(SI_PREFIX)]
        [printD(c,'\n') for c in session.query(SI_UNIT)]
        [printD(c,'\n') for c in session.query(SEX)]
        [printD(c,'\n') for c in session.query(HardwareType)]
    if mice:
        print('\n###***mice***')
        for mouse in session.query(Mouse):
            print('\n',mouse)
        print('\n###***sires***')
        for s in session.query(Sire):
            print('\n',s)
        print('\n###***dams***')
        for d in session.query(Dam):
            print('\n',d)
        print('\n###***MatingRecords***')
        for mate in session.query(MatingRecord):
            print('\n',mate)
        print('\n###***Litters***')
        for lit in session.query(Litter):
            print('\n',lit)
    if data:
        for d in session.query(DataFile):
            #print('\n',[t for t in d.__dict__.values()])
            print('\n',[t for t in d.experiment.person.__dict__.values()])
    if notes:
        for note in session.query(Note):
            print('\n',note)
###----------
### Test it!
###----------
def main(echo=False,postgres=False,wipe_db=False,setupDB=False,test=False):
    """Create the DB engine/session, optionally set up and populate the
    database, optionally run the test suite, and return the session.

    Args mirror the docopt flags: echo SQL, use postgres (vs in-memory
    sqlite), wipe the DB first, run setup/population, run tests.
    """
    #create engine
    if postgres: #FIXME THIS HURTS ME OW OW OW
        if test:
            engine=pgTest(echo=echo,wipe_db=wipe_db) #double sure
        else:
            engine=pgEng(echo=echo,wipe_db=wipe_db)
        #engine=pgEng #FIXME have to use this to set up the global db >_<
        if setupDB:
            session=initDBScience(engine) #imported from base.py via *
            #add table logic
            logic_StepEdge(session)
            #populate constraint tables
            populateConstraints(session)
            populateTables(session)
        else:
            session=Session(engine) #imported from base.py via *
            #add table logic
            logic_StepEdge(session)
    else:
        engine=sqliteMem(echo=echo) #XXX sqlite wont autoincrement compositie primary keys >_< DERP
        session=initDBScience(engine) #imported from base.py via *
        #add table logic
        logic_StepEdge(session)
        #populate constraint tables
        populateConstraints(session)
        populateTables(session)
    #do some tests!
    if test:
        try:
            run_tests(session)
        except Exception:
            # previously `except: raise` followed by an unreachable print;
            # now the failure is reported before re-raising
            print('tests failed')
            raise
    #print stuff!
    printStuff(cons=0,mice=0,data=0,notes=0)
    #query stuff
    #queryAll(session)
    #session.query(Cell).all()
    return session
if __name__=='__main__':
    # docopt parses the module docstring at the top of this file as the CLI spec
    args=docopt(__doc__, version='Main .0002')
    #global ipython #FIXME LOL MASSIVE HACK
    session=main(args['--echo'],args['--pgsql'],args['--wipe'],args['--setup'],args['--test']) #THAT WAS EASY
    if args['--ipython']:
        # Drop into an interactive shell with the session bound to `s`
        from rig.ipython import embed
        s=session
        embed()
| StarcoderdataPython |
59037 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from nose.tools import eq_, ok_
from werkzeug.test import Client
from werkzeug.wrappers import BaseResponse
from clastic import Application
from clastic.render import JSONRender, JSONPRender, render_basic
from common import (hello_world_str,
hello_world_html,
hello_world_ctx,
complex_context)
import json
_CUR_DIR = os.path.dirname(__file__)
def test_json_render(render_json=None):
    """Nose-style generator test for clastic's JSONRender.

    Yields (assert_fn, *args) tuples; each yield is run as one test case.
    """
    if render_json is None:
        render_json = JSONRender(dev_mode=True)
    app = Application([('/', hello_world_ctx, render_json),
                       ('/<name>/', hello_world_ctx, render_json),
                       ('/beta/<name>/', complex_context, render_json)])
    yield ok_, callable(app.routes[0]._execute)
    yield ok_, callable(app.routes[0]._render)
    c = Client(app, BaseResponse)
    resp = c.get('/')
    yield eq_, resp.status_code, 200
    resp_data = json.loads(resp.data)
    yield eq_, resp_data['name'], 'world'
    # URL parameter should flow into the rendered JSON
    resp = c.get('/Kurt/')
    yield eq_, resp.status_code, 200
    resp_data = json.loads(resp.data)
    yield eq_, resp_data['name'], 'Kurt'
    # Complex context: extra fields beyond the name
    resp = c.get('/beta/Rajkumar/')
    yield eq_, resp.status_code, 200
    resp_data = json.loads(resp.data)
    yield eq_, resp_data['name'], 'Rajkumar'
    yield ok_, resp_data['date']
    yield ok_, len(resp_data) > 4
def test_jsonp_render(render_json=None):
    """Nose-style generator test for clastic's JSONPRender (callback wrapping)."""
    if render_json is None:
        render_json = JSONPRender(qp_name='callback', dev_mode=True)
    app = Application([('/', hello_world_ctx, render_json),
                       ('/<name>/', hello_world_ctx, render_json),
                       ('/beta/<name>/', complex_context, render_json)])
    c = Client(app, BaseResponse)
    resp = c.get('/?callback=test_callback')
    yield eq_, resp.status_code, 200
    yield ok_, resp.data.startswith('test_callback')
    yield ok_, 'world' in resp.data
    # NOTE(review): this second request is identical to the first —
    # possibly meant to exercise a different URL/param; confirm intent.
    resp = c.get('/?callback=test_callback')
    yield eq_, resp.status_code, 200
    yield ok_, resp.data.startswith('test_callback')
    yield ok_, 'world' in resp.data
#def test_default_json_render():
# from clastic.render import render_json
# for t in test_json_render(render_json):
# yield t
def test_default_render():
    """Nose-style generator test for clastic's render_basic content negotiation."""
    app = Application([('/', hello_world_ctx, render_basic),
                       ('/<name>/', hello_world_ctx, render_basic),
                       ('/text/<name>/', hello_world_str, render_basic),
                       ('/html/<name>/', hello_world_html, render_basic),
                       ('/beta/<name>/', complex_context, render_basic)])
    yield ok_, callable(app.routes[0]._execute)
    yield ok_, callable(app.routes[0]._render)
    c = Client(app, BaseResponse)
    resp = c.get('/')  # test simple json with endpoint default
    yield eq_, resp.status_code, 200
    resp_data = json.loads(resp.data)
    yield eq_, resp_data['name'], 'world'
    resp = c.get('/Kurt/')  # test simple json with url param
    yield eq_, resp.status_code, 200
    resp_data = json.loads(resp.data)
    yield eq_, resp_data['name'], 'Kurt'
    resp = c.get('/beta/Rajkumar/')  # test fancy json
    yield eq_, resp.status_code, 200
    resp_data = json.loads(resp.data)
    yield eq_, resp_data['name'], 'Rajkumar'
    yield ok_, resp_data['date']
    yield ok_, len(resp_data) > 4
    resp = c.get('/text/Noam/')  # test text
    yield eq_, resp.status_code, 200
    yield eq_, resp.data, 'Hello, Noam!'
    resp = c.get('/html/Asia/')  # test basic html
    yield eq_, resp.status_code, 200
    yield ok_, 'text/html' in resp.headers['Content-Type']
| StarcoderdataPython |
3377664 | <filename>train.py
'''
Train a fully convolutional network for earthquake magnitude.
'''
from __future__ import print_function
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "5,6"
import keras
from keras.datasets import mnist
from keras import losses
from keras import optimizers
from keras.models import Sequential
from keras.models import Model
from keras.layers import Input, Dense, Dropout, Flatten,Activation,Reshape
from keras.layers import Conv1D,Conv2D,MaxPooling1D,MaxPooling2D,BatchNormalization
from keras.layers import UpSampling1D,UpSampling2D,AveragePooling1D,AveragePooling2D
from keras.layers import ZeroPadding1D,ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras import backend as K
from keras.utils import plot_model
import scipy.stats as stats
import read2
import numpy as np
import time
import matplotlib.pyplot as plt
import sys
import math
np.random.seed(7)  # fixed seed for reproducibility
# --- data loading (read2 is a project-local SEG-Y reader; paths are
# placeholders — presumably replaced per experiment, confirm before running) ---
num=1000000 # num of training samples
num2=10000 # num of test samples
sm,sn,x_train,y_train=read2.load_data(sgynam='TRAIN DATA PATH',sgyf1=1,sgyt1=num,step1=1,sgyf2=1,sgyt2=1,step2=1,shuffle='true')
sm,sn,x_test,y_test=read2.load_data(sgynam='TEST DATA PATH',sgyf1=1,sgyt1=num2,step1=1,sgyf2=1,sgyt2=1,step2=1,shuffle='true')
batch_size = 4
epochs = 100
# input image dimensions
img_rows, img_cols = sm, sn
# Add a trailing channel dimension for Conv2D and cast to float32
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
y_train = y_train.astype('float32')
y_test = y_test.astype('float32')
# --- encoder: Conv2D + MaxPooling + LeakyReLU, progressively widening ---
main_input = Input(shape=input_shape,name='main_input')
x=Conv2D(8, kernel_size=(3,3),padding='valid')(main_input)
x=MaxPooling2D(pool_size=(1,4),padding='valid')(x)
x=LeakyReLU(alpha=0.2)(x)
x=Conv2D(16, kernel_size=(3,3),padding='valid')(x)
x=MaxPooling2D(pool_size=(1,4),padding='valid')(x)
x=LeakyReLU(alpha=0.2)(x)
x=Conv2D(32, kernel_size=(3,3),padding='valid')(x)
x=MaxPooling2D(pool_size=(1,4),padding='valid')(x)
x=LeakyReLU(alpha=0.2)(x)
x=Conv2D(64, kernel_size=(3,3),padding='valid')(x)
x=MaxPooling2D(pool_size=(1,2),padding='valid')(x)
x=LeakyReLU(alpha=0.2)(x)
x=Conv2D(128, kernel_size=(3,3),padding='valid')(x)
x=MaxPooling2D(pool_size=(2,2),padding='valid')(x)
x=LeakyReLU(alpha=0.2)(x)
x=Conv2D(256, kernel_size=(3,3),padding='valid')(x)
x=MaxPooling2D(pool_size=(2,2),padding='valid')(x)
x=LeakyReLU(alpha=0.2)(x)
x=Conv2D(512, kernel_size=(1,3),padding='valid')(x)
x=MaxPooling2D(pool_size=(1,2),padding='valid')(x)
x=LeakyReLU(alpha=0.2)(x)
x=Conv2D(1024, kernel_size=(1,3),padding='valid')(x)
x=MaxPooling2D(pool_size=(1,2),padding='valid')(x)
x=LeakyReLU(alpha=0.2)(x)
# --- decoder: Conv2D + UpSampling, progressively narrowing ---
x=Conv2D(2048, kernel_size=(1,1),padding='valid')(x)
x=UpSampling2D(size=(1,2))(x) #1
x=LeakyReLU(alpha=0.2)(x)
x=Conv2D(1024, kernel_size=(1,3),padding='same')(x)
x=UpSampling2D(size=(1,2))(x) #2
x=LeakyReLU(alpha=0.2)(x)
x=Conv2D(512, kernel_size=(1,3),padding='same')(x)
x=UpSampling2D(size=(1,2))(x) #3
x=LeakyReLU(alpha=0.2)(x)
x=Conv2D(256, kernel_size=(1,3),padding='same')(x)
x=UpSampling2D(size=(1,2))(x) #4
x=LeakyReLU(alpha=0.2)(x)
x=Conv2D(128, kernel_size=(1,3),padding='same')(x)
x=UpSampling2D(size=(1,2))(x) #5
x=LeakyReLU(alpha=0.2)(x)
x=Conv2D(64, kernel_size=(1,3),padding='same')(x)
x=UpSampling2D(size=(1,2))(x) #6
x=LeakyReLU(alpha=0.2)(x)
x=Conv2D(32, kernel_size=(1,3),padding='same')(x)
x=UpSampling2D(size=(1,2))(x) #7
x=LeakyReLU(alpha=0.2)(x)
x=Conv2D(16, kernel_size=(1,3),padding='same')(x)
x=UpSampling2D(size=(1,2))(x) #8
x=LeakyReLU(alpha=0.2)(x)
x=Conv2D(8, kernel_size=(1,3),padding='same')(x)
x=UpSampling2D(size=(1,2))(x) #9
x=LeakyReLU(alpha=0.2)(x)
x=Conv2D(4, kernel_size=(1,3),padding='same')(x)
x=UpSampling2D(size=(1,2))(x) #9
x=LeakyReLU(alpha=0.2)(x)
# Single-channel output flattened to a 1024-sample trace
main_output=Conv2D(1, kernel_size=(1,3),padding='same')(x)
main_output=Reshape((1024,))(main_output)
# --- compile and train (MSE regression), then save weights+architecture ---
model = Model(inputs=[main_input],outputs=[main_output])
optimizer = keras.optimizers.Adadelta(lr=0.2,rho=0.95,epsilon=1e-06)
model.compile(loss='mse',
              optimizer=optimizer,
              metrics=['accuracy'])
history_callback=model.fit([x_train],
                           [y_train],
                           batch_size=batch_size,
                           epochs=epochs,
                           verbose=1,
                           validation_data=([x_test], [y_test]))
model.save('test.cnn')
| StarcoderdataPython |
4844060 | <reponame>richardstrnad/acitoolkit
#!/usr/bin/env python
"""
Find out where a contract has been imported and consumed on an EPG.
"""
from acitoolkit import (Credentials, Session, Tenant, ContractInterface, AppProfile,
EPG)
from tabulate import tabulate
data = []
def _collect_epg_rows(session, tenant, imported_contract):
    """Append one (contract, tenant, app profile, EPG) row to ``data``
    for every EPG of every app profile in *tenant*."""
    for app in AppProfile.get(session, tenant):
        for epg in EPG.get(session, app, tenant):
            data.append((imported_contract.name, tenant.name, app.name, epg.name))


def main():
    """
    Main execution routine.

    Logs in to the APIC, walks every tenant's ContractInterface children
    and, for each imported contract matching the requested contract name
    (and optionally tenant name), records the tenant/app/EPG rows and
    prints them as a table.
    """
    description = ('Simple application that logs on to the APIC'
                   ' and displays all the tenant info of the contract_interface related to the imported contract.')
    creds = Credentials('apic', description)
    creds.add_argument("-t", "--tenant_name", help="Tenant Name of where the contract is created")
    creds.add_argument("-i", "--contract_name", help="Imported Contract Name")
    args = creds.get()
    if (args.tenant_name is not None) and (args.contract_name is None):
        # BUG FIX: raw_input() only exists on Python 2 and raised a
        # NameError on Python 3; fall back to input() when it is missing.
        try:
            prompt = raw_input
        except NameError:
            prompt = input
        args.contract_name = prompt("Contract Name: ")
    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        # BUG FIX: bail out instead of querying with an unauthenticated session.
        return
    tenants = Tenant.get_deep(session)
    for tenant in tenants:
        contracts_interfaces = tenant.get_children(only_class=ContractInterface)
        for contract_interface in contracts_interfaces:
            imported_contract = contract_interface.get_import_contract()
            if imported_contract is None:
                continue
            if args.tenant_name is not None:
                # Keep only contracts matching both the name and tenant filter.
                if (imported_contract.name == args.contract_name) and (imported_contract.get_parent().name == args.tenant_name):
                    _collect_epg_rows(session, tenant, imported_contract)
            else:
                _collect_epg_rows(session, tenant, imported_contract)
    print(tabulate(data, headers=["IMPORTED_CONTRACT", "TENANT", "APP_PROFILE", "EPG"]))


if __name__ == '__main__':
    main()
| StarcoderdataPython |
6502674 | <gh_stars>1-10
# Exercise 087 - More on Array in Python
"""Enhance the previous challenge by showing at the end:
A) The sum of all even values entered.
B) The sum of the values in the third column.
C) The largest value of the second row."""
# Read a 3x3 matrix of integers from the user, one cell at a time.
matrix = []
for i in range(0, 3):  # For each line.
    row = []
    for j in range(0, 3):  # For each column.
        row.append(int(input(f"Enter a number for [{i}, {j}]: \n")))
    matrix.append(row)
for row in matrix:
    print(row)
# A) The sum of all even values entered.
# Renamed accumulator from `sum` so the builtin sum() is not shadowed.
even_total = 0
for row in matrix:
    for value in row:
        if value % 2 == 0:
            even_total += value
print(f"Sum of even values: {even_total}.")
# B) The sum of the values in the third column (index 2).
third_column_total = sum(row[2] for row in matrix)
print(f"Third column sum: {third_column_total}.")
# C) The largest value of the second row (index 1).
# max() also fixes the original bug where initializing "largest" to 0 would
# wrongly report 0 for an all-negative second row.
second_row_largest = max(matrix[1])
print(f"Highest value of the second row: {second_row_largest}.")
| StarcoderdataPython |
298000 | # Copyright 2020–2021 Cirq on IQM developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import uuid
import cirq
import pytest
from iqm_client.iqm_client import (IQMClient, RunResult, RunStatus,
SingleQubitMapping)
from mockito import ANY, mock, when
from cirq_iqm import Adonis, Valkmusa
from cirq_iqm.iqm_sampler import IQMSampler, serialize_qubit_mapping
@pytest.fixture()
def circuit():
    """A two-qubit circuit containing a single joint measurement."""
    qubits = (cirq.NamedQubit('q1 log.'), cirq.NamedQubit('q2 log.'))
    measurement = cirq.measure(*qubits, key='result')
    return cirq.Circuit(measurement)
@pytest.fixture()
def qubit_mapping():
    """Logical-to-physical qubit name mapping used by the sampler fixtures."""
    logical_names = ('q1 log.', 'q2 log.')
    physical_names = ('QB1', 'QB2')
    return dict(zip(logical_names, physical_names))
@pytest.fixture()
def adonis_sampler(base_url, settings_dict, qubit_mapping):
    """An IQMSampler targeting an Adonis device with serialized settings."""
    serialized_settings = json.dumps(settings_dict)
    return IQMSampler(base_url, serialized_settings, Adonis(), qubit_mapping)
def test_serialize_qubit_mapping(qubit_mapping):
    """The mapping dict is converted into SingleQubitMapping objects."""
    expected = [
        SingleQubitMapping(logical_name='q1 log.', physical_name='QB1'),
        SingleQubitMapping(logical_name='q2 log.', physical_name='QB2'),
    ]
    assert serialize_qubit_mapping(qubit_mapping) == expected
@pytest.mark.usefixtures('unstub')
def test_run_sweep_executes_circuit(adonis_sampler, circuit):
    """run_sweep submits the circuit through the client and wraps the
    measurements as cirq.Result objects."""
    client = mock(IQMClient)
    run_id = uuid.uuid4()
    # Fake a finished run; measurement payload shape mirrors the IQM client API.
    run_result = RunResult(status=RunStatus.READY, measurements={'some stuff': [[0], [1]]}, message=None)
    when(client).submit_circuit(ANY, ANY, ANY).thenReturn(run_id)
    when(client).wait_for_results(run_id).thenReturn(run_result)
    # Inject the stubbed client so no network traffic happens.
    adonis_sampler._client = client
    results = adonis_sampler.run_sweep(circuit, None, repetitions=2)
    assert isinstance(results[0], cirq.Result)
def test_credentials_are_passed_to_client(settings_dict):
    """Username and API key given to the sampler end up in the client credentials."""
    username = 'a fake username'
    api_key = 'a fake api key'
    sampler = IQMSampler('some url', json.dumps(settings_dict), Adonis(), None, username=username, api_key=api_key)
    stored_credentials = sampler._client._credentials
    assert stored_credentials[0] == username
    assert stored_credentials[1] == api_key
def test_circuit_with_incorrect_device(adonis_sampler):
    """Running a circuit built for a different device raises a ValueError."""
    valkmusa_circuit = cirq.Circuit(device=Valkmusa())
    with pytest.raises(ValueError, match='devices .* not the same'):
        adonis_sampler.run(valkmusa_circuit)
def test_non_injective_qubit_mapping(base_url, settings_dict, qubit_mapping):
    """Two logical qubits mapped onto one physical qubit are rejected."""
    qubit_mapping['q2 log.'] = 'QB1'  # collide with the mapping of 'q1 log.'
    expected_error = 'Multiple logical qubits map to the same physical qubit'
    with pytest.raises(ValueError, match=expected_error):
        IQMSampler(base_url, json.dumps(settings_dict), Adonis(), qubit_mapping)
def test_qubits_not_in_settings(base_url, settings_dict, qubit_mapping):
    """Mapping to a physical qubit missing from the settings is rejected."""
    del settings_dict['subtrees']['QB1']
    expected_error = "The physical qubits {'QB1'} in the qubit mapping are not defined in the settings"
    with pytest.raises(ValueError, match=expected_error):
        IQMSampler(base_url, json.dumps(settings_dict), Adonis(), qubit_mapping)
def test_incomplete_qubit_mapping(adonis_sampler, circuit):
    """A circuit qubit absent from the qubit mapping is rejected at run time."""
    unmapped_qubit = cirq.NamedQubit('Eve')
    circuit.append(cirq.X(unmapped_qubit))
    expected_error = "The qubits {'Eve'} are not found in the provided qubit mapping"
    with pytest.raises(ValueError, match=expected_error):
        adonis_sampler.run(circuit)
| StarcoderdataPython |
11242330 | # -*- coding: utf-8 -*-
# Smoke tests: each Slack Web API `files.*` wrapper must be exposed on the
# client fixture (`slack_time`); the asserts only check attribute truthiness,
# no API calls are made.
def test_files_comments_delete(slack_time):
    assert slack_time.files.comments.delete


def test_files_delete(slack_time):
    assert slack_time.files.delete


def test_files_info(slack_time):
    assert slack_time.files.info


def test_files_list(slack_time):
    assert slack_time.files.list


def test_files_revoke_public_url(slack_time):
    assert slack_time.files.revoke_public_url


def test_files_shared_public_url(slack_time):
    assert slack_time.files.shared_public_url


def test_files_upload(slack_time):
    assert slack_time.files.upload


def test_files_remote_add(slack_time):
    assert slack_time.files.remote.add


def test_files_remote_info(slack_time):
    assert slack_time.files.remote.info


def test_files_remote_list(slack_time):
    assert slack_time.files.remote.list


def test_files_remote_remove(slack_time):
    assert slack_time.files.remote.remove


def test_files_remote_share(slack_time):
    assert slack_time.files.remote.share


def test_files_remote_update(slack_time):
    assert slack_time.files.remote.update
| StarcoderdataPython |
32928 | <reponame>GuanLab/DeepSleep
from __future__ import print_function
import os
import sys
import numpy as np
from keras.models import Model
from keras.layers import Input, concatenate, Conv1D, MaxPooling1D, Conv2DTranspose,Lambda
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras import backend as K
import tensorflow as tf
import keras
import cv2
import scipy.io
# for Fourier Transform
from scipy import signal
#from spectrum import pmtm
#from keras.backend.tensorflow_backend import set_session
#config = tf.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction = 0.9
#set_session(tf.Session(config=config))
# Keras/TensorFlow configuration and training constants.
K.set_image_data_format('channels_last')  # TF dimension ordering in this code
batch_size=5
# NOTE(review): `ss` is not referenced anywhere in this visible chunk --
# confirm it is used elsewhere or remove it.
ss = 10
def scaleImage (image,scale):
    """Rescale a 2-D array along its second axis by `scale` and fit the
    result back into the original (x, y) shape: center-crop when wider,
    center-pad with zeros when narrower."""
    [x,y]= image.shape
    x1=x
    y1=int(round(y*scale))
    # cv2.resize takes (width, height): only the second axis changes size.
    image=cv2.resize(image.astype('float32'),(y1,x1)) # check this for multiple channnels!!
    new=np.zeros((x,y))
    if (y1>y):
        # Wider than the original: crop a centered window of width y.
        start=int(round(y1/2-y/2))
        end=start+y
        new=image[:,start:end]
    else:
        # Narrower: paste the resized image centered, zero-padded elsewhere.
        # NOTE(review): this looks like it was meant to be
        # int(round((y - y1) / 2)); as written round() is applied to the
        # integer difference before halving, so odd differences truncate
        # and the padding is off-center by one column -- confirm intent.
        new_start=int(round(y-y1)/2)
        new_end=new_start+y1
        new[:,new_start:new_end]=image
    return new
def label_major_vote(input_data,scale_pool):
    """Downsample a (1, N) label row by majority vote over consecutive
    windows of `scale_pool` samples.

    Labels are expected in {-1, 0, 1}; ties resolve to the smallest label.
    Returns a (1, N / scale_pool) integer array.
    """
    n_windows = int(input_data.shape[1] / scale_pool)
    # Shift labels to {0, 1, 2} so np.bincount accepts them, and group the
    # row into one window per output column.
    shifted = input_data.astype(int).reshape(n_windows, scale_pool) + 1
    majors = np.empty((1, n_windows), dtype=int)
    for col, window in enumerate(shifted):
        majors[0, col] = np.bincount(window, minlength=3).argmax() - 1
    return majors
import unet
import random
# Build the U-Net model (weight loading left commented out for fresh training).
model = unet.get_unet()
#model.load_weights('weights_' + sys.argv[1] + '.h5')
#model.summary()
from datetime import datetime
import random
# Input multitaper spectrograms (96 bins) and matching per-sample labels.
path1='/ssd/hongyang/2018/physionet/data/multitaper96/'
path2='/ssd/hongyang/2018/physionet/data/multitaper96_label/'
new_path='/ssd/hongyang/2018/physionet/data/new_arousal/'
# Read the list of training record ids, one per line.
all_ids=open('whole_train.dat','r')
all_line=[]
for line in all_ids:
    all_line.append(line.rstrip())
all_ids.close()
#random.seed(datetime.now())
# Seed with the CLI fold index so the 80/20 train/validation split is
# reproducible per fold.
random.seed(int(sys.argv[1]))
random.shuffle(all_line)
partition_ratio=0.8
train_line=all_line[0:int(len(all_line)*partition_ratio)]
test_line=all_line[int(len(all_line)*partition_ratio):len(all_line)]
# Re-seed non-deterministically for the shuffling done during training.
random.seed(datetime.now())
def generate_data(train_line, batch_size, if_train):
    """Replaces Keras' native ImageDataGenerator.

    Infinite generator yielding (image_batch, label_batch) built from
    `batch_size` records; each record is cut into 32-column windows and
    windows whose labels are all -1 (unscored) are skipped.
    """
    ##### augmentation parameters ######
    # NOTE(review): these augmentation flags are declared but never used in
    # this function body -- confirm whether augmentation was meant to be
    # applied here.
    if_time=False
    max_scale=1.15
    min_scale=1
    if_mag=True
    max_mag=1.15
    min_mag=0.9
    if_flip=False
    ####################################
    i = 0
    while True:
        image_batch = []
        label_batch = []
        for b in range(batch_size):
            # Reshuffle and restart once every record has been consumed.
            if i == len(train_line):
                i = 0
                random.shuffle(train_line)
            sample = train_line[i]
            i += 1
            the_id=sample.split('/')[-1]
            image = np.load(path1 + the_id + '.npy')
            label = np.load(path2 + the_id + '.npy')
            # Window start offsets, visited in random order.
            index=np.arange(0,20960,32)
            random.shuffle(index)
            for k in index: #
                # Pool the 32 label columns down by majority vote (factor 16).
                label_final=label_major_vote(label[:,k:(k+32)],2**4)
                if np.sum(label_final!=-1) > 0:
                    image_batch.append(image[:,k:(k+32)].T)
                    label_batch.append(label_final.T)
        image_batch=np.array(image_batch)
        label_batch=np.array(label_batch)
        # print(image_batch.shape,label_batch.shape)
        yield image_batch, label_batch
#model_checkpoint = ModelCheckpoint('weights.h5', monitor='val_loss', save_best_only=False)
# Save the full model after every epoch, one weight file per CLI fold index.
name_model='weights_' + sys.argv[1] + '.h5'
callbacks = [
    # keras.callbacks.TensorBoard(log_dir='./',
    #     histogram_freq=0, write_graph=True, write_images=False),
    keras.callbacks.ModelCheckpoint(os.path.join('./', name_model),
                                    verbose=0,save_weights_only=False,monitor='val_loss')
    #verbose=0,save_weights_only=False,monitor='val_loss',save_best_only=True)
]
# Train for 25 epochs from the window generator, validating on the held-out
# 20% of records.
model.fit_generator(
    generate_data(train_line, batch_size,True),
    steps_per_epoch=int(len(train_line) // batch_size), nb_epoch=25,
    validation_data=generate_data(test_line,batch_size,False),
    validation_steps=int(len(test_line) // batch_size),callbacks=callbacks)
| StarcoderdataPython |
11241580 | # -*- test-case-name: twisted.web2.test.test_httpauth -*-
# Copyright (c) 2006-2008 Twisted Matrix Laboratories.
"""
Implementation of RFC2617: HTTP Digest Authentication
http://www.faqs.org/rfcs/rfc2617.html
"""
import sys
import time
import random
from twisted.cred import credentials, error
from zope.interface import implements, Interface
from twisted.web2.auth.interfaces import ICredentialFactory
from twisted.python.hashlib import md5, sha1
# The digest math
# Hash constructors keyed by RFC 2617 algorithm token.  "sha" means SHA-1,
# and "md5-sess" uses the same hash as "md5" (the session variant only
# changes how H(A1) is derived in calcHA1).
algorithms = {
    'md5': md5,
    'md5-sess': md5,
    'sha': sha1,
}
# DigestCalcHA1
def calcHA1(
    pszAlg,
    pszUserName,
    pszRealm,
    pszPassword,
    pszNonce,
    pszCNonce,
    preHA1=None
):
    """
    Compute H(A1) as defined by RFC 2617 and return it hex-encoded.

    @param pszAlg: The name of the algorithm to use to calculate the digest.
        Currently supported are md5 md5-sess and sha.
    @param pszUserName: The username
    @param pszRealm: The realm
    @param pszPassword: The password
    @param pszNonce: The nonce
    @param pszCNonce: The cnonce
    @param preHA1: If available this is a str containing a previously
       calculated HA1 as a hex string. If this is given then the values for
       pszUserName, pszRealm, and pszPassword are ignored.
    @return: the hex-encoded H(A1) digest string.
    """
    if (preHA1 and (pszUserName or pszRealm or pszPassword)):
        raise TypeError(("preHA1 is incompatible with the pszUserName, "
                         "pszRealm, and pszPassword arguments"))
    if preHA1 is None:
        # We need to calculate the HA1 from the username:realm:password
        m = algorithms[pszAlg]()
        m.update(pszUserName)
        m.update(":")
        m.update(pszRealm)
        m.update(":")
        m.update(pszPassword)
        HA1 = m.digest()
    else:
        # We were given the hex-encoded H(A1) directly; recover its raw bytes.
        # NOTE: .decode('hex') / .encode('hex') are Python 2 str codecs.
        HA1 = preHA1.decode('hex')
    if pszAlg == "md5-sess":
        # Session variant: HA1 = H(H(A1):nonce:cnonce)
        m = algorithms[pszAlg]()
        m.update(HA1)
        m.update(":")
        m.update(pszNonce)
        m.update(":")
        m.update(pszCNonce)
        HA1 = m.digest()
    return HA1.encode('hex')
# DigestCalcResponse
def calcResponse(
    HA1,
    algo,
    pszNonce,
    pszNonceCount,
    pszCNonce,
    pszQop,
    pszMethod,
    pszDigestUri,
    pszHEntity,
):
    """
    Compute the RFC 2617 request-digest from a hex-encoded H(A1).

    H(A2) is H(method:uri) -- with the entity hash appended for
    qop="auth-int" -- and the response is
    H(HA1:nonce[:nc:cnonce:qop]:HA2), hex-encoded.
    """
    m = algorithms[algo]()
    m.update(pszMethod)
    m.update(":")
    m.update(pszDigestUri)
    if pszQop == "auth-int":
        m.update(":")
        m.update(pszHEntity)
    HA2 = m.digest().encode('hex')
    m = algorithms[algo]()
    m.update(HA1)
    m.update(":")
    m.update(pszNonce)
    m.update(":")
    # Per RFC 2617 the nc/cnonce/qop fields are only hashed when the client
    # sent a qop directive (legacy RFC 2069 clients omit them).
    if pszNonceCount and pszCNonce: # pszQop:
        m.update(pszNonceCount)
        m.update(":")
        m.update(pszCNonce)
        m.update(":")
        m.update(pszQop)
        m.update(":")
    m.update(HA2)
    respHash = m.digest().encode('hex')
    return respHash
class IUsernameDigestHash(Interface):
    """
    This credential is used when a CredentialChecker has access to the hash
    of the username:realm:password as in an Apache .htdigest file.
    """
    # NOTE(review): zope.interface methods are conventionally declared
    # without an explicit ``self`` parameter; having one here makes the
    # declared signature one argument too long -- confirm against
    # verifyObject/implementers.
    def checkHash(self, digestHash):
        """
        @param digestHash: The hashed username:realm:password to check against.

        @return: a deferred which becomes, or a boolean indicating if the
            hash matches.
        """
class DigestedCredentials:
    """Yet Another Simple HTTP Digest authentication scheme

    Wraps the fields of a parsed Digest response and verifies them against
    either a plaintext password or a precomputed H(A1) hash.
    """
    implements(credentials.IUsernameHashedPassword,
               IUsernameDigestHash)

    def __init__(self, username, method, realm, fields):
        self.username = username
        self.method = method
        self.realm = realm
        self.fields = fields

    def _responseMatches(self, pszUserName, pszRealm, pszPassword, preHA1):
        """Shared verification: recompute the expected request-digest from
        the stored response fields and compare it to the client's response.

        Either (pszUserName, pszRealm, pszPassword) or preHA1 is given,
        mirroring the calcHA1 contract.
        """
        response = self.fields.get('response')
        uri = self.fields.get('uri')
        nonce = self.fields.get('nonce')
        cnonce = self.fields.get('cnonce')
        nc = self.fields.get('nc')
        algo = self.fields.get('algorithm', 'md5').lower()
        qop = self.fields.get('qop', 'auth')

        expected = calcResponse(
            calcHA1(algo, pszUserName, pszRealm, pszPassword, nonce, cnonce,
                    preHA1=preHA1),
            algo, nonce, nc, cnonce, qop, self.method, uri, None
        )

        return expected == response

    def checkPassword(self, password):
        """Verify the response against a plaintext password."""
        return self._responseMatches(self.username, self.realm, password, None)

    def checkHash(self, digestHash):
        """Verify the response against a precomputed hex H(A1) hash
        (e.g. from an Apache .htdigest file)."""
        return self._responseMatches(None, None, None, digestHash)
class DigestCredentialFactory(object):
    """
    Support for RFC2617 HTTP Digest Authentication

    @cvar CHALLENGE_LIFETIME_SECS: The number of seconds for which an
        opaque should be valid.

    @ivar privateKey: A random string used for generating the secure opaque.
    """
    implements(ICredentialFactory)

    CHALLENGE_LIFETIME_SECS = 15 * 60    # 15 minutes

    scheme = "digest"

    def __init__(self, algorithm, realm):
        """
        @type algorithm: C{str}
        @param algorithm: case insensitive string that specifies
            the hash algorithm used, should be either, md5, md5-sess
            or sha

        @type realm: C{str}
        @param realm: case sensitive string that specifies the realm
            portion of the challenge
        """
        self.algorithm = algorithm
        self.realm = realm
        # NOTE(review): the stdlib random module is not cryptographically
        # secure and sys.maxint is Python 2 only -- worth confirming against
        # the project's threat model and Python version.
        c = tuple([random.randrange(sys.maxint) for _ in range(3)])
        self.privateKey = '%d%d%d' % c

    def generateNonce(self):
        # Fresh nonce: three random integers concatenated as a string.
        c = tuple([random.randrange(sys.maxint) for _ in range(3)])
        c = '%d%d%d' % c
        return c

    def _getTime(self):
        """
        Parameterize the time based seed used in generateOpaque
        so we can deterministically unittest it's behavior.
        """
        return time.time()

    def generateOpaque(self, nonce, clientip):
        """
        Generate an opaque to be returned to the client.
        This should be a unique string that can be returned to us and verified.
        """
        # Now, what we do is encode the nonce, client ip and a timestamp
        # in the opaque value with a suitable digest.
        # Resulting format: "<md5hex(key + privateKey)>-<base64(key)>"
        # where key = "nonce,clientip,timestamp".
        key = "%s,%s,%s" % (nonce, clientip, str(int(self._getTime())))
        digest = md5(key + self.privateKey).hexdigest()
        ekey = key.encode('base64')
        return "%s-%s" % (digest, ekey.strip('\n'))

    def verifyOpaque(self, opaque, nonce, clientip):
        """
        Given the opaque and nonce from the request, as well as the clientip
        that made the request, verify that the opaque was generated by us.
        And that it's not too old.

        @param opaque: The opaque value from the Digest response
        @param nonce: The nonce value from the Digest response
        @param clientip: The remote IP address of the client making the request

        @return: C{True} if the opaque was successfully verified.

        @raise error.LoginFailed: if C{opaque} could not be parsed or
            contained the wrong values.
        """
        # First split the digest from the key
        opaqueParts = opaque.split('-')
        if len(opaqueParts) != 2:
            raise error.LoginFailed('Invalid response, invalid opaque value')

        # Verify the key
        key = opaqueParts[1].decode('base64')
        keyParts = key.split(',')
        if len(keyParts) != 3:
            raise error.LoginFailed('Invalid response, invalid opaque value')
        if keyParts[0] != nonce:
            raise error.LoginFailed(
                'Invalid response, incompatible opaque/nonce values')
        if keyParts[1] != clientip:
            raise error.LoginFailed(
                'Invalid response, incompatible opaque/client values')
        # Reject challenges older than CHALLENGE_LIFETIME_SECS.
        if (int(self._getTime()) - int(keyParts[2]) >
            DigestCredentialFactory.CHALLENGE_LIFETIME_SECS):
            raise error.LoginFailed(
                'Invalid response, incompatible opaque/nonce too old')

        # Verify the digest
        # NOTE(review): '!=' on hex digests is not a constant-time compare --
        # confirm whether timing here matters for this server-side check.
        digest = md5(key + self.privateKey).hexdigest()
        if digest != opaqueParts[0]:
            raise error.LoginFailed('Invalid response, invalid opaque value')

        return True

    def getChallenge(self, peer):
        """
        Generate the challenge for use in the WWW-Authenticate header

        @param peer: The L{IAddress} of the requesting client.

        @return: The C{dict} that can be used to generate a WWW-Authenticate
            header.
        """
        c = self.generateNonce()
        o = self.generateOpaque(c, peer.host)
        return {'nonce': c,
                'opaque': o,
                'qop': 'auth',
                'algorithm': self.algorithm,
                'realm': self.realm}

    def decode(self, response, request):
        """
        Decode the given response and attempt to generate a
        L{DigestedCredentials} from it.

        @type response: C{str}
        @param response: A string of comma seperated key=value pairs

        @type request: L{twisted.web2.server.Request}
        @param request: the request being processed

        @return: L{DigestedCredentials}

        @raise: L{error.LoginFailed} if the response does not contain a
            username, a nonce, an opaque, or if the opaque is invalid.
        """
        def unq(s):
            # Strip one pair of surrounding double quotes, if present.
            if s[0] == s[-1] == '"':
                return s[1:-1]
            return s
        response = ' '.join(response.splitlines())
        parts = response.split(',')
        auth = {}
        for (k, v) in [p.split('=', 1) for p in parts]:
            auth[k.strip()] = unq(v.strip())

        username = auth.get('username')
        if not username:
            raise error.LoginFailed('Invalid response, no username given.')
        if 'opaque' not in auth:
            raise error.LoginFailed('Invalid response, no opaque given.')
        if 'nonce' not in auth:
            raise error.LoginFailed('Invalid response, no nonce given.')

        # Now verify the nonce/opaque values for this client
        if self.verifyOpaque(auth.get('opaque'),
                             auth.get('nonce'),
                             request.remoteAddr.host):
            return DigestedCredentials(username,
                                       request.method,
                                       self.realm,
                                       auth)
| StarcoderdataPython |
5090707 | <filename>neurolang/tests/test_regions.py
import numpy as np
import pytest
from numpy import random
import nibabel as nib
from ..aabb_tree import AABB
from ..CD_relations import (cardinal_relation,
cardinal_relation_prepare_regions,
direction_matrix, is_in_direction)
from ..exceptions import NeuroLangException
from ..regions import (ExplicitVBR, ExplicitVBROverlay, PlanarVolume, Region,
SphericalVolume, region_difference, region_intersection,
region_union)
def _generate_random_box(size_bounds, *args):
    """Build a Region whose lower corner is drawn uniformly from the given
    per-axis bounds and whose extent is drawn uniformly from size_bounds."""
    dims = len(args)
    low_corner = np.array([np.random.uniform(*bounds) for bounds in tuple(args)])
    high_corner = low_corner + np.random.uniform(*size_bounds, size=dims)
    return Region(low_corner, high_corner)
def test_region_eq():
    """Regions with identical bounds compare equal; distant boxes do not."""
    first = Region((0, 0, 0), (1, 1, 1))
    second = Region((0, 0, 0), (1, 1, 1))
    assert first == second
    origin_box = _generate_random_box((0, 10), (0, 0), (0, 0), (0, 0))
    far_box = _generate_random_box((50, 100), (50, 50), (100, 100), (200, 200))
    assert not origin_box == far_box
def test_invalid_regions_raise_exception():
    """Bounds that are not strictly increasing on every axis are rejected."""
    invalid_bounds = [
        ((0, 0, 0), (1, -1, 1)),
        ((0, 0, 0), (0, 10, 20)),
    ]
    for lower, upper in invalid_bounds:
        with pytest.raises(NeuroLangException):
            Region(lower, upper)
def test_coordinates():
    """bounding_box.limits holds the per-axis (lower, upper) pairs."""
    unit_region = Region((0, 0, 0), (1, 1, 1))
    expected_limits = np.array([[0, 1], [0, 1], [0, 1]])
    assert np.array_equal(unit_region.bounding_box.limits, expected_limits)
    other_region = Region((2, 0, 7), (4, 6, 8))
    expected_limits = np.array([[2, 4], [0, 6], [7, 8]])
    assert np.array_equal(other_region.bounding_box.limits, expected_limits)
def _dir_matrix(region, other_region):
    """Direction matrix between the bounding boxes of two regions."""
    box = region.bounding_box
    other_box = other_region.bounding_box
    return direction_matrix(box, other_box)
def test_regions_dir_matrix():
    """Direction matrices/tensors between axis-aligned boxes in 2, 3 and 4
    dimensions; index [1] selects the middle slice of the first axis."""
    # 2d regions (R-L, P-A)
    r1 = Region((0, 0), (1, 1))
    r2 = Region((0, 5), (1, 6))
    assert is_in_direction(_dir_matrix(r1, r2), 'P')
    # r1 A:B:P:RA:R:RP r2
    r1 = Region((3, 3, 0), (8, 8, 1))
    r2 = Region((2, 4, 0), (5, 6, 1))
    dir_matrix = np.array([[0, 1, 1], [0, 1, 1], [0, 1, 1]])
    assert np.array_equal(_dir_matrix(r1, r2)[1], dir_matrix)
    # r1 L:LA:A:B r2
    r1 = Region((1, 1, 0), (5, 5, 1))
    r2 = Region((3, 3, 0), (5, 7, 1))
    dir_matrix = np.array([[1, 1, 0], [1, 1, 0], [0, 0, 0]])
    dm = _dir_matrix(r1, r2)[1]
    assert np.array_equal(dm, dir_matrix)
    # r1 LP r2
    r1 = Region((6, 6, 0), (8, 8, 1))
    r2 = Region((8, 4, 0), (10, 6, 1))
    dir_matrix = np.array([[0, 0, 0], [0, 0, 0], [1, 0, 0]])
    dm = _dir_matrix(r1, r2)
    assert np.array_equal(dm[1], dir_matrix)
    # r1 B r2
    r1 = Region((5, 6, 0), (8, 8, 1))
    r2 = Region((5, 5, 0), (10, 10, 1))
    dir_matrix = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
    assert np.array_equal(_dir_matrix(r1, r2)[1], dir_matrix)
    # r1 LA:A:RA:L:B:R:LP:P:RP r2
    r1 = Region((0, 0, 0), (10, 10, 1))
    r2 = Region((5, 5, 0), (6, 6, 1))
    dir_matrix = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
    assert np.array_equal(_dir_matrix(r1, r2)[1], dir_matrix)
    r1 = Region((0, 0, 2), (10, 1, 9))
    r2 = Region((0, 0, 0), (10, 1, 1))
    # r1 S r2 - r2 I r1
    dir_tensor = np.array(np.zeros(shape=(3, 3, 3)))
    dir_tensor[2, 1, 1] = 1
    assert np.array_equal(_dir_matrix(r1, r2), dir_tensor)
    dir_tensor = np.array(np.zeros(shape=(3, 3, 3)))
    dir_tensor[0, 1, 1] = 1
    assert np.array_equal(_dir_matrix(r2, r1), dir_tensor)
    # r1 SL r2
    r1 = Region((0, 0, 8), (10, 1, 9))
    r2 = Region((15, 0, 0), (17, 1, 1))
    dir_tensor = np.array(np.zeros(shape=(3, 3, 3)))
    dir_tensor[2, 1, 0] = 1
    assert np.array_equal(_dir_matrix(r1, r2), dir_tensor)
    # r1 RA r2
    r1 = Region((25, 0, 0), (30, 1, 1))
    r2 = Region((15, 5, 0), (20, 6, 1))
    dir_tensor = np.array(np.zeros(shape=(3, 3, 3)))
    dir_tensor[1, 0, 2] = 1
    assert np.array_equal(_dir_matrix(r1, r2), dir_tensor)
    # 4d regions overlapping at time intervals: r1 Before r2 - r2 After r1
    r1 = Region((0, 0, 0, 1), (1, 1, 1, 2))
    r2 = Region((0, 0, 0, 5), (1, 1, 1, 6))
    assert np.array_equal(
        _dir_matrix(r1, r2)[0, 1, :, :],
        np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
    )
    assert np.array_equal(
        _dir_matrix(r1, r2)[1:],
        np.zeros(shape=(2, 3, 3, 3))
    )
    assert np.array_equal(
        _dir_matrix(r2, r1)[-1, 1, :, :],
        np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
    )
    assert np.array_equal(
        _dir_matrix(r2, r1)[:-1],
        np.zeros(shape=(2, 3, 3, 3))
    )
    assert is_in_direction(_dir_matrix(r2, r1), 'F')
def test_basic_directionality():
    """Pairwise cardinal directions (A/P, L/R, I/S, O and combinations)
    between simple 2d and 3d boxes."""
    r1 = Region((0, 0), (1, 1))
    r2 = Region((0, -5), (1, -2))
    assert is_in_direction(_dir_matrix(r1, r2), 'A')
    assert is_in_direction(_dir_matrix(r2, r1), 'P')
    r1 = Region((0, 0), (1, 1))
    r2 = Region((4, 0), (6, 2))
    assert is_in_direction(_dir_matrix(r1, r2), 'L')
    assert is_in_direction(_dir_matrix(r2, r1), 'R')
    assert is_in_direction(_dir_matrix(r2, r1), 'A')
    assert is_in_direction(_dir_matrix(r2, r1), 'RA')
    r1 = Region((0, 0, 0), (1, 1, 1))
    r2 = Region((0, 0, 3), (1, 1, 4))
    assert is_in_direction(_dir_matrix(r1, r2), 'I')
    assert is_in_direction(_dir_matrix(r2, r1), 'S')
    # Partially overlapping boxes report overlap plus the offset direction.
    r1 = Region((0, 0), (2, 2))
    r2 = Region((1, 0), (3, 2))
    assert is_in_direction(_dir_matrix(r1, r2), 'AO')
    assert is_in_direction(_dir_matrix(r2, r1), 'PO')
    r1 = Region((0, 0), (6, 6))
    r2 = Region((2, 3), (7, 4))
    assert is_in_direction(_dir_matrix(r1, r2), 'LAPO')
    assert is_in_direction(_dir_matrix(r2, r1), 'OR')
    # A box fully covered along all axes is pure overlap: no cardinal holds.
    r1 = Region((0, 0, 0), (1, 1, 1))
    r2 = Region((0, -5, -5), (1, 5, 5))
    assert is_in_direction(_dir_matrix(r1, r2), 'O')
    for rel in ['P', 'A', 'I', 'S', 'L', 'R']:
        assert not is_in_direction(_dir_matrix(r1, r2), rel)
    r1 = Region((0, 0, 0), (1, 3, 5))
    r2 = Region((0, 2, 1), (1, 7, 4))
    assert is_in_direction(_dir_matrix(r1, r2), 'O')
    # Boxes that only share a face are adjacent, not overlapping.
    r1 = Region((0, 0), (1, 1))
    r2 = Region((1, 0), (2, 1))
    assert is_in_direction(_dir_matrix(r1, r2), 'L')
    assert not is_in_direction(_dir_matrix(r1, r2), 'O')
def test_explicit_region():
    """ExplicitVBR voxels round-trip through to_ijk for several affines."""
    def randint():
        return random.randint(0, 1000)
    voxels = [(randint(), randint(), randint()) for _ in range(50)]
    affine = np.eye(4)
    vbr = ExplicitVBR(voxels, affine)
    assert np.array_equal(vbr.to_ijk(affine), vbr.voxels)
    assert vbr.aabb_tree is not None
    assert np.all(vbr.bounding_box.lb >= 0)
    assert np.all(vbr.bounding_box.lb <= 1000)
    # Identity affine.
    affine = np.eye(4)
    region1 = ExplicitVBR(voxels, affine)
    assert np.array_equal(region1.voxels, region1.to_ijk(affine))
    # Pure scaling affine.
    affine = np.eye(4) * 2
    affine[-1] = 1
    region1 = ExplicitVBR(voxels, affine)
    assert np.array_equal(region1.voxels, region1.to_ijk(affine))
    # Pure translation affine.
    affine = np.eye(4)
    affine[:, -1] = np.array([1, 1, 1, 1])
    region1 = ExplicitVBR(voxels, affine)
    assert np.array_equal(region1.voxels, region1.to_ijk(affine))
    # MNI-like affine with flip, scaling and translation.
    affine = np.array([
        [-0.69999999, 0., 0., 90.],
        [0., 0.69999999, 0., -126.],
        [0., 0., 0.69999999, -72.],
        [0., 0., 0., 1.]
    ]).round(2)
    region1 = ExplicitVBR(voxels, affine)
    assert np.array_equal(region1.voxels, region1.to_ijk(affine))
def test_explicit_region_overlay():
    """ExplicitVBROverlay stores a per-voxel value and can rebuild the full
    spatial image, both into a fresh image and into a caller-provided one."""
    def randint(size=None):
        return random.randint(0, 256, size=size)
    voxels = [(randint(), randint(), randint()) for _ in range(50)]
    overlay = randint(size=50)
    affine = np.eye(4)
    # Reference dense image with the overlay values scattered at the voxels.
    image = np.zeros((256, 256, 256))
    image[tuple(zip(*voxels))] = overlay
    vbr = ExplicitVBROverlay(voxels, affine, overlay, image_dim=(256,) * 3)
    assert np.array_equal(vbr.to_ijk(affine), vbr.voxels)
    assert vbr.aabb_tree is not None
    assert np.all(vbr.bounding_box.lb >= 0)
    assert np.all(vbr.bounding_box.lb <= 256)
    res_spi = vbr.spatial_image()
    assert np.array_equiv(
        np.asanyarray(res_spi.dataobj, dtype=int),
        image
    )
    spi = nib.spatialimages.SpatialImage(
        image,
        affine
    )
    assert np.array_equiv(res_spi.affine, spi.affine)
    assert np.array_equiv(res_spi.dataobj, spi.dataobj)
    assert res_spi.shape == spi.shape
    # Writing into a caller-provided output image must give the same result.
    spi_out = nib.spatialimages.SpatialImage(
        np.empty_like(image),
        affine
    )
    vbr.spatial_image(out=spi_out)
    assert np.array_equiv(spi_out.affine, spi.affine)
    assert np.array_equiv(spi_out.dataobj, spi.dataobj)
    assert spi_out.shape == spi.shape
    affine = np.eye(4)
    region1 = ExplicitVBROverlay(voxels, affine, overlay)
    assert np.array_equal(region1.voxels, region1.to_ijk(affine))
    # NOTE(review): the next case constructs ExplicitVBR (not ...Overlay)
    # with `overlay` as third positional argument -- confirm that argument
    # slot is meant to receive the overlay here.
    affine = np.eye(4) * 2
    affine[-1] = 1
    region1 = ExplicitVBR(voxels, affine, overlay)
    assert np.array_equal(region1.voxels, region1.to_ijk(affine))
    affine = np.eye(4)
    affine[:, -1] = np.array([1, 1, 1, 1])
    region1 = ExplicitVBROverlay(voxels, affine, overlay)
    assert np.array_equal(region1.voxels, region1.to_ijk(affine))
    affine = np.array([
        [-0.69999999, 0., 0., 90.],
        [0., 0.69999999, 0., -126.],
        [0., 0., 0.69999999, -72.],
        [0., 0., 0., 1.]
    ]).round(2)
    region1 = ExplicitVBR(voxels, affine, overlay)
    assert np.array_equal(region1.voxels, region1.to_ijk(affine))
def test_build_tree_one_voxel_regions():
    """A single-voxel region yields a single-node (height 0) AABB tree,
    regardless of the affine scaling."""
    identity_region = ExplicitVBR(np.array([[2, 2, 2]]), np.eye(4))
    assert identity_region.bounding_box == AABB((2, 2, 2), (3, 3, 3))
    assert identity_region.aabb_tree.height == 0
    scaled_region = ExplicitVBR(np.array([[2, 2, 2]]), np.diag((10, 10, 10, 1)))
    assert scaled_region.bounding_box == AABB((20, 20, 20), (30, 30, 30))
    assert scaled_region.aabb_tree.height == 0
    assert is_in_direction(_dir_matrix(scaled_region, identity_region), 'SA')
def test_tree_of_convex_regions():
    """AABB tree height grows with the spatial spread of the region voxels."""
    cases = [
        ([[0, 0, 0], [5, 5, 5]], 1),
        ([[0, 0, 0], [2, 0, 1], [5, 5, 5]], 2),
        ([[0, 0, 0], [2, 2, 1], [5, 5, 0], [8, 8, 0]], 2),
        ([[0, 0, 0], [2, 2, 1], [5, 5, 0], [10, 10, 0]], 3),
    ]
    for voxels, expected_height in cases:
        region = ExplicitVBR(np.array(voxels), np.eye(4))
        assert region.aabb_tree.height == expected_height
def test_spherical_volumetric_region():
    """SphericalVolume point membership, voxelization via to_ijk, and
    conversion to an explicit region whose points stay within the radius."""
    unit_sphere = SphericalVolume((0, 0, 0), 1)
    assert (0, 0, 0) in unit_sphere
    assert (1, 0, 0) in unit_sphere
    assert not (1, 1, 1) in unit_sphere
    unit_sphere.to_ijk(np.eye(4))
    def randint():
        return random.randint(0, 1000)
    samples = 500
    voxels = sorted(
        [(randint(), randint(), randint()) for _ in range(samples)]
    )
    affine = np.eye(4)
    # Center the sphere on the median voxel.
    center = voxels[samples//2]
    radius = 15
    sr = SphericalVolume(center, radius)
    vbr_voxels = sr.to_ijk(affine)
    # A random voxel of the sphere, mapped to xyz, must lie within the radius.
    rand_voxel = vbr_voxels[np.random.choice(len(vbr_voxels), 1)]
    coordinate = nib.affines.apply_affine(affine, np.array(rand_voxel))
    assert np.linalg.norm(np.array(coordinate) - np.array(center)) <= radius
    explicit_sr = sr.to_explicit_vbr(affine, None)
    assert np.all(
        np.array([
            np.linalg.norm(np.array(tuple([x, y, z])) - np.array(center))
            for [x, y, z] in explicit_sr.to_xyz()
        ]) <= 15
    )
def test_planar_region():
    """PlanarVolume membership, projection of a point onto the plane, and
    the bounding box implied by the normal vector and limit."""
    center = (1, 5, 6)
    vector = (1, 0, 0)
    pr = PlanarVolume(center, vector, limit=10)
    assert center in pr
    assert not (2, 8, 7) in pr
    # Any point projected onto the plane must be contained in it.
    p = tuple(random.randint(1, 250, size=3))
    p_proj = pr.project_point_to_plane(p)
    assert p_proj in pr
    assert np.array_equal(np.asanyarray([-1, -10, -10], dtype=float),
                          pr.bounding_box.lb)
    assert np.array_equal(np.asanyarray([10, 10, 10], dtype=float),
                          pr.bounding_box.ub)
def test_points_contained_in_implicit_regions():
    """Lists of points can be tested for membership in implicit regions
    (spheres and planes)."""
    def randpoint(i, j):
        return tuple(random.randint(i, j, size=3))
    sphere = SphericalVolume((0, 0, 0), 10)
    # BUG FIX: the original assigned the result of this membership test to a
    # variable instead of asserting it, so the check never ran.
    assert [[i, i, i] for i in range(5)] in sphere
    assert [[j, 0, 0] for j in range(5, 10)] in sphere
    center = (0, 0, 0)
    vector = (1, 0, 0)
    pr = PlanarVolume(center, vector, limit=10)
    points = [pr.project_point_to_plane(randpoint(1, 250)) for _ in range(30)]
    assert points in pr
    assert not (1, 1, 1) in pr
def test_refinement_of_not_overlapping():
    """Bounding-box overlap can be a false positive; refine_overlapping=True
    descends the AABB trees to resolve the true relation."""
    triangle = ExplicitVBR(
        np.array([[0, 0, 0], [6, 0, 0], [6, 6, 1]]), np.eye(4)
    )
    other_region = ExplicitVBR(np.array([[0, 6, 0]]), np.eye(4))
    assert not cardinal_relation(triangle, triangle, 'O')
    # Coarse bounding boxes overlap even though the voxels do not.
    assert cardinal_relation(
        other_region, triangle, 'O', refine_overlapping=False
    )
    # stop_at=0 forbids any refinement steps, which is an invalid request.
    with pytest.raises(ValueError):
        cardinal_relation(
            other_region, triangle, 'O', refine_overlapping=True, stop_at=0
        )
    assert not cardinal_relation(
        other_region, triangle, 'O', refine_overlapping=True
    )
    assert not cardinal_relation(
        triangle, other_region, 'O', refine_overlapping=True
    )
    for r in ['L', 'A']:
        assert cardinal_relation(
            other_region, triangle, r, refine_overlapping=True
        )
    for r in ['R', 'P', 'I', 'S', 'O']:
        assert not cardinal_relation(
            other_region, triangle, r, refine_overlapping=True
        )
    # A point inside the other region's bounding box but not its voxels.
    outer = ExplicitVBR(np.array([[0, 0, 0], [10, 10, 0]]), np.eye(4))
    inner = ExplicitVBR(np.array([[8, 0, 0]]), np.eye(4))
    assert cardinal_relation(inner, outer, 'O', refine_overlapping=False)
    assert not cardinal_relation(inner, outer, 'O', refine_overlapping=True)
    for r in ['L', 'R', 'A', 'P', 'I', 'S']:
        assert not cardinal_relation(inner, outer, r, refine_overlapping=False)
    for r in ['L', 'R', 'P']:
        assert cardinal_relation(inner, outer, r, refine_overlapping=True)
    for r in ['A', 'I', 'S', 'O']:
        assert not cardinal_relation(inner, outer, r, refine_overlapping=True)
    # Two segments sharing only the origin voxel genuinely overlap.
    region = ExplicitVBR(
        np.array([[0, 0, 0], [0, 1, 0], [0, 2, 0]]), np.eye(4)
    )
    other_region = ExplicitVBR(
        np.array([[0, 0, 0], [1, 0, 0], [2, 0, 0]]), np.eye(4)
    )
    assert cardinal_relation(
        region, other_region, 'O', refine_overlapping=False
    )
    assert cardinal_relation(
        region, other_region, 'O', refine_overlapping=True
    )
def test_union_implicit_regions():
    """Every voxel of the union of two implicit spheres lies in a sphere."""
    def contained_in_any(points, spheres):
        return all(any(p in s for s in spheres) for p in points)

    def rand_coord():
        return random.randint(0, 100)

    radius = 5
    sphere_a = SphericalVolume((rand_coord(), rand_coord(), rand_coord()), radius)
    sphere_b = SphericalVolume((rand_coord(), rand_coord(), rand_coord()), radius)
    union = region_union([sphere_a, sphere_b], np.eye(4))
    assert union.image_dim == (0, 0, 0)
    assert contained_in_any(union.voxels, {sphere_a, sphere_b})
def test_regions_union_intersection():
    """A singleton union keeps its bounding box; a sphere centred on that
    box's upper bound intersects the region."""
    def rand_coord():
        return random.randint(70, 100)

    voxels = [(rand_coord(), rand_coord(), rand_coord()) for _ in range(50)]
    affine = np.array([
        [-0.69999999, 0., 0., 90.],
        [0., 0.69999999, 0., -126.],
        [0., 0., 0.69999999, -72.],
        [0., 0., 0., 1.]
    ]).round(2)
    region = ExplicitVBR(voxels, affine, (2, 2, 2))
    union = region_union([region], affine)
    assert union.bounding_box == region.bounding_box
    sphere = SphericalVolume(region.bounding_box.ub, 30)
    assert sphere.bounding_box.overlaps(region.bounding_box)
    assert region_intersection([region, sphere], affine) is not None
def test_intersection_difference():
    """union - (a-b) - (b-a) recovers the intersection's bounding box."""
    def rand_coord():
        return random.randint(1, 5)

    affine = np.eye(4)
    center = (rand_coord(), rand_coord(), rand_coord())
    radius = rand_coord()
    sphere_a = SphericalVolume(center, radius)
    sphere_b = SphericalVolume((center[0] + radius,) + center[1:], radius)
    expected = region_intersection([sphere_a, sphere_b], affine)
    left_only = region_difference([sphere_a, sphere_b], affine)
    right_only = region_difference([sphere_b, sphere_a], affine)
    both = region_union([sphere_a, sphere_b], affine)
    recovered = region_difference([both, left_only, right_only], affine)
    assert expected.bounding_box == recovered.bounding_box
def test_cardinal_relation_prepare_regions():
    """Explicit regions pass through by identity; implicit regions are
    converted to an equal, distinct explicit region."""
    sphere_1 = SphericalVolume((0, 0, 0), 10)
    affine_1 = np.eye(4)
    affine_2 = np.diag((2, 2, 2, 1))
    sphere_1_evbr = sphere_1.to_explicit_vbr(affine_1, (100, 100, 100))
    sphere_2_evbr = sphere_1.to_explicit_vbr(affine_2, (100, 100, 100))
    # Both arguments already explicit: returned untouched.
    r1, r2 = cardinal_relation_prepare_regions(sphere_1_evbr, sphere_1_evbr)
    assert r1 is sphere_1_evbr
    assert r2 is sphere_1_evbr
    # Implicit second argument: converted to a new-but-equal explicit region.
    r1, r2 = cardinal_relation_prepare_regions(sphere_1_evbr, sphere_1)
    assert r1 is sphere_1_evbr
    assert r2 is not sphere_1_evbr and r2 == r1
    # Symmetric case for an implicit first argument.
    r1, r2 = cardinal_relation_prepare_regions(sphere_1, sphere_1_evbr)
    assert r2 is sphere_1_evbr
    assert r1 is not sphere_1_evbr and r2 == r1
    # Two distinct explicit regions: both returned by identity.
    r1, r2 = cardinal_relation_prepare_regions(sphere_1_evbr, sphere_2_evbr)
    assert r1 is sphere_1_evbr
    assert r2 is sphere_2_evbr
| StarcoderdataPython |
4904461 | <gh_stars>1-10
"""
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import numpy as np
import input_pipe_line
import data_preprocessing
import tensorflow as tf
import unittest
import cv2 as cv
class test_input_pipe_line(unittest.TestCase):
    """Visual smoke test for the TFRecord input pipeline: pulls batches
    and dumps the decoded images to disk for manual inspection."""

    def test_data(self):
        """Iterate the training dataset and write each image as a .jpg.

        :return: None
        """
        height = 256
        width = 256
        # NOTE(review): this local is unused — build_dataset below receives
        # the literal 4 instead.
        batch_size = 4
        folder_name = './test_input_pipe_line_results'
        # NOTE(review): machine-specific absolute path; the test can only
        # run where this TFRecord directory exists.
        tfrecord_dir = '/media/dat/68fa98f8-9d03-4c1e-9bdb-c71ea72ab6fa/dat/zero_DCE/data/tfrecord'
        train_data_generator = input_pipe_line.build_dataset(mode=tf.estimator.ModeKeys.TRAIN,
                                                             dataset_dir=tfrecord_dir,
                                                             preprocess_data=data_preprocessing.train_data_preprocess(target_height=height, target_width=width),
                                                             batch_size=4)
        # TF1-style iteration: one-shot iterator driven inside a Session.
        iterator = train_data_generator.make_one_shot_iterator()
        nex_element = iterator.get_next()
        with tf.Session() as sess:
            for i in range(1000):
                images = sess.run(nex_element[0])
                for j in range(len(images)):
                    first_vid = images[j]
                    name = str(j) + str(i) + '.jpg'
                    if os.path.isdir(folder_name) is False:
                        os.mkdir(folder_name)
                    # Convert RGB -> BGR because OpenCV writes BGR.
                    cv.imwrite(folder_name + "/" + name, cv.cvtColor(first_vid, cv.COLOR_RGB2BGR))
        print(folder_name)
if __name__ == '__main__':
unittest.main() | StarcoderdataPython |
4983254 | <reponame>ap98nb26u/pyvisa
# -*- coding: utf-8 -*-
"""
pyvisa.ctwrapper
~~~~~~~~~~~~~~~~
ctypes wrapper for NI-VISA library.
This file is part of PyVISA.
:copyright: 2014 by PyVISA Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from __future__ import division, unicode_literals, print_function, absolute_import
from .highlevel import NIVisaLibrary
WRAPPER_CLASS = NIVisaLibrary
| StarcoderdataPython |
291453 | <gh_stars>1-10
import unittest
from .test_util import map_path
from simulator_diagnoser.matcher import MatcherResult, \
Terminal, \
Sequence, \
RuleException
class SequenceMatcherTest(unittest.TestCase):
    """Tests for ``Sequence``: ordered matching of sub-rules along a path
    of symbol lists, forwards and in reverse."""

    def setUp(self):
        # Rule matching the terminals 'A', 'B', 'C' in that order.
        self.s = Sequence([Terminal('A'),
                           Terminal('B'),
                           Terminal('C')],
                          'sequence')

    def test_label(self):
        self.assertEqual('sequence', self.s.get_label())

    def test_simple_path(self):
        # Duplicated symbols yield one match per valid index combination.
        path = map_path('AABCC')
        matches = self.s.match_path(path)
        expected_result = [MatcherResult(path, [(0, 'A'), (2, 'B'), (3, 'C')], counter=3),
                           MatcherResult(path, [(0, 'A'), (2, 'B'), (4, 'C')], counter=4),
                           MatcherResult(path, [(1, 'A'), (2, 'B'), (3, 'C')], counter=3),
                           MatcherResult(path, [(1, 'A'), (2, 'B'), (4, 'C')], counter=4)]
        self.assertListEqual(matches, expected_result)

    def test_nested(self):
        # A Sequence may contain another Sequence as a sub-rule.
        # NOTE(review): matchable='False' is the *string* 'False' (truthy);
        # confirm this is the type Sequence expects.
        s2 = Sequence([Terminal('A'),
                       self.s,
                       Terminal('C')],
                      'sequence2',
                      matchable='False')
        path = map_path('AABCC')
        matches = s2.match_path(path)
        expected_result = \
            [MatcherResult(path, [(0, 'A'), (1, 'A'), (2, 'B'), (3, 'C'), (4, 'C')], counter=4)]
        self.assertListEqual(matches, expected_result)

    def test_multiple_symbols(self):
        # A single path step may carry several symbols at once.
        path = [['A'], ['B', 'C']]
        matches = self.s.match_path(path)
        expected_result = [MatcherResult(path, [(0, 'A'), (1, 'B'), (1, 'C')], counter=1)]
        self.assertListEqual(matches, expected_result)

    def test_cardinality(self):
        # Constructing a Sequence without a proper sub-rule list raises.
        with self.assertRaises(RuleException):
            Sequence('s')
        with self.assertRaises(RuleException):
            Sequence('s', Terminal('A'))

    def test_incomplete_matches(self):
        # Paths missing any of A, B, C (in order) produce no matches.
        expected_result = []
        self.assertListEqual(self.s.match_path(map_path('A')), expected_result)
        self.assertListEqual(self.s.match_path(map_path('AB')), expected_result)
        self.assertListEqual(self.s.match_path(map_path('AC')), expected_result)
        self.assertListEqual(self.s.match_path(map_path('BC')), expected_result)
        self.assertListEqual(self.s.match_path(map_path('XYZ')), expected_result)

    def test_match_label(self):
        # The rule's own label appearing in the path also counts as a match.
        path = [['x'], ['sequence'], ['y'], ['z']]
        matches = self.s.match_path(path)
        expected_result = [MatcherResult(path, [(1, 'sequence')], counter=1)]
        self.assertListEqual(matches, expected_result)
        path = [['A'], ['B'], ['sequence'], ['C']]
        matches = self.s.match_path(path)
        expected_result = [MatcherResult(path, [(0, 'A'), (1, 'B'), (3, 'C')], counter=3),
                           MatcherResult(path, [(2, 'sequence')], counter=2)]
        self.assertListEqual(matches, expected_result)

    def test_not_matchable(self):
        # With matchable=False the label itself no longer matches.
        self.s.matchable = False
        path = [['x'], ['sequence'], ['y'], ['z']]
        matches = self.s.match_path(path)
        expected_result = []
        self.assertListEqual(matches, expected_result)

    def test_mid_match(self):
        # Matching can resume from an existing partial MatcherResult.
        path = map_path('XYZABDC')
        mid_match = MatcherResult(path, [(1, 'Y')], counter=1)
        matches = self.s.match(mid_match)
        expected_result = [MatcherResult(path, [(1, 'Y'), (3, 'A'), (4, 'B'), (6, 'C')], counter=6)]
        self.assertListEqual(matches, expected_result)

    def test_empty_path(self):
        path = []
        matches = self.s.match_path(path)
        expected_result = []
        self.assertListEqual(matches, expected_result)

    def test_reverse(self):
        # Reverse matching walks the path back to front.
        path = map_path('CBA')
        matches = self.s.match_reverse_path(path)
        expected_result = \
            [MatcherResult(path, [(0, 'C'), (1, 'B'), (2, 'A')], counter=0, reverse=True)]
        self.assertListEqual(matches, expected_result)

    def test_reverse2(self):
        path = map_path('CCBAA')
        matches = self.s.match_reverse_path(path)
        expected_result = \
            [MatcherResult(path, [(1, 'C'), (2, 'B'), (4, 'A')], counter=1, reverse=True),
             MatcherResult(path, [(0, 'C'), (2, 'B'), (4, 'A')], counter=0, reverse=True),
             MatcherResult(path, [(1, 'C'), (2, 'B'), (3, 'A')], counter=1, reverse=True),
             MatcherResult(path, [(0, 'C'), (2, 'B'), (3, 'A')], counter=0, reverse=True)]
        self.assertListEqual(matches, expected_result)

    def test_reverse_incomplete_matches(self):
        expected_result = []
        self.assertListEqual(self.s.match_reverse_path(map_path('A')), expected_result)
        self.assertListEqual(self.s.match_reverse_path(map_path('AB')), expected_result)
        self.assertListEqual(self.s.match_reverse_path(map_path('AC')), expected_result)
        self.assertListEqual(self.s.match_reverse_path(map_path('BC')), expected_result)
        self.assertListEqual(self.s.match_reverse_path(map_path('XYZ')), expected_result)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| StarcoderdataPython |
1834014 | import collections
# Count character frequencies in a sample string.
c = collections.Counter('hello world')
print(c)
print(c.most_common(3)) | StarcoderdataPython |
6665784 | <filename>seminars/09/02.py
#!/usr/bin/env python3
from os import get_terminal_size

import numpy as np

cols, rows = get_terminal_size()

# Full-terminal cell buffer; ``board`` views the interior so the one-cell
# border of ``display`` stays zero and acts as a permanently dead frame.
display = np.zeros([rows, cols], dtype=np.uint8)
board = display[1:-1, 1:-1]
board[...] = np.random.randint(0, 2, size=board.shape)
Y, X = board.shape

while True:
    # Draw each cell with ANSI cursor positioning + an inverse-video space
    # (green for live cells, default colour for dead ones).
    for y in range(Y):
        for x in range(X):
            print('\033[' + str(y) + ';' + str(x) + 'H'
                  + ('\x1b[7;32m \x1b[0m' if board[y, x] else '\x1b[7;39m \x1b[0m'), end='')
    # Sum the 8 neighbours of every interior cell via shifted slices.
    o = np.zeros(board.shape, dtype=np.uint8)
    o[...] = (
        display[0:-2, 0:-2] + display[0:-2, 1:-1] + display[0:-2, 2:] +
        display[1:-1, 0:-2] + display[1:-1, 2:] +
        display[2:, 0:-2] + display[2:, 1:-1] + display[2:, 2:]
    )
    # Conway's Game of Life rule: birth on 3 neighbours, survival on 2 or 3.
    board[...] = ((o == 3) | ((board == 1) & (o == 2)))
| StarcoderdataPython |
4823429 | <reponame>JDYuuki/genshin_task-resin-expedition_alert
def get_server(uid: int) -> str:
    """Map a Genshin Impact UID to its Chinese server identifier.

    UIDs beginning with 1 belong to the official CN server, those
    beginning with 5 to the channel (bilibili) server; anything else
    yields an empty string.
    """
    leading_digit = str(uid)[:1]
    if leading_digit == '1':
        return 'cn_gf01'
    if leading_digit == '5':
        return 'cn_qd01'
    return ''
| StarcoderdataPython |
3213960 | <reponame>syngenta-digital/package-python-alc
import functools
import inspect
import time
def troubleshoot(function):
    """Decorator that logs a call's arguments, result and wall-clock time.

    Prints the wrapped callable's qualified name, its bound arguments, the
    return value and the elapsed seconds; the return value is passed through
    unchanged.
    """
    # BUG FIX: without functools.wraps the decorated function lost its
    # __name__/__doc__/__module__ metadata.
    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        print('called: {}.{}'.format(function.__module__, function.__name__))
        arguments = inspect.signature(function).bind(*args, **kwargs).arguments
        arguments = ', '.join('{} = {!r}'.format(*item) for item in arguments.items())
        start = time.time()
        result = function(*args, **kwargs)
        end = time.time()
        try:
            print('arguments: {}'.format(arguments))
            print('results: {}'.format(result))
        except Exception:
            # The result may not be printable (e.g. a broken __repr__):
            # keep the original best-effort behaviour and carry on.
            print('something not printable')
        print('finished in {} secs'.format(round(end - start, 4)))
        return result
    return wrapper
| StarcoderdataPython |
5061952 | # Entrada
def es_bisiesto(anio: int) -> bool:
    """Return True when *anio* is a leap year under the Gregorian rules.

    BUG FIX: the original condition ``anio % 4 == 0 or (anio % 100 == 0
    and anio % 400 == 0)`` marked every multiple of 4 (e.g. 1900) as a
    leap year.  The correct rule: divisible by 400, or divisible by 4 but
    not by 100.
    """
    return anio % 400 == 0 or (anio % 4 == 0 and anio % 100 != 0)


if __name__ == "__main__":
    # Entrada: keep asking until the user enters a positive year.
    while True:
        fecha = int(input("Ingrese el año: "))
        if fecha > 0:
            break
    # Proceso y salida
    if es_bisiesto(fecha):
        print("%d es bisiesto" % fecha)
    else:
        print("%d no es bisiesto" % fecha)
8127428 | from bims.tasks.collection_record import * # noqa
from bims.tasks.cluster import * # noqa
from bims.tasks.search import * # noqa
from bims.tasks.search_version_2 import * # noqa
| StarcoderdataPython |
3445236 | from django.urls import path
from djf_surveys.admins import views as admin_views
# Admin routes for the survey app: survey CRUD, per-survey question
# CRUD and ordering, plus CSV download and summary views.  All route
# names are prefixed with ``admin_``.
urlpatterns = [
    path('', admin_views.AdminSurveyListView.as_view(), name='admin_survey'),
    path('create/survey/', admin_views.AdminCrateSurveyView.as_view(), name='admin_create_survey'),
    path('edit/survey/<str:slug>/', admin_views.AdminEditSurveyView.as_view(), name='admin_edit_survey'),
    path('delete/survey/<str:slug>/', admin_views.AdminDeleteSurveyView.as_view(), name='admin_delete_survey'),
    path('forms/<str:slug>/', admin_views.AdminSurveyFormView.as_view(), name='admin_forms_survey'),
    path('question/add/<int:pk>/', admin_views.AdminCreateQuestionView.as_view(), name='admin_create_question'),
    path('question/edit/<int:pk>/', admin_views.AdminUpdateQuestionView.as_view(), name='admin_edit_question'),
    path('question/delete/<int:pk>/', admin_views.AdminDeleteQuestionView.as_view(), name='admin_delete_question'),
    path('question/ordering/', admin_views.AdminChangeOrderQuestionView.as_view(), name='admin_change_order_question'),
    path('download/survey/<str:slug>/', admin_views.DownloadResponseSurveyView.as_view(), name='admin_download_survey'),
    path('summary/survey/<str:slug>/', admin_views.SummaryResponseSurveyView.as_view(), name='admin_summary_survey'),
]
| StarcoderdataPython |
9658311 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import compas_rhino
from compas_rhino.artists import Artist
from compas.utilities import color_to_colordict
__all__ = ['NetworkArtist']
class NetworkArtist(Artist):
    """A network artist defines functionality for visualising COMPAS networks in Rhino.

    Parameters
    ----------
    network : compas.datastructures.Network
        A COMPAS network.
    layer : str, optional
        The name of the layer that will contain the network.

    Attributes
    ----------
    defaults : dict
        Default settings for color, scale, tolerance, ...
    """

    __module__ = "compas_rhino.artists"

    def __init__(self, network, layer=None):
        super(NetworkArtist, self).__init__(layer=layer)
        self.settings.update({
            'color.vertex': (255, 255, 255),
            'color.edge': (0, 0, 0),
        })
        # BUG FIX: the constructor previously discarded ``network``; store it
        # (through the property) so the artist actually holds its data.
        self.network = network

    @property
    def network(self):
        """compas.datastructures.Network: The network that should be painted."""
        # BUG FIX: ``return self.network`` recursed infinitely; the value
        # lives in the private ``_network`` attribute.
        return self._network

    @network.setter
    def network(self, network):
        # BUG FIX: ``self.network = network`` recursed infinitely.
        self._network = network

    @classmethod
    def from_data(cls, data):
        """Reconstruct an artist (and its network) from a serialised dict."""
        module, attr = data['dtype'].split('/')
        Network = getattr(__import__(module, fromlist=[attr]), attr)
        network = Network.from_data(data['value'])
        artist = cls(network)
        return artist

    def to_data(self):
        """Serialise the wrapped network to its data dict."""
        return self.network.to_data()

    # ==========================================================================
    # clear
    # ==========================================================================

    def clear(self):
        """Clear the vertices and edges of the network, without clearing the
        other elements in the layer."""
        self.clear_vertices()
        self.clear_edges()
        self.clear_vertexlabels()
        self.clear_edgelabels()

    def clear_vertices(self, keys=None):
        """Clear all previously drawn vertices.

        Parameters
        ----------
        keys : list, optional
            The keys of a specific set of vertices that should be cleared.
            Default is to clear all vertices.
        """
        if not keys:
            name = '{}.vertex.*'.format(self.network.name)
            guids = compas_rhino.get_objects(name=name)
        else:
            guids = []
            for key in keys:
                name = '{}.vertex.{}'.format(self.network.name, key)
                guids += compas_rhino.get_objects(name=name)
        compas_rhino.delete_objects(guids)

    def clear_edges(self, keys=None):
        """Clear all previously drawn edges.

        Parameters
        ----------
        keys : list, optional
            The keys of a specific set of edges that should be cleared.
            Default is to clear all edges.
        """
        if not keys:
            name = '{}.edge.*'.format(self.network.name)
            guids = compas_rhino.get_objects(name=name)
        else:
            guids = []
            for u, v in keys:
                name = '{}.edge.{}-{}'.format(self.network.name, u, v)
                guids += compas_rhino.get_objects(name=name)
        compas_rhino.delete_objects(guids)

    def clear_vertexlabels(self, keys=None):
        """Clear all previously drawn vertex labels.

        Parameters
        ----------
        keys : list, optional
            The keys of a specific set of vertex labels that should be cleared.
            Default is to clear all vertex labels.
        """
        if not keys:
            name = '{}.vertex.label.*'.format(self.network.name)
            guids = compas_rhino.get_objects(name=name)
        else:
            guids = []
            for key in keys:
                name = '{}.vertex.label.{}'.format(self.network.name, key)
                guids += compas_rhino.get_objects(name=name)
        compas_rhino.delete_objects(guids)

    def clear_edgelabels(self, keys=None):
        """Clear all previously drawn edge labels.

        Parameters
        ----------
        keys : list, optional
            The keys of a specific set of edges of which the labels should be cleared.
            Default is to clear all edge labels.
        """
        if not keys:
            name = '{}.edge.label.*'.format(self.network.name)
            guids = compas_rhino.get_objects(name=name)
        else:
            guids = []
            for u, v in keys:
                name = '{}.edge.label.{}-{}'.format(self.network.name, u, v)
                guids += compas_rhino.get_objects(name=name)
        compas_rhino.delete_objects(guids)

    # ==========================================================================
    # components
    # ==========================================================================

    def draw(self, settings=None):
        raise NotImplementedError

    def draw_vertices(self, keys=None, color=None):
        """Draw a selection of vertices.

        Parameters
        ----------
        keys : list
            A list of vertex keys identifying which vertices to draw.
            Default is ``None``, in which case all vertices are drawn.
        color : str, tuple, dict
            The color specififcation for the vertices.
            Colors should be specified in the form of a string (hex colors) or
            as a tuple of RGB components.
            To apply the same color to all vertices, provide a single color
            specification. Individual colors can be assigned using a dictionary
            of key-color pairs. Missing keys will be assigned the default vertex
            color (``self.settings['color.vertex']``).
            The default is ``None``, in which case all vertices are assigned the
            default vertex color.

        Notes
        -----
        The vertices are named using the following template:
        ``"{}.vertex.{}".format(self.network.name, key)``.
        This name is used afterwards to identify vertices in the Rhino model.
        """
        keys = keys or list(self.network.vertices())
        colordict = color_to_colordict(color,
                                       keys,
                                       default=self.settings.get('color.vertex'),
                                       colorformat='rgb',
                                       normalize=False)
        points = []
        for key in keys:
            points.append({
                'pos': self.network.vertex_coordinates(key),
                'name': "{}.vertex.{}".format(self.network.name, key),
                'color': colordict[key],
                'layer': self.network.get_vertex_attribute(key, 'layer', None)
            })
        return compas_rhino.draw_points(points, layer=self.layer, clear=False, redraw=False)

    def draw_edges(self, keys=None, color=None):
        """Draw a selection of edges.

        Parameters
        ----------
        keys : list
            A list of edge keys (as uv pairs) identifying which edges to draw.
            The default is ``None``, in which case all edges are drawn.
        color : str, tuple, dict
            The color specififcation for the edges.
            Colors should be specified in the form of a string (hex colors) or
            as a tuple of RGB components.
            To apply the same color to all edges, provide a single color
            specification. Individual colors can be assigned using a dictionary
            of key-color pairs. Missing keys will be assigned the default face
            color (``self.settings['edge.color']``).
            The default is ``None``, in which case all edges are assigned the
            default edge color.

        Notes
        -----
        All edges are named using the following template:
        ``"{}.edge.{}-{}".fromat(self.network.name, u, v)``.
        This name is used afterwards to identify edges in the Rhino model.
        """
        keys = keys or list(self.network.edges())
        colordict = color_to_colordict(color,
                                       keys,
                                       default=self.settings.get('color.edge'),
                                       colorformat='rgb',
                                       normalize=False)
        lines = []
        for u, v in keys:
            lines.append({
                'start': self.network.vertex_coordinates(u),
                'end': self.network.vertex_coordinates(v),
                'color': colordict[(u, v)],
                'name': "{}.edge.{}-{}".format(self.network.name, u, v),
                'layer': self.network.get_edge_attribute((u, v), 'layer', None)
            })
        return compas_rhino.draw_lines(lines, layer=self.layer, clear=False, redraw=False)

    # ==========================================================================
    # labels
    # ==========================================================================

    def draw_vertexlabels(self, text=None, color=None):
        """Draw labels for a selection vertices.

        Parameters
        ----------
        text : dict
            A dictionary of vertex labels as key-text pairs.
            The default value is ``None``, in which case every vertex will be labelled with its key.
        color : str, tuple, dict
            The color sepcification of the labels.
            String values are interpreted as hex colors (e.g. ``'#ff0000'`` for red).
            Tuples are interpreted as RGB component specifications (e.g. ``(255, 0, 0) for red``.
            If a dictionary of specififcations is provided, the keys of the
            should refer to vertex keys and the values should be color
            specifications in the form of strings or tuples.
            The default value is ``None``, in which case the labels are assigned
            the default vertex color (``self.settings['color.vertex']``).

        Notes
        -----
        All labels are assigned a name using the folling template:
        ``"{}.vertex.label.{}".format(self.network.name, key)``.
        """
        if text is None:
            textdict = {key: str(key) for key in self.network.vertices()}
        elif isinstance(text, dict):
            textdict = text
        elif text == 'key':
            textdict = {key: str(key) for key in self.network.vertices()}
        elif text == 'index':
            textdict = {key: str(index) for index, key in enumerate(self.network.vertices())}
        else:
            raise NotImplementedError
        colordict = color_to_colordict(color,
                                       textdict.keys(),
                                       default=self.settings.get('color.vertex'),
                                       colorformat='rgb',
                                       normalize=False)
        labels = []
        for key, text in iter(textdict.items()):
            labels.append({
                'pos': self.network.vertex_coordinates(key),
                'name': "{}.vertex.label.{}".format(self.network.name, key),
                'color': colordict[key],
                'text': textdict[key],
                'layer': self.network.get_vertex_attribute(key, 'layer', None)
            })
        return compas_rhino.draw_labels(labels, layer=self.layer, clear=False, redraw=False)

    def draw_edgelabels(self, text=None, color=None):
        """Draw labels for a selection of edges.

        Parameters
        ----------
        text : dict
            A dictionary of edge labels as key-text pairs.
            The default value is ``None``, in which case every edge will be labelled with its key.
        color : str, tuple, dict
            The color sepcification of the labels.
            String values are interpreted as hex colors (e.g. ``'#ff0000'`` for red).
            Tuples are interpreted as RGB component specifications (e.g. ``(255, 0, 0) for red``.
            Individual colors can be assigned using a dictionary
            of key-color pairs. Missing keys will be assigned the default face
            color (``self.settings['edge.color']``).
            The default is ``None``, in which case all edges are assigned the
            default edge color.

        Notes
        -----
        All labels are assigned a name using the folling template:
        ``"{}.edge.{}".format(self.network.name, key)``.
        """
        if text is None:
            textdict = {(u, v): "{}-{}".format(u, v) for u, v in self.network.edges()}
        elif isinstance(text, dict):
            textdict = text
        else:
            raise NotImplementedError
        colordict = color_to_colordict(color,
                                       textdict.keys(),
                                       default=self.settings.get('color.edge'),
                                       colorformat='rgb',
                                       normalize=False)
        labels = []
        for (u, v), text in iter(textdict.items()):
            labels.append({
                'pos': self.network.edge_midpoint(u, v),
                'name': "{}.edge.label.{}-{}".format(self.network.name, u, v),
                'color': colordict[(u, v)],
                'text': textdict[(u, v)],
                'layer': self.network.get_edge_attribute((u, v), 'layer', None)
            })
        return compas_rhino.draw_labels(labels, layer=self.layer, clear=False, redraw=False)
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":

    import compas
    from compas.datastructures import Network

    # Smoke test: load a sample network and draw its vertices, edges,
    # vertex-key labels and edge-index labels.
    network = Network.from_obj(compas.get('grid_irregular.obj'))

    artist = NetworkArtist(network)

    artist.clear()
    artist.draw_vertices()
    artist.draw_edges()
    artist.draw_vertexlabels(text='key')
    artist.draw_edgelabels(text={key: index for index, key in enumerate(network.edges())})
| StarcoderdataPython |
9785704 | <reponame>kirillzhosul/video-hosting
#!usr/bin/python
"""
Video hosting API videos CRUD operations.
Interface to database CRUD operations.
"""
from typing import Optional, List
from sqlalchemy.orm import Session
import models
import schemas
def get_popular_videos(db: Session, offset: int, limit: int) -> List[models.Videos]:
    """Fetch a page of videos ordered by view count, most viewed first.

    :param db: Database session.
    :param offset: Number of rows to skip.
    :param limit: Maximum number of rows to return.
    :return: List of video ORM objects.
    """
    ranked = db.query(models.Videos).order_by(models.Videos.views.desc())
    return ranked.offset(offset).limit(limit).all()
def get_video(db: Session, video_id: int) -> Optional[models.Videos]:
    """Look up a single video by its primary key.

    :param db: Database session.
    :param video_id: Primary key of the video.
    :return: The video, or None when no row matches.
    """
    matching = db.query(models.Videos).filter_by(id=video_id)
    return matching.first()
def create_video(db: Session, video_scheme: schemas.VideoCreate) -> Optional[models.Videos]:
    """Persist a new video row built from the given creation scheme.

    :param db: Database session.
    :param video_scheme: Validated payload carrying title and description.
    :return: The newly created video ORM object.
    """
    new_video = models.Videos(
        title=video_scheme.title,
        description=video_scheme.description,
    )
    db.add(new_video)
    db.commit()
    return new_video
| StarcoderdataPython |
6475106 | import itertools
from typing import Dict, Union
from ..set5.challenge39 import inverse_mod
from .challenge43 import (
get_dsa_constants,
get_private_dsa_key_message_val,
get_sha1_fingerprint,
)
def parse_text_file():
    """Parse ``44.txt`` into a list of signed-message dicts.

    The file is a flat sequence of 4-line records; every group of four
    ``key: value`` lines becomes one dict.  ``r`` and ``s`` values are
    decimal integers, ``m`` is parsed as hex; everything else stays a
    string.

    :return: list of dicts, one per 4-line record.
    """
    with open("44.txt") as file_handle:
        lines = file_handle.read().splitlines()
    current: Dict[str, Union[str, int]] = {}
    messages = []
    for index, line in enumerate(lines):
        if index != 0 and index % 4 == 0:
            messages.append(current)
            current = {}
        value: Union[str, int]
        # maxsplit=1 so a message body containing ": " cannot break parsing.
        key, value = line.split(": ", 1)
        if key == "r" or key == "s":
            value = int(value)
        elif key == "m":
            value = int(value, 16)
        current[key] = value
    if current:
        # BUG FIX: the final 4-line record was previously dropped, because a
        # record was only appended when the *next* one started.
        messages.append(current)
    return messages
def repeated_nonce_find_private(messages):
    """Recover the DSA private key from two signatures sharing a nonce.

    When two messages carry the same ``r``, the per-message nonce ``k``
    was reused (``r`` depends only on the public parameters and ``k``),
    so ``k`` — and then the private key — can be solved for.  Returns
    None when no pair of messages shares an ``r``.
    """
    p, q, g = get_dsa_constants()
    for first, second in itertools.combinations(messages, 2):
        if first["r"] != second["r"]:
            continue
        # k = (m1 - m2) / (s1 - s2) mod q
        s_diff_inv = inverse_mod(first["s"] - second["s"], q)
        k = ((first["m"] - second["m"]) * s_diff_inv) % q
        signature = (first["r"], first["s"])
        return get_private_dsa_key_message_val(first["m"], k, signature)
def challenge44():
    """Cryptopals 44: recover a DSA key from nonce-reusing signatures."""
    private = repeated_nonce_find_private(parse_text_file())
    fingerprint = get_sha1_fingerprint(private)
    print(f"Private key fingerprint: {fingerprint}")
if __name__ == "__main__":
    # Script entry point: recover the key from the bundled 44.txt data.
    challenge44()
| StarcoderdataPython |
9760970 | """
Django settings for a simple project.
Generated by 'django-admin startproject' using Django 1.8.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import sys
import os
from os import path
import datetime

# Directory layout derived from this file's location; apps/ and assets/
# are added to sys.path below so their packages import as top-level names.
PROJECT_ROOT = path.dirname(path.dirname(path.abspath(__file__)))  # project_dir
BASE_DIR = path.dirname(PROJECT_ROOT)  # settings
APP_DIR = path.join(PROJECT_ROOT, "apps")  # settings
ASSET_DIR = path.join(PROJECT_ROOT, "assets")  # settings
MEDIA_ROOT = path.join(PROJECT_ROOT, "media")
LOG_DIR = path.join(BASE_DIR, "logs")  # settings
sys.path.insert(0, path.abspath(APP_DIR))
sys.path.insert(0, path.abspath(ASSET_DIR))  # currently empty

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded here; move it to an environment
# variable before deploying.
SECRET_KEY = 'projectrcuqwneioasdj9i093283eni0'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# use custom user model
AUTH_USER_MODEL = 'profiles.User'

# Application definition
INSTALLED_APPS = (
    'grappelli',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'storages',
    'rest_framework',  # utilities for rest apis
    'rest_framework.authtoken',  # token authentication
)

PROJECT_APPS = (
    'authentication',
    'profiles',
    'core',
)

INSTALLED_APPS += PROJECT_APPS

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    # 'django.middleware.csrf.CsrfViewMiddleware', # csrf exempt
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = 'project.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            path.join(PROJECT_ROOT, "templates")  # for some play ground web app
        ],
        'APP_DIRS': False,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'django.core.context_processors.request',
                'django.core.context_processors.i18n',
                'django.core.context_processors.media',
                'django.core.context_processors.static',
            ],
            'loaders': [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader'
            ]
        },
    },
]

WSGI_APPLICATION = 'project.wsgi.application'

# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = path.join(path.dirname(BASE_DIR), 'staticfiles')
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)

MEDIA_URL = '/media/'
APPEND_SLASH = False
# Dotted path of the helper invoked when a media file must be deleted.
DELETE_MEDIA_FILE_METHOD = 'core.utils.delete_file_local'

REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
    ),
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
    ),
}

AUTHENTICATION_BACKENDS = (
    "django.contrib.auth.backends.ModelBackend",
)
APPSECRET_PROOF = False | StarcoderdataPython |
93808 | <reponame>ljod-is/poetcave
# Generated by Django 3.1.5 on 2021-03-07 18:59
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds a ``publicly_visible`` flag (default False)
    # to the Poem model.

    dependencies = [
        ('poem', '0012_bookmark'),
    ]

    operations = [
        migrations.AddField(
            model_name='poem',
            name='publicly_visible',
            field=models.BooleanField(default=False),
        ),
    ]
| StarcoderdataPython |
3279783 | import json
import pandas as pd
import urllib.parse #input data
from tkinter import Tk
from tkinter import filedialog
from enum import Enum
import io
def get_file_path_csv():
    """Show a native file-picker restricted to .csv files and return
    the selected path (empty string if the dialog is cancelled)."""
    root = Tk()
    root.update()
    chosen_path = filedialog.askopenfilename(filetypes=[("csv files", "*.csv")])
    root.destroy()
    return chosen_path
def read_data_frame_from_local_csv(col_names=None, delim_whitespace=False, header='infer'):
    """Load a CSV chosen interactively into a DataFrame.

    Inside Google Colab the file comes from an upload widget; otherwise a
    local file-picker dialog is shown.

    :param col_names: optional replacement column names; default keeps the
        CSV's own header.  (Was a mutable default ``[]``.)
    :param delim_whitespace: forwarded to ``pandas.read_csv``.
    :param header: forwarded to ``pandas.read_csv``.
    :return: the loaded ``pandas.DataFrame``.
    """
    try:
        from google.colab import files  # only importable inside Colab
        in_colab = True
    except ImportError:  # was a bare ``except``: narrowed to the real failure
        in_colab = False

    if in_colab:
        uploaded = files.upload()
        csv_file_name = list(uploaded.keys())[0]
        source = io.BytesIO(uploaded[csv_file_name])
    else:
        source = get_file_path_csv()

    # Single read/rename path instead of the previous duplicated branches.
    df = pd.read_csv(source, delim_whitespace=delim_whitespace, header=header)
    if col_names:
        df.columns = col_names
    return df
def read_data_frame_from_remote_csv(csv_url, col_names=None, delim_whitespace=False, header='infer'):
    """Load a CSV from a URL (or any path pandas accepts) into a DataFrame.

    :param csv_url: URL or path handed to ``pandas.read_csv``.
    :param col_names: optional replacement column names; default keeps the
        CSV's own header.  (Was a mutable default ``[]``.)
    :param delim_whitespace: forwarded to ``pandas.read_csv``.
    :param header: forwarded to ``pandas.read_csv``.
    :return: the loaded ``pandas.DataFrame``.
    """
    df = pd.read_csv(csv_url, delim_whitespace=delim_whitespace, header=header)
    if col_names:
        df.columns = col_names
    return df
| StarcoderdataPython |
6586408 | <filename>Utils.py
def read_json(filename):
    """Yield one decoded object per line of a JSON-lines file.

    Lines that are not valid JSON are skipped (the module's original
    best-effort behaviour).

    :param filename: path to a JSON-lines file.
    """
    import json
    with open(filename) as i:
        for line in i:
            try:
                yield json.loads(line)
            except ValueError:
                # BUG FIX: was a bare ``except`` which also swallowed
                # GeneratorExit/KeyboardInterrupt; json.JSONDecodeError
                # is a ValueError, so only parse failures are skipped now.
                continue
def write_json(filename, rows):
    """Serialise *rows* to *filename* in JSON-lines format, flushing
    after every record."""
    import json
    with open(filename, 'w') as out:
        for record in rows:
            out.write('%s\n' % json.dumps(record))
            out.flush()
def select_kbest(k):
    """Return a chi-squared SelectKBest transformer keeping *k* features."""
    from sklearn.feature_selection import SelectKBest, chi2
    return SelectKBest(chi2, k=k)
def naive_bayes():
    """Return a fresh multinomial naive-Bayes classifier (default params)."""
    from sklearn.naive_bayes import MultinomialNB
    return MultinomialNB()
def kbest_naive_bayes(k):
    """Pipeline: chi-squared top-*k* feature selection, then multinomial NB."""
    from sklearn.pipeline import Pipeline
    return Pipeline([('kbest', select_kbest(k)), ('nb', naive_bayes())])
class Predictor:
    """Text classifier wrapping a joblib-persisted model plus its
    feature (n-gram -> column index) and class (id -> name) mappings,
    both stored as JSON-lines files."""

    def __init__(self, model, features, classes):
        from sklearn.externals import joblib
        # features file: (ngram, column-index) pairs.
        self._features_by_name = dict(read_json(features))
        # classes file: (name, id) pairs, inverted here to id -> name.
        self._classes_by_id = dict((id, name) for name, id in read_json(classes))
        self._model = joblib.load(model)

    def predict(self, content):
        """Return the predicted class name for the raw text *content*.

        NOTE(review): repeated n-grams overwrite the vector slot rather
        than accumulate; this is only correct if Ngrams.generate already
        returns aggregated counts — confirm against that module.
        """
        import Ngrams
        import numpy
        vector = [0] * len(self._features_by_name)
        ngrams = Ngrams.generate(content)
        for ngram, count in ngrams:
            if ngram in self._features_by_name:
                vector[self._features_by_name[ngram]] = count
        x = numpy.array([vector])
        pred = self._model.predict(x)
        return self._classes_by_id[pred[0]]
| StarcoderdataPython |
from setuptools import setup, find_packages

setup(
    name="rasa_addons",
    version="2.3.3-bf.4",
    author="Botfront",
    description="Rasa Addons - Components for Rasa and Botfront",
    install_requires=[
        "requests",
        "requests_futures",
        "fuzzy_matcher",
        "fbmessenger",
        "sgqlc",
        'pypred @ git+https://git@github.com/dialoguemd/pypred.git@7<PASSWORD>ecf<PASSWORD>',
    ],
    packages=find_packages(),
    # Fixed typo: setuptools expects ``license`` — the original ``licence``
    # keyword was reported as an unknown distribution option and ignored.
    license="Apache 2.0",
    url="https://botfront.io",
    author_email="<EMAIL>",
)
| StarcoderdataPython |
8039729 | # -*- coding: utf-8 -*-
#*****************************************************************************
# Copyright (C) 2003-2006 <NAME>.
# Copyright (C) 2006 <NAME>. <<EMAIL>>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
''' an attempt to implement readline for Python in Python using ctypes'''
import sys,os,re
from glob import glob
import clipboard,logger,console
from logger import log,log_sock
from error import ReadlineError,GetSetError
from pyreadline.keysyms.common import make_KeyPress_from_keydescr
import pyreadline.lineeditor.lineobj as lineobj
import pyreadline.lineeditor.history as history
import release
from modes import editingmodes
# Detect the IronPython runtime from the interpreter version string.
in_ironpython="IronPython" in sys.version
if in_ironpython:#ironpython does not provide a prompt string to readline
    # System is the .NET namespace exposed by IronPython; used elsewhere
    # for console interop.
    import System
    default_prompt=">>> "
else:
    default_prompt=""
import pdb
def quote_char(c):
    '''Return *c* unchanged when its ordinal is non-zero.

    NOTE(review): a NUL character falls through and yields None —
    presumably intentional, confirm against callers.
    '''
    return c if ord(c) > 0 else None
def inword(buffer, point):
    '''Return True if the character at *point* in *buffer* is ASCII alphanumeric.

    The original expression ``buffer[point:point+1] in [A-Za-z0-9]`` was a
    broken literal: it parses as the arithmetic expression ``A - Za - z0 - 9``
    and raises NameError at runtime. Test the character against the intended
    character class instead. Out-of-range points yield an empty slice and
    therefore False.
    '''
    return re.match(r'[A-Za-z0-9]', buffer[point:point + 1]) is not None
class Readline(object):
    '''Readline state machine: owns the console, the editable line buffer,
    the history, the editing modes and all key bindings. Configuration is
    read from ``~/pyreadlineconfig.ini`` at construction time.

    NOTE: this module is Python 2 code (``print`` statements, ``execfile``,
    ``dict.iteritems``); it will not run unmodified on Python 3.
    '''
    def __init__(self):
        # Hooks installed by the embedding application (see the readline API).
        self.startup_hook = None
        self.pre_input_hook = None
        self.completer = None
        self.completer_delims = " \t\n\"\\'`@$><=;|&{("
        self.console = console.Console()
        self.size = self.console.size()
        self.prompt_color = None
        self.command_color = None
        # Selection is rendered by swapping fore/background nibbles of the
        # saved console attribute.
        self.selection_color = self.console.saveattr<<4
        self.key_dispatch = {}
        self.previous_func = None
        self.first_prompt = True
        self.next_meta = False # True to force meta on next character
        self.tabstop = 4
        self.allow_ctrl_c=False
        self.ctrl_c_tap_time_interval=0.3
        self.debug=False
        self.begidx = 0
        self.endidx = 0

        # variables you can control with parse_and_bind
        self.show_all_if_ambiguous = 'off'
        self.mark_directories = 'on'
        self.bell_style = 'none'
        self.mark=-1
        self.l_buffer=lineobj.ReadLineTextBuffer("")
        self._history=history.LineHistory()

        # this code needs to follow l_buffer and history creation
        self.editingmodes=[mode(self) for mode in editingmodes]
        for mode in self.editingmodes:
            mode.init_editing_mode(None)
        self.mode=self.editingmodes[0]

        self.read_inputrc()
        log("\n".join(self.rl_settings_to_string()))

        #Paste settings
        #assumes data on clipboard is path if shorter than 300 characters and doesn't contain \t or \n
        #and replace \ with / for easier use in ipython
        self.enable_ipython_paste_for_paths=True

        #automatically convert tabseparated data to list of lists or array constructors
        self.enable_ipython_paste_list_of_lists=True
        self.enable_win32_clipboard=True

        self.paste_line_buffer=[]

    #Below is for refactoring, raise errors when using old style attributes
    #that should be refactored out
    def _g(x):
        # Build a (getter, setter) pair that always raises, so legacy
        # attribute access fails loudly instead of silently misbehaving.
        def g(self):
            raise GetSetError("GET %s"%x)
        def s(self,q):
            raise GetSetError("SET %s"%x)
        return g,s
    line_buffer=property(*_g("line_buffer"))
    line_cursor=property(*_g("line_buffer"))
    undo_stack =property(*_g("undo_stack")) # each entry is a tuple with cursor_position and line_text
    history_length =property(*_g("history_length")) # each entry is a tuple with cursor_position and line_text
    history =property(*_g("history")) # each entry is a tuple with cursor_position and line_text
    history_cursor =property(*_g("history_cursor")) # each entry is a tuple with cursor_position and line_text

    # To export as readline interface

    def parse_and_bind(self, string):
        '''Parse and execute single line of a readline init file.'''
        try:
            log('parse_and_bind("%s")' % string)
            if string.startswith('#'):
                return
            if string.startswith('set'):
                # "set <variable> <value>" assigns a configuration attribute.
                m = re.compile(r'set\s+([-a-zA-Z0-9]+)\s+(.+)\s*$').match(string)
                if m:
                    var_name = m.group(1)
                    val = m.group(2)
                    try:
                        setattr(self, var_name.replace('-','_'), val)
                    except AttributeError:
                        log('unknown var="%s" val="%s"' % (var_name, val))
                else:
                    log('bad set "%s"' % string)
                return
            # "<keyseq>: <function-name>" binds a key to an editing command.
            m = re.compile(r'\s*(.+)\s*:\s*([-a-zA-Z]+)\s*$').match(string)
            if m:
                key = m.group(1)
                func_name = m.group(2)
                py_name = func_name.replace('-', '_')
                try:
                    func = getattr(self.mode, py_name)
                except AttributeError:
                    log('unknown func key="%s" func="%s"' % (key, func_name))
                    if self.debug:
                        print 'pyreadline parse_and_bind error, unknown function to bind: "%s"' % func_name
                    return
                self.mode._bind_key(key, func)
        except:
            log('error')
            raise

    def get_line_buffer(self):
        '''Return the current contents of the line buffer.'''
        return self.l_buffer.get_line_text()

    def insert_text(self, string):
        '''Insert text into the command line.'''
        self.l_buffer.insert_text(string)

    def read_init_file(self, filename=None):
        '''Parse a readline initialization file. The default filename is the last filename used.'''
        # NOTE(review): only logs the call; actual parsing is not implemented here.
        log('read_init_file("%s")' % filename)

    #History file book keeping methods (non-bindable)

    def add_history(self, line):
        '''Append a line to the history buffer, as if it was the last line typed.'''
        self._history.add_history(line)

    def get_history_length(self ):
        '''Return the desired length of the history file.

        Negative values imply unlimited history file size.'''
        return self._history.get_history_length()

    def set_history_length(self, length):
        '''Set the number of lines to save in the history file.

        write_history_file() uses this value to truncate the history file
        when saving. Negative values imply unlimited history file size.
        '''
        self._history.set_history_length(length)

    def clear_history(self):
        '''Clear readline history'''
        self._history.clear_history()

    def read_history_file(self, filename=None):
        '''Load a readline history file. The default filename is ~/.history.'''
        self._history.read_history_file(filename)

    def write_history_file(self, filename=None):
        '''Save a readline history file. The default filename is ~/.history.'''
        self._history.write_history_file(filename)

    #Completer functions

    def set_completer(self, function=None):
        '''Set or remove the completer function.

        If function is specified, it will be used as the new completer
        function; if omitted or None, any completer function already
        installed is removed. The completer function is called as
        function(text, state), for state in 0, 1, 2, ..., until it returns a
        non-string value. It should return the next possible completion
        starting with text.
        '''
        log('set_completer')
        self.completer = function

    def get_completer(self):
        '''Get the completer function.
        '''
        log('get_completer')
        return self.completer

    def get_begidx(self):
        '''Get the beginning index of the readline tab-completion scope.'''
        return self.begidx

    def get_endidx(self):
        '''Get the ending index of the readline tab-completion scope.'''
        return self.endidx

    def set_completer_delims(self, string):
        '''Set the readline word delimiters for tab-completion.'''
        self.completer_delims = string

    def get_completer_delims(self):
        '''Get the readline word delimiters for tab-completion.'''
        return self.completer_delims

    def set_startup_hook(self, function=None):
        '''Set or remove the startup_hook function.

        If function is specified, it will be used as the new startup_hook
        function; if omitted or None, any hook function already installed is
        removed. The startup_hook function is called with no arguments just
        before readline prints the first prompt.
        '''
        self.startup_hook = function

    def set_pre_input_hook(self, function=None):
        '''Set or remove the pre_input_hook function.

        If function is specified, it will be used as the new pre_input_hook
        function; if omitted or None, any hook function already installed is
        removed. The pre_input_hook function is called with no arguments
        after the first prompt has been printed and just before readline
        starts reading input characters.
        '''
        self.pre_input_hook = function

    ## Internal functions

    def rl_settings_to_string(self):
        '''Return a list of strings describing current settings and key bindings.'''
        out=["%-20s: %s"%("show all if ambigous",self.show_all_if_ambiguous)]
        out.append("%-20s: %s"%("mark_directories",self.mark_directories))
        out.append("%-20s: %s"%("bell_style",self.bell_style))
        # NOTE(review): mark_directories is listed twice — probably an
        # accidental duplicate; confirm before removing.
        out.append("%-20s: %s"%("mark_directories",self.mark_directories))
        out.append("------------- key bindings ------------")
        tablepat="%-7s %-7s %-7s %-15s %-15s "
        out.append(tablepat%("Control","Meta","Shift","Keycode/char","Function"))
        bindings=[(k[0],k[1],k[2],k[3],v.__name__) for k,v in self.mode.key_dispatch.iteritems()]
        bindings.sort()
        for key in bindings:
            out.append(tablepat%(key))
        return out

    def _bell(self):
        '''ring the bell if requested.'''
        if self.bell_style == 'none':
            pass
        elif self.bell_style == 'visible':
            raise NotImplementedError("Bellstyle visible is not implemented yet.")
        elif self.bell_style == 'audible':
            self.console.bell()
        else:
            raise ReadlineError("Bellstyle %s unknown."%self.bell_style)

    def _clear_after(self):
        '''Blank the console from the cursor to two lines below it.'''
        c = self.console
        x, y = c.pos()
        w, h = c.size()
        c.rectangle((x, y, w+1, y+1))
        c.rectangle((0, y+1, w, min(y+3,h)))

    def _set_cursor(self):
        '''Move the console cursor to the logical cursor position in the
        line buffer, wrapping across screen rows as needed.'''
        c = self.console
        xc, yc = self.prompt_end_pos
        w, h = c.size()
        xc += self.l_buffer.visible_line_width()
        while(xc >= w):
            xc -= w
            yc += 1
        c.pos(xc, yc)

    def _print_prompt(self):
        '''Write the prompt and record its begin/end screen positions.'''
        c = self.console
        x, y = c.pos()
        n = c.write_scrolling(self.prompt, self.prompt_color)
        self.prompt_begin_pos = (x, y - n)
        self.prompt_end_pos = c.pos()
        self.size = c.size()

    def _update_prompt_pos(self, n):
        '''Shift the recorded prompt positions up by *n* scrolled rows.'''
        if n != 0:
            bx, by = self.prompt_begin_pos
            ex, ey = self.prompt_end_pos
            self.prompt_begin_pos = (bx, by - n)
            self.prompt_end_pos = (ex, ey - n)

    def _update_line(self):
        '''Redraw the edited line after the prompt, highlighting any selection.'''
        c=self.console
        c.cursor(0) #Hide cursor avoiding flicking
        c.pos(*self.prompt_end_pos)
        ltext = self.l_buffer.quoted_text()
        if self.l_buffer.enable_selection and self.l_buffer.selection_mark>=0:
            # Render [start, stop) with the selection color, the rest normally.
            start=len(self.l_buffer[:self.l_buffer.selection_mark].quoted_text())
            stop=len(self.l_buffer[:self.l_buffer.point].quoted_text())
            if start>stop:
                stop,start=start,stop
            n = c.write_scrolling(ltext[:start], self.command_color)
            n = c.write_scrolling(ltext[start:stop], self.selection_color)
            n = c.write_scrolling(ltext[stop:], self.command_color)
        else:
            n = c.write_scrolling(ltext, self.command_color)

        x,y = c.pos() #Preserve one line for Asian IME(Input Method Editor) statusbar
        w,h = c.size()
        if y >= h - 1 or n > 0:
            c.scroll_window(-1)
            c.scroll((0,0,w,h),0,-1)
            n += 1

        self._update_prompt_pos(n)
        if hasattr(c,"clear_to_end_of_window"): #Work around function for ironpython due
            c.clear_to_end_of_window() #to System.Console's lack of FillFunction
        else:
            self._clear_after()

        c.cursor(1) #Show cursor
        self._set_cursor()

    def readline(self, prompt=''):
        '''Read one line of input, delegating to the active editing mode.'''
        return self.mode.readline(prompt)

    def read_inputrc(self,inputrcpath=os.path.expanduser("~/pyreadlineconfig.ini")):
        '''Execute the pyreadline config file; the nested functions below are
        the commands exposed to that file via the ``loc`` namespace.'''
        modes=dict([(x.mode,x) for x in self.editingmodes])
        mode=self.editingmodes[0].mode

        def setmode(name):
            self.mode=modes[name]
        def bind_key(key,name):
            log("bind %s %s"%(key,name))
            if hasattr(modes[mode],name):
                modes[mode]._bind_key(key,getattr(modes[mode],name))
            else:
                print "Trying to bind unknown command '%s' to key '%s'"%(name,key)
        def un_bind_key(key):
            keyinfo = make_KeyPress_from_keydescr(key).tuple()
            if keyinfo in modes[mode].key_dispatch:
                del modes[mode].key_dispatch[keyinfo]
        def bind_exit_key(key):
            modes[mode]._bind_exit_key(key)
        def un_bind_exit_key(key):
            keyinfo = make_KeyPress_from_keydescr(key).tuple()
            if keyinfo in modes[mode].exit_dispatch:
                del modes[mode].exit_dispatch[keyinfo]
        def setkill_ring_to_clipboard(killring):
            import pyreadline.lineeditor.lineobj
            pyreadline.lineeditor.lineobj.kill_ring_to_clipboard=killring
        def sethistoryfilename(filename):
            self._history.history_filename=os.path.expanduser(filename)
        def setbellstyle(mode):
            self.bell_style=mode
        def sethistorylength(length):
            self._history.history_length=int(length)
        def allow_ctrl_c(mode):
            log_sock("allow_ctrl_c:%s:%s"%(self.allow_ctrl_c,mode))
            self.allow_ctrl_c=mode
        # NOTE(review): setbellstyle is defined twice; the second definition
        # shadows the first (they are identical) — confirm before removing.
        def setbellstyle(mode):
            self.bell_style=mode
        def show_all_if_ambiguous(mode):
            self.show_all_if_ambiguous=mode
        def ctrl_c_tap_time_interval(mode):
            self.ctrl_c_tap_time_interval=mode
        def mark_directories(mode):
            self.mark_directories=mode
        def completer_delims(mode):
            self.completer_delims=mode
        def debug_output(on,filename="pyreadline_debug_log.txt"): #Not implemented yet
            if on in ["on","on_nologfile"]:
                self.debug=True
                logger.start_log(on,filename)
                logger.log("STARTING LOG")
            # print release.branch
        def set_prompt_color(color):
            trtable={"black":0,"darkred":4,"darkgreen":2,"darkyellow":6,"darkblue":1,"darkmagenta":5,"darkcyan":3,"gray":7,
                     "red":4+8,"green":2+8,"yellow":6+8,"blue":1+8,"magenta":5+8,"cyan":3+8,"white":7+8}
            self.prompt_color=trtable.get(color.lower(),7)

        def set_input_color(color):
            trtable={"black":0,"darkred":4,"darkgreen":2,"darkyellow":6,"darkblue":1,"darkmagenta":5,"darkcyan":3,"gray":7,
                     "red":4+8,"green":2+8,"yellow":6+8,"blue":1+8,"magenta":5+8,"cyan":3+8,"white":7+8}
            self.command_color=trtable.get(color.lower(),7)

        # Namespace exposed to the executed config file.
        loc={"branch":release.branch,
             "version":release.version,
             "mode":mode,
             "modes":modes,
             "set_mode":setmode,
             "bind_key":bind_key,
             "bind_exit_key":bind_exit_key,
             "un_bind_key":un_bind_key,
             "un_bind_exit_key":un_bind_exit_key,
             "bell_style":setbellstyle,
             "mark_directories":mark_directories,
             "show_all_if_ambiguous":show_all_if_ambiguous,
             "completer_delims":completer_delims,
             "debug_output":debug_output,
             "history_filename":sethistoryfilename,
             "history_length":sethistorylength,
             "set_prompt_color":set_prompt_color,
             "set_input_color":set_input_color,
             "allow_ctrl_c":allow_ctrl_c,
             "ctrl_c_tap_time_interval":ctrl_c_tap_time_interval,
             "kill_ring_to_clipboard":setkill_ring_to_clipboard,
             }
        if os.path.isfile(inputrcpath):
            try:
                execfile(inputrcpath,loc,loc)
            except Exception,x:
                # NOTE(review): this bare ``raise`` makes the error-reporting
                # lines below unreachable — confirm whether it was meant to be
                # removed or placed after the reporting.
                raise
                import traceback
                print >>sys.stderr, "Error reading .pyinputrc"
                filepath,lineno=traceback.extract_tb(sys.exc_traceback)[1][:2]
                print >>sys.stderr, "Line: %s in file %s"%(lineno,filepath)
                print >>sys.stderr, x
                raise ReadlineError("Error reading .pyinputrc")
def CTRL(c):
    '''Return the control character corresponding to printable character *c*.

    Only characters between '@' and '_' have control-code counterparts.
    '''
    assert '@' <= c <= '_'
    # Control codes sit exactly 0x40 below their printable counterparts.
    return chr(ord(c) - 0x40)
# create a Readline object to contain the state
rl = Readline()

def GetOutputFile():
    '''Return the console object used by readline so that it can be used for printing in color.'''
    return rl.console

# make these available so this looks like the python readline module
parse_and_bind = rl.parse_and_bind
get_line_buffer = rl.get_line_buffer
insert_text = rl.insert_text
read_init_file = rl.read_init_file
add_history = rl.add_history
get_history_length = rl.get_history_length
set_history_length = rl.set_history_length
clear_history = rl.clear_history
read_history_file = rl.read_history_file
write_history_file = rl.write_history_file
set_completer = rl.set_completer
get_completer = rl.get_completer
get_begidx = rl.get_begidx
get_endidx = rl.get_endidx
set_completer_delims = rl.set_completer_delims
get_completer_delims = rl.get_completer_delims
set_startup_hook = rl.set_startup_hook
set_pre_input_hook = rl.set_pre_input_hook

if __name__ == '__main__':
    # manual smoke test: read three lines and echo them (Python 2 print)
    res = [ rl.readline('In[%d] ' % i) for i in range(3) ]
    print res
else:
    # imported as a module: install our readline into the console hook
    console.install_readline(rl.readline)
    pass
| StarcoderdataPython |
8187318 | # coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2016-2018 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""Functions and classes for array-like objects, implementing common numpy
array features for datasets or nested sequences, while trying to avoid copying
data.
Classes:
- :class:`DatasetView`: Similar to a numpy view, to access
a h5py dataset as if it was transposed, without casting it into a
numpy array (this lets h5py handle reading the data from the
file into memory, as needed).
- :class:`ListOfImages`: Similar to a numpy view, to access
a list of 2D numpy arrays as if it was a 3D array (possibly transposed),
without casting it into a numpy array.
Functions:
- :func:`is_array`
- :func:`is_list_of_arrays`
- :func:`is_nested_sequence`
- :func:`get_shape`
- :func:`get_dtype`
- :func:`get_concatenated_dtype`
"""
from __future__ import absolute_import, print_function, division
import sys
import numpy
import six
import numbers
__authors__ = ["<NAME>"]
__license__ = "MIT"
__date__ = "26/04/2017"
def is_array(obj):
    """Return True if object implements necessary attributes to be
    considered similar to a numpy array.

    Attributes needed are "shape", "dtype", "__getitem__"
    and "__array__".

    :param obj: Array-like object (numpy array, h5py dataset...)
    :return: boolean
    """
    # add more required attribute if necessary
    required_attrs = ("shape", "dtype", "__array__", "__getitem__")
    return all(hasattr(obj, attr) for attr in required_attrs)
def is_list_of_arrays(obj):
    """Return True if object is a sequence of numpy arrays,
    e.g. a list of images as 2D arrays.

    :param obj: list of arrays
    :return: boolean"""
    # a numpy array itself is not a *list* of arrays, and the candidate
    # must at least be a sized sequence
    if is_array(obj) or not hasattr(obj, "__len__"):
        return False
    # every element must itself be array-like
    return all(is_array(element) for element in obj)
def is_nested_sequence(obj):
    """Return True if object is a nested sequence.

    A simple 1D sequence is considered to be a nested sequence.

    Numpy arrays and h5py datasets are not considered to be nested sequences.

    To test if an object is a nested sequence in a more general sense,
    including arrays and datasets, use::

        is_nested_sequence(obj) or is_array(obj)

    :param obj: nested sequence (numpy array, h5py dataset...)
    :return: boolean"""
    # arrays are excluded by definition, and the object must be sized
    if is_array(obj) or not hasattr(obj, "__len__"):
        return False
    # Walk down the first element of each nesting level: finding an array
    # means obj is a (list of) arrays, not a plain nested sequence.
    level = obj
    while hasattr(level, "__len__"):
        if is_array(level):
            return False
        # strings are sequences whose elements are strings: stop here
        # to avoid an infinite descent
        if isinstance(level, six.string_types + (six.binary_type, )):
            return True
        level = level[0]
    # object has __len__ and is not an array
    return True
def get_shape(array_like):
    """Return shape of an array like object.

    In case the object is a nested sequence but not an array or dataset
    (list of lists, tuples...), the size of each dimension is assumed to be
    uniform, and is deduced from the length of the first sequence.

    :param array_like: Array like object: numpy array, hdf5 dataset,
        multi-dimensional sequence
    :return: Shape of array, as a tuple of integers
    """
    # arrays and datasets expose their shape directly
    if hasattr(array_like, "shape"):
        return array_like.shape

    # otherwise descend through the first element of each nesting level
    dims = []
    level = array_like
    while hasattr(level, "__len__"):
        dims.append(len(level))
        # strings would recurse forever (a str's first element is a str)
        if isinstance(level, six.string_types + (six.binary_type, )):
            break
        level = level[0]
    return tuple(dims)
def get_dtype(array_like):
    """Return dtype of an array like object.

    In the case of a nested sequence, the type of the first value
    is inspected.

    :param array_like: Array like object: numpy array, hdf5 dataset,
        multi-dimensional nested sequence
    :return: numpy dtype of object
    """
    # arrays and datasets expose their dtype directly
    if hasattr(array_like, "dtype"):
        return array_like.dtype

    # otherwise drill down to a scalar element and infer its numpy dtype
    level = array_like
    while hasattr(level, "__len__"):
        # strings would recurse forever (a str's first element is a str)
        if isinstance(level, six.string_types + (six.binary_type, )):
            break
        level = level[0]
    return numpy.dtype(type(level))
def get_concatenated_dtype(arrays):
    """Return dtype of array resulting of concatenation
    of a list of arrays (without actually concatenating
    them).

    :param arrays: list of numpy arrays
    :return: resulting dtype after concatenating arrays
    """
    # Build one tiny array per distinct dtype and let numpy's promotion
    # rules decide the common dtype when they are stacked together.
    samples = [numpy.zeros((1, 1), dtype=dt) for dt in {a.dtype for a in arrays}]
    return numpy.array(samples).dtype
class ListOfImages(object):
    """This class provides a way to access values and slices in a stack of
    images stored as a list of 2D numpy arrays, without creating a 3D numpy
    array first.

    A transposition can be specified, as a 3-tuple of dimensions in the wanted
    order. For example, to transpose from ``xyz`` ``(0, 1, 2)`` into ``yzx``,
    the transposition tuple is ``(1, 2, 0)``

    All the 2D arrays in the list must have the same shape.

    The global dtype of the stack of images is the one that would be obtained
    by casting the list of 2D arrays into a 3D numpy array.

    :param images: list of 2D numpy arrays, or :class:`ListOfImages` object
    :param transposition: Tuple of dimension numbers in the wanted order
    """
    def __init__(self, images, transposition=None):
        """
        """
        super(ListOfImages, self).__init__()

        # if images is a ListOfImages instance, get the underlying data
        # as a list of 2D arrays
        if isinstance(images, ListOfImages):
            images = images.images

        # test stack of images is as expected
        assert is_list_of_arrays(images), \
            "Image stack must be a list of arrays"
        image0_shape = images[0].shape
        for image in images:
            assert image.ndim == 2, \
                "Images must be 2D numpy arrays"
            assert image.shape == image0_shape, \
                "All images must have the same shape"

        self.images = images
        """List of images"""

        self.shape = (len(images), ) + image0_shape
        """Tuple of array dimensions"""
        self.dtype = get_concatenated_dtype(images)
        """Data-type of the global array"""
        self.ndim = 3
        """Number of array dimensions"""

        self.size = len(images) * image0_shape[0] * image0_shape[1]
        """Number of elements in the array."""

        self.transposition = list(range(self.ndim))
        """List of dimension indices, in an order depending on the
        specified transposition. By default this is simply
        [0, ..., self.ndim], but it can be changed by specifying a different
        ``transposition`` parameter at initialization.

        Use :meth:`transpose`, to create a new :class:`ListOfImages`
        with a different :attr:`transposition`.
        """

        if transposition is not None:
            assert len(transposition) == self.ndim
            assert set(transposition) == set(list(range(self.ndim))), \
                "Transposition must be a sequence containing all dimensions"
            self.transposition = transposition
            self.__sort_shape()

    def __sort_shape(self):
        """Sort shape in the order defined in :attr:`transposition`
        """
        new_shape = tuple(self.shape[dim] for dim in self.transposition)
        self.shape = new_shape

    def __sort_indices(self, indices):
        """Return array indices sorted in the order needed
        to access data in the original non-transposed images.

        :param indices: Tuple of ndim indices, in the order needed
            to access the transposed view
        :return: Sorted tuple of indices, to access original data
        """
        assert len(indices) == self.ndim
        # pair each index with its original dimension, then sort by dimension
        sorted_indices = tuple(idx for (_, idx) in
                               sorted(zip(self.transposition, indices)))
        return sorted_indices

    def __array__(self, dtype=None):
        """Cast the images into a numpy array, and return it.

        If a transposition has been done on this images, return
        a transposed view of a numpy array."""
        return numpy.transpose(numpy.array(self.images, dtype=dtype),
                               self.transposition)

    def __len__(self):
        return self.shape[0]

    def transpose(self, transposition=None):
        """Return a re-ordered (dimensions permutated)
        :class:`ListOfImages`.

        The returned object refers to
        the same images but with a different :attr:`transposition`.

        :param List[int] transposition: List/tuple of dimension numbers in the
            wanted order.
            If ``None`` (default), reverse the dimensions.
        :return: new :class:`ListOfImages` object
        """
        # by default, reverse the dimensions
        if transposition is None:
            transposition = list(reversed(self.transposition))

        # If this ListOfImages is already transposed, sort new transposition
        # relative to old transposition
        elif list(self.transposition) != list(range(self.ndim)):
            transposition = [self.transposition[i] for i in transposition]

        return ListOfImages(self.images,
                            transposition)

    @property
    def T(self):
        """
        Same as self.transpose()

        :return: DatasetView with dimensions reversed."""
        return self.transpose()

    def __getitem__(self, item):
        """Handle a subset of numpy indexing with regards to the dimension
        order as specified in :attr:`transposition`

        Following features are **not supported**:

            - fancy indexing using numpy arrays
            - using ellipsis objects

        :param item: Index
        :return: value or slice as a numpy array
        """
        # 1-D slicing -> n-D slicing (n=1)
        if not hasattr(item, "__len__"):
            # first dimension index is given
            item = [item]
            # following dimensions are indexed with : (all elements)
            item += [slice(None) for _i in range(self.ndim - 1)]

        # n-dimensional slicing
        if len(item) != self.ndim:
            raise IndexError(
                "N-dim slicing requires a tuple of N indices/slices. " +
                "Needed dimensions: %d" % self.ndim)

        # get list of indices sorted in the original images order
        sorted_indices = self.__sort_indices(item)
        list_idx, array_idx = sorted_indices[0], sorted_indices[1:]

        images_selection = self.images[list_idx]

        # now we must transpose the output data
        output_dimensions = []
        frozen_dimensions = []

        for i, idx in enumerate(item):
            # slices and sequences
            if not isinstance(idx, numbers.Integral):
                output_dimensions.append(self.transposition[i])
            # regular integer index
            else:
                # whenever a dimension is fixed (indexed by an integer)
                # the number of output dimension is reduced
                frozen_dimensions.append(self.transposition[i])

        # decrement output dimensions that are above frozen dimensions
        for frozen_dim in reversed(sorted(frozen_dimensions)):
            for i, out_dim in enumerate(output_dimensions):
                if out_dim > frozen_dim:
                    output_dimensions[i] -= 1

        assert (len(output_dimensions) + len(frozen_dimensions)) == self.ndim
        assert set(output_dimensions) == set(range(len(output_dimensions)))

        # single list elements selected
        if isinstance(images_selection, numpy.ndarray):
            return numpy.transpose(images_selection[array_idx],
                                   axes=output_dimensions)
        # muliple list elements selected
        else:
            # apply selection first
            output_stack = []
            for img in images_selection:
                output_stack.append(img[array_idx])
            # then cast into a numpy array, and transpose
            return numpy.transpose(numpy.array(output_stack),
                                   axes=output_dimensions)

    def min(self):
        """
        :return: Global minimum value
        """
        min_value = self.images[0].min()
        if len(self.images) > 1:
            for img in self.images[1:]:
                min_value = min(min_value, img.min())
        return min_value

    def max(self):
        """
        :return: Global maximum value
        """
        max_value = self.images[0].max()
        if len(self.images) > 1:
            for img in self.images[1:]:
                max_value = max(max_value, img.max())
        return max_value
class DatasetView(object):
"""This class provides a way to transpose a dataset without
casting it into a numpy array. This way, the dataset in a file need not
necessarily be integrally read into memory to view it in a different
transposition.
.. note::
The performances depend a lot on the way the dataset was written
to file. Depending on the chunking strategy, reading a complete 2D slice
in an unfavorable direction may still require the entire dataset to
be read from disk.
:param dataset: h5py dataset
:param transposition: List of dimensions sorted in the order of
transposition (relative to the original h5py dataset)
"""
def __init__(self, dataset, transposition=None):
"""
"""
super(DatasetView, self).__init__()
self.dataset = dataset
"""original dataset"""
self.shape = dataset.shape
"""Tuple of array dimensions"""
self.dtype = dataset.dtype
"""Data-type of the array’s element"""
self.ndim = len(dataset.shape)
"""Number of array dimensions"""
size = 0
if self.ndim:
size = 1
for dimsize in self.shape:
size *= dimsize
self.size = size
"""Number of elements in the array."""
self.transposition = list(range(self.ndim))
"""List of dimension indices, in an order depending on the
specified transposition. By default this is simply
[0, ..., self.ndim], but it can be changed by specifying a different
`transposition` parameter at initialization.
Use :meth:`transpose`, to create a new :class:`DatasetView`
with a different :attr:`transposition`.
"""
if transposition is not None:
assert len(transposition) == self.ndim
assert set(transposition) == set(list(range(self.ndim))), \
"Transposition must be a list containing all dimensions"
self.transposition = transposition
self.__sort_shape()
def __sort_shape(self):
"""Sort shape in the order defined in :attr:`transposition`
"""
new_shape = tuple(self.shape[dim] for dim in self.transposition)
self.shape = new_shape
def __sort_indices(self, indices):
"""Return array indices sorted in the order needed
to access data in the original non-transposed dataset.
:param indices: Tuple of ndim indices, in the order needed
to access the view
:return: Sorted tuple of indices, to access original data
"""
assert len(indices) == self.ndim
sorted_indices = tuple(idx for (_, idx) in
sorted(zip(self.transposition, indices)))
return sorted_indices
def __getitem__(self, item):
"""Handle fancy indexing with regards to the dimension order as
specified in :attr:`transposition`
The supported fancy-indexing syntax is explained at
http://docs.h5py.org/en/latest/high/dataset.html#fancy-indexing.
Additional restrictions exist if the data has been transposed:
- numpy boolean array indexing is not supported
- ellipsis objects are not supported
:param item: Index, possibly fancy index (must be supported by h5py)
:return: Sliced numpy array or numpy scalar
"""
# no transposition, let the original dataset handle indexing
if self.transposition == list(range(self.ndim)):
return self.dataset[item]
# 1-D slicing: create a list of indices to switch to n-D slicing
if not hasattr(item, "__len__"):
# first dimension index (list index) is given
item = [item]
# following dimensions are indexed with slices representing all elements
item += [slice(None) for _i in range(self.ndim - 1)]
# n-dimensional slicing
if len(item) != self.ndim:
raise IndexError(
"N-dim slicing requires a tuple of N indices/slices. " +
"Needed dimensions: %d" % self.ndim)
# get list of indices sorted in the original dataset order
sorted_indices = self.__sort_indices(item)
output_data_not_transposed = self.dataset[sorted_indices]
# now we must transpose the output data
output_dimensions = []
frozen_dimensions = []
for i, idx in enumerate(item):
# slices and sequences
if not isinstance(idx, int):
output_dimensions.append(self.transposition[i])
# regular integer index
else:
# whenever a dimension is fixed (indexed by an integer)
# the number of output dimension is reduced
frozen_dimensions.append(self.transposition[i])
# decrement output dimensions that are above frozen dimensions
for frozen_dim in reversed(sorted(frozen_dimensions)):
for i, out_dim in enumerate(output_dimensions):
if out_dim > frozen_dim:
output_dimensions[i] -= 1
assert (len(output_dimensions) + len(frozen_dimensions)) == self.ndim
assert set(output_dimensions) == set(range(len(output_dimensions)))
return numpy.transpose(output_data_not_transposed,
axes=output_dimensions)
def __array__(self, dtype=None):
"""Cast the dataset into a numpy array, and return it.
If a transposition has been done on this dataset, return
a transposed view of a numpy array."""
return numpy.transpose(numpy.array(self.dataset, dtype=dtype),
self.transposition)
    def __len__(self):
        # Length along the first (possibly transposed) dimension,
        # mirroring len() semantics for numpy arrays.
        return self.shape[0]
def transpose(self, transposition=None):
"""Return a re-ordered (dimensions permutated)
:class:`DatasetView`.
The returned object refers to
the same dataset but with a different :attr:`transposition`.
:param List[int] transposition: List of dimension numbers in the wanted order.
If ``None`` (default), reverse the dimensions.
:return: Transposed DatasetView
"""
# by default, reverse the dimensions
if transposition is None:
transposition = list(reversed(self.transposition))
# If this DatasetView is already transposed, sort new transposition
# relative to old transposition
elif list(self.transposition) != list(range(self.ndim)):
transposition = [self.transposition[i] for i in transposition]
return DatasetView(self.dataset,
transposition)
    @property
    def T(self):
        """
        Same as self.transpose()

        :return: DatasetView with dimensions reversed."""
        # Convenience alias mirroring numpy.ndarray.T
        return self.transpose()
| StarcoderdataPython |
8027601 | <reponame>juju-solutions/interface-spark
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from charms.reactive import RelationBase
from charms.reactive import hook
from charms.reactive import scopes
class SparkRequires(RelationBase):
    """Requires side of the ``spark`` interface.

    Tracks the relation life-cycle through reactive states
    (``{relation_name}.joined``, ``.ready``, ``.master``) and exposes
    accessors for the connection data published by the remote Spark unit.
    """
    scope = scopes.GLOBAL

    def is_spark_started(self):
        """True once the remote side reports ``spark_started == 'true'``."""
        started = self.get_remote('spark_started', 'false')
        return started.lower() == 'true'

    @hook('{requires:spark}-relation-joined')
    def joined(self):
        self.conversation().set_state('{relation_name}.joined')

    @hook('{requires:spark}-relation-changed')
    def changed(self):
        conversation = self.conversation()
        conversation.toggle_state('{relation_name}.ready',
                                  active=self.is_spark_started())
        # master info is usable only when both URL and IP are published
        master_known = bool(self.get_master_url()) and bool(self.get_master_ip())
        conversation.toggle_state('{relation_name}.master',
                                  active=master_known)

    @hook('{requires:spark}-relation-departed')
    def departed(self):
        conversation = self.conversation()
        # keep the states while at least one other unit remains related
        if len(conversation.units) > 1:
            return
        conversation.remove_state('{relation_name}.joined')
        conversation.remove_state('{relation_name}.ready')

    def get_private_ip(self):
        return self.conversation().get_remote('private-address')

    def get_rest_port(self):
        return self.conversation().get_remote('rest_port')

    def get_master_info(self):
        """Return the published connection string and master address."""
        conversation = self.conversation()
        return {
            'connection_string': conversation.get_remote('connection_string'),
            'master': conversation.get_remote('master'),
        }

    def get_master_url(self):
        return self.conversation().get_remote('connection_string')

    def get_master_ip(self):
        return self.conversation().get_remote('master')
| StarcoderdataPython |
1754731 | <filename>python/sdk/data/CANFrame.py
import base64
import json
import time
import sdk.util as util
from sdk.data.Mappable import Mappable
class CANFrame(Mappable):
    """CAN Frame data class.

    Holds a frame identifier, its payload bytes, a millisecond timestamp
    and a ``response`` flag, plus (de)serialization helpers.
    """
    # frame_id: int CAN identifier; data: bytearray payload;
    # timestamp: creation time in ms; response: bool flag set by from_obj
    __slots__ = ('timestamp', 'frame_id', 'data', 'response')

    def __init__(self, frame_id, data):
        """
        :param int frame_id:
        :param bytearray data:
        """
        # current time in milliseconds
        self.timestamp = int(round(time.time() * 1000))
        self.frame_id = frame_id
        self.data = data
        self.response = False

    def __eq__(self, other):
        # frames compare by id and payload only; timestamp/response are ignored
        if isinstance(other, CANFrame):
            return self.frame_id == other.frame_id and self.data == other.data
        return False

    def __repr__(self):
        return 'CANFrame(time=%s, frame_id=%s, data=%s, response=%s)' % \
               (self.timestamp, hex(self.frame_id), CANFrame.marshalling_data(self.data), self.response)

    def set_timestamp(self, t):
        """Override the creation timestamp (milliseconds)."""
        self.timestamp = t

    def to_map(self):
        """Return a dict with base64-encoded payload.

        NOTE(review): 'response' is not included here although from_obj
        reads it — confirm whether that asymmetry is intentional.
        """
        return {
            'timestamp': self.timestamp,
            'id': self.frame_id,
            'data': CANFrame.marshalling_data(self.data)
        }

    def to_json(self):
        """Serialize to JSON with hex-string id and payload bytes."""
        return json.dumps({
            'timestamp': self.timestamp,
            'id': hex(self.frame_id),
            'data': [hex(b) for b in bytearray(self.data)]
        })

    @staticmethod
    def from_json(json_str):
        """Parse a CANFrame from JSON produced by :meth:`to_json`.

        Hex literals (``0x..``) are rewritten to decimal integers before
        parsing because JSON has no hex number syntax.
        """
        # BUGFIX: decode bytes input on Python 3. The previous check
        # ("'decode' in json_str") tested for the substring 'decode' and
        # raised TypeError when json_str actually was a bytes object.
        if util.P3 and isinstance(json_str, bytes):
            json_str = json_str.decode('utf-8')
        while True:
            # find the next hex token, quoted ("0x..") or bare (0x..)
            i_0x = json_str.find('"0x')
            if i_0x < 0:
                i_0x = json_str.find('0x')
            if i_0x < 0:
                break
            # token ends at the next ',' or, for the last list element, ']'
            i_comma = json_str[i_0x:].find(',')
            if i_comma < 0:
                i_comma = json_str[i_0x:].find(']')
            i_comma += i_0x
            val_str = json_str[i_0x:i_comma]
            if val_str[0] == '"':
                val_str = val_str[1:-1]
            value = int(val_str, 16)
            json_str = json_str[:i_0x] + str(value) + json_str[i_comma:]
        loaded = json.loads(json_str)
        can_frame = CANFrame(int(loaded['id']), bytearray(int(i) for i in loaded['data']))
        can_frame.set_timestamp(loaded['timestamp'])
        return can_frame

    @staticmethod
    def marshalling_data(data):
        """Base64-encode the payload; return str on Python 3."""
        result = base64.b64encode(data)
        if util.P3:
            result = result.decode('UTF-8')
        return result

    @staticmethod
    def unmarshalling_data(str):
        """Decode a base64 payload back to bytes."""
        return base64.b64decode(str)

    @staticmethod
    def from_obj(obj):
        """Rebuild a frame from a mapping with base64 data and response flag."""
        can_frame = CANFrame(obj['id'], CANFrame.unmarshalling_data(obj['data']))
        can_frame.set_timestamp(obj['timestamp'])
        can_frame.response = obj['response']
        return can_frame
| StarcoderdataPython |
8136481 | import torch
class VGGPerceptron(torch.nn.Module):
    """
    A class representing the standard VGG perceptron for feature extraction

    Attributes
    ----------
    model : torch.nn.Module
        the inner architecture

    Methods
    -------
    __create_model(cfg, in_channels, data_size, batch_norm)
        creates the inner architecture of this layer
    __create_layers(cfg, in_channels, batch_norm, dim)
        creates the inner convolutions layers
    forward(input)
        evaluates the inner architecture with the given input
    """

    def __init__(self,
                 *data_size,
                 cfg=[64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
                 in_channels=3,
                 batch_norm=False,
                 ):
        """
        Parameters
        ----------
        *data_size : tuple
            the data size in each dimension (one entry per spatial dim)
        cfg : list (optional)
            a list containing the inner architecture configuration;
            integers are conv output channels, 'M' is a 2x max-pooling
            (default list kept for interface compatibility; it is never
            mutated, so the shared default is safe)
        in_channels : int (optional)
            the number of input channels (default is 3)
        batch_norm : bool
            if True adds batch normalization layers (default is False)
        """
        super(VGGPerceptron, self).__init__()
        # BUGFIX: pass data_size as ONE tuple instead of unpacking it.
        # The previous call `self.__create_model(cfg, in_channels,
        # *data_size, batch_norm)` made the call arity depend on the
        # number of dimensions and raised TypeError for any input.
        self.__create_model(cfg, in_channels, data_size, batch_norm)

    def __create_model(self, cfg, in_channels, data_size, batch_norm):
        """
        Creates the inner architecture of this layer

        Parameters
        ----------
        cfg : list
            the inner architecture configuration
        in_channels : int
            the number of input channels
        data_size : tuple
            the data size in each dimension
        batch_norm : bool
            if True adds batch normalization layers
        """
        layers, pool = self.__create_layers(cfg, in_channels=in_channels,
                                            batch_norm=batch_norm, dim=len(data_size))
        # each 'M' halves every spatial dimension; the adaptive pooling
        # pins the output to the size the pool stack would produce
        adaptive_pool_cls = getattr(torch.nn, 'AdaptiveAvgPool%dd' % len(data_size))
        target_size = tuple(extent // (2 ** pool) for extent in data_size)
        self.model = torch.nn.Sequential(*layers, adaptive_pool_cls(target_size))

    def __create_layers(self, cfg, in_channels=3, batch_norm=False, dim=2):
        """
        Creates the inner convolutions layers

        Parameters
        ----------
        cfg : list
            the inner architecture configuration
        in_channels : int (optional)
            the number of input channels (default is 3)
        batch_norm : bool (optional)
            if True adds batch normalization layers (default is False)
        dim : int (optional)
            the input data number of dimensions (default is 2)

        Returns
        -------
        (list, int)
            the inner architecture layers in a list and the number of
            max-pooling layers
        """
        # resolve the dimension-specific module classes once, instead of
        # building source strings for eval() at every layer
        conv_cls = getattr(torch.nn, 'Conv%dd' % dim)
        pool_cls = getattr(torch.nn, 'MaxPool%dd' % dim)
        norm_cls = getattr(torch.nn, 'BatchNorm%dd' % dim)
        layers = []
        pool = 0
        for v in cfg:
            if v == 'M':
                layers.append(pool_cls(kernel_size=2, stride=2))
                pool += 1
            else:
                layers.append(conv_cls(in_channels, v, kernel_size=3, padding=1))
                if batch_norm:
                    layers.append(norm_cls(v))
                layers.append(torch.nn.ReLU(inplace=True))
                in_channels = v
        return layers, pool

    def forward(self, input):
        """
        Evaluates the inner architecture with the given input

        Parameters
        ----------
        input : Tensor
            the input tensor, shape (batch, in_channels, *data_size)

        Returns
        -------
        Tensor
            the extracted feature tensor
        """
        return self.model(input)
| StarcoderdataPython |
6474097 | <reponame>Kalpavrikshika/python_modules
int_var = 1779

# Plain integers are not iterators: calling next() on one raises TypeError.
# The previous version let the exception propagate, so the script died here
# and the string-iteration demo below never ran.
try:
    next(int_var)
except TypeError as error:
    print(error)

# Strings are iterable: iter() returns an iterator over their characters.
my_string = "Yasoob"
my_iter = iter(my_string)
print(next(my_iter))
120817 | <gh_stars>0
import cv2
import pyzed.sl as sl
import numpy as np
import time
def print_camera_information(cam):
    """Print resolution, FPS, firmware version and serial number.

    :param cam: an opened ``pyzed.sl.Camera`` instance
    """
    print("Resolution: {0}, {1}.".format(round(cam.get_resolution().width, 2), cam.get_resolution().height))
    print("Camera FPS: {0}.".format(cam.get_camera_fps()))
    print("Firmware: {0}.".format(cam.get_camera_information().firmware_version))
    print("Serial number: {0}.\n".format(cam.get_camera_information().serial_number))
def init_cam():
    """Open the ZED camera in PERFORMANCE depth mode with centimeter units.

    Exits the process when the camera cannot be opened.

    :return: an opened ``pyzed.sl.Camera`` instance
    """
    # the previous version defined camera_settings/str_camera_settings/
    # step_camera_settings locals that were never used; removed
    init_params = sl.InitParameters()
    init_params.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_PERFORMANCE
    init_params.coordinate_units = sl.UNIT.UNIT_CENTIMETER
    cam = sl.Camera()
    if not cam.is_opened():
        print("Opening ZED Camera...")
    status = cam.open(init_params)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()
    return cam
def init_runtime():
    """Build the per-frame runtime parameters (STANDARD sensing mode).

    :return: a ``pyzed.sl.RuntimeParameters`` instance
    """
    runtime = sl.RuntimeParameters()
    runtime.sensing_mode = sl.SENSING_MODE.SENSING_MODE_STANDARD
    return runtime
def frame_center(width, height):
    """Return the (x, y) pixel coordinates of the image center."""
    center_x = width // 2
    center_y = height // 2
    return (center_x, center_y)
def norm(x):
    """Euclidean (L2) norm of a vector given as an iterable of numbers."""
    squared_sum = sum(component * component for component in x)
    return np.sqrt(squared_sum)
def avg_distance(pixel_rect, threshold):
    """Average the Euclidean norms of the given points, skipping outliers.

    Points whose norm is >= *threshold* are excluded from the average.

    :param pixel_rect: iterable of point coordinates (e.g. (x, y, z) lists)
    :param threshold: maximum distance (exclusive) for a point to count
    :return: mean distance of the kept points, or 0 when none qualify
    """
    kept = []
    for pixels in pixel_rect:
        distance = norm(pixels)
        if distance < threshold:
            kept.append(distance)
            print('dis:', distance)
    if not kept:
        # mirrors the original ZeroDivisionError fallback
        return 0
    return sum(kept) / len(kept)
def create_circle(disp_img, center, distance, color=(0, 0, 255)):
    """Draw a distance marker on *disp_img* at *center*.

    For a finite *distance*, draws a filled center dot, a circle of radius
    ``distance - 50`` and the distance label; for NaN/inf (too close for
    the depth sensor) draws the dot and a '< 70 cm' warning label.

    :param disp_img: BGR image to draw on (modified in place)
    :param center: (x, y) pixel coordinates of the marker
    :param distance: measured distance in cm (may be NaN/inf)
    :param color: BGR color of the dot and circle
    """
    FONT = cv2.FONT_HERSHEY_SIMPLEX
    FONT_SIZE = 1
    cv2.circle(disp_img, center, 5, color, -1)
    if not np.isnan(distance) and not np.isinf(distance):
        print('distance:', distance, 'cm')
        # BUGFIX: np.int was removed from NumPy (deprecated 1.20);
        # the builtin int does the same truncation here
        cv2.circle(disp_img, center, int(distance - 50), color, 2)
        cv2.putText(
            disp_img,
            '{} cm'.format(distance),
            (center[0], center[1] - 30),
            FONT,
            FONT_SIZE,
            (0, 255, 0),
            2,
            cv2.LINE_AA
        )
    else:
        print("Can't estimate distance at this position, move the camera\n")
        cv2.circle(disp_img, center, 5, color, -1)
        cv2.putText(
            disp_img,
            '< 70 cm',
            (center[0], center[1] - 10),
            FONT,
            FONT_SIZE,
            (0, 0, 255),
            2,
            cv2.LINE_AA
        )
def create_rect(disp_img, cx, cy, pts_cloud, diag=40, color=(0, 0, 255)):
    """Draw a square around (cx, cy), annotate it with the mean distance
    of the point cloud sampled inside it, and return that distance.

    :param disp_img: BGR image to draw on (modified in place)
    :param cx, cy: center of the square in pixels
    :param pts_cloud: ZED point cloud (``sl.Mat`` with XYZRGBA values)
    :param diag: half side-length of the square in pixels
    :param color: BGR color of the rectangle outline
    :return: average distance in cm of the sampled in-range points
    """
    tl = (cx - diag, cy - diag)
    tr = (cx + diag, cy - diag)
    br = (cx + diag, cy + diag)
    # BUGFIX: honor the caller-supplied color; it was previously accepted
    # as a parameter but the rectangle was hard-coded to red
    cv2.rectangle(disp_img, tl, br, color, 2)
    # sample the point cloud on a sparse grid inside the rectangle
    pixel_rect = []
    step = 10
    for col in range(tl[0], br[0] + 1, step):
        for row in range(tl[1], br[1] + 1, step):
            err, pixel = pts_cloud.get_value(col, row)
            pixel_rect.append(pixel[:3])
    avg_cm = avg_distance(pixel_rect, threshold=120)
    cv2.putText(
        disp_img,
        '{:.2f} cm'.format(avg_cm),
        tr,
        cv2.FONT_HERSHEY_SIMPLEX,
        1,
        (0, 255, 0),
        2,
        cv2.LINE_AA
    )
    return avg_cm
def main():
    """Grab frames from the ZED camera, overlay the distance measurement
    of the central region, show the result and append it to avg_log.txt.

    Press 'q' in the display window to exit.
    """
    print("Running...")
    cam = init_cam()
    runtime = init_runtime()
    img = sl.Mat()
    depth = sl.Mat()
    point_cloud = sl.Mat()
    # NOTE(review): range(0, 300, 300) yields only [0], so a single
    # (centered) marker is configured — confirm the step was intended.
    dx_list = [x for x in range(0, 300, 300)]
    colors = []
    for i in range(len(dx_list)):
        colors.append(np.random.randint(0, 256, 3).tolist())
    key = ''
    while key != 113: # press 'q' to exit
        err = cam.grab(runtime)
        if err == sl.ERROR_CODE.SUCCESS:
            # Recieve image and measurement
            cam.retrieve_image(img, sl.VIEW.VIEW_LEFT)
            cam.retrieve_measure(depth, sl.MEASURE.MEASURE_DEPTH)
            cam.retrieve_measure(point_cloud, sl.MEASURE.MEASURE_XYZRGBA)
            disp_img = img.get_data()
            (cx, cy) = frame_center(img.get_width(), img.get_height())
            # Drawing and Calculate area in rectangle
            avg_area1 = create_rect(disp_img, cx, cy, point_cloud, diag=40, color=(0, 0, 255))
            # avg_area2 = create_rect(disp_img, 300, 300, point_cloud, diag=40, color=(255, 0, 0))
            # Drawing Circle
            # for dx, color in zip(dx_list, colors):
            #     error, pc = point_cloud.get_value(cx+dx, cy)
            #     distance = np.round(norm(pc[:3]), decimals=2)
            #     create_circle(disp_img, (cx+dx, cy), distance, color)
            cv2.imshow("Chicken Farm", disp_img)
            key = cv2.waitKey(5)
            # Delay
            time.sleep(0.01)
            # Writing to disk (appends one measurement per frame)
            with open('avg_log.txt', 'a') as file:
                file.writelines('{:.2f},\n'.format(avg_area1))
        else:
            key = cv2.waitKey(5)
    cam.close()
    print('Closed camera')
if __name__ == '__main__':
main() | StarcoderdataPython |
12708 | from __future__ import absolute_import
# flake8: noqa
# import apis into api package
import speechpro.cloud.speech.synthesis.rest.cloud_client.api.session_api
import speechpro.cloud.speech.synthesis.rest.cloud_client.api.synthesize_api
| StarcoderdataPython |
6504782 | from Xdmf import *
# Demonstrates the XdmfSubset API: building a subset over a 10-element
# array and exercising its accessors. The '#//name begin' / '#//name end'
# comment pairs are snippet markers used to extract documentation examples;
# keep them as-is.
if __name__ == "__main__":
    #//initialization begin
    baseArray = XdmfArray.New()
    for i in range(0, 10):
        baseArray.pushBack(i)
    initStart = UInt32Vector()
    initStart.push_back(0)
    initStride = UInt32Vector()
    initStride.push_back(1)
    initDimension = UInt32Vector()
    initDimension.push_back(10)
    # subset selecting all 10 elements (start 0, stride 1, dimension 10)
    exampleSubset = XdmfSubset.New(baseArray,
                                   initStart,
                                   initStride,
                                   initDimension)
    #//initialization end
    #//getStart begin
    exampleStart = exampleSubset.getStart()
    #//getStart end
    #//setStart begin
    exampleSubset.setStart(exampleStart)
    #//setStart end
    #//getStride begin
    exampleStride = exampleSubset.getStride()
    #getStride end
    #//setStride begin
    exampleSubset.setStride(exampleStride)
    #//setStride end
    #//getDimensions begin
    exampleDimensions = exampleSubset.getDimensions()
    #//getDimensions end
    #//setDimensions begin
    exampleSubset.setDimensions(exampleDimensions)
    #//setDimensions end
    #//getReferenceArray begin
    exampleInternalArray = exampleSubset.getReferenceArray()
    #//getReferenceArray end
    #//setReferenceArray begin
    exampleSubset.setReferenceArray(exampleInternalArray)
    #//setReferenceArray end
    #//getSize begin
    exampleSize = exampleSubset.getSize()
    #//getSize end
    #//setConstructedType begin
    typeAttribute = XdmfAttribute.New()
    exampleSubset.setConstructedType(typeAttribute.getItemTag())
    #//setConstructedType end
    #//getConstructedType begin
    exampleType = exampleSubset.getConstructedType()
    #//getConstructedType end
    #//setConstructedProperties begin
    propertyAttribute = XdmfAttribute.New()
    exampleSubset.setConstructedProperties(propertyAttribute.getItemProperties())
    #//setConstructedProperties end
    #//getConstructedProperties begin
    exampleProperties = exampleSubset.getConstructedProperties()
    #//getConstructedProperties end
    #//read begin
    subsetResult = exampleSubset.read()
    #//read end
| StarcoderdataPython |
1970064 | import numpy as np
import gym
import time
import gridworld
env = gym.make("CliffWalking-v0") # 0 up, 1 right, 2 down, 3 left
| StarcoderdataPython |
# Optimal Account Balancing
# given a list of transactions between a group of people, with each transaction
# as a tuple (x, y, z), meaning person x sends person y amount z of money
# assume x != y and z > 0, and ids x and y might not be contiguous
# return the minimum number of transactions required to settle the debt
class Solution(object):
    def minTransfers(self, transactions):
        """Minimum number of transfers needed to settle all debts.

        :type transactions: List[List[int]]  (each item is [payer, payee, amount])
        :rtype: int
        """
        # net balance per person: negative = owes money, positive = is owed
        acc = {}
        for p1, p2, amount in transactions:
            acc[p1] = acc.get(p1, 0) - amount
            acc[p2] = acc.get(p2, 0) + amount
        # BUGFIX: dict.values() is a non-indexable view on Python 3;
        # materialize it so bal[i] assignments below work
        bal = list(acc.values())
        trans = 0
        # cancel out balance pairs with equal amount but different sign:
        # settling such a pair always takes exactly one transaction
        for i in range(len(bal)):
            for j in range(i):
                if bal[i] * bal[j] != 0 and bal[i] + bal[j] == 0:
                    bal[i] = bal[j] = 0
                    trans += 1
                    break
        # drop settled (zero) balances before the exhaustive search
        bal = [b for b in bal if b != 0]
        return self.dfs(bal, 0, trans)

    def dfs(self, bal, i, trans):
        """Min number of transactions to settle starting from bal[i].

        :param bal: remaining balances (mutated in place and restored)
        :param i: index of the next balance to settle
        :param trans: transactions made so far
        """
        n = len(bal)
        # find the next balance that needs to be settled
        while i < n and bal[i] == 0:
            i += 1
        # all balances settled
        if i >= len(bal):
            return trans
        res = float('inf')
        for j in range(i + 1, n):
            if bal[i] * bal[j] < 0:  # different sign
                # one transaction settles bal[i] completely and moves its
                # debt onto bal[j]; balances before i+1 are all zero
                bal[j] += bal[i]
                res = min(res, self.dfs(bal, i + 1, trans + 1))
                # rollback for the next candidate
                bal[j] -= bal[i]
        return res
| StarcoderdataPython |
3484117 | <filename>Separate.py
#coding:utf-8
import cv2
import os
import sys
import numpy as np
# Move the images from one folder to another folder, renaming them
# sequentially (Test<num>.jpg).
ListDir='/home/kjin/caffe-master/examples/DeepID/CASIA-FaceV5-Train/'
List=os.listdir(ListDir)
# starting index for the renamed output files
num=2126
# NOTE(review): the loop variable shadows the builtin `list`; harmless in
# this short script but worth renaming if it grows.
for list in List:
    img=cv2.imread(ListDir+list)
    cv2.imwrite('/home/kjin/caffe-master/examples/VGGNet/pycaffe-mtcnn-master/MyImage/Test'+str(num)+'.jpg',img)
    num+=1
| StarcoderdataPython |
116754 | import torch
import os
import datetime
import pickle
import dill
import random
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch import optim
from random import choices
from scipy.stats import entropy, boxcox
from torch.utils.data import TensorDataset, DataLoader
from sklearn.model_selection import train_test_split
from Networks import Encoder
from Networks import DecoderGRUCover, DecoderSumCover
from Evaluation import EvaluationUtil
from Parameters import Params
params = Params()
class ModelTraining:
    """Train and evaluate the encoder/decoder medication-recommendation model.

    Loads code vocabularies and the EHR matrix from the given files, then
    drives the training loop: loss computation, per-epoch evaluation on the
    test split, LR scheduling on test F1, and periodic checkpointing.
    """

    def __init__(self, device, patient_records_file, voc_file, ehr_matrix_file):
        """
        :param device: torch device used for tensors and modules
        :param patient_records_file: pickle of per-patient admission lists
        :param voc_file: dill file with 'diag_voc'/'pro_voc'/'med_voc' vocabularies
        :param ehr_matrix_file: dill file with the EHR co-occurrence matrix
        """
        self.device = device
        self.patient_records_file = patient_records_file
        self.voc_file = voc_file
        self.ehr_matrix_file = ehr_matrix_file

        # vocabulary sizes define the one-hot dimensions used below
        voc = dill.load(open(self.voc_file, 'rb'))
        self.diag_voc = voc['diag_voc']
        self.pro_voc = voc['pro_voc']
        self.med_voc = voc['med_voc']
        self.diagnose_count = len(self.diag_voc.word2idx)
        self.procedure_count = len(self.pro_voc.word2idx)
        self.medication_count = len(self.med_voc.word2idx)
        self.ehr_matrix = dill.load(open(self.ehr_matrix_file, 'rb'))
        self.evaluate_utils = EvaluationUtil()

    def loss_function(self, target_medications, predict_medications, proportion_bce, proportion_multi,
                      coverage_loss=0.0, proportion_coverage=0.0):
        """Weighted sum of BCE, multi-label margin and optional coverage loss.

        :param target_medications: list of ground-truth medication indices
        :param predict_medications: raw (pre-sigmoid) logits, shape (1, medication_count)
        :param proportion_bce: weight of the BCE term
        :param proportion_multi: weight of the multi-label margin term
        :param coverage_loss: extra loss term produced by the sum_cover decoder
        :param proportion_coverage: weight of the coverage term (0 disables it)
        :return: scalar loss tensor
        """
        # multi-hot target for BCE
        loss_bce_target = np.zeros((1, self.medication_count))
        loss_bce_target[:, target_medications] = 1
        # index-list target padded with -1, as multilabel_margin_loss expects
        loss_multi_target = np.full((1, self.medication_count), -1)
        for idx, item in enumerate(target_medications):
            loss_multi_target[0][idx] = item
        loss_bce = F.binary_cross_entropy_with_logits(predict_medications,
                                                      torch.FloatTensor(loss_bce_target).to(self.device))
        loss_multi = F.multilabel_margin_loss(torch.sigmoid(predict_medications),
                                              torch.LongTensor(loss_multi_target).to(self.device))
        loss = proportion_bce * loss_bce + proportion_multi * loss_multi
        if proportion_coverage != 0:
            loss = loss + proportion_coverage * coverage_loss
        return loss

    def get_performance_on_testset(self, encoder, decoder, patient_records, coverage_type):
        """Evaluate on *patient_records*; returns mean Jaccard, precision,
        recall, F1 and PR-AUC over all admissions.

        :param coverage_type: 'gru_cover' decoders return only logits;
            other decoders return (logits, coverage_loss)
        """
        jaccard_avg, precision_avg, recall_avg, f1_avg, prauc_avg = [], [], [], [], []
        count = 0
        for patient in patient_records:
            for idx, adm in enumerate(patient):
                count += 1
                # predict the current admission from the history up to it
                current_records = patient[:idx + 1]
                query, memory_keys, memory_values = encoder(current_records)
                if coverage_type == 'gru_cover':
                    predict_output = decoder(query, memory_keys, memory_values)
                else:
                    predict_output, _ = decoder(query, memory_keys, memory_values)

                target_medications = adm[params.MEDICATION_INDEX]
                target_multi_hot = np.zeros(self.medication_count)
                target_multi_hot[target_medications] = 1
                predict_prob = torch.sigmoid(predict_output).detach().cpu().numpy()[0]
                predict_multi_hot = predict_prob.copy()

                # a NaN anywhere invalidates the prediction: score it as empty
                index_nan = np.argwhere(np.isnan(predict_multi_hot))
                if index_nan.shape[0] != 0:
                    predict_multi_hot = np.zeros_like(predict_multi_hot)

                # 0.5 threshold turns probabilities into a multi-hot vector
                predict_multi_hot[predict_multi_hot >= 0.5] = 1
                predict_multi_hot[predict_multi_hot < 0.5] = 0
                predict_medications = list(np.where(predict_multi_hot == 1)[0])

                jaccard = self.evaluate_utils.metric_jaccard_similarity(predict_medications, target_medications)
                precision = self.evaluate_utils.metric_precision(predict_medications, target_medications)
                recall = self.evaluate_utils.metric_recall(predict_medications, target_medications)
                f1 = self.evaluate_utils.metric_f1(precision, recall)
                prauc = self.evaluate_utils.precision_auc(predict_prob, target_multi_hot)

                jaccard_avg.append(jaccard)
                precision_avg.append(precision)
                recall_avg.append(recall)
                f1_avg.append(f1)
                prauc_avg.append(prauc)

        jaccard_avg = np.mean(np.array(jaccard_avg))
        precision_avg = np.mean(np.array(precision_avg))
        recall_avg = np.mean(np.array(recall_avg))
        f1_avg = np.mean(np.array(f1_avg))
        prauc_avg = np.mean(np.array(prauc_avg))

        return jaccard_avg, precision_avg, recall_avg, f1_avg, prauc_avg

    def trainIters(self, encoder, decoder, encoder_optimizer, decoder_optimizer, coverage_type, patient_records_train,
                   patient_records_test, save_model_path, n_epoch, print_every_iteration=100, save_every_epoch=5,
                   trained_epoch=0, trained_iteration=0):
        """Run the training loop for *n_epoch* epochs.

        Logs train loss every *print_every_iteration* admissions, evaluates
        on the test split after every epoch, steps both LR schedulers on
        test F1, and saves a checkpoint every *save_every_epoch* epochs.
        *trained_epoch*/*trained_iteration* allow resuming counters from a
        loaded checkpoint.
        """
        start_epoch = trained_epoch + 1
        trained_n_iteration = trained_iteration

        if not os.path.exists(save_model_path):
            os.makedirs(save_model_path)
        log_file = open(os.path.join(save_model_path, 'medrec_loss.log'), 'a+')

        # mode='max': schedulers track the test F1 returned each epoch
        encoder_lr_scheduler = ReduceLROnPlateau(encoder_optimizer, mode='max', patience=5, factor=0.1)
        decoder_lr_scheduler = ReduceLROnPlateau(decoder_optimizer, mode='max', patience=5, factor=0.1)

        for epoch in range(start_epoch, start_epoch + n_epoch):
            print_loss = []
            iteration = 0
            for patient in patient_records_train:
                for idx, adm in enumerate(patient):
                    # each admission (with its history) is one training step
                    trained_n_iteration += 1
                    iteration += 1
                    current_records = patient[:idx + 1]
                    target_medications = adm[params.MEDICATION_INDEX]

                    encoder_optimizer.zero_grad()
                    decoder_optimizer.zero_grad()

                    query, memory_keys, memory_values = encoder(current_records)
                    if coverage_type == 'gru_cover':
                        predict_output = decoder(query, memory_keys, memory_values)
                        loss = self.loss_function(target_medications, predict_output, 0.8, 0.1)
                        print_loss.append(loss.item())
                    else:  # sum_cover
                        predict_output, coverage_loss = decoder(query, memory_keys, memory_values)
                        loss = self.loss_function(target_medications, predict_output, 0.8, 0.1, coverage_loss, 0.1)
                        print_loss.append(loss.item())

                    loss.backward()
                    encoder_optimizer.step()
                    decoder_optimizer.step()

                    if iteration % print_every_iteration == 0:
                        print_loss_avg = np.mean(np.array(print_loss))
                        print_loss = []
                        print(
                            'epoch: {}; time: {}; Iteration: {}; train loss: {:.4f}'.format(
                                epoch, datetime.datetime.now(), trained_n_iteration, print_loss_avg))
                        log_file.write(
                            'epoch: {}; time: {}; Iteration: {}; train loss: {:.4f}\n'.format(
                                epoch, datetime.datetime.now(), trained_n_iteration, print_loss_avg))

            # per-epoch evaluation in eval mode, then back to train mode
            encoder.eval()
            decoder.eval()
            jaccard_avg, precision_avg, recall_avg, f1_avg, prauc_avg = self.get_performance_on_testset(encoder,
                                                                                                       decoder,
                                                                                                       patient_records_test,
                                                                                                       coverage_type)
            encoder.train()
            decoder.train()

            print(
                'epoch: {}; time: {}; Iteration: {}; jaccard_test: {:.4f}; precision_test: {:.4f}; recall_test: {:.4f}; f1_test: {:.4f}; prauc_test: {:.4f}'.format(
                    epoch, datetime.datetime.now(), trained_n_iteration, jaccard_avg, precision_avg, recall_avg, f1_avg,
                    prauc_avg))
            log_file.write(
                'epoch: {}; time: {}; Iteration: {}; jaccard_test: {:.4f}; precision_test: {:.4f}; recall_test: {:.4f}; f1_test: {:.4f}; prauc_test: {:.4f}\n'.format(
                    epoch, datetime.datetime.now(), trained_n_iteration, jaccard_avg, precision_avg, recall_avg, f1_avg,
                    prauc_avg))

            encoder_lr_scheduler.step(f1_avg)
            decoder_lr_scheduler.step(f1_avg)

            if epoch % save_every_epoch == 0:
                torch.save(
                    {'medrec_epoch': epoch,
                     'medrec_iteration': trained_n_iteration,
                     'encoder': encoder.state_dict(),
                     'decoder': decoder.state_dict(),
                     'encoder_optimizer': encoder_optimizer.state_dict(),
                     'decoder_optimizer': decoder_optimizer.state_dict()},
                    os.path.join(save_model_path,
                                 'medrec_{}_{}_{:.4f}.checkpoint'.format(epoch, trained_n_iteration, f1_avg)))

        log_file.close()

    def train(self, input_size, hidden_size, encoder_n_layers, encoder_embedding_dropout_rate,
              encoder_gru_dropout_rate, encoder_learning_rate, decoder_type, decoder_dropout_rate, decoder_hop_count,
              regular_hop_count, attn_type_kv, attn_type_embedding, least_adm_count, select_adm_count, coverage_dim,
              decoder_learning_rate, save_model_dir='data/model', n_epoch=50, print_every_iteration=100,
              save_every_epoch=1, load_model_name=None):
        """Build encoder/decoder (optionally restoring from *load_model_name*),
        split the patient records into train/test, and run trainIters.

        :param decoder_type: 'gru_cover' or 'sum_cover'; anything else aborts
        :param load_model_name: path to a checkpoint to resume from, or None
        """
        print('initializing >>>')
        if load_model_name:
            print('load model from checkpoint file: ', load_model_name)
            checkpoint = torch.load(load_model_name)

        encoder = Encoder(self.device, input_size, hidden_size, self.diagnose_count,
                          self.procedure_count, encoder_n_layers, encoder_embedding_dropout_rate,
                          encoder_gru_dropout_rate)
        if decoder_type == 'gru_cover':
            decoder = DecoderGRUCover(params.device, hidden_size, self.medication_count,
                                      decoder_dropout_rate, least_adm_count, decoder_hop_count,
                                      coverage_dim, attn_type_kv, attn_type_embedding,
                                      regular_hop_count, self.ehr_matrix)
            coverage_type = 'gru_cover'
        elif decoder_type == 'sum_cover':
            decoder = DecoderSumCover(params.device, hidden_size, self.medication_count,
                                      decoder_dropout_rate, decoder_hop_count, attn_type_kv,
                                      attn_type_embedding, least_adm_count, select_adm_count,
                                      regular_hop_count, self.ehr_matrix)
            coverage_type = 'sum_cover'
        else:
            print('wrong decoder type, choose from gru_cover and sum_cover')
            return

        if load_model_name:
            encoder_sd = checkpoint['encoder']
            decoder_sd = checkpoint['decoder']
            encoder.load_state_dict(encoder_sd)
            decoder.load_state_dict(decoder_sd)
        encoder = encoder.to(self.device)
        decoder = decoder.to(self.device)
        encoder.train()
        decoder.train()

        print('build optimizer >>>')
        encoder_optimizer = optim.Adam(encoder.parameters(), lr=encoder_learning_rate)
        decoder_optimizer = optim.Adam(decoder.parameters(), lr=decoder_learning_rate)
        if load_model_name:
            encoder_optimizer_sd = checkpoint['encoder_optimizer']
            decoder_optimizer_sd = checkpoint['decoder_optimizer']
            encoder_optimizer.load_state_dict(encoder_optimizer_sd)
            decoder_optimizer.load_state_dict(decoder_optimizer_sd)

        print('start training >>>')
        # sequential split: first TRAIN_RATIO for training, next TEST_RATIO for test
        patient_records = pd.read_pickle(self.patient_records_file)
        split_point = int(len(patient_records) * params.TRAIN_RATIO)
        test_count = int(len(patient_records) * params.TEST_RATIO)
        patient_records_train = patient_records[:split_point]
        patient_records_test = patient_records[split_point:split_point + test_count]

        medrec_trained_epoch = 0
        medrec_trained_iteration = 0
        if load_model_name:
            medrec_trained_n_epoch_sd = checkpoint['medrec_epoch']
            medrec_trained_n_iteration_sd = checkpoint['medrec_iteration']
            medrec_trained_epoch = medrec_trained_n_epoch_sd
            medrec_trained_iteration = medrec_trained_n_iteration_sd

        # checkpoint directory encodes the structure and hyper-parameters
        save_model_structure = str(encoder_n_layers) + '_' + str(input_size) + '_' + str(hidden_size)
        save_model_parameters = str(encoder_embedding_dropout_rate) + '_' + str(encoder_gru_dropout_rate) + '_' + str(
            decoder_dropout_rate) + '_' + attn_type_kv + '_' + attn_type_embedding + '_' + str(
            decoder_hop_count) + '_' + str(regular_hop_count)
        save_model_path = os.path.join(save_model_dir, save_model_structure, save_model_parameters)

        self.trainIters(encoder, decoder, encoder_optimizer, decoder_optimizer, coverage_type, patient_records_train,
                        patient_records_test, save_model_path, n_epoch, print_every_iteration, save_every_epoch,
                        medrec_trained_epoch, medrec_trained_iteration)
| StarcoderdataPython |
11245795 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
API functions that can be used by external software
"""
from collections import OrderedDict
from pathlib import Path
import os
import traceback
from rebulk.introspector import introspect
from .__version__ import __version__
from .options import parse_options, load_config, merge_options
from .rules import rebulk_builder
class GuessitException(Exception):
    """
    Exception raised when guessit fails to perform a guess because of an internal error.

    The exception message is a full bug report (version, input string,
    options and traceback) ready to be pasted into an issue.
    """
    def __init__(self, string, options):
        super().__init__("An internal error has occured in guessit.\n"
                         "===================== Guessit Exception Report =====================\n"
                         "version=%s\n"
                         "string=%s\n"
                         "options=%s\n"
                         "--------------------------------------------------------------------\n"
                         "%s"
                         "--------------------------------------------------------------------\n"
                         "Please report at "
                         "https://github.com/guessit-io/guessit/issues.\n"
                         "====================================================================" %
                         (__version__, str(string), str(options), traceback.format_exc()))
        # keep the offending input and options for callers that catch this
        self.string = string
        self.options = options
def configure(options=None, rules_builder=rebulk_builder, force=False):
    """
    Load configuration files and initialize rebulk rules if required.

    Module-level convenience wrapper delegating to ``default_api``.

    :param options: options as a dict (or None for defaults)
    :type options: dict
    :param rules_builder: callable building the rebulk rules from the advanced configuration
    :type rules_builder:
    :param force: when True, reload configuration and rebuild rules unconditionally
    :type force: bool
    :return:
    """
    default_api.configure(options, rules_builder=rules_builder, force=force)
def guessit(string, options=None):
    """
    Retrieves all matches from string as a dict

    Module-level convenience wrapper delegating to ``default_api``.

    :param string: the filename or release name
    :type string: str
    :param options: options as a string or a dict
    :type options: str|dict
    :return: dict of guessed properties
    :rtype:
    """
    return default_api.guessit(string, options)
def properties(options=None):
    """
    Retrieves all properties with possible values that can be guessed

    Module-level convenience wrapper delegating to ``default_api``.

    :param options: options as a string or a dict
    :type options: str|dict
    :return: mapping of property name to its possible values
    :rtype:
    """
    return default_api.properties(options)
def suggested_expected(titles, options=None):
    """
    Return a list of suggested titles to be used as `expected_title` based on the list of titles

    Module-level convenience wrapper delegating to ``default_api``.

    :param titles: the filename or release name
    :type titles: list|set|dict
    :param options: options as a string or a dict
    :type options: str|dict
    :return:
    :rtype: list of str
    """
    return default_api.suggested_expected(titles, options)
class GuessItApi(object):
"""
An api class that can be configured with custom Rebulk configuration.
"""
    def __init__(self):
        """Default constructor."""
        # rebulk: compiled rules; config: merged configuration dict;
        # load_config_options: options used when config was last loaded
        # (cache key); advanced_config: config used to build `rebulk`
        self.rebulk = None
        self.config = None
        self.load_config_options = None
        self.advanced_config = None
@classmethod
def _fix_encoding(cls, value):
if isinstance(value, list):
return [cls._fix_encoding(item) for item in value]
if isinstance(value, dict):
return {cls._fix_encoding(k): cls._fix_encoding(v) for k, v in value.items()}
if isinstance(value, bytes):
return value.decode('ascii')
return value
@classmethod
def _has_same_properties(cls, dic1, dic2, values):
for value in values:
if dic1.get(value) != dic2.get(value):
return False
return True
    def configure(self, options=None, rules_builder=rebulk_builder, force=False, sanitize_options=True):
        """
        Load configuration files and initialize rebulk rules if required.

        Both the loaded configuration and the built rules are cached on the
        instance; they are rebuilt only when *force* is set or when the
        relevant options changed since the last call.

        :param options: options as a string or a dict
        :type options: str|dict
        :param rules_builder: callable building the rebulk rules from the advanced configuration
        :type rules_builder:
        :param force: when True, reload config and rebuild rules unconditionally
        :type force: bool
        :param sanitize_options: when False, *options* is assumed already parsed/encoded
        :return:
        :rtype: dict
        """
        if sanitize_options:
            options = parse_options(options, True)
            options = self._fix_encoding(options)

        # reload config only when forced or when a config-related option changed
        if self.config is None or self.load_config_options is None or force or \
                not self._has_same_properties(self.load_config_options,
                                              options,
                                              ['config', 'no_user_config', 'no_default_config']):
            config = load_config(options)
            config = self._fix_encoding(config)
            self.load_config_options = options
        else:
            config = self.config

        advanced_config = merge_options(config.get('advanced_config'), options.get('advanced_config'))

        # rules are rebuilt only when the advanced configuration changed
        should_build_rebulk = force or not self.rebulk or not self.advanced_config or \
                              self.advanced_config != advanced_config

        if should_build_rebulk:
            self.advanced_config = advanced_config
            self.rebulk = rules_builder(advanced_config)

        self.config = config
        return self.config
def guessit(self, string, options=None): # pylint: disable=too-many-branches
"""
Retrieves all matches from string as a dict
:param string: the filename or release name
:type string: str|Path
:param options:
:type options: str|dict
:return:
:rtype:
"""
if isinstance(string, Path):
try:
# Handle path-like object
string = os.fspath(string)
except AttributeError:
string = str(string)
try:
options = parse_options(options, True)
options = self._fix_encoding(options)
config = self.configure(options, sanitize_options=False)
options = merge_options(config, options)
result_decode = False
result_encode = False
if isinstance(string, bytes):
string = string.decode('ascii')
result_encode = True
matches = self.rebulk.matches(string, options)
if result_decode:
for match in matches:
if isinstance(match.value, bytes):
match.value = match.value.decode("utf-8")
if result_encode:
for match in matches:
if isinstance(match.value, str):
match.value = match.value.encode("ascii")
matches_dict = matches.to_dict(options.get('advanced', False), options.get('single_value', False),
options.get('enforce_list', False))
output_input_string = options.get('output_input_string', False)
if output_input_string:
matches_dict['input_string'] = matches.input_string
return matches_dict
except Exception as err:
raise GuessitException(string, options) from err
def properties(self, options=None):
"""
Grab properties and values that can be generated.
:param options:
:type options:
:return:
:rtype:
"""
options = parse_options(options, True)
options = self._fix_encoding(options)
config = self.configure(options, sanitize_options=False)
options = merge_options(config, options)
unordered = introspect(self.rebulk, options).properties
ordered = OrderedDict()
for k in sorted(unordered.keys(), key=str):
ordered[k] = list(sorted(unordered[k], key=str))
if hasattr(self.rebulk, 'customize_properties'):
ordered = self.rebulk.customize_properties(ordered)
return ordered
def suggested_expected(self, titles, options=None):
"""
Return a list of suggested titles to be used as `expected_title` based on the list of titles
:param titles: the filename or release name
:type titles: list|set|dict
:param options:
:type options: str|dict
:return:
:rtype: list of str
"""
suggested = []
for title in titles:
guess = self.guessit(title, options)
if len(guess) != 2 or 'title' not in guess:
suggested.append(title)
return suggested
default_api = GuessItApi()
| StarcoderdataPython |
import torch.nn as nn
def initialize_decoder(module):
    """Initialize decoder weights in place.

    Conv2d layers get Kaiming-uniform weights (fan_in, relu), Linear layers
    get Xavier-uniform weights, BatchNorm2d layers get weight=1. Every
    existing bias is zeroed.
    """
    for layer in module.modules():
        if isinstance(layer, nn.BatchNorm2d):
            nn.init.constant_(layer.weight, 1)
            nn.init.constant_(layer.bias, 0)
            continue
        if isinstance(layer, nn.Conv2d):
            nn.init.kaiming_uniform_(layer.weight, mode="fan_in", nonlinearity="relu")
        elif isinstance(layer, nn.Linear):
            nn.init.xavier_uniform_(layer.weight)
        else:
            continue
        # Conv/Linear bias may be disabled (bias=False); zero it only if present.
        if layer.bias is not None:
            nn.init.constant_(layer.bias, 0)
def initialize_head(module):
    """Initialize head weights in place.

    Linear and Conv2d layers get Xavier-uniform weights; their biases, when
    present, are zeroed. Other layer types are left untouched.
    """
    target_types = (nn.Linear, nn.Conv2d)
    for layer in module.modules():
        if not isinstance(layer, target_types):
            continue
        nn.init.xavier_uniform_(layer.weight)
        if layer.bias is not None:
            nn.init.constant_(layer.bias, 0)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.