content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# Generated by Django 3.1.14 on 2022-01-31 15:17
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
1415,
319,
33160,
12,
486,
12,
3132,
1315,
25,
1558,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.8 | 30 |
"""
New reaction ID code
"""
import autofile
import automol
from mechanalyzer.inf import rxn as rinfo
from mechanalyzer.inf import thy as tinfo
from phydat import phycon
from mechlib import filesys
def build_reaction(rxn_info, ini_thy_info, zma_locs, save_prefix,
id_missing=True, re_id=False):
""" For a given reaction, attempt to identify its reaction class obtain
the corresponding Z-Matrix reaction object.
Function will attempt to read the filesytem for the appropriate
reaction object in the appropriate latyer
save/RXN/THY/TS/CONFS/Z/
If file is not located it will build Z-Matrices for the reactants
and run the class identifier.
:param rxn_info: Mechanalyzer reaction info object
:type rxn_info: tuple(tuple(str/int))
:param ini_thy_info: Mechanalyzer theory info object (input lvl)
:type ini_thy_info: tuple(str)
:param zma_locs: locs for Z filesys to search for reaction object
:type zma_locs: tuple(int)
:param save_prefix: root path to the save filesys
:type save_prefix: str
"""
zrxns, zmas, rclasses = None, (), ()
status = 'MISSING'
# Try and read the reaction from filesys if requested
if not re_id:
zrxns, zmas = filesys.read.reactions(
rxn_info, ini_thy_info, zma_locs, save_prefix)
status = 'FOUND' if zrxns is not None else 'MISSING'
print(' Reading from fileysystem...')
else:
# unsafe without checking if zrxn id matches what is in save...
print(' Requested Reidentification regardless of what is in SAVE')
print('status test', status)
# Try and identify reaction if not rxn obj found
if status == 'MISSING':
if id_missing:
print(' Identifying class...')
zrxns, zmas = _id_reaction(rxn_info, ini_thy_info, save_prefix)
status = 'FOUND' if zrxns is not None else 'MISSING-SKIP'
else:
status = 'MISSING-ADD'
# Build a tuple with the full description of the reaction class, if ID'd
if status not in ('MISSING-SKIP', 'MISSING-ADD'):
for zrxn in zrxns:
rclasses += (_mod_class(zrxn.class_, rxn_info),)
print(' Reaction class identified as: '
f'{automol.par.string(rclasses[0])}')
print(f' There are {len(zrxns)} '
'configuration(s) of transition state')
return zrxns, zmas, rclasses, status
def _id_reaction(rxn_info, thy_info, save_prefix):
""" Identify the reaction and build the object
:param rxn_info: reaction info object
:type rxn_info: mechanalyzer.inf.rxn object
:rtype: (tuple(automol.Reaction object), tuple(automol.zmat object))
"""
# Check the save filesystem for the reactant and product geometries
rct_geos, prd_geos, rct_paths, prd_paths = reagent_geometries(
rxn_info, thy_info, save_prefix)
# Identify reactants and products from geoms or InChIs, depending
# We automatically assess and add stereo to the reaction object, as needed
if any(rct_geos) and any(prd_geos):
print(' Reaction ID from geometries from SAVE filesys')
for i, path in enumerate(rct_paths):
print(f' - reactant {i+1}: {path}')
for i, path in enumerate(prd_paths):
print(f' - product {i+1}: {path}')
zrxn_objs = automol.reac.rxn_objs_from_geometry(
rct_geos, prd_geos, indexing='zma', stereo=False)
# rct_geos, prd_geos, indexing='zma', stereo=True)
else:
print(' Reaction ID from geometries from input InChIs')
rxn_ichs = rinfo.value(rxn_info, 'inchi')
rct_ichs, prd_ichs = rxn_ichs[0], rxn_ichs[1]
zrxn_objs = automol.reac.rxn_objs_from_inchi(
rct_ichs, prd_ichs, indexing='zma', stereo=False)
# rct_ichs, prd_ichs, indexing='zma', stereo=True)
# Loop over the found reaction objects
if zrxn_objs is not None:
zrxns, zmas = (), ()
for obj_set in zrxn_objs:
zrxn, zma, _, _ = obj_set
zrxns += (zrxn,)
zmas += (zma,)
else:
zrxns, zmas = None, None
return zrxns, zmas
def _mod_class(class_typ, rxn_info):
""" Create the object containing the full description of the
reaction class, including its type, spin state, and whether
it is a radical-radical combination.
:param class_typ: reaction class type
:type class_typ: str
:param rxn_info: ???
:tpye rxn_info: ???
"""
# Set the spin of the reaction to high/low
_fake_class = (class_typ, '', '', False)
if automol.par.need_spin_designation(_fake_class):
ts_mul = rinfo.value(rxn_info, 'tsmult')
high_mul = rinfo.ts_mult(rxn_info, rxn_mul='high')
_spin = 'high-spin' if ts_mul == high_mul else 'low-spin'
else:
_spin = ''
# Determine if it iss intersystem crossing
# rxn_muls = rinfo.value(rxn_info, 'mult')
# is_isc = automol.reac.intersystem_crossing(rxn_muls)
return automol.par.reaction_class_from_data(
class_typ, _spin, rinfo.radrad(rxn_info), False)
# from direction (loop over the reactions around split)
def set_reaction_direction(reacs, prods, rxn_info,
thy_info, ini_thy_info, save_prefix,
direction='forw'):
""" Arrange the reactants and products in the order corresponding
to the desired direction of the reaction. If the direction
is the exothermic direction, than the species energies are read
from the filesystem at the level of theory.
:param reacs: list of names of the reactants
:type reacs: tuple(str)
:param prods: list of names of the products
:type prods: tuple(str)
:param rxn_info:
:type rxn_info: tuple(tuple(str), tuple(int), tuple(int), int)
:param thy_info: ???
:type thy_info: ??
:param ini_thy_info: ??
:param direction: direction to set reaction to
:type direction: str
:param save_prefix: root-path to the save-filesystem
:type save_prefix: str
"""
if direction == 'forw':
print(' User requested forward direction.')
elif direction == 'back':
print(' User requested reverse direction, flipping reaction.')
reacs, prods = prods, reacs
elif direction == 'exo':
print(' User requested exothermic direction.',
'Checking energies...')
reacs, prods = assess_rxn_ene(
reacs, prods, rxn_info, thy_info, ini_thy_info, save_prefix)
else:
raise NotImplementedError
rct_str, prd_str = '+'.join(reacs), '+'.join(prods)
print(f' Running reaction as: {rct_str} = {prd_str}')
return reacs, prods
# Functions for the exothermicity check
def reagent_geometries(rxn_info, thy_info, save_prefix):
""" Identify the reaction and build the object
:param rxn_info: reaction info object
:type rxn_info: mechanalyzer.inf.rxn object
:rtype: (tuple(automol.Reaction object), tuple(automol.zmat object))
"""
# Check the save filesystem for the reactant and product geometries
rct_info = rinfo.rgt_info(rxn_info, 'reacs')
prd_info = rinfo.rgt_info(rxn_info, 'prods')
_rcts_cnf_fs = filesys.rcts_cnf_fs(rct_info, thy_info, None, save_prefix)
_prds_cnf_fs = filesys.rcts_cnf_fs(prd_info, thy_info, None, save_prefix)
# If min cnfs found for all rcts and prds, read the geometries
rct_geos, prd_geos = (), ()
rct_paths, prd_paths = (), ()
if (
_rcts_cnf_fs.count(None) == 0 and _prds_cnf_fs.count(None) == 0
):
for (_, cnf_save_fs, min_locs, _) in _rcts_cnf_fs:
geo = cnf_save_fs[-1].file.geometry.read(min_locs)
path = cnf_save_fs[-1].file.geometry.path(min_locs)
rct_geos += (geo,)
rct_paths += (path,)
for (_, cnf_save_fs, min_locs, _) in _prds_cnf_fs:
geo = cnf_save_fs[-1].file.geometry.read(min_locs)
path = cnf_save_fs[-1].file.geometry.path(min_locs)
prd_geos += (geo,)
prd_paths += (path,)
return rct_geos, prd_geos, rct_paths, prd_paths
def assess_rxn_ene(reacs, prods, rxn_info,
thy_info, ini_thy_info, save_prefix):
""" Check the directionality of the reaction
"""
rxn_ene = reaction_energy(rxn_info, thy_info, ini_thy_info, save_prefix)
method1, method2 = thy_info, ini_thy_info
if rxn_ene is None:
rxn_ene = reaction_energy(
rxn_info, ini_thy_info, ini_thy_info, save_prefix)
method1, method2 = ini_thy_info, ini_thy_info
print(f' Reaction energy is {rxn_ene*phycon.EH2KCAL:.2f} '
f'at {method1[1]}//{method2[1]} level')
if rxn_ene > 0:
reacs, prods = prods, reacs
rxn_info = rinfo.reverse(rxn_info)
print(' Reaction is endothermic, flipping reaction.')
return reacs, prods
def reaction_energy(rxn_info, sp_thy_info, geo_thy_info, save_prefix):
""" reaction energy """
rct_enes = reagent_energies(
'reacs', rxn_info, sp_thy_info, geo_thy_info, save_prefix)
prd_enes = reagent_energies(
'prods', rxn_info, sp_thy_info, geo_thy_info, save_prefix)
if rct_enes is not None and prd_enes is not None:
rxn_ene = sum(prd_enes) - sum(rct_enes)
else:
rxn_ene = None
return rxn_ene
def reagent_energies(
rgt, rxn_info, sp_thy_info, geo_thy_info, save_prefix):
""" reagent energies """
assert rgt in ('reacs', 'prods')
enes = []
for rgt_info in rinfo.rgts_info(rxn_info, rgt):
# Set filesys
spc_save_fs = autofile.fs.species(save_prefix)
spc_save_path = spc_save_fs[-1].path(rgt_info)
mod_geo_thy_info = tinfo.modify_orb_label(geo_thy_info, rgt_info)
mod_sp_thy_info = tinfo.modify_orb_label(sp_thy_info, rgt_info)
thy_save_fs = autofile.fs.theory(spc_save_path)
thy_save_path = thy_save_fs[-1].path(mod_geo_thy_info[1:4])
cnf_save_fs = autofile.fs.conformer(thy_save_path)
min_locs, min_path = filesys.min_energy_conformer_locators(
cnf_save_fs, mod_geo_thy_info)
# Read energy
ene = None
if min_locs:
sp_fs = autofile.fs.single_point(min_path)
if sp_fs[-1].file.energy.exists(mod_sp_thy_info[1:4]):
ene = sp_fs[-1].file.energy.read(mod_sp_thy_info[1:4])
enes.append(ene)
if any(ene is None for ene in enes):
enes = None
return enes
| [
37811,
198,
968,
6317,
4522,
2438,
198,
37811,
198,
198,
11748,
1960,
1659,
576,
198,
11748,
3557,
349,
198,
6738,
3962,
3400,
9107,
13,
10745,
1330,
374,
87,
77,
355,
374,
10951,
198,
6738,
3962,
3400,
9107,
13,
10745,
1330,
11906,
3... | 2.16724 | 4,945 |
from flask import render_template
from . import app, globals
@app.route("/")
| [
6738,
42903,
1330,
8543,
62,
28243,
198,
198,
6738,
764,
1330,
598,
11,
15095,
874,
198,
198,
31,
1324,
13,
38629,
7203,
14,
4943,
198
] | 3.16 | 25 |
from app import app, db
from app.models import User, Notes
from app.forms import LoginForm, RegisterForm, CreateNoteForm, EditNoteForm, DeleteNoteForm
from flask import render_template, redirect, url_for, flash, request
from flask_login import current_user, login_user, logout_user, login_required
@app.route('/', methods=['GET', 'POST'])
@app.route('/login', methods=['GET', 'POST'])
@app.route('/logout')
@login_required
@app.route('/register', methods=['GET', 'POST'])
@app.route('/notes', methods=['GET'])
@login_required
@app.route('/notes/<id>', methods=['GET', 'POST'])
@login_required
@app.route('/edit/notes/<id>', methods=['GET', 'POST'])
@login_required
| [
6738,
598,
1330,
598,
11,
20613,
198,
6738,
598,
13,
27530,
1330,
11787,
11,
11822,
198,
6738,
598,
13,
23914,
1330,
23093,
8479,
11,
17296,
8479,
11,
13610,
6425,
8479,
11,
5312,
6425,
8479,
11,
23520,
6425,
8479,
198,
6738,
42903,
1... | 2.965066 | 229 |
import click
from game import Game
from parser import parser
from a_star import a_star
@click.command()
@click.option(
"--size",
default=3,
type=int,
help="Input a puzzel size to generate a start puzzel, default value is 3",
)
@click.option(
"--file",
type=click.Path(exists=True, readable=True),
help="Input a puzzel file path",
)
def main(file, size):
"""Program that solve N-puzzel game"""
if file:
puzzel_size, puzzel = parser(file)
game = Game(puzzel_size, puzzel)
elif size:
game = Game(size)
else:
raise SystemExit("Need puzzel size or puzzel file")
a_star(game)
if __name__ == "__main__":
main()
| [
11748,
3904,
198,
198,
6738,
983,
1330,
3776,
198,
6738,
30751,
1330,
30751,
198,
6738,
257,
62,
7364,
1330,
257,
62,
7364,
628,
198,
31,
12976,
13,
21812,
3419,
198,
31,
12976,
13,
18076,
7,
198,
220,
220,
220,
366,
438,
7857,
1600... | 2.469751 | 281 |
import random
import string
import uuid
from django.core import validators
from django.db import models
from django.db import transaction
from django.core.mail import send_mail
from django.contrib.auth.models import (AbstractBaseUser, PermissionsMixin,
BaseUserManager)
from django.utils import timezone
from django.utils.translation import pgettext_lazy as _
class UserManager(BaseUserManager):
"""Customize the creatioin of user and super user
"""
class AbstractUser(AbstractBaseUser, PermissionsMixin):
"""
An abstract base class implementing a fully featured User model with
admin-compliant permissions.
Username, password and email are required. Other fields are optional.
"""
username = models.CharField(
_('User field', 'username'),
max_length=20,
default='',
unique=True,
help_text=_(
'User field',
'Required. 20 characters or fewer. Letters, digits and '
'@/./+/-/_ only.'),
validators=[
validators.RegexValidator(
r'^[\w.@+-]+$',
_(
'User field', 'Enter a valid username. '
'This value may contain only letters, numbers '
'and @/./+/-/_ characters.'), 'invalid'),
])
first_name = models.CharField(_('User field', 'first name'),
max_length=30,
blank=True)
last_name = models.CharField(_('User field', 'last name'),
max_length=30,
blank=True)
email = models.EmailField(_('User field', 'email address'), unique=True)
is_staff = models.BooleanField(
_('User field', 'staff status'),
default=False,
help_text=_(
'User field',
'Designates whether the user can log into this admin '
'site.'))
is_active = models.BooleanField(
_('User field', 'active'),
default=True,
help_text=_(
'User field', 'Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
date_joined = models.DateTimeField(_('User field', 'date joined'),
default=timezone.now)
objects = UserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"Returns the short name for the user."
return self.first_name
def email_user(self, subject, message, from_email=None, **kwargs):
"""
Sends an email to this User.
"""
send_mail(subject, message, from_email, [self.email], **kwargs)
class User(AbstractUser):
"""a customable user model
"""
mobile = models.CharField(_('User model', 'cell phone num'),
max_length=32,
unique=True,
default='')
# as an uuid for user
token = models.CharField(max_length=300, unique=True)
object = UserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
| [
11748,
4738,
198,
11748,
4731,
198,
11748,
334,
27112,
198,
198,
6738,
42625,
14208,
13,
7295,
1330,
4938,
2024,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
9945,
1330,
8611,
198,
6738,
42625,
14208,
13,
... | 2.217139 | 1,552 |
fibonacci_series = [0, 1] # first two values of series
while ___(fibonacci_series) ___ ___: # condition for the length
# add the new value
fibonacci_series.___(___)
print(fibonacci_series)
| [
69,
571,
261,
44456,
62,
25076,
796,
685,
15,
11,
352,
60,
220,
1303,
717,
734,
3815,
286,
2168,
198,
198,
4514,
11593,
41052,
69,
571,
261,
44456,
62,
25076,
8,
46444,
46444,
25,
220,
1303,
4006,
329,
262,
4129,
198,
220,
220,
22... | 2.68 | 75 |
from .agent import Agent
ACTION_LOOKUP = {
0 : '1', # Up
1 : '2', # Up right
2 : '3', # Down right
3 : '4', # Down
4 : '5', # Down left
5 : '6', # Up left
6 : 'end'
}
| [
6738,
764,
25781,
1330,
15906,
198,
198,
44710,
62,
43,
15308,
8577,
796,
1391,
198,
220,
220,
220,
657,
1058,
705,
16,
3256,
220,
1303,
3205,
198,
220,
220,
220,
352,
1058,
705,
17,
3256,
220,
1303,
3205,
826,
198,
220,
220,
220,
... | 1.980392 | 102 |
# ===========================================================================
# default.py --------------------------------------------------------------
# ===========================================================================
"""
Examples
^^^^^^^^
- To override some configuration options, you can use the flag ``--set``, for
instance, if you want to override the editor used and the opentool to open
documents, you can just type
.. code:: shell
dl_multi --set editor gedit --set opentool firefox edit
dl_multi --set editor gedit --set opentool firefox open
- If you want to list the libraries and pick one before sending a database
query to dl_multi, use ``--pick-lib`` as such
.. code:: shell
dl_multi --pick-lib open 'einstein relativity'
Cli
^^^
.. click:: dl_multi.dl_multi.commands.default:run
:prog: dl_multi
:dl_multi.commands: []
"""
# import ------------------------------------------------------------------
# ---------------------------------------------------------------------------
import dl_multi.__init__
import dl_multi.commands
import click
import colorama
import difflib
import logging
import os
import sys
# class -------------------------------------------------------------------
# ---------------------------------------------------------------------------
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
@click.group(
cls=MultiCommand,
invoke_without_command=True
)
@click.help_option(
"-h",
"--help"
)
@click.version_option(
version=dl_multi.__version__
)
@click.option(
"-v",
"--verbose",
help="Make the output verbose (equivalent to --log DEBUG)",
default=False,
is_flag=True
)
@click.option(
"--log",
help="Logging level",
type=click.Choice(["INFO", "DEBUG", "WARNING", "ERROR", "CRITICAL"]),
default="INFO"
)
@click.option(
"--color",
type=click.Choice(["always", "auto", "no"]),
default="auto",
help="Prevent the output from having color"
) | [
2,
38093,
2559,
855,
198,
2,
220,
220,
4277,
13,
9078,
20368,
1783,
26171,
198,
2,
38093,
2559,
855,
198,
37811,
198,
198,
27730,
198,
39397,
39397,
198,
198,
12,
1675,
20957,
617,
8398,
3689,
11,
345,
460,
779,
262,
6056,
7559,
438... | 3.635274 | 584 |
"""Coordinator for SleepIQ."""
from datetime import timedelta
import logging
from asyncsleepiq import AsyncSleepIQ
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
_LOGGER = logging.getLogger(__name__)
UPDATE_INTERVAL = timedelta(seconds=60)
class SleepIQDataUpdateCoordinator(DataUpdateCoordinator[dict[str, dict]]):
"""SleepIQ data update coordinator."""
def __init__(
self,
hass: HomeAssistant,
client: AsyncSleepIQ,
username: str,
) -> None:
"""Initialize coordinator."""
super().__init__(
hass,
_LOGGER,
name=f"{username}@SleepIQ",
update_method=client.fetch_bed_statuses,
update_interval=UPDATE_INTERVAL,
)
self.client = client
| [
37811,
7222,
585,
20900,
329,
17376,
33866,
526,
15931,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
11748,
18931,
198,
198,
6738,
355,
2047,
6359,
8892,
25011,
1330,
1081,
13361,
40555,
33866,
198,
198,
6738,
1363,
562,
10167,
13,
729... | 2.429799 | 349 |
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates data for clustering experimentation."""
import numpy as np
from clustering import clustering_params
def sample_uniform_sphere(num_points: int,
dim: int,
radius: float = 1.0) -> clustering_params.Points:
"""Returns points sampled uniformly in a L2-ball of specified radius.
Samples a point from the standard normal distribution, scales it to be of norm
equal to the radius and finally scales it further by a factor u^{1/dim} for a
uniformly random u in [0,1], to yield a point that is uniform within the
L2-ball of specified radius.
Reference:
https://en.wikipedia.org/wiki/N-sphere#Uniformly_at_random_within_the_n-ball
Args:
num_points: number of points to be sampled.
dim: dimension of points to be sampled.
radius: radius of the ball which contain all points.
"""
points = np.random.normal(0.0, 1.0, size=(num_points, dim))
new_radiuses = radius * (np.random.uniform(0, 1, num_points)**(1.0 / dim))
scale = new_radiuses / np.linalg.norm(points, axis=1)
result = (points.T * scale).T
assert max(np.linalg.norm(result, axis=1)) <= radius, (
f"Sampled points outside the sphere with radius {radius}, "
f"got {max(np.linalg.norm(result, axis=1))}")
return result
def generate_synthetic_dataset(
num_points: int = 1000000,
dim: int = 100,
num_clusters: int = 64,
cluster_ratio: float = 100.0,
radius: float = 1.0) -> clustering_params.Data:
"""Generates a synthetic dataset.
First samples cluster centers within a smaller radius of
radius*(1-1/cluster_ratio), so that points added around them stay within
radius. Next, num_points/num_clusters many points are sampled from the
Gaussian distribution centered at each cluster (if num_points/num_clusters is
not an integer, then excess points are in the last cluster). Finally, points
are clipped to norm=radius.
Args:
num_points: The number of data points.
dim: The dimension of data points.
num_clusters: The number of clusters to divide the points evenly into;
extras go in the last cluster.
cluster_ratio: The ratio of the intercluster distance to intracluster
distance.
radius: The radius for all the data to be confined in. At the end, this
radius is enforced by scaling any points that are outside the radius.
Returns:
Data containing sampled datapoints, radius, and labels.
"""
center_radius = radius * (1 - 1 / float(cluster_ratio))
rand_centers: np.ndarray = sample_uniform_sphere(
num_clusters, dim, center_radius) # shape=(num_clusters, dim)
datapoints: np.ndarray = np.random.normal(
0,
np.sqrt(radius) / (float(cluster_ratio) * np.sqrt(dim)),
size=(num_points, dim))
num_points_per_cluster: np.ndarray = np.ones(num_clusters, dtype=int) * (
num_points // num_clusters)
num_points_per_cluster[-1] += num_points % num_clusters
labels = np.concatenate([
np.ones(k, dtype=int) * i for (i, k) in enumerate(num_points_per_cluster)
])
shift_mat: np.ndarray = np.vstack([
np.outer(np.ones(k), v)
for (k, v) in zip(num_points_per_cluster, rand_centers)
])
datapoints += shift_mat
# Enforce the radius by scaling any points that are outside that range.
data = clustering_params.Data(datapoints, radius, labels)
return clustering_params.Data(data.clip_by_radius(), data.radius, data.labels)
| [
2,
15069,
33448,
3012,
11419,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,... | 2.935484 | 1,364 |
# Copyright (c) 2020, Manfred Moitzi
# License: MIT License
# Predefined matplotlib pattern:
# / - diagonal hatching
# \ - back diagonal
# | - vertical
# - - horizontal
# + - crossed
# x - crossed diagonal
# o - small circle
# O - large circle
# . - dots
# * - stars
# 1x sparse
# 2x normal
# 3x dense
HATCH_NAME_MAPPING = {
'ACAD_ISO02W100': '---',
'ACAD_ISO03W100': '---',
'ACAD_ISO04W100': '---',
'ACAD_ISO05W100': '---',
'ACAD_ISO06W100': '---',
'ACAD_ISO07W100': '---',
'ACAD_ISO08W100': '---',
'ACAD_ISO09W100': '---',
'ACAD_ISO10W100': '---',
'ACAD_ISO11W100': '---',
'ACAD_ISO12W100': '---',
'ACAD_ISO13W100': '---',
'ACAD_ISO14W100': '---',
'ACAD_ISO15W100': '---',
'ANCHORLOCK': '++',
'ANGLE': '+++',
'ANSI31': '///',
'ANSI32': '//',
'ANSI33': '///',
'ANSI34': '//',
'ANSI35': '///',
'ANSI36': '///',
'ANSI37': 'xxx',
'ANSI38': 'xxx',
'AR-RROOF': '---',
'AR-SAND': '...',
'ASPHALT': '---...',
'BOARD': '---...',
'BRASS': '---...',
'BOX': '+++',
'BRICK': '+++',
'BRICK_FLBOND': '+++',
'BRICK_INSULATING': '///...',
'BRICK_LWEIGHT': '///...',
'BRICK_PAIRS': '++',
'BRICK_STBOND': '++',
'BRICK_STRBOND': '+',
'BRSTONE': '+++',
'BUTTERFLY': 'xxx|||',
'CHECKER': '+++',
'CLAY': '...---',
'CONCRETE1': 'oo',
'CONCRETE2': 'ooo',
'CONCRETE3': 'oooo',
'CONC_DEMOLITION': 'xxxx',
'CONC_EXISTING': 'xxxx',
'CONC_PRECAST': 'xxxx',
'CORK': '\\\\\\---',
'CROSS': '++++',
'CROSSES': 'xxxx',
'DASH': '---',
'DIAMONDS': 'xxx',
'DOLMIT': '//---',
'DOTGRID': '..',
'DOTS': '...',
'DOTS1': '...',
'DOTS2': '...',
'EARTH': '+++',
'EARTH1': '++++',
'EARTH2': 'xxxx',
'EGYPTIAN': '++++',
'ESCHER': '//\\\\--',
'FLEX': '---',
'FLEXIBLE': '---',
'GLASS': '...',
'GOST_GLASS': '...',
'GOST_GROUND': '///',
'GOST_WOOD': '|||',
'GRASS': '.',
'GRASS1': '..',
'GRASS2': '..',
'GRATE': '+++++',
'GRAVEL': '..',
'GRAVEL1': 'ooo',
'GRID': '++',
'GROUT': '...',
'HERRING_45': '+',
'HERRING_H': 'xx--',
'HERRING_UNI': '++',
'HERRING_V': 'xx',
'HEX': 'xx',
'HEXAGONS': 'xx',
'HONEY': 'xxx',
'HONEYCOMB': 'xxx',
'HOUND': '+++++',
'INSUL': '---',
'INSULATION': 'xxxxx',
'ISO02W100': '---',
'ISO03W100': '---',
'ISO04W100': '---',
'ISO05W100': '---',
'ISO06W100': '---',
'ISO07W100': '---',
'ISO08W100': '---',
'ISO09W100': '---',
'ISO10W100': '---',
'ISO11W100': '---',
'ISO12W100': '---',
'ISO13W100': '---',
'ISO14W100': '---',
'ISO15W100': '---',
'JIS_LC_20': '//',
'JIS_LC_20A': '//',
'JIS_LC_8': '///',
'JIS_LC_8A': '///',
'JIS_RC_10': '///',
'JIS_RC_15': '///',
'JIS_RC_18': '//',
'JIS_RC_30': '//',
'JIS_STN_1E': '///',
'JIS_STN_2.5': '///',
'JIS_WOOD': '///',
'LINE': '---',
'LINES': '---',
'MUDST': '---...',
'NATURAL': '///...',
'NET': '+++++',
'NET3': 'xxxxx-----',
'OCTAGONS': '+++',
'PLAST': '---',
'PLASTI': '---',
'PLUSSES': '..',
'ROCK': '---///',
'SACNCR': '////',
'SAND': 'xxxx',
'SCREED': '....',
'SHAKES': '+++',
'SPANISH': '+++',
'SQUARE': '++++',
'SQUARES': '++++',
'STARS': '**',
'STEEL': '///',
'SWAMP': '...',
'TILEPAT1': '+++',
'TRANS': '---',
'TRIANG': 'xxx',
'TRIANGLES': '****',
'TRIHEX': 'xx',
'V_BATTEN_FLOOR': '--',
'V_MASONRY200x100': '+++',
'V_MASONRY200x60': '++++',
'V_MASONRY200x75': '++++',
'V_MASONRY220x80': '++++',
'V_MASONRY300x100': '++++',
'V_MASONRY300x150': '+++',
'V_MASONRY300x200': '+++',
'V_MASONRY300x75': '++++',
'V_MASONRY400x100': '+++',
'V_MASONRY400x200': '+++',
'V_PARQUET': '---',
'V_STANDING_SEAM': '|||',
'V_ZINC': '|||',
'WAFFLE': '+++',
'WATER': '---',
'WOOD1': '///',
'WOOD2': '\\\\\\',
'WOOD3': '---',
'WOOD4': '----',
'ZIGZAG': '///'
}
| [
2,
220,
15069,
357,
66,
8,
12131,
11,
1869,
39193,
4270,
4224,
72,
198,
2,
220,
13789,
25,
17168,
13789,
198,
2,
14322,
18156,
2603,
29487,
8019,
3912,
25,
198,
2,
1220,
532,
40039,
289,
19775,
198,
2,
3467,
532,
736,
40039,
198,
... | 1.831996 | 2,244 |
import os
import requests
| [
11748,
28686,
198,
198,
11748,
7007,
628
] | 4 | 7 |
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input/Output utilities."""
import re
import tensorflow as tf
def tfrecords_to_dataset_tf2(tfrecords_pattern,
mapping_func,
batch_size,
buffer_size=5000,
shuffle=True):
"""Generates a TF Dataset from a rio pattern."""
with tf.name_scope('Input/'):
tfrecords_pattern = expand_rio_pattern(tfrecords_pattern)
dataset = tf.data.Dataset.list_files(tfrecords_pattern, shuffle=shuffle)
dataset = dataset.interleave(tf.data.TFRecordDataset, cycle_length=16)
if shuffle:
dataset = dataset.shuffle(buffer_size=buffer_size)
dataset = dataset.map(mapping_func)
dataset = dataset.batch(batch_size)
return dataset
def get_dataset(tfrecords_pattern,
mapping_func,
buffer_size=500,
shuffle=True,
cycle_length=16):
"""Generates a TF Dataset from a rio pattern."""
with tf.name_scope('Input/'):
tfrecords_pattern = expand_rio_pattern(tfrecords_pattern)
dataset = tf.data.Dataset.list_files(tfrecords_pattern, shuffle=shuffle)
dataset = dataset.interleave(tf.data.TFRecordDataset,
cycle_length=cycle_length)
# for d in dataset.take(1):
# mapping_func(d)
if shuffle:
dataset = dataset.shuffle(buffer_size=buffer_size)
dataset = dataset.map(mapping_func)
return dataset
| [
2,
15069,
12131,
383,
309,
22854,
37535,
46665,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
... | 2.502457 | 814 |
import numpy as np
import math
from blarf.dataset import dataset
from blarf.cluster import cluster
#################################
# code for internal types
#################################
# reciprical_bonds
| [
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
6738,
698,
37595,
13,
19608,
292,
316,
1330,
27039,
198,
6738,
698,
37595,
13,
565,
5819,
1330,
13946,
198,
198,
29113,
2,
198,
2,
2438,
329,
5387,
3858,
198,
29113,
2,
198,
198,
... | 3.844828 | 58 |
from threading import Thread, Lock
mutex = Lock()
mutex.acquire()
mutex.release()
CRASH_STATUS = None
#t = Thread(target = processData, args = (0,))
#t.start()
| [
6738,
4704,
278,
1330,
14122,
11,
13656,
198,
198,
21973,
1069,
796,
13656,
3419,
198,
21973,
1069,
13,
330,
29782,
3419,
198,
21973,
1069,
13,
20979,
3419,
198,
198,
9419,
11211,
62,
35744,
2937,
796,
6045,
198,
198,
2,
83,
796,
1412... | 2.688525 | 61 |
from flask import Flask, render_template
from flask.ext.sqlalchemy import SQLAlchemy
import os
app = Flask(__name__)
app.config.from_object(os.environ['APP_SETTINGS'])
db = SQLAlchemy(app)
@app.route('/')
@app.route('/hello')
if __name__ == '__main__':
app.run(debug=True)
| [
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
198,
6738,
42903,
13,
2302,
13,
25410,
282,
26599,
1330,
16363,
2348,
26599,
198,
11748,
28686,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
1324,
13,
11250,
13,
6738,
62,
15252... | 2.601852 | 108 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat_tempest_plugin.common import test
from heat_tempest_plugin.scenario import scenario_base
from heatclient.common import template_utils
| [
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
7330,
198,
2,
220,
220,
... | 3.485437 | 206 |
import tabulate
import textwrap
import typing
import checkmarx.client
import checkmarx.model as model
| [
11748,
7400,
5039,
198,
11748,
2420,
37150,
198,
11748,
19720,
198,
198,
11748,
2198,
3876,
87,
13,
16366,
198,
11748,
2198,
3876,
87,
13,
19849,
355,
2746,
628,
628,
628
] | 3.6 | 30 |
import os
import bilby
import unittest
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal, assert_allclose,
assert_array_almost_equal, assert_)
from PyGRB.backend.makepriors import MakePriors
from PyGRB.backend.makemodels import create_model_from_key
from PyGRB.backend.admin import mkdir
from PyGRB.main.fitpulse import PulseFitter
class PulseTester(PulseFitter):
""" Test class for PulseFitter. """
def _get_base_directory(self):
"""
Sets the directory that code products are made to be /products/ in
the folder the script was ran from.
"""
dir = f'test_products/{self.tlabel}_model_comparison_{str(self.nSamples)}'
self.base_folder = dir
mkdir(dir)
if __name__ == '__main__':
unittest.main()
| [
11748,
28686,
198,
11748,
47027,
1525,
198,
11748,
555,
715,
395,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
299,
32152,
13,
33407,
1330,
357,
30493,
62,
28177,
62,
40496,
11,
6818,
62,
40496,
11,
6818,
62,
439,
19836,
11,
19... | 2.51506 | 332 |
'''
Reads processed tokens from a file called "docs_processed.txt".
Corpus type: list of lists of strings [[Str]].
Builds a gensim word2vec model; gensim reference: https://radimrehurek.com/gensim/models/word2vec.html
Gensim is an open source library using highly optimized C routines,
originally ported from the C package https://code.google.com/p/word2vec/ and extended with additional functionality and optimizations
(~70x speedup compared to plain NumPy implementation, https://rare-technologies.com/parallelizing-word2vec-in-python/).
Saves the model, loads it back with KeyedVectors.load, and writes the
weights (vecs.tsv and meta.tsv) to disk for use with the
[Embedding Projector](http://projector.tensorflow.org).

15.08.2019
Cankut Coskun
cankutcoskun@sabanciuniv.edu
'''
import numpy as np
from gensim.models import Word2Vec
from gensim.test.utils import get_tmpfile
from gensim.models import KeyedVectors
import os
# Input: docs (list of list of strings), read from docs_processed.txt in the
# current working directory.
cdir = os.getcwd()
print(cdir)
# NOTE(review): this file handle is never closed -- consider a `with` block.
f = open(cdir + "/docs_processed.txt")
content = f.read()
data = content.split("\n")
# str.split leaves an empty element after the trailing newline; drop it.
data.pop()
docs = []
for d in data:
    # Each line is a space-separated list of tokens; the trailing split
    # element (after the final separator) is dropped with pop().
    doc = d.split(" ")
    doc.pop()
    docs.append(doc)
## Parameters
# min_count: Ignore words that appear less than this
# size: Dimensionality of word embeddings
# workers: Number of processors (parallelisation)
# window: Context window for words during training
# iter: Number of epochs training over corpus
# NOTE(review): `size`/`iter` are gensim 3.x parameter names -- verify the
# pinned gensim version (4.x renamed them to vector_size/epochs).
path = get_tmpfile( cdir + "/word2vec.model")
model = Word2Vec(docs, min_count=1, size=200, workers=4, window=5, iter=100)
word_vectors = model.wv
model.save("word2vec.model")
# Word embedding dimensions
print(model.vector_size)
## Vocabulary
## Vocab size: total number of unique words in the model
vocabulary = model.wv.vocab.keys()
vocabulary = list(vocabulary)
size_vocab = len(vocabulary)
#*********Test bench*********
print(model.wv.most_similar('kredi'))
print(model.wv.most_similar_cosmul('dolar', topn = 5))
print(model.wv.most_similar_cosmul('worldcard', topn = 5))
## Get the probability distribution of the center word given context words.
## Returns: topn length list of tuples of (word, probability).
## Return type: list of (str, float)
#model.predict_output_word(["hesap", "kredi", "kart"], topn=10)
## Compute cosine similarity between two sets of words.
model.wv.n_similarity(["ankara","istanbul"], [ "izmir","tรผrkiye"])
model.wv.similarity("ankara","istanbul")
## Embeddings
#print(type(model.wv["ankara"]))
#print(model.wv["ankara"].size)
#print(model.wv["ankara"])
## Next, retrieve the word embeddings learned during training. This will be a matrix with shape `(vocab_size,embedding-dimension)`.
embed_list = []
for word in vocabulary:
    embed_list.append(model.wv[word].tolist())
print("Size vocabulary",len(embed_list))
print("Embedding dimensions",len(embed_list[0]))
embeddings = np.array(embed_list)
print(embeddings.shape)
print(embeddings[0])
## Visualize Embeddings: dump vectors and token metadata as TSV files.
import io
out_v = io.open( cdir + '/tsvFiles/vecs.tsv', 'w', encoding='utf-8')
out_m = io.open(cdir + '/tsvFiles/meta.tsv', 'w', encoding='utf-8')
for idx in range(len(vocabulary)):
    word = vocabulary[idx]
    #print(word)
    weights = embeddings[idx]
    #print(weights)
    #print(len(weights))
    # meta.tsv gets one token per line; vecs.tsv the matching tab-separated
    # embedding row.
    out_m.write(word + "\n")
    out_v.write('\t'.join([str(w) for w in weights]) + "\n")
out_v.close()
out_m.close()
'''
## Visualize the embeddings
#
## We will now write the weights to disk. To use the [Embedding Projector](http://projector.tensorflow.org), we will upload two files in tab separated format: a file of vectors (containing the embedding), and a file of meta data (containing the words).
#
# To visualize our embeddings we will upload them to the embedding projector.
#
# Open the [Embedding Projector](http://projector.tensorflow.org/).
#
# * Click on "Load data".
#
# * Upload the two files we created above: ```vecs.tsv``` and ```meta.tsv```. T
'''
| [
7061,
6,
198,
2,
5569,
82,
13686,
16326,
422,
257,
2393,
1444,
366,
31628,
62,
14681,
276,
13,
14116,
1,
198,
2,
10215,
79,
385,
2099,
1351,
286,
8341,
286,
13042,
16410,
13290,
11907,
198,
198,
2,
39582,
308,
641,
320,
1573,
17,
... | 2.859065 | 1,412 |
# -*- coding: utf-8 -*-
import base64
from django.db.models import fields
#==============================================================================
class Base64Field(models.TextField):
    """
    A base64 field to store binary data into a text field in django.

    Usage:

    class Foo(models.Model):
        data = Base64Field()

    foo = Foo()
    foo.data = 'Hello world!'
    print foo.data         # will print 'Hello world!'
    print foo.data_base64  # will print 'SGVsbG8gd29ybGQh\n'
    """
    # NOTE(review): only `fields` is imported from django.db.models in this
    # fragment; `models` must be bound elsewhere (e.g. `from django.db import
    # models`) for this to work -- verify.
    # SubfieldBase makes Django call to_python() on assignment (Django < 1.10).
    __metaclass__ = models.SubfieldBase
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
2779,
2414,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
7032,
198,
198,
2,
23926,
25609,
855,
198,
4871,
7308,
2414,
15878,
7,
27530,
13,
8206,
15878... | 2.691176 | 204 |
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Exporter tool for TF-Hub text embedding modules.
This tool creates TF-Hub Modules from embeddings text files in the following
format:
token1 1.0 2.0 3.0 4.0 5.0
token2 2.0 3.0 4.0 5.0 6.0
...
Example use:
python export.py --embedding_file=/tmp/embedding.txt --export_path=/tmp/module
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import shutil
import sys
import tempfile
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
FLAGS = None  # populated by parse_known_args() in the __main__ block below
# Name of the embedding-table variable inside the exported TF-Hub module;
# export() uses it to locate the variable to assign.
EMBEDDINGS_VAR_NAME = "embeddings"
def parse_line(line):
  """Splits one whitespace-separated embedding-file line.

  The first field is the vocabulary token; every remaining field is parsed
  as a float.

  Args:
    line: (str) One line of the text embedding file.

  Returns:
    A (token, values) pair where values is the list of floats.
  """
  fields = line.split()
  token = fields.pop(0)
  return token, [float(field) for field in fields]
def load(file_path, parse_line_fn):
  """Reads a text embedding file into memory as a numpy matrix.

  Args:
    file_path: Path to the text embedding file.
    parse_line_fn: callback function to parse each file line.

  Returns:
    A tuple of (list of vocabulary tokens, numpy matrix of embedding vectors).

  Raises:
    ValueError: if lines carry embedding vectors of differing dimensions.
  """
  tokens = []
  vectors = []
  expected_dim = None
  for line in tf.gfile.GFile(file_path):
    token, vector = parse_line_fn(line)
    if not expected_dim:
      # The first line fixes the embedding width for the whole file.
      expected_dim = len(vector)
    elif expected_dim != len(vector):
      raise ValueError(
          ("Inconsistent embedding dimension detected, "
           "%d != %d for token %s") % (expected_dim, len(vector), token))
    tokens.append(token)
    vectors.append(vector)
  return tokens, np.array(vectors)
def make_module_spec(vocabulary_file, vocab_size, embeddings_dim,
                     num_oov_buckets, preprocess_text):
  """Makes a module spec to simply perform token to embedding lookups.

  Input of this module is a 1-D list of string tokens. For T tokens input and
  an M dimensional embedding table, the lookup result is a [T, M] shaped Tensor.

  Args:
    vocabulary_file: Text file where each line is a key in the vocabulary.
    vocab_size: The number of tokens contained in the vocabulary.
    embeddings_dim: The embedding dimension.
    num_oov_buckets: The number of out-of-vocabulary buckets.
    preprocess_text: Whether to preprocess the input tensor by removing
      punctuation and splitting on spaces.

  Returns:
    A module spec object used for constructing a TF-Hub module.
  """

  def module_fn():
    """Spec function for a token embedding module."""
    tokens = tf.placeholder(shape=[None], dtype=tf.string, name="tokens")
    # Zero-initialized here; the real embedding values are assigned later in
    # export() through a placeholder/feed_dict (see that function).
    embeddings_var = tf.get_variable(
        initializer=tf.zeros([vocab_size + num_oov_buckets, embeddings_dim]),
        name=EMBEDDINGS_VAR_NAME,
        dtype=tf.float32)
    # Maps each vocabulary-file line (whole line = token) to its line number,
    # which doubles as the row index into the embedding table.
    table_initializer = tf.lookup.TextFileInitializer(
        vocabulary_file,
        tf.string, tf.lookup.TextFileIndex.WHOLE_LINE,
        tf.int64, tf.lookup.TextFileIndex.LINE_NUMBER)
    lookup_table = tf.lookup.StaticVocabularyTable(
        table_initializer, num_oov_buckets=num_oov_buckets)
    ids = lookup_table.lookup(tokens)
    combined_embedding = tf.nn.embedding_lookup(params=embeddings_var, ids=ids)
    hub.add_signature("default", {"tokens": tokens},
                      {"default": combined_embedding})

  def module_fn_with_preprocessing():
    """Spec function for a full-text embedding module with preprocessing."""
    sentences = tf.placeholder(shape=[None], dtype=tf.string, name="sentences")
    # Perform a minimalistic text preprocessing by removing punctuation and
    # splitting on spaces.
    normalized_sentences = tf.regex_replace(
        input=sentences, pattern=r"\pP", rewrite="")
    tokens = tf.string_split(normalized_sentences, " ")
    # Same zero-initialized table as in module_fn; filled in export().
    embeddings_var = tf.get_variable(
        initializer=tf.zeros([vocab_size + num_oov_buckets, embeddings_dim]),
        name=EMBEDDINGS_VAR_NAME,
        dtype=tf.float32)
    table_initializer = tf.lookup.TextFileInitializer(
        vocabulary_file,
        tf.string, tf.lookup.TextFileIndex.WHOLE_LINE,
        tf.int64, tf.lookup.TextFileIndex.LINE_NUMBER)
    lookup_table = tf.lookup.StaticVocabularyTable(
        table_initializer, num_oov_buckets=num_oov_buckets)
    sparse_ids = tf.SparseTensor(
        indices=tokens.indices,
        values=lookup_table.lookup(tokens.values),
        dense_shape=tokens.dense_shape)

    # In case some of the input sentences are empty before or after
    # normalization, we will end up with empty rows. We do however want to
    # return embedding for every row, so we have to fill in the empty rows with
    # a default.
    sparse_ids, _ = tf.sparse_fill_empty_rows(
        sparse_ids, lookup_table.lookup(tf.constant("")))
    # In case all of the input sentences are empty before or after
    # normalization, we will end up with a SparseTensor with shape [?, 0]. After
    # filling in the empty rows we must ensure the shape is set properly to
    # [?, 1]. At this point, there are no empty rows, so the new shape will be
    # [sparse_ids.dense_shape[0], max(1, sparse_ids.dense_shape[1])].
    sparse_ids = tf.sparse_reset_shape(sparse_ids)
    # "sqrtn" combiner: sum of token embeddings divided by sqrt of the number
    # of tokens in the sentence.
    combined_embedding = tf.nn.embedding_lookup_sparse(
        params=embeddings_var,
        sp_ids=sparse_ids,
        sp_weights=None,
        combiner="sqrtn")
    hub.add_signature("default", {"sentences": sentences},
                      {"default": combined_embedding})

  if preprocess_text:
    return hub.create_module_spec(module_fn_with_preprocessing)
  else:
    return hub.create_module_spec(module_fn)
def export(export_path, vocabulary, embeddings, num_oov_buckets,
           preprocess_text):
  """Exports a TF-Hub module that performs embedding lookups.

  Args:
    export_path: Location to export the module.
    vocabulary: List of the N tokens in the vocabulary.
    embeddings: Numpy array of shape [N+K,M] the first N rows are the
      M dimensional embeddings for the respective tokens and the next K
      rows are for the K out-of-vocabulary buckets.
    num_oov_buckets: How many out-of-vocabulary buckets to add.
    preprocess_text: Whether to preprocess the input tensor by removing
      punctuation and splitting on spaces.
  """
  # Write temporary vocab file for module construction.
  tmpdir = tempfile.mkdtemp()
  vocabulary_file = os.path.join(tmpdir, "tokens.txt")
  with tf.gfile.GFile(vocabulary_file, "w") as f:
    f.write("\n".join(vocabulary))
  vocab_size = len(vocabulary)
  embeddings_dim = embeddings.shape[1]
  spec = make_module_spec(vocabulary_file, vocab_size, embeddings_dim,
                          num_oov_buckets, preprocess_text)
  try:
    with tf.Graph().as_default():
      m = hub.Module(spec)
      # The embeddings may be very large (e.g., larger than the 2GB serialized
      # Tensor limit). To avoid having them frozen as constant Tensors in the
      # graph we instead assign them through the placeholders and feed_dict
      # mechanism.
      p_embeddings = tf.placeholder(tf.float32)
      load_embeddings = tf.assign(m.variable_map[EMBEDDINGS_VAR_NAME],
                                  p_embeddings)
      with tf.Session() as sess:
        sess.run([load_embeddings], feed_dict={p_embeddings: embeddings})
        m.export(export_path, sess)
  finally:
    # Always clean up the temporary vocab directory, even if export fails.
    shutil.rmtree(tmpdir)
def maybe_append_oov_vectors(embeddings, num_oov_buckets):
  """Grows `embeddings` in place by `num_oov_buckets` rows of zeros.

  Because the appended OOV rows are all-zero, more than one OOV bucket is
  only meaningful when the module is later fine-tuned.

  Args:
    embeddings: Numpy array of embeddings to extend in place.
    num_oov_buckets: Number of OOV buckets to append (no-op when 0).
  """
  rows = np.shape(embeddings)[0]
  cols = np.shape(embeddings)[1]
  # ndarray.resize zero-fills the new rows; refcheck=False skips the
  # reference-count safety check so the in-place resize always succeeds.
  embeddings.resize([rows + num_oov_buckets, cols], refcheck=False)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--embedding_file",
type=str,
default=None,
help="Path to file with embeddings.")
parser.add_argument(
"--export_path",
type=str,
default=None,
help="Where to export the module.")
parser.add_argument(
"--preprocess_text",
type=bool,
default=True,
help="Whether to preprocess the input tensor by removing punctuation and "
"splitting on spaces. Use this if input is a dense tensor of untokenized "
"sentences.")
parser.add_argument(
"--num_oov_buckets",
type=int,
default="1",
help="How many OOV buckets to add.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| [
2,
15069,
2864,
383,
309,
22854,
37535,
14699,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
... | 2.717527 | 3,526 |
# coding: utf-8

# In[1]:

import numpy as np
from numpy import linalg as LA
import tensorflow as tf
import matplotlib.pyplot as plt

# NOTE(review): np.random.seed() with no argument seeds from OS entropy, so
# runs are NOT reproducible; pass a fixed integer if that is the intent.
np.random.seed()

m = 1 # dimension
k_squared = 0.04        # weight on the ||u1||^2 term of wits_cost below
m_inv = 1.0
learning_rate = 1e-5
# epochs = 20000
epochs = 10000
batch_size = 1000000
x_stddev = 5            # scale of the Gaussian samples fed to x0
test_averaging=100      # noise realizations averaged per test point
decay = 1 - 10*1e-10    # per-epoch multiplicative learning-rate decay factor
# x_train = np.zeros((epochs,1))
# for i in range(0,epochs):
#     x_train[i]=np.random.normal(scale = x_stddev)
#     x_train[i]=np.random.uniform(-20,20,1)
w1=([[ 0.0901572 , 0.2370918 , 0.6920835 , -0.45759007, -0.22167274,
-0.46439773, -0.45912468, 0.6203555 , 0.0419175 , 0.60444146,
4.952244 , 0.04192306, 0.53345317, 0.22216071, -0.24009007,
6.301405 , -0.50758445, -0.21116066, -0.37131187, -0.22089699,
0.04239784, 0.04331616, -0.18591626, -0.22142634, -0.4953288 ,
-0.23889707, 0.67850924, 0.5476355 , -0.7077681 , -1.0123378 ,
0.04195131, 0.22627208, -0.1888109 , 0.21195143, 0.44928712,
0.04276987, -0.20532611, 0.44252077, -0.04190878, 0.46343717,
-0.22356562, -0.5474644 , 0.04206235, -0.7823536 , -0.23852947,
0.26123488, -0.2369954 , -0.25654712, -0.25827566, 0.5539032 ,
0.22289808, 0.51685596, 1.0848937 , -0.6088887 , -0.04201594,
0.21767725, -0.23810348, -0.4646694 , -0.53889185, 1.1317953 ,
0.2089353 , -0.23368704, -5.6309223 , -0.2510263 , 0.71514434,
1.2417319 , 5.88868 , 0.4928691 , 0.2434442 , -0.54655886,
0.6717308 , 0.44354093, -0.7333635 , -0.6745134 , -0.04279398,
-0.7975697 , 0.22850451, -0.25397167, 0.2451518 , 1.1024855 ,
-0.53172445, 0.04208738, -0.04233624, 0.8983515 , 0.7710562 ,
-0.2548618 , -0.21645324, -1.0170518 , 0.9672949 , -0.23664552,
-0.22946735, 0.63287175, -0.79163665, -0.52115196, 0.21819146,
-0.22541553, 0.69617873, 0.73459744, 0.50693244, -0.24401082,
-0.5940728 , 1.3320855 , -1.140783 , 0.23237722, -1.1244652 ,
-5.6705046 , 0.2540727 , -0.04189253, -0.20804366, -0.04187457,
-0.21428825, 0.04335834, 0.96757776, -5.0284066 , -0.21626869,
-0.540456 , 0.51839244, 0.21898666, 0.9066629 , 0.22020821,
-0.50667083, 0.7983404 , -5.5656185 , -0.04212693, 0.25555643,
-0.45822552, 0.24277431, -0.04205061, 0.15989499, 0.23738208,
0.2237451 , 0.24180941, 0.49051645, -0.45438182, 0.47147265,
-0.04477705, -5.479455 , 0.04174316, 0.2551995 , 0.57939404,
-0.6557258 , -0.04206115, 0.6763663 , 0.23443314, 0.22873235,
-0.04198467, -0.4861976 , -0.6498148 , 0.44098404, -0.04172933]])
w2=([[-0.84504426],
[-0.51247114],
[-2.0340562 ],
[-0.76634175],
[ 0.61729795],
[-0.58101785],
[-0.6854419 ],
[ 0.6577067 ],
[-0.7736458 ],
[-1.8916265 ],
[-1.090016 ],
[-0.873359 ],
[ 0.42003942],
[-0.47995704],
[ 0.5497382 ],
[-2.1801522 ],
[-0.4831816 ],
[ 0.5648663 ],
[ 0.9415591 ],
[ 0.78689337],
[-0.91083336],
[-0.9763873 ],
[ 0.72957134],
[ 0.5560705 ],
[-0.4719117 ],
[ 0.5045661 ],
[ 0.66004866],
[-1.5987552 ],
[-0.4643787 ],
[-1.9016262 ],
[-0.96371204],
[-0.611284 ],
[ 0.65741754],
[-0.5599199 ],
[ 0.45351097],
[-0.97737604],
[ 0.7038435 ],
[ 0.5943796 ],
[ 0.9532466 ],
[ 0.7460163 ],
[ 0.5358916 ],
[-0.44170648],
[-0.9419488 ],
[-0.67798716],
[ 0.46497133],
[-0.391163 ],
[ 0.592325 ],
[ 0.45341557],
[ 0.43128943],
[ 0.41603804],
[-0.5674596 ],
[ 0.38761157],
[ 2.704492 ],
[-0.80798954],
[ 0.83548236],
[-0.5111326 ],
[ 0.6162054 ],
[-0.7550416 ],
[-0.4759281 ],
[-2.5150294 ],
[-0.50941396],
[ 0.49656197],
[-1.6215047 ],
[ 0.47244617],
[ 0.5376818 ],
[ 3.9775271 ],
[ 1.6411495 ],
[ 0.45862758],
[-0.47453666],
[-0.45376387],
[ 0.5765134 ],
[ 0.56581146],
[-1.1258857 ],
[-1.0639522 ],
[ 1.0760058 ],
[-1.235642 ],
[-0.53190786],
[ 0.47500044],
[-0.4640562 ],
[ 2.372436 ],
[-0.67921394],
[-1.0515941 ],
[ 1.1015248 ],
[ 1.4750271 ],
[-2.5024996 ],
[ 0.43387246],
[ 0.53801376],
[-2.327031 ],
[ 1.6461738 ],
[ 0.4792684 ],
[ 0.76675403],
[ 0.4892529 ],
[-1.1853842 ],
[-0.38456675],
[-0.80742 ],
[ 0.45512152],
[ 0.44872195],
[-2.1801472 ],
[ 0.67657053],
[ 0.40404373],
[-0.7937116 ],
[ 0.77783364],
[-2.4614215 ],
[-0.6792038 ],
[ 2.5339882 ],
[-1.5957985 ],
[-0.4930483 ],
[ 0.9237745 ],
[ 0.59356 ],
[ 0.9956936 ],
[ 0.47309944],
[-0.9341501 ],
[ 1.6710144 ],
[ 1.1764897 ],
[ 0.46367607],
[-0.7061653 ],
[ 0.46270266],
[-0.8225886 ],
[ 1.8290645 ],
[-0.5919749 ],
[-0.44208294],
[-1.948723 ],
[-1.3858926 ],
[ 0.8691517 ],
[-0.37294617],
[-0.6558015 ],
[-0.6871818 ],
[ 1.0781469 ],
[-0.87414324],
[-0.47635847],
[-0.5639866 ],
[-0.47552544],
[ 0.7286468 ],
[-0.34246516],
[ 0.6627983 ],
[ 0.7922385 ],
[-0.80032754],
[-0.6089186 ],
[-0.46824703],
[ 0.40888965],
[-0.56078476],
[ 0.98349524],
[ 0.48105317],
[-0.5328922 ],
[-0.70839876],
[ 1.0339078 ],
[-0.61342776],
[-0.79129976],
[ 0.48441455],
[ 0.5570059 ]])
# In[ ]:
b1=([-1.49632066e-01, 2.16088206e-01, 3.65778732e+00, -1.21041000e+00,
-1.35061651e-01, -1.29561055e+00, -1.22450840e+00, -2.32706118e+00,
-2.15838999e-02, 3.23842049e+00, 9.99821246e-01, 5.85471094e-02,
1.77022383e-01, 1.33129925e-01, -2.35600263e-01, -9.69530642e-01,
7.31552601e-01, -9.77801457e-02, -1.28652573e+00, 2.19140470e-01,
1.23102725e-01, -1.57810926e-01, 1.53959572e-01, -1.32225156e-01,
-1.57481730e-01, -2.27377295e-01, 4.70594555e-01, 2.85312033e+00,
3.12517256e-01, 5.74599028e+00, -2.24734023e-02, 1.56200081e-01,
-7.49236792e-02, 9.45027769e-02, -9.54202712e-01, -1.19746946e-01,
1.76245585e-01, -1.47855604e+00, 1.07089831e-02, 1.27336562e+00,
2.21104607e-01, -1.81072652e-01, 8.26996788e-02, -5.77640235e-01,
-2.25629151e-01, 3.77086610e-01, -2.16601476e-01, -3.45170379e-01,
-3.56887221e-01, -5.90745807e-01, -2.19919547e-01, -1.86245930e+00,
6.39037895e+00, 2.27631497e+00, -7.94772431e-02, 1.15748756e-01,
3.03080708e-01, -1.28756618e+00, -1.78790972e-01, -5.63836622e+00,
8.99272710e-02, -1.94928318e-01, -7.41466433e-02, -3.07720184e-01,
2.70801663e-01, 7.34310913e+00, 8.29299539e-02, -7.81100869e-01,
2.56538272e-01, -1.80862710e-01, 2.18636543e-01, 1.07038522e+00,
-2.78851628e+00, -2.51557636e+00, 1.20577067e-01, -3.08366776e+00,
1.66032478e-01, -3.27756613e-01, 2.67747581e-01, -6.31493044e+00,
-1.79744363e+00, -4.68141548e-02, 7.84308538e-02, -5.00692749e+00,
4.00230837e+00, -3.33558679e-01, -1.12384461e-01, -5.97595739e+00,
-5.45763254e+00, -2.12760210e-01, 2.53413409e-01, 1.98413730e-01,
4.21520996e+00, 6.86769903e-01, -2.12254256e-01, -1.46499500e-01,
4.68130678e-01, -4.72452021e+00, -1.81595242e+00, -2.60216951e-01,
2.21049786e+00, -1.94112194e+00, 6.55437994e+00, -2.68400759e-01,
5.60166454e+00, -7.55500719e-02, 3.28553319e-01, 6.42770529e-03,
-9.20422822e-02, -1.11987339e-02, -1.00595385e-01, -1.61407873e-01,
-5.45945311e+00, -1.01744628e+00, -1.06990181e-01, 1.96982336e+00,
1.70830369e-01, -2.16641054e-01, 5.29849386e+00, 1.28267542e-01,
7.34108150e-01, -4.16245031e+00, -7.15808198e-02, -9.45318416e-02,
3.37766856e-01, -1.21507788e+00, -3.34076196e-01, 4.01906781e-02,
-1.60489708e-01, 2.17334837e-01, 1.42836973e-01, 2.45796412e-01,
1.53452313e+00, 9.28530157e-01, 1.37115467e+00, 1.37233928e-01,
-6.79805875e-02, 4.52714004e-02, 3.36023450e-01, -2.64137276e-02,
-2.08564326e-01, 4.37483490e-02, 2.18686923e-01, 2.00063869e-01,
-2.48323262e-01, 2.81832628e-02, -1.50140417e+00, 2.45667958e+00,
-9.98386204e-01, -4.18332741e-02])
b2= ([-0.47102088])
w1_init = tf.constant_initializer(w1)
b1_init = tf.constant_initializer(b1)
w2_init = tf.constant_initializer(w2)
b2_init = tf.constant_initializer(b2)
# In[ ]:
w3=([[ 0.13879539, -0.13859922, -0.13828675, 0.13855068, 0.13883771,
-0.13857202, 0.13830268, -0.13850647, -0.13853791, -0.1384053 ,
0.1412606 , 0.13873734, -0.13856825, 0.13891087, 0.14233626,
-0.13833769, -0.13845266, -0.13875286, 0.13869044, 0.13887323,
0.13870366, -0.13859619, 0.13872615, 0.1389381 , -0.14017113,
0.13879456, -0.13882846, 0.1387299 , -0.1387175 , -0.13880575]])
w4=([[ 0.48300147],
[-1.2237911 ],
[-0.4338532 ],
[ 1.1876252 ],
[ 0.9885277 ],
[-1.3522526 ],
[ 1.0765364 ],
[-0.8109151 ],
[-1.0949211 ],
[-1.1373827 ],
[ 0.58461624],
[ 0.94439197],
[-1.2257808 ],
[ 1.2257911 ],
[ 0.37756 ],
[-0.9867794 ],
[-1.1356777 ],
[-0.7308337 ],
[ 1.3722858 ],
[ 1.1147363 ],
[ 1.1841174 ],
[-1.4234818 ],
[ 1.1866816 ],
[ 1.2437216 ],
[-0.7649936 ],
[ 0.8123563 ],
[-0.9246504 ],
[ 1.1653202 ],
[-1.1385669 ],
[-0.7842877 ]])
b3=([ 0.00128192, -0.02412418, -0.04015647, 0.02808985, 0.00440433,
-0.02689951, 0.051215 , -0.02894638, -0.02851768, -0.04047732,
-0.12068842, 0.01155403, -0.02665246, 0.00033755, -0.16250175,
-0.04577867, -0.0361448 , -0.00925925, 0.01730765, 0.00252271,
0.0154872 , -0.02517299, 0.01376084, -0.00146507, 0.07024308,
0.00624953, -0.00542742, 0.01337393, -0.01448685, -0.00600676])
b4=([0.13220455])
w3_init = tf.constant_initializer(w3)
b3_init = tf.constant_initializer(b3)
w4_init = tf.constant_initializer(w4)
b4_init = tf.constant_initializer(b4)
# In[2]:

# declare the training data placeholders
# input x - just one is x0
x0 = tf.placeholder(tf.float32, [None, 1])
# x1 = tf.placeholder(tf.float32, [None, 1])
# x1_noise = tf.placeholder(tf.float32, [None, 1])
# x2 = tf.placeholder(tf.float32, [None, 1])
# # now declare the output data placeholder
# u1 = tf.placeholder(tf.float32, [None, 1])
# u2 = tf.placeholder(tf.float32, [None, 1])
# y = tf.placeholder(tf.float32, [None, 2])

# In[3]:

# # now declare the weights connecting the input to the hidden layer
# W1 = tf.Variable(tf.random_normal([1, 150], stddev=0.03), name='W1')
# b1 = tf.Variable(tf.random_normal([150]), name='b1')
# # and the weights connecting the hidden layer to the u1 output layer
# W2 = tf.Variable(tf.random_normal([150, 1], stddev=0.03), name='W2')
# b2 = tf.Variable(tf.random_normal([1]), name='b2')
# # declare weights connecting x1+z to a hidden layer
# W3 = tf.Variable(tf.random_normal([1, 30], stddev=0.03), name='W1')
# b3 = tf.Variable(tf.random_normal([30]), name='b3')
# # and the weights connecting the hidden layer to the u1 output layer
# W4 = tf.Variable(tf.random_normal([30, 1], stddev=0.03), name='W2')
# b4 = tf.Variable(tf.random_normal([1]), name='b4')

# NOTE(review): this Variable is immediately rebound to the dense-layer output
# below and never used -- dead assignment?
u1 = tf.Variable(tf.random_normal([1]))

# In[4]:

# # calculate the output of the hidden layer
# hidden_out_1 = tf.add(tf.matmul(x0, W1), b1)
# hidden_out_1 = tf.nn.sigmoid(hidden_out_1)
# # # output layer
# u1 = tf.identity(tf.add(tf.matmul(hidden_out_1, W2), b2))
# # print(u1.get_shape())
# # x1 = u1 + x0

# First controller: 150-unit tanh layer -> linear output u1, initialized
# from the pre-trained constants w1/b1/w2/b2 above.
hidden_out_1 = tf.layers.dense(
    x0, 150, tf.nn.tanh, use_bias=True, kernel_initializer=w1_init,
    bias_initializer=b1_init, name='firstlayer')
u1 = tf.layers.dense(
    hidden_out_1, m, activation=tf.identity, use_bias=True, kernel_initializer=w2_init,
    bias_initializer=b2_init, name='secondlayer')
x1 = u1 + x0
# add noise to x1
# NOTE(review): z has fixed shape [1,1], so the SAME noise sample is broadcast
# across the whole batch at each step -- confirm this is intended.
z = tf.random_normal([1,1])
x1_noise = x1 + z
# hidden_out_2 = tf.add(tf.matmul(x1_noise, W3), b3)
# hidden_out_2 = tf.nn.sigmoid(hidden_out_2)
# u2 = tf.identity(tf.add(tf.matmul(hidden_out_2, W4), b4))
# Second controller: 30-unit sigmoid layer -> linear output u2, initialized
# from w3/b3/w4/b4.
hidden_out_2 = tf.layers.dense(
    x1_noise, 30, tf.nn.sigmoid, use_bias=True, name='thirdlayer',
    kernel_initializer=w3_init, bias_initializer=b3_init)
u2 = tf.layers.dense(
    hidden_out_2, m, activation=tf.identity, use_bias=True, name='fourthlayer',
    kernel_initializer=w4_init, bias_initializer=b4_init)
x2 = x1 - u2

# In[5]:

# wits_cost = tf.add(tf.multiply(m_inv,tf.multiply(k_squared, tf.norm(u1,'euclidean'))),
#                  tf.multiply(m_inv,tf.norm(x2,'euclidean')))
# wits_cost = tf.norm(u1)
# Batch-averaged cost: k^2*||u1||^2 + ||x2||^2.
wits_cost = (k_squared*tf.norm(u1)**2 + tf.norm(x2)**2) / batch_size
# Learning rate can be overridden per step via feed_dict (decayed below).
adaptive_learning_rate = tf.placeholder_with_default(learning_rate, [])
optimizer = tf.train.AdamOptimizer(learning_rate=adaptive_learning_rate).minimize(wits_cost)

# In[6]:

# finally setup the initialisation operator
init_op = tf.global_variables_initializer()
# In[7]:

plt.figure()
# start the session
with tf.Session() as sess:
    # initialize the variables
    sess.run(init_op)
    # NOTE(review): this draws epochs * batch_size * m = 1e10 float64 samples
    # (~80 GB) up front -- confirm this is feasible / intended.
    x_train = np.random.normal(size=epochs * batch_size * m, scale=x_stddev)
    for epoch in range(epochs):
        # x_batch = x_train[epoch].reshape(1,1)
        # NOTE(review): the slice start advances by 1 per epoch, not by
        # batch_size, so consecutive batches overlap almost entirely --
        # probably meant x_train[epoch*batch_size : (epoch+1)*batch_size].
        x_batch = x_train[epoch: epoch + (batch_size * m)].reshape((batch_size, m))
        # c = sess.run(optimiser, feed_dict={x0:np.zeros((10,1))})#{x0: x_batch})
        # sess.run(optimiser, feed_dict={x0: x_batch})
        # One optimizer step with the exponentially decayed learning rate.
        _,cost = sess.run([optimizer, wits_cost], feed_dict={x0: x_batch,
            adaptive_learning_rate: learning_rate * (decay**epoch)})
        # print("Epoch:", (epoch + 1), "cost =", "{:.3f}")
        # print(u1)
        if epoch % 1 == 0:
            print("Epoch: ", epoch, "Cost: ",cost)
            plt.plot(epoch, cost, 'bo')
    # Test over a continuous range of X
    x0_test = np.linspace(-4*x_stddev, 4*x_stddev, num=1000)
    u1_test, u2_test, x1_test = np.zeros((1, 1000)), np.zeros((1, 1000)), np.zeros(
        (1, 1000))
    for i in range(1000):
        u1t, u2t, x1t = 0, 0, 0
        # Average over test_averaging noise realizations per test point.
        for _ in range(test_averaging):
            u1tmp, u2tmp, x1tmp = sess.run(
                [u1, u2, x1],
                feed_dict={x0: x0_test[i].reshape((1, 1))})
            u1t += u1tmp
            u2t += u2tmp
            x1t += x1tmp
        u1_test[0, i] = u1t / test_averaging
        # Sign flipped for plotting: u2 is subtracted from x1 in the graph.
        u2_test[0, i] = -u2t / test_averaging
        x1_test[0, i] = x1t / test_averaging
    # Read back the trained layer parameters by name for comparison below.
    with tf.variable_scope('firstlayer', reuse=True):
        w1_post = tf.get_variable('kernel').eval()
        b1_post = tf.get_variable('bias').eval()
    with tf.variable_scope('secondlayer', reuse=True):
        w2_post = tf.get_variable('kernel').eval()
        b2_post = tf.get_variable('bias').eval()
    with tf.variable_scope('thirdlayer', reuse=True):
        w3_post = tf.get_variable('kernel').eval()
        b3_post = tf.get_variable('bias').eval()
    with tf.variable_scope('fourthlayer', reuse=True):
        w4_post = tf.get_variable('kernel').eval()
        b4_post = tf.get_variable('bias').eval()
    print(cost)
plt.show()
l1, = plt.plot(x0_test, u1_test[0], label="U1 Test")
l3, = plt.plot(x0_test, u2_test[0], label="U2 Test")
plt.legend(handles=[l1, l3])
# plt.title("{} Unit NN With Activation Fn {}".format(
#     str(num_units_1), str(activation_fn)))
# plt.savefig("figure_u_1.png
plt.show()
plt.clf()
l2, = plt.plot(x0_test, x1_test[0], label="X1 Test")
# plt.title("{} Unit NN With Activation Fn {}".format(
#     str(num_units_1), str(activation_fn)))
plt.legend(handles=[l2])
# plt.savefig("figure_x_1.png")
plt.show()

# In[ ]:

# How far did training move the weights from their initial constants?
w1_post.shape
np.array(w1).shape
np.linalg.norm(w1_post - np.array(w1))
# np.linalg.norm(w1)

# In[ ]:

np.linalg.norm(w4_post - np.array(w4))

# In[ ]:

# In[ ]:
| [
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
554,
58,
16,
5974,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
1330,
300,
1292,
70,
355,
9131,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
2603,
29487,... | 1.761495 | 9,417 |
# -*- coding: utf-8 -*-
__version__ = '1.3.1'

VERSION_DEV = True
VERSION_DATE = '20150826'


def GetVersion():
  """Returns the plaso version string.

  Development builds get the snapshot date appended; release builds
  return the bare version number.
  """
  if VERSION_DEV:
    return u'{0:s}_{1:s}'.format(__version__, VERSION_DATE)
  return __version__
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
834,
9641,
834,
796,
705,
16,
13,
18,
13,
16,
6,
198,
198,
43717,
62,
39345,
796,
6407,
198,
43717,
62,
35,
6158,
796,
705,
1264,
33042,
2075,
6,
628,
198,
42... | 2.444444 | 108 |
# BSD Licence
# Copyright (c) 2011, Science & Technology Facilities Council (STFC)
# All rights reserved.
#
# See the LICENSE file in the source distribution of this software for
# the full license text.
"""
Some sanity checking code for the CMIP5 MIP tables.
"""
import sqlite3
from drslib import cmip5
# Load all CMIP5 MIP tables once; they are scanned below.
table_store = cmip5.get_table_store()

# Throw-away in-memory database used only for this sanity check.
db = sqlite3.connect(':memory:')
c = db.cursor()
c.execute('''
create table var (
  name vchar(16),
  mip_table vchar(16),
  realm vchar(16)
)
''')

# Record every (variable, table, realm) triple found in the MIP tables.
for table in table_store.tables.values():
    for var in table.variables:
        try:
            realms = table.get_variable_attr(var, 'modeling_realm')
        except AttributeError:
            # Variable has no modeling_realm attribute; skip it.
            continue

        # Only one realm should be defined but just in case
        for realm in realms:
            c.execute('insert into var values (?, ?, ?)', (var, table.name, realm))
| [
2,
347,
10305,
10483,
594,
198,
2,
15069,
357,
66,
8,
2813,
11,
5800,
1222,
8987,
48939,
4281,
357,
2257,
4851,
8,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
4091,
262,
38559,
24290,
2393,
287,
262,
2723,
6082,
286,
428,
3788,... | 2.638806 | 335 |
from django.urls import path
from .views import TasksListView, Tasks2ListView, ProjectListView, RoleListView
# URL namespace for this app (used when reversing, e.g. 'tasks:...').
app_name = 'tasks'

urlpatterns = [
    # NOTE(review): no route carries a `name=` kwarg, so none of these can be
    # reversed by name -- confirm templates/callers use literal paths.
    path('', TasksListView.as_view()),
    path('take2', Tasks2ListView.as_view()),
    path('project', ProjectListView.as_view()),
    path('role', RoleListView.as_view())
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
764,
33571,
1330,
309,
6791,
8053,
7680,
11,
309,
6791,
17,
8053,
7680,
11,
4935,
8053,
7680,
11,
20934,
8053,
7680,
198,
1324,
62,
3672,
796,
705,
83,
6791,
6,
198,
6371,
332... | 2.666667 | 120 |
"""
Uses the Open Trivia Database https://opentdb.com/ for a game of trivia.
"""
import aiohttp
import asyncio
import html
import pyryver
import random
import typing
CUSTOM_TRIVIA_QUESTIONS = {}
class OpenTDBError(Exception):
    """
    An exception raised when an error occurs when interfacing OpenTDB.

    The code is one of the CODE_ constants.
    """

    # Response codes as returned by the OpenTDB API's "response_code" field.
    CODE_SUCCESS = 0
    CODE_NO_RESULTS = 1
    CODE_INVALID_PARAM = 2
    CODE_TOKEN_NOT_FOUND = 3
    CODE_TOKEN_EMPTY = 4
class TriviaSession:
    """
    A trivia session for interfacing OpenTDB.

    NOTE(review): ``self._session`` (an aiohttp-style client session,
    presumably) and the initial ``self._token`` are not set anywhere in this
    fragment -- an __init__ elsewhere must create them; verify.
    """

    async def start(self, use_token: bool = True):
        """
        Start the session.

        If use_token is true, a token will be retrieved to avoid duplicate
        questions.
        """
        if use_token:
            self._token = await self.retrieve_token()

    async def close(self):
        """
        Close the session.
        """
        await self._session.close()

    async def retrieve_token(self) -> str:
        """
        Retrieve a session token.

        Session tokens are used to ensure no duplicate questions are retrieved.
        They expire after 6 hours of inactivity.
        """
        url = "https://opentdb.com/api_token.php?command=request"
        async with self._session.get(url) as resp:
            resp.raise_for_status()
            data = await resp.json()
            if data["response_code"] != OpenTDBError.CODE_SUCCESS:
                raise OpenTDBError(f"Bad response code: {data['response_code']}", data["response_code"])
            return data["token"]

    async def reset_token(self):
        """
        Reset the session token.

        Does nothing if no token is currently held.
        """
        if self._token is None:
            return
        url = f"https://opentdb.com/api_token.php?command=reset&token={self._token}"
        async with self._session.get(url) as resp:
            resp.raise_for_status()
            data = await resp.json()
            if data["response_code"] != OpenTDBError.CODE_SUCCESS:
                raise OpenTDBError(f"Bad response code: {data['response_code']}", data["response_code"])

    async def get_categories(self) -> typing.List[typing.Dict[str, typing.Any]]:
        """
        Get all the categories and their IDs.
        """
        url = "https://opentdb.com/api_category.php"
        async with self._session.get(url) as resp:
            resp.raise_for_status()
            data = await resp.json()
            return data["trivia_categories"]

    # Difficulty values accepted by get_questions().
    DIFFICULTY_EASY = "easy"
    DIFFICULTY_MEDIUM = "medium"
    DIFFICULTY_HARD = "hard"

    # Question-type values accepted by get_questions().
    TYPE_MULTIPLE_CHOICE = "multiple"
    TYPE_TRUE_OR_FALSE = "boolean"

    async def get_questions(self, amount: int, category: int = None, difficulty: str = None, question_type: str = None):
        """
        Get questions.

        Optional filters (category/difficulty/question_type) and the session
        token are appended to the query string only when present.
        """
        url = f"https://opentdb.com/api.php?amount={amount}"
        if category is not None:
            url += f"&category={category}"
        if difficulty is not None:
            url += f"&difficulty={difficulty}"
        if question_type is not None:
            url += f"&type={question_type}"
        if self._token is not None:
            url += f"&token={self._token}"
        async with self._session.get(url) as resp:
            resp.raise_for_status()
            data = await resp.json()
            if data["response_code"] != OpenTDBError.CODE_SUCCESS:
                raise OpenTDBError(f"Bad response code: {data['response_code']}", data["response_code"])
            # Unescape HTML entities in the question text before returning.
            for result in data["results"]:
                result["question"] = html.unescape(result["question"])
            return data["results"]
class CustomTriviaSession:
"""
A trivia session using only custom questions.
"""
def start(self, category: str = None):
"""
Start the session.
If the category is not specified, all the custom questions will be used.
"""
try:
if category is not None:
self._questions = CUSTOM_TRIVIA_QUESTIONS[category]["questions"]
else:
self._questions = []
for v in CUSTOM_TRIVIA_QUESTIONS.values():
self._questions.extend(v["questions"])
except KeyError as e:
raise ValueError("Error trying to load questions") from e
self._index = 0
random.shuffle(self._questions)
def get_question(self, difficulty: str = None, question_type: str = None):
"""
Get a question.
"""
while self._index < len(self._questions):
question = self._questions[self._index]
self._index += 1
if difficulty is not None and question["difficulty"] != difficulty:
continue
if question_type is not None and question["type"] != question_type:
continue
return question
raise OpenTDBError("Ran out of questions!", OpenTDBError.CODE_TOKEN_EMPTY)
class TriviaGame:
"""
A game of trivia.
"""
async def start(self, host: typing.Any = None):
"""
Start the game.
If a host is specified, it will be stored as self.host.
The host serves no other purpose.
"""
# Custom category
if isinstance(self.category, str):
self._custom_session = CustomTriviaSession()
if self.category == "custom":
self._custom_session.start()
else:
self._custom_session.start(self.category)
else:
self._session = TriviaSession()
await self._session.start()
if host is not None:
self.host = host
async def end(self):
"""
End the game.
"""
if self._session:
await self._session.close()
self._session = None
self._custom_session = None
def set_category(self, category: typing.Union[int, str]):
"""
Set the category.
If the category is a string, then it is assumed to be a custom category.
If the category is "custom", then all custom categories will be included.
"""
self.category = category
def set_difficulty(self, difficulty: str):
"""
Set the difficulty.
"""
self.difficulty = difficulty
def set_type(self, question_type: str):
"""
Set the question type (True/False or Multiple Choice).
"""
self.question_type = question_type
async def get_categories(self) -> typing.List[typing.Dict[str, typing.Any]]:
"""
Get all the categories and their IDs.
"""
return await self._session.get_categories()
async def next_question(self):
"""
Move on to the next question.
This changes the value of self.current_question.
The current question has the following format:
- "category": The category (str)
- "type": The question type (str, one of TriviaSession.TYPE_TRUE_OR_FALSE or TriviaSession.TYPE_MULTIPLE_CHOICE)
- "difficulty": The question difficulty (str, one of the TriviaSession.DIFFICULTY_ constants)
- "question": The question (str)
- "answers": A list of possible answers ([str])
- "correct_answer": The index of the correct answer in the list (int)
- "answered": If the question has been answered (bool)
"""
if isinstance(self.category, str):
question = self._custom_session.get_question(difficulty=self.difficulty, question_type=self.question_type)
else:
question = (await self._session.get_questions(1, category=self.category, difficulty=self.difficulty, question_type=self.question_type))[0]
self.current_question = {
"category": question["category"],
"type": question["type"],
"difficulty": question["difficulty"],
"question": question["question"],
"answered": False,
}
if question["type"] == TriviaSession.TYPE_TRUE_OR_FALSE:
self.current_question["answers"] = [
"True",
"False"
]
self.current_question["correct_answer"] = 0 if question["correct_answer"] == "True" else 1
else:
answers = question["incorrect_answers"]
random.shuffle(answers)
# Insert the correct answer at a random index
index = random.randint(0, len(answers))
answers.insert(index, question["correct_answer"])
self.current_question["answers"] = answers
self.current_question["correct_answer"] = index
def answer(self, answer: int, user: typing.Any = None, points: int = None) -> bool:
"""
Answer the current question.
Returns whether the answer was correct.
If user and points are provided, it will be added to the scoreboard.
Note that regardless of whether the question was answered correctly,
the current question will not be changed.
"""
if answer >= len(self.current_question["answers"]):
raise ValueError("Answer out of range")
self.current_question["answered"] = True
if answer == self.current_question["correct_answer"]:
if user is not None and points is not None:
if user in self.scores:
self.scores[user] += points
else:
self.scores[user] = points
return True
else:
if user is not None and points is not None and user not in self.scores:
# still record a score of 0 even if it's wrong
self.scores[user] = 0
return False
async def get_categories() -> typing.List[typing.Dict[str, typing.Any]]:
"""
Get all the categories and their IDs.
Used to get categories without a game or session object.
"""
url = "https://opentdb.com/api_category.php"
async with aiohttp.request("GET", url) as resp:
resp.raise_for_status()
data = await resp.json()
return data["trivia_categories"]
def get_custom_categories() -> typing.List[str]:
"""
Get all the custom trivia question categories.
"""
return list(CUSTOM_TRIVIA_QUESTIONS.keys())
def set_custom_trivia_questions(questions):
"""
Set the custom trivia questions.
"""
global CUSTOM_TRIVIA_QUESTIONS # pylint: disable=global-statement
CUSTOM_TRIVIA_QUESTIONS = questions
_T = typing.TypeVar("_T")
def order_scores(scores: typing.Dict[_T, int]) -> typing.Dict[int, typing.List[_T]]:
"""
Order a dict of {player: score} to produce a ranking.
The ranking will be a dict of {rank: (players, score)}. It will be ordered from first place
to last place and start at 1.
"""
if not scores:
return {}
scores = sorted(scores.items(), key=lambda x: x[1], reverse=True)
rank = 0
last_score = None
ranks = {}
for player, score in scores:
if last_score is None or score < last_score:
rank += len(ranks[rank]) if rank in ranks else 1
last_score = score
ranks[rank] = ([player], score)
elif score == last_score:
ranks[rank][0].append(player)
return ranks
class LatexBotTriviaGame:
"""
A game of trivia in latexbot.
"""
TRIVIA_NUMBER_EMOJIS = ["one", "two", "three", "four", "five", "six", "seven", "eight"]
TRIVIA_POINTS = {
TriviaSession.DIFFICULTY_EASY: 10,
TriviaSession.DIFFICULTY_MEDIUM: 20,
TriviaSession.DIFFICULTY_HARD: 30,
}
ERR_MSGS = {
OpenTDBError.CODE_NO_RESULTS: "No results!",
OpenTDBError.CODE_INVALID_PARAM: "Internal error",
OpenTDBError.CODE_TOKEN_EMPTY: "Ran out of questions! Please end the game using `@latexbot trivia end`.",
OpenTDBError.CODE_TOKEN_NOT_FOUND: "Invalid game session! Perhaps the game has expired?",
OpenTDBError.CODE_SUCCESS: "This should never happen.",
}
async def _timeout(self, delay: float):
"""
A task that waits for an amount of time and then terminates the game.
"""
try:
await asyncio.sleep(delay)
if not self.ended:
self.ended = True
await self.chat.send_message(f"The trivia game started by {self.get_user_name(self.game.host)} has ended due to inactivity.", self.msg_creator)
await self.end()
except asyncio.CancelledError:
pass
async def _try_get_next(self) -> bool:
"""
Try to get the next trivia question while handling errors.
Returns whether the question was obtained successfully.
This does not acquire the lock.
"""
try:
await self.game.next_question()
return True
except OpenTDBError as e:
err = self.ERR_MSGS[e.code]
await self.chat.send_message(f"Cannot get next question: {err}", self.msg_creator)
if e.code == OpenTDBError.CODE_TOKEN_NOT_FOUND:
# End the session
await self.chat.send_message("Ending invalid session...", self.msg_creator)
await self.end()
return False
async def next_question(self):
"""
Get the next question or repeat the current question and send it to the chat.
"""
try:
async with self.lock:
self.refresh_timeout()
# Only update the question if already answered
# Try to get the next question
if self.game.current_question["answered"] and not await self._try_get_next():
return
formatted_question = self.format_question(self.game.current_question)
# Send the message
# First send an empty message to get the reactions
mid = await self.chat.send_message("Loading...", self.msg_creator)
msg = await pyryver.retry_until_available(self.chat.get_message, mid, timeout=5.0, retry_delay=0)
if self.game.current_question["type"] == TriviaSession.TYPE_MULTIPLE_CHOICE:
# Iterate the reactions array until all the options are accounted for
for _, reaction in zip(self.game.current_question["answers"], self.TRIVIA_NUMBER_EMOJIS):
await msg.react(reaction)
else:
await msg.react("white_check_mark")
await msg.react("x")
await msg.react("trophy")
await msg.react("fast_forward")
# Now edit the message to show the actual question contents
await msg.edit(formatted_question)
self.question_msg = msg
except TimeoutError:
await self.chat.send_message("Critical: TimeoutError while trying to get message! Ending game!")
await self.end()
async def send_scores(self):
"""
Send the scoreboard to the chat.
"""
async with self.lock:
self.refresh_timeout()
scores = order_scores(self.game.scores)
if not scores:
await self.chat.send_message("No scores at the moment. Scores are only recorded after you answer a question.", self.msg_creator)
return
# The \\ before the . is to make it not a valid markdown list
# because you can't skip numbers in markdown lists in Ryver
resp = "\n".join(f"{rank}\\. **{', '.join(self.get_user_name(player) for player in players)}** with a score of {score}!" for rank, (players, score) in scores.items())
await self.chat.send_message(resp, self.msg_creator)
async def answer(self, answer: int, user: int):
"""
Answer the current question.
"""
async with self.lock:
self.refresh_timeout()
points = self.TRIVIA_POINTS[self.game.current_question["difficulty"]]
name = self.get_user_name(user)
if self.game.answer(answer, user, points):
await self.chat.send_message(f"Correct answer! **{name}** earned {points} points!", self.msg_creator)
else:
await self.chat.send_message(f"Wrong answer! The correct answer was option number {self.game.current_question['correct_answer'] + 1}. **{name}** did not get any points for that.", self.msg_creator)
async def end(self):
"""
End the game and clean up.
"""
async with self.lock:
self.ended = True
if self.timeout_task_handle is not None:
self.timeout_task_handle.cancel()
await self.game.end()
def get_user_name(self, user_id: int) -> str:
"""
Get the name of a user specified by ID.
"""
user = self.chat.get_ryver().get_user(id=user_id)
return user.get_name() if user is not None else "Unknown User"
@classmethod
def format_question(cls, question: typing.Dict[str, typing.Any]) -> str:
"""
Format a trivia question.
"""
result = f":grey_question: Category: *{question['category']}*, Difficulty: **{question['difficulty']}**\n\n"
if question['type'] == TriviaSession.TYPE_TRUE_OR_FALSE:
result += "True or False: "
result += question["question"]
answers = "\n".join(f"{i + 1}. {answer}" for i, answer in enumerate(question["answers"]))
result += "\n" + answers
return result
| [
37811,
198,
5842,
274,
262,
4946,
7563,
8869,
24047,
3740,
1378,
404,
298,
9945,
13,
785,
14,
329,
257,
983,
286,
44782,
13,
198,
37811,
198,
198,
11748,
257,
952,
4023,
198,
11748,
30351,
952,
198,
11748,
27711,
198,
11748,
12972,
56... | 2.308518 | 7,643 |
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
MOUSE_EVENT_TYPE_MOUSE_PRESSED = 'mousePressed'
MOUSE_EVENT_TYPE_MOUSE_DRAGGED = 'mouseDragged'
MOUSE_EVENT_TYPE_MOUSE_RELEASED = 'mouseReleased'
MOUSE_EVENT_TYPE_MOUSE_MOVED = 'mouseMoved'
MOUSE_EVENT_TYPE_MOUSE_ENTERED = 'mouseEntered'
MOUSE_EVENT_TYPE_MOUSE_EXITED = 'mouseExited'
MOUSE_EVENT_TYPE_MOUSE_WHEEL = 'mouseWheel'
MOUSE_EVENT_BUTTON_NONE = 'none'
MOUSE_EVENT_BUTTON_LEFT = 'left'
MOUSE_EVENT_BUTTON_RIGHT = 'right'
MOUSE_EVENT_BUTTON_MIDDLE = 'middle'
MOUSE_EVENT_BUTTON_BACK = 'back'
MOUSE_EVENT_BUTTON_FORWARD = 'forward'
MOUSE_EVENT_WHEEL_DIRECTION_NONE = 'none'
MOUSE_EVENT_WHEEL_DIRECTION_UP = 'up'
MOUSE_EVENT_WHEEL_DIRECTION_DOWN = 'down'
MOUSE_EVENT_WHEEL_DIRECTION_LEFT = 'left'
MOUSE_EVENT_WHEEL_DIRECTION_RIGHT = 'right'
class UIDevTools(object):
"""This class is mainly used to interact with native UI
For more info see the Desktop UI benchmark documentation in the link below
https://chromium.googlesource.com/chromium/src/+/master/docs/speed/benchmark/harnesses/desktop_ui.md
"""
# pylint: disable=redefined-builtin
| [
2,
15069,
33448,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
24290,
2393,
13,
198,
198,
44,
... | 2.426587 | 504 |
import pystache
from redwind.models import Post
from sqlalchemy import desc
import isodate
FEED_TEMPLATE = """
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>{{{title}}}</title>
<style>
body {
font-family: sans-serif;
max-width:800px;
}
h1,h2,h3,h4 {
font-size: 1em;
}
li {
list-style: none;
}
.p-category {
list-style: none;
border: 1px solid #ddd;
border-radius:2px;
display: inline;
padding: 2px;
margin: 5px;
}
.dt-published {
margin-top:1em;
}
</style>
</head>
<body class="h-feed">
<h1 class="p-name">{{{title}}}</h1>
<ul>
{{#bookmarks}}
<li class="h-entry">
<h2 class="p-bookmark h-cite"><a href="{{bookmark}}">{{title}}</a></h2>
{{#content}}<div class="e-content">{{{.}}}</div>{{/content}}
{{#categories}}<span class="p-category">{{.}}</span>{{/categories}}
<div class="dt-published">{{published}}</div>
</li>
{{/bookmarks}}
</ul>
</body>
</html>
"""
blob = {
'title': 'Kylewm’s Bookmarks',
'bookmarks': []
}
for bmark in Post.query.filter_by(post_type='bookmark').order_by(desc(Post.published)).all():
blob['bookmarks'].append({
'title': bmark.bookmark_contexts[0].title,
'bookmark': bmark.bookmark_of[0],
'content': bmark.content_html,
'categories': [t.name for t in bmark.tags],
'published': isodate.datetime_isoformat(bmark.published),
})
print(pystache.render(FEED_TEMPLATE, blob))
| [
11748,
12972,
301,
4891,
198,
6738,
2266,
7972,
13,
27530,
1330,
2947,
198,
6738,
44161,
282,
26599,
1330,
1715,
198,
11748,
318,
375,
378,
198,
198,
15112,
1961,
62,
51,
3620,
6489,
6158,
796,
37227,
198,
27,
0,
18227,
4177,
56,
1140... | 2.189427 | 681 |
import pytest
from doc.names_title import file_name_to_title_name
| [
11748,
12972,
9288,
198,
6738,
2205,
13,
14933,
62,
7839,
1330,
2393,
62,
3672,
62,
1462,
62,
7839,
62,
3672,
628
] | 3.190476 | 21 |
import pickle
import numpy as np
import pandas as pd
import statsmodels.api as sm
| [
11748,
2298,
293,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
9756,
27530,
13,
15042,
355,
895,
628,
628,
628,
628,
628
] | 3.137931 | 29 |
# tracks taken from geneset comparison
# need to integrated
from RnaseqReport import *
##########################################################################
##########################################################################
##########################################################################
# Coverage of transcript models
##########################################################################
class ContaminationCoverage(AnnotationsAssociated):
"""Check for contamination by listing transcript models with reads ."""
pattern = "(.*)_coverage$"
mColumns = "count(*) as total, SUM(CASE WHEN nmatches > 1 THEN 1 ELSE 0 END) AS hico"
mTable = "coverage"
##########################################################################
##########################################################################
##########################################################################
# Coverage of transcript models
##########################################################################
class PolyATailCounts(AnnotationsAssociated):
"""Check for contamination by listing transcript models with reads ."""
pattern = "(.*)_polyA$"
mColumns = "COUNT(*) AS total, SUM(nmotifs) AS motifs, SUM(tails) AS tails"
mTable = "polyA"
##########################################################################
##########################################################################
##########################################################################
# Contamination and repeats
##########################################################################
class ContaminationRepeats(TrackerSQL):
"""Estimate contamination based on the overlap with repeats.
repeats
number of bases in repeats
genome
number of base is genome
prepeats
proportion of bases in repeats
repeat_overlap
number of bases in unknown transcript models overlapping repeats
length
number of bases in unknown transcript models
poverlap
proportion of bases in unknown transcript models overlapping repeats
nspliced_ovl
number of unknown transcript models with introns that overlap repeats
nspliced_ovl
number of unknown transcript models with introns
pspliced
proportion of unknown transcript models with introns that overlap
repeats
"""
pattern = "(.*)_repeats$"
| [
2,
8339,
2077,
422,
10812,
316,
7208,
198,
2,
761,
284,
11521,
198,
198,
6738,
371,
77,
589,
80,
19100,
1330,
1635,
628,
198,
29113,
29113,
7804,
2235,
198,
29113,
29113,
7804,
2235,
198,
29113,
29113,
7804,
2235,
198,
2,
33998,
286,
... | 4.559546 | 529 |
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
with open("VERSION", "r") as fh:
version = fh.read().strip()
setup(
name='slides-sound',
version=version,
url='http://github.com/jpfxgood/slides-sound',
author="James Goodwin",
author_email="slides-soundc@jlgoodwin.com",
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
description='Tools for creating slideshows from images with generated music',
long_description_content_type='text/markdown',
long_description=long_description,
license = 'MIT',
keywords= [
'music',
'slides',
'images',
],
install_requires=[
'Pillow',
'pyfluidsynth',
'pyaudio',
],
scripts=[
'scripts/contact_sheet',
'scripts/list_soundfont',
'scripts/music',
'scripts/rotate_resize',
'scripts/slides',
],
packages=[
'slides_sound',
],
python_requires='>=3.6',
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
4943,
355,
277,
71,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
71,
13,
961,
3419,
198,
198,
4480,
1280,
7203,
43717,
1600... | 2.322981 | 483 |
"""
Utility functions for the gym-idsgame environment
"""
from typing import Union, List
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import io
import cv2
#from gym_idsgame.envs.dao.idsgame_config import IdsGameConfig
#from gym_idsgame.envs.dao.game_config import GameConfig
#from gym_idsgame.envs.dao.game_state import GameState
from gym_idsgame.envs.dao.node_type import NodeType
#from gym_idsgame.envs.dao.network_config import NetworkConfig
# def validate_config(idsgame_config: IdsGameConfig) -> None:
# """
# Validates the configuration for the environment
#
# :param idsgame_config: the config to validate
# :return: None
# """
# if idsgame_config.game_config.num_layers < 0:
# raise AssertionError("The number of layers cannot be less than 0")
# if idsgame_config.game_config.num_attack_types < 1:
# raise AssertionError("The number of attack types cannot be less than 1")
# if idsgame_config.game_config.max_value < 1:
# raise AssertionError("The max attack/defense value cannot be less than 1")
def is_defense_id_legal(defense_id: int, game_config, state) -> bool:
"""
Check if a given defense is legal or not.
:param defense_id: the defense to verify
:param game_config: the game config
:param state: the game state
:return: True if legal otherwise False
"""
server_id, server_pos, defense_type = interpret_defense_action(defense_id, game_config)
if defense_type < game_config.num_attack_types:
if state.defense_values[server_id][defense_type] >= game_config.max_value:
return False
if defense_type >= game_config.num_attack_types:
if state.defense_det[server_id] >= game_config.max_value:
return False
if (game_config.network_config.node_list[server_id] == NodeType.SERVER.value
or game_config.network_config.node_list[server_id] == NodeType.DATA.value):
return True
return False
def is_attack_legal(target_pos: Union[int, int], attacker_pos: Union[int, int], network_config,
past_positions: List[int] = None) -> bool:
"""
Checks whether an attack is legal. That is, can the attacker reach the target node from its current
position in 1 step given the network configuration?
:param attacker_pos: the position of the attacker
:param target_pos: the position of the target node
:param network_config: the network configuration
:param past_positions: if not None, used to check whether the agent is in a periodic policy, e.g. a circle.
:return: True if the attack is legal, otherwise False
"""
if target_pos == attacker_pos:
return False
target_row, target_col = target_pos
attacker_row, attacker_col = attacker_pos
if target_row > attacker_row:
return False
# if past_positions is not None and len(past_positions) >=2:
# if target_pos in past_positions[-3:]:
# return False
attacker_adjacency_matrix_id = attacker_row * network_config.num_cols + attacker_col
target_adjacency_matrix_id = target_row * network_config.num_cols + target_col
return network_config.adjacency_matrix[attacker_adjacency_matrix_id][target_adjacency_matrix_id] == int(1)
def is_attack_legal(target_pos: Union[int, int], attacker_pos: Union[int, int], network_config,
past_positions: List[int] = None) -> bool:
"""
Checks whether an attack is legal. That is, can the attacker reach the target node from its current
position in 1 step given the network configuration?
:param attacker_pos: the position of the attacker
:param target_pos: the position of the target node
:param network_config: the network configuration
:param past_positions: if not None, used to check whether the agent is in a periodic policy, e.g. a circle.
:return: True if the attack is legal, otherwise False
"""
if target_pos == attacker_pos:
return False
target_row, target_col = target_pos
attacker_row, attacker_col = attacker_pos
if target_row > attacker_row:
return False
# if past_positions is not None and len(past_positions) >=2:
# if target_pos in past_positions[-3:]:
# return False
attacker_adjacency_matrix_id = attacker_row * network_config.num_cols + attacker_col
target_adjacency_matrix_id = target_row * network_config.num_cols + target_col
return network_config.adjacency_matrix[attacker_adjacency_matrix_id][target_adjacency_matrix_id] == int(1)
def is_attack_id_legal(attack_id: int, game_config, attacker_pos: Union[int, int], game_state,
past_positions: List[int] = None, past_reconnaissance_activities: List = None) -> bool:
"""
Check if a given attack is legal or not.
:param attack_id: the attack to verify
:param game_config: game configuration
:param attacker_pos: the current position of the attacker
:param game_state: the game state
:param past_positions: if not None, used to check whether the agent is in a periodic policy, e.g. a circle.
:return: True if legal otherwise False
"""
server_id, server_pos, attack_type, reconnaissance = interpret_attack_action(attack_id, game_config)
if not reconnaissance:
if game_state.attack_values[server_id][attack_type] >= game_config.max_value:
return False
# if reconnaissance and past_reconnaissance_activities is not None:
# for rec_act in past_reconnaissance_activities[-5:]:
# node_id, rec_type = rec_act
# if node_id == server_id and rec_type == attack_type:
# #print("illegal rec type, past:{}".format(past_reconnaissance_activities))
# return False
return is_attack_legal(server_pos, attacker_pos, game_config.network_config, past_positions)
def interpret_attack_action(action: int, game_config) -> Union[int, Union[int, int], int, bool]:
"""
Utility method for interpreting the given attack action, converting it into server_id,pos,type
:param action: the attack action-id
:param game_config: game configuration
:return: server-id, server-position, attack-type
"""
if not game_config.reconnaissance_actions:
server_id = action // game_config.num_attack_types
else:
server_id = action // (game_config.num_attack_types +1)
#server_id = action // (game_config.num_attack_types*2)
server_pos = game_config.network_config.get_node_pos(server_id)
attack_type = get_attack_type(action, game_config)
reconnaissance = attack_type >= game_config.num_attack_types
if reconnaissance:
attack_type = attack_type - game_config.num_attack_types
#print("server:{},pos:{},a_type:{},rec:{}".format(server_id, server_pos, attack_type, reconnaissance))
return server_id, server_pos, attack_type, reconnaissance
def interpret_defense_action(action: int, game_config) -> Union[int, Union[int, int], int]:
"""
Utility method for interpreting the given action, converting it into server_id,pos,type
:param action: the attack action-id
:param game_config: game configuration
:return: server-id, server-position, attack-type
"""
server_id = action // (game_config.num_attack_types+1) # +1 for detection type attack
server_pos = game_config.network_config.get_node_pos(server_id)
defense_type = get_defense_type(action, game_config)
return server_id, server_pos, defense_type
def get_attack_action_id(server_id, attack_type, game_config):
"""
Gets the attack action id from a given server position, attack_type, and game config
:param server_id: id of the server
:param attack_type: attack type
:param game_config: game config
:return: attack id
"""
if not game_config.reconnaissance_actions:
action_id = server_id * game_config.num_attack_types + attack_type
else:
action_id = server_id * (game_config.num_attack_types+1) + attack_type
return action_id
def get_defense_action_id(server_id, defense_type, game_config):
"""
Gets the defense action id from a given server position, defense_type, and game config
:param server_id: id of the server
:param defense_type: defense type
:param game_config: game config
:return: attack id
"""
action_id = server_id * (game_config.num_attack_types+1) + defense_type
return action_id
def get_attack_type(action: int, game_config) -> int:
"""
Utility method for getting the attack type of action-id
:param action: action-id
:param game_config: game configuration
:return: action type
"""
if not game_config.reconnaissance_actions:
attack_defense_type = action % game_config.num_attack_types
else:
attack_defense_type = action % (game_config.num_attack_types+1)
return attack_defense_type
def get_defense_type(action: int, game_config) -> int:
"""
Utility method for getting the defense type of action-id
:param action: action-id
:param game_config: game configuration
:return: action type
"""
defense_type = action % (game_config.num_attack_types+1) # +1 for detection
return defense_type
def get_img_from_fig(fig, dpi=180):
"""
Convert matplotlib fig to numpy array
:param fig: fig to convert
:param dpi: dpi of conversion
:return: np array of the image
"""
buf = io.BytesIO()
fig.savefig(buf, format="png", dpi=dpi)
buf.seek(0)
img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8)
buf.close()
img = cv2.imdecode(img_arr, 1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
def action_dist_hist(data: np.ndarray,
title: str = "Test", xlabel: str = "test", ylabel: str = "test",
file_name: str = "test.eps", xlims: Union[float, float] = None) -> np.ndarray:
"""
Plot a distribution of the policy
:param data: the data to plot
:param title: title of the plot
:param xlabel: xlabel
:param ylabel: ylabel
:param file_name: path where to save file
:param xlims: xlimits (optional)
:return: numpy array of the figure
"""
plt.rc('text', usetex=True)
plt.rc('text.latex', preamble=r'\usepackage{amsfonts}')
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 3))
if xlims is None:
xlims = (min(data),
max(data))
sns.distplot(data, kde=True,
color = 'darkblue',
hist_kws={'edgecolor':'black'},
kde_kws={'linewidth': 0.5}, bins=xlims[1], fit=None)
ax.set_xlim(xlims)
ax.set_xticks(list(range(xlims[1]+1)))
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
# set the grid on
ax.grid('on')
# tweak the axis labels
xlab = ax.xaxis.get_label()
ylab = ax.yaxis.get_label()
xlab.set_size(10)
ylab.set_size(10)
# change the color of the top and right spines to opaque gray
ax.spines['right'].set_color((.8, .8, .8))
ax.spines['top'].set_color((.8, .8, .8))
fig.tight_layout()
fig.savefig(file_name + ".png", format="png", dpi=600)
fig.savefig(file_name + ".pdf", format='pdf', dpi=600, bbox_inches='tight', transparent=True)
data = get_img_from_fig(fig, dpi=100)
plt.close(fig)
return data
| [
37811,
198,
18274,
879,
5499,
329,
262,
11550,
12,
2340,
6057,
2858,
198,
37811,
198,
6738,
19720,
1330,
4479,
11,
7343,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
384,
... | 2.653945 | 4,297 |
from enum import Enum
| [
6738,
33829,
1330,
2039,
388,
628
] | 3.833333 | 6 |
"""Script for finetuning and evaluating pre-trained ChemBERTa models on MoleculeNet tasks.
[classification]
python finetune.py --datasets=bbbp --pretrained_model_name_or_path=DeepChem/ChemBERTa-SM-015
[regression]
python finetune.py --datasets=delaney --pretrained_model_name_or_path=DeepChem/ChemBERTa-SM-015
[csv]
python finetune.py --datasets=$HOME/finetune_datasets/logd/ \
--dataset_types=regression \
--pretrained_model_name_or_path=DeepChem/ChemBERTa-SM-015 \
--is_molnet=False
[multiple]
python finetune.py \
--datasets=bace_classification,bace_regression,bbbp,clearance,clintox,delaney,lipo,tox21 \
--pretrained_model_name_or_path=DeepChem/ChemBERTa-SM-015 \
--n_trials=20 \
--output_dir=finetuning_experiments \
--run_name=sm_015
[from scratch (no pretraining)]
python finetune.py --datasets=bbbp
"""
import json
import os
import shutil
from collections import OrderedDict
from dataclasses import dataclass
from glob import glob
from typing import List
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
from absl import app, flags
from scipy.special import softmax
from scipy.stats import pearsonr
from sklearn.metrics import (
average_precision_score,
matthews_corrcoef,
mean_squared_error,
roc_auc_score,
)
from transformers import RobertaConfig, RobertaTokenizerFast, Trainer, TrainingArguments
from transformers.trainer_callback import EarlyStoppingCallback
from chemberta.utils.molnet_dataloader import get_dataset_info, load_molnet_dataset
from chemberta.utils.roberta_regression import (
RobertaForRegression,
RobertaForSequenceClassification,
)
FLAGS = flags.FLAGS
# Settings
flags.DEFINE_string(name="output_dir", default="default_dir", help="")
flags.DEFINE_boolean(name="overwrite_output_dir", default=True, help="")
flags.DEFINE_string(name="run_name", default="default_run", help="")
flags.DEFINE_integer(name="seed", default=0, help="Global random seed.")
# Model params
flags.DEFINE_string(
name="pretrained_model_name_or_path",
default=None,
help="Arg to HuggingFace model.from_pretrained(). Can be either a path to a local model or a model ID on HuggingFace Model Hub. If not given, trains a fresh model from scratch (non-pretrained).",
)
flags.DEFINE_boolean(
name="is_molnet",
default=True,
help="If true, assumes all dataset are MolNet datasets.",
)
# RobertaConfig params (only for non-pretrained models)
flags.DEFINE_integer(name="vocab_size", default=600, help="")
flags.DEFINE_integer(name="max_position_embeddings", default=515, help="")
flags.DEFINE_integer(name="num_attention_heads", default=6, help="")
flags.DEFINE_integer(name="num_hidden_layers", default=6, help="")
flags.DEFINE_integer(name="type_vocab_size", default=1, help="")
# Train params
flags.DEFINE_integer(name="logging_steps", default=10, help="")
flags.DEFINE_integer(name="early_stopping_patience", default=5, help="")
flags.DEFINE_integer(name="num_train_epochs_max", default=10, help="")
flags.DEFINE_integer(name="per_device_train_batch_size", default=64, help="")
flags.DEFINE_integer(name="per_device_eval_batch_size", default=64, help="")
flags.DEFINE_integer(
name="n_trials",
default=5,
help="Number of different hyperparameter combinations to try. Each combination will result in a different finetuned model.",
)
flags.DEFINE_integer(
name="n_seeds",
default=5,
help="Number of unique random seeds to try. This only applies to the final best model selected after hyperparameter tuning.",
)
# Dataset params
flags.DEFINE_list(
name="datasets",
default=None,
help="Comma-separated list of MoleculeNet dataset names.",
)
flags.DEFINE_string(
name="split", default="scaffold", help="DeepChem data loader split_type."
)
flags.DEFINE_list(
name="dataset_types",
default=None,
help="List of dataset types (ex: classification,regression). Include 1 per dataset, not necessary for MoleculeNet datasets.",
)
# Tokenizer params
flags.DEFINE_string(
name="tokenizer_path",
default="seyonec/SMILES_tokenized_PubChem_shard00_160k",
help="",
)
flags.DEFINE_integer(name="max_tokenizer_len", default=512, help="")
flags.mark_flag_as_required("datasets")
os.environ["TOKENIZERS_PARALLELISM"] = "false"
os.environ["WANDB_DISABLED"] = "true"
def prune_state_dict(model_dir):
"""Remove problematic keys from state dictionary"""
if not (model_dir and os.path.exists(os.path.join(model_dir, "pytorch_model.bin"))):
return None
state_dict_path = os.path.join(model_dir, "pytorch_model.bin")
assert os.path.exists(
state_dict_path
), f"No `pytorch_model.bin` file found in {model_dir}"
loaded_state_dict = torch.load(state_dict_path)
state_keys = loaded_state_dict.keys()
keys_to_remove = [
k for k in state_keys if k.startswith("regression") or k.startswith("norm")
]
new_state_dict = OrderedDict({**loaded_state_dict})
for k in keys_to_remove:
del new_state_dict[k]
return new_state_dict
@dataclass
if __name__ == "__main__":
app.run(main)
| [
37811,
7391,
329,
957,
316,
46493,
290,
22232,
662,
12,
35311,
12870,
13246,
38586,
4981,
319,
25726,
23172,
7934,
8861,
13,
198,
198,
58,
4871,
2649,
60,
198,
29412,
957,
316,
1726,
13,
9078,
1377,
19608,
292,
1039,
28,
11848,
46583,
... | 2.751731 | 1,877 |
import ply.lex as lex
import ply.yacc as yacc
# List of tokens
tokens = (
'COMPLETE',
'PRIORITY',
'DATE',
'LIST',
'PARENT',
'COLOR',
'DUE',
'DESC'
)
t_DESC = r'[a-zA-Z_]+'
t_COMPLETE = r'x'
t_PRIORITY = r'\([a-zA-Z]\)'
t_DATE = '(\d+[-/]\d+[-/]\d+)'
t_LIST = r'\@[a-zA-Z_]+'
t_PARENT = r'\+[a-zA-Z]+'
t_COLOR = r'color:[^{}]'
t_DUE = r'due:\d+[-/]\d+[-/]\d+'
# Error handling rule
# Build the lexer
lexer = lex.lex()
data = '(C) this is a test task @test'
# Give the lexer some input
lexer.input(data)
# Tokenize
while True:
tok = lexer.token()
if not tok:
break # No more input
print tok
| [
11748,
35960,
13,
2588,
355,
31191,
198,
11748,
35960,
13,
88,
4134,
355,
331,
4134,
198,
198,
2,
7343,
286,
16326,
198,
83,
482,
641,
796,
357,
198,
220,
220,
220,
705,
41335,
9328,
3256,
198,
220,
220,
220,
705,
4805,
41254,
9050,... | 1.81016 | 374 |
import distributors.models
from swarm.workers.worker import Worker
| [
11748,
32612,
13,
27530,
198,
198,
6738,
30077,
13,
22896,
13,
28816,
1330,
35412,
628
] | 4.6 | 15 |
"""
MSX SDK
MSX SDK client. # noqa: E501
The version of the OpenAPI document: 1.0.9
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import python_msx_sdk
from python_msx_sdk.model.workflow_metadata_git_info import WorkflowMetadataGitInfo
globals()['WorkflowMetadataGitInfo'] = WorkflowMetadataGitInfo
from python_msx_sdk.model.workflow_metadata import WorkflowMetadata
class TestWorkflowMetadata(unittest.TestCase):
"""WorkflowMetadata unit test stubs"""
def testWorkflowMetadata(self):
"""Test WorkflowMetadata"""
# FIXME: construct object with mandatory attributes with example values
# model = WorkflowMetadata() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
37811,
198,
220,
220,
220,
6579,
55,
26144,
628,
220,
220,
220,
6579,
55,
26144,
5456,
13,
220,
1303,
645,
20402,
25,
412,
33548,
628,
220,
220,
220,
383,
2196,
286,
262,
4946,
17614,
3188,
25,
352,
13,
15,
13,
24,
198,
220,
220,
... | 2.745583 | 283 |
# coding: utf-8
import os
from distutils.version import StrictVersion
from os.path import dirname, realpath, join
import django
from django.utils import six
DEBUG = True
ALLOWED_HOSTS = ['*']
SECRET_KEY = 'dummy'
ROOT_URLCONF = 'testsite.urls'
SITE_ROOT = dirname(realpath(__file__))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
# Using :memory: SQLIte DB cause a Segmentation Fault on Travis-CI server
# To prevent such error, we prefer using a true file-based DB.
# Another solution would be to use Travis Postgres or Mysql instance, but in that case
# we would need a different settings between local test environment and CI server one.
'NAME': os.path.join(SITE_ROOT, 'modernrpc-test.db'),
'TEST': {'NAME': os.path.join(SITE_ROOT, 'modernrpc-test.db')}
},
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.contenttypes',
'modernrpc',
'testsite.rpc_methods_stub',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
'loaders': [
'django.template.loaders.app_directories.Loader',
],
},
},
]
MIDDLEWARE = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
# This allows the tests to be run against Django 1.8 to current
# MIDDLEWARE_CLASSES is deprecated since Django 1.10, and is completely
# removed from Django 2.0
if StrictVersion(django.get_version()) < StrictVersion('1.10'):
MIDDLEWARE_CLASSES = MIDDLEWARE
MEDIA_ROOT = ''
MEDIA_URL = '/'
STATIC_ROOT = ''
STATIC_URL = '/'
MODERNRPC_METHODS_MODULES = [
'testsite.rpc_methods_stub.generic',
'testsite.rpc_methods_stub.specific_types',
'testsite.rpc_methods_stub.specific_protocol',
'testsite.rpc_methods_stub.with_authentication',
]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
'default': {
'format': "[%(asctime)s] %(levelname)s [%(filename)s:%(funcName)s:%(lineno)s] %(message)s",
'datefmt': "%Y-%m-%d %H:%M:%S"
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
'nullhandler': {
'level': 'INFO',
'class': 'logging.NullHandler',
},
'logfile': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': join(SITE_ROOT, 'modernrpc.log'),
'maxBytes': 1024 * 1024 * 1, # 1 MB
'formatter': 'default',
'backupCount': 5,
# In Django 1.11, without this attribute, a warning is thrown at server startup
# Use `python -Wall ./manage.py runserver` to see warnings
# See https://stackoverflow.com/a/30684667/1887976 for more info
'delay': True,
},
},
'loggers': {
# Default modernrpc logger. Will collect test execution logs into modernrpc/tests/testsite/modernrpc.log
'modernrpc': {
'handlers': ['logfile'],
'level': 'INFO',
'propagate': False,
},
# test_logging.py will execute some tests on logging utilities (get_modernrpc_logger() and
# logger_has_handlers()). These dummy loggers are declared here
# See test_logging.py
'my_app': {
'handlers': ['console'],
'level': 'WARNING',
'propagate': True,
},
'my_app.a': {
'handlers': [],
'level': 'INFO',
'propagate': False,
},
}
}
if six.PY2:
MODERNRPC_METHODS_MODULES.append('testsite.rpc_methods_stub.python2_specific')
MODERNRPC_PY2_STR_TYPE = str
MODERNRPC_LOG_EXCEPTIONS = False
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
11748,
28686,
198,
6738,
1233,
26791,
13,
9641,
1330,
520,
2012,
14815,
198,
6738,
28686,
13,
6978,
1330,
26672,
3672,
11,
1103,
6978,
11,
4654,
198,
198,
11748,
42625,
14208,
198,
6738,
42625,
142... | 2.12842 | 2,266 |
# Generated by Django 2.1 on 2018-08-30 18:46
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
319,
2864,
12,
2919,
12,
1270,
1248,
25,
3510,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.966667 | 30 |
"""Metadata generation logic for source distributions.
"""
import os
from stickybeak.vendored.pip._internal.utils.subprocess import runner_with_spinner_message
from stickybeak.vendored.pip._internal.utils.temp_dir import TempDirectory
from stickybeak.vendored.pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from stickybeak.vendored.pip._internal.build_env import BuildEnvironment
from stickybeak.vendored.pip._vendor.pep517.wrappers import Pep517HookCaller
def generate_metadata(build_env, backend):
# type: (BuildEnvironment, Pep517HookCaller) -> str
"""Generate metadata using mechanisms described in PEP 517.
Returns the generated metadata directory.
"""
metadata_tmpdir = TempDirectory(
kind="modern-metadata", globally_managed=True
)
metadata_dir = metadata_tmpdir.path
with build_env:
# Note that Pep517HookCaller implements a fallback for
# prepare_metadata_for_build_wheel, so we don't have to
# consider the possibility that this hook doesn't exist.
runner = runner_with_spinner_message("Preparing wheel metadata")
with backend.subprocess_runner(runner):
distinfo_dir = backend.prepare_metadata_for_build_wheel(
metadata_dir
)
return os.path.join(metadata_dir, distinfo_dir)
| [
37811,
9171,
14706,
5270,
9156,
329,
2723,
24570,
13,
198,
37811,
198,
198,
11748,
28686,
198,
198,
6738,
23408,
1350,
461,
13,
85,
437,
1850,
13,
79,
541,
13557,
32538,
13,
26791,
13,
7266,
14681,
1330,
17490,
62,
4480,
62,
2777,
508... | 2.820833 | 480 |
import mathutils
from src.main.Provider import Provider
from src.provider.sampler.Sphere import Sphere
class PartSphere(Provider):
"""
Samples a point from the surface or from the interior of solid sphere which is split in two parts.
https://math.stackexchange.com/a/87238
https://math.stackexchange.com/a/1585996
Example 1: Sample a point from the surface of the sphere that is split by a plane with displacement of 0.5
above center and a normal of [1, 0, 0].
.. code-block:: yaml
{
"provider":"sampler.PartSphere",
"center": [0, 0, 0],
"part_sphere_vector": [1, 0, 0],
"mode": "SURFACE",
"distance_above_center": 0.5
}
**Configuration**:
.. list-table::
:widths: 25 100 10
:header-rows: 1
* - Parameter
- Description
- Type
* - center
- Location of the center of the sphere.
- mathutils.Vector
* - radius
- The radius of the sphere. dius of the sphere. Type: float
- float
* - mode
- Mode of sampling. Determines the geometrical structure used for sampling. Available: SURFACE (sampling
from the 2-sphere), INTERIOR (sampling from the 3-ball).
- string
* - distance_above_center
- The distance above the center, which should be used. Default: 0.0 (half of the sphere).
- float
* - part_sphere_vector
- The direction in which the sphere should be split, the end point of the vector, will be in the middle of
the sphere pointing towards the middle of the resulting surface. Default: [0, 0, 1].
- mathutils.Vector
"""
def run(self):
"""
:param config: A configuration object containing the parameters necessary to sample.
:return: A random point lying inside or on the surface of a solid sphere. Type: mathutils.Vector
"""
# Center of the sphere.
center = mathutils.Vector(self.config.get_list("center"))
# Radius of the sphere.
radius = self.config.get_float("radius")
# Mode of operation.
mode = self.config.get_string("mode")
dist_above_center = self.config.get_float("distance_above_center", 0.0)
part_sphere_dir_vector = self.config.get_vector3d("part_sphere_vector", [0, 0, 1])
part_sphere_dir_vector.normalize()
if dist_above_center >= radius:
raise Exception("The dist_above_center value is bigger or as big as the radius!")
while True:
location = Sphere.sample(center, radius, mode)
# project the location onto the part_sphere_dir_vector and get the length
loc_in_sphere = location - center
length = loc_in_sphere.dot(part_sphere_dir_vector)
if length > dist_above_center:
return location
| [
11748,
10688,
26791,
198,
198,
6738,
12351,
13,
12417,
13,
29495,
1330,
32549,
198,
6738,
12351,
13,
15234,
1304,
13,
37687,
20053,
13,
38882,
1330,
31798,
628,
198,
4871,
2142,
38882,
7,
29495,
2599,
198,
220,
220,
220,
37227,
198,
220... | 2.457741 | 1,195 |
##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
## ##
## This file forms part of the Underworld geophysics modelling application. ##
## ##
## For full license and copyright information, please refer to the LICENSE.md file ##
## located at the project root, or contact the authors. ##
## ##
##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
import underworld as uw
import underworld._stgermain as _stgermain
class EqNumber(_stgermain.StgClass):
"""
The SolutionVector manages the numerical solution vectors used by Underworld's equation systems.
Interface between meshVariables and systems.
Parameters
----------
meshVariable : uw.mesh.MeshVariable
MeshVariable object for which this equation numbering corresponds.
removeBCs : Bool, optional
Determines if the MeshVariable's boundary conditions are included in the ordering.
Hence it effects the size of the matrix or vector and the algorithm used to handle boundary conditions.
Example
-------
>>> linearMesh = uw.mesh.FeMesh_Cartesian( elementType='Q1/dQ0', elementRes=(4,4), minCoord=(0.,0.), maxCoord=(1.,1.) )
>>> tField = uw.mesh.MeshVariable( linearMesh, 1 )
>>> teqNum = uw.systems.sle.EqNumber( tField )
"""
@property
| [
2235,
93,
2,
93,
2,
93,
2,
93,
2,
93,
2,
93,
2,
93,
2,
93,
2,
93,
2,
93,
2,
93,
2,
93,
2,
93,
2,
93,
2,
93,
2,
93,
2,
93,
2,
93,
2,
93,
2,
93,
2,
93,
2,
93,
2,
93,
2,
93,
2,
93,
2,
93,
2,
93,
2,
93,
2,
... | 2.00123 | 813 |
from PyQt5.QtWidgets import QHBoxLayout, QLabel, QPushButton, QVBoxLayout, QWidget, QTableWidget,QTableWidgetItem, QMessageBox
import pandas as pd
import os
| [
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1195,
39,
14253,
32517,
11,
1195,
33986,
11,
1195,
49222,
21864,
11,
1195,
53,
14253,
32517,
11,
1195,
38300,
11,
1195,
10962,
38300,
11,
48,
10962,
38300,
7449,
11,
1195,
12... | 2.537313 | 67 |
from tests.system.action.base import BaseActionTestCase
| [
6738,
5254,
13,
10057,
13,
2673,
13,
8692,
1330,
7308,
12502,
14402,
20448,
628
] | 4.071429 | 14 |
from DSAs.sorting.util import swap, key_and_reverse
@key_and_reverse()
| [
6738,
17400,
1722,
13,
82,
24707,
13,
22602,
1330,
16075,
11,
1994,
62,
392,
62,
50188,
628,
198,
198,
31,
2539,
62,
392,
62,
50188,
3419,
198
] | 2.740741 | 27 |
import matplotlib
from matplotlib import pyplot as plt
import csv
import json
# Function to calculate server CPU values
# Function to calculate average response time values
# Get X and Y axises for each setup
base_cpu_x, base_cpu_y = get_resource_usage("base", "cpu")
base_ram_x, base_ram_y = get_resource_usage("base", "ram")
base_arp_x, base_arp_y = get_response_time("base")
live_cpu_x, live_cpu_y = get_resource_usage("live", "cpu")
live_ram_x, live_ram_y = get_resource_usage("live", "ram")
live_arp_x, live_arp_y = get_response_time("live")
historical_cpu_x, historical_cpu_y = get_resource_usage("historical", "cpu")
historical_ram_x, historical_ram_y = get_resource_usage("historical", "ram")
historical_arp_x, historical_arp_y = get_response_time("historical")
# Create subplots
plt.rcParams.update({'font.size': 16})
fig, ((ax1, ax2, ax3)) = plt.subplots(nrows=1, ncols=3)
lw = 3
ax1.plot(base_cpu_x, base_cpu_y, marker='o', markersize=3.5, label="Planned schedules", linewidth=lw)
ax1.plot(live_cpu_x, live_cpu_y, marker='o', markersize=3.5, label="Live updates", linewidth=lw)
ax1.plot(historical_cpu_x, historical_cpu_y, marker='o', markersize=3.5, label="Historical", linewidth=lw)
ax1.set_ylabel("cpu use (%)")
ax1.set_xscale("log")
ax1.set_xticks([1, 2, 5, 10, 20, 50])
ax1.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax1.set_yticks([0, 10, 20, 30, 40])
ax1.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax1.spines["right"].set_visible(False)
ax1.spines["top"].set_visible(False)
ax1.grid(alpha=0.3)
ax2.plot(base_ram_x, base_ram_y, marker='o', markersize=3.5, label="Planned schedules", linewidth=lw)
ax2.plot(live_ram_x, live_ram_y, marker='o', markersize=3.5, label="Live updates", linewidth=lw)
ax2.plot(historical_ram_x, historical_ram_y, marker='o', markersize=3.5, label="Historical", linewidth=lw)
ax2.set_ylabel("ram use (%)")
ax2.set_xscale("log")
ax2.set_xticks([1, 2, 5, 10, 20, 50])
ax2.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax2.set_yticks([0, 2, 4, 6, 8, 10])
ax2.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax2.spines["right"].set_visible(False)
ax2.spines["top"].set_visible(False)
ax2.grid(alpha=0.3)
ax3.plot(base_arp_x, base_arp_y, marker='o', markersize=3.5, label="Planned schedules", linewidth=lw)
ax3.plot(live_arp_x, live_arp_y, marker='o', markersize=3.5, label="Live updates", linewidth=lw)
ax3.plot(historical_arp_x, historical_arp_y, marker='o', markersize=3.5, label="Historical", linewidth=lw)
ax3.set_ylabel("average response time (ms)")
ax3.set_xscale("log")
ax3.set_yscale("log")
ax3.set_xticks([1, 2, 5, 10, 20, 50])
ax3.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax3.set_yticks([700, 1500, 3000, 10000, 50000])
ax3.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax3.spines["right"].set_visible(False)
ax3.spines["top"].set_visible(False)
ax3.grid(alpha=0.3)
handles, labels = ax1.get_legend_handles_labels()
fig.legend(handles, labels, loc="lower center", ncol=3)
plt.show() | [
11748,
2603,
29487,
8019,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
11748,
269,
21370,
198,
11748,
33918,
198,
198,
2,
15553,
284,
15284,
4382,
9135,
3815,
198,
198,
2,
15553,
284,
15284,
2811,
2882,
640,
3815... | 2.43037 | 1,271 |
import numpy as np
import pytest
from pandas import (
CategoricalDtype,
DataFrame,
)
import pandas._testing as tm
@pytest.mark.parametrize(
"data, transposed_data, index, columns, dtype",
[
([[1], [2]], [[1, 2]], ["a", "a"], ["b"], int),
([[1], [2]], [[1, 2]], ["a", "a"], ["b"], CategoricalDtype([1, 2])),
([[1, 2]], [[1], [2]], ["b"], ["a", "a"], int),
([[1, 2]], [[1], [2]], ["b"], ["a", "a"], CategoricalDtype([1, 2])),
([[1, 2], [3, 4]], [[1, 3], [2, 4]], ["a", "a"], ["b", "b"], int),
(
[[1, 2], [3, 4]],
[[1, 3], [2, 4]],
["a", "a"],
["b", "b"],
CategoricalDtype([1, 2, 3, 4]),
),
],
)
| [
11748,
299,
32152,
355,
45941,
201,
198,
11748,
12972,
9288,
201,
198,
201,
198,
6738,
19798,
292,
1330,
357,
201,
198,
220,
220,
220,
327,
2397,
12409,
35,
4906,
11,
201,
198,
220,
220,
220,
6060,
19778,
11,
201,
198,
8,
201,
198,
... | 1.722973 | 444 |
from sample_metadata import *
import sys
import os
def get_platform_samples_root(platform, sample_root):
'''
Gets the root directory for each platform
'''
if (platform == "UWP"):
return os.path.join(sample_root, "UWP", "ArcGISRuntime.UWP.Viewer", "Samples")
if (platform == "WPF"):
return os.path.join(sample_root, "WPF", "ArcGISRuntime.WPF.Viewer", "Samples")
if (platform == "Android"):
return os.path.join(sample_root, "Android", "Xamarin.Android", "Samples")
if (platform == "iOS"):
return os.path.join(sample_root, "iOS", "Xamarin.iOS", "Samples")
if (platform == "Forms" or platform in ["XFA", "XFI", "XFU"]):
return os.path.join(sample_root, "Forms", "Shared", "Samples")
raise AssertionError(None, None)
def get_relative_path_to_samples_from_platform_root(platform):
'''
Returns the path from the platform's readme.md file to the folder containing the sample categories
For use in sample TOC generation
'''
if (platform == "UWP"):
return "ArcGISRuntime.UWP.Viewer/Samples"
if (platform == "WPF"):
return "ArcGISRuntime.WPF.Viewer/Samples"
if (platform == "Android"):
return "Xamarin.Android/Samples"
if (platform == "iOS"):
return "Xamarin.iOS/Samples"
if (platform == "Forms" or platform in ["XFA", "XFI", "XFU"]):
return "Shared/Samples"
raise AssertionError(None, None)
def write_build_script(list_of_samples, platform, output_dir):
'''
output_dir: platform-specific output folder containing sample solutions
list_of_samples: flat list of sample formal names; should correspond to directories
'''
output_string = "@echo on"
output_string += "\nREM Set up environment variables for Visual Studio."
output_string += "\ncall \"C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Enterprise\\Common7\\Tools\\VsDevCmd.bat\""
output_string += "\nREM ==================================="
for sample in list_of_samples:
output_string += f"\nCALL %ESRI_SAMPLES_TEMP_BUILD_ROOT%\\nuget restore {sample}\\{sample}.sln -PackagesDirectory %ESRI_SAMPLES_TEMP_BUILD_ROOT%\\packages"
output_string += "\n@echo on"
for sample in list_of_samples:
output_string += f"\nREM Building: {sample}"
output_string += f"\nmsbuild {plat_to_msbuild_string(platform)} /clp:errorsonly /flp2:errorsonly;logfile=%ESRI_SAMPLES_TEMP_BUILD_ROOT%\\{platform}\\build.log;append {sample}\\{sample}.sln"
file_name = os.path.join(output_dir, "BuildAll_CSharp.bat")
with open(file_name, 'w+') as output_file:
output_file.write(output_string)
def write_samples_toc(platform_dir, relative_path_to_samples, samples_in_categories):
'''
sample_in_categories is a dictionary of categories, each key is a list of sample_metadata
platform_dir is where the readme.md file should be written
'''
readme_text = "# Table of contents\n\n"
for category in samples_in_categories.keys():
readme_text += f"## {category}\n\n"
for sample in samples_in_categories[category]:
readme_text += f"* [{sample.friendly_name}]({relative_path_to_samples}/{sample.category}/{sample.formal_name}/readme.md) - {sample.description}\n"
readme_text += "\n"
readme_path = os.path.join(platform_dir, "../..", "readme.md")
with open(readme_path, 'w+') as file:
file.write(readme_text)
def main():
'''
Usage: python process_metadata.py {operation} {path_to_samples (ends in src)} {path_to_secondary}
Operations: toc; secondary path is empty
improve; secondary path is common readme
sync; keep metadata in sync with readme
'''
if len(sys.argv) < 3:
print("Usage: python process_metadata.py {operation} {path_to_samples (ends in src)} {path_to_secondary}")
print("Operations are toc, improve, and sync; secondary path is path to common readme source for the improve operation.")
return
operation = sys.argv[1]
sample_root = sys.argv[2]
common_dir_path = ""
if operation == "improve":
if len(sys.argv) < 4:
print("Usage: python process_metadata.py improve {path_to_samples (ends in src)} {path_to_readme_source}")
return
common_dir_path = sys.argv[3]
for platform in ["UWP", "WPF", "Android", "Forms", "iOS"]:
# make a list of samples, so that build_all_csproj.bat can be produced
list_of_sample_dirs = []
list_of_samples = {}
skipped_categories = False
for r, d, f in os.walk(get_platform_samples_root(platform, sample_root)):
if not skipped_categories:
skipped_categories = True
continue
for sample_dir in d:
# skip category directories
sample = sample_metadata()
path_to_readme = os.path.join(r, sample_dir, "readme.md")
if not os.path.exists(path_to_readme):
print(f"skipping path; does not exist: {path_to_readme}")
continue
sample.populate_from_readme(platform, path_to_readme)
sample.populate_snippets_from_folder(platform, path_to_readme)
if operation == "improve":
sample.try_replace_with_common_readme(platform, common_dir_path, path_to_readme)
if operation in ["improve", "sync"]:
sample.flush_to_json(os.path.join(r, sample_dir, "readme.metadata.json"))
list_of_sample_dirs.append(sample_dir)
# track samples in each category to enable TOC generation
if sample.category in list_of_samples.keys():
list_of_samples[sample.category].append(sample)
else:
list_of_samples[sample.category] = [sample]
# write out samples TOC
if operation in ["toc", "improve", "sync"]:
write_samples_toc(get_platform_samples_root(platform, sample_root), get_relative_path_to_samples_from_platform_root(platform), list_of_samples)
return
if __name__ == "__main__":
main() | [
6738,
6291,
62,
38993,
1330,
1635,
198,
11748,
25064,
198,
198,
11748,
28686,
198,
198,
4299,
651,
62,
24254,
62,
82,
12629,
62,
15763,
7,
24254,
11,
6291,
62,
15763,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
29620,
2... | 2.364742 | 2,632 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This scripts reads a fibers.bin file which is generated by opendihu parallel_fiber_estimation. It outputs a stl file and a pickle file containing all fibers.
# The purpose of this script is to inspect the fibers.bin file and manually evaluate the outcome.
#
# usage: ./examine_bin_fibers.py <input filename>
import sys, os
import numpy as np
import struct
import stl
from stl import mesh
import datetime
import pickle
input_filename = "fibers.bin"
if len(sys.argv) >= 2:
input_filename = sys.argv[1]
else:
print("Script to create various other file formats from a given *.bin file.\n usage: ./examine_bin_fibers.py <input bin filename>\n\n")
quit()
output_filename,extension = os.path.splitext(input_filename)
output_filename_stl = "{}.stl".format(output_filename)
output_filename2 = output_filename+"_.bin"
pickle_output_filename = "{}.pickle".format(output_filename)
pickle_output_filename2 = "{}_.pickle".format(output_filename)
svg_bottom_filename = "{}_bottom.svg".format(output_filename)
svg_center_filename = "{}_center.svg".format(output_filename)
svg_top_filename = "{}_top.svg".format(output_filename)
print("input filename: {}".format(input_filename))
print("output filenames: {}".format(output_filename_stl))
print(" {}".format(pickle_output_filename))
print(" {}".format(svg_bottom_filename))
print(" {}".format(svg_center_filename))
print(" {}".format(svg_top_filename))
print(" {}.vts".format(output_filename))
with open(input_filename, "rb") as infile:
# parse header
bytes_raw = infile.read(32)
header_str = struct.unpack('32s', bytes_raw)[0]
print("header: {}".format(header_str))
header_length_raw = infile.read(4)
header_length = struct.unpack('i', header_length_raw)[0]
print("header_length: {}".format(header_length))
#header_length = 32+8
parameters = []
for i in range(int(header_length/4) - 1):
int_raw = infile.read(4)
value = struct.unpack('i', int_raw)[0]
parameters.append(value)
n_fibers_total = parameters[0]
n_points_whole_fiber = parameters[1]
n_fibers_x = (int)(np.sqrt(parameters[0]))
n_fibers_y = n_fibers_x
if "version 2" in header_str.decode("utf-8") : # the version 2 has number of fibers explicitly stored and thus also allows non-square dimension of fibers
n_fibers_x = parameters[2]
n_fibers_y = parameters[3]
print("nFibersTotal: {n_fibers} = {n_fibers_x} x {n_fibers_y}".format(n_fibers=parameters[0], n_fibers_x=n_fibers_x, n_fibers_y=n_fibers_y))
print("nPointsWholeFiber: {}".format(parameters[1]))
if "version 2" not in header_str.decode("utf-8"):
print("nBoundaryPointsXNew: {}".format(parameters[2]))
print("nBoundaryPointsZNew: {}".format(parameters[3]))
print("nFineGridFibers_: {}".format(parameters[4]))
print("nRanks: {}".format(parameters[5]))
print("nRanksZ: {}".format(parameters[6]))
print("nFibersPerRank: {}".format(parameters[7]))
print("date: {:%d.%m.%Y %H:%M:%S}".format(datetime.datetime.fromtimestamp(parameters[8])))
#input("Press any key to continue.")
xmin = None
xmax = None
ymin = None
ymax = None
zmin = None
zmax = None
streamlines = []
n_streamlines_valid = 0
n_streamlines_invalid = 0
# loop over fibers
for streamline_no in range(n_fibers_total):
streamline = []
streamline_valid = True
n_zero_points = 0
# loop over points of fiber
for point_no in range(n_points_whole_fiber):
point = []
# parse point
for i in range(3):
double_raw = infile.read(8)
value = struct.unpack('d', double_raw)[0]
point.append(value)
# set bounding box
if xmin is None or point[0] < xmin:
xmin = point[0]
if xmax is None or point[0] > xmax:
xmax = point[0]
if ymin is None or point[1] < ymin:
ymin = point[1]
if ymax is None or point[1] > ymax:
ymax = point[1]
if zmin is None or point[2] < zmin:
zmin = point[2]
if zmax is None or point[2] > zmax:
zmax = point[2]
# check if point is valid
if point[0] == 0.0 and point[1] == 0.0 and point[2] == 0.0:
n_zero_points += 1
if point[0] == 0.0 and point[1] == 0.0 and point[2] == 0.0 and n_zero_points >= 2:
if streamline_valid:
coordinate_x = streamline_no % n_fibers_x
coordinate_y = (int)(streamline_no / n_fibers_x)
print("Warning: streamline {}, ({},{})/({},{}) contains point [0,0,0] (at {}. point) and thus may be invalid.".format(streamline_no, coordinate_x, coordinate_y, n_fibers_x, n_fibers_y, point_no))
print("streamline so far (first ten points): ",streamline[0:10])
streamline_valid = False
streamline.append(point)
if streamline_valid:
n_streamlines_valid += 1
else:
n_streamlines_invalid += 1
streamline = []
streamlines.append(streamline)
print("n valid: {}, n invalid: {}".format(n_streamlines_valid, n_streamlines_invalid))
print("bounding box: [{}, {}] x [{}, {}] x [{}, {}]".format(xmin, xmax, ymin, ymax, zmin, zmax))
# ------------------------------
# create pickle file
print("")
print("Create Pickle file.")
print("Output pickle to filename: \"{}\"...".format(pickle_output_filename))
with open(pickle_output_filename, 'wb') as f:
pickle.dump(streamlines, f)
print("done")
# -------------------------------
# create svg files for top, center and bottom planes
if n_streamlines_invalid == 0 and n_fibers_x > 1:
print("")
print("Create svg files for top, center and bottom planes.")
paths = ["", "", ""] # path strings for bottom, center, top
z_index = [0, (int)(n_points_whole_fiber/2.), -1]
n_fine_grid_fibers = parameters[4]
with_boundary_layer = n_fibers_x == (n_fine_grid_fibers+1)*(parameters[2]-1)*parameters[6]
# set circles at points
# loop over grid of fibers
for y in range(n_fibers_y):
for x in range(n_fibers_x):
for i in range(3):
point0 = streamlines[y*n_fibers_x + x][z_index[i]]
stroke_style = "#000000"
if (with_boundary_layer and (n_fine_grid_fibers > 0 and (x % (n_fine_grid_fibers+1) == 0 or y % (n_fine_grid_fibers+1) == 0))):
stroke_style = "#0000ff"
elif x == 0:
stroke_style = "#0000aa"
elif x == n_fibers_x-2:
stroke_style = "#3333ff"
elif y == 0:
stroke_style = "#aa0000"
elif y == n_fibers_x-2:
stroke_style = "#ff3333"
elif (y == 0 or y == n_fibers_y-1) and (x == 0 or x == n_fibers_x-1):
stroke_style = "#00ff00"
paths[i] += """
<circle cx="{cx}" cy="{cy}" r="0.1" stroke="{stroke}" stroke-width="0.1" fill="{stroke}" />""".format(cx=point0[0], cy=point0[1], stroke=stroke_style)
min_x = [None,None,None]
max_x = [None,None,None]
min_y = [None,None,None]
max_y = [None,None,None]
# loop over grid of fibers
for y in range(n_fibers_y-1):
for x in range(n_fibers_x-1):
# bottom rectangle
# p2 p3
# p0 p1
for i in range(3):
point0 = streamlines[y*n_fibers_x + x][z_index[i]]
point1 = streamlines[y*n_fibers_x + x+1][z_index[i]]
point2 = streamlines[(y+1)*n_fibers_x + x][z_index[i]]
point3 = streamlines[(y+1)*n_fibers_x + x+1][z_index[i]]
if min_x[i] is None:
min_x[i] = point0[0]
min_x[i] = min(min_x[i], point0[0])
min_x[i] = min(min_x[i], point1[0])
min_x[i] = min(min_x[i], point2[0])
min_x[i] = min(min_x[i], point3[0])
if max_x[i] is None:
max_x[i] = point0[0]
max_x[i] = max(max_x[i], point0[0])
max_x[i] = max(max_x[i], point1[0])
max_x[i] = max(max_x[i], point2[0])
max_x[i] = max(max_x[i], point3[0])
if min_y[i] is None:
min_y[i] = point0[1]
min_y[i] = min(min_y[i], point0[1])
min_y[i] = min(min_y[i], point1[1])
min_y[i] = min(min_y[i], point2[1])
min_y[i] = min(min_y[i], point3[1])
if max_y[i] is None:
max_y[i] = point0[1]
max_y[i] = max(max_y[i], point0[1])
max_y[i] = max(max_y[i], point1[1])
max_y[i] = max(max_y[i], point2[1])
max_y[i] = max(max_y[i], point3[1])
stroke_style = "#000000"
if x == 0:
stroke_style = "#0000aa"
if x == n_fibers_x-2:
stroke_style = "#3333ff"
if y == 0:
stroke_style = "#aa0000"
if y == n_fibers_x-2:
stroke_style = "#ff3333"
if (y == 0 or y == n_fibers_y-2) and (x == 0 or x == n_fibers_x-2):
stroke_style = "#00ff00"
paths[i] += """
<!-- xy {} {}, len {}, points {} {} {} {}-->""".format(x,y,len(streamlines[y*n_fibers_x + x]), str(point0), str(point1), str(point2), str(point3))
paths[i] += """
<path
style="fill:none;fill-rule:evenodd;stroke:{stroke};stroke-width:0.1;stroke-linecap:round;stroke-linejoin:round;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none"
d="m {p0x},{p0y} {d1x},{d1y} {d2x},{d2y} {d3x},{d3y} z" />
""".format(stroke=stroke_style,p0x=point0[0], p0y=point0[1], d1x=(point1[0]-point0[0]), d1y=(point1[1]-point0[1]), d2x=(point3[0]-point1[0]), d2y=(point3[1]-point1[1]), d3x=(point2[0]-point3[0]), d3y=(point2[1]-point3[1]))
svg_filenames = [svg_bottom_filename, svg_center_filename, svg_top_filename]
for i in range(3):
filename = svg_filenames[i]
with open(filename, "w") as f:
f.write("""<?xml version="1.0" encoding="UTF-8"?>
<svg width="{lx}" height="{ly}">
<g transform="translate({tx},{ty})">
{paths}
</g>
</svg>
""".format(paths=paths[i], tx=-min_x[i], ty=-min_y[i], lx=(max_x[i]-min_x[i]), ly=(max_y[i]-min_y[i])))
print("Wrote file \"{}\".".format(filename))
#---------------------------------------
# Create the stl mesh
print("")
print("Create STL mesh of fibers.")
# create stl file
triangles = []
for points in streamlines:
previous_point = None
for p in points:
point = np.array([p[0], p[1], p[2]])
if np.linalg.norm(point) < 1e-3:
continue
if previous_point is not None:
triangles.append([previous_point, point, 0.5*(previous_point+point)])
previous_point = point
out_mesh = mesh.Mesh(np.zeros(len(triangles), dtype=mesh.Mesh.dtype))
for i, f in enumerate(triangles):
out_mesh.vectors[i] = f
#out_mesh.update_normals()
out_mesh.save(output_filename_stl)
print("Saved {} triangles to \"{}\".".format(len(triangles),output_filename_stl))
#---------------------------------------
# Create VTK files
print("")
print("Create VTK files")
try:
from pyevtk.hl import gridToVTK # pip3 install pyevtk
except:
print("Could not load pyevtk.hl. Install tho following:")
print("pip3 install pyevtk")
else:
# create geometry for vtk structured grid
n_points_x = n_fibers_x
n_points_y = n_fibers_y
n_points_z = n_points_whole_fiber
n_points = n_points_x * n_points_y * n_points_z
print("create geometry with {} x {} x {} = {} points".format(n_points_x, n_points_y, n_points_z, n_points))
positions_x = np.zeros((n_points_x, n_points_y, n_points_z))
positions_y = np.zeros((n_points_x, n_points_y, n_points_z))
positions_z = np.zeros((n_points_x, n_points_y, n_points_z))
field_fiber_no = np.zeros((n_points_x, n_points_y, n_points_z))
# loop over points of geometry and set positions
for k in range(n_points_z):
for j in range(n_points_y):
for i in range(n_points_x):
point = streamlines[j*n_fibers_x + i][k]
positions_x[i,j,k] = point[0]
positions_y[i,j,k] = point[1]
positions_z[i,j,k] = point[2]
fiber_no = j*n_points_x + i
field_fiber_no[i,j,k] = fiber_no
# write vtk file
gridToVTK(output_filename, positions_x, positions_y, positions_z, pointData = {"fiber_no" : field_fiber_no})
print("Wrote file \"{}.vts\"".format(output_filename))
print("Now run: ")
print(" paraview \"{}.vts\"".format(output_filename))
#---------------------------------------
# Postprocessing
sys.exit(0)
print("")
print("Postprocessing where fibres with too high distance to neighbouring fibers are removed (may take long)")
input("Press any key to continue or Ctrl-C to abort.")
# postprocess streamlines
invalid_streamlines = []
n_fibers_x = (int)(np.sqrt(n_fibers_total))
for j in range(0,n_fibers_x):
for i in range(0,n_fibers_x):
fiber_no = j*n_fibers_x + i
fiber_no_0minus = None
fiber_no_0plus = None
fiber_no_1minus = None
fiber_no_1plus = None
if i > 0:
fiber_no_0minus = j*n_fibers_x + i-1
if i < n_fibers_x-1:
fiber_no_0plus = j*n_fibers_x + i+1
if j > 0:
fiber_no_1minus = (j-1)*n_fibers_x + i
if j < n_fibers_x-1:
fiber_no_1plus = (j+1)*n_fibers_x + i
average_distance = 0
max_distance = 0
n_points = 0
for point_no in range(n_points_whole_fiber):
if len(streamlines[fiber_no]) > point_no:
if fiber_no_0minus is not None:
if len(streamlines[fiber_no_0minus]) > point_no:
#average_distance += np.linalg.norm(np.array(streamlines[fiber_no][point_no]) - np.array(streamlines[fiber_no_0minus][point_no]))
max_distance = max(max_distance, np.linalg.norm(np.array(streamlines[fiber_no][point_no]) - np.array(streamlines[fiber_no_0minus][point_no])))
n_points += 1
if fiber_no_1minus is not None:
if len(streamlines[fiber_no_1minus]) > point_no:
#average_distance += np.linalg.norm(np.array(streamlines[fiber_no][point_no]) - np.array(streamlines[fiber_no_1minus][point_no]))
max_distance = max(max_distance, np.linalg.norm(np.array(streamlines[fiber_no][point_no]) - np.array(streamlines[fiber_no_1minus][point_no])))
n_points += 1
if fiber_no_0plus is not None:
if len(streamlines[fiber_no_0plus]) > point_no:
#average_distance += np.linalg.norm(np.array(streamlines[fiber_no][point_no]) - np.array(streamlines[fiber_no_0plus][point_no]))
max_distance = max(max_distance, np.linalg.norm(np.array(streamlines[fiber_no][point_no]) - np.array(streamlines[fiber_no_0plus][point_no])))
n_points += 1
if fiber_no_1plus is not None:
if len(streamlines[fiber_no_1plus]) > point_no:
#average_distance += np.linalg.norm(np.array(streamlines[fiber_no][point_no]) - np.array(streamlines[fiber_no_1plus][point_no]))
max_distance = max(max_distance, np.linalg.norm(np.array(streamlines[fiber_no][point_no]) - np.array(streamlines[fiber_no_1plus][point_no])))
n_points += 1
#if n_points > 0:
#average_distance /= n_points
print (" fiber {},{}, average_distance: {}, n_points: {}, max_distance: {}".format(i,j,average_distance, n_points, max_distance))
if max_distance > 10:
invalid_streamlines.append(fiber_no)
for invalid_streamline_no in invalid_streamlines:
streamlines[invalid_streamline_no] = []
print("output other pickle to filename: {}".format(pickle_output_filename2))
with open(pickle_output_filename2, 'wb') as f:
pickle.dump(streamlines, f)
#streamlines = [streamlines[5]]
#print(streamlines[0])
triangles = []
for points in streamlines:
previous_point = None
for p in points:
point = np.array([p[0], p[1], p[2]])
if np.linalg.norm(point) < 1e-3:
continue
if previous_point is not None:
triangles.append([previous_point, point, 0.5*(previous_point+point)])
previous_point = point
#---------------------------------------
# Create the mesh
out_mesh = mesh.Mesh(np.zeros(len(triangles), dtype=mesh.Mesh.dtype))
for i, f in enumerate(triangles):
out_mesh.vectors[i] = f
#out_mesh.update_normals()
out_mesh.save(output_filename2)
print("saved {} triangles to \"{}\"".format(len(triangles),output_filename2))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
770,
14750,
9743,
257,
26742,
13,
8800,
2393,
543,
318,
7560,
416,
1034,
437,
48406,
10730,
62,
69,
1856,
... | 2.130346 | 7,833 |
"""Initialize the SQLAlchemy object.
"""
from flask_sqlalchemy import SQLAlchemy
DB = SQLAlchemy()
| [
37811,
24243,
1096,
262,
16363,
2348,
26599,
2134,
13,
198,
37811,
198,
198,
6738,
42903,
62,
25410,
282,
26599,
1330,
16363,
2348,
26599,
198,
198,
11012,
796,
16363,
2348,
26599,
3419,
198
] | 3.15625 | 32 |
"""
DTIPrepext.py
===========================
Description:
Author:
Usage:
"""
import os
from nipype.interfaces.base import (
CommandLine,
CommandLineInputSpec,
TraitedSpec,
File,
Directory,
traits,
isdefined,
InputMultiPath,
OutputMultiPath,
)
from nipype.interfaces.semtools.diffusion.dtiprep import (
DTIPrepInputSpec,
DTIPrepOutputSpec,
DTIPrep,
)
"""
This class represents a...
"""
# outputQCedBaseline = traits.Either(File(exists=True), None)
# outputQCedDTI = traits.Either(File(exists=True), None)
# outputQCedDTI_FA = traits.Either(File(exists=True), None)
# outputQCedDTI_MD = traits.Either(File(exists=True), None)
# outputQCedDTI_colorFA = traits.Either(File(exists=True), None)
# outputQCedDTI_frobeniusnorm = traits.Either(File(exists=True), None)
# outputQCedIDWI = traits.Either(File(exists=True), None)
"""
This class represents a...
"""
| [
37811,
198,
24544,
4061,
45956,
742,
13,
9078,
198,
4770,
2559,
18604,
198,
11828,
25,
198,
198,
13838,
25,
198,
198,
28350,
25,
198,
198,
37811,
198,
11748,
28686,
198,
198,
6738,
299,
541,
2981,
13,
3849,
32186,
13,
8692,
1330,
357,... | 2.590909 | 352 |
import marshmallow
import base64
class Base64ByteString(marshmallow.fields.Field):
"""
This fields serialize bytes to a base64 encoded UTF-8 string
"""
| [
11748,
22397,
42725,
198,
11748,
2779,
2414,
628,
198,
4871,
7308,
2414,
40778,
10100,
7,
76,
5406,
42725,
13,
25747,
13,
15878,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
770,
7032,
11389,
1096,
9881,
284,
257,
2779,
2414,
... | 3.192308 | 52 |
# Import standard library packages
# Import installed packages
from marshmallow import fields
# Import app code
from .base import BaseSchema
| [
2,
17267,
3210,
5888,
10392,
198,
198,
2,
17267,
6589,
10392,
198,
6738,
22397,
42725,
1330,
7032,
198,
198,
2,
17267,
598,
2438,
198,
6738,
764,
8692,
1330,
7308,
27054,
2611,
628
] | 4.5 | 32 |
import discord
from discord.ext import commands | [
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
9729
] | 5.222222 | 9 |
#!/usr/bin/env python
#
# Copyright 2013 Adam Gschwender
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import tornado.web
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
15069,
2211,
7244,
402,
20601,
86,
2194,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
407,
779,... | 3.621891 | 201 |
from setuptools import find_packages, setup
setup(
name="mydbot",
version="0.1",
description="hmmm",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
url="https://github.com/Datavorous/sbutilcog",
author="Datavorous",
author_email="digestingdata1@gmail.com",
license="MIT",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
],
keywords="Discord Bot, Discord, Discord Cogs",
packages=find_packages(),
install_requires=["discord.py","asyncio","sbutilcog", "sbfuncog", "sblycog", "sberrorcog","ModCog","FunCog"],
)
| [
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
11,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
2625,
1820,
9945,
313,
1600,
198,
220,
220,
220,
2196,
2625,
15,
13,
16,
1600,
198,
220,
220,
220,
6764,
2625,
71,
27532,
1600,
... | 2.587031 | 293 |
from typing import Optional
from markdown import Markdown
from .grid_object import GridObject
from .mixin import SlideMixin
from .._utils import check_type
class TextBox(GridObject):
"""Represents a textbox on the slide.
Parameters
----------
content: str
A string containing the content of this textbox. May contain HTML and/or Markdown formatting.
row: int
The grid row in which to place this plot.
column: int
The grid column in which to place this plot.
row_span: int, default=1
The number of rows for this plot to span (defaults to `1`).
col_span: int, default=1
The number of columns for this plot to span (defaults to `1`).
markdown: bool, default=True
Whether to use Markdown to parse the text string (defaults to `True`).
keep_linebreaks: bool, default=True
Whether to replace newline characters (`\\n`) with HTML linebreaks (`<br>`). Defaults to `True`, but is only
relevant when `markdown = False`.
css_class: str, optional
The name of the CSS class (or classes) to apply to this object.
"""
def get_div(self):
"""Get the required <div></div> HTML tags to display this textbox.
Returns
-------
str
"""
if self.keep_linebreaks and not self.markdown:
text = self.content.replace("\n", "<br>")
else:
text = self.content
if self.markdown:
extensions = ["tables", "fenced_code"]
md = Markdown(extensions=extensions)
text = md.convert(text)
# If there are any backticks (`) present in the text, convert it to its HTML entity (`).
text = text.replace("`", "`")
return f"<div {self._style_str}>{text}</div>"
class TextboxMixin(SlideMixin):
"""Adds Textbox functionality to the Slide class."""
def add_textbox(self,
content: str,
row: int,
column: int,
row_span: int = 1,
col_span: int = 1,
css_class: Optional[str] = None,
markdown: bool = True,
keep_linebreaks: bool = True) -> None:
"""Add a textbox to this slide, in the specified row and column.
Parameters
----------
content : str
A string containing the text to add, which may include Markdown or HTML formatting. Use with `markdown
= False` to ignore any possible Markdown formatting and use literal text and HTML.
row : int
The grid row in which to place this textbox.
column : int
The grid column in which to place this textbox.
row_span : int, default=1
The number of rows for this textbox to span (defaults to `1`).
col_span : int, default=1
The number of columns for this textbox to span (defaults to `1`).
css_class : str, optional
The CSS class (or classes) to apply to this textbox. Multiple CSS classes are applied in a single string,
separated by a space. I.e. `css_class = "class1 class2"`.
markdown : bool, default=True
Whether to use Markdown to parse the text string (defaults to `True`).
keep_linebreaks : bool, default=True
Whether to replace newline characters (`\\n`) with HTML linebreaks (`<br>`). Defaults to `True`, but is only
relevant when `markdown = False`.
"""
textbox = TextBox(content, row, column, row_span, col_span, markdown, keep_linebreaks, css_class)
self._check_grid_pos(row, column)
self._elements.append(textbox)
| [
6738,
19720,
1330,
32233,
198,
198,
6738,
1317,
2902,
1330,
2940,
2902,
198,
198,
6738,
764,
25928,
62,
15252,
1330,
24846,
10267,
198,
6738,
764,
19816,
259,
1330,
37651,
35608,
259,
198,
6738,
11485,
62,
26791,
1330,
2198,
62,
4906,
6... | 2.421977 | 1,538 |
"""description: helpful fns for GO terms
"""
import pandas as pd
class GoParams(object):
"""clean up and othe fns
"""
REMOVE_SUBSTRINGS = [
"anatomical",
"ameboidal",
"animal organ",
"multicellular organism",
"cellular developmental",
"tube",
"regulation of",
"embryonic",
"cardiovascular",
"angiogenesis",
"blood vessel",
"vasculature",
"immune",
"defense",
"signaling",
"response to",
"movement of"]
REMOVE_EXACT_STRINGS = [
"system process",
"system development",
"developmental process",
"tissue development"]
GOOD_GO_TERMS = [
"stem cell differentiation",
"hemidesmosome",
"hair",
"cell migration",
"skin",
"keratinocyte",
"cell cycle",
"epiderm",
"cell junction",
"cell proliferation",
"adhesion",
"lipase activity",
"fatty acid",
"sphingolipid",
"glycerolipid"]
def is_enriched(go_file, filter_good_terms=False):
"""clean up terms and check if any terms in desired list
"""
# pull in go terms
go_terms = pd.read_csv(go_file, sep="\t")
go_terms = go_terms[go_terms["domain"] == "BP"]
go_terms = go_terms["term.name"].values.tolist()
# filter out bad terms
keep_terms = []
for func_term in go_terms:
keep = True
for bad_term_str in GoParams.REMOVE_SUBSTRINGS:
if bad_term_str in func_term:
keep = False
if func_term in GoParams.REMOVE_EXACT_STRINGS:
keep = False
if keep:
keep_terms.append(func_term)
# if filter good terms, do this here
# if any remain, then enriched
if len(keep_terms) > 0:
enriched = True
else:
enriched = False
return enriched
| [
37811,
11213,
25,
7613,
277,
5907,
329,
10351,
2846,
198,
37811,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
4871,
1514,
10044,
4105,
7,
15252,
2599,
198,
220,
220,
220,
37227,
27773,
510,
290,
267,
1169,
277,
5907,
198,
220,... | 2.056426 | 957 |
import urllib2
import re
# input:
# address: the address of a machine running PerfStudio Server
# frames: number of frames to capture
#
# Connects to the specified URL an returns a list of api calls in the following format:
# <interface> <function name> <parameter list> <returned value>
# the last parameter is omited if the funtion returns null
#
#
#
# Prints the whole API database
#
#
#
#
# API usage count
#
#
#
#
# Shader usage count
#
#
#
# connect to local host and take 5 frames
#
database = GetLoggedData( "127.0.0.1", 5 )
"""
print
print "API Log"
print "-------"
PrintAPILog( database )
"""
print
print "API Usage Histogram"
print "-------------------"
APIHistogram( database )
print
print "Shader Usage Histogram"
print "----------------------"
ShaderHistogram( database )
| [
11748,
2956,
297,
571,
17,
198,
11748,
302,
198,
198,
2,
5128,
25,
198,
2,
220,
220,
220,
2209,
25,
262,
2209,
286,
257,
4572,
2491,
2448,
69,
41501,
9652,
198,
2,
220,
220,
220,
13431,
25,
220,
1271,
286,
13431,
284,
8006,
198,
... | 2.99278 | 277 |
import sys
# Python3 program to demonstrate
# working of Alpha-Beta Pruning
# Initial values of Aplha and Beta
MAX, MIN = sys.maxsize, sys.maxsize*-1
# Returns optimal value for current player
#(Initially called for root and maximizer)
| [
11748,
25064,
198,
2,
11361,
18,
1430,
284,
10176,
220,
220,
198,
2,
1762,
286,
12995,
12,
43303,
1736,
46493,
220,
220,
198,
220,
220,
198,
2,
20768,
3815,
286,
317,
489,
3099,
290,
17993,
220,
220,
198,
198,
22921,
11,
20625,
796,... | 2.9 | 90 |
# File Name : shift_cipher.py
# Description : implement functions related to shift_cipher
# Author : Ganyuan Cao
# encrypt function
# encrypt function
# brute force
# main
if __name__ == "__main__":
main()
| [
2,
9220,
6530,
1058,
6482,
62,
66,
10803,
13,
9078,
198,
2,
12489,
1058,
3494,
5499,
3519,
284,
6482,
62,
66,
10803,
198,
2,
6434,
1058,
402,
1092,
7258,
34513,
628,
198,
2,
34117,
2163,
198,
198,
2,
34117,
2163,
198,
198,
2,
3390... | 3.142857 | 70 |
from django.http import HttpResponse
from django.template import Context
from django.template.loader import render_to_string, get_template
from django.conf import settings
from django.core.exceptions import ValidationError
from confy import env
import json
from django.utils.safestring import SafeText
from django.utils.crypto import get_random_string
import os
from . import models
from django.core import serializers
import collections
import datetime
"""
This is a upload wrapper for the ajax uploader widget for django forms.
"""
| [
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
6738,
42625,
14208,
13,
28243,
1330,
30532,
198,
6738,
42625,
14208,
13,
28243,
13,
29356,
1330,
8543,
62,
1462,
62,
8841,
11,
651,
62,
28243,
198,
6738,
42625,
14208,
13,
1041... | 3.774648 | 142 |
from helpers.CONSTANTS import WHITESPACE, ASCII_DIGIT, ASCII_ALPHA
def inside(constant, char):
"""
NORMALLY THE STATEMENT "" in "any_string" WILL RETURN TRUE, THIS FUNCTION AVOIDS THAT
READ AS: "IF INSIDE CONSTANT IS CHAR"
"""
if char != "":
return char in constant
else:
return False
| [
6738,
49385,
13,
10943,
2257,
1565,
4694,
1330,
7655,
2043,
1546,
47,
11598,
11,
37101,
62,
35,
3528,
2043,
11,
37101,
62,
1847,
47,
7801,
628,
198,
198,
4299,
2641,
7,
9979,
415,
11,
1149,
2599,
198,
220,
220,
220,
37227,
198,
220,... | 2.503759 | 133 |
from django.test import TestCase
from api.models import Product, Shop
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
40391,
13,
27530,
1330,
8721,
11,
13705,
628
] | 3.944444 | 18 |
import os
from typing import Optional, Callable, Union, Any
import numpy as np
import torch
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, MultiLabelBinarizer
from torch import Tensor
from torch.utils.data import Dataset, DataLoader
from app.common.utils import compose3
from app.dataset.dumps import get_single_track_features_path, dump_file_exists, extract_features, \
save_single_track_features
from app.features.features import N_MELS, MINUTE_LENGTH
from config import DIR_FEATURES_V1_SINGLE
BATCH_SIZE = 9
TEST_SPLIT_SEED = 2021
| [
11748,
28686,
198,
6738,
19720,
1330,
32233,
11,
4889,
540,
11,
4479,
11,
4377,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
4512,
62,
9288,
62,
35312,
198,
6738,
1341,
... | 3.202128 | 188 |
from rest_framework import serializers
# import models
from core.models import Commission
# *****************************************************************************************
# Commission
# *****************************************************************************************
class CommissionSerializer(serializers.ModelSerializer):
"""
model serializer for commission
""" | [
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
198,
2,
1330,
4981,
198,
6738,
4755,
13,
27530,
1330,
4513,
198,
198,
2,
41906,
17174,
8412,
4557,
9,
198,
2,
4513,
198,
2,
41906,
17174,
8412,
4557,
9,
198,
4871,
4513,
32634,
7509,
7... | 5.753623 | 69 |
# AUTOGENERATED! DO NOT EDIT! File to edit: 99_refs.ipynb (unless otherwise specified).
__all__ = []
# Cell
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
plt.style.use('seaborn-white')
# Cell | [
2,
47044,
7730,
1677,
1137,
11617,
0,
8410,
5626,
48483,
0,
9220,
284,
4370,
25,
7388,
62,
5420,
82,
13,
541,
2047,
65,
357,
25252,
4306,
7368,
737,
198,
198,
834,
439,
834,
796,
17635,
198,
198,
2,
12440,
198,
11748,
299,
32152,
... | 2.818182 | 88 |
from user import User # Importing the user class
from credentials import Credentials # Importing credentials class
def create_user(first_name,last_name,email,password):
"""
Function to create new user
"""
new_user = User(first_name,last_name,email,password)
return new_user
def save_user(user):
'''
Function to save user
'''
User.save_user(user)
def verify_user(first_name,password):
'''
Checks the existance of a user before creating credentials
'''
checks_user = Credentials.check_user(first_name,password)
return checks_user
# def delete_credentials(cls, name):
# '''
# deletes an account's saved credentials from the credentials_list.
# '''
# for account in cls.credentials_list:
# if account.account_name == name:
# Credentials.credentials_list.remove(account)
# Main
if __name__ == '__main__':
main()
| [
6738,
2836,
1330,
11787,
1303,
17267,
278,
262,
2836,
1398,
198,
6738,
18031,
1330,
327,
445,
14817,
1303,
17267,
278,
18031,
1398,
198,
198,
4299,
2251,
62,
7220,
7,
11085,
62,
3672,
11,
12957,
62,
3672,
11,
12888,
11,
28712,
2599,
1... | 2.868687 | 297 |
from odmantic import AIOEngine
__all__ = (
'engine',
)
# Engine for a local database
from config import DATABASE_NAME
engine = AIOEngine(database=DATABASE_NAME)
# Engine for an atlas or hosted database
# from motor.motor_asyncio import AsyncIOMotorClient
# from config import DATABASE_URL
# client = AsyncIOMotorClient(DATABASE_URL)
# engine = AIOEngine(motor_client=client, database='kaggle_30dML')
| [
6738,
16298,
76,
5109,
1330,
317,
9399,
13798,
198,
198,
834,
439,
834,
796,
357,
198,
220,
220,
220,
705,
18392,
3256,
198,
8,
198,
198,
2,
7117,
329,
257,
1957,
6831,
198,
6738,
4566,
1330,
360,
1404,
6242,
11159,
62,
20608,
198,
... | 2.86014 | 143 |
#!/usr/bin/env python3
"""
Module Docstring
"""
__author__ = "Matheus Rocha Vieira"
__version__ = "0.0.1"
__license__ = "GNU GPLv3"
import argparse
import numpy as np
from algorithms import *
def main(args):
""" Main entry point of the app """
unsorted_array = []
if args.order == 'ASC':
unsorted_array = list(range(0, int(args.instancesize)))
if args.order == 'DESC':
unsorted_array = list(range(0, int(args.instancesize)))
unsorted_array = list(reversed(unsorted_array))
if args.order == 'RAND':
unsorted_array = list(range(0, int(args.instancesize)))
np.random.shuffle(unsorted_array)
size = int(args.instancesize)
if args.algorithm == 'all':
selection_sort(unsorted_array, size)
insertion_sort(unsorted_array, size)
shell_sort(unsorted_array, size)
merge_sort(unsorted_array, size)
heap_sort(unsorted_array, size)
quick_sort(unsorted_array, size)
if args.algorithm == 'selection':
selection_sort(unsorted_array, size)
if args.algorithm == 'insertion':
insertion_sort(unsorted_array, size)
if args.algorithm == 'shell':
shell_sort(unsorted_array, size)
if args.algorithm == 'merge':
merge_sort(unsorted_array, size)
if args.algorithm == 'heap':
heap_sort(unsorted_array, size)
if args.algorithm == 'quick':
quick_sort(unsorted_array, size)
if __name__ == "__main__":
""" This is executed when run from the command line """
parser = argparse.ArgumentParser()
parser.add_argument(
"-alg",
"--algorithm",
choices=['all','selection', 'insertion', 'shell', 'merge', 'heap', 'quick']
)
parser.add_argument(
"-is",
"--instancesize",
choices=['10', '1000', '100000', '1000000']
)
parser.add_argument(
"-o",
"--order",
choices=['ASC', 'DESC', 'RAND']
)
parser.add_argument(
"--verbose",
action="count",
default=0,
help="Verbosity"
)
parser.add_argument(
"-v",
"--version",
action="version",
version="%(prog)s (version {version})".format(version=__version__)
)
args = parser.parse_args()
main(args)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
26796,
14432,
8841,
198,
37811,
198,
198,
834,
9800,
834,
796,
366,
19044,
258,
385,
371,
5374,
64,
47154,
8704,
1,
198,
834,
9641,
834,
796,
366,
15,
13,
15,
13,
1... | 2.549492 | 788 |
import os
import argparse
import json
from mltk.core.model import load_mltk_model
from mltk.core.tflite_model import TfliteModel
from mltk.core.tflite_micro import TfliteMicro
from mltk.utils.bin2header import bin2header
from mltk.utils.path import fullpath, create_tempdir
from mltk.utils.hasher import hash_file
from mltk import cli
def generate_model_header(
model: str,
output: str,
variable_name='MODEL_DATA',
variable_attributes:str=None,
length_variable_name='MODEL_DATA_LENGTH',
accelerator:str=None
):
"""Generate a model header file from a MLTK model or .tflite
Args:
model: Name of MLTK model or path to .tflite
output: Path to generated output header
variable_name: Name of C array
variable_attributes: Attributes to prepend to C array variable
length_variable_name: Name of C variable to hold length of C array
accelerator: Name of accelerator for which to generate header
"""
if model.endswith('.tflite'):
tflite_path = fullpath(model)
if not os.path.exists(tflite_path):
cli.abort(msg=f'\n\n*** .tflite model file not found: {model}\n\n')
else:
try:
mltk_model = load_mltk_model(model, print_not_found_err=True)
except Exception as e:
cli.abort(msg=f'\n\nFailed to load MltkModel, err: {e}\n\n')
try:
tflite_path = mltk_model.tflite_archive_path
except Exception as e:
cli.handle_exception(f'Failed to get .tflite from {mltk_model.archive_path}', e)
output = fullpath(output)
old_generation_details = None
generation_args_path = f'{os.path.dirname(output)}/generated_model_details.json'
generation_details = dict(
tflite_path=fullpath(tflite_path),
tflite_hash=hash_file(tflite_path),
output=output,
accelerator=accelerator
)
if os.path.exists(generation_args_path):
try:
with open(generation_args_path, 'r') as f:
old_generation_details = json.load(f)
except:
pass
if old_generation_details == generation_details:
print(f'{os.path.basename(output)} up-to-date')
return
if accelerator:
if not TfliteMicro.accelerator_is_supported(accelerator):
raise ValueError(f'Unknown accelerator: {accelerator}, supported accelerators are: {", ".join(TfliteMicro.get_supported_accelerators())}')
tflm_accelerator = TfliteMicro.get_accelerator(accelerator)
if tflm_accelerator.supports_model_compilation:
compilation_report_path = output + '-compilation_report.txt'
tflite_model = TfliteModel.load_flatbuffer_file(tflite_path)
compiled_tflite_model = tflm_accelerator.compile_model(
tflite_model,
report_path=compilation_report_path,
logger=cli.get_logger()
)
model_name = os.path.basename(tflite_path)[:-len('.tflite')]
tflite_path = f'{create_tempdir("tmp_models")}/{model_name}.{accelerator}.tflite'
compiled_tflite_model.save(tflite_path)
bin2header(
input=tflite_path,
output_path=output,
var_name=variable_name,
length_var_name=length_variable_name,
attributes=variable_attributes,
)
with open(generation_args_path, 'w') as f:
json.dump(generation_details, f, indent=3)
cli.print_info(f'Generated {output}\nfrom {tflite_path}')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate a model header file from a MLTK model or .tflite')
parser.add_argument('model', help='Name of MLTK model or path to .tflite')
parser.add_argument('--output', default='generated_model.tflite.h', help='Path to generated output header')
parser.add_argument('--name', default='MODEL_DATA', help='Name of C array')
parser.add_argument('--length_name', default='MODEL_DATA_LENGTH', help='Name of C variable to hold length of data in bytes')
parser.add_argument('--attributes', default=None, help='Attributes to prepend to C array variable')
parser.add_argument('--accelerator', default=None, help='Specific accelerator for which to generate model header')
args = parser.parse_args()
try:
generate_model_header(
model=args.model,
output=args.output,
variable_name=args.name,
variable_attributes=args.attributes,
length_variable_name=args.length_name,
accelerator=args.accelerator
)
except Exception as _ex:
cli.handle_exception('Failed to generate model header', _ex) | [
198,
11748,
28686,
198,
11748,
1822,
29572,
198,
11748,
33918,
198,
198,
6738,
285,
2528,
74,
13,
7295,
13,
19849,
1330,
3440,
62,
76,
2528,
74,
62,
19849,
198,
6738,
285,
2528,
74,
13,
7295,
13,
83,
2704,
578,
62,
19849,
1330,
309,... | 2.349776 | 2,007 |
import sys
from example import Circle
a = One(1)
b = Two(2)
print(sys.getsizeof(a))
print(sys.getsizeof(b))
| [
11748,
25064,
198,
6738,
1672,
1330,
16291,
628,
628,
198,
64,
796,
1881,
7,
16,
8,
198,
65,
796,
4930,
7,
17,
8,
198,
198,
4798,
7,
17597,
13,
11407,
1096,
1659,
7,
64,
4008,
198,
4798,
7,
17597,
13,
11407,
1096,
1659,
7,
65,
... | 2.404255 | 47 |
import numpy as np
from scipy.misc import comb
import functools
from itertools import combinations
from random import shuffle
import logging
CACHE_SIZE = 2**8 # 256
class WEAT(object):
"""Word Embedding Association Test"""
@functools.lru_cache(maxsize=CACHE_SIZE)
@functools.lru_cache(maxsize=CACHE_SIZE)
def cosine_sim_many(self, word_a, other_words):
"""Compare one word to many
NOTE* similarites will not be returned in the same order that were
passed into this function because they are sets... make sure you don't
do anything with the values returned in which order matters."""
return frozenset([self.gensim_cosine(word_a, word) for
word in other_words])
@functools.lru_cache(maxsize=CACHE_SIZE)
def mean_diff(self, A, B):
"""helper function for simple calculation of the
difference between means that can be cached."""
return sum(A)/len(A) - sum(B)/len(B)
def partitions_gen(self, target_X, target_Y):
"""Generator of unique partitions in which order of a subset
does not matter."""
targets = target_X.union(target_Y)
group_size = int(len(targets) / 2)
assert(len(target_X) == len(target_Y) == group_size)
# make sure we observe the given partition (need this for early
# stopping in the future)
observed = tuple(sorted(list(target_X)) + sorted(list(target_Y)))
seen = {observed}
yield observed
# now sample from the other partitions
t_list = list(targets)
if self.stopping_early:
# When stopping early it's important that the combinations we
# do yield aren't sorted and predictable as they are from
# itertools combinations... we need randomness to sample the
# full space of partitions better
seen_count = 1
for i in range(self.max_iters*2):
shuffle(t_list)
new_X = t_list[:group_size]
new_Y = t_list[group_size:]
new_partition = tuple(sorted(new_X)+sorted(new_Y))
if seen_count >= self.max_iters:
break
# as we see more we'll waste more time here and fail
# to yield the max_iters requested, that's why we do
# double the max_iters and keep track of num yielded
if new_partition in seen:
continue
seen.add(new_partition)
seen_count += 1
yield new_partition
else:
for c in combinations(t_list, group_size):
new_X = set(c)
new_Y = targets.difference(new_X)
new_partition = tuple(sorted(list(new_X))+sorted(list(new_Y)))
if new_partition in seen:
continue
seen.add(new_partition)
yield new_partition
def permutation_test_stat(self, target_X, target_Y, attr_A, attr_B,
skip_effect=True):
"""Calculates statistic for a specific permutation"""
x_assoc = 0
y_assoc = 0
diffs = [] # needed to calc std
for x, y in zip(target_X, target_Y):
x_sim_A = self.cosine_sim_many(x, attr_A)
x_sim_B = self.cosine_sim_many(x, attr_B)
x_diff = self.mean_diff(x_sim_A, x_sim_B)
x_assoc += x_diff
diffs.append(x_diff)
y_sim_A = self.cosine_sim_many(y, attr_A)
y_sim_B = self.cosine_sim_many(y, attr_B)
y_diff = self.mean_diff(y_sim_A, y_sim_B)
y_assoc += y_diff
diffs.append(y_diff)
test_stat = x_assoc - y_assoc
if skip_effect:
return test_stat
# Calculate Effect Size
std = np.std(diffs)
mean_assoc_X = x_assoc / len(target_X)
mean_assoc_Y = y_assoc / len(target_Y)
effect_size = (mean_assoc_X - mean_assoc_Y) / std
return test_stat, effect_size
def check_inputs(self, target_X, target_Y, attr_A, attr_B, max_iters):
"""Perform checks to make sure WEAT inputs adhere to the
constraints of the problem and that the permutation test
isn't intractable."""
targets = target_X.union(target_Y)
group_size = int(len(targets) / 2)
err = 'Target word sets must be of equal size and not have repeats.'
assert(len(target_X) == len(target_Y) == group_size), err
warn = ''
n_combs = int(comb(len(targets), group_size))
if max_iters is None:
max_iters = n_combs
if n_combs > max_iters:
self.stopping_early = True
self.max_iters = max_iters
warn += ''.join(['The P-Value returned may not be ',
'trustworthy because all combinations of ',
'target words will not be checked (max_iters',
'='+str(max_iters)+' is less than the ',
'{:,}'.format(n_combs)+' possible ',
'combinations)\n\n'])
A_LOT_OF_ITERS = 50000 # this takes about 30 sec on dev machine
if max_iters > A_LOT_OF_ITERS:
warn += ''.join(['Processing the ',
'{:,}'.format(max_iters)+' ',
'combinations of target words when ',
'calculating the P-Value may take a while.',
'\n\n'])
if warn == '':
warn = None
return max_iters, warn
def assert_vocab(self, target_X, target_Y):
"""Assert that the input target words are in the vocabulary
of the embeding, exclude terms that arent' and make sure
the two target sets are still balanced."""
oov_X = set([])
for term in target_X:
if not self.model.vocab.get(term):
oov_X.add(term)
oov_Y = set([])
for term in target_Y:
if not self.model.vocab.get(term):
oov_Y.add(term)
if len(oov_X.union(oov_Y)) == 0:
return target_X, target_Y
elif len(oov_X) == len(oov_Y):
pass
elif len(oov_X) > len(oov_Y):
# remove additional target_Y terms to balance the set
# FIXME - randomize?
delta = len(oov_X) - len(oov_Y)
for _ in range(delta):
excl = target_Y.difference(oov_Y).pop()
target_Y = target_Y.difference(set([excl]))
self.logger.warn(' '.join(['Target sets are unbalanced,',
'excluding:', str(excl),
'from target_Y']))
else:
delta = len(oov_Y) - len(oov_X)
for _ in range(delta):
excl = target_X.difference(oov_X).pop()
target_X = target_X.difference(set([excl]))
self.logger.warn(' '.join(['Target sets are unbalanced,',
'excluding:', str(excl),
'from target_X']))
self.logger.warn(' '.join(['Target words not in embedding vocabulary',
'are', 'being excluded: ', str(oov_X),
str(oov_Y)]))
if len(oov_X.union(oov_Y)) > min(len(target_X), len(target_Y)):
self.logger.info(' '.join(['More than half your target words are',
'out-of-vocabulary and will not be included in the test.']))
return target_X.difference(oov_X), target_Y.difference(oov_Y)
def perform_test(self, target_X, target_Y, attr_A, attr_B,
max_iters=50000):
"""Word Embedding Association Test"""
# in case embedding has changed since last test
self.cosine_sim_many.cache_clear()
attr_A = frozenset(attr_A)
attr_B = frozenset(attr_B)
# remove any input words not in the embedding vocab
target_X, target_Y = self.assert_vocab(target_X, target_Y)
max_iters, warns = self.check_inputs(target_X, target_Y, attr_A,
attr_B, max_iters)
if warns:
self.logger.warn(warns)
# Calculate observed test-statistic and effect size
T_obs, effect = self.permutation_test_stat(target_X, target_Y, attr_A,
attr_B, skip_effect=False)
# Now calculate test statistics for different groupings of target words
T_sampled = []
half = len(target_X)
assert(len(target_X) == len(target_Y))
# TODO: there are formulas to stop early and put estimates on the
# p-value, this will be necessary for large target lists.
for i, p in enumerate(self.partitions_gen(target_X, target_Y)):
targ_X = p[:half]
targ_Y = p[half:]
T_sampled.append(self.permutation_test_stat(targ_X, targ_Y,
attr_A, attr_B))
if i+1 == max_iters:
# self.logger.info('{:,} partitions processed.'.format(
# len(T_sampled)))
break
# total observation with statistic >= observed value
n = sum(t >= T_obs for t in T_sampled)
# p-value is how often the test statistic was >= observed
# in a random grouping of the target words
p_val = n / len(T_sampled)
return effect, p_val
| [
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
13,
44374,
1330,
1974,
198,
198,
11748,
1257,
310,
10141,
198,
6738,
340,
861,
10141,
1330,
17790,
198,
6738,
4738,
1330,
36273,
198,
11748,
18931,
198,
198,
34,
2246,
13909,
62,
... | 1.968122 | 4,925 |
import numpy as np
import matplotlib.pyplot as plt
'data building'
x=np.linspace(-5,5,20)
y=x**2
z=5*np.sin(x)
'plotting'
#make plots
#plt figure is a good way to return and number (or name) separate figures
plt.figure(2)
plt.plot(x,y,'k',label = 'black')
plt.plot(x,z,'b',label = 'blue')
plt.legend(title='line colors')
plt.figure(4)
plt.plot(x,z,'bo')
plt.xlabel(r'$\phi$ / $\infty$')
plt.ylabel('${E^2}\div{c^4}$ / $kg^2$ ')
plt.figure('Another Figure')
plt.title('Title here')
plt.plot(x,y,'black',marker='^')
plt.xlabel('x')
plt.ylabel('y')
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
1549,
1045,
2615,
6,
198,
87,
28,
37659,
13,
21602,
10223,
32590,
20,
11,
20,
11,
1238,
8,
198,
88,
28,
87,
1174,
17,
198,
89,
... | 2.10566 | 265 |
# noinspection PyUnresolvedReferences
from aws_cfn_ses_domain import lambda_handler
| [
2,
645,
1040,
14978,
9485,
3118,
411,
5634,
19927,
198,
6738,
3253,
82,
62,
12993,
77,
62,
8448,
62,
27830,
1330,
37456,
62,
30281,
198
] | 3.36 | 25 |
from typing import NamedTuple, Iterator, Tuple, Dict, List
import re
from collections import Counter
rgx = "#([0-9]+) @ ([0-9]+),([0-9]+): ([0-9]+)x([0-9]+)"
Coord = Tuple[int, int]
# assert Rectangle.from_claim("#123 @ 3,2: 5x4") == Rectangle(123,3, 2,8, 6)
# print(Rectangle.all_squares("#123 @ 3,2: 5x4"))
with open("data/day03.txt") as f:
claims = [line.strip() for line in f]
print(multi_claimed(claims))
print(non_overalapping_claim(claims))
| [
6738,
19720,
1330,
34441,
51,
29291,
11,
40806,
1352,
11,
309,
29291,
11,
360,
713,
11,
7343,
198,
11748,
302,
198,
6738,
17268,
1330,
15034,
198,
198,
41345,
87,
796,
25113,
26933,
15,
12,
24,
60,
28988,
2488,
29565,
15,
12,
24,
48... | 2.387755 | 196 |
from typing import TypeVar, Callable, List, Tuple, Optional, Iterable
from random import randint
from datetime import datetime, timezone, timedelta
from sqlalchemy.exc import IntegrityError
from sqlalchemy.sql.expression import func
from sqlalchemy.orm import exc, joinedload
from swpt_creditors.extensions import db
from swpt_creditors.models import AgentConfig, Creditor, LogEntry, PendingLogEntry, PinInfo, Account, \
RunningTransfer, MAX_INT64
from .common import get_paths_and_types
from . import errors
T = TypeVar('T')
atomic: Callable[[T], T] = db.atomic
ACTIVATION_STATUS_MASK = Creditor.STATUS_IS_ACTIVATED_FLAG | Creditor.STATUS_IS_DEACTIVATED_FLAG
LOG_ENTRY_NONE_AUX_FIELDS_EXCLUDED_TYPE_HINT = {
attr: None for attr in LogEntry.AUX_FIELDS if attr != 'object_type_hint'
}
LOG_ENTRY_NONE_DATA_FIELDS = {
attr: None for attr in LogEntry.DATA_FIELDS
}
@atomic
@atomic
@atomic
@atomic
@atomic
@atomic
@atomic
@atomic
@atomic
@atomic
@atomic
@atomic
@atomic
| [
6738,
19720,
1330,
5994,
19852,
11,
4889,
540,
11,
7343,
11,
309,
29291,
11,
32233,
11,
40806,
540,
198,
6738,
4738,
1330,
43720,
600,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
640,
11340,
11,
28805,
12514,
198,
6738,
44161,
282,
2... | 2.775956 | 366 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Dudendown
This script takes a german word as a commandline input and returns its meaning
overview, as parsed from the dictionary on the website `www.duden.de`.
Words with non-ascii characters should be given using following
transliteration:
* รค -> ae
* รถ -> oe
* รผ -> ue
* ร -> sz
"""
import argparse
import copy
import gettext
import os
import sys
from itertools import cycle
import bs4
import requests
from crayons import blue, red, yellow, white
from .common import (recursively_extract, print_tree_of_strings,
clear_text, print_string_or_list)
from .__version__ import __version__
URL_FORM = 'http://www.duden.de/rechtschreibung/{word}'
SEARCH_URL_FORM = 'http://www.duden.de/suchen/dudenonline/{word}'
# grammar forms constants
SINGULAR = 'Singular'
PLURAL = 'Plural'
PRASENS = 'Prรคsens'
PRATERITUM = 'Prรคteritum'
INDIKATIV = 'Indikativ'
IMPERATIV = 'Imperativ'
KONJUKTIV_1 = 'Konjunktiv I'
KONJUKTIV_2 = 'Konjunktiv II'
PARTIZIP_1 = 'Partizip I'
PARTIZIP_2 = 'Partizip II'
INFINITIV_MIT_ZU = 'Infinitiv mit zu'
PERSON_1 = 'Person I'
PERSON_2 = 'Person II'
PERSON_3 = 'Person III'
NOMINATIV = 'Nominativ'
GENITIV = 'Genitiv'
DATIV = 'Dativ'
AKKUSATIV = 'Akkusativ'
gettext.install('duden', os.path.join(os.path.dirname(__file__), 'locale'))
def get(word):
"""
Load the word 'word' and return the DudenWord instance
"""
url = URL_FORM.format(word=word)
try:
response = requests.get(url)
except requests.exceptions.ConnectionError:
raise Exception(_("Connection could not be established. "
"Check your internet connection."))
code = response.status_code
if code == 200:
soup = bs4.BeautifulSoup(response.text, 'html.parser')
elif code == 404:
# non-existent word
return None
else:
raise Exception(
_("Unexpected HTTP response status code {}").format(code))
return load_soup(soup)
def load_soup(soup):
"""
Load the DudenWord instance using a BeautifulSoup object
"""
return DudenWord(soup)
def get_search_link_variants(link_text):
"""
Lists possible interpretations of link text on search page.
Used for determining whether a search page entry matches the search term.
"""
return clear_text(link_text).split(', ')
def search(word, exact=True, return_words=True):
"""
Search for a word 'word' in duden
"""
url = SEARCH_URL_FORM.format(word=word)
response = requests.get(url)
soup = bs4.BeautifulSoup(response.text, 'html.parser')
main_sec = soup.find('section', id='block-duden-tiles-0')
if main_sec is None:
return []
a_tags = [h2.a for h2 in main_sec.find_all('h2')]
urlnames = [a['href'].split('/')[-1]
for a in a_tags
if (not exact) or word in get_search_link_variants(a.text)]
if return_words:
return [get(urlname) for urlname in urlnames]
else:
return urlnames
def parse_args():
"""
Parse CLI arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument('word')
parser.add_argument('--title', action='store_true',
help=_('display word and article'))
parser.add_argument('--name', action='store_true',
help=_('display the word itself'))
parser.add_argument('--article', action='store_true',
help=_('display article'))
parser.add_argument('--part-of-speech', action='store_true',
help=_('display part of speech'))
parser.add_argument('--frequency', action='store_true',
help=_('display commonness (1 to 5)'))
parser.add_argument('--usage', action='store_true',
help=_('display context of use'))
parser.add_argument('--word-separation', action='store_true',
help=_('display proper separation (line separated)'))
parser.add_argument('--meaning-overview', action='store_true',
help=_('display meaning overview'))
parser.add_argument('--synonyms', action='store_true',
help=_('list synonyms (line separated)'))
parser.add_argument('--origin', action='store_true',
help=_('display origin'))
parser.add_argument('--compounds', nargs='?', const='ALL',
help=_('list common compounds'))
parser.add_argument('-g', '--grammar', nargs='?', const='ALL',
help=_('list grammar forms'))
parser.add_argument('-r', '--result', type=int,
help=_('display n-th (starting from 1) result in case '
'of multiple words matching the input'))
parser.add_argument('--fuzzy', action='store_true',
help=_('enable fuzzy word matching'))
parser.add_argument('-V', '--version', action='store_true',
help=_('print program version'))
return parser.parse_args()
def main():
"""
Take the first CLI argument and describe the corresponding word
"""
# handle the --version switch
if '--version' in sys.argv:
print('duden ' + __version__)
sys.exit(0)
# parse normal arguments
args = parse_args()
# search all words matching the string
words = search(args.word, return_words=False, exact=not args.fuzzy)
# exit if the word wasn't found
if not words:
print(red(_("Word '{}' not found")).format(args.word))
sys.exit(1)
# list the options when there is more than one matching word
if len(words) > 1 and args.result is None:
print(_('Found {} matching words. Use the -r/--result argument to '
'specify which one to display.').format(white(len(words),
bold=True)))
for i, word in enumerate(words, 1):
print('{} {}'.format(blue('{})'.format(i)), word))
sys.exit(1)
result_index = args.result if args.result is not None else 1
# choose the correct result
try:
word_url_suffix = words[result_index - 1]
except IndexError:
print(red(_("No result with number {}.")).format(result_index))
sys.exit(1)
# fetch and parse the word
try:
word = get(word_url_suffix)
except Exception as exception:
print(red(exception))
sys.exit(1)
display_word(word, args)
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
35,
463,
437,
593,
198,
198,
1212,
4226,
2753,
257,
308,
2224,
1573,
355,
257,
3141,
1370,
5128,
290,... | 2.332508 | 2,827 |
import configparser
import io
import sys
from imperfect import parse_string
if __name__ == "__main__": # pragma: no cover
for f in sys.argv[1:]:
verify(f)
| [
11748,
4566,
48610,
198,
11748,
33245,
198,
11748,
25064,
198,
198,
6738,
23162,
1330,
21136,
62,
8841,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
220,
1303,
23864,
2611,
25,
645,
3002,
198,
220,
220,
220,
... | 2.646154 | 65 |
# Generated by Django 3.0.1 on 2020-01-11 22:16
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
16,
319,
12131,
12,
486,
12,
1157,
2534,
25,
1433,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
# generateContCat.py - generate continuum catalog
#
# Usage:
#
# > python generateContCat.py <catalog name>
#
if __name__=="__main__":
import sys
import pdb
generateContCat(str(sys.argv[1]).strip())
| [
2,
7716,
4264,
21979,
13,
9078,
532,
7716,
44422,
18388,
198,
2,
198,
2,
29566,
25,
220,
220,
198,
2,
198,
2,
1875,
21015,
7716,
4264,
21979,
13,
9078,
1279,
9246,
11794,
1438,
29,
198,
2,
628,
198,
361,
11593,
3672,
834,
855,
1,
... | 2.654321 | 81 |
from django.db.models import Q
from django import forms
from watson import search as watson
import django_filters
from med_social.filters import BaseFilter
from divisions.models import Division
from .models import Vendor, ProcurementContact
| [
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
1195,
198,
6738,
42625,
14208,
1330,
5107,
198,
198,
6738,
266,
13506,
1330,
2989,
355,
266,
13506,
198,
11748,
42625,
14208,
62,
10379,
1010,
198,
198,
6738,
1117,
62,
14557,
13,
10379,
10... | 3.815385 | 65 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: dataset.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='dataset.proto',
package='matteautils',
syntax='proto2',
serialized_pb=_b('\n\rdataset.proto\x12\x0bmatteautils\"\x8a\x04\n\x07\x44\x61taset\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\x05\x12.\n\x04type\x18\x03 \x01(\x0e\x32 .matteautils.Dataset.DatasetType\x12\x36\n\tfragments\x18\x04 \x03(\x0b\x32#.matteautils.Dataset.FragmentsEntry\x12\x32\n\x07matches\x18\x05 \x03(\x0b\x32!.matteautils.Dataset.MatchesEntry\x12\x34\n\x08matchmap\x18\x06 \x03(\x0b\x32\".matteautils.Dataset.MatchmapEntry\x1aK\n\x0e\x46ragmentsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.matteautils.TextFragment:\x02\x38\x01\x1a\x42\n\x0cMatchesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.matteautils.Match:\x02\x38\x01\x1a/\n\rMatchmapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"Q\n\x0b\x44\x61tasetType\x12\x08\n\x04MSRP\x10\x00\x12\x0c\n\x08ONECLICK\x10\x01\x12\x0f\n\x0bMOBILECLICK\x10\x02\x12\x19\n\x15TEMPORALSUMMARIZATION\x10\x03\"\xda\x01\n\x0cTextFragment\x12\n\n\x02id\x18\x01 \x01(\t\x12\x12\n\nsentenceid\x18\x02 \x01(\x05\x12\r\n\x05\x64ocid\x18\x03 \x01(\t\x12\x0f\n\x07queryid\x18\x04 \x01(\t\x12\x0c\n\x04text\x18\x05 \x01(\t\x12\x38\n\x04type\x18\x06 \x01(\x0e\x32*.matteautils.TextFragment.TextFragmentType\x12\x12\n\x06vector\x18\x07 \x03(\x02\x42\x02\x10\x01\".\n\x10TextFragmentType\x12\x0c\n\x08OBSERVED\x10\x00\x12\x0c\n\x08\x45XPECTED\x10\x01\"m\n\x05Match\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0e\n\x06source\x18\x02 \x01(\t\x12\x0e\n\x06target\x18\x03 \x01(\t\x12\x0f\n\x07queryid\x18\x04 \x01(\x02\x12\r\n\x05score\x18\x05 \x01(\x02\x12\x18\n\x10normalized_score\x18\x06 \x01(\x02')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_DATASET_DATASETTYPE = _descriptor.EnumDescriptor(
name='DatasetType',
full_name='matteautils.Dataset.DatasetType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MSRP', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ONECLICK', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MOBILECLICK', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TEMPORALSUMMARIZATION', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=472,
serialized_end=553,
)
_sym_db.RegisterEnumDescriptor(_DATASET_DATASETTYPE)
_TEXTFRAGMENT_TEXTFRAGMENTTYPE = _descriptor.EnumDescriptor(
name='TextFragmentType',
full_name='matteautils.TextFragment.TextFragmentType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='OBSERVED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EXPECTED', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=728,
serialized_end=774,
)
_sym_db.RegisterEnumDescriptor(_TEXTFRAGMENT_TEXTFRAGMENTTYPE)
_DATASET_FRAGMENTSENTRY = _descriptor.Descriptor(
name='FragmentsEntry',
full_name='matteautils.Dataset.FragmentsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='matteautils.Dataset.FragmentsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='matteautils.Dataset.FragmentsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=278,
serialized_end=353,
)
_DATASET_MATCHESENTRY = _descriptor.Descriptor(
name='MatchesEntry',
full_name='matteautils.Dataset.MatchesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='matteautils.Dataset.MatchesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='matteautils.Dataset.MatchesEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=355,
serialized_end=421,
)
_DATASET_MATCHMAPENTRY = _descriptor.Descriptor(
name='MatchmapEntry',
full_name='matteautils.Dataset.MatchmapEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='matteautils.Dataset.MatchmapEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='matteautils.Dataset.MatchmapEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=423,
serialized_end=470,
)
_DATASET = _descriptor.Descriptor(
name='Dataset',
full_name='matteautils.Dataset',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='matteautils.Dataset.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='id', full_name='matteautils.Dataset.id', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='matteautils.Dataset.type', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='fragments', full_name='matteautils.Dataset.fragments', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='matches', full_name='matteautils.Dataset.matches', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='matchmap', full_name='matteautils.Dataset.matchmap', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_DATASET_FRAGMENTSENTRY, _DATASET_MATCHESENTRY, _DATASET_MATCHMAPENTRY, ],
enum_types=[
_DATASET_DATASETTYPE,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=31,
serialized_end=553,
)
_TEXTFRAGMENT = _descriptor.Descriptor(
name='TextFragment',
full_name='matteautils.TextFragment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='matteautils.TextFragment.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sentenceid', full_name='matteautils.TextFragment.sentenceid', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='docid', full_name='matteautils.TextFragment.docid', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='queryid', full_name='matteautils.TextFragment.queryid', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='text', full_name='matteautils.TextFragment.text', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='matteautils.TextFragment.type', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='vector', full_name='matteautils.TextFragment.vector', index=6,
number=7, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
],
extensions=[
],
nested_types=[],
enum_types=[
_TEXTFRAGMENT_TEXTFRAGMENTTYPE,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=556,
serialized_end=774,
)
_MATCH = _descriptor.Descriptor(
name='Match',
full_name='matteautils.Match',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='matteautils.Match.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='source', full_name='matteautils.Match.source', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='target', full_name='matteautils.Match.target', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='queryid', full_name='matteautils.Match.queryid', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='score', full_name='matteautils.Match.score', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='normalized_score', full_name='matteautils.Match.normalized_score', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=776,
serialized_end=885,
)
_DATASET_FRAGMENTSENTRY.fields_by_name['value'].message_type = _TEXTFRAGMENT
_DATASET_FRAGMENTSENTRY.containing_type = _DATASET
_DATASET_MATCHESENTRY.fields_by_name['value'].message_type = _MATCH
_DATASET_MATCHESENTRY.containing_type = _DATASET
_DATASET_MATCHMAPENTRY.containing_type = _DATASET
_DATASET.fields_by_name['type'].enum_type = _DATASET_DATASETTYPE
_DATASET.fields_by_name['fragments'].message_type = _DATASET_FRAGMENTSENTRY
_DATASET.fields_by_name['matches'].message_type = _DATASET_MATCHESENTRY
_DATASET.fields_by_name['matchmap'].message_type = _DATASET_MATCHMAPENTRY
_DATASET_DATASETTYPE.containing_type = _DATASET
_TEXTFRAGMENT.fields_by_name['type'].enum_type = _TEXTFRAGMENT_TEXTFRAGMENTTYPE
_TEXTFRAGMENT_TEXTFRAGMENTTYPE.containing_type = _TEXTFRAGMENT
DESCRIPTOR.message_types_by_name['Dataset'] = _DATASET
DESCRIPTOR.message_types_by_name['TextFragment'] = _TEXTFRAGMENT
DESCRIPTOR.message_types_by_name['Match'] = _MATCH
Dataset = _reflection.GeneratedProtocolMessageType('Dataset', (_message.Message,), dict(
FragmentsEntry = _reflection.GeneratedProtocolMessageType('FragmentsEntry', (_message.Message,), dict(
DESCRIPTOR = _DATASET_FRAGMENTSENTRY,
__module__ = 'dataset_pb2'
# @@protoc_insertion_point(class_scope:matteautils.Dataset.FragmentsEntry)
))
,
MatchesEntry = _reflection.GeneratedProtocolMessageType('MatchesEntry', (_message.Message,), dict(
DESCRIPTOR = _DATASET_MATCHESENTRY,
__module__ = 'dataset_pb2'
# @@protoc_insertion_point(class_scope:matteautils.Dataset.MatchesEntry)
))
,
MatchmapEntry = _reflection.GeneratedProtocolMessageType('MatchmapEntry', (_message.Message,), dict(
DESCRIPTOR = _DATASET_MATCHMAPENTRY,
__module__ = 'dataset_pb2'
# @@protoc_insertion_point(class_scope:matteautils.Dataset.MatchmapEntry)
))
,
DESCRIPTOR = _DATASET,
__module__ = 'dataset_pb2'
# @@protoc_insertion_point(class_scope:matteautils.Dataset)
))
_sym_db.RegisterMessage(Dataset)
_sym_db.RegisterMessage(Dataset.FragmentsEntry)
_sym_db.RegisterMessage(Dataset.MatchesEntry)
_sym_db.RegisterMessage(Dataset.MatchmapEntry)
TextFragment = _reflection.GeneratedProtocolMessageType('TextFragment', (_message.Message,), dict(
DESCRIPTOR = _TEXTFRAGMENT,
__module__ = 'dataset_pb2'
# @@protoc_insertion_point(class_scope:matteautils.TextFragment)
))
_sym_db.RegisterMessage(TextFragment)
Match = _reflection.GeneratedProtocolMessageType('Match', (_message.Message,), dict(
DESCRIPTOR = _MATCH,
__module__ = 'dataset_pb2'
# @@protoc_insertion_point(class_scope:matteautils.Match)
))
_sym_db.RegisterMessage(Match)
_DATASET_FRAGMENTSENTRY.has_options = True
_DATASET_FRAGMENTSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_DATASET_MATCHESENTRY.has_options = True
_DATASET_MATCHESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_DATASET_MATCHMAPENTRY.has_options = True
_DATASET_MATCHMAPENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_TEXTFRAGMENT.fields_by_name['vector'].has_options = True
_TEXTFRAGMENT.fields_by_name['vector']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
# @@protoc_insertion_point(module_scope)
| [
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
27039,
13,
1676,
1462,
198,
198,
11748,
25064,
198,
62,
65,
28,
17597,
13,
9641,
62,
10951,
58,
15,
60,
27,
18,
290,
357,
50033,
2124,
... | 2.294236 | 7,980 |
# Under MIT License, see LICENSE.txt
"""
Module intelligent contenant l'implementation d'un Rapidly exploring Random
Tree. Le module contient une classe qui peut รชtre instanciรฉe et qui calcule
les trajectoires des robots de l'รฉquipe. Les dรฉtails de l'algorithme sont
disponibles sur la page wikipedia. Code original http://myenigma.hatenablog.com
/entry/2016/03/23/092002
"""
# FIXME IMPORT!
import random
import math
import copy
import time
import socket
import pickle
from RULEngine.Util.Pose import Pose
from RULEngine.Util.Position import Position
from RULEngine.Util.constant import POSITION_DEADZONE
from ai.Algorithm.IntelligentModule import Pathfinder
from ai.Debug.debug_interface import COLOR_ID_MAP, DEFAULT_PATH_TIMEOUT
OBSTACLE_DEAD_ZONE = 700
TIME_TO_UPDATE = 1
class PathfinderRRT(Pathfinder):
"""
La classe hรฉrite de IntelligentModule pour dรฉfinir sa propriรฉtรฉ state.
L'interface expose une mรฉthode qui force le calcul de toutes les
trajectoires. Celles-ci sont enregistrรฉs par effet de bords dans le
GameState.
Une mรฉthode permet de rรฉcupรฉrer la trajectoire d'un robot spรฉcifique.
"""
def __init__(self, p_worldstate):
"""
Constructeur, appel le constructeur de la classe mรจre pour assigner
la rรฉfรฉrence sur l'InfoManager.
:param info_manager: rรฉfรฉrence sur l'InfoManager
"""
super().__init__(p_worldstate)
self.paths = {}
for i in range(6):
self.paths[i] = []
self.last_timestamp = self.ws.game_state.get_timestamp()
# Pour รชtre conforme ร la nouvelle interface ร รชtre changรฉ
# รฉventuellement mgl 2016/12/23
# TODO(mgl): change this please!
def get_path(self, pid=None, target=None):
"""
Retourne la trajectoire du robot.
:param pid: Identifiant du robot, 0 ร 5.
:return: Une liste de Pose, [Pose]
"""
assert(isinstance(pid, int)), "Un pid doit รชtre passรฉ"
assert(isinstance(target, Pose)), "La cible doit รชtre une Pose"
return self._compute_path(pid, target)
def _compute_path(self, pid, target):
"""
Cette mรฉthode calcul la trajectoire pour un robot.
:param pid: L'identifiant du robot, 0 ร 5.
:return: None
"""
# TODO mettre les buts dans les obstacles
list_of_pid = list(range(6))
list_of_other_team_pid = list(range(6))
list_of_pid.remove(pid)
obstacleList = []
for other_pid in list_of_pid:
# TODO info manager changer get_player_position
position = self.ws.game_state.get_player_pose(other_pid).position
obstacleList.append([position.x, position.y, OBSTACLE_DEAD_ZONE])
initial_position_of_main_player = self.ws.game_state.get_player_pose(pid).position
for pid in list_of_other_team_pid:
position = self.ws.game_state.get_player_pose(pid,False).position
obstacleList.append([position.x, position.y, OBSTACLE_DEAD_ZONE])
target_position_of_player = target.position
target_orientation_of_player = target.orientation
assert(isinstance(target_position_of_player, Position)), "La cible du joueur doit รชtre une Position"
try :
target_position_of_player.x
target_position_of_player.y
except AttributeError:
target_position_of_player = self.ws.game_state.get_player_pose(pid).position
rrt = RRT(start=[initial_position_of_main_player.x,
initial_position_of_main_player.y],
goal=[target_position_of_player.x, target_position_of_player.y],
obstacleList=obstacleList,
# TODO Vรฉrifier si le robot peut sortir du terrain
rand_area=[-4500, 4500],
expand_dis=get_expand_dis([initial_position_of_main_player.x,
initial_position_of_main_player.y],
[target_position_of_player.x, target_position_of_player.y]),
goal_sample_rate=get_goal_sample_rate([initial_position_of_main_player.x,
initial_position_of_main_player.y],
[target_position_of_player.x, target_position_of_player.y]))
not_smoothed_path = rrt.planning(obstacleList)
# Path smoothing
maxIter = 100
# Il faut inverser la liste du chemin lissรฉ tout en retirant le point de dรฉpart
smoothed_path = path_smoothing(not_smoothed_path, maxIter, obstacleList)
smoothed_path = list(reversed(smoothed_path[:-1]))
return self._smoothed_path_to_pose_list(smoothed_path, target_orientation_of_player)
class RRT():
"""
Classe principale du pathfinder, contient les fonctions principales
permettant de gรฉnรฉrer le path.
"""
def __init__(self, start, goal, obstacleList, rand_area, expand_dis, goal_sample_rate, max_iteration=50):
"""
Setting Parameter
start: Position de dรฉpart [x,y]
goal: Destination [x,y]
obstacleList: Position et taille des obstacles [[x,y,size],...]
randArea: Ramdom Samping Area [min,max]
expand_dis : Longueur des arรชtes
goal_sample_rate : Probabilitรฉ d'obtenir directement le goal comme position.
Amรฉliore la vitesse du RRT
max_iteration : Nombre d'itรฉration du path smoother
"""
self.start = Node(start[0], start[1])
self.end = Node(goal[0], goal[1])
self.minrand = rand_area[0]
self.maxrand = rand_area[1]
self.expand_dis = expand_dis
self.goal_sample_rate = goal_sample_rate
self.max_iteration = max_iteration
def planning(self, obstacleList):
"""Fonction qui s'occupe de faire le path"""
initial_time = time.time()
self.node_list = [self.start]
#TODO changer le gros hack degueux pour la gestion de la loop infinie
while True and time.time()-initial_time < TIME_TO_UPDATE:
# Random Sampling
if random.randint(0, 100) > self.goal_sample_rate:
random_coordinates = [random.uniform(self.minrand, self.maxrand), random.uniform(self.minrand, self.maxrand)]
else:
random_coordinates = [self.end.x, self.end.y]
# Find nearest node
nind = self.get_nearest_list_index(self.node_list, random_coordinates)
# print(nind)
# expand tree
nearest_node = self.node_list[nind]
theta = math.atan2(random_coordinates[1] - nearest_node.y, random_coordinates[0] - nearest_node.x)
new_node = copy.deepcopy(nearest_node)
new_node.x += self.expand_dis * math.cos(theta)
new_node.y += self.expand_dis * math.sin(theta)
new_node.parent = nind
if not self.__collision_check(new_node, obstacleList):
continue
self.node_list.append(new_node)
# check goal
dx = new_node.x - self.end.x
dy = new_node.y - self.end.y
d = math.sqrt(dx * dx + dy * dy)
if d <= self.expand_dis:
break
path = [[self.end.x, self.end.y]]
last_index = len(self.node_list) - 1
while self.node_list[last_index].parent is not None:
node = self.node_list[last_index]
path.append([node.x, node.y])
last_index = node.parent
path.append([self.start.x, self.start.y])
# TODO fix gros hack sale
if time.time()-initial_time >=1 :
path = [[self.start.x, self.start.y],[self.start.x, self.start.y]]
return path
def __collision_check(self, node, obstacleList):
""" Permet de vรฉrifier si le chemin passe ร travers un obstacle"""
for (ox, oy, size) in obstacleList:
dx = ox - node.x
dy = oy - node.y
d = math.sqrt(dx * dx + dy * dy)
if d <= size:
return False # collision
return True # safe
class Node():
"""
RRT Node
"""
def get_expand_dis(start, goal):
"""Modifie la distance entre 2 noeuds selon la distance entre le dรฉpart et le but.
Utile pour la prรฉcision et les performances."""
try :
dx = goal[0]-start[0]
dy = goal[1]-start[1]
d = math.sqrt(dx * dx + dy * dy)
# TODO voir comment on regle รงa
except TypeError:
d = 0
if d < 600 :
expand_dis = d/2
else :
expand_dis = 300
return expand_dis
def get_goal_sample_rate(start, goal):
"""Modifie la probabilitรฉ d'obtenir directement le but comme point selon la distance entre le dรฉpart et le but.
Utile pour la prรฉcision et les performances."""
try :
dx = goal[0]-start[0]
dy = goal[1]-start[1]
d = math.sqrt(dx * dx + dy * dy)
except TypeError:
goal_sample_rate = 5
return goal_sample_rate
if d < 600 :
goal_sample_rate = (10-d/140)**2
else :
goal_sample_rate = 30
return goal_sample_rate
def get_path_length(path):
"""Donne la longueur du trajet"""
path_length = 0
try :
for i in range(len(path) - 1):
dx = path[i + 1][0] - path[i][0]
dy = path[i + 1][1] - path[i][1]
d = math.sqrt(dx * dx + dy * dy)
path_length += d
except TypeError:
pass
return path_length
def line_collision_check(first, second, obstacleList):
"""
Vรฉrifie si la ligne entre 2 noeuds entre en collision avec un obstacle.
"""
# Line Equation
x1 = first[0]
y1 = first[1]
x2 = second[0]
y2 = second[1]
try:
a = y2-y1
b = -(x2-x1)
c = y2 * (x2-x1) - x2 * (y2-y1)
except ZeroDivisionError:
return False
# print(first)
# print(second)
for (ox, oy, size) in obstacleList:
d = abs(a*ox+b*oy+c)/(math.sqrt(a*a+b*b))
# print((ox,oy,size,d))
if d <= (size):
# print("NG")
return False
# print("OK")
return True # OK
def path_smoothing(path, maxIter, obstacleList):
# Elle ralentit lรฉgรจrement le tout, voir si amรฉliorable
"""Permet de rendre le trajet obtenu avec le RRT plus lisse"""
# print("PathSmoothing")
path_length = get_path_length(path)
for i in range(maxIter):
# Sample two points
pick_points = [random.uniform(0, path_length), random.uniform(0, path_length)]
pick_points.sort()
# print(pick_points)
first = get_target_point(path, pick_points[0])
# print(first)
second = get_target_point(path, pick_points[1])
# print(second)
if first[2] <= 0 or second[2] <= 0:
continue
if (second[2]+1) > len(path):
continue
if second[2] == first[2]:
continue
# collision check
if not line_collision_check(first, second, obstacleList):
continue
#Create New path
new_path = []
new_path.extend(path[:first[2]+1])
new_path.append([first[0], first[1]])
new_path.append([second[0], second[1]])
new_path.extend(path[second[2]+1:])
path = new_path
path_length = get_path_length(path)
return path
# taille terrain = 9000 x 6000
| [
2,
4698,
17168,
13789,
11,
766,
38559,
24290,
13,
14116,
198,
37811,
198,
220,
220,
220,
19937,
12661,
542,
268,
415,
300,
6,
320,
32851,
288,
6,
403,
26430,
306,
13504,
14534,
198,
220,
220,
220,
12200,
13,
1004,
8265,
542,
1153,
1... | 2.139827 | 5,421 |
#
# Copyright 2014-2015 Peter Bittner <django@bittner.it>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generic Django settings manipulation utilities, used by organice-setup script.
"""
import os
import re
class DjangoModuleManager(object):
"""
Utility class to modify and write files in a Python module.
"""
def __init__(self, projectname, *modulename):
"""Constructor, computes and creates physical base path for module"""
self.__path = os.path.join(projectname, *modulename)
self.__file = {}
self.__data = {}
if not os.path.exists(self.__path):
os.makedirs(self.__path)
def add_file(self, module, data=None, lines=None):
"""
Add a Python file (identified by its module name) in the module.
If the related .py file doesn't exist an empty file is created.
"""
thefile = open(os.path.join(self.__path, module + '.py'), 'a+')
thefile.seek(0) # needed in `Python 3 <http://bugs.python.org/issue22651>`_
self.__file[module] = thefile
self.__data[module] = '' if data or lines else thefile.read()
if data:
self.set_data(module, data)
if lines:
self.append_lines(module, *lines)
def get_file(self, module):
"""Return the file object for a module file"""
return self.__file[module]
def save_files(self):
"""Write all changes to disk"""
for module, thefile in self.__file.items():
data = self.__data[module]
thefile.seek(0)
thefile.truncate()
thefile.write(data)
def get_data(self, module):
"""Return the data contained in the module file"""
return self.__data[module]
def set_data(self, module, data):
"""Set the data contained in the module file"""
self.__data[module] = data
def append_data(self, module, chunk):
"""Append a chunk of data to the module file"""
self.__data[module] += chunk
def append_lines(self, module, *lines):
"""Append lines of text to the module file"""
if len(self.__data[module]) > 0:
self.append_data(module, os.linesep)
for data in lines:
self.append_data(module, data + os.linesep)
def remove_line(self, module, line):
"""Remove a matching line of text from the module file"""
self.replace_line(module, line, None)
def replace_line(self, module, old, new):
"""Replace a matching line of text by some new text in the module file"""
self.__data[module] = \
self.__data[module].replace(old + os.linesep,
new + os.linesep if new else '')
class DjangoSettingsManager(DjangoModuleManager):
"""
Utility class which allows moving and copying variables in-between several
settings files in the project's ``settings/`` folder.
"""
DELIMITERS = {
'(': ')',
'[': ']',
'{': '}',
}
REGEX_DELIMS = {
'"""': (r'"""', r'"""'),
"'''": (r"'''", r"'''"),
'"': (r'"', r'"'),
"'": (r"'", r"'"),
'(': (r'\(', r'\)'),
'[': (r'\[', r'\]'),
'{': (r'\{', r'\}'),
}
NO_MATCH = (0, 0)
def __init__(self, projectname, *filenames):
"""Constructor, adds settings files (named without path and extension)"""
super(DjangoSettingsManager, self).__init__(projectname, 'settings')
for module in filenames:
self.add_file(module)
@staticmethod
def find_block(self, src, settings_path):
"""
Return (start, stop) position of a match, or NO_MATCH i.e. (0, 0).
A match is a value block of a certain data type (usually a list or a
tuple), excluding its opening and closing delimiter. Assumes clean
indentation (4 spaces) for each level, starting at level 0.
"""
data = self.get_data(src)
return DjangoSettingsManager._find_block(data, settings_path)
@staticmethod
def find_var(self, src, var, comments=True):
"""
Return (start, stop) position of a match, or NO_MATCH i.e. (0, 0).
A match is a variable including optional leading comment lines. If
comments is set to False the match strictly starts with the variable.
"""
data = self.get_data(src)
# variable incl. leading comments, until after trailing equal sign
# and optional line continuation mark (backslash)
re_comments = r'(?<=\n)\s*([ ]*#.*\n)*[ ]*' if comments else ''
re_variable = r'(\A|\b)' + var + r'\s*=\s*\\?\s*'
pattern = re.compile(re_comments + re_variable)
m = pattern.search(data)
if m is None:
return self.NO_MATCH
start, stop = m.span()
stop = self.__find_endofvalue(data, stop)
return start, stop
def __find_endofvalue(self, data, start):
"""
Identify value type (str, tuple, list, dict) and return end index.
"""
delim = data[start:start + 3]
if delim != '"""' and delim != "'''":
delim = delim[0]
delim_length = len(delim)
stop = start + delim_length
try:
open_delim, close_delim = self.REGEX_DELIMS[delim]
# TODO: ignore matches in comments and strings
open_pattern = re.compile(open_delim)
close_pattern = re.compile(close_delim + r'[ ]*,?[ ]*\n?')
open_count = 1
while open_count > 0:
close_match = close_pattern.search(data, stop)
if close_match:
open_count -= 1
cm_start, stop = close_match.span()
else:
raise SyntaxError('Closing delimiter missing for %s' % delim)
open_matches = open_pattern.findall(data, start + delim_length, cm_start)
start = stop + delim_length
open_count += len(open_matches)
except KeyError:
# expression (e.g. variable) found instead of opening delimiter
pattern = re.compile(r'(\n|\Z)')
m = pattern.search(data, stop)
# NOTE: no test on m needed, \Z will always match
ignore, stop = m.span()
return stop
def prepend_to_list(self, dest, settings_path, *items):
"""Add one or more list items to the beginning of a list identified by a hierarchy"""
start, stop = self.find_block(dest, settings_path)
chunk = self._build_list_items(start == stop, settings_path, *items)
self.__insert(dest,
start + len(os.linesep),
start + len(os.linesep),
chunk)
def append_to_list(self, dest, settings_path, *items):
"""Append one or more list items to a list identified by a hierarchy"""
start, stop = self.find_block(dest, settings_path)
chunk = self._build_list_items(start == stop, settings_path, *items)
self.__insert(dest,
stop,
stop,
chunk)
def delete_from_list(self, dest, settings_path, *items):
"""Remove list items from a list identified by a hierarchy"""
start, stop = self.find_block(dest, settings_path)
indentation = self._indentation_by(len(settings_path))
data = self.get_data(dest)
block = data[start:stop]
# TODO: make work for a value being a list/tuple (works for single, self-contained lines only atm)
for line in items:
chunk = indentation + line + ',' + os.linesep
block = block.replace(chunk, '')
self.set_data(dest, data[:start] + block + data[stop:])
def insert_lines(self, dest, *lines):
"""Find position after first comment and/or docstring, and insert the data"""
dest_data = self.get_data(dest)
re_comments = r'(\s*#.*\n)*'
pattern = re.compile(re_comments + r'\s*')
match = pattern.search(dest_data)
start, stop = self.NO_MATCH if match is None else match.span()
next3chars = dest_data[stop:stop + 3]
if next3chars == '"""' or next3chars == "'''":
stop = self.__find_endofvalue(dest_data, stop)
chunk = ''
for data in lines:
chunk += data + os.linesep
self.__insert(dest, stop, stop, chunk)
def set_value(self, dest, var, value):
"""Replace or add a variable in a settings file"""
var_value = '%s = %s' % (var, value)
match = self.find_var(dest, var, False)
if match == self.NO_MATCH:
self.append_lines(dest, var_value)
else:
start, stop = match
self.__insert(dest, start, stop, var_value + os.linesep)
def delete_var(self, dest, var):
"""Delete a variable from a settings file"""
start, stop = self.find_var(dest, var)
data = self.get_data(dest)
self.set_data(dest, data[:start] + data[stop:])
def copy_var(self, src, destinations, var):
"""Copy a variable from one settings file to one or more others"""
start, stop = self.find_var(src, var)
data = self.get_data(src)[start:stop]
for dest in destinations:
self.append_data(dest, data)
def move_var(self, src, destinations, var):
"""Move a variable from one settings file to one or more others"""
self.copy_var(src, destinations, var)
self.delete_var(src, var)
| [
2,
198,
2,
15069,
1946,
12,
4626,
5613,
347,
715,
1008,
1279,
28241,
14208,
31,
65,
715,
1008,
13,
270,
29,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
4... | 2.317168 | 4,351 |
from django.test import TestCase
from .models import *
# Create your tests here.
leo = Schedule.leo
darcy = Schedule.darcy
bill = Schedule.bill
daniel = Schedule.daniel
participants = [x for x, _ in Schedule.participants]
user = "fake_test_user"
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
764,
27530,
1330,
1635,
198,
198,
2,
13610,
534,
5254,
994,
13,
198,
198,
293,
78,
796,
19281,
13,
293,
78,
198,
27455,
948,
796,
19281,
13,
27455,
948,
198,
35546,
796,
19... | 3.1125 | 80 |
import json
import os
import logging
import sys
import tensorflow as tf
import numpy as np
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # ERROR
logging.getLogger('tensorflow').setLevel(logging.ERROR)
logging.disable(logging.WARNING)
custom_objects = {}
# tfkg-custom-definitions
with open(sys.argv[1], "r") as f:
config = json.load(f)
save_model(config["save_dir"])
if config["cpu_inference"]:
with tf.device("/cpu:0"):
save_model(config["save_dir"] + "/cpu")
| [
11748,
33918,
198,
11748,
28686,
198,
11748,
18931,
198,
11748,
25064,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
198,
418,
13,
268,
2268,
17816,
10234,
62,
8697,
47,
62,
23678,
62,
25294,
6... | 2.518325 | 191 |
from pyexlatex.logic.output.api.builders.base import BaseBuilder
class LuaLatexBuilder(BaseBuilder):
"""A simple lualatex-based builder for LaTeX files.
Builds LaTeX files by copying them to a temporary directly and running
``lualatex`` until the associated ``.aux`` file stops changing.
:param executable: The path to the ``lualatex`` binary (will looked up on
``$PATH``).
:param max_runs: An integer providing an upper limit on the amount of times
``lualatex`` can be rerun before an exception is thrown.
"""
output_extension = 'pdf'
default_executable = 'lualatex'
| [
6738,
12972,
1069,
17660,
87,
13,
6404,
291,
13,
22915,
13,
15042,
13,
50034,
13,
8692,
1330,
7308,
32875,
628,
198,
4871,
43316,
26302,
87,
32875,
7,
14881,
32875,
2599,
198,
220,
220,
220,
37227,
32,
2829,
300,
723,
378,
87,
12,
3... | 2.833333 | 228 |
from konlpy.tag import Okt
"""
Req 1-1-1. ๋ฐ์ดํฐ ์ฝ๊ธฐ
read_data(): ๋ฐ์ดํฐ๋ฅผ ์ฝ์ด์ ์ ์ฅํ๋ ํจ์
"""
"""
Req 1-1-2. ํ ํฐํ ํจ์
tokenize(): ํ
์คํธ ๋ฐ์ดํฐ๋ฅผ ๋ฐ์ KoNLPy์ okt ํํ์ ๋ถ์๊ธฐ๋ก ํ ํฌ๋์ด์ง
"""
| [
6738,
479,
261,
75,
9078,
13,
12985,
1330,
6762,
83,
198,
198,
37811,
198,
3041,
80,
352,
12,
16,
12,
16,
13,
31619,
235,
108,
35975,
112,
169,
226,
108,
23821,
251,
121,
166,
116,
108,
198,
961,
62,
7890,
33529,
31619,
235,
108,
... | 0.86631 | 187 |
import os
os.system("python train.py --dataroot .\\datasets\\font --model font_translator_gan --name test_new_dataset --no_dropout --batch_size 128 --style_channel 10") | [
11748,
28686,
198,
418,
13,
10057,
7203,
29412,
4512,
13,
9078,
1377,
67,
9459,
1025,
764,
6852,
19608,
292,
1039,
6852,
10331,
1377,
19849,
10369,
62,
7645,
41880,
62,
1030,
1377,
3672,
1332,
62,
3605,
62,
19608,
292,
316,
1377,
3919,
... | 3.054545 | 55 |
from __future__ import division
import km
import sklearn
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.neighbors import kneighbors_graph
from sklearn.neighbors import NearestNeighbors
from sklearn.utils.graph import graph_shortest_path
import json
import numpy as np
import KernelPCA
from sklearn.cluster import KMeans
from scipy.spatial.distance import euclidean
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
198,
11748,
10571,
198,
198,
11748,
1341,
35720,
198,
6738,
1341,
35720,
13,
8692,
1330,
7308,
22362,
320,
1352,
11,
3602,
16354,
35608,
259,
198,
6738,
1341,
35720,
13,
710,
394,
32289,
1330,
2... | 3.387931 | 116 |
width = int(input())
height = int(input())
print(calculate_rectangle_area(width, height)) | [
10394,
796,
493,
7,
15414,
28955,
198,
17015,
796,
493,
7,
15414,
28955,
628,
198,
198,
4798,
7,
9948,
3129,
378,
62,
2554,
9248,
62,
20337,
7,
10394,
11,
6001,
4008
] | 2.967742 | 31 |
'''Defines Metaclasses and Base classes for stdnet Models.'''
import sys
from copy import copy, deepcopy
from inspect import isclass
from stdnet.utils.exceptions import *
from stdnet.utils import UnicodeMixin, unique_tuple
from stdnet.utils.structures import OrderedDict
from .globals import hashmodel, JSPLITTER, orderinginfo
from .fields import Field, AutoIdField
from .related import class_prepared
__all__ = ['ModelMeta', 'Model', 'ModelBase', 'ModelState',
'autoincrement', 'ModelType']
class ModelMeta(object):
'''A class for storing meta data for a :class:`Model` class.
To override default behaviour you can specify the ``Meta`` class as an inner
class of :class:`Model` in the following way::
from datetime import datetime
from stdnet import odm
class MyModel(odm.StdModel):
timestamp = odm.DateTimeField(default = datetime.now)
...
class Meta:
ordering = '-timestamp'
name = 'custom'
:parameter register: if ``True`` (default), this :class:`ModelMeta` is
registered in the global models hashtable.
:parameter abstract: Check the :attr:`abstract` attribute.
:parameter ordering: Check the :attr:`ordering` attribute.
:parameter app_label: Check the :attr:`app_label` attribute.
:parameter name: Check the :attr:`name` attribute.
:parameter modelkey: Check the :attr:`modelkey` attribute.
:parameter attributes: Check the :attr:`attributes` attribute.
This is the list of attributes and methods available. All attributes,
but the ones mantioned above, are initialized by the object relational
mapper.
.. attribute:: abstract
If ``True``, This is an abstract Meta class.
.. attribute:: model
:class:`Model` for which this class is the database metadata container.
.. attribute:: name
Usually it is the :class:`Model` class name in lower-case, but it
can be customised.
.. attribute:: app_label
Unless specified it is the name of the directory or file
(if at top level) containing the :class:`Model` definition. It can be
customised.
.. attribute:: modelkey
The modelkey which is by default given by ``app_label.name``.
.. attribute:: ordering
Optional name of a :class:`Field` in the :attr:`model`.
If provided, model indices will be sorted with respect to the value of the
specified field. It can also be a :class:`autoincrement` instance.
Check the :ref:`sorting <sorting>` documentation for more details.
Default: ``None``.
.. attribute:: dfields
dictionary of :class:`Field` instances.
.. attribute:: fields
list of all :class:`Field` instances.
.. attribute:: scalarfields
Ordered list of all :class:`Field` which are not :class:`StructureField`.
The order is the same as in the :class:`Model` definition. The :attr:`pk`
field is not included.
.. attribute:: indices
List of :class:`Field` which are indices (:attr:`Field.index` attribute
set to ``True``).
.. attribute:: pk
The :class:`Field` representing the primary key.
.. attribute:: related
Dictionary of :class:`related.RelatedManager` for the :attr:`model`. It is
created at runtime by the object data mapper.
.. attribute:: manytomany
List of :class:`ManyToManyField` names for the :attr:`model`. This
information is useful during registration.
.. attribute:: attributes
Additional attributes for :attr:`model`.
'''
@property
def type(self):
    '''Kind of the owning model: either ``structure`` or ``object``.'''
    owner = self.model
    return owner._model_type
def make_object(self, state=None, backend=None):
    '''Build a new instance of :attr:`model` from a *state* tuple.

    The instance is allocated without running ``__init__`` and then
    populated through ``load_state``.
    '''
    cls = self.model
    instance = cls.__new__(cls)
    self.load_state(instance, state, backend)
    return instance
def pkname(self):
    '''Return the primary-key field name (shortcut for ``self.pk.name``).'''
    primary_key = self.pk
    return primary_key.name
def pk_to_python(self, value, backend):
    '''Coerce the primary-key ``value`` into a valid Python
    representation by delegating to the primary-key field.
    '''
    primary_key = self.pk
    return primary_key.to_python(value, backend)
def is_valid(self, instance):
    '''Validate *instance*, storing serialised data, indexes and errors
    into its local ``dbdata`` cache.

    Returns ``True`` if the instance is ready to be saved to the
    database; on failure the per-field problems are available in
    ``instance.dbdata['errors']``.
    '''
    dbdata = instance.dbdata
    data = dbdata['cleaned_data'] = {}
    errors = dbdata['errors'] = {}
    # Loop over scalar fields first
    for field, value in instance.fieldvalue_pairs():
        name = field.attname
        try:
            svalue = field.set_get_value(instance, value)
        except Exception as e:
            errors[name] = str(e)
        else:
            # BUG FIX: the original tested ``svalue is ''`` — an identity
            # comparison with a string literal that only works because
            # CPython interns the empty string. Use equality instead.
            if (svalue is None or svalue == '') and field.required:
                errors[name] = ("Field '{0}' is required for '{1}'."
                                .format(name, self))
            else:
                if isinstance(svalue, dict):
                    # Composite fields serialise to several backend keys.
                    data.update(svalue)
                elif svalue is not None:
                    data[name] = svalue
    return len(errors) == 0
def backend_fields(self, fields):
    '''Resolve *fields* into backend names.

    Returns a two-element tuple containing the list of field names and
    the list of field attribute names, skipping the primary key and any
    duplicates.
    '''
    dfields = self.dfields
    pk_name = self.pkname()
    seen = set()
    names = []
    attnames = []
    for name in fields:
        if name == pk_name or name in seen:
            continue
        field = dfields.get(name)
        if field is not None:
            seen.add(name)
            names.append(field.name)
            attnames.append(field.attname)
            continue
        # Dotted/compound name: usable only when its base refers to a
        # field whose backend representation supports sub-field access.
        base = name.split(JSPLITTER)[0]
        base_field = dfields.get(base)
        if base_field is not None and base_field.type in (
                'json object', 'related object'):
            seen.add(name)
            names.append(name)
            attnames.append(name)
    return names, attnames
def as_dict(self):
    '''Serialise this metadata container into a plain dictionary.'''
    pk = self.pk
    # 1 marks an auto-generated id, 3 anything else — presumably codes
    # understood by the backend protocol; confirm before changing.
    id_type = 1 if pk.type == 'auto' else 3
    ordering = self.ordering
    return {
        'id_name': pk.name,
        'id_type': id_type,
        'sorted': bool(ordering),
        'autoincr': ordering and ordering.auto,
        'multi_fields': [field.name for field in self.multifields],
        'indices': {idx.attname: idx.unique for idx in self.indices},
    }
class autoincrement(object):
'''An :class:`autoincrement` is used in a :class:`StdModel` Meta
class to specify a model with :ref:`incremental sorting <incremental-sorting>`.
.. attribute:: incrby
The amount to increment the score by when a duplicate element is saved.
Default: 1.
For example, the :class:`stdnet.apps.searchengine.Word` model is defined as::
class Word(odm.StdModel):
id = odm.SymbolField(primary_key = True)
class Meta:
ordering = -autoincrement()
This means every time we save a new instance of Word, and that instance has
an id already available, the score of that word is incremented by the
:attr:`incrby` attribute.
'''
@property
class ModelType(type):
'''Model metaclass'''
@classmethod
class ModelState(object):
    '''The database state of a :class:`Model`.'''
    # NOTE(review): the constructor (which presumably sets ``_action`` and
    # ``_iid``) and the ``__repr__`` method are not visible in this extract;
    # the trailing ``__str__ = __repr__`` alias depends on that missing
    # definition — restore it from upstream before use.
    @property
    def action(self):
        '''Action to be performed by the backend server when committing
        changes to the instance of :class:`Model` for which this is a state.'''
        return self._action
    @property
    def persistent(self):
        '''``True`` if the instance is persistent in the backend server.'''
        # Any action other than a pending 'add' implies the backend
        # already holds this instance.
        return self._action != 'add'
    @property
    def iid(self):
        '''Instance primary key or a temporary key if not yet available.'''
        return self._iid
    __str__ = __repr__
class Model(UnicodeMixin):
    '''This is the base class for both :class:`StdModel` and :class:`Structure`
    classes. It implements the :attr:`uuid` attribute which provides the
    universal unique identifier for an instance of a model.

    .. attribute:: _meta

        A class attribute which is an instance of :class:`ModelMeta`, it
        contains all the information needed by a
        :class:`stdnet.BackendDataServer`.

    .. attribute:: session

        The :class:`Session` which loaded the instance. Only available,
        when the instance has been loaded from a
        :class:`stdnet.BackendDataServer` via a
        :ref:`query operation <tutorial-query>`.
    '''
    # NOTE(review): presumably a lazily-created per-instance cache for
    # database data; the ``dbdata`` accessor is not visible in this extract.
    _dbdata = None
    # Set by subclasses to 'structure' or 'object' (see the metadata
    # ``type`` property, which reads this attribute).
    _model_type = None
    DoesNotExist = ObjectNotFound
    '''Exception raised when an instance of a model does not exist.'''
    DoesNotValidate = ObjectNotValidated
    '''Exception raised when an instance of a model does not validate. Usually
    raised when trying to save an invalid instance.'''
def get_state(self, **kwargs):
    '''Return the current :class:`ModelState` for this :class:`Model`.

    A fresh state is created when keyword parameters are passed or when
    no state has been cached yet; otherwise the cached one is returned.
    '''
    cache = self.dbdata
    if kwargs or 'state' not in cache:
        cache['state'] = ModelState(self, **kwargs)
    return cache['state']
def pkvalue(self):
    '''Return the value of this instance's primary key.'''
    meta = self._meta
    return meta.pk.get_value(self)
@classmethod
@property
def uuid(self):
    '''Universally unique identifier for this model instance.

    Raises :attr:`DoesNotExist` when the instance has no primary-key
    value yet (i.e. it was never saved).
    '''
    pk_value = self.pkvalue()
    if pk_value:
        return self.get_uuid(pk_value)
    raise self.DoesNotExist(
        'Object not saved. Cannot obtain universally unique id')
@property
session = property(__get_session, __set_session,
doc='The current :class:`Session` for this model.')
@property
def backend(self, client=None):
    '''The :class:`stdnet.BackendDatServer` for this instance, or
    ``None`` when the instance has no session.
    '''
    # NOTE(review): ``client`` is never supplied when this is accessed as
    # a property — presumably vestigial; confirm before removing.
    current = self.session
    return current.model(self).backend if current else None
@property
def read_backend(self, client=None):
    '''The read :class:`stdnet.BackendDatServer` for this instance, or
    ``None`` when the instance has no session.
    '''
    # NOTE(review): ``client`` is never supplied when this is accessed as
    # a property — presumably vestigial; confirm before removing.
    current = self.session
    return current.model(self).read_backend if current else None
def get_attr_value(self, name):
    '''Return the attribute *name* of this instance.

    Provided for API compatibility with :meth:`StdModel.get_attr_value`;
    for this class it is a plain ``getattr`` lookup.
    '''
    return getattr(self, name)
def save(self):
    '''Persist the model by adding it to its :attr:`session`.

    Raises a :class:`SessionNotAvailable` exception when the
    :attr:`session` is not available.
    '''
    current_session = self.session
    return current_session.add(self)
def delete(self):
    '''Remove the model through its :attr:`session`.

    Raises a :class:`SessionNotAvailable` exception when the
    :attr:`session` is not available.
    '''
    current_session = self.session
    return current_session.delete(self)
ModelBase = ModelType('ModelBase', (Model,), {'abstract': True})
| [
7061,
6,
7469,
1127,
3395,
330,
28958,
290,
7308,
6097,
329,
14367,
3262,
32329,
2637,
7061,
201,
198,
11748,
25064,
201,
198,
6738,
4866,
1330,
4866,
11,
2769,
30073,
201,
198,
6738,
10104,
1330,
318,
4871,
201,
198,
201,
198,
6738,
... | 2.40946 | 4,799 |
"""User
Revision ID: 93d219eb7328
Revises:
Create Date: 2020-04-15 16:00:18.696276
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '93d219eb7328'
down_revision = None
branch_labels = None
depends_on = None
| [
37811,
12982,
198,
198,
18009,
1166,
4522,
25,
10261,
67,
28896,
1765,
4790,
2078,
198,
18009,
2696,
25,
220,
198,
16447,
7536,
25,
12131,
12,
3023,
12,
1314,
1467,
25,
405,
25,
1507,
13,
38205,
27988,
198,
198,
37811,
198,
6738,
3134... | 2.653465 | 101 |
import argparse
from adapter import Adapter

# Algorithm identifier used when the caller does not specify one on the
# command line ('mlp' — presumably multi-layer perceptron; confirm against
# the Adapter module's accepted values).
DEFAULT_ALGORITHM = 'mlp'
11748,
1822,
29572,
198,
6738,
21302,
1330,
43721,
198,
198,
7206,
38865,
62,
1847,
38,
1581,
10554,
44,
796,
705,
4029,
79,
6,
198
] | 2.958333 | 24 |
""" Read Gimp .ggr gradient files.
Ned Batchelder, http://nedbatchelder.com
This code is in the public domain.
Update 2018: port to py3 and return uint value
"""
__version__ = '1.0.20180113'
import colorsys
import math
class GimpGradient:
    """Read and interpret a GIMP ``.ggr`` gradient file.

    Call :meth:`read` to parse a gradient, then :meth:`color` to sample
    it at a point in the range [0..1).
    """

    class _segment:
        """Bare attribute container for one gradient segment."""
        pass

    def read(self, f):
        """Read a .ggr gradient from *f* (an open text file or a file path).

        Populates :attr:`name` and :attr:`segs`. Raises ``Exception`` when
        the stream does not look like a GIMP gradient file.
        """
        if isinstance(f, str):
            # We were handed a path: open it ourselves and make sure the
            # handle is closed again (the original version leaked it).
            with open(f) as stream:
                return self.read(stream)
        if f.readline().strip() != "GIMP Gradient":
            raise Exception("Not a GIMP gradient file")
        line = f.readline().strip()
        if not line.startswith("Name: "):
            raise Exception("Not a GIMP gradient file")
        self.name = line.split(": ", 1)[1]
        nsegs = int(f.readline().strip())
        self.segs = []
        for _i in range(nsegs):
            line = f.readline().strip()
            seg = self._segment()
            # 13 numbers per segment: left/middle/right stop positions,
            # left RGBA, right RGBA, interpolation function and color
            # space (the two alpha components are read but ignored).
            (seg.l, seg.m, seg.r,
             seg.rl, seg.gl, seg.bl, _,
             seg.rr, seg.gr, seg.br, _,
             seg.fn, seg.space) = map(float, line.split())
            self.segs.append(seg)

    def color(self, x):
        """Return the gradient color at the point *x* in the range [0..1).

        The color is returned as a packed integer ``0xBBGGRR`` (red in
        the low byte); 0 (black) when no segment covers *x*.
        """
        # Find the segment covering x.
        for seg in self.segs:
            if seg.l <= x <= seg.r:
                break
        else:
            # No segment applies: return black. (The original returned an
            # inconsistent (0, 0, 0) tuple here while every other path
            # returns a packed integer.)
            return 0
        # Normalize the segment geometry.
        mid = (seg.m - seg.l) / (seg.r - seg.l)
        pos = (x - seg.l) / (seg.r - seg.l)
        # Assume linear (most common, and needed by most others).
        if pos <= mid:
            f = pos / mid / 2
        else:
            f = (pos - mid) / (1 - mid) / 2 + 0.5
        # Apply the segment's interpolation function to the blend factor.
        if seg.fn == 1:     # Curved
            f = math.pow(pos, math.log(0.5) / math.log(mid))
        elif seg.fn == 2:   # Sinusoidal
            f = (math.sin((-math.pi / 2) + math.pi * f) + 1) / 2
        elif seg.fn == 3:   # Spherical increasing
            f -= 1
            f = math.sqrt(1 - f * f)
        elif seg.fn == 4:   # Spherical decreasing
            f = 1 - math.sqrt(1 - f * f)
        # Interpolate the endpoint colors in the segment's color space.
        if seg.space == 0:          # plain RGB
            c = (
                int((seg.rl + (seg.rr - seg.rl) * f) * 0xff) |
                int((seg.gl + (seg.gr - seg.gl) * f) * 0xff) << 8 |
                int((seg.bl + (seg.br - seg.bl) * f) * 0xff) << 16
            )
        elif seg.space in (1, 2):   # HSV, counter-clockwise / clockwise hue
            hl, sl, vl = colorsys.rgb_to_hsv(seg.rl, seg.gl, seg.bl)
            hr, sr, vr = colorsys.rgb_to_hsv(seg.rr, seg.gr, seg.br)
            if seg.space == 1 and hr < hl:
                hr += 1
            elif seg.space == 2 and hr > hl:
                hr -= 1
            c = colorsys.hsv_to_rgb(
                (hl + (hr - hl) * f) % 1.0,
                sl + (sr - sl) * f,
                vl + (vr - vl) * f,
            )
            c = (int(c[0] * 0xff) |
                 int(c[1] * 0xff) << 8 |
                 int(c[2] * 0xff) << 16)
        return c
| [
37811,
4149,
402,
11011,
764,
1130,
81,
31312,
3696,
13,
198,
220,
220,
220,
35754,
6577,
2395,
6499,
11,
2638,
1378,
2817,
8664,
2395,
6499,
13,
785,
198,
220,
220,
220,
770,
2438,
318,
287,
262,
1171,
7386,
13,
628,
220,
220,
220,... | 1.786903 | 1,802 |
#!/usr/bin/python3
import ssl, json, time
from os.path import isfile, getctime
from os import sep, remove, name
from sys import exit, argv, version_info, stdout, stderr, maxsize
from getopt import getopt
from datetime import datetime
from urllib.parse import quote_plus
from xmlrpc.client import ServerProxy
pluginVersion = "18.10"
databaseName = None
hostName = None
userName = None
password = None
opts, args = None, None
backupAge = 7 #days
try:
opts, args = getopt(argv[1:], 'hVw:c:H:d:u:p:b:')
except:
print("Unknown parameter(s): %s" % argv[1:])
opts = []
opts.append(['-h', None])
for opt in opts:
parameter = opt[0]
value = opt[1]
if parameter == '-h':
print("""
EXAoperation XMLRPC database disk usage monitor (version %s)
Options:
-h shows this help
-V shows the plugin version
-H <license server> domain of IP of your license server
-d <db instance> the name of your DB instance
-u <user login> EXAoperation login user
-p <password> EXAoperation login password
-b <backup age in days> (optional) maximum age of the last valid backup
""" % (pluginVersion))
exit(0)
elif parameter == '-V':
print("EXAoperation XMLRPC database disk usage monitor (version %s)" % pluginVersion)
exit(0)
elif parameter == '-H':
hostName = value.strip()
elif parameter == '-u':
userName = value.strip()
elif parameter == '-p':
password = value.strip()
elif parameter == '-d':
databaseName = value.strip()
elif parameter == '-b':
backupAge = int(value.strip())
if not (databaseName and hostName and userName and password):
print('Please define at least the following parameters: -d -H -u -p')
exit(4)
# Main check: query the license server over XMLRPC and validate backups.
# NOTE(review): XmlRpcCall and stringToTimestamp are defined elsewhere in
# the original script (not visible in this extract) — confirm before reuse.
try:
    cluster = XmlRpcCall('/')
    storage = XmlRpcCall('/storage')
    database = XmlRpcCall('/db_' + quote_plus(databaseName))
    backupList = database.getBackupList()
    backups = []
    latestBackupInfo = None
    # fill up backups list with latest backup data (if available)
    for backup in reversed(backupList):
        backupInfo = database.getBackupInfo(backup[0])  # backup ids are not unique on systems with multiple archive volumes
        if backupInfo['usable'] == True:
            latestBackupInfo = backupInfo
            volume = backupInfo['volume'][0]
            for backupId in latestBackupInfo['dependencies']:
                backups.append(database.getBackupInfo((backupId, volume)))  # together with volume id it's unique again
            backups.append(backupInfo)
            break
    if len(backups) == 0:
        print('CRITICAL - No usable backup available')
        exit(2)
    # check the age of the latest usable backup against the -b threshold
    now = int(time.time())
    expirationDate = now - (backupAge * 3600 * 24)
    if stringToTimestamp(latestBackupInfo['timestamp']) < expirationDate:
        print('WARNING - Latest backup (ID %i on %s) is older than %d days' % (latestBackupInfo['id'], latestBackupInfo['volume'][0], backupAge))
        exit(1)
    # dependency expiration date check: a base backup must not expire
    # before the backups that depend on it
    oldExpiration = maxsize
    oldBackup = 0
    for backup in backups:
        newBackup = backup
        newExpiration = stringToTimestamp(backup['expire date'])
        if oldExpiration < newExpiration:
            print('WARNING - Base backup (ID %i on %s) expires before its dependency (ID %i)' % (oldBackup['id'], oldBackup['volume'][0], newBackup['id']))
            exit(1)
        oldBackup = newBackup
        oldExpiration = newExpiration
    # all checks passed
    print ('OK - There is a valid backup')
    exit(0)
except Exception as e:
    # Strip credentials from the message before printing (they may be
    # embedded in the XMLRPC connection URL).
    message = str(e).replace('%s:%s@%s' % (userName, password, hostName), hostName)
    if 'unauthorized' in message.lower():
        print('no access to EXAoperation: username or password wrong')
    elif 'Unexpected Zope exception: NotFound: Object' in message:
        print('database instance not found')
    else:
        print('UNKNOWN - internal error %s | ' % message.replace('|', '!').replace('\n', ';'))
    exit(3)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
11748,
264,
6649,
11,
33918,
11,
640,
198,
6738,
28686,
13,
6978,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1330,
318,
7753,
11,
651,
310,
524,
198,
6738,
28686,
220,
220... | 2.377948 | 1,823 |