content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import numpy as np
def getPerspectiveTransform(view1_4_pts, view2_4_pts):
    """Estimate the 3x3 homography mapping view-1 points onto view-2 points.

    Uses the Direct Linear Transform (DLT): each of the 4 correspondences
    contributes two rows to an 8x9 system A·h = 0, solved via SVD (the right
    singular vector of the smallest singular value is the flattened h).

    source https://math.stackexchange.com/questions/494238/how-to-compute-homography-matrix-h-from-corresponding-points-2d-2d-planar-homog

    Args:
        view1_4_pts: sequence of 4 points in view 1; each point is 2-D
            indexable as pt[0, 0] (x) and pt[0, 1] (y).
        view2_4_pts: sequence of 4 corresponding points in view 2.

    Returns:
        3x3 numpy array h, normalized so h[2, 2] == 1.
    """
    # Assemble the 8x9 DLT matrix, two rows per correspondence.
    # (np.matrix is deprecated; plain ndarrays are used throughout.)
    rows = []
    for p1, p2 in zip(view1_4_pts, view2_4_pts):
        x1, y1, w1 = p1[0, 0], p1[0, 1], 1.0
        x2, y2, w2 = p2[0, 0], p2[0, 1], 1.0
        rows.append([-w2 * x1, -w2 * y1, -w2 * w1, 0, 0, 0,
                     x2 * x1, x2 * y1, x2 * w1])
        rows.append([0, 0, 0, -w2 * x1, -w2 * y1, -w2 * w1,
                     y2 * x1, y2 * y1, y2 * w1])
    matrix_a = np.array(rows)

    # SVD: the null-space direction is the last row of V^T.
    _, _, vt = np.linalg.svd(matrix_a)
    h = vt[-1].reshape(3, 3)

    # Normalize so the bottom-right element is 1.
    return h / h[2, 2]
11748,
299,
32152,
355,
45941,
198,
198,
4299,
651,
30946,
806,
425,
41762,
7,
1177,
16,
62,
19,
62,
457,
82,
11,
1570,
17,
62,
19,
62,
457,
82,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
2723,
3740,
1378,
11018,
1... | 1.830696 | 632 |
# -*- coding: utf-8 -*-
"""Setup functions
Functions used to set up the eMoL instance from a YAML file.
"""
# standard library imports
import contextlib
import json
# third-party imports
from flask import current_app
from slugify import slugify
# application imports
from emol.models import Authorization, Config, Discipline, Marshal, Role, User, UserRole
"""Application setup.
Receive loaded YAML object of setup data, and perform setup of the instance
admin_emails: A list of email addresses that must correspond to
Google accounts. Each address will be given system
admin privilege
disciplines: Array of disciplines that the eMoL instance will
track. This array contains all authorization and
marshal data for the discipline as well.
Each array element is an object:
name: Rapier
authorizations:
- Heavy Rapier
- Cut & Thrust
- Two Weapon
- Parry Device
marshals:
- Marshal
Roles will be configured based on the contents of USER_ROLES
(see emol.roles)
It is currently assumed that any changes to roles or disciplines will be
handled through SQL
"""
def setup(config):
    """Set up eMoL.

    If the instance is already set up, truncates all tables and returns;
    otherwise creates disciplines (with authorizations and marshals), roles,
    system admin users, and reminder configuration from the given data.

    Args:
        config: A dictionary of setup data as detailed in the module docstring
    """
    try:
        if Config.get('is_setup'):
            current_app.logger.info('eMoL is set up, truncating tables')
            from sqlalchemy import MetaData
            engine = current_app.db.engine
            meta = MetaData(bind=engine, reflect=True)
            with contextlib.closing(engine.connect()) as con:
                # Disable FK checks so tables can be emptied in any order
                con.execute('SET FOREIGN_KEY_CHECKS=0;')
                trans = con.begin()
                for table in reversed(meta.sorted_tables):
                    current_app.logger.info('truncate: {}'.format(table))
                    con.execute(table.delete())
                trans.commit()
                con.execute('SET FOREIGN_KEY_CHECKS=1;')
            # NOTE(review): returns after truncation without re-running the
            # setup below — presumably a second invocation performs the fresh
            # setup; confirm this is the intended flow.
            return
    except Exception as exc:
        print(exc)
        raise

    # Set up the disciplines
    current_app.logger.debug('Set up disciplines')
    for name, data in config.get('disciplines').items():
        slug = slugify(name)
        current_app.logger.info('Set up {0} ({1})'.format(name, slug))
        new_discipline = Discipline(
            name=name,
            slug=slug,
            _reminders_at=json.dumps(data.get('reminders_at', [30, 60]))
        )
        current_app.db.session.add(new_discipline)

        for auth_name in data.get('authorizations'):
            auth_slug = slugify(auth_name)
            authorization = Authorization(
                name=auth_name,
                slug=auth_slug,
                discipline=new_discipline
            )
            current_app.logger.info(
                '{} authorization {} ({})'
                .format(name, auth_name, auth_slug)
            )
            current_app.db.session.add(authorization)

        for marshal_name in data.get('marshals'):
            marshal_slug = slugify(marshal_name)
            marshal = Marshal(
                name=marshal_name,
                slug=marshal_slug,
                discipline=new_discipline
            )
            current_app.logger.info(
                '{} marshal {} ({})'
                .format(name, marshal_name, marshal_slug)
            )
            current_app.db.session.add(marshal)

    current_app.logger.debug('Set up roles')
    # Roles from the role definitions in roles.py
    for role in Role.USER_ROLES:
        name = role.get('name')
        slug = role.get('slug')
        if slug in Role.GLOBAL_ROLES:
            current_app.logger.info('Global role: {} ({})'.format(name, slug))
            user_role = Role(
                name=name,
                slug=slug,
                discipline=None
            )
            current_app.db.session.add(user_role)
        else:
            # Need to make one role for each discipline
            for discipline in Discipline.query.all():
                current_app.logger.info(
                    '{} role: {} ({})'
                    .format(discipline.name, name, slug)
                )
                user_role = Role(
                    name=name,
                    slug=slug,
                    discipline=discipline
                )
                current_app.db.session.add(user_role)

    # Admin role
    current_app.logger.info('Role admin (admin)')
    admin_role = Role(
        name="admin",
        slug="admin",
        discipline_id=None
    )
    current_app.db.session.add(admin_role)

    # Create system admin users
    for email in config.get('admin_emails'):
        # Skip addresses that already have an account
        if User.query.filter(User.email == email).one_or_none() is not None:
            continue

        current_app.logger.info('Admin {}'.format(email))
        user = User(
            email=email,
            system_admin=True
        )
        current_app.db.session.add(user)

        # Grant ultimate cosmic power
        user_role = UserRole(
            user=user,
            role=admin_role,
            discipline=None
        )
        current_app.db.session.add(user_role)

    waiver_reminders = config.get('waiver_reminders', [30, 60])
    Config.set('waiver_reminders', waiver_reminders)
    current_app.logger.info('Waiver reminders {}'.format(waiver_reminders))

    card_reminders = config.get('card_reminders', [30, 60])
    Config.set('card_reminders', card_reminders)
    # Bug fix: this log line previously said "Waiver reminders" (copy-paste)
    current_app.logger.info('Card reminders {}'.format(card_reminders))

    Config.set('is_setup', True)
    current_app.db.session.commit()
    current_app.logger.debug('Setup complete: {0}'.format(Config.get('is_setup')))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
40786,
5499,
198,
198,
24629,
2733,
973,
284,
900,
510,
262,
304,
16632,
43,
4554,
422,
257,
575,
2390,
43,
2393,
13,
198,
37811,
198,
198,
2,
3210,
5888,
17944... | 2.069401 | 2,853 |
# one city: (city name, population, federal state)
cities = [('Osnabrück', 165000, 'Lower Saxony'),
          ('Münster', 311000, 'North Rhine-Westphalia'),
          ('Bielefeld', 333000, 'North Rhine-Westphalia')]

total_population = 0
# Sum the population of all cities in 'cities'.
# Bug fix: population is tuple index 1; index 2 is the federal state string,
# which the original code tried to add (TypeError: int + str).
for city in cities:
    total_population += city[1]
| [
2,
530,
1748,
25,
357,
19205,
1438,
11,
3265,
11,
2717,
1181,
8,
198,
66,
871,
796,
685,
10786,
46,
16184,
397,
81,
9116,
694,
3256,
20986,
830,
4032,
31426,
29242,
1647,
33809,
10786,
44,
9116,
77,
1706,
3256,
36244,
830,
4032,
141... | 2.927273 | 110 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import asyncio
from datetime import datetime, timezone, timedelta
def split_by_char_limit(s, limit):
    """Given a string, return it split on newlines into chunks under the given
    char limit.

    Args:
        s: The string to split.
        limit: Maximum size of each chunk, e.g. 1900 to fit comfortably under
            discord's 2000-char limit. (Splitting newlines are consumed.)

    Returns:
        List of chunk strings.

    Raises:
        ValueError: If a single line exceeds the char limit.
    """
    chunks = []
    while s:
        # Terminate if the remainder fits in one chunk
        if len(s) <= limit:
            chunks.append(s)
            return chunks
        # Find the last newline at or before the chunk limit
        cut_idx = s.rfind("\n", 0, limit + 1)
        if cut_idx == -1:
            raise ValueError(f"Can't split message with line > {limit} chars")
        chunks.append(s[:cut_idx])
        s = s[cut_idx + 1:]
    # Bug fix: previously the function fell off the loop and returned None
    # when the remainder became empty after a cut (e.g. input ending in "\n").
    return chunks
def parse_datetime_str(s):
    """Parse and check validity of given ISO date string then return as a UTC
    Datetime (converting as needed).
    """
    # fromisoformat doesn't accept the 'Z' (Zulu time) suffix, so drop it
    parsed = datetime.fromisoformat(s.rstrip('Z'))
    # Aware timestamps are converted to UTC; naive ones are assumed UTC
    if parsed.tzinfo is not None:
        return parsed.astimezone(timezone.utc)
    return parsed.replace(tzinfo=timezone.utc)
def process_start_end_dates(start, end, check_start_in_future=True):
    """Helper for validating and reformatting tournament start/end date args
    (e.g. tournament or round start & end).

    When updating an existing tournament, check for start date being in future
    should be ignored (pass check_start_in_future=False).
    """
    start_dt = parse_datetime_str(start)
    end_dt = parse_datetime_str(end)
    now = datetime.now(timezone.utc)

    # Validate ordering and that the window is not already over
    if check_start_in_future and start_dt < now:
        raise ValueError("Start time is in past.")
    if end_dt <= start_dt:
        raise ValueError("End time is not after start time.")
    if end_dt < now:
        raise ValueError("End time is in past.")

    return start_dt.isoformat(), end_dt.isoformat()
def format_date(s):
    """Return the given datetime string (expected to be UTC and as returned by
    datetime.isoformat()) in a more friendly format.
    """
    # Keep only "YYYY-MM-DD HH:MM": truncate seconds/offset, swap the 'T'
    # separator for a space, and tag the result as UTC.
    return s[:16].replace('T', ' ') + ' UTC'
def format_timedelta(td: timedelta):
    """Given a time delta, return a user-friendly string of days + hours if it
    is > 1 day, hours + mins if it is > 1 min, or seconds if it is <= 1 min.
    """
    # timedelta normalizes internally to days + seconds + microseconds
    if td.days >= 1:
        hours = td.seconds // 3600
        return f"{td.days} days, {hours} hours"
    if td.seconds >= 60:
        hours = td.seconds // 3600
        minutes = (td.seconds // 60) % 60
        return f"{hours} hours, {minutes} mins"
    return f"{td.seconds} seconds"
async def wait_until(dt):
    """Helper to async sleep until after the given Datetime."""
    # Sleep then re-check the clock a couple of times for safety, since
    # accuracy of very long sleeps (a week or more) has mixed reports.
    for attempt in range(3):
        now = datetime.now(timezone.utc)
        remaining = (dt - now).total_seconds()
        if remaining < 0:
            return
        if attempt == 1:
            print(f"BG task attempting to sleep until {dt.isoformat()} only slept until {now.isoformat()}; re-sleeping")
        elif attempt == 2:
            raise Exception(f"wait_until waited until {now.isoformat()} instead of {dt.isoformat()}")
        # Extra 10th of a sec to ensure we go past the specified time
        await asyncio.sleep(remaining + 0.1)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
30351,
952,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
640,
11340,
11,
28805,
12514,
628,
198,
4299,
6626... | 2.689781 | 1,370 |
# Adapted from Gongfan Fang's implementation of DeepLab v3+ found at:
# https://github.com/VainF/DeepLabV3Plus-Pytorch
# Also used Yude Wang's implementation as a reference:
# https://github.com/YudeWang/deeplabv3plus-pytorch
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
try:
    from torchvision.models import resnet
    from torchvision.models._utils import IntermediateLayerGetter
    from torchvision.models.segmentation.deeplabv3 import ASPP, DeepLabV3, DeepLabHead
except ImportError:
    # torchvision is optional: stub every name imported above so later code
    # can test these for None instead of hitting a NameError.
    resnet = None
    IntermediateLayerGetter = None
    ASPP = None
    DeepLabV3 = None
    # Bug fix: DeepLabHead was imported in the try block but never stubbed,
    # leaving it undefined when torchvision is missing.
    DeepLabHead = None
from architectures.deeplab2 import freeze_bn_module
| [
2,
30019,
276,
422,
47142,
24408,
24468,
338,
7822,
286,
10766,
17822,
410,
18,
10,
1043,
379,
25,
198,
2,
3740,
1378,
12567,
13,
785,
14,
53,
391,
37,
14,
29744,
17822,
53,
18,
17860,
12,
20519,
13165,
354,
198,
2,
4418,
973,
575... | 3.037975 | 237 |
"""Showcases overall *Colour* examples."""
import numpy as np
import warnings
import colour
from colour.utilities import (
filter_warnings,
message_box,
warning,
runtime_warning,
usage_warning,
)
message_box("Automatic Colour Conversion Graph")
message_box(
'Starting with version "0.3.14", "Colour" implements an automatic colour '
"conversion graph enabling easier colour conversions."
)
message_box(
'Converting a "ColorChecker" "dark skin" sample spectral distribution to '
'"Output-Referred" "sRGB" colourspace.'
)
sd = colour.SDS_COLOURCHECKERS["ColorChecker N Ohta"]["dark skin"]
print(colour.convert(sd, "Spectral Distribution", "sRGB"))
print("\n")
RGB = np.array([0.45675795, 0.30986982, 0.24861924])
message_box(
f'Converting to the "CAM16-UCS" colourspace from given "Output-Referred" '
f'"sRGB" colourspace values:\n\n\t{RGB}'
)
print(colour.convert(RGB, "Output-Referred RGB", "CAM16UCS"))
print("\n")
Jpapbp = np.array([0.39994811, 0.09206558, 0.0812752])
message_box(
f'Converting to the "Output-Referred" "sRGB" colourspace from given '
f'"CAM16-UCS" colourspace colourspace values:\n\n\t{RGB}'
)
print(colour.convert(Jpapbp, "CAM16UCS", "sRGB"))
print("\n")
message_box('Filter "Colour" Warnings')
warning("This is a first warning and it can be filtered!")
filter_warnings()
warning("This is a second warning and it has been filtered!")
filter_warnings(False)
warning("This is a third warning and it has not been filtered!")
message_box(
"All Python can be filtered by setting the "
'"colour.utilities.filter_warnings" definition "python_warnings" '
"argument."
)
warnings.warn("This is a fourth warning and it has not been filtered!")
filter_warnings(python_warnings=False)
warning("This is a fifth warning and it has been filtered!")
filter_warnings(False, python_warnings=False)
warning("This is a sixth warning and it has not been filtered!")
filter_warnings(False, python_warnings=False)
filter_warnings(colour_warnings=False, colour_runtime_warnings=True)
runtime_warning("This is a first runtime warning and it has been filtered!")
filter_warnings(colour_warnings=False, colour_usage_warnings=True)
usage_warning("This is a first usage warning and it has been filtered!")
print("\n")
message_box('Overall "Colour" Examples')
message_box("N-Dimensional Arrays Support")
XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
illuminant = np.array([0.31270, 0.32900])
message_box(f'Using 1d "ArrayLike" parameter:\n\n{XYZ}')
print(colour.XYZ_to_Lab(XYZ, illuminant=illuminant))
print("\n")
XYZ = np.tile(XYZ, (6, 1))
illuminant = np.tile(illuminant, (6, 1))
message_box(f'Using 2d "ArrayLike" parameter:\n\n{XYZ}')
print(colour.XYZ_to_Lab(XYZ, illuminant=illuminant))
print("\n")
XYZ = np.reshape(XYZ, (2, 3, 3))
illuminant = np.reshape(illuminant, (2, 3, 2))
message_box(f'Using 3d "ArrayLike" parameter:\n\n{XYZ}')
print(colour.XYZ_to_Lab(XYZ, illuminant=illuminant))
print("\n")
XYZ = np.reshape(XYZ, (3, 2, 1, 3))
illuminant = np.reshape(illuminant, (3, 2, 1, 2))
message_box(f'Using 4d "ArrayLike" parameter:\n\n{XYZ}')
print(colour.XYZ_to_Lab(XYZ, illuminant=illuminant))
print("\n")
xy = np.tile((0.31270, 0.32900), (6, 1))
message_box(
f"Definitions return value may lose a dimension with respect to the "
f"parameter(s):\n\n{xy}"
)
print(colour.xy_to_CCT(xy))
print("\n")
CCT = np.tile(6504.38938305, 6)
message_box(
f"Definitions return value may gain a dimension with respect to the "
f"parameter(s):\n\n{CCT}"
)
print(colour.CCT_to_xy(CCT))
print("\n")
message_box(
'Definitions mixing "ArrayLike" and "Number" parameters expect the '
'"Number" parameters to have a dimension less than the "ArrayLike" '
"parameters."
)
XYZ_1 = np.array([28.00, 21.26, 5.27])
xy_o1 = np.array([0.4476, 0.4074])
xy_o2 = np.array([0.3127, 0.3290])
Y_o = 20
E_o1 = 1000
E_o2 = 1000
message_box(
f"Parameters:\n\n"
f"\tXYZ_1: {XYZ_1}\n"
f"\txy_o1: {xy_o1}\n"
f"\txy_o2: {xy_o2}\n"
f"\tY_o: {Y_o}\n"
f"\tE_o1: {E_o1}\n"
f"\tE_o2: {E_o2}"
)
print(
colour.adaptation.chromatic_adaptation_CIE1994(
XYZ_1, xy_o1, xy_o2, Y_o, E_o1, E_o2
)
)
print("\n")
XYZ_1 = np.tile(XYZ_1, (6, 1))
message_box(
f"Parameters:\n\n"
f"\tXYZ_1: {XYZ_1}\n"
f"\txy_o1: {xy_o1}\n"
f"\txy_o2: {xy_o2}\n"
f"\tY_o: {Y_o}\n"
f"\tE_o1: {E_o1}\n"
f"\tE_o2: {E_o2}"
)
print(
colour.adaptation.chromatic_adaptation_CIE1994(
XYZ_1, xy_o1, xy_o2, Y_o, E_o1, E_o2
)
)
print("\n")
xy_o1 = np.tile(xy_o1, (6, 1))
xy_o2 = np.tile(xy_o2, (6, 1))
Y_o = np.tile(Y_o, 6)
E_o1 = np.tile(E_o1, 6)
E_o2 = np.tile(E_o2, 6)
message_box(
f"Parameters:\n\n"
f"\tXYZ_1: {XYZ_1}\n"
f"\txy_o1: {xy_o1}\n"
f"\txy_o2: {xy_o2}\n"
f"\tY_o: {Y_o}\n"
f"\tE_o1: {E_o1}\n"
f"\tE_o2: {E_o2}"
)
print(
colour.adaptation.chromatic_adaptation_CIE1994(
XYZ_1, xy_o1, xy_o2, Y_o, E_o1, E_o2
)
)
print("\n")
XYZ_1 = np.reshape(XYZ_1, (2, 3, 3))
xy_o1 = np.reshape(xy_o1, (2, 3, 2))
xy_o2 = np.reshape(xy_o2, (2, 3, 2))
Y_o = np.reshape(Y_o, (2, 3))
E_o1 = np.reshape(E_o1, (2, 3))
E_o2 = np.reshape(E_o2, (2, 3))
message_box(
f"Parameters:\n\n"
f"\tXYZ_1: {XYZ_1}\n"
f"\txy_o1: {xy_o1}\n"
f"\txy_o2: {xy_o2}\n"
f"\tY_o: {Y_o}\n"
f"\tE_o1: {E_o1}\n"
f"\tE_o2: {E_o2}"
)
print(
colour.adaptation.chromatic_adaptation_CIE1994(
XYZ_1, xy_o1, xy_o2, Y_o, E_o1, E_o2
)
)
print("\n")
message_box("Domain-Range Scales")
message_box(
'"Colour" uses two different domain-range scales: \n\n'
'- "Reference"\n'
'- "1"'
)
print("\n")
message_box("Printing the current domain-range scale:")
print(colour.get_domain_range_scale())
print("\n")
message_box('Setting the current domain-range scale to "1":')
colour.set_domain_range_scale("1")
XYZ_1 = np.array([0.2800, 0.2126, 0.0527])
xy_o1 = np.array([0.4476, 0.4074])
xy_o2 = np.array([0.3127, 0.3290])
Y_o = 0.2
E_o1 = 1000
E_o2 = 1000
message_box(
f"Parameters:\n\n"
f"\tXYZ_1: {XYZ_1}\n"
f"\txy_o1: {xy_o1}\n"
f"\txy_o2: {xy_o2}\n"
f"\tY_o: {Y_o}\n"
f"\tE_o1: {E_o1}\n"
f"\tE_o2: {E_o2}"
)
print(
colour.adaptation.chromatic_adaptation_CIE1994(
XYZ_1, xy_o1, xy_o2, Y_o, E_o1, E_o2
)
)
| [
37811,
15307,
33964,
4045,
1635,
5216,
454,
9,
6096,
526,
15931,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
14601,
198,
198,
11748,
9568,
198,
6738,
9568,
13,
315,
2410,
1330,
357,
198,
220,
220,
220,
8106,
62,
40539,
654,
1... | 2.079568 | 3,054 |
from django.contrib.auth.mixins import PermissionRequiredMixin, PermissionDenied
class TransactionPermissionMixin(PermissionRequiredMixin):
"""
Mixin has to come last in MRO because logic is based on transaction type
""" | [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
19816,
1040,
1330,
2448,
3411,
37374,
35608,
259,
11,
2448,
3411,
21306,
798,
198,
198,
4871,
45389,
5990,
3411,
35608,
259,
7,
5990,
3411,
37374,
35608,
259,
2599,
198,
220,
220,
220,
... | 3.477612 | 67 |
import numpy as np
import pandas as pd
from datetime import datetime
from sqlalchemy import create_engine
# Sample score data keyed by column name
data = {
    'X': [78, 85, 96, 80, 86],
    'Y': [84, 94, 89, 83, 86],
    'Z': [86, 97, 96, 72, 83]
}

data2 = {
    'A': [44, 54, 56, 9, 77],
    'B': [55, 94, 56, 75, 74],
    'C': [66, 700, 90, 44, 23]
}

# Variant of `data` whose first three values per column are missing.
# Bug fix: dict(data) is a *shallow* copy, so the None assignments below used
# to mutate the lists shared with `data` as well; copy each list too.
data_with_null = {key: list(values) for key, values in data.items()}
for _col in ('X', 'Y', 'Z'):
    for _idx in range(3):
        data_with_null[_col][_idx] = None
if __name__ == "__main__":
    # https://pandas.pydata.org/
    # NOTE(review): exercise123..exercise130 are not defined in this chunk —
    # presumably defined elsewhere in the full file; confirm before running.
    exercise123()
    exercise124()
    exercise125()
    exercise126()
    exercise127()
    exercise128()
    exercise129()
    exercise130()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
198,
198,
7890,
796,
1391,
198,
220,
220,
220,
705,
55,
10354,
685,
369... | 2.184697 | 379 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
import logging
import os
import re
import tempfile
import unittest
import zipfile
from pathlib import Path
from typing import Optional
from unittest import mock
# We use TF to parse the logs
from accelerate import Accelerator
from accelerate.test_utils.testing import (
MockingTestCase,
TempDirTestCase,
require_comet_ml,
require_tensorboard,
require_wandb,
)
from accelerate.tracking import CometMLTracker, GeneralTracker
from accelerate.utils import is_comet_ml_available
if is_comet_ml_available():
from comet_ml import OfflineExperiment
logger = logging.getLogger(__name__)
@require_tensorboard
@require_wandb
@mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
# Comet has a special `OfflineExperiment` we need to use for testing
@require_comet_ml
@mock.patch.object(CometMLTracker, "__init__", offline_init)
class MyCustomTracker(GeneralTracker):
    "Basic tracker that writes to a csv for testing"

    # Column headers for the csv log; one per value logged by the test suite.
    _col_names = [
        "total_loss",
        "iteration",
        "my_text",
        "learning_rate",
        "num_iterations",
        "some_boolean",
        "some_string",
    ]

    # This tracker needs no on-disk logging directory.
    requires_logging_directory = False
| [
2,
15069,
33160,
383,
12905,
2667,
32388,
4816,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,... | 3.089041 | 584 |
from .lyft import LyftDataset
from .eval import get_lyft_eval_result
__all__ = ["LyftDataset", "get_lyft_eval_result"]
| [
6738,
764,
306,
701,
1330,
38928,
27354,
292,
316,
198,
6738,
764,
18206,
1330,
651,
62,
306,
701,
62,
18206,
62,
20274,
198,
198,
834,
439,
834,
796,
14631,
31633,
701,
27354,
292,
316,
1600,
366,
1136,
62,
306,
701,
62,
18206,
62,... | 2.608696 | 46 |
import time
import pygame as pg
from pygame.surface import Surface
from bomber_monkey.features.display.image import Image
from bomber_monkey.features.player.players_config import PlayersConfig
from bomber_monkey.game_config import GameConfig, BLUE_MONKEY_COLOR, BLACK_COLOR, GAME_FONT
from bomber_monkey.game_scores import GameRoundResult
from bomber_monkey.utils.vector import Vector
# Title / message typography and box layout (values presumably in pixels —
# confirm against the display code).
TITLE_FONT_SIZE = 50
TITLE_BOTTOM_MARGIN = 50
MESSAGE_FONT_SIZE = 35
BOX_PADDING = 30
PLAYER_MESSAGE_PREFIX = "Player "

# Per-player score line layout and the size of one score slot.
SCORE_LINE_HEIGHT = 80
SCORE_SLOT_MARGIN = 10
SCORE_SLOT_SIZE = Vector.create(64, 64)
SCORE_SLOT_SPACING = 10

# Monkey sprite animation timings (seconds).
MONKEY_BLINK_ON_TIME = .4
MONKEY_BLINK_OFF_TIME = .2
MONKEY_MAX_BLINK_TIME = 5
MONKEY_FLIP_TIME = .5

# Derived offsets: center a slot vertically in its line, and the horizontal
# step from one slot to the next.
SCORE_SLOT_OFFSET_Y = SCORE_LINE_HEIGHT / 2 - SCORE_SLOT_SIZE.y / 2
SCORE_SLOT_OFFSET_X = SCORE_SLOT_SIZE.x + SCORE_SLOT_SPACING
| [
11748,
640,
198,
198,
11748,
12972,
6057,
355,
23241,
198,
6738,
12972,
6057,
13,
42029,
1330,
20321,
198,
198,
6738,
24065,
62,
49572,
13,
40890,
13,
13812,
13,
9060,
1330,
7412,
198,
6738,
24065,
62,
49572,
13,
40890,
13,
7829,
13,
... | 2.594512 | 328 |
#! /usr/bin/env python
"""
Module with functions related to image coordinates and coordinate conversions.
"""
__author__ = 'Carlos Alberto Gomez Gonzalez, Valentin Christiaens'
__all__ = ['dist',
'dist_matrix',
'frame_center',
'cart_to_pol',
'pol_to_cart',
'pol_to_eq',
'QU_to_QUphi']
import math
from matplotlib.pyplot import xlim, ylim, axes, gca, show
import matplotlib.pyplot as plt
import numpy as np
def dist(yc, xc, y1, x1):
    """
    Return the Euclidean distance between two points, or between an array
    of positions and a point.

    Parameters
    ----------
    yc, xc : float or numpy ndarray
        y and x coordinates of the reference point(s).
    y1, x1 : float or numpy ndarray
        y and x coordinates of the other point(s).

    Returns
    -------
    float or numpy ndarray
        Euclidean distance(s).
    """
    # np.hypot is equivalent to sqrt(dy**2 + dx**2) but avoids intermediate
    # overflow/underflow for extreme coordinate values.
    return np.hypot(yc - y1, xc - x1)
def dist_matrix(n, cx=None, cy=None):
    """
    Create matrix with euclidian distances from a reference point (cx, cy).

    Parameters
    ----------
    n : int
        output image shape is (n, n)
    cx,cy : float
        reference point. Defaults to the center.

    Returns
    -------
    im : ndarray with shape (n, n)

    Notes
    -----
    This is a replacement for ANDROMEDA's DISTC.
    """
    # Default reference point is the geometric center of the (n, n) grid
    center = (n - 1) / 2
    cx = center if cx is None else cx
    cy = center if cy is None else cy

    yy, xx = np.ogrid[:n, :n]
    return np.sqrt((yy - cy) ** 2 + (xx - cx) ** 2)
def frame_center(array, verbose=False):
    """
    Return the coordinates y,x of the frame(s) center.

    If odd: dim/2-0.5
    If even: dim/2

    Parameters
    ----------
    array : 2d/3d/4d numpy ndarray
        Frame or cube.
    verbose : bool optional
        If True the center coordinates are printed out.

    Returns
    -------
    cy, cx : int
        Coordinates of the center.
    """
    # The last two axes hold the frame; pick a representative 2d slice
    if array.ndim == 2:
        shape = array.shape
    elif array.ndim == 3:
        shape = array[0].shape
    elif array.ndim == 4:
        shape = array[0, 0].shape
    else:
        raise ValueError('`array` is not a 2d, 3d or 4d array')

    # Even dimension -> dim/2; odd dimension -> dim/2 - 0.5
    cy, cx = (dim / 2 - (0.5 if dim % 2 else 0) for dim in shape)

    if verbose:
        print('Center px coordinates at x,y = ({}, {})'.format(cx, cy))
    return int(cy), int(cx)
def cart_to_pol(x, y, cx=0, cy=0, astro_convention=False):
    """
    Returns polar coordinates for input cartesian coordinates

    Parameters
    ----------
    x : float or numpy ndarray
        x coordinates with respect to the center
    y : float or numpy ndarray
        y coordinates with respect to the center
    cx, cy : float or numpy ndarray
        x, y coordinates of the center of the image to be considered for
        conversion to cartesian coordinates.
    astro_convention: bool
        Whether to use angles measured from North up/East left (True), or
        measured from the positive x axis (False).

    Returns
    -------
    r, theta: floats or numpy ndarrays
        radii and polar angles corresponding to the input x and y.
    """
    dx = x - cx
    dy = y - cy
    # Euclidean distance from the reference point (inlined `dist` helper)
    r = np.sqrt(dy ** 2 + dx ** 2)
    theta = np.rad2deg(np.arctan2(dy, dx))
    # Astronomy convention measures the angle from North up / East left
    if astro_convention:
        theta -= 90
    return r, theta
def pol_to_cart(r, theta, r_err=0, theta_err=0, cx=0, cy=0,
                astro_convention=False):
    """
    Returns cartesian coordinates for input polar coordinates, with error
    propagation.

    Parameters
    ----------
    r, theta : float or numpy ndarray
        radii and position angles to be converted to cartesian coords x and y.
    r_err : float, optional
        Error on radial separation. Default is 0
    theta_err : float, optional
        Error on position angle, in degrees. Default is 0
    cx, cy : float or numpy ndarray
        x, y coordinates of the center to be considered for conversion to
        cartesian coordinates.
    astro_convention: bool
        Whether to use angles measured from North up/East left (True), or
        measured from the positive x axis (False). If True, the x axis is
        reversed to match positive axis pointing East (left).

    Returns
    -------
    x, y: floats or numpy ndarrays
        x, y positions corresponding to input radii and position angles.
    dx, dy: floats or numpy arrays
        dx, dy uncertainties on positions propagated from input uncertainties
        on r and theta. Only returned when r_err or theta_err is nonzero.
    """
    # In the astro convention the angle origin shifts and the x axis flips
    if astro_convention:
        theta += 90
        x_sign = -1
    else:
        x_sign = 1

    theta_rad = np.deg2rad(theta)
    theta_err_rad = np.deg2rad(theta_err)
    cos_t = np.cos(theta_rad)
    sin_t = np.sin(theta_rad)

    x = cx + x_sign * r * cos_t
    y = cy + r * sin_t

    # First-order propagation of the r and theta uncertainties
    dx_err = np.sqrt(cos_t ** 2 * r_err ** 2
                     + r ** 2 * sin_t ** 2 * theta_err_rad ** 2)
    dy_err = np.sqrt(sin_t ** 2 * r_err ** 2
                     + r ** 2 * cos_t ** 2 * theta_err_rad ** 2)

    if r_err != 0 or theta_err != 0:
        return x, y, dx_err, dy_err
    return x, y
def pol_to_eq(r, t, rError=0, tError=0, astro_convention=False, plot=False):
    r"""
    Converts a position (r,t) given in polar coordinates into :math:`\Delta` RA
    and :math:`\Delta` DEC (equatorial coordinates), with error propagation.

    Note: regardless of the assumption on input angle t (see description for
    `astro_convention`), the output RA is counted positive towards left.

    Parameters
    ----------
    r: float
        The radial coordinate.
    t: float
        The angular coordinate in degrees
    rError: float, optional
        The error bar related to r.
    tError: float, optional
        The error bar related to t, in deg.
    astro_convention: bool, optional
        Whether the input angle t is assumed to be measured from North up,
        East left (True), or measured from the positive x axis (False).
    plot: boolean, optional
        If True, a figure illustrating the error ellipse is displayed.

    Returns
    -------
    out : tuple
        ((RA, RA error), (DEC, DEC error))
    """
    # Shift angles measured from the positive x axis into the North-up
    # convention expected by the sin/cos projections below
    if not astro_convention:
        t -= 90
    ra = (r * np.sin(math.radians(t)))
    dec = (r * np.cos(math.radians(t)))
    u, v = (ra, dec)

    # Orientation of the error ellipse in the RA/DEC plane
    nu = np.mod(np.pi/2-math.radians(t), 2*np.pi)
    # Semi-axes: radial error and the tangential arc spanned by the angular error
    a, b = (rError, r*np.sin(math.radians(tError)))

    # Sample the error ellipse densely and take RA/DEC extrema as error bars
    beta = np.linspace(0, 2*np.pi, 5000)
    x, y = (u + (a * np.cos(beta) * np.cos(nu) - b * np.sin(beta) * np.sin(nu)),
            v + (b * np.sin(beta) * np.cos(nu) + a * np.cos(beta) * np.sin(nu)))
    raErrorInf = u - np.amin(x)
    raErrorSup = np.amax(x) - u
    decErrorInf = v - np.amin(y)
    decErrorSup = np.amax(y) - v

    if plot:
        # Error ellipse, nominal position, angular-error rays and the circle
        # of radius r, with RA increasing to the left (inverted x axis)
        plt.plot(u, v, 'ks', x, y, 'r')
        plt.plot((r+rError) * np.cos(nu), (r+rError) * np.sin(nu), 'ob',
                 (r-rError) * np.cos(nu), (r-rError) * np.sin(nu), 'ob')
        plt.plot(r * np.cos(nu+math.radians(tError)),
                 r*np.sin(nu+math.radians(tError)), 'ok')
        plt.plot(r * np.cos(nu-math.radians(tError)),
                 r*np.sin(nu-math.radians(tError)), 'ok')
        plt.plot(0, 0, 'og', np.cos(np.linspace(0, 2*np.pi, 10000)) * r,
                 np.sin(np.linspace(0, 2*np.pi, 10000)) * r, 'y')
        plt.plot([0, r*np.cos(nu+math.radians(tError*0))],
                 [0, r*np.sin(nu+math.radians(tError*0))], 'k')
        axes().set_aspect('equal')
        lim = np.amax([a, b]) * 2.
        xlim([ra-lim, ra+lim])
        ylim([dec-lim, dec+lim])
        gca().invert_xaxis()
        show()

    # Error bars are the mean of the inferior/superior extrema
    return ((ra, np.mean([raErrorInf, raErrorSup])),
            (dec, np.mean([decErrorInf, decErrorSup])))
def QU_to_QUphi(Q, U, delta_x=0, delta_y=0, scale_r2=False,
                north_convention=False):
    """
    Returns Qphi and Uphi images, from input Q and U images.

    Parameters
    ----------
    Q: numpy ndarray
        2d numpy array containing the Q component of polarisation.
    U: numpy ndarray
        2d numpy array containing the U component of polarisation. Should have
        the same dimensions as Q.
    delta_x, delta_y: float, opt
        If the star is not at the center of the image, delta_x and delta_y
        indicate by how much it is offset along the x and y dimensions, resp.
    scale_r2: bool, opt
        Whether to scale by r^2 during conversion.
    north_convention: bool, opt
        Whether to use angles measured from North up/East left (True), or
        measured from the positive x axis (False).

    Returns
    -------
    Qphi, Uphi: numpy ndarrays
        Qphi and Uphi images
    """
    cy, cx = frame_center(Q)
    Qphi = np.zeros_like(Q)
    Uphi = np.zeros_like(U)
    for ii in range(Q.shape[1]):
        for jj in range(Q.shape[0]):
            x = float(ii - cx - delta_x)
            y = float(jj - cy - delta_y)
            # Bug fix: cart_to_pol has no `north_convention` parameter — its
            # keyword is `astro_convention`, so the old call raised TypeError
            # on every invocation.
            rho, phi = cart_to_pol(x, y, astro_convention=north_convention)
            phi = np.deg2rad(phi)
            cos2phi = np.cos(2 * phi)
            sin2phi = np.sin(2 * phi)
            qphi = Q[jj, ii] * cos2phi + U[jj, ii] * sin2phi
            uphi = -Q[jj, ii] * sin2phi + U[jj, ii] * cos2phi
            if scale_r2:
                qphi *= rho ** 2
                uphi *= rho ** 2
            Qphi[jj, ii] = qphi
            Uphi[jj, ii] = uphi
    return Qphi, Uphi
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
26796,
351,
5499,
3519,
284,
2939,
22715,
290,
20435,
32626,
13,
198,
37811,
198,
198,
834,
9800,
834,
796,
705,
26886,
418,
40649,
33231,
24416,
11,
17284,
259,
195... | 2.18039 | 4,202 |
from enum import Enum
from pytorch_pretrained_bert import BertTokenizer, BertModel, BertAdam
from pytorch_pretrained_bert.modeling import BertLayerNorm
from data_utils.readers.span_pred_reader import BertSpanPredReader
import flint.span_util as span_util
import flint.torch_util as torch_util
import torch.nn as nn
from torch.nn.functional import nll_loss
import torch
import config
def init_bert_weights(module, initializer_range):
    """ Initialize the weights.
    """
    if isinstance(module, (nn.Linear, nn.Embedding)):
        # Slightly different from the TF version which uses truncated_normal
        # for initialization; cf https://github.com/pytorch/pytorch/pull/5617
        module.weight.data.normal_(mean=0.0, std=initializer_range)
        # Linear layers additionally get a zeroed bias (when present)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, BertLayerNorm):
        module.beta.data.normal_(mean=0.0, std=initializer_range)
        module.gamma.data.normal_(mean=0.0, std=initializer_range)
| [
6738,
33829,
1330,
2039,
388,
198,
198,
6738,
12972,
13165,
354,
62,
5310,
13363,
62,
4835,
1330,
22108,
30642,
7509,
11,
22108,
17633,
11,
22108,
23159,
198,
6738,
12972,
13165,
354,
62,
5310,
13363,
62,
4835,
13,
4666,
10809,
1330,
22... | 2.794038 | 369 |
# Copyright (c) 2013 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import ast
from tank import Hook
import tank.templatekey
class HieroTranslateTemplate(Hook):
    """
    This class implements a hook that's responsible for translating a Toolkit
    template object into a Hiero export string.
    """

    def execute(self, template, output_type, **kwargs):
        """
        Takes a Toolkit template object as input and returns a string
        representation which is suitable for Hiero exports. The Hiero export
        templates contain tokens, such as {shot} or {clip}, which are replaced
        by the exporter. This hook should convert a template object with its
        special custom fields into such a string. Depending on your template
        setup, you may have to do different steps here in order to fully
        convert your template. The path returned will be validated to check
        that no leftover template fields are present, and that the returned
        path is fully understood by Hiero.

        :param template: The Toolkit template object to be translated.
        :param str output_type: The output type associated with the template.

        :returns: A Hiero-compatible path.
        :rtype: str
        """
        # first convert basic fields
        mapping = {
            "{Sequence}": "{sequence}",
            "{Shot}": "{shot}",
            "{name}": "{clip}",
            "{version}": "{tk_version}",
        }

        # see if we have a value to use for Step
        try:
            task_filter = self.parent.get_setting("default_task_filter", "[]")
            task_filter = ast.literal_eval(task_filter)
            for (field, op, value) in task_filter:
                if field == "step.Step.code":
                    mapping["{Step}"] = value
        except ValueError:
            # continue without Step
            self.parent.log_error("Invalid value for 'default_task_filter'")

        # get the string representation of the template object
        template_str = template.definition

        # simple string to string replacement
        # the nuke script name is hard coded to ensure a valid template
        if output_type == 'script':
            template_str = template_str.replace('{name}', 'scene')
        # Bug fix: dict.iteritems() does not exist on Python 3; items()
        # behaves identically here on both Python 2 and 3.
        for (orig, repl) in mapping.items():
            template_str = template_str.replace(orig, repl)

        # replace {SEQ} style keys with their translated string value
        for (name, key) in template.keys.items():
            if isinstance(key, tank.templatekey.SequenceKey):
                # this is a sequence template, for example {SEQ};
                # replace it with #### via the key's FORMAT specifier
                template_str = template_str.replace(
                    "{%s}" % name, key.str_from_value("FORMAT:#"))

        return template_str
| [
2,
15069,
357,
66,
8,
2211,
34198,
10442,
3457,
13,
198,
2,
220,
198,
2,
7102,
37,
25256,
12576,
5357,
4810,
3185,
7112,
2767,
13153,
198,
2,
220,
198,
2,
770,
670,
318,
2810,
366,
1921,
3180,
1,
290,
2426,
284,
262,
34198,
37709,... | 2.758232 | 1,154 |
import tables
import tempfile
import shutil
from ctapipe.core import run_tool
from pathlib import Path
from ctapipe.tools.process import ProcessorTool
# importlib.resources.files() is only available on Python 3.9+; fall back to
# the importlib_resources backport package on older interpreters.
try:
    from importlib.resources import files
except ImportError:
    from importlib_resources import files
| [
11748,
8893,
198,
11748,
20218,
7753,
198,
11748,
4423,
346,
198,
198,
6738,
269,
44335,
3757,
13,
7295,
1330,
1057,
62,
25981,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
269,
44335,
3757,
13,
31391,
13,
14681,
1330,
32893,
253... | 3.573333 | 75 |
#!/usr/bin/python
#
# Copyright 2016 Pinterest, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for classes is common/hosts.py."""
from collections import Counter
from mock import Mock, patch
import os
import tempfile
import testutil
import time
from unittest import TestCase
from kingpin.kazoo_utils import KazooClientManager, ServerSet, hosts, FileWatch
from kingpin.kazoo_utils.hosts import (BaseHostSelector, HostsProvider, RandomHostSelector)
ZK_HOSTS = ["datazk001:2181", "datazk002:2181"]
class HostSelectorWithLocalFileTestCase(TestCase):
    """
    This class has exact test set as the class above. Every time a
    HostProvider is initialized, it takes an additional file path
    argument. Although adding this file path argument does not change
    the code path of all unit tests, we want to keep the exact test set
    here to make sure having the local file does not change any behavior
    of HostProvider.
    """
    # Static fixtures shared by all tests in this class.
    HOST_LIST = ["host11:8080", "host12:8181"]
    HOST_PROVIDER_NAME = "test_provider"
    # Initialize a singleton file watch with low wait time
    FILE_WATCH = FileWatch(polling_wait_in_seconds=0.5)

    def test_init_base_host_selector_class(self):
        """Test base initialization and functionality."""
        # Each test creates its own temp file so the FileWatch singleton has
        # something real to poll, and removes it at the end.
        fd, tmp_file = tempfile.mkstemp()
        host_provider = HostsProvider([], file_path=tmp_file)
        base_host_selector = BaseHostSelector(host_provider)
        # Check that some base states are set.
        self.assertTrue(base_host_selector._last is None)
        self.assertTrue(base_host_selector._current is None)
        self.assertTrue(base_host_selector._select_time is None)
        self.assertEquals(base_host_selector._bad_hosts, {})
        self.assertEquals(base_host_selector._retry_time, 60)
        self.assertTrue(base_host_selector._host_provider is host_provider)
        # This is an abstract class. _choose_host() should raise an exception.
        self.assertRaises(NotImplementedError, base_host_selector._choose_host)
        HostSelectorWithLocalFileTestCase.FILE_WATCH._clear_all_watches()
        os.remove(tmp_file)

    def test_retrieving_and_invalidation(self):
        """Test host retrieval."""
        fd, tmp_file = tempfile.mkstemp()
        with open(tmp_file, 'w') as f:
            f.write('\n'.join(HostSelectorWithLocalFileTestCase.HOST_LIST))
        host_provider = HostsProvider(HostSelectorWithLocalFileTestCase.HOST_LIST, file_path=tmp_file)
        # expire_time=0 and retry_time=0 force a fresh selection on every
        # get_host() call and immediate forgiveness of bad hosts.
        base_host_selector = BaseHostSelector(
            host_provider, expire_time=0, retry_time=0,
            invalidation_threshold=1.0)
        self.assertTrue(base_host_selector.get_last_host() is None)
        with patch(hosts.__name__ + ".BaseHostSelector._choose_host",
                   new=Mock(return_value=HostSelectorWithLocalFileTestCase.HOST_LIST[0])):
            # Get one host.
            host1 = base_host_selector.get_host()
            self.assertEquals(host1, HostSelectorWithLocalFileTestCase.HOST_LIST[0])
            # If invalidated the state of the object changes.
            self.assertTrue(host1 not in base_host_selector._bad_hosts)
            base_host_selector.invalidate()
            self.assertTrue(host1 in base_host_selector._bad_hosts)
        # If called again, with retry_time being set to 0 bad hosts should be
        # invalidated.
        with patch(hosts.__name__ + ".BaseHostSelector._choose_host",
                   new=Mock(return_value=HostSelectorWithLocalFileTestCase.HOST_LIST[1])):
            host2 = base_host_selector.get_host()
            # Now bad hosts should be empty
            self.assertTrue(not base_host_selector._bad_hosts)
            self.assertEquals(host2, HostSelectorWithLocalFileTestCase.HOST_LIST[1])
            base_host_selector.invalidate()
            self.assertTrue(host2 in base_host_selector._bad_hosts)
        HostSelectorWithLocalFileTestCase.FILE_WATCH._clear_all_watches()
        os.remove(tmp_file)

    def test_reject_invalidation(self):
        """Test rejecting invalidation."""
        fd, tmp_file = tempfile.mkstemp()
        with open(tmp_file, 'w') as f:
            f.write('\n'.join(HostSelectorWithLocalFileTestCase.HOST_LIST))
        host_provider = HostsProvider(HostSelectorWithLocalFileTestCase.HOST_LIST, file_path=tmp_file)
        # Default invalidation_threshold: with 2 hosts, marking 1 bad would
        # exceed threshold * len(hosts), so the invalidation is rejected.
        base_host_selector = BaseHostSelector(host_provider, expire_time=0, retry_time=0)
        with patch(hosts.__name__ + ".BaseHostSelector._choose_host",
                   new=Mock(return_value=HostSelectorWithLocalFileTestCase.HOST_LIST[0])):
            # Get one host.
            host1 = base_host_selector.get_host()
            self.assertEquals(host1, HostSelectorWithLocalFileTestCase.HOST_LIST[0])
            # If invalidated the state of the object changes.
            self.assertTrue(host1 not in base_host_selector._bad_hosts)
            base_host_selector.invalidate()
            # Because 1 is larger than 2 * 0.2 = 0.4
            self.assertTrue(host1 not in base_host_selector._bad_hosts)
            base_host_selector._invalidation_threshold = 0.5
            host1 = base_host_selector.get_host()
            self.assertEquals(host1, HostSelectorWithLocalFileTestCase.HOST_LIST[0])
            base_host_selector.invalidate()
            # Because 1 <= 2 * 0.5 = 1.0
            self.assertTrue(host1 in base_host_selector._bad_hosts)
        HostSelectorWithLocalFileTestCase.FILE_WATCH._clear_all_watches()
        os.remove(tmp_file)

    def test_random_host_selector(self):
        """Test the RandomHostSelector."""
        fd, tmp_file = tempfile.mkstemp()
        with open(tmp_file, 'w') as f:
            f.write('\n'.join(HostSelectorWithLocalFileTestCase.HOST_LIST))
        host_provider = HostsProvider(HostSelectorWithLocalFileTestCase.HOST_LIST,
                                      file_path=tmp_file)
        random_host_selector = RandomHostSelector(
            host_provider, expire_time=0, retry_time=0,
            invalidation_threshold=1.0)
        # Note that we didn't have to mock _choose_host() call this time,
        # it should be in RandomHostSelector class already.
        some_host = random_host_selector.get_host()
        self.assertTrue(some_host in HostSelectorWithLocalFileTestCase.HOST_LIST)
        self.assertEquals(random_host_selector._current, some_host)
        no_of_iterations = 250
        # If I run get_host() about 100 times I expect to have relatively
        # even distribution and all hosts in the host_list returned by now.
        returned_hosts = [random_host_selector.get_host()
                          for i in xrange(no_of_iterations)]
        host_counter = Counter(returned_hosts)
        # We expect that all calls happened.
        self.assertEquals(sum(host_counter.itervalues()), no_of_iterations)
        # We should have seen all the elements.
        self.assertEquals(set(host_counter),
                          set(HostSelectorWithLocalFileTestCase.HOST_LIST))
        # But if we had left large expire_time only one host would be picked
        # up all the time, and we'll show that here.
        random_host_selector = RandomHostSelector(host_provider,
                                                  invalidation_threshold=1.0)
        returned_hosts = [random_host_selector.get_host()
                          for i in xrange(no_of_iterations)]
        host_counter = Counter(returned_hosts)
        self.assertEquals(len(list(host_counter)), 1)
        # Test invalidation
        # NOTE(review): this local 'hosts' list shadows the imported 'hosts'
        # module for the remainder of this method — intentional but fragile.
        hosts = [HostSelectorWithLocalFileTestCase.HOST_LIST[0]]
        for i in xrange(4):
            hosts.append(HostSelectorWithLocalFileTestCase.HOST_LIST[1])
        # NOTE(review): random_select is not defined in this fragment —
        # presumably a module-level helper defined elsewhere in this file
        # that pops entries off the 'hosts' list above; confirm.
        mock = Mock(side_effect=random_select)
        with patch("random.choice", new=mock):
            random_host_selector = RandomHostSelector(
                host_provider, expire_time=0, retry_time=60,
                invalidation_threshold=1.0)
            host = random_host_selector.get_host()
            self.assertEqual(host, HostSelectorWithLocalFileTestCase.HOST_LIST[1])
            random_host_selector.invalidate()
            # Because mock will return the bad host three times in a row,
            # this will force it to compute the set of good hosts
            host = random_host_selector.get_host()
            self.assertEqual(host, HostSelectorWithLocalFileTestCase.HOST_LIST[0])
            # At this point, random.choice should have been called 5 times
            self.assertEqual(mock.call_count, 5)
        HostSelectorWithLocalFileTestCase.FILE_WATCH._clear_all_watches()
        os.remove(tmp_file)

    def test_invalid_use_zk_for_discovery(self):
        """Test invalid USE_ZOOKEEPER_FOR_DISCOVERY setting."""
        fd, tmp_file = tempfile.mkstemp()
        # With zookeeper discovery disabled, constructing a HostsProvider
        # without an explicit zk path should fail.
        hosts.USE_ZOOKEEPER_FOR_DISCOVERY = False
        self.assertRaises(Exception, HostsProvider,
                          HostSelectorWithLocalFileTestCase.HOST_LIST,
                          file_path = tmp_file)
        HostSelectorWithLocalFileTestCase.FILE_WATCH._clear_all_watches()
        os.remove(tmp_file)

    def test_both_zk_and_file_paths(self):
        """Test invalid USE_ZOOKEEPER_FOR_DISCOVERY setting."""
        fd, tmp_file = tempfile.mkstemp()
        # Supplying both a zk path and a file path must also be rejected
        # when zookeeper discovery is disabled.
        hosts.USE_ZOOKEEPER_FOR_DISCOVERY = False
        self.assertRaises(Exception, HostsProvider,
                          HostSelectorWithLocalFileTestCase.HOST_LIST,
                          "/foo",
                          file_path = tmp_file)
        HostSelectorWithLocalFileTestCase.FILE_WATCH._clear_all_watches()
        os.remove(tmp_file)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
198,
2,
15069,
1584,
17334,
11,
3457,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
... | 2.34122 | 4,311 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import pexpect
# (current) UNIX password:
# NOTE(review): main() is not defined anywhere in this fragment — presumably
# it was defined elsewhere in the full file. Running this script as-is would
# raise NameError; confirm against the complete source.
if __name__ == '__main__':
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
613,
87,
806,
198,
198,
2,
357,
14421,
8,
4725,
10426,
9206,
25,
198,
198,
361,
11593,
3672,
8... | 2.220339 | 59 |
from abcpy.discretemodels import *
from tests.probabilisticmodels_tests import AbstractAPIImplementationTests
import unittest
"""Tests whether the methods defined for discrete probabilistic models are working as intended."""
class CheckParametersAtInitializationTests(unittest.TestCase):
    """Tests that no probabilistic model with invalid parameters can be initialized."""

    # NOTE(review): no test methods are visible in this fragment — the
    # concrete cases are presumably defined elsewhere or stripped; confirm.
    # TODO: Test for all distributions the behaviour if input parameters are real distributions and not only
    # hyperparameters
class DimensionTests(unittest.TestCase):
    """Tests whether the dimensions of all discrete models are defined in the correct way."""
    # NOTE(review): test methods not visible in this fragment — confirm
    # against the full file.
class SampleFromDistributionTests(unittest.TestCase):
    """Tests the return value of forward_simulate for all discrete distributions."""
    # NOTE(review): test methods not visible in this fragment — confirm
    # against the full file.
class CheckParametersBeforeSamplingTests(unittest.TestCase):
    """Tests whether False will be returned if the input parameters of _check_parameters_before_sampling are not
    accepted."""
    # NOTE(review): test methods not visible in this fragment — confirm
    # against the full file.
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| [
6738,
450,
66,
9078,
13,
15410,
1186,
368,
375,
1424,
1330,
1635,
198,
6738,
5254,
13,
1676,
65,
14991,
2569,
27530,
62,
41989,
1330,
27741,
17614,
3546,
32851,
51,
3558,
198,
198,
11748,
555,
715,
395,
198,
198,
37811,
51,
3558,
1771... | 3.701439 | 278 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import parlai.core.testing_utils as testing_utils
SKIP_TESTS = False
try:
import fairseq # noqa: F401
except ImportError:
SKIP_TESTS = True
BATCH_SIZE = 64
NUM_EPOCHS = 5
LR = 1e-2
class TestFairseq(unittest.TestCase):
"""Checks that fairseq can learn some very basic tasks."""
@testing_utils.skipUnlessGPU
@unittest.skipIf(SKIP_TESTS, "Fairseq not installed")
@testing_utils.skipUnlessGPU
@unittest.skipIf(SKIP_TESTS, "Fairseq not installed")
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
17168,
5964,
1043,
287,
262,
198,
2,
38559,
24290,
2393,
287,... | 2.759259 | 270 |
import click
from clients.services import ClientServices
from clients.models import Client
@click.group()
def clients():
    # The docstring doubles as the group's --help text, so it is kept verbatim.
    """ Manages the clients lifecycle"""
@clients.command()
@click.option('-n', '--name', type=str, prompt=True, help='The client name')
@click.option('-c', '--company', type=str, prompt=True, help='The client Company')
@click.option('-e', '--email', type=str, prompt=True, help='The client email')
@click.option('-p', '--position', type=str, prompt=True, help='The client position')
@click.pass_context
def create(ctx, name, company, email, position):
    """Create a new client """
    # Build the domain object, then persist it through the service layer
    # backed by the table handle stored on the click context.
    new_client = Client(name, company, email, position)
    ClientServices(ctx.obj['clients_table']).create_client(new_client)
@clients.command()
@click.pass_context
def list_clients(ctx):
    """List all clients"""
    client_services = ClientServices(ctx.obj['clients_table'])
    client_list = client_services.list_clients()

    click.echo(' ID | NAME | COMPANY | EMAIL | POSITION')
    click.echo('*'*50)
    for client in client_list:
        # FIX: use click.echo for the rows as well (the original used bare
        # print) so all CLI output goes through click's output stream —
        # consistent with the header above and captured by click's CliRunner.
        click.echo('{uid} | {name} | {company} | {email} | {position}'.format(
            uid=client['uid'],
            name=client['name'],
            company=client['company'],
            email=client['email'],
            position=client['position']))
@clients.command()
@click.argument('client_uid', type=str)
@click.pass_context
def update(ctx, client_uid):
    """Update a client """
    client_service = ClientServices(ctx.obj['clients_table'])
    # Look the client up by uid in the full listing.
    matches = [c for c in client_service.list_clients() if c['uid'] == client_uid]
    if not matches:
        click.echo('Client not found')
        return
    # Run the interactive update flow on the first match and persist it.
    updated = _update_client_flow(Client(**matches[0]))
    client_service.update_client(updated)
    click.echo('Client Updated')
@clients.command()
@click.argument('client_uid', type=str)
@click.pass_context
def delete(ctx, client_uid):
    """Delete a client"""
    client_service = ClientServices(ctx.obj['clients_table'])
    # Look the client up by uid in the full listing.
    matches = [c for c in client_service.list_clients() if c['uid'] == client_uid]
    if not matches:
        click.echo('Client not found')
        return
    # NOTE(review): this calls the service's underscore-prefixed
    # _delete_client — presumably the intended API despite the name; confirm.
    client_service._delete_client(Client(**matches[0]))
    click.echo('Remove it')
@clients.command()
@click.argument('client_uid', type=str)
@click.pass_context
def search(ctx, client_uid):
    """Search a client """
    client_service = ClientServices(ctx.obj['clients_table'])
    client_found = client_service.search_client(client_uid)

    click.echo(' ID | NAME | COMPANY | EMAIL | POSITION')
    click.echo('*'*50)
    for client in client_found:
        # FIX: use click.echo for the rows as well (the original used bare
        # print) so all CLI output goes through click's output stream —
        # consistent with the header above and captured by click's CliRunner.
        click.echo('{uid} | {name} | {company} | {email} | {position}'.format(
            uid=client['uid'],
            name=client['name'],
            company=client['company'],
            email=client['email'],
            position=client['position']))
# Module-level alias presumably looked up by the CLI loader to register this
# command group — TODO confirm against the loader. NOTE: it shadows the
# builtin all() within this module.
all = clients
| [
11748,
3904,
198,
6738,
7534,
13,
30416,
1330,
20985,
31007,
198,
6738,
7534,
13,
27530,
1330,
20985,
628,
198,
31,
12976,
13,
8094,
3419,
198,
4299,
7534,
33529,
198,
220,
220,
220,
37227,
1869,
1095,
262,
7534,
3868,
47510,
37811,
198... | 2.351178 | 1,401 |
# -*- coding: utf-8 -*-
"""Module containing the logic for updating DNS records using the duckdns protocol.
From the duckdns.org website:
https://{DOMAIN}/update?domains={DOMAINLIST}&token={TOKEN}&ip={IP}
where:
DOMAIN the service domain
DOMAINLIST is either a single domain or a comma separated list of domains
TOKEN is the API token for authentication/authorization
IP is either the IP or blank for auto-detection
"""
from logging import getLogger
import requests
from .base import UpdateProtocol
from ..common import constants
LOG = getLogger(__name__)
class UpdateProtocolDuckdns(UpdateProtocol):
    """Updater for services compatible with the duckdns protocol."""

    configuration_key = "duckdns"

    def __init__(self, hostname, token, url, *args, **kwargs):
        """
        Initialize.

        :param hostname: the fully qualified hostname to be managed
        :param token: the token for authentication
        :param url: the API URL for updating the DNS entry
        """
        self.hostname = hostname
        self.__token = token
        self._updateurl = url
        super(UpdateProtocolDuckdns, self).__init__()

    def update(self, ip):
        """Update the IP on the remote service."""
        timeout = 60
        LOG.debug("Updating '%s' to '%s' at service '%s'", self.hostname, ip, self._updateurl)
        # duckdns expects only the first label of the hostname in "domains",
        # and an empty "ip" triggers server-side auto-detection.
        params = {
            "domains": self.hostname.partition(".")[0],
            "token": self.__token,
            "ip": "" if ip is None else ip,
        }
        # LOG.debug("Update params: %r", params)
        req = requests.get(self._updateurl, params=params, headers=constants.REQUEST_HEADERS_DEFAULT,
                           timeout=timeout)
        LOG.debug("status %i, %s", req.status_code, req.text)
        # duckdns response codes seem undocumented...
        if req.status_code != 200:
            return "invalid http status code: %s" % req.status_code
        if req.text.startswith("OK"):
            return ip
        return req.text
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
26796,
7268,
262,
9156,
329,
19698,
18538,
4406,
1262,
262,
22045,
67,
5907,
8435,
13,
198,
198,
4863,
262,
22045,
67,
5907,
13,
2398,
3052,
25,
198,
198,
... | 2.519065 | 813 |
import time
import os
# As you can see, it's not very optimal
| [
11748,
640,
198,
11748,
28686,
198,
198,
2,
1081,
345,
460,
766,
11,
663,
407,
845,
16586,
220,
628
] | 3.368421 | 19 |
#!usr/bin/env python
# -*- coding:utf-8 -*-
# Create on 2017.2.24
import json
import time
import click
import datetime
import schedule
import threading
from collector import tasks
from collector.handler import CollectHandler
from libs.database.mongodb.projectdb import Project
from django.conf import settings
class Scheduler(object):
"""
Oakling Scheduler Module
"""
    def __init__(self):
        """
        Models Initialization
        """
        # Project.object(status=STATUS_ON).order_by('+priority')
        # NOTE(review): 'activate' is never read again. Accessing
        # settings.BASE_DIR presumably forces Django's lazy settings to load
        # as a side effect — confirm before deleting this line.
        activate = settings.BASE_DIR
# @staticmethod
# def _filter_generator_projects():
# """
# Projects Filter
# :return:
# """
# _projects = Project.objects(status=Project.STATUS_ON).order_by('+priority')
# projects = []
# for project in _projects:
# now = datetime.datetime.now()
# last = project.last_generator_time
# interval = int(project.generator_interval)
# if not project.last_generator_time:
# projects.append(project)
# project.update(last_generator_time=now)
# continue
# next = last + datetime.timedelta(seconds=interval)
# if next <= now:
# projects.append(project)
# project.update(last_generator_time=now)
# else:
# continue
#
# return projects
@staticmethod
def _filter_drop_project():
"""
Filter delete group project
:return:
"""
_projects = Project.objects(status=Project.STATUS_DELAY)
return _projects
@staticmethod
def _filter_processor_projects():
"""
Projects Filter
:return:
"""
_projects = Project.objects(status=Project.STATUS_ON).order_by('+priority')
projects = []
for project in _projects:
now = datetime.datetime.now()
last = project.last_generator_time
interval = int(project.generator_interval)
if not project.last_generator_time:
projects.append(project)
project.update(last_generator_time=now)
continue
next = last + datetime.timedelta(seconds=interval)
if next <= now:
projects.append(project)
project.update(last_generator_time=now)
else:
continue
return projects
# @staticmethod
# def _filter_processor_projects():
# """
# Projects Filter
# :return:
# """
# _projects = Project.objects(status=Project.STATUS_ON).order_by('+priority')
# projects = []
# for project in _projects:
# projects.append(project)
#
# return projects
# def run_generator_dispatch(self):
# """
# Generator Dispatch
# :return:
# """
# projects = self._filter_generator_projects()
# for project in projects:
# _priority = project.priority
# if _priority == -1:
# celery.high_generator.delay(str(project.id))
# elif _priority <= 3:
# celery.mid_generator.delay(str(project.id))
# else:
# celery.low_generator.delay(str(project.id))
#
# result = {
# 'status': True,
# "projects": len(projects)
# }
#
# print "[{0}]::Generator Dispatch::{1}".format(str(datetime.datetime.now())[:-4], result)
# return result
# @staticmethod
# def _filter_tasks(project):
# """
# Filter Tasks by Project
# :return:
# """
# _name = project.name
#
# _num = project.downloader_dispatch
# exec("from execute.{0}_models import {1}Task".format(_name, str(_name).capitalize()))
# exec("tasks = {0}Task.objects(status=0)[0:{1}]".format(str(_name).capitalize(), int(_num)))
#
# return tasks
@staticmethod
def _processor_tasks(project):
"""
Dispatch Tasks by Project
:return:
"""
_priority = project.priority
args = json.loads(project.args)
if _priority == -1:
tasks.high_processor.delay(project.name, **args)
elif _priority <= 3:
tasks.mid_processor.delay(project.name, **args)
else:
tasks.low_processor.delay(project.name, **args)
return {
"project": str(project.name),
}
def run_processor_dispatch(self):
"""
Processor Dispatch
:return:
"""
results = []
projects = self._filter_processor_projects()
for project in projects:
# tasks = self._filter_tasks(project)
result = self._processor_tasks(project)
results.append(result)
now = str(datetime.datetime.now())[:-4]
info = "[ Scheduler {0}]::Processor Dispatch::{1} ::{2}".format(now, len(results), results)
click.secho("[ INFO ] %s" % info, fg='green', bg='black')
return results
def run_auto_drop_project(self):
"""
Auto drop project
:return:
"""
results = []
handler = CollectHandler()
projects = self._filter_drop_project()
for project in projects:
if project.group == "delete" or "Delete" or "DELETE":
result = handler.drop_project(project.name)
results.append(result)
now = str(datetime.datetime.now())[:-4]
info = "[ Scheduler {0}]::Drop Project::{1} ::{2}".format(now, len(results), results)
click.secho("[ INFO ] %s" % info, fg='red', bg='black')
return results
@staticmethod
def run_query_project_status():
"""
Run Query Project Status to Redis
:return:
"""
handler = CollectHandler()
results = handler.query_all_projects_status("--all")
now = str(datetime.datetime.now())[:-4]
info = "[ Scheduler {0}]::Analysis Dispatch::{1} Updated Success.".format(now, len(results))
click.secho("[ INFO ] %s" % info, fg='yellow', bg='black')
@staticmethod | [
2,
0,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
13610,
319,
2177,
13,
17,
13,
1731,
628,
198,
11748,
33918,
198,
11748,
640,
198,
11748,
3904,
198,
11748,
4818,
8079,
19... | 2.157118 | 2,915 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 12 15:27:06 2019
@author: weiji
"""
import random
from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import Dense
from keras.layers import TimeDistributed
from keras.layers import Bidirectional
import tensorflow as tf
import keras
import tensorflow.keras.backend as K
import numpy as np
import pandas as pd
import tensorflow as tf
debug=False
def get_loss_of_segmentation (y_true, y_pred):
''' Our goal was to tell different steps, such as dissection, suture, or even more detailed steps, such as dissect bile duct. To approach this, I will first separate one procedure into a series of continuous 5-second segments (or totally 200 segments) with start and end time. The next segment's start time is the previous one's end time. Then, for each segment, I will have a list of binary indicators of 'is_grasper_installed', 'is_grasper_following', 'counts_grasper_following', ('seconds_grasper_following',), 'is_grasper_offscreen', 'counts_grasper_offscreen', ('seconds_grasper_offscreen',) 'is_grasper_activated' (energized), 'counts_grasper_actived', ('seconds_grasper_actived',) (repeat the same thing by replacing grasper->monopolor, bipolar, scissors, clip_applier, needle_drive, suction, energy). For each segment, I will assign a true label from hand labeling. The next step is to implement the loss function, which is a difficult that costs me some time to think. I prefer not the use absolute precision as the loss function, because this way does not punish much on switch to a new label and will cost a lot of noises in the middle of a big chunk of activity. So I would like to implement my loss function this way: after I got the predicted labels, I will take out every time stamp that the label changes between sequential segments. If the change exists in both ground truth and predict, the loss would add the square of time between the two changes divided by the whole procedure length. Else, for each unexpected changes of labels between sequential segments, the loss function add by 1. In this case, I would punish a lot on these unexpected changes of labels. I will train the model with bi-directional LSTM and RNN, since I believe knowing what happens next matters for the prediction of the previous. At the end, I will group the continuous segments with the same label and return the start and end time of grouped data. He liked my idea. 
Ben asked me whether I think we have enough data. We now only have 8 Cholecystectomy cases. I told him I would like to try my best.
Input:
y_true: true label, 1D array
y_pred: predicted label: 1D array
Output:
loss: a float value larger than or equal to 0
'''
#return np.abs(y_true-y_pred)
y_true = np.array(y_true)
y_pred = np.array(y_pred)
# check the shape of y_true and y_pred.
if y_true.shape!= y_pred.shape:
print ('y_true and y_pred are in difference size. ')
return 0
if len(y_true.shape)!=1:
print ('y_true and y_pred are not 1d array. ')
return 0
# hanble when the length of y_true is 0 or 1.
if y_true.size==0:
return 0
if y_true.size==1:
if y_true[0]==y_pred[0]:
return 0
else:
return 1
# hanble when the length of y_true is larger than 1.
total_length = float(y_true.size-1) # length of the procedure.
# a) find all change values between continuous elements in y_true
# x is in list_true if the [x]th element and [x+1]th element are different
# the first element is [0]
index_true = np.where(y_true[:-1]!=y_true[1:])[0]
list_true =[]
for ind in index_true:
list_true.append({'y_pre': y_true[ind], \
'y_post': y_true[ind+1], \
'ind': ind, })
df_true = pd.DataFrame(list_true)
df_true.sort_values(by=['y_pre', 'y_post', 'ind'], inplace=True)
df_true.reset_index(drop=True, inplace=True)
if debug:
print (df_true)
# repeat a) for y_pred, and put results in list_pred and df_pred
index_pred = np.where(y_pred[:-1]!=y_pred[1:])[0]
list_pred =[]
for ind in index_pred:
list_pred.append({'y_pre': y_pred[ind], \
'y_post': y_pred[ind+1], \
'ind': ind, })
df_pred = pd.DataFrame(list_pred)
df_pred.sort_values(by=['y_pre', 'y_post', 'ind'], inplace=True)
df_pred.reset_index(drop=True, inplace=True)
if debug:
print (df_pred)
# compute loss
loss = 0
while len(df_true)>0:
# b) select all rows in df_true that has the same value of y_pre and y_post pair
y_pre = df_true['y_pre'][0]
y_post = df_true['y_post'][0]
if debug:
print(y_pre, y_post)
sel_true = (df_true['y_pre']== y_pre) &\
(df_true['y_post']== y_post)
sel_df_true = df_true.loc[sel_true]
# repeat b) for df_pred
sel_pred = (df_pred['y_pre']== y_pre) &\
(df_pred['y_post']== y_post)
sel_df_pred = df_pred.loc[sel_pred,:]
# for the rest of rows in y_true, each row contribute to loss by 1^2
# these values of y_pre and y_post pair entries
# exist in y_true not in y_pred
if len(sel_df_pred)==0:
loss+=len(sel_df_true)*1**2
elif len(sel_df_true)==0:
loss+=len(sel_df_pred)*1**2
else:
# if the length of the sel_true and sel_pred are the same
# return the sum of ( (ind_true-ind_pred)/total_length )**2
if len(sel_df_true)== len(sel_df_pred):
loss += np.sum((sel_df_true['ind'].reset_index(drop=True) \
- sel_df_pred['ind'].reset_index(drop=True))**2)/total_length**2
# print('value:',np.sum((sel_df_true['ind'].reset_index(drop=True) \
# - sel_df_pred['ind'].reset_index(drop=True))**2)/total_length**2)
if debug: print ('loss0', loss)
# if the length of the sel_true and sel_pred are not the same
# use greedy method to decide the loss
# if the length of the sel_true and sel_pred are not the same
else:
loss += greedy_distance(sel_df_true['ind'].reset_index(drop=True)\
, sel_df_pred['ind'].reset_index(drop=True))/total_length**2\
+ np.abs(len(sel_df_true)-len(sel_df_pred))*1**2
if debug: print ('loss1', loss)
# delete the processed columns in df_true and df_pred
df_true = df_true.loc[~sel_true, :]
df_true.reset_index(drop=True, inplace=True)
df_pred = df_pred.loc[~sel_pred, :]
df_pred.reset_index(drop=True, inplace=True)
# for the rest of rows in y_pred, each row contribute to loss by 1^2
# these values of y_pre and y_post pair entries
# exist in y_pred not in y_true
if len(df_true) == 0:
loss+= len(df_pred)*1**2
if debug: print ('loss2', loss)
break
return loss
def greedy_distance(ind_true, ind_pred):
'''
The is a way to find the closest match pairs between
It is done by the following steps:
1) a matrix A of size (I,J) is created.
I is the length of ind_true, J is the length of ind_pred
element A(ii,jj) = (ind-true[ii]-ind_pred[jj])**2
2) pick the minimum value of A, add the value to greedy_distance, delete the row and column the value exist
Input: ind_true, ind_pred
Output: greedy_distance, float value larger than or equal to 0.
'''
greedy_distance = 0
if debug: print('greedy distance',ind_true, ind_pred)
if len(ind_true)==0: return 0;
if len(ind_pred)==0: return 0
A = np.zeros((len(ind_true), len(ind_pred)))
for ii in range(len(ind_true)):
A[ii] = (ind_true[ii]-ind_pred)**2
if debug: print('A:',A)
while True:
# find value and index of the minimum element
min_element = np.amin(A)
if debug: print ('min_e:',min_element)
ii,jj = np.where(A==min_element)[0][0], np.where(A==min_element)[1][0]
if debug: print('ii,jj:',ii,jj)
# add the value to greedy_distance
greedy_distance += min_element
# delete the row and column the value exist
try:
A = np.delete(A,ii, axis=0)
A = np.delete(A,jj, axis=1)
except:
print ('cannot drop row and column')
break
if len(A)==0:
break
if A.size==0:
break
return greedy_distance
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
26223,
4280,
1105,
1315,
25,
1983,
25,
3312,
13130,
198,
198,
31,
9800,
25,
356,
20770,
1... | 2.315762 | 3,813 |
# -*- encoding: utf-8 -*-
'''
Current module: pyrunner.ext.idleshell.diyrpc
Rough version history:
v1.0 Original version to use
********************************************************************
@AUTHOR: Administrator-Bruce Luo(罗科峰)
MAIL: lkf20031988@163.com
RCS: rock4.common.dev.idleshell.diyrpc,v 2.0 2017年2月7日
FROM: 2016年8月16日
********************************************************************
======================================================================
UI and Web Http automation frame for python.
'''
import time,socket,sys,os,types
from code import InteractiveInterpreter
from idlelib import PyShell, rpc, IOBinding
from idlelib.configHandler import idleConf
HOST = "127.0.0.1"
PORT = 0
class TkAsyncUpdate:
''' Update Tk UI when MyInterp.poll_subprocess get a response. '''
def update(self, poll_response):
''' can be override '''
if poll_response:
how, what = poll_response
if how == "OK":
if what is not None:
print "ok:",repr(what)
elif how == "EXCEPTION":
print "exception:",repr(what)
elif how == "ERROR":
errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n"
print "error:",errmsg, what
class TkConsole():
''' In order to act as a output console with Tk-Text, this class is define some methods of "write writelines flush".'''
#### 示例一: RPC远程调用 runcode,并对输出重定向到 标准输入,输出,错误
def start_example():
''' d:\auto\buffer\sdf.py:
class Asdf:
def __init__(self):
self.hh = "Now in Asdf()"
def sdfs2(self,*args,**kwargs):
print self.hh
print args,kwargs
test2.py:
print "Now in running file: test2.py"
print "====呵呵"
'''
from Tkinter import Tk,Text
root = Tk()
text = Text(root)
text.pack()
tkconsole = TkConsole(text)
### run code and not block the Tk
api_file = r"d:\auto\buffer\sdf.py"
intp = MyInterp(tkconsole)
intp.start_subprocess(api_file)
# 默认执行 一行命令
intp.runsource("print +_+_+_+_语法错误信息的重定向示例'")
intp.runsource("import time;time.sleep(3);print '你哈'")
intp.runsource("time.sleep(3)")
intp.runsource("print '+_+_+_+_哈哈哈'")
# 执行多行命令, symbol = "exec"
intp.runsource("""
a = 'hello world'
if True:
print a
else:
print "oh no"
""", symbol= "exec")
### run code and block the tk
# run_file = r"d:\auto\buffer\test2.py"
# clt = MyRpcClient(intp.rpcclt)
#
# # 获取定义的 var值
# result = clt.remotequeue("exec", "poll_var", ('a'), {})
# print "poll_var result:", result
#
# result1 = clt.send_code_obj_req(filename = "test-Text",source = u'print "running here"')
# result2 = clt.send_code_obj_req(filename = run_file)
# print "send_code_obj_req result: %r %r" %(result1,result2)
#
# result3 = clt.remotequeue("exec", "runcode", ("import time;time.sleep(3);print 'Block end'",), {})
# print "runcode result:", result3
#
# result4 = clt.remotecall("asdf", "sdfs2", ("print 'remote call for Asdf'",), {})
# print "sdfs2 result:", result4
root.mainloop()
if __name__ == "__main__":
start_example()
| [
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
7061,
6,
201,
198,
11297,
8265,
25,
279,
2417,
403,
1008,
13,
2302,
13,
312,
829,
12758,
13,
10989,
2417,
14751,
201,
198,
201,
198,
49,
619,
2196,
2106,
25,
20... | 1.942021 | 1,811 |
import json
config = json.load(open('config/config.json'))
config['all_lexicons'] = json.load(open('config/lexicons.json'))
config['lexiconpath'] = {}
for lex in config['all_lexicons']:
config['lexiconpath'][lex['name']] = 'config/%s.json' % lex['name']
| [
11748,
33918,
198,
198,
11250,
796,
33918,
13,
2220,
7,
9654,
10786,
11250,
14,
11250,
13,
17752,
6,
4008,
198,
11250,
17816,
439,
62,
2588,
34280,
20520,
796,
33918,
13,
2220,
7,
9654,
10786,
11250,
14,
2588,
34280,
13,
17752,
6,
400... | 2.755319 | 94 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: openedge_function_runtime.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='openedge_function_runtime.proto',
package='runtime',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x1fopenedge_function_runtime.proto\x12\x07runtime\"f\n\x07Message\x12\x0b\n\x03QOS\x18\x01 \x01(\r\x12\r\n\x05Topic\x18\x02 \x01(\t\x12\x0f\n\x07Payload\x18\x03 \x01(\x0c\x12\x14\n\x0c\x46unctionName\x18\x0b \x01(\t\x12\x18\n\x10\x46unctionInvokeID\x18\x0c \x01(\t29\n\x07Runtime\x12.\n\x06Handle\x12\x10.runtime.Message\x1a\x10.runtime.Message\"\x00\x62\x06proto3')
)
_MESSAGE = _descriptor.Descriptor(
name='Message',
full_name='runtime.Message',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='QOS', full_name='runtime.Message.QOS', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='Topic', full_name='runtime.Message.Topic', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='Payload', full_name='runtime.Message.Payload', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='FunctionName', full_name='runtime.Message.FunctionName', index=3,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='FunctionInvokeID', full_name='runtime.Message.FunctionInvokeID', index=4,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=44,
serialized_end=146,
)
DESCRIPTOR.message_types_by_name['Message'] = _MESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Message = _reflection.GeneratedProtocolMessageType('Message', (_message.Message,), dict(
DESCRIPTOR = _MESSAGE,
__module__ = 'openedge_function_runtime_pb2'
# @@protoc_insertion_point(class_scope:runtime.Message)
))
_sym_db.RegisterMessage(Message)
_RUNTIME = _descriptor.ServiceDescriptor(
name='Runtime',
full_name='runtime.Runtime',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=148,
serialized_end=205,
methods=[
_descriptor.MethodDescriptor(
name='Handle',
full_name='runtime.Runtime.Handle',
index=0,
containing_service=None,
input_type=_MESSAGE,
output_type=_MESSAGE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_RUNTIME)
DESCRIPTOR.services_by_name['Runtime'] = _RUNTIME
# @@protoc_insertion_point(module_scope)
| [
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
4721,
469,
62,
8818,
62,
43282,
13,
1676,
1462,
198,
198,
11748,
25064,
198,
62,
65,
28,
17597,
13,
9641,
62,
10951,
58,
15,
60,
27,
18... | 2.460635 | 1,702 |
from gan_training.decoder import models
decoder_dict = {
'default': models.ReluNetworkDisentangle,
}
| [
6738,
308,
272,
62,
34409,
13,
12501,
12342,
1330,
4981,
198,
198,
12501,
12342,
62,
11600,
796,
1391,
198,
220,
220,
220,
705,
12286,
10354,
4981,
13,
6892,
84,
26245,
7279,
298,
9248,
11,
198,
92,
198
] | 2.864865 | 37 |
"""
Provides useful stuff, generally!
"""
from typing import Optional
class PossiblyIncompleteDict:
"""
A dict kind of thing (only supporting item getting) that, if an item isn't
available, gets fresh data from a refresh function.
"""
@staticmethod
def _del_nul(elem):
"""
elegantly tries to remove invalid \x00 chars from
strings, strings in lists, strings in dicts.
"""
if isinstance(elem, str):
return elem.replace(chr(0), '')
elif isinstance(elem, dict):
return {key: PossiblyIncompleteDict._del_nul(value)
for key, value in elem.items()}
elif isinstance(elem, list):
return [PossiblyIncompleteDict._del_nul(item)
for item in elem]
return elem
def __contains__(self, item):
"""
Needed for determining if an item is in the possibly incomplete self.
"""
return item in self._data
def update(self, value: dict):
"""
Updates the dict with provided dict.
"""
self._data.update(self._del_nul(value))
def maybe_refresh(self):
"""
Refresh if it may need a refresh.
"""
if self.may_need_refresh:
self.refresh()
def refresh(self):
"""
Refreshes data unconditionally.
"""
self._data = self._del_nul(self._refresh())
self.may_need_refresh = False
class CachedDataMixin:
"""
You provide:
- self._get_data for getting your data
You can also create an IGitt instance with your own data using from_data
classmethod.
"""
default_data = {} # type: dict
@classmethod # Ignore PyLintBear
def from_data(cls, data: Optional[dict]=None, *args, **kwargs):
"""
Returns an instance created from the provided data. No further requests
are made.
:raises TypeError:
When the args provided are insufficient to call __init__.
"""
instance = cls(*args, **kwargs)
instance.data = data or {}
return instance
def _get_data(self):
"""
Retrieves the data for the object.
"""
raise NotImplementedError
def refresh(self): # dont cover
"""
Refreshes all the data from the hoster!
"""
if not getattr(self, '_data', None):
self._data = PossiblyIncompleteDict(
self.default_data, self._get_data)
self._data.refresh()
@property
def data(self):
"""
Retrieves the data, if needed from the network.
"""
if not getattr(self, '_data', None):
self._data = PossiblyIncompleteDict(
self.default_data, self._get_data)
return self._data
@data.setter
def data(self, value):
"""
Setter for the data, use it to override, refresh, ...
"""
self._data = PossiblyIncompleteDict(value, self._get_data)
def eliminate_none(data):
"""
Remove None values from dict
"""
return dict((k, v) for k, v in data.items() if v is not None)
| [
37811,
198,
15946,
1460,
4465,
3404,
11,
4143,
0,
198,
37811,
198,
6738,
19720,
1330,
32233,
628,
198,
4871,
43046,
818,
20751,
35,
713,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
317,
8633,
1611,
286,
1517,
357,
8807,
6493,
... | 2.302174 | 1,380 |
import spotipy.util as util
from progress.bar import Bar
import numpy as np
from bs4 import BeautifulSoup
import pandas as pd
from wordcloud import WordCloud, STOPWORDS
import json, requests, urllib.parse, re, time, sys, click, spotipy, os
if __name__ == "__main__":
main()
# command = 'blender -b --python WordPile.py <URI>' | [
11748,
4136,
541,
88,
13,
22602,
355,
7736,
198,
6738,
4371,
13,
5657,
1330,
2409,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
1573,
17721,
1330,... | 2.881356 | 118 |
# Author: Nikolaos Perrakis <nikos@nannyml.com>
#
# License: Apache Software License 2.0
"""Utility module offering curated datasets for quick experimentation."""
from importlib import resources
from pandas import DataFrame, read_csv
DATA_MODULE = "nannyml.datasets.data"
def load_csv_file_to_df(local_file: str) -> DataFrame:
"""Loads a data file from within the NannyML package.
Parameters
----------
local_file : str, required
string with the name of the data file to be loaded.
Returns
-------
df: pd.DataFrame
A DataFrame containing the requested data
"""
with resources.path(DATA_MODULE, local_file) as data:
return read_csv(data)
def load_synthetic_binary_classification_dataset():
"""Loads the synthetic binary classification dataset provided for testing the NannyML package.
Returns
-------
reference : pd.DataFrame
A DataFrame containing reference partition of synthetic binary classification dataset
analysis : pd.DataFrame
A DataFrame containing analysis partition of synthetic binary classification dataset
analysis_tgt : pd.DataFrame
A DataFrame containing target values for the analysis partition of synthetic binary
classification dataset
Examples
--------
>>> from nannyml.datasets import load_synthetic_binary_classification_dataset
>>> reference_df, analysis_df, analysis_targets_df = load_synthetic_binary_classification_dataset()
"""
reference = load_csv_file_to_df('synthetic_sample_reference.csv')
analysis = load_csv_file_to_df('synthetic_sample_analysis.csv')
analysis_gt = load_csv_file_to_df('synthetic_sample_analysis_gt.csv')
return reference, analysis, analysis_gt
def load_synthetic_multiclass_classification_dataset():
"""Loads the synthetic multiclass classification dataset provided for testing the NannyML package.
Returns
-------
reference : pd.DataFrame
A DataFrame containing reference partition of synthetic multiclass classification dataset
analysis : pd.DataFrame
A DataFrame containing analysis partition of synthetic multiclass classification dataset
analysis_tgt : pd.DataFrame
A DataFrame containing target values for the analysis partition of synthetic
multiclass classification dataset
Examples
--------
>>> from nannyml.datasets import load_synthetic_multiclass_classification_dataset
>>> reference_df, analysis_df, analysis_targets_df = load_synthetic_multiclass_classification_dataset()
"""
reference = load_csv_file_to_df('mc_reference.csv')
analysis = load_csv_file_to_df('mc_analysis.csv')
analysis_gt = load_csv_file_to_df('mc_analysis_gt.csv')
return reference, analysis, analysis_gt
def load_modified_california_housing_dataset():
"""Loads the modified california housing dataset provided for testing the NannyML package.
This dataset has been altered to represent a binary classification problem over time.
More information about the dataset can be found at:
:ref:`dataset-california`
Returns
-------
reference : pd.DataFrame
A DataFrame containing reference partition of modified california housing dataset
analysis : pd.DataFrame
A DataFrame containing analysis partition of modified california housing dataset
analysis_tgt : pd.DataFrame
A DataFrame containing target values for the analysis partition of modified california housing dataset
Examples
--------
>>> from nannyml.datasets import load_modified_california_housing_dataset
>>> reference_df, analysis_df, analysis_targets_df = load_modified_california_housing_dataset()
"""
reference = load_csv_file_to_df('california_housing_reference.csv')
analysis = load_csv_file_to_df('california_housing_analysis.csv')
analysis_gt = load_csv_file_to_df('california_housing_analysis_gt.csv')
return reference, analysis, analysis_gt
| [
2,
220,
6434,
25,
220,
47817,
418,
2448,
17716,
271,
220,
1279,
17187,
418,
31,
77,
7737,
4029,
13,
785,
29,
198,
2,
198,
2,
220,
13789,
25,
24843,
10442,
13789,
362,
13,
15,
198,
198,
37811,
18274,
879,
8265,
6011,
36768,
40522,
... | 3.156693 | 1,270 |
# 312. Burst Balloons
# ttungl@gmail.com
| [
2,
34465,
13,
30635,
6932,
13022,
198,
2,
256,
83,
2150,
75,
31,
14816,
13,
785,
198
] | 2.411765 | 17 |
import os.path
import subprocess
import sys
import tkinter as tk
from logging import getLogger
from tkinter import messagebox, ttk
from typing import Any, Dict
import thonny
from thonny import get_runner, get_workbench, running, ui_utils
from thonny.common import (
InlineCommand,
InlineResponse,
ToplevelCommand,
get_base_executable,
is_private_python,
is_virtual_executable,
normpath_with_actual_case,
)
from thonny.languages import tr
from thonny.misc_utils import running_on_mac_os, running_on_windows
from thonny.plugins.backend_config_page import BackendDetailsConfigPage
from thonny.running import WINDOWS_EXE, SubprocessProxy, get_front_interpreter_for_subprocess
from thonny.terminal import run_in_terminal
from thonny.ui_utils import askdirectory, askopenfilename, create_string_var
logger = getLogger(__name__)
| [
11748,
28686,
13,
6978,
198,
11748,
850,
14681,
198,
11748,
25064,
198,
11748,
256,
74,
3849,
355,
256,
74,
198,
6738,
18931,
1330,
651,
11187,
1362,
198,
6738,
256,
74,
3849,
1330,
3275,
3524,
11,
256,
30488,
198,
6738,
19720,
1330,
... | 2.948454 | 291 |
"""State Model"""
from django.db import models
from reservas.utils.models import BookingAudit
| [
37811,
9012,
9104,
37811,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
5463,
292,
13,
26791,
13,
27530,
1330,
4897,
278,
16353,
270,
628
] | 3.653846 | 26 |
from distutils.core import setup
from distutils.command.install import INSTALL_SCHEMES
# Tell distutils to put the data_files in platform-specific installation
# locations. See here for an explanation:
# http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
# Dynamically calculate the version based on tagging.VERSION.
version_tuple = __import__('voting').VERSION
if version_tuple[2] is not None:
version = "%d.%d_%s" % version_tuple
else:
version = "%d.%d" % version_tuple[:2]
setup(
name = 'django-voting',
version = version,
description = 'Generic voting application for Django',
author = 'Jonathan Buchanan',
author_email = 'jonathan.buchanan@gmail.com',
url = 'http://code.google.com/p/django-voting/',
packages = ['voting', 'voting.templatetags', 'voting.tests'],
classifiers = ['Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'],
) | [
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
6738,
1233,
26791,
13,
21812,
13,
17350,
1330,
40589,
7036,
62,
50,
3398,
3620,
1546,
198,
198,
2,
14026,
1233,
26791,
284,
1234,
262,
1366,
62,
16624,
287,
3859,
12,
11423,
9988,
198,
2,
... | 2.554717 | 530 |
"""
DNS Plugin driver
Overrides the default evaluator plugin handling so we can check for legit IPs for UDP tests.
"""
import argparse
import calendar
import copy
import logging
import os
import random
import socket
import sys
import tempfile
import time
import traceback
import urllib.request
import requests
socket.setdefaulttimeout(1)
import actions.utils
from plugins.plugin import Plugin
BASEPATH = os.path.dirname(os.path.abspath(__file__))
PROJECT_ROOT = os.path.dirname(os.path.dirname(BASEPATH))
class DNSPluginRunner(Plugin):
"""
Defines the DNS plugin runner.
"""
name = "dns"
def __init__(self, args):
"""
Marks this plugin as enabled
"""
self.enabled = True
def check_legit_ip(self, ip, logger, domain="facebook"):
"""
Helper method to check if the given IP address is serving web content.
"""
url = "http://%s" % ip
logger.debug("Checking %s if returned legitimate %s" % (url, domain))
try:
res = requests.get(url, allow_redirects=False, timeout=3)
if res.status_code == 400:
res.raise_for_status()
# If we got a 301 redirect, the res.text will be empty, but facebook will show up in
# the headers
for header in res.headers:
if domain in res.headers[header]:
return True
# Otherwise, check the res.text
return domain in res.text
except Exception as exc:
logger.debug("Exception caught in checking DNS result %s: %s", url, exc)
return False
def start(self, args, evaluator, environment, ind, logger):
"""
Runs the plugins
"""
# Start the server
port = args.get("port", 53)
use_tcp = evaluator.client_args.get("use_tcp", False)
if port != 53:
logger.warning("Warning: Given port %s, but GFW only censors on port 53.", str(port))
# Disable wait for server - it checks based on binding to a TCP port
evaluator.server_args.update({"no_wait_for_server" : True})
# If we're given a server to start, start it now
if evaluator.server_cls and not args.get("external_server"):
# If a test using TCP has been requested, switch the server to that mode
if use_tcp:
evaluator.server_args.update({"listener": "socket_TCP"})
server = evaluator.start_server(evaluator.server_args, environment, logger)
evaluator.client_args.update({"dns_server": evaluator.args["server"]})
fitness = evaluator.run_client(evaluator.client_args, environment, logger)
if evaluator.server_cls and not evaluator.args["external_server"]:
evaluator.stop_server(environment, server)
evaluator.read_fitness(ind)
# If the engine ran on the server side, ask that it punish fitness
if evaluator.args["server_side"]:
ind.fitness = server.punish_fitness(ind.fitness, logger)
# When performing a DNS test, a timeout is indistinguishable from
# a reset, which means we can't tell if the strategy broke the packet
# stream, or if the censor caught us. Strategies that break the stream
# should be punished more harshly, so raise the fitness slightly
# if the engine detected censorship for failed DNS tests.
if use_tcp and server.engine and server.engine.censorship_detected and ind.fitness < 0:
logger.debug("Censorship detected - adjusting positively for not killing stream")
ind.fitness += 40
output_path = os.path.join(PROJECT_ROOT, evaluator.client_args.get("output_directory"))
fitpath = os.path.join(PROJECT_ROOT, output_path, actions.utils.FLAGFOLDER, environment["id"]) + ".fitness"
with open(fitpath, "w") as fitfile:
fitfile.write(str(ind.fitness))
if evaluator.args["external_client"]:
command = 'cat %s/%s/%s/%s.dnsresult' % (environment["worker"]["geneva_path"], evaluator.args["output_directory"], actions.utils.FLAGFOLDER, environment["id"])
dns_result, error_lines = evaluator.remote_exec_cmd(environment["remote"], command, logger)
if not dns_result:
logger.debug("Failed to get DNS result.")
else:
result = dns_result[0]
logger.debug("Got result: %s" % result)
# If the IP we got back was bad, we must fail the strategy
if not self.check_legit_ip(result, logger, domain="facebook"):
ind.fitness = -360
output_path = os.path.join(PROJECT_ROOT, evaluator.client_args.get("output_directory"))
fitpath = os.path.join(PROJECT_ROOT, output_path, actions.utils.FLAGFOLDER, environment["id"]) + ".fitness"
with open(fitpath, "w") as fitfile:
fitfile.write(str(ind.fitness))
# Log the fitness
#logger.info("[%s] Fitness %s: %s" % (ind.environment_id, str(ind.fitness), str(ind)))
return ind.environment_id, ind.fitness
@staticmethod
def get_args(command):
"""
Defines required global args for this plugin
"""
parser = argparse.ArgumentParser(description='DNS plugin runner', allow_abbrev=False)
parser.add_argument('--use-tcp', action='store_true', help='leverage TCP for this plugin')
parser.add_argument('--environment-id', action='store', help="ID of the current environment")
parser.add_argument('--output-directory', action='store', help="Where to output results")
parser.add_argument('--port', action='store', type=int, default=53, help='port to use')
args, _ = parser.parse_known_args(command)
return vars(args)
| [
37811,
198,
35,
8035,
42636,
4639,
198,
198,
5886,
81,
1460,
262,
4277,
5418,
84,
1352,
13877,
9041,
523,
356,
460,
2198,
329,
6984,
6101,
82,
329,
36428,
5254,
13,
198,
37811,
198,
198,
11748,
1822,
29572,
198,
11748,
11845,
198,
117... | 2.408371 | 2,461 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This is the main file of btn4ws.
#
# Copyright (c) 1999-2009 Jan Dittberner <jan@dittberner.info>
#
# This file is part of btn4ws.
#
# btn4ws is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# btn4ws is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with btn4ws; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# version: $Id: btn4ws.py 21 2009-03-15 07:56:39Z jan $
#
"""
Gimp script to generate button images for websites. This script is a
port of the older gimp-perl version to python.
(c) 2007, 2008, 2009 Jan Dittberner <jan@dittberner.info>
"""
import os, urllib, logging, sys, pickle
import gimp, gimpplugin, gimpui, gimpcolor
import pygtk
pygtk.require('2.0')
import gtk
from gimpenums import *
from gimpshelf import shelf
pdb = gimp.pdb
btn4ws_version = "0.8.0.1"
logging.basicConfig(level=logging.WARN,
format='%(asctime)s %(levelname)s %(message)s',
stream=sys.stderr)
class text_to_name_mapper:
"""
Text string to name mapper class. This class provides mappings for several target
environments.
"""
def asitemid(self, text):
"""
Get a img itemid for the given text.
"""
if 'itemid' not in self.mapping[text]:
self.mapping[text]['itemid'] = "id%03d" % (self.idnum)
self.idnum += 1
#logging.debug("self.mapping=" + str(self.mapping))
return self.mapping[text]['itemid']
def asjavascriptid(self, text):
"""
Get a javascript itemid for the given text.
"""
if 'jsid' not in self.mapping[text]:
self.mapping[text]['jsid'] = "img%03d" % (self.idnum)
self.idnum += 1
#logging.debug("self.mapping=" + str(self.mapping))
return self.mapping[text]['jsid']
def aslinktarget(self, text):
"""
Get a link target for the given text.
"""
if 'link' not in self.mapping[text]:
self.mapping[text]['link'] = urllib.quote(text)
#logging.debug("self.mapping=" + str(self.mapping))
return "%s.html" % (self.mapping[text]['link'])
def asfilename(self, text, extension = 'png', prefix= '', dirname = None):
"""
Get a filename for the given text with optional extension, prefix and dirname.
"""
if 'file' not in self.mapping[text]:
self.mapping[text]['file'] = text.encode('ascii', 'ignore')
fname = "%s%s.%s" % (prefix, self.mapping[text]['file'], extension)
#logging.debug("self.mapping=" + str(self.mapping))
if dirname:
return os.path.join(dirname, fname)
return fname
class text_to_name_mapper:
"""
Text string to name mapper class. This class provides mappings for
several target environments.
"""
def asitemid(self, text):
"""
Get a img itemid for the given text.
"""
if 'itemid' not in self.mapping[text]:
self.mapping[text]['itemid'] = "id%03d" % (self.idnum)
self.idnum += 1
logging.debug("self.mapping=" + str(self.mapping))
return self.mapping[text]['itemid']
def asjavascriptid(self, text):
"""
Get a javascript itemid for the given text.
"""
if 'jsid' not in self.mapping[text]:
self.mapping[text]['jsid'] = "img%03d" % (self.idnum)
self.idnum += 1
logging.debug("self.mapping=" + str(self.mapping))
return self.mapping[text]['jsid']
def aslinktarget(self, text):
"""
Get a link target for the given text.
"""
if 'link' not in self.mapping[text]:
self.mapping[text]['link'] = urllib.quote(text)
logging.debug("self.mapping=" + str(self.mapping))
return "%s.html" % (self.mapping[text]['link'])
def asfilename(self, text, extension = 'png', prefix= '', dirname = None):
"""
Get a filename for the given text with optional extension,
prefix and dirname.
"""
if 'file' not in self.mapping[text]:
self.mapping[text]['file'] = text.encode('ascii', 'ignore')
fname = "%s%s.%s" % (prefix, self.mapping[text]['file'], extension)
logging.debug("self.mapping=" + str(self.mapping))
if dirname:
return os.path.join(dirname, fname)
return fname
class IntEntry(gtk.Entry):
"""Input field for integer numbers."""
def _cb_int_field_insert(self, w, new_text, new_text_length, position):
"""Allow integer input only."""
if not new_text.isdigit():
w.stop_emission("insert-text")
class Btn4wsDialog(gtk.Assistant):
"""This class is the input dialog field for btn4ws"""
class btn4wsplugin(gimpplugin.plugin):
"""This is the btn4ws gimp plugin."""
def gimp2html_color(self, color):
"""
Converts a color tuple to a hex encoded color for CSS.
"""
return "#%02x%02x%02x" % (color[0], color[1], color[2])
def parsefont(self, font):
"""
Parses a font into its fontname and size parts.
"""
parts = font.split(" ")
return (" ".join(parts[:-1]), parts[-1])
def toprocess(self, item):
"""
Decides whether the plugin is able to process the item or not.
"""
item = item.strip()
return len(item) > 0 and not item.startswith('#')
def getmaxextents(self, strings, fontsize, fontname):
"""
Gets the maximum width and height of texts in strings array
with the given font.
"""
getextents = pdb['gimp_text_get_extents_fontname']
maxx = 0
maxy = 0
for extents in [getextents(string, fontsize, 1, fontname)
for string in strings]:
maxx = max(maxx, extents[0])
maxy = max(maxy, extents[1])
return (maxx, maxy)
def btn4ws(self, runmode, filename = None, outdir = None, font = None,
strcolor = None, transparency = False, bgcolor = None,
glow = False, glowcolor = None, usepattern = False,
pattern = None, buttoncolor = None, roundradius = None,
padding = None, glowsize = None, bevelwidth = None,
nova = False, novasparkles = None, novaradius = None,
novacolor = None, writexcf = False, makeinactive = True,
makeactive = True, makepressed = True, makejscript = True):
"""
This function controls the creation of the buttons and is
registered as gimp plugin.
"""
self.inputdata = {
"filename" : filename, "outdir" : outdir, "font" : font,
"strcolor" : strcolor, "transparency" : transparency,
"bgcolor" : bgcolor, "glow" : glow, "glowcolor" : glowcolor,
"usepattern" : usepattern, "pattern" : pattern,
"buttoncolor" : buttoncolor, "roundradius" : roundradius,
"padding" : padding, "glowsize" : glowsize,
"bevelwidth" : bevelwidth, "nova" : nova,
"novasparkles" : novasparkles, "novaradius" : novaradius,
"novacolor" : novacolor, "writexcf" : writexcf,
"makeinactive" : makeinactive, "makeactive" : makeactive,
"makepressed" : makepressed, "makejscript" : makejscript
}
if runmode in (RUN_INTERACTIVE, RUN_WITH_LAST_VALS):
if shelf.has_key("btn4ws"):
self.inputdata = shelf["btn4ws"]
else:
self.inputdata = self._loaddata(self.inputdata)
dialog = Btn4wsDialog(self.inputdata)
dialog.connect("close", self._cb_destroy)
dialog.connect("cancel", self._cb_destroy)
dialog.connect("destroy", self._cb_destroy)
dialog.connect("apply", self._cb_apply)
gtk.main()
elif runmode == RUN_NONINTERACTIVE:
logging.debug("runmode noninteractive")
if self.checkdata(self.inputdata):
self.makebuttons(**self.inputdata)
else:
logging.error("checking data failed")
else:
logging.error("unknown runmode %d" % runmode)
return
if __name__ == '__main__':
btn4wsplugin().start()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
770,
318,
262,
1388,
2393,
286,
275,
34106,
19,
18504,
13,
198,
2,
198,
2,
15069,
357,
66,
8,
7358,
12,
10531,
... | 2.246753 | 3,927 |
import os
import uuid
import random
import unittest
import pprint
from langcodes import Language
from spaceone.core import config
from spaceone.core import pygrpc
from spaceone.core import utils
from spaceone.core.unittest.runner import RichTestRunner
from google.protobuf.json_format import MessageToDict
if __name__ == "__main__":
unittest.main(testRunner=RichTestRunner)
| [
11748,
28686,
198,
11748,
334,
27112,
198,
11748,
4738,
198,
11748,
555,
715,
395,
198,
11748,
279,
4798,
198,
6738,
42392,
40148,
1330,
15417,
198,
6738,
2272,
505,
13,
7295,
1330,
4566,
198,
6738,
2272,
505,
13,
7295,
1330,
12972,
216... | 3.359649 | 114 |
from typing import Dict
import requests
from seki.conf import DRONE_SERVER, DRONE_TOKEN, SEKI_PROJECT_OWNER, SEKI_PROJECT_REPO
| [
6738,
19720,
1330,
360,
713,
198,
198,
11748,
7007,
198,
198,
6738,
384,
4106,
13,
10414,
1330,
10560,
11651,
62,
35009,
5959,
11,
10560,
11651,
62,
10468,
43959,
11,
7946,
37845,
62,
31190,
23680,
62,
14165,
1137,
11,
7946,
37845,
62,
... | 2.765957 | 47 |
from __future__ import absolute_import, division, print_function
import os
from appr.commands.command_base import CommandBase
from appr.pack import ApprPackage
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
198,
11748,
28686,
198,
198,
6738,
598,
81,
13,
9503,
1746,
13,
21812,
62,
8692,
1330,
9455,
14881,
198,
6738,
598,
81,
13,
8002,
1330,
2034,
81,
278... | 3.704545 | 44 |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import numpy as np
from pandapower.auxiliary import _sum_by_group
from pandapower.idx_bus import VM, VA, PD, QD, LAM_P, LAM_Q, BASE_KV
from pandapower.idx_gen import PG, QG
def write_pq_results_to_element(net, ppc, element):
"""
get p_mw and q_mvar for a specific pq element ("load", "sgen"...).
This function basically writes values element table to res_element table
:param net: pandapower net
:param element: element name (str)
:return:
"""
# info from net
_is_elements = net["_is_elements"]
ac = net["_options"]["ac"]
# info element
el_data = net[element]
res_ = "res_%s"%element
ctrl_ = "%s_controllable"%element
is_controllable = False
if ctrl_ in _is_elements:
controlled_elements = net[element][net._is_elements[ctrl_]].index
gen_idx = net._pd2ppc_lookups[ctrl_][controlled_elements]
gen_sign = 1 if element == "sgen" else -1
is_controllable = True
# Wards and xwards have different names in their element table, but not in res table. Also no scaling -> Fix...
p_mw = "ps_mw" if element in ["ward", "xward"] else "p_mw"
q_mvar = "qs_mvar" if element in ["ward", "xward"] else "q_mvar"
scaling = el_data["scaling"].values if element not in ["ward", "xward"] else 1.0
element_in_service = _is_elements[element]
# P result in kw to element
net[res_]["p_mw"].values[:] = el_data[p_mw].values * scaling * element_in_service
if is_controllable:
net[res_]["p_mw"].loc[controlled_elements] = ppc["gen"][gen_idx, PG] * gen_sign
if ac:
# Q result in kvar to element
net[res_]["q_mvar"].values[:] = el_data[q_mvar].values * scaling * element_in_service
if is_controllable:
net[res_]["q_mvar"].loc[controlled_elements] = ppc["gen"][gen_idx, QG] * gen_sign
return net
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
357,
66,
8,
1584,
12,
7908,
416,
2059,
286,
15035,
741,
290,
39313,
403,
71,
30288,
5136,
329,
6682,
18963,
198,
2,
290,
6682,
4482,
8987,
357,
40,
65... | 2.452153 | 836 |
"""
The MIT License (MIT)
Copyright (c) 2014 Trustly Group AB
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import trustly.api.api
import trustly.data
import trustly.exceptions
# vim: set et cindent ts=4 ts=4 sw=4:
| [
37811,
198,
464,
17168,
13789,
357,
36393,
8,
198,
198,
15269,
357,
66,
8,
1946,
9870,
306,
4912,
9564,
198,
198,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
1659,
428,
3788,
290,
39... | 3.71517 | 323 |
from django.shortcuts import render, redirect
from django.http import HttpResponse
from .models import *
# Create your views here.
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
6738,
764,
27530,
1330,
1635,
198,
198,
2,
13610,
534,
5009,
994,
13,
198
] | 3.771429 | 35 |
import unittest
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419,
198
] | 2.357143 | 28 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Records application events."""
from tradefed_cluster.util import ndb_shim as ndb
from multitest_transport.models import ndb_models
def GetEntries(entity_or_key=None):
"""Retrieve all log entries for an entity.
Args:
entity_or_key: optional entity or entity key to look up
Returns:
list of log entries
"""
ancestor = _GetKey(entity_or_key)
query = ndb_models.EventLogEntry.query(ancestor=ancestor)
return query.order(ndb_models.EventLogEntry.create_time).fetch()
def _GetKey(entity_or_key):
"""Helper to retrieve an entity's key if needed."""
if isinstance(entity_or_key, ndb.Model):
return entity_or_key.key
return entity_or_key
def Info(entity_or_key, message):
"""Convenience method to add an info entry.
Args:
entity_or_key: related entity or entity key
message: log message
"""
_Log(ndb_models.EventLogLevel.INFO, entity_or_key, message)
def Warn(entity_or_key, message):
"""Convenience method to add a warning entry.
Args:
entity_or_key: related entity or entity key
message: log message
"""
_Log(ndb_models.EventLogLevel.WARNING, entity_or_key, message)
def Error(entity_or_key, message):
"""Convenience method to add an error entry.
Args:
entity_or_key: related entity or entity key
message: log message
"""
_Log(ndb_models.EventLogLevel.ERROR, entity_or_key, message)
def _Log(level, entity_or_key, message):
"""Persists a new log entry."""
parent = _GetKey(entity_or_key)
ndb_models.EventLogEntry(parent=parent, level=level, message=message).put()
| [
2,
15069,
12131,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 3.104499 | 689 |
# coding=utf-8
from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut
from OTLMOW.OTLModel.Classes.PutRelatie import PutRelatie
from OTLMOW.OTLModel.Datatypes.DtcDocument import DtcDocument
from OTLMOW.OTLModel.Datatypes.KlKamerKlasse import KlKamerKlasse
from OTLMOW.OTLModel.Datatypes.KlPutMateriaal import KlPutMateriaal
from OTLMOW.OTLModel.Datatypes.KlRioleringVorm import KlRioleringVorm
from OTLMOW.OTLModel.Datatypes.KwantWrdInMeter import KwantWrdInMeter
from OTLMOW.OTLModel.Datatypes.KwantWrdInMillimeter import KwantWrdInMillimeter
from OTLMOW.GeometrieArtefact.VlakGeometrie import VlakGeometrie
# Generated with OTLClassCreator. To modify: extend, do not edit
class Kamer(PutRelatie, VlakGeometrie):
"""Een kamer is een aanééngesloten ondergrondse constructie waarbinnen vrije stroming van water over de bodem mogelijk is. Een constructie of inspectieput kan één of meerdere kamers hebben."""
typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Kamer'
"""De URI van het object volgens https://www.w3.org/2001/XMLSchema#anyURI."""
@property
def breedte(self):
"""De afmeting 1 (breedte) van het grondplan van de putkamer in millimeter."""
return self._breedte.get_waarde()
@breedte.setter
@property
def diepte(self):
"""De diepte van de putkamer in meter."""
return self._diepte.get_waarde()
@diepte.setter
@property
def hoogte(self):
"""De afmeting 2 (hoogte) van het grondplan van de putkamer in millimeter."""
return self._hoogte.get_waarde()
@hoogte.setter
@property
def klasse(self):
"""De stabiliteitsklasse van de kamer."""
return self._klasse.get_waarde()
@klasse.setter
@property
def materiaal(self):
"""Het materiaal waaruit de kamer opgebouwd is."""
return self._materiaal.get_waarde()
@materiaal.setter
@property
def technischeFiche(self):
"""De technische fiche van de kamer."""
return self._technischeFiche.get_waarde()
@technischeFiche.setter
@property
def vorm(self):
"""De vorm van de kamer."""
return self._vorm.get_waarde()
@vorm.setter
| [
2,
19617,
28,
40477,
12,
23,
198,
6738,
440,
14990,
44,
3913,
13,
2394,
43,
17633,
13,
14881,
9487,
274,
13,
2394,
43,
8086,
822,
84,
315,
1330,
440,
14990,
8086,
822,
84,
315,
198,
6738,
440,
14990,
44,
3913,
13,
2394,
43,
17633,... | 2.348148 | 945 |
#
# This file is part Protein Engineering Analysis Tool (PEAT)
# (C) Copyright Jens Erik Nielsen, University College Dublin 2003-
# All rights reserved
#
#
# Written by D Farrell, April 2008
#
from Tkinter import *
import csv
class TableImporter:
"""Provides import utility methods for the Table and Table Model classes"""
def __init__(self):
"""Setup globals"""
#self.separator = ','
self.separator_list = {',':',',' ':'space','\t':'tab','blank':' ',':':':'}
self.var_sep = StringVar()
self.var_sep.set(',')
return
def import_Dialog(self, parent):
"""Allows user to set some import options"""
import Pmw
self.parent=parent
self.master=Toplevel()
self.master.title("Import Data")
self.xsize = 450
self.ysize = 370
top=self.master.winfo_toplevel()
rootx=top.winfo_rootx()
rooty=top.winfo_rooty()
self.sep_choice = Pmw.OptionMenu(
parent = self.master,labelpos = 'w',
label_text = 'Record separator:',
menubutton_textvariable = self.var_sep,
items = self.separator_list.keys(),
initialitem = ',',
menubutton_width = 4,
command= self.update_display)
self.sep_choice.grid(row=0,column=0,sticky='nw',padx=2,pady=2)
#place for text preview frame
self.textframe=Pmw.ScrolledFrame(self.master,
labelpos = 'n', label_text = 'Preview',
usehullsize = 1,
hull_width = 450,
hull_height = 300)
self.textframe.grid(row=1,column=0,columnspan=5,sticky='news',padx=2,pady=2)
self.previewarea = Text(self.textframe.interior(), bg='white', width=400, height=500)
self.previewarea.pack(fill=BOTH, expand=1)
#buttons
self.openButton = Button(self.master, text = 'Open File',
command = self.do_openFile )
self.openButton.grid(row=3,column=0,sticky='news',padx=2,pady=2)
self.importButton = Button(self.master, text = 'Do Import',
command = self.do_ModelImport )
self.importButton.grid(row=3,column=1,sticky='news',padx=2,pady=2)
self.CancelButton = Button(self.master, text = 'Cancel',
command = self.close )
self.CancelButton.grid(row=3,column=2,sticky='news',padx=2,pady=2)
self.master.columnconfigure(0,weight=1)
self.master.rowconfigure(1,weight=1)
return self.master
def update_display(self,evt=None):
"""Preview loaded file"""
sep = self.var_sep.get()
self.previewarea.delete(1.0, END)
print 'sep',sep
reader = csv.reader(open(self.datafile, "rb"), delimiter=sep)
for row in reader:
self.previewarea.insert(END,row)
self.previewarea.insert(END,'\n')
return
def do_ModelImport(self):
"""imports and places the result in self.modeldata"""
self.modeldata = self.ImportTableModel(self.datafile)
self.close()
return
def ImportTableModel(self,filename):
"""Import table data from a comma separated file and create data for a model
This is reusable outside the GUI dialog also."""
import os
if not os.path.isfile(filename):
return None
try:
sep = self.var_sep.get()
except:
sep = ','
#takes first row as field names
dictreader = csv.DictReader(open(filename, "rb"), delimiter=sep)
dictdata = {}
count=0
for rec in dictreader:
dictdata[count]=rec
count=count+1
print dictdata
modeldata={}
modeldata['columnnames']=[]
modeldata['columntypes']={}
modeldata['columnlabels']={}
count=0
modeldata['columnnames'] = dictdata[0].keys()
#check for col types, text or num?
for col in modeldata['columnnames']:
'''coltype='text'
for row in dictdata.keys():
if
modeldata['columntypes'][col]=coltype'''
modeldata['columntypes'][col] = 'text'
for colname in modeldata['columnnames']:
modeldata['columnlabels'][colname]=colname
#now add the data
for row in dictdata.keys():
modeldata[row]=dictdata[row]
print '-------MODELDATA------\n',modeldata
return modeldata
| [
2,
198,
2,
770,
2393,
318,
636,
31702,
14044,
14691,
16984,
357,
11401,
1404,
8,
198,
2,
357,
34,
8,
15069,
449,
641,
22722,
31154,
11,
2059,
5535,
18220,
5816,
12,
198,
2,
1439,
2489,
10395,
198,
2,
220,
220,
198,
2,
198,
2,
22... | 2.026886 | 2,306 |
import sys
N = int(sys.stdin.readline())
numList = list(map(int, sys.stdin.readline().split()))
operator = list(map(int, sys.stdin.readline().split()))
minimum = 1e9
maximum = -1e9
now = numList[0]
result(now, 1)
print(maximum)
print(minimum)
| [
11748,
25064,
198,
198,
45,
796,
493,
7,
17597,
13,
19282,
259,
13,
961,
1370,
28955,
198,
22510,
8053,
796,
1351,
7,
8899,
7,
600,
11,
25064,
13,
19282,
259,
13,
961,
1370,
22446,
35312,
3419,
4008,
198,
46616,
796,
1351,
7,
8899,
... | 2.510204 | 98 |
from summarize_contig_identities_and_alignments import parse_reads, export_chromosome_summary_to_csv, export_genome_summary_to_csv
from handlers.FastaHandler import FastaHandler
from handlers.FileManager import FileManager
from handlers.BamHandler import BamHandler
from multiprocessing import Manager, Pool, cpu_count
import argparse
'''
Generate stats on contig identity and alignment given a BAM of contigs VS true reference
'''
def process_bam(bam_path, reference_path, max_threads, output_dir=None):
"""
Find useful summary data from a bam that can be represented as a table of identities/matches/mismatches/indels
:param bam_path: path to a bam containing contigs aligned to a true reference
:param reference_path: the true reference that contigs were aligned to
:param output_dir: where to save stats
:return:
"""
if output_dir is None:
output_dir = "stats/"
if max_threads is None:
max_threads = max(1, cpu_count() - 2)
process_manager = Manager()
genome_data = process_manager.list()
FileManager.ensure_directory_exists(output_dir)
fasta_handler = FastaHandler(reference_path)
chromosome_names = fasta_handler.get_contig_names()
arguments = list()
for chromosome_name in chromosome_names:
chromosome_length = fasta_handler.get_chr_sequence_length(chromosome_name)
start = 0
stop = chromosome_length
arguments.append([genome_data, reference_path, chromosome_name, start, stop, output_dir, bam_path])
if len(arguments) < max_threads:
print("Fewer jobs than threads")
max_threads = len(arguments)
print("Using %d threads..." % max_threads)
with Pool(processes=max_threads) as pool:
pool.starmap(get_chromosome_stats, arguments)
print("genome_data", genome_data)
export_genome_summary_to_csv(bam_path=bam_path, output_dir=output_dir, genome_data=genome_data)
if __name__ == "__main__":
'''
Processes arguments
'''
parser = argparse.ArgumentParser()
parser.add_argument(
"--bam",
type=str,
required=True,
help="BAM file path of contigs aligned to true reference"
)
parser.add_argument(
"--ref",
type=str,
required=True,
help="FASTA file path of true reference to be compared against"
)
parser.add_argument(
"--max_threads", "-t",
type=int,
required=False,
help="FASTA file path of true reference to be compared against"
)
parser.add_argument(
"--output_dir",
type=str,
required=False,
help="desired output directory path (will be created during run time if doesn't exist)"
)
args = parser.parse_args()
main(bam_path=args.bam, reference_path=args.ref, output_dir=args.output_dir, max_threads=args.max_threads)
| [
6738,
35743,
62,
3642,
328,
62,
738,
871,
62,
392,
62,
31494,
902,
1330,
21136,
62,
40779,
11,
10784,
62,
28663,
418,
462,
62,
49736,
62,
1462,
62,
40664,
11,
10784,
62,
5235,
462,
62,
49736,
62,
1462,
62,
40664,
198,
6738,
32847,
... | 2.652214 | 1,084 |
#!/usr/bin/env python3
# coding:utf-8
# 只是引入“泡菜”而已,并不是新的思路
import os, pickle
def save_text(pf, text):
'''保存文件'''
with open(pf, 'wb') as f:
pickle.dump(text, f)
def load_text(pf):
'''载入文件'''
with open(pf, 'rb') as f:
text = pickle.load(f)
print(text)
if __name__ == "__main__":
run()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
19617,
25,
40477,
12,
23,
198,
198,
2,
10263,
237,
103,
42468,
28156,
243,
17739,
98,
447,
250,
37345,
94,
164,
237,
250,
447,
251,
32003,
234,
32432,
110,
171,
120,
234,
33... | 1.558685 | 213 |
from app.models import db, Recipe
# Adds a demo location, you can add other locations here if you want
# Uses a raw SQL query to TRUNCATE the locations table.
# SQLAlchemy doesn't have a built in function to do this
# TRUNCATE Removes all the data from the table, and resets
# the auto incrementing primary key
| [
6738,
598,
13,
27530,
1330,
20613,
11,
26694,
628,
198,
2,
34333,
257,
13605,
4067,
11,
345,
460,
751,
584,
7064,
994,
611,
345,
765,
198,
198,
2,
36965,
257,
8246,
16363,
12405,
284,
7579,
4944,
34,
6158,
262,
7064,
3084,
13,
198,
... | 3.75 | 84 |
import json
import logging
import os
from arm_prosthesis.external_communication.models.dto.get_settings_dto import GetSettingsDto
from arm_prosthesis.external_communication.models.dto.set_settings_dto import SetSettingsDto
| [
11748,
33918,
201,
198,
11748,
18931,
201,
198,
11748,
28686,
201,
198,
201,
198,
6738,
3211,
62,
1676,
301,
8497,
13,
22615,
62,
32560,
13,
27530,
13,
67,
1462,
13,
1136,
62,
33692,
62,
67,
1462,
1330,
3497,
26232,
35,
1462,
201,
1... | 3.135135 | 74 |
import os
import re
import json
from . import tasks
from falcon import HTTPInternalServerError, HTTP_201
from bson.objectid import ObjectId
from .tools import TODOException
from .models import *
from pprint import pprint
from urllib.parse import quote
movie_name_and_year = re.compile("(.*)\((.*)\)")
| [
11748,
28686,
198,
11748,
302,
198,
11748,
33918,
198,
6738,
764,
1330,
8861,
198,
6738,
24215,
1102,
1330,
14626,
37693,
10697,
12331,
11,
14626,
62,
1264,
198,
6738,
275,
1559,
13,
15252,
312,
1330,
9515,
7390,
198,
6738,
764,
31391,
... | 3.377778 | 90 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 8 03:04:49 2021
@author: ike
"""
import os
import shutil
import string
import os.path as op
from glob import glob as gg
ALLOWED = "".join((string.ascii_letters, string.digits, " ", "_", "-"))
def getPath(*args, ext=None):
"""
Convert arbitrary number of filenames or subpaths into one filepath
@param args: filenames or filepaths to be combined, in order
@type args: arguments
@param ext: file extension, if filepath
@type ext: string
@return: path to desired file or directory
@rtype: string
"""
path = op.normpath(op.join(*[
op.join(*op.join(*arg.split("\\")).split("/")) for arg in args]))
path = (".".join([path, ext]) if ext is not None else path)
return "".join(("/", path))
def getParent(path, num=1):
"""
Find parent directory of a file or subdirectory
@param path: filepath or subdirectory from which to find a parent
@type path: string
@param num: how many subdirectories to traverse before returning parent
@type num: int
@return: path to parent directory
@rtype: string
"""
for x in range(num):
if "/" in path:
path = path[:path.rindex("/")]
return path
def getName(path):
"""
Find name of a file or subdirectory
@param path: filepath or subdirectory from which to find a name
@type path: string
@return: name of given file or subdirectory
@rtype: string
"""
name = "".join(("/", path))
name = name[name.rindex("/") + 1:]
return name
def cleanName(filename):
"""
Remove unstable characters from proposed filename
@param filename: filename to clean
@type filename: string
@return: filename with unstable characters replaced with "-"
@rtype: string
"""
filename = list(str(filename))
for idx in range(len(filename)):
if filename[idx] not in ALLOWED:
filename[idx] = "-"
filename = "".join(filename)
return filename
def changeExt(path, ext=None):
"""
Add or change the extension of a filepath
@param path: filepath to modify
@type path: string
@param ext: extension to add to filepath
@type ext: string
@return: filepath with old extension removed and any new extension added
@rtype: string
"""
path = (path[:path.rindex(".")] if "." in path else path)
if ext is not None:
path = ".".join([path, ext])
return path
def makeDir(path):
"""
If a necessary directory does not exist, create it
@param path: path to necessary directory
@type path: string
"""
if not op.isdir(path):
os.makedirs(path, exist_ok=False)
def makeParent(path):
"""
If a necessary parent directory does not exist, create it
@param path: filepath for which to check parent
@type path: string
"""
makeDir(getParent(path))
def removeParent(path):
"""
If parent directory of filepath is empty, delete it
@param path: filepath for which to check parent
@type path: string
"""
parent = getParent(path)
if op.isdir(parent) and not os.listdir(parent):
shutil.rmtree(parent)
def movePath(src, dst):
"""
Move a filepath or subdirectory to a new location
@param src: path to file or subdirectory to be moved
@type src: string
@param dst: desired path to file or subdirectory
@type dst: string
"""
if all(((type(src) == str), (type(dst) == str),
(not any(((op.isfile(dst)), (op.isdir(dst))))),
(op.isfile(src) or op.isdir(src)))):
makeParent(dst)
shutil.move(src=src, dst=dst)
def glob(*args, ext=None):
"""
glob function from glob package with following additions:
- return any matches in alphabetical order
- return None if no matches found
@param args: passed to getPath()
@type args: arguments
@param ext: passed to getPath()
@type ext: string
@return: list of paths that match desired pattern
@rtype: list
"""
path = getPath(*args, ext=ext)
pathList = sorted(gg(path))
pathList = (pathList if len(pathList) > 0 else None)
return pathList
def recursiveGlob(*args, ext=None):
"""
glob with ability to search in subdirectories
@param args: passed to getPath()
@type args: arguments
@param ext: passed to getPath()
@type ext: string
@return: list of paths that match desired pattern
@rtype: list
"""
path = getPath(*args, ext=ext)
pathList = sorted(gg(path, recursive=True))
pathList = (pathList if len(pathList) > 0 else None)
return pathList
def firstGlob(*args, ext=None):
"""
glob but only return first match
@param args: passed to getPath()
@type args: arguments
@param ext: passed to getPath()
@type ext: string
@return: firat path that matches desired pattern
@rtype: string
"""
path = getPath(*args, ext=ext)
path = (gg(path, recursive=True) if "**" in path else gg(path))
path = (path[0] if len(path) > 0 else None)
return path
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
26223,
2758,
220,
807,
7643,
25,
3023,
25,
2920,
33448,
198,
198,
31,
9800,
25,
220,
522,... | 2.693354 | 1,911 |
# _____ ______ _____
# / ____/ /\ | ____ | __ \
# | | / \ | |__ | |__) | Caer - Modern Computer Vision
# | | / /\ \ | __| | _ / Languages: Python, C, C++
# | |___ / ____ \ | |____ | | \ \ http://github.com/jasmcaus/caer
# \_____\/_/ \_ \______ |_| \_\
# Licensed under the MIT License <http://opensource.org/licenses/MIT>
# SPDX-License-Identifier: MIT
# Copyright (c) 2020-2021 The Caer Authors <http://github.com/jasmcaus>
from .bgr import (
bgr_to_gray,
bgr_to_hsv,
bgr_to_lab,
bgr_to_rgb,
bgr_to_hls,
is_bgr_image,
__all__ as __all_bgr__
)
from .rgb import (
rgb_to_gray,
rgb_to_hsv,
rgb_to_lab,
rgb_to_bgr,
rgb_to_hls,
is_rgb_image,
__all__ as __all_rgb__
)
from .gray import (
gray_to_lab,
gray_to_rgb,
gray_to_hsv,
gray_to_bgr,
is_gray_image,
__all__ as __all_gray__
)
from .hsv import (
hsv_to_gray,
hsv_to_rgb,
hsv_to_lab,
hsv_to_bgr,
is_hsv_image,
__all__ as __all_hsv__
)
from .hls import (
hls_to_gray,
hls_to_rgb,
hls_to_lab,
hls_to_bgr,
is_hls_image,
__all__ as __all_hls__
)
from .lab import (
lab_to_gray,
lab_to_rgb,
lab_to_hsv,
lab_to_bgr,
is_lab_image,
__all__ as __all_lab__
)
from .constants import (
IMREAD_COLOR,
BGR2RGB,
BGR2GRAY,
BGR2HSV,
RGB2GRAY,
RGB2BGR,
RGB2HSV,
BGR2LAB,
RGB2LAB,
HSV2BGR,
HSV2RGB,
LAB2BGR,
LAB2RGB,
GRAY2BGR,
GRAY2RGB,
HLS2BGR,
HLS2RGB,
__all__ as __all_const__
)
__all__ = __all_const__ + __all_rgb__ + __all_hls__+ __all_gray__ + __all_bgr__ + __all_hsv__ + __all_lab__
# Don't pollute namespace
del __all_const__
del __all_bgr__
del __all_rgb__
del __all_gray__
del __all_hsv__
del __all_lab__
del __all_hls__ | [
2,
220,
220,
220,
220,
29343,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
44435,
220,
220,
29343,
220,
198,
2,
220,
1220,
220,
1427,
14,
220,
220,
220,
1220,
59,
220,
220,
220,
930,
220,
220,
1427,
930,
220,
11593,
3467,
1... | 1.835317 | 1,008 |
from django.test import TestCase
from django.urls import reverse
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import test, status
from authors.apps.authentication.models import User
from authors.apps.articles.models import Article, FavoriteModel
from authors.apps.notify.models import Notification
from authors.apps.notify.serializers import NotificationSerializer
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
9515,
13921,
3673,
3109,
396,
198,
6738,
1334,
62,
30604,
1330,
1332,
11,
3722... | 4.125 | 96 |
from datetime import timedelta
from unittest.mock import create_autospec, patch
import pytest
import garden.utils as utils
from garden import models
@pytest.mark.unit
| [
6738,
4818,
8079,
1330,
28805,
12514,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
2251,
62,
2306,
418,
43106,
11,
8529,
198,
198,
11748,
12972,
9288,
198,
198,
11748,
11376,
13,
26791,
355,
3384,
4487,
198,
6738,
11376,
1330,
4981,
6... | 3.352941 | 51 |
OUT_FOLDER = 'out/'
BUCKET_CONFIG_PATH = OUT_FOLDER + 'categoryBuckets.json'
SELECTION_CACHE_PATH = OUT_FOLDER + "selection.json"
| [
198,
12425,
62,
37,
3535,
14418,
796,
705,
448,
14,
6,
198,
33,
16696,
2767,
62,
10943,
16254,
62,
34219,
796,
16289,
62,
37,
3535,
14418,
1343,
705,
22872,
33,
1347,
1039,
13,
17752,
6,
198,
198,
46506,
2849,
62,
34,
2246,
13909,
... | 2.275862 | 58 |
from rest_framework import serializers
from .models import Posts, Url
| [
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
6738,
764,
27530,
1330,
12043,
11,
8799,
75,
628,
198
] | 4 | 18 |
import torch, sys, argparse, ipdb, joblib
import numpy as np
from tqdm import tqdm
if __name__ == "__main__":
args = parser_args()
args = vars(args)
paths = [f'rest/{args["dataset"]}/{args["model"]}/rest_{i}.pt' for i in range(args['num_nodes'])]
dataset_matrix, dataset_text = [], []
for path in tqdm(paths):
matrix, text = torch.load(path)
dataset_matrix.append(matrix)
dataset_text.extend(text)
dataset_matrix = np.concatenate(dataset_matrix)
assert len(dataset_matrix) == len(dataset_text)
print(f'[!] collect {len(dataset_text)} samples')
with open(f'rest/{args["dataset"]}/{args["model"]}/rest.pt', 'wb') as f:
joblib.dump((dataset_matrix, dataset_text), f)
# torch.save((dataset_matrix, dataset_text), f'rest/{args["dataset"]}/{args["model"]}/rest.pt')
print(f'[!] reconstruct and save the overall embedding into rest/{args["dataset"]}/{args["model"]}/rest.pt') | [
11748,
28034,
11,
25064,
11,
1822,
29572,
11,
20966,
9945,
11,
1693,
8019,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
22... | 2.280285 | 421 |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes
from ...config import options
from ...core import OutputType, ENTITY_TYPE
from ...serialization.serializables import BoolField
from .core import DataFrameReductionOperand, DataFrameReductionMixin
| [
2,
15069,
7358,
12,
1238,
2481,
41992,
4912,
31703,
12052,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
... | 3.835616 | 219 |
"""
test_utils
----------
"""
import numpy as np
from ..utils.sampling_utils import create_times_randompoints,\
create_times_randomregimes, create_times_randombursts
from ..utils.util_operations import join_regimes, format_as_regular_ts,\
apply_gaussianconvolved_ts
from ..utils.sliding_utils import sliding_embeded_transf
from ..utils.fit_utils import general_multiscale_fit,\
fit_loglogleastsquares
| [
198,
37811,
198,
9288,
62,
26791,
198,
35937,
198,
198,
37811,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
11485,
26791,
13,
37687,
11347,
62,
26791,
1330,
2251,
62,
22355,
62,
25192,
3361,
1563,
82,
11,
59,
198,
220,
220... | 2.923077 | 143 |
from rdkit import DataStructs
allchems = read()
print(len(allchems))
for i in range(100):
sims = get_similar(allchems[i],allchems,0.90)
print(allchems[i])
print('---')
for s in sims:
print(s)
print('+++++++++++++++++\n')
| [
6738,
374,
67,
15813,
1330,
6060,
44909,
82,
628,
198,
439,
2395,
907,
796,
1100,
3419,
198,
4798,
7,
11925,
7,
439,
2395,
907,
4008,
198,
198,
1640,
1312,
287,
2837,
7,
3064,
2599,
198,
220,
220,
220,
985,
82,
796,
651,
62,
38610... | 2.230088 | 113 |
# -*- coding: utf-8 -*-
'''
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
'''
from __future__ import unicode_literals
from ..model.workbook_filter_criteria import WorkbookFilterCriteria
from ..one_drive_object_base import OneDriveObjectBase
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
220,
198,
7061,
6,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
220,
1439,
6923,
33876,
13,
220,
49962,
739,
262,
17168,
13789,
13,
220,
4091,
13789,
287,
262,
1628,
6808,
... | 3.57265 | 117 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ModuleDefaultVersionArgs', 'ModuleDefaultVersion']
@pulumi.input_type
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
26144,
35986,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760,
644,
345,
389,
1804,
0,
17202,
... | 3.688073 | 109 |
import mxnet as mx
import numpy as np
class Loss(mx.metric.EvalMetric):
"""Calculate loss"""
class Accuracy(mx.metric.EvalMetric):
"""Calculate accuracy"""
| [
11748,
285,
87,
3262,
355,
285,
87,
198,
11748,
299,
32152,
355,
45941,
198,
198,
4871,
22014,
7,
36802,
13,
4164,
1173,
13,
36,
2100,
9171,
1173,
2599,
198,
220,
220,
220,
37227,
9771,
3129,
378,
2994,
37811,
628,
198,
4871,
33222,
... | 2.625 | 64 |
#!/usr/bin/python3
#Copyright 2017 Michael Kirsch
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
#to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
try:
import http.server
import configparser
import time
import os
import RPi.GPIO as GPIO
import subprocess
from configparser import SafeConfigParser
from enum import Enum
except ImportError:
raise ImportError('spidev or gpio not installed')
snes = SNES()
snes.attach_interrupts()
snes.check_video()
while True:
time.sleep(5)
snes.led(1)
snes.check_fan()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
15269,
2177,
3899,
7385,
20601,
198,
198,
2,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
286,
428,
3788,
290,
3917,
10314,
3696,
357,
116... | 3.546713 | 289 |
import argparse
import inspect
import random
import pickle
import turtle
if __name__ == '__main__':
fns = {"line": draw_line,
"squares": draw_squares_until_escaped,
"triangles": draw_triangles,
"spirangles" : draw_random_spirangles}
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--function",
choices = fns,
help="One of " + ', '.join(fns.keys()))
parser.add_argument("--number", type=int, help="How many?")
args = parser.parse_args()
turtle.setworldcoordinates(-70., -70., 70., 70.)
draw_bag()
turtle.hideturtle()
try:
f = fns[args.function]
if len(inspect.getargspec(f).args)==1:
f(args.number)
else:
f()
turtle.mainloop()
except KeyError:
parser.print_help()
| [
11748,
1822,
29572,
198,
11748,
10104,
198,
11748,
4738,
198,
11748,
2298,
293,
198,
11748,
28699,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
277,
5907,
796,
19779,
1370,
1298,
3197,
62,
1370,
11,... | 2.466454 | 313 |
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2010 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Author: Giangi Sacco
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from __future__ import print_function
import os
from contextlib import contextmanager
import sys
from . import StdOEL as ST
## A convinence constructor to make the writer the way applications need it
def create_writer(where, fileTag, flag, filename=None,
out=None, err=None, log=None):
"""create_writer(*args, **kwargs) takes the args/kwargs needed to
make a ready-for Application StdOEL instance.
"""
result = StdOEL()
result.createWriters(out=out, err=err, log=log)
result.configWriter(where, fileTag, flag, filename=filename)
result.init()
return result
@contextmanager
def context_writer(where, fileTag, flag, filename=None,
out=None, err=None, log=None):
"""create_writer as a context manager, see that for signature.
Usage:
>>>with context_writer as <writer>:
>>>... <suite>
>>>"""
result = create_writer(where, fileTag, flag, filename=filename,
out=out, err=err, log=log)
yield result
result.finalize()
## Any class that talks to StdOEL, needs these methods.
## The StdOEL object
| [
2,
27156,
27156,
27156,
27156,
15116,
8728,
4907,
93,
198,
2,
15069,
3050,
3442,
5136,
286,
8987,
13,
11096,
371,
34874,
15731,
1137,
53,
1961,
13,
198,
2,
220,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
116... | 3.438762 | 743 |
#! usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-07-06 14:02:20.222384
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.hybrid_shape_interfaces.plane import Plane
from pycatia.in_interfaces.reference import Reference
class HybridShapePlaneTangent(Plane):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| MecModInterfaces.HybridShape
| CATGSMIDLItf.Plane
| HybridShapePlaneTangent
|
| Tangency plane.
| Role: Allows to access data of the the plane feature tangent to a surface at a
| given point.
"""
@property
def point(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Point() As Reference
|
| Role: Get the tangency point.
|
| Parameters:
|
| oPoint
| tangency point.
|
| See also:
| Reference
| Returns:
| HRESULT S_OK if Ok E_FAIL else return error code for C++
| Implementations
| See also:
| HybridShapeFactory
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_plane_tangent.Point)
@point.setter
def point(self, reference_point: Reference):
"""
:param Reference reference_point:
"""
self.hybrid_shape_plane_tangent.Point = reference_point.com_object
@property
def surface(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Surface() As Reference
|
| Role: Get the surface to which the plane is to be tangent.
|
| Parameters:
|
| oSurface
| reference surface.
|
| See also:
| Reference
| Returns:
| HRESULT S_OK if Ok E_FAIL else return error code for C++
| Implementations
| See also:
| HybridShapeFactory
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_plane_tangent.Surface)
@surface.setter
def surface(self, reference_surface: Reference):
"""
:param Reference reference_surface:
"""
self.hybrid_shape_plane_tangent.Surface = reference_surface.com_object
| [
2,
0,
514,
81,
14,
8800,
14,
29412,
18,
13,
21,
198,
37811,
198,
220,
220,
220,
19937,
7317,
8295,
7560,
1262,
569,
20,
38062,
341,
3696,
422,
38348,
3539,
569,
20,
371,
2078,
319,
12131,
12,
2998,
12,
3312,
1478,
25,
2999,
25,
... | 1.820658 | 1,946 |
#
# Copyright (c) 2013-2015 Christopher L. Felton
#
import traceback
import pytest
import argparse
from myhdl import *
from rhea.cores.spi import spi_controller
from rhea.cores.spi import SPIBus
from rhea.models.spi import SPIEEPROM
from rhea.system import Global, Clock, Reset
from rhea.system import Wishbone
from rhea.system import FIFOBus
from rhea.utils.test import run_testbench, tb_convert, tb_args
@pytest.mark.xfail
if __name__ == '__main__':
testbench_spi(tb_args())
| [
2,
198,
2,
15069,
357,
66,
8,
2211,
12,
4626,
12803,
406,
13,
13937,
1122,
198,
2,
198,
198,
11748,
12854,
1891,
198,
11748,
12972,
9288,
198,
11748,
1822,
29572,
198,
198,
6738,
616,
71,
25404,
1330,
1635,
198,
198,
6738,
374,
2163... | 2.734807 | 181 |
# -*- coding: utf-8 -*-
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
628
] | 1.8 | 15 |
# extracts features of 1d array like data.
import numpy as np
import scipy
from scipy.stats import norm, rankdata
class Trends(Features):
"""
Arguments:
x array/list/series: 1d array or array like whose features are to be calculated.
"""
def sen_slope(self, alpha=None):
# https://github.com/USGS-python/trend/blob/master/trend/__init__.py
"""A nonparametric estimate of trend.
Parameters
----------
x : array_like
Observations taken at a fixed frequency.
Notes
-----
This method works with missing or censored data, as long as less <20% of
observations are censored.
References
----------
.. [1] Helsel and Hirsch, R.M. 2002. Statistical Methods in Water Resources.
.. [2] https://vsp.pnnl.gov/help/vsample/nonparametric_estimate_of_trend.htm
"""
s = sen_diff(self.x)
s.sort()
if alpha:
N = len(s)
# calculate confidence limits
C_alpha = norm.ppf(1 - alpha / 2) * np.sqrt(np.nanvar(self.x))
U = int(np.round(1 + (N + C_alpha) / 2))
L = int(np.round((N - C_alpha) / 2))
return np.nanmedian(s), s[L], s[U]
else:
return np.nanmedian(s)
def seasonal_sen_slope(self, period=12, alpha=None):
"""A nonparametric estimate of trend for seasonal time series.
Paramters
---------
x : array_like
Observations taken at a fixed frequency.
period : int
Number of observations in a cycle. The number of seasons.
"""
s = 0
for season in np.arange(0, period):
x_season = self.x[season::period]
s = np.append(s, sen_diff(x_season))
s.sort()
if alpha:
# XXX This code needs to be verified
N = len(s)
# calculate confidence limits
C_alpha = norm.ppf(1-alpha/2)*np.sqrt(np.nanvar(self.x))
U = int(np.round(1 + (N + C_alpha)/2))
L = int(np.round((N - C_alpha)/2))
return np.nanmedian(s), s[L], s[U]
else:
return np.nanmedian(s)
def pettitt(self, alpha=0.05):
"""Pettitt's change-point test
A nonparameteric test for detecting change points in a time series.
Parameters
----------
x : array_like
Observations taken at a fixed frequency.
alpha : float
Significance level
Return
------
The index of the change point of the series, provided that it is
statistically significant.
"""
U_t = np.zeros_like(self.x)
n = len(self.x)
r = rankdata(self.x)
for i in np.arange(n):
U_t[i] = 2 * np.sum(r[:i+1]) - (i+1)*(n-1)
t = np.argmax(np.abs(U_t))
K_t = U_t[t]
p = 2.0 * np.exp((-6.0 * K_t**2)/(n**3 + n**2))
if p > alpha:
return t
else:
return np.nan
def mann_kendall(self, alpha=0.05):
"""Mann-Kendall (MK) is a nonparametric test for monotonic trend.
Parameters
----------
x : array
Observations taken at a fixed frequency.
Returns
-------
z : float
Normalized MK test statistic.
Examples
--------
>>> x = np.random.rand(100) + np.linspace(0,.5,100)
>>> z,p = kendall(x)
Attribution
-----------
Modified from code by Michael Schramn available at
https://github.com/mps9506/Mann-Kendall-Trend/blob/master/mk_test.py
"""
# n = len(self.x)
s = mk_score(self.x)
var_s = mk_score_variance(self.x)
z = mk_z(s, var_s)
# calculate the p_value
p_value = 2*(1-norm.cdf(abs(z))) # two tail test
return p_value
def seasonal_mann_kendall(self, period=12):
""" Seasonal nonparametric test for detecting a monotonic trend.
Parameters
----------
x : array
A sequence of chronologically ordered observations with fixed
frequency.
period : int
The number of observations that define period. This is the number of seasons.
"""
# Compute the SK statistic, S, for each season
s = 0
var_s = 0
for season in np.arange(period):
x_season = self.x[season::period]
s += mk_score(x_season)
var_s += mk_score_variance(x_season)
# Compute the SK test statistic, Z, for each season.
z = mk_z(s, var_s)
# calculate the p_value
p_value = 2*(1-norm.cdf(abs(z))) # two tail test
return p_value
def mk_z(s, var_s):
"""Computes the MK test statistic, Z.
Parameters
----------
s : float
The MK trend statistic, S.
var_s : float
Variance of S.
Returns
-------
MK test statistic, Z.
"""
# calculate the MK test statistic
if s > 0:
z = (s - 1)/np.sqrt(var_s)
elif s < 0:
z = (s + 1)/np.sqrt(var_s)
else:
z = 0
return z
def mk_score_variance(x):
"""Computes corrected variance of S statistic used in Mann-Kendall tests.
Equation 8.4 from Helsel and Hirsch (2002).
Parameters
----------
x : array_like
Returns
-------
Variance of S statistic
Note that this might be equivalent to:
See page 728 of Hirsch and Slack
References
----------
.. [1] Helsel and Hirsch, R.M. 2002. Statistical Methods in Water Resources.
"""
x = x[~np.isnan(x)]
n = len(x)
# calculate the unique data
unique_x = np.unique(x)
# calculate the number of tied groups
g = len(unique_x)
# calculate the var(s)
if n == g: # there is no tie
var_s = (n * (n - 1) * (2 * n + 5)) / 18
else: # there are some ties in data
tp = np.zeros_like(unique_x)
for i in range(len(unique_x)):
tp[i] = sum(x == unique_x[i])
var_s = (n * (n - 1) * (2 * n + 5) - np.sum(tp * (tp - 1) * (2 * tp + 5))) / 18
return var_s
def mk_score(x):
"""Computes S statistic used in Mann-Kendall tests.
Parameters
----------
x : array_like
Chronologically ordered array of observations.
Returns
-------
MK trend statistic (S).
"""
x = x[~np.isnan(x)]
n = len(x)
s = 0
for j in np.arange(1, n):
s += np.sum(np.sign(x[j] - x[0:j]))
return s
def sen_diff(x):
"""Sen's difference operator.
Paramaters
----------
x : array_like
Observations taken at a fixed frequency.
Returns
-------
Sen difference
"""
#x = x[~np.isnan(x)]
n = len(x)
N = int(n*(n-1)/2) # number of slope estimates
s = np.zeros(N)
i = 0
for j in np.arange(1, n):
#s[i:j+i] = (x[j] - x[0:j])/np.arange(1, j+1)
s[i:j+i] = (x[j] - x[0:j])/np.arange(j, 0, -1)
i += j
return s
if __name__ == "__main__":
f = Features(np.random.random(10))
| [
2,
32139,
3033,
286,
352,
67,
7177,
588,
1366,
13,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
198,
6738,
629,
541,
88,
13,
34242,
1330,
2593,
11,
4279,
7890,
198,
198,
4871,
34308,
7,
23595,
2599,
628,
220,
... | 2.081317 | 3,431 |
default_app_config = 'vacancies.apps.VacanciesConfig'
| [
12286,
62,
1324,
62,
11250,
796,
705,
85,
330,
16183,
13,
18211,
13,
53,
330,
16183,
16934,
6,
198
] | 2.842105 | 19 |
import os
import os.path
import scipy.io as sio
import numpy as np
import struct
dataset_dir = "D:/Cache/Git/HandPointNet/data/cvpr15_MSRAHandGestureDB/"
save_dir = "./"
sub_names = ['P0', 'P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7', 'P8']
ges_names = ['1', '2', '3', '4', '5', '6', '7', '8',
'9', 'I', 'IP', 'L', 'MP', 'RP', 'T', 'TIP', 'Y']
| [
11748,
28686,
198,
11748,
28686,
13,
6978,
198,
11748,
629,
541,
88,
13,
952,
355,
264,
952,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2878,
198,
198,
19608,
292,
316,
62,
15908,
796,
366,
35,
14079,
30562,
14,
38,
270,
14,
12... | 1.853659 | 205 |
import factory
from intake import models
from user_accounts.tests.factories import UserFactory
from .tag_factory import TagFactory
from .form_submission_factory import FormSubmissionFactory
| [
11748,
8860,
198,
6738,
10337,
1330,
4981,
198,
6738,
2836,
62,
23317,
82,
13,
41989,
13,
22584,
1749,
1330,
11787,
22810,
198,
6738,
764,
12985,
62,
69,
9548,
1330,
17467,
22810,
198,
6738,
764,
687,
62,
7266,
3411,
62,
69,
9548,
133... | 4.06383 | 47 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import numpy as np
import pandas as pd
import urltools
# Loading individual tweets and manipulate them
def load_json(x):
'''
Load Tweet JSON
'''
try:
return json.loads(x, strict=False)
except:
print("Problematic tweet found.")
return None
def get_tweet_urls(t):
'''
Given a Tweet JSON, pull the URLs found inside it
'''
try:
return get_urls(t['entities']['urls'])
except:
return []
def get_retweet_urls(t):
'''
Given a Tweet JSON, pull the URLs of the Tweet this tweet retweeted
'''
try:
return get_urls(t['retweeted_status']['entities']['urls'])
except:
return []
def get_urls(urls):
'''
Generic function to extract the URLs from the urls sub-object
'''
try:
urls = [v for (k, v) in urls[0].items()
if k in ('url', 'expanded_url')]
return list(set(urls))
except:
return []
def load_queries(file):
'''
Load file that contains information about the search queries
'''
queries = pd.read_csv(file, index_col="id")
return queries
def load_tweets(x):
"""
Load files containing tweets
"""
df = pd.read_csv(x, index_col="id", parse_dates=['posted_on'],
dtype={
'tweet_id': str,
'user_id': str,
'retweeted_status': str,
'quoted_status': str,
'in_reply_to': str
})
return df
# Loading files
def load_urls(file):
'''
Load file that contains tweets & urls
'''
df = pd.read_csv(file, index_col="id",
na_values="None",
dtype={'tweet_id': str,
'retweeted_status': str,
'quoted_status': str,
'relevant_url': str,
'cleaned_url': str},
parse_dates=['timestamp'])
return df
def load_altmetric(file):
"""
Load file containing altmetric URLs
"""
df = pd.read_csv(file, index_col="id", parse_dates=['posted_on'])
return df
def clean_url(url, venue=None):
'''
Strip out trailing slashes, URL query variables, anchors, etc.
'''
if pd.isna(url):
return np.nan
# remove parts of the URL that come before the domain (e.g., google news)
if venue:
url = "www." + venue + "".join(url.split(venue)[1:])
try:
up = urltools.extract(url)
url = up.subdomain + "." + up.domain + "." + up.tld + up.path
url = urltools.normalize(url)
return url
except:
raise
def relevant_url(url, venue, terms):
'''
Check if URL contains one of the search terms for each news venue
in the form "/search_term/" (which would typically be a section)
'''
if pd.isna(url):
return False
if venue not in url:
return False
for term in terms:
if "/" + term + "/" in url:
return True
return False
def merge_urls(row):
'''
If relevant URL exists return it, otherwise
use retweeted URL
'''
if not pd.isna(row['relevant_url']):
return row['relevant_url']
return row['retweet_url']
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
33918,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2956,
252... | 2.144256 | 1,532 |
import matplotlib.pyplot as plt
import os
import numpy as np
from numpy.testing._private.utils import print_assert_equal
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.utils import to_categorical, plot_model
from tensorflow import keras
from tensorflow.keras import callbacks, layers
from tensorflow.keras.models import Sequential
from tensorflow.python.keras.layers.core import Activation
#checkpoint.path
#LIMITING THE GPU, i need this or it blows away
gpus = tf.config.experimental.list_physical_devices('GPU')
#limit exponential growth
tf.config.experimental.set_memory_growth(gpus[0], True)
if gpus:
# Restrict TensorFlow to only allocate 4GB of memory on the first GPU
try:
#setting limit
tf.config.experimental.set_virtual_device_configuration(gpus[0],
[tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024 * 4)])
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Virtual devices must be set before GPUs have been initialized
print(e)
#HYPERPARAMETERS
seed = 9682
epochs= 3 #Albini uses 80
batch_size = 64
starter_learning_rate = 0.1
end_learning_rate = 0.02
decay_steps = 100000
boundaries = [1, 1000]
values = [0.05, 0.005, 0.0005]
#PolynomialDecay
#learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(starter_learning_rate, decay_steps, end_learning_rate, power=0.5)
#PiecewiseConstDecay
#learning_rate_fn = keras.optimizers.schedules.PiecewiseConstantDecay(boundaries, values)
#the preprocessinge image dataset from directory can easily reshape to the required size
image_size = (100, 68)
#DATASET GENERATION
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
"dataset",
validation_split=0.3,
subset="training",
labels="inferred",
color_mode ="grayscale",
seed=seed,
image_size=image_size,
batch_size=batch_size,
)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
"dataset",
validation_split=0.3,
subset="validation",
labels="inferred",
color_mode ="grayscale",
seed=seed,
image_size=image_size,
batch_size=batch_size,
)
#STUFF I STILL DO NOT UNDERSTAND
#use disk data without IO becoming blocking
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
#PREPROCESSING
data_augmentation = keras.Sequential(
[
layers.experimental.preprocessing.RandomFlip("horizontal", seed=seed),
layers.experimental.preprocessing.RandomFlip("vertical", seed=seed),
#layers.experimental.preprocessing.RandomRotation(0.25 , seed=seed),
#layers.experimental.preprocessing.RandomRotation((-0.4, -0.1), seed=seed),
layers.experimental.preprocessing.Rescaling(1./255)
]
)
#MODEL
HandsNet = Sequential([
keras.Input(shape=( 100, 68, 1)),
#preprocessing in the model, although i should actually do it before
data_augmentation,
#ConvLayer 1
layers.Conv2D(32, 7, strides=1, padding='same'),
layers.BatchNormalization(),
layers.Activation('relu'),
layers.MaxPooling2D(pool_size=(2, 2), strides=2, padding='same'),
layers.Dropout(0.1),
#ConvLayer 2
layers.Conv2D(64, 5, strides=1, padding='same'),
layers.BatchNormalization(),
layers.Activation('relu'),
layers.MaxPooling2D(pool_size=(2, 2), strides=2, padding='same'),
layers.Dropout(0.1),
#ConvLayer 3
layers.Conv2D(128, 3, strides=1, padding='same'),
layers.BatchNormalization(),
layers.Activation('relu'),
layers.MaxPooling2D(pool_size=(2, 2), strides=2, padding='same'),
#ConvLayer 4
layers.Conv2D(256, 1, strides=1, padding='same'),
layers.BatchNormalization(),
layers.Activation('relu'),
layers.MaxPooling2D(pool_size=(2, 2), strides=2, padding='same'),
#FCLayer1
layers.Flatten(),
layers.Dense(64, activation='relu'),
layers.Dropout(0.6),
#FCLayer2
layers.Flatten(),
layers.Dense(32, activation='relu'),
layers.Dropout(0.5),
#FCLayer3
layers.Flatten(),
layers.Dense(2, activation='softmax'),
])
#SETTING MODEL OPTIMIZER
#doing a gradient descent with momentum optimizer, this is a pretty standard and optimized situation
#opt = keras.optimizers.SGD(learning_rate=0.1, momentum=0.9, decay=0.05)
#opt = keras.optimizers.SGD(learning_rate=learning_rate_fn, momentum=0.9)
opt = tf.keras.optimizers.Adam(learning_rate = 0.01)
#TRAIN1
#MODEL COMPILE
HandsNet.compile(optimizer=opt,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), # "binary_crossentropy"
metrics=['accuracy'])
HandsNet.fit(train_ds, epochs=epochs, validation_data=val_ds, batch_size=batch_size)
#TRAIN2 FINE TUNING
opt = tf.keras.optimizers.Adam(learning_rate = 0.0005)
#MODEL COMPILE
HandsNet.compile(optimizer=opt,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), # "binary_crossentropy"
metrics=['accuracy'])
history = HandsNet.fit(train_ds, epochs=30, validation_data=val_ds, batch_size=batch_size)
HandsNet.save('Models/HandsNet_3') # save_format='tf', overwrite=True
HandsNet.save('Models/HandsNet_3.h5')
scores = HandsNet.evaluate(val_ds, verbose=0, return_dict=True)
print(scores)
#VISUALIZE TRAINING RESULS
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
13,
33407,
13557,
19734,
13,
26791,
1330,
3601,
62,
30493,
62,
40496,
198,
11748,
11192,
273,
11125,
... | 2.631395 | 2,287 |
import collections
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
| [
11748,
17268,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
13,
7645,
23914,
355,
31408,
628,
220,
220,
220,
220,
198,
220,
220,
220,
220
] | 3.153846 | 39 |
# -*- coding: utf-8 -*-
#
# Copyright 2016 Continuum Analytics, Inc.
# May be copied and distributed freely only as part of an Anaconda or
# Miniconda installation.
#
"""
Preferences dialog.
"""
# Third party imports
from qtpy.QtCore import Qt, QRegExp, Signal
from qtpy.QtGui import QRegExpValidator, QPixmap
from qtpy.QtWidgets import (QCheckBox, QGridLayout, QHBoxLayout, QLabel,
QLineEdit, QPushButton, QVBoxLayout,
QWidget)
# Local imports
from anaconda_navigator.api import AnacondaAPI
from anaconda_navigator.config import CONF
from anaconda_navigator.images import WARNING_ICON
from anaconda_navigator.utils.logs import logger
from anaconda_navigator.widgets import ButtonCancel
from anaconda_navigator.widgets.dialogs import DialogBase
if __name__ == '__main__':
test()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
1584,
6389,
13814,
30437,
11,
3457,
13,
198,
2,
1737,
307,
18984,
290,
9387,
12748,
691,
355,
636,
286,
281,
1052,
330,
13533,
393,
198,
2,
1855,
2... | 2.627329 | 322 |
import numpy as np
import pickle,os
from Common.Module import Module
import pycuda.autoinit
import pycuda.driver as drv
from pycuda.compiler import SourceModule
mod = SourceModule("""
#include <stdio.h>
__global__ void calculate(float *dest, float *xs_weight, float *selection_weight, float* dataMCWeight, float* pileupWeight, float *k_qqZZ_qcd_M, float* k_qqZZ_ewk)
{
const int i = threadIdx.x + blockDim.x * blockIdx.x;
dest[i] = xs_weight[i]*selection_weight[i]*dataMCWeight[i]*pileupWeight[i]*k_qqZZ_qcd_M[i]*k_qqZZ_ewk[i];
}
""")
calculate_event_weight = mod.get_function("calculate")
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2298,
293,
11,
418,
198,
198,
6738,
8070,
13,
26796,
1330,
19937,
198,
198,
11748,
12972,
66,
15339,
13,
23736,
15003,
198,
11748,
12972,
66,
15339,
13,
26230,
355,
1553,
85,
198,
6738,
12972,
... | 2.555556 | 234 |
import numpy as np
import sys, os
from game import tiles
class tilemap():
"""
A map consists of a Numpy Array of integers. This array contains the ID's of tiles to be displayed.
Tiles are loaded from file into a dictionary of id's to names, ascii symbol and colours.
A tilemap is used as reference to the units so they can navigate the world.
The tilemap is dynamic, such that when objects or units exist on a specific tile, display is overlayed, but effects still exist.
"""
def display_map(self):
"""
Takes the unit
"""
sys.stdout.write("\x1B[2J\x1B[H")
for y in range(self.height):
#print(y, end=" ")
#print(self.world[y])
for x in range(self.width):
#print(self.world[y][x])
self.map_tiles.display_tile(self.world[y][x])
sys.stdout.write("\n")
def load_map(self, file):
"""
"""
self.name = os.path.splitext(file)[0]
#clear previous map
#sys.stdout.write("\x1B[2J\x1B[H")
file = open(file,'r')
lines = [line.rstrip() for line in file]
self.height = len(lines)
lengths = []
for line in lines:
temp = line.split('\t')
lengths.append(len(temp))
#print(max(lengths))
self.width = max(lengths)
#print(self.width, self.height)
self.world = np.full((self.height, self.width), -1)
i = 0
for line in lines:
j = 0
line = line.split('\t')
#print(line)
for symbol in line:
#print(i,j)
if symbol == ' ' or symbol == '':
symbol = -1
#print(symbol)
self.world[i][j] = int(symbol)
#print(self.world)
j += 1
i += 1
def load_portals(self, filename):
"""
"""
file = open(filename, 'r')
next(file) #skip first line. This is used for user
lines = [line.rstrip() for line in file]
for line in lines:
#get rid of repeating tabs
while "\t\t" in line:
line = line.replace("\t\t", "\t")
#split by single tabs into a list
line = line.split('\t')
print(line)
#should be formatted id: ("name","symbol",r,g,b)
self.portals[(int(line[0]), int(line[1]))] = {"portal_name":str(line[2]),
"target_world":str(line[3]),
"target_loc":(int(line[4]), int(line[5]))
}
if __name__ == "__main__":
world = tilemap()
world.load_map("C:/Users/legom/Documents/GitHub/Turn2.0/saves/maps/Starting")
#print(world.grid)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
25064,
11,
28686,
198,
198,
6738,
983,
1330,
19867,
628,
198,
4871,
17763,
8899,
33529,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
317,
3975,
10874,
286,
257,
399,
32152,
15690,
286,
37014,... | 1.896062 | 1,549 |
if __name__=="__main__":
x = [10,20,-1,19,40]
insertion_sort(x)
print(x) | [
628,
198,
361,
11593,
3672,
834,
855,
1,
834,
12417,
834,
1298,
198,
220,
220,
220,
2124,
796,
685,
940,
11,
1238,
12095,
16,
11,
1129,
11,
1821,
60,
198,
220,
220,
220,
36075,
62,
30619,
7,
87,
8,
198,
220,
220,
220,
3601,
7,
... | 1.851064 | 47 |
import os
import sys
from redisgraph import Graph, Node, Edge
from base import FlowTestsBase
redis_graph = None
# Create a single node without any labels or properties.
# Retry to create an existing entity.
# Create a single node with two properties and no labels.
# Retry to create an existing entity.
# Create a single node with both label and property.
# Retry to create an existing entity.
# Create a single edge and additional two nodes.
# Retry to create a single edge and additional two nodes.
# Update existing entity
# Update new entity
# Create a single edge and additional two nodes.
# Update existing relation
# Update multiple nodes
# Update multiple nodes
# Add node that matches pre-existing index
# Update nodes based on non-constant inlined properties
| [
11748,
28686,
198,
11748,
25064,
198,
6738,
2266,
271,
34960,
1330,
29681,
11,
19081,
11,
13113,
198,
6738,
2779,
1330,
27782,
51,
3558,
14881,
198,
198,
445,
271,
62,
34960,
796,
6045,
628,
220,
220,
220,
1303,
13610,
257,
2060,
10139,... | 3.28839 | 267 |
""" One of the widgets that main window composes of.
See :class:`.MainWindow`
Similar modules: class:`.AddCluster`, :class:`.RemoveCluster`,
:class:`.IdactNotebook`, :class:`.ManageJobs`
"""
from PyQt5.QtWidgets import QWidget
from idact.core.retry import Retry
from idact.detail.environment.environment_provider import EnvironmentProvider
from idact import save_environment, load_environment
from gui.functionality.popup_window import WindowType, PopUpWindow
from gui.helpers.ui_loader import UiLoader
class AdjustTimeouts(QWidget):
""" Module of GUI that is responsible for allowing the adjustment of the
clusters timeouts.
"""
def refresh_timeouts(self, cluster_name):
""" Fetches and refreshes the timeouts of the particular cluster.
:param cluster_name: Name of a cluster to refresh timeouts for.
"""
load_environment()
default_retries = EnvironmentProvider().environment.clusters[cluster_name].config.retries
self.ui.port_info_count.setValue(default_retries[Retry.PORT_INFO].count)
self.ui.jupyter_json_count.setValue(default_retries[Retry.JUPYTER_JSON].count)
self.ui.scheduler_connect_count.setValue(default_retries[Retry.SCHEDULER_CONNECT].count)
self.ui.dask_node_connect_count.setValue(default_retries[Retry.DASK_NODE_CONNECT].count)
self.ui.deploy_dask_scheduler_count.setValue(default_retries[Retry.DEPLOY_DASK_SCHEDULER].count)
self.ui.deploy_dask_worker_count.setValue(default_retries[Retry.DEPLOY_DASK_WORKER].count)
self.ui.get_scheduler_address_count.setValue(default_retries[Retry.GET_SCHEDULER_ADDRESS].count)
self.ui.check_worker_started_count.setValue(default_retries[Retry.CHECK_WORKER_STARTED].count)
self.ui.cancel_deployment_count.setValue(default_retries[Retry.CANCEL_DEPLOYMENT].count)
self.ui.squeue_after_sbatch_count.setValue(default_retries[Retry.SQUEUE_AFTER_SBATCH].count)
self.ui.open_tunnel_count.setValue(default_retries[Retry.OPEN_TUNNEL].count)
self.ui.validate_http_tunnel_count.setValue(default_retries[Retry.VALIDATE_HTTP_TUNNEL].count)
self.ui.tunnel_try_again_with_any_port_count.setValue(
default_retries[Retry.TUNNEL_TRY_AGAIN_WITH_ANY_PORT].count)
self.ui.port_info_seconds.setValue(default_retries[Retry.PORT_INFO].seconds_between)
self.ui.jupyter_json_seconds.setValue(default_retries[Retry.JUPYTER_JSON].seconds_between)
self.ui.scheduler_connect_seconds.setValue(default_retries[Retry.SCHEDULER_CONNECT].seconds_between)
self.ui.dask_node_connect_seconds.setValue(default_retries[Retry.DASK_NODE_CONNECT].seconds_between)
self.ui.deploy_dask_scheduler_seconds.setValue(
default_retries[Retry.DEPLOY_DASK_SCHEDULER].seconds_between)
self.ui.deploy_dask_worker_seconds.setValue(default_retries[Retry.DEPLOY_DASK_WORKER].seconds_between)
self.ui.get_scheduler_address_seconds.setValue(
default_retries[Retry.GET_SCHEDULER_ADDRESS].seconds_between)
self.ui.check_worker_started_seconds.setValue(default_retries[Retry.CHECK_WORKER_STARTED].seconds_between)
self.ui.cancel_deployment_seconds.setValue(default_retries[Retry.CANCEL_DEPLOYMENT].seconds_between)
self.ui.squeue_after_sbatch_seconds.setValue(default_retries[Retry.SQUEUE_AFTER_SBATCH].seconds_between)
self.ui.open_tunnel_seconds.setValue(default_retries[Retry.OPEN_TUNNEL].seconds_between)
self.ui.validate_http_tunnel_seconds.setValue(default_retries[Retry.VALIDATE_HTTP_TUNNEL].seconds_between)
self.ui.tunnel_try_again_with_any_port_seconds.setValue(
default_retries[Retry.TUNNEL_TRY_AGAIN_WITH_ANY_PORT].seconds_between)
def save_timeouts(self):
""" Saves to the configuration the timeouts of current cluster.
"""
if self.current_cluster == '':
self.popup_window.show_message("There are no added clusters", WindowType.error)
else:
default_retries = EnvironmentProvider().environment.clusters[self.current_cluster].config.retries
default_retries[Retry.PORT_INFO].count = int(self.ui.port_info_count.text())
default_retries[Retry.JUPYTER_JSON].count = int(self.ui.jupyter_json_count.text())
default_retries[Retry.SCHEDULER_CONNECT].count = int(self.ui.scheduler_connect_count.text())
default_retries[Retry.DASK_NODE_CONNECT].count = int(self.ui.dask_node_connect_count.text())
default_retries[Retry.DEPLOY_DASK_SCHEDULER].count = int(self.ui.deploy_dask_scheduler_count.text())
default_retries[Retry.DEPLOY_DASK_WORKER].count = int(self.ui.deploy_dask_worker_count.text())
default_retries[Retry.GET_SCHEDULER_ADDRESS].count = int(self.ui.get_scheduler_address_count.text())
default_retries[Retry.CHECK_WORKER_STARTED].count = int(self.ui.check_worker_started_count.text())
default_retries[Retry.CANCEL_DEPLOYMENT].count = int(self.ui.cancel_deployment_count.text())
default_retries[Retry.SQUEUE_AFTER_SBATCH].count = int(self.ui.squeue_after_sbatch_count.text())
default_retries[Retry.OPEN_TUNNEL].count = int(self.ui.open_tunnel_count.text())
default_retries[Retry.VALIDATE_HTTP_TUNNEL].count = int(self.ui.validate_http_tunnel_count.text())
default_retries[Retry.TUNNEL_TRY_AGAIN_WITH_ANY_PORT].count = int(
self.ui.tunnel_try_again_with_any_port_count.text())
default_retries[Retry.PORT_INFO].seconds_between = int(self.ui.port_info_seconds.text())
default_retries[Retry.JUPYTER_JSON].seconds_between = int(self.ui.jupyter_json_seconds.text())
default_retries[Retry.SCHEDULER_CONNECT].seconds_between = int(self.ui.scheduler_connect_seconds.text())
default_retries[Retry.DASK_NODE_CONNECT].seconds_between = int(self.ui.dask_node_connect_seconds.text())
default_retries[Retry.DEPLOY_DASK_SCHEDULER].seconds_between = int(
self.ui.deploy_dask_scheduler_seconds.text())
default_retries[Retry.DEPLOY_DASK_WORKER].seconds_between = int(self.ui.deploy_dask_worker_seconds.text())
default_retries[Retry.GET_SCHEDULER_ADDRESS].seconds_between = int(
self.ui.get_scheduler_address_seconds.text())
default_retries[Retry.CHECK_WORKER_STARTED].seconds_between = int(
self.ui.check_worker_started_seconds.text())
default_retries[Retry.CANCEL_DEPLOYMENT].seconds_between = int(self.ui.cancel_deployment_seconds.text())
default_retries[Retry.SQUEUE_AFTER_SBATCH].seconds_between = int(self.ui.squeue_after_sbatch_seconds.text())
default_retries[Retry.OPEN_TUNNEL].seconds_between = int(self.ui.open_tunnel_seconds.text())
default_retries[Retry.VALIDATE_HTTP_TUNNEL].seconds_between = int(
self.ui.validate_http_tunnel_seconds.text())
default_retries[Retry.TUNNEL_TRY_AGAIN_WITH_ANY_PORT].seconds_between = int(
self.ui.tunnel_try_again_with_any_port_seconds.text())
save_environment()
self.popup_window.show_message("Timeouts have been saved", WindowType.success)
def item_pressed(self, item_pressed):
""" Handles the cluster change.
:param item_pressed: Name of a cluster that was selected.
"""
self.current_cluster = item_pressed
self.refresh_timeouts(item_pressed)
def handle_cluster_list_modification(self):
""" Handles the modification of the clusters list.
"""
self.cluster_names = self.parent.data_provider.get_cluster_names()
self.ui.cluster_names_box.clear()
self.ui.cluster_names_box.addItems(self.cluster_names)
| [
37811,
1881,
286,
262,
40803,
326,
1388,
4324,
552,
4629,
286,
13,
628,
220,
220,
220,
4091,
1058,
4871,
25,
44646,
13383,
27703,
63,
198,
220,
220,
220,
11014,
13103,
25,
220,
1398,
25,
44646,
4550,
2601,
5819,
47671,
1058,
4871,
25,... | 2.321407 | 3,382 |
# エラトステネスの篩, 素因数分解
N = int(input())
m = 1000000007
prime_table = make_prime_table(N)
t = [0] * (N + 1)
for i in range(2, N + 1):
for p, e in prime_factorize(i):
t[p] += e
result = 1
for i in range(2, N + 1):
if t[i] == 0:
continue
result = result * (t[i] + 1) % m
print(result)
| [
2,
17433,
101,
9263,
13298,
8943,
24336,
44916,
8943,
17683,
107,
102,
11,
13328,
112,
254,
32368,
254,
46763,
108,
26344,
228,
164,
100,
96,
628,
198,
198,
45,
796,
493,
7,
15414,
28955,
198,
198,
76,
796,
1802,
10535,
22,
198,
198... | 1.819767 | 172 |
from django.db import models
# Create your models here.
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
2,
13610,
534,
4981,
994,
13,
198
] | 3.5625 | 16 |
#!/usr/bin/env python3
import pandas as pd
import common.data_management as dm
import common.describe_data as dd
if __name__ == '__main__':
pd.set_option('display.max_columns', None)
whr_df = pd.read_csv('data/data.csv')
whr_df = dm.preprocess_data(whr_df)
columns = ['Score', 'GDP per capita', 'Healthy life expectancy', 'Social support']
dd.show_tendency_measures(whr_df, columns)
dd.show_variability_measures(whr_df, columns)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
11748,
2219,
13,
7890,
62,
27604,
355,
288,
76,
198,
11748,
2219,
13,
20147,
4892,
62,
7890,
355,
49427,
198,
198,
361,
11593,
367... | 2.519337 | 181 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from gilded_rose import *
import csv
if __name__ == "__main__":
print ("OMGHAI!")
items = []
with open("items.csv") as f:
reader = csv.DictReader(f)
for row in reader:
items.append(Item(row['Name'], int(row['sellIn']), int(row['quality'])))
days = 1
import sys
if len(sys.argv) > 1:
days = int(sys.argv[1]) + 1
for day in range(days):
print("-------- day %s --------" % day)
print("name, sellIn, quality")
for item in items:
print(item)
print("")
GildedRose(items).update_quality()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
6738,
308,
46158,
62,
13698,
1330,
1635,
198,
11748,
269,
21370,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834... | 2.17377 | 305 |
import argparse
from annotations import *
import config
import pash_runtime
from util import *
##
## A Daemon responding to requests for compilation
##
## Note: Not an actual daemon with the strict Unix sense
##
## TODO: Rename the pash_runtime to pash_compiler and this to pash_daemon
## TODO: Should we maybe use sockets instead of fifos?
## TODO: Fix the daemon logging.
## Initialize the daemon
## TODO: Improve the way parsing happens plz :') At the moment this will fail with : in file etc
if __name__ == "__main__":
main()
| [
11748,
1822,
29572,
198,
198,
6738,
37647,
1330,
1635,
198,
11748,
4566,
198,
11748,
279,
1077,
62,
43282,
198,
6738,
7736,
1330,
1635,
198,
198,
2235,
198,
2235,
317,
9637,
7966,
14409,
284,
7007,
329,
23340,
198,
2235,
198,
2235,
5740... | 3.408805 | 159 |
import cv2
import numpy as np
import math
from model_init import face_model, landmark_model
import face_detect
import face_landmark_detection
def get_2d_points(rotation_vector, translation_vector, camera_matrix, val):
"""Return the 3D points present as 2D for making annotation box"""
point_3d = []
dist_coeffs = np.zeros((4,1))
rear_size = val[0]
rear_depth = val[1]
point_3d.append((-rear_size, -rear_size, rear_depth))
point_3d.append((-rear_size, rear_size, rear_depth))
point_3d.append((rear_size, rear_size, rear_depth))
point_3d.append((rear_size, -rear_size, rear_depth))
point_3d.append((-rear_size, -rear_size, rear_depth))
front_size = val[2]
front_depth = val[3]
point_3d.append((-front_size, -front_size, front_depth))
point_3d.append((-front_size, front_size, front_depth))
point_3d.append((front_size, front_size, front_depth))
point_3d.append((front_size, -front_size, front_depth))
point_3d.append((-front_size, -front_size, front_depth))
point_3d = np.array(point_3d, dtype=np.float).reshape(-1, 3)
# Map to 2d img points
(point_2d, _) = cv2.projectPoints(point_3d,
rotation_vector,
translation_vector,
camera_matrix,
dist_coeffs)
point_2d = np.int32(point_2d.reshape(-1, 2))
return point_2d
def head_pose_points(image, rotation_vector, translation_vector, camera_matrix):
"""
Get the points to estimate head pose sideways
Parameters
----------
img : np.unit8
Original Image.
rotation_vector : Array of float64
Rotation Vector obtained from cv2.solvePnP
translation_vector : Array of float64
Translation Vector obtained from cv2.solvePnP
camera_matrix : Array of float64
The camera matrix
Returns
-------
(x, y) : tuple
Coordinates of line to estimate head pose
"""
rear_size = 1
rear_depth = 0
front_size = image.shape[1]
front_depth = front_size*2
val = [rear_size, rear_depth, front_size, front_depth]
point_2d = get_2d_points(rotation_vector, translation_vector, camera_matrix, val)
y = (point_2d[5] + point_2d[8])//2
x = point_2d[2]
return (x, y)
| [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
6738,
2746,
62,
15003,
1330,
1986,
62,
19849,
11,
20533,
62,
19849,
198,
11748,
1986,
62,
15255,
478,
198,
11748,
1986,
62,
1044,
4102,
62,
15255,
3213,
6... | 2.225166 | 1,057 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from enum import Enum
from typing import Optional, List
from fastapi import APIRouter, WebSocket, WebSocketDisconnect
from pydantic import BaseModel
from epicteller.core.controller import character as character_ctl
from epicteller.core.controller import combat as combat_ctl
from epicteller.core.model.combat import Combat, CombatToken
from epicteller.core.error.base import NotFoundError, EpictellerError
from epicteller.core.model.kafka_msg.base import get_msg_model
from epicteller.core.model.kafka_msg.combat import MsgCombat
from epicteller.web import bus
from epicteller.web.fetcher import combat as combat_fetcher
from epicteller.web.model.combat import Combat as WebCombat
from epicteller.web.model.combat import CombatToken as WebCombatToken
router = APIRouter()
@router.get('/combats/{url_token}', response_model=WebCombat, response_model_exclude_none=True)
@router.websocket('/combats/{url_token}')
@router.put('/combats/{url_token}', response_model=WebCombat, response_model_exclude_none=True)
@router.post('/combats/{url_token}/tokens', response_model=CombatTokenOut, response_model_exclude_none=True)
@router.delete('/combats/{url_token}/tokens/{token_name}')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
33829,
1330,
2039,
388,
198,
6738,
19720,
1330,
32233,
11,
7343,
198,
198,
6738,
3049,
15042,
1330,
3486,
4663,
3... | 3.049261 | 406 |
"""
.. _plot_events_inverse:
============================
03. Compute inverse solution
============================
This workflow mainly call the
:ref:`inverse solution pipeline <source_reconstruction>`
performing source reconstruction starting from raw data specified by the user.
The first node of the workflow (:ref:`concat_event`) extracts the events from
stimulus channel ``STI101``. The events are saved in the :ref:`concat_event`
directory. For each subject, the different run are also concatenated in
one single raw file and saved in :ref:`concat_event` directory.
The input of this node are the different run taken from the
preprocessing workflow directory, i.e. the cleaned
raw data created by :ref:`preproc_meg`.
In the :ref:`inv_solution_node` the raw data are epoched accordingly to
events specified in ``json`` file and created in :ref:`concat_event`.
The evoked datasets are created by averaging the different conditions specified
in ``json file``. Finally the source estimates obtained by the
:ref:`inv_solution_node` are morphed to the ``fsaverage`` brain in the
:ref:`morphing_node`.
.. warning:: Before running this pipeline, the coregistration between the MRI
and MEG device needs to be performed.
"""
###############################################################################
# Import modules
# ^^^^^^^^^^^^^^
import os.path as op
import json
import pprint # noqa
import nipype.pipeline.engine as pe
from nipype.interfaces.utility import Function
from ephypype.nodes import create_iterator, create_datagrabber
from ephypype.pipelines.fif_to_inv_sol import create_pipeline_source_reconstruction # noqa
###############################################################################
# Define data and variables
# ^^^^^^^^^^^^^^^^^^^^^^^^^
# Let us specify the variables that are specific for the data analysis (the
# main directories where the data are stored, the list of subjects and
# sessions, ...) and the variable specific for the particular pipeline
# (events_id, inverse method, ...) in a
# :download:`json <https://github.com/neuropycon/ephypype/tree/master/doc/workshop/eeg/params.json>` file
params = json.load(open("params.json"))
pprint.pprint({'parameters': params["general"]})
data_type = params["general"]["data_type"]
subject_ids = params["general"]["subject_ids"]
NJOBS = params["general"]["NJOBS"]
session_ids = params["general"]["session_ids"]
conditions = params["general"]["conditions"]
subjects_dir = params["general"]["subjects_dir"]
if "data_path" in params["general"].keys():
data_path = params["general"]["data_path"]
else:
data_path = op.expanduser("~")
print("data_path : %s" % data_path)
# source reconstruction
pprint.pprint({'inverse parameters': params["inverse"]})
events_id = params["inverse"]['events_id']
condition = params["inverse"]['condition']
t_min = params["inverse"]['tmin']
t_max = params["inverse"]['tmax']
spacing = params["inverse"]['spacing'] # oct-6
snr = params["inverse"]['snr']
inv_method = params["inverse"]['method'] # dSPM
parc = params["inverse"]['parcellation'] # aparc
trans_fname = op.join(data_path, params["inverse"]['trans_fname'])
###############################################################################
# Define functions
# ^^^^^^^^^^^^^^^^
# Hwew we define two different functions that will be encapsulated in two
# different nodes (:ref:`concat_event` and :ref:`morphing_node`).
# The ``run_events_concatenate`` function extracts events from the stimulus
# channel while ``compute_morph_stc`` morph the source estimates obtained by
# the :ref:`inv_solution_node` into the ``fsaverage`` brain.
def run_events_concatenate(list_ica_files, subject):
'''
The events are extracted from stim channel 'STI101'. The events are saved
to the Node directory.
For each subject, the different run are concatenated in one single raw file
and saved in the Node directory. We take the different run from the
preprocessing workflow directory, i.e. the cleaned raw data.
'''
print(subject, list_ica_files)
import os
import mne
# could be added in a node to come
mask = 4096 + 256 # mask for excluding high order bits
delay_item = 0.0345
min_duration = 0.015
print("processing subject: %s" % subject)
raw_list = list()
events_list = list()
fname_events_files = []
print(" Loading raw data")
for i, run_fname in enumerate(list_ica_files):
run = i + 1
raw = mne.io.read_raw_fif(run_fname, preload=True)
events = mne.find_events(raw, stim_channel='STI101',
consecutive='increasing', mask=mask,
mask_type='not_and',
min_duration=min_duration)
print(" S %s - R %s" % (subject, run))
fname_events = os.path.abspath('run_%02d-eve.fif' % run)
mne.write_events(fname_events, events)
fname_events_files.append(fname_events)
delay = int(round(delay_item * raw.info['sfreq']))
events[:, 0] = events[:, 0] + delay
events_list.append(events)
raw_list.append(raw)
raw, events = mne.concatenate_raws(raw_list, events_list=events_list)
raw.set_eeg_reference(projection=True)
raw_file = os.path.abspath('{}_sss_filt_dsamp_ica-raw.fif'.format(subject))
print(raw_file)
raw.save(raw_file, overwrite=True)
event_file = os.path.abspath(
'{}_sss_filt_dsamp_ica-raw-eve.fif'.format(subject))
mne.write_events(event_file, events)
del raw_list
del raw
return raw_file, event_file, fname_events_files
###############################################################################
###############################################################################
# Specify Nodes
# ^^^^^^^^^^^^^
# Infosource and Datasource
# """""""""""""""""""""""""
# We create an ``infosurce`` node to pass input filenames to
infosource = create_iterator(['subject_id'], [subject_ids])
###############################################################################
# the ``datasource`` node to grab data. The ``template_args`` in this node
# iterate upon the value in the infosource node
ica_dir = op.join(
data_path, 'preprocessing_dsamp_workflow', 'preproc_meg_dsamp_pipeline') # noqa
template_path = '*'
field_template = dict(
raw_file="_session_id_*_subject_id_%s/ica/%s*ses*_*filt*ica.fif", # noqa
trans_file='../../%s/ses-meg/meg/%s%s.fif')
template_args = dict(
raw_file=[['subject_id', 'subject_id']],
trans_file=[['subject_id', 'subject_id', "-trans"]])
datasource = create_datagrabber(ica_dir, template_path, template_args,
field_template=field_template,
infields=['subject_id'],
outfields=['raw_file', 'trans_file'])
###############################################################################
# .. _concat_event:
#
# Event Node
# """"""""""
# We define the Node that encapsulates ``run_events_concatenate`` function
concat_event = pe.Node(
Function(input_names=['list_ica_files', 'subject'],
output_names=['raw_file', 'event_file', 'fname_events_files'],
function=run_events_concatenate),
name='concat_event')
###############################################################################
# .. _inv_solution_node:
#
# Inverse solution Node
# """""""""""""""""""""
# Ephypype creates for us a pipeline to compute inverse solution which can be
# connected to the other nodes we created.
# The inverse solution pipeline is implemented by the function
# :func:`~ephypype.pipelines.preproc_meeg.create_pipeline_source_reconstruction`,
# thus to instantiate this pipeline node, we pass our parameters to it.
# Since we want to do source estimation in three different conditions
# (famous faces, unfamiliar faces and scrambled), we provide all information
# related to the events in the ``json`` file where we also specify as inverse
# method dSPM th
inv_sol_workflow = create_pipeline_source_reconstruction(
data_path, subjects_dir, spacing=spacing, inv_method=inv_method,
is_epoched=True, is_evoked=True, events_id=events_id, condition=condition,
t_min=t_min, t_max=t_max, all_src_space=True, parc=parc, snr=snr)
###############################################################################
# .. _morphing_node:
#
# Morphing Node
# """""""""""""
# The last Node we define encapsulates ``compute_morph_stc`` function.
morph_stc = pe.Node(
Function(input_names=['subject', 'conditions', 'cond_files', 'subjects_dir'], # noqa
output_names=['stc_morphed_files'],
function=compute_morph_stc),
name="morph_stc")
morph_stc.inputs.conditions = conditions
morph_stc.inputs.subjects_dir = subjects_dir
###############################################################################
# Create workflow
# ^^^^^^^^^^^^^^^
# Then, we can create our workflow and specify the ``base_dir`` which tells
# nipype the directory in which to store the outputs.
src_reconstruction_pipeline_name = 'source_dsamp_full_reconstruction_' + \
inv_method + '_' + parc.replace('.', '')
main_workflow = pe.Workflow(name=src_reconstruction_pipeline_name)
main_workflow.base_dir = data_path
###############################################################################
# Connect Nodes
# ^^^^^^^^^^^^^
# Finally, we connect the nodes two at a time. First, we connect the
# output (``subject_id``) of the ``infosource`` node to the input of
# ``datasource`` node. So, these two nodes taken together can grab data.
main_workflow.connect(infosource, 'subject_id', datasource, 'subject_id')
###############################################################################
# Now we connect their outputs to the input of and their connections to the
# input (``list_ica_files``, ``subject``) of :ref:`concat_event`.
main_workflow.connect(datasource, ('raw_file', show_files),
concat_event, 'list_ica_files')
main_workflow.connect(infosource, 'subject_id', concat_event, 'subject')
###############################################################################
# The output of ``infosource``, ``datasource`` and ``concat_event`` are the
# inputs of ``inv_sol_workflow``, thus we connect these nodes two at time.
main_workflow.connect(infosource, ('subject_id', show_files),
inv_sol_workflow, 'inputnode.sbj_id')
main_workflow.connect(datasource, 'trans_file',
inv_sol_workflow, 'inputnode.trans_file')
main_workflow.connect(concat_event, ('raw_file', show_files),
inv_sol_workflow, 'inputnode.raw')
main_workflow.connect(concat_event, ('event_file', show_files),
inv_sol_workflow, 'inputnode.events_file')
###############################################################################
# Finally, we connect ``infosource`` and ``inv_sol_workflow`` to
# ``morph_stc``.
main_workflow.connect(infosource, 'subject_id', morph_stc, 'subject')
main_workflow.connect(inv_sol_workflow, 'inv_solution.stc_files',
morph_stc, 'cond_files')
###############################################################################
# Run workflow
# ^^^^^^^^^^^^
# Now, we are now ready to execute our workflow.
main_workflow.write_graph(graph2use='colored') # colored
main_workflow.config['execution'] = {'remove_unnecessary_outputs': 'false'}
main_workflow.run(plugin='LegacyMultiProc', plugin_args={'n_procs': NJOBS})
###############################################################################
# Results
# ^^^^^^^
# The output are the reconstructed neural time series morphed to the standard
# FreeSurfer average subject named fsaverage.The output is stored in the
# workflow dir defined by ``base_dir``. To plot the estimated source timeseries
# we can use :ref:`plot_stc`.
| [
37811,
198,
492,
4808,
29487,
62,
31534,
62,
259,
4399,
25,
198,
198,
4770,
25609,
198,
3070,
13,
3082,
1133,
34062,
4610,
198,
4770,
25609,
198,
1212,
30798,
8384,
869,
262,
198,
25,
5420,
25,
63,
259,
4399,
4610,
11523,
1279,
10459,... | 2.984708 | 3,989 |
import os
import numpy as np
import torch
import yaml
class YAMLParser:
""" YAML parser for optical flow and image reconstruction config files """
@property
@config.setter
@property
@property
@staticmethod
| [
11748,
28686,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
331,
43695,
628,
198,
4871,
575,
2390,
19930,
28198,
25,
198,
220,
220,
220,
37227,
575,
2390,
43,
30751,
329,
18480,
5202,
290,
2939,
25056,
4566,
36... | 3.090909 | 77 |
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from networkapi.admin_permission import AdminPermission
from networkapi.ambiente.models import Ambiente
from networkapi.ambiente.models import AmbienteError
from networkapi.auth import has_perm
from networkapi.exception import InvalidValueError
from networkapi.infrastructure.xml_utils import dumps_networkapi
from networkapi.rest import RestResource
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
393,
517,
198,
2,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
9387,
351,
198,
2,
428,
67... | 3.989655 | 290 |
#!/usr/bin/env python
import argparse
import concurrent.futures
import json
import logging
import math
import os
import pathlib
import subprocess
import time
from typing import TextIO, Dict
import cv2
import vidstab
import decord
import natsort
import numpy as np
import pandas as pd
import tqdm
def extract_frames(dest_dir: pathlib.Path, video_path: pathlib.PurePath, root: pathlib.PurePath, time_f: int,
ori_images_txt: TextIO, ctx) -> pathlib.Path:
"""
:param dest_dir: Directory where the extracted frames will be stored.
:param video_path: The absolute path of the video.
:param root: Directory containing the videos to be processed.
:param time_f: Time frequency.
:param ori_images_txt: File object where the frames of `video_path` are stored.
:param ctx: The context to decode the video file.
:return: Directory containing the stored frames of `video_path`
"""
pic_path = dest_dir / video_path.relative_to(root).with_suffix("")
pic_path.mkdir(parents=True, exist_ok=True)
try:
vr = decord.VideoReader(os.fspath(video_path), ctx=ctx)
vr.skip_frames(time_f)
vr.seek(time_f - 1)
size = len(vr)
frames_indices = range(time_f - 1, size, time_f)
for c in frames_indices:
img_path = os.fspath(pic_path / (str(c + 1) + '.jpg'))
cv2.imwrite(img_path, vr.next().asnumpy())
ori_images_txt.write(img_path + "\n")
except decord.DECORDError:
vc = cv2.VideoCapture(os.fspath(video_path))
if vc.isOpened():
c = 1
while vc.grab():
if c % time_f == 0:
img_path = os.fspath(pic_path / (str(c) + '.jpg'))
_, frame = vc.retrieve()
cv2.imwrite(img_path, frame)
ori_images_txt.write(img_path + "\n")
c += 1
cv2.waitKey(1)
vc.release()
return pic_path
def stabilize_frames(dest_dir: pathlib.Path) -> bool:
"""
:param dest_dir: Directory where the processed frames are stored.
:return: True if success
"""
files = natsort.natsorted(dest_dir.glob("*.jpg"), alg=natsort.ns.PATH)
stabilizer = vidstab.VidStab()
success = False
for f in files:
frame = cv2.imread(os.fspath(f))
frame = stabilizer.stabilize_frame(frame)
cv2.imwrite(os.fspath(f), frame)
success = True
return success
def stabilize_video(stabilizer: vidstab.VidStab, dest_dir: pathlib.Path, video_path: pathlib.PurePath,
root: pathlib.PurePath) -> pathlib.Path:
"""
:param stabilizer: VidStab object with stabilize method.
:param dest_dir: Directory where the stabilized video will be stored.
:param video_path: Video to stabilize
:param root: Directory with the source video to stabilize.
:return: Video path of the stabilized video
"""
logger = get_logger()
vid_path = dest_dir / video_path.relative_to(root)
vid_path.parent.mkdir(parents=True, exist_ok=True)
logger.debug(f"Printing the vid_path {os.fspath(vid_path.with_suffix('avi'))}")
stabilizer.stabilize(input_path=os.fspath(video_path), output_path=os.fspath(vid_path.with_suffix("mp4")))
return vid_path.with_suffix("mp4")
if __name__ == "__main__":
main(parse_cli().parse_args())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
1822,
29572,
198,
11748,
24580,
13,
69,
315,
942,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
10688,
198,
11748,
28686,
198,
11748,
3108,
8019,
198,
11748,
850,
14681,
198,
... | 2.295995 | 1,473 |
import asyncio
from decimal import Decimal
from typing import Awaitable, Optional
from unittest import TestCase
from hummingbot.connector.exchange.bitfinex.bitfinex_exchange import BitfinexExchange
from hummingbot.core.data_type.common import OrderType, TradeType
from hummingbot.core.data_type.trade_fee import TokenAmount
from hummingbot.core.event.event_logger import EventLogger
from hummingbot.core.event.events import BuyOrderCompletedEvent, MarketEvent, OrderFilledEvent
| [
11748,
30351,
952,
198,
6738,
32465,
1330,
4280,
4402,
198,
6738,
19720,
1330,
5851,
4548,
540,
11,
32233,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
6738,
41465,
13645,
13,
8443,
273,
13,
1069,
3803,
13,
2545,
38125,
87,
... | 3.692308 | 130 |
import time
import file
from adb import By
# 找到需要打开的微信
| [
11748,
640,
198,
11748,
2393,
198,
6738,
512,
65,
1330,
2750,
628,
198,
220,
220,
220,
1303,
10545,
231,
122,
26344,
108,
165,
250,
222,
17358,
223,
33699,
241,
28156,
222,
21410,
36181,
106,
46479,
94,
198
] | 1.648649 | 37 |
# Copyright (c) 2016, Kate Fox
# All rights reserved.
#
# This file is covered by the 3-clause BSD license.
# See the LICENSE file in this program's distribution for details.
import zipfile, os.path
from os import walk
from sys import argv
if __name__ == "__main__":
main (argv[1], argv[2])
| [
2,
15069,
357,
66,
8,
1584,
11,
16693,
5426,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
770,
2393,
318,
5017,
416,
262,
513,
12,
565,
682,
347,
10305,
5964,
13,
198,
2,
4091,
262,
38559,
24290,
2393,
287,
428,
1430,
338,
608... | 3.061856 | 97 |
from scapy.layers.bluetooth import *
| [
6738,
629,
12826,
13,
75,
6962,
13,
65,
2290,
16271,
1330,
1635,
198
] | 2.846154 | 13 |