content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import logging
from abc import ABC, abstractmethod
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import KFold
from scipy.stats import pearsonr
import matplotlib.pyplot as plt
class Predictor(ABC):
"""
TODO:
"""
@abstractmethod
@abstractmethod
| [
11748,
18931,
198,
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
4164,
10466,
1330,
1612,
62,
16485,
1144,
62,
18224,
198,
6738,
1341,
35720,
13,
4164,
10466,
1330,
161... | 3.016529 | 121 |
import pytest
from odoo import exceptions
from pytest_tr_odoo.fixtures import env
from pytest_tr_odoo import utils
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
'''
openacademy.openacademy
'''
@pytest.mark.parametrize('test_input,expected', [
({'first_name': 'Kyle', 'last_name': 'Bogan', 'value': 10}, 'Kyle Bogan'),
({'first_name': 'Nickolas', 'last_name': 'Pacocha', 'value': 2},
'Nickolas Pacocha'),
({'first_name': 'Keon', 'last_name': 'Lemke', 'value': 4}, 'Keon Lemke')
])
@pytest.mark.parametrize('test_input,expected', [
({'first_name': 'Kyle', 'last_name': 'Bogan', 'value': 10, 'repeat': 0},
'Copy of Bogan'),
({'first_name': 'Kyle', 'last_name': 'Bogan', 'value': 4, 'repeat': 1},
'Copy of Bogan (1)'),
({'first_name': 'Kyle', 'last_name': 'Bogan', 'value': 1, 'repeat': 2},
'Copy of Bogan (2)'),
({'first_name': 'Dino', 'last_name': 'Green', 'value': 7, 'repeat': 0},
'Copy of Green'),
({'first_name': 'Aliza', 'last_name': 'Green', 'value': 4, 'repeat': 1},
'Copy of Green (1)'),
({'first_name': 'Hugh', 'last_name': 'Green', 'value': 32, 'repeat': 2},
'Copy of Green (2)')
])
@pytest.mark.parametrize('test_input,expected', [
({'first_name': 'Kyle', 'last_name': 'Bogan', 'value': 10},
{'first_name': 'Kyle', 'last_name': 'Copy of Bogan', 'value': 10}),
({'first_name': 'Cheyenne', 'last_name': 'Erdman', 'value': 2},
{'first_name': 'Cheyenne', 'last_name': 'Copy of Erdman', 'value': 2})
])
'''
openacademy.session
'''
@pytest.mark.parametrize('test_input,expected', [
({'name': 'bypass', 'start_date': '2020-01-01'},
'2020-01-01'),
({'name': 'Lead', 'start_date': '2020-01-01', 'duration': 1},
'2020-01-01'),
({'name': 'Loan', 'start_date': '2020-01-01', 'duration': 2},
'2020-01-02'),
({'name': 'Direct', 'start_date': '2020-01-01', 'duration': 10},
'2020-01-10'),
])
@pytest.mark.parametrize('test_input,expected', [
({'name': 'Plastic', 'start_date': False, 'end_date': '2020-01-01'},
False),
({'name': 'Bedfordshire', 'start_date': '2020-01-01',
'end_date': '2020-01-01'},
1),
({'name': 'Bedfordshire', 'start_date': '2020-01-01',
'end_date': '2020-01-02'},
2),
({'name': 'Bedfordshire', 'start_date': '2020-01-01',
'end_date': '2020-01-10'},
10),
])
@pytest.mark.parametrize('test_input,expected', [
({'name': 'Plastic', 'seats': -10,
'attendee_ids': [(0, 0,
{'name': 'Grant Ritchie',
'email': 'liana46@gmail.com'})]},
{'warning': {
'title': 'Incorrect \'seats\' value',
'message': 'The number of available seatsmay not be negative'
}}),
({'name': 'Plastic', 'seats': 0,
'attendee_ids': [(0, 0,
{'name': 'Grant Ritchie',
'email': 'liana46@gmail.com'})]},
{'warning': {
'title': 'Too many attendees',
'message': 'Increase seats or remove excess attendees'
}}),
({'name': 'Plastic', 'seats': 1,
'attendee_ids': [(0, 0,
{'name': 'Grant Ritchie',
'email': 'liana46@gmail.com'})]},
False),
({'name': 'Plastic', 'seats': 12,
'attendee_ids': [(0, 0,
{'name': 'Grant Ritchie',
'email': 'liana46@gmail.com'})]},
False),
])
@pytest.mark.parametrize('test_input,expected', [
({'name': 'Plastic', 'seats': 12,
'attendee_ids': []}, 'A session\'s instructor can\'t be an attendee')
])
| [
11748,
12972,
9288,
198,
6738,
16298,
2238,
1330,
13269,
198,
6738,
12972,
9288,
62,
2213,
62,
375,
2238,
13,
69,
25506,
1330,
17365,
198,
6738,
12972,
9288,
62,
2213,
62,
375,
2238,
1330,
3384,
4487,
628,
198,
31,
9078,
9288,
13,
69,... | 2.114602 | 1,719 |
# Generated by Django 2.2.8 on 2020-04-13 03:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
23,
319,
12131,
12,
3023,
12,
1485,
7643,
25,
2598,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14... | 3.019231 | 52 |
__all__ = ['get_fundamentus','print_csv']
from fundamentus import get_fundamentus
from fundamentus import print_csv
| [
198,
834,
439,
834,
796,
37250,
1136,
62,
10990,
3263,
385,
41707,
4798,
62,
40664,
20520,
198,
198,
6738,
14387,
385,
220,
1330,
651,
62,
10990,
3263,
385,
198,
6738,
14387,
385,
220,
1330,
3601,
62,
40664,
628,
198
] | 3.128205 | 39 |
"""
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
from abc import ABC, abstractmethod
class ZuulAPIError(Exception):
"""Represents an error occurring while performing a call to Zuul's API
"""
class ZuulJobAPI(ABC):
"""Interface which defines the information that can be retrieved from
Zuul regarding a particular job.
"""
def __init__(self, tenant, job):
"""Constructor.
:param tenant: Tenant this job belongs to.
:type tenant: :class:`ZuulTenantAPI`
:param job: Description of the job being consulted by this
API. At least a field called 'name' providing the name
of the job is required here.
:type job: dict
"""
self._tenant = tenant
self._job = job
@property
def tenant(self):
"""
:return: The tenant this job belongs to.
:rtype: :class:`ZuulTenantAPI`
"""
return self._tenant
@property
def name(self):
"""
:return: Name of the job being consulted.
:rtype: str
"""
return self._job['name']
@abstractmethod
def builds(self):
"""
:return: The builds of this job.
:rtype: list[dict]
:raises ZuulAPIError: If the request failed.
"""
raise NotImplementedError
class ZuulTenantAPI(ABC):
"""Interface which defines the information that can be retrieved from
Zuul regarding a particular tenant.
"""
def __init__(self, tenant):
"""Constructor.
:param tenant: Description of the tenant being consulted by this
API. At least a field called 'name' providing the name
of the tenant is required here.
:type tenant: dict
"""
self._tenant = tenant
@property
def name(self):
"""
:return: Name of the tenant being consulted.
:rtype: str
"""
return self._tenant['name']
@abstractmethod
def builds(self):
"""A build is an instance of a job running independently.
:return: Information about all executed builds under this tenant.
:rtype: list[dict]
:raises ZuulAPIError: If the request failed.
"""
raise NotImplementedError
@abstractmethod
def buildsets(self):
"""A buildset is a collection of builds running under a common context.
:return: Information about all executed buildsets under this tenant.
:rtype: list[dict]
:raises ZuulAPIError: If the request failed.
"""
raise NotImplementedError
@abstractmethod
def jobs(self):
"""A job describes the steps that need to be taken in order to test
a project.
:return: Information about all jobs under this tenant.
:rtype: list[:class:`ZuulJobAPI`]
:raises ZuulAPIError: If the request failed.
"""
raise NotImplementedError
class ZuulAPI(ABC):
"""Interface describing the actions that can be taken over Zuul's API.
"""
@abstractmethod
def info(self):
"""Information which define the target host. Among this info there
are entries such as 'capabilities' or 'authentication' param.
:return: General information about the host.
:rtype: dict
:raises ZuulAPIError: If the request failed.
"""
raise NotImplementedError
@abstractmethod
def tenants(self):
"""Gets all tenants currently present on the host.
:return: A sub-api to retrieve information about all tenants on the
host.
:rtype: list[:class:`ZuulTenantAPI`]
:raises ZuulAPIError: If the request failed.
"""
raise NotImplementedError
| [
37811,
198,
2,
220,
220,
220,
15069,
33160,
2297,
10983,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
284... | 2.584283 | 1,667 |
import webbrowser, os, tempfile, io, sys, time
import glob, shutil
import warnings
warnings.simplefilter('ignore')
import flask
from flask import Flask, escape, request
import processing
#need to import all the packages here in the main file because of dill-ed ipython model
import tensorflow as tf
import tensorflow.keras as keras
import numpy as np
arange = np.arange
import skimage.io as skio
import skimage.morphology as skmorph
import skimage.util as skimgutil
import PIL
PIL.Image.MAX_IMAGE_PIXELS = None #Needed to open large images
app = Flask('DigIT! Root Detector', static_folder=os.path.abspath('./HTML'))
is_debug = sys.argv[0].endswith('.py')
if os.environ.get("WERKZEUG_RUN_MAIN") == "true" or not is_debug:
TEMPPREFIX = 'root_detector_'
TEMPFOLDER = tempfile.TemporaryDirectory(prefix=TEMPPREFIX)
print('Temporary Directory: %s'%TEMPFOLDER.name)
#delete all previous temporary folders if not cleaned up properly
for tmpdir in glob.glob( os.path.join(os.path.dirname(TEMPFOLDER.name), TEMPPREFIX+'*') ):
if tmpdir != TEMPFOLDER.name:
print('Removing ',tmpdir)
shutil.rmtree(tmpdir)
@app.route('/')
@app.route('/static/<path:path>')
@app.route('/file_upload', methods=['POST'])
@app.route('/images/<imgname>')
@app.route('/process_image/<imgname>')
@app.route('/processing_progress/<imgname>')
@app.route('/delete_image/<imgname>')
@app.route('/settings', methods=['GET', 'POST'])
if os.environ.get("WERKZEUG_RUN_MAIN") == "true" or not is_debug:
with app.app_context():
processing.init()
if not is_debug:
print('Flask started')
webbrowser.open('http://localhost:5000', new=2)
app.run(host='127.0.0.1',port=5000, debug=is_debug)
| [
11748,
3992,
40259,
11,
28686,
11,
20218,
7753,
11,
33245,
11,
25064,
11,
640,
201,
198,
11748,
15095,
11,
4423,
346,
201,
198,
11748,
14601,
201,
198,
40539,
654,
13,
36439,
24455,
10786,
46430,
11537,
201,
198,
201,
198,
11748,
42903,... | 2.302096 | 811 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import shutil
import logging
import re
from datetime import datetime
from vilya.config import DOMAIN, DEVELOP_MODE
from vilya.libs import gyt
from vilya.libs.permdir import get_repo_root
from vilya.libs.text import format_md_or_rst
from vilya.libs.store import store, mc, cache, ONE_DAY, IntegrityError
from vilya.libs.props import PropsMixin
from vilya.libs.validators import check_project_name
from vilya.libs.signals import (
repo_create_signal, repo_watch_signal, repo_fork_signal)
from vilya.models.hook import CodeDoubanHook
from vilya.models.git import GitRepo
from vilya.models.ngit.repo import ProjectRepo
from vilya.models.user import User
from vilya.models.inbox import Inbox
from vilya.models.consts import (
PROJECT_BC_KEY, MIRROR_HTTP_PROXY, NOTIFY_ON, PERM_PUSH, PERM_ADMIN)
from vilya.models.project_conf import make_project_conf
from vilya.models.utils import linear_normalized
from vilya.models.project_issue import ProjectIssue
from vilya.models.tag import TagMixin, TAG_TYPE_PROJECT_ISSUE
from vilya.models.release import get_unreleased_commit_num
from vilya.models.lru_counter import (
ProjectOwnLRUCounter, ProjectWatchLRUCounter)
from vilya.models.milestone import Milestone
from vilya.models.utils.switch import WhiteListSwitch
from ellen.utils import JagareError
from vilya.models.nproject import ProjectWatcher
MCKEY_PROJECT = 'code:project:%s:v2'
MCKEY_PROJECT_ID_BY_NAME = 'code:project_id:name:%s'
MCKEY_PROJECT_IDS_BY_OWNER_SORTBY_SUMUP = 'code:project_ids:sumup:%s'
PROPS_LANGUAGE_KEY = 'language'
PROPS_LANGUAGES_KEY = 'languages'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
18931,
198,
11748,
302,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,... | 2.75793 | 599 |
### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
bl_info = {
"name": "Data Overrides",
"author": "Lukas Toenne",
"version": (0, 1),
"blender": (2, 73, 0),
"location": "Scene Properties",
"description": "Override settings and caching for linked objects",
"warning": "",
"wiki_url": "",
"tracker_url": "https://developer.blender.org/maniphest/task/edit/form/2/",
"category": "Object",
}
import bpy
from data_overrides import override, ui
if __name__ == "__main__":
register()
| [
21017,
347,
43312,
38644,
38559,
24290,
9878,
11290,
46424,
198,
2,
198,
2,
220,
770,
1430,
318,
1479,
3788,
26,
345,
460,
17678,
4163,
340,
290,
14,
273,
198,
2,
220,
13096,
340,
739,
262,
2846,
286,
262,
22961,
3611,
5094,
13789,
... | 3.121359 | 412 |
import torch
from torch.utils import data
import numpy as np
import matplotlib.pyplot as plt
import os
import pickle
from pathlib import Path
import argparse
import convolutional_model as cm
import skimage as ski
import math
DATA_DIR = default_data_dir = Path(__file__).parent / 'data' / 'cifar-10-batches-py'
img_height = 32
img_width = 32
num_channels = 3
num_classes = 10
train_x = np.ndarray((0, img_height * img_width * num_channels), dtype=np.float32)
train_y = []
for i in range(1, 6):
subset = unpickle(os.path.join(DATA_DIR, 'data_batch_%d' % i))
train_x = np.vstack((train_x, subset['data']))
train_y += subset['labels']
train_x = train_x.reshape((-1, num_channels, img_height, img_width)).transpose(0, 2, 3, 1)
train_y = np.array(train_y, dtype=np.long)
subset = unpickle(os.path.join(DATA_DIR, 'test_batch'))
test_x = subset['data'].reshape((-1, num_channels, img_height, img_width)).transpose(0, 2, 3, 1).astype(np.float32)
test_y = np.array(subset['labels'], dtype=np.long)
valid_size = 5000
train_x, train_y = shuffle_data(train_x, train_y)
valid_x = train_x[:valid_size, ...]
valid_y = train_y[:valid_size, ...]
train_x = train_x[valid_size:, ...]
train_y = train_y[valid_size:, ...]
data_mean = train_x.mean((0, 1, 2))
data_std = train_x.std((0, 1, 2))
train_x = (train_x - data_mean) / data_std
valid_x = (valid_x - data_mean) / data_std
test_x = (test_x - data_mean) / data_std
train_x = torch.from_numpy(train_x.transpose(0, 3, 1, 2))
valid_x = torch.from_numpy(valid_x.transpose(0, 3, 1, 2))
test_x = torch.from_numpy(test_x.transpose(0, 3, 1, 2))
train_y = torch.from_numpy(train_y)
valid_y = torch.from_numpy(valid_y)
test_y = torch.from_numpy(test_y)
# =================== ARGS stuff =================
args = cm.parse_arguments()
config = vars(args)
# =================== MODEL stuff =================
model = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5, stride=1, padding=2, padding_mode='replicate'),
torch.nn.ReLU(),
torch.nn.MaxPool2d(kernel_size=3, stride=2),
torch.nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2, padding_mode='replicate'),
torch.nn.ReLU(),
torch.nn.MaxPool2d(kernel_size=3, stride=2),
torch.nn.Flatten(start_dim=1, end_dim=-1),
torch.nn.Linear(in_features=1568, out_features=256, bias=True),
torch.nn.ReLU(),
torch.nn.Linear(in_features=256, out_features=128, bias=True),
torch.nn.ReLU(),
torch.nn.Linear(in_features=128, out_features=10, bias=True)
)
optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=args.gamma)
loss = torch.nn.CrossEntropyLoss()
results = train(train_x, train_y, valid_x, valid_y, model, loss, optimizer, scheduler, config)
lrs, train_losses, avg_train_accuracies, valid_losses, avg_valid_accuracies = results
epochs = np.arange(0, len(lrs))
fig, (ax1, ax2, ax3) = plt.subplots(3)
ax1.plot(epochs, lrs, label='learning rate')
ax1.set_title('Learning rate')
ax1.legend()
ax2.plot(epochs, train_losses, label='train')
ax2.plot(epochs, valid_losses, label='validation')
ax2.set_title('Cross-entropy loss')
ax2.legend()
ax3.plot(epochs, avg_train_accuracies, label='train')
ax3.plot(epochs, avg_valid_accuracies, label='validation')
ax3.set_title('Average class accuracy')
ax3.legend()
plt.show()
| [
11748,
28034,
198,
6738,
28034,
13,
26791,
1330,
1366,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
28686,
198,
11748,
2298,
293,
198,
6738,
3108,
8019,
1330,
10644,
198,
... | 2.452143 | 1,400 |
# Generated by Django 3.1.7 on 2021-05-07 11:20
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
22,
319,
33448,
12,
2713,
12,
2998,
1367,
25,
1238,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
if __name__ == "__main__":
s = Stack()
s.push(1)
s.push(2)
print(s.top())
s.push(5)
print(s.size())
print(s.toString())
s.pop()
print(s.toString())
print(s.size())
| [
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
264,
796,
23881,
3419,
198,
220,
220,
220,
264,
13,
14689,
7,
16,
8,
198,
220,
220,
220,
264,
13,
14689,
7,
17,
8,
198,
220,
220,
220,
3601,
7,
... | 1.863636 | 110 |
# (C) Copyright 2017, 2019 by Rocky Bernstein
"""
CPython 2.3 bytecode opcodes
This is a like Python 2.3's opcode.py with some classification
of stack usage.
"""
import xdis.opcodes.opcode_2x as opcode_2x
from xdis.opcodes.base import (
finalize_opcodes,
format_extended_arg,
init_opdata,
update_pj2,
)
version = 2.3
l = locals()
init_opdata(l, opcode_2x, version)
update_pj2(globals(), l)
opcode_arg_fmt = {"EXTENDED_ARG": format_extended_arg}
finalize_opcodes(l)
| [
2,
357,
34,
8,
15069,
2177,
11,
13130,
416,
24534,
37584,
198,
37811,
198,
8697,
7535,
362,
13,
18,
18022,
8189,
1034,
40148,
198,
198,
1212,
318,
257,
588,
11361,
362,
13,
18,
338,
1034,
8189,
13,
9078,
351,
617,
17923,
198,
1659,
... | 2.452261 | 199 |
from datetime import datetime, timezone
from main import models,statics
| [
6738,
4818,
8079,
1330,
4818,
8079,
11,
640,
11340,
198,
198,
6738,
1388,
1330,
4981,
11,
14269,
873,
628,
628,
628,
628,
198
] | 3.521739 | 23 |
# HackerRank "Write a function" Leap Year challenge
# https://www.hackerrank.com/challenges/write-a-function/problem
is_leap(1990)
| [
2,
34399,
27520,
366,
16594,
257,
2163,
1,
33927,
6280,
4427,
198,
2,
3740,
1378,
2503,
13,
31153,
8056,
962,
13,
785,
14,
36747,
34120,
14,
13564,
12,
64,
12,
8818,
14,
45573,
628,
198,
198,
271,
62,
293,
499,
7,
19891,
8,
198
] | 3.045455 | 44 |
import json
import threading
import time
from functools import wraps
from uuid import UUID
from typing import AnyStr
def format_response(resp):
"""
Returns a str formatted response
:param resp: Requests response
:return: response text as a string, formatted as a json if valid
"""
try:
error_msg = format_str(resp.json(), is_json=True)
except ValueError: # requests returns a ValueError when resp.text is not a valid json
error_msg = format_str(resp.text, is_json=False)
return error_msg
def format_str(str_value, is_json):
"""
Returns a formatted string with break lines; if is_json True, pretty format the output
:param str_value: plain text or json value
:param is_json: Boolean
:return: str
"""
str_value = json.dumps(str_value, indent=4, sort_keys=True) if is_json else str_value
return '\n {} \n'.format(str_value)
def is_json(str_value):
"""A function to check if a string contains a valid json"""
try:
json.loads(str_value)
except ValueError:
return False
return True
def rate_limited(max_per_second: int):
"""
Rate-limits the decorated function locally, for one process.
source: https://gist.github.com/gregburek/1441055#gistcomment-945625
"""
lock = threading.Lock()
min_interval = 1.0 / max_per_second
return decorate
def synchronized(lock):
""" Synchronization decorator. """
return wrap
| [
11748,
33918,
198,
11748,
4704,
278,
198,
11748,
640,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
6738,
334,
27112,
1330,
471,
27586,
198,
6738,
19720,
1330,
4377,
13290,
628,
198,
4299,
5794,
62,
26209,
7,
4363,
2599,
198,
220,
220,... | 2.780718 | 529 |
# Easy
# https://leetcode.com/problems/maximum-depth-of-binary-tree/
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
# Time Complexity : O(N)
# Space Complexity : O(N) | [
2,
16789,
198,
2,
3740,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
47033,
12,
18053,
12,
1659,
12,
39491,
12,
21048,
14,
198,
198,
2,
30396,
329,
257,
13934,
5509,
10139,
13,
198,
2,
1398,
12200,
19667,
25,
198,
2,
220,
... | 2.371212 | 132 |
from enum import Enum
class ItemAttributeId(Enum):
"""
Derived from makemkvgui/inc/lgpl/apdefs.h
"""
Unknown = 0
Type = 1
Name = 2
LangCode = 3
LangName = 4
CodecId = 5
CodecShort = 6
CodecLong = 7
ChapterCount = 8
Duration = 9
DiskSize = 10
DiskSizeBytes = 11
StreamTypeExtension = 12
BitRate = 13
AudioChannelsCount = 14
AngleInfo = 15
SourceFileName = 16
AudioSampleRate = 17
AudioSampleSize = 18
VideoSize = 19
VideoAspectRatio = 20
VideoFrameRate = 21
StreamFlags = 22
DateTime = 23
OriginalTitleId = 24
SegmentsCount = 25
SegmentsMap = 26
OutputFileName = 27
MetadataLanguageCode = 28
MetadataLanguageName = 29
TreeInfo = 30
PanelTitle = 31
VolumeName = 32
OrderWeight = 33
OutputFormat = 34
OutputFormatDescription = 35
SeamlessInfo = 36
PanelText = 37
MkvFlags = 38
MkvFlagsText = 39
AudioChannelLayoutName = 40
OutputCodecShort = 41
OutputConversionType = 42
OutputAudioSampleRate = 43
OutputAudioSampleSize = 44
OutputAudioChannelsCount = 45
OutputAudioChannelLayoutName = 46
OutputAudioChannelLayout = 47
OutputAudioMixDescription = 48
Comment = 49
OffsetSequenceId = 50
| [
6738,
33829,
1330,
2039,
388,
628,
198,
4871,
9097,
33682,
7390,
7,
4834,
388,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
9626,
1572,
422,
787,
28015,
85,
48317,
14,
1939,
14,
75,
70,
489,
14,
499,
4299,
82,
13,
71,
198,... | 2.59481 | 501 |
from difflib import get_close_matches
from .squad_ids import SQUAD_IDS
def _parse_squad_name(team_id):
"""
Parse and clean the team's name.
To try and match requested team names with the master squad ID list, passed
names should be parsed to remove the common 'FC' and 'CF' tags, as well as
force all strings to be lowercase and excess whitespace removed.
Parameters
----------
team_id : string
The requested team's name to be parsed.
Returns
-------
string
Returns a ``string`` of the parsed team's name.
"""
irrelevant = [' FC', ' CF', 'FC ', 'CF ']
for val in irrelevant:
team_id = team_id.replace(val, '')
name = team_id.lower().strip()
return name
def lookup_squad_id(name, quiet=False):
"""
Attempt to match a team name with a squad ID.
A simple utility to make it easier to find squad IDs given a team name.
By supplying a team name, this function will return the squad ID if a
match can be found, or return a dictionary of the top 5 closest teams if a
match cannot be made. For example, specifying 'Tottenham Hotspur' will
return Tottenham's squad ID of '361ca564'. However, specifying 'Tottenham'
doesn't technically match an official team name, and the closest matches
will be returned instead, with Tottenham Hotspur being the first result.
Due to the massive number of teams listed on fbref.com, the incorrect team
could be accidently pulled by what appears to be the proper name. For
example, 'Barcelona' is the name of one of the largest clubs in the world,
located in Barcelona, Spain. However, 'Barcelona' could also refer to
Barcelona Sporting Club (commonly referred to as just 'Barcelona' locally)
who competes in the Ecuadorian Serie A. By using the squad ID, the intended
team is guaranteed to be used.
This helper function does not rely on case for the words, so 'Tottenham
Hotspur' will return the same result as 'tottenham hotspur'. Also, common
tags such as 'FC' and 'CF' are removed, so there is no need to specify
those components.
In the case a match can't be made, a dictionary of suggestions will be
returned instead of the squad ID. The dictionary is intended to be used
to find the best alternatives for later use. The keys are the suggested
names and values are the squad IDs. This allows direct usage of a squad ID
in subsequent calls to various classes in the Football module in
sportsipy instead of attempting to lookup a name. As there can be
multiple return types, it is recommended to check the type of the returned
value before further calculations. If the return is of type ``string``, it
is the 8-digit squad ID. If it is of type ``dictionary``, it is a key-value
object containing suggestions.
Parameters
----------
name : string
A ``string`` of the name of a squad to lookup, such as 'Tottenham
Hotspur'.
quiet : boolean
A ``boolean`` value which suppresses text output while True.
Returns
-------
string or dictionary
Returns a ``string`` of the squad's 8-digit ID if a match could be
found for the requested team. If a match could not be found, a
``dictionary`` is returned with the key-value pairs for the top 5
closest teams as keys and their respective IDs as values.
"""
filtered_name = _parse_squad_name(name)
if filtered_name in SQUAD_IDS:
return SQUAD_IDS[filtered_name]
closest_matches = get_close_matches(filtered_name, SQUAD_IDS.keys(), 5)
squad_match_ids = {}
output = 'Exact match not found - Printing closest matches:\n'
print(closest_matches)
for team in closest_matches:
output += team.title() + ' - ' + SQUAD_IDS[team] + '\n'
squad_match_ids[team.title()] = SQUAD_IDS[team]
if not quiet:
print(output)
return squad_match_ids
def _lookup_team(team_id):
"""
Find the squad ID for the requested team.
Every team on fbref.com has its own unique squad ID, which is a 8-digit
code containing alphanumeric numbers. The user can either supply the
8-digit code as-is, or provide the team's full name. If the squad ID is
provided and matches a master list of IDs, the squad ID will be returned
as-is for later use in the class. If the name is passed, it will first be
parsed to try and match the team with a team in the master squad ID list.
If no squad is found, an error will be raised indicating the requested team
cannot be found.
Parameters
----------
team_id : string
A ``string`` of either the team's ID or the name of the team.
Returns
-------
string
Returns a ``string`` of the squad's 8-digit ID.
"""
if team_id.lower() in SQUAD_IDS.values():
return team_id.lower()
name = lookup_squad_id(team_id)
if type(name) == str:
return name
error_message = ('Team ID of "%s" not found. Did you mean one of the '
'following?\n%s' % (team_id, name))
raise ValueError(error_message)
| [
6738,
814,
8019,
1330,
651,
62,
19836,
62,
6759,
2052,
198,
6738,
764,
16485,
324,
62,
2340,
1330,
45880,
2885,
62,
14255,
628,
198,
4299,
4808,
29572,
62,
16485,
324,
62,
3672,
7,
15097,
62,
312,
2599,
198,
220,
220,
220,
37227,
19... | 3.074538 | 1,677 |
#!/usr/bin/env python3
from __future__ import unicode_literals
import cgi
import io
import os
import socket
import unittest
import sys
try:
from http.server import BaseHTTPRequestHandler, HTTPServer
except ImportError:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
try:
from urllib.parse import urlparse, parse_qs
except ImportError:
from urlparse import urlparse, parse_qs
_WVS_ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(_WVS_ROOT_DIR)
import webvulnscan
sitemap = {}
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
269,
12397,
198,
11748,
33245,
198,
11748,
28686,
198,
11748,
17802,
198,
11748,
555,
715,
395,
198,
1174... | 2.810427 | 211 |
import airbnb
import airbnb_secrets
import csv
items=1
output_filename="airbnb_chapelhill.csv"
# Set CSV Header & line format
csv_header = ['City','Latitude','Longitude','Type','Bathrooms','Bedrooms','Public Address','Localized City','Source']
api = airbnb.Api()
# api = airbnb.Api(airbnb_secrets.login, airbnb_secrets.password)
api = airbnb.Api(access_token=airbnb_secrets.access_token)
try:
output_file = open(output_filename, 'w')
csvwriter = csv.writer(output_file, dialect='excel')
except IOError:
print("Output file creation failed")
exit(1)
csvwriter.writerow(csv_header)
while True:
try:
response = api.get_homes("Chapel Hill, NC, USA",items_per_grid=10, offset=items)
except Exception:
print("Terminating on error")
raise Exception
break
print("Starting item: "+ str(items) + " responses: " + str(len(response['explore_tabs'][0]['sections'][0]['listings'])))
# items += 50
items += len(response['explore_tabs'][0]['sections'][0]['listings'])
if len(response['explore_tabs'][0]['sections'][0]['listings']) == 0:
break
# ETL processing result set
for x in range(0, len(response['explore_tabs'][0]['sections'][0]['listings'])):
# build the output values in key order
csv_output=['null']*9
csv_output[0]=response['explore_tabs'][0]['sections'][0]['listings'][x]['listing']['city']
csv_output[1]=response['explore_tabs'][0]['sections'][0]['listings'][x]['listing']['lat']
csv_output[2]=response['explore_tabs'][0]['sections'][0]['listings'][x]['listing']['lng']
csv_output[3]=response['explore_tabs'][0]['sections'][0]['listings'][x]['listing']['room_and_property_type']
csv_output[4]=response['explore_tabs'][0]['sections'][0]['listings'][x]['listing']['bathrooms']
csv_output[5]=response['explore_tabs'][0]['sections'][0]['listings'][x]['listing']['bedrooms']
csv_output[6]=response['explore_tabs'][0]['sections'][0]['listings'][x]['listing']['public_address']
csv_output[7]=response['explore_tabs'][0]['sections'][0]['listings'][x]['listing']['localized_city']
csv_output[8]="AirBnB"
csvwriter.writerow(csv_output)
# cleanup and exit
output_file.close()
| [
11748,
1633,
31971,
198,
11748,
1633,
31971,
62,
2363,
8004,
198,
11748,
269,
21370,
198,
198,
23814,
28,
16,
198,
22915,
62,
34345,
2625,
958,
31971,
62,
354,
499,
417,
12639,
13,
40664,
1,
198,
2,
5345,
44189,
48900,
1222,
1627,
579... | 2.436947 | 904 |
if __name__ == "__main__":
n = int(input("Enter number: "))
print("Is a Narcissistic Number" if is_narcissistic_number(n) else "Is NOT a Narcissistic Number")
| [
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
299,
796,
493,
7,
15414,
7203,
17469,
1271,
25,
366,
4008,
198,
220,
220,
220,
3601,
7203,
3792,
257,
31987,
747,
2569,
7913,
1,
611,
318,
62,
... | 2.698413 | 63 |
from PIL import Image
# import torchvision.models as models
#
# print(type(models.__dict__['resnet18']))
| [
6738,
350,
4146,
1330,
7412,
628,
198,
2,
1330,
28034,
10178,
13,
27530,
355,
4981,
198,
2,
198,
2,
3601,
7,
4906,
7,
27530,
13,
834,
11600,
834,
17816,
411,
3262,
1507,
20520,
4008,
198
] | 3.057143 | 35 |
# Generated by Django 2.1.5 on 2020-04-23 22:50
from django.db import migrations
import tinymce.models
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
20,
319,
12131,
12,
3023,
12,
1954,
2534,
25,
1120,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
198,
11748,
7009,
76,
344,
13,
27530,
628
] | 2.837838 | 37 |
# SIEL type compliance cases require specific control code prefixes. Currently: (0 to 9)D, (0 to 9)E, ML21, ML22.
COMPLIANCE_CASE_ACCEPTABLE_GOOD_CONTROL_CODES = "(^[0-9][DE].*$)|(^ML21.*$)|(^ML22.*$)"
| [
2,
25861,
3698,
2099,
11846,
2663,
2421,
257,
2176,
1630,
2438,
21231,
274,
13,
3058,
25,
357,
15,
284,
860,
8,
35,
11,
357,
15,
284,
860,
8,
36,
11,
10373,
2481,
11,
10373,
1828,
13,
198,
9858,
6489,
16868,
5222,
62,
34,
11159,
... | 2.314607 | 89 |
# Jupyter Notebook server configuration file (loaded by the Notebook app at
# startup; 'get_config' is injected into the namespace by Jupyter itself).
from jupyter_core.paths import jupyter_data_dir
import subprocess
import os
import stat
# NOTE(review): jupyter_data_dir/subprocess/stat are unused in this fragment —
# presumably used by configuration code stripped from this view; confirm.
c = get_config()
# Listen on all interfaces (required when running inside a container)
c.NotebookApp.ip = '0.0.0.0'
# The port the notebook server will listen on
c.NotebookApp.port = 8888
# Whether to open in a browser after starting
c.NotebookApp.open_browser = False
# Disallow changing the password from the UI. (The original comment here
# mentioned Access-Control-Allow-Credentials, which is unrelated to this key.)
c.NotebookApp.allow_password_change = False
# Do not move deleted files to trash; delete outright.
# https://github.com/jupyter/notebook/issues/3130
c.FileContentsManager.delete_to_trash = False
6738,
474,
929,
88,
353,
62,
7295,
13,
6978,
82,
1330,
474,
929,
88,
353,
62,
7890,
62,
15908,
198,
11748,
850,
14681,
198,
11748,
28686,
198,
11748,
1185,
198,
198,
66,
796,
651,
62,
11250,
3419,
198,
198,
66,
13,
6425,
2070,
467... | 2.904762 | 168 |
from django.conf import settings
from django.db import models
from django.forms import ModelForm, Textarea
from django.shortcuts import reverse
from django.utils.translation import pgettext_lazy, ugettext_lazy as _
from django.utils import timezone
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
23914,
1330,
9104,
8479,
11,
8255,
20337,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
9575,
198,
6738,
42625,
1420... | 3.493151 | 73 |
"""
菜单序列化
@project: bright
@file: .py
@ide: PyCharm
@auth: Eric Joe
@email: whatisjava@hotmail.com
@build: 2019-09-16 10:12
@info:
"""
from apps.admin.api.common import serializers
from apps.admin.models import Menu
class MenuSerializer(serializers.ModelSerializer):
    """
    Flat serializer for the Menu model.

    NOTE(review): no Meta class is visible in this fragment; the original
    presumably declared ``Meta.model = Menu`` — confirm against the source.
    """
class MenuTreeSerializer(serializers.ModelSerializer):
    """
    Tree serializer for the Menu model: serializes each menu together with
    its nested children, recursively.
    """
    # Recursively serialize child menus with this same serializer.
    # NOTE(review): assumes Menu has a 'children' reverse relation — confirm.
    children = serializers.RecursiveField(many=True)
| [
37811,
198,
164,
237,
250,
39355,
243,
41753,
237,
26344,
245,
44293,
244,
198,
31,
16302,
25,
6016,
198,
31,
7753,
25,
764,
9078,
198,
31,
485,
25,
9485,
1925,
1670,
198,
31,
18439,
25,
7651,
5689,
198,
31,
12888,
25,
644,
271,
1... | 2.431818 | 176 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Path hack
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
from cdata.summary import * # noqa
# Prefer the unittest2 backport when available (Python 2.6 era); fall back
# to the standard library unittest otherwise.
try:
    import unittest2 as unittest
except ImportError:
    import unittest
if __name__ == '__main__':
    unittest.main()  # discover and run the TestCase classes in this module
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
10644,
8156,
198,
11748,
28686,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
28686,
13,
6978,
13... | 2.423729 | 118 |
# Imports
import pandas as pd
import joblib
from flask import Flask, render_template, session,Markup, redirect, url_for
from flask_bootstrap import Bootstrap
import os
# Production server (disabled)
#from waitress import serve
# Form validation helpers
from flask_wtf import FlaskForm
from wtforms import FloatField, SubmitField
from wtforms.validators import NumberRange,InputRequired
###############CODE#####################
# Pre-trained KNN model used by the prediction view(s)
loaded_model = joblib.load("./obj/knn_model.pkl")
# Load the pre-rendered plotly <div> markup once at startup
plotly_file = open("static/div_html.txt", "r")
div = plotly_file.read()
plotly_file.close()
# Flask app
app = Flask(__name__)
# NOTE(review): hard-coded SECRET_KEY — should come from an env variable.
app.config.from_mapping(SECRET_KEY = "DontTellAnyone")
# Toggle WTForms/Bootstrap validation UI ("YES" enables the complex template)
want_to_validate = "NO"
if want_to_validate =="YES":
    Bootstrap(app)
    index_template = 'index_complex.html'
else:
    index_template = 'index.html'
# Register the URLs that dispatch to the index view.
# NOTE(review): the decorated view functions are missing from this fragment
# (the decorators below have no function defs) — presumably stripped by the
# extraction; this file will not import as-is. Confirm against the original.
@app.route('/index', methods=['GET','POST'])
@app.route('/', methods=['GET','POST'])
@app.route('/result')
@app.errorhandler(404)
if __name__=="__main__":
    # NOTE(review): 'port' is read from the environment but app.run uses a
    # hard-coded 3000 — likely intended port=port; confirm.
    port = os.environ.get("PORT")
    app.run(debug=False, host="0.0.0.0", port=3000)
    #serve(app, host ="0.0.0.0", port=port) # If I use serve as my production web server, I need to change my Dockerfile to CMD ["python","main.py"]
2,
320,
3742,
201,
198,
11748,
19798,
292,
355,
279,
67,
201,
198,
11748,
1693,
8019,
201,
198,
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
6246,
11,
9704,
929,
11,
18941,
11,
19016,
62,
1640,
201,
198,
6738,
42903,
62,
18769... | 2.704453 | 494 |
import random
# Console dice-rolling loop: keeps rolling until the user declines.
print("\tWelcome to the Python Dice App")
flag = True
while flag:
    # NOTE(review): dice_sides/dice_number/roll_dice/sum_dice/roll_again are
    # not defined in this fragment — presumably defined above in the original
    # file (roll_dice likely uses the 'random' import); confirm upstream.
    a = dice_sides()
    b = dice_number()
    c = roll_dice(b,a)
    sum_dice(c)
    flag = roll_again()
| [
11748,
4738,
198,
4798,
7203,
59,
83,
14618,
284,
262,
11361,
34381,
2034,
4943,
198,
32109,
796,
6407,
198,
4514,
6056,
25,
198,
220,
220,
220,
257,
796,
17963,
62,
82,
1460,
3419,
198,
220,
220,
220,
275,
796,
17963,
62,
17618,
34... | 2.320988 | 81 |
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
# Prefactor (3/2)*1.38e-23 — presumably (3/2)*k_B for the monatomic ideal-gas
# internal energy U = (3/2)*N*k_B*T; confirm units with the author.
constant = (3/2)*1.38*10**(-23)
# Index 0 of each list is intentionally left at 0 (the loops start at 1)
particles_number = [0 for x in range(4)]
temperature = [0 for x in range(4)]
# X = rows (particle count), Y = columns (temperature)
energy = ([[0 for y in range(4)] for x in range(4)])
for x in range(1, 4):  # row
    particles_number[x] = x
    for y in range(1, 4):  # column
        temperature[y] = y
        internal_energy = constant*temperature[y]*particles_number[x]
        energy[x][y] = internal_energy
print(temperature)
print(particles_number)
print(energy)
# Plotting left disabled by the author:
# print(energy)
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.set_xlabel('Particles number')
# ax.set_ylabel('Temperature')
# ax.set_zlabel('Internal energy')
# ax.scatter()
| [
6738,
285,
489,
62,
25981,
74,
896,
13,
76,
29487,
18,
67,
1330,
12176,
274,
18,
35,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
9979,
415,
796,
357,
18,
14,
17,
27493,
16,
13,
2548,
9,
940,
1174,
325... | 2.447712 | 306 |
import os, sys, copy, re
import xml.etree.ElementTree as ET
_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","cime")
sys.path.append(_CIMEROOT)
from CIME.utils import expect
###############################################################################
class MockCase(object):
###############################################################################
    """
    Helper class: a stand-in for a CIME case object, to be fed to doctest tests.

    NOTE(review): no methods are visible in this fragment; the original
    presumably implements get_value()-style accessors — confirm upstream.
    """
###############################################################################
def parse_string_as_list (string):
###############################################################################
    """
    Takes a string representation of a nested list and creates
    a nested list of strings. For instance, with
        s = "(a,b,(c,d),e)"
        l = parse_string_as_list(s)
    we would have l = ['a', 'b', '(c,d)', 'e']

    NOTE(review): only one parenthesized sub-group per level appears to be
    supported (find/rfind locate a single '('...')' pair), and the slicing
    below assumes a ',' immediately precedes the sub-group — confirm callers
    comply.

    >>> s = '(a,(b,c))'
    >>> l = parse_string_as_list(s)
    >>> len(l)
    2
    >>> l[0] == 'a'
    True
    >>> l[1] == '(b,c)'
    True
    >>> ###### NOT STARTING/ENDING WITH PARENTHESES #######
    >>> s = '(a,b,'
    >>> l = parse_string_as_list(s)
    Traceback (most recent call last):
    ValueError: Input string must start with '(' and end with ')'.
    >>> ################ UNMATCHED PARENTHESES ##############
    >>> s = '(a,(b)'
    >>> l = parse_string_as_list(s)
    Traceback (most recent call last):
    ValueError: Unmatched parentheses in input string
    """
    if string[0]!='(' or string[-1]!=')':
        raise ValueError ("Input string must start with '(' and end with ')'.")
    # First inner '(' and last inner ')' (searches skip the outer pair)
    sub_open = string.find('(',1)
    sub_close = string.rfind(')',0,-1)
    # Either both are found (a sub-group exists) or neither is
    if not (sub_open>=0)==(sub_close>=0):
        raise ValueError ("Unmatched parentheses in input string")
    # Prevent empty string to pollute s.split()
    # (note: the lambda parameter shadows the builtin 'str')
    my_split = lambda str : [s for s in str.split(',') if s.strip() != '']
    if sub_open>=0:
        # tokens before the sub-group + the sub-group verbatim + tokens after
        l = []
        l.extend(my_split(string[1:sub_open-1]))
        l.append(string[sub_open:sub_close+1])
        l.extend(my_split(string[sub_close+2:-1]))
    else:
        l = my_split(string[1:-1])
    return l
###############################################################################
def is_array_type (name):
###############################################################################
    """
    Tell whether a type name denotes an array, i.e. has the form 'array(...)'.

    >>> is_array_type('array(T)')
    True
    >>> is_array_type('array')
    False
    >>> is_array_type('array(T)')
    True
    """
    return name.startswith("array(") and name.endswith(")")
###############################################################################
def array_elem_type (name):
###############################################################################
    """
    Extract the element type T from an array type name 'array(T)'.

    Aborts (via CIME's expect) if *name* is not an array type name.

    >>> print(array_elem_type('array(T)'))
    T
    >>> print(array_elem_type('array()'))
    <BLANKLINE>
    """
    expect (is_array_type(name),
            "Error! Type '{}' does not represent an array.".format(name))
    # Strip the leading 'array(' (6 chars) and the trailing ')'
    return name[6:-1]
###############################################################################
def find_node (root,name):
###############################################################################
    """
    Return the first element whose tag equals *name*, searching *root* and its
    subtree depth-first (children before siblings), or None when absent.

    WARNING: uniqueness is not checked; the first match wins.

    >>> xml = '''
    ... <my_root>
    ...     <a>1</a>
    ...     <b>
    ...         <c>2</c>
    ...     </b>
    ... </my_root>
    ... '''
    >>> import xml.etree.ElementTree as ET
    >>> root = ET.fromstring(xml)
    >>> find_node(root,'d')==None
    True
    >>> find_node(root,'c').text
    '2'
    """
    # Iterative pre-order traversal, equivalent to the recursive formulation.
    to_visit = [root]
    while to_visit:
        node = to_visit.pop()
        if node.tag == name:
            return node
        # Push children reversed so the first child is visited first
        to_visit.extend(reversed(list(node)))
    return None
###############################################################################
def get_child (root,name,remove=False,must_exist=True):
###############################################################################
    """
    Return the *direct* child of root with the given tag.

    If must_exist is True (default), there must be exactly one such child,
    otherwise an exception is thrown; with must_exist=False a missing child
    yields None. Optionally, the child can be removed from the parent
    before being returned.

    >>> xml = '''
    ... <my_root>
    ...     <a>1</a>
    ...     <b>
    ...         <c>2</c>
    ...     </b>
    ... </my_root>
    ... '''
    >>> import xml.etree.ElementTree as ET
    >>> root = ET.fromstring(xml)
    >>> get_child(root,'c')
    Traceback (most recent call last):
    CIME.utils.CIMEError: ERROR: There must be exactly one c entry inside my_root
    >>> get_child(root,'c',must_exist=False)
    """
    # findall/find only scan direct children, so nested tags are not found
    expect (len(root.findall(name))==1 or must_exist==False,
            "There must be exactly one {} entry inside {}".format(name,root.tag))
    child = root.find(name)
    if remove and child is not None:
        root.remove(child)
    return child
###############################################################################
def has_child (root,name):
###############################################################################
    """
    Return True when *root* has a *direct* child tagged *name*, else False.

    >>> xml = '''
    ... <my_root>
    ...     <a>1</a>
    ...     <b>
    ...         <c>2</c>
    ...     </b>
    ... </my_root>
    ... '''
    >>> import xml.etree.ElementTree as ET
    >>> root = ET.fromstring(xml)
    >>> has_child(root,'c')
    False
    >>> has_child(root,'b')
    True
    """
    return root.find(name) is not None
###############################################################################
def refine_type(entry, force_type=None):
###############################################################################
    """
    Try to convert the text entry to the appropriate type based on its contents.

    >>> e = '(a,b)'
    >>> refine_type(e)==e
    True
    >>> e = '[a,b]'
    >>> refine_type(e)==e
    True
    >>> e = 'a,b'
    >>> refine_type(e)==['a','b']
    True
    >>> e = 'true,falsE'
    >>> refine_type(e)==[True,False]
    True
    >>> e = '1'
    >>> refine_type(e,force_type='real')==1.0
    True
    >>> e = '1,b'
    >>> refine_type(e)==[1,'b',True]
    Traceback (most recent call last):
    CIME.utils.CIMEError: ERROR: List '1,b' has inconsistent types inside
    >>> e = '1.0'
    >>> refine_type(e,force_type='my_type')
    Traceback (most recent call last):
    NameError: Bad force_type: my_type
    >>> e = 'true,falsE'
    >>> refine_type(e,'logical')
    Traceback (most recent call last):
    CIME.utils.CIMEError: ERROR: Error! Invalid type 'logical' for an array.
    >>> refine_type(e,'array(logical)')
    [True, False]
    """
    # We want to preserve strings representing lists: '(...)' or '[...]'
    # pass through untouched (only a 'string' force_type is acceptable).
    if (entry[0]=="(" and entry[-1]==")") or \
       (entry[0]=="[" and entry[-1]=="]") :
        expect (force_type is None or force_type=="string",
                "Error! Invalid force type '{}' for a string representing a list"
                .format(force_type))
        return entry
    # A comma means a flat array: refine each element, then require that all
    # elements ended up with the same python type.
    if "," in entry:
        expect (force_type is None or is_array_type(force_type),
                "Error! Invalid type '{}' for an array.".format(force_type))
        # (stray trailing semicolon below kept verbatim)
        # NOTE(review): if every item is blank (e.g. ','), result is empty and
        # result[0] raises IndexError — confirm inputs exclude that case.
        elem_type = force_type if force_type is None else array_elem_type(force_type);
        result = [refine_type(item.strip(), force_type=elem_type) for item in entry.split(",") if item.strip() != ""]
        expected_type = type(result[0])
        for item in result[1:]:
            expect(isinstance(item, expected_type),
                   "List '{}' has inconsistent types inside".format(entry))
        return result
    # Scalar with an explicit requested type
    if force_type:
        try:
            elem_type = force_type if not is_array_type(force_type) else array_elem_type(force_type)
            if elem_type == "logical":
                # Accept TRUE/FALSE (any case) or any string int(...) accepts
                if entry.upper() == "TRUE":
                    elem = True
                elif entry.upper() == "FALSE":
                    elem = False
                else:
                    elem = bool(int(entry))
            elif elem_type == "integer":
                elem = int(entry)
            elif elem_type == "real":
                elem = float(entry)
            elif elem_type == "string":
                elem = str(entry)
            else:
                raise NameError ("Bad force_type: {}".format(force_type))
            # A forced array type wraps the single element in a list
            if is_array_type(force_type):
                return [elem]
            else:
                return elem
        except ValueError:
            raise ValueError ("Could not use '{}' as type '{}'".format(entry, force_type))
    # No forced type: try logical, then int, then float, else keep the string
    if entry.upper() == "TRUE":
        return True
    elif entry.upper() == "FALSE":
        return False
    try:
        v = int(entry)
        return v
    except ValueError:
        pass
    try:
        v = float(entry)
        return v
    except ValueError:
        return entry
###############################################################################
def derive_type(entry):
###############################################################################
    """
    Try to determine the type that the input string is representing

    Raises ValueError when the refined value has an unrecognized python type.

    >>> derive_type('1')
    'integer'
    >>> derive_type('1.0')
    'real'
    >>> derive_type('one')
    'string'
    >>> derive_type('one,two')
    'array(string)'
    >>> derive_type('true,FALSE')
    'array(logical)'
    """
    # Convert first, then deduce the name from the python type of the result
    refined_value = refine_type(entry)

    # For a list, the first element determines the (homogeneous) array type
    if isinstance(refined_value, list):
        elem_value = refined_value[0]
    else:
        elem_value = refined_value

    # NOTE: bool must be tested before int, since bool is a subclass of int
    if isinstance(elem_value, bool):
        elem_type = "logical"
    elif isinstance(elem_value, int):
        elem_type = "integer"
    elif isinstance(elem_value, float):
        elem_type = "real"
    elif isinstance(elem_value, str):
        elem_type = "string"
    else:
        # BUG FIX: the original `raise(UnrecognizedType, msg)` raised a tuple
        # containing an undefined name (NameError/TypeError in Python 3) and
        # was followed by an unreachable `return None`. Raise a real exception.
        raise ValueError("Couldn't derive type of '{}'".format(entry))

    if isinstance(refined_value, list):
        return "array(" + elem_type + ")"
    else:
        return elem_type
###############################################################################
def check_value(elem, value):
###############################################################################
    """
    Check that a parameter's value satisfies the element's metadata
    ('type', 'valid_values', 'constraints' attributes). Aborts via CIME's
    expect on any violation.

    >>> import xml.etree.ElementTree as ET
    >>> xml = '''
    ... <a type="integer" valid_values="1,2">1</a>
    ... '''
    >>> root = ET.fromstring(xml)
    >>> check_value(root,'1.0')
    Traceback (most recent call last):
    ValueError: Could not use '1.0' as type 'integer'
    >>> check_value(root,'3')
    Traceback (most recent call last):
    CIME.utils.CIMEError: ERROR: Invalid value '3' for element 'a'. Value not in the valid list ('[1, 2]')
    >>> xml = '''
    ... <a type="real" constraints="ge 0">1</a>
    ... '''
    >>> root = ET.fromstring(xml)
    >>> check_value(root,'-1')
    Traceback (most recent call last):
    CIME.utils.CIMEError: ERROR: Value '-1.0' for entry 'a' violates constraint '-1.0 >= 0.0'
    >>> xml = '''
    ... <a type="real" constraints="mod 2 eq 0">1</a>
    ... '''
    >>> root = ET.fromstring(xml)
    >>> check_value(root,'2')
    Traceback (most recent call last):
    CIME.utils.CIMEError: ERROR: Cannot evaluate constraint '2.0 mod 2 eq 0' for entry 'a'
    Modulo constraint only makes sense for integer parameters.
    >>> xml = '''
    ... <a constraints="gt 0; le 5">1</a>
    ... '''
    >>> root = ET.fromstring(xml)
    >>> check_value(root,'2')
    >>> check_value(root,'6')
    Traceback (most recent call last):
    CIME.utils.CIMEError: ERROR: Value '6' for entry 'a' violates constraint '6 <= 5'
    """
    # First refine the raw string into a typed python value
    v = value
    if "type" in elem.attrib.keys():
        vtype = elem.attrib["type"]
        v = refine_type(v,force_type=vtype)
        expect (v is not None,
                "Error! Value '{}' for element '{}' does not satisfy the constraint type={}"
                .format(value,elem.tag,vtype) +
                " NOTE: this error should have been caught earlier! Please, contact developers.")
    else:
        # If no 'type' attribute present, deduce the type and refine
        vtype = derive_type(v)
        v = refine_type(v,force_type=vtype)
    # Membership check against a comma-separated whitelist
    if "valid_values" in elem.attrib.keys():
        valids_str = elem.attrib["valid_values"]
        valids = [refine_type(item.strip(), force_type=vtype) for item in valids_str.split(",")]
        expect(v in valids,
               "Invalid value '{}' for element '{}'. Value not in the valid list ('{}')".format(value, elem.tag, valids))
    # Relational constraints, ';'-separated, each of the form 'op rhs'
    if "constraints" in elem.attrib.keys():
        expect ("type" not in elem.attrib.keys() or not is_array_type(elem.attrib["type"]),
                "Attribute 'constraints' only available for non-array parameters.")
        constraints = elem.attrib["constraints"].split(";")
        for c in constraints:
            # re.split on the op pattern returns [ '', s1, s2, ..., sN, rhs ],
            # where sK is None when opK is not matched, and sK==opK otherwise.
            # NOTE: we don't use math symbols, since XML doesn't like < or >
            #       inside strings. Worded ops for all operators:
            #         'lt': <    'gt': >    'ne': !=   'mod': %
            #         'le': <=   'ge': >=   'eq': ==
            # The list comprehension filters out None and empty strings, so
            # 'tokens' ends up as [op, rhs].
            pattern = "(ge)|(gt)|(lt)|(le)|(eq)|(ne)|(mod)"
            tokens = [i.strip() for i in re.split(pattern,c,maxsplit=1) if i and i.strip()]
            expect(len(tokens)==2,
                   "Invalid constraint syntax for entry '{}'.\n".format(elem.tag) +
                   "  Correct syntax: 'op val', to be interpreted as '$param $op val'.\n"
                   "  Constraint found: '{}'".format(c))
            # lhs is only used in the 'mod' error message below
            lhs = v
            op = tokens[0]
            if op=="ne":
                rhs = refine_type(tokens[1],force_type=vtype)
                expect (v!=rhs,
                        "Value '{}' for entry '{}' violates constraint '{} != {}'"
                        .format(v,elem.tag,v,rhs))
            elif op=="le":
                rhs = refine_type(tokens[1],force_type=vtype)
                expect (v<=rhs,
                        "Value '{}' for entry '{}' violates constraint '{} <= {}'"
                        .format(v,elem.tag,v,rhs))
            elif op=="lt":
                rhs = refine_type(tokens[1],force_type=vtype)
                expect (v<rhs,
                        "Value '{}' for entry '{}' violates constraint '{} < {}'"
                        .format(v,elem.tag,v,rhs))
            elif op=="ge":
                rhs = refine_type(tokens[1],force_type=vtype)
                expect (v>=rhs,
                        "Value '{}' for entry '{}' violates constraint '{} >= {}'"
                        .format(v,elem.tag,v,rhs))
            elif op=="gt":
                rhs = refine_type(tokens[1],force_type=vtype)
                expect (v>rhs,
                        "Value '{}' for entry '{}' violates constraint '{} > {}'"
                        .format(v,elem.tag,v,rhs))
            elif op=="mod":
                # Modular arithmetic: rhs is itself 'M eq r' or 'M ne r'
                expect (vtype=="integer",
                        "Cannot evaluate constraint '{} mod {}' for entry '{}'\n"
                        .format(lhs,tokens[1],elem.tag) +
                        "Modulo constraint only makes sense for integer parameters.")
                # Use list comprehension to filter out None (for the cmp op not found)
                rhs_tokens = [i for i in re.split("(eq)|(ne)",tokens[1]) if i]
                expect (len(rhs_tokens)==3,
                        "Modular arithmetic constraint syntax is '% M op rhs', with op being 'eq' or 'ne'"
                        "  String found: {}".format(tokens[1]))
                mod = int(rhs_tokens[0])
                # (note: 'cmp' shadows the Python 2 builtin; harmless in Py3)
                cmp = rhs_tokens[1]
                expect (cmp=="eq" or cmp=="ne",
                        "Modular arithmetic constraint syntax is '% M op rhs', with op being 'eq' or 'ne'"
                        "  String found: {}".format(tokens[1]))
                rhs = int(rhs_tokens[2])
                if cmp=="eq":
                    expect ( (v % mod)==rhs, "Value '{}' for entry '{}' violates constraint {}{}".format(v,elem.tag,v,c))
                else:
                    expect ( (v % mod)!=rhs, "Value '{}' for entry '{}' violates constraint {}{}".format(v,elem.tag,v,c))
###############################################################################
def check_all_values(root):
###############################################################################
    """
    Check that all values in the xml tree do not violate their metadata.

    Side effect: leaves without a 'type' attribute get one generated from
    their current text value.

    >>> ############### GENERATE TYPE ATTRIB ###############
    >>> xml_str = '''
    ... <root>
    ...     <prop1>1</prop1>
    ...     <prop2>1.0</prop2>
    ...     <prop3>one</prop3>
    ...     <prop4>true</prop4>
    ... </root>
    ... '''
    >>> import xml.etree.ElementTree as ET
    >>> xml = ET.fromstring(xml_str)
    >>> check_all_values(xml)
    >>> print (get_child(xml,"prop1").attrib["type"])
    integer
    >>> print (get_child(xml,"prop2").attrib["type"])
    real
    >>> print (get_child(xml,"prop3").attrib["type"])
    string
    >>> print (get_child(xml,"prop4").attrib["type"])
    logical
    """
    # Inner nodes are containers: recurse. Leaves hold values: check them.
    has_children = len(root)>0
    if has_children:
        for c in root:
            check_all_values(c)
    else:
        if "type" not in root.attrib.keys():
            # Deduce and store the type so later checks can rely on it
            root.attrib["type"] = derive_type(root.text)
        check_value(root,root.text)
###############################################################################
def resolve_inheritance (root,elem):
###############################################################################
    """
    If elem inherits from another node within $root, this function adds all
    children of its "parent" to elem. If parent also inherits, first
    resolve parent recursively. If parent is not found, throw an exception.

    >>> xml = '''
    ... <my_root>
    ...     <base>
    ...         <a>2</a>
    ...     </base>
    ...     <derived inherit="base">
    ...     </derived>
    ... </my_root>
    ... '''
    >>> import xml.etree.ElementTree as ET
    >>> root = ET.fromstring(xml)
    >>> d = get_child(root,'derived')
    >>> len(d)
    0
    >>> resolve_inheritance(root,d)
    >>> len(d)
    1
    >>> get_child(d,'a').text
    '2'
    """
    if "inherit" in elem.attrib.keys():
        parent_name = elem.attrib["inherit"]
        parent = find_node(root,parent_name)
        # BUG FIX: the original asserted 'elem is not None' (trivially true
        # here); the lookup that can fail is the parent's, as the error
        # message itself states, so check 'parent' instead.
        expect (parent is not None,
                "Error! Parent {} of {} not found within root {}"
                .format(parent_name,elem.tag,root.tag))
        # Make sure the parent is fully resolved
        resolve_inheritance(root,parent)
        # Consume the attribute so this node is never resolved twice
        del elem.attrib["inherit"]
        for entry in parent:
            # Add the parent's default only if this element does not
            # have a more specialized version
            if not has_child(elem,entry.tag):
                new_entry = copy.deepcopy(entry)
                elem.append(new_entry)
    # Recurse into (possibly newly added) children
    for child in elem:
        resolve_inheritance(root,child)
###############################################################################
def resolve_all_inheritances (root):
###############################################################################
    """
    Resolve all inheritances in the root tree.
    """
    # Resolving a node recursively resolves its parent first, so one sweep
    # over the top-level children covers the whole tree.
    for elem in root:
        resolve_inheritance(root,elem)
###############################################################################
def get_valid_selectors(xml_root):
###############################################################################
    """
    Extract the <selectors> node from the xml root, verifying
    its integrity, and returning selectors as a dict.

    The returned dict maps selector name -> (case_env, regex). NOTE: the
    <selectors> element is removed from xml_root as a side effect
    (remove=True below).

    >>> xml = '''
    ... <namelist_defaults>
    ...     <selectors>
    ...         <selector name="S1" case_env="ENV1"/>
    ...         <selector name="S2" case_env="ENV2"/>
    ...     </selectors>
    ... </namelist_defaults>
    ... '''
    >>> import xml.etree.ElementTree as ET
    >>> root = ET.fromstring(xml)
    >>> selectors = get_valid_selectors(root)
    >>> len(selectors)
    2
    >>> xml = '''
    ... <namelist_defaults>
    ...     <selectors>
    ...         <blah name="S1" case_env="ENV1"/>
    ...     </selectors>
    ... </namelist_defaults>
    ... '''
    >>> import xml.etree.ElementTree as ET
    >>> root = ET.fromstring(xml)
    >>> selectors = get_valid_selectors(root)
    Traceback (most recent call last):
    CIME.utils.CIMEError: ERROR: Expected selector tag, not blah
    """
    # Get the right XML element, and iterate over its children
    selectors_elem = get_child(xml_root,"selectors",remove=True)
    selectors = {}
    for selector in selectors_elem:
        expect(selector.tag == "selector",
               "Expected selector tag, not {}".format(selector.tag))
        selector_name = selector.attrib["name"]
        selector_env = selector.attrib["case_env"]
        # Optional regex refines which part of the env value is captured
        if "regex" in selector.attrib:
            selector_regex = selector.attrib["regex"]
        else:
            selector_regex = "(.*)" # Just grab the whole thing
        selectors[selector_name] = (selector_env, selector_regex)
    return selectors
###############################################################################
def gen_group_processes (ap_names_str, atm_procs_defaults):
###############################################################################
    """
    Given a (possibly nested) string representation of an atm group,
    generates the corresponding atm processes as XML nodes and returns them
    collected under a placeholder '__APG__' element.
    """
    group = ET.Element("__APG__")
    ap_names_list = parse_string_as_list(ap_names_str)
    for ap in ap_names_list:
        # The current ap can be itself a group if either:
        #  - ap = "(ap1,ap2,...,apXYZ)", with each ap possibly itself a group string.
        #    This group is built on the fly based on the building blocks specs.
        #  - ap is declared in the XML defaults as an atm proc group (which must store
        #    the 'atm_procs_list' child, with the string representation of the group.
        if ap[0]=='(':
            # Create the atm proc group (mutual recursion with gen_atm_proc_group)
            proc = gen_atm_proc_group(ap,atm_procs_defaults)
        else:
            # Get defaults (deep copy so the defaults tree stays pristine)
            proc = copy.deepcopy(get_child(atm_procs_defaults,ap))
            # Check if this pre-defined proc is itself a group, and, if so,
            # build all its sub-processes
            ptype = get_child(proc,"Type",must_exist=False)
            if ptype is not None and ptype.text=="Group":
                # This entry of the group is itself a group, with pre-defined
                # defaults. Let's add its entries to it
                sub_group_procs = get_child(proc,"atm_procs_list").text
                proc.extend(gen_group_processes(sub_group_procs,atm_procs_defaults))
        # Append subproc to group
        group.append(proc)
    return group
###############################################################################
def gen_atm_proc_group(atm_procs_list, atm_procs_defaults):
###############################################################################
    """
    Given a (possibly nested) list of atm procs names, and the defaults
    section for each atm proc, builds an XML node containing the tree
    representing the atm process group, with nodes including APG parameters
    as well as one sub-node for each atm proc in the group

    >>> xml = '''
    ... <ap>
    ...     <atm_proc_group>
    ...         <prop1>1</prop1>
    ...         <atm_procs_list>THE_LIST</atm_procs_list>
    ...     </atm_proc_group>
    ...     <ap1>
    ...     </ap1>
    ...     <ap2>
    ...         <prop1>2</prop1>
    ...         <prop2>3</prop2>
    ...     </ap2>
    ...     <my_group inherit="atm_proc_group">
    ...         <atm_procs_list>(p1,ap2)</atm_procs_list>
    ...     </my_group>
    ... </ap>
    ... '''
    >>> import xml.etree.ElementTree as ET
    >>> defaults = ET.fromstring(xml)
    >>> ap_list = '(ap1,(ap2,ap1))'
    >>> apg = gen_atm_proc_group(ap_list,defaults)
    >>> get_child(apg,'atm_procs_list').text==ap_list
    True
    >>>
    >>> has_child(apg,'group.ap2_ap1.')
    True
    >>> get_child(apg,'prop1').text=="1"
    True
    """
    # Set defaults from atm_proc_group (inherited parameters such as prop1)
    group = ET.Element("__APG__")
    group.attrib["inherit"] = "atm_proc_group"
    resolve_inheritance(atm_procs_defaults,group)
    get_child(group,"atm_procs_list").text = atm_procs_list
    # Create processes
    group_procs = gen_group_processes (atm_procs_list, atm_procs_defaults)
    # Append procs and generate name for the group.
    # NOTE: the name of a 'generic' group is 'group.AP1_AP2_..._APN.'
    names = []
    for c in group_procs:
        names.append(c.tag)
        group.append(c)
    group.tag = "group." + '_'.join(names) + '.'
    return group
| [
11748,
28686,
11,
25064,
11,
4866,
11,
302,
198,
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
12152,
198,
198,
62,
34,
3955,
34812,
2394,
796,
28686,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
... | 2.384851 | 10,430 |
import torch
import numpy
| [
11748,
28034,
198,
11748,
299,
32152,
628,
628,
628,
628,
198
] | 3.090909 | 11 |
import logging
from collections import Counter, defaultdict
import math
import sys
import numpy as np
from scipy.sparse import csr_matrix
import scipy.sparse
from sklearn.utils.extmath import safe_sparse_dot
from gensim.models import Word2Vec, KeyedVectors
from spherecluster import SphericalKMeans
| [
11748,
18931,
198,
6738,
17268,
1330,
15034,
11,
4277,
11600,
198,
11748,
10688,
198,
11748,
25064,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
13,
82,
29572,
1330,
269,
27891,
62,
6759,
8609,
198,
11748,
629,
541,... | 3.307692 | 91 |
# #!/usr/bin/env python
# encoding: utf-8
#
# --------------------------------------------------------------------------------------------------------------------
# Name: smart_contact_manager.py
# Version: 0.0.1
# Summary: Smart Contact Manager a contact book GUI application with Python, SQLite, and PyQt.
#
# Author: Alexsander Lopes Camargos
# Author-email: alcamargos@vivaldi.net
#
# License: MIT
# --------------------------------------------------------------------------------------------------------------------
"""Smart Contact Manager entry point script."""
# TODO: Provide search capability: Giving your users a way to search for a contact in the database.
# TODO: Add back-up capability: Providing a way of backing up contact information.
from main import main
# Script entry point: delegate to main(), imported above from the local
# 'main' module.
if __name__ == '__main__':
    main()
| [
2,
220,
1303,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
220,
21004,
25,
3384,
69,
12,
23,
198,
2,
198,
2,
220,
16529,
3880,
19351,
198,
2,
220,
6530,
25,
4451,
62,
32057,
62,
37153,
13,
9078,
198,
2,
220,
10628,
25,
657,... | 4.135 | 200 |
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 14 23:32:57 2021
Author: Josef Perktold
License: BSD-3
"""
# import numpy as np
from numpy.testing import assert_allclose
import pytest
import statsmodels.sandbox.distributions.copula as cop
ev_list = [
[cop.transform_bilogistic, 0.5, 0.9, (0.25, 0.05), 0.5],
[cop.transform_tawn, 0.5, 0.9, (0.5, 0.5, 0.5), 0.4724570876035117],
# note evd has asymmetry reversed, interchange variables
[cop.transform_tawn2, 0.9, 0.5, (0.25, 0.05), 0.464357480263932],
[cop.transform_tawn2, 0.9, 0.5, (0.5, 0.25), 0.4916117128670654],
[cop.transform_tawn2, 0.5, 0.9, (0.5, 0.25), 0.48340673415789],
# note evd has parameter for hr 1/lmbda (inverse of our parameter)
[cop.transform_hr, 0.5, 0.9, (2,), 0.4551235014298542],
[cop.transform_joe, 0.5, 0.9, (0.5, 0.75, 1/0.25), 0.4543698299835434],
[cop.transform_joe, 0.9, 0.5, (0.5, 0.75, 1/0.25), 0.4539773435983587],
# tev is against R `copula` package
# > cop = tevCopula(0.8, df = 4)
# > pCopula(c(0.5, 0.75), cop)
# [1] 0.456807960674953
# > pCopula(c(0.5, 0.9), cop)
# [1] 0.4911039761533587
[cop.transform_tev, 0.5, 0.75, (0.8, 4), 0.456807960674953],
[cop.transform_tev, 0.5, 0.9, (0.8, 4), 0.4911039761533587],
]
cop_list = [
[cop.TransfFrank, 0.5, 0.9, (2,), 0.4710805107852225, 0.9257812360337806],
[cop.TransfGumbel, 0.5, 0.9, (2,), 0.4960348880595387, 0.3973548776136501],
[cop.TransfClayton, 0.5, 0.9, (2,), 0.485954322440435, 0.8921974147432954],
[cop.TransfIndep, 0.5, 0.5, (), 0.25, 1],
]
@pytest.mark.parametrize("case", ev_list)
@pytest.mark.parametrize("case", cop_list)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
26223,
2365,
1478,
2242,
25,
2624,
25,
3553,
33448,
198,
198,
13838,
25,
5264,
69,
2448,
21841,
727,
198,
34156,
25,
347,
10305,
12,
18,
198,
1... | 1.962529 | 854 |
import numpy as np
class Squared(LossFunction):
    """Squared loss: L(p, y) = 0.5 * (y - p)²"""
    # NOTE(review): method bodies are not visible in this fragment —
    # presumably stripped by the extraction; confirm against the original.
class Logistic(LossFunction):
    """Logistic loss: L(p, y) = log(1 + exp(-yp))"""
    # NOTE(review): method bodies not visible in this fragment — confirm.
class SquaredHinge(LossFunction):
    """Squared hinge loss: L(p, y) = max(1 - yp, 0)²"""
    # NOTE(review): method bodies not visible in this fragment — confirm.
class Hinge(LossFunction):
    """Hinge loss: L(p, y) = max(1 - y*p, 0)"""
    # NOTE(review): method bodies not visible in this fragment — confirm.
11748,
299,
32152,
355,
45941,
628,
628,
198,
4871,
5056,
1144,
7,
43,
793,
22203,
2599,
198,
220,
220,
220,
37227,
22266,
1144,
2994,
25,
406,
7,
79,
11,
331,
8,
796,
657,
13,
20,
1635,
357,
88,
532,
279,
8,
31185,
37811,
628,
... | 2.283871 | 155 |
import unittest
import json
import os
| [
11748,
555,
715,
395,
628,
198,
11748,
33918,
198,
11748,
28686,
628,
198
] | 3.230769 | 13 |
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 27 17:29:26 2020
@author: Alberto Suárez
"""
# Load packages
import numpy as np
import sys
import matplotlib.pyplot as plt
from numpy.core.shape_base import _accumulate
def generate_regular_grid(t0, delta_t, N):
    """Build a uniform time grid of N steps of size delta_t starting at t0.

    Parameters
    ----------
    t0 : float
        Initial time for the simulation
    delta_t: float
        Spacing between consecutive grid points (step)
    N: int
        Number of steps for the simulation

    Returns
    -------
    t: numpy.ndarray of shape (N+1,)
        The times t0, t0 + delta_t, ..., t0 + N*delta_t

    Example
    -------
    >>> generate_regular_grid(0, 0.1, 100)
    """
    grid = [t0 + delta_t * step for step in range(N + 1)]
    return np.array(grid)
def euler_maruyana(t0, x0, T, a, b, M, N):
""" Numerical integration of an SDE using the stochastic Euler scheme
x(t0) = x0
dx(t) = a(t, x(t))*dt + b(t, x(t))*dW(t) [Itô SDE]
Parameters
----------
t0 : float
Initial time for the simulation
x0 : float
Initial level of the process
T : float
Length of the simulation interval [t0, t0+T]
a :
Function a(t,x(t)) that characterizes the drift term
b :
Function b(t,x(t)) that characterizes the diffusion term
M: int
Number of trajectories in simulation
N: int
Number of steps for the simulation
Returns
-------
t: numpy.ndarray of shape (N+1,)
Regular grid of discretization times in [t0, t0+T]
X: numpy.ndarray of shape (M,N+1)
Simulation consisting of M trajectories.
Each trajectory is a row vector composed of the values
of the process at t.
Example
-------
>>> import matplotlib.pyplot as plt
>>> import sde_solvers as sde
>>> t0, S0, T, mu, sigma = 0, 100.0, 2.0, 0.3, 0.4
>>> M, N = 20, 1000
>>> def a(t, St): return mu*St
>>> def b(t, St): return sigma*St
>>> t, S = sde.euler_maruyana(t0, S0, T, a, b, M, N)
>>> _ = plt.plot(t,S.T)
>>> _= plt.xlabel('t')
>>> _= plt.ylabel('S(t)')
>>> _= plt.title('Geometric BM (Euler scheme)')
"""
# Initialize trayectories using x0 and the rest of the variables.
trajectories = np.tile(x0, (M, N+1))
delta_t = 1.0 * T / N
sqrt_delta_t = np.sqrt(delta_t)
times = generate_regular_grid(t0, delta_t, N)
noise = np.random.randn(M, N)
# Traverse the trajectories columns except the last one.
# I.e., x will be a vector with all the trajectories at time t,
# and z a vector will the noise at time t.
for idx, (t, x, z) in enumerate(zip(times[:-1], trajectories.T[:-1], noise.T)):
trajectories.T[idx+1] = x + a(t, x) * delta_t + z * b(t, x) * sqrt_delta_t
return times, trajectories
def milstein(t0, x0, T, a, b, db_dx, M, N):
""" Numerical integration of an SDE using the stochastic Milstein scheme
x(t0) = x0
dx(t) = a(t, x(t))*dt + b(t, x(t))*dW(t) [Itô SDE]
Parameters
----------
t0 : float
Initial time for the simulation
x0 : float
Initial level of the process
T : float
Length of the simulation interval [t0, t0+T]
a :
Function a(t, x(t)) that characterizes the drift term
b :
Function b(t, x(t)) that characterizes the diffusion term
db_dx :
Function db_dx(t, x(t)), derivative wrt the second argument of b(t, x)
M: int
Number of trajectories in simulation
N: int
Number of steps for the simulation
Returns
-------
t: numpy.ndarray of shape (N+1,)
Regular grid of discretization times in [t0, t0+T]
X: numpy.ndarray of shape (M,N+1)
Simulation consisting of M trajectories.
Each trajectory is a row vector composed of the
values of the process at t.
Example
-------
>>> import matplotlib.pyplot as plt
>>> import sde_solvers as sde
>>> t0, S0, T, mu, sigma = 0, 100.0, 2.0, 0.3, 0.4
>>> M, N = 20, 1000
>>> def a(t, St): return mu*St
>>> def b(t, St): return sigma*St
>>> def db_dSt(t, St): return sigma
>>> t, S = sde.milstein(t0, S0, T, a, b, db_dSt, M, N)
>>> _ = plt.plot(t,S.T)
>>> _= plt.xlabel('t')
>>> _= plt.ylabel('S(t)')
>>> _= plt.title('Geometric BM (Milstein scheme)')
"""
# Initialize trayectories using x0 and the rest of the variables.
trajectories = np.tile(x0, (M, N+1))
delta_t = 1.0 * T / N
sqrt_delta_t = np.sqrt(delta_t)
times =generate_regular_grid(t0, delta_t, N)
noise = np.random.randn(M, N)
# Traverse the trajectories columns except the last one.
# I.e., x will be a vector with all the trajectories at time t,
# and z a vector will the noise at time t.
for idx, (t, x, z) in enumerate(zip(times[:-1], trajectories.T[:-1], noise.T)):
trajectories.T[idx+1] = x + a(t, x) * delta_t + z * b(t, x) * sqrt_delta_t \
+ 0.5 * b(t, x) * db_dx(t, x) * (z**2 - 1) * delta_t
return times, trajectories
def simulate_jump_process(t0, T, simulator_arrival_times, simulator_jumps, M):
""" Simulation of jump process
Parameters
----------
t0 : float
Initial time for the simulation
T : float
Length of the simulation interval [t0, t0+T]
simulator_arrival_times: callable with arguments (t0,T)
Function that returns a list of M arrays of arrival times in [t0, t0+T]
simulator_jumps: callable with argument N
Function that returns a list of M arrays with the sizes of the jumps
M: int
Number of trajectories in the simulation
Returns
-------
t: numpy.ndarray of shape (N+1,)
Regular grid of discretization times in [t0,t1]
X: numpy.ndarray of shape (M,N+1)
Simulation consisting of M trajectories.
Each trajectory is a row vector composed of the
values of the process at t.
"""
times_of_jumps = [[] for _ in range(M)]
sizes_of_jumps = [[] for _ in range(M)]
for m in range(M):
times_of_jumps[m] = simulator_arrival_times(t0, T)
max_jumps = len(times_of_jumps[m])
sizes_of_jumps[m] = simulator_jumps(max_jumps)
return times_of_jumps, sizes_of_jumps
# Stochastic Euler scheme for the numerical solution of a jump-diffision SDE
def euler_jump_diffusion(t0, x0, T, a, b, c,
simulator_jump_process,
M, N):
""" Simulation of jump diffusion process
x(t0) = x0
dx(t) = a(t, x(t))*dt + b(t, x(t))*dW(t) + c(t, x(t)) dJ(t)
[Itô SDE with a jump term]
Parameters
----------
t0 : float
Initial time for the simulation
x0 : float
Initial level of the process
T : float
Length of the simulation interval [t0, t0+T]
a : Function a(t,x(t)) that characterizes the drift term
b : Function b(t,x(t)) that characterizes the diffusion term
c : Function c(t,x(t)) that characterizes the jump term
simulator_jump_process: Function that returns times and sizes of jumps
M: int
Number of trajectories in simulation
N: int
Number of steps for the simulation
Returns
-------
t: numpy.ndarray of shape (N_t,)
Non-regular grid of discretization times in [t0,t1].
N_t is the number of jumps generated.
X: numpy.ndarray of shape (M, N_t)
Simulation consisting of M trajectories.
Each trajectory is a row vector composed of the
values of the process at t.
"""
# Create a 2-d array with the times and the sizes of jumps (0 in this case)
delta_t = 1.0 * T / N
sqrt_delta_t = np.sqrt(delta_t)
original_times = np.array([generate_regular_grid(t0, delta_t, N), np.repeat(None, N+1)])
# Create empy arrays to be filled with final results
final_times = []
final_trajectories = []
# Simulate the jump processes
times_of_jumps, sizes_of_jumps = simulator_jump_process(t0, T, M)
# Each trajectory has different length (the numebr of jumps is different, so the length of
# the times array will be different). By computing
for single_times_oj_jumps, single_sizes_oj_jumps in zip(times_of_jumps, sizes_of_jumps):
# Create a 2-d array with the times and the sizes of jumps
jump_times = np.array([single_times_oj_jumps, single_sizes_oj_jumps])
# Join the time grid and sort them by increasing times, keeping the correct
# jump associated to each time.
times_and_jumps = np.concatenate((original_times, jump_times), axis=1)
times_and_jumps = np.array(sorted(times_and_jumps.T, key=lambda x: x[0])).T
# Unpack the times and jumps
times, jumps = times_and_jumps
N_t = len(times)
# Create noise for every time that will be computed.
noise = np.random.randn(N_t-1)
# Initialize trayectories using x0 and the rest of the variables.
trajectory = np.repeat(x0, N_t)
# Create the trajectories following the studied algorithm
for idx, (t, j, x, z) in enumerate(zip(times[:-1], jumps[:-1], trajectory[:-1], noise)):
trajectory[idx+1] = x + a(t, x) * delta_t + z * b(t, x) * sqrt_delta_t
if j is not None:
trajectory[idx+1] = trajectory[idx+1] + c(t, trajectory[idx+1]) * j
# Save the times and trajectories
final_times.append(times)
final_trajectories.append(trajectory)
return final_times, final_trajectories
def subplot_mean_and_std(x, mean, std, fig_num=1, color='b',
fill_color='#1f77b4',
xlims=None, ylims=None, xlabel=None,
ylabel=None, title=None, alpha_std=.3):
"""
Plots the passed mean and std.
Parameters
----------
x : numpy.ndarray
x-component to plot
mean : numpy.ndarray
mean of the y-component to plot
std : numpy.ndarray
std of the y-component to plot
color : string, optional
Color to plot the mean on
color : string, optional
Color to plot the std on
xlims : numpy.ndarray, optional
xlims for the plot
ylims : numpy.ndarray, optional
xlims for the plot
xlabel : string, optional
xlabel for the plot
ylabel : string, optional
ylabel for the plot
title : string, optional
Title for the plot
alpha_std : float, optional
Alpha of the std-filling color
Returns
-------
No returns, it fills the axis
Example
-------
>>> simulate_wiener_process(n_processes=1000)
>>> mean, std = np.mean(trajectories, axis=0), np.std(trajectories, axis=0)
>>> fig, axis = plt.figure(figsize=(12, 8))
>>> subplot_mean_and_std(axis, ts, mean, 2*std)
"""
plt.figure(fig_num)
plt.plot(x, mean, color=color)
plt.fill_between(x, mean-std, mean+std, color=fill_color, alpha=alpha_std)
if xlims is not None: plt.xlim(xlims)
if ylims is not None: plt.ylim(ylims)
if xlabel is not None: plt.xlabel(xlabel)
if ylabel is not None: plt.ylabel(ylabel)
if title is not None: plt.title(title)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3825,
8621,
2681,
1596,
25,
1959,
25,
2075,
12131,
198,
198,
31,
9800,
25,
40649,
1778,
6557,
21107,
198,
37811,
198,
2,
8778,
10392,
198,
11748,... | 2.307929 | 4,868 |
nome = str(input('>>Seu nome completo:\n')).strip()
print('>>No seu nome há ', nome.upper().count('A'), 'letra(s) A.')
print('>>Aparece na posição ', nome.upper().find('A')+1)
print('>>A última aparição é na ', nome.upper().rfind('A') + 1) | [
77,
462,
796,
965,
7,
15414,
10786,
4211,
4653,
84,
299,
462,
1224,
1462,
7479,
77,
11537,
737,
36311,
3419,
198,
4798,
10786,
4211,
2949,
384,
84,
299,
462,
289,
6557,
46083,
299,
462,
13,
45828,
22446,
9127,
10786,
32,
33809,
705,
... | 2.27619 | 105 |
import visualpriors
import torch
from config.config import device
def mid_level_representations(input_image_tensor, representation_names):
"""
:param input_image_tensor: (batch_size, 3, 256, 256)
:param representation_names: list
:return: concatted image tensor to pass into FCN (batch_size, 8*len(representation_names), 16, 16)
"""
representations = []
for name in representation_names:
# (batch_size, 3, 256, 256) ——>(batch_size, 8, 16, 16)
representations.append(visualpriors.representation_transform(input_image_tensor, name, device=device))
return torch.cat(representations, dim=1)
| [
11748,
5874,
3448,
669,
198,
11748,
28034,
198,
6738,
4566,
13,
11250,
1330,
3335,
628,
198,
4299,
3095,
62,
5715,
62,
15603,
602,
7,
15414,
62,
9060,
62,
83,
22854,
11,
10552,
62,
14933,
2599,
198,
220,
220,
220,
37227,
198,
220,
2... | 2.917808 | 219 |
# -*- coding: utf-8 -*-
"""
pmutt.test_pmutt_model_statmech_nucl
Tests for pmutt module
"""
import unittest
from pmutt.statmech import nucl
if __name__ == '__main__':
unittest.main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
4426,
15318,
13,
9288,
62,
4426,
15318,
62,
19849,
62,
14269,
1326,
354,
62,
28803,
565,
198,
51,
3558,
329,
9114,
15318,
8265,
198,
37811,
198,
11748,
555,
... | 2.345679 | 81 |
#########################################################################
# _________ ___. ______________________ ___
# \_ ___ \___.__.\_ |__ ___________ / _____/\______ \ \/ /
# / \ \< | | | __ \_/ __ \_ __ \/ \ ___ | _/\ /
# \ \___\___ | | \_\ \ ___/| | \/\ \_\ \| | \/ \
# \______ / ____| |___ /\___ >__| \______ /|____|_ /___/\ \
# \/\/ \/ \/ \/ \/ \_/
#
#
import re
from glom import glom
from openpyxl.cell import Cell, MergedCell
from openpyxl.styles import PatternFill
from openpyxl.styles import colors
from openpyxl.styles.fills import FILL_SOLID
from openpyxl.utils.exceptions import IllegalCharacterError
_VLOOKUP_REGEX = re.compile(r'.*?VLOOKUP\("(?P<control>\d+\.\d+\.\d+\.\d+|[A-Z]{1,3}\.\d+\.\d+\.\d+).*?".*')
| [
29113,
29113,
7804,
2,
198,
2,
220,
220,
220,
220,
2602,
62,
220,
220,
220,
220,
220,
220,
220,
11593,
44807,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
4841,
25947,
220,
46444,
... | 1.949339 | 454 |
#!/usr/bin/env python3.7
""" This file contains code to carry out a simple estimate of the amount of
Fortran code 'handled' by for2py.
COMMAND-LINE INVOCATION:
python3.7 measure-coverage.py <directory with Fortran code files>
ADDING HANDLED LANGUAGE CONSTRUCTS:
As the set of language features handled by for2py grows, they should be
incorporated into this script. This can be done as follows:
1) Write a regular expression to recognize that feature (see examples
under "SYNTAX MATCHING" below).
2) Add the regular expression to the list for the variable HANDLED.
"""
import os
import sys
import delphi.translators.for2py.preprocessor
from .syntax import *
FORTRAN_EXTENSIONS = ['.f', '.f90', '.for']
################################################################################
# #
# SYNTAX MATCHING #
# #
################################################################################
# Regular expressions that specify patterns for various Fortran constructs.
# These are very similar to the constructs in the file syntax.py, but only
# include constructs that are currently handled in for2py.
FN_START = r"\s*(\w*\s*){0,2}function\s+(\w+)\s*\("
RE_FN_START = re.compile(FN_START, re.I)
PGM_UNIT = r"\s*\w*\s*(program|module|subroutine|(\w*\s*){0,2}function)\s+(\w+)"
RE_PGM_UNIT_START = re.compile(PGM_UNIT, re.I)
PGM_UNIT_SEP = r"\s+contains(\W+)"
RE_PGM_UNIT_SEP = re.compile(PGM_UNIT_SEP, re.I)
PGM_UNIT_END = r"\s*[a-z]*\s*end\s+(program|module|subroutine|function)\s+"
RE_PGM_UNIT_END = re.compile(PGM_UNIT_END, re.I)
SUBPGM_END = r"\s*end\s+"
RE_SUBPGM_END = re.compile(SUBPGM_END, re.I)
ASSG_STMT = r"\s*(\d+|&)?\s*.*=\s*"
RE_ASSG_STMT = re.compile(ASSG_STMT, re.I)
IMPLICIT_STMT = r"\s*implicit\s+"
RE_IMPLICIT_STMT = re.compile(IMPLICIT_STMT, re.I)
CALL_STMT = r"\s*(\d+|&)?\s*call\s*"
RE_CALL_STMT = re.compile(CALL_STMT, re.I)
IO_STMT = r"\s*(\d+|&)?\s*(open|close|read|write|print|format|rewind)\W*"
RE_IO_STMT = re.compile(IO_STMT, re.I)
DO_STMT = r"\s*(\d+|&)?\s*do\s*"
RE_DO_STMT = re.compile(DO_STMT, re.I)
ENDDO_STMT = r"\s*(\d+|&)?\s*end\s*do\s*"
RE_ENDDO_STMT = re.compile(ENDDO_STMT, re.I)
ENDIF_STMT = r"\s*(\d+|&)?\s*end\s*if\s*"
RE_ENDIF_STMT = re.compile(ENDIF_STMT, re.I)
GOTO_STMT = r"\s*(\d+|&)?\s*go\s*to\s*"
RE_GOTO_STMT = re.compile(GOTO_STMT, re.I)
IF_STMT = r"\s*(\d+|&)?\s*(if|elseif|else)\s*"
RE_IF_STMT = re.compile(IF_STMT, re.I)
PAUSE_STMT = r"\s*(\d+|&)?\s*pause\s*"
RE_PAUSE_STMT = re.compile(PAUSE_STMT, re.I)
USE_STMT = r"\s*(\d+|&)?\s*use\s*"
RE_USE_STMT = re.compile(USE_STMT, re.I)
RETURN_STMT = r"\s*(\d+|&)?\s*return\s*"
RE_RETURN_STMT = re.compile(RETURN_STMT, re.I)
CYCLE_STMT = r"\s*(\d+|&)?\s*cycle\s*"
RE_CYCLE_STMT = re.compile(CYCLE_STMT, re.I)
EXIT_STMT = r"\s*(\d+|&)?\s*exit\s*"
RE_EXIT_STMT = re.compile(EXIT_STMT, re.I)
SAVE_STMT = r"\s*(\d+|&)?\s*save\s*"
RE_SAVE_STMT = re.compile(SAVE_STMT, re.I)
SELECT_STMT = r"\s*(\d+|&)?\s*select\s*case\s*"
RE_SELECT_STMT = re.compile(SELECT_STMT, re.I)
ENDSELECT_STMT = r"\s*(\d+|&)?\s*end\s*select\s*"
RE_ENDSELECT_STMT = re.compile(ENDSELECT_STMT, re.I)
CASE_STMT = r"\s*(\d+|&)?\s*case\s*"
RE_CASE_STMT = re.compile(CASE_STMT, re.I)
STOP_STMT = r"\s*(\d+|&)?\s*stop\s*"
RE_STOP_STMT = re.compile(STOP_STMT, re.I)
TYPE_NAMES = r"^\s*(integer|real|double\s+precision|logical|dimension|type)\W*"
RE_TYPE_NAMES = re.compile(TYPE_NAMES, re.I)
HANDLED = [
RE_FN_START,
RE_PGM_UNIT_START,
RE_PGM_UNIT_SEP,
RE_PGM_UNIT_END,
RE_SUBPGM_END,
RE_ASSG_STMT,
RE_CALL_STMT,
RE_CYCLE_STMT,
RE_EXIT_STMT,
RE_IMPLICIT_STMT,
RE_IO_STMT,
RE_DO_STMT,
RE_ENDDO_STMT,
RE_ENDIF_STMT,
RE_GOTO_STMT,
RE_IF_STMT,
RE_PAUSE_STMT,
RE_RETURN_STMT,
RE_SAVE_STMT,
RE_STOP_STMT,
RE_TYPE_NAMES,
RE_USE_STMT,
]
KEYWD = r"\s*(\d+|&)?\s*([a-z]+).*"
RE_KEYWD = re.compile(KEYWD)
################################################################################
# #
# FILE PROCESSING #
# #
################################################################################
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
13,
22,
198,
198,
37811,
770,
2393,
4909,
2438,
284,
3283,
503,
257,
2829,
8636,
286,
262,
2033,
286,
198,
220,
220,
220,
6401,
2596,
2438,
705,
38788,
6,
416,
329,
17,
9078,
13,
62... | 1.870061 | 2,455 |
from collections import namedtuple
from leapp.libraries.common.config import get_env, get_all_envs
from leapp.libraries.stdlib import api
from leapp.models import EnvVar
| [
6738,
17268,
1330,
3706,
83,
29291,
198,
198,
6738,
443,
1324,
13,
75,
11127,
13,
11321,
13,
11250,
1330,
651,
62,
24330,
11,
651,
62,
439,
62,
268,
14259,
198,
6738,
443,
1324,
13,
75,
11127,
13,
19282,
8019,
1330,
40391,
198,
6738... | 3.283019 | 53 |
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from itertools import chain
import numpy as np
from led_wabbit.models import LogisticRegressionBinary
if __name__ == '__main__':
X1 = [[0, 1, 1] for i in range(40)]
X2 = [[0, 2, 0] for i in range(40)]
X3 = [[1, 0, 1] for i in range(40)]
X4 = [[0, 2, 2] for i in range(3)]
X = np.array([x for x in chain(X1, X2, X3, X4)])
Y1 = [0 for i in range(40)]
Y2 = [1 for i in range(40)]
Y3 = [0 for i in range(40)]
Y4 = [1 for i in range(3)]
Y = np.array([y for y in chain(Y1, Y2, Y3, Y4)])
X, Y = shuffle(X, Y)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.1)
header_dict = {0: ('n', 'X', 'x0'), 1: ('n', 'Y', 'y0'), 2: ('n', 'Z', 'z0')}
clf = LogisticRegressionBinary(learning_rate=5, header_dict=header_dict) # loss='logistic',
params = {'passes': [50, 100], 'header_dict': [header_dict], \
'learning_rate': [0.5, 0.2, 0.8], 'log_stderr_to_file': [True]} # 'loss':['logistic'],
with open('wv_learning.vw','w') as g:
for s in clf.iterate_over_vw_strings(X_train,y_train):
g.write(s)
g.write('\n') | [
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
4512,
62,
9288,
62,
35312,
198,
6738,
1341,
35720,
13,
26791,
1330,
36273,
198,
6738,
340,
861,
10141,
1330,
6333,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
2957,
62,
86,
14229,
... | 2.132635 | 573 |
import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
b1 = tsds.load_ozone_exogenous_categorical()
df = b1.mPastData
print(b1.mExogenousDataFrame.Exog2.cat.categories)
print(b1.mExogenousDataFrame.Exog3.cat.categories)
print(b1.mExogenousDataFrame.Exog4.cat.categories)
lEngine = autof.cForecastEngine()
lEngine.mOptions.mDebug = True;
lEngine.mOptions.mDebugProfile = True;
lEngine.mOptions.disable_all_periodics()
lEngine.mOptions.set_active_autoregressions(['ARX'])
lExogenousData = (b1.mExogenousDataFrame , b1.mExogenousVariables)
H = 12
lEngine.train(df , 'Time' , b1.mSignalVar, H, lExogenousData)
lEngine
lEngine.getModelInfo();
lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
lEngine.standardPlots(name = "outputs/my_categorical_arx_ozone")
dfapp_in = df.copy();
dfapp_in.tail()
dfapp_out = lEngine.forecast(dfapp_in, H);
#dfapp_out.to_csv("outputs/arx_ozone_apply_out.csv")
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
Forecast_DF = dfapp_out[[b1.mTimeVar , b1.mSignalVar, b1.mSignalVar + '_Forecast']]
print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(H).values);
print("\n\n<ModelInfo>")
print(lEngine.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
print("</Forecast>\n\n")
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
12972,
1878,
13,
16351,
2701,
13798,
355,
1960,
1659,
198,
11748,
12972,
1878,
13,
44199,
13,
4694,
62,
19608,
292,
1039,
355,
40379,
9310,
198,
198,
... | 2.416084 | 572 |
# coding=utf-8
"""UrbanTerror server info"""
# Author: zephrax http://kernelpanic.com.ar
from __future__ import unicode_literals, absolute_import, print_function, division
from sopel import web
from sopel.module import commands
from sopel.logger import get_logger
from sopel.config.types import StaticSection, ValidatedAttribute, ListAttribute
import socket
import re
LOGGER = get_logger(__name__)
class Player(object):
"""
Player class
"""
def __init__(self, num, name, frags, ping, address=None, bot=-1):
"""
create a new instance of Player
"""
self.num = num
self.name = name
self.frags = frags
self.ping = ping
self.address = address
self.bot = bot
class PyQuake3(object):
"""
PyQuake3 class
"""
packet_prefix = b'\xff' * 4
player_reo = re.compile(r'^(\d+) (\d+) "(.*)"')
rcon_password = None
port = None
address = None
players = None
values = None
def __init__(self, server, rcon_password=''):
"""
create a new instance of PyQuake3
"""
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.set_server(server)
self.set_rcon_password(rcon_password)
def set_server(self, server):
"""
set IP address and port and connect to socket
"""
try:
self.address, self.port = server.split(':')
except Exception:
raise ValueError('Server address format must be: "address:port"')
self.port = int(self.port)
self.sock.connect((self.address, self.port))
def get_address(self):
"""
get IP address and port
"""
return '%s:%s' % (self.address, self.port)
def set_rcon_password(self, rcon_password):
"""
set RCON password
"""
self.rcon_password = rcon_password
def send_packet(self, data):
"""
send packet
"""
base = b''
self.sock.send(base.join([self.packet_prefix, data.encode(), b'\n']))
# self.sock.send('{}{}\n'.format(self.packet_prefix, data).encode())
def recv(self, timeout=1):
"""
receive packets
"""
self.sock.settimeout(timeout)
try:
return self.sock.recv(8192)
except Exception as err:
raise Exception('Error receiving the packet: %s' % err[1])
def command(self, cmd, timeout=1, retries=5):
"""
send command and receive response
"""
while retries:
self.send_packet(cmd)
try:
data = self.recv(timeout)
except Exception:
data = None
if data:
return self.parse_packet(data)
retries -= 1
raise Exception('Server response timed out')
def rcon(self, cmd):
"""
send RCON command
"""
r_cmd = self.command('rcon "{}" {}'.format(self.rcon_password, cmd))
if r_cmd[1] == 'No rconpassword set on the server.\n' or r_cmd[1] == 'Bad rconpassword.\n':
raise Exception(r_cmd[1][:-1])
return r_cmd
def parse_packet(self, data):
"""
parse the received packet
"""
if data.find(self.packet_prefix) != 0:
raise Exception('Malformed packet')
first_line_length = data.find(b'\n')
if first_line_length == -1:
raise Exception('Malformed packet')
response_type = data[len(self.packet_prefix):first_line_length].decode()
response_data = data[first_line_length + 1:].decode()
return response_type, response_data
def parse_status(self, data):
"""
parse the response message and return a list
"""
split = data[1:].split('\\')
values = dict(zip(split[::2], split[1::2]))
# if there are \n's in one of the values, it's the list of players
for var, val in values.items():
pos = val.find('\n')
if pos == -1:
continue
split = val.split('\n', 1)
values[var] = split[0]
self.parse_players(split[1])
return values
def parse_players(self, data):
"""
parse player information - name, frags and ping
"""
self.players = []
for player in data.split('\n'):
if not player:
continue
match = self.player_reo.match(player)
if not match:
print('couldnt match {}'.format(player))
continue
frags, ping, name = match.groups()
self.players.append(Player(1, name, frags, ping))
def update(self):
"""
get status
"""
data = self.command('getstatus')[1]
self.values = self.parse_status(data)
def rcon_update(self):
"""
perform RCON status update
"""
data = self.rcon('status')[1]
lines = data.split(b'\n')
players = lines[3:]
self.players = []
for ply in players:
while ply.find(' ') != -1:
ply = ply.replace(' ', ' ')
while ply.find(' ') == 0:
ply = ply[1:]
if ply == '':
continue
ply = ply.split(' ')
try:
self.players.append(Player(int(ply[0]), ply[3], int(ply[1]), int(ply[2]), ply[5]))
except (IndexError, ValueError):
continue
class UrbanTerrorSection(StaticSection):
"""UrbanTerror server host. Default to localhost."""
server_host = ValidatedAttribute('server_host', str, default='localhost')
"""UrbanTerror server port. Default to 27960."""
server_port = ValidatedAttribute('server_port', int, default=27960)
"""UrbanTerror server rcon password."""
rcon_password = ValidatedAttribute('rcon_password', str)
@commands('ut')
def ut(bot, trigger):
"""UrbanTerror server stats"""
try:
ut_cfg = bot.config.urbanterror
UT = PyQuake3(server='{}:{}'.format(ut_cfg.server_host, ut_cfg.server_port), rcon_password=ut_cfg.rcon_password)
UT.update()
bot.say('Server: {} ({}) | Map: {} | Players ({}) {}'.format(
UT.values['sv_hostname'],
UT.get_address(),
UT.values['mapname'],
len(UT.players),
[gamer.name for gamer in UT.players]))
except Exception as err:
LOGGER.debug('Internal Error. {}'.format(err))
| [
2,
19617,
28,
40477,
12,
23,
198,
37811,
46667,
40194,
4382,
7508,
37811,
198,
2,
6434,
25,
1976,
27446,
32040,
2638,
1378,
33885,
35843,
13,
785,
13,
283,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
11,
4112,
62,... | 2.148282 | 3,055 |
import fpga_config
from nifpga import Session
import ntpath
configpath = input('Please enter the full filepath of your .fpgaconfig file: ')
vsfpga = fpga_config.VeriStandFPGA(configpath)
folder = ntpath.split(configpath)
read_count = vsfpga.read_packets + 1
write_count = vsfpga.write_packets + 1
read_packets = {}
write_packets = {}
for i in range(1, read_count):
read_packets['packet{}'.format(i)] = vsfpga._create_packet('read', i)
for i in range(1, write_count):
write_packets['packet{}'.format(i)] = vsfpga._create_packet('write', i)
print('Please input five values separated by commas for the following channels')
print('Please enter PWMs as 0-100 Duty Cycles, Digital Lines as 1\'s or 0\'s and Analog Lines as floating points')
write_values = {}
for i in range(1, write_count):
write_packet = write_packets['packet{}'.format(i)]
iteration_writes = {}
for j in range(write_packet.definition['channel_count']):
valuestr = input('{}: '.format(write_packet.definition['name{}'.format(j)]))
channel_values = valuestr.split(',')
for k, value in enumerate(channel_values):
iteration_writes['{},{}'.format(write_packet.definition['name{}'.format(j)], k)] = value
write_values['packet{}'.format(i)] = iteration_writes
device = input('Please input the name of your FPGA board as it appears in NI-MAX: ')
with Session(vsfpga.full_bitpath, device) as sesh:
read_fifo = sesh.fifos['DMA_READ']
write_fifo = sesh.fifos['DMA_WRITE']
loop_timer = sesh.registers['Loop Rate (usec)']
start = sesh.registers['Start']
rtsi = sesh.registers['Write to RTSI']
ex_timing = sesh.registers['Use External Timing']
irq = sesh.registers['Generate IRQ']
loop_timer.write(1000)
rtsi.write(False)
ex_timing.write(False)
irq.write(False)
start.write(True)
packed_reads = {}
for i in range(5):
packed_reads["iteration{}".format(i)] = read_fifo.read(number_of_elements=vsfpga.read_packets, timeout_ms=2000)
write_list = []
for j in range(1, write_count):
packet_of_interest = write_packets['packet{}'.format(j)]
p_values = []
this_iteration = write_values['packet{}'.format(j)]
for k in range(packet_of_interest.definition['channel_count']):
channel_name = packet_of_interest.definition['name{}'.format(k)]
p_values.append(this_iteration['{},{}'.format(channel_name, i)])
packed_data = packet_of_interest._pack(p_values)
write_list.append(packed_data)
write_fifo.write(data=write_list, timeout_ms=2000)
for i in range(5):
print("Iteration {} Reads:".format(i+1))
read_tup = packed_reads['iteration{}'.format(i)]
current_it = read_tup[0]
for j, u64 in enumerate(current_it):
packet_of_interest = read_packets['packet{}'.format(j+1)]
print(packet_of_interest._unpack(u64))
sesh.close()
# Assumptions:
# Bitfile in the same folder as the .fpgaconfig file
# .fpgaconfig file follows the VeriStand standard
# Bitfile is written with the VeriStand FPGA project template in LabVIEW
# Control names and FIFO names have not been edited from the template names
# The FPGA bitfile was generated using the VeriStand FPGA Suppport VIs for all IO.
# Basic IO palette
# Digital Lines not Ports
# Pulse Measurement VI
# Pulse Generation VI
# Analog IO
| [
11748,
277,
79,
4908,
62,
11250,
198,
6738,
299,
361,
79,
4908,
1330,
23575,
198,
11748,
299,
83,
6978,
198,
198,
11250,
6978,
796,
5128,
10786,
5492,
3802,
262,
1336,
2393,
6978,
286,
534,
764,
69,
6024,
7807,
5647,
2393,
25,
705,
... | 2.452045 | 1,418 |
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import platform
import subprocess
import os
from os.path import expanduser
import re
import glob
import numpy as np
from argparse import ArgumentParser, REMAINDER
from argparse import RawTextHelpFormatter
import logging
import psutil
logging.basicConfig(level = logging.INFO,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
r"""
This is a script for launching PyTorch training and inference on Intel Xeon CPU with optimal configurations.
Now, single instance inference/training, multi-instance inference/training and distributed training
with oneCCL backend is enabled.
To get the peak performance on Intel Xeon CPU, the script optimizes the configuration of thread and memory
management. For thread management, the script configures thread affinity and the preload of Intel OMP library.
For memory management, it configures NUMA binding and preload optimized memory allocation library (e.g. tcmalloc, jemalloc).
**How to use this module:**
*** Single instance inference/training ***
1. Run single-instance inference or training on a single node with all CPU sockets.
::
>>> python -m intel_pytorch_extension.launch script.py args
2. Run single-instance inference or training on a single CPU socket.
::
>>> python -m intel_pytorch_extension.launch --socket_id 1 script.py args
*** Multi-instance inference ***
1. Multi-instance
By default, one instance per socket. if you want to set the instance numbers and core per instance,
--nintances and --ncore_per_instance should be set.
>>> python -m intel_pytorch_extension.launch --multi_instance python_script args
eg: on CLX8280 with 14 instance, 4 cores per instance
::
>>> python -m intel_pytorch_extension.launch --multi_instance --nintances 14 --ncore_per_instance 4 python_script args
*** Distributed Training ***
spawns up multiple distributed training processes on each of the training nodes. For intel_pytorch_extension, oneCCL
is used as the communication backend and MPI used to launch multi-proc. To get the better
performance, you should specify the different cores for oneCCL communication and computation
process seperately. This tool can automatically set these ENVs(such as I_MPI_PIN_DOMIN) and launch
multi-proc for you.
The utility can be used for single-node distributed training, in which one or
more processes per node will be spawned. It can also be used in
multi-node distributed training, by spawning up multiple processes on each node
for well-improved multi-node distributed training performance as well.
1. Single-Node multi-process distributed training
::
>>> python -m intel_pytorch_extension.launch --distributed python_script --arg1 --arg2 --arg3 and all other
arguments of your training script
2. Multi-Node multi-process distributed training: (e.g. two nodes)
rank 0: *(IP: 192.168.10.10, and has a free port: 295000)*
::
>>> python -m intel_pytorch_extension.launch --distributed --nproc_per_node=xxx
--nnodes=2 --hostfile hostfile python_sript --arg1 --arg2 --arg3
and all other arguments of your training script)
3. To look up what optional arguments this module offers:
::
>>> python -m intel_pytorch_extension.launch --help
*** Memory allocator ***
"--enable_tcmalloc" and "--enable_jemalloc" can be used to enable different memory allcator.
"""
def set_mpi_pin_domain(args):
    '''
    Build the value for I_MPI_PIN_DOMAIN, which assigns cores to MPI ranks.

    The first ccl_worker_count cores of every rank are reserved for oneCCL
    communication; the remaining cores of the rank are used for computation.
    Example (CascadeLake 8280, 2 ranks per node, ccl_worker_count=4):
        CCL_WORKER_COUNT=4
        CCL_WORKER_AFFINITY="0,1,2,3,28,29,30,31"
        I_MPI_PIN_DOMAIN=[0xffffff0,0xffffff0000000]
    '''
    cpu_info = CPUinfo()
    ranks_per_node = args.nproc_per_node
    core_count = cpu_info.physical_core_nums()
    if args.use_logical_core:
        core_count = cpu_info.logcal_core_nums()
    cores_per_rank = core_count // ranks_per_node
    # Computation cores per rank; never negative even if ccl_worker_count
    # exceeds the rank's core budget (then the mask is simply 0).
    compute_cores = max(cores_per_rank - args.ccl_worker_count, 0)
    masks = []
    for rank in range(ranks_per_node):
        first_compute_core = rank * cores_per_rank + args.ccl_worker_count
        # A contiguous run of `compute_cores` set bits starting at
        # `first_compute_core` — equivalent to OR-ing 1 << i bit by bit.
        mask = ((1 << compute_cores) - 1) << first_compute_core
        masks.append(hex(mask))
    return "[" + "".join(m + "," for m in masks) + "]"
def set_ccl_worker_affinity(args):
    '''
    Pin the oneCCL communication workers to dedicated cores.

    Computation and communication use different cores when the oneCCL
    backend is used for distributed training: the first ccl_worker_count
    cores of every rank are given to oneCCL via CCL_WORKER_AFFINITY.
    '''
    cpuinfo = CPUinfo()
    ppn = args.nproc_per_node
    total_cores = cpuinfo.physical_core_nums()
    if args.use_logical_core:
        total_cores = cpuinfo.logcal_core_nums()
    cores_per_rank = total_cores // ppn
    # First ccl_worker_count cores of each rank are the oneCCL workers.
    worker_cores = [str(proc * cores_per_rank + ccl_worker)
                    for proc in range(ppn)
                    for ccl_worker in range(args.ccl_worker_count)]
    # Bug fix: the original string concatenation left a trailing comma
    # ("0,1,2,3,"), which does not match the documented format
    # CCL_WORKER_AFFINITY="0,1,2,3,28,29,30,31"; join() avoids it.
    os.environ["CCL_WORKER_AFFINITY"] = ",".join(worker_cores)
def add_lib_preload(lib_type=None):
    '''
    Locate lib<lib_type>.so in the usual install prefixes and, if found,
    prepend it to the LD_PRELOAD environment variable.

    Searched locations: $CONDA_PREFIX/lib, ~/.local/lib, /usr/local/lib,
    /usr/local/lib64, /usr/lib, /usr/lib64 (in that order).
    Returns True when the library was found and registered, else False.
    '''
    search_dirs = []
    if "CONDA_PREFIX" in os.environ:
        search_dirs.append(os.environ["CONDA_PREFIX"] + "/lib/")
    search_dirs += ["{}/.local/lib/".format(expanduser("~")), "/usr/local/lib/",
                    "/usr/local/lib64/", "/usr/lib/", "/usr/lib64/"]
    for directory in search_dirs:
        candidates = glob.glob(directory + "lib" + lib_type + ".so")
        if not candidates:
            continue
        # Prepend so the requested library takes precedence over anything
        # that is already preloaded.
        existing = os.environ.get("LD_PRELOAD")
        if existing is None:
            os.environ["LD_PRELOAD"] = candidates[0]
        else:
            os.environ["LD_PRELOAD"] = candidates[0] + ":" + existing
        return True
    return False
def launch(args):
    '''
    Single-instance / multi-instance launcher.

    Decides which cores each instance may use (from --core_list, or from the
    detected physical/logical cores, honouring --socket_id and the
    latency/throughput presets), pins each instance with numactl unless
    disabled, spawns one subprocess per instance, and records the spawned
    command lines in the LAUNCH_CMD environment variable.

    Raises:
        subprocess.CalledProcessError: if any instance exits non-zero.
    '''
    processes = []
    cores = []
    cpuinfo = CPUinfo()
    if args.core_list:  # user specified the usable cores via --core_list
        cores = args.core_list.strip().split(",")
        if args.ncore_per_instance == -1:
            logger.error("please specify the '--ncore_per_instance' if you have pass the --core_list params")
            exit(-1)
        elif args.ninstances > 1 and args.ncore_per_instance * args.ninstances < len(cores):
            logger.warning("only first {} cores will be used, but you specify {} cores in core_list".format
                           (args.ncore_per_instance * args.ninstances, len(cores)))
        else:
            args.ninstances = len(cores) // args.ncore_per_instance
    else:
        # No explicit core list: detect cores, optionally restricted to one socket.
        if args.use_logical_core:
            if args.socket_id != -1:
                cores = cpuinfo.get_socket_logical_cores(args.socket_id)
            else:
                cores = cpuinfo.get_all_logical_cores()
        else:
            if args.socket_id != -1:
                cores = cpuinfo.get_socket_physical_cores(args.socket_id)
            else:
                cores = cpuinfo.get_all_physical_cores()
        # Derive ninstances / ncore_per_instance from whichever was given.
        if not args.multi_instance and args.ninstances == -1 and args.ncore_per_instance == -1:
            args.ninstances = 1
            args.ncore_per_instance = len(cores)
        elif args.multi_instance and args.ninstances == -1 and args.ncore_per_instance == -1:
            args.throughput_performance = True
        elif args.ncore_per_instance == -1 and args.ninstances != -1:
            args.ncore_per_instance = len(cores) // args.ninstances
        elif args.ncore_per_instance != -1 and args.ninstances == -1:
            args.ninstances = len(cores) // args.ncore_per_instance
        else:
            if args.ninstances * args.ncore_per_instance > len(cores):
                logger.error("Please make sure ninstances * ncore_per_instance <= total_cores")
                exit(-1)
        # Preset: 4 physical cores per instance, as many instances as fit.
        if args.latency_performance:
            if args.ncore_per_instance != 4:
                logger.warning("latency_performance is a special mode, args.ncore_per_instance can only be set to be 4")
                args.ncore_per_instance = 4
            cores = cpuinfo.get_all_physical_cores()
            args.ninstances = len(cores) // args.ncore_per_instance
        # Preset: one instance per socket over all physical cores.
        if args.throughput_performance:
            args.ninstances = cpuinfo.socket_nums()
            cores = cpuinfo.get_all_physical_cores()
            args.ncore_per_instance = len(cores) // args.ninstances

    os.environ["LAUNCH_CMD"] = "#"
    set_multi_thread_and_allcator(args)
    for i in range(args.ninstances):
        cmd = []
        cur_process_cores = ""
        if not args.disable_numactl:
            cmd = ["numactl"]
            # Give this instance its contiguous slice of the core list.
            for core in cores[i * args.ncore_per_instance:(i + 1) * args.ncore_per_instance]:
                cur_process_cores = cur_process_cores + str(core) + ","
            numa_params = "-C {} ".format(cur_process_cores[:-1])
            cmd.extend(numa_params.split())
        with_python = not args.no_python
        if with_python:
            cmd.append(sys.executable)
        if args.module:
            cmd.append("-m")
        cmd.append(args.program)
        cmd.extend(args.program_args)
        os.environ["LAUNCH_CMD"] += " ".join(cmd) + ",#"
        process = subprocess.Popen(cmd, env=os.environ)
        processes.append(process)
    os.environ["LAUNCH_CMD"] = os.environ["LAUNCH_CMD"][:-2]
    for process in processes:
        process.wait()
        if process.returncode != 0:
            # Bug fix: the original reported `cmd`, which after the spawn
            # loop always holds the LAST instance's command line; report the
            # failing process's own command instead.
            raise subprocess.CalledProcessError(returncode=process.returncode,
                                                cmd=process.args)
def mpi_dist_launch(args):
    '''
    Set ENVs and launch the MPI processes for distributed training.

    For multi-node runs, validates the hostfile (IPv4 format, the first
    address must belong to the current node, passwordless SSH must work),
    then configures the memory allocator and the oneCCL / Intel MPI
    environment (MASTER_ADDR/PORT, I_MPI_PIN_DOMAIN, OMP_NUM_THREADS,
    CCL_WORKER_*), optionally preloads iomp, and finally spawns
    mpiexec.hydra with the user's program.
    '''
    if args.nnodes > 1 and not os.path.exists(args.hostfile):
        raise ValueError("hostfile is necessary when you use multi-node distributed training,"
                         "Please create hostfile which include the ip list you used for distributed running")
    elif args.nnodes > 1:
        ipv4_addr_pattern = r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"
        ip_list = []
        with open(args.hostfile) as f:
            for line in f:
                line = line.strip().strip("\n")
                # Every hostfile line must be a plain IPv4 address.
                is_valid = re.match(ipv4_addr_pattern, line)
                if not is_valid:
                    logger.error("{} is not valid IPV4 address".format(line))
                    exit(-1)
                else:
                    ip_list.append(line)
        if len(ip_list) < args.nnodes:
            logger.error("The number of IP {} should greater than nnodes parameters {}".format(len(ip_list), args.nnodes))
            exit(-1)
        # The first IP must belong to the current node; it becomes MASTER_ADDR.
        master_check = False
        dic = psutil.net_if_addrs()
        for adapter in dic:
            snicList = dic[adapter]
            for snic in snicList:
                if snic.address == ip_list[0]:
                    master_check = True
        if not master_check:
            logger.error("MASTER_ADDR is not right. Please make sure the first ip {} in your hostfile is the current node".format(ip_list[0]))
            exit(-1)
        logger.info("Begin to validate the ip connect")
        args.master_addr = ip_list[0]
        for ip in ip_list[1:]:
            completed_process = subprocess.run("ssh -o PasswordAuthentication=no {} ':'".format(ip), shell=True)
            if completed_process.returncode != 0:
                # Bug fix: the original never called .format(ip), so the log
                # printed a literal "{}" instead of the failing address.
                logger.error("Passwordless SSH login to {} failed, please make sure you have setup SSH public key right".format(ip))
                exit(-1)
            else:
                logger.info("connection from master node {} to slave node {} is OK".format(args.master_addr, ip))
    set_memory_allocator(args)
    # set distributed related environmental variables
    os.environ["MASTER_ADDR"] = args.master_addr
    os.environ["MASTER_PORT"] = str(args.master_port)
    # Respect a user-provided pin domain; otherwise derive one.
    if "I_MPI_PIN_DOMAIN" not in os.environ:
        mpi_pin_domain = set_mpi_pin_domain(args)
    else:
        mpi_pin_domain = os.environ["I_MPI_PIN_DOMAIN"]
    cpuinfo = CPUinfo()
    ppn = args.nproc_per_node
    total_cores = len(cpuinfo.get_all_physical_cores())
    cores_per_rank = total_cores // ppn
    # Each rank's OpenMP threads run on the cores not taken by oneCCL workers.
    if "OMP_NUM_THREADS" not in os.environ:
        opm_num_threads = cores_per_rank - args.ccl_worker_count
    else:
        opm_num_threads = os.environ["OMP_NUM_THREADS"]
    os.environ["CCL_WORKER_COUNT"] = str(args.ccl_worker_count)
    if "CCL_WORKER_AFFINITY" not in os.environ:
        set_ccl_worker_affinity(args)
    if "CCL_ATL_TRANSPORT" not in os.environ:
        os.environ["CCL_ATL_TRANSPORT"] = "ofi"
    if args.enable_iomp:
        find_iomp = add_lib_preload(lib_type="iomp")
        if not find_iomp:
            logger.warning("Unable to find the {} library file lib{}.so in $CONDA_PREFIX/lib or /.local/lib/"
                           " or /usr/local/lib/ or /usr/local/lib64/ or /usr/lib or /usr/lib64 or "
                           "~/.local/lib/ so the LD_PRELOAD environment variable will not be set."
                           .format("iomp", "iomp", expanduser("~")))
        else:
            # Typo fix in the log message ("Enale" -> "Enable").
            logger.info("Enable iomp by set LD_PRELOAD")
    logger.info("MASTER_ADDR={}".format(args.master_addr))
    logger.info("MASTER_PORT={}".format(args.master_port))
    logger.info("I_MPI_PIN_DOMAIN={}".format(mpi_pin_domain))
    logger.info("OMP_NUM_THREADS={} ".format(opm_num_threads))
    logger.info("CCL_WORKER_COUNT={}".format(args.ccl_worker_count))
    logger.info("CCL_WORKER_AFFINITY={}".format(os.environ["CCL_WORKER_AFFINITY"]))
    os.environ["LAUNCH_CMD"] = "#"
    cmd = ['mpiexec.hydra']
    mpi_config = "-l -np {} -ppn {} -genv I_MPI_PIN_DOMAIN={} -genv OMP_NUM_THREADS={} ".format(args.nnodes*args.nproc_per_node,
                 args.nproc_per_node, mpi_pin_domain, opm_num_threads)
    mpi_config += args.more_mpi_parms
    if args.nnodes > 1:
        mpi_config += " -hostfile {}".format(args.hostfile)
    cmd.extend(mpi_config.split())
    with_python = not args.no_python
    if with_python:
        cmd.append(sys.executable)
        cmd.append("-u")
    if args.module:
        cmd.append("-m")
    cmd.append(args.program)
    cmd.extend(args.program_args)
    process = subprocess.Popen(cmd, env=os.environ)
    process.wait()
    os.environ["LAUNCH_CMD"] += " ".join(cmd) + ",#"
    os.environ["LAUNCH_CMD"] = os.environ["LAUNCH_CMD"][:-2]
def parse_args():
    """
    Helper function parsing the command line options.

    @retval argparse.Namespace with the parsed options
    """
    # RawTextHelpFormatter keeps the hand-formatted usage examples intact.
    parser = ArgumentParser(description="This is a script for launching PyTorch training and inference on Intel Xeon CPU "
                                        "with optimal configurations. Now, single instance inference/training, multi-instance "
                                        "inference/training and distributed training with oneCCL backend is enabled. "
                                        "To get the peak performance on Intel Xeon CPU, the script optimizes the configuration "
                                        "of thread and memory management. For thread management, the script configures thread "
                                        "affinity and the preload of Intel OMP library. For memory management, it configures "
                                        "NUMA binding and preload optimized memory allocation library (e.g. tcmalloc, jemalloc) "
                                        "\n################################# Basic usage ############################# \n"
                                        "\n 1. single instance\n"
                                        "\n   >>> python -m intel_pytorch_extension.launch python_script args \n"
                                        "\n2. multi-instance \n"
                                        "\n    >>> python -m intel_pytorch_extension.launch --multi_instance python_script args\n"
                                        "\n3. Single-Node multi-process distributed training\n"
                                        "\n    >>> python -m intel_pytorch_extension.launch --distributed  python_script args\n"
                                        "\n4. Multi-Node multi-process distributed training: (e.g. two nodes)\n"
                                        "\n   rank 0: *(IP: 192.168.10.10, and has a free port: 295000)*\n"
                                        "\n   >>> python -m intel_pytorch_extension.launch --distributed --nproc_per_node=2\n"
                                        "\n       --nnodes=2 --hostfile hostfile python_script args\n",
                            formatter_class=RawTextHelpFormatter)
    # Mode selection flags.
    parser.add_argument("--multi_instance", action='store_true', default=False,
                        help="Enable multi-instance, by default one instance per socket")
    parser.add_argument('--distributed', action='store_true', default=False,
                        help='Enable distributed training.')
    parser.add_argument("-m", "--module", default=False, action="store_true",
                        help="Changes each process to interpret the launch script "
                             "as a python module, executing with the same behavior as"
                             "'python -m'.")
    parser.add_argument("--no_python", default=False, action="store_true",
                        help="Do not prepend the --program script with \"python\" - just exec "
                             "it directly. Useful when the script is not a Python script.")
    # Option groups registered by helpers defined elsewhere in this file.
    add_memory_allocator_params(parser)
    add_kmp_iomp_params(parser)
    add_distributed_training_params(parser)
    add_multi_instance_params(parser)
    # positional
    parser.add_argument("program", type=str,
                        help="The full path to the proram/script to be launched. "
                             "followed by all the arguments for the script")
    # rest from the training program
    parser.add_argument('program_args', nargs=REMAINDER)
    return parser.parse_args()
# Script entry point.
# NOTE(review): main() is not visible in this part of the file — presumably
# defined above alongside parse_args(); confirm.
if __name__ == "__main__":
    main()
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
11,
28000,
1098,
62,
17201,
874,
198,
11748,
25064,
198,
11748,
3859,
198,
11748,
850,
14681,
198,
11748,
28686,
198,
6738,
28686,
13,
6978,
1330,
4292,
7220,... | 2.277315 | 8,045 |
from django.contrib import admin

from .models import Complaint, Notification

# Register the models on the default admin site with their ModelAdmin classes.
# NOTE(review): NotifAdmin and ComplaintAdmin are not defined in this part of
# the file — presumably declared above; confirm they exist before import time.
admin.site.register(Notification, NotifAdmin)
admin.site.register(Complaint, ComplaintAdmin)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
764,
27530,
1330,
20011,
2913,
11,
42808,
628,
628,
198,
28482,
13,
15654,
13,
30238,
7,
3673,
2649,
11,
1892,
361,
46787,
8,
198,
28482,
13,
15654,
13,
30238,
7,
38143,
... | 3.571429 | 49 |
from . import unittest
import pytest
from shapely.geometry import Point, LineString, Polygon, MultiPoint, \
GeometryCollection
from shapely.wkt import loads
from shapely.geos import TopologicalError
| [
6738,
764,
1330,
555,
715,
395,
198,
11748,
12972,
9288,
198,
6738,
5485,
306,
13,
469,
15748,
1330,
6252,
11,
6910,
10100,
11,
12280,
14520,
11,
15237,
12727,
11,
3467,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
22... | 2.746988 | 83 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import setuptools  # type: ignore

import streamer_binaries

# Command-line protocol: everything after a literal '--' is the list of
# platform-specific binary files to bundle into this wheel; everything before
# it is passed through to setuptools unchanged.
# NOTE(review): sys.argv.index('--') raises ValueError when '--' is absent,
# so the separator is mandatory — confirm this is the intended CLI contract.
separator_index = sys.argv.index('--')
platform_binaries = sys.argv[separator_index + 1:]
sys.argv = sys.argv[:separator_index]

setuptools.setup(
    name='shaka-streamer-binaries',
    version=streamer_binaries.__version__,
    author='Google',
    description='A package containing FFmpeg, FFprobe, and Shaka Packager static builds.',
    long_description=('An auxiliary package that provides platform-specific'
                      ' binaries used by Shaka Streamer.'),
    url='https://github.com/google/shaka-streamer/tree/master/binaries',
    packages=[streamer_binaries.__name__,],
    classifiers=[
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX :: Linux',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
    ],
    package_data={
        # Only add the corresponding platform specific binaries to the wheel.
        streamer_binaries.__name__: platform_binaries,
    }
)
2,
15069,
33448,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 3.223969 | 509 |
import argparse
import logging
import os
import sys
import time
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
import fsspec
import nvtabular as nvt
from nvtabular.io.shuffle import Shuffle
from nvtabular.ops import Categorify
from nvtabular.ops import Clip
from nvtabular.ops import FillMissing
from nvtabular.ops import Normalize
from nvtabular.utils import device_mem_size
import numpy as np
from typing import Dict, List, Union
def create_csv_dataset(
    data_paths,
    sep,
    recursive,
    col_dtypes,
    frac_size,
    client
):
    """Create an nvt.Dataset definition for CSV files.

    Args:
        data_paths: files and/or directories to scan for CSV files.
        sep: field separator used by the CSV files.
        recursive: when True, directories are scanned recursively.
        col_dtypes: mapping of column name -> dtype; its keys also define
            the column names passed to the reader.
        frac_size: fraction of device memory to use per partition.
        client: Dask client used for the distributed read.
    """
    fs_spec = fsspec.filesystem('file')
    # '**' also matches files in subdirectories; '*' only direct children.
    rec_symbol = '**' if recursive else '*'
    valid_paths = []
    for path in data_paths:
        try:
            if fs_spec.isfile(path):
                valid_paths.append(path)
            else:
                path = os.path.join(path, rec_symbol)
                for i in fs_spec.glob(path):
                    if fs_spec.isfile(i):
                        valid_paths.append(f'{i}')
        except FileNotFoundError as fnf_expt:
            print(fnf_expt)
            # Bug fix: this was a plain string literal, so the placeholder
            # "{path}" was printed verbatim; it must be an f-string.
            print(f'Incorrect path: {path}.')
        except OSError as os_err:
            print(os_err)
            print('Verify access to the bucket.')
    return nvt.Dataset(
        path_or_source=valid_paths,
        engine='csv',
        names=list(col_dtypes.keys()),
        sep=sep,
        dtypes=col_dtypes,
        part_mem_fraction=frac_size,
        client=client,
        assume_missing=True
    )
def convert_csv_to_parquet(
    output_path,
    dataset,
    output_files,
    shuffle=None
):
    """Convert a CSV dataset to parquet and write it out.

    Args:
        output_path: destination directory for the parquet files.
        dataset: nvt.Dataset to convert.
        output_files: number of parquet files to produce.
        shuffle: name of an nvtabular Shuffle member, or None/'None' for no
            shuffling.
    """
    if shuffle == 'None':
        shuffle = None
    else:
        try:
            shuffle = getattr(Shuffle, shuffle)
        # Bug fix: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt. Only the attribute lookup can fail here:
        # AttributeError for an unknown name, TypeError when shuffle is None.
        except (AttributeError, TypeError):
            print('Shuffle method not available. Using default.')
            shuffle = None
    dataset.to_parquet(
        output_path,
        shuffle=shuffle,
        output_files=output_files
    )
def create_criteo_nvt_workflow(client):
    """Create an nvt.Workflow with all the Criteo transformation steps."""
    # Columns definition: 13 continuous features I1..I13 and
    # 26 categorical features C1..C26.
    cont_names = ['I' + str(x) for x in range(1, 14)]
    cat_names = ['C' + str(x) for x in range(1, 27)]
    # Transformation pipeline.
    # Cap each categorical column's vocabulary at 10M entries.
    num_buckets = 10000000
    categorify_op = Categorify(max_size=num_buckets)
    cat_features = cat_names >> categorify_op
    # Continuous features: impute missing, clip negatives to 0, normalize.
    cont_features = cont_names >> FillMissing() >> Clip(
        min_value=0) >> Normalize()
    features = cat_features + cont_features + ['label']
    # Create and save workflow
    return nvt.Workflow(features, client)
def create_cluster(
    n_workers,
    device_limit_frac,
    device_pool_frac,
    memory_limit
):
    """Spin up a local CUDA Dask cluster and return a client bound to it.

    device_limit_frac / device_pool_frac are fractions of the total GPU
    memory used for, respectively, the per-worker device memory limit and
    the RMM allocation pool.
    """
    gpu_mem = device_mem_size()
    pool_bytes = int(device_pool_frac * gpu_mem)
    # RMM pool sizes must be a multiple of 256 bytes; round down.
    pool_bytes -= pool_bytes % 256
    cluster = LocalCUDACluster(
        n_workers=n_workers,
        device_memory_limit=int(device_limit_frac * gpu_mem),
        rmm_pool_size=pool_bytes,
        memory_limit=memory_limit
    )
    return Client(cluster)
def create_parquet_dataset(
    client,
    data_path,
    frac_size
):
    """Create an nvt.Dataset definition for the parquet files.

    Args:
        client: Dask client used for the distributed read.
        data_path: directory containing the '*.parquet' files.
        frac_size: fraction of device memory to use per partition.

    Raises:
        FileNotFoundError: when no parquet file is found under data_path.
    """
    fs = fsspec.filesystem('file')
    file_list = fs.glob(
        os.path.join(data_path, '*.parquet')
    )
    if not file_list:
        raise FileNotFoundError('Parquet file(s) not found')
    # Fix: the original rebuilt the list with single-argument
    # os.path.join(i), which is a no-op; use the glob result directly.
    return nvt.Dataset(
        file_list,
        engine='parquet',
        part_mem_fraction=frac_size,
        client=client
    )
def save_dataset(
    dataset,
    output_path,
    output_files,
    shuffle=None
):
    """Save a dataset as parquet files (plus a HugeCTR keyset) to a path.

    Args:
        dataset: nvt.Dataset to persist.
        output_path: destination directory.
        output_files: number of parquet files to produce.
        shuffle: name of an nvtabular Shuffle member, or None/'None' for no
            shuffling.
    """
    if shuffle == 'None':
        shuffle = None
    else:
        try:
            shuffle = getattr(Shuffle, shuffle)
        # Bug fix: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt. Only the attribute lookup can fail here:
        # AttributeError for an unknown name, TypeError when shuffle is None.
        except (AttributeError, TypeError):
            print('Shuffle method not available. Using default.')
            shuffle = None
    dataset.to_parquet(
        output_path=output_path,
        shuffle=shuffle,
        output_files=output_files,
        write_hugectr_keyset=True
    )
def get_criteo_col_dtypes() -> Dict[str, Union[str, np.int32]]:
    """Return the Criteo column-name -> dtype mapping.

    The label and the 13 integer features are int32; the 26 categorical
    columns hold hexadecimal strings ("hex") that are converted to int32
    later in the pipeline.
    """
    dtypes: Dict[str, Union[str, np.int32]] = {"label": np.int32}
    dtypes.update({"I" + str(i): np.int32 for i in range(1, 14)})
    dtypes.update({"C" + str(i): "hex" for i in range(1, 27)})
    return dtypes
# --------------------------------------------
# ---------- Convert CSV to Parquet ----------
# --------------------------------------------
# --------------------------------------------
# -------------- Analyse Dataset -------------
# --------------------------------------------
# --------------------------------------------
# -------- Transform Parquet Dataset ---------
# --------------------------------------------
def parse_args():
    """Parse the command-line arguments for the preprocessing tasks.

    Every flag is optional so each task (--task convert/analyse/transform)
    can supply only the options it needs.
    """
    parser = argparse.ArgumentParser()
    # (flag, extra keyword arguments) in the original registration order.
    arg_specs = [
        ('--task', dict(type=str)),
        ('--csv_data_path', dict(nargs='+')),
        ('--parquet_data_path', dict(type=str)),
        ('--output_path', dict(type=str)),
        ('--output_files', dict(type=int)),
        ('--workflow_path', dict(type=str)),
        ('--n_workers', dict(type=int)),
        ('--sep', dict(type=str)),
        ('--frac_size', dict(type=float, default=0.10)),
        ('--memory_limit', dict(type=int, default=100_000_000_000)),
        ('--device_limit_frac', dict(type=float, default=0.60)),
        ('--device_pool_frac', dict(type=float, default=0.90)),
    ]
    for flag, options in arg_specs:
        parser.add_argument(flag, required=False, **options)
    return parser.parse_args()
if __name__ == '__main__':
    # Log to stdout with timestamps so task timing is visible in job logs.
    logging.basicConfig(format='%(asctime)s - %(message)s',
                        level=logging.INFO,
                        datefmt='%d-%m-%y %H:%M:%S',
                        stream=sys.stdout)
    parsed_args = parse_args()
    start_time = time.time()
    logging.info('Timing task')
    # Dispatch on --task.
    # NOTE(review): main_convert / main_analyse / main_transform are not
    # defined in this part of the file — presumably defined above near the
    # task banners; confirm before running.
    if parsed_args.task == 'convert':
        main_convert(parsed_args)
    elif parsed_args.task == 'analyse':
        main_analyse(parsed_args)
    elif parsed_args.task == 'transform':
        main_transform(parsed_args)
    end_time = time.time()
    elapsed_time = end_time - start_time
    logging.info('Task completed. Elapsed time: %s', elapsed_time)
| [
11748,
1822,
29572,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
640,
198,
198,
6738,
288,
2093,
13,
17080,
6169,
1330,
20985,
198,
6738,
288,
2093,
62,
66,
15339,
1330,
10714,
34,
8322,
2246,
48375,
198,
11748,
... | 2.317742 | 3,100 |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 19 10:23:29 2021
@author: caldwell ©
"""
import pygame as pg
from random import choice
# from pprint import pprint
## --- Game parameters ------------------------------
# play against the AI (True) or player-vs-player (False)
MODE_IA = True
# size of the game grid (width, height)
TAILLE_GRILLE = (3, 2)
# window size at start-up (width, height)
TAILLE_ECRAN = (1080, 720)
# recursion depth limit for the AI's search
MAX_PROFONDEUR = 5
## ----------------------------------------------------
## the colours (RGB)
# https://coolors.co/00d9ff-ff1f1f-67697c-253d5b-dbd053 (links out of date)
COUL_FOND = (37, 61, 91)
COUL_CERCLE_JONCTION = (219, 208, 83)
COUL_SEGMENTS = (103, 105, 124)
COUL_SEGMENT_HOVER = (128, 131, 158)
COUL_BLEU = (0, 217, 255)
COUL_ROUGE = (255, 56, 56)
class Grille:
    """
    Represents the game grid (dots-and-boxes board).
    ----
    taille (tuple): grid size (width, height)
    © Raphaël
    """
    # NOTE(review): no __init__ is visible in this chunk, yet copie() calls
    # Grille((w, h), copie=...) and setup() reads self.largeur/self.hauteur —
    # the constructor presumably exists elsewhere; confirm.

    def setup(self):
        """
        Initialise the grid: current player, turn counter, segments, squares.
        """
        self.joueur_actuel = 0 # 0 = blue, 1 = red
        self.nb_tour = 0 # number of turns since the start of the game
        ## Creation of the segments
        # per row there is always one segment more than the width,
        # hence the +1 (and of course the same for the columns and the height)
        # to speed up access to the segments:
        # for every segment index we can obtain its location
        # inside self.segments: (row-or-column, rank, position in the rank)
        self.table_acces = {}
        ind_segment = 0
        # 1st step, add the rows (left-right)
        lignes = []
        for i in range(self.hauteur):
            rang_indiv = []
            for j in range(self.largeur + 1):
                rang_indiv.append(None)
                self.table_acces[ind_segment] = (0, i, j)
                ind_segment += 1
            lignes.append(rang_indiv)
        # 2nd step, add the columns (top-bottom)
        colones = []
        for i in range(self.largeur):
            rang_indiv = []
            for j in range(self.hauteur + 1):
                rang_indiv.append(None)
                self.table_acces[ind_segment] = (1, i, j)
                ind_segment += 1
            colones.append(rang_indiv)
        # 3rd step, put everything together
        self.segments = [lignes, colones]
        ## Creation of the squares
        # a square is nothing more than 4 segments related to each other
        # compute in advance the index of the last "vertical" segment
        dernier_vertical = self.largeur + (self.largeur + 1) * (self.hauteur - 1)
        self.carres = []
        for j in range(self.hauteur):
            for i in range(self.largeur):
                carre = []
                # add the two vertical segments
                # (see below to understand how it works, except that this
                # time the index offset happens at row changes)
                carre.append(j * (self.largeur + 1) + i)
                carre.append(j * (self.largeur + 1) + i + 1)
                # for the two horizontal ones we first need to know the
                # index of the last horizontal segment; then, as above,
                # when changing column we must "add" the number of indices
                # per column (= height + 1)
                carre.append(dernier_vertical + j + i * (self.hauteur + 1) + 1)
                carre.append(dernier_vertical + j + i * (self.hauteur + 1) + 2)
                self.carres.append(carre)
        # to know who won each square
        self.carres_gagnes = [None] * len(self.carres)

    def reset(self):
        """
        Reset the grid to its initial state.
        """
        self.setup()

    def copie(self):
        """
        Produce a copy of the grid and return it.
        """
        grille = Grille((self.largeur, self.hauteur), copie=self)
        return grille

    def changer_joueur(self):
        """
        Switch to the other player (and count a full turn after blue).
        """
        if self.joueur_actuel == 0:
            self.joueur_actuel = 1
            self.nb_tour += 1
        else:
            self.joueur_actuel = 0

    def get_segment(self, indice):
        """
        Return the content of the segment at the given index.
        ----
        indice (int): the segment index
        """
        # if this raises, the index is invalid
        orientation, rang, pos_rang = self.table_acces[indice]
        return self.segments[orientation][rang][pos_rang]

    def set_segment(self, indice, couleur):
        """
        Change the colour of the segment at the given index.
        ----
        indice (int): the segment index
        couleur: the player marker to store (0 blue, 1 red)
        """
        # if this raises, the index is invalid
        orientation, rang, pos_rang = self.table_acces[indice]
        self.segments[orientation][rang][pos_rang] = couleur

    def get_carres(self, ind_segment):
        """
        Return the indices of every square that contains this segment.
        """
        carres = []
        for ind, carre in enumerate(self.carres):
            if ind_segment in carre:
                carres.append(ind)
        if len(carres) == 0:
            raise IndexError(f"Indice invalide ({ind_segment})")
        return carres

    def carre_rempli(self, ind_carre):
        """
        Check whether a square is complete (all 4 segments are coloured,
        i.e. different from None).
        """
        carre = self.carres[ind_carre]
        for seg in carre:
            if self.get_segment(seg) is None:
                # the square is not complete
                return False
        return True

    def score_detaille(self):
        """
        Compute each player's score; returns (blue, red).
        """
        bleu = 0
        rouge = 0
        for carre in self.carres_gagnes:
            if carre == 0:
                bleu += 1
            elif carre == 1:
                rouge += 1
        return bleu, rouge

    def calculer_score(self):
        """
        Compute the score of the game.
        A score < 0 means blue has the advantage, otherwise red does.
        """
        bleu, rouge = self.score_detaille()
        return rouge - bleu

    def partie_finie(self):
        """
        Tell whether the game is over (no free segments left).
        """
        for ind, chemin in self.table_acces.items():
            orientation, rang, pos_rang = chemin
            seg = self.segments[orientation][rang][pos_rang]
            if seg is None:
                return False
        return True # no "None" was found

    def coups_possibles_ia(self):
        """
        Return the indices of every free segment.
        """
        segments_libres = []
        for ind, chemin in self.table_acces.items():
            orientation, rang, pos_rang = chemin
            seg = self.segments[orientation][rang][pos_rang]
            if seg is None:
                segments_libres.append(ind)
        return segments_libres

    def calcul_coup_ia(self, profondeur=0):
        """
        Compute the best move for the AI (depth-limited minimax).
        Returns (score, segment_index_or_None).
        """
        # exit condition
        if self.partie_finie() or profondeur == MAX_PROFONDEUR:
            return self.calculer_score(), None
        ## the minimax
        choix_coups = []
        for segment in self.coups_possibles_ia():
            # copy the current grid
            copie_grille = self.copie()
            # play a move and evaluate the best score it can give us
            # depending on the following moves
            copie_grille.jouer_coup(segment)
            score, _ = copie_grille.calcul_coup_ia(profondeur + 1)
            choix_coups.append((score, segment))
        # choose the best move according to the current player:
        # blue wants the lowest possible score
        # and red wants the opposite
        if self.joueur_actuel == 0: # blue
            meilleur_choix = min(choix_coups)
        else: # red
            meilleur_choix = max(choix_coups)
        return meilleur_choix

    def jouer_coup_ia(self):
        """
        Play the best possible move, so that humans can psychologically
        prepare to be overthrown and dominated by the superior life forms
        that AIs are.
        """
        # if it is the first turn and the grid is big enough, we can
        # afford to play a random move
        if self.nb_tour == 1 and (self.largeur, self.hauteur) >= (2, 2):
            choix = choice(self.coups_possibles_ia())
            self.jouer_coup(choix)
            return
        # the loop makes sure the bot plays a 2nd time if it has just
        # captured a square
        while self.joueur_actuel == 1 and not self.partie_finie(): # hard coded for red...
            _, meilleur_coup = self.calcul_coup_ia()
            if meilleur_coup is not None: # happens at the end of the game
                self.jouer_coup(meilleur_coup)

    def coup_valide(self, ind_segment):
        """
        Check whether a move is valid (nobody has played it before).
        """
        return self.get_segment(ind_segment) is None

    def jouer_coup(self, ind_segment):
        """
        Play a move during a game.
        The move must already be valid (tested with self.coup_valide).
        """
        # change the segment
        self.set_segment(ind_segment, self.joueur_actuel)
        ## check whether a square has been won
        changement = True
        carres = self.get_carres(ind_segment)
        for carre in carres:
            # check whether the player's move completes a square; in that
            # case they win it and may play again
            if self.carre_rempli(carre):
                self.carres_gagnes[carre] = self.joueur_actuel
                changement = False
        # and finally we can switch players
        if changement:
            self.changer_joueur()
class JeuPipopipette:
"""
L'interface graphique du jeu.
"""
    def maj_tailles(self, taille_ecran):
        """
        Create or update the size-related variables (segment lengths,
        circle radii, grid origin, font) from the screen size.
        """
        # screen size
        self.largeur, self.hauteur = taille_ecran
        # the smallest screen side (since it is used as a numerator)
        cote_min = min(self.largeur, self.hauteur)
        # the largest grid side (since it is used as a denominator)
        cote_max = max(self.grille.largeur, self.grille.hauteur)
        # the segments
        self.long_segment = cote_min / (1.2 * cote_max)
        # self.long_segment = self.hauteur / 6
        self.larg_segment = round(self.long_segment / 8)
        # the circles
        self.rayon_cercle = round(self.long_segment / 3.5)
        self.rayon_cercle_jonction = round(self.long_segment / 5.5)
        # compute the position of the grid's top-left corner, to center it.
        # drawing the grid starts from there
        x = self.largeur / 2 - self.grille.largeur * self.long_segment / 2
        y = self.hauteur / 2 - self.grille.hauteur * self.long_segment / 2
        self.depart_grille = [x, y]
        # text
        self.police = pg.font.SysFont("Impact", round(cote_min / 12))
    def get_couleur(self, objet, ind=None):
        """
        Return the colour to use for a given object (segment/square).
        0 -> blue, 1 -> red, otherwise a free segment (hover-aware).
        """
        if objet == 0: # blue
            couleur = COUL_BLEU
        elif objet == 1:
            couleur = COUL_ROUGE
        else:
            # free segment: highlight it when it is the hovered one
            if self.segment_hover == ind:
                couleur = COUL_SEGMENT_HOVER
            else:
                couleur = COUL_SEGMENTS
        return couleur
    def dessiner_grille(self):
        """
        Draw the game grid: segments, captured squares, junction circles.
        Also rebuilds self.rects_segment for mouse hit-testing.
        """
        self.rects_segment.clear() # remove?
        lignes, colones = self.grille.segments
        ## Drawing the segments -------------------------------------------
        depart_l = self.depart_grille.copy()
        depart_c = self.depart_grille.copy()
        indice_segment = 0 # for the hover highlight
        # draw the vertical lines
        for rangee in lignes:
            for ind_seg, segment in enumerate(rangee):
                couleur = self.get_couleur(segment, indice_segment)
                indice_segment += 1
                rect = pg.draw.line(
                    self.surf, couleur,
                    (depart_l[0] + (self.long_segment * ind_seg), depart_l[1]),
                    (depart_l[0] + (self.long_segment * ind_seg), depart_l[1] + self.long_segment),
                    self.larg_segment
                )
                # if it is empty, no need to add the rects
                # if not self.rects_segment:
                self.rects_segment.append(rect)
            depart_l[1] += self.long_segment
        # draw the horizontal lines
        for rangee in colones:
            for ind_seg, segment in enumerate(rangee):
                couleur = self.get_couleur(segment, indice_segment)
                indice_segment += 1
                rect = pg.draw.line(
                    self.surf, couleur,
                    (depart_c[0], depart_c[1] + (self.long_segment * ind_seg)),
                    (depart_c[0] + self.long_segment, depart_c[1] + (self.long_segment * ind_seg)),
                    self.larg_segment
                )
                # if not self.rects_segment:
                self.rects_segment.append(rect)
            depart_c[0] += self.long_segment
        ## Filling the captured squares ----------------------------------
        ind = 0
        for j in range(self.grille.hauteur):
            for i in range(self.grille.largeur):
                carre = self.grille.carres_gagnes[ind]
                if carre is not None:
                    couleur = self.get_couleur(carre)
                    # center of the square, in pixels
                    topleft = (
                        self.depart_grille[0] + self.long_segment * (i + 0.5),
                        self.depart_grille[1] + self.long_segment * (j + 0.5)
                    )
                    pg.draw.circle(
                        self.surf, couleur,
                        (round(topleft[0]), round(topleft[1])),
                        self.rayon_cercle
                    )
                ind += 1
        ## Drawing the circles at the segment junctions ------------------
        origine = self.depart_grille.copy()
        for i in range(self.grille.largeur + 1):
            origine[1] = self.depart_grille[1] # reset
            for j in range(self.grille.hauteur + 1):
                pg.draw.circle(
                    self.surf, COUL_CERCLE_JONCTION,
                    (round(origine[0]), round(origine[1])),
                    self.rayon_cercle_jonction
                )
                origine[1] += self.long_segment
            origine[0] += self.long_segment
def dessiner_hud(self):
"""
Affiche les scores et le joueur actuel.
"""
bleu, rouge = self.grille.score_detaille()
# pour connaitre à qui le tour
if self.grille.joueur_actuel: # rouge
txt_rouge = f"[{rouge}]"
txt_bleu = f" {bleu} "
else: # bleu
txt_rouge = f" {rouge} "
txt_bleu = f"[{bleu}]"
# creation des textes de score
score_b = self.police.render(txt_bleu, True, COUL_BLEU)
score_r = self.police.render(txt_rouge, True, COUL_ROUGE)
# le "panneau" pour tout mettre ensemble
largeur = score_b.get_width() + score_r.get_width()
panneau = pg.Surface((largeur, score_b.get_height()))
panneau.fill(COUL_FOND)
panneau.blit(score_b, (0, 0))
panneau.blit(score_r, (largeur - score_r.get_width(), 0))
# on le centre
rect = panneau.get_rect(midtop=(self.largeur / 2, 0))
self.surf.blit(panneau, rect)
def deter_segment(self, pos):
"""
Determine si il y a un segment a la position donnée.
Renvoie None si il y a aucun segment.
----
pos (tuple): la position a verifier
"""
i = 0
for rect in self.rects_segment:
if rect.collidepoint(pos):
return i
i += 1
return None
def boucle_jeu(self):
"""
La boucle du jeu.
"""
clock = pg.time.Clock()
run = True
while run:
## gestion des evenements
for event in pg.event.get():
if event.type == pg.QUIT:
run = False
# on change la taille de l'écran
elif event.type == pg.VIDEORESIZE:
self.maj_tailles(event.dict["size"])
elif event.type == 32778: # la meme chose mais bug
self.maj_tailles((event.x, event.y))
# on enfonce une touche
elif event.type == pg.KEYDOWN:
if event.key == pg.K_r:
self.grille.reset()
# clic
elif event.type == pg.MOUSEBUTTONDOWN:
if pg.mouse.get_pressed() == (1,0,0):
pos = pg.mouse.get_pos()
ind_seg = self.deter_segment(pos)
# si on a cliqué sur un segment valide et pas dans le vide
# et que le coup sois valide
if ind_seg is not None and self.grille.coup_valide(ind_seg):
self.grille.jouer_coup(ind_seg)
if MODE_IA:
pg.mouse.set_cursor(pg.SYSTEM_CURSOR_WAIT)
self.grille.jouer_coup_ia()
pg.mouse.set_cursor(pg.SYSTEM_CURSOR_ARROW)
# pour mettre les segments en surbrillance
pos = pg.mouse.get_pos()
self.segment_hover = self.deter_segment(pos)
## Maj de l'ecran...
self.surf.fill(COUL_FOND)
self.dessiner_grille()
self.dessiner_hud()
pg.display.flip()
pg.display.set_caption(f"PIPOPIPETTE: IA vs HUMAIN - {round(clock.get_fps())} FPS")
clock.tick(60)
pg.quit()
def main():
"""
La foncion qui lance tout !
"""
jeu = JeuPipopipette()
jeu.boucle_jeu()
main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
19480,
1526,
678,
838,
25,
1954,
25,
1959,
33448,
198,
198,
31,
9800,
25,
269,
1940,
4053,
10673,
198,
37811,
198,
198,
11748,
12972,
6057,
355,
... | 1.909774 | 9,720 |
from lifxlan import *
from random import randint, betavariate
from time import sleep
if __name__=="__main__":
main()
| [
6738,
3868,
87,
9620,
1330,
1635,
198,
6738,
4738,
1330,
43720,
600,
11,
731,
615,
2743,
378,
198,
6738,
640,
1330,
3993,
198,
198,
361,
11593,
3672,
834,
855,
1,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
198
] | 2.97561 | 41 |
#!/usr/bin/python3
import sys;
import cgicommon;
import urllib.request, urllib.parse, urllib.error;
import cgi;
import cgitb;
import os;
import re;
import random;
CONFLICT_STRATEGY_FORCE = 0
CONFLICT_STRATEGY_DO_NOT_EMBLANKIFY = 1
CONFLICT_STRATEGY_ONLY_FILL_BLANKS = 2
CONFLICT_STRATEGY_DISCARD = 3
cgitb.enable();
cgicommon.writeln("Content-Type: text/html; charset=utf-8");
cgicommon.writeln("");
baseurl = "/cgi-bin/gameslist.py";
form = cgi.FieldStorage();
tourney_name = form.getfirst("tourney");
tourney = None;
request_method = os.environ.get("REQUEST_METHOD", "");
cgicommon.set_module_path();
import countdowntourney;
cgicommon.print_html_head("Games: " + str(tourney_name));
cgicommon.writeln("<body>");
cgicommon.assert_client_from_localhost()
if tourney_name is None:
cgicommon.writeln("<h1>No tourney specified</h1>");
cgicommon.writeln("<p><a href=\"/cgi-bin/home.py\">Home</a></p>");
cgicommon.writeln("</body></html>");
sys.exit(0);
try:
tourney = countdowntourney.tourney_open(tourney_name, cgicommon.dbdir);
cgicommon.show_sidebar(tourney);
cgicommon.writeln("<div class=\"mainpane\">");
# If a round is selected, show the scores for that round, in editable
# boxes so they can be changed.
round_no = None;
if "round" in form:
try:
round_no = int(form.getfirst("round"));
except ValueError:
cgicommon.writeln("<h1>Invalid round number</h1>");
cgicommon.writeln("<p>\"%s\" is not a valid round number.</p>");
if round_no is not None:
games = tourney.get_games(round_no=round_no);
rounds = tourney.get_rounds();
round_name = None;
last_modified_element = None;
for r in rounds:
if r["num"] == round_no:
round_name = r.get("name", None);
break;
if not round_name:
round_name = "Round " + str(round_no);
remarks = dict();
cgicommon.writeln("<h1>Score editor: %s</h1>" % cgicommon.escape(round_name));
cgicommon.writeln("<p>");
cgicommon.writeln("<a href=\"/cgi-bin/fixtureedit.py?tourney=%s&round=%d\">Edit fixtures</a>" % (urllib.parse.quote_plus(tourney_name), round_no));
cgicommon.writeln("</p>");
cgicommon.writeln("<script>")
cgicommon.writeln("""function set_unsaved_data_warning() {
if (window.onbeforeunload == null) {
window.onbeforeunload = function() {
return 'You have modified scores on this page and not saved them.';
};
}
}
function unset_unsaved_data_warning() {
window.onbeforeunload = null;
}
function score_modified(control_name) {
document.getElementById('lastmodified').value = control_name;
document.getElementById(control_name).style.backgroundColor = '#ffffcc';
set_unsaved_data_warning();
}
""");
cgicommon.writeln("</script>")
conflict_resolution = False
conflict_strategy = int_or_none(form.getfirst("conflictstrategy"))
stored_revision_no = tourney.get_game_table_revision_no(round_no)
stored_revision_timestamp = tourney.get_game_table_revision_time(round_no, stored_revision_no)
if "save" in form or "randomresults" in form:
# If the user clicked Save, then save the new scores to the
# database.
last_modified_element = form.getfirst("lastmodified");
if last_modified_element:
if not re.match("^gamescore_[0-9]+_[0-9]+$", last_modified_element):
last_modified_element = None;
submitted_revision_no = int_or_none(form.getfirst("revision"))
if "randomresults" not in form and submitted_revision_no < stored_revision_no:
# One or more games in this round have changed since the user
# last refreshed the page. Ask the user how we should cope with
# this.
cgicommon.show_warning_box("<p>The results for this round have been modified in another window since you last refreshed this page.</p>" +
"<p>The current state of the games is shown below, with your changes on the right-hand side.</p>" +
"<p>What do you want to do with your changes? Select one of the options below, then Resolve Conflicts.</p>");
show_conflict_resolution_box(tourney, games, round_no, stored_revision_no, stored_revision_timestamp, form)
conflict_resolution = True
else:
for g in games:
if "randomresults" in form and not g.is_complete():
set_random_score(g, 15 if int_or_none(form.getfirst("scrabbleresults")) else 9, int_or_none(form.getfirst("scrabbleresults")));
else:
score = form.getfirst("gamescore_%d_%d" % (g.round_no, g.seq));
parsed_score = parse_score(score)
if parsed_score is None:
remarks[(g.round_no, g.seq)] = "Invalid score: %s" % (score)
else:
apply_change = True
if conflict_strategy == CONFLICT_STRATEGY_DISCARD:
# Don't overwrite any changes
apply_change = False
elif conflict_strategy == CONFLICT_STRATEGY_ONLY_FILL_BLANKS:
# Prefer our changes only when that would fill
# in an unplayed game with a result
if g.is_complete():
apply_change = False
elif conflict_strategy == CONFLICT_STRATEGY_DO_NOT_EMBLANKIFY:
# Prefer our changes except where that would
# replace a filled-in result with a blank one
if parsed_score[0] is None or parsed_score[1] is None:
apply_change = False
# Otherwise, always prefer our changes
if apply_change:
g.set_score(parsed_score[0], parsed_score[1], parsed_score[2])
tourney.merge_games(games);
stored_revision_no = tourney.get_game_table_revision_no(round_no)
num_divisions = tourney.get_num_divisions()
cgicommon.writeln("<div class=\"scorestable\">");
# If we've put up the conflict resolution form, then what we print here
# isn't a form but an ordinary table showing the current results and
# the user's submission.
# The usual case is not conflict_resolution, where we put the game list
# form here.
if not conflict_resolution:
cgicommon.writeln("<form method=\"POST\" action=\"%s?tourney=%s&round=%d\">" % (baseurl, urllib.parse.quote_plus(tourney_name), round_no));
cgicommon.writeln("<input type=\"hidden\" name=\"tourney\" value=\"%s\" />" % cgicommon.escape(tourney_name, True));
cgicommon.writeln("<input type=\"hidden\" name=\"round\" value=\"%d\" />" % round_no);
cgicommon.writeln("<input type=\"hidden\" id=\"lastmodified\" name=\"lastmodified\" value=\"\" />");
cgicommon.writeln("<input type=\"hidden\" name=\"revision\" value=\"%d\" />" % (stored_revision_no))
for div_index in range(num_divisions):
if num_divisions > 1:
cgicommon.writeln("<h2>%s</h2>" % (cgicommon.escape(tourney.get_division_name(div_index))))
if tourney.are_players_assigned_teams():
team_scores = tourney.get_team_scores()
cgicommon.show_team_score_table(team_scores)
cgicommon.writeln('<br />')
div_games = tourney.get_games(round_no=round_no, only_players_known=False, division=div_index);
if conflict_resolution:
for g in games:
score = form.getfirst("gamescore_%d_%d" % (g.round_no, g.seq));
parsed_score = parse_score(score)
if parsed_score is None:
remarks[(g.round_no, g.seq)] = "Invalid score: %s" % (score)
else:
# If the score the user has entered is different
# from the score in the table, display the
# user's submitted score in the Remarks column.
if not ((g.s1 is None and g.s2 is None and parsed_score[0] is None and parsed_score[1] is None) or (g.s1 == parsed_score[0] and g.s2 == parsed_score[1] and g.tb == parsed_score[2]) ):
player_names = g.get_player_names()
if parsed_score[0] is None or parsed_score[1] is None:
remarks[(g.round_no, g.seq)] = "%s - %s" % (player_names[0], player_names[1])
else:
remarks[(g.round_no, g.seq)] = "%s %d%s - %d%s %s" % (
player_names[0],
parsed_score[0],
"*" if (parsed_score[0] > parsed_score[1] and parsed_score[2]) else "",
parsed_score[1],
"*" if (parsed_score[1] >= parsed_score[0] and parsed_score[2]) else "",
player_names[1])
cgicommon.show_games_as_html_table(div_games, editable=False,
remarks=remarks, include_round_column=False,
round_namer=None,
player_to_link=lambda x : cgicommon.player_to_link(x, tourney.get_name(), False, True),
remarks_heading="Your submission")
else:
cgicommon.show_games_as_html_table(div_games, editable=True,
remarks=remarks, include_round_column=False,
round_namer=None,
player_to_link=lambda x : cgicommon.player_to_link(x, tourney.get_name(), False, True))
if not conflict_resolution:
cgicommon.writeln("<p><input type=\"submit\" name=\"save\" value=\"Save\" onclick=\"unset_unsaved_data_warning();\" /></p>");
if form.getfirst("showrandomresultsbutton"):
cgicommon.writeln("<p><input type=\"submit\" name=\"randomresults\" value=\"Random Results\" /></p>");
elif form.getfirst("showscrabbleresultsbutton"):
cgicommon.writeln("<p><input type=\"submit\" name=\"randomresults\" value=\"Random Scrabble-ish Results\" /></p>");
cgicommon.writeln("<p><input type=\"hidden\" name=\"scrabbleresults\" value=\"1\" /></p>");
cgicommon.writeln("</form>")
focus = None;
if last_modified_element:
m = re.match("^gamescore_([0-9]+)_([0-9]+)$", last_modified_element)
if m:
lastmod_index = (int(m.group(1)), int(m.group(2)));
# The box with focus should be the next unfilled box equal
# to or after the one that was last modified. If they're all
# filled, put the focus on the first box.
for i in range(0, len(games)):
if games[i].round_no == lastmod_index[0] and games[i].seq == lastmod_index[1]:
# We've found the control we last modified;
for j in range(0, len(games)):
g = games[(i + j) % len(games)]
if not g.is_complete():
focus = (g.round_no, g.seq)
break
break
if games:
if focus is None:
focus = (games[0].round_no, games[0].seq);
control_with_focus = "gamescore_%d_%d" % (focus[0], focus[1]);
cgicommon.writeln("<script>")
cgicommon.writeln("document.getElementById('" + control_with_focus + "').focus();")
cgicommon.writeln("</script>")
cgicommon.writeln("</div>"); #scorestable
cgicommon.writeln("</div>"); #mainpane
except countdowntourney.TourneyException as e:
cgicommon.show_tourney_exception(e);
cgicommon.writeln("</body>");
cgicommon.writeln("</html>");
sys.exit(0);
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
25064,
26,
198,
11748,
269,
70,
291,
2002,
261,
26,
198,
11748,
2956,
297,
571,
13,
25927,
11,
2956,
297,
571,
13,
29572,
11,
2956,
297,
571,
13,
18224,
26,
198,
11748,
26... | 1.98 | 6,400 |
#!/usr/bin/python3
import argparse
import logging
from .problem import Problem
from .contest import Contest
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
11748,
1822,
29572,
198,
11748,
18931,
198,
6738,
764,
45573,
1330,
20647,
198,
6738,
764,
3642,
395,
1330,
27297,
198
] | 3.724138 | 29 |
import math
import itertools
from collections import defaultdict
flatten_iter = itertools.chain.from_iterable
# https://stackoverflow.com/a/6909532/5538273
| [
11748,
10688,
198,
11748,
340,
861,
10141,
198,
6738,
17268,
1330,
4277,
11600,
198,
198,
2704,
41769,
62,
2676,
796,
340,
861,
10141,
13,
7983,
13,
6738,
62,
2676,
540,
628,
198,
2,
3740,
1378,
25558,
2502,
11125,
13,
785,
14,
64,
... | 3.037736 | 53 |
from flask import Flask, render_template, session, jsonify
from flask_session import Session # new style
'''
Möchte man z.B.: Objekte in der Session speichern, gibt es bei der normalen Verwendung ein Problem,
weil Flask die Session Info als JSON-String (verschlüsselt!) in einem Cookie speichert.
Es gäbe die Möglichkeit, das Schreiben als JSON selbst zu implementieren bzw. das Objekt o mit o.__dict__ in ein Dictionary
zu übertragen, aber die hier verwendete Erweiterung flask-session macht es einfacher. Hier werden die Session-Daten im Dateisystem des Servers gespeichert.
pip3 install flask_session
'''
app = Flask(__name__)
SESSION_TYPE = 'filesystem'
app.config.from_object(__name__)
Session(app)
#Session darf nur innerhalb von den Methoden verwendet werden, sonst fehlt der Kontext!
@app.route('/question')
@app.route('/question/<int:correct>')
if __name__ == '__main__':
app.run(debug=True) | [
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
6246,
11,
33918,
1958,
198,
6738,
42903,
62,
29891,
1330,
23575,
220,
1303,
649,
3918,
198,
7061,
6,
198,
44,
9101,
354,
660,
582,
1976,
13,
33,
11207,
38764,
988,
660,
287,
4587,
2... | 2.783537 | 328 |
#***********************************************************************#
# Copyright (C) 2010-2012 Tomas Tinoco De Rubira #
# #
# This file is part of CVXPY #
# #
# CVXPY is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# CVXPY is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
#***********************************************************************#
import numpy as np
from .defs import *
from .scalars import cvxpy_obj
from .arrays import cvxpy_array
from .arrays import cvxpy_matrix
from functools import reduce
#***********************************************************************#
# Class definition: cvxpy_constr #
#***********************************************************************#
#***********************************************************************#
# Class definition: cvxpy_list #
#***********************************************************************#
#***********************************************************************#
# Function definition: compare #
#***********************************************************************#
def compare(obj1,constraint_type,obj2):
"""
Compares obj1 with obj2.
:param obj1: Left hand side obejct.
:param constraint_type: Keyword (See cvxpy.defs.).
:param obj2: Right hand side object.
"""
# Both scalars
if ((np.isscalar(obj1) or type(obj1).__name__ in SCALAR_OBJS) and
(np.isscalar(obj2) or type(obj2).__name__ in SCALAR_OBJS)):
# Upgrade scalars to cvxpy_obj
if np.isscalar(obj1):
obj1 = cvxpy_obj(CONSTANT,obj1,str(obj1))
if np.isscalar(obj2):
obj2 = cvxpy_obj(CONSTANT,obj2,str(obj2))
# Construct and return constraint
return cvxpy_constr(obj1,constraint_type,obj2)
# Upgrate scalars to arrays
if ((type(obj1) is cvxpy_matrix or type(obj1).__name__ in ARRAY_OBJS) and
(np.isscalar(obj2) or type(obj2).__name__ in SCALAR_OBJS)):
(m,n) = obj1.shape
new_ar = cvxpy_array(m,n)
for i in range(0,m,1):
for j in range(0,n,1):
new_ar[i,j] = obj2
obj2 = new_ar
if ((type(obj2) is cvxpy_matrix or type(obj2).__name__ in ARRAY_OBJS) and
(np.isscalar(obj1) or type(obj1).__name__ in SCALAR_OBJS)):
(m,n) = obj2.shape
new_ar = cvxpy_array(m,n)
for i in range(0,m,1):
for j in range(0,n,1):
new_ar[i,j] = obj1
obj1 = new_ar
# Both arrays
if ((type(obj1) is cvxpy_matrix or type(obj1).__name__ in ARRAY_OBJS) and
(type(obj2) is cvxpy_matrix or type(obj2).__name__ in ARRAY_OBJS)):
constr = []
if obj1.shape != obj2.shape:
raise ValueError('Invalid dimensions')
(m,n) = obj1.shape
for i in range(0,m,1):
for j in range(0,n,1):
constr += [compare(obj1[i,j],constraint_type,obj2[i,j])]
return cvxpy_list(constr)
# Invalid arguments
raise TypeError('Objects not comparable')
| [
2,
17174,
17174,
2466,
8162,
2,
198,
2,
15069,
357,
34,
8,
3050,
12,
6999,
42884,
22894,
25634,
1024,
6256,
8704,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.08733 | 1,981 |
# write-html.py
import webbrowser
if __name__ == "__main__":
main() | [
2,
3551,
12,
6494,
13,
9078,
198,
11748,
3992,
40259,
198,
220,
220,
220,
220,
220,
220,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
220,
220,
220,
220
] | 2.157895 | 38 |
from unittest import TestCase
from main.classes.arc_solver import ArcSolver
from main.classes.decimal_location import DecimalLocation
from main.classes.graph import Graph
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
6738,
1388,
13,
37724,
13,
5605,
62,
82,
14375,
1330,
10173,
50,
14375,
198,
6738,
1388,
13,
37724,
13,
12501,
4402,
62,
24886,
1330,
4280,
4402,
14749,
198,
6738,
1388,
13,
37724,
13,... | 3.76087 | 46 |
"""
A helper class for using TLS Lite with stdlib clients
(httplib, xmlrpclib, imaplib, poplib).
"""
from tlslite.checker import Checker
class ClientHelper:
"""This is a helper class used to integrate TLS Lite with various
TLS clients (e.g. poplib, smtplib, httplib, etc.)"""
def __init__(self,
username=None, password=None, sharedKey=None,
certChain=None, privateKey=None,
cryptoID=None, protocol=None,
x509Fingerprint=None,
x509TrustList=None, x509CommonName=None,
settings = None):
"""
For client authentication, use one of these argument
combinations:
- username, password (SRP)
- username, sharedKey (shared-key)
- certChain, privateKey (certificate)
For server authentication, you can either rely on the
implicit mutual authentication performed by SRP or
shared-keys, or you can do certificate-based server
authentication with one of these argument combinations:
- cryptoID[, protocol] (requires cryptoIDlib)
- x509Fingerprint
- x509TrustList[, x509CommonName] (requires cryptlib_py)
Certificate-based server authentication is compatible with
SRP or certificate-based client authentication. It is
not compatible with shared-keys.
The constructor does not perform the TLS handshake itself, but
simply stores these arguments for later. The handshake is
performed only when this class needs to connect with the
server. Then you should be prepared to handle TLS-specific
exceptions. See the client handshake functions in
L{tlslite.TLSConnection.TLSConnection} for details on which
exceptions might be raised.
@type username: str
@param username: SRP or shared-key username. Requires the
'password' or 'sharedKey' argument.
@type password: str
@param password: SRP password for mutual authentication.
Requires the 'username' argument.
@type sharedKey: str
@param sharedKey: Shared key for mutual authentication.
Requires the 'username' argument.
@type certChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@param certChain: Certificate chain for client authentication.
Requires the 'privateKey' argument. Excludes the SRP or
shared-key related arguments.
@type privateKey: L{tlslite.utils.RSAKey.RSAKey}
@param privateKey: Private key for client authentication.
Requires the 'certChain' argument. Excludes the SRP or
shared-key related arguments.
@type cryptoID: str
@param cryptoID: cryptoID for server authentication. Mutually
exclusive with the 'x509...' arguments.
@type protocol: str
@param protocol: cryptoID protocol URI for server
authentication. Requires the 'cryptoID' argument.
@type x509Fingerprint: str
@param x509Fingerprint: Hex-encoded X.509 fingerprint for
server authentication. Mutually exclusive with the 'cryptoID'
and 'x509TrustList' arguments.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
other party must present a certificate chain which extends to
one of these root certificates. The cryptlib_py module must be
installed to use this parameter. Mutually exclusive with the
'cryptoID' and 'x509Fingerprint' arguments.
@type x509CommonName: str
@param x509CommonName: The end-entity certificate's 'CN' field
must match this value. For a web server, this is typically a
server name such as 'www.amazon.com'. Mutually exclusive with
the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
'x509TrustList' argument.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
"""
self.username = None
self.password = None
self.sharedKey = None
self.certChain = None
self.privateKey = None
self.checker = None
#SRP Authentication
if username and password and not \
(sharedKey or certChain or privateKey):
self.username = username
self.password = password
#Shared Key Authentication
elif username and sharedKey and not \
(password or certChain or privateKey):
self.username = username
self.sharedKey = sharedKey
#Certificate Chain Authentication
elif certChain and privateKey and not \
(username or password or sharedKey):
self.certChain = certChain
self.privateKey = privateKey
#No Authentication
elif not password and not username and not \
sharedKey and not certChain and not privateKey:
pass
else:
raise ValueError("Bad parameters")
#Authenticate the server based on its cryptoID or fingerprint
if sharedKey and (cryptoID or protocol or x509Fingerprint):
raise ValueError("Can't use shared keys with other forms of"\
"authentication")
self.checker = Checker(cryptoID, protocol, x509Fingerprint,
x509TrustList, x509CommonName)
self.settings = settings
self.tlsSession = None
| [
37811,
198,
32,
31904,
1398,
329,
1262,
33855,
27395,
351,
14367,
8019,
7534,
198,
7,
2804,
489,
571,
11,
35555,
81,
79,
565,
571,
11,
545,
64,
489,
571,
11,
1461,
8019,
737,
198,
37811,
198,
198,
6738,
256,
75,
6649,
578,
13,
912... | 2.606185 | 2,199 |
import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
from gym.envs.classic_control import rendering
from config import Config
| [
11748,
10688,
198,
11748,
11550,
198,
6738,
11550,
1330,
9029,
198,
6738,
11550,
13,
26791,
1330,
384,
8228,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
11550,
13,
268,
14259,
13,
49421,
62,
13716,
1330,
14837,
198,
6738,
4566,
1330,
... | 4 | 42 |
#050_Soma_dos_pares.py
soma = 0
for c in range(1, 6):
num = int(input(f"{c}º Número: "))
if (num % 2 == 0):
soma += num
print (f"Soma = {soma}") | [
2,
28669,
62,
50,
6086,
62,
37427,
62,
79,
3565,
13,
9078,
198,
198,
82,
6086,
796,
657,
198,
1640,
269,
287,
2837,
7,
16,
11,
718,
2599,
198,
220,
220,
220,
997,
796,
493,
7,
15414,
7,
69,
1,
90,
66,
92,
36165,
399,
21356,
... | 1.829545 | 88 |
import re
import math
from .datetime_string import to_seconds, to_timedelta
import logging
import os
logger = logging.getLogger()
| [
11748,
302,
198,
11748,
10688,
198,
6738,
764,
19608,
8079,
62,
8841,
1330,
284,
62,
43012,
11,
284,
62,
16514,
276,
12514,
198,
11748,
18931,
198,
11748,
28686,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
3419,
628,
628
] | 3.268293 | 41 |
import pytest
from beerpy.receipe import hop_quantity, malt_composition, PILSENER_MALT, \
MUNICH_MALT
from beerpy.units.gravity import Gravity
| [
11748,
12972,
9288,
198,
198,
6738,
6099,
9078,
13,
260,
344,
3757,
1330,
1725,
62,
40972,
414,
11,
26868,
62,
785,
9150,
11,
350,
45484,
1677,
1137,
62,
44,
31429,
11,
3467,
198,
220,
220,
220,
337,
4944,
20739,
62,
44,
31429,
198,... | 2.777778 | 54 |
import mayavi.mlab as mlab
import numpy as np
import torch
import cv2
import matplotlib.pyplot as plt
box_colormap = [
[1, 1, 1],
[0, 1, 0],
[0, 1, 1],
[1, 1, 0],
]
label_dict = {0:"background", 1:"Car", 2:"Ped", 3:"Cycler", 4:"Truck"}
color_dict = {"0":[255, 255, 255], "1":[255, 255, 0], "2":[51, 153, 255], "3":[255, 0, 255], "4":[0, 0, 0]}
def rotate_points_along_z(points, angle):
"""
Args:
points: (B, N, 3 + C)
angle: (B), angle along z-axis, angle increases x ==> y
Returns:
"""
points, is_numpy = check_numpy_to_torch(points)
angle, _ = check_numpy_to_torch(angle)
cosa = torch.cos(angle)
sina = torch.sin(angle)
zeros = angle.new_zeros(points.shape[0])
ones = angle.new_ones(points.shape[0])
rot_matrix = torch.stack((
cosa, sina, zeros,
-sina, cosa, zeros,
zeros, zeros, ones
), dim=1).view(-1, 3, 3).float()
points_rot = torch.matmul(points[:, :, 0:3], rot_matrix)
points_rot = torch.cat((points_rot, points[:, :, 3:]), dim=-1)
return points_rot.numpy() if is_numpy else points_rot
def boxes_to_corners_3d(boxes3d):
"""
7 -------- 4
/| /|
6 -------- 5 .
| | | |
. 3 -------- 0
|/ |/
2 -------- 1
Args:
boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
Returns:
"""
boxes3d, is_numpy = check_numpy_to_torch(boxes3d)
template = boxes3d.new_tensor((
[1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1],
[1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1],
)) / 2
corners3d = boxes3d[:, None, 3:6].repeat(1, 8, 1) * template[None, :, :]
corners3d = rotate_points_along_z(corners3d.view(-1, 8, 3), boxes3d[:, 6]).view(-1, 8, 3)
corners3d += boxes3d[:, None, 0:3]
return corners3d.numpy() if is_numpy else corners3d
def draw_corners3d(corners3d, fig, color=(1, 1, 1), line_width=2, cls=None, tag='', max_num=500, tube_radius=None):
    """
    Draw wireframe 3D boxes into a mayavi figure.

    :param corners3d: (N, 8, 3) box corners, e.g. from boxes_to_corners_3d()
    :param fig: mayavi figure to draw into
    :param color: line color tuple passed straight to mlab.plot3d
    :param line_width: line width passed to mlab.plot3d
    :param cls: optional per-box label; floats (np.ndarray) are rendered as
        '%.2f' scores, anything else via '%s'
    :param tag: unused in this function (kept for caller compatibility)
    :param max_num: cap on the number of boxes drawn
    :param tube_radius: tube radius passed to mlab.plot3d
    :return: the same fig, for chaining
    """
    import mayavi.mlab as mlab
    num = min(max_num, len(corners3d))
    for n in range(num):
        b = corners3d[n]  # (8, 3)

        # Optional text label anchored at corner 6.
        if cls is not None:
            if isinstance(cls, np.ndarray):
                mlab.text3d(b[6, 0], b[6, 1], b[6, 2], '%.2f' % cls[n], scale=(0.3, 0.3, 0.3), color=color, figure=fig)
            else:
                mlab.text3d(b[6, 0], b[6, 1], b[6, 2], '%s' % cls[n], scale=(0.3, 0.3, 0.3), color=color, figure=fig)

        for k in range(0, 4):
            # Bottom face edge (corners 0-3).
            i, j = k, (k + 1) % 4
            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                        line_width=line_width, figure=fig)

            # Top face edge (corners 4-7).
            i, j = k + 4, (k + 1) % 4 + 4
            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                        line_width=line_width, figure=fig)

            # Vertical pillar connecting bottom and top faces.
            i, j = k, k + 4
            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                        line_width=line_width, figure=fig)

        # Diagonals 0-5 and 1-4 form a cross on one face to mark orientation.
        i, j = 0, 5
        mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                    line_width=line_width, figure=fig)

        i, j = 1, 4
        mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                    line_width=line_width, figure=fig)

    return fig
# def draw_on_image(image, calib, points=None, gt_boxes=None, ref_boxes=None, ref_scores=None, ref_labels=None):
# if points is not None:
# if not isinstance(points, np.ndarray):
# points = points.cpu().numpy()
# if ref_boxes is not None and not isinstance(ref_boxes, np.ndarray):
# ref_boxes = ref_boxes.cpu().numpy()
# if gt_boxes is not None and not isinstance(gt_boxes, np.ndarray):
# gt_boxes = gt_boxes.cpu().numpy()
# if ref_scores is not None and not isinstance(ref_scores, np.ndarray):
# ref_scores = ref_scores.cpu().numpy()
# if ref_labels is not None and not isinstance(ref_labels, np.ndarray):
# ref_labels = ref_labels.cpu().numpy()
# if gt_boxes is not None:
# corners3d = boxes_to_corners_3d(gt_boxes)
# for i in range(len(corners3d)):
# image = draw_one_frame_on_img(image, corners3d[i], color_dict, label_dict, 0, calib, pretty=True)
# if ref_boxes is not None and len(ref_boxes) > 0:
# ref_corners3d = boxes_to_corners_3d(ref_boxes)
# for i in range(len(ref_corners3d)):
# if ref_labels is None:
# label = 0
# else:
# label = ref_labels[i]
# image = draw_one_frame_on_img(image, ref_corners3d[i], color_dict, label_dict, label, calib, pretty=True)
# return image
# def draw_one_frame_on_img(img_draw, corners3d, color_dict, label_dict, label, calib, pretty = True):
# box_list = [(0, 1), (0, 2), (0, 4), (1, 5), (1, 3), (2, 6), (2, 3), (3, 7), (4, 6), (4, 5), (5, 7), (6, 7)]
# rect_min_w_h = [50, 20]
# if not str(label) in color_dict.keys():
# obj_color = (255, 255, 255)
# else:
# obj_color = color_dict[str(label)]
# plane_cor, plane_obj_cen = project_3D_to_image(corners3d, calib)
# print(plane_cor)
# if pretty:
# img_draw = draw_color_on_contour_roi(img_draw, plane_cor, obj_color)
# for point_pair in box_list:
# cv2.line(img_draw, (int(plane_cor[0][point_pair[0]]), int(plane_cor[1][point_pair[0]])),
# (int(plane_cor[0][point_pair[1]]), int(plane_cor[1][point_pair[1]])), obj_color, 1)
# for index in range(8):
# cv2.circle(img_draw, (int(plane_cor[0][index]), int(plane_cor[1][index])), 2, (0, 255, 255), -1)
# cv2.circle(img_draw, (int(plane_obj_cen[0]), int(plane_obj_cen[1])), 3, (255, 0, 255), -1)
# if not label_dict == None:
# left_corner_cor = []
# image_label = img_draw.copy()
# for index in range(8):
# left_corner_cor.append([plane_cor[0][index], plane_cor[1][index]])
# left_corner_cor.sort(key=lambda x: x[0])
# left_corner_cor = left_corner_cor[0:2]
# left_corner_cor.sort(key=lambda x: x[1])
# left_corner_cor = left_corner_cor[0]
# rect_left_top = (int(left_corner_cor[0]), int(left_corner_cor[1] - rect_min_w_h[1]))
# rect_right_down = (int(left_corner_cor[0] + rect_min_w_h[0]), int(left_corner_cor[1]))
# cv2.rectangle(image_label, rect_left_top, rect_right_down, (102, 178, 255), -1)
# if label in label_dict:
# text = label_dict[label]
# else:
# text = "None"
# cv2.putText(image_label, text, (int(left_corner_cor[0]), int(left_corner_cor[1]) - 5), cv2.FONT_HERSHEY_DUPLEX,
# 0.5, (0, 0, 0), 1, cv2.LINE_AA)
# img_draw = cv2.addWeighted(img_draw, 0.4, image_label, 0.6, 0)
# return img_draw
# def draw_one_bv_frame(corners3d, color_dict):
# if plt.gcf().number > 1:
# plt.close('all')
# if plt.gcf().number < 1:
# plt.figure()
# plt.ion()
# fig = plt.gcf()
# ax = plt.gca()
# fig.set_size_inches(5, 12.5)
# # point_list = [(1, 3), (3, 7), (7, 5), (5, 1)]
# point_list = [(3, 7), (7, 6), (6, 2), (2, 3)]
# plt.cla()
#
# for cat in dets:
# for i in range(len(dets[cat])):
# if dets[cat][i, -1] > center_thresh:
# dim_ = dets[cat][i, 5:8]
# loc_ = dets[cat][i, 8:11]
# rot_y = dets[cat][i, 11]
# loc = np.array([loc_[0], loc_[1] - dim_[0] / 2, loc_[2]])
# dim = np.array([dim_[2], dim_[0], dim_[1]])
# if not str(cat) in obj_color_dict.keys():
# obj_color = (1, 1, 1)
# else:
# obj_color = (obj_color_dict[str(cat)][2] / 255, obj_color_dict[str(cat)][1] / 255,
# obj_color_dict[str(cat)][0] / 255)
# corner_point = self.generate_obj_cam_cor(np.array(loc), np.array(dim), np.array([rot_y, 0, 0]))
# for point_ in point_list:
# ax.plot((corner_point[0][point_[0]], corner_point[0][point_[1]]),
# (corner_point[2][point_[0]], corner_point[2][point_[1]]), color=obj_color)
# ax.axis(xmin=-25, xmax=25)
# ax.axis(ymin=0, ymax=100)
# ax.grid()
# plt.title("bird view")
# plt.xlabel("horizontal distance/m")
# plt.ylabel("vertical distance/m")
# fig.canvas.draw()
# img_from_mat = cv2.cvtColor(np.asarray(fig.canvas.buffer_rgba()), cv2.COLOR_RGBA2BGR)
# img_from_mat = cv2.resize(img_from_mat, (400, 1000))
#
# return img_from_mat
# def project_3D_to_image(corner_point, calib):
# calib_ = np.array(calib)[:, 0:3]
# center_3d = corner_point[0]
# for i in range(1, 4):
# center_3d += corner_point[i]
# center_3d = center_3d/4
# center_3d = center_3d[np.newaxis,...]
# corner_point = np.append(center_3d, corner_point, axis=0)
# corner_point = corner_point.T
# tmp = corner_point[0,...]
# corner_point[0, ...] = corner_point[2,...]
# corner_point[2, ...] = tmp
# tmp = corner_point[0,...]
# corner_point[0, ...] = corner_point[1,...]
# corner_point[1, ...] = tmp
# for i in range(9):
# if corner_point[2, i] < 0:
# corner_point[0, i] = -corner_point[0, i]
# corner_point[1, i] = -corner_point[1, i]
# corner_point = np.matmul(calib_, corner_point)
# plane_cor = corner_point[0:2, :]
# for i in range(9):
# plane_cor[0][i] = corner_point[0][i] / corner_point[2][i]
# plane_cor[1][i] = corner_point[1][i] / corner_point[2][i]
# plane_cen = plane_cor[:, 0]
# plane_cor = plane_cor[:, 1:]
# #
# return plane_cor, plane_cen
# def draw_color_on_contour_roi(self, image, plane_cor, color):
# plane_list = [[2, 3, 7, 6], [0, 1, 3, 2], [4, 5, 7, 6], [0, 1, 5, 4], [0, 2, 6, 4], [1, 3, 7 ,5]]
# contour = []
# image_draw_contour = image.copy()
# for plane in plane_list:
# contour_ = []
# for index in plane:
# contour_.append([plane_cor[0][index], plane_cor[1][index]])
# contour.append(np.array(contour_).reshape((-1,1,2)).astype(np.int32))
# for contour_ in contour:
# cv2.drawContours(image_draw_contour, [contour_], -1, color, thickness = -1)
# image = cv2.addWeighted(image, 0.75, image_draw_contour, 0.25, 0)
# return image
# def generate_obj_cam_cor(self, position, size, ZYX):
# pitch = ZYX[0]
# roll = ZYX[1]
# yaw = ZYX[2]
# R_roll = np.array([[1 , 0 , 0 ],
# [0 , np.cos(roll), -np.sin(roll)],
# [0 , np.sin(roll), np.cos(roll)]])
# R_pitch = np.array([[np.cos(pitch) , 0 , np.sin(pitch)],
# [0 , 1 , 0 ],
# [-np.sin(pitch), 0 , np.cos(pitch)]])
# R_yaw = np.array([[np.cos(yaw) , -np.sin(yaw), 0 ],
# [np.sin(yaw) , np.cos(yaw) , 0 ],
# [0 , 0 , 1 ]])
# R = np.matmul(R_pitch, R_roll)
# R = np.matmul(R_yaw, R)
# size = 0.5 * size
# arithm_list = []
# for i in ['+','-']:
# for j in ['+', '-']:
# for k in ['+', '-']:
# arithm_list.append(i+j+k)
# corner_point = np.array([0, 0, 0])
# for arithm in arithm_list:
# point = np.array([eval(str(0) + arithm[0] + str(size[0])),eval(str(0) + arithm[1] + str(size[1])),eval(str(0) + arithm[2] + str(size[2]))])
# corner_point = np.vstack((corner_point, point))
# corner_point = corner_point.T
# corner_point = np.matmul(R, corner_point)
# for i in range(9):
# corner_point[:,i] = corner_point[:,i] + position
# return corner_point
| [
11748,
743,
15820,
13,
4029,
397,
355,
285,
23912,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
269,
85,
17,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
3524,
62,
4033,
579,
499,
796,
... | 1.841058 | 6,732 |
import os
import time
import psutil
import sys
import atexit
| [
11748,
28686,
198,
11748,
640,
198,
11748,
26692,
22602,
198,
11748,
25064,
198,
11748,
379,
37023,
198
] | 3.588235 | 17 |
"""Fast Lomb-Scargle Periodograms"""
from .core import LombScargle
| [
37811,
22968,
28503,
12,
3351,
853,
293,
18581,
26836,
37811,
198,
198,
6738,
764,
7295,
1330,
28503,
3351,
853,
293,
198
] | 3.238095 | 21 |
import os.path
import mimetypes
from django.http import HttpResponse
from .utils import get_scorm_storage
import logging
logger = logging.getLogger(__name__)
def proxy_scorm_media(request, block_id, file, sha1=None):
    """Serve a stored SCORM media file from the LMS's own domain.

    The SCORM runtime requires its assets to come from the same origin as
    the LMS, so this view reads the file out of storage and returns it
    directly instead of redirecting to the storage backend.
    """
    guessed_type, _ = mimetypes.guess_type(file)
    # Fall back to text/html when the type cannot be guessed.
    content_type = guessed_type if guessed_type is not None else "text/html"

    if sha1:
        location = "scorm/{}/{}/{}".format(block_id, sha1, file)
    else:
        location = "scorm/{}/{}".format(block_id, file)

    body = get_scorm_storage().open(location).read()
    return HttpResponse(body, content_type=content_type)
11748,
28686,
13,
6978,
198,
11748,
17007,
2963,
12272,
198,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
198,
6738,
764,
26791,
1330,
651,
62,
1416,
579,
62,
35350,
198,
198,
11748,
18931,
198,
198,
6404,
1362,
796,... | 2.421405 | 299 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
TOPIC: Decide what to do with a field that has multiple items in it.
In this problem set you work with cities infobox data, audit it, come up with a
cleaning idea and then clean it up.
Since in the previous quiz you made a decision on which value to keep for the
"areaLand" field, you now know what has to be done.
Finish the function fix_area(). It will receive a string as an input, and it
has to return a float representing the value of the area or None.
You have to change the function fix_area. You can use extra functions if you
like, but changes to process_file will not be taken into account.
The rest of the code is just an example on how this function can be used.
"""
import codecs
import csv
import json
import pprint
# Input CSV of cities infobox data audited/cleaned by this exercise.
CITIES = 'cities.csv'
if __name__ == "__main__":
    # NOTE(review): test() is not defined in this excerpt; it is expected
    # to be provided elsewhere in the file.
    test()
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
35222,
2149,
25,
4280,
485,
644,
284,
466,
351,
257,
2214,
326,
468,
3294,
3709,
287,
340,... | 3.189091 | 275 |
import parmed
import numpy as np
from constrainmol import ConstrainedMolecule
# Load the system from a GROMACS-format topology (.top) + coordinates (.gro).
system = parmed.load_file("system.top", xyz="system.gro")
system.save("unconstrained.pdb", overwrite=True)
# Solve bond constraints per residue; system.split() appears to yield each
# unique residue template together with the indices of its occurrences —
# TODO confirm against the parmed API.
constrained_coordinates = np.zeros(system.coordinates.shape)
unique_res = system.split()
for (res, resids) in unique_res:
    constrain_mol = ConstrainedMolecule(res)
    for resid in resids:
        # Re-solve starting from this copy's current coordinates, then copy
        # the constrained result back into the full coordinate array.
        constrain_mol.update_xyz(system[resid, :].coordinates)
        constrain_mol.solve()
        atom_ids = [atom.idx for atom in system.residues[resid].atoms]
        constrained_coordinates[atom_ids] = constrain_mol.structure.coordinates
# Write the fully constrained structure back out.
system.coordinates = constrained_coordinates
system.save("constrained.pdb", overwrite=True)
| [
11748,
1582,
1150,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
1500,
3201,
43132,
1330,
1482,
2536,
1328,
44,
2305,
23172,
198,
198,
2,
8778,
422,
7128,
14,
4852,
1435,
198,
10057,
796,
1582,
1150,
13,
2220,
62,
7753,
7203,
10... | 2.664311 | 283 |
from datetime import datetime
from scrapy.contrib.loader import ItemLoader
from scrapy.contrib.loader.processor import Join, MapCompose, Identity
from scrapy.contrib_exp.djangoitem import DjangoItem
from django_edzapp.jobs.models import Job
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
6738,
15881,
88,
13,
3642,
822,
13,
29356,
1330,
9097,
17401,
198,
6738,
15881,
88,
13,
3642,
822,
13,
29356,
13,
41341,
1330,
15251,
11,
9347,
7293,
577,
11,
27207,
198,
6738,
15881,
88,
... | 3.5 | 70 |
from comment import *
from thread import *
from site import *
from custom_user import *
| [
6738,
2912,
1330,
1635,
198,
6738,
4704,
1330,
1635,
198,
6738,
2524,
1330,
1635,
198,
6738,
2183,
62,
7220,
1330,
1635,
198
] | 4 | 22 |
# 1091
# (?:(?:w{3}\.)(?:[a-zA-Z0-9/;\?&=:\-_\$\+!\*'\(\|\\~\[\]#%\.])+[\.com|\.edu|\.gov|\.int|\.mil|\.net|\.org|\.biz|\.info|\.name|\.pro|\.aero|\.coop|\.museum|\.cat|\.jobs|\.travel|\.arpa|\.mobi|\.ac|\.ad|\.ae|\.af|\.ag|\.ai|\.al|\.am|\.an|\.ao|\.aq|\.ar|\.as|\.at|\.au|\.aw|\.az|\.ax|\.ba|\.bb|\.bd|\.be|\.bf|\.bg|\.bh|\.bi|\.bj|\.bm|\.bn|\.bo|\.br|\.bs|\.bt|\.bv|\.bw|\.by|\.bz|\.ca|\.cc|\.cd|\.cf|\.cg|\.ch|\.ci|\.ck|\.cl|\.cm|\.cn|\.co|\.cr|\.cs|\.cu|\.cv|\.cx|\.cy|\.cz|\.de|\.dj|\.dk|\.dm|\.do|\.dz|\.ec|\.ee|\.eg|\.eh|\.er|\.es|\.et|\.eu|\.fi|\.fj|\.fk|\.fm|\.fo|\.fr|\.ga|\.gb|\.gd|\.ge|\.gf|\.gg|\.gh|\.gi|\.gl|\.gm|\.gn|\.gp|\.gq|\.gr|\.gs|\.gt|\.gu|\.gw|\.gy|\.hk|\.hm|\.hn|\.hr|\.ht|\.hu|\.id|\.ie|\.il|\.im|\.in|\.io|\.iq|\.ir|\.is|\.it|\.je|\.jm|\.jo|\.jp|\.ke|\.kg|\.kh|\.ki|\.km|\.kn|\.kp|\.kr|\.kw|\.ky|\.kz|\.la|\.lb|\.lc|\.li|\.lk|\.lr|\.ls|\.lt|\.lu|\.lv|\.ly|\.ma|\.mc|\.md|\.mg|\.mh|\.mk|\.ml|\.mm|\.mn|\.mo|\.mp|\.mq|\.mr|\.ms|\.mt|\.mu|\.mv|\.mw|\.mx|\.my|\.mz|\.na|\.nc|\.ne|\.nf|\.ng|\.ni|\.nl|\.no|\.np|\.nr|\.nu|\.nz|\.om|\.pa|\.pe|\.pf|\.pg|\.ph|\.pk|\.pl|\.pm|\.pn|\.pr|\.ps|\.pt|\.pw|\.py|\.qa|\.re|\.ro|\.ru|\.rw|\.sa|\.sb|\.sc|\.sd|\.se|\.sg|\.sh|\..si|\.sj|\.sk|\.sl|\.sm|\.sn|\.so|\.sr|\.st|\.sv|\.sy|\.sz|\.tc|\.td|\.tf|\.tg|\.th|\.tj|\.tk|\.tl|\.tm|\.tn|\.to|\.tp|\.tr|\.tt|\.tv|\.tw|\.tz|\.ua|\.ug|\.uk|\.um|\.us|\.uy|\.uz|\.va|\.vc|\.ve|\.vg|\.vi|\.vn|\.vu|\.wf|\.ws|\.ye|\.yt|\.yu|\.za|\.zm|\.zw](?:[a-zA-Z0-9/;\?&=:\-_\$\+!\*'\(\|\\~\[\]#%\.])*)
# EXPONENT
# nums:4
# EXPONENT AttackString:""+"www."*1024+"@1 _SLQ_2"
import re
from time import perf_counter
# NOTE(review): the bracketed [\.com|\.edu|...] section is a character
# *class*, not an alternation of TLDs ('|' is a literal inside []). The
# pattern is deliberately left as-is: this script exists to demonstrate
# the regex's exponential (catastrophic) backtracking.
regex = """(?:(?:w{3}\.)(?:[a-zA-Z0-9/;\?&=:\-_\$\+!\*'\(\|\\~\[\]#%\.])+[\.com|\.edu|\.gov|\.int|\.mil|\.net|\.org|\.biz|\.info|\.name|\.pro|\.aero|\.coop|\.museum|\.cat|\.jobs|\.travel|\.arpa|\.mobi|\.ac|\.ad|\.ae|\.af|\.ag|\.ai|\.al|\.am|\.an|\.ao|\.aq|\.ar|\.as|\.at|\.au|\.aw|\.az|\.ax|\.ba|\.bb|\.bd|\.be|\.bf|\.bg|\.bh|\.bi|\.bj|\.bm|\.bn|\.bo|\.br|\.bs|\.bt|\.bv|\.bw|\.by|\.bz|\.ca|\.cc|\.cd|\.cf|\.cg|\.ch|\.ci|\.ck|\.cl|\.cm|\.cn|\.co|\.cr|\.cs|\.cu|\.cv|\.cx|\.cy|\.cz|\.de|\.dj|\.dk|\.dm|\.do|\.dz|\.ec|\.ee|\.eg|\.eh|\.er|\.es|\.et|\.eu|\.fi|\.fj|\.fk|\.fm|\.fo|\.fr|\.ga|\.gb|\.gd|\.ge|\.gf|\.gg|\.gh|\.gi|\.gl|\.gm|\.gn|\.gp|\.gq|\.gr|\.gs|\.gt|\.gu|\.gw|\.gy|\.hk|\.hm|\.hn|\.hr|\.ht|\.hu|\.id|\.ie|\.il|\.im|\.in|\.io|\.iq|\.ir|\.is|\.it|\.je|\.jm|\.jo|\.jp|\.ke|\.kg|\.kh|\.ki|\.km|\.kn|\.kp|\.kr|\.kw|\.ky|\.kz|\.la|\.lb|\.lc|\.li|\.lk|\.lr|\.ls|\.lt|\.lu|\.lv|\.ly|\.ma|\.mc|\.md|\.mg|\.mh|\.mk|\.ml|\.mm|\.mn|\.mo|\.mp|\.mq|\.mr|\.ms|\.mt|\.mu|\.mv|\.mw|\.mx|\.my|\.mz|\.na|\.nc|\.ne|\.nf|\.ng|\.ni|\.nl|\.no|\.np|\.nr|\.nu|\.nz|\.om|\.pa|\.pe|\.pf|\.pg|\.ph|\.pk|\.pl|\.pm|\.pn|\.pr|\.ps|\.pt|\.pw|\.py|\.qa|\.re|\.ro|\.ru|\.rw|\.sa|\.sb|\.sc|\.sd|\.se|\.sg|\.sh|\..si|\.sj|\.sk|\.sl|\.sm|\.sn|\.so|\.sr|\.st|\.sv|\.sy|\.sz|\.tc|\.td|\.tf|\.tg|\.th|\.tj|\.tk|\.tl|\.tm|\.tn|\.to|\.tp|\.tr|\.tt|\.tv|\.tw|\.tz|\.ua|\.ug|\.uk|\.um|\.us|\.uy|\.uz|\.va|\.vc|\.ve|\.vg|\.vi|\.vn|\.vu|\.wf|\.ws|\.ye|\.yt|\.yu|\.za|\.zm|\.zw](?:[a-zA-Z0-9/;\?&=:\-_\$\+!\*'\(\|\\~\[\]#%\.])*)"""
REGEX = re.compile(regex)
# Grow the attack string and time each search; the time per iteration
# explodes with the number of "www." repetitions.
for i in range(0, 150000):
    ATTACK = "" + "www." * i * 1 + "@1 _SLQ_2"
    LEN = len(ATTACK)
    BEGIN = perf_counter()
    m = REGEX.search(ATTACK)
    # m = REGEX.match(ATTACK)
    DURATION = perf_counter() - BEGIN
    print(f"{i *1}: took {DURATION} seconds!")
2,
838,
6420,
198,
2,
357,
27514,
7,
27514,
86,
90,
18,
32239,
2014,
7,
27514,
58,
64,
12,
89,
32,
12,
57,
15,
12,
24,
14,
26,
59,
30,
5,
28,
7479,
12,
62,
59,
3,
59,
10,
0,
59,
9,
6,
59,
38016,
91,
6852,
93,
59,
58,
... | 1.643411 | 2,064 |
#!/usr/bin/env python3
"""
This module merge to xml files into one. First it checks if its sha's are the same to avoid merging of different projects an finally merge it into one called merged.xml.
"""
import sys
import argparse
from lxml import etree
import met.xmlMerger as merger
# Command-line usage string shown by argparse.
USAGE = './met-merger firstXml.xml secondXml.xml'
parser = argparse.ArgumentParser(
    description=__doc__,
    formatter_class=argparse.RawDescriptionHelpFormatter,
    usage=USAGE)
# SUPPRESS hides the per-argument help lines; the usage string above is
# the only documentation shown.
parser.add_argument('firstXml', help=argparse.SUPPRESS)
parser.add_argument('secondXml', help=argparse.SUPPRESS)
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this excerpt; it is expected
    # to be provided elsewhere in the file.
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
1212,
8265,
20121,
284,
35555,
3696,
656,
530,
13,
3274,
340,
8794,
611,
663,
427,
64,
338,
389,
262,
976,
284,
3368,
35981,
286,
1180,
4493,
281,
3443,
20121,
340,
6... | 3.058824 | 204 |
"""Docstring for db_config file."""
import psycopg2
import os
from instance.config import app_config
# from boto.s3.connection import S3Connection
# s3 = S3Connection(os.environ['S3_KEY'], os.environ['S3_SECRET'])
env = os.getenv("FLASK_ENV")
if not env:
url = app_config["production"].DATABASE_URL
else:
url = "host='localhost' port='5433' dbname='sendit' user='app' password='app'"
def connection(url):
    """Open a PostgreSQL connection to *url*, requiring SSL."""
    return psycopg2.connect(url, sslmode='require')
def init_db():
    """Open a connection to the configured database URL.

    NOTE(review): unlike connection(), this does not pass
    sslmode='require' — confirm whether that difference is intentional.
    """
    return psycopg2.connect(url)
def tables():
    """Return the CREATE TABLE statements for the application schema.

    The statements are ordered so that referenced tables (users) are
    created before the tables that reference them (orders).
    """
    users_sql = """CREATE TABLE IF NOT EXISTS users(
        user_id SERIAL PRIMARY KEY,
        first_name CHARACTER VARYING(200) NOT NULL,
        last_name CHARACTER VARYING(200) NOT NULL,
        username CHARACTER VARYING(200) NOT NULL,
        role CHARACTER VARYING(200) DEFAULT 'user',
        email CHARACTER VARYING(320) NOT NULL,
        password CHARACTER VARYING(200) NOT NULL);"""
    orders_sql = """CREATE TABLE IF NOT EXISTS orders(
        parcel_id SERIAL PRIMARY KEY,
        user_id SERIAL REFERENCES users(user_id),
        item_shipped CHARACTER VARYING(200) NOT NULL,
        origin CHARACTER VARYING(200) NOT NULL,
        destination CHARACTER VARYING(200) NOT NULL,
        weight INTEGER NOT NULL,
        current_location CHARACTER VARYING(200) NOT NULL,
        pickup_location CHARACTER VARYING(200) NOT NULL,
        status CHARACTER VARYING(200) NOT NULL);"""
    return [users_sql, orders_sql]
def create_tables():
    """Create every application table, committing once at the end."""
    conn = connection(url)
    cur = conn.cursor()
    for statement in tables():
        cur.execute(statement)
    conn.commit()
def destroy_tables():
    """Drop all application tables (CASCADE removes dependent objects)."""
    conn = connection(url)
    cur = conn.cursor()
    statements = (
        """DROP TABLE IF EXISTS users CASCADE""",
        """DROP TABLE IF EXISTS orders CASCADE""",
    )
    for statement in statements:
        cur.execute(statement)
    conn.commit()
| [
37811,
23579,
8841,
329,
20613,
62,
11250,
2393,
526,
15931,
198,
11748,
17331,
22163,
70,
17,
198,
11748,
28686,
198,
6738,
4554,
13,
11250,
1330,
598,
62,
11250,
198,
2,
422,
275,
2069,
13,
82,
18,
13,
38659,
1330,
311,
18,
32048,
... | 2.685815 | 853 |
from typing import Union
from talon import Context, Module, actions, app
from talon.grammar import Phrase
import gaze_ocr
import screen_ocr # dependency of gaze-ocr
from gaze_ocr import _talon_wrappers as talon_wrappers
mod = Module()
setting_ocr_logging_dir = mod.setting(
"ocr_logging_dir",
type=str,
default=None,
desc="If specified, log OCR'ed images to this directory.",
)
app.register("ready", on_ready)
@mod.capture(rule="<user.prose> | <user.single_digit_string>")
def onscreen_text(m) -> str:
    """Capture multi-word prose or a single digit, returned as plain text."""
    return str(m)
@mod.capture(rule="<user.word> | {user.punctuation} | <user.single_digit_string>")
def onscreen_word(m) -> str:
    """Capture a single word, punctuation token, or digit as plain text."""
    return str(m)
@mod.action_class | [
6738,
19720,
1330,
4479,
198,
198,
6738,
3305,
261,
1330,
30532,
11,
19937,
11,
4028,
11,
598,
198,
6738,
3305,
261,
13,
4546,
3876,
1330,
1380,
22789,
198,
198,
11748,
17841,
62,
1696,
198,
11748,
3159,
62,
1696,
220,
1303,
20203,
28... | 2.72242 | 281 |
import ast
import os
import string
from wsgiref.util import FileWrapper
from django.conf import settings
from django.contrib.staticfiles import finders
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.utils.crypto import random
from .docx import read_docx_tags, replace_docx_tags
from .forms import TaleForm
from .models import Tale, TaleTag
| [
11748,
6468,
198,
11748,
28686,
198,
11748,
4731,
198,
6738,
266,
45213,
557,
69,
13,
22602,
1330,
9220,
36918,
2848,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
12708,
16624,
1330,
1... | 3.482456 | 114 |
main()
| [
12417,
3419,
198
] | 2.333333 | 3 |
"""
This module contains functionality required for disconnected installation.
"""
import logging
import os
import tempfile
import yaml
from ocs_ci.framework import config
from ocs_ci.helpers.disconnected import get_opm_tool
from ocs_ci.ocs import constants
from ocs_ci.ocs.exceptions import NotFoundError
from ocs_ci.ocs.resources.catalog_source import CatalogSource
from ocs_ci.utility import templating
from ocs_ci.utility.utils import (
create_directory_path,
exec_cmd,
get_image_with_digest,
get_latest_ds_olm_tag,
get_ocp_version,
login_to_mirror_registry,
prepare_customized_pull_secret,
wait_for_machineconfigpool_status,
)
logger = logging.getLogger(__name__)
def get_csv_from_image(bundle_image):
    """
    Extract clusterserviceversion.yaml file from operator bundle image.

    Args:
        bundle_image (str): OCS operator bundle image

    Returns:
        dict: loaded yaml from CSV file

    Raises:
        FileNotFoundError: if the CSV was not extracted from the image

    """
    manifests_dir = os.path.join(
        config.ENV_DATA["cluster_path"], constants.MANIFESTS_DIR
    )
    ocs_operator_csv_yaml = os.path.join(manifests_dir, constants.OCS_OPERATOR_CSV_YAML)
    create_directory_path(manifests_dir)
    # 'oc image extract' writes the bundled CSV manifest into manifests_dir,
    # authenticating with the customized pull secret for this image.
    with prepare_customized_pull_secret(bundle_image) as authfile_fo:
        exec_cmd(
            f"oc image extract --registry-config {authfile_fo.name} "
            f"{bundle_image} --confirm "
            f"--path /manifests/ocs-operator.clusterserviceversion.yaml:{manifests_dir}"
        )
    try:
        with open(ocs_operator_csv_yaml) as f:
            return yaml.safe_load(f)
    except FileNotFoundError as err:
        # Log a clear message before re-raising so a failed extraction is
        # easy to spot in the test logs.
        logger.error(f"File {ocs_operator_csv_yaml} does not exists ({err})")
        raise
def prepare_disconnected_ocs_deployment():
    """
    Prepare disconnected ocs deployment:
    - get related images from OCS operator bundle csv
    - mirror related images to mirror registry
    - create imageContentSourcePolicy for the mirrored images
    - disable the default OperatorSources

    For live deployments the function instead prunes and mirrors the
    redhat-operators index image, creates its CatalogSource, and returns None.

    Returns:
        str: OCS registry image prepared for disconnected installation (with
        sha256 digest) or None (for live deployment)
    """
    logger.info("Prepare for disconnected OCS installation")
    if config.DEPLOYMENT.get("live_deployment"):
        get_opm_tool()
        pull_secret_path = os.path.join(constants.TOP_DIR, "data", "pull-secret")
        ocp_version = get_ocp_version()
        index_image = f"{config.DEPLOYMENT['cs_redhat_operators_image']}:v{ocp_version}"
        mirrored_index_image = (
            f"{config.DEPLOYMENT['mirror_registry']}/{constants.MIRRORED_INDEX_IMAGE_NAMESPACE}/"
            f"{constants.MIRRORED_INDEX_IMAGE_NAME}:v{ocp_version}"
        )
        # prune an index image down to just the packages we need
        logger.info(
            f"Prune index image {index_image} -> {mirrored_index_image} "
            f"(packages: {', '.join(constants.DISCON_CL_REQUIRED_PACKAGES)})"
        )
        cmd = (
            f"opm index prune -f {index_image} "
            f"-p {','.join(constants.DISCON_CL_REQUIRED_PACKAGES)} "
            f"-t {mirrored_index_image}"
        )
        # opm tool doesn't have an --authfile parameter, we have to supply
        # the auth file through an env variable
        os.environ["REGISTRY_AUTH_FILE"] = pull_secret_path
        exec_cmd(cmd)
        # login to mirror registry
        login_to_mirror_registry(pull_secret_path)
        # push pruned index image to mirror registry
        logger.info(
            f"Push pruned index image to mirror registry: {mirrored_index_image}"
        )
        cmd = f"podman push --authfile {pull_secret_path} --tls-verify=false {mirrored_index_image}"
        exec_cmd(cmd)
        # mirror related images (this might take very long time)
        logger.info(f"Mirror images related to index image: {mirrored_index_image}")
        cmd = (
            f"oc adm catalog mirror {mirrored_index_image} -a {pull_secret_path} --insecure "
            f"{config.DEPLOYMENT['mirror_registry']} --index-filter-by-os='.*'"
        )
        oc_acm_result = exec_cmd(cmd, timeout=7200)
        # find the manifests directory announced on stdout; the for/else
        # raises only when no such line exists
        for line in oc_acm_result.stdout.decode("utf-8").splitlines():
            if "wrote mirroring manifests to" in line:
                break
        else:
            raise NotFoundError(
                "Manifests directory not printed to stdout of 'oc adm catalog mirror ...' command."
            )
        mirroring_manifests_dir = line.replace("wrote mirroring manifests to ", "")
        logger.debug(f"Mirrored manifests directory: {mirroring_manifests_dir}")
        # create ImageContentSourcePolicy
        icsp_file = os.path.join(
            f"{mirroring_manifests_dir}",
            "imageContentSourcePolicy.yaml",
        )
        exec_cmd(f"oc apply -f {icsp_file}")
        # Disable the default OperatorSources
        exec_cmd(
            """oc patch OperatorHub cluster --type json """
            """-p '[{"op": "add", "path": "/spec/disableAllDefaultSources", "value": true}]'"""
        )
        # create redhat-operators CatalogSource pointing at the mirrored index
        catalog_source_data = templating.load_yaml(constants.CATALOG_SOURCE_YAML)
        catalog_source_manifest = tempfile.NamedTemporaryFile(
            mode="w+", prefix="catalog_source_manifest", delete=False
        )
        catalog_source_data["spec"]["image"] = f"{mirrored_index_image}"
        catalog_source_data["metadata"]["name"] = "redhat-operators"
        catalog_source_data["spec"]["displayName"] = "Red Hat Operators - Mirrored"
        templating.dump_data_to_temp_yaml(
            catalog_source_data, catalog_source_manifest.name
        )
        exec_cmd(f"oc apply -f {catalog_source_manifest.name}")
        catalog_source = CatalogSource(
            resource_name="redhat-operators",
            namespace=constants.MARKETPLACE_NAMESPACE,
        )
        # Wait for catalog source is ready
        catalog_source.wait_for_state("READY")
        # live deployment path returns None by design
        return
    if config.DEPLOYMENT.get("stage_rh_osbs"):
        raise NotImplementedError(
            "Disconnected installation from stage is not implemented!"
        )
    # split "repo:tag" into the repo and an optional tag
    ocs_registry_image = config.DEPLOYMENT.get("ocs_registry_image", "")
    logger.debug(f"ocs-registry-image: {ocs_registry_image}")
    ocs_registry_image_and_tag = ocs_registry_image.split(":")
    ocs_registry_image = ocs_registry_image_and_tag[0]
    image_tag = (
        ocs_registry_image_and_tag[1] if len(ocs_registry_image_and_tag) == 2 else None
    )
    if not image_tag and config.REPORTING.get("us_ds") == "DS":
        image_tag = get_latest_ds_olm_tag(
            upgrade=False,
            latest_tag=config.DEPLOYMENT.get("default_latest_tag", "latest"),
        )
        ocs_registry_image = f"{config.DEPLOYMENT['default_ocs_registry_image'].split(':')[0]}:{image_tag}"
    bundle_image = f"{constants.OCS_OPERATOR_BUNDLE_IMAGE}:{image_tag}"
    logger.debug(f"ocs-operator-bundle image: {bundle_image}")
    csv_yaml = get_csv_from_image(bundle_image)
    # dig the operator image out of the CSV's first deployment/container
    ocs_operator_image = (
        csv_yaml.get("spec", {})
        .get("install", {})
        .get("spec", {})
        .get("deployments", [{}])[0]
        .get("spec", {})
        .get("template", {})
        .get("spec", {})
        .get("containers", [{}])[0]
        .get("image")
    )
    logger.debug(f"ocs-operator-image: {ocs_operator_image}")
    # prepare list related images (bundle, registry and operator images and all
    # images from relatedImages section from csv)
    ocs_related_images = []
    ocs_related_images.append(get_image_with_digest(bundle_image))
    ocs_registry_image_with_digest = get_image_with_digest(ocs_registry_image)
    ocs_related_images.append(ocs_registry_image_with_digest)
    ocs_related_images.append(get_image_with_digest(ocs_operator_image))
    ocs_related_images += [
        image["image"] for image in csv_yaml.get("spec").get("relatedImages")
    ]
    logger.debug(f"OCS Related Images: {ocs_related_images}")
    mirror_registry = config.DEPLOYMENT["mirror_registry"]
    # prepare images mapping file for mirroring
    # (each line maps source@digest to the mirror-registry repository)
    mapping_file_content = [
        f"{image}={mirror_registry}{image[image.index('/'):image.index('@')]}\n"
        for image in ocs_related_images
    ]
    logger.debug(f"Mapping file content: {mapping_file_content}")
    name = "ocs-images"
    mapping_file = os.path.join(config.ENV_DATA["cluster_path"], f"{name}-mapping.txt")
    # write mapping file to disk
    with open(mapping_file, "w") as f:
        f.writelines(mapping_file_content)
    # prepare ImageContentSourcePolicy for OCS images
    with open(constants.TEMPLATE_IMAGE_CONTENT_SOURCE_POLICY_YAML) as f:
        ocs_icsp = yaml.safe_load(f)
    ocs_icsp["metadata"]["name"] = name
    ocs_icsp["spec"]["repositoryDigestMirrors"] = []
    for image in ocs_related_images:
        ocs_icsp["spec"]["repositoryDigestMirrors"].append(
            {
                "mirrors": [
                    f"{mirror_registry}{image[image.index('/'):image.index('@')]}"
                ],
                "source": image[: image.index("@")],
            }
        )
    logger.debug(f"OCS imageContentSourcePolicy: {yaml.safe_dump(ocs_icsp)}")
    ocs_icsp_file = os.path.join(
        config.ENV_DATA["cluster_path"], f"{name}-imageContentSourcePolicy.yaml"
    )
    with open(ocs_icsp_file, "w+") as fs:
        yaml.safe_dump(ocs_icsp, fs)
    # create ImageContentSourcePolicy
    exec_cmd(f"oc apply -f {ocs_icsp_file}")
    # mirror images based on mapping file
    with prepare_customized_pull_secret(ocs_related_images) as authfile_fo:
        login_to_mirror_registry(authfile_fo.name)
        exec_cmd(
            f"oc image mirror --filter-by-os='.*' -f {mapping_file} --insecure "
            f"--registry-config={authfile_fo.name} --max-per-registry=2",
            timeout=3600,
        )
    # Disable the default OperatorSources
    exec_cmd(
        """oc patch OperatorHub cluster --type json """
        """-p '[{"op": "add", "path": "/spec/disableAllDefaultSources", "value": true}]'"""
    )
    # wait for newly created imageContentSourcePolicy is applied on all nodes
    wait_for_machineconfigpool_status("all")
    return ocs_registry_image_with_digest
| [
37811,
198,
1212,
8265,
4909,
11244,
2672,
329,
28597,
9988,
13,
198,
37811,
198,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
20218,
7753,
198,
198,
11748,
331,
43695,
198,
198,
6738,
267,
6359,
62,
979,
13,
30604,
1330,
4566,
198... | 2.293339 | 4,459 |
import pytest
import rivalcfg.helpers
| [
11748,
12972,
9288,
198,
198,
11748,
8976,
37581,
13,
16794,
364,
628
] | 3.333333 | 12 |
import pytest
from dnaio import Sequence
from cutadapt.adapters import Adapter, Match, Where, LinkedAdapter
def test_issue_265():
    """Regression test for issue #265.

    Accessing the .matches property of a non-anchored LinkedAdapter's
    match result must not crash.
    """
    read = Sequence('name', 'AAAATTTT')
    linked = LinkedAdapter(
        Adapter('GGG', where=Where.FRONT),
        Adapter('TTT', where=Where.BACK),
        front_required=False,
        back_required=False,
        name='name',
    )
    assert linked.match_to(read).matches == 3
@pytest.mark.parametrize("where", [Where.PREFIX, Where.SUFFIX])
| [
11748,
12972,
9288,
198,
198,
6738,
288,
2616,
952,
1330,
45835,
198,
6738,
2005,
42552,
13,
324,
12126,
1330,
43721,
11,
13225,
11,
6350,
11,
7502,
276,
47307,
628,
628,
628,
628,
198,
198,
4299,
1332,
62,
21949,
62,
22980,
33529,
19... | 2.914573 | 199 |
# Copyright (c) 2018 The Pooch Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
"""
Calculating and checking file hashes.
"""
import hashlib
import functools
from pathlib import Path
# Mapping of algorithm name -> hasher constructor.
# From the docs: https://docs.python.org/3/library/hashlib.html#hashlib.new
# The named constructors are much faster than new() and should be
# preferred.
# Need to fallback on new() for some algorithms.
ALGORITHMS_AVAILABLE = {
    alg: getattr(hashlib, alg, functools.partial(hashlib.new, alg))
    for alg in hashlib.algorithms_available
}
try:
    import xxhash
    # xxhash doesn't have a list of available algorithms yet.
    # https://github.com/ifduyue/python-xxhash/issues/48
    ALGORITHMS_AVAILABLE.update(
        {
            alg: getattr(xxhash, alg, None)
            for alg in ["xxh128", "xxh64", "xxh32", "xxh3_128", "xxh3_64"]
        }
    )
    # The xxh3 algorithms are only available for version>=2.0. Set to None and
    # remove to ensure backwards compatibility.
    ALGORITHMS_AVAILABLE = {
        alg: func for alg, func in ALGORITHMS_AVAILABLE.items() if func is not None
    }
except ImportError:
    # xxhash is optional: without it only the hashlib algorithms are offered.
    pass
def file_hash(fname, alg="sha256"):
"""
Calculate the hash of a given file.
Useful for checking if a file has changed or been corrupted.
Parameters
----------
fname : str
The name of the file.
alg : str
The type of the hashing algorithm
Returns
-------
hash : str
The hash of the file.
Examples
--------
>>> fname = "test-file-for-hash.txt"
>>> with open(fname, "w") as f:
... __ = f.write("content of the file")
>>> print(file_hash(fname))
0fc74468e6a9a829f103d069aeb2bb4f8646bad58bf146bb0e3379b759ec4a00
>>> import os
>>> os.remove(fname)
"""
if alg not in ALGORITHMS_AVAILABLE:
raise ValueError(
f"Algorithm '{alg}' not available to the pooch library. "
"Only the following algorithms are available "
f"{list(ALGORITHMS_AVAILABLE.keys())}."
)
# Calculate the hash in chunks to avoid overloading the memory
chunksize = 65536
hasher = ALGORITHMS_AVAILABLE[alg]()
with open(fname, "rb") as fin:
buff = fin.read(chunksize)
while buff:
hasher.update(buff)
buff = fin.read(chunksize)
return hasher.hexdigest()
def hash_algorithm(hash_string):
    """
    Parse the name of the hash method from the hash string.

    The hash string should have the form ``algorithm:hash``, where the
    algorithm can be any name known to :mod:`hashlib`. When the prefix is
    omitted, or the hash string is None, the default ``"sha256"`` is assumed.

    Parameters
    ----------
    hash_string : str
        The hash string with optional algorithm prepended.

    Returns
    -------
    hash_algorithm : str
        The name of the algorithm, lowercased.

    Examples
    --------

    >>> print(hash_algorithm("qouuwhwd2j192y1lb1iwgowdj2898wd2d9"))
    sha256
    >>> print(hash_algorithm("md5:qouuwhwd2j192y1lb1iwgowdj2898wd2d9"))
    md5
    >>> print(hash_algorithm("SHA256:qouuwhwd2j192y1lb1iwgowdj2898wd2d9"))
    sha256
    >>> print(hash_algorithm(None))
    sha256

    """
    if hash_string is None or ":" not in hash_string:
        return "sha256"
    prefix, _, _ = hash_string.partition(":")
    return prefix.lower()
def hash_matches(fname, known_hash, strict=False, source=None):
    """
    Check if the hash of a file matches a known hash.

    A *known_hash* of None always matches. Both digests are lowercased before
    the comparison so case differences between registry entries and computed
    hashes do not cause spurious mismatches.

    Parameters
    ----------
    fname : str or PathLike
        The path to the file.
    known_hash : str
        The known hash. Optionally prefixed with ``alg:`` to select the
        hashing algorithm. Default is SHA256.
    strict : bool
        If True, raise a :class:`ValueError` on a mismatch instead of just
        returning False, telling the user the file may be corrupted.
    source : str
        Where the downloaded file came from (name or URL); used only in the
        *strict* error message. Defaults to *fname*.

    Returns
    -------
    is_same : bool
        True if the hash matches, False otherwise.

    """
    if known_hash is None:
        return True
    algorithm = hash_algorithm(known_hash)
    computed = file_hash(fname, alg=algorithm)
    matches = computed.lower() == known_hash.split(":")[-1].lower()
    if strict and not matches:
        origin = str(fname) if source is None else source
        raise ValueError(
            f"{algorithm.upper()} hash of downloaded file ({origin}) does not match"
            f" the known hash: expected {known_hash} but got {computed}. Deleted"
            " download for safety. The downloaded file may have been corrupted or"
            " the known hash may be outdated."
        )
    return matches
def make_registry(directory, output, recursive=True):
    """
    Make a registry of files and hashes for the given directory.

    This is helpful if you have many files in your test dataset as it keeps
    you from needing to manually update the registry.

    Parameters
    ----------
    directory : str
        Directory of the test data to put in the registry. All file names in
        the registry will be relative to this directory.
    output : str
        Name of the output registry file.
    recursive : bool
        If True, will recursively look for files in subdirectories of
        *directory*.

    """
    base = Path(directory)
    glob_pattern = "**/*" if recursive else "*"
    filenames = sorted(
        str(path.relative_to(base))
        for path in base.glob(glob_pattern)
        if path.is_file()
    )
    with open(output, "w") as registry:
        for name in filenames:
            digest = file_hash(str(base / name))
            # Registry entries always use Unix separators so the registry is
            # portable across operating systems.
            registry.write("{} {}\n".format(name.replace("\\", "/"), digest))
| [
2,
15069,
357,
66,
8,
2864,
383,
41778,
354,
34152,
13,
198,
2,
4307,
6169,
739,
262,
2846,
286,
262,
347,
10305,
513,
12,
2601,
682,
13789,
13,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
347,
10305,
12,
18,
12,
2601,
68... | 2.530635 | 2,693 |
from django.test import TestCase
from mock import patch
from classphoto import models as classphoto_api
from classphoto import db
from classphoto import emails
from signup import models as signup_api
@patch('signup.models.sequence_model.get_current_sequence_number', lambda: 1)
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
198,
6738,
15290,
1330,
8529,
198,
198,
6738,
1398,
23074,
1330,
4981,
355,
1398,
23074,
62,
15042,
198,
6738,
1398,
23074,
1330,
20613,
198,
6738,
1398,
23074,
1330,
7237,
198,
198,
... | 3.580247 | 81 |
import tensorflow as tf
slim = tf.contrib.slim
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
  """Builds the 35x35 resnet block.

  Concatenates three parallel convolution branches, projects the result back
  to the input depth with a 1x1 convolution, scales it, and adds it to the
  input as a residual connection.

  Args:
    net: 4-D input tensor.
    scale: multiplier for the residual branch before the addition.
    activation_fn: activation applied after the addition; None disables it.
    scope: optional variable_scope name.
    reuse: whether to reuse the block's variables.

  Returns:
    A tensor with the same shape as `net`.
  """
  with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
    with tf.variable_scope('Branch_0'):
      tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
    with tf.variable_scope('Branch_1'):
      tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
      tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
    with tf.variable_scope('Branch_2'):
      tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
      tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3')
      tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3')
    mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_1, tower_conv2_2])
    # 1x1 projection back to the input depth so the residual add is valid.
    up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                     activation_fn=None, scope='Conv2d_1x1')
    net += scale * up
    if activation_fn:
      net = activation_fn(net)
  return net
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
  """Builds the 17x17 resnet block.

  Two parallel branches (a 1x1 conv, and a 1x1 -> 1x7 -> 7x1 factorized conv)
  are concatenated, projected back to the input depth, scaled, and added to
  the input as a residual connection.

  Args:
    net: 4-D input tensor.
    scale: multiplier for the residual branch before the addition.
    activation_fn: activation applied after the addition; None disables it.
    scope: optional variable_scope name.
    reuse: whether to reuse the block's variables.

  Returns:
    A tensor with the same shape as `net`.
  """
  with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
    with tf.variable_scope('Branch_0'):
      tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
    with tf.variable_scope('Branch_1'):
      tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
      tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7],
                                  scope='Conv2d_0b_1x7')
      tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1],
                                  scope='Conv2d_0c_7x1')
    mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
    # 1x1 projection back to the input depth so the residual add is valid.
    up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                     activation_fn=None, scope='Conv2d_1x1')
    net += scale * up
    if activation_fn:
      net = activation_fn(net)
  return net
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
  """Builds the 8x8 resnet block.

  Two parallel branches (a 1x1 conv, and a 1x1 -> 1x3 -> 3x1 factorized conv)
  are concatenated, projected back to the input depth, scaled, and added to
  the input as a residual connection.

  Args:
    net: 4-D input tensor.
    scale: multiplier for the residual branch before the addition.
    activation_fn: activation applied after the addition; None disables it.
    scope: optional variable_scope name.
    reuse: whether to reuse the block's variables.

  Returns:
    A tensor with the same shape as `net`.
  """
  with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
    with tf.variable_scope('Branch_0'):
      tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
    with tf.variable_scope('Branch_1'):
      tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
      tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3],
                                  scope='Conv2d_0b_1x3')
      tower_conv1_2 = slim.conv2d(tower_conv1_1, 256, [3, 1],
                                  scope='Conv2d_0c_3x1')
    mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
    # 1x1 projection back to the input depth so the residual add is valid.
    up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                     activation_fn=None, scope='Conv2d_1x1')
    net += scale * up
    if activation_fn:
      net = activation_fn(net)
  return net
def inception_resnet_v2(inputs,
                        reuse=None,
                        scope='InceptionResnetV2'):
  """Creates the Inception Resnet V2 model (feature extractor only).

  This variant has no classification head: it ends at the final 1x1
  convolution and returns the feature map plus the intermediate end points.
  (The upstream docstring also listed num_classes/is_training/
  dropout_keep_prob, which this signature does not take.)

  Args:
    inputs: a 4-D tensor of size [batch_size, height, width, 3].
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.

  Returns:
    net: the final feature map (8 x 8 x 1536 per the shape comments below).
    end_points: dict mapping layer names to their output tensors.
  """
  end_points = {}
  with tf.variable_scope(scope, 'InceptionResnetV2', [inputs], reuse=reuse):
    with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                        stride=1, padding='SAME'):
      # 149 x 149 x 32
      net = slim.conv2d(inputs, 32, 3, stride=2, padding='VALID',
                        scope='Conv2d_1a_3x3')
      end_points['Conv2d_1a_3x3'] = net
      # 147 x 147 x 32
      net = slim.conv2d(net, 32, 3, padding='VALID',
                        scope='Conv2d_2a_3x3')
      end_points['Conv2d_2a_3x3'] = net
      # 147 x 147 x 64
      net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3')
      end_points['Conv2d_2b_3x3'] = net
      # 73 x 73 x 64
      net = slim.max_pool2d(net, 3, stride=2, padding='VALID',
                            scope='MaxPool_3a_3x3')
      end_points['MaxPool_3a_3x3'] = net
      # 73 x 73 x 80
      net = slim.conv2d(net, 80, 1, padding='VALID',
                        scope='Conv2d_3b_1x1')
      end_points['Conv2d_3b_1x1'] = net
      # 71 x 71 x 192
      net = slim.conv2d(net, 192, 3, padding='VALID',
                        scope='Conv2d_4a_3x3')
      end_points['Conv2d_4a_3x3'] = net
      # 35 x 35 x 192
      net = slim.max_pool2d(net, 3, stride=2, padding='VALID',
                            scope='MaxPool_5a_3x3')
      end_points['MaxPool_5a_3x3'] = net
      # 35 x 35 x 320
      with tf.variable_scope('Mixed_5b'):
        with tf.variable_scope('Branch_0'):
          tower_conv = slim.conv2d(net, 96, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
          tower_conv1_0 = slim.conv2d(net, 48, 1, scope='Conv2d_0a_1x1')
          tower_conv1_1 = slim.conv2d(tower_conv1_0, 64, 5,
                                      scope='Conv2d_0b_5x5')
        with tf.variable_scope('Branch_2'):
          tower_conv2_0 = slim.conv2d(net, 64, 1, scope='Conv2d_0a_1x1')
          tower_conv2_1 = slim.conv2d(tower_conv2_0, 96, 3,
                                      scope='Conv2d_0b_3x3')
          tower_conv2_2 = slim.conv2d(tower_conv2_1, 96, 3,
                                      scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          tower_pool = slim.avg_pool2d(net, 3, stride=1, padding='SAME',
                                       scope='AvgPool_0a_3x3')
          tower_pool_1 = slim.conv2d(tower_pool, 64, 1,
                                     scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[tower_conv, tower_conv1_1,
                                        tower_conv2_2, tower_pool_1])
      end_points['Mixed_5b'] = net
      net = slim.repeat(net, 10, block35, scale=0.17)
      # 17 x 17 x 1024
      with tf.variable_scope('Mixed_6a'):
        with tf.variable_scope('Branch_0'):
          tower_conv = slim.conv2d(net, 384, 3, stride=2, padding='VALID',
                                   scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_1'):
          tower_conv1_0 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
          tower_conv1_1 = slim.conv2d(tower_conv1_0, 256, 3,
                                      scope='Conv2d_0b_3x3')
          tower_conv1_2 = slim.conv2d(tower_conv1_1, 384, 3,
                                      stride=2, padding='VALID',
                                      scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_2'):
          tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',
                                       scope='MaxPool_1a_3x3')
        net = tf.concat(axis=3, values=[tower_conv, tower_conv1_2, tower_pool])
      end_points['Mixed_6a'] = net
      net = slim.repeat(net, 20, block17, scale=0.10)
      with tf.variable_scope('Mixed_7a'):
        with tf.variable_scope('Branch_0'):
          tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
          tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2,
                                     padding='VALID', scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_1'):
          tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
          tower_conv1_1 = slim.conv2d(tower_conv1, 288, 3, stride=2,
                                      padding='VALID', scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_2'):
          tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
          tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3,
                                      scope='Conv2d_0b_3x3')
          tower_conv2_2 = slim.conv2d(tower_conv2_1, 320, 3, stride=2,
                                      padding='VALID', scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_3'):
          tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',
                                       scope='MaxPool_1a_3x3')
        net = tf.concat(axis=3, values=[tower_conv_1, tower_conv1_1,
                                        tower_conv2_2, tower_pool])
      end_points['Mixed_7a'] = net
      net = slim.repeat(net, 9, block8, scale=0.20)
      net = block8(net, activation_fn=None)
      # GVH: Not sure if we want or need this convolution
      # 8 x 8 x 2080
      net = slim.conv2d(net, 1536, 1, scope='Conv2d_7b_1x1')
      end_points['Conv2d_7b_1x1'] = net
      # 8 x 8 x 1536
      return net, end_points
11748,
11192,
273,
11125,
355,
48700,
198,
198,
82,
2475,
796,
48700,
13,
3642,
822,
13,
82,
2475,
628,
198,
4299,
2512,
2327,
7,
3262,
11,
5046,
28,
16,
13,
15,
11,
14916,
62,
22184,
28,
27110,
13,
20471,
13,
260,
2290,
11,
8354,... | 1.843879 | 4,836 |
from fastai.vision.all import *
import re
# Labels CSV: one row per image name with its (x_p, y_p) focus-point coords.
df = pd.read_csv(Path('download/labels/train_labels.csv'), names=['name','x_p','y_p'], header=0)
# NOTE(review): `get_focus_point` is not defined or imported in this file as
# shown — presumably a helper returning the labelled point for an image
# (likely via `df` above, which is otherwise unused); confirm its origin.
imgs = DataBlock(blocks=(ImageBlock, PointBlock), get_items=get_image_files, get_y=get_focus_point, splitter=RandomSplitter(valid_pct=0.2, seed=42), batch_tfms=[*aug_transforms(size=(244, 244)), Normalize.from_stats(*imagenet_stats)], item_tfms=Resize(244),)
dls = imgs.dataloaders(Path('download/images/norm_images'), bs=16)
# Point-regression learner; y_range (-1, 1) matches fastai's normalized point
# coordinates. Fine-tune briefly and export the pickled learner.
cnn_learner(dls, resnet18, y_range=(-1,1)).fine_tune(3, 4e-5).export(('./models/m.pkl'))
6738,
3049,
1872,
13,
10178,
13,
439,
1330,
1635,
198,
11748,
302,
198,
198,
7568,
796,
279,
67,
13,
961,
62,
40664,
7,
15235,
10786,
15002,
14,
23912,
1424,
14,
27432,
62,
23912,
1424,
13,
40664,
33809,
3891,
28,
17816,
3672,
41707,
... | 2.444934 | 227 |
#!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 8/1/14
###Function: scatter plot zOR metrics vs. epidemic duration at state level, using the state peak retrospective classification
## one plot per season per classification period vs. epidemic duration
## a single plot for all seasons for retrospective period vs. epidemic duration
## define epidemic duration as the number of weeks that falls between the point in the epidemic where cumulative incidence is 20% and 80% cumulative incidence (inclusive)
###Import data: Py_export/SDI_state_classifications_7st.csv, R_export/OR_zip3_week_outpatient_cl.csv, R_export/allpopstat_zip3_season_cl.csv
###Command Line: python Supp_zOR_epiduration_state.py
##############################################
### notes ###
### packages/modules ###
import csv
import matplotlib.pyplot as plt
## local modules ##
import functions as fxn
### data structures ###
### called/local plotting parameters ###
ps = fxn.pseasons  # season identifiers iterated below
sl = fxn.gp_seasonlabels  # legend labels, parallel to ps
fs = 24  # axis-label font size
fssml = 16  # tick/annotation font size
fw = fxn.gp_fluweeks  # number of flu weeks; x-axis upper bound
wklab = fxn.gp_weeklabels  # week labels (not used in this script as shown)
scol = fxn.gp_colors  # per-season plot colors, parallel to ps
epi_min_perc = 10 # cumulative incidence percentage that defines beginning of epidemic
epi_max_perc = 90 # cumulative incidence percentage that defines end of epidemic
# NOTE(review): the file header says the epidemic is bounded by 20% and 80%
# cumulative incidence, but the code uses 10%/90% — confirm which is intended.
### functions ###
### data files ###
# state zOR data
st_zORin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_state_classifications_7st.csv', 'r')
st_zORin.readline()  # skip header row
st_zOR = csv.reader(st_zORin, delimiter=',')
# state incidence files
st_incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/OR_zip3_week_outpatient_cl.csv', 'r')
st_incidin.readline()  # skip header row
stincid = csv.reader(st_incidin, delimiter=',')
st_popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/allpopstat_zip3_season_cl.csv','r')
st_popin.readline()  # skip header row
stpop = csv.reader(st_popin, delimiter=',')
### program ###
# import state classification data
# d_st_classif[(season, state abbr)] = (mean retro zOR, mean early zOR)
d_st_classif = fxn.readStateClassifFile(st_zOR)
# grab list of unique states in dataset
states = list(set([key[1] for key in d_st_classif]))
## state-level data ##
d_wk, d_zip3_st, d_incid_st, d_OR_st = fxn.week_OR_processing_state(stincid, stpop)
# dict_zOR_st[(week, state)] = zOR
d_zOR_st = fxn.week_zOR_processing_state(d_wk, d_OR_st)
# dict_incid53ls_st[(seasonnum, state)] = [ILI wk 40, ILI wk 41,...], dict_OR53ls_st[(seasonnum, state)] = [OR wk 40, OR wk 41, ...], dict_zOR53ls_st[(seasonnum, state)] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls_st, d_OR53ls_st, d_zOR53ls_st = fxn.week_plotting_dicts_state(d_wk, d_incid_st, d_OR_st, d_zOR_st)
# plot values per season
# Per-season scatter plots: mean zOR (retrospective, then early warning)
# vs. epidemic duration at the state level; one figure per season.
for s in ps:
	retrozOR = [d_st_classif[(s, st)][0] for st in states]
	earlyzOR = [d_st_classif[(s, st)][1] for st in states]
	epidur = [fxn.epidemic_duration(d_incid53ls_st[(s, st)], epi_min_perc, epi_max_perc) for st in states]
	# mean retro zOR vs epidemic duration
	plt.plot(epidur, retrozOR, marker = 'o', color = 'black', linestyle = 'None')
	for st, x, y in zip(states, epidur, retrozOR):
		plt.annotate(st, xy=(x,y), xytext=(-10,5), textcoords='offset points', fontsize=fssml)
	plt.ylabel('Mean Retrospective zOR', fontsize=fs)
	plt.xlabel('Epidemic Duration (number of weeks), Season %s' %(s), fontsize=fs)
	plt.xticks(range(fw)[::5], fontsize=fssml)
	plt.yticks(fontsize=fssml)
	plt.xlim([0,fw])
	plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs/Supp/zOR_epidur_state/zOR_retro_epidur_state_Season%s.png' %(s), transparent=False, bbox_inches='tight', pad_inches=0)
	plt.close()
	# plt.show()
	# mean early warning zOR vs epidemic duration (comment previously said "retro")
	plt.plot(epidur, earlyzOR, marker = 'o', color = 'black', linestyle = 'None')
	for st, x, y in zip(states, epidur, earlyzOR):
		plt.annotate(st, xy=(x,y), xytext=(-10,5), textcoords='offset points', fontsize=fssml)
	plt.ylabel('Mean Early Warning zOR', fontsize=fs)
	plt.xlabel('Epidemic Duration (number of weeks), Season %s' %(s), fontsize=fs)
	plt.xticks(range(fw)[::5], fontsize=fssml)
	plt.yticks(fontsize=fssml)
	plt.xlim([0,fw])
	plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs/Supp/zOR_epidur_state/zOR_early_epidur_state_Season%s.png' %(s), transparent=False, bbox_inches='tight', pad_inches=0)
	plt.close()
	# plt.show()
# mean retro zOR vs epidemic duration -- all seasons on a single plot
for s, col, lab in zip(ps, scol, sl):
	retrozOR = [d_st_classif[(s, st)][0] for st in states]
	epidur = [fxn.epidemic_duration(d_incid53ls_st[(s, st)], epi_min_perc, epi_max_perc) for st in states]
	plt.plot(epidur, retrozOR, marker = 'o', color = col, label = lab, linestyle = 'None')
plt.ylabel('Mean Retrospective zOR', fontsize=fs)
plt.xlabel('Epidemic Duration (number of weeks)', fontsize=fs)
plt.xticks(range(fw)[::5], fontsize=fssml)
plt.yticks(fontsize=fssml)
plt.legend(loc='upper left')
plt.xlim([0,fw])
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs/Supp/zOR_epidur_state/zOR_retro_epidur_state_allseas.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show()
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
29113,
7804,
4242,
2235,
198,
21017,
37906,
11055,
198,
21017,
13838,
25,
10674,
5741,
198,
21017,
10430,
25,
807,
14,
16,
14,
1415,
198,
21017,
22203,
25,
41058,
7110,
1976,
1581,
20731,
... | 2.527791 | 2,069 |
import pandas as pd
from subprocess import call
# Generate raw data file
# call("./get_actions.sh") // Add this if you dont have the out raw data TODO: Replace this with the go script
# Do we add a count for each category a action has or ???
# Strip the "@version" suffix from each action reference so identical actions
# at different versions are counted together. Context managers guarantee the
# handles are closed even if an exception occurs mid-loop.
with open("data/actions_out/actions_raw.txt", "r") as all_actions, \
        open("data/actions_out/no_version_actions.txt", "w") as no_ver_actions:
    for line in all_actions:
        # rstrip the newline before re-adding one: lines without an "@"
        # otherwise keep their trailing "\n" and produce blank output lines.
        no_ver_actions.write(line.split("@")[0].rstrip("\n") + "\n")

# Data analysis: one action name per line; 'usages' stays empty (NaN) since
# the input file has a single column.
data = pd.read_csv(r'data/actions_out/no_version_actions.txt',
                   names=['action_name', 'usages'])

# Write usage counts per action (most used first) to csv.
table = data['action_name'].value_counts()
table.to_csv('table.csv', index=True, header=False)

# Write the unique action names, sorted so the output is deterministic
# (iterating a set directly yields an arbitrary order).
unique_actions = sorted(set(data['action_name'].to_list()))
with open("data/actions_out/unique_actions_list.txt", "w") as out:
    for action in unique_actions:
        out.write(action + "\n")
| [
11748,
19798,
292,
355,
279,
67,
198,
6738,
850,
14681,
1330,
869,
198,
198,
2,
2980,
378,
8246,
1366,
2393,
198,
2,
869,
7,
1911,
14,
1136,
62,
4658,
13,
1477,
4943,
3373,
3060,
428,
611,
345,
17666,
423,
262,
503,
8246,
1366,
16... | 2.729282 | 362 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 ASLP@NPU Ke Wang
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import sys
import librosa
import numpy as np
import soundfile as sf
from scipy.io import wavfile
MAX_INT16 = np.iinfo(np.int16).max  # largest signed 16-bit PCM sample value (not used in the code shown)
EPSILON = np.finfo(np.float32).eps  # smallest positive float32 increment (not used in the code shown)
# Upper clip for log-spectra before np.exp so the result stays within float32
# range; applied in overlap_and_add when use_log is True.
MAX_EXP = np.log(np.finfo(np.float32).max - 10.0)
def get_window(window_size, window_type, square_root_window=True):
    """Build an analysis window of ``window_size`` samples.

    Args:
        window_size: number of samples in the window.
        window_type: either 'hamming' or 'hanning'.
        square_root_window: if True, return the element-wise square root of
            the window.
    """
    make_window = {
        'hamming': np.hamming,
        'hanning': np.hanning,
    }[window_type]
    window = make_window(window_size)
    if square_root_window:
        return np.sqrt(window)
    return window
def pre_emphasis(signal, coefficient=0.97):
    """Pre-emphasis of the original signal: y(n) = x(n) - a*x(n-1).

    The first sample is passed through unchanged.
    """
    filtered = signal[1:] - coefficient * signal[:-1]
    return np.concatenate(([signal[0]], filtered))
def de_emphasis(signal, coefficient=0.97):
    """De-emphasis of the original signal: y(n) = x(n) + a*y(n-1).

    Note: ``signal`` is modified in place (recursive filter) and also
    returned for convenience.
    """
    num_samples = signal.shape[0]
    for idx in range(1, num_samples):
        signal[idx] += coefficient * signal[idx - 1]
    return signal
def stft(signal,
         sample_rate,
         frame_length=32,
         frame_shift=8,
         window_type="hanning",
         preemphasis=0.0,
         square_root_window=True):
    """Compute the Short Time Fourier Transform.

    Args:
        signal: input speech signal
        sample_rate: waveform data sample frequency (Hz)
        frame_length: frame length in milliseconds
        frame_shift: frame shift in milliseconds
        window_type: type of window
        preemphasis: pre-emphasis coefficient; 0.0 disables pre-emphasis
        square_root_window: square root window
    Return:
        fft: (n/2)+1 dim complex STFT results, transposed to
        (frames, bins) layout (librosa returns (bins, frames))
    """
    if preemphasis != 0.0:
        signal = pre_emphasis(signal, preemphasis)
    # Convert milliseconds to sample counts.
    hop_length = int(sample_rate * frame_shift / 1000)
    win_length = int(sample_rate * frame_length / 1000)
    # NOTE(review): fft_point is not defined in this file as shown —
    # presumably it rounds win_length up to an FFT-friendly size; confirm.
    num_point = fft_point(win_length)
    window = get_window(num_point, window_type, square_root_window)
    feat = librosa.stft(signal, n_fft=num_point, hop_length=hop_length,
                        win_length=win_length, window=window)
    return np.transpose(feat)
def get_phase(signal,
              sample_rate,
              frame_length=32,
              frame_shift=8,
              window_type="hanning",
              preemphasis=0.0,
              square_root_window=True):
    """Return the phase (radians) of the STFT of ``signal``.

    All parameters are forwarded unchanged to :func:`stft`; see there for
    their meaning.
    """
    spectrum = stft(signal, sample_rate, frame_length, frame_shift,
                    window_type, preemphasis, square_root_window)
    return np.angle(spectrum)
def overlap_and_add(spectrum,
                    signal,
                    sample_rate,
                    frame_length=32,
                    frame_shift=8,
                    window_type="hanning",
                    preemphasis=0.0,
                    use_log=False,
                    use_power=False,
                    square_root_window=True):
    """Convert frames to signal using overlap-and-add synthesis.

    Args:
        spectrum: magnitude spectrum; may be a log spectrum (use_log=True)
            and/or power spectrum (use_power=True)
        signal: wave signal to supply phase information
    Return:
        wav: synthesied output waveform
    """
    if use_log:
        # Clip before exponentiating so np.exp cannot overflow float32.
        spectrum = np.clip(spectrum, a_min=None, a_max=MAX_EXP)
        spectrum = np.exp(spectrum)
    if use_power:
        spectrum = np.sqrt(spectrum)
    phase = get_phase(signal, sample_rate, frame_length, frame_shift,
                      window_type, preemphasis, square_root_window)
    # Re-attach the reference phase to the (real) magnitude spectrum.
    spectrum = spectrum * np.exp(1.0j * phase)
    if spectrum.shape != phase.shape:
        # NOTE(review): mismatches are only printed, never raised — and this
        # check runs after the multiply above, which would already fail for
        # non-broadcastable shapes; consider raising before the multiply.
        print(('Wave and Spectrum are not the same length, '
               'phase.shape = {}, spectrum.shape = {}').format(
                   spectrum.shape, phase.shape), 'error')
    spectrum = np.transpose(spectrum)
    # Convert milliseconds to sample counts.
    hop_length = int(sample_rate * frame_shift / 1000)
    win_length = int(sample_rate * frame_length / 1000)
    # NOTE(review): fft_point is not defined in this file as shown — confirm.
    num_point = fft_point(win_length)
    window = get_window(num_point, window_type, square_root_window)
    wav = librosa.istft(spectrum, hop_length=hop_length,
                        win_length=win_length, window=window)
    if preemphasis != 0.0:
        wav = de_emphasis(wav, preemphasis)
    return wav
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
2864,
220,
7054,
19930,
31,
45,
5105,
220,
220,
220,
3873,
15233,
198,
198,
6738,
11593,
37443,
834,
1... | 2.253327 | 2,029 |
"""
vim: set enc=utf-8
Author : winterTTr
Mail : winterTTr@gmail.com
Desc : Tools for Operation on Win32 Environment variables
Module : win32export.py
"""
import win32gui
import win32con
import win32api
| [
37811,
198,
31124,
25,
900,
2207,
28,
40477,
12,
23,
198,
198,
13838,
1058,
220,
7374,
51,
2898,
198,
25804,
220,
220,
1058,
220,
7374,
51,
2898,
31,
14816,
13,
785,
198,
24564,
220,
220,
1058,
220,
20003,
329,
14680,
319,
7178,
262... | 2.917808 | 73 |
from execpeewee.builder import PeeweeModel
from execpeewee.handler import ExecPeewee
from execpeewee.mapping import PeeweeFields
# Names re-exported as the package's public API.
__all__ = [
    'PeeweeModel',
    'ExecPeewee',
    'PeeweeFields'
]
| [
6738,
2452,
431,
413,
1453,
13,
38272,
1330,
2631,
413,
1453,
17633,
198,
6738,
2452,
431,
413,
1453,
13,
30281,
1330,
8393,
6435,
413,
1453,
198,
6738,
2452,
431,
413,
1453,
13,
76,
5912,
1330,
2631,
413,
1453,
15878,
82,
628,
198,
... | 2.481481 | 81 |
f = lambda a, b, *c, d: None # keyword-only arg after *c (d has no default; original comment said "default arg")
#f = lambda a, b=1, *c, d: None # default arg for lambda not implemented
| [
69,
796,
37456,
257,
11,
275,
11,
1635,
66,
11,
288,
25,
6045,
1303,
4277,
1822,
198,
2,
69,
796,
37456,
257,
11,
275,
28,
16,
11,
1635,
66,
11,
288,
25,
6045,
1303,
4277,
1822,
329,
37456,
407,
9177,
198
] | 2.829268 | 41 |
# Generated by Django 2.2.24 on 2021-09-13 15:58
import colorfield.fields
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
1731,
319,
33448,
12,
2931,
12,
1485,
1315,
25,
3365,
198,
198,
11748,
3124,
3245,
13,
25747,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 3.027778 | 36 |
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SelectField, BooleanField, SubmitField
from wtforms.fields.html5 import DateField
from flask_wtf.file import FileField, FileAllowed, FileRequired
from wtforms.validators import DataRequired, ValidationError, EqualTo, Email, Length
from app.models import User | [
6738,
42903,
62,
86,
27110,
1330,
46947,
8479,
198,
6738,
266,
83,
23914,
1330,
10903,
15878,
11,
30275,
15878,
11,
9683,
15878,
11,
41146,
15878,
11,
39900,
15878,
198,
6738,
266,
83,
23914,
13,
25747,
13,
6494,
20,
1330,
7536,
15878,
... | 3.840909 | 88 |
import argparse
import os
import webbrowser
from Option import Option
from functions.Clock import Clock
from functions.shut import shut
from functions.Ipconfig import Showip
from functions.CryptoSystem import *
from functions.Nslookup import Nslookup
from functions.PortScanner import Scanport
if __name__ == "__main__":
    main()  # NOTE(review): main() is not defined in this file as shown — confirm it exists.
| [
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
3992,
40259,
198,
6738,
16018,
1330,
16018,
198,
6738,
5499,
13,
44758,
1330,
21328,
198,
6738,
5499,
13,
49625,
1330,
4423,
198,
6738,
5499,
13,
40,
79,
11250,
1330,
5438,
541,
198,
67... | 3.752809 | 89 |
from django import template
from django.contrib.auth import get_permission_codename
from django.utils.translation import gettext_lazy as _
from django.utils.safestring import mark_safe
register = template.Library()
@register.simple_tag(takes_context=True)
| [
6738,
42625,
14208,
1330,
11055,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
525,
3411,
62,
19815,
12453,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
651,
5239,
62,
75,
12582,
355,
4808,
198,
6738,
42625,
... | 3.350649 | 77 |
# Base URL of the Discoin service.
DOMAIN = "https://discoin.zws.im"
# API version string; presumably the Discoin API version targeted — confirm.
VERSION = "4.0"
39170,
29833,
796,
366,
5450,
1378,
6381,
3630,
13,
89,
18504,
13,
320,
1,
198,
43717,
796,
366,
19,
13,
15,
1
] | 2.227273 | 22 |