content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#!/usr/bin/env python3
import scanner
# ------------------------------------------------------------------------------
# Classes for holding source code entities
# ------------------------------------------------------------------------------
# Factories for module and function filters
# ------------------------------------------------------------------------------
# Class for holding and and querying source code maps
# ------------------------------------------------------------------------------
# State machine to build a code map from scanned source
# ------------------------------------------------------------------------------
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
628,
198,
11748,
27474,
628,
198,
2,
16529,
26171,
198,
2,
38884,
329,
4769,
2723,
2438,
12066,
628,
628,
198,
2,
16529,
26171,
198,
2,
19020,
1749,
329,
8265,
290,
2163,
16628,
628,
... | 6.805825 | 103 |
'''
Instructions:
1. Make sure you have Flask, Flask-Rauth, and SQLAlchemy installed.
$ pip install Flask Flask-Rauth SQLAlchemy
2. Open a Python shell in this directory and execute the following:
$ python
>>> from tweet import init_db
>>> init_db()
>>> exit()
This will initialize the SQLite database.
3. Start the application.
$ python tweet.py
4. Navigate your web browser to where this app is being served (localhost,
by default).
'''
from flask import Flask, request, redirect, url_for, session, flash, g, render_template
from flask.ext.rauth import RauthOAuth1
from sqlalchemy import create_engine, Column, Integer, String, Text
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# setup flask
app = Flask(__name__)
# you can specify the consumer key and consumer secret in the application,
# like this:
app.config.update(
TWITTER_CONSUMER_KEY='your_consumer_key',
TWITTER_CONSUMER_SECRET='your_consumer_secret',
SECRET_KEY='just a secret key, to confound the bad guys',
DEBUG = True
)
# setup the twitter endpoint
twitter = RauthOAuth1(
name='twitter',
base_url='https://api.twitter.com/1/',
request_token_url='https://api.twitter.com/oauth/request_token',
access_token_url='https://api.twitter.com/oauth/access_token',
authorize_url='https://api.twitter.com/oauth/authorize'
)
# this call simply initializes default an empty consumer key and secret in the app
# config if none exist.
# I've included it to match the "look" of Flask extensions
twitter.init_app(app)
# setup sqlalchemy
engine = create_engine('sqlite:////tmp/tweet.db')
db_session = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=engine))
Base = declarative_base()
Base.query = db_session.query_property()
@app.before_request
@app.after_request
@twitter.tokengetter
def get_twitter_token():
'''
This is used by the API to look for the auth token and secret that are used
for Twitter API calls. If you don't want to store this in the database,
consider putting it into the session instead.
Since the Twitter API is OAuth 1.0a, the `tokengetter` must return a
2-tuple: (oauth_token, oauth_secret).
'''
user = g.user
if user is not None:
return user.oauth_token, user.oauth_secret
@app.route('/')
@app.route('/tweet', methods=['POST'])
def tweet():
'''
Calls the remote twitter API to create a new status update.
'''
if g.user is None:
return redirect(url_for('login', next=request.url))
status = request.form['tweet']
if not status:
return redirect(url_for('index'))
resp = twitter.post('statuses/update.json', data={
'status': status
})
if resp.status == 403:
flash('Your tweet was too long.')
elif resp.status == 401:
flash('Authorization error with Twitter.')
else:
flash('Successfully tweeted your tweet (ID: #%s)' % resp.content['id'])
return redirect(url_for('index'))
@app.route('/login')
def login():
'''
Calling into `authorize` will cause the OAuth 1.0a machinery to kick
in. If all has worked out as expected or if the user denied access to
his/her information, the remote application will redirect back to the callback URL
provided.
Int our case, the 'authorized/' route handles the interaction after the redirect.
'''
return twitter.authorize(callback=url_for('authorized',
_external=True,
next=request.args.get('next') or request.referrer or None))
@app.route('/logout')
@app.route('/authorized')
@twitter.authorized_handler()
def authorized(resp, oauth_token):
'''
Called after authorization. After this function finished handling,
the tokengetter from above is used to retrieve the 2-tuple containing the
oauth_token and oauth_token_secret.
Because reauthorization often changes any previous
oauth_token/oauth_token_secret values, then we must update them in the
database.
If the application redirected back after denying, the `resp` passed
to the function will be `None`. Unfortunately, OAuth 1.0a (the version
that Twitter, LinkedIn, etc use) does not specify exactly what should
happen when the user denies access. In the case of Twitter, a query
parameter `denied=(some hash)` is appended to the redirect URL.
'''
next_url = request.args.get('next') or url_for('index')
# check for the Twitter-specific "access_denied" indicator
if resp is None and 'denied' in request.args:
flash(u'You denied the request to sign in.')
return redirect(next_url)
# pull out the nicely parsed response content.
content = resp.content
user = User.query.filter_by(name=content['screen_name']).first()
# this if the first time signing in for this user
if user is None:
user = User(content['screen_name'])
db_session.add(user)
# we now update the oauth_token and oauth_token_secret
# this involves destructuring the 2-tuple that is passed back from the
# Twitter API, so it can be easily stored in the SQL database
user.oauth_token = oauth_token[0]
user.oauth_secret = oauth_token[1]
db_session.commit()
session['user_id'] = user.id
flash('You were signed in')
return redirect(next_url)
if __name__ == '__main__':
app.run()
| [
7061,
6,
198,
43993,
507,
25,
198,
198,
16,
13,
6889,
1654,
345,
423,
46947,
11,
46947,
12,
49,
18439,
11,
290,
16363,
2348,
26599,
6589,
13,
628,
220,
220,
220,
220,
220,
220,
720,
7347,
2721,
46947,
46947,
12,
49,
18439,
16363,
... | 2.942795 | 1,853 |
# -*- coding: utf-8 -*-
from dp_tornado.engine.controller import Controller as dpController
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
288,
79,
62,
45910,
4533,
13,
18392,
13,
36500,
1330,
22741,
355,
288,
79,
22130,
628
] | 2.9375 | 32 |
import typing
from abc import ABC, abstractmethod
| [
11748,
19720,
198,
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
628
] | 4.25 | 12 |
import os
from collections import namedtuple
from hyperopt import fmin, tpe, Trials, space_eval
import numpy as np
import pandas as pd
import pytest
import requests
import yaml
from crosspredict.crossval import \
CrossLightgbmModel, CrossXgboostModel, CrossCatboostModel
from crosspredict.iterator import Iterator
from crosspredict.report_binary import ReportBinary
pd.set_option('display.max_columns', 999)
pd.set_option('display.max_rows', 999)
PARAMETERS_FPATH = 'tests/parameters.yml'
@pytest.fixture(scope='module')
@pytest.fixture(scope='module')
@pytest.fixture(scope='module')
@pytest.fixture(scope='module')
@pytest.fixture(scope='module')
@pytest.fixture(scope='module')
@pytest.mark.slow
| [
11748,
28686,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
198,
6738,
8718,
8738,
1330,
277,
1084,
11,
256,
431,
11,
28945,
11,
2272,
62,
18206,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
12... | 2.955285 | 246 |
# -*- coding: utf-8 -*-
import os
# For different service
SITE_NAME = 'Dentimonial'
TRACKING_HASHTAG = '#dentimonial'
TWEET_ACTION_NAME = 'Send'
SERVICE_NAME = 'Identi.ca'
SERVICE_URI = 'http://identi.ca/'
FOLLOWERS_NAME = 'Subscribers'
FOLLOWED_NAME = 'Subscribed'
FOLLOW_NAME = 'Subscribe to'
TWEET_NAME = 'Notice'
# Twitter Account
TWITTER_ID = ''
TWITTER_PW = ''
# Switches
DEBUG = True
# UI
MAIN_CSS_REV = '0'
MAIN_JS_REV = '0'
# APIs
TWITTER_USERS_SHOW_URI = 'https://identi.ca/api/users/show.json?screen_name=%s'
TWITTER_SEARCH_BASE_URI = 'https://identi.ca/api/search.json'
TWITTER_SHOW_URI = 'https://identi.ca/api/friendships/show.json?source_screen_name=%s&target_screen_name=%s'
# Tasks
TASK_GET_TWIMONIAL_INTERVAL = 300
TASK_PROCESS_TQI_INTERVAL = 300
# Rate limit
RATE_AGREE_DURATION = 3600
RATE_AGREE_MASS = 5
RATE_AGREE_MASS_DURATION = 60
# Cache time
CACHE_TIME_HOMEPAGE = 300
CACHE_TIME_USERPAGE = 300
CACHE_TIME_USERLISTPAGE = 300
CACHE_TIME_LISTPAGE = 300
CACHE_TIME_USERFEED_TOP = 300
# Check Profile Image
CHECK_PROFILE_IMAGE_INTERVAL = 86400 * 7
# Under development server?
DEV = os.environ['SERVER_SOFTWARE'].startswith('Development')
# Base URI
if DEV:
BASE_URI = 'http://localhost:8080/'
BASE_SECURE_URI = BASE_URI
else:
BASE_URI = 'http://%s.appspot.com/' % os.environ['APPLICATION_ID']
BASE_SECURE_URI = 'https://%s.appspot.com/' % os.environ['APPLICATION_ID']
BEFORE_HEAD_END = ''
BEFORE_BODY_END = ''
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
628,
198,
2,
1114,
1180,
2139,
198,
50,
12709,
62,
20608,
796,
705,
35,
298,
20473,
498,
6,
198,
5446,
8120,
2751,
62,
39,
1921,
6535,
4760,
796,
... | 2.306349 | 630 |
version_info = (2, 0, 5)
version = '.'.join(str(c) for c in version_info) | [
9641,
62,
10951,
796,
357,
17,
11,
657,
11,
642,
8,
198,
9641,
796,
705,
2637,
13,
22179,
7,
2536,
7,
66,
8,
329,
269,
287,
2196,
62,
10951,
8
] | 2.433333 | 30 |
from itertools import chain
from dmutils.email.helpers import get_email_addresses, hash_string
from dmutils.env_helpers import get_web_url_from_stage
| [
6738,
340,
861,
10141,
1330,
6333,
198,
198,
6738,
288,
21973,
4487,
13,
12888,
13,
16794,
364,
1330,
651,
62,
12888,
62,
2860,
16746,
11,
12234,
62,
8841,
198,
198,
6738,
288,
21973,
4487,
13,
24330,
62,
16794,
364,
1330,
651,
62,
... | 3.039216 | 51 |
#
# Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Test for QUANTIZED_LSTM op.
import copy
model = Model()
batch_size = 2
input_size = 5
num_units = 4
output_size = 3
InputType = ("TENSOR_QUANT8_ASYMM_SIGNED", [batch_size, input_size], 0.0078125, 0)
input = Input("input", InputType)
InputWeightsType = ("TENSOR_QUANT8_SYMM", [num_units, input_size], 0.00784314, 0)
input_to_input_weights = Input("input_to_input_weights", InputWeightsType)
input_to_forget_weights = Input("input_to_forget_weights", InputWeightsType)
input_to_cell_weights = Input("input_to_cell_weights", InputWeightsType)
input_to_output_weights = Input("input_to_output_weights", InputWeightsType)
RecurrentWeightsType = ("TENSOR_QUANT8_SYMM", [num_units, output_size], 0.00784314, 0)
recurrent_to_input_weights = Input("recurrent_to_input_weights", RecurrentWeightsType)
recurrent_to_forget_weights = Input("recurrent_to_forget_weights", RecurrentWeightsType)
recurrent_to_cell_weights = Input("recurrent_to_cell_weights", RecurrentWeightsType)
recurrent_to_output_weights = Input("recurrent_to_output_weights", RecurrentWeightsType)
CellWeightsType = ("TENSOR_QUANT16_SYMM", [num_units], 1.0, 0)
cell_to_input_weights = Input("cell_to_input_weights", CellWeightsType)
cell_to_forget_weights = Input("cell_to_forget_weights", CellWeightsType)
cell_to_output_weights = Input("cell_to_output_weights", CellWeightsType)
# The bias scale value here is not used.
BiasType = ("TENSOR_INT32", [num_units], 0.0, 0)
input_gate_bias = Input("input_gate_bias", BiasType)
forget_gate_bias = Input("forget_gate_bias", BiasType)
cell_gate_bias = Input("cell_gate_bias", BiasType)
output_gate_bias = Input("output_gate_bias", BiasType)
projection_weights = Input("projection_weights",
("TENSOR_QUANT8_SYMM", [output_size, num_units], 0.00392157, 0))
projection_bias = Input("projection_bias", ("TENSOR_INT32", [output_size]))
OutputStateType = ("TENSOR_QUANT8_ASYMM_SIGNED", [batch_size, output_size], 3.05176e-05, 0)
CellStateType = ("TENSOR_QUANT16_SYMM", [batch_size, num_units], 3.05176e-05, 0)
output_state_in = Input("output_state_in", OutputStateType)
cell_state_in = Input("cell_state_in", CellStateType)
LayerNormType = ("TENSOR_QUANT16_SYMM", [num_units], 3.05182e-05, 0)
input_layer_norm_weights = Input("input_layer_norm_weights", LayerNormType)
forget_layer_norm_weights = Input("forget_layer_norm_weights", LayerNormType)
cell_layer_norm_weights = Input("cell_layer_norm_weights", LayerNormType)
output_layer_norm_weights = Input("output_layer_norm_weights", LayerNormType)
cell_clip = Float32Scalar("cell_clip", 0.)
projection_clip = Float32Scalar("projection_clip", 0.)
input_intermediate_scale = Float32Scalar("input_intermediate_scale", 0.007059)
forget_intermediate_scale = Float32Scalar("forget_intermediate_scale", 0.007812)
cell_intermediate_scale = Float32Scalar("cell_intermediate_scale", 0.007059)
output_intermediate_scale = Float32Scalar("output_intermediate_scale", 0.007812)
hidden_state_zero_point = Int32Scalar("hidden_state_zero_point", 0)
hidden_state_scale = Float32Scalar("hidden_state_scale", 0.007)
output_state_out = Output("output_state_out", OutputStateType)
cell_state_out = Output("cell_state_out", CellStateType)
output = Output("output", OutputStateType)
model = model.Operation(
"QUANTIZED_LSTM", input, input_to_input_weights, input_to_forget_weights,
input_to_cell_weights, input_to_output_weights, recurrent_to_input_weights,
recurrent_to_forget_weights, recurrent_to_cell_weights,
recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights,
cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias,
output_gate_bias, projection_weights, projection_bias, output_state_in,
cell_state_in, input_layer_norm_weights, forget_layer_norm_weights,
cell_layer_norm_weights, output_layer_norm_weights, cell_clip, projection_clip,
input_intermediate_scale, forget_intermediate_scale, cell_intermediate_scale,
output_intermediate_scale, hidden_state_zero_point, hidden_state_scale).To([output_state_out,
cell_state_out, output])
# Example 1. Layer Norm, Projection.
input0 = {
input_to_input_weights: [
64, 77, 89, -102, -115, 13, 25, 38, -51, 64, -102, 89, -77, 64, -51, -64, -51, -38, -25, -13
],
input_to_forget_weights: [
-77, -13, 38, 25, 115, -64, -25, -51, 38, -102, -51, 38, -64, -51, -77, 38, -51, -77, -64, -64
],
input_to_cell_weights: [
-51, -38, -25, -13, -64, 64, -25, -38, -25, -77, 77, -13, -51, -38, -89, 89, -115, -64, 102, 77
],
input_to_output_weights: [
-102, -51, -25, -115, -13, -89, 38, -38, -102, -25, 77, -25, 51, -89, -38, -64, 13, 64, -77, -51
],
input_gate_bias: [644245, 3221226, 4724464, 8160438],
forget_gate_bias: [2147484, -6442451, -4294968, 2147484],
cell_gate_bias: [-1073742, 15461883, 5368709, 1717987],
output_gate_bias: [1073742, -214748, 4294968, 2147484],
recurrent_to_input_weights: [
-25, -38, 51, 13, -64, 115, -25, -38, -89, 6, -25, -77
],
recurrent_to_forget_weights: [
-64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25
],
recurrent_to_cell_weights: [
-38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25
],
recurrent_to_output_weights: [
38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25
],
projection_weights: [
-25, 51, 3, -51, 25, 127, 77, 20, 18, 51, -102, 51
],
projection_bias: [ 0 for _ in range(output_size) ],
input_layer_norm_weights: [3277, 6553, 9830, 16384],
forget_layer_norm_weights: [6553, 6553, 13107, 9830],
cell_layer_norm_weights: [22937, 6553, 9830, 26214],
output_layer_norm_weights: [19660, 6553, 6553, 16384],
output_state_in: [ 0 for _ in range(batch_size * output_size) ],
cell_state_in: [ 0 for _ in range(batch_size * num_units) ],
cell_to_input_weights: [],
cell_to_forget_weights: [],
cell_to_output_weights: [],
}
test_input = [90, 102, 13, 26, 38, 102, 13, 26, 51, 64]
golden_output = [
127, 127, -108, -67, 127, 127
]
output0 = {
output_state_out: golden_output,
cell_state_out: [-14650, 8939, 5771, 6715, -11843, 7847, 1508, 12939],
output: golden_output,
}
input0[input] = test_input
Example((input0, output0))
# Example 2. CIFG, Layer Norm, Projection.
input0 = {
input_to_input_weights: [],
input_to_forget_weights: [
-77, -13, 38, 25, 115, -64, -25, -51, 38, -102, -51, 38, -64, -51, -77, 38, -51, -77, -64, -64
],
input_to_cell_weights: [
-51, -38, -25, -13, -64, 64, -25, -38, -25, -77, 77, -13, -51, -38, -89, 89, -115, -64, 102, 77
],
input_to_output_weights: [
-102, -51, -25, -115, -13, -89, 38, -38, -102, -25, 77, -25, 51, -89, -38, -64, 13, 64, -77, -51
],
input_gate_bias: [],
forget_gate_bias: [2147484, -6442451, -4294968, 2147484],
cell_gate_bias: [-1073742, 15461883, 5368709, 1717987],
output_gate_bias: [1073742, -214748, 4294968, 2147484],
recurrent_to_input_weights: [],
recurrent_to_forget_weights: [
-64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25
],
recurrent_to_cell_weights: [
-38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25
],
recurrent_to_output_weights: [
38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25
],
projection_weights: [
-25, 51, 3, -51, 25, 127, 77, 20, 18, 51, -102, 51
],
projection_bias: [ 0 for _ in range(output_size) ],
input_layer_norm_weights: [],
forget_layer_norm_weights: [6553, 6553, 13107, 9830],
cell_layer_norm_weights: [22937, 6553, 9830, 26214],
output_layer_norm_weights: [19660, 6553, 6553, 16384],
output_state_in: [ 0 for _ in range(batch_size * output_size) ],
cell_state_in: [ 0 for _ in range(batch_size * num_units) ],
cell_to_input_weights: [],
cell_to_forget_weights: [],
cell_to_output_weights: [],
}
test_input = [90, 102, 13, 26, 38, 102, 13, 26, 51, 64]
golden_output = [
127, 127, 127, -128, 127, 127
]
output0 = {
output_state_out: golden_output,
cell_state_out: [-11692, 9960, 5491, 8861, -9422, 7726, 2056, 13149],
output: golden_output,
}
input0[input] = test_input
Example((input0, output0))
| [
2,
198,
2,
15069,
357,
34,
8,
12131,
383,
5565,
4946,
8090,
4935,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
... | 2.401841 | 3,693 |
frase = str(input('Digite uma frase: ')).strip().upper()
palavras = frase.split()
junto = ''.join(palavras)
inverso = ''
for letra in range(len(junto) -1, -1, -1):
inverso += junto[letra]
if inverso == junto:
print('Temos um palíndromo')
else:
print('A frase digitada não é um palíndromo!')
#a debaixo peguei no youtube
frase = str(input("Qual a frase? ").upper().replace(" ", ""))
if frase == frase[::-1]:
print("A frase é um palíndromo")
else:
print("A frase não é um palíndromo") | [
8310,
589,
796,
965,
7,
15414,
10786,
19511,
578,
334,
2611,
1216,
589,
25,
705,
29720,
36311,
22446,
45828,
3419,
198,
18596,
615,
8847,
796,
1216,
589,
13,
35312,
3419,
198,
29741,
1462,
796,
705,
4458,
22179,
7,
18596,
615,
8847,
8... | 2.322581 | 217 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Precompute DCASE2021 Task 2 Dataset fixed representations (logSTFT, logMel)
"""
import os
from pathlib import Path
import json
#
from omegaconf import OmegaConf
import numpy as np
#
from d2021umaps.utils import IncrementalHDF5
from d2021umaps.logging import ColorLogger, make_timestamp
from d2021umaps.features import wavpath_to_mel, wavpath_to_stft
from d2021umaps.data import DCASE2021t2Frames
# ##############################################################################
# # GLOBALS
# ##############################################################################
CONF = OmegaConf.create()
CONF.ROOT_PATH = None # must be given by user!
#
CONF.WAV_NORM = "none"
CONF.WAV_SR = 16000 # WAVs will be resampled to this when loaded
CONF.STFT_WINSIZE = 1024 # powers of 2 ideally
CONF.STFT_HOPSIZE = 512
CONF.NUM_MELS = 128
CONF.OUT_DIR = "precomputed_features"
log_ts = make_timestamp(timezone="Europe/London", with_tz_output=False)
CONF.LOG_OUTPATH = os.path.join("logs", "{}_[{}].log".format(log_ts, __file__))
cli_conf = OmegaConf.from_cli()
CONF = OmegaConf.merge(CONF, cli_conf)
assert CONF.ROOT_PATH is not None, \
"Please provide a ROOT_PATH=... containing the DCASE dev and eval folders"
CONF.ROOT_PATH = str(Path(CONF.ROOT_PATH).resolve()) # in case of softlinks
# these variables may depend on CLI input so we set them at the end
STFT_FREQBINS = int(CONF.STFT_WINSIZE / 2 + 1)
DEV_PATH = os.path.join(CONF.ROOT_PATH, "dev")
EVAL_PATH = os.path.join(CONF.ROOT_PATH, "eval")
STFT_OUTPATH_TRAIN = os.path.join(
CONF.OUT_DIR,
f"dcase2021_t2_train_wavnorm={CONF.WAV_NORM}_stft_win{CONF.STFT_WINSIZE}_" +
f"hop{CONF.STFT_HOPSIZE}.h5")
STFT_OUTPATH_CV = os.path.join(
CONF.OUT_DIR,
f"dcase2021_t2_cv_wavnorm={CONF.WAV_NORM}_stft_win{CONF.STFT_WINSIZE}_" +
f"hop{CONF.STFT_HOPSIZE}.h5")
MEL_OUTPATH_TRAIN = os.path.join(
CONF.OUT_DIR,
f"dcase2021_t2_train_wavnorm={CONF.WAV_NORM}_mel_win{CONF.STFT_WINSIZE}_" +
f"hop{CONF.STFT_HOPSIZE}_m{CONF.NUM_MELS}.h5")
MEL_OUTPATH_CV = os.path.join(
CONF.OUT_DIR,
f"dcase2021_t2_cv_wavnorm={CONF.WAV_NORM}_mel_win{CONF.STFT_WINSIZE}_" +
f"hop{CONF.STFT_HOPSIZE}_m{CONF.NUM_MELS}.h5")
# ##############################################################################
# # MAIN ROUTINE
# ##############################################################################
LOGGER = ColorLogger(__file__, CONF.LOG_OUTPATH, filemode="w")
LOGGER.info(f"\n\n\nSTARTED SCRIPT: {__file__}")
LOGGER.info(OmegaConf.to_yaml(CONF))
def save_stft_dataset(out_path, df_dataset, in_db=True, root_path=None):
"""
"""
ds_len = len(df_dataset)
with IncrementalHDF5(out_path, STFT_FREQBINS, np.float32) as ihdf5:
LOGGER.info(f"Writing to {out_path}")
for i, (_, row) in enumerate(df_dataset.iterrows(), 1):
arr = wavpath_to_stft(row["path"], CONF.WAV_SR,
wav_norm=CONF.WAV_NORM,
n_fft=CONF.STFT_WINSIZE,
hop_length=CONF.STFT_HOPSIZE,
pad_mode="constant", in_decibels=in_db,
logger=LOGGER)
#
rowp = Path(row["path"])
metadata = row.to_dict()
if root_path is not None:
metadata["path"] = str(rowp.relative_to(root_path))
else:
metadata["path"] = rowp.name
if i%1000 == 0:
LOGGER.info(f"[{i}/{ds_len}] stft_dataset: {metadata}")
ihdf5.append(arr, json.dumps(metadata))
# check that file is indeed storing the exact array
_, arr_w = arr.shape
assert (arr == ihdf5.data_ds[:, -arr_w:]).all(), \
"Should never happen"
LOGGER.info(f"Finished writing to {out_path}")
def save_mel_dataset(out_path, df_dataset, in_db=True, root_path=None):
"""
"""
ds_len = len(df_dataset)
with IncrementalHDF5(out_path, CONF.NUM_MELS, np.float32) as ihdf5:
LOGGER.info(f"Writing to {out_path}")
for i, (_, row) in enumerate(df_dataset.iterrows(), 1):
arr = wavpath_to_mel(
row["path"], CONF.WAV_SR, wav_norm=CONF.WAV_NORM,
n_mels=CONF.NUM_MELS,
n_fft=CONF.STFT_WINSIZE, hop_length=CONF.STFT_HOPSIZE,
pad_mode="constant", in_decibels=in_db, logger=LOGGER)
#
rowp = Path(row["path"])
metadata = row.to_dict()
if root_path is not None:
metadata["path"] = str(rowp.relative_to(root_path))
else:
metadata["path"] = rowp.name
if i%1000 == 0:
LOGGER.info(f"[{i}/{ds_len}] mel_dataset: {metadata}")
ihdf5.append(arr, json.dumps(metadata))
# check that file is indeed storing the exact array
_, arr_w = arr.shape
assert (arr == ihdf5.data_ds[:, -arr_w:]).all(), \
"Should never happen"
LOGGER.info(f"Finished writing to {out_path}")
dcase_df = DCASE2021t2Frames(DEV_PATH, EVAL_PATH)
dcase_train = dcase_df.query_dev(filter_split=lambda x: x=="train")
dcase_cv = dcase_df.query_dev(filter_split=lambda x: x=="test")
#
save_mel_dataset(MEL_OUTPATH_CV, dcase_cv, root_path=CONF.ROOT_PATH)
save_stft_dataset(STFT_OUTPATH_CV, dcase_cv, root_path=CONF.ROOT_PATH)
# these are bigger
save_mel_dataset(MEL_OUTPATH_TRAIN, dcase_train, root_path=CONF.ROOT_PATH)
save_stft_dataset(STFT_OUTPATH_TRAIN, dcase_train, root_path=CONF.ROOT_PATH)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
628,
198,
37811,
198,
6719,
5589,
1133,
6257,
11159,
1238,
2481,
15941,
362,
16092,
292,
316,
5969,
24612,
357,
6404,
2257,
9792,... | 2.055394 | 2,744 |
import numpy as np
import matplotlib.pyplot as plt
from numberGenerator.chaos.cprng import CPRNG
from particleSwarmOptimization.pso import PSO
from particleSwarmOptimization.structure.particle import Particle
from particleSwarmOptimization.structure.chaoticParticle import ChaoticParticle
from neuralNetwork.feedForwardNeuralNetwork import NeuralNetwork
from neuralNetwork.structure.layer import Layer
np.set_printoptions(suppress=True)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
6738,
1271,
8645,
1352,
13,
354,
7495,
13,
66,
1050,
782,
1330,
42920,
10503,
198,
6738,
18758,
10462,
1670,
27871,
320,
1634,
13,
... | 3.577236 | 123 |
"""Notification channels for django-notifs."""
from json import dumps
import pika
from notifications.channels import BaseNotificationChannel
class BroadCastWebSocketChannel(BaseNotificationChannel):
"""Fanout notification for RabbitMQ."""
def _connect(self):
"""Connect to the RabbitMQ server."""
connection = pika.BlockingConnection(
pika.ConnectionParameters(host='localhost')
)
channel = connection.channel()
return connection, channel
def construct_message(self):
"""Construct the message to be sent."""
extra_data = self.notification_kwargs['extra_data']
return dumps(extra_data['message'])
def notify(self, message):
"""put the message of the RabbitMQ queue."""
connection, channel = self._connect()
uri = self.notification_kwargs['extra_data']['uri']
channel.exchange_declare(exchange=uri, exchange_type='fanout')
channel.basic_publish(exchange=uri, routing_key='', body=message)
connection.close()
| [
37811,
3673,
2649,
9619,
329,
42625,
14208,
12,
1662,
361,
82,
526,
15931,
198,
198,
6738,
33918,
1330,
45514,
198,
198,
11748,
279,
9232,
198,
198,
6738,
19605,
13,
354,
8961,
1330,
7308,
3673,
2649,
29239,
628,
198,
4871,
9765,
19248,... | 2.809019 | 377 |
# Copyright 2019-2022 Simon Zigelli
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import render
from django.template.loader import render_to_string
from django.views.generic.base import ContextMixin, View
from console.models import UserPreferences
# Origin: https://stackoverflow.com/questions/166506/finding-local-ip-addresses-using-pythons-stdlib by Jamieson Becker,
# Public domain.
| [
2,
220,
15069,
13130,
12,
1238,
1828,
11288,
24992,
23225,
198,
2,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
... | 3.56338 | 284 |
"""
@brief test log(time=21s)
"""
import os
import unittest
from pyquickhelper.pycode import ExtTestCase
from ensae_teaching_cs.td_1a.discours_politique import enumerate_speeches_from_elysees
if __name__ == "__main__":
unittest.main()
| [
37811,
198,
31,
65,
3796,
220,
220,
220,
220,
220,
1332,
2604,
7,
2435,
28,
2481,
82,
8,
198,
37811,
198,
11748,
28686,
198,
11748,
555,
715,
395,
198,
6738,
12972,
24209,
2978,
525,
13,
9078,
8189,
1330,
5683,
14402,
20448,
198,
67... | 2.520408 | 98 |
import warnings
from functools import wraps
def scraper_enabled(func):
"""
Decorator which ensures that a :class:`pyanimelist.Client.scraper` isn't used without it being explictly allowed
Example usage:
.. code-block:: py
from pyanimelist.util.web import scraper_enabled
@scraper_enabled
async def function(func):
return await func()
"""
@wraps(func)
return wrapped
| [
11748,
14601,
198,
6738,
1257,
310,
10141,
1330,
27521,
628,
198,
4299,
19320,
525,
62,
25616,
7,
20786,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
4280,
273,
1352,
543,
19047,
326,
257,
1058,
4871,
25,
63,
79,
4121,
320,
... | 2.716981 | 159 |
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2020, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import datetime
import logging
import sys
import uuid
from volttron.platform.vip.agent import Agent, Core, PubSub, compat
from volttron.platform.agent import utils
from volttron.platform.messaging import headers as headers_mod
from volttron.platform.messaging import topics, headers as headers_mod
utils.setup_logging()
_log = logging.getLogger(__name__)
__version__ = '0.1'
def DatetimeFromValue(ts):
''' Utility for dealing with time
'''
if isinstance(ts, int):
return datetime.utcfromtimestamp(ts)
elif isinstance(ts, float):
return datetime.utcfromtimestamp(ts)
elif not isinstance(ts, datetime):
raise ValueError('Unknown timestamp value')
return ts
def main(argv=sys.argv):
'''Main method called by the eggsecutable.'''
try:
utils.vip_main(schedule_example, version=__version__)
except Exception as e:
print(e)
_log.exception('unhandled exception')
if __name__ == '__main__':
# Entry point for script
try:
sys.exit(main())
except KeyboardInterrupt:
pass
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
22935,
90,
198,
2,
43907,
25,
900,
277,
12685,
28,
40477,
12,
23,
10117,
28,
29412,
1509,
28,
19,
40379,
28,
19,
39747,
28,
19,
2123,
25,
198,
2,
198,
2,
15069,
12131,
... | 3.376404 | 890 |
import asyncio
from datetime import datetime
from typing import Callable
import discord
from discord.ext import commands
| [
11748,
30351,
952,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
19720,
1330,
4889,
540,
198,
198,
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
9729,
628
] | 4.392857 | 28 |
import torch.nn as nn
import torch
from torchsummary import summary
import torchsummaryX
from lib.medzoo.BaseModelClass import BaseModel
import torch.nn.functional as F
class BaseAttentionBlock(nn.Module):
"""The basic implementation for self-attention block/non-local block."""
class BaseOCModule(nn.Module):
"""Base-OC"""
class UNet3D(BaseModel):
"""
Implementations based on the Unet3D paper: https://arxiv.org/abs/1606.06650
"""
| [
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
198,
6738,
28034,
49736,
1330,
10638,
198,
11748,
28034,
49736,
55,
198,
6738,
9195,
13,
1150,
89,
2238,
13,
14881,
17633,
9487,
1330,
7308,
17633,
198,
11748,
28034,
13,
20471,
... | 3.04 | 150 |
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from os import path, walk
here = path.abspath(path.dirname(__file__))
datadir = 'pyKriging/sampling_plans'
package_data = [ (d, [path.join(d, f) for f in files]) for d,folders,files in walk(datadir)]
data_files=[]
for i in package_data:
for j in i[1]:
data_files.append(j)
data_files = [path.relpath(file, datadir) for file in data_files]
setup(
name='pyKriging',
version='0.1.0',
zip_safe = False,
packages=find_packages(),
package_data={'pyKriging': ['sampling_plans/*']},
url='www.pykriging.com',
license='',
author='Chris Paulson',
author_email='capaulson@gmail.com',
description='A Kriging Toolbox for Python',
install_requires=['scipy', 'numpy', 'dill', 'matplotlib','inspyred'],
) | [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
1303,
16622,
4702,
900,
37623,
10141,
625,
1233,
26791,
198,
6738,
28686,
1330,
3108,
11,
2513,
628,
198,
1456,
796,
3108,
13,
397,
2777,
776,
7,
6978,
13,
15908,
3672,
7,
834,... | 2.519637 | 331 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import json
from classify_r_equiv.const import get_seed_functions
from tqdm import tqdm
from sympy import *
import random
x, y = symbols("x y")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
33918,
198,
6738,
36509,
62,
81,
62,
4853,
452,
13,
9979,
1330,
651,
62,
2... | 2.679012 | 81 |
#! /usr/bin/env python
# Source https://lorenzod8n.wordpress.com/2007/05/30/pygame-tutorial-3-mouse-events/
# Deal with mouse events.
import pygame
# Tracks the position of the mouse on our window.
# Draws lines that cut the mouse pointer’s coordinates.
# Draws lines that cut the mouse pointer’s coordinates flashins.
# Shows the coordinates when clicking on the screen.
# Main section
pygame.init()
#track_mouse_position()
#draw_lines_using_mouse_position()
#draw_lines_using_mouse_position_flashing()
mouse_button_event()
pygame.quit()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
8090,
3740,
1378,
31131,
27305,
375,
23,
77,
13,
40346,
13,
785,
14,
12726,
14,
2713,
14,
1270,
14,
9078,
6057,
12,
83,
44917,
12,
18,
12,
35888,
12,
31534,
14,
198,
2,... | 3.154286 | 175 |
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 30 12:33:58 2018
@author: michaelek
"""
import os
import pandas as pd
from hilltoppy import web_service as ws
from hilltoppy.util import convert_site_names
from pyhydrotel import get_ts_data, get_sites_mtypes
from flownat import FlowNat
from pdsql import mssql
import yaml
import util
pd.set_option('display.max_columns', 10)
pd.set_option('display.max_rows', 30)
run_time_start = pd.Timestamp.today()
######################################
### Parameters
base_dir = os.path.realpath(os.path.dirname(__file__))
with open(os.path.join(base_dir, 'parameters.yml')) as param:
param = yaml.safe_load(param)
to_date = run_time_start.floor('D')
from_date = (to_date - pd.DateOffset(days=3)).round('D')
try:
######################################
### Determine last generated unmod flow date
last_val1 = mssql.rd_sql(param['Output']['hydrotel_server'], 'hydrotel', stmt='select max(DT) from Samples where Point = {point}'.format(point=param['Output']['unmod_point'])).iloc[0][0]
if last_val1 is None:
last_val1 = pd.Timestamp('1900-01-01')
######################################
### Get data
## Detided data
tsdata = get_ts_data(param['Output']['hydrotel_server'], 'hydrotel', param['Input']['detided_mtype'], str(param['Input']['site']), str(last_val1), None, None)[1:]
## Determine the Wap usage ratios
fn1 = FlowNat(from_date=from_date, to_date=to_date, rec_data_code='RAW', input_sites=str(param['Input']['site']))
up_takes1 = fn1.upstream_takes()
up_takes2 = up_takes1[up_takes1.AllocatedRate > 0].copy()
up_takes2['AllocatedRateSum'] = up_takes2.groupby('Wap')['AllocatedRate'].transform('sum')
up_takes2['AllocatedRateRatio'] = up_takes2['AllocatedRate']/up_takes2['AllocatedRateSum']
wap_ratios = up_takes2[up_takes2.HydroFeature == 'Surface Water'].groupby('Wap')['AllocatedRateRatio'].sum()
## Pull out the usage data
# Hilltop
ht_sites = ws.site_list(param['Input']['hilltop_base_url'], param['Input']['hilltop_hts'])
ht_sites['Wap'] = convert_site_names(ht_sites.SiteName)
ht_sites1 = ht_sites[ht_sites['Wap'].isin(wap_ratios.index) & ~ht_sites['Wap'].isin(param['Input']['browns_rock_waps'])].copy()
ht_sites1.rename(columns={'SiteName': 'Site'}, inplace=True)
mtype_list = []
for site in ht_sites1.Site:
m1 = ws.measurement_list(param['Input']['hilltop_base_url'], param['Input']['hilltop_hts'], site)
mtype_list.append(m1)
mtypes = pd.concat(mtype_list).reset_index()
mtypes1 = mtypes[mtypes.To >= from_date]
mtypes2 = mtypes1[~mtypes1.Measurement.str.contains('regularity', case=False)].sort_values('To').drop_duplicates('Site', keep='last')
# Hydrotel
br_summ = get_sites_mtypes(param['Input']['hydrotel_server'], 'hydrotel', sites=param['Input']['browns_rock_site'], mtypes=param['Input']['browns_rock_mtype'])
######################################
### Run detide
det1 = dtl.detide(roll1, float(param['Input']['quantile'])).round(3).reset_index()
# det2 = dtl.plot.plot_detide(roll1, float(param['Input']['quantile']))
mtypes3 = pd.merge(ht_sites1, mtypes2.drop(['DataType', 'Units'], axis=1), on='Site')
takes1 = pd.merge(up_takes2[['RecordNumber', 'HydroFeature', 'AllocationBlock', 'Wap', 'FromDate', 'ToDate', 'FromMonth', 'ToMonth', 'AllocatedRate', 'AllocatedAnnualVolume', 'WaterUse', 'ConsentStatus']], mtypes3, on='Wap', how='left').sort_values('AllocatedRate', ascending=False)
takes1.to_csv(os.path.join(base_dir, 'waimak_consents_2019-07-24.csv'), index=False)
#####################################
### Clip data to last value in Hydrotel
last_val1 = mssql.rd_sql(param['Output']['hydrotel_server'], 'hydrotel', stmt='select max(DT) from Samples where Point = {point}'.format(point=param['Output']['new_point'])).iloc[0][0]
if isinstance(last_val1, pd.Timestamp):
det1 = det1[det1.DateTime > last_val1].copy()
#####################################
### Save to Hydrotel and log result
if not det1.empty:
det1['Point'] = param['Output']['new_point']
det1['Quality'] = param['Output']['quality_code']
det1.rename(columns={'DateTime': 'DT', 'de-tided': 'SampleValue'}, inplace=True)
mssql.to_mssql(det1, param['Output']['server'], param['Input']['database'], 'Samples')
util.log(run_time_start, from_date, det1.DT.max(), 'Hydrotel', 'Samples', 'pass', '{det} data points added to {mtype} (Point {point})'.format(det=len(det1), mtype=param['Input']['new_mtype'], point=param['Output']['new_point']))
else:
util.log(run_time_start, to_date, to_date, 'Hydrotel', 'Samples', 'pass', 'No data needed to be added')
except Exception as err:
err1 = err
print(err1)
util.log(run_time_start, from_date, to_date, 'Hydrotel', 'Samples', 'fail', str(err1))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3825,
8621,
1542,
1105,
25,
2091,
25,
3365,
2864,
198,
198,
31,
9800,
25,
285,
488,
3609,
293,
74,
198,
37811,
198,
11748,
28686,
198,
11748,
1... | 2.492409 | 1,976 |
# 카드 짝 맞추기
"""
1. 현재 보드를 모두 순회하면서 카드의 종류를 모두 긁어 모은다.
2. 순회한 보드에 따라 permutations 한다.
3. 방문할 좌표들을 백트래킹한다. -> 좌표들을 저장하고 좌표의 인덱스만 백트래킹한다.
4. 방문해야할 모든 좌표 세트를 구하고 bfs를 실행한다.
"""
import copy
from collections import deque
from itertools import permutations
dy = [-1, 1, 0, 0]
dx = [0, 0, -1, 1]
INF = 987654321
if __name__ == "__main__":
board = [[3, 0, 0, 2], [0, 0, 1, 0], [0, 1, 0, 0], [2, 0, 0, 3]]
print(solution(board, 0, 1))
| [
2,
23821,
117,
112,
167,
241,
250,
23821,
100,
251,
31619,
100,
252,
168,
114,
242,
166,
116,
108,
198,
37811,
198,
16,
13,
220,
169,
246,
226,
168,
252,
105,
31619,
111,
112,
167,
241,
250,
167,
98,
120,
31619,
103,
101,
167,
2... | 1.047619 | 420 |
import numpy
def local_energy_hubbard_holstein_momentum(system, G, P, Lap, Ghalf=None):
r"""Calculate local energy of walker for the Hubbard-Hostein model.
Parameters
----------
system : :class:`HubbardHolstein`
System information for the HubbardHolstein model.
G : :class:`numpy.ndarray`
Walker's "Green's function"
Returns
-------
(E_L(phi), T, V): tuple
Local, kinetic and potential energies of given walker phi.
"""
# T = kinetic_lang_firsov(system.t, system.gamma_lf, P, system.nx, system.ny, system.ktwist)
Dp = numpy.array([numpy.exp(1j*system.gamma_lf*P[i]) for i in range(system.nbasis)])
T = numpy.zeros_like(system.T, dtype=numpy.complex128)
T[0] = numpy.diag(Dp).dot(system.T[0]).dot(numpy.diag(Dp.T.conj()))
T[1] = numpy.diag(Dp).dot(system.T[1]).dot(numpy.diag(Dp.T.conj()))
ke = numpy.sum(T[0] * G[0] + T[1] * G[1])
sqrttwomw = numpy.sqrt(2.0 * system.m * system.w0)
assert (system.gamma_lf * system.w0 == system.g * sqrttwomw)
Ueff = system.U + system.gamma_lf**2 * system.w0 - 2.0 * system.g * system.gamma_lf * sqrttwomw
if system.symmetric:
pe = -0.5*Ueff*(G[0].trace() + G[1].trace())
pe = Ueff * numpy.dot(G[0].diagonal(), G[1].diagonal())
pe_ph = - 0.5 * system.w0 ** 2 * system.m * numpy.sum(Lap)
ke_ph = 0.5 * numpy.sum(P*P) / system.m - 0.5 * system.w0 * system.nbasis
rho = G[0].diagonal() + G[1].diagonal()
e_eph = (system.gamma_lf**2 * system.w0 / 2.0 - system.g * system.gamma_lf * sqrttwomw) * numpy.sum(rho)
etot = ke + pe + pe_ph + ke_ph + e_eph
Eph = ke_ph + pe_ph
Eel = ke + pe
Eeb = e_eph
return (etot, ke+pe, ke_ph+pe_ph+e_eph)
def local_energy_hubbard_holstein(system, G, X, Lap, Ghalf=None):
r"""Calculate local energy of walker for the Hubbard-Hostein model.
Parameters
----------
system : :class:`HubbardHolstein`
System information for the HubbardHolstein model.
G : :class:`numpy.ndarray`
Walker's "Green's function"
X : :class:`numpy.ndarray`
Walker's phonon coordinate
Returns
-------
(E_L(phi), T, V): tuple
Local, kinetic and potential energies of given walker phi.
"""
ke = numpy.sum(system.T[0] * G[0] + system.T[1] * G[1])
if system.symmetric:
pe = -0.5*system.U*(G[0].trace() + G[1].trace())
pe = system.U * numpy.dot(G[0].diagonal(), G[1].diagonal())
pe_ph = 0.5 * system.w0 ** 2 * system.m * numpy.sum(X * X)
ke_ph = -0.5 * numpy.sum(Lap) / system.m - 0.5 * system.w0 * system.nbasis
rho = G[0].diagonal() + G[1].diagonal()
e_eph = - system.g * numpy.sqrt(system.m * system.w0 * 2.0) * numpy.dot(rho, X)
etot = ke + pe + pe_ph + ke_ph + e_eph
Eph = ke_ph + pe_ph
Eel = ke + pe
Eeb = e_eph
return (etot, ke+pe, ke_ph+pe_ph+e_eph)
def local_energy_hubbard(system, G, Ghalf=None):
r"""Calculate local energy of walker for the Hubbard model.
Parameters
----------
system : :class:`Hubbard`
System information for the Hubbard model.
G : :class:`numpy.ndarray`
Walker's "Green's function"
Returns
-------
(E_L(phi), T, V): tuple
Local, kinetic and potential energies of given walker phi.
"""
ke = numpy.sum(system.T[0] * G[0] + system.T[1] * G[1])
# Todo: Stupid
if system.symmetric:
pe = -0.5*system.U*(G[0].trace() + G[1].trace())
pe = system.U * numpy.dot(G[0].diagonal(), G[1].diagonal())
return (ke + pe, ke, pe)
def local_energy_hubbard_ghf(system, Gi, weights, denom):
"""Calculate local energy of GHF walker for the Hubbard model.
Parameters
----------
system : :class:`Hubbard`
System information for the Hubbard model.
Gi : :class:`numpy.ndarray`
Array of Walker's "Green's function"
denom : float
Overlap of trial wavefunction with walker.
Returns
-------
(E_L(phi), T, V): tuple
Local, kinetic and potential energies of given walker phi.
"""
ke = numpy.einsum('i,ikl,kl->', weights, Gi, system.Text) / denom
# numpy.diagonal returns a view so there should be no overhead in creating
# temporary arrays.
guu = numpy.diagonal(Gi[:,:system.nbasis,:system.nbasis], axis1=1, axis2=2)
gdd = numpy.diagonal(Gi[:,system.nbasis:,system.nbasis:], axis1=1, axis2=2)
gud = numpy.diagonal(Gi[:,system.nbasis:,:system.nbasis], axis1=1, axis2=2)
gdu = numpy.diagonal(Gi[:,:system.nbasis,system.nbasis:], axis1=1, axis2=2)
gdiag = guu*gdd - gud*gdu
pe = system.U * numpy.einsum('j,jk->', weights, gdiag) / denom
return (ke+pe, ke, pe)
def local_energy_hubbard_ghf_full(system, GAB, weights):
r"""Calculate local energy of GHF walker for the Hubbard model.
Parameters
----------
system : :class:`Hubbard`
System information for the Hubbard model.
GAB : :class:`numpy.ndarray`
Matrix of Green's functions for different SDs A and B.
weights : :class:`numpy.ndarray`
Components of overlap of trial wavefunction with walker.
Returns
-------
(E_L, T, V): tuple
Local, kinetic and potential energies of given walker phi.
"""
denom = numpy.sum(weights)
ke = numpy.einsum('ij,ijkl,kl->', weights, GAB, system.Text) / denom
# numpy.diagonal returns a view so there should be no overhead in creating
# temporary arrays.
guu = numpy.diagonal(GAB[:,:,:system.nbasis,:system.nbasis], axis1=2,
axis2=3)
gdd = numpy.diagonal(GAB[:,:,system.nbasis:,system.nbasis:], axis1=2,
axis2=3)
gud = numpy.diagonal(GAB[:,:,system.nbasis:,:system.nbasis], axis1=2,
axis2=3)
gdu = numpy.diagonal(GAB[:,:,:system.nbasis,system.nbasis:], axis1=2,
axis2=3)
gdiag = guu*gdd - gud*gdu
pe = system.U * numpy.einsum('ij,ijk->', weights, gdiag) / denom
return (ke+pe, ke, pe)
def local_energy_multi_det(system, Gi, weights):
"""Calculate local energy of GHF walker for the Hubbard model.
Parameters
----------
system : :class:`Hubbard`
System information for the Hubbard model.
Gi : :class:`numpy.ndarray`
Array of Walker's "Green's function"
weights : :class:`numpy.ndarray`
Components of overlap of trial wavefunction with walker.
Returns
-------
(E_L(phi), T, V): tuple
Local, kinetic and potential energies of given walker phi.
"""
denom = numpy.sum(weights)
ke = numpy.einsum('i,ikl,kl->', weights, Gi, system.Text) / denom
# numpy.diagonal returns a view so there should be no overhead in creating
# temporary arrays.
guu = numpy.diagonal(Gi[:,:,:system.nup], axis1=1,
axis2=2)
gdd = numpy.diagonal(Gi[:,:,system.nup:], axis1=1,
axis2=2)
pe = system.U * numpy.einsum('j,jk->', weights, guu*gdd) / denom
return (ke+pe, ke, pe)
def fock_hubbard(system, P):
"""Hubbard Fock Matrix
F_{ij} = T_{ij} + U(<niu>nid + <nid>niu)_{ij}
"""
niu = numpy.diag(P[0].diagonal())
nid = numpy.diag(P[1].diagonal())
return system.T + system.U*numpy.array([nid,niu])
| [
11748,
299,
32152,
198,
4299,
1957,
62,
22554,
62,
40140,
23024,
62,
3937,
5714,
62,
32542,
298,
388,
7,
10057,
11,
402,
11,
350,
11,
26944,
11,
11972,
1604,
28,
14202,
2599,
198,
220,
220,
220,
374,
37811,
9771,
3129,
378,
1957,
25... | 2.204655 | 3,308 |
# -*- coding: utf-8 -*-
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
# 3p
import mock
from datadog_checks.http_check import HTTPCheck
from datadog_checks.utils.headers import headers as agent_headers
from .common import (
FAKE_CERT, CONFIG, CONFIG_HTTP_HEADERS, CONFIG_SSL_ONLY, CONFIG_EXPIRED_SSL, CONFIG_CUSTOM_NAME, CONFIG_DATA_METHOD,
CONFIG_HTTP_REDIRECTS, CONFIG_UNORMALIZED_INSTANCE_NAME, CONFIG_DONT_CHECK_EXP
)
def test_http_headers(http_check):
"""
Headers format.
"""
# Get just the headers from http_check._load_conf(...), which happens to be at index 10
headers = http_check._load_conf(CONFIG_HTTP_HEADERS['instances'][0])[10]
expected_headers = agent_headers({}).get('User-Agent')
assert headers["X-Auth-Token"] == "SOME-AUTH-TOKEN", headers
assert expected_headers == headers.get('User-Agent'), headers
def test_check(aggregator, http_check):
"""
Check coverage.
"""
# Run the check for all the instances in the config
for instance in CONFIG['instances']:
http_check.check(instance)
# HTTP connection error
connection_err_tags = ['url:https://thereisnosuchlink.com', 'instance:conn_error']
aggregator.assert_service_check(HTTPCheck.SC_STATUS, status=HTTPCheck.CRITICAL, tags=connection_err_tags, count=1)
# Wrong HTTP response status code
status_code_err_tags = ['url:http://httpbin.org/404', 'instance:http_error_status_code']
aggregator.assert_service_check(HTTPCheck.SC_STATUS, status=HTTPCheck.CRITICAL, tags=status_code_err_tags, count=1)
# HTTP response status code match
status_code_match_tags = ['url:http://httpbin.org/404', 'instance:status_code_match', 'foo:bar']
aggregator.assert_service_check(HTTPCheck.SC_STATUS, status=HTTPCheck.OK, tags=status_code_match_tags, count=1)
# Content match & mismatching
content_match_tags = ['url:https://github.com', 'instance:cnt_match']
aggregator.assert_service_check(HTTPCheck.SC_STATUS, status=HTTPCheck.OK, tags=content_match_tags, count=1)
content_mismatch_tags = ['url:https://github.com', 'instance:cnt_mismatch']
aggregator.assert_service_check(HTTPCheck.SC_STATUS, status=HTTPCheck.CRITICAL, tags=content_mismatch_tags, count=1)
unicode_content_match_tags = ['url:https://ja.wikipedia.org/', 'instance:cnt_match_unicode']
aggregator.assert_service_check(HTTPCheck.SC_STATUS, status=HTTPCheck.OK, tags=unicode_content_match_tags, count=1)
unicode_content_mismatch_tags = ['url:https://ja.wikipedia.org/', 'instance:cnt_mismatch_unicode']
aggregator.assert_service_check(HTTPCheck.SC_STATUS, status=HTTPCheck.CRITICAL, tags=unicode_content_mismatch_tags,
count=1)
reverse_content_match_tags = ['url:https://github.com', 'instance:cnt_match_reverse']
aggregator.assert_service_check(HTTPCheck.SC_STATUS, status=HTTPCheck.CRITICAL, tags=reverse_content_match_tags,
count=1)
reverse_content_mismatch_tags = ['url:https://github.com', 'instance:cnt_mismatch_reverse']
aggregator.assert_service_check(HTTPCheck.SC_STATUS, status=HTTPCheck.OK, tags=reverse_content_mismatch_tags,
count=1)
unicode_reverse_content_match_tags = ['url:https://ja.wikipedia.org/', 'instance:cnt_match_unicode_reverse']
aggregator.assert_service_check(HTTPCheck.SC_STATUS, status=HTTPCheck.CRITICAL,
tags=unicode_reverse_content_match_tags, count=1)
unicode_reverse_content_mismatch_tags = ['url:https://ja.wikipedia.org/', 'instance:cnt_mismatch_unicode_reverse']
aggregator.assert_service_check(HTTPCheck.SC_STATUS, status=HTTPCheck.OK,
tags=unicode_reverse_content_mismatch_tags, count=1)
@mock.patch('ssl.SSLSocket.getpeercert', **{'return_value.raiseError.side_effect': Exception()})
@mock.patch('ssl.SSLSocket.getpeercert', return_value=FAKE_CERT)
def test_service_check_instance_name_normalization(aggregator, http_check):
"""
Service check `instance` tag value is normalized.
Note: necessary to avoid mismatch and backward incompatiblity.
"""
# Run the check for the one instance
http_check.check(CONFIG_UNORMALIZED_INSTANCE_NAME['instances'][0])
# Assess instance name normalization
normalized_tags = ['url:https://github.com', 'instance:need_to_be_normalized']
aggregator.assert_service_check(HTTPCheck.SC_STATUS, status=HTTPCheck.OK, tags=normalized_tags, count=1)
aggregator.assert_service_check(HTTPCheck.SC_SSL_CERT, status=HTTPCheck.OK, tags=normalized_tags, count=1)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
357,
34,
8,
16092,
324,
519,
11,
3457,
13,
2864,
198,
2,
1439,
2489,
10395,
198,
2,
49962,
739,
257,
513,
12,
565,
682,
347,
10305,
3918,
5964,
357,
3826,
... | 2.519765 | 1,872 |
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
import ast
import os
import yaml
from traits.api import Str, Button, List
from traitsui.api import HGroup, UItem, VGroup, Item
from traitsui.extras.checkbox_column import CheckboxColumn
from traitsui.handler import Controller
from traitsui.table_column import ObjectColumn
from pychron.core.fits.filter_fit_selector import FilterFitSelector
from pychron.core.fits.fit import FilterFit
from pychron.core.helpers.filetools import add_extension, glob_list_directory
from pychron.core.helpers.iterfuncs import partition
from pychron.core.helpers.traitsui_shortcuts import okcancel_view
from pychron.core.ui.enum_editor import myEnumEditor
from pychron.core.ui.table_editor import myTableEditor
from pychron.core.yaml import yload
from pychron.envisage.icon_button_editor import icon_button_editor
from pychron.paths import paths
ATTRS = ['fit', 'error_type', 'name', 'filter_outliers', 'filter_iterations', 'filter_std_devs']
if __name__ == '__main__':
# build_directories(paths)
m = MeasurementFitsSelector()
# keys = ['Ar40', 'Ar39']
# detectors=['H1','AX']
# fits = [('linear', 'SEM', {}),
# ('linear', 'SEM', {})]
t = os.path.join(paths.fits_dir, 'test.yaml')
m.load(t)
a = MeasurementFitsSelectorView(model=m)
a.configure_traits()
# ============= EOF =============================================
| [
2,
38093,
25609,
855,
198,
2,
15069,
1946,
14757,
9847,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 3.442879 | 639 |
"""Top-level package for advertools."""
__author__ = """Elias Dabbas"""
__email__ = 'eliasdabbas@gmail.com'
__version__ = '0.9.0'
from advertools.ad_create import ad_create
from advertools.ad_from_string import ad_from_string
from advertools.emoji import emoji_search, emoji_df
from advertools.extract import *
from advertools.kw_generate import *
from advertools.regex import *
from advertools.sitemaps import sitemap_to_df
from advertools.stopwords import stopwords
from advertools.url_builders import url_utm_ga
from advertools.word_frequency import word_frequency
from advertools.word_tokenize import word_tokenize
from . import twitter
from . import youtube
from .serp import *
| [
198,
37811,
9126,
12,
5715,
5301,
329,
6728,
10141,
526,
15931,
198,
198,
834,
9800,
834,
796,
37227,
9527,
4448,
360,
6485,
292,
37811,
198,
834,
12888,
834,
796,
705,
417,
4448,
67,
6485,
292,
31,
14816,
13,
785,
6,
198,
834,
9641... | 3.282297 | 209 |
import argparse
import os
import sys
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.pool import NullPool
from . import exceptions, query, settings, utils
from .models import Base, Bulletin
from .version import __version__
| [
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
198,
6738,
44161,
282,
26599,
13,
7742,
1330,
35886,
27201,
198,
198,
6738,
764,
1... | 3.776119 | 67 |
from keras.datasets import cifar10
from autokeras.generator import DefaultClassifierGenerator
from autokeras.net_transformer import default_transform
from autokeras.preprocessor import OneHotEncoder
from autokeras.utils import ModelTrainer
if __name__ == '__main__':
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('Start Encoding')
encoder = OneHotEncoder()
encoder.fit(y_train)
y_train = encoder.transform(y_train)
y_test = encoder.transform(y_test)
print('Start Generating')
graphs = default_transform(DefaultClassifierGenerator(10, x_train.shape[1:]).generate())
keras_model = graphs[0].produce_model()
print('Start Training')
ModelTrainer(keras_model,
x_train,
y_train,
x_test,
y_test,
True).train_model(max_no_improvement_num=100, batch_size=128)
print(keras_model.evaluate(x_test, y_test, True))
| [
6738,
41927,
292,
13,
19608,
292,
1039,
1330,
269,
361,
283,
940,
198,
198,
6738,
1960,
11020,
292,
13,
8612,
1352,
1330,
15161,
9487,
7483,
8645,
1352,
198,
6738,
1960,
11020,
292,
13,
3262,
62,
7645,
16354,
1330,
4277,
62,
35636,
19... | 2.364532 | 406 |
from dotenv import load_dotenv
load_dotenv()
import os
os.chdir(os.path.dirname(os.path.realpath(__file__)))
from trello import TrelloClient
boardTitle = os.getenv("BOARD_NAME")
listTitle = os.getenv("LIST_NAME")
| [
6738,
16605,
24330,
1330,
3440,
62,
26518,
24330,
198,
2220,
62,
26518,
24330,
3419,
198,
198,
11748,
28686,
198,
418,
13,
354,
15908,
7,
418,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
5305,
6978,
7,
834,
7753,
834,
22305,
19... | 2.7 | 80 |
#!/usr/bin/env python 3
"""
MQTT client base class to Connect, Publish and Subscribe messages using Mosquitto broker
"""
__author__ = "Amjad B."
__license__ = "MIT"
__version__ = '1.0'
__status__ = "beta"
import time
import json
import ssl
import sys
import logging
import paho.mqtt.client as mqtt_client
logger = logging.getLogger(__name__)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
513,
198,
37811,
198,
49215,
15751,
5456,
2779,
1398,
284,
8113,
11,
8525,
1836,
290,
19808,
6218,
1262,
5826,
421,
37606,
20426,
220,
220,
198,
37811,
198,
198,
834,
9800,
834,
796,
366,
5... | 2.853659 | 123 |
import magma as m
from magma.testing import check_files_equal
import os
| [
11748,
2153,
2611,
355,
285,
198,
6738,
2153,
2611,
13,
33407,
1330,
2198,
62,
16624,
62,
40496,
198,
11748,
28686,
628
] | 3.47619 | 21 |
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import time
import universe
seed = 91231
n_steps_pretraining = 3000
n_steps = 3500
steps_to_reward = 9
max_reward = n_steps / steps_to_reward
# uni = universe.Universe('grid_world', world='world0')
uni = universe.Universe('grid_world', world='2d_world0')
uni.show()
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax.set_xlim([0, uni._env._shape[1]])
ax.set_ylim([0, uni._env._shape[0]])
ax_value = fig.add_axes([0.1, 0.1, .8, .8],
frameon=False, xticks=[], yticks=[])
ax_value.set_xlim([0, uni._env._shape[1]])
ax_value.set_ylim([0, uni._env._shape[0]])
plt.ion()
plt.show()
uni.plot_env(ax)
np.random.seed(seed)
for _ in range(n_steps_pretraining):
uni.step()
last_reward = 0.
uni.reset_agent_position()
uni.reset_agent_reward()
for _ in range(n_steps):
uni.step()
uni.plot_agent(ax)
if uni.total_agent_reward() != last_reward:
uni.plot_value(ax_value)
last_reward = uni.total_agent_reward()
plt.pause(0.010)
print('reward:', uni.total_agent_reward())
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
640,
198,
198,
11748,
6881,
628,
198,
28826,
796,
860,
1065... | 2.111954 | 527 |
'''Aprimore o desafio 93 para que ele funcione com varios
jogadores, incluindo um sistema de visualização de detalhes
do aproveitamento de cada jogador'''
| [
7061,
6,
13680,
9401,
267,
748,
1878,
952,
10261,
31215,
8358,
9766,
25439,
7935,
401,
1401,
4267,
198,
73,
519,
324,
2850,
11,
13358,
84,
521,
78,
23781,
264,
396,
19687,
390,
5874,
23638,
16175,
28749,
390,
1062,
282,
956,
198,
4598... | 2.529412 | 68 |
#!/usr/bin/env python
from getpass import getpass
from pprint import pprint
from lxml import etree
# import xmltodict
from jnpr.junos import Device
# from jnpr.junos.op.ethport import EthPortTable
# from jnpr.junos.op.arp import ArpTable
# from jnpr.junos.op.routes import RouteTable
# from jnpr.junos.op.phyport import PhyPortTable
# from jnpr.junos.op.phyport import PhyPortStatsTable
# from jnpr.junos.utils.config import Config
'''
7. Use Juniper's PyEZ and direct RPC to retrieve the XML for
'show version' from the Juniper SRX.
Print out this returned XML as a string using 'etree.tostring()'.
Parse the returned XML to retrieve the model from the device.
Print this model number to the screen.
get-software-information
'''
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
651,
6603,
1330,
651,
6603,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
198,
6738,
300,
19875,
1330,
2123,
631,
198,
2,
1330,
2124,
76,
2528,
375,
713,
198,
198,
6738,
474... | 2.914179 | 268 |
import json
from simpl.constants.urls import URL
from simpl.resources.base import BaseResource
| [
11748,
33918,
198,
198,
6738,
7106,
13,
9979,
1187,
13,
6371,
82,
1330,
10289,
198,
6738,
7106,
13,
37540,
13,
8692,
1330,
7308,
26198,
198
] | 3.84 | 25 |
import unittest
from dalpy.queues import Queue, QueueUnderflowError
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
198,
6738,
288,
282,
9078,
13,
4188,
947,
1330,
4670,
518,
11,
4670,
518,
9203,
11125,
12331,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,... | 2.586957 | 46 |
import polyphony
from polyphony.io import Port
from polyphony.typing import bit, uint3, uint12, uint16
from polyphony.timing import clksleep, clkfence, wait_rising, wait_falling
CONVST_PULSE_CYCLE = 10
CONVERSION_CYCLE = 40
@polyphony.module
@polyphony.testbench
@polyphony.rule(scheduling='parallel')
spic = AD7091R_SPIC()
test(spic)
| [
11748,
7514,
23021,
198,
6738,
7514,
23021,
13,
952,
1330,
4347,
198,
6738,
7514,
23021,
13,
774,
13886,
1330,
1643,
11,
20398,
18,
11,
20398,
1065,
11,
20398,
1433,
198,
6738,
7514,
23021,
13,
16514,
278,
1330,
537,
591,
8892,
11,
53... | 2.700787 | 127 |
#! /usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author : MG
@Time : 2018/7/6 10:21
@File : run.py.py
@contact : mmmaaaggg@163.com
@desc :
"""
if __name__ == '__main__':
import logging
from app.config import config
from app.app import app
logger = logging.getLogger()
if config.APP_ENABLE_SSL:
logger.info('ssl path: %s', config.HTTPS_SSL_PEM_FILE_PATH)
app.run(
host='0.0.0.0', port=config.APP_PORT, debug=True,
# ssl_context='adhoc',
ssl_context=(config.HTTPS_SSL_PEM_FILE_PATH, config.HTTPS_SSL_KEY_FILE_PATH) if config.APP_ENABLE_SSL else None
)
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
37811,
198,
31,
9800,
220,
1058,
34809,
198,
31,
7575,
220,
220,
220,
1058,
2864,
14,
22,
14,
21,
838,
25,
2481,
198... | 2.170139 | 288 |
"""Test MSlider class"""
import pytest
from dayu_widgets3.slider import MSlider
from dayu_widgets3.qt import Qt
@pytest.mark.parametrize('orient', (Qt.Horizontal, Qt.Vertical))
def test_slider_init(qtbot, orient):
"""Test MSlider init"""
slider = MSlider(orientation=orient)
slider.setValue(10)
qtbot.addWidget(slider)
slider.show()
assert slider.value() == 10
# test mouseMoveEvent, show the tooltip
# qtbot.mouseMove(slider) # mouse enter
# qtbot.mousePress(slider, Qt.LeftButton) # click
# qtbot.mouseMove(slider) # click
# assert slider.toolTip() == '10'
| [
37811,
14402,
6579,
75,
1304,
1398,
37811,
198,
11748,
12972,
9288,
198,
6738,
1110,
84,
62,
28029,
11407,
18,
13,
6649,
1304,
1330,
6579,
75,
1304,
198,
6738,
1110,
84,
62,
28029,
11407,
18,
13,
39568,
1330,
33734,
628,
198,
31,
9078... | 2.571429 | 238 |
'''
Module is a file which contains various Python functions and global variables.
It is simply just .py extension file which has python executable code.
Package is a collection of modules. It must contain an init.py file as a
flag so that the python interpreter processes it as such. The init.py
could be an empty file without causing issues.
Library is a collection of packages.
Framework is a collection of libraries.
'''
import json # you need to import this package
# following is a JSON string:
# json is used to share information b/w systems
# which may be programmed in separate programmimg langaue
# but communicate over web/network - http, etc
x = '{ "name":"John", "age":30, "city":"New York"}'
# parse x:
y = json.loads(x)
# the result is a Python dictionary:
print(y["age"])
print(type(y)) | [
7061,
6,
198,
26796,
318,
257,
2393,
543,
4909,
2972,
11361,
5499,
290,
3298,
9633,
13,
198,
1026,
318,
2391,
655,
764,
9078,
7552,
2393,
543,
468,
21015,
28883,
2438,
13,
198,
198,
27813,
318,
257,
4947,
286,
13103,
13,
632,
1276,
... | 3.654709 | 223 |
import requests
from bs4 import BeautifulSoup
import sqlite3
import re
import threading
from os import system
from time import sleep
from PySide2.QtWidgets import (
QMessageBox, QDialog, QMessageBox,
QVBoxLayout, QLabel, QLineEdit
)
from PySide2 import QtGui, QtCore
## DB
## SOCORRO!! lembra o tamnho dessas linhas?? kkk
con = sqlite3.connect("ourdata.db")
cur = con.cursor()
cur.execute(
"""
CREATE TABLE IF NOT EXISTS ourgames(
gamename TEXT,
gameurl TEXT,
gameactualprice TEXT,
gametrigger TEXT
)
"""
)
con.commit()
## Menu/Requests/BS4
# remove jogos cadastrados
| [
11748,
7007,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
220,
198,
11748,
44161,
578,
18,
220,
198,
11748,
302,
198,
11748,
4704,
278,
198,
6738,
28686,
1330,
1080,
198,
6738,
640,
1330,
3993,
198,
6738,
9485,
24819,
17,
13,
48,
... | 2.584034 | 238 |
from datetime import datetime, time
from decimal import Decimal
from random import randint, choice
from uuid import uuid4
import factory
import factory.fuzzy
import faker.providers.phone_number.pt_BR
import faker.providers.date_time
from src.classes import ValorHoraInput
from src.enums import EstadosEnum, UploadStatus
from src.models import AdminSistema, AdminEstacio, Endereco, Upload, PedidoCadastro, Estacionamento, HorarioPadrao, \
ValorHora, Veiculo, HorarioDivergente
from src.models.senha_request import SenhaRequest
from src.services import Crypto
from src.utils import random_string
crypto = Crypto(True, 12)
factory.Faker.add_provider(SimplePhoneProvider, locale='pt_BR')
factory.Faker.add_provider(CustomTimeProvider)
_ALL_FACTORIES = (AdminSistemaFactory, AdminEstacioFactory, UploadFactory, EnderecoFactory, PedidoCadastroFactory,
EstacionamentoFactory, HorarioPadraoFactory, ValorHoraFactory, VeiculoFactory,
HorarioDivergenteFactory, SenhaRequestFactory)
| [
6738,
4818,
8079,
1330,
4818,
8079,
11,
640,
198,
6738,
32465,
1330,
4280,
4402,
198,
6738,
4738,
1330,
43720,
600,
11,
3572,
198,
6738,
334,
27112,
1330,
334,
27112,
19,
198,
198,
11748,
8860,
198,
11748,
8860,
13,
69,
4715,
88,
198,... | 2.924157 | 356 |
import sys
import solution_reader3
# load a solution reader object which parses the input spec
sr = solution_reader3.SolutionReader('input_data') # should properly set in "solution_reader3.py" if 32-bit or 64-bit
# load the density data at output step 20
rho = sr.loadVec('rho001.dat')
# the shape of the data is (NX, NY, size) for 2D runs and (NX, NY, NZ, size) for 3D
# where size depends on the data read. For rho, size is the number of components.
# For velocity, size is the number of dimensions (u,v,w) velocities.
# This assumes we did a 2D simulation with 2 components (standard bubble test).
print rho.shape
# visualize density of the 0th component with matplotlib
from matplotlib import pyplot as plt
plt.imshow(rho[:,:,0].transpose(), origin='lower')
plt.colorbar()
plt.show()
| [
11748,
25064,
198,
11748,
4610,
62,
46862,
18,
198,
198,
2,
3440,
257,
4610,
9173,
2134,
543,
13544,
274,
262,
5128,
1020,
198,
27891,
796,
4610,
62,
46862,
18,
13,
46344,
33634,
10786,
15414,
62,
7890,
11537,
1303,
815,
6105,
900,
28... | 3.167331 | 251 |
""" Contains classes and methods to process the VRM data and convert it to the format as required by the 3D CNN model"""
import pandas as pd
import numpy as np
from tqdm import tqdm
#from numba import cuda
class GetTrainData():
"""GetTrainData Class (No initialization parameter)
"""
def data_import(self,file_names,data_folder):
"""data import used to import all files within the given folder and concatenate them into one dataframe
:param file_names: List of the input files
:type file_name: list (required)
:param data_folder: data folder name
:type data_folder: str (required)
:returns: dataframe of concatenated data from each file within the list
:rtype: pandas.dataframe [samples,point_dim]
"""
data_files=[]
for file in file_names:
file_path=data_folder+'/'+file
data_files.append(pd.read_csv(file_path,header=None))
dataset = pd.concat(data_files, ignore_index=True)
return dataset
def load_mapping_index(self,index_file):
"""load_mapping_index is used to import the mapping index
:param index_file: index file name
:type index_file: str (required)
:returns: array of mapping index (i,j,k) for each node (x,y,z)
:rtype: numpy.array [point_dim*3]
"""
file_path='../resources/mapping_files/'+index_file
try:
voxel_point_index = np.load(file_path,allow_pickle=True)
except AssertionError as error:
print(error)
print('Voxel Mapping File not found !')
return voxel_point_index
#@cuda.jit
def data_convert_voxel_mc(self,vrm_system,dataset,point_index,kcc_data=pd.DataFrame({'A' : []})):
"""data converts the node deviations to voxelized output
:param vrm_system: Object of the VRM System class
:type file_name: object(VRM_System class) (required)
:param dataset: list of concatenated dataset consisting of x,y,z deviations for each node
:type dataset: list (required)
:param point_index: mapping index
:type point_index: numpy.array [nodes*3] (required)
:param kcc_data: Process parameter data
:type kcc_data: numpy.array [samples*kcc_dim] (required)
:returns: input_conv_data, voxelized data for model input
:rtype: numpy.array [samples*voxel_dim*voxel_dim*voxel_dim*3]
:returns: kcc_data_dump, process/parameter data for model output
:rtype: numpy.array [samples*kcc_dim]
:returns: kpi_data_dump, KPI data (if any) for each sample, convergence flag (convergence of simulation model) is always the first KPI
:rtype: numpy.array [samples*kpi_dim]
"""
point_dim=vrm_system.point_dim
voxel_dim=vrm_system.voxel_dim
dev_channel=vrm_system.voxel_channels
noise_level=vrm_system.noise_level
noise_type=vrm_system.noise_type
kcc_dim=vrm_system.assembly_kccs
kpi_dim=vrm_system.assembly_kpis
#Declaring the variables for initializing input data structure initialization
start_index=0
end_index=len(dataset[0])
#end_index=50000
run_length=end_index-start_index
input_conv_data=np.zeros((run_length,voxel_dim,voxel_dim,voxel_dim,dev_channel))
if isinstance(kcc_data,pd.DataFrame):
kcc_dump=kcc_data.values
else:
kcc_dump=kcc_data
#kcc_dump=dataset.iloc[start_index:end_index, point_dim:point_dim+kcc_dim]
kpi_dump=dataset[0].iloc[start_index:end_index, point_dim:point_dim+kpi_dim]
kpi_dump=kpi_dump.values
not_convergent=0
convergent_id=[]
for index in tqdm(range(run_length)):
x_point_data=dataset[0].iloc[index, 0:point_dim]
y_point_data=dataset[1].iloc[index, 0:point_dim]
z_point_data=dataset[2].iloc[index, 0:point_dim]
if(dataset[0].iloc[index, point_dim]==0):
not_convergent=not_convergent+1
if(dataset[0].iloc[index, point_dim]==1):
convergent_id.append(index)
dev_data_x=x_point_data.values
dev_data_y=y_point_data.values
dev_data_z=z_point_data.values
if(noise_type=='uniform'):
measurement_noise_x= np.random.uniform(low=-noise_level, high=noise_level, size=(point_dim))
measurement_noise_y= np.random.uniform(low=-noise_level, high=noise_level, size=(point_dim))
measurement_noise_z= np.random.uniform(low=-noise_level, high=noise_level, size=(point_dim))
else:
measurement_noise_x=np.random.gauss(0,noise_level, size=(point_dim))
measurement_noise_y=np.random.gauss(0,noise_level, size=(point_dim))
measurement_noise_z=np.random.gauss(0,noise_level, size=(point_dim))
dev_data_x=dev_data_x+measurement_noise_x
dev_data_y=dev_data_y+measurement_noise_y
dev_data_z=dev_data_z+measurement_noise_z
cop_dev_data=np.zeros((voxel_dim,voxel_dim,voxel_dim,dev_channel))
for p in range(point_dim):
x_index=int(point_index[p,0])
y_index=int(point_index[p,1])
z_index=int(point_index[p,2])
cop_dev_data[x_index,y_index,z_index,:]=get_dev_data(cop_dev_data[x_index,y_index,z_index,0],dev_data_x[p],cop_dev_data[x_index,y_index,z_index,1],dev_data_y[p],cop_dev_data[x_index,y_index,z_index,2],dev_data_z[p])
input_conv_data[index,:,:,:]=cop_dev_data
print("Number of not convergent solutions: ",not_convergent)
#input_conv_data =input_conv_data[convergent_id,:,:,:,:]
#kcc_dump=kcc_dump[convergent_id,:]
kpi_dump=convergent_id
print("Convergent IDs ")
print(len(kpi_dump))
return input_conv_data, kcc_dump,kpi_dump
if (__name__=="__main__"):
#Importing Datafiles
print('Function for importing and preprocessing Cloud-of-Point Data')
| [
37811,
49850,
6097,
290,
5050,
284,
1429,
262,
6453,
44,
1366,
290,
10385,
340,
284,
262,
5794,
355,
2672,
416,
262,
513,
35,
8100,
2746,
37811,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,... | 2.354486 | 2,285 |
#!/usr/bin/env python3
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
628
] | 2.4 | 10 |
import os
import random
from random import shuffle
import pandas as pd
from pandas.core.frame import DataFrame
hdfs_train = data_read('data/hdfs_train')
hdfs_test_normal = data_read('data/hdfs_test_normal')
hdfs_test_abnormal = data_read('data/hdfs_test_abnormal')
hdfs_train.extend(hdfs_test_normal)
normal_all = hdfs_train
abnormal = hdfs_test_abnormal
print(len(normal_all))
max_len = 0
for i in range(len(normal_all)):
leng = len(normal_all[i])
# if leng>200:
# print(i)
# print(normal_all[i])
max_len = max([max_len, leng])
print(max_len)
random.seed(42)
shuffle(normal_all)
shuffle(abnormal)
train_normal = normal_all[:6000]
valid_normal = normal_all[6000:7000]
test_normal = normal_all[6000:]
train_abnormal = abnormal[:6000]
valid_abnormal = abnormal[6000:7000]
test_abnormal = abnormal[6000:]
train_all = train_normal + train_abnormal
train_all_label = [0] * len(train_normal) + [1] * len(train_abnormal)
valid_all = valid_normal + valid_abnormal
valid_all_label = [0] * len(valid_normal) + [1] * len(valid_abnormal)
test_all = test_normal + test_abnormal
test_all_label = [0] * len(test_normal) + [1] * len(test_abnormal)
train_new = DataFrame({"Sequence": train_all, "label": train_all_label})
valid_new = DataFrame({"Sequence": valid_all, "label": valid_all_label})
test_new = DataFrame({"Sequence": test_all, "label": test_all_label})
train_new.to_csv('data/train.csv', index=None)
valid_new.to_csv('data/valid.csv', index=None)
test_new.to_csv('data/test.csv', index=None)
| [
11748,
28686,
201,
198,
11748,
4738,
201,
198,
6738,
4738,
1330,
36273,
201,
198,
201,
198,
11748,
19798,
292,
355,
279,
67,
201,
198,
6738,
19798,
292,
13,
7295,
13,
14535,
1330,
6060,
19778,
201,
198,
201,
198,
201,
198,
201,
198,
... | 2.404871 | 657 |
from django.urls import path
from dataworkspace.apps.accounts.utils import login_required
from dataworkspace.apps.applications.views import (
application_spawning_html_view,
application_running_html_view,
tools_html_view,
quicksight_start_polling_sync_and_redirect,
UserToolSizeConfigurationView,
)
urlpatterns = [
path("", login_required(tools_html_view), name="tools"),
path("<str:public_host>/spawning", login_required(application_spawning_html_view)),
path("<str:public_host>/running", login_required(application_running_html_view)),
path(
"quicksight/redirect",
login_required(quicksight_start_polling_sync_and_redirect),
name="quicksight_redirect",
),
path(
"quicksight/redirect",
login_required(quicksight_start_polling_sync_and_redirect),
name="quicksight_redirect",
),
path(
"configure-size/<str:tool_host_basename>/",
login_required(UserToolSizeConfigurationView.as_view()),
name="configure_tool_size",
),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
4818,
707,
3647,
10223,
13,
18211,
13,
23317,
82,
13,
26791,
1330,
17594,
62,
35827,
198,
6738,
4818,
707,
3647,
10223,
13,
18211,
13,
1324,
677,
602,
13,
33571,
1330,
357,... | 2.501193 | 419 |
# coding: utf-8
"""
cccc-praying-api
The API for CCCC Praying project # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.user_api import UserApi # noqa: E501
from swagger_client.rest import ApiException
class TestUserApi(unittest.TestCase):
"""UserApi unit test stubs"""
def test_authenticate_user(self):
"""Test case for authenticate_user
Log in a User. # noqa: E501
"""
pass
def test_get_user_by_id(self):
"""Test case for get_user_by_id
Fetch data about a specific User. # noqa: E501
"""
pass
def test_new_user(self):
"""Test case for new_user
"""
pass
if __name__ == '__main__':
unittest.main()
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
269,
535,
66,
12,
1050,
8369,
12,
15042,
628,
220,
220,
220,
383,
7824,
329,
327,
46361,
1736,
8369,
1628,
220,
1303,
645,
20402,
25,
412,
33548,
628,
220,
220,
... | 2.340206 | 388 |
from functools import wraps
from multiprocessing import Process
import webbrowser
from .utils import processing_func_name
def processing_function(func):
"""Decorator for turning Sketch methods into Processing functions.
Marks the function it's decorating as a processing function by camel
casing the name of the function (to follow Processing naming conventions)
and attaching the new name to the function object as 'processing_name'.
It also DRY's up the code a bit by creating the command dict from the
result of calling the wrapped function and appends it to the Sketch
object's frame.
"""
# Camel case the name to match the Processing naming conventions
processing_name = processing_func_name(func.__name__)
# Create a wrapper function that gets the returned args from the real
# function and creates a new command dict and adds it to the frame queue.
@wraps(func)
# Mark the method as a Processing function by adding its counterparts name
wrapper.processing_name = processing_name
return wrapper
| [
6738,
1257,
310,
10141,
1330,
27521,
198,
6738,
18540,
305,
919,
278,
1330,
10854,
198,
11748,
3992,
40259,
198,
198,
6738,
764,
26791,
1330,
7587,
62,
20786,
62,
3672,
628,
198,
4299,
7587,
62,
8818,
7,
20786,
2599,
198,
220,
220,
22... | 3.877256 | 277 |
import argparse
from app import init_app
from common.config import Config
if __name__ == "__main__":
args = get_runtime_args()
config = Config(args)
app = init_app(config)
app.run(debug=config.debug, host=config.host, port=config.port)
| [
11748,
1822,
29572,
198,
198,
6738,
598,
1330,
2315,
62,
1324,
198,
6738,
2219,
13,
11250,
1330,
17056,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
26498,
796,
651,
62,
43282,
62,
22046,
3419... | 2.791209 | 91 |
from typing import List
solution = Solution()
print(solution.reverseOnlyLetters(s = "ab-cd"))
print(solution.reverseOnlyLetters(s = "a-bC-dEf-ghIj"))
print(solution.reverseOnlyLetters(s = "Test1ng-Leet=code-Q!"))
| [
6738,
19720,
1330,
7343,
220,
198,
220,
220,
220,
220,
198,
220,
220,
220,
220,
198,
82,
2122,
796,
28186,
3419,
198,
198,
4798,
7,
82,
2122,
13,
50188,
10049,
5756,
1010,
7,
82,
796,
366,
397,
12,
10210,
48774,
198,
198,
4798,
7,... | 2.364583 | 96 |
from gevent import monkey
monkey.patch_all()
import time
import logging
import copy_reg
import types
import grequests
import requests
from multiprocessing import JoinableQueue, Process
SLEEP_INTERVAL = 5
# We need to pickle instance methods of the Worker Class below so this snippet does that
# Refer: http://stackoverflow.com/questions/1816958/cant-pickle-type-instancemethod-
# when-using-pythons-multiprocessing-pool-ma
# START
copy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method)
# END
class WorkerProcessor(Process):
"""
Worker to process the responses to HTTP requests sent
Abstract class
"""
def __init__(self, queue, processor_fn):
"""
Constructor
:param queue: JoinableQueue object which will contain the responses
:param processor_fn: Function to perform the processing
:return: None
"""
super(WorkerProcessor, self).__init__()
self.queue = queue
self.processor_fn = processor_fn
def run(self):
"""
Run
:return: None
"""
while True:
rs = self.queue.get()
self.processor_fn(rs)
self.queue.task_done()
class WorkerHTTP(object):
"""
Worker to sent HTTP requests
"""
def __init__(self, worker_size=4, pool_size=15, max_retries=None, sleep_interval=SLEEP_INTERVAL):
"""
Constructor
:param worker_size: No of child processes or workers to start
:param pool_size: Size of the Pool to setup for sending concurrent requests using grequests
:param max_retries: No of retries after which we need to shutdown the Workers
:return: None
"""
self._session = requests.Session()
self._to_process_mq = JoinableQueue()
self._workers = []
for i in range(worker_size):
p = WorkerProcessor(self._to_process_mq, self.process_response)
p.daemon = True
self._workers.append(p)
self._sleep_interval = sleep_interval
self._pool_size = pool_size
self._max_retries = max_retries
self._requests_list = []
self._retries_list = []
# Here we remove the Queue object from the dict that has to be pickled
# Since the instance object is already being pickled
def start(self):
"""
Start the Processor Workers to process response of HTTP requests sent
:return: None
"""
for _worker in self._workers:
_worker.start()
self.prepare()
working = True
retry_count = 0
while working:
grequests.map(self._requests_list, size=self._pool_size, stream=False)
if len(self._retries_list) == 0:
break
# sleep before a retry
time.sleep(self._sleep_interval)
# reset state of requests and retries array
self._requests_list = self._retries_list
self._retries_list = []
logging.info("Retrying ... for %d URLs" % len(self._requests_list))
if self._max_retries is not None:
retry_count += 1
working = retry_count == self._max_retries
self._to_process_mq.join()
def prepare(self):
"""
Method to prepare the Worker for sending/processing HTTP requests
:return: None
"""
raise NotImplementedError, "Method not implemented"
def process_response(self, item):
"""
Method to process response for all the HTTP requests' response added to MQ
:param item: Response object in the MQ
:return: None
"""
raise NotImplementedError, "Callback not implemented"
def process_request(self, r, *args, **kwargs):
"""
Method to process the request sent by the HTTP Worker (add to the MQ for response processing)
:param r: HTTP requests' response object
:param args:
:param kwargs:
:return: None
"""
raise NotImplementedError, "Callback not implemented"
def put_request(self, url, payload, retry=False):
"""
Method to add a request to the request_list to be sent by the HTTP Worker
:param url:
:param payload:
:param retry:
:return:
"""
r = grequests.get(url, params=payload, hooks={'response': self.process_request}, session=self._session)
if retry:
self._retries_list.append(r)
else:
self._requests_list.append(r)
def put_response(self, rs):
"""
Method to add the response of the request to the MQ
:param rs:
:return:
"""
self._to_process_mq.put(rs)
| [
6738,
4903,
1151,
1330,
21657,
198,
49572,
13,
17147,
62,
439,
3419,
198,
198,
11748,
640,
198,
11748,
18931,
198,
198,
11748,
4866,
62,
2301,
198,
11748,
3858,
198,
198,
11748,
308,
8897,
3558,
198,
11748,
7007,
198,
198,
6738,
18540,
... | 2.374065 | 2,005 |
import torch
import torch.optim as optim
import argparse
import numpy as np
import time
from tensorboardX import SummaryWriter
from collections import deque
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
sys.path.append('../..')
from pg_travel.deeprm import model
from pg_travel.deeprm.hparams import HyperParams as Hp
from pg_travel.deeprm.env_simu_sigle.environment import Env
from pg_travel.deeprm.env_simu_sigle import other_agents
from pg_travel.deeprm.env_simu_sigle import job_distribution
from pg_travel.deeprm.agent import vanila_pg
def discount(x, gamma):
"""
Given vector x, computes a vector y such that
y[i] = x[i] + gamma * x[i + 1] + gamma ^ 2 * x[i + 2] + ...
:param x:
:param gamma:
:return:
"""
out = np.zeros(len(x))
out[-1] = x[-1]
for i in reversed(range(len(x) - 1)):
out[i] = x[i] + gamma * out[i + 1]
assert x.ndim >= 1
# TODO: More efficient version:
# # scipy.signal.lfilter([1],[1,-gamma],x[::-1], axis=0)[::-1]
# TODO: and maybe torch has similar method
return out
if __name__ == '__main__':
main()
| [
11748,
28034,
201,
198,
11748,
28034,
13,
40085,
355,
6436,
201,
198,
11748,
1822,
29572,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
640,
201,
198,
6738,
11192,
273,
3526,
55,
1330,
21293,
34379,
201,
198,
6738,
17268,
13... | 2.35503 | 507 |
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 18 18:07:30 2019
@author: Guillaume
"""
"""
This file contains functions to calculate the score of the students and the statistics for the group
"""
# calculates the statistics for the class
# given the correction, marks every answer of ONE student as either true or false
# main function : given the correction, marks every answer of every student as either true or false
# main for testing only
if __name__ == '__main__':
corr=['D', 'C', 'A', 'D', 'B', 'C', 'B', 'D', 'A', 'C', 'D', 'B', 'A', 'C', 'D', 'B', 'C', 'D', 'C', 'C', 'B', 'D', 'A', 'C', 'D', 'D', 'A', 'B', 'D', 'A', 'C', 'C', 'D', 'B', 'B', 'D', 'B', 'C', 'D', 'B', 'C', 'A', 'B', 'A', 'C', 'C', 'D', 'B', 'D', 'D', 'A', 'B', 'C', 'B', 'A', 'B', 'C', 'D', 'C', 'A', 'C', 'D', 'A', 'A', 'D', 'D', 'B', 'C', 'B', 'C', 'B', 'D', 'C', 'B', 'A', 'D', 'A', 'C', 'C', 'C', 'B', 'D', 'D', 'C', 'C', 'B', 'B', 'A', 'C', 'C', 'D', 'D', 'A', 'A', 'B', 'C', 'A', 'B', 'C', 'D', 'C', 'C', 'B', 'C', 'C', 'A', 'C', 'C', 'C', 'A', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'A', 'B', 'A', 'A', 'C', 'C', 'B', 'B', 'C', 'C', 'C', 'B', 'A', 'B', 'A', 'B', 'C', 'A', 'B', 'A', 'C', 'D', 'B', 'A', 'A', 'C', 'B', 'B', 'A', 'C', 'A', 'B', 'C', 'B', 'B', 'C', 'B', 'C', 'A', 'B', 'C', 'C', 'B', 'A', 'C', 'C', 'A', 'B', 'A', 'B', 'A', 'A', 'C', 'A', 'B', 'A', 'A', 'C', 'A', 'A', 'B', 'B', 'B', 'A', 'A', 'A', 'A', 'B', 'B', 'A', 'A', 'A', 'B', 'A', 'A', 'B', 'A', 'A', 'A', 'B', 'A', 'D', 'B']
ans= ['C', 'C', 'A', 'C', 'C', 'B', 'D', 'C', 'B', 'C', 'C', 'C', 'D', 'B', 'C', 'D', 'B', 'C', 'A', 'C', 'C', 'C', 'A', 'B', 'D', 'A', 'A', 'C', 'A', 'B', 'D', 'C', 'B', 'C', 'C', 'C', 'C', 'C', 'C', 'D', 'D', 'C', 'B', 'A', 'B', 'A', 'B', 'C', 'D', 'C', 'A', 'D', 'C', 'C', 'C', 'B', 'A', 'A', 'B', 'A', 'B', 'B', 'B', 'A', 'A', 'A', 'C', 'D', 'B', 'A', 'A', 'C', 'B', 'B', 'D', 'D', 'C', 'C', 'D', 'D', 'D', 'D', 'D', 'C', 'A', 'A', 'A', 'B', 'A', 'B', 'B', 'A', 'A', 'B', 'B', 'B', 'C', 'A', 'A', 'D', 'C', 'D', 'A', 'D', 'D', 'C', 'C', 'C', 'C', 'C', 'B', 'C', 'D', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'A', 'A', 'D', 'B', 'B', 'C', 'C', 'D', 'D', 'C', 'A', 'A', 'B', 'D', 'D', 'D', 'D', 'C', 'D', 'D', 'C', 'A', 'A', 'D', 'C', 'A', 'D', 'D', 'A', 'B', 'C', 'B', 'B', 'A', 'B', 'D', 'A', 'A', 'A', 'A', 'D', 'B', 'B', 'D', 'B', 'D', 'A', 'C', 'D', 'D', 'B', 'A', 'B', 'A', 'C', 'C', 'B', 'C', 'C', 'C', 'A', 'C', 'A', 'A', 'A', 'C', 'C', 'A', 'A', 'A', 'C', 'C', 'A', 'C', 'B', 'B', 'A', 'A', 'A']
ans2=['D', 'C', 'A', 'D', 'B', 'C', 'D', 'C', 'B', 'C', 'C', 'C', 'D', 'B', 'C', 'D', 'B', 'C', 'A', 'C', 'C', 'C', 'A', 'B', 'D', 'A', 'A', 'C', 'A', 'B', 'D', 'C', 'B', 'C', 'C', 'C', 'C', 'C', 'C', 'D', 'D', 'C', 'B', 'A', 'B', 'A', 'B', 'C', 'D', 'C', 'A', 'D', 'C', 'C', 'C', 'B', 'A', 'A', 'B', 'A', 'B', 'B', 'B', 'A', 'A', 'A', 'C', 'D', 'B', 'A', 'A', 'C', 'B', 'B', 'D', 'D', 'C', 'C', 'D', 'D', 'D', 'D', 'D', 'C', 'A', 'A', 'A', 'B', 'A', 'B', 'B', 'A', 'A', 'B', 'B', 'B', 'C', 'A', 'A', 'D', 'C', 'D', 'A', 'D', 'D', 'C', 'C', 'C', 'C', 'C', 'B', 'C', 'D', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'A', 'A', 'D', 'B', 'B', 'C', 'C', 'D', 'D', 'C', 'A', 'A', 'B', 'D', 'D', 'D', 'D', 'C', 'D', 'D', 'C', 'A', 'A', 'D', 'C', 'A', 'D', 'D', 'A', 'B', 'C', 'B', 'B', 'A', 'B', 'D', 'A', 'A', 'A', 'A', 'D', 'B', 'B', 'D', 'B', 'D', 'A', 'C', 'D', 'D', 'B', 'A', 'B', 'A', 'C', 'C', 'B', 'C', 'C', 'C', 'A', 'C', 'A', 'A', 'A', 'C', 'C', 'A', 'A', 'A', 'C', 'C', 'A', 'C', 'B', 'B', 'A', 'A', 'A']
ans3=['A', 'C', 'A', 'D', 'B', 'C', 'B', 'C', 'B', 'C', 'C', 'C', 'D', 'B', 'C', 'D', 'B', 'C', 'A', 'C', 'C', 'C', 'A', 'B', 'D', 'A', 'A', 'C', 'A', 'B', 'D', 'C', 'B', 'C', 'C', 'C', 'C', 'C', 'C', 'D', 'D', 'C', 'B', 'A', 'B', 'A', 'B', 'C', 'D', 'C', 'A', 'D', 'C', 'C', 'C', 'B', 'A', 'A', 'B', 'A', 'B', 'B', 'B', 'A', 'A', 'A', 'C', 'D', 'B', 'A', 'A', 'C', 'B', 'B', 'D', 'D', 'C', 'C', 'D', 'D', 'D', 'D', 'D', 'C', 'A', 'A', 'A', 'B', 'A', 'B', 'B', 'A', 'A', 'B', 'B', 'B', 'C', 'A', 'A', 'D', 'C', 'D', 'A', 'D', 'D', 'C', 'C', 'C', 'C', 'C', 'B', 'C', 'D', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'A', 'A', 'D', 'B', 'B', 'C', 'C', 'D', 'D', 'C', 'A', 'A', 'B', 'D', 'D', 'D', 'D', 'C', 'D', 'D', 'C', 'A', 'A', 'D', 'C', 'A', 'D', 'D', 'A', 'B', 'C', 'B', 'B', 'A', 'B', 'D', 'A', 'A', 'A', 'A', 'D', 'B', 'B', 'D', 'B', 'D', 'A', 'C', 'D', 'D', 'B', 'A', 'B', 'A', 'C', 'C', 'B', 'C', 'C', 'C', 'A', 'C', 'A', 'A', 'A', 'C', 'C', 'A', 'A', 'A', 'C', 'C', 'A', 'C', 'B', 'B', 'A', 'A', 'A']
test=compareAll(corr,[ans,ans2,ans3])
#print(test [0])
#print(test [1])
#print(test [2])
from export import exportIndiv, exportClasse
exportIndiv(test[0], [("tata"),("tété"),("titi"),("tutu"),("tonton"),("toto"),("tyty")])
exportClasse(test[1],test[2]) | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
19480,
2365,
1248,
1248,
25,
2998,
25,
1270,
13130,
201,
198,
201,
198,
31,
9800,
25,
1962,
5049,
2454,
201,
198,
37811,
201,
198,
20... | 1.764919 | 2,765 |
alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
direction = input("Type 'encode' to encrypt, type 'decode' to decrypt : \n")
text = input("Type your message: \n").lower()
shift = int(input("Type the shift number : \n"))
if direction == "encode":
encode(text=text,shift=shift)
elif direction == "decode":
decode(text=text,shift=shift)
else:
print("Invalid Input")
| [
17307,
8380,
796,
37250,
64,
41707,
65,
41707,
66,
41707,
67,
41707,
68,
41707,
69,
41707,
70,
41707,
71,
41707,
72,
41707,
73,
41707,
74,
41707,
75,
41707,
76,
41707,
77,
41707,
78,
41707,
79,
41707,
80,
41707,
81,
41707,
82,
41707,
... | 2.365145 | 241 |
"""
将 M 个同样的糖果放在 N 个同样的篮子里,允许有的篮子空着不放,共有多少种不同的分法?
比如,把 7 个糖果放在 3 个篮子里,共有 8 种分法(每个数表示篮子中放的糖果数,数的个数为篮子数):
1 1 5
1 2 4
1 3 3
2 2 3
2 5 0
3 4 0
6 1 0
7 0 0
注意:相同的分布,顺序不同也只算作一种分法,如 7 0 0、0 7 0 和 0 0 7 只算作一种。
输入包含二个正整数 M 和 N,以(,)分开,M 表示有几个同样的糖果,N 表示有几个同样的篮子 M与N范围:1 <= M,N <= 100。
输出一个正整数 K,表示有多少种分法。
输入样例
7,3
输出样例
8
"""
# 此处可 import 模块
"""
@param string line 为单行测试数据
@return string 处理后的结果
"""
aa = solution("7 3")
print(aa)
| [
37811,
628,
220,
220,
220,
10263,
108,
228,
337,
220,
10310,
103,
28938,
234,
43718,
115,
21410,
163,
111,
244,
162,
252,
250,
162,
242,
122,
28839,
101,
399,
220,
10310,
103,
28938,
234,
43718,
115,
21410,
163,
107,
106,
36310,
34932... | 0.812598 | 635 |
from mpi4py import MPI
import argparse
import numpy
from arcsilib.arcsiutils import ARCSIEnum
import sys
# Define MPI message tags
mpiTags = ARCSIEnum('READY', 'DONE', 'EXIT', 'START')
arcsiStages = ARCSIEnum('ARCSIPART1', 'ARCSIPART2', 'ARCSIPART3', 'ARCSIPART4')
# Initializations and preliminaries
mpiComm = MPI.COMM_WORLD # get MPI communicator object
mpiSize = mpiComm.size # total number of processes
mpiRank = mpiComm.rank # rank of this process
mpiStatus = MPI.Status() # get MPI status object
print("Rank: " + str(mpiRank))
if (__name__ == '__main__') and (mpiRank == 0):
paramsLst = numpy.arange(100)
paramsLstTmp = []
nTasks = len(paramsLst)
taskIdx = 0
completedTasks = 0
while completedTasks < nTasks:
print("completedTasks = ", completedTasks)
print("nTasks = ", nTasks)
rtnParamsObj = mpiComm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=mpiStatus)
source = mpiStatus.Get_source()
tag = mpiStatus.Get_tag()
print("Source: ", source)
if tag == mpiTags.READY:
# Worker is ready, so send it a task
if taskIdx < nTasks:
mpiComm.send([arcsiStages.ARCSIPART1, paramsLst[taskIdx]], dest=source, tag=mpiTags.START)
print("Sending task %d to worker %d" % (taskIdx, source))
taskIdx += 1
#else:
# mpiComm.send(None, dest=source, tag=mpiTags.EXIT)
elif tag == mpiTags.DONE:
print("Got data from worker %d" % source)
paramsLstTmp.append(rtnParamsObj)
completedTasks += 1
elif tag == tags.EXIT:
print("Worker %d exited." % source)
closedWorkers += 1
#raise ARCSIException("MPI worker was closed - worker was still needed so there is a bug here somewhere... Please report to mailing list.")
paramsLst = paramsLstTmp
print(paramsLst)
paramsLstTmp = []
nTasks = len(paramsLst)
taskIdx = 0
completedTasks = 0
while completedTasks < nTasks:
print("completedTasks = ", completedTasks)
print("nTasks = ", nTasks)
rtnParamsObj = mpiComm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=mpiStatus)
source = mpiStatus.Get_source()
tag = mpiStatus.Get_tag()
print("Source: ", source)
if tag == mpiTags.READY:
# Worker is ready, so send it a task
if taskIdx < nTasks:
mpiComm.send([arcsiStages.ARCSIPART4, paramsLst[taskIdx]], dest=source, tag=mpiTags.START)
print("Sending task %d to worker %d" % (taskIdx, source))
taskIdx += 1
#else:
# mpiComm.send(None, dest=source, tag=mpiTags.EXIT)
elif tag == mpiTags.DONE:
print("Got data from worker %d" % source)
paramsLstTmp.append(rtnParamsObj)
completedTasks += 1
elif tag == tags.EXIT:
print("Worker %d exited." % source)
closedWorkers += 1
#raise ARCSIException("MPI worker was closed - worker was still needed so there is a bug here somewhere... Please report to mailing list.")
for workerID in range(mpiSize):
if workerID > 0:
mpiComm.send(None, dest=workerID, tag=mpiTags.EXIT)
else:
print("ELSE not main: ", mpiRank)
# Worker processes execute code below
while True:
mpiComm.send(None, dest=0, tag=mpiTags.READY)
tskData = mpiComm.recv(source=0, tag=MPI.ANY_TAG, status=mpiStatus)
tag = mpiStatus.Get_tag()
paramsObj = None
print(tskData)
print(tag)
if tag == mpiTags.START:
# Do work!
if tskData[0] == arcsiStages.ARCSIPART1:
print('PART #1')
paramsObj = tskData[1] * 10
elif tskData[0] == arcsiStages.ARCSIPART2:
print('PART #2')
paramsObj = tskData[1] * 20
elif tskData[0] == arcsiStages.ARCSIPART3:
print('PART #3')
paramsObj = tskData[1] * 30
elif tskData[0] == arcsiStages.ARCSIPART4:
print('PART #4')
paramsObj = tskData[1] * 40
else:
raise ARCSIException("Don't recognise processing stage")
mpiComm.send(paramsObj, dest=0, tag=mpiTags.DONE)
elif tag == mpiTags.EXIT:
break
mpiComm.send(None, dest=0, tag=mpiTags.EXIT)
| [
6738,
285,
14415,
19,
9078,
1330,
4904,
40,
198,
11748,
1822,
29572,
198,
11748,
299,
32152,
198,
6738,
44606,
22282,
13,
5605,
13396,
26791,
1330,
5923,
7902,
40,
4834,
388,
198,
11748,
25064,
198,
198,
2,
2896,
500,
4904,
40,
3275,
... | 2.020889 | 2,250 |
from __future__ import print_function
from collections import namedtuple
from distutils.util import get_platform
import subprocess
import sys
class CommandFailed(Exception):
"""
The command failed to run for any reason
"""
pass
class CommandError(CommandFailed):
"""
The command returned an exit code
"""
def shell(command, capture=True):
    """
    Run a command on the local system.
    This is borrowed from fabric.operations, with simplifications
    `local` is simply a convenience wrapper around the use of the builtin
    Python ``subprocess`` module with ``shell=True`` activated. If you need to
    do anything special, consider using the ``subprocess`` module directly.
    `local` is not currently capable of simultaneously printing and
    capturing output, as `~fabric.operations.run`/`~fabric.operations.sudo`
    do. The ``capture`` kwarg allows you to switch between printing and
    capturing as necessary, and defaults to ``True``.
    When ``capture=False``, the local subprocess' stdout and stderr streams are
    hooked up directly to your terminal, though you may use the global
    :doc:`output controls </usage/output_controls>` ``output.stdout`` and
    ``output.stderr`` to hide one or both if desired. In this mode, the return
    value's stdout/stderr values are always empty.
    When ``capture=True``, you will not see any output from the subprocess in
    your terminal, but the return value will contain the captured
    stdout/stderr.
    :param command: shell command line to execute.
    :param capture: capture stdout/stderr when True; inherit them when False.
    :raises CommandFailed: if the subprocess could not be spawned.
    :raises CommandError: if the command exited with a nonzero return code.
    :return: a ``CommandResult`` (out, err, returncode, failed).
    """
    if capture:
        out_stream = subprocess.PIPE
        err_stream = subprocess.PIPE
    else:
        # Non-captured streams are left to stdout
        out_stream = subprocess.STDOUT
        err_stream = subprocess.STDOUT
    try:
        # NOTE(review): is_windows() is defined elsewhere in this file.
        # On Windows the raw string is passed; on POSIX it is wrapped in a
        # one-element list (with shell=True either form reaches the shell).
        cmd_arg = command if is_windows() else [command]
        p = subprocess.Popen(cmd_arg, shell=True,
                             stdout=out_stream, stderr=err_stream)
        stdout, stderr = p.communicate()
    except Exception:
        # Any spawn/communication failure is re-raised as CommandFailed,
        # carrying the original exception and its traceback.
        e = CommandFailed('command failed', sys.exc_info()[1])
        e.__traceback__ = sys.exc_info()[2]
        raise e
    # Handle error condition (deal with stdout being None, too)
    out = stdout.strip() if stdout else ""
    err = stderr.strip() if stderr else ""
    failed = p.returncode != 0
    # NOTE(review): CommandResult is presumably the namedtuple this module
    # defines elsewhere (collections.namedtuple is imported above) — confirm.
    result = CommandResult(out, err, p.returncode, failed)
    if result.failed:
        msg = "Encountered an error (return code %s) while executing '%s'" % (
            p.returncode, command)
        raise CommandError(message=msg, result=result)
    return result
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
6738,
1233,
26791,
13,
22602,
1330,
651,
62,
24254,
198,
11748,
850,
14681,
198,
11748,
25064,
628,
628,
628,
198,
198,
4871,
9455,
37,
6255,
... | 2.856826 | 901 |
# Basketball Scores
# apple vs banana


def _read_team_total() -> int:
    """Read a team's three-pointers, two-pointers and free throws from stdin
    (one integer per line, in that order) and return its total score."""
    threes = int(input())
    twos = int(input())
    frees = int(input())
    return threes * 3 + twos * 2 + frees


# Input order: apple's three counts first, then banana's.
apple_total = _read_team_total()
banana_total = _read_team_total()

# Print 'A' for an apple win, 'B' for a banana win, 'T' for a tie.
if apple_total > banana_total:
    print('A')
elif apple_total < banana_total:
    print('B')
else:
    print('T')
2,
25911,
44654,
198,
2,
17180,
3691,
25996,
198,
198,
2,
5128,
532,
22514,
198,
18040,
62,
15542,
796,
493,
7,
15414,
28955,
198,
18040,
62,
11545,
796,
493,
7,
15414,
28955,
198,
18040,
62,
5787,
796,
493,
7,
15414,
28955,
198,
19... | 2.702247 | 178 |
# /*******************************************************************************
# Copyright Intel Corporation.
# This software and the related documents are Intel copyrighted materials, and your use of them
# is governed by the express license under which they were provided to you (License).
# Unless the License provides otherwise, you may not use, modify, copy, publish, distribute, disclose
# or transmit this software or the related documents without Intel's prior written permission.
# This software and the related documents are provided as is, with no express or implied warranties,
# other than those that are expressly stated in the License.
#
# *******************************************************************************/
import os
import json
import subprocess
import tempfile
from typing import List, Dict
from modules.check import CheckSummary, CheckMetadataPy
from checkers_py.common.gpu_helper import are_intel_gpus_found, intel_gpus_not_found_handler
from checkers_py.common.gpu_helper import get_card_devices, get_render_devices
# Absolute directory containing this checker module.
FULL_PATH_TO_CHECKER = os.path.dirname(os.path.realpath(__file__))
# Location of the bundled offload-check sources shipped next to this module.
PATH_TO_SOURCE_OFFLOAD = os.path.join(FULL_PATH_TO_CHECKER, "oneapi_check_offloads")
# Scratch output paths for each compiled test binary. Each constant gets its
# own fresh mkdtemp() directory, created at import time.
# NOTE(review): these temp directories are never cleaned up here — presumably
# acceptable for a diagnostic tool, but confirm.
TMP_MATMUL_FILE = os.path.join(tempfile.mkdtemp(), "matmul")
TMP_BINOPTION_FILE = os.path.join(tempfile.mkdtemp(), "binoption")
TMP_SIMPLE_SYCL_CODE_FILE = os.path.join(tempfile.mkdtemp(), "simple-sycl-code")
TMP_PARALLEL_FOR_1D_FILE = os.path.join(tempfile.mkdtemp(), "parallel-for-1D")
| [
2,
1220,
17174,
17174,
46068,
8162,
198,
2,
15069,
8180,
10501,
13,
198,
2,
770,
3788,
290,
262,
3519,
4963,
389,
8180,
33696,
5696,
11,
290,
534,
779,
286,
606,
198,
2,
318,
21825,
416,
262,
4911,
5964,
739,
543,
484,
547,
2810,
... | 3.525581 | 430 |
#!/usr/bin/env python3
import sys
import warnings
import pandas as pd
import statsmodels.api as sm
from copy import deepcopy # Used to create sentiment word dictionary
# Silence FutureWarnings (e.g. from pandas/statsmodels) so output stays readable.
warnings.simplefilter(action="ignore", category=FutureWarning)
# **************************************************************************
# **************************************************************************
# **************************************************************************
# **************************************************************************
# **************************************************************************
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
25064,
198,
11748,
14601,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
9756,
27530,
13,
15042,
355,
895,
198,
198,
6738,
4866,
1330,
2769,
30073,
220,
1303,
16718,
... | 5.962963 | 108 |
import os

# Directory containing this module; the ene_ana library templates live beside it.
template_dir = os.path.dirname(__file__)

# Use os.path.join rather than string concatenation: it is the idiomatic,
# separator-safe way to build paths (and avoids a leading "/" if template_dir
# is ever empty).
# Legacy 7-state RE-EDS ene_ana library.
ene_ana_old_path = os.path.join(template_dir, "ene_ana_REEDS_7state.md++.lib")
# Current 9-state RE-EDS ene_ana library.
ene_ana_lib_path = os.path.join(template_dir, "new_ene_ana_REEDS_9state.md++.lib")
| [
11748,
28686,
198,
198,
28243,
62,
15908,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
8,
198,
198,
1734,
62,
2271,
62,
727,
62,
6978,
796,
11055,
62,
15908,
1343,
12813,
1734,
62,
2271,
62,
2200,
1961,
50,
62,
22,
52... | 2.301205 | 83 |
import numpy as np
from scipy.special import legendre
| [
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
13,
20887,
1330,
8177,
260,
198
] | 3.375 | 16 |
# coding:utf-8
# Copyright: All rights Reserved, Designed By XHal.cc
# Author: Hal
# Created: 2021/2/4 22:08
# Version: V1.0.0
# Purpose: dict objects - basic usage demo
# Creation style 1: {} curly braces, like a JavaScript Object
# Empty dict
dic = {}
print(dic)
# key: value pairs
dic = {'a': 'aa', 'b': 3}
print(dic, id(dic), type(dic))
# Creation style 2: the dict() builtin (from an existing mapping)
dic1 = dict({'a': 'aa', 'b': 3})
print(dic1, id(dic1), type(dic1))
# Equal contents, but distinct objects: == is True, `is` is False.
print(dic == dic1)
print(dic is dic1)
# Creation style 2 (keyword form): keys on the left are bare identifiers
# (no quotes); values on the right keep their own types.
dic2 = dict(a='aa', b=3)
print(dic2, id(dic2), type(dic2))
print(dic == dic2)
print(dic is dic2)
| [
2,
19617,
25,
40477,
12,
23,
198,
2,
13328,
231,
230,
30266,
225,
46479,
94,
162,
223,
107,
25,
1439,
2489,
33876,
11,
39198,
2750,
1395,
40202,
13,
535,
198,
2,
220,
47987,
163,
254,
223,
43291,
38519,
25,
11023,
198,
2,
10263,
2... | 1.229858 | 422 |
# -*- coding: utf-8 -*-
"""ANN MNIST .ipynb
Automatically generated by Colaboratory.
Original file is located at
    https://colab.research.google.com/drive/1iKLukhHa0mOTG2BBrKnlJawBK1ZYaEC0
"""

import keras
from keras.datasets import mnist

# Load MNIST and flatten each 28x28 image into a 784-dim float vector.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# Scale pixel intensities from [0, 255] to [0, 1].
x_train /= 255
x_test /= 255

from keras.utils import np_utils

# One-hot encode the ten digit classes.
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)

# NOTE(review): mixing the standalone `keras` package with
# `tensorflow.python.keras` is fragile; ideally import everything from one.
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers.core import Dense, Dropout, Activation

# BUG FIX: the original did `Sequential(Dropout(0.2))` — Sequential expects a
# *list* of layers, so passing a single Dropout layer fails at construction.
# Build the model empty and add layers in order, with dropout between the
# dense layers as presumably intended.
model = Sequential()
model.add(Dense(512, activation='relu', input_dim=784))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))

model.compile(
    optimizer='adam',
    loss='categorical_crossentropy',
    metrics=['accuracy']
)

# Train, then report loss/accuracy on the training set.
model.fit(x_train, y_train, batch_size=128, epochs=10)
model.evaluate(x_train, y_train)
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
22846,
29060,
8808,
764,
541,
2047,
65,
198,
198,
38062,
4142,
7560,
416,
1623,
4820,
2870,
13,
198,
198,
20556,
2393,
318,
5140,
379,
198,
220,
220,
220,
3740,
... | 2.350109 | 457 |
import distutils.dir_util as copy_tree
import glob
import os
import shutil
import tempfile
import runner1c
import runner1c.common as common
import runner1c.exit_code as exit_code
| [
11748,
1233,
26791,
13,
15908,
62,
22602,
355,
4866,
62,
21048,
198,
11748,
15095,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
20218,
7753,
198,
198,
11748,
17490,
16,
66,
198,
11748,
17490,
16,
66,
13,
11321,
355,
2219,
198,
... | 3.327273 | 55 |
"""API configuration."""
import os
from typing import Any, Dict, Set
from elasticsearch import AsyncElasticsearch, Elasticsearch # type: ignore
from stac_fastapi.types.config import ApiSettings
# Field names defined by STAC but excluded from the database model (shared by
# both the sync and async settings classes below).
_forbidden_fields: Set[str] = {"type"}
class ElasticsearchSettings(ApiSettings):
    """API settings for the synchronous Elasticsearch backend."""
    # Fields which are defined by STAC but not included in the database model
    forbidden_fields: Set[str] = _forbidden_fields
    @property
    def create_client(self):
        """Create es client.
        NOTE(review): ``_es_config()`` is defined elsewhere in this module and
        presumably returns the connection kwargs (hosts, auth, ...) — verify.
        """
        return Elasticsearch(**_es_config())
class AsyncElasticsearchSettings(ApiSettings):
    """API settings for the asynchronous Elasticsearch backend."""
    # Fields which are defined by STAC but not included in the database model
    forbidden_fields: Set[str] = _forbidden_fields
    @property
    def create_client(self):
        """Create async elasticsearch client.
        NOTE(review): ``_es_config()`` is defined elsewhere in this module —
        presumably shared with the sync settings class above; verify.
        """
        return AsyncElasticsearch(**_es_config())
| [
37811,
17614,
8398,
526,
15931,
198,
11748,
28686,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
5345,
198,
198,
6738,
27468,
12947,
1330,
1081,
13361,
9527,
3477,
12947,
11,
48567,
12947,
220,
1303,
2099,
25,
8856,
198,
6738,
336,
33... | 3.113793 | 290 |
from efr32fg13p.halconfig import halconfig_types as types
from efr32fg13p.halconfig import halconfig_dependency as dep | [
6738,
304,
8310,
2624,
40616,
1485,
79,
13,
14201,
11250,
1330,
10284,
11250,
62,
19199,
355,
3858,
198,
6738,
304,
8310,
2624,
40616,
1485,
79,
13,
14201,
11250,
1330,
10284,
11250,
62,
45841,
1387,
355,
1207
] | 3.277778 | 36 |
import logging
import re
import scipy.sparse as sp
import numpy as np
import tensorflow as tf
from sklearn.base import ClassifierMixin, BaseEstimator
from sklearn.utils import check_X_y, check_array, check_random_state
from sklearn.utils.multiclass import type_of_target
from sklearn.exceptions import NotFittedError
from sklearn.preprocessing import LabelEncoder
from muffnn.core import TFPicklingBase
# Module-level logger, named after this module per logging convention.
_LOGGER = logging.getLogger(__name__)
class FMClassifier(TFPicklingBase, ClassifierMixin, BaseEstimator):
    """Factorization machine classifier.
    Parameters
    ----------
    rank : int, optional
        Rank of the underlying low-rank representation.
    batch_size : int, optional
        The batch size for learning and prediction. If there are fewer
        examples than the batch size during fitting, then the the number of
        examples will be used instead.
    n_epochs : int, optional
        The number of epochs (iterations through the training data) when
        fitting. These are counted for the positive training examples, not
        the unlabeled data.
    random_state: int, RandomState instance or None, optional
        If int, the random number generator seed. If RandomState instance,
        the random number generator itself. If None, then `np.random` will be
        used.
    lambda_v : float, optional
        L2 regularization strength for the low-rank embedding.
    lambda_beta : float, optional
        L2 regularization strength for the linear coefficients.
    init_scale : float, optional
        Standard deviation of random normal initialization.
    solver : a subclass of `tf.train.Optimizer` or str, optional
        Solver to use. If a string is passed, then the corresponding solver
        from `scipy.optimize.minimize` is used.
    solver_kwargs : dict, optional
        Additional keyword arguments to pass to `solver` upon construction.
        See the TensorFlow documentation for possible options. Typically,
        one would want to set the `learning_rate`.
    Attributes
    ----------
    n_dims_ : int
        Number of input dimensions.
    classes_ : array
        Classes from the data.
    n_classes_ : int
        Number of classes.
    is_sparse_ : bool
        Whether a model taking sparse input was fit.
    """
    def _set_up_graph(self):
        """Initialize TF objects (needed before fitting or restoring)."""
        # Input values.
        if self.is_sparse_:
            # Sparse input arrives as COO pieces (indices, values, dense
            # shape) and is reassembled into an ordered SparseTensor.
            self._x_inds = tf.placeholder(tf.int64, [None, 2], "x_inds")
            self._x_vals = tf.placeholder(tf.float32, [None], "x_vals")
            self._x_shape = tf.placeholder(tf.int64, [2], "x_shape")
            self._x = tf.sparse_reorder(
                tf.SparseTensor(self._x_inds, self._x_vals, self._x_shape))
            # Element-wise square of X, needed by the interaction term below.
            x2 = tf.sparse_reorder(
                tf.SparseTensor(self._x_inds,
                                self._x_vals * self._x_vals,
                                self._x_shape))
            # Sparse-aware matmul keeps X sparse through the algebra below.
            matmul = tf.sparse_tensor_dense_matmul
        else:
            self._x = tf.placeholder(tf.float32, [None, self.n_dims_], "x")
            x2 = self._x * self._x
            matmul = tf.matmul
        self._sample_weight = \
            tf.placeholder(np.float32, [None], "sample_weight")
        # Binary targets are floats (sigmoid loss); multiclass are int labels.
        if self._output_size == 1:
            self._y = tf.placeholder(tf.float32, [None], "y")
        else:
            self._y = tf.placeholder(tf.int32, [None], "y")
        with tf.variable_scope("fm"):
            self._v = tf.get_variable(
                "v", [self.rank, self.n_dims_, self._output_size])
            self._beta = tf.get_variable(
                "beta", [self.n_dims_, self._output_size])
            self._beta0 = tf.get_variable("beta0", [self._output_size])
        # Factorization-machine pairwise interaction term, via the standard
        # identity 0.5 * sum_k [(X v_k)^2 - (X^2)(v_k^2)], one term per rank.
        vx = tf.stack([matmul(self._x, self._v[i, :, :])
                       for i in range(self.rank)], axis=-1)
        v2 = self._v * self._v
        v2x2 = tf.stack([matmul(x2, v2[i, :, :])
                         for i in range(self.rank)], axis=-1)
        int_term = 0.5 * tf.reduce_sum(tf.square(vx) - v2x2, axis=-1)
        self._logit_y_proba \
            = self._beta0 + matmul(self._x, self._beta) + int_term
        # Binary classification uses a single sigmoid logit; multiclass uses
        # a softmax over n_classes_ logits.
        # NOTE(review): reduce_weighted_mean is not imported in this excerpt;
        # presumably defined/imported elsewhere in the module — confirm.
        if self._output_size == 1:
            self._logit_y_proba = tf.squeeze(self._logit_y_proba)
            cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(
                logits=self._logit_y_proba,
                labels=self._y)
            self._obj_func = reduce_weighted_mean(
                cross_entropy, self._sample_weight)
            self._y_proba = tf.sigmoid(self._logit_y_proba)
        else:
            cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=self._logit_y_proba,
                labels=self._y)
            self._obj_func = reduce_weighted_mean(
                cross_entropy, self._sample_weight)
            self._y_proba = tf.nn.softmax(self._logit_y_proba)
        # Optional L2 penalties on the embedding and the linear coefficients.
        if self.lambda_v > 0:
            self._obj_func \
                += self.lambda_v * tf.reduce_sum(tf.square(self._v))
        if self.lambda_beta > 0:
            self._obj_func \
                += self.lambda_beta * tf.reduce_sum(tf.square(self._beta))
        # A string solver selects a scipy.optimize method (full-batch);
        # otherwise a tf.train.Optimizer class does mini-batch updates.
        if isinstance(self.solver, str):
            from tensorflow.contrib.opt import ScipyOptimizerInterface
            self._train_step = ScipyOptimizerInterface(
                self._obj_func,
                method=self.solver,
                options=self.solver_kwargs if self.solver_kwargs else {})
        else:
            self._train_step = self.solver(
                **self.solver_kwargs if self.solver_kwargs else {}).minimize(
                self._obj_func)
    def _check_data(self, X):
        """check input data
        Raises an error if number of features doesn't match.
        If the estimator has not yet been fitted, then do nothing.
        """
        if self._is_fitted:
            if X.shape[1] != self.n_dims_:
                raise ValueError("Number of features in the input data does "
                                 "not match the number assumed by the "
                                 "estimator!")
    def fit(self, X, y, monitor=None, sample_weight=None):
        """Fit the classifier.
        Parameters
        ----------
        X : numpy array or sparse matrix [n_samples, n_features]
            Training data.
        y : numpy array [n_samples]
            Targets.
        monitor : callable, optional
            The monitor is called after each iteration with the current
            iteration, a reference to the estimator, and a dictionary with
            {'loss': loss_value} representing the loss calculated by the
            objective function at this iteration.
            If the callable returns True the fitting procedure is stopped.
            The monitor can be used for various things such as computing
            held-out estimates, early stopping, model introspection,
            and snapshotting.
        sample_weight : numpy array of shape [n_samples,]
            Per-sample weights. Re-scale the loss per sample.
            Higher weights force the estimator to put more emphasis
            on these samples. Sample weights are normalized per-batch.
        Returns
        -------
        self : returns an instance of self.
        """
        _LOGGER.info("Fitting %s", re.sub(r"\s+", r" ", repr(self)))
        # Mark the model as not fitted (i.e., not fully initialized based on
        # the data).
        self._is_fitted = False
        # Call partial fit, which will initialize and then train the model.
        return self.partial_fit(X, y, monitor=monitor,
                                sample_weight=sample_weight)
    def partial_fit(self, X, y, classes=None, monitor=None,
                    sample_weight=None):
        """Fit the classifier.
        Parameters
        ----------
        X : numpy array or sparse matrix [n_samples, n_features]
            Training data.
        y : numpy array [n_samples]
            Targets.
        classes : array, shape (n_classes,)
            Classes to be used across calls to partial_fit. If not set in the
            first call, it will be inferred from the given targets. If
            subsequent calls include additional classes, they will fail.
        monitor : callable, optional
            The monitor is called after each iteration with the current
            iteration, a reference to the estimator, and a dictionary with
            {'loss': loss_value} representing the loss calculated by the
            objective function at this iteration.
            If the callable returns True the fitting procedure is stopped.
            The monitor can be used for various things such as computing
            held-out estimates, early stopping, model introspection,
            and snapshotting.
        sample_weight : numpy array of shape [n_samples,]
            Per-sample weights. Re-scale the loss per sample.
            Higher weights force the estimator to put more emphasis
            on these samples. Sample weights are normalized per-batch.
        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y, accept_sparse='csr')
        if sample_weight is not None:
            sample_weight = check_array(sample_weight, ensure_2d=False)
        # check target type
        target_type = type_of_target(y)
        if target_type not in ['binary', 'multiclass']:
            # Raise an error, as in
            # sklearn.utils.multiclass.check_classification_targets.
            raise ValueError("Unknown label type: %s" % target_type)
        # Initialize the model if it hasn't been already by a previous call.
        if not self._is_fitted:
            self._random_state = check_random_state(self.random_state)
            assert self.batch_size > 0, "batch_size <= 0"
            self.n_dims_ = X.shape[1]
            if classes is not None:
                self._enc = LabelEncoder().fit(classes)
            else:
                self._enc = LabelEncoder().fit(y)
            self.classes_ = self._enc.classes_
            self.n_classes_ = len(self.classes_)
            # Binary problems use one sigmoid output; multiclass uses softmax.
            if self.n_classes_ <= 2:
                self._output_size = 1
            else:
                self._output_size = self.n_classes_
            if sp.issparse(X):
                self.is_sparse_ = True
            else:
                self.is_sparse_ = False
            # Instantiate the graph. TensorFlow seems easier to use by just
            # adding to the default graph, and as_default lets you temporarily
            # set a graph to be treated as the default graph.
            self.graph_ = tf.Graph()
            with self.graph_.as_default():
                tf.set_random_seed(self._random_state.randint(0, 10000000))
                tf.get_variable_scope().set_initializer(
                    tf.random_normal_initializer(stddev=self.init_scale))
                # NOTE(review): _build_tf_graph, _session and _make_feed_dict
                # presumably come from TFPicklingBase — confirm in muffnn.core.
                self._build_tf_graph()
                # Train model parameters.
                self._session.run(tf.global_variables_initializer())
            # Set an attributed to mark this as at least partially fitted.
            self._is_fitted = True
        # Check input data against internal data.
        # Raises an error on failure.
        self._check_data(X)
        # transform targets
        if sp.issparse(y):
            y = y.toarray()
        y = self._enc.transform(y)
        # Train the model with the given data.
        with self.graph_.as_default():
            if not isinstance(self.solver, str):
                # Mini-batch SGD path: shuffle, then sweep batches per epoch.
                n_examples = X.shape[0]
                indices = np.arange(n_examples)
                for epoch in range(self.n_epochs):
                    self._random_state.shuffle(indices)
                    for start_idx in range(0, n_examples, self.batch_size):
                        max_ind = min(start_idx + self.batch_size, n_examples)
                        batch_ind = indices[start_idx:max_ind]
                        if sample_weight is None:
                            batch_sample_weight = None
                        else:
                            batch_sample_weight = sample_weight[batch_ind]
                        feed_dict = self._make_feed_dict(
                            X[batch_ind],
                            y[batch_ind],
                            sample_weight=batch_sample_weight)
                        obj_val, _ = self._session.run(
                            [self._obj_func, self._train_step],
                            feed_dict=feed_dict)
                        _LOGGER.debug("objective: %.4f, epoch: %d, idx: %d",
                                      obj_val, epoch, start_idx)
                    _LOGGER.info("objective: %.4f, epoch: %d, idx: %d",
                                 obj_val, epoch, start_idx)
                    # The monitor may request early stopping after any epoch.
                    if monitor:
                        stop_early = monitor(epoch, self, {'loss': obj_val})
                        if stop_early:
                            _LOGGER.info(
                                "stopping early due to monitor function.")
                            return self
            else:
                # Full-batch path: hand the whole problem to the SciPy solver.
                feed_dict = self._make_feed_dict(
                    X, y, sample_weight=sample_weight)
                self._train_step.minimize(self._session,
                                          feed_dict=feed_dict)
        return self
    def predict_log_proba(self, X):
        """Compute log p(y=1).
        Parameters
        ----------
        X : numpy array or sparse matrix [n_samples, n_features]
            Data.
        Returns
        -------
        numpy array [n_samples]
            Log probabilities.
        """
        if not self._is_fitted:
            raise NotFittedError("Call fit before predict_log_proba!")
        return np.log(self.predict_proba(X))
    def predict_proba(self, X):
        """Compute p(y=1).
        Parameters
        ----------
        X : numpy array or sparse matrix [n_samples, n_features]
            Data.
        Returns
        -------
        numpy array [n_samples]
            Probabilities.
        """
        if not self._is_fitted:
            raise NotFittedError("Call fit before predict_proba!")
        X = check_array(X, accept_sparse='csr')
        # Check input data against internal data.
        # Raises an error on failure.
        self._check_data(X)
        # Compute weights in batches.
        probs = []
        start_idx = 0
        n_examples = X.shape[0]
        with self.graph_.as_default():
            while start_idx < n_examples:
                X_batch = \
                    X[start_idx:min(start_idx + self.batch_size, n_examples)]
                # NOTE(review): zeros of length n_dims_ are fed as dummy
                # targets here (only _y_proba is evaluated); a batch-length
                # vector would seem more natural — confirm intent.
                feed_dict = self._make_feed_dict(
                    X_batch, np.zeros(self.n_dims_))
                start_idx += self.batch_size
                probs.append(np.atleast_1d(self._y_proba.eval(
                    session=self._session, feed_dict=feed_dict)))
        probs = np.concatenate(probs, axis=0)
        # Binary case: expand P(y=1) into two columns [P(y=0), P(y=1)].
        if probs.ndim == 1:
            return np.column_stack([1.0 - probs, probs])
        else:
            return probs
    def predict(self, X):
        """Compute the predicted class.
        Parameters
        ----------
        X : numpy array or sparse matrix [n_samples, n_features]
            Data.
        Returns
        -------
        numpy array [n_samples]
            Predicted class.
        """
        if not self._is_fitted:
            raise NotFittedError("Call fit before predict!")
        # argmax over class probabilities, mapped back to the original labels.
        return self.classes_[self.predict_proba(X).argmax(axis=1)]
| [
11748,
18931,
198,
11748,
302,
198,
198,
11748,
629,
541,
88,
13,
82,
29572,
355,
599,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
198,
6738,
1341,
35720,
13,
8692,
1330,
5016,
7483,
35608,
259,
... | 2.081917 | 7,532 |
from monitorrent.plugins.trackers import Topic
| [
6738,
5671,
1156,
13,
37390,
13,
11659,
364,
1330,
47373,
628
] | 4.363636 | 11 |
""" Function for loading go dependencies for the go jsonformat library"""
load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository")
def fhir_go_dependencies():
    """ Loads dependencies of the Go FHIR library"""
    # Bootstrap the Go toolchain and gazelle (order follows rules_go docs).
    go_rules_dependencies()
    go_register_toolchains()
    gazelle_dependencies()
    # Error-wrapping helpers (github.com/pkg/errors).
    go_repository(
        name = "com_github_pkg_errors",
        importpath = "github.com/pkg/errors",
        tag = "v0.9.1",
    )
    # CamelCase <-> snake_case identifier conversion.
    go_repository(
        name = "com_github_serenize_snaker",
        commit = "a683aaf2d516deecd70cad0c72e3ca773ecfcef0",
        importpath = "github.com/serenize/snaker",
    )
    # Leveled logging (glog); pinned to a commit rather than a release tag.
    go_repository(
        name = "com_github_golang_glog",
        importpath = "github.com/golang/glog",
        tag = "23def4e6c14b4da8ac2ed8007337bc5eb5007998",
    )
    # High-performance drop-in replacement for encoding/json.
    go_repository(
        name = "com_github_json_iterator_go",
        importpath = "github.com/json-iterator/go",
        tag = "v1.1.9",
    )
    # Vitess — presumably pulled in for its SQL/utility packages; confirm.
    go_repository(
        name = "com_github_vitessio",
        importpath = "github.com/vitessio/vitess",
        tag = "vitess-parent-3.0.0",
    )
    # Simple string-set data structure.
    go_repository(
        name = "com_bitbucket_creachadair_stringset",
        importpath = "bitbucket.org/creachadair/stringset",
        tag = "v0.0.8",
    )
    # Deep-equality comparisons, mostly used in tests.
    go_repository(
        name = "com_github_google_go_cmp",
        importpath = "github.com/google/go-cmp",
        tag = "v0.3.0",
    )
    # Protocol buffer runtime (the new google.golang.org/protobuf API).
    go_repository(
        name = "org_golang_google_protobuf",
        commit = "d165be301fb1e13390ad453281ded24385fd8ebc",
        importpath = "google.golang.org/protobuf",
    )
    # Transitive dependencies of json-iterator.
    go_repository(
        name = "com_github_modern_go_reflect2",
        importpath = "github.com/modern-go/reflect2",
        commit = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd",
    )
    go_repository(
        name = "com_github_modern_go_concurrent",
        importpath = "github.com/modern-go/concurrent",
        commit = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94",
    )
| [
37811,
15553,
329,
11046,
467,
20086,
329,
262,
467,
33918,
18982,
5888,
37811,
198,
198,
2220,
7203,
31,
952,
62,
65,
41319,
62,
38785,
62,
2188,
1003,
2188,
25,
10378,
82,
13,
65,
48274,
1600,
366,
2188,
62,
30238,
62,
25981,
38861,... | 2.032417 | 1,018 |
[](http://introml.analyticsdojo.com)
<center><h1>Introduction to Python - Null Values</h1></center>
<center><h3><a href = 'http://introml.analyticsdojo.com'>introml.analyticsdojo.com</a></h3></center>
# Null Values
## Running Code using Kaggle Notebooks
- Kaggle utilizes Docker to create a fully functional environment for hosting competitions in data science.
- You could download/run this locally or run it online.
- Kaggle has created an incredible resource for learning analytics. You can view a number of *toy* examples that can be used to understand data science and also compete in real problems faced by top companies.
!wget https://raw.githubusercontent.com/rpi-techfundamentals/spring2019-materials/master/input/train.csv
!wget https://raw.githubusercontent.com/rpi-techfundamentals/spring2019-materials/master/input/test.csv
### Null Values Typical When Working with Real Data
- Null values `NaN` in Pandas
import numpy as np
import pandas as pd
# Input data files are available in the "../input/" directory.
# Let's input them into a Pandas DataFrame
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
print(train.dtypes)
train.head()
test.head()
#Let's get some general summary statistics
totalRows=len(train.index)
print("There are ", totalRows, " so totalRows-count is equal to missing variables.")
print(train.describe())
print(train.columns)
# We are going to do operations on these columns to show the number of missing values.
train.isnull().sum()
### Dropping NA
- If we drop all NA values, this can dramatically reduce our dataset.
- Here while there are 891 rows total, there are only 183 complete rows
- `dropna()` and `fillna()` are 2 method for dealing with this, but they should be used with caution.
- [Fillna documentation](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.fillna.html)
- [Dropna documentation](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.dropna.html)
# This will drop all rows in which there is any missing values
traindrop=train.dropna()
print(len(traindrop.index))
print(traindrop.isnull().sum())
# This will drop all rows in which there is any missing values
trainfill=train.fillna(0) #This will just fill all values with nulls. Probably not what we want.
print(len(trainfill.index))
print(traindrop.isnull().sum())
# forward-fill previous value forward.
train.fillna(method='ffill')
# forward-fill previous value forward.
train.fillna(method='bfill')
### Customized Approach
- While those approaches are simple, a more customized imputation — such as filling `Age` with its mean — often preserves more information.
average=train.Age.mean()
print(average)
#Let's convert it to an int
average= int(average)
average
#This will select out values that
train.Age.isnull()
#Now we are selecting out those values
train.loc[train.Age.isnull(),"Age"]=average
train
### More Complex Models - Data Imputation
- Could be that Age could be inferred from other variables, such as SibSp, Name, Fare, etc.
- A next step could be to build a more complex regression or tree model that would involve data that was not null.
### Missing Data - Class Values
- We have 2 missing data values for the Embarked Class
- What should we replace them as?
pd.value_counts(train.Embarked)
train.Embarked.isnull().sum()
train[train.Embarked.isnull()]
train.loc[train.Embarked.isnull(),"Embarked"]="S"
This work is licensed under the [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0/) license agreement.
Adopted from [materials](https://github.com/phelps-sg/python-bigdata) Copyright [Steve Phelps](http://sphelps.net) 2014 | [
58,
0,
58,
37702,
14094,
5211,
7639,
16151,
5450,
1378,
12567,
13,
785,
14,
81,
14415,
12,
13670,
10990,
3263,
874,
14,
16469,
23344,
12,
33665,
82,
14,
2436,
672,
14,
9866,
14,
5647,
14,
20311,
12,
6404,
78,
13,
11134,
30,
1831,
... | 3.256864 | 1,129 |
from django.template import loader
from django.conf import settings
import os
from goods.models import SKU
from contents.utils import get_categories
from goods.utils import get_goods_specs, get_breadcrumb
from celery_tasks.main import celery_app
@celery_app.task(name='generate_static_sku_detail_html')
def generate_static_sku_detail_html(sku_id):
    """
    Generate the static product (SKU) detail page.
    :param sku_id: primary key of the SKU to render
    :return: None (writes '<sku_id>.html' under STATICFILES_DIRS[0]/detail/)
    """
    # Look up the SKU record.
    sku = SKU.objects.get(id=sku_id)
    # Fetch the product channel/category tree.
    categories = get_categories()
    # Build the breadcrumb navigation for this SKU's category.
    bread_crumb = get_breadcrumb(sku.category)
    # Build the spec options for the current SKU.
    goods_specs = get_goods_specs(sku)
    # Assemble the template context.
    context = {
        'categories': categories,
        'bread_crumb': bread_crumb,
        'sku': sku,
        'specs': goods_specs
    }
    # Load the detail-page template.
    template = loader.get_template('detail.html')
    # Render the detail page to an HTML string.
    detail_html_text = template.render(context)
    # Write the HTML string to the static dir, named '<sku_id>.html'.
    file_path = os.path.join(settings.STATICFILES_DIRS[0], 'detail/' + str(sku_id) + '.html')
    with open(file_path, 'w', encoding='utf-8') as f:
        f.write(detail_html_text)
6738,
42625,
14208,
13,
28243,
1330,
40213,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
11748,
28686,
198,
198,
6738,
7017,
13,
27530,
1330,
14277,
52,
198,
6738,
10154,
13,
26791,
1330,
651,
62,
66,
26129,
198,
6738,
7017,
13,... | 1.766871 | 652 |
import copy
import functools
import json
import threading

from collections import defaultdict
from typing import List, Dict, Union, Optional, Callable, TypeVar, Iterable, Tuple
def find_key(obj: Union[Dict, List], key: str):
    """
    Resolve a dotted path such as ``a.b.0`` against nested dicts/lists.

    Example::

        {'a': {'b': ['val']}}

    with key ``a.b.0`` yields ``'val'``.  List segments are treated as
    integer indices, dict segments as keys; a missing dict key short-circuits
    to ``None``.

    :param obj: the nested structure to search
    :param key: the dotted lookup path
    :return: the resolved value, or None when a dict segment is absent
    """
    current = obj
    for part in key.split('.'):
        if isinstance(current, list):
            current = current[int(part)]
        else:
            current = current.get(part)
        if current is None:
            return None
    return current
def inin(content: str, pool: List[str]) -> Optional[str]:
    """
    Return the first string in *pool* that contains *content* as a substring
    (here *content* is expected to be shorter than the pool entries).

    Example::

        inin('a', ['asdf', 'fsfsdf'])  # -> 'asdf'

    :param content: substring to look for
    :param pool: candidate strings
    :return: the first matching candidate, or None
    """
    return next((candidate for candidate in pool if content in candidate), None)
def rinin(content: str, pool: List[str]) -> Optional[str]:
    """
    Return the first string in *pool* that is contained in *content*
    (the reverse of :func:`inin` — here *content* is the longer string).

    Example::

        rinin('asdf', ['a', 'fsfsdf'])  # -> 'a'

    :param content: the string searched within
    :param pool: candidate substrings
    :return: the first candidate found inside *content*, or None
    """
    return next((candidate for candidate in pool if candidate in content), None)
IT = TypeVar('IT')


def find(iterable: Iterable[IT], func: Callable[[IT], bool]) -> Tuple[int, Optional[IT]]:
    """
    Return the first item of *iterable* matching the predicate.

    :param iterable: the items to scan
    :param func: predicate returning True for a match
    :return: ``(index, item)`` of the first match, or ``(-1, None)`` when
        nothing matches
    """
    for position, candidate in enumerate(iterable):
        if func(candidate):
            return position, candidate
    return -1, None
def retry(freq: int = 3, retry_hook: Optional[Callable[[int], None]] = None) -> Callable:
    """
    Decorator factory: when the wrapped function raises, call it again,
    up to *freq* total attempts; the final failure is re-raised unchanged.

    Bug fix: the original body returned an undefined name ``decorator``
    (NameError); this implements the decorator per the documented contract.

    :param freq: total number of attempts (>= 1)
    :param retry_hook: optional callback invoked with the 1-based attempt
        number after each failed attempt (i.e. before each retry)
    :return: the decorator
    """
    def decorator(func: Callable) -> Callable:
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(1, freq + 1):
                try:
                    return func(*args, **kwargs)
                except Exception:
                    # Out of attempts: propagate the last failure.
                    if attempt >= freq:
                        raise
                    if retry_hook is not None:
                        retry_hook(attempt)
        return wrapper
    return decorator
def fiber(start: Optional[Callable] = None, end: Optional[Callable] = None):
    """
    Decorator factory: run the wrapped function on a background thread, with
    optional callbacks fired on that thread just before and just after it.

    Bug fix: the original body returned an undefined name ``decorator``
    (NameError); this implements the documented contract.

    NOTE(review): the wrapper returns the started ``threading.Thread`` so
    callers can ``join()`` it — the original return convention was lost with
    the body, so confirm against call sites.

    :param start: callback invoked on the worker thread before the function
    :param end: callback invoked on the worker thread after the function
    :return: the decorator
    """
    def decorator(func: Callable) -> Callable:
        def wrapper(*args, **kwargs):
            def target():
                if start is not None:
                    start()
                func(*args, **kwargs)
                if end is not None:
                    end()
            worker = threading.Thread(target=target)
            worker.start()
            return worker
        return wrapper
    return decorator
class AdvancedJSONEncoder(json.JSONEncoder):
    """
    JSON encoder for the ApiController: serializes ``date``, ``datetime``
    and ``Decimal`` values that the stock encoder rejects.

    Bug fix: the original defined ``find_dict`` but never overrode
    :meth:`default`, so the table was dead code and those types still raised
    TypeError; dispatch through it here.
    """
    # Dispatch table: type name -> serializer producing a JSON-safe value.
    find_dict = {
        'date': lambda v: v.strftime('%Y-%m-%d'),
        'datetime': lambda v: v.strftime('%Y-%m-%d %H:%M'),
        'Decimal': lambda v: v.to_eng_string()
    }

    def default(self, o):
        """Serialize *o* via ``find_dict`` when its type is registered,
        otherwise defer to the base class (which raises TypeError)."""
        serializer = self.find_dict.get(type(o).__name__)
        if serializer is not None:
            return serializer(o)
        return super().default(o)
class UpdateList(list):
    """
    A ``list`` whose :meth:`update` replaces an existing item instead of
    appending a duplicate.  Items are expected to be dicts — value types
    carry no identity, so they are meaningless to ``UpdateList``.

    Hooks (set them on the instance before calling :meth:`update`):

    * ``on_update(old_val, p_object)`` — returns the merged item stored on update.
    * ``on_append(p_object)`` — returns the item stored on append.
    * ``on_fetch_key(p_object)`` — extracts the comparison key from incoming
      data; required whenever ``key`` is a function.
    * ``key`` — either a str naming the dict key used to match existing items,
      or a function ``key(val, fetched_key) -> bool`` returning True for
      "update this item" (False means append).

    Why ``on_fetch_key``: a function ``key`` may match composite keys such as
    ``('home2', True)``.  When :meth:`update` receives plain data it cannot
    know how to derive that composite key, so ``on_fetch_key`` tells it how;
    it is only meaningful when ``key`` is a function.
    """
    # Bug fix: defaults so update() fails with a clear TypeError (not an
    # AttributeError) on a bare instance whose hooks were never assigned.
    key = None
    on_update = None
    on_append = None
    on_fetch_key = None

    def update(self, p_object):
        """
        Like ``append``, but when a matching item already exists it is
        replaced via ``on_update``; otherwise the item is appended.

        :param p_object: incoming item (a dict)
        :return: None
        """
        if not self.on_update:
            self.on_update = lambda o, p: p
        old_val = None
        if isinstance(self.key, str):
            key = p_object.get(self.key) or -1
            if key != -1:
                key, old_val = self.find(lambda val: val[self.key] == key)
        elif hasattr(self.key, '__call__'):
            try:
                key, old_val = self.find(lambda val: self.key(val, self.on_fetch_key(p_object)))
            except TypeError:
                raise TypeError('Function `on_fetch_key` is not defined')
        else:
            raise TypeError('`key` is TypeError')
        if key == -1:
            if self.on_append:
                self.append(self.on_append(p_object))
            else:
                self.append(p_object)
        else:
            super(UpdateList, self).__setitem__(key, self.on_update(old_val, p_object))

    def find(self, callback):
        """
        Return the first item satisfying *callback*.

        :param callback: predicate returning True for a match
        :return: ``(index, item)``, or ``(-1, None)`` when nothing matches
        """
        for index, item in enumerate(self):
            if callback(item):
                return index, item
        # Bug fix: previously fell off the end and returned None, which made
        # the tuple unpacking in update() raise TypeError on any miss.
        return -1, None
| [
11748,
4866,
198,
11748,
33918,
198,
11748,
4704,
278,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
19720,
1330,
7343,
11,
360,
713,
11,
4479,
11,
32233,
11,
4889,
540,
11,
5994,
19852,
11,
40806,
540,
11,
309,
29291,
628,
198,
198... | 1.296744 | 3,471 |
from astropy import units as u
from astropy.modeling import models, fitting
from astropy.stats import sigma_clip
from ccdproc import CCDData
from scipy import signal
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
import sys
sys.path.append('/user/simon/development/soar/goodman')
from pipeline.wcs.wcs import WCS
if __name__ == '__main__':
# _file = 'data/fits/goodman_comp_400M2_GG455_HgArNe.fits'
_file = 'data/fits/goodman_comp_400M2_GG455_Ne.fits'
wav = WavelengthCalibration()
wav(spectrum=_file)
| [
6738,
6468,
28338,
1330,
4991,
355,
334,
198,
198,
6738,
6468,
28338,
13,
4666,
10809,
1330,
4981,
11,
15830,
198,
6738,
6468,
28338,
13,
34242,
1330,
264,
13495,
62,
15036,
198,
6738,
269,
10210,
36942,
1330,
327,
8610,
6601,
198,
6738... | 2.693396 | 212 |
#!/usr/bin/env python3
import torch
from .marginal_log_likelihood import MarginalLogLikelihood
from ..likelihoods import _GaussianLikelihoodBase
from ..distributions import MultivariateNormal
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
28034,
198,
6738,
764,
30887,
1292,
62,
6404,
62,
2339,
11935,
1330,
11899,
1292,
11187,
7594,
11935,
198,
6738,
11485,
2339,
11935,
82,
1330,
4808,
35389,
31562,
7594,
1... | 3.660377 | 53 |
import logging
logger = logging.getLogger('blossom') | [
11748,
18931,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
10786,
2436,
25548,
11537
] | 3.466667 | 15 |
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------------------#
# This file is part of Pyccel which is released under MIT License. See the LICENSE file or #
# go to https://github.com/pyccel/pyccel/blob/master/LICENSE for full license details. #
#------------------------------------------------------------------------------------------#
"""
File providing functions to mimic OpenMP Runtime library routines to allow files to run
in pure python mode
"""
def omp_set_num_threads(num_threads : int):
"""
The omp_set_num_threads routine affects the number of threads
to be used for subsequent parallel regions that do not specify
a num_threads clause, by setting the value of the first element
of the nthreads-var ICV of the current task.
Parameters
----------
num_threads : int
"""
def omp_get_num_threads():
"""
The omp_get_num_threads routine returns the number of threads
in the current team.
"""
return 1
def omp_get_max_threads():
"""
The omp_get_max_threads routine returns an upper bound on the
number of threads that could be used to form a new team if a
parallel construct without a num_threads clause were encountered
after execution returns from this routine.
"""
return 1
def omp_get_thread_num():
"""
The omp_get_thread_num routine returns the thread number,
within the current team, of the calling thread
"""
return 0
def omp_get_num_procs():
"""
The omp_get_num_procs routine returns the number of processors
available to the device.
"""
return 1
def omp_in_parallel():
"""
The omp_in_parallel routine returns true if the active-levels-var
ICV is greater than zero; otherwise, it returns false
"""
return False
def omp_set_dynamic(dynamic_threads : bool):
"""
The omp_set_dynamic routine enables or disables dynamic
adjustment of the number of threads available for the execution
of subsequent parallel regions by setting the value of the
dyn-var ICV
Parameters
----------
: bool
"""
def omp_get_dynamic():
"""
The omp_get_dynamic routine returns the value of the dyn-var
ICV, which determines whether dynamic adjustment of the number
of threads is enabled or disabled.
"""
return False
def omp_get_cancellation():
"""
The omp_get_cancellation routine returns the value of the
cancel-var ICV, which determines if cancellation is enabled
or disabled.
"""
return False
def omp_set_nested(nested : bool):
"""
The deprecated omp_set_nested routine enables or disables
nested parallelism by setting the max-active-levels-var ICV.
Parameters
----------
nested : bool
"""
def omp_get_nested():
"""
The deprecated omp_get_nested routine returns whether nested
parallelism is enabled or disabled, according to the value
of the max-active-levels-var ICV.
"""
return False
def omp_set_schedule(kind : int, chunk_size : int):
"""
The omp_set_schedule routine affects the schedule that is
applied when runtime is used as schedule kind, by setting
the value of the run-sched-var ICV.
Parameters
----------
kind : int
chunk_size : int
"""
def omp_get_schedule():
"""
The omp_get_schedule routine returns the schedule that is
applied when the runtime schedule is used.
Results
-------
kind : int
chunk_size : int
"""
return 1,0
def omp_get_thread_limit():
"""
The omp_get_thread_limit routine returns the maximum number
of OpenMP threads available to participate in the current
contention group.
"""
return 1
def omp_set_max_active_levels(max_levels : int):
"""
The omp_set_max_active_levels routine limits the number of
nested active parallel regions on the device, by setting the
max-active-levels-var ICV
Parameters
----------
max_levels : int
"""
def omp_get_max_active_levels():
"""
The omp_get_max_active_levels routine returns the value of
the max-active-levels-var ICV, which determines the maximum
number of nested active parallel regions on the device.
"""
return 1
def omp_get_level():
"""
The omp_get_level routine returns the value of the levels-var ICV.
"""
return 0
def omp_get_ancestor_thread_num(level : int):
"""
The omp_get_ancestor_thread_num routine returns, for a given
nested level of the current thread, the thread number of the
ancestor of the current thread.
Parameters
----------
level : int
"""
return -1
def omp_get_team_size(level : int):
"""
The omp_get_team_size routine returns, for a given nested
level of the current thread, the size of the thread team to
which the ancestor or the current thread belongs.
Parameters
----------
level : int
"""
return 1
def omp_get_active_level():
"""
The omp_get_active_level routine returns the value of the
active-level-vars ICV.
"""
return 0
def omp_in_final():
"""
The omp_in_final routine returns true if the routine is
executed in a final task region; otherwise, it returns false.
"""
return False
def omp_get_proc_bind():
"""
The omp_get_proc_bind routine returns the thread affinity
policy to be used for the subsequent nested parallel regions
that do not specify a proc_bind clause.
"""
return 0
def omp_get_num_places():
"""
The omp_get_num_places routine returns the number of places
available to the execution environment in the place list.
"""
return 1
def omp_get_place_num_procs(place_num : int):
"""
The omp_get_place_num_procs routine returns the number of
processors available to the execution environment in the
specified place.
Parameters
----------
place_num : int
"""
return 1
def omp_get_place_proc_ids(place_num : int, ids : 'int[:]'):
"""
The omp_get_place_proc_ids routine returns the numerical
identifiers of the processors available to the execution
environment in the specified place.
Parameters
----------
place_num : int
ids : array of ints
To be filled by the function
"""
def omp_get_place_num():
"""
The omp_get_place_num routine returns the place number of
the place to which the encountering thread is bound.
"""
return -1
def omp_get_partition_num_places():
"""
The omp_get_partition_num_places routine returns the number
of places in the place partition of the innermost implicit task.
"""
return 1
def omp_get_partition_place_nums(place_nums : 'int[:]'):
"""
The omp_get_partition_place_nums routine returns the list of
place numbers corresponding to the places in the
place-partition-var ICV of the innermost implicit task.
Parameters
----------
place_nums : array of ints
To be filled by the function
"""
def omp_set_default_device(device_num : int):
"""
The omp_set_default_device routine controls the default
target device by assigning the value of the
default-device-var ICV.
"""
def omp_get_default_device():
"""
The omp_get_default_device routine returns the default
target device.
"""
return 0
def omp_get_num_devices():
"""
The omp_get_num_devices routine returns the number of
target devices.
"""
return 1
def omp_get_num_teams():
"""
The omp_get_num_teams routine returns the number of initial
teams in the current teams region.
"""
return 1
def omp_get_team_num():
"""
The omp_get_team_num routine returns the initial team number
of the calling thread.
"""
def omp_is_initial_device():
"""
The omp_is_initial_device routine returns true if the current
task is executing on the host device; otherwise, it returns
false.
"""
return True
def omp_get_initial_device():
"""
The omp_get_initial_device routine returns a device number
that represents the host device.
"""
return 0
def omp_get_max_task_priority():
"""
The omp_get_max_task_priority routine returns the maximum
value that can be specified in the priority clause.
"""
return 0
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
10097,
22369,
438,
2,
198,
2,
770,
2393,
318,
636,
286,
9485,
66,
5276,
543,
318,
2716,
739,
17168,
13789,
13,
4091,
262,
38559,
24290,
2393,
393,
1303,
198,
2,
4... | 2.908807 | 2,884 |
# -*- coding: utf-8 -*-
# @Project: fluentpython
# @Author: xuzhiyi
# @File name: __init__.py
# @Create time: 2021/8/1 20:20 | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
16775,
25,
43472,
29412,
198,
2,
2488,
13838,
25,
2124,
10277,
5303,
48111,
198,
2,
2488,
8979,
1438,
25,
11593,
15003,
834,
13,
9078,
198,
2,
2488,
16447,
64... | 2.296296 | 54 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: vmware_guest_disk
short_description: Manage disks related to virtual machine in given vCenter infrastructure
description:
- This module can be used to add, remove and update disks belonging to given virtual machine.
- All parameters and VMware object names are case sensitive.
- This module is destructive in nature, please read documentation carefully before proceeding.
- Be careful while removing disk specified as this may lead to data loss.
author:
- Abhijeet Kasurde (@Akasurde) <akasurde@redhat.com>
notes:
- Tested on vSphere 6.0 and 6.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
name:
description:
- Name of the virtual machine.
- This is a required parameter, if parameter C(uuid) or C(moid) is not supplied.
type: str
uuid:
description:
- UUID of the instance to gather facts if known, this is VMware's unique identifier.
- This is a required parameter, if parameter C(name) or C(moid) is not supplied.
type: str
moid:
description:
- Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
- This is required if C(name) or C(uuid) is not supplied.
type: str
folder:
description:
- Destination folder, absolute or relative path to find an existing guest.
- This is a required parameter, only if multiple VMs are found with same name.
- The folder should include the datacenter. ESX's datacenter is ha-datacenter
- 'Examples:'
- ' folder: /ha-datacenter/vm'
- ' folder: ha-datacenter/vm'
- ' folder: /datacenter1/vm'
- ' folder: datacenter1/vm'
- ' folder: /datacenter1/vm/folder1'
- ' folder: datacenter1/vm/folder1'
- ' folder: /folder1/datacenter1/vm'
- ' folder: folder1/datacenter1/vm'
- ' folder: /folder1/datacenter1/vm/folder2'
type: str
datacenter:
description:
- The datacenter name to which virtual machine belongs to.
required: True
type: str
use_instance_uuid:
description:
- Whether to use the VMware instance UUID rather than the BIOS UUID.
default: false
type: bool
disk:
description:
- A list of disks to add or remove.
- The virtual disk related information is provided using this list.
- All values and parameters are case sensitive.
suboptions:
size:
description:
- Disk storage size.
- If size specified then unit must be specified. There is no space allowed in between size number and unit.
- Only first occurrence in disk element will be considered, even if there are multiple size* parameters available.
type: str
size_kb:
description: Disk storage size in kb.
type: int
size_mb:
description: Disk storage size in mb.
type: int
size_gb:
description: Disk storage size in gb.
type: int
size_tb:
description: Disk storage size in tb.
type: int
type:
description:
- The type of disk, if not specified then use C(thick) type for new disk, no eagerzero.
- The disk type C(rdm) is added in version 1.13.0.
type: str
choices: ['thin', 'eagerzeroedthick', 'thick', 'rdm' ]
disk_mode:
description:
- Type of disk mode. If not specified then use C(persistent) mode for new disk.
- If set to 'persistent' mode, changes are immediately and permanently written to the virtual disk.
- If set to 'independent_persistent' mode, same as persistent, but not affected by snapshots.
- If set to 'independent_nonpersistent' mode, changes to virtual disk are made to a redo log and discarded
at power off, but not affected by snapshots.
type: str
choices: ['persistent', 'independent_persistent', 'independent_nonpersistent']
rdm_path:
description:
- Path of LUN for Raw Device Mapping required for disk type C(rdm).
- Only valid if C(type) is set to C(rdm).
type: str
cluster_disk:
description:
- This value allows for the sharing of an RDM between two machines.
- The primary machine holding the RDM uses the default C(False).
- The secondary machine holding the RDM uses C(True).
type: bool
default: False
version_added: '1.17.0'
compatibility_mode:
description: Compatibility mode for raw devices. Required for disk type 'rdm'
type: str
choices: ['physicalMode','virtualMode']
sharing:
description:
- The sharing mode of the virtual disk.
- Setting sharing means that multiple virtual machines can write to the virtual disk.
- Sharing can only be set if C(type) is set to C(eagerzeroedthick)or C(rdm).
type: bool
default: False
datastore:
description: Name of datastore or datastore cluster to be used for the disk.
type: str
autoselect_datastore:
description: Select the less used datastore. Specify only if C(datastore) is not specified.
type: bool
scsi_controller:
description:
- SCSI controller number. Only 4 SCSI controllers are allowed per VM.
- Care should be taken while specifying 'scsi_controller' is 0 and 'unit_number' as 0 as this disk may contain OS.
type: int
choices: [0, 1, 2, 3]
bus_sharing:
description:
- Only functions with Paravirtual SCSI Controller.
- Allows for the sharing of the scsi bus between two virtual machines.
type: str
choices: ['noSharing', 'physicalSharing', 'virtualSharing']
default: 'noSharing'
version_added: '1.17.0'
unit_number:
description:
- Disk Unit Number.
- Valid value range from 0 to 15, except 7 for SCSI Controller.
- Valid value range from 0 to 64, except 7 for Paravirtual SCSI Controller on Virtual Hardware version 14 or higher
- Valid value range from 0 to 29 for SATA controller.
- Valid value range from 0 to 14 for NVME controller.
type: int
required: True
scsi_type:
description:
- Type of SCSI controller. This value is required only for the first occurrence of SCSI Controller.
- This value is ignored, if SCSI Controller is already present or C(state) is C(absent).
type: str
choices: ['buslogic', 'lsilogic', 'lsilogicsas', 'paravirtual']
destroy:
description: If C(state) is C(absent), make sure the disk file is deleted from the datastore. Added in version 2.10.
type: bool
default: True
filename:
description:
- Existing disk image to be used. Filename must already exist on the datastore.
- Specify filename string in C([datastore_name] path/to/file.vmdk) format. Added in version 2.10.
type: str
state:
description:
- State of disk.
- If set to 'absent', disk will be removed permanently from virtual machine configuration and from VMware storage.
- If set to 'present', disk will be added if not present at given Controller and Unit Number.
- or disk exists with different size, disk size is increased, reducing disk size is not allowed.
type: str
choices: ['present', 'absent']
default: 'present'
controller_type:
description:
- This parameter is added for managing disks attaching other types of controllers, e.g., SATA or NVMe.
- If either C(controller_type) or C(scsi_type) is not specified, then use C(paravirtual) type.
type: str
choices: ['buslogic', 'lsilogic', 'lsilogicsas', 'paravirtual', 'sata', 'nvme']
controller_number:
description: This parameter is used with C(controller_type) for specifying controller bus number.
type: int
choices: [0, 1, 2, 3]
iolimit:
description: Section specifies the shares and limit for storage I/O resource.
suboptions:
limit:
description: Section specifies values for limit where the utilization of a virtual machine will not exceed, even if there are available resources.
type: int
shares:
description: Specifies different types of shares user can add for the given disk.
suboptions:
level:
description: Specifies different level for the shares section.
type: str
choices: ['low', 'normal', 'high', 'custom']
level_value:
description: Custom value when C(level) is set as C(custom).
type: int
type: dict
type: dict
shares:
description: Section for iolimit section tells about what are all different types of shares user can add for disk.
suboptions:
level:
description: Tells about different level for the shares section.
type: str
choices: ['low', 'normal', 'high', 'custom']
level_value:
description: Custom value when C(level) is set as C(custom).
type: int
type: dict
default: []
type: list
elements: dict
extends_documentation_fragment:
- community.vmware.vmware.documentation
'''
EXAMPLES = r'''
- name: Add disks to virtual machine using UUID
community.vmware.vmware_guest_disk:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: "{{ datacenter_name }}"
uuid: 421e4592-c069-924d-ce20-7e7533fab926
disk:
- size_mb: 10
type: thin
datastore: datacluster0
state: present
scsi_controller: 1
unit_number: 1
scsi_type: 'paravirtual'
disk_mode: 'persistent'
- size_gb: 10
type: eagerzeroedthick
state: present
autoselect_datastore: True
scsi_controller: 2
scsi_type: 'buslogic'
unit_number: 12
disk_mode: 'independent_persistent'
- size: 10Gb
type: eagerzeroedthick
state: present
autoselect_datastore: True
scsi_controller: 2
scsi_type: 'buslogic'
unit_number: 1
disk_mode: 'independent_nonpersistent'
- filename: "[datastore1] path/to/existing/disk.vmdk"
delegate_to: localhost
register: disk_facts
- name: Add disks with specified shares to the virtual machine
community.vmware.vmware_guest_disk:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: "{{ datacenter_name }}"
disk:
- size_gb: 1
type: thin
datastore: datacluster0
state: present
scsi_controller: 1
unit_number: 1
disk_mode: 'independent_persistent'
shares:
level: custom
level_value: 1300
delegate_to: localhost
register: test_custom_shares
- name: Add physical raw device mapping to virtual machine using name
community.vmware.vmware_guest_disk:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: "{{ datacenter_name }}"
validate_certs: no
name: "Test_VM"
disk:
- type: rdm
state: present
scsi_controller: 1
unit_number: 5
rdm_path: /vmfs/devices/disks/naa.060000003b1234efb453
compatibility_mode: 'physicalMode'
- name: Add virtual raw device mapping to virtual machine using name and virtual mode
community.vmware.vmware_guest_disk:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: "{{ datacenter_name }}"
validate_certs: no
name: "Test_VM"
disk:
- type: rdm
state: present
scsi_controller: 1
unit_number: 5
rdm_path: /vmfs/devices/disks/naa.060000003b1234efb453
compatibility_mode: 'virtualMode'
disk_mode: 'persistent'
- name: Add raw device mapping to virtual machine with Physical bus sharing
community.vmware.vmware_guest_disk:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: "{{ datacenter_name }}"
validate_certs: no
name: "Test_VM"
disk:
- type: rdm
state: present
scsi_controller: 1
unit_number: 5
rdm_path: /vmfs/devices/disks/naa.060000003b1234efb453
compatibility_mode: 'virtualMode'
disk_mode: 'persistent'
bus_sharing: physicalSharing
- name: Add raw device mapping to virtual machine with Physical bus sharing and clustered disk
community.vmware.vmware_guest_disk:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: "{{ datacenter_name }}"
validate_certs: no
name: "Test_VM"
disk:
- type: rdm
state: present
scsi_controller: 1
unit_number: 5
compatibility_mode: 'virtualMode'
disk_mode: 'persistent'
bus_sharing: physicalSharing
filename: "[datastore1] path/to/rdm/disk-marker.vmdk"
- name: create new disk with custom IO limits and shares in IO Limits
community.vmware.vmware_guest_disk:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: "{{ datacenter_name }}"
disk:
- size_gb: 1
type: thin
datastore: datacluster0
state: present
scsi_controller: 1
unit_number: 1
disk_mode: 'independent_persistent'
iolimit:
limit: 1506
shares:
level: custom
level_value: 1305
delegate_to: localhost
register: test_custom_IoLimit_shares
- name: Remove disks from virtual machine using name
community.vmware.vmware_guest_disk:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: "{{ datacenter_name }}"
name: VM_225
disk:
- state: absent
scsi_controller: 1
unit_number: 1
delegate_to: localhost
register: disk_facts
- name: Remove disk from virtual machine using moid
community.vmware.vmware_guest_disk:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: "{{ datacenter_name }}"
moid: vm-42
disk:
- state: absent
scsi_controller: 1
unit_number: 1
delegate_to: localhost
register: disk_facts
- name: Remove disk from virtual machine but keep the VMDK file on the datastore
community.vmware.vmware_guest_disk:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: "{{ datacenter_name }}"
name: VM_225
disk:
- state: absent
scsi_controller: 1
unit_number: 2
destroy: no
delegate_to: localhost
register: disk_facts
- name: Add disks to virtual machine using UUID to SATA and NVMe controller
community.vmware.vmware_guest_disk:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: "{{ datacenter_name }}"
validate_certs: no
uuid: 421e4592-c069-924d-ce20-7e7533fab926
disk:
- size_mb: 256
type: thin
datastore: datacluster0
state: present
controller_type: sata
controller_number: 1
unit_number: 1
disk_mode: 'persistent'
- size_gb: 1
state: present
autoselect_datastore: True
controller_type: nvme
controller_number: 2
unit_number: 3
disk_mode: 'independent_persistent'
delegate_to: localhost
register: disk_facts
'''
RETURN = r'''
disk_status:
description: metadata about the virtual machine's disks after managing them
returned: always
type: dict
sample: {
"0": {
"backing_datastore": "datastore2",
"backing_disk_mode": "persistent",
"backing_eagerlyscrub": false,
"backing_filename": "[datastore2] VM_225/VM_225.vmdk",
"backing_thinprovisioned": false,
"backing_writethrough": false,
"backing_uuid": "421e4592-c069-924d-ce20-7e7533fab926",
"capacity_in_bytes": 10485760,
"capacity_in_kb": 10240,
"controller_key": 1000,
"key": 2000,
"label": "Hard disk 1",
"summary": "10,240 KB",
"unit_number": 0
},
}
'''
import re
try:
from pyVmomi import vim
except ImportError:
pass
from random import randint
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec,\
wait_for_task, find_obj, get_all_objs, get_parent_datacenter
from ansible_collections.community.vmware.plugins.module_utils.vm_device_helper import PyVmomiDeviceHelper
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
25,
357,
66,
8,
2864,
11,
28038,
856,
4935,
198,
2,
15069,
25,
357,
66,
8,
2864,
11,
2275,
71,
2926,
68,
316,
... | 2.433153 | 7,390 |
"""Functions and classes to deduplicate and simplify test code."""
class ColumnAssertionMixin:
"""Mixin class for making columns assertions in tests for Kedro nodes."""
| [
37811,
24629,
2733,
290,
6097,
284,
4648,
84,
489,
5344,
290,
30276,
1332,
2438,
526,
15931,
628,
198,
4871,
29201,
8021,
861,
295,
35608,
259,
25,
198,
220,
220,
220,
37227,
35608,
259,
1398,
329,
1642,
15180,
29965,
287,
5254,
329,
... | 3.645833 | 48 |
import io
import mimetypes
from pathlib import Path
from typing import Optional, Tuple, Union
from django.core.files.uploadedfile import SimpleUploadedFile
from django.utils.timezone import now
from PIL import Image, ImageOps
from .models import TFile
# TODO: Convert this into a class to handle image formatting
| [
11748,
33245,
198,
11748,
17007,
2963,
12272,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
32233,
11,
309,
29291,
11,
4479,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
16624,
13,
25850,
276,
7753,
1330,
17427,
41592,
276,
... | 3.709302 | 86 |
#!/usr/bin/env python3
# https://knmi.nl/kennis-en-datacentrum/achtergrond/data-ophalen-vanuit-een-script
# https://github.com/EnergieID/KNMI-py
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
# https://rethinkdb.com/
import knmi
from latlon import Latitude, Longitude
# Pandas DataFrame.
df = knmi.get_day_data_dataframe(stations=[260])
stations = knmi.stations.values()
sortedStations = sorted(stations, key=lambda s: s.name, reverse=False)
for station in sortedStations:
lat = Latitude(station.latitude).to_string('d% %m% %S% %H')
lon = Longitude(station.longitude).to_string('d% %m% %S% %H')
# station.altitude
print(' * %s, #%d, ll: (%s, %s)' % (station.name, station.number, lat, lon))
for key,value in knmi.variables.items():
print(' * %s: %s' % (key,value))
print('Description')
print(df.describe())
print('Data Frame')
print(df)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
3740,
1378,
15418,
11632,
13,
21283,
14,
74,
10679,
12,
268,
12,
19608,
12643,
6582,
14,
620,
353,
2164,
623,
14,
7890,
12,
2522,
282,
268,
12,
10438,
5013,
12,
6429,
1... | 2.505556 | 360 |
from . import ServiceMixin, ForecastMixin, EpisodeMixin
from datetime import timedelta
from dateutil.parser import parse | [
6738,
764,
1330,
4809,
35608,
259,
11,
4558,
2701,
35608,
259,
11,
7922,
35608,
259,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
6738,
3128,
22602,
13,
48610,
1330,
21136
] | 4 | 30 |
# -*- coding: utf-8 -*-
"""
# youbot Illustrates the V-REP MATLAB bindings, more specifically the way to take a 3D point cloud.
# (C) Copyright Renaud Detry 2013, Thibaut Cuvelier 2017.
# Distributed under the GNU General Public License.
# (See http://www.gnu.org/copyleft/gpl.html)
"""
# VREP
import sim as vrep
# Useful import
import time
import numpy as np
import sys
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from cleanup_vrep import cleanup_vrep
from vrchk import vrchk
from youbot_init import youbot_init
from youbot_hokuyo_init import youbot_hokuyo_init
from youbot_hokuyo import youbot_hokuyo
from youbot_xyz_sensor import youbot_xyz_sensor
# Test the python implementation of a youbot
# Initiate the connection to the simulator.
print('Program started')
# Use the following line if you had to recompile remoteApi
# vrep = remApi('remoteApi', 'extApi.h')
# vrep = remApi('remoteApi')
# Close the connection in case if a residual connection exists
vrep.simxFinish(-1)
clientID = vrep.simxStart('127.0.0.1', 19997, True, True, 2000, 5)
# The time step the simulator is using (your code should run close to it).
timestep = .05
# Synchronous mode
returnCode = vrep.simxSynchronous(clientID, True)
# If you get an error like:
# Remote API function call returned with error code: 64. Explanation: simxStart was not yet called.
# Make sure your code is within a function! You cannot call V-REP from a script.
if clientID < 0:
sys.exit('Failed connecting to remote API server. Exiting.')
print('Connection ' + str(clientID) + ' to remote API server open')
# Make sure we close the connection whenever the script is interrupted.
# cleanup_vrep(vrep, id)
# This will only work in "continuous remote API server service".
# See http://www.v-rep.eu/helpFiles/en/remoteApiServerSide.htm
vrep.simxStartSimulation(clientID, vrep.simx_opmode_blocking)
# Send a Trigger to the simulator: this will run a time step for the physic engine
# because of the synchronous mode. Run several iterations to stabilize the simulation
for i in range(int(1./timestep)):
vrep.simxSynchronousTrigger(clientID)
vrep.simxGetPingTime(clientID)
# Retrieve all handles, mostly the Hokuyo.
h = youbot_init(vrep, clientID)
h = youbot_hokuyo_init(vrep, h)
# Send a Trigger to the simulator: this will run a time step for the physic engine
# because of the synchronous mode. Run several iterations to stabilize the simulation
for i in range(int(1./timestep)):
vrep.simxSynchronousTrigger(clientID)
vrep.simxGetPingTime(clientID)
# Read data from the depth camera (Hokuyo)
# Reading a 3D image costs a lot to VREP (it has to simulate the image). It also requires a lot of
# bandwidth, and processing a 3D point cloud (for instance, to find one of the boxes or cylinders that
# the robot has to grasp) will take a long time in MATLAB. In general, you will only want to capture a 3D
# image at specific times, for instance when you believe you're facing one of the tables.
# Reduce the view angle to pi/8 in order to better see the objects. Do it only once.
# ^^^^^^ ^^^^^^^^^^ ^^^^ ^^^^^^^^^^^^^^^
# simxSetFloatSignal simx_opmode_oneshot_wait
# |
# rgbd_sensor_scan_angle
# The depth camera has a limited number of rays that gather information. If this number is concentrated
# on a smaller angle, the resolution is better. pi/8 has been determined by experimentation.
res = vrep.simxSetFloatSignal(clientID, 'rgbd_sensor_scan_angle', np.pi/8, vrep.simx_opmode_oneshot_wait)
vrchk(vrep, res) # Check the return value from the previous V-REP call (res) and exit in case of error.
vrep.simxSynchronousTrigger(clientID)
vrep.simxGetPingTime(clientID)
# Ask the sensor to turn itself on, take A SINGLE POINT CLOUD, and turn itself off again.
# ^^^ ^^^^^^ ^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# simxSetIntegerSignal 1 simx_opmode_oneshot_wait
# |
# handle_xyz_sensor
res = vrep.simxSetIntegerSignal(clientID, 'handle_xyz_sensor', 1, vrep.simx_opmode_oneshot_wait)
vrchk(vrep, res)
vrep.simxSynchronousTrigger(clientID)
vrep.simxGetPingTime(clientID)
# Then retrieve the last point cloud the depth sensor took.
# If you were to try to capture multiple images in a row, try other values than
# vrep.simx_opmode_oneshot_wait.
print('Capturing point cloud...\n');
pts = youbot_xyz_sensor(vrep, h, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(clientID)
vrep.simxGetPingTime(clientID)
# Each column of pts has [x;y;z;distancetosensor]. However, plot3 does not have the same frame of reference as
# the output data. To get a correct plot, you should invert the y and z dimensions.
# Plot all the points.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(pts[:, 0], pts[:, 2], pts[:, 1], marker="*")
# Plot the points of the wall (further away than 1.87 m, which is determined either in the simulator by measuring
# distances or by trial and error) in a different colour. This value is only valid for this robot position, of
# course. This simple test ignores the variation of distance along the wall (distance between a point and several
# points on a line).
#pts_wall = pts[pts[:, 3] >= 1.87]
#ax.scatter(pts_wall[:, 0], pts_wall[:, 2], pts_wall[:, 1], marker="+")
plt.show()
cleanup_vrep(vrep, clientID)
print('Simulation has stopped')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
2,
345,
13645,
23279,
9700,
262,
569,
12,
35316,
36775,
48780,
34111,
11,
517,
5734,
262,
835,
284,
1011,
257,
513,
35,
966,
6279,
13,
198,
198,
2,
357,
... | 2.887782 | 1,907 |
# TODO:
# - cached scenes
from __future__ import division
from libtbx.math_utils import roundoff
from cctbx.miller import display2 as display
from cctbx.array_family import flex
from scitbx import graphics_utils
from cctbx import miller
from libtbx.utils import Sorry
from websocket_server import WebsocketServer
import threading, math, sys
from time import sleep
import os.path, time
import libtbx
import numpy as np
import webbrowser, tempfile
#--- user input and settings
"""
# python2 code
from websocket_server import WebsocketServer
import threading, math
from time import sleep
nc = {}
def new_client(client, server):
nc = client
print "got a new client:", nc
def on_message(client, server, message):
print message
websocket.enableTrace(True)
server = WebsocketServer(7894, host='127.0.0.1')
server.set_fn_new_client(new_client)
server.set_fn_message_received(on_message)
wst = threading.Thread(target=server.run_forever)
wst.daemon = True
wst.start()
def LoopSendMessages():
x = 0.0
i=0
while server.clients:
nc = server.clients[0]
x += 0.2
alpha = (math.cos(x) +1.0 )/2.0
msg = u"alpha, 2, %f" %alpha
server.send_message(server.clients[0], msg )
r = (math.cos(x) +1.0 )/2.0
g = (math.cos(x+1) +1.0 )/2.0
b = (math.cos(x+2) +1.0 )/2.0
msg = u"colour, 1, %d, %f, %f, %f" %(i,r,g,b)
server.send_message(server.clients[0], msg )
sleep(0.2)
"""
"""
# python3 code
import asyncio
import math
import websockets
async def time(websocket, path):
x = 0
for i in range(1000):
x += 0.2
alpha = (math.cos(x) +1.0 )/2.0
msg = u"alpha, 2, %f" %alpha
await websocket.send( msg )
r = (math.cos(x) +1.0 )/2.0
g = (math.cos(x+1) +1.0 )/2.0
b = (math.cos(x+2) +1.0 )/2.0
msg = u"colour, 1, %d, %f, %f, %f" %(i,r,g,b)
await websocket.send( msg )
message = await websocket.recv()
print( message)
await asyncio.sleep(0.2)
start_server = websockets.serve(time, '127.0.0.1', 7894)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
"""
| [
198,
2,
16926,
46,
25,
198,
2,
220,
532,
39986,
8188,
198,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
9195,
83,
65,
87,
13,
11018,
62,
26791,
1330,
2835,
2364,
198,
6738,
269,
310,
65,
87,
13,
76,
4665,
1330,
3359,
17,
... | 2.341034 | 909 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
11,
15720,
602,
628
] | 2.891892 | 37 |
import json
from pprint import pprint
import re

import termplotlib as tpl

# Maps image name -> value; the value is progressively replaced:
# content hash -> first layer diff id -> stored size (bytes, as a string).
image_data = {}

# Matches digest-pinned references such as "repo/name@sha256"; those are
# skipped and only tag-style references are collected.  Equivalent to the
# original ((\w+/)?)* but without the redundant nested quantifier, and
# compiled once instead of on every iteration.
DIGEST_RE = re.compile(r"(\w+/)*\w+@sha256")

# Reading files under /var/lib/docker needs root permissions.
with open("/var/lib/docker/image/btrfs/repositories.json", "r") as f3:
    js = json.loads(f3.read())
for reponame in js['Repositories']:
    for image in js['Repositories'][reponame]:
        if not DIGEST_RE.match(image):
            # "name:tag": "sha256:<hash>" -> keep only the hex digest.
            image_data[image] = js['Repositories'][reponame][image].split(':')[1]

# Resolve each image's content hash to the id of its first layer.
for img, ihash in image_data.items():
    with open("/var/lib/docker/image/btrfs/imagedb/content/sha256/%s" % ihash, "r") as f2:
        meta = json.loads(f2.read())
    # The first diff_id doubles as the layerdb chain id for the base layer,
    # which is the entry that always has a size file.
    image_data[img] = meta['rootfs']['diff_ids'][0].split(':')[1]

# Look up the stored size of each image's base layer.
for img, layer_id in image_data.items():
    with open("/var/lib/docker/image/btrfs/layerdb/sha256/%s/size" % layer_id, "r") as f1:
        image_data[img] = f1.read()

# Render an ASCII horizontal bar chart of image sizes.
fig = tpl.figure()
vals = [int(x) for x in image_data.values()]
fig.barh(vals, list(image_data.keys()), force_ascii=True)
fig.show()
| [
11748,
33918,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
11748,
302,
198,
11748,
3381,
29487,
8019,
355,
256,
489,
198,
198,
9060,
62,
7890,
796,
23884,
198,
2,
761,
21627,
198,
198,
4449,
1077,
796,
10148,
198,
9600,
62,
312,
796,
... | 2.145614 | 570 |
# coding: utf-8
"""
Digitick REST API
The Digitick REST API is a set of methods giving access to catalog, user and cart management.
OpenAPI spec version: v1.0
Contact: contact@digitick.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Show(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, id=None, start=None, end=None, stock_availability_status=None, sales_status=None):
        """
        Show - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
            and the value is attribute type.
        :param dict attributeMap: The key is attribute name
            and the value is json key in definition.
        """
        # Swagger metadata: attribute name -> declared type.
        self.swagger_types = {
            'id': 'int',
            'start': 'str',
            'end': 'str',
            'stock_availability_status': 'str',
            'sales_status': 'str'
        }
        # Swagger metadata: attribute name -> JSON key in the API payload.
        self.attribute_map = {
            'id': 'id',
            'start': 'start',
            'end': 'end',
            'stock_availability_status': 'stockAvailabilityStatus',
            'sales_status': 'salesStatus'
        }

        self._id, self._start, self._end = id, start, end
        self._stock_availability_status = stock_availability_status
        self._sales_status = sales_status

    @property
    def id(self):
        """
        The id of this Show.

        :rtype: int
        """
        return self._id

    @id.setter
    def id(self, id):
        """
        Update the id of this Show.

        :type: int
        """
        self._id = id

    @property
    def start(self):
        """
        The start of this Show.

        :rtype: str
        """
        return self._start

    @start.setter
    def start(self, start):
        """
        Update the start of this Show.

        :type: str
        """
        self._start = start

    @property
    def end(self):
        """
        The end of this Show.

        :rtype: str
        """
        return self._end

    @end.setter
    def end(self, end):
        """
        Update the end of this Show.

        :type: str
        """
        self._end = end

    @property
    def stock_availability_status(self):
        """
        The stock_availability_status of this Show.

        :rtype: str
        """
        return self._stock_availability_status

    @stock_availability_status.setter
    def stock_availability_status(self, stock_availability_status):
        """
        Update the stock_availability_status of this Show.

        :type: str
        """
        self._stock_availability_status = stock_availability_status

    @property
    def sales_status(self):
        """
        The sales_status of this Show.

        :rtype: str
        """
        return self._sales_status

    @sales_status.setter
    def sales_status(self, sales_status):
        """
        Update the sales_status of this Show.

        :type: str
        """
        self._sales_status = sales_status

    def to_dict(self):
        """
        Return the model's properties as a plain dict.
        """
        out = {}
        for name in self.swagger_types:
            val = getattr(self, name)
            if isinstance(val, list):
                out[name] = [v.to_dict() if hasattr(v, "to_dict") else v
                             for v in val]
            elif hasattr(val, "to_dict"):
                out[name] = val.to_dict()
            elif isinstance(val, dict):
                out[name] = {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                             for k, v in val.items()}
            else:
                out[name] = val
        return out

    def to_str(self):
        """
        Return the string representation of the model.
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`.
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Return True when both objects are Shows with equal state.
        """
        return isinstance(other, Show) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Return True when the objects are not equal.
        """
        return not self == other
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
7367,
270,
624,
30617,
7824,
628,
220,
220,
220,
383,
7367,
270,
624,
30617,
7824,
318,
257,
900,
286,
5050,
3501,
1895,
284,
18388,
11,
2836,
290,
6383,
4542,
1... | 2.123623 | 2,451 |
import time
import progressbar
import os

# Process every entry of the directory while progressbar renders a live
# progress indicator on stdout.
for entry in progressbar.progressbar(os.listdir('D:\Python')):
    do_something(entry)
| [
11748,
640,
198,
11748,
4371,
5657,
198,
11748,
28686,
198,
198,
16624,
796,
28686,
13,
4868,
15908,
10786,
35,
7479,
37906,
11537,
198,
198,
1640,
1312,
287,
4371,
5657,
13,
33723,
5657,
7,
16624,
2599,
198,
197,
4598,
62,
18927,
7,
... | 2.854167 | 48 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: yandex/cloud/ai/vision/v1/text_detection.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from yandex.cloud.ai.vision.v1 import primitives_pb2 as yandex_dot_cloud_dot_ai_dot_vision_dot_v1_dot_primitives__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='yandex/cloud/ai/vision/v1/text_detection.proto',
package='yandex.cloud.ai.vision.v1',
syntax='proto3',
serialized_options=_b('\n\035yandex.cloud.api.ai.vision.v1ZDgithub.com/yandex-cloud/go-genproto/yandex/cloud/ai/vision/v1;vision'),
serialized_pb=_b('\n.yandex/cloud/ai/vision/v1/text_detection.proto\x12\x19yandex.cloud.ai.vision.v1\x1a*yandex/cloud/ai/vision/v1/primitives.proto\"@\n\x0eTextAnnotation\x12.\n\x05pages\x18\x01 \x03(\x0b\x32\x1f.yandex.cloud.ai.vision.v1.Page\"W\n\x04Page\x12\r\n\x05width\x18\x01 \x01(\x03\x12\x0e\n\x06height\x18\x02 \x01(\x03\x12\x30\n\x06\x62locks\x18\x03 \x03(\x0b\x32 .yandex.cloud.ai.vision.v1.Block\"q\n\x05\x42lock\x12\x38\n\x0c\x62ounding_box\x18\x01 \x01(\x0b\x32\".yandex.cloud.ai.vision.v1.Polygon\x12.\n\x05lines\x18\x02 \x03(\x0b\x32\x1f.yandex.cloud.ai.vision.v1.Line\"\x84\x01\n\x04Line\x12\x38\n\x0c\x62ounding_box\x18\x01 \x01(\x0b\x32\".yandex.cloud.ai.vision.v1.Polygon\x12.\n\x05words\x18\x02 \x03(\x0b\x32\x1f.yandex.cloud.ai.vision.v1.Word\x12\x12\n\nconfidence\x18\x03 \x01(\x01\"\xe6\x01\n\x04Word\x12\x38\n\x0c\x62ounding_box\x18\x01 \x01(\x0b\x32\".yandex.cloud.ai.vision.v1.Polygon\x12\x0c\n\x04text\x18\x02 \x01(\t\x12\x12\n\nconfidence\x18\x03 \x01(\x01\x12\x43\n\tlanguages\x18\x04 \x03(\x0b\x32\x30.yandex.cloud.ai.vision.v1.Word.DetectedLanguage\x1a=\n\x10\x44\x65tectedLanguage\x12\x15\n\rlanguage_code\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x01\x42\x65\n\x1dyandex.cloud.api.ai.vision.v1ZDgithub.com/yandex-cloud/go-genproto/yandex/cloud/ai/vision/v1;visionb\x06proto3')
,
dependencies=[yandex_dot_cloud_dot_ai_dot_vision_dot_v1_dot_primitives__pb2.DESCRIPTOR,])
_TEXTANNOTATION = _descriptor.Descriptor(
name='TextAnnotation',
full_name='yandex.cloud.ai.vision.v1.TextAnnotation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pages', full_name='yandex.cloud.ai.vision.v1.TextAnnotation.pages', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=121,
serialized_end=185,
)
_PAGE = _descriptor.Descriptor(
name='Page',
full_name='yandex.cloud.ai.vision.v1.Page',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='width', full_name='yandex.cloud.ai.vision.v1.Page.width', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='height', full_name='yandex.cloud.ai.vision.v1.Page.height', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='blocks', full_name='yandex.cloud.ai.vision.v1.Page.blocks', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=187,
serialized_end=274,
)
_BLOCK = _descriptor.Descriptor(
name='Block',
full_name='yandex.cloud.ai.vision.v1.Block',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='bounding_box', full_name='yandex.cloud.ai.vision.v1.Block.bounding_box', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lines', full_name='yandex.cloud.ai.vision.v1.Block.lines', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=276,
serialized_end=389,
)
_LINE = _descriptor.Descriptor(
name='Line',
full_name='yandex.cloud.ai.vision.v1.Line',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='bounding_box', full_name='yandex.cloud.ai.vision.v1.Line.bounding_box', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='words', full_name='yandex.cloud.ai.vision.v1.Line.words', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='confidence', full_name='yandex.cloud.ai.vision.v1.Line.confidence', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=392,
serialized_end=524,
)
_WORD_DETECTEDLANGUAGE = _descriptor.Descriptor(
name='DetectedLanguage',
full_name='yandex.cloud.ai.vision.v1.Word.DetectedLanguage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='language_code', full_name='yandex.cloud.ai.vision.v1.Word.DetectedLanguage.language_code', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='confidence', full_name='yandex.cloud.ai.vision.v1.Word.DetectedLanguage.confidence', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=696,
serialized_end=757,
)
_WORD = _descriptor.Descriptor(
name='Word',
full_name='yandex.cloud.ai.vision.v1.Word',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='bounding_box', full_name='yandex.cloud.ai.vision.v1.Word.bounding_box', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='text', full_name='yandex.cloud.ai.vision.v1.Word.text', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='confidence', full_name='yandex.cloud.ai.vision.v1.Word.confidence', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='languages', full_name='yandex.cloud.ai.vision.v1.Word.languages', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_WORD_DETECTEDLANGUAGE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=527,
serialized_end=757,
)
_TEXTANNOTATION.fields_by_name['pages'].message_type = _PAGE
_PAGE.fields_by_name['blocks'].message_type = _BLOCK
_BLOCK.fields_by_name['bounding_box'].message_type = yandex_dot_cloud_dot_ai_dot_vision_dot_v1_dot_primitives__pb2._POLYGON
_BLOCK.fields_by_name['lines'].message_type = _LINE
_LINE.fields_by_name['bounding_box'].message_type = yandex_dot_cloud_dot_ai_dot_vision_dot_v1_dot_primitives__pb2._POLYGON
_LINE.fields_by_name['words'].message_type = _WORD
_WORD_DETECTEDLANGUAGE.containing_type = _WORD
_WORD.fields_by_name['bounding_box'].message_type = yandex_dot_cloud_dot_ai_dot_vision_dot_v1_dot_primitives__pb2._POLYGON
_WORD.fields_by_name['languages'].message_type = _WORD_DETECTEDLANGUAGE
DESCRIPTOR.message_types_by_name['TextAnnotation'] = _TEXTANNOTATION
DESCRIPTOR.message_types_by_name['Page'] = _PAGE
DESCRIPTOR.message_types_by_name['Block'] = _BLOCK
DESCRIPTOR.message_types_by_name['Line'] = _LINE
DESCRIPTOR.message_types_by_name['Word'] = _WORD
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TextAnnotation = _reflection.GeneratedProtocolMessageType('TextAnnotation', (_message.Message,), {
'DESCRIPTOR' : _TEXTANNOTATION,
'__module__' : 'yandex.cloud.ai.vision.v1.text_detection_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.ai.vision.v1.TextAnnotation)
})
_sym_db.RegisterMessage(TextAnnotation)
Page = _reflection.GeneratedProtocolMessageType('Page', (_message.Message,), {
'DESCRIPTOR' : _PAGE,
'__module__' : 'yandex.cloud.ai.vision.v1.text_detection_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.ai.vision.v1.Page)
})
_sym_db.RegisterMessage(Page)
Block = _reflection.GeneratedProtocolMessageType('Block', (_message.Message,), {
'DESCRIPTOR' : _BLOCK,
'__module__' : 'yandex.cloud.ai.vision.v1.text_detection_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.ai.vision.v1.Block)
})
_sym_db.RegisterMessage(Block)
Line = _reflection.GeneratedProtocolMessageType('Line', (_message.Message,), {
'DESCRIPTOR' : _LINE,
'__module__' : 'yandex.cloud.ai.vision.v1.text_detection_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.ai.vision.v1.Line)
})
_sym_db.RegisterMessage(Line)
Word = _reflection.GeneratedProtocolMessageType('Word', (_message.Message,), {
'DetectedLanguage' : _reflection.GeneratedProtocolMessageType('DetectedLanguage', (_message.Message,), {
'DESCRIPTOR' : _WORD_DETECTEDLANGUAGE,
'__module__' : 'yandex.cloud.ai.vision.v1.text_detection_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.ai.vision.v1.Word.DetectedLanguage)
})
,
'DESCRIPTOR' : _WORD,
'__module__' : 'yandex.cloud.ai.vision.v1.text_detection_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.ai.vision.v1.Word)
})
_sym_db.RegisterMessage(Word)
_sym_db.RegisterMessage(Word.DetectedLanguage)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
331,
392,
1069,
14,
17721,
14,
1872,
14,
10178,
14,
85,
16,
14,
5239,
... | 2.354816 | 5,679 |
from lib.ANSIEscape import ANSIEscape
#TODO: copy paste | [
6738,
9195,
13,
15037,
10008,
6794,
1330,
3537,
50,
10008,
6794,
628,
198,
220,
220,
220,
220,
220,
220,
220,
220,
628,
198,
220,
220,
220,
1303,
51,
3727,
46,
25,
4866,
17008
] | 2.181818 | 33 |
"""
This module assumes that OpenCanary has been installed and is running.
In particular it assumes that OpenCanary is logging to /var/tmp/opencanary.log
and that the services it's testing are enabled.
It would be much better to setup tests to start the services needed and provide
the configuration files so that tests can be run without needing to reinstall
and start the service before each test. It would also be better to be able to
test the code directly rather than relying on the out put of logs.
Still this is a start.
"""
import time
import json
from ftplib import FTP, error_perm
import unittest
import socket
import warnings # Used in the TestSSHModule (see comment there)
# These libraries are only needed by the test suite and so aren't in the
# OpenCanary requirements, there is a requirements.txt file in the tests folder
# Simply run `pip install -r opencanary/test/requirements.txt`
import requests
import paramiko
import pymysql
import git
def get_last_log():
    """
    Return the most recent entry of `/var/tmp/opencanary.log`, parsed from
    JSON into a dictionary.
    """
    with open('/var/tmp/opencanary.log', 'r') as log_file:
        lines = log_file.readlines()
    return json.loads(lines[-1])
class TestFTPModule(unittest.TestCase):
    """
    Tests the cases for the FTP module.

    The FTP server should not allow logins and should log each attempt.
    """
    def setUp(self):
        # Bug fix: the tests used ``self.ftp`` but nothing ever created it,
        # so both tests died with AttributeError before exercising the
        # honeypot.  Connect to the local FTP service here.
        self.ftp = FTP()
        self.ftp.connect('localhost')

    def tearDown(self):
        # close() rather than quit(): the honeypot never grants a session,
        # so waiting for a polite reply to QUIT could hang the suite.
        self.ftp.close()

    def test_anonymous_ftp(self):
        """
        Try to connect to the FTP service with no username or password.
        """
        self.assertRaises(error_perm, self.ftp.login)
        log = get_last_log()
        self.assertEqual(log['dst_port'], 21)
        self.assertEqual(log['logdata']['USERNAME'], "anonymous")
        self.assertEqual(log['logdata']['PASSWORD'], "anonymous@")

    def test_authenticated_ftp(self):
        """
        Connect to the FTP service with a test username and password.
        """
        self.assertRaises(error_perm,
                          self.ftp.login,
                          user='test_user',
                          passwd='test_pass')
        last_log = get_last_log()
        self.assertEqual(last_log['dst_port'], 21)
        self.assertEqual(last_log['logdata']['USERNAME'], "test_user")
        self.assertEqual(last_log['logdata']['PASSWORD'], "test_pass")
class TestGitModule(unittest.TestCase):
    """
    Tests the Git Module by trying to clone a repository from localhost.
    """
    def test_log_git_clone(self):
        """
        Check that the git clone attempt was logged
        """
        # This test must be run after the test_clone_a_repository.
        # Unless we add an attempt to clone into this test, or the setup.
        # NOTE(review): no ``test_clone_a_repository`` exists in this module,
        # so this test only passes if an earlier run already attempted a
        # clone against the honeypot -- TODO: perform the clone attempt here
        # (e.g. with the imported ``git`` package) so the test is
        # self-contained.
        last_log = get_last_log()
        # The honeypot records the host and the requested repository path of
        # the attempted clone; both should point at the local test repo.
        self.assertEqual(last_log['logdata']['HOST'], "localhost")
        self.assertEqual(last_log['logdata']['REPO'], "test.git")
class TestHTTPModule(unittest.TestCase):
    """
    Tests the cases for the HTTP module.

    The HTTP server should look like a NAS and present a login box; any
    interaction with the server (GET, POST) should be logged.
    """

    def _assert_request_logged(self):
        """Check the common fields of the latest HTTP log entry; return it."""
        entry = get_last_log()
        self.assertEqual(entry['dst_port'], 80)
        self.assertEqual(entry['logdata']['HOSTNAME'], "localhost")
        self.assertEqual(entry['logdata']['PATH'], "/index.html")
        self.assertIn('python-requests', entry['logdata']['USERAGENT'])
        return entry

    def test_get_http_home_page(self):
        """
        Fetch the home page anonymously.
        """
        response = requests.get('http://localhost/')
        self.assertEqual(response.status_code, 200)
        self.assertIn('Synology RackStation', response.text)
        self._assert_request_logged()

    def test_log_in_to_http_with_basic_auth(self):
        """
        Attempt a login using HTTP basic auth.
        """
        response = requests.post('http://localhost/', auth=('user', 'pass'))
        # The web server currently answers 200; in the future it should
        # answer with a 403 status code instead.
        self.assertEqual(response.status_code, 200)
        self.assertIn('Synology RackStation', response.text)
        self._assert_request_logged()
        # OpenCanary doesn't currently record credentials from basic auth.

    def test_log_in_to_http_with_parameters(self):
        """
        Attempt a login by POSTing the form parameters.
        """
        login_data = {
            'username': 'test_user',
            'password': 'test_pass',
            'OTPcode': '',
            'rememberme': '',
            '__cIpHeRtExt': '',
            'isIframeLogin': 'yes'}
        response = requests.post('http://localhost/index.html', data=login_data)
        # The web server currently answers 200; in the future it should
        # answer with a 403 status code instead.
        self.assertEqual(response.status_code, 200)
        self.assertIn('Synology RackStation', response.text)
        entry = self._assert_request_logged()
        self.assertEqual(entry['logdata']['USERNAME'], "test_user")
        self.assertEqual(entry['logdata']['PASSWORD'], "test_pass")

    def test_get_directory_listing(self):
        """
        Requesting a directory listing must yield 403 Forbidden.
        """
        response = requests.get('http://localhost/css/')
        self.assertEqual(response.status_code, 403)
        self.assertIn('Forbidden', response.text)
        # These requests are not logged at the moment.  Maybe they should be.

    def test_get_non_existent_file(self):
        """
        Requesting a missing file must yield a 404 error message.
        """
        response = requests.get('http://localhost/this/file/doesnt_exist.txt')
        self.assertEqual(response.status_code, 404)
        self.assertIn('Not Found', response.text)
        # These requests are not logged at the moment.  Maybe they should be.

    def test_get_supporting_image_file(self):
        """
        Supporting static assets of the fake NAS skin must be served.
        """
        response = requests.get('http://localhost/img/synohdpack/images/Components/checkbox.png')
        # Just an arbitrary image
        self.assertEqual(response.status_code, 200)
class TestSSHModule(unittest.TestCase):
    """
    Tests the cases for the SSH server
    """
    def setUp(self):
        # Bug fix: the test used ``self.connection`` but nothing ever created
        # it, so it failed with AttributeError instead of testing the server.
        self.connection = paramiko.SSHClient()
        # The honeypot's host key is not in known_hosts; accept it
        # automatically so the test exercises authentication (the behaviour
        # under test) rather than failing host-key verification.
        self.connection.set_missing_host_key_policy(paramiko.AutoAddPolicy())

    def tearDown(self):
        self.connection.close()

    def test_ssh_with_basic_login(self):
        """
        Try to log into the SSH server
        """
        # FIXME: At the time of this writing, paramiko calls cryptography
        # which throws a depreciation warning. It looks like this has been
        # fixed https://github.com/paramiko/paramiko/issues/1369 but the fix
        # hasn't been pushed to pypi. When the fix is pushed we can update
        # and remove the import warnings and the warnings.catch.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            self.assertRaises(paramiko.ssh_exception.AuthenticationException,
                              self.connection.connect,
                              hostname="localhost",
                              port=22,
                              username="test_user",
                              password="test_pass")
        last_log = get_last_log()
        self.assertEqual(last_log['dst_port'], 22)
        self.assertIn('paramiko', last_log['logdata']['REMOTEVERSION'])
        self.assertEqual(last_log['logdata']['USERNAME'], "test_user")
        self.assertEqual(last_log['logdata']['PASSWORD'], "test_pass")
class TestNTPModule(unittest.TestCase):
    """
    Tests the NTP server.  The server never replies, but it logs attempts to
    trigger the MON_GETLIST_1 NTP command, which is abused for DDoS attacks.
    """
    def test_ntp_server_monlist(self):
        """
        Check that the MON_GETLIST_1 NTP command was logged correctly
        """
        # Unlike the other tests there is no response to wait on (e.g. no
        # ``status_code == 200`` as with HTTP), and log entries take about a
        # second to appear -- so give the logger a moment to flush first.
        # A hard-coded delay is an ugly solution, but it works.
        time.sleep(1)
        entry = get_last_log()
        self.assertEqual(entry['logdata']['NTP CMD'], "monlist")
        self.assertEqual(entry['dst_port'], 123)
class TestMySQLModule(unittest.TestCase):
    """
    Tests the MySQL server: a login attempt must be rejected, and the
    attempt (with the password in hashed form) must be logged.
    """
    def test_mysql_server_login(self):
        """
        Try to log in to the MySQL honeypot.
        """
        connect_args = dict(host="localhost",
                            user="test_user",
                            password="test_pass",
                            db='db',
                            charset='utf8mb4',
                            cursorclass=pymysql.cursors.DictCursor)
        with self.assertRaises(pymysql.err.OperationalError):
            pymysql.connect(**connect_args)
        entry = get_last_log()
        # The password is logged as a hex digest, not in cleartext.
        self.assertEqual(entry['logdata']['USERNAME'], "test_user")
        self.assertEqual(entry['logdata']['PASSWORD'], "b2e5ed6a0e59f99327399ced2009338d5c0fe237")
        self.assertEqual(entry['dst_port'], 3306)
# Allow running the suite directly: ``python <this file>``.
if __name__ == '__main__':
    unittest.main()
| [
37811,
198,
1212,
8265,
18533,
326,
4946,
6090,
560,
468,
587,
6589,
290,
318,
2491,
13,
198,
198,
818,
1948,
340,
18533,
326,
4946,
6090,
560,
318,
18931,
284,
1220,
7785,
14,
22065,
14,
9654,
5171,
560,
13,
6404,
198,
392,
326,
26... | 2.376235 | 4,048 |
"""
Abstract base classes for different kinds of feature.
"""
import numpy as np
class Feature:
"""
Feature function base class.
Implements various methods common to feature functions, which are generally
the same across the various features in this library.
"""
@property
class FunctionalFeature(Feature):
    """
    Base class for purely functional features.

    Such features could be applied to arbitrary arrays and have no side
    effects.  This class supplies the bookkeeping they all share (input and
    output counts, plus an optional computing function), which helps keep
    the feature pipeline well-formed.
    """

    def __init__(self, n_input, n_output, func=None, *args, **kwargs):
        """
        Set up the functional feature.

        Args:
            n_input (int): Number of inputs the feature expects.
            n_output (int): Number of outputs the feature will return.
            func (Callable, optional): Function that computes the feature;
                when given, it is installed as this instance's ``apply``.
        """
        super().__init__(n_input, n_output, *args, **kwargs)
        self.n_output = n_output
        self.n_input = n_input
        if func is None:
            return
        # Shadow the inherited ``apply`` with the supplied callable.
        self.apply = func
class OneToMany(Feature):
    """
    Base class for features that expand a single-element input array into a
    multi-element output array.
    """
class ManyToOne(Feature):
    """
    Base class for features that collapse an input array of multiple
    elements into a single-element output array.
    """
class BinaryFeature(Feature):
    """
    Base class for features whose values are binary.
    """
class UnaryFeature(Feature):
    """
    Base class for unary features, i.e. those with a single nonzero bit.
    """
37811,
198,
23839,
2779,
6097,
329,
1180,
6982,
286,
3895,
13,
198,
37811,
198,
11748,
299,
32152,
355,
45941,
220,
628,
198,
4871,
27018,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
27018,
2163,
2779,
1398,
13,
628,
220,
220,
... | 3.037705 | 610 |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.animation as animation
# NOTE(review): p3 and animation appear unused in this chunk; kept in case
# code outside this view relies on them.

# Read the input instructions and the names of the simulated objects.
# Using a context manager guarantees the file is closed even if parsing fails.
with open("../run/input.dat", "r") as f:
    obj_count = int(f.readline())
    # One object name per line; only the first whitespace-separated token is
    # the name. Sorted so the ordering matches the sorted simulation output.
    obj_names = sorted(f.readline().split()[0] for _ in range(obj_count))
    step = float(f.readline())
    sim_length = float(f.readline())
    step_count = float(f.readline())

# Exactly one of the three timing parameters may be given as zero in the
# input file; derive it from the other two.
if step == 0:
    step = sim_length / step_count
elif sim_length == 0:
    sim_length = step * step_count
else:
    step_count = sim_length / step
# Load the simulation results and order the rows first by object name and
# then by original row position ("indCol"), which encodes time order.
sim_results = (
    pd.read_csv("../run/output.dat")
    .assign(indCol=lambda df: df.index)
    .sort_values(["Object", "indCol"])
)
# Number of recorded iterations (time steps) per object.
iters = int(sim_results.count()["indCol"] / obj_count)
# Visualization initialization. The title is added separately; the limit and
# tick lines below can be un-commented and tuned as needed.
fig, ax = plt.subplots()
ax.set_xlabel("X [meters]")
ax.set_ylabel("Y [meters]")
# ax.set_xlim(-3e11, 3e11)
# ax.set_ylim(-3e11, 3e11)
# ax.set_xticks(np.arange(0, 10e9, step=1e9))
# ax.set_yticks(np.arange(0, 10e9, step=1e9))

# Text box in the upper-left corner (axes coordinates) showing the total
# simulated time.
time_label = f'Time: {sim_length} days'
box_props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
ax.text(0.05, 0.95, time_label, transform=ax.transAxes, fontsize=14,
        verticalalignment='top', bbox=box_props)

# Trajectory line for each object; after the sort above, the rows for object
# number idx occupy the contiguous slice [idx * iters, (idx + 1) * iters).
trajectories = []
for idx in range(obj_count):
    lo = idx * iters
    hi = lo + iters
    trajectories.append(
        ax.plot(sim_results["X"][lo:hi], sim_results["Y"][lo:hi],
                label=obj_names[idx])
    )

# Initial-position markers ("x"), one per object.
start_markers = [
    ax.scatter(sim_results["X"].iloc[idx * iters],
               sim_results["Y"].iloc[idx * iters],
               s=25, marker="x")
    for idx in range(obj_count)
]

# Final-position markers ("v"), one per object.
end_markers = [
    ax.scatter(sim_results["X"].iloc[idx * iters + iters - 1],
               sim_results["Y"].iloc[idx * iters + iters - 1],
               s=25, marker="v")
    for idx in range(obj_count)
]

# Display
# plt.legend()
plt.show()
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
285,
489,
62,
25981,
74,
896,
13,
76,
29487,
18,
67,
13,
897,
274,
18,
67,
355,
279,
18... | 2.458128 | 812 |
if __name__ == '__main__':
    # Run the module's self-test (defined elsewhere in this file) when the
    # module is executed as a script rather than imported.
    test()
| [
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1332,
3419,
198,
220,
220,
220,
220,
198
] | 1.916667 | 24 |
from io import StringIO
from django.core import management
from django.core.management.base import BaseCommand
from django.db import connection
def reset_db():
    """
    Reset database to a blank state by removing all the tables and recreating them.
    """
    with connection.cursor() as cursor:
        # Fetch every table in the public schema. The identifiers have to be
        # interpolated directly into the DROP statement below: query
        # parameters would add single quotes, which postgres does not accept
        # around table names.
        cursor.execute("select tablename from pg_tables where schemaname = 'public'")
        table_names = [name for (name,) in cursor.fetchall()]
        for name in table_names:
            cursor.execute('drop table "' + name + '" cascade')
    # Re-run migrations so post-migrate hooks (such as generating the default
    # Site object) execute; the command's console output is discarded.
    management.call_command("migrate", "--noinput", stdout=StringIO())
| [
6738,
33245,
1330,
10903,
9399,
198,
198,
6738,
42625,
14208,
13,
7295,
1330,
4542,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
198,
6738,
42625,
14208,
13,
9945,
1330,
4637,
628,
198,
4299,
13259,
62,
994... | 3.085502 | 269 |
"""
Rational number type based on Python integers.
The PythonRational class from here has been moved to
sympy.external.pythonmpq
This module is just left here for backwards compatibility.
"""
from sympy.core.numbers import Rational
from sympy.core.sympify import _sympy_converter
from sympy.utilities import public
from sympy.external.pythonmpq import PythonMPQ
# PythonRational is kept as a public alias of PythonMPQ for backwards
# compatibility with code that imported it from this module.
PythonRational = public(PythonMPQ)


def sympify_pythonrational(arg):
    """Convert a PythonRational (PythonMPQ) into a SymPy Rational.

    This converter was referenced below but never defined, which made
    importing this module raise NameError; it is required so that sympify()
    can handle PythonRational values.
    """
    return Rational(arg.numerator, arg.denominator)


_sympy_converter[PythonRational] = sympify_pythonrational
| [
37811,
198,
49,
864,
1271,
2099,
1912,
319,
11361,
37014,
13,
198,
198,
464,
11361,
49,
864,
1398,
422,
994,
468,
587,
3888,
284,
198,
1837,
3149,
88,
13,
22615,
13,
29412,
3149,
80,
198,
198,
1212,
8265,
318,
655,
1364,
994,
329,
... | 3.397059 | 136 |