content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import numpy as np
from coremltools.converters.mil.mil import (
BoolInputType,
DefaultInputs,
InputSpec,
Operation,
precondition,
StringInputType,
TensorInputType,
TupleInputType,
types,
)
from coremltools.converters.mil.mil.operation import VALUE
from coremltools.converters.mil.mil.types.symbolic import is_symbolic
from ._op_reqs import register_op
from ._utils import broadcast_shapes, parse_einsum_equation
@register_op(doc_str="")
class linear(Operation):
"""
Perform ``x * weight.T + bias`` where ``weight`` and ``bias`` are constant at
compile time.
Parameters
----------
x: tensor<[\*D,D_in], T> (Required)
* ``1 <= rank <= 3``.
* ``0 <= rank(*D) <= 2``.
weight: const tensor<[D_out,D_in], T> (Required)
bias: const tensor<[D_out],T> (Optional)
* Default to ``0``.
Returns
-------
tensor<[\*D,D_out], T>
* Same rank as the input ``x``.
Attributes
----------
T: fp16, fp32, i32
"""
input_spec = InputSpec(
x=TensorInputType(),
weight=TensorInputType(const=True),
bias=TensorInputType(const=True, optional=True),
)
@precondition(allow=VALUE)
@register_op(doc_str="")
class matmul(Operation):
"""
Perform N-D batch matrix multiplication with NumPy-style broadcasting
based on the following rules:
Rule 1. If both ``x, y`` are 1-D, return the scalar from the dot product.
Rule 2. If both ``x, y`` are 2-D or higher, perform a broadcast on the batch dimensions
(all dimensions except the last ``2``).
For example:
* ``x.shape == (10, 4, 3)``
* ``y.shape == (5, 10, 3, 2)``
* ``matmul(x, y).shape == (5, 10, 4, 2)``
Conventional matrix multiplication is a special case where both ``x, y`` are
exactly 2-D. For example:
* ``x.shape == (4, 3)``
* ``y.shape == (3, 2)``
* ``matmul(x, y).shape == (4, 2)``
If ``x`` is 1-D, and ``y`` is N-D where ``N >= 2``, ``x`` is first promoted to
matrix ``xm`` by prepending a ``1`` to its dimension, and the resulting ``xm`` is
broadcast to ``y`` following Rule 2 above. After this, remove the inserted dimension.
For example:
* ``x.shape == (4)``
* ``y.shape == (10, 4, 3)``
* ``xm.shape == (1, 4)``
* ``matmul(xm, y).shape == (10, 1, 3)``
* Removing the inserted dimension results in ``matmul(x, y).shape == (10, 3)``.
* Note: ``xm`` and ``matmul(xm, y)`` are for illustration only.
If ``x`` is N-D where ``N >= 2``, and ``y`` is 1-D, ``y`` is first promoted to
matrix ``ym`` by appending a ``1`` to its dimension, and the resulting ``ym`` is
broadcast to ``x`` following Rule 2 above. After this, remove the inserted dimension.
For example:
* ``x.shape == (10, 3, 4)``
* ``y.shape == (4,)``
* ``ym.shape == (4, 1)``
* ``matmul(x, ym).shape == (10, 3, 1)``
* Removing the inserted dimension results in ``matmul(x, y).shape == (10, 3)``.
* Note: ``xm`` and ``matmul(xm, y)`` are for illustration only.
Parameters
----------
x: tensor<[\*,K1], T> (Required)
* ``x`` must be 1-D or higher.
y: tensor<[\*,K2], T> (Required)
* ``y`` must be 1-D or higher.
transpose_x: const bool (Optional)
* Default to ``False``.
* Use ``True`` to transpose the last two dimensions of ``x`` before multiplication.
It has no effect when ``x`` is 1-D.
transpose_y: const bool (Optional)
* Default to ``False``.
* Use ``True`` to transpose the last two dimensions of ``y`` before multiplication.
It has no effect when ``y`` is 1-D.
Returns
-------
tensor<\*, T>
* Scalar or tensor output.
Attributes
----------
T: fp16, fp32, i32
"""
input_spec = InputSpec(
x=TensorInputType(),
y=TensorInputType(),
transpose_x=BoolInputType(const=True, optional=True),
transpose_y=BoolInputType(const=True, optional=True),
)
@precondition(allow=VALUE)
@register_op(doc_str="")
class einsum(Operation):
"""
Perform tensor multiplication expressed according to the einsum notation.
The mode/equation that is currently supported is mutiplying matrices that are laid out on
dimensions -1 and -3, treating all the other dimensions as batch. Broadcasting is supported along batch dimensions.
In particular, the inputs must be of the following shapes:
* Rank 4 input case
* Input 1: ``[B, C, H, W1]``
* Input 2: ``[B, W1, H, W2]``
* Output: ``[B, C, H, W2]``
* If, for one of the inputs, the dimensions ``"B"`` or ``"H"`` is 1, they are broadcast to match the other input.
* Rank 3 input case
* Input 1: ``[C, H, W1]``
* Input 2: ``[W1, H, W2]``
* Output: ``[C, H, W2]``
* If, for one of the inputs, the dimension ``"H"`` is 1, it is broadcast to match the other input.
Parameters
----------
values : Tuple(tensor_1, tensor_2)
* Where:
* ``tensor_1``: ``tensor<[*D, C, H, W1], T>``
* Must be of rank 3 or 4.
* ``tensor_2``: ``tensor<[*D, W1, H, W2], T>``
* Must be of rank 3 or 4.
equation: const<str>
* Supported equations are:
* ``"nchw,nwhu->nchu"`` and its equivalent equation strings
* ``"chw,whr->chr"`` and its equivalent equation strings
Returns
-------
tensor<[*D, C, H, W2], T>
* Same ranks as the inputs.
Attributes
----------
T: fp16, fp32
"""
input_spec = InputSpec(values=TupleInputType(),
equation=StringInputType(const=True))
@precondition(allow=VALUE)
| [
2,
220,
15069,
357,
66,
8,
12131,
11,
4196,
3457,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
220,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
18,
12,
565,
682,
5964,
326,
460,
307,
198,
2,
220,
1043,
287,
... | 2.412146 | 2,470 |
import pygame
from pygame.locals import *
from random import randint
from gameobjects.vector2 import Vector2
SCREEN_SIZE = (640, 480)
# In pixels per second, per second
GRAVITY = 250.0
# Increase for more bounciness, but don't go over 1!
BOUNCINESS = 0.7
if __name__ == "__main__":
run()
| [
11748,
12972,
6057,
198,
6738,
12972,
6057,
13,
17946,
874,
1330,
1635,
198,
6738,
4738,
1330,
43720,
600,
198,
6738,
983,
48205,
13,
31364,
17,
1330,
20650,
17,
198,
198,
6173,
2200,
1677,
62,
33489,
796,
357,
31102,
11,
23487,
8,
19... | 2.830189 | 106 |
import unittest
import bokeh.plotting as plt
from bokeh.objects import Grid, LinearAxis
| [
11748,
555,
715,
395,
198,
198,
11748,
1489,
365,
71,
13,
29487,
889,
355,
458,
83,
198,
6738,
1489,
365,
71,
13,
48205,
1330,
24846,
11,
44800,
31554,
271,
628,
628
] | 2.967742 | 31 |
import os
from celery import Celery
from django.conf import settings
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Calendar.settings')
app = Celery('Calendar')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
| [
11748,
28686,
198,
198,
6738,
18725,
1924,
1330,
15248,
1924,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
628,
198,
418,
13,
268,
2268,
13,
2617,
12286,
10786,
35028,
1565,
11230,
62,
28480,
51,
20754,
62,
33365,
24212,
3256,
705,
9... | 2.863158 | 95 |
# Copyright 2015 OpenStack Foundation
# Copyright 2015 VMware Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.network import base
from tempest import config
from tempest import test
from tempest_lib.common.utils import data_utils
from tempest_lib import decorators
from vmware_nsx_tempest.services import base_l2gw
from vmware_nsx_tempest.services import l2_gateway_client as L2GW
CONF = config.CONF
L2GW_RID = 'l2_gateway'
L2GW_RIDs = 'l2_gateways'
MSG_DIFF = "l2gw %s=%s is not the same as requested=%s"
class L2GatewayTest(base.BaseAdminNetworkTest):
"""Test l2-gateway operations:
l2-gateway-create
l2-gateway-show
l2-gateway-update
l2-gateway-list
l2-gateway-delete
over single device/interface/vlan
over single device/interface/multiple-vlans
over single device/multiple-interfaces/multiple-vlans
over multiple-device/multiple-interfaces/multiple-vlans
"""
credentials = ['primary', 'admin']
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@test.idempotent_id('8b45a9a5-468b-4317-983d-7cceda367074')
def test_csuld_single_device_interface_without_vlan(self):
"""Single device/interface/vlan
specify that the l2gw accept one and only one VLAN.
"""
dev_profile = self.getattr_or_skip_test("device_one_vlan")
_name = data_utils.rand_name('l2gw-1v1')
_devices = base_l2gw.get_l2gw_body(dev_profile)
self._pop_segmentation_id(_devices, 0, 0)
self._csuld_single_device_interface_vlan(_name, _devices)
@test.idempotent_id('af57cf56-a169-4d88-b32e-7f49365ce407')
def test_csuld_single_device_interface_vlan(self):
"""Single device/interface/vlan
specify that the l2gw don't specify VLAN, so the
l2-gateway-connection need to specify the VLAN.
"""
dev_profile = self.getattr_or_skip_test("device_one_vlan")
_name = data_utils.rand_name('l2gw-1v2')
_devices = base_l2gw.get_l2gw_body(dev_profile)
self._csuld_single_device_interface_vlan(_name, _devices)
@test.idempotent_id('cb59145e-3d2b-46b7-8f7b-f30f794a4d51')
@decorators.skip_because(bug="1559913")
@decorators.skip_because(bug="1559913")
@test.idempotent_id('5522bdfe-ebe8-4eea-81b4-f4075bb608cf')
@decorators.skip_because(bug="1559913")
@test.idempotent_id('5bec26e0-855f-4537-b31b-31663a820ddb')
@decorators.skip_because(bug="1558782")
@test.idempotent_id('f57d2d83-df5e-485f-9097-e3843e2b18a0')
| [
2,
15069,
1853,
4946,
25896,
5693,
198,
2,
15069,
1853,
37754,
3457,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
... | 2.403361 | 1,309 |
import json
import unittest
from pytopojson import quantize
| [
11748,
33918,
198,
11748,
555,
715,
395,
198,
198,
6738,
12972,
4852,
13210,
1559,
1330,
5554,
1096,
628
] | 3.444444 | 18 |
from lark import Lark
from pkgutil import get_data
from itertools import chain
from kestrel.codegen.relations import all_relations
| [
6738,
300,
668,
1330,
406,
668,
198,
6738,
279,
10025,
22602,
1330,
651,
62,
7890,
198,
6738,
340,
861,
10141,
1330,
6333,
198,
198,
6738,
479,
395,
2411,
13,
8189,
5235,
13,
39468,
1330,
477,
62,
39468,
628
] | 3.5 | 38 |
from calendar import c
from distutils.log import error
import pandas as pd
import csv
import asyncio
import os
import shutil
from atividades import *
from tkinter import *
root = Tk()
usuario_logado = ''
user_logado = Usuario('', 0, 0,'','')
if __name__ == '__main__':
#Adiciona_Exercicio_Dia('Dança', 'sullo152@gmail.com')
pass | [
6738,
11845,
1330,
269,
198,
6738,
1233,
26791,
13,
6404,
1330,
4049,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
269,
21370,
198,
11748,
30351,
952,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
6738,
379,
1699,
2367,
1330,
1635... | 2.386076 | 158 |
# Generated by Django 2.0.6 on 2019-10-03 17:36
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
21,
319,
13130,
12,
940,
12,
3070,
1596,
25,
2623,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
import neo4j
import uvicorn
from ariadne.asgi import GraphQL
from neo4j_graphql_py import neo4j_graphql, make_executable_schema
typeDefs = '''
directive @cypher(statement: String!) on FIELD_DEFINITION
directive @relation(name:String!, direction:String!) on FIELD_DEFINITION
type Movie {
_id: ID
title: String
released: Int
tagline: String
similar(first: Int = 3, offset: Int = 0): [Movie] @cypher(statement: "WITH {this} AS this MATCH (o:Movie) WHERE this.released=o.released AND id(this)<>id(o) RETURN o")
degree: Int @cypher(statement: "WITH {this} AS this RETURN SIZE((this)--())")
actors(first: Int = 3, offset: Int = 0): [Person] @relation(name: "ACTED_IN", direction:"IN")
}
type Person {
_id: ID
name: String
born: Int
}
type Query {
Movie(title: String, released: Int, tagline: String, first: Int, offset: Int): [Movie]
MoviesByYear(year: Int): [Movie]
Hello: String
}
'''
resolvers = {
# root entry point to GraphQL service
'Query': {
'Movie': lambda obj, info, **kwargs: neo4j_graphql(obj, info.context, info, **kwargs),
'MoviesByYear': lambda obj, info, **kwargs: neo4j_graphql(obj, info.context, info, **kwargs)
}
}
schema = make_executable_schema(typeDefs, resolvers)
driver = None
root_value = {}
app = GraphQL(schema=schema, root_value=root_value, context_value=context, debug=True)
uvicorn.run(app)
| [
11748,
19102,
19,
73,
198,
11748,
334,
25531,
1211,
198,
6738,
257,
21244,
710,
13,
292,
12397,
1330,
29681,
9711,
198,
6738,
19102,
19,
73,
62,
34960,
13976,
62,
9078,
1330,
19102,
19,
73,
62,
34960,
13976,
11,
787,
62,
18558,
18187,... | 2.671815 | 518 |
#!/usr/bin/env python
# Copyright (c) 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
README = """
Automatically add a specific legacy flag to multiple Skia client repos.
This would only work on Google desktop.
Example usage:
$ python add_legacy_flag.py SK_SUPPORT_LEGACY_SOMETHING \\
-a /data/android -c ~/chromium/src -g legacyflag
If you only need to add the flag to one repo, for example, Android, please give
only -a (--android-dir) argument:
$ python add_legacy_flag.py SK_SUPPORT_LEGACY_SOMETHING -a /data/android
"""
import os, sys
import argparse
import subprocess
import getpass
from random import randint
ANDROID_TOOLS_DIR = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'android')
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
357,
66,
8,
2177,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
... | 2.927869 | 305 |
#!/usr/bin/env python3
# Copyright 2014-present PUNCH Cyber Analytics Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Overview
========
Gather relevant information about an executable using pefile
Special Thanks
==============
Thanks to those at Facebook that provided most of the contributions for this plugin.
"""
import time
import pefile
import struct
import peutils
import hashlib
import binascii
from typing import Any, Dict, List, Optional, Tuple, Union
from stoq.data_classes import (
ExtractedPayload,
Payload,
PayloadMeta,
Request,
WorkerResponse,
)
from stoq.plugins import WorkerPlugin
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
220,
220,
15069,
1946,
12,
25579,
350,
47461,
15101,
30437,
4912,
198,
2,
198,
2,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
... | 3.489426 | 331 |
from flask import Flask, render_template
import requests
# posts = requests.get("https://api.npoint.io/43644ec4f0013682fc0d").json()
posts = requests.get('https://api.npoint.io/5083070470e7db605e88').json()
app = Flask(__name__)
@app.route('/')
@app.route("/post/<int:index>")
@app.route("/about")
@app.route("/contact")
if __name__ == "__main__":
app.run(debug=True)
| [
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
198,
11748,
7007,
198,
198,
2,
6851,
796,
7007,
13,
1136,
7203,
5450,
1378,
15042,
13,
77,
4122,
13,
952,
14,
3559,
29173,
721,
19,
69,
405,
1485,
43950,
16072,
15,
67,
11074,
17752,
34... | 2.519737 | 152 |
from __future__ import annotations
from typing import Dict, Optional, TYPE_CHECKING
from subway import HTTPStatus
from subway.headers import Headers
from subway.streams import StreamReader
from subway.request import HTTPConnection
if TYPE_CHECKING:
from .abc import Hooker
class HTTPResponse(HTTPConnection):
"""
An HTTP Response.
Attributes
----------
status: :class:`subway.response.HTTPStatus`
The status of the response.
version: :class:`str`
The HTTP version of the response.
headers: :class:`dict`
The headers of the response.
"""
@property
def hooker(self) -> Hooker:
"""
The hooker that created this response.
"""
return self._hooker
@property
def charset(self) -> Optional[str]:
"""
The charset of the response.
"""
return self.headers.charset
@property
def content_type(self) -> Optional[str]:
"""
The content type of the response.
"""
return self.headers.content_type
def is_closed(self) -> bool:
"""
Whether the response is closed.
"""
return self.hooker.closed
async def close(self) -> None:
"""
Close the response.
"""
await self.hooker.close() | [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
19720,
1330,
360,
713,
11,
32233,
11,
41876,
62,
50084,
2751,
198,
198,
6738,
19612,
1330,
14626,
19580,
198,
6738,
19612,
13,
50145,
1330,
7123,
364,
198,
6738,
19612,
13,
5532,
82,... | 2.520992 | 524 |
# -*- coding: utf-8 -*-
###
### DO NOT CHANGE THIS FILE
###
### The code is auto generated, your change will be overwritten by
### code generating.
###
from __future__ import absolute_import
from .api.oauth_token_code import OauthTokenCode
from .api.accounts_self import AccountsSelf
from .api.oauth_token import OauthToken
from .api.self_password_reset import SelfPasswordReset
from .api.self_password import SelfPassword
from .api.oauth_token_refresh import OauthTokenRefresh
from .api.accounts_wxapp import AccountsWxapp
routes = [
dict(resource=OauthTokenCode.as_view(), urls=['/oauth/token/code'], endpoint='oauth_token_code'),
dict(resource=AccountsSelf.as_view(), urls=['/accounts/self'], endpoint='accounts_self'),
dict(resource=OauthToken.as_view(), urls=['/oauth/token'], endpoint='oauth_token'),
dict(resource=SelfPasswordReset.as_view(), urls=['/self/password/reset'], endpoint='self_password_reset'),
dict(resource=SelfPassword.as_view(), urls=['/self/password'], endpoint='self_password'),
dict(resource=OauthTokenRefresh.as_view(), urls=['/oauth/token/refresh'], endpoint='oauth_token_refresh'),
dict(resource=AccountsWxapp.as_view(), urls=['/accounts/wxapp'], endpoint='accounts_wxapp'),
] | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
21017,
198,
21017,
8410,
5626,
5870,
27746,
12680,
45811,
198,
21017,
220,
198,
21017,
383,
2438,
318,
8295,
7560,
11,
534,
1487,
481,
307,
6993,
9108,
416,
220,
19... | 2.92217 | 424 |
n1 = int(input('Diga um numero: '))
n2 = int(input('Diga um numero: '))
n3 = int(input('Diga um numero: '))
maior = n1
soma = n2 + n3
if n2 > n1 and n2 > n3:
maior = n2
soma = n1 + n3
elif n3 > n1 and n3 > n2:
maior = n3
soma = n2 + n1
tri = ''
if n1 != n2 != n3 != n1:
tri = 'Escaleno'
elif n1 == n2 == n3:
tri = 'Equilátero'
else:
tri = 'Isósceles'
print('\033[0;31m-=\033[m' * 30)
if maior >= soma:
print('Você não pode fazer um triângulo')
else:
print('Você pode forma um triângulo e ele será {}'.format(tri))
print('\033[32m-=\033[m' * 30)
| [
77,
16,
796,
493,
7,
15414,
10786,
35,
13827,
23781,
997,
3529,
25,
705,
4008,
198,
77,
17,
796,
493,
7,
15414,
10786,
35,
13827,
23781,
997,
3529,
25,
705,
4008,
198,
77,
18,
796,
493,
7,
15414,
10786,
35,
13827,
23781,
997,
3529... | 1.911765 | 306 |
"""
UB_ID : 50291708
Name : Md Moniruzzaman Monir
Character Detection
(Due date: March 8th, 11: 59 P.M.)
The goal of this task is to experiment with template matching techniques. Specifically, the task is to find ALL of
the coordinates where a specific character appears using template matching.
There are 3 sub tasks:
1. Detect character 'a'.
2. Detect character 'b'.
3. Detect character 'c'.
You need to customize your own templates. The templates containing character 'a', 'b' and 'c' should be named as
'a.jpg', 'b.jpg', 'c.jpg' and stored in './data/' folder.
Please complete all the functions that are labelled with '# TODO'. Whem implementing the functions,
comment the lines 'raise NotImplementedError' instead of deleting them. The functions defined in utils.py
and the functions you implement in task1.py are of great help.
Hints: You might want to try using the edge detectors to detect edges in both the image and the template image,
and perform template matching using the outputs of edge detectors. Edges preserve shapes and sizes of characters,
which are important for template matching. Edges also eliminate the influence of colors and noises.
Do NOT modify the code provided.
Do NOT use any API provided by opencv (cv2) and numpy (np) in your code.
Do NOT import any library (function, module, etc.).
"""
import argparse
import json
import os
import utils
import task1
import numpy as np
import cv2
def read_image(img_path, show=False):
"""Reads an image into memory as a grayscale array"""
img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
if not img.dtype == np.uint8:
pass
if show:
show_image(img)
img = [list(row) for row in img] # converting to nested list
return img
def show_image(img, delay=1000):
"""Shows an image"""
cv2.namedWindow('image', cv2.WINDOW_AUTOSIZE)
cv2.imshow('image', img)
cv2.waitKey(delay)
cv2.destroyAllWindows()
# NCC (Normalized Cross Correlation) between image and kernel
def corr2d(img, kernel):
"""
Args:
img : nested list (int), image.
kernel: nested list (int), kernel.
Returns:
img_conv: nested list (int), image.
"""
padding_layer = len(kernel)//2
img = utils.zero_pad(img,padding_layer,padding_layer) # padding the image
row_count = len(img)
col_count = len(img[0])
# output image having same size as the input image
img_corr = []
zeros = [0]*(col_count-2*padding_layer)
for i in range(row_count-2*padding_layer):
img_corr.insert(0, [0 for value in enumerate(zeros)])
kernel_h = len(kernel)
kernel_w = len(kernel[0])
img = np.asarray(img, dtype=np.float32)
kernel = np.asarray(kernel, dtype=np.float32)
for i in range(row_count - kernel_h + 1):
for j in range(col_count - kernel_w + 1):
mult_result = utils.elementwise_mul(utils.crop(img,i,i+kernel_h,j,j+kernel_w), kernel)
sqr_img = utils.elementwise_mul(utils.crop(img,i,i+kernel_h,j,j+kernel_w),
utils.crop(img,i,i+kernel_h,j,j+kernel_w))
sqr_ker = utils.elementwise_mul(kernel, kernel)
sum = 0
sum_sqr_img = 0
sum_sqr_ker = 0
for p in range(len(mult_result)):
for q in range(len(mult_result[p])):
sum += mult_result[p][q]
sum_sqr_img += sqr_img[p][q]
sum_sqr_ker += sqr_ker[p][q]
img_corr[i][j] = sum / np.sqrt(sum_sqr_img * sum_sqr_ker)
img_corr = [list(row) for row in img_corr]
return img_corr
def detect(img, template):
"""Detect a given character, i.e., the character in the template image.
Args:
img: nested list (int), image that contains character to be detected.
template: nested list (int), template image.
Returns:
coordinates: list (tuple), a list whose elements are coordinates where the character appears.
format of the tuple: (x (int), y (int)), x and y are integers.
x: row that the character appears (starts from 0).
y: column that the character appears (starts from 0).
"""
# TODO: DONE
img1 = corr2d (img,template)
img1 = np.asarray(img1,dtype=np.float32)
coordinates = []
for r in range(len(img1)):
for c in range(len(img1[r])):
if img1[r][c] >= threshold:
row = r - (h//2)
col = c - (w//2)
corordinate = (row,col)
coordinates.append(corordinate)
# raise NotImplementedError
return coordinates
if __name__ == "__main__":
main()
| [
37811,
201,
198,
220,
220,
220,
471,
33,
62,
2389,
1058,
2026,
1959,
1558,
2919,
201,
198,
220,
220,
220,
6530,
220,
1058,
39762,
2892,
343,
4715,
10546,
2892,
343,
201,
198,
220,
220,
220,
220,
201,
198,
220,
220,
220,
15684,
46254... | 2.187713 | 2,344 |
# Imports from python.
import os
import uuid
# Imports from Django.
from django.db import models
from django.utils.html import format_html
# Imports from politico-civic-entity.
from entity.models.base import CivicBaseModel
# from entity.models.base import NaturalKeyMixin
from entity.models.image_tag import ImageTag
from entity.models.person import Person
# Unfortunately, this is only around so the old migrations don't break
class PersonImage(CivicBaseModel):
"""
Image attached to a person, which can be serialized
by a tag.
"""
# NOTE: Subclassing CivicBaseModel would replace standard PK with a UUID.
person = models.ForeignKey(
Person,
related_name="images",
on_delete=models.PROTECT
)
tag = models.ForeignKey(
ImageTag,
related_name="+",
on_delete=models.PROTECT,
help_text="Used to serialize images.",
)
image = models.URLField()
# created = models.DateTimeField(auto_now_add=True, editable=False)
# updated = models.DateTimeField(auto_now=True, editable=False)
| [
2,
1846,
3742,
422,
21015,
13,
198,
11748,
28686,
198,
11748,
334,
27112,
628,
198,
2,
1846,
3742,
422,
37770,
13,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
26791,
13,
6494,
1330,
5794,
62,
6494,
628,... | 2.862797 | 379 |
import pandas as pd
import albumentations
import joblib
import numpy as np
import torch
from PIL import Image
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
435,
65,
1713,
602,
198,
11748,
1693,
8019,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
198,
6738,
350,
4146,
1330,
7412,
198
] | 3.363636 | 33 |
# Crie um programa que leia um número inteiro e mostra na tela se ele é par ou ímpar.
num = float(input('Digite um número: '))
resto = num % 2
if resto == 1:
print('O número é ímpar!')
else:
print('O número é par!')
# Resolução da aula
# número = int(input('Me diga um número qualquer: '))
# resultado = número % 2
# if resultado == 0:
# print('O número {} é PAR'.format(número))
# else:
# print('O número {} é ÍMPAR'.format(número))
| [
2,
327,
5034,
23781,
1430,
64,
8358,
443,
544,
23781,
299,
21356,
647,
78,
493,
68,
7058,
304,
749,
430,
12385,
256,
10304,
384,
9766,
38251,
1582,
267,
84,
6184,
255,
76,
1845,
13,
198,
198,
22510,
796,
12178,
7,
15414,
10786,
1951... | 2.110599 | 217 |
import json
import os
import subprocess
from pathlib import Path
PROJECT_ROOT = Path(__file__).absolute().parent.parent
if __name__ == "__main__":
write_vscode_settings()
delete_myself()
| [
11748,
33918,
198,
11748,
28686,
198,
11748,
850,
14681,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
31190,
23680,
62,
13252,
2394,
796,
10644,
7,
834,
7753,
834,
737,
48546,
22446,
8000,
13,
8000,
628,
628,
198,
198,
361,
11593,
3672... | 2.871429 | 70 |
#!/usr/bin/python
from TraefikUpdater import TraefikUpdater
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
6738,
4759,
891,
1134,
4933,
67,
729,
1330,
4759,
891,
1134,
4933,
67,
729,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
198
... | 2.372093 | 43 |
from django.db import migrations
| [
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 3.777778 | 9 |
import scipy.interpolate
from scipy.special import expit
import scipy.constants as constants
import numpy as np | [
11748,
629,
541,
88,
13,
3849,
16104,
378,
198,
6738,
629,
541,
88,
13,
20887,
1330,
1033,
270,
198,
11748,
629,
541,
88,
13,
9979,
1187,
355,
38491,
198,
11748,
299,
32152,
355,
45941
] | 3.264706 | 34 |
import logging
import os
| [
11748,
18931,
198,
11748,
28686,
198
] | 4.166667 | 6 |
import io
import json
import boto3
import os
from pydub import AudioSegment
| [
11748,
33245,
198,
11748,
33918,
198,
11748,
275,
2069,
18,
198,
11748,
28686,
198,
198,
6738,
279,
5173,
549,
1330,
13491,
41030,
434,
198,
220,
220,
220,
220,
220,
220,
220,
220,
628
] | 2.636364 | 33 |
""" netplan.py generates netplan file for triage.
There are two cases, one for during triage where a machine generates a default netplan yaml file,
and other is for server.
"""
from ..const import const
from ..components.network import NetworkDeviceType, NetworkDevice
import sys, os, json, hashlib
indentspace = ' '
# Create/generate netplan.yaml file
def generate_default_netplan_config(filename, devices):
"""create netplan config - used for creating default one."""
generate_default_config(devices)
generate_netplan_file(filename, devices)
pass
#
# generate netplan.yaml file from the devices
def generate_netplan_file(filename, devices):
"""generate a netplan file from the devices' config."""
ifdecl = [ {'version': '2' },
{'renderer': 'networkd' } ]
ethernets = []
wifis = []
bonds = {}
for dev in devices:
if dev.config is None:
continue
if dev.config["device_type"] == "ethernet":
dev_config = [ { 'dhcp4': dev.config["dhcp4"] },
{ 'optional': dev.config["optional"] } ]
addresses = dev.config.get("addresses")
if addresses:
dev_config.append({"addresses": format_addresses(addresses)})
pass
ethernets.append( { dev.device_name: dev_config } )
pass
elif dev.config["device_type"] == "wifi":
wifis.append({ dev.device_name: [ { 'dhcp4': dev.config["dhcp4"] },
{ 'optional': dev.config["dhcp4"] },
{ 'access-points': dev.config["access-points"] } ] })
pass
bond = dev.config.get("bond")
if bond:
bond_device = bond["device_name"]
this_bond = bonds.get(bond_device)
if this_bond:
this_bond["interfaces"].append(dev.device_name)
else:
this_bond = {
"interfaces": [dev.device_name],
"addresses": bond["addresses"]
}
bonds[bond_device] = this_bond
pass
pass
pass
bonds2 = []
for bond_device in bonds.keys():
bond = bonds[bond_device]
bonds2.append( [ { bond_device:
[ {"interfaces": "[ " + ", ".join(bond["interfaces"]) + " ]",
"addresses": format_addresses(bond["addresses"]) } ] } ] )
pass
if ethernets:
ifdecl.append( {'ethernets': ethernets } )
pass
if bonds2:
ifdecl.append( {'bonds': bonds2 })
pass
if wifis:
ifdecl.append( {'wifis': wifis} )
pass
netplan = [ '# This file is auto-generated by wce triage lib/netplan.py.',
{"network": ''},
ifdecl ]
if filename:
output = open(filename, "w")
else:
output = sys.stdout
pass
printer = Printer( output )
printer.print_tree(netplan, 0)
pass
def generate_ap_param():
""" generate access-point param."""
SSID = os.environ.get('TRIAGE_SSID', 'wcetriage')
WIFIPASSWORD = os.environ.get('TRIAGE_PASSWORD', 'thepasswordiswcetriage')
if len(SSID) > 0 and len(WIFIPASSWORD) > 0:
return { SSID: [ {"password": WIFIPASSWORD } ] }
pass
return {}
def generate_default_config(devices):
"""generates the default config for network devices and set it to the
device.config.
Note that the shape of config is flat and different from netplan printer.
"""
ethernets = []
wifis = []
WCE_SERVER = os.environ.get(const.WCE_SERVER, 'false')
# Uber-default interface setup
for dev in devices:
if dev.device_type == NetworkDeviceType.Ethernet:
dev.set_config({ "device_type": "ethernet",
"dhcp4": 'yes',
'optional': 'yes' })
ethernets.append(dev)
elif dev.device_type == NetworkDeviceType.Wifi:
# netplan works with wpa-supplicant, generates a simple config file
# in the same directory and hands off the auth.
dev.set_config({ "device_type": "wifi",
'dhcp4': 'yes',
'optional': 'yes',
'access-points': [generate_ap_param()]})
wifis.append(dev)
pass
pass
# Redo default for server
if WCE_SERVER == "true":
if len(ethernets) >= 2:
for eth in ethernets:
eth.set_config({ "device_type": "ethernet",
'dhcp4': 'no',
'optional': 'yes',
"bond": { "device_name": "bond0",
"addresses": [ ("10.3.2.1", 24) ] } })
pass
pass
else:
for eth in ethernets:
eth.set_config({ "device_type": "ethernet",
'dhcp4': 'no',
'optional': 'yes',
"addresses": [ ("10.3.2.1", 24) ]})
pass
pass
pass
pass
#
# Cached machine-specific network ID; computed once per process.
_network_id = None
def generate_network_id(devices):
  """Generate (and cache) a unique ID for this particular machine.

  The ID is a SHA-1 over some machine-stable seed (the SSH host key if
  sshd is installed, otherwise the first non-loopback MAC address)
  concatenated with the sorted device names.

  :param devices: iterable of objects with a ``device_name`` attribute
  :return: hex digest string; cached in the module-level ``_network_id``
  """
  global _network_id
  if _network_id is not None:
    return _network_id
  machine_id = None
  try:
    with open("/etc/ssh/ssh_host_rsa_key.pub") as ssh_key:
      machine_id = ssh_key.read().encode("iso-8859-1")
  except FileNotFoundError:
    # No ssh server installed - fall back to a NIC's MAC address.
    netdir = "/sys/class/net"
    for netdev in os.listdir(netdir):
      if netdev == "lo":
        continue
      netaddrpath = os.path.join(netdir, netdev, "address")
      try:
        with open(netaddrpath) as macaddrfile:
          macaddr = macaddrfile.read().encode("iso-8859-1").strip()
          # BUG FIX: macaddr is bytes, so the original comparison against
          # the str "00:00:00:00:00:00" was always False in Python 3 and
          # all-zero MACs were never skipped.
          if macaddr == b"00:00:00:00:00:00":
            continue
          machine_id = macaddr
          break
      except FileNotFoundError:
        pass
  if machine_id is None:
    # Robustness: neither seed was available; fall back to hashing only
    # the device names instead of raising TypeError on None + bytes.
    machine_id = b""
  device_names = [device.device_name for device in devices]
  _network_id = hashlib.sha1(machine_id + " ".join(sorted(device_names)).encode('iso-8859-1')).hexdigest()
  return _network_id
def save_network_config(config_dir, devices):
  """Persist each device's config as JSON under *config_dir*.

  The file name embeds this machine's network ID so a saved config is
  only matched again on the same machine / device set.
  """
  network_id = generate_network_id(devices)
  configs = [(device.device_name, device.config) for device in devices]
  config_path = os.path.join(config_dir, ".netplan_" + network_id)
  with open(config_path, "w") as cfg:
    json.dump({"id": network_id, "configs": configs}, cfg, indent=2)
def load_network_config(config_dir, devices):
  """Load saved per-device configs, if a matching config file is available.

  Looks up the config file keyed by this machine's network ID, applies
  each saved config to the matching device, then refreshes the wifi
  access-point credentials from the environment.

  :param config_dir: directory holding ".netplan_<id>" files
  :param devices: devices whose configs should be restored
  :return: True when a config was found and applied, False otherwise.
           Best-effort: any read/parse error simply yields False.
  """
  network_id = generate_network_id(devices)
  config_filename = ".netplan_" + network_id
  config_filepath = os.path.join(config_dir, config_filename)
  if not os.path.exists(config_filepath):
    return False
  device_map = {}
  for device in devices:
    device_map[device.device_name] = device
  try:
    with open(config_filepath, "r") as cfg:
      dev_configs = json.load(cfg)
      if network_id == dev_configs["id"]:
        for device_name, config in dev_configs["configs"]:
          device = device_map.get(device_name)
          if device:
            device.set_config(config)
  except Exception:
    # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt. Keep the best-effort "no saved config" behavior,
    # but only for genuine errors (bad JSON, missing keys, I/O failures).
    return False
  # Update the WIFI connection if provided.
  ap_param = generate_ap_param()
  if ap_param:
    for device in devices:
      if device.device_type == NetworkDeviceType.Wifi:
        config = device.config
        if config:
          config['access-points'] = [ap_param]
          device.set_config(config)
  return True
if __name__ == "__main__":
  # Smoke test: exercise default-config generation, netplan emission to
  # stdout (filename=None), and the save/load round trip, using fake
  # devices and fake wifi credentials.
  os.environ["TRIAGE_SSID"] = "fakessid"
  os.environ["TRIAGE_PASSWORD"] = "fake-password"
  # NOTE(review): "eth2" is created as a Wifi device despite its name —
  # presumably intentional for testing; confirm.
  eth0 = NetworkDevice(device_name="eth0", device_type=NetworkDeviceType.Ethernet)
  eth1 = NetworkDevice(device_name="eth1", device_type=NetworkDeviceType.Ethernet)
  eth2 = NetworkDevice(device_name="eth2", device_type=NetworkDeviceType.Wifi)
  netdevs = [eth0, eth1, eth2]
  # Client mode: all devices DHCP.
  os.environ[const.WCE_SERVER] = 'false'
  generate_default_config(netdevs)
  generate_netplan_file(None, netdevs)
  # Server mode with two ethernets: expect a bond0 declaration.
  os.environ[const.WCE_SERVER] = 'true'
  generate_default_config(netdevs)
  generate_netplan_file(None, netdevs)
  # Server mode with a single ethernet: expect a static address, no bond.
  netdevs = [eth0]
  generate_default_config(netdevs)
  generate_netplan_file(None, netdevs)
  netdevs = [eth0, eth1, eth2]
  eth0.set_config(None)
  eth1.set_config(None)
  eth2.set_config(None)
  # Save a tweaked config, wipe it, then reload and re-emit it.
  os.environ[const.WCE_SERVER] = 'false'
  generate_default_config(netdevs)
  eth1.config["optional"] = "no"
  save_network_config("/tmp", netdevs)
  eth0.set_config(None)
  eth1.set_config(None)
  eth2.set_config(None)
  load_network_config("/tmp", netdevs)
  generate_netplan_file(None, netdevs)
  pass
| [
37811,
2010,
11578,
13,
9078,
18616,
2010,
11578,
2393,
329,
1333,
496,
13,
198,
198,
1858,
389,
734,
2663,
11,
530,
329,
1141,
1333,
496,
810,
257,
4572,
18616,
257,
4277,
2010,
11578,
331,
43695,
2393,
11,
198,
392,
584,
318,
329,
... | 2.293931 | 3,773 |
import requests
# Dcard "popular posts" API endpoints.
DCARD_URL = 'https://www.dcard.tw'
DCARD_API = DCARD_URL + '/_api/posts?popular=true'
# https://www.dcard.tw/_api/posts?popular=true&limit=<max items>&before=<last post id>

if __name__ == '__main__':
    # Fetch num_page pages of popular posts, paginating with the `before`
    # cursor (id of the last post already fetched).
    num_page = 2
    posts = list(requests.get(DCARD_API).json())
    for i in range(1, num_page):
        id_last_post = posts[-1]['id']
        posts += list(requests.get(DCARD_API + '&before=' + str(id_last_post)).json())
    print('共 %d 頁, %d 篇文章' %(num_page, len(posts)))
    for p in posts:
        # NOTE(review): posts.index(p) is O(n) per iteration (O(n^2) total);
        # enumerate(posts, 1) would be linear.
        print('第 %d 篇:' %(posts.index(p)+1))
        # NOTE(review): `show` is not defined in this chunk — confirm it is
        # defined elsewhere in the file.
        show(p)
11748,
7007,
628,
198,
9697,
9795,
62,
21886,
796,
705,
5450,
1378,
2503,
13,
67,
9517,
13,
4246,
6,
198,
9697,
9795,
62,
17614,
796,
6257,
9795,
62,
21886,
1343,
31051,
62,
15042,
14,
24875,
30,
47568,
28,
7942,
6,
198,
2,
5450,
... | 1.948097 | 289 |
"""
flask_zs.exceptions
~~~~~~~~~~~~
flask-zs exceptions module
:copyright: (c) 2018 by codeif.
:license: MIT, see LICENSE for more details.
"""
| [
37811,
198,
220,
220,
220,
42903,
62,
89,
82,
13,
1069,
11755,
198,
220,
220,
220,
220,
15116,
8728,
628,
220,
220,
220,
42903,
12,
89,
82,
13269,
8265,
628,
220,
220,
220,
1058,
22163,
4766,
25,
357,
66,
8,
2864,
416,
2438,
361,
... | 2.615385 | 65 |
"""
# Problem Statement
Given a list of non-negative numbers and a target integer k, write a function to
check if the array has a continuous subarray of size at least 2 that sums up to
a multiple of k, that is, sums up to n*k where n is also an integer.
## Constraints:
The length of the array won't exceed 10,000.
You may assume the sum of all the numbers is in the range of a signed 32-bit integer.
# Examples
## Example 1:
Input: [23, 2, 4, 6, 7], k=6 Output: True
Explanation: Because [2, 4] is a continuous subarray of size 2 and sums up to 6.
## Example 2:
Input: [23, 2, 6, 4, 7], k=6 Output: True
Explanation: Because [23, 2, 6, 4, 7] is a continuous subarray of size 5 and sums up to 42.
"""
def solution2(nums, K):
    """Brute-force check for a subarray (length >= 2) summing to a multiple of K.

    Builds prefix sums, then tests every window nums[start..end] with
    end > start. For K == 0 the window sum itself must be zero.
    """
    prefix = []
    running = 0
    for value in nums:
        running += value
        prefix.append(running)
    n = len(nums)
    for start in range(n):
        for end in range(start + 1, n):
            window = prefix[end] - (prefix[start - 1] if start > 0 else 0)
            if K != 0 and window % K == 0:
                return True
            if K == 0 and window == 0:
                return True
    return False
def solution3(nums, K):
    """O(n) check using prefix-sum remainders.

    If two prefix sums share the same remainder mod K, the elements
    between them sum to a multiple of K; requiring the indices to be
    more than 1 apart enforces a subarray length of at least 2. The
    sentinel {0: -1} covers subarrays starting at index 0. For K == 0
    the raw prefix sum is used instead of a remainder.
    """
    first_seen = {0: -1}
    running = 0
    for idx, value in enumerate(nums):
        running += value
        remainder = running % K if K != 0 else running
        if remainder not in first_seen:
            # Remember only the earliest index per remainder.
            first_seen[remainder] = idx
        elif idx - first_seen[remainder] > 1:
            return True
    return False
import unittest
from unittest_data_provider import data_provider
from testdata.continuous_subarray_sum import TestData as data
if __name__ == '__main__':
    # NOTE(review): `Tests` is not defined anywhere in this chunk — confirm
    # the TestCase class exists (it may have been lost from this file).
    suite = unittest.TestLoader().loadTestsFromTestCase(Tests)
    unittest.TextTestRunner(verbosity=2).run(suite)
| [
37811,
198,
2,
20647,
21983,
198,
198,
15056,
257,
1351,
286,
1729,
12,
31591,
3146,
290,
257,
2496,
18253,
479,
11,
3551,
257,
2163,
284,
198,
9122,
611,
262,
7177,
468,
257,
12948,
850,
18747,
286,
2546,
379,
1551,
362,
326,
21784,
... | 2.281139 | 843 |
# Generated by Django 2.2.17 on 2021-01-29 22:34
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
1558,
319,
33448,
12,
486,
12,
1959,
2534,
25,
2682,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.840909 | 44 |
from django.contrib import admin
from django.contrib.auth import views as auth
from django.urls import path, include
import main.views
import accounts.views
# URL routes: index, auth (login/logout/register/activation), and the full
# password change/reset flow. Order matters for Django's resolver.
urlpatterns = [
    # NOTE(review): IndexView is passed without .as_view() — confirm it is a
    # function-based view (class-based views must be wrapped).
    path('',main.views.IndexView, name='index'),
    path('login/', accounts.views.SignInView.as_view(), name='login'),
    path('logout/', auth.LogoutView.as_view(template_name='index.html'), name='logout'),
    path('register/', accounts.views.SignUpView.as_view(), name='register'),
    # Account activation via emailed code.
    path('activate/<code>/', accounts.views.ActivateView.as_view(), name='activate'),
    path('password/change/', auth.PasswordChangeView.as_view(
        template_name='accounts/password_change_form.html'), name='password_change'),
    path('password/change/done/', auth.PasswordChangeDoneView.as_view(
        template_name='accounts/password_change_done.html'), name='password_change_done'),
    path('password/reset/', auth.PasswordResetView.as_view(template_name='accounts/password_reset.html'),
         name='password_reset'),
    path('password/reset/done/', auth.PasswordResetDoneView.as_view(
        template_name='accounts/password_reset_done.html'), name='password_reset_done'),
    path('password/<uidb64>/<token>/',
         auth.PasswordResetConfirmView.as_view(template_name='accounts/password_reset_confirm.html'),
         name='password_reset_confirm'),
    # NOTE(review): this pattern duplicates 'password/reset/done/' above with a
    # different name — only the first match is ever reached; confirm intent.
    path('password/reset/done/',
         auth.PasswordResetCompleteView.as_view(template_name='accounts/password_reset_complete.html'),
         name='password_reset_complete'),
    # path('^oauth/', include('social_django.urls', namespace='social')),
]

# NOTE(review): dead code — this old-style url() tuple is a bare expression
# with no effect; it should be removed or folded into urlpatterns.
(r'^logout/$', 'django.contrib.auth.views.logout',
 {'next_page': '/successfully_logged_out/'})
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
5009,
355,
6284,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
2291,
198,
198,
11748,
1388,
13,
33571,
198,
11748,
5504,
... | 2.746006 | 626 |
#!/usr/bin/env python3
# coding: utf-8
import os
from detectron2.data.datasets import register_coco_instances
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer
from torch import cuda
# Register the person/bag dataset splits in COCO format.
register_coco_instances("person_bag_train", {}, "./datasets/person_bag/annotations/train.json",
                        "./datasets/person_bag/images/train/")
register_coco_instances("person_bag_val", {}, "./datasets/person_bag/annotations/val.json",
                        "./datasets/person_bag/images/val/")

# Start from the zoo's Faster R-CNN R-101 FPN 3x config and weights, then
# fine-tune on the two-class person/bag dataset.
cfg = get_cfg()
# add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library
cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # set threshold for this model
cfg.DATALOADER.NUM_WORKERS = 2
# Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml")
cfg.DATASETS.TRAIN = ("person_bag_train",)
cfg.DATASETS.TEST = ("person_bag_val",)
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2  # only has two classes (person and bag)
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 32
cfg.SOLVER.MAX_ITER = 80000

# Fall back to CPU training when no GPU is available.
if not cuda.is_available():
    cfg.MODEL.DEVICE = "cpu"

if not os.path.exists('./output'):
    os.makedirs('./output')

# resume_or_load(resume=False) always starts from the zoo weights above.
trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=False)
trainer.train()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
28686,
198,
220,
220,
220,
220,
198,
6738,
4886,
1313,
17,
13,
7890,
13,
19608,
292,
1039,
1330,
7881,
62,
66,
25634,
62,
8625,... | 2.402141 | 654 |
"""
GTSAM Copyright 2010-2018, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
See LICENSE for the license information
Shonan Rotation Averaging CLI reads a *pose* graph, extracts the
rotation constraints, and runs the Shonan algorithm.
Author: Frank Dellaert
Date: August 2020
"""
# pylint: disable=invalid-name, E1101
import argparse
import matplotlib.pyplot as plt
import numpy as np
import gtsam
from gtsam.utils import plot
def estimate_poses_given_rot(factors: gtsam.BetweenFactorPose3s,
                             rotations: gtsam.Values,
                             d: int = 3):
    """ Estimate Poses from measurements, given rotations. From SfmProblem in shonan.

    Solves a linear least-squares problem for the translations t_j with
    the rotations held fixed, anchored at t_0 = 0.

    Arguments:
        factors -- data structure with many BetweenFactorPose3 factors
        rotations {Values} -- Estimated rotations

    Returns:
        Values -- Estimated Poses
    """
    I_d = np.eye(d)

    graph = gtsam.GaussianFactorGraph()
    model = gtsam.noiseModel.Unit.Create(d)

    # Add a factor anchoring t_0
    graph.add(0, I_d, np.zeros((d,)), model)

    # Add a factor saying t_j - t_i = Ri*t_ij for all edges (i,j)
    for factor in factors:
        keys = factor.keys()
        i, j, Tij = keys[0], keys[1], factor.measured()
        # NOTE(review): `R` is not defined in this chunk — in the upstream
        # GTSAM example it is a local helper returning rotations.atRot3(j)
        # (or atRot2 for d == 2); confirm it was not lost from this file.
        measured = R(i).rotate(Tij.translation())
        graph.add(j, I_d, i, -I_d, measured, model)

    # Solve linear system
    translations = graph.optimize()

    # Convert to Values.
    result = gtsam.Values()
    for j in range(rotations.size()):
        tj = translations.at(j)
        # NOTE(review): `pose` is likewise undefined here — presumably a
        # helper building gtsam.Pose3/Pose2 from (rotation, translation).
        result.insert(j, pose(R(j), tj))

    return result
def run(args):
    """Run Shonan averaging and then recover translations linearly before saving result.

    Reads a pose graph (from --input_file or a named example dataset),
    runs Shonan rotation averaging in SO(2) or SO(3), recovers the
    translations via estimate_poses_given_rot, writes the result as g2o,
    and plots the trajectory.

    Raises ValueError for a missing input source, an unsupported
    dimension, or a graph with no constraints of the requested dimension.
    """
    # Get input file
    if args.input_file:
        input_file = args.input_file
    else:
        if args.named_dataset == "":
            raise ValueError(
                "You must either specify a named dataset or an input file")
        input_file = gtsam.findExampleDataFile(args.named_dataset)

    if args.dimension == 2:
        print("Running Shonan averaging for SO(2) on ", input_file)
        shonan = gtsam.ShonanAveraging2(input_file)
        if shonan.nrUnknowns() == 0:
            raise ValueError("No 2D pose constraints found, try -d 3.")
        initial = shonan.initializeRandomly()
        # (pMin, pMax) = (2, 10): the Riemannian staircase rank range.
        rotations, _ = shonan.run(initial, 2, 10)
        factors = gtsam.parse2DFactors(input_file)
    elif args.dimension == 3:
        print("Running Shonan averaging for SO(3) on ", input_file)
        shonan = gtsam.ShonanAveraging3(input_file)
        if shonan.nrUnknowns() == 0:
            raise ValueError("No 3D pose constraints found, try -d 2.")
        initial = shonan.initializeRandomly()
        rotations, _ = shonan.run(initial, 3, 10)
        factors = gtsam.parse3DFactors(input_file)
    else:
        raise ValueError("Can only run SO(2) or SO(3) averaging")

    print("Recovering translations")
    poses = estimate_poses_given_rot(factors, rotations, args.dimension)
    print("Writing result to ", args.output_file)
    gtsam.writeG2o(gtsam.NonlinearFactorGraph(), poses, args.output_file)

    plot.plot_trajectory(1, poses, scale=0.2)
    plot.set_axes_equal(1)
    plt.show()
if __name__ == "__main__":
    # CLI: choose a named example dataset or an explicit input file,
    # the rotation dimension (2 or 3), and the g2o output path.
    parser = argparse.ArgumentParser()
    parser.add_argument('-n', '--named_dataset', type=str, default="pose3example-grid",
                        help='Find and read frome example dataset file')
    parser.add_argument('-i', '--input_file', type=str, default="",
                        help='Read pose constraints graph from the specified file')
    parser.add_argument('-o', '--output_file', type=str, default="shonan.g2o",
                        help='Write solution to the specified file')
    parser.add_argument('-d', '--dimension', type=int, default=3,
                        help='Optimize over 2D or 3D rotations')
    # NOTE(review): store_true with default=True makes this flag inert —
    # plotting can never be disabled from the command line; confirm intent.
    parser.add_argument("-p", "--plot", action="store_true", default=True,
                        help="Plot result")
    run(parser.parse_args())
| [
37811,
198,
38,
4694,
2390,
15069,
3050,
12,
7908,
11,
7859,
9634,
4992,
10501,
11,
198,
43482,
11,
7859,
1542,
32148,
12,
3023,
1314,
198,
3237,
6923,
33876,
198,
30515,
669,
25,
5278,
360,
12627,
861,
11,
2123,
435,
13,
357,
3826,
... | 2.415691 | 1,708 |
from fabric.api import run,put,cd,parallel,roles,serial,local,runs_once
from env_setup import *
counter = 0
"""
Used to measure the time taken to simply upload
data into HDFS.
"""
@parallel
@roles('master')
@parallel
@roles('master')
@roles('master')
@roles('master')
@roles('master')
@roles('master')
@roles('master')
@parallel
@roles('master')
@runs_once
@parallel
@roles('master')
@parallel
#############
# Running Queries
############
@roles('master')
| [
6738,
9664,
13,
15042,
1330,
1057,
11,
1996,
11,
10210,
11,
1845,
29363,
11,
305,
829,
11,
46911,
11,
12001,
11,
48381,
62,
27078,
198,
6738,
17365,
62,
40406,
1330,
1635,
198,
198,
24588,
796,
657,
198,
198,
37811,
198,
38052,
284,
... | 2.624309 | 181 |
from dsgrn_net_gen.makejobs import Job
import sys
# CLI entry: build and run a dsgrn_net_gen Job from a parameter file.
paramfile = sys.argv[1]  # path to the job parameter file (first CLI argument)
job = Job(paramfile)
job.run()
| [
6738,
288,
82,
2164,
77,
62,
3262,
62,
5235,
13,
15883,
43863,
1330,
15768,
198,
11748,
25064,
198,
198,
17143,
7753,
796,
25064,
13,
853,
85,
58,
16,
60,
198,
21858,
796,
15768,
7,
17143,
7753,
8,
198,
21858,
13,
5143,
3419,
628
] | 2.488372 | 43 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
MAST Microservices API
======================
This module contains various methods for querying MAST microservice APIs.
"""
import json
import time
import warnings
import numpy as np
from astropy.table import Table, Column, MaskedColumn
from ..query import BaseQuery
from ..utils import async_to_sync
from ..utils.class_or_instance import class_or_instance
from ..exceptions import TimeoutError, NoResultsWarning
from . import conf, utils
__all__ = []
def _json_to_table(json_obj):
    """
    Takes a JSON object as returned from a MAST microservice request and turns it into an `~astropy.table.Table`.

    Column names and types come from json_obj['info']; row data comes from
    json_obj['data'] (a list of rows, each indexed positionally to match
    'info'). Missing (None) cells are replaced by a per-type sentinel and
    masked in the output table.

    Parameters
    ----------
    json_obj : dict
        A MAST microservice response JSON object (python dictionary)

    Returns
    -------
    response : `~astropy.table.Table`

    Raises
    ------
    KeyError
        If 'info' or 'data' is missing from json_obj.
    """
    data_table = Table(masked=True)

    if not all(x in json_obj.keys() for x in ['info', 'data']):
        raise KeyError("Missing required key(s) 'data' and/or 'info.'")

    # determine database type key in case missing
    type_key = 'type' if json_obj['info'][0].get('type') else 'db_type'

    # for each item in info, store the type and column name
    for idx, col, col_type, ignore_value in \
            [(idx, x['name'], x[type_key], "NULL") for idx, x in enumerate(json_obj['info'])]:

        # if default value is NULL, set ignore value to None
        if ignore_value == "NULL":
            ignore_value = None

        # making type adjustments: map the service's type names onto numpy/
        # python dtypes, and pick the sentinel used to stand in for None
        # ("" for strings, -999 for numerics).
        if col_type == "char" or col_type == "STRING":
            col_type = "str"
            ignore_value = "" if (ignore_value is None) else ignore_value
        elif col_type == "boolean" or col_type == "BINARY":
            col_type = "bool"
        elif col_type == "unsignedByte":
            col_type = np.ubyte
        elif col_type == "int" or col_type == "short" or col_type == "long" or col_type == "NUMBER":
            # int arrays do not admit Non/nan vals
            col_type = np.int64
            ignore_value = -999 if (ignore_value is None) else ignore_value
        elif col_type == "double" or col_type == "float" or col_type == "DECIMAL":
            # int arrays do not admit Non/nan vals
            col_type = np.float64
            ignore_value = -999 if (ignore_value is None) else ignore_value
        elif col_type == "DATETIME":
            col_type = "str"
            ignore_value = "" if (ignore_value is None) else ignore_value

        # Make the column list (don't assign final type yet or there will be errors)
        # Step through data array of values
        col_data = np.array([x[idx] for x in json_obj['data']], dtype=object)
        if ignore_value is not None:
            # Replace None cells with the sentinel so astype() below succeeds.
            col_data[np.where(np.equal(col_data, None))] = ignore_value

        # no consistent way to make the mask because np.equal fails on ''
        # and array == value fails with None
        if col_type == 'str':
            col_mask = (col_data == ignore_value)
        else:
            col_mask = np.equal(col_data, ignore_value)

        # add the column
        data_table.add_column(MaskedColumn(col_data.astype(col_type), name=col, mask=col_mask))

    return data_table
@async_to_sync
class ServiceAPI(BaseQuery):
    """
    MAST microservice API class.

    Class that allows direct programmatic access to MAST microservice APIs.
    Should be used to facilitate all microservice API queries.
    """

    def set_service_params(self, service_dict, service_name="", server_prefix=False):
        """
        Initialize the request url and available queries for a given service.

        Parameters
        ----------
        service_dict : dict
            Dictionary of available service queries in the form
            {service_name:{"path":service_path, "args":service_args}}
        service_name : str
            Name of the specific service, i.e. catalogs or tesscut
        server_prefix : bool
            Optional, default False. If true url is formed as service_name.mast.stsci.edu
            vs. the default of mast.stsci.edu/service_name
        """
        service_url = conf.server
        if server_prefix:
            service_url = service_url.replace("mast", f"{service_name}.mast")
        else:
            service_url += f"/{service_name}"

        self.REQUEST_URL = f"{service_url}/api/v0.1/"
        self.SERVICES = service_dict

    def _request(self, method, url, params=None, data=None, headers=None,
                 files=None, stream=False, auth=None, cache=False):
        """
        Override of the parent method:
        A generic HTTP request method, similar to `~requests.Session.request`

        This is a low-level method not generally intended for use by astroquery
        end-users.

        This method wraps the _request functionality to include raise_for_status
        Caching is defaulted to False but may be modified as needed
        Also parameters that allow for file download through this method are removed

        Parameters
        ----------
        method : 'GET' or 'POST'
        url : str
        params : None or dict
        data : None or dict
        headers : None or dict
        auth : None or dict
        files : None or dict
        stream : bool
            See `~requests.request`
        cache : bool
            Default False. Use of built-in _request caching

        Returns
        -------
        response : `~requests.Response`
            The response from the server.
        """
        start_time = time.time()

        response = super()._request(method, url, params=params, data=data, headers=headers,
                                    files=files, cache=cache, stream=stream, auth=auth)

        # NOTE: this detects a timeout only after the request has completed;
        # it does not abort an in-flight request.
        if (time.time() - start_time) >= self.TIMEOUT:
            raise TimeoutError("Timeout limit of {} exceeded.".format(self.TIMEOUT))

        response.raise_for_status()
        return response

    def _parse_result(self, response, verbose=False):
        """
        Parses the results of a `~requests.Response` object and returns an `~astropy.table.Table` of results.

        Parameters
        ----------
        responses : `~requests.Response`
            The response from a self._request call.
        verbose : bool
            (presently does nothing - there is no output with verbose set to
            True or False)
            Default False. Setting to True provides more extensive output.

        Returns
        -------
        response : `~astropy.table.Table`
        """
        result = response.json()
        result_table = _json_to_table(result)

        # Check for no results
        if not result_table:
            warnings.warn("Query returned no results.", NoResultsWarning)
        return result_table

    @class_or_instance
    def service_request_async(self, service, params, page_size=None, page=None, **kwargs):
        """
        Given a MAST fabric service and parameters, builds and executes a fabric microservice catalog query.
        See documentation `here <https://catalogs.mast.stsci.edu/docs/index.html>`__
        for information about how to build a MAST catalogs microservice request.

        Parameters
        ----------
        service : str
            The MAST catalogs service to query. Should be present in self.SERVICES
        params : dict
            JSON object containing service parameters.
        page_size : int, optional
            Default None.
            Can be used to override the default pagesize (set in configs) for this query only.
            E.g. when using a slow internet connection.
        page : int, optional
            Default None.
            Can be used to override the default behavior of all results being returned to obtain
            a specific page of results.
        **kwargs :
            See Catalogs.MAST properties in documentation referenced above

        Returns
        -------
        response : list of `~requests.Response`
        """
        service_config = self.SERVICES.get(service.lower())
        service_url = service_config.get('path')
        compiled_service_args = {}

        # Gather URL specific parameters
        for service_argument, default_value in service_config.get('args', {}).items():
            found_argument = params.pop(service_argument, None)
            if found_argument is None:
                found_argument = kwargs.pop(service_argument, default_value)
            # NOTE(review): assumes every URL argument value is a string
            # (.lower() would fail on e.g. an int) — confirm with callers.
            compiled_service_args[service_argument] = found_argument.lower()

        request_url = self.REQUEST_URL + service_url.format(**compiled_service_args)
        headers = {
            'User-Agent': self._session.headers['User-Agent'],
            'Content-type': 'application/x-www-form-urlencoded',
            'Accept': 'application/json'
        }

        # Params as a list of tuples to allow for multiple parameters added
        catalogs_request = []
        if not page:
            page = params.pop('page', None)
        if not page_size:
            page_size = params.pop('page_size', None)

        if page is not None:
            catalogs_request.append(('page', page))
        if page_size is not None:
            catalogs_request.append(('pagesize', page_size))

        # Decompose filters, sort
        for prop, value in kwargs.items():
            params[prop] = value
        catalogs_request.extend(self._build_catalogs_params(params))
        response = self._request('POST', request_url, data=catalogs_request, headers=headers)
        return response

    def _build_catalogs_params(self, params):
        """
        Gathers parameters for Catalogs.MAST usage and translates to valid API syntax tuples

        Parameters
        ----------
        params: dict
            A dict of parameters to convert into valid API syntax. Will omit the "format" parameter

        Returns
        -------
        response : list(tuple)
            List of tuples representing API syntax parameters
        """
        catalog_params = []
        for prop, value in params.items():
            if prop == 'format':
                # Ignore format changes
                continue
            elif prop == 'page_size':
                # BUG FIX: `extend` flattened the pair into two scalar list
                # entries ('pagesize', value); the result must stay a list
                # of (key, value) tuples for form encoding.
                catalog_params.append(('pagesize', value))
            elif prop == 'sort_by':
                # Loop through each value if list
                if isinstance(value, list):
                    for sort_item in value:
                        # Determine if tuple with sort direction
                        if isinstance(sort_item, tuple):
                            catalog_params.append(('sort_by', sort_item[1] + '.' + sort_item[0]))
                        else:
                            catalog_params.append(('sort_by', sort_item))
                else:
                    # A single sort
                    # Determine if tuple with sort direction
                    if isinstance(value, tuple):
                        catalog_params.append(('sort_by', value[0] + '.' + value[1]))
                    else:
                        catalog_params.append(('sort_by', value))
            elif prop == 'columns':
                catalog_params.extend(tuple(('columns', col) for col in value))
            else:
                if isinstance(value, list):
                    # A composed list of multiple filters for a single column
                    # Extract each filter value in list
                    for filter_value in value:
                        # Determine if tuple with filter decorator
                        if isinstance(filter_value, tuple):
                            catalog_params.append((prop + '.' + filter_value[0], filter_value[1]))
                        else:
                            # Otherwise just append the value without a decorator
                            catalog_params.append((prop, filter_value))
                else:
                    catalog_params.append((prop, value))

        return catalog_params

    def check_catalogs_criteria_params(self, criteria):
        """
        Tests a dict of passed criteria for Catalogs.MAST to ensure that at least one parameter is for a given criteria

        Parameters
        ----------
        criteria: dict
            A dict of parameters to test for at least one criteria parameter

        Returns
        -------
        response : boolean
            Whether the passed dict has at least one criteria parameter
        """
        # Keys that only control paging/output, not row selection.
        non_criteria_params = ["columns", "sort_by", "page_size", "pagesize", "page"]
        return any(key not in non_criteria_params for key in criteria)
| [
2,
49962,
739,
257,
513,
12,
565,
682,
347,
10305,
3918,
5964,
532,
766,
38559,
24290,
13,
81,
301,
198,
37811,
198,
44,
11262,
4527,
30416,
7824,
198,
4770,
50155,
198,
198,
1212,
8265,
4909,
2972,
5050,
329,
42517,
1112,
337,
11262,... | 2.346676 | 5,446 |
import numpy as np
##
## use in week 2, 4 and 5
##
def get_rss(model, data, y):
    """Return the residual sum of squares of *model* on (*data*, *y*).

    Runs model.predict(data), subtracts from the targets y, and sums the
    squared residuals.
    """
    predictions = model.predict(data)
    residuals = y - predictions
    return (residuals * residuals).sum()
| [
11748,
299,
32152,
355,
45941,
198,
198,
2235,
198,
2235,
779,
287,
1285,
362,
11,
604,
290,
642,
198,
2235,
198,
198,
4299,
651,
62,
42216,
7,
19849,
11,
1366,
11,
331,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
651,
17... | 2.457143 | 140 |
#! /usr/bin/python
import cv2
import sys
import os
import ntpath
# CLI: binarize the image given as argv[1], writing "bin_<name>" next to it.
f = sys.argv[1]  # input image path
print('Binarizing ',f)
name = ntpath.basename(f)
dir = os.path.dirname(f)
# Load as single-channel grayscale.
im_gray = cv2.imread(f, cv2.IMREAD_GRAYSCALE)
#THRESH_BINARY_INV
if(len(sys.argv) > 2):
    # Any extra argument selects a fixed-threshold inverted binarization.
    thresh = 130
    im_bw = cv2.threshold(im_gray, thresh, 255, cv2.THRESH_BINARY_INV)[1]
else:
    # Default: Otsu's method picks the threshold automatically (the 128 is
    # ignored when THRESH_OTSU is set).
    (thresh, im_bw) = cv2.threshold(im_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
cv2.imwrite(dir + '/bin_' + name, im_bw)
2,
0,
1220,
14629,
14,
8800,
14,
29412,
198,
11748,
269,
85,
17,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
299,
83,
6978,
198,
198,
69,
796,
25064,
13,
853,
85,
58,
16,
60,
198,
4798,
10786,
33,
22050,
2890,
46083,
69,
8,
... | 1.912 | 250 |
__author__ = 'Williams Mendez'
| [
834,
9800,
834,
796,
705,
27869,
20442,
8471,
6,
198,
220,
220
] | 2.75 | 12 |
# Generating Tables (Latex)
| [
2,
2980,
803,
33220,
357,
26302,
87,
8,
198
] | 3.111111 | 9 |
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction import DictVectorizer
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import MultinomialNB
# NOTE(review): `features` must be defined earlier in the original file (a
# per-name feature extractor) — it is not visible in this chunk; confirm.
features = np.vectorize(features)

# Load the name/sex dataset and clean it up.
baby= pd.read_csv("data/name.csv")
baby.columns=['name','sex']
baby=baby.drop_duplicates()
# Coerce the label column to numeric; unparseable values become NaN.
baby.sex = pd.to_numeric(baby.sex, errors='coerce')
baby.head()

# Feature dicts from names; labels are the sex column.
df_X=features(baby.name)
df_y = baby.sex

# 80/20 train/test split, one-hot encode the feature dicts, then fit a
# multinomial naive Bayes classifier.
dfX_train,dfX_test,dfy_train,dfy_test = train_test_split(df_X,df_y,test_size = 0.2)
dv = DictVectorizer()
dv.fit_transform(dfX_train)

dclf = MultinomialNB()
my_xfeatures = dv.transform(dfX_train)
dclf.fit(my_xfeatures,dfy_train)

# Sample male/female-associated words for spot-checking predictions.
mnames=["king","sir","lord","prince"]
fnames=["queen","lady","princess","nurse"]
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
30053,
62,
2302,
7861,
13,
5239,
1330,
2764,
38469,
7509,
198,
6738,
1341,
35720,
13,
30053,
62,
2302,
7861,
1330,
360,
713,
38469,
7509,
... | 2.616099 | 323 |
# from http.client import OK
# from urllib import request, response
# import frappe
# import requests
# import json
# from erpnext.selling.doctype.sales_order.sales_order import make_sales_invoice
# import os
# @frappe.whitelist(allow_guest=True)
# def webhook(*args,**kwargs):
# from dotenv import load_dotenv
# load_dotenv()
# data=json.loads(frappe.request.data)
# head=frappe.request.headers['x-notification-secret']
# print(head)
# secret=os.environ.get('secret')
# print(secret)
# if head=='CA30951A5324FCCC66EFE9C4890E93A5':
# data=json.loads(frappe.request.data)
# doc=frappe.new_doc('Webhook Capture')
# doc.webhook_response=str(data)
# print(doc.webhook_response)
# doc.insert(ignore_permissions=True)
# doc.save(ignore_permissions=True)
# print(doc)
# data=json.loads(frappe.request.data)
# print(data)
# status=data.get('result')
# order_id=data.get('id')
# amount=data.get('amount')
# pay_req=frappe.get_doc('Payment Request',order_id)
# reference_doc_id=pay_req.get('reference_name')
# reference_doc=pay_req.get('reference_doctype')
# sales_order_doc=frappe.get_doc(reference_doc,reference_doc_id)
# total=sales_order_doc.get('total')
# if status=='SUCCESS':
# invoice= make_sales_invoice(source_name=reference_doc_id,ignore_permissions=True)
# invoice.submit()
# return invoice
# # def invoice_testing(source_name,test_id):
# # if test_id==1:
# # x=make_sales_invoice(source_name)
# # x.submit()
# # return x
| [
2,
422,
2638,
13,
16366,
1330,
7477,
198,
2,
422,
2956,
297,
571,
1330,
2581,
11,
2882,
198,
2,
1330,
5306,
27768,
198,
2,
1330,
7007,
198,
2,
1330,
33918,
198,
2,
422,
1931,
79,
19545,
13,
16473,
13,
4598,
310,
2981,
13,
82,
20... | 2.113493 | 793 |
# -*- coding: utf-8 -*-
"""Tests for the `Pw2gwCalculation` class."""
from aiida import orm
from aiida.common import datastructures
from aiida_quantumespresso.utils.resources import get_default_options
def test_pw_default(
    aiida_profile, fixture_localhost, fixture_sandbox, fixture_code, generate_calc_job, generate_remote_data, tmpdir,
    file_regression
):
    """Test a default `Pw2gwCalculation`.

    Builds the calculation inputs (INPUTPP parameters plus a remote parent
    folder from a `pw.x` run), generates the calc job in a sandbox, and
    checks the retrieve list, the sandbox contents, and the rendered input
    file against the stored regression fixture.
    """
    entry_point_name = 'quantumespresso.pw2gw'

    # pw2gw.x INPUTPP namelist: dielectric function over 0-15 eV in 1 meV steps.
    parameters = {
        'INPUTPP': {
            'qplda': False,
            'vxcdiag': False,
            'vkb': False,
            'Emin': 0.0,
            'Emax': 15.0,
            'DeltaE': 0.001,
        }
    }

    # Remote folder standing in for the parent pw.x calculation's outputs.
    parent = generate_remote_data(
        fixture_localhost,
        str(tmpdir),
        'quantumespresso.pw',
    )

    inputs = {
        'code': fixture_code(entry_point_name),
        'parameters': orm.Dict(dict=parameters),
        'parent_folder': parent,
        'metadata': {
            'options': get_default_options()
        }
    }

    calc_info = generate_calc_job(fixture_sandbox, entry_point_name, inputs)
    # Expect the log plus the three polarization components and the total.
    retrieve_list = ['aiida.out', 'epsX.dat', 'epsY.dat', 'epsZ.dat', 'epsTOT.dat']

    assert isinstance(calc_info, datastructures.CalcInfo)
    assert sorted(calc_info.retrieve_list) == sorted(retrieve_list)

    with fixture_sandbox.open('aiida.in') as handle:
        input_written = handle.read()

    # Checks on the files written to the sandbox folder as raw input
    assert sorted(fixture_sandbox.get_content_list()) == sorted(['aiida.in'])
    file_regression.check(input_written, encoding='utf-8', extension='.in')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
51,
3558,
329,
262,
4600,
47,
86,
17,
70,
86,
9771,
14902,
63,
1398,
526,
15931,
198,
6738,
257,
72,
3755,
1330,
393,
76,
198,
6738,
257,
72,
3755,
13,
11321,... | 2.267684 | 721 |
# Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logging import getLogger
from typing import List, Union, Tuple, Iterable
import keras.layers as kl
import keras.optimizers as ko
import keras.regularizers as kreg
from keras import Model
from deeppavlov.core.common.registry import register
from deeppavlov.core.data.vocab import DefaultVocabulary
from deeppavlov.core.models.keras_model import KerasWrapper
from .cells import Highway
from .common_tagger import *
log = getLogger(__name__)
MAX_WORD_LENGTH = 30
class CharacterTagger:
"""A class for character-based neural morphological tagger
Parameters:
symbols: character vocabulary
tags: morphological tags vocabulary
word_rnn: the type of character-level network (only `cnn` implemented)
char_embeddings_size: the size of character embeddings
char_conv_layers: the number of convolutional layers on character level
char_window_size: the width of convolutional filter (filters).
It can be a list if several parallel filters are applied, for example, [2, 3, 4, 5].
char_filters: the number of convolutional filters for each window width.
It can be a number, a list (when there are several windows of different width
on a single convolution layer), a list of lists, if there
are more than 1 convolution layers, or **None**.
If **None**, a layer with width **width** contains
min(**char_filter_multiple** * **width**, 200) filters.
char_filter_multiple: the ratio between filters number and window width
char_highway_layers: the number of highway layers on character level
conv_dropout: the ratio of dropout between convolutional layers
highway_dropout: the ratio of dropout between highway layers,
intermediate_dropout: the ratio of dropout between convolutional
and highway layers on character level
lstm_dropout: dropout ratio in word-level LSTM
word_vectorizers: list of parameters for additional word-level vectorizers,
for each vectorizer it stores a pair of vectorizer dimension and
the dimension of the corresponding word embedding
word_lstm_layers: the number of word-level LSTM layers
word_lstm_units: hidden dimensions of word-level LSTMs
word_dropout: the ratio of dropout before word level (it is applied to word embeddings)
regularizer: l2 regularization parameter
verbose: the level of verbosity
"""
@property
def symbols_number_(self) -> int:
"""Character vocabulary size
"""
return len(self.symbols)
@property
def tags_number_(self) -> int:
"""Tag vocabulary size
"""
return len(self.tags)
def build(self):
"""Builds the network using Keras.
"""
word_inputs = kl.Input(shape=(None, MAX_WORD_LENGTH+2), dtype="int32")
inputs = [word_inputs]
word_outputs = self._build_word_cnn(word_inputs)
if len(self.word_vectorizers) > 0:
additional_word_inputs = [kl.Input(shape=(None, input_dim), dtype="float32")
for input_dim, dense_dim in self.word_vectorizers]
inputs.extend(additional_word_inputs)
additional_word_embeddings = [kl.Dense(dense_dim)(additional_word_inputs[i])
for i, (_, dense_dim) in enumerate(self.word_vectorizers)]
word_outputs = kl.Concatenate()([word_outputs] + additional_word_embeddings)
outputs, lstm_outputs = self._build_basic_network(word_outputs)
compile_args = {"optimizer": ko.nadam(lr=0.002, clipnorm=5.0),
"loss": "categorical_crossentropy", "metrics": ["accuracy"]}
self.model_ = Model(inputs, outputs)
self.model_.compile(**compile_args)
if self.verbose > 0:
self.model_.summary(print_fn=log.info)
return self
def _build_word_cnn(self, inputs):
"""Builds word-level network
"""
inputs = kl.Lambda(kb.one_hot, arguments={"num_classes": self.symbols_number_},
output_shape=lambda x: tuple(x) + (self.symbols_number_,))(inputs)
char_embeddings = kl.Dense(self.char_embeddings_size, use_bias=False)(inputs)
conv_outputs = []
self.char_output_dim_ = 0
for window_size, filters_number in zip(self.char_window_size, self.char_filters):
curr_output = char_embeddings
curr_filters_number = (min(self.char_filter_multiple * window_size, 200)
if filters_number is None else filters_number)
for _ in range(self.char_conv_layers - 1):
curr_output = kl.Conv2D(curr_filters_number, (1, window_size),
padding="same", activation="relu",
data_format="channels_last")(curr_output)
if self.conv_dropout > 0.0:
curr_output = kl.Dropout(self.conv_dropout)(curr_output)
curr_output = kl.Conv2D(curr_filters_number, (1, window_size),
padding="same", activation="relu",
data_format="channels_last")(curr_output)
conv_outputs.append(curr_output)
self.char_output_dim_ += curr_filters_number
if len(conv_outputs) > 1:
conv_output = kl.Concatenate(axis=-1)(conv_outputs)
else:
conv_output = conv_outputs[0]
highway_input = kl.Lambda(kb.max, arguments={"axis": -2})(conv_output)
if self.intermediate_dropout > 0.0:
highway_input = kl.Dropout(self.intermediate_dropout)(highway_input)
for i in range(self.char_highway_layers - 1):
highway_input = Highway(activation="relu")(highway_input)
if self.highway_dropout > 0.0:
highway_input = kl.Dropout(self.highway_dropout)(highway_input)
highway_output = Highway(activation="relu")(highway_input)
return highway_output
def _build_basic_network(self, word_outputs):
"""
Creates the basic network architecture,
transforming word embeddings to intermediate outputs
"""
if self.word_dropout > 0.0:
lstm_outputs = kl.Dropout(self.word_dropout)(word_outputs)
else:
lstm_outputs = word_outputs
for j in range(self.word_lstm_layers-1):
lstm_outputs = kl.Bidirectional(
kl.LSTM(self.word_lstm_units[j], return_sequences=True,
dropout=self.lstm_dropout))(lstm_outputs)
lstm_outputs = kl.Bidirectional(
kl.LSTM(self.word_lstm_units[-1], return_sequences=True,
dropout=self.lstm_dropout))(lstm_outputs)
pre_outputs = kl.TimeDistributed(
kl.Dense(self.tags_number_, activation="softmax",
activity_regularizer=self.regularizer),
name="p")(lstm_outputs)
return pre_outputs, lstm_outputs
def train_on_batch(self, data: List[Iterable], labels: Iterable[list]) -> None:
"""Trains model on a single batch
Args:
data: a batch of word sequences
labels: a batch of correct tag sequences
Returns:
the trained model
"""
X, Y = self._transform_batch(data, labels)
self.model_.train_on_batch(X, Y)
def predict_on_batch(self, data: Union[list, tuple],
return_indexes: bool = False) -> List[List[str]]:
"""
Makes predictions on a single batch
Args:
data: a batch of word sequences together with additional inputs
return_indexes: whether to return tag indexes in vocabulary or tags themselves
Returns:
a batch of label sequences
"""
X = self._transform_batch(data)
objects_number, lengths = len(X[0]), [len(elem) for elem in data[0]]
Y = self.model_.predict_on_batch(X)
labels = np.argmax(Y, axis=-1)
answer: List[List[str]] = [None] * objects_number
for i, (elem, length) in enumerate(zip(labels, lengths)):
elem = elem[:length]
answer[i] = elem if return_indexes else self.tags.idxs2toks(elem)
return answer
def _make_sent_vector(self, sent: List, bucket_length: int =None) -> np.ndarray:
"""Transforms a sentence to Numpy array, which will be the network input.
Args:
sent: input sentence
bucket_length: the width of the bucket
Returns:
A 3d array, answer[i][j][k] contains the index of k-th letter
in j-th word of i-th input sentence.
"""
bucket_length = bucket_length or len(sent)
answer = np.zeros(shape=(bucket_length, MAX_WORD_LENGTH+2), dtype=np.int32)
for i, word in enumerate(sent):
answer[i, 0] = self.tags.tok2idx("BEGIN")
m = min(len(word), MAX_WORD_LENGTH)
for j, x in enumerate(word[-m:]):
answer[i, j+1] = self.symbols.tok2idx(x)
answer[i, m+1] = self.tags.tok2idx("END")
answer[i, m+2:] = self.tags.tok2idx("PAD")
return answer
def _make_tags_vector(self, tags, bucket_length=None) -> np.ndarray:
"""Transforms a sentence of tags to Numpy array, which will be the network target.
Args:
tags: input sentence of tags
bucket_length: the width of the bucket
Returns:
A 2d array, answer[i][j] contains the index of j-th tag in i-th input sentence.
"""
bucket_length = bucket_length or len(tags)
answer = np.zeros(shape=(bucket_length,), dtype=np.int32)
for i, tag in enumerate(tags):
answer[i] = self.tags.tok2idx(tag)
return answer
def save(self, outfile) -> None:
"""Saves model weights to a file
Args:
outfile: file with model weights (other model components should be given in config)
"""
self.model_.save_weights(outfile)
def load(self, infile) -> None:
"""Loads model weights from a file
Args:
infile: file to load model weights from
"""
self.model_.load_weights(infile)
@register("morpho_tagger")
class MorphoTagger(KerasWrapper):
"""
A wrapper over :class:`CharacterTagger`.
It is inherited from :class:`~deeppavlov.core.keras_model.KerasWrapper`.
It accepts initialization parameters of :class:`CharacterTagger`
""" | [
2,
15069,
2177,
47986,
27862,
290,
10766,
18252,
2248,
11,
337,
4061,
51,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 2.273148 | 4,968 |
from distutils.core import setup
setup(
name="turkce_isimler",
packages=["turkce_isimler"],
version="2.0",
license="MIT",
description="Bu paket ile rastgele Türkçe kadın ve erkek isimleri üretebilirsiniz.",
author="Yasin Balcancı, Emre Sülün",
author_email="ybalcanci@gmail.com",
url="https://github.com/ybalcanci/turkce_isimler",
download_url='https://github.com/ybalcanci/turkce_isimler/archive/2.0.tar.gz',
keywords=["TURKISH", "MOCK-DATA", "İSİM"],
install_requires=[],
)
| [
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
40406,
7,
198,
220,
220,
220,
1438,
2625,
36590,
74,
344,
62,
271,
320,
1754,
1600,
198,
220,
220,
220,
10392,
28,
14692,
36590,
74,
344,
62,
271,
320,
1754,
33116,
198,
220,
220,
220,
... | 2.20339 | 236 |
import os
APP_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
default_app_config = 'selia.apps.SeliaConfig'
| [
11748,
28686,
628,
198,
24805,
62,
17931,
23988,
15513,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
397,
2777,
776,
7,
834,
7753,
834,
4008,
198,
12286,
62,
1324,
62,
11250,
796,
705,
741,
544,
13,
18211,
13,
50,
... | 2.489362 | 47 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'tagging.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
#
url(r'^$', include('accounts.urls')),
url(r'^accounts/', include('accounts.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^home/', include('design.urls')),
url(r'^system/', include('system.urls')),
)
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
7572,
11,
2291,
11,
19016,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
28482,
13,
2306,
375,
29392,
3419,
198,
198,
6371,
33279,
82,
796,
7572,
10786,
3256,
198,
22... | 2.382353 | 204 |
import numba
import numpy as np
@numba.jit(nopython=True, cache=True)
@numba.jit(nopython=True, cache=True)
@numba.jit(nopython=True, cache=True)
@numba.jit(nopython=True, cache=True)
@numba.jit(nopython=True, cache=True)
@numba.jit(nopython=True, cache=True)
@numba.jit(nopython=True, cache=True)
@numba.jit(nopython=True, cache=True)
| [
11748,
997,
7012,
198,
11748,
299,
32152,
355,
45941,
628,
198,
31,
77,
2178,
64,
13,
45051,
7,
77,
404,
7535,
28,
17821,
11,
12940,
28,
17821,
8,
628,
198,
31,
77,
2178,
64,
13,
45051,
7,
77,
404,
7535,
28,
17821,
11,
12940,
28... | 2.179012 | 162 |
#!/usr/bin/env python3
from Afvaldienst import Afvaldienst
#trash = Afvaldienst('mijnafvalwijzer', '', '5146EG', '2', '', 'false', 'Geen')
#trash = Afvaldienst('mijnafvalwijzer', '6851GJ', '2', '', 'false')
#trash = Afvaldienst('mijnafvalwijzer', '3911CX', '178', '', 'false')
#trash = Afvaldienst('mijnafvalwijzer', '1906KD', '17', '', 'false')
#trash = Afvaldienst('mijnafvalwijzer', '3863AT', '27', '', 'false')
trash = Afvaldienst('mijnafvalwijzer', '5018EG', '1', '', 'false')
#print(trash.trash_raw_json)
#print("\n")
print(trash.trash_schedule)
print("\n")
print(trash.trash_schedule_custom)
print("\n")
print(trash.trash_types)
print("\n")
print(trash.trash_types_from_schedule)
print("\n")
# print(trash.trash_json)
# print("\n")
# TEST_DATA
# self.date_today = '2020-09-17'
# json_data = [{"nameType":"gft","type":"gft","date":"2020-01-03"},{"nameType":"pmd","type":"pmd","date":"2020-01-09"},{"nameType":"restafval","type":"restafval","date":"2020-01-10"},{"nameType":"kerstbomen","type":"kerstbomen","date":"2020-01-11"},{"nameType":"restafval","type":"restafval","date":"2020-07-24"},{"nameType":"gft","type":"gft","date":"2020-07-31"},{"nameType":"pmd","type":"pmd","date":"2020-08-06"},{"nameType":"gft","type":"gft","date":"2020-08-14"},{"nameType":"papier","type":"papier","date":"2020-08-19"},{"nameType":"pmd","type":"pmd","date":"2020-08-20"},{"nameType":"restafval","type":"restafval","date":"2020-08-21"},{"nameType":"gft","type":"gft","date":"2020-08-28"},{"nameType":"pmd","type":"pmd","date":"2020-09-03"},{"nameType":"gft","type":"gft","date":"2020-09-11"},{"nameType":"papier","type":"papier","date":"2020-09-17"},{"nameType":"pmd","type":"pmd","date":"2020-09-17"},{"nameType":"restafval","type":"restafval","date":"2020-09-18"},{"nameType":"gft","type":"gft","date":"2020-09-25"},{"nameType":"pmd","type":"pmd","date":"2020-10-01"},{"nameType":"gft","type":"gft","date":"2020-10-09"},{"nameType":"pmd","type":"pmd","date":"2020-10-15"},{"nameType":"restafval","type":"restafval","date":"2020-10-16"},{"nameType":"papier","type":"papier","date":"2020-10-21"},{"nameType":"gft","type":"gft","date":"2020-10-23"},{"nameType":"pmd","type":"pmd","date":"2020-10-29"},{"nameType":"gft","type":"gft","date":"2020-11-06"},{"nameType":"pmd","type":"pmd","date":"2020-11-12"},{"nameType":"restafval","type":"restafval","date":"2020-11-13"},{"nameType":"papier","type":"papier","date":"2020-11-18"},{"nameType":"gft","type":"gft","date":"2020-11-20"},{"nameType":"pmd","type":"pmd","date":"2020-11-26"},{"nameType":"gft","type":"gft","date":"2020-12-04"},{"nameType":"pmd","type":"pmd","date":"2020-12-10"},{"nameType":"restafval","type":"restafval","date":"2020-12-11"},{"nameType":"papier","type":"papier","date":"2020-12-16"},{"nameType":"gft","type":"gft","date"
:"2020-12-18"},{"nameType":"pmd","type":"pmd","date":"2020-12-24"}]
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
2483,
85,
1940,
2013,
301,
1330,
2483,
85,
1940,
2013,
301,
198,
198,
2,
2213,
1077,
796,
2483,
85,
1940,
2013,
301,
10786,
76,
48848,
1878,
2100,
86,
2926,
9107,
3256... | 2.58552 | 1,105 |
from flask import Flask,request, abort,jsonify
from datetime import datetime
import rfc3339
from apscheduler.schedulers.background import BackgroundScheduler
from flask_cors import CORS, cross_origin
import random
from influxdb import InfluxDBClient
#app init for flask
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secrejslfwfsfsnjuettoeepwlslsldsmc,saala'
cors = CORS(app)
uri = 'https+influxdb://avnadmin:pbc584brpvkjnfj@influx1-nine6959-eg8c.aivencloud.com:13986/mydb' #get influx uri and insert here. Current value is obsolete
client = InfluxDBClient.from_dsn(uri, timeout=3.0, ssl=True)
#set up functions
def time_logger():
"""
Generates a random number every X minutes and saves to influx db
Inputs:
None
Returns:
None
"""
number = random.randint(0,100)
rfc_time = rfc3339.rfc3339(datetime.now(), utc=True, use_system_timezone=False)
json_body = [
{
"measurement": "randomdata",
"tags": {
"source": "randomgenerator",
},
"time": rfc_time,
"fields": {
"level": number
}
}
]
client.write_points(json_body)
#Running the task every X mins
scheduler = BackgroundScheduler(daemon=True)
scheduler.start()
main_task = scheduler.add_job(time_logger,'interval',seconds=10)
#serve data from the db. Demo route
@app.route('/get-data/<date>', methods=['GET'])
if __name__=='__main__':
app.run(port=5007, debug=True)
| [
6738,
42903,
1330,
46947,
11,
25927,
11,
15614,
11,
17752,
1958,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
374,
16072,
2091,
2670,
198,
6738,
257,
862,
1740,
18173,
13,
1416,
704,
377,
364,
13,
25249,
1330,
25353,
50,
1740,
... | 2.320493 | 649 |
from utils import load_yaml_file
inputs = {"Number": Number, "Flag": Flag}
data = load_yaml_file('data.yaml')
#print(data)
for key in data:
print(data[key])
if data[key] in inputs:
data[key] = inputs[key]
print(data) | [
6738,
3384,
4487,
1330,
3440,
62,
88,
43695,
62,
7753,
628,
628,
628,
198,
15414,
82,
796,
19779,
15057,
1298,
7913,
11,
366,
34227,
1298,
19762,
92,
628,
198,
7890,
796,
3440,
62,
88,
43695,
62,
7753,
10786,
7890,
13,
88,
43695,
11... | 2.392157 | 102 |
import sys
from autoprocess.engine.process import DataSet
from autoprocess.utils import log, xdsio
logger = log.get_module_logger('auto.process')
| [
11748,
25064,
198,
6738,
22320,
305,
919,
13,
18392,
13,
14681,
1330,
6060,
7248,
198,
6738,
22320,
305,
919,
13,
26791,
1330,
2604,
11,
2124,
9310,
952,
198,
198,
6404,
1362,
796,
2604,
13,
1136,
62,
21412,
62,
6404,
1362,
10786,
237... | 3.170213 | 47 |
# SPDX-License-Identifier: Apache-2.0
# Licensed to the Ed-Fi Alliance under one or more agreements.
# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.
# See the LICENSE and NOTICES files in the project root for more information.
from edfi_performance.api.client import EdFiAPIClient
| [
171,
119,
123,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
2,
49962,
284,
262,
1717,
12,
10547,
10302,
739,
530,
393,
517,
11704,
13,
198,
2,
383,
1717,
12,
10547,
10302,
16625,
428,
2393,
284,
345,
... | 3.538462 | 91 |
import os
from artificial_bias_experiments.known_prop_scores.dataset_generation_file_naming import \
get_root_dir_experiment_known_propensity_scores, get_root_dir_images_known_prop_scores, get_pca_token
from artificial_bias_experiments.known_prop_scores.sar_two_subject_groups.experiment_info import \
KnownPropScoresSARExperimentInfo
| [
11748,
28686,
198,
198,
6738,
11666,
62,
65,
4448,
62,
23100,
6800,
13,
4002,
62,
22930,
62,
1416,
2850,
13,
19608,
292,
316,
62,
20158,
62,
7753,
62,
77,
3723,
1330,
3467,
198,
220,
220,
220,
651,
62,
15763,
62,
15908,
62,
23100,
... | 2.875 | 120 |
"""
This module implements a :class:`.Group` intended to hold multiple
parameters that are to be gotten and set by the same command. The parameters
should be of type :class:`GroupParameter`
"""
from collections import OrderedDict
from typing import List, Union, Callable, Dict, Any, Optional
from qcodes.instrument.parameter import Parameter, ParamRawDataType
from qcodes import Instrument
class GroupParameter(Parameter):
"""
Group parameter is a :class:`.Parameter`, whose value can be set or get
only with other group parameters. This happens when an instrument
has commands which set and get more than one parameter per call.
The ``set_raw`` method of a group parameter forwards the call to the
group, and the group then makes sure that the values of other parameters
within the group are left unchanged. The ``get_raw`` method of a group
parameter also forwards the call to the group, and the group makes sure
that the command output is parsed correctly, and the value of the
parameter of interest is returned.
After initialization, the group parameters need to be added to a group.
See :class:`.Group` for more information.
Args:
name: Name of the parameter.
instrument: Instrument that this parameter belongs to; this instrument
is used by the group to call its get and set commands.
initial_value: Initial value of the parameter. Note that either none or
all of the parameters in a :class:`.Group` should have an initial
value.
**kwargs: All kwargs used by the :class:`.Parameter` class, except
``set_cmd`` and ``get_cmd``.
"""
class Group:
"""
The group combines :class:`.GroupParameter` s that are to be gotten or set
via the same command. The command has to be a string, for example,
a VISA command.
The :class:`Group`'s methods are used within :class:`GroupParameter` in
order to properly implement setting and getting of a single parameter in
the situation where one command sets or gets more than one parameter.
The command used for setting values of parameters has to be a format
string which contains the names of the parameters the group has been
initialized with. For example, if a command has syntax ``CMD a_value,
b_value``, where ``a_value`` and ``b_value`` are values of two parameters
with names ``a`` and ``b``, then the command string has to be ``CMD {a},
{b}``, and the group has to be initialized with two ``GroupParameter`` s
``a_param`` and ``b_param``, where ``a_param.name=="a"`` and
``b_param.name=="b"``.
**Note** that by default, it is assumed that the command used for getting
values returns a comma-separated list of values of parameters, and their
order corresponds to the order of :class:`.GroupParameter` s in the list
that is passed to the :class:`Group`'s constructor. Through keyword
arguments of the :class:`Group`'s constructor, it is possible to change
the separator, and even the parser of the output of the get command.
The get and set commands are called via the instrument that the first
parameter belongs to. It is assumed that all the parameters within the
group belong to the same instrument.
Example:
::
class InstrumentWithGroupParameters(VisaInstrument):
def __init__(self, name, address, **kwargs):
super().__init__(name, address, **kwargs)
...
# Here is how group of group parameters is defined for
# a simple case of an example "SGP" command that sets and gets
# values of "enabled" and "gain" parameters (it is assumed that
# "SGP?" returns the parameter values as comma-separated list
# "enabled_value,gain_value")
self.add_parameter('enabled',
label='Enabled',
val_mapping={True: 1, False: 0},
parameter_class=GroupParameter)
self.add_parameter('gain',
label='Some gain value',
get_parser=float,
parameter_class=GroupParameter)
self.output_group = Group([self.enabled, self.gain],
set_cmd='SGP {enabled}, {gain}',
get_cmd='SGP?')
...
Args:
parameters: a list of :class:`.GroupParameter` instances which have
to be gotten and set via the same command; the order of
parameters in the list should correspond to the order of the
values returned by the ``get_cmd``.
set_cmd: Format string of the command that is used for setting the
valueS of the parameters; for example, ``CMD {a}, {b}``.
get_cmd: String of the command that is used for getting the values
of the parameters; for example, ``CMD?``.
separator: A separator that is used when parsing the output of the
``get_cmd`` in order to obtain the values of the parameters; it
is ignored in case a custom ``get_parser`` is used.
get_parser: A callable with a single string argument that is used to
parse the output of the ``get_cmd``; the callable has to return a
dictionary where parameter names are keys, and the values are the
values (as directly obtained from the output of the get command;
note that parsers within the parameters will take care of
individual parsing of their values).
"""
def _separator_parser(self, separator: str
) -> Callable[[str], Dict[str, Any]]:
"""A default separator-based string parser"""
return parser
def set(self, set_parameter: GroupParameter, value: Any) -> None:
"""
Sets the value of the given parameter within a group to the given
value by calling the ``set_cmd``.
Args:
set_parameter: The parameter within the group to set.
value: The new value for this parameter.
"""
if any((p.get_latest() is None) for p in self.parameters.values()):
self.update()
calling_dict = {name: p.raw_value
for name, p in self.parameters.items()}
calling_dict[set_parameter.name] = value
self._set_from_dict(calling_dict)
def _set_from_dict(self, calling_dict: Dict[str, Any]) -> None:
"""
Use ``set_cmd`` to parse a dict that maps parameter names to parameter
values, and actually perform setting the values.
"""
if self.set_cmd is None:
raise RuntimeError("Calling set but no `set_cmd` defined")
command_str = self.set_cmd.format(**calling_dict)
if self.instrument is None:
raise RuntimeError("Trying to set GroupParameter not attached "
"to any instrument.")
self.instrument.write(command_str)
def update(self) -> None:
"""
Update the values of all the parameters within the group by calling
the ``get_cmd``.
"""
if self.instrument is None:
raise RuntimeError("Trying to update GroupParameter not attached "
"to any instrument.")
ret = self.get_parser(self.instrument.ask(self.get_cmd))
for name, p in list(self.parameters.items()):
p.get(parameter_value=ret[name])
| [
37811,
198,
1212,
8265,
23986,
257,
1058,
4871,
25,
44646,
13247,
63,
5292,
284,
1745,
3294,
198,
17143,
7307,
326,
389,
284,
307,
7891,
290,
900,
416,
262,
976,
3141,
13,
383,
10007,
198,
21754,
307,
286,
2099,
1058,
4871,
25,
63,
... | 2.567505 | 3,022 |
from morsecode_dict import MORSE_CODE_DICT
# This function takes the text message and returns the converted morse code.
# This function takes the morse code cipher and returns the text message.
| [
6738,
2146,
2363,
1098,
62,
11600,
1330,
35208,
5188,
62,
34,
16820,
62,
35,
18379,
198,
198,
2,
770,
2163,
2753,
262,
2420,
3275,
290,
5860,
262,
11513,
2146,
325,
2438,
13,
628,
198,
2,
770,
2163,
2753,
262,
2146,
325,
2438,
38012... | 3.94 | 50 |
# Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from language.python import program, ast
import util
import analysis.cpasignature
import util.python.calling
import util.canonical
CanonicalObject = util.canonical.CanonicalObject
from analysis.storegraph import extendedtypes
from analysis.storegraph import storegraph
###########################
### Evaluation Contexts ###
###########################
| [
2,
15069,
2813,
20320,
43050,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 4.030172 | 232 |
from typing import Dict, List, Tuple, Union
__all__ = ("JsonValue",)
# +-------------------+---------------+
# | Python | JSON |
# +===================+===============+
# | dict | object |
# +-------------------+---------------+
# | list, tuple | array |
# +-------------------+---------------+
# | str | string |
# +-------------------+---------------+
# | int, float | number |
# +-------------------+---------------+
# | True | true |
# +-------------------+---------------+
# | False | false |
# +-------------------+---------------+
# | None | null |
# +-------------------+---------------+
JsonValue = Union[
Dict[str, "JsonValue"],
List["JsonValue"],
Tuple["JsonValue", ...],
str,
int,
float,
bool,
None,
]
| [
6738,
19720,
1330,
360,
713,
11,
7343,
11,
309,
29291,
11,
4479,
198,
198,
834,
439,
834,
796,
5855,
41,
1559,
11395,
1600,
8,
198,
198,
2,
1343,
1783,
6329,
10,
24305,
10,
198,
2,
930,
11361,
220,
220,
220,
220,
220,
220,
220,
... | 2.322165 | 388 |
from django.urls import path
from .views import get_ips, get_lists, update_lists
urlpatterns = {
path('ip', get_ips, name="get_ips"),
path('list', get_lists, name="get_lists"),
path('management/update', update_lists, name='update_lists')
} | [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
764,
33571,
1330,
651,
62,
2419,
11,
651,
62,
20713,
11,
4296,
62,
20713,
198,
198,
6371,
33279,
82,
796,
1391,
198,
220,
220,
220,
3108,
10786,
541,
3256,
651,
62,
2419,... | 2.811111 | 90 |
import os
import re
import sys
import sysconfig
import platform
import subprocess
import distutils
import glob
import tempfile
import shutil
from distutils.version import LooseVersion
from setuptools import setup, Extension, find_packages
from setuptools.command.build_ext import build_ext
from setuptools.command.test import test as TestCommand
import distutils.spawn
import urllib.request
import tarfile
setup(
name="triton",
version="1.0.0",
author="Philippe Tillet",
author_email="phil@openai.com",
description="A language and compiler for custom Deep Learning operations",
long_description="",
packages=["triton", "triton/_C", "triton/tools", "triton/ops", "triton/ops/blocksparse"],
install_requires=["numpy", "torch"],
package_data={"triton/ops": ["*.c"], "triton/ops/blocksparse": ["*.c"]},
include_package_data=True,
ext_modules=[CMakeExtension("triton", "triton/_C/")],
cmdclass={"build_ext": CMakeBuild},
zip_safe=False,
# for PyPI
keywords=["Compiler", "Deep Learning"],
url="https://github.com/ptillet/triton/",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Software Development :: Build Tools",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
],
)
| [
11748,
28686,
198,
11748,
302,
198,
11748,
25064,
198,
11748,
25064,
11250,
198,
11748,
3859,
198,
11748,
850,
14681,
198,
11748,
1233,
26791,
198,
11748,
15095,
198,
11748,
20218,
7753,
198,
11748,
4423,
346,
198,
6738,
1233,
26791,
13,
... | 2.825364 | 481 |
# Best Practices
| [
198,
198,
2,
6705,
42134,
198
] | 3.166667 | 6 |
import numpy as np
import matplotlib.pyplot as plt
import random
from deap import base
from deap import creator
from deap import tools
if __name__ == "__main__":
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)
img_ans = plt.imread('TAsample2.png')
img_ans = img_ans[..., 1]
img_ans = img_ans.reshape(40*30, )
img_ans = img_ans*255
for i in range(40*30):
if img_ans[i] < 128:
img_ans[i] = 0
else:
img_ans[i] = 1
toolbox = base.Toolbox()
toolbox.register("attr_bool", random.randint, 0, 1)
toolbox.register(
"individual", tools.initRepeat, creator.Individual,
toolbox.attr_bool, 40*30
)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", evalOneMax)
toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
toolbox.register("select", tools.selTournament, tournsize=3)
results = plt.figure()
graph = results.add_subplot(2, 2, 1) # Setting of graph
main()
input() # in order to prevent window from disapearing
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
11748,
4738,
198,
6738,
390,
499,
1330,
2779,
198,
6738,
390,
499,
1330,
13172,
198,
6738,
390,
499,
1330,
4899,
628,
628,
198,
198... | 2.445135 | 483 |
from datetime import datetime
from callWHODataAPI import WHODataAPI
from s3Upload import S3Uploader
s3_uploader = S3Uploader(WHODataAPI())
all_buckets = s3_uploader.get_all_buckets()
print_list(all_buckets)
response = s3_uploader.apiClient.get_who_dimensions()
myBucket = s3_uploader.get_target_bucket()
timestamp = int(datetime.now().timestamp())
print(timestamp)
myBucket.put_object(Key=('dimensions_' + str(timestamp) + '.json'), Body=response)
all_object = s3_uploader.get_all_objects_from_bucket(myBucket)
print_list(all_object)
| [
6738,
4818,
8079,
1330,
4818,
8079,
201,
198,
201,
198,
6738,
869,
12418,
3727,
1045,
17614,
1330,
7655,
3727,
1045,
17614,
201,
198,
6738,
264,
18,
41592,
1330,
311,
18,
41592,
263,
201,
198,
201,
198,
201,
198,
201,
198,
82,
18,
6... | 2.465217 | 230 |
# Generated by Django 3.2.3 on 2021-06-03 14:23
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
18,
319,
33448,
12,
3312,
12,
3070,
1478,
25,
1954,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
#!/usr/bin/env python
import logging, bz2, random
from gensim import interfaces, matutils
from gensim.corpora import IndexedCorpus
numDoc1 = 0
numRow1 = 0
numCol1 = 0
numDoc2 = 0
numRow2 = 0
numCol2 = 0
split = []
inpObj = matutils.MmReader(bz2.BZ2File('sparse_wiki_tfidf.mm.bz2'))
prob = 0.5
print "The first pass"
docNo = 1
for (id, vect) in inpObj:
if len(vect) > 0:
if random.random() > prob :
numDoc1 = numDoc1 + 1
numRow1 = numRow1 + len(vect)
# This is the maximum word id among all the tuples and numCol1
numCol1 = max(numCol1,max(vect)[0])
split.append(0)
else:
numDoc2 = numDoc2 + 1
numRow2 = numRow2 + len(vect)
numCol2 = max(numCol2,max(vect)[0])
split.append(1)
if docNo % 1000 == 0:
print docNo
docNo = docNo + 1
#if docNo > 100: break
# TODO I wonder if writing zeros in the headers is Ok
outObj1 = matutils.MmWriter('sparse_wiki_tfidf_part1.mm')
outObj1.write_headers(numDoc1, numCol1, numRow1)
outObj2 = matutils.MmWriter('sparse_wiki_tfidf_part2.mm')
outObj2.write_headers(numDoc2, numCol2, numRow2)
inpObj = matutils.MmReader(bz2.BZ2File('sparse_wiki_tfidf.mm.bz2'))
print "The second pass"
docNo = 1
numDoc1 = 0
numDoc2 = 0
for (id, vect) in inpObj:
# see the commented out above, this is for debug purposes only,
# to stop if we process incomplete collection
if docNo > len(split): break
if split[docNo - 1] == 0:
outObj1.write_vector(numDoc1, vect)
numDoc1 = numDoc1 + 1
else:
outObj2.write_vector(numDoc2, vect)
numDoc2 = numDoc2 + 1
if docNo % 1000 == 0:
print docNo
docNo = docNo + 1
outObj1.close()
outObj2.close()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
18931,
11,
275,
89,
17,
11,
4738,
198,
198,
6738,
308,
641,
320,
1330,
20314,
11,
2603,
26791,
198,
6738,
308,
641,
320,
13,
10215,
38851,
1330,
12901,
276,
45680,
385,
198,
1... | 2.281207 | 729 |
from restic.repo import Repo
from restic.snapshot import Snapshot
from restic.core import version, self_update, generate
from restic.config import restic_bin
from restic.test import test_all
| [
6738,
1334,
291,
13,
260,
7501,
1330,
1432,
78,
198,
6738,
1334,
291,
13,
45380,
9442,
1330,
16026,
9442,
198,
6738,
1334,
291,
13,
7295,
1330,
2196,
11,
2116,
62,
19119,
11,
7716,
198,
6738,
1334,
291,
13,
11250,
1330,
1334,
291,
6... | 3.446429 | 56 |
from pd import *
## we need the gromacs module for the gromacs file formats
from gromacs import *
cseed(3)
info()
timer()
## create a water box
ffps = FFParamSet("tip3.ff")
sim = System(ffps)
## create a new molecule (water, TIP3)
mol = NewMolecule(ffps,"TIP3")
boundary = PeriodicBox(18.1500)
## and fill the box with it
sim.solvate_N(mol, boundary ,200)
## now make a workspace
wspace = WorkSpace( sim )
wspace.info()
wspace.printPDB("inout.pdb")
wspace.setSpace(boundary)
## Now add various trajectories. Note that they're all independent
## and can be added in any combination.
## Bristol format (unofficial)
tra = OutTra_BTF("output",wspace)
wspace.addTra(tra)
## NAMD Format. Note that this trajectory automatically creates
## Both the PSF and DCD file needed
tra1 = OutTra_NAMD("output",wspace);
wspace.addTra(tra1)
## Gromacs also uses a two part trajectory but here we represented them
## separately, so you need to add the GRO and the XTC part.
## This was because gromacs also has a full precision format called
## TRR which will be added later.
tra2 = OutTra_GRO("output",wspace);
wspace.addTra(tra2)
tra3 = OutTra_XTC("output",wspace);
wspace.addTra(tra3)
## A PDB makeshift trajectory (creates a multi-model PDB which for example
## VMD can read.
tra4 = OutTra_PDB("output",wspace);
wspace.addTra(tra4)
## Run some simple MD to demonstrate.
ff = Forcefield(wspace)
bonds = FF_Bonded(wspace)
nb = FF_NonBonded(wspace)
nb.ForceSwitch = True
nb.EnergySwitch = False
nb.Cutoff = 8.00
nb.InnerCutoff = 6.00
nb.VdwCutoff = 8.00
nb.VdwInnerCutoff = 6.00
ff.add(bonds)
ff.add(nb)
ff.info()
ff.printEnergySummary()
timer()
## Minimise for a few steps first
min = Minimisation(ff)
min.Steps = 50
min.UpdateScr = 20
min.UpdateTra = 0
min.run()
#### MD to relax the unequilibrarted starting structure
md = MolecularDynamics(ff)
md.Steps = 250000
md.Timestep = float(after("-timestep","1.0E-15"))
md.UpdateScr = 10 ## Update screen
md.UpdateTra = 50 ## Trajectory dumps occur every 50 steps
md.UpdateNList = 10 ## Calculate new neighbor list every 10 steps
md.Integrator = MolecularDynamics.Langevin
md.UpdateRemoveTotalMomentum = False
md.setTargetTemp(300)
md.Barostat = MolecularDynamics.BerendsenBaro
md.run()
| [
6738,
279,
67,
1330,
1635,
198,
2235,
356,
761,
262,
308,
398,
16436,
8265,
329,
262,
308,
398,
16436,
2393,
17519,
198,
6738,
308,
398,
16436,
1330,
1635,
198,
198,
66,
28826,
7,
18,
8,
198,
10951,
3419,
198,
45016,
3419,
198,
198,... | 2.685748 | 856 |
"""cameraTest2 controller."""
# You may need to import some classes of the controller module. Ex:
# from controller import Robot, Motor, DistanceSensor
from controller import Robot
import cv2
import numpy as np
from controller import Keyboard
from movement_commands import *
from gesture_commands import *
import math
KEY_F=70
# create the Robot instance.
robot = Robot()
# get the time step of the current world.
timestep = int(robot.getBasicTimeStep())
#init vision stuff
cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2(0,50)
kernel=cv2.getStructuringElement(cv2.MORPH_RECT,(2,2))
matcher=cv2.DescriptorMatcher_create(cv2.DescriptorMatcher_BRUTEFORCE_HAMMING)
detect=cv2.AKAZE.create()
textOrg=(20,50)
#can change to support more or less, but 5 to support the finger gap code
numGestures=5
x=0
while x<10:
ret, frame = cap.read()
x+=1
x=0
while x<10:
ret, frame = cap.read()
img= fgbg.apply(frame,learningRate=0)
x+=1
#gather gestures
desList=[]
while len(desList)<numGestures:
while True:
ret,img=cap.read()
cam=img
img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
img=cv2.bilateralFilter(img,9,300,150)
img= cv2.GaussianBlur(img,(5,5),0)
img= fgbg.apply(img,learningRate=0)
img= cv2.morphologyEx(img,cv2.MORPH_ERODE,kernel)
img= cv2.morphologyEx(img,cv2.MORPH_DILATE,kernel,iterations=2)
img= cv2.threshold(img, 128, 255, cv2.THRESH_BINARY| cv2.THRESH_OTSU)[1]
cv2.imshow('img',img)
cv2.imshow('cam',cam)
if(cv2.waitKey(1) & 0xFF == ord('q')):
break
cv2.destroyAllWindows()
kp,des=detect.detectAndCompute(img,None)
imgWkp=cv2.drawKeypoints(img,kp,None)
cv2.imshow('img',img)
cv2.imshow('kp',imgWkp)
key=cv2.waitKey(0)
if key!=ord('r'):
desList.append(des)
cv2.destroyAllWindows()
#initialize the robot
gripper_init(robot)
arm_init(robot)
base_init(robot)
keyboard=Keyboard()
keyboard.enable(2*timestep)
#display commands
display_helper_message()
#accesss this var to get the gesture
#CURRENTLY 1 FINGER UP and 0 FINGERS ARE IDENTICAL
#FOR BEST RESULTS USE WITH CONTRASTED BACKGROUND TO HAND
fingerCount=0
prevCount=0
key=keyboard.getKey()
prevkey=0
# array to store last prevNum computed gestures, used in deciding what gesture to output
prev=[]
prevNum=5
#threshold of similarity, must pass this to register as a gesture
thresh=.2
while robot.step(timestep) != -1:
#read and clean
ret,frame=cap.read()
frame=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
frame=cv2.bilateralFilter(frame,9,300,150)
frame= cv2.GaussianBlur(frame,(5,5),0)
frame= fgbg.apply(frame,learningRate=0)
frame= cv2.morphologyEx(frame,cv2.MORPH_ERODE,kernel)
frame= cv2.morphologyEx(frame,cv2.MORPH_DILATE,kernel,iterations=5)
frame= cv2.threshold(frame, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
#compute key points
kpt,dest=detect.detectAndCompute(frame,None)
showImg=cv2.drawKeypoints(frame,kpt,None)
ratGood=0
index=-1
#loop to compare defined gestures to camera input
for i,d in enumerate(desList):
#find keypoint matches
try:
matches=matcher.knnMatch(d,dest,k=2)
except:
cv2.imshow('img',showImg)
continue
#find the actual good ones with Lowes ratio test
if not matches is None:
goodOnes=[]
rat=0.0
try:
for m1,m2 in matches:
if m1.distance<0.75 * m2.distance:
goodOnes.append(m1)
rat+=1
except:
continue
#compute ratio of matches to number of gesture keypoints
rat/=len(d)
if rat>ratGood:
ratGood=rat
index=i
#compare against threshold
if ratGood>thresh and index !=-1:
print(index)
prevCount=fingerCount
#slow update for less twitchiness
fingerCount=index
if len(prev)<prevNum:
prev.append(fingerCount)
else:
temp=max(prev,key=prev.count)
prev.pop(0)
prev.append(fingerCount)
fingerCount=temp
cv2.putText(showImg, str(fingerCount), textOrg,cv2.FONT_HERSHEY_SIMPLEX,1,[255,255,255])
#no matches
else:
cv2.putText(showImg, 'no matches', textOrg,cv2.FONT_HERSHEY_SIMPLEX,1,[255,255,255])
fingerCount=-1
cv2.imshow('img',showImg)
# the match command module
prevkey=key
key=keyboard.getKey()
if fingerCount==4 and (prevCount!=fingerCount or prevkey!=key):
command5(key)
elif fingerCount==3 and (prevCount!=fingerCount or prevkey!=key):
command4(key)
elif fingerCount==2 and (prevCount!=fingerCount or prevkey!=key):
command3(key)
elif fingerCount==1 and (prevCount!=fingerCount or prevkey!=key):
command2(key)
elif fingerCount==0 and (prevCount!=fingerCount or prevkey!=key):
command0(key)
elif fingerCount==-1:
stop()
if(cv2.waitKey(1) & 0xFF == ord('q')):
break
#USE THIS TO RECALIBRATE THE BACKGROUND, kinda buggy, hold down R until camera freezes
if (cv2.waitKey(1) & 0xFF == ord('r')):
fgbg = cv2.createBackgroundSubtractorMOG2(0,50)
cap.release()
cv2.destroyAllWindows()
| [
37811,
25695,
14402,
17,
10444,
526,
15931,
198,
198,
2,
921,
743,
761,
284,
1330,
617,
6097,
286,
262,
10444,
8265,
13,
1475,
25,
198,
2,
220,
422,
10444,
1330,
16071,
11,
12533,
11,
34600,
47864,
198,
6738,
10444,
1330,
16071,
198,
... | 2.151019 | 2,503 |
# %%
import numpy as np
import pandas as pd
import scipy as sp
import math
import matplotlib.animation as animation
from scipy.integrate import odeint
from numpy import arange
from scipy.integrate import odeint
import scipy.optimize
from scipy.optimize import leastsq
from math import exp
from collections import OrderedDict
from sklearn.linear_model import LinearRegression
pd.options.mode.chained_assignment = None
import git
# Find home directory for repo
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
# %%
# Load data
df_hplc = pd.read_csv(f'{homedir}/data/raw/HPLC/hplc_master_table_raw_100.csv')
# Sort values
# Add real concentration column to the hplc master table
#Create empty list
real_conc=[]
#loop through rows
for i, row in df_hplc.iterrows():
# Add real concentration to empty list if it exists
if math.isnan(row.Calc_conc):
real_conc.append (row.Real_conc)
# If ther real concentration does not exist, calculate it by multiplying by 10
#(1:10 dilution) the calculated concentration
else:
real_conc.append(row.Calc_conc*10)
df_hplc['Real_conc'] = real_conc
# Sort values
df_hplc = df_hplc.sort_values(['Name', 'Replicate', 'Time_min'])
df_hplc.head()
# %%
# Calculate the fraction of reactant remaining for each replicate at each time point
#Create ID column with the combination of enzyme, time and replicate
df_hplc['ID'] = df_hplc['Name'] + '_' +\
df_hplc['Time_min'].astype(int).map(str) + '_' + \
df_hplc['Replicate'].map(str)
# Create new name_date column with the combination of enzyme and replicate
df_hplc['Name_Rep'] = df_hplc['Name'] + '_' +\
df_hplc['Replicate'].map(str)
# Get the max values for corrected concentration for each enzyme and replicate and
# append it to a new column
df_hplc['Real_conc_max'] = \
df_hplc.groupby(['Name_Rep'])['Real_conc'].transform(max)
# Get the fraction of reactant remaining for each time point
df_hplc['f_R'] = abs(np.divide(df_hplc['Real_conc'], \
df_hplc['Real_conc_max']))
df_hplc.head()
# %%
# Export data table
df_hplc.to_csv(f'{homedir}/data/processed/HPLC/hplc_master_table_raw_100.csv')
# %% | [
2,
43313,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
629,
541,
88,
355,
599,
198,
11748,
10688,
198,
11748,
2603,
29487,
8019,
13,
11227,
341,
355,
11034,
198,
6738,
629,
541,
88,
13,
18908,
... | 2.647131 | 819 |
import pandas as pd
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense
from sklearn.preprocessing import LabelEncoder #IMPORTA A BIBLIOTECA DO ENCODER
from keras.utils import np_utils #IMPORTA A BIBLIOTECA PARA COMPLEMENTAR O ENCODER
import numpy as np
from sklearn.metrics import confusion_matrix
base = pd.read_csv('iris.csv')
previsores = base.iloc[:, 0:4].values
classe = base.iloc[:, 4].values
labelEncoder = LabelEncoder() #CRIA O OBJETO DO ENCODER
classe = labelEncoder.fit_transform(classe) #TRANSFORMA AS CLASSES DAS PLANTAS EM VALORES NUMERICOS
classe_dummy = np_utils.to_categorical(classe)
"""
iris setosa 1 0 0
iris virginica 0 1 0
iris versicolor 0 0 1
"""
previsores_treinamento, previsores_teste, classe_treinamento, classe_teste = train_test_split(previsores,
classe_dummy,
test_size=0.25)
classificador = Sequential()
classificador.add(Dense(units = 4, activation = 'relu', input_dim = 4))
classificador.add(Dense(units = 4, activation = 'relu'))
classificador.add(Dense(units = 3, activation = 'softmax'))
classificador.compile(optimizer='adam', loss = 'categorical_crossentropy', metrics = ['categorical_accuracy'])
classificador.fit(previsores_treinamento, classe_treinamento, batch_size = 10, epochs = 1000)
resultado = classificador.evaluate(previsores_teste, classe_teste)
previsoes = classificador.predict(previsores_teste)
previsoes = (previsoes > 0.5)
classe_teste2 = [np.argmax(t) for t in classe_teste] #Pega o codigo da planta e transforma ela pelos indices 0 1 e 2
previsoes2 = [np.argmax(t) for t in previsoes]
matriz = confusion_matrix(previsoes2, classe_teste2) | [
11748,
19798,
292,
355,
279,
67,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
4512,
62,
9288,
62,
35312,
198,
6738,
41927,
292,
13,
27530,
1330,
24604,
1843,
198,
6738,
41927,
292,
13,
75,
6962,
1330,
360,
1072,
198,
6738,
1341... | 2.319202 | 802 |
#!/usr/bin/env python3
import cgi, cgitb
import smtplib
import requests
import json
# Create instance of FieldStorage
form = cgi.FieldStorage()
# Get data from fields
customer_name = str(form.getvalue('customer_name'))
customer_email = str(form.getvalue('customer_email'))
customer_message = str(form.getvalue('message'))
# variables for mail
sender = 'noreply@yourdomain.com'
receivers = ['you@example.com']
# variables for hcaptcha
hcaptcha_token = form.getvalue('h-captcha-response')
hcaptcha_secret = '123456789'
# post to endpoint with captcha token
api_endpoint = 'https://hcaptcha.com/siteverify'
hc_data = {'response':hcaptcha_token,'secret':hcaptcha_secret}
r = requests.post(url = api_endpoint, data = hc_data)
# now parse the response from /siteverify and check for success
answer = r.text
result = json.loads(r.text)
if result['success'] == bool(1):
sendMessage()
else:
print('Content-type:text/html\n\n')
print('Please complete the captcha challenge.')
quit()
print('Content-type:text/html')
print('')
print('')
print('<html>')
print('<head>')
print('</head>')
print('<body>')
print('<center><p>Your message has been sent. <br />You will be redirected to the home page in 5 seconds. </p></center>')
print('<script>setTimeout(function(){window.location.href = "https://example.com";}, 5000);</script>')
print('</body>')
print('</html>')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
269,
12397,
11,
269,
18300,
65,
198,
11748,
895,
83,
489,
571,
198,
11748,
7007,
198,
11748,
33918,
198,
2,
13610,
4554,
286,
7663,
31425,
198,
687,
796,
269,
12397,
13,
1... | 2.914712 | 469 |
"""Add linelist_deaths schema
Revision ID: c11ac992c04a
Revises: ffec75dfdadd
Create Date: 2021-09-19 07:54:19.691798
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c11ac992c04a'
down_revision = 'ffec75dfdadd'
branch_labels = None
depends_on = None
| [
37811,
4550,
9493,
46331,
62,
22595,
82,
32815,
198,
198,
18009,
1166,
4522,
25,
269,
1157,
330,
41561,
66,
3023,
64,
198,
18009,
2696,
25,
31246,
721,
2425,
7568,
67,
2860,
198,
16447,
7536,
25,
33448,
12,
2931,
12,
1129,
8753,
25,
... | 2.557377 | 122 |
cel = float(input('°C = '))
f = cel * 18
fh = f + 32
print(f'A temperatura convertida de {cel} °C para °F é de {fh} ')
| [
5276,
796,
12178,
7,
15414,
10786,
7200,
34,
796,
705,
4008,
198,
69,
796,
18725,
1635,
1248,
220,
198,
69,
71,
796,
277,
1343,
3933,
198,
4798,
7,
69,
6,
32,
4124,
2541,
64,
10385,
3755,
390,
1391,
5276,
92,
22074,
34,
31215,
220... | 2.222222 | 54 |
"""
fabulous.text
~~~~~~~~~~~~~
I let you print TrueType text to your terminal. The easiest way
to get started with me is by running::
jart@compy:~$ python -m fabulous.text --help
To make things simple, Fabulous comes with my favorite serif,
non-serif, and monospace fonts:
- IndUni-H-Bold: Open Source Helvetica Bold clone (sans-serif)
This is the real deal and not some cheap ripoff like Verdana.
IndUni-H-Bold is the default because not only does it look
great, but also renders *perfectly*. and is also used for the
Fabulous logo. Commonly found on stret signs.
This font is licensed under the GPL. If you're developing
proprietary software you might want to ask its author or a
lawyer if Fabulous' use of IndUni-H would be considered a "GPL
Barrier."
- cmr10: Computer Modern (serif)
Donald Knuth wrote 23,000 lines for the sole purpose of
bestowing this jewel upon the world. This font is commonly seen
in scholarly papers.
- DejaVuSansMono: DejaVu Sans Mono (formerly Bitstream Vera Sans Mono)
At point size 8, this is my favorite programming/terminal font.
For other fonts, I'll try my best to figure out where your font
files are stored. If I have trouble finding your font, try using
an absolute path *with* the extension. You could also try putting
the font in your ``~/.fonts`` folder and running ``fc-cache -fv
~/.fonts``.
"""
import os
import sys
import grapefruit
from fabulous import utils, image
class Text(image.Image):
"""Renders TrueType Text to Terminal
I'm a sub-class of :class:`fabulous.image.Image`. My job is
limited to simply getting things ready. I do this by:
- Turning your text into an RGB-Alpha bitmap image using
:mod:`PIL`
- Applying way cool effects (if you choose to enable them)
For example::
>>> assert Text("Fabulous", shadow=True, skew=5)
>>> txt = Text("lorem ipsum", font="IndUni-H-Bold")
>>> len(str(txt)) > 0
True
>>> txt = Text("lorem ipsum", font="cmr10")
>>> len(str(txt)) > 0
True
>>> txt = Text("lorem ipsum", font="DejaVuSansMono")
>>> len(str(txt)) > 0
True
:param text: The text you want to display as a string.
:param fsize: The font size in points. This obviously end up
looking much larger because in fabulous a single
character is treated as one horizontal pixel and two
vertical pixels.
:param color: The color (specified as you would in HTML/CSS) of
your text. For example Red could be specified as
follows: ``red``, ``#00F`` or ``#0000FF``.
:param shadow: If true, render a simple drop-shadow beneath text.
The Fabulous logo uses this feature.
:param skew: Skew size in pixels. This applies an affine
transform to shift the top-most pixels to the right.
The Fabulous logo uses a five pixel skew.
:param font: The TrueType font you want. If this is not an
absolute path, Fabulous will search for your font by
globbing the specified name in various directories.
"""
class FontNotFound(ValueError):
"""I get raised when the font-searching hueristics fail
This class extends the standard :exc:`ValueError` exception so you
don't have to import me if you don't want to.
"""
def resolve_font(name):
"""Sloppy way to turn font names into absolute filenames
This isn't intended to be a proper font lookup tool but rather a
dirty tool to not have to specify the absolute filename every
time.
For example::
>>> path = resolve_font('IndUni-H-Bold')
>>> fontdir = os.path.join(os.path.dirname(__file__), 'fonts')
>>> indunih_path = os.path.join(fontdir, 'IndUni-H-Bold.ttf')
>>> assert path == indunih_path
This isn't case-sensitive::
>>> assert resolve_font('induni-h') == indunih_path
Raises :exc:`FontNotFound` on failure::
>>> resolve_font('blahahaha')
Traceback (most recent call last):
...
FontNotFound: Can't find 'blahahaha' :'( Try adding it to ~/.fonts
"""
for fontdir, fontfiles in get_font_files():
for fontfile in fontfiles:
if name.lower() in fontfile.lower():
return os.path.join(fontdir, fontfile)
raise FontNotFound("Can't find %r :'( Try adding it to ~/.fonts")
@utils.memoize
def get_font_files():
"""Returns a list of all font files we could find
Returned as a list of dir/files tuples::
get_font_files() -> [('/some/dir', ['font1.ttf', ...]), ...]
For example::
>>> fabfonts = os.path.join(os.path.dirname(__file__), 'fonts')
>>> 'IndUni-H-Bold.ttf' in get_font_files()[fabfontdir]
True
>>> 'DejaVuSansMono.ttf' in get_font_files()[fabfontdir]
True
>>> 'cmr10.ttf' in get_font_files()[fabfontdir]
True
>>> assert len(get_font_files()) > 0
>>> for dirname, filename in get_font_files():
... assert os.path.exists(os.path.join(dirname, filename))
...
"""
dirs = [os.path.join(os.path.dirname(__file__), 'fonts'),
os.path.expanduser('~/.fonts')]
sys_dirs = [
# this is where ubuntu puts fonts
'/usr/share/fonts/truetype',
# this is where fedora puts fonts
'/usr/share/fonts',
]
for dirname in sys_dirs:
try:
dirs += [os.path.join(dirname, subdir)
for subdir in os.listdir(dirname)]
except OSError:
pass
return [(p, os.listdir(p)) for p in dirs if os.path.isdir(p)]
def main(args):
"""I provide a command-line interface for this module
"""
import optparse
parser = optparse.OptionParser()
parser.add_option(
"-S", "--skew", dest="skew", type="int", default=None,
help=("Apply skew effect (measured in pixels) to make it look "
"extra cool. For example, Fabulous' logo logo is skewed "
"by 5 pixels. Default: %default"))
parser.add_option(
"-C", "--color", dest="color", default="#0099ff",
help=("Color of your text. This can be specified as you would "
"using HTML/CSS. Default: %default"))
parser.add_option(
"-B", "--term-color", dest="term_color", default=None,
help=("If you terminal background isn't black, please change "
"this value to the proper background so semi-transparent "
"pixels will blend properly."))
parser.add_option(
"-F", "--font", dest="font", default='IndUni-H-Bold',
help=("Path to font file you wish to use. This defaults to a "
"free Helvetica-Bold clone which is included with Fabulous. "
"Default value: %default"))
parser.add_option(
"-Z", "--size", dest="fsize", type="int", default=20,
help=("Size of font in points. Default: %default"))
parser.add_option(
"-s", "--shadow", dest="shadow", action="store_true", default=False,
help=("Size of font in points. Default: %default"))
(options, args) = parser.parse_args(args=args)
if options.term_color:
utils.term.bgcolor = options.term_color
for line in " ".join(args).split("\n"):
fab_text = Text(line, skew=options.skew, color=options.color,
font=options.font, fsize=options.fsize,
shadow=options.shadow)
for chunk in fab_text:
print chunk
if __name__ == '__main__':
main(sys.argv[1:])
| [
37811,
198,
220,
220,
220,
28294,
13,
5239,
198,
220,
220,
220,
220,
15116,
8728,
93,
628,
220,
220,
220,
314,
1309,
345,
3601,
6407,
6030,
2420,
284,
534,
12094,
13,
220,
383,
16638,
835,
198,
220,
220,
220,
284,
651,
2067,
351,
... | 2.463476 | 3,176 |
from functools import reduce
from operator import add
from math import cos, pi
from scipy import signal
import matplotlib.pyplot as plt
from migen import *
from migen.fhdl import verilog
import numpy
sample = 5000 #Number of Samples
Fs = 5000 #Sample Frequency
fc = 800 #Cutoff frequ
w_c = 2*fc/Fs #Digital Frequ
n = 3 #Order of the filter
# A synthesizable FIR filter.
# A test bench for our FIR filter.
# Generates a sine wave at the input and records the output.
if __name__ == "__main__":
# Compute filter coefficients with SciPy.
#coef = signal.remez(30, [0, 0.1, 0.2, 0.4, 0.45, 0.5], [0, 1, 0])
#coefb = signal.firwin(100, cutoff = 0.1, window = "hanning", pass_zero=True)
#coefb, coefa = signal.iirfilter(n,w_c, btype="lowpass", analog=False, ftype="butter")
coefb, coefa = signal.butter(n, w_c, btype='lowpass', analog=False, output='ba', fs=None)
print(coefb,coefa)
#Rescale-----------------------------------------------------------------
ValueArray = []
ValueArray.append(abs(numpy.amin(coefb)))
ValueArray.append(abs(numpy.amax(coefb)))
ValueArray.append(abs(numpy.amin(coefa)))
ValueArray.append(abs(numpy.amax(coefa)))
info = numpy.amax(ValueArray)
print(info)
coefb = coefb/info
coefa = coefa/info
#-------------------------------------------------------------------------
# Simulate for different frequencies and concatenate
# the results.
in_signals = []
out_signals = []
for frequency in [1,10,100,1000]:
dut = FIR(coefb,coefa)
tb = fir_tb(dut, frequency, in_signals, out_signals)
run_simulation(dut, tb)
# Plot data from the input and output waveforms.
plt.plot(in_signals)
plt.plot(out_signals)
plt.show()
print(coefb,coefa)
# Print the Verilog source for the filter.
fir = FIR(coefb,coefa)
print(verilog.convert(fir, ios={fir.i, fir.o}))
| [
6738,
1257,
310,
10141,
1330,
4646,
198,
6738,
10088,
1330,
751,
198,
198,
6738,
10688,
1330,
8615,
11,
31028,
198,
6738,
629,
541,
88,
1330,
6737,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
6738,
285,
932... | 2.445828 | 803 |
# Copyright (c) 2018 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Module for running tests for MediaSDK open source
This module gets binary files of CI MediaSDK build
from share folder and tests them by 'ted'
"""
import sys
import argparse
import shutil
import subprocess
import os
import pathlib
import tarfile
import traceback
import adapter_conf
class TedAdapter(object):
"""
Wrapper for 'ted'
"""
#TODO: add relevant path and delete it
test_driver_dir = pathlib.Path('/localdisk/bb/worker/infrastructure') #TODO: hardcoded path
test_results_dir = test_driver_dir / 'ted/results'
tests_timeout = 300 # 5 minutes
def __init__(self, build_artifacts_dir, tests_artifacts_dir, root_dir):
"""
:param build_artifacts_dir: Path to build artifacts
:type build_artifacts_dir: pathlib.Path
:param tests_artifacts_dir: Path to tests artifacts
:type tests_artifacts_dir: pathlib.Path
:param root_dir: Path to workdir for unpacking build artifacts
:type root_dir: pathlib.Path
"""
self.build_artifacts_dir = build_artifacts_dir
self.tests_artifacts_dir = tests_artifacts_dir
self.root_dir = root_dir
def _get_artifacts(self):
"""
Get artifacts archive from share
and extract them
:return: None
"""
pkg_name = 'install_pkg.tar.gz'
remote_pkg = self.build_artifacts_dir / pkg_name
#TODO: implement exceptions
# Clean workdir and re-create it
self._remove(str(self.root_dir))
self._mkdir(str(self.root_dir))
# Copy `install_pkg.tar` to the workdir and untar it
self._copy(str(remote_pkg), str(self.root_dir))
self._untar(str(self.root_dir / pkg_name), str(self.root_dir))
# Remove old `/opt/intel/mediasdk` and copy fresh built artifacts
self._remove(str(adapter_conf.MEDIASDK_PATH), sudo=True)
self._copy(str(self.root_dir / 'opt' / 'intel' / 'mediasdk'), str(adapter_conf.MEDIASDK_PATH), sudo=True)
def run_test(self):
"""
'Ted' runner
:return: Count of failed cases
:rtype: Integer | Exception
"""
self._get_artifacts()
env = os.environ.copy()
#Path to mediasdk fodler which will be tested
env['MFX_HOME'] = str(adapter_conf.MEDIASDK_PATH)
#Path to the folder lib64 where located driver
env['LIBVA_DRIVERS_PATH'] = str(adapter_conf.DRIVER_PATH)
process = subprocess.run('python3 ted/ted.py',
shell=True,
cwd=self.test_driver_dir,
env=env,
timeout=self.tests_timeout,
encoding='utf-8',
errors='backslashreplace')
return process.returncode
# Direct calls of rm, cp commands needs to use them with `sudo`
# because we need to copy CI build artifacts to the
# `/opt/intel/mediasdk`
# Note: user should be sudoer without asking the password!
# TODO use extract_archive() from common.helper
def main():
"""
Tests runner
:return: None
"""
#Check existence of driver
check_driver()
parser = argparse.ArgumentParser(prog="build_runner.py",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--version", action="version", version="%(prog)s 1.0")
parser.add_argument('-br', "--branch", metavar="String", required=True,
help="Branch of triggered repository")
parser.add_argument('-e', "--build-event", default='commit',
choices=['pre_commit', 'commit', 'nightly', 'weekly'],
help='Event of commit')
parser.add_argument('-c', "--commit-id", metavar="String", required=True,
help="SHA of triggered commit")
parser.add_argument('-p', "--product-type", default='linux',
choices=['linux', 'embedded', 'open_source', 'windows', 'api_latest'],
help='Type of product')
parser.add_argument('-b', "--build-type", default='release',
choices=['release', 'debug'],
help='Type of build')
parser.add_argument('-d', "--root-dir", metavar="PATH", required=True,
help="Path to worker directory")
args = parser.parse_args()
directories_layout = [
args.branch,
args.build_event,
args.commit_id,
args.product_type,
args.build_type
]
build_artifacts_dir = MediaSdkDirectories.get_build_dir(*directories_layout)
tests_artifacts_dir = MediaSdkDirectories.get_tests_dir(*directories_layout)
adapter = TedAdapter(build_artifacts_dir, tests_artifacts_dir, root_dir=pathlib.Path(args.root_dir))
try:
failed_cases = adapter.run_test()
except:
print("Exception occurred:\n", traceback.format_exc())
# TODO return json string
failed_cases = 1
try:
adapter.copy_logs_to_share()
except:
print("Exception occurred while copying results:\n", traceback.format_exc())
failed_cases = 1
exit(failed_cases)
if __name__ == '__main__':
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from common import MediaSdkDirectories
from common.helper import rotate_dir
main()
| [
2,
15069,
357,
66,
8,
2864,
8180,
10501,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
2,
286,
428,
3788,
290,
3917,
10314,
3696,
357,
1169,
366,
25423,
12340,
284,
1... | 2.473783 | 2,651 |
""".. _pre_transformation:
Pre-Transformation:
---------------------
Pre-transformation constraints determine if a transformation is valid based on only the original input and the position of the replacement. These constraints are applied before the transformation is even called. For example, these constraints can prevent search methods from swapping words at the same index twice, or from replacing stopwords.
"""
from .stopword_modification import StopwordModification
from .repeat_modification import RepeatModification
from .input_column_modification import InputColumnModification
from .max_word_index_modification import MaxWordIndexModification
from .min_word_length import MinWordLength
from .max_modification_rate import MaxModificationRate
| [
37811,
492,
4808,
3866,
62,
7645,
1161,
25,
198,
198,
6719,
12,
8291,
1161,
25,
198,
19351,
12,
198,
198,
6719,
12,
7645,
1161,
17778,
5004,
611,
257,
13389,
318,
4938,
1912,
319,
691,
262,
2656,
5128,
290,
262,
2292,
286,
262,
9014... | 4.625767 | 163 |
from PIL import Image
import os
image_file_path = '../dataset/lec'
out = '../dataset/lattice_ec'
for root, dirs_name, files_name in os.walk(image_file_path):
for i in files_name:
if i.split('.')[-1] == 'png':
file_name = os.path.join(root, i)
print(file_name)
img = Image.open(file_name) # 调用图片
cropped = img.crop((0, 0, 750, 750)) # (left, upper, right, lower)
cropped.save(os.path.join(out, i))
| [
6738,
350,
4146,
1330,
7412,
198,
11748,
28686,
198,
198,
9060,
62,
7753,
62,
6978,
796,
705,
40720,
19608,
292,
316,
14,
293,
66,
6,
198,
448,
796,
705,
40720,
19608,
292,
316,
14,
75,
1078,
501,
62,
721,
6,
198,
198,
1640,
6808,... | 1.887218 | 266 |
#!/usr/bin/python3
import pythonwhois
import csv
import time
# Write the CSV header first (file is truncated via 'w+'), then re-open in
# append mode below once all lookups are done.
with open('bulk_whois_results.csv', 'w+', newline="") as outfile:
    w = csv.writer(outfile)
    w.writerow(['Domain Name', 'Registrar', 'Expiration Date', 'Nameservers'])
    # outfile.close()
result_list = []
# One domain per line in 'domain_list'; query whois for each.
with open('domain_list', 'r') as f:
    for domain in f.readlines():
        # whois_csv(domain.strip())
        whois_data = pythonwhois.get_whois(domain.strip())
        try:
            # expiration_date is a list of datetimes; flatten to one string
            eDate = ' '.join(str(x) for x in whois_data['expiration_date'])
            result_dict = {
                'domain': domain.strip(),
                'registrar': whois_data['registrar'],
                'edate': eDate,
                'nameservers': whois_data['nameservers']
            }
            result_list.append(result_dict)
            # crude rate limit to avoid being blocked by the whois server
            time.sleep(15)
        except Exception as e:
            # domains with missing keys (no expiration, etc.) are reported
            # and skipped rather than aborting the whole run
            print(e, domain.strip())
for dom in result_list:
    print(dom)
# Append the collected rows below the header written above.
with open('bulk_whois_results.csv', 'a+', newline="") as outfile:
    w = csv.writer(outfile)
    for i in result_list:  # each i is a dict built in the loop above
        w.writerow([
            i['domain'],
            i['registrar'],
            i['edate'],
            i['nameservers'],
        ])
print('done')
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
21015,
8727,
271,
198,
11748,
269,
21370,
198,
11748,
640,
198,
198,
4480,
1280,
10786,
65,
12171,
62,
8727,
271,
62,
43420,
13,
40664,
3256,
705,
86,
10,
3256,
649,
1370,
2... | 2.036508 | 630 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MicrosoftgraphitemActionSet(Model):
    """itemActionSet.

    Auto-generated msrest model: a set of optional actions that can occur on
    an item. Every attribute is serialized under its own name.

    :param comment:
    :type comment: ~users.models.MicrosoftgraphcommentAction
    :param create:
    :type create: object
    :param delete:
    :type delete: ~users.models.MicrosoftgraphdeleteAction
    :param edit:
    :type edit: object
    :param mention:
    :type mention: ~users.models.MicrosoftgraphmentionAction
    :param move:
    :type move: ~users.models.MicrosoftgraphmoveAction
    :param rename:
    :type rename: ~users.models.MicrosoftgraphrenameAction
    :param restore:
    :type restore: object
    :param share:
    :type share: ~users.models.MicrosoftgraphshareAction
    :param version:
    :type version: ~users.models.MicrosoftgraphversionAction
    """

    # Serialization key equals the attribute name for every entry, so the
    # map is derived from (name, msrest type) pairs.
    _attribute_map = {
        name: {'key': name, 'type': type_name}
        for name, type_name in (
            ('comment', 'MicrosoftgraphcommentAction'),
            ('create', 'object'),
            ('delete', 'MicrosoftgraphdeleteAction'),
            ('edit', 'object'),
            ('mention', 'MicrosoftgraphmentionAction'),
            ('move', 'MicrosoftgraphmoveAction'),
            ('rename', 'MicrosoftgraphrenameAction'),
            ('restore', 'object'),
            ('share', 'MicrosoftgraphshareAction'),
            ('version', 'MicrosoftgraphversionAction'),
        )
    }
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
16529,
35937,
198,
2,
6127,
7560,
416,
5413,
357,
49,
8,
11160,
19452,
6127,
35986,
13,
198,
2,
19179,
743,
2728,
11491,
4069,
290,
481,
307,
2626,
611,
262,
2438,
318,
198,
2,
16935,
515,
13,
... | 3.008251 | 606 |
"""Planemo-specific wrappers around galaxy-tool-util tool functionality."""
from __future__ import absolute_import
import os
import sys
import traceback
from galaxy.tool_util import loader_directory
from galaxy.tool_util.fetcher import ToolLocationFetcher
from planemo.io import error, info
is_tool_load_error = loader_directory.is_tool_load_error
SKIP_XML_MESSAGE = "Skipping XML file - does not appear to be a tool %s."
SHED_FILES = ["tool_dependencies.xml", "repository_dependencies.xml"]
LOAD_ERROR_MESSAGE = "Error loading tool with path %s"
def load_tool_sources_from_path(path, recursive, register_load_errors=False):
    """Generator for tool sources on a path.

    Delegates to galaxy's ``loader_directory.load_tool_sources_from_path``,
    supplying this module's ``_load_exception_handler`` as the error callback.
    NOTE(review): ``_load_exception_handler`` is not visible in this chunk;
    it is presumably defined elsewhere in this module -- verify.
    """
    return loader_directory.load_tool_sources_from_path(
        path,
        _load_exception_handler,
        recursive=recursive,
        register_load_errors=register_load_errors,
    )
__all__ = (
"is_tool_load_error",
"load_tool_sources_from_path",
"yield_tool_sources",
"yield_tool_sources_on_paths",
)
| [
37811,
20854,
41903,
12,
11423,
7917,
11799,
1088,
16161,
12,
25981,
12,
22602,
2891,
11244,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
12854,
1891,
198,
198,
67... | 2.752717 | 368 |
from django.contrib import messages
from django.shortcuts import render, redirect
from Jetbrain.blog.models import Post
from . forms.user_forms import SignUp, UpdateUser, UpdateProfile
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
@login_required
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
6218,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
198,
198,
6738,
19013,
27825,
13,
14036,
13,
27530,
1330,
2947,
198,
6738,
764,
5107,
13,
7220,
62,
23914,
1330,
5865,
4933... | 3.651685 | 89 |
##########################################################################
# NSAp - Copyright (C) CEA, 2017
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
# CW import
from cubicweb.server.sources.ldapfeed import LDAPFeedSource
# If the LDAP source ends with '/', it producesses a crash in LDAP 3 version
# <= 1.4
LDAPFeedSource._connection_info = LDAPFeedSource.connection_info
def connection_info(*args, **kwargs):
    """Wrap the original connection_info and strip one trailing '/' from the host.

    Works around the LDAP 3 (<= 1.4) crash that occurs when the source host
    ends with a slash.
    """
    proto, hostname, port_number = LDAPFeedSource._connection_info(*args, **kwargs)
    # Drop a single trailing slash, if present (safe on empty strings).
    if hostname[-1:] == "/":
        hostname = hostname[:-1]
    return proto, hostname, port_number
LDAPFeedSource.connection_info = connection_info
| [
29113,
29113,
7804,
2235,
198,
2,
10551,
79,
532,
15069,
357,
34,
8,
327,
16412,
11,
2177,
198,
2,
4307,
6169,
739,
262,
2846,
286,
262,
20101,
34,
8267,
12,
33,
5964,
11,
355,
3199,
416,
198,
2,
262,
327,
16412,
12,
34,
41256,
... | 3.322581 | 279 |
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver import ActionChains
import time
if __name__ == '__main__':
    # Driver location
    # Change this to the chromedriver build matching your Chrome version
    binary_location = '../chromedriver'
    # NOTE(review): login_and_get_cookie is not defined in this view; it is
    # presumably defined elsewhere in the full file -- verify.
    login_and_get_cookie('username', 'password')
| [
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11321,
13,
1525,
1330,
2750,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11284,
13,
9019,
1330,
5313,
32103,
21321,
198,
6738,
384,
119... | 2.404762 | 168 |
from matplotlib.finance import quotes_historical_yahoo
from matplotlib.dates import DateFormatter
from matplotlib.dates import DayLocator
from matplotlib.dates import MonthLocator
import sys
from datetime import date
import matplotlib.pyplot as plt
import numpy as np
# Plot one year of daily trading volume for a symbol on a log y-axis.
today = date.today()
start = (today.year - 1, today.month, today.day)  # one year before today
symbol = 'DISH'
if len(sys.argv) == 2:
    symbol = sys.argv[1]  # optional ticker symbol from the command line
quotes = quotes_historical_yahoo(symbol, start, today)
quotes = np.array(quotes)
# Column 0 holds the date ordinals, column 5 the volume, per the variable use below.
dates = quotes.T[0]
volume = quotes.T[5]
alldays = DayLocator()
months = MonthLocator()
month_formatter = DateFormatter("%b %Y")
fig = plt.figure()
ax = fig.add_subplot(111)
# Log scale keeps volume spikes readable alongside quiet days.
plt.semilogy(dates, volume)
ax.xaxis.set_major_locator(months)
ax.xaxis.set_minor_locator(alldays)
ax.xaxis.set_major_formatter(month_formatter)
fig.autofmt_xdate()
plt.show()
| [
6738,
2603,
29487,
8019,
13,
69,
14149,
1330,
13386,
62,
10034,
12409,
62,
40774,
198,
6738,
2603,
29487,
8019,
13,
19581,
1330,
7536,
8479,
1436,
198,
6738,
2603,
29487,
8019,
13,
19581,
1330,
3596,
33711,
1352,
198,
6738,
2603,
29487,
... | 2.608563 | 327 |
"""
/!\ Code not made by me, got from https://github.com/dongjk/faster_rcnn_keras/blob/master/utils.py
"""
import sys
import numpy as np
import keras.backend as K
import xml.etree.ElementTree as ET
from PIL import Image, ImageDraw
def generate_anchors(base_width=16, base_height=16, ratios=[0.5, 1, 2],
                     scales=np.asarray([3,6,12])):
    """
    Generate anchor (reference) windows by enumerating aspect ratios X
    scales wrt a reference (0, 0, w_stride-1, h_stride-1) window.
    """
    # Reference window in (x1, y1, x2, y2) form, anchored at the origin.
    reference = np.array([1, 1, base_width, base_height]) - 1
    # First vary the aspect ratio, then expand each ratio anchor per scale.
    per_ratio = _ratio_enum(reference, ratios)
    return np.vstack([_scale_enum(row, scales) for row in per_ratio])
def _whctrs(anchor):
"""
Return width, height, x center, and y center for an anchor (window).
"""
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def _mkanchors(ws, hs, x_ctr, y_ctr):
"""
Given a vector of widths (ws) and heights (hs) around a center
(x_ctr, y_ctr), output a set of anchors (windows).
"""
ws = ws[:, np.newaxis]
hs = hs[:, np.newaxis]
anchors = np.hstack((x_ctr - 0.5 * (ws - 1),
y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1),
y_ctr + 0.5 * (hs - 1)))
return anchors
def _ratio_enum(anchor, ratios):
    """
    Enumerate a set of anchors for each aspect ratio wrt an anchor.
    """
    w, h, x_ctr, y_ctr = _whctrs(anchor)
    area = w * h
    # Keep area roughly constant while varying the width/height ratio;
    # round to integral side lengths like the original Faster R-CNN code.
    widths = np.round(np.sqrt(area / ratios))
    heights = np.round(widths * ratios)
    return _mkanchors(widths, heights, x_ctr, y_ctr)
def _scale_enum(anchor, scales):
    """
    Enumerate a set of anchors for each scale wrt an anchor.
    """
    w, h, x_ctr, y_ctr = _whctrs(anchor)
    # Multiply both sides by each scale, keeping the center fixed.
    return _mkanchors(w * scales, h * scales, x_ctr, y_ctr)
def clip_boxes(boxes, im_shape):
    """
    Clip boxes to image boundaries (in place) and return them.

    im_shape is (height, width); coordinates are clamped to [0, dim - 1].
    """
    max_x = im_shape[1] - 1
    max_y = im_shape[0] - 1
    boxes[:, 0::4] = np.clip(boxes[:, 0::4], 0, max_x)  # x1
    boxes[:, 1::4] = np.clip(boxes[:, 1::4], 0, max_y)  # y1
    boxes[:, 2::4] = np.clip(boxes[:, 2::4], 0, max_x)  # x2
    boxes[:, 3::4] = np.clip(boxes[:, 3::4], 0, max_y)  # y2
    return boxes
def filter_boxes(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
ws = boxes[:, 2] - boxes[:, 0] + 1
hs = boxes[:, 3] - boxes[:, 1] + 1
keep = np.where((ws >= min_size) & (hs >= min_size))[0]
return keep
def py_cpu_nms(dets, thresh):
    """Pure Python NMS baseline.

    dets is (N, 5): x1, y1, x2, y2, score. Greedily keeps the highest-scoring
    box and drops any remaining box whose IoU with it exceeds thresh.
    """
    x1, y1 = dets[:, 0], dets[:, 1]
    x2, y2 = dets[:, 2], dets[:, 3]
    scores = dets[:, 4]

    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]  # indices by descending score

    keep = []
    while order.size > 0:
        best = order[0]
        keep.append(best)
        rest = order[1:]
        # Intersection of the best box with every remaining candidate.
        ix1 = np.maximum(x1[best], x1[rest])
        iy1 = np.maximum(y1[best], y1[rest])
        ix2 = np.minimum(x2[best], x2[rest])
        iy2 = np.minimum(y2[best], y2[rest])
        iw = np.maximum(0.0, ix2 - ix1 + 1)
        ih = np.maximum(0.0, iy2 - iy1 + 1)
        inter = iw * ih
        iou = inter / (areas[best] + areas[rest] - inter)
        # Survivors are the candidates whose overlap is within the threshold.
        order = rest[np.where(iou <= thresh)[0]]
    return keep
def bbox_overlaps(boxes, query_boxes):
    """
    Compute IoU overlap between two sets of boxes.

    Parameters
    ----------
    boxes: (N, 4) ndarray of float
    query_boxes: (K, 4) ndarray of float
    Returns
    -------
    overlaps: (N, K) ndarray of overlap between boxes and query_boxes
    """
    # NOTE: boxes (not query_boxes) are truncated to integer coordinates,
    # matching the original behavior.
    boxes=boxes.astype(int)
    N = boxes.shape[0]
    K = query_boxes.shape[0]
    # FIX: `np.float` was a deprecated alias for the builtin `float` and was
    # removed in NumPy 1.24; use `float` (float64) directly.
    overlaps = np.zeros((N, K), dtype=float)
    for k in range(K):
        box_area = ((query_boxes[k, 2] - query_boxes[k, 0] + 1) * (query_boxes[k, 3] - query_boxes[k, 1] + 1))
        for n in range(N):
            # Intersection width; <= 0 means no horizontal overlap.
            iw = (min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + 1)
            if iw > 0:
                # Intersection height; <= 0 means no vertical overlap.
                ih = (min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + 1)
                if ih > 0:
                    # Union area = area(box) + area(query) - intersection.
                    ua = float((boxes[n, 2] - boxes[n, 0] + 1) * (boxes[n, 3] - boxes[n, 1] + 1) + box_area - iw * ih)
                    overlaps[n, k] = iw * ih / ua
    return overlaps
def unmap(data, count, inds, fill=0):
    """ Unmap a subset of item (data) back to the original set of items (of
    size count).

    Produces a float32 array whose first dimension is `count`, filled with
    `fill`, with `data` scattered into rows `inds`.
    """
    # data.shape[1:] is () for 1-D input, so this covers both cases; fancy
    # indexing on the first axis works identically for 1-D and N-D arrays.
    out_shape = (count,) + data.shape[1:]
    result = np.full(out_shape, fill, dtype=np.float32)
    result[inds] = data
    return result
37811,
198,
220,
220,
220,
1220,
0,
59,
6127,
407,
925,
416,
502,
11,
1392,
422,
3740,
1378,
12567,
13,
785,
14,
67,
506,
73,
74,
14,
69,
1603,
62,
6015,
20471,
62,
6122,
292,
14,
2436,
672,
14,
9866,
14,
26791,
13,
9078,
220,
... | 2.045326 | 2,471 |
import logging
import numpy as np
import optimization.de as de
# Log bare messages (no timestamps/levels) at INFO.
logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger(__name__)
# define objective function
# NOTE(review): `f` is referenced below but never defined in this view --
# the objective function is presumably defined elsewhere in the file; verify.
bounds = np.asarray([(-5.0, 5.0)])  # one-variable search interval [-5, 5]
solution = de.differential_evolution(f, bounds, n_iter=1, debug=True)
logger.info(f'F({solution[0]})={f(solution[0])}')
| [
11748,
18931,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
23989,
13,
2934,
355,
390,
198,
198,
6404,
2667,
13,
35487,
16934,
7,
5715,
28,
6404,
2667,
13,
10778,
11,
5794,
11639,
4,
7,
20500,
8,
82,
11537,
198,
6404,
1362,
796,
1... | 2.656489 | 131 |
# -*- coding: utf-8 -*-
import numpy as np
from .base import Resampler
from ..types.numeric import Probability
from ..types.particle import Particle
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
764,
8692,
1330,
1874,
321,
20053,
198,
6738,
11485,
19199,
13,
77,
39223,
1330,
30873,
1799,
198,
6738,
11485,
19199,
13,
... | 3.081633 | 49 |
from astropy.table import Table, unique
import os
import logging
| [
6738,
6468,
28338,
13,
11487,
1330,
8655,
11,
3748,
198,
11748,
28686,
198,
11748,
18931,
628
] | 4.125 | 16 |
# Django settings for nextfeed project.
import os
import djcelery
djcelery.setup_loader()
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
DEBUG = True
TEMPLATE_DEBUG = True
ADMINS = (
# ('Your Name', 'your_email@example.com'),
("Amir Rachum", "amir@rachum.com"),
)
SERVER_EMAIL = 'webmaster@nextfeed.org'
MANAGERS = ADMINS
DATABASES = {}
LOGIN_URL = '/openid/login'
LOGIN_REDIRECT_URL = '/dashboard'
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = 'staticfiles'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_PATH, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'b!bl4%t6w-59wx5k@bwn=p%3f)$b83h4l^5d2s2#-v1)_u0u*y'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'nextfeed.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'nextfeed.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    # FIX: the trailing comma is required -- without it this was the bare
    # string "templates", not a 1-tuple, and Django would iterate it
    # character by character.
    "templates",
)
TEMPLATE_CONTEXT_PROCESSORS = (
    "django.contrib.auth.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.core.context_processors.static",
    "django.core.context_processors.tz",
    "django.contrib.messages.context_processors.messages",
    # FIX: listed once -- the original repeated this entry, causing the
    # request processor to run twice for every template render.
    "django.core.context_processors.request",
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'django_openid_auth',
'feeds',
'profiles',
'kombu.transport.django',
'djcelery',
)
AUTHENTICATION_BACKENDS = (
'django_openid_auth.auth.OpenIDBackend',
'django.contrib.auth.backends.ModelBackend',
)
OPENID_CREATE_USERS = True
# For Profiles
AUTH_PROFILE_MODULE = 'profiles.UserProfile'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# List of modules to import when celery starts.
CELERY_IMPORTS = ("feeds.tasks",)
from datetime import timedelta
CELERYBEAT_SCHEDULE = {
'add-every-30-seconds': {
'task': 'feeds.tasks.poll_all_feeds',
'schedule': timedelta(minutes=5),
'args': ()
},
}
CELERY_TIMEZONE = 'UTC'
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static asset configuration
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
static_dir = os.path.join(BASE_DIR, '../../', 'static')
dirs = ('nextfeed',
'bootstrap',
'jquery',
'openid-selector',
'zero-clipboard')
STATICFILES_DIRS = tuple(os.path.join(static_dir, dir) for dir in dirs)
try:
GITHUB_API_KEY = os.environ['GITHUB_API_KEY']
except KeyError:
GITHUB_API_KEY = ""
| [
2,
37770,
6460,
329,
1306,
12363,
1628,
13,
198,
11748,
28686,
198,
11748,
42625,
7015,
88,
198,
28241,
7015,
88,
13,
40406,
62,
29356,
3419,
198,
198,
31190,
23680,
62,
34219,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
... | 2.624172 | 2,565 |
"""
Run a small task by
* Uploading files
* Creating a pool and a job
* Creating a task and submitting it
* Waiting till end of task
* Retrieving files and checking
"""
import pytest
import datetime
import dmsbatch
from tests.test_blob_client import container_name
@pytest.fixture()
@pytest.fixture
@pytest.fixture
@pytest.mark.integration | [
37811,
198,
220,
220,
220,
5660,
257,
1402,
4876,
416,
198,
220,
220,
220,
1635,
36803,
278,
3696,
198,
220,
220,
220,
1635,
30481,
257,
5933,
290,
257,
1693,
198,
220,
220,
220,
1635,
30481,
257,
4876,
290,
24353,
340,
198,
220,
22... | 2.929134 | 127 |
# thread1-extra.py to create simple threads with function (using threads list to simplly the code)
import threading
from threading import Thread as Thread
from time import sleep
threads = []
# creating threads
# NOTE(review): print_hello / print_message are not defined in this view --
# they are presumably defined elsewhere in the full file; verify.
threads.append(Thread(target=print_hello, name="Th 1"))
threads.append(Thread(target=print_hello, name="Th 2"))
threads.append(Thread(target=print_message, args=["Good morning"], name="Th 3"))
# start the threads
for th in threads:
    th.start()
# wait till all are done
for th in threads:
    th.join()
| [
2,
4704,
16,
12,
26086,
13,
9078,
284,
2251,
2829,
14390,
351,
2163,
357,
3500,
14390,
1351,
284,
985,
79,
12810,
262,
2438,
8,
198,
198,
11748,
4704,
278,
198,
6738,
4704,
278,
1330,
14122,
355,
14122,
198,
6738,
640,
1330,
3993,
1... | 3.28481 | 158 |
from anki.hooks import addHook
import re
# Note-field names this add-on reads from / writes to -- presumably the
# source and destination fields of the note; verify against the add-on code.
clozeField = "Meaning"
finalField = "Expression"
# NOTE(review): addSentenceFromClozeButton is not defined in this view; it is
# expected elsewhere in the full file.
addHook("setupEditorButtons", addSentenceFromClozeButton)
| [
6738,
281,
4106,
13,
25480,
82,
1330,
751,
39,
566,
198,
11748,
302,
198,
198,
565,
78,
2736,
15878,
796,
366,
5308,
7574,
1,
198,
20311,
15878,
796,
366,
16870,
2234,
1,
628,
628,
198,
198,
2860,
39,
566,
7203,
40406,
17171,
1537,
... | 2.8 | 55 |
from __future__ import print_function, division
"""
Broken axis example, where the y-axis will have a portion cut out.
"""
import matplotlib.pyplot as plt
import numpy as np
mu1 = 30
sig1 = 1
mu2 = 10
sig2 = 1
pts = np.hstack((np.random.randn(100)*sig1 + mu1, np.random.randn(20)*sig2 + mu2 ))
# # 30 points between [0, 0.2) originally made using np.random.rand(30)*.2
# pts = np.array([
# 0.015, 0.166, 0.133, 0.159, 0.041, 0.024, 0.195, 0.039, 0.161, 0.018,
# 0.143, 0.056, 0.125, 0.096, 0.094, 0.051, 0.043, 0.021, 0.138, 0.075,
# 0.109, 0.195, 0.050, 0.074, 0.079, 0.155, 0.020, 0.010, 0.061, 0.008])
#
# # Now let's make two outlier points which are far away from everything.
# pts[[3, 14]] += .8
# If we were to simply plot pts, we'd lose most of the interesting
# details due to the outliers. So let's 'break' or 'cut-out' the y-axis
# into two portions - use the top (ax) for the outliers, and the bottom
# (ax2) for the details of the majority of our data
f, (ax, ax2) = plt.subplots(1, 2, sharey=True)
bin_width = 1.
max_age = np.ceil(np.max(pts))+ 0.5*bin_width
min_age = np.floor(np.min(pts)) - 0.5*bin_width
nbins = int((max_age - min_age) / bin_width)
# plot the same data on both axes
hist, edges = np.histogram(pts, bins=nbins)
ax.bar(edges[:-1], hist, align='edge', hatch='/', edgecolor='r', color='none')
# BUG!?!! SETTING ALPHA MAKES FACECOLOR GO TO BLACK
# ax.bar(edges[:-1], hist, align='edge', alpha=0.2, hatch='/', edgecolor='r', color='none')
ax.bar(edges[:-1]+0.3, hist, align='edge', hatch='+', edgecolor='b', color='none')
ax2.bar(edges[:-1], hist, align='edge', hatch='/')
# ax.hist(pts, bins=nbins)
# ax2.hist(pts, bins=nbins)
# zoom-in / limit the view to different portions of the data
ax.set_xlim(5, 15) # outliers only
ax2.set_xlim(25, 35) # most of the data
# hide the spines between ax and ax2
ax.spines['right'].set_visible(False)
ax2.spines['left'].set_visible(False)
# ax.xaxis.tick_top()
ax.yaxis.tick_left()
ax.tick_params(labelright='off') # don't put tick labels at the top
ax2.yaxis.tick_right()
# This looks pretty good, and was fairly painless, but you can get that
# cut-out diagonal lines look with just a bit more work. The important
# thing to know here is that in axes coordinates, which are always
# between 0-1, spine endpoints are at these locations (0,0), (0,1),
# (1,0), and (1,1). Thus, we just need to put the diagonals in the
# appropriate corners of each of our axes, and so long as we use the
# right transform and disable clipping.
d = .015 # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax.transAxes, color='k', clip_on=False)
# ax.plot((-d, +d), (-d, +d), **kwargs) # top-left diagonal
ax.plot((1 - d, 1 + d), (-d, +d), **kwargs) # top-right diagonal
ax.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs) # top-right diagonal
kwargs.update(transform=ax2.transAxes) # switch to the bottom axes
ax2.plot((-d, +d), (-d, +d), **kwargs) # bottom-left diagonal
ax2.plot((- d, + d), (1 - d, 1 + d), **kwargs) # bottom-right diagonal
# What's cool about this is that now if we vary the distance between
# ax and ax2 via f.subplots_adjust(hspace=...) or plt.subplot_tool(),
# the diagonal lines will move accordingly, and stay right at the tips
# of the spines they are 'breaking'
plt.savefig('temp_plots/broken_axes.pdf')
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
7297,
198,
198,
37811,
198,
15783,
3464,
16488,
1672,
11,
810,
262,
331,
12,
22704,
481,
423,
257,
6903,
2005,
503,
13,
198,
37811,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355... | 2.608862 | 1,309 |
#!/usr/bin/env python3
"""
The MIT License (MIT)
Copyright (c) 2021 Amy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import random
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this view; it is presumably
    # defined elsewhere in the full file -- verify.
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
37811,
198,
464,
17168,
13789,
357,
36393,
8,
198,
198,
15269,
357,
66,
8,
33448,
14235,
198,
198,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
167... | 3.707395 | 311 |
"""Provides device automations for Nest."""
from __future__ import annotations
import voluptuous as vol
from homeassistant.components.automation import (
AutomationActionType,
AutomationTriggerInfo,
)
from homeassistant.components.device_automation import DEVICE_TRIGGER_BASE_SCHEMA
from homeassistant.components.device_automation.exceptions import (
InvalidDeviceAutomationConfig,
)
from homeassistant.components.homeassistant.triggers import event as event_trigger
from homeassistant.const import CONF_DEVICE_ID, CONF_DOMAIN, CONF_PLATFORM, CONF_TYPE
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers.typing import ConfigType
from .const import DOMAIN
from .device_info import async_nest_devices_by_device_id
from .events import DEVICE_TRAIT_TRIGGER_MAP, NEST_EVENT
DEVICE = "device"
TRIGGER_TYPES = set(DEVICE_TRAIT_TRIGGER_MAP.values())
TRIGGER_SCHEMA = DEVICE_TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_TYPE): vol.In(TRIGGER_TYPES),
}
)
async def async_get_triggers(
    hass: HomeAssistant, device_id: str
) -> list[dict[str, str]]:
    """List device triggers for a Nest device."""
    devices = async_nest_devices_by_device_id(hass)
    device = devices.get(device_id)
    if not device:
        raise InvalidDeviceAutomationConfig(f"Device not found {device_id}")
    triggers: list[dict[str, str]] = []
    for trait in device.traits:
        # Only traits with a mapped trigger type produce a trigger entry.
        trigger_type = DEVICE_TRAIT_TRIGGER_MAP.get(trait)
        if trigger_type:
            triggers.append(
                {
                    CONF_PLATFORM: DEVICE,
                    CONF_DEVICE_ID: device_id,
                    CONF_DOMAIN: DOMAIN,
                    CONF_TYPE: trigger_type,
                }
            )
    return triggers
async def async_attach_trigger(
    hass: HomeAssistant,
    config: ConfigType,
    action: AutomationActionType,
    automation_info: AutomationTriggerInfo,
) -> CALLBACK_TYPE:
    """Attach a trigger."""
    # Match only NEST_EVENT events carrying this device id and trigger type.
    event_data = {
        CONF_DEVICE_ID: config[CONF_DEVICE_ID],
        CONF_TYPE: config[CONF_TYPE],
    }
    event_config = event_trigger.TRIGGER_SCHEMA(
        {
            event_trigger.CONF_PLATFORM: "event",
            event_trigger.CONF_EVENT_TYPE: NEST_EVENT,
            event_trigger.CONF_EVENT_DATA: event_data,
        }
    )
    return await event_trigger.async_attach_trigger(
        hass, event_config, action, automation_info, platform_type="device"
    )
| [
37811,
15946,
1460,
3335,
3557,
602,
329,
21420,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
2322,
37623,
5623,
355,
2322,
198,
198,
6738,
1363,
562,
10167,
13,
5589,
3906,
13,
2306,
296,
341,
1330,
357,
198,... | 2.398204 | 1,002 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Connector to MISP."""
from pymisp import ExpandedPyMISP
import config
def get_misp_attributes():
    '''Method to pull the attributes (IOCs) from MISP server.

    Connects with MISP_DOMAIN / MISP_KEY / MISP_VERIFYCERT from config,
    searches the attributes controller with config.MISP_EVENT_FILTERS,
    and returns the list stored under the response's 'Attribute' key.
    '''
    misp = ExpandedPyMISP(config.MISP_DOMAIN, config.MISP_KEY, config.MISP_VERIFYCERT)
    attributes = misp.search(controller='attributes', return_format='json', **config.MISP_EVENT_FILTERS)
    return attributes['Attribute']
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
34525,
284,
50029,
47,
526,
15931,
198,
198,
6738,
279,
4948,
8802,
1330,
5518,
12249,
20519,
44,
1797,
47,
198,... | 2.634731 | 167 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2020 NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Generation of HDF5 datasets with summary pileup encodings."""
import argparse
from functools import partial
import multiprocessing as mp
import os
import subprocess
import h5py
import numpy as np
import glob
from variantworks.encoders import SummaryEncoder, HaploidLabelEncoder
from variantworks.types import FileRegion
from variantworks.utils.encoders import sliding_window
def validate_data_dirs(data_dirs):
    """Ensure that each data directory contains subreads, draft, and truth.

    Args:
        data_dirs: Iterable of directory paths to check.

    Raises:
        RuntimeError: If any directory is missing one of the required
            FASTA files (subreads.fa, draft.fa, truth.fa).
    """
    # The three checks were identical except for the filename; loop instead
    # of copy-pasting them. Error messages are unchanged.
    required_files = ("subreads.fa", "draft.fa", "truth.fa")
    for directory in data_dirs:
        for file_name in required_files:
            if not os.path.exists(os.path.join(directory, file_name)):
                raise RuntimeError(
                    "{} not present in all data folders.".format(file_name))
def _run_quiet(cmd):
    """Run an external command, discarding stdout/stderr; raise on failure."""
    subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)


def _align_sort_index(ref_fa, query_fa, name, suffix):
    """Align *query_fa* to *ref_fa*, then sort and index the alignment.

    Args:
        ref_fa: Path to the reference FASTA (the draft assembly).
        query_fa: Path to the query FASTA (subreads or truth).
        name: Basename prefix for the intermediate BAM files.
        suffix: Per-sample suffix so parallel workers do not collide.

    Returns:
        Path to the sorted, indexed BAM file.
    """
    raw_bam = "{}_{}.bam".format(name, suffix)
    _run_quiet([
        "minimap2", "-x", "map-pb", "-t", "1",
        ref_fa, query_fa, "--MD", "-a", "-o", raw_bam])
    sorted_bam = "{}_{}.sorted.bam".format(name, suffix)
    _run_quiet(["samtools", "sort", raw_bam, "-o", sorted_bam])
    _run_quiet(["samtools", "index", sorted_bam])
    return sorted_bam


def create_pileup(data_dir):
    """Create a pileup file from subreads, draft, and truth.

    Aligns subreads and truth against the draft, then produces a joint
    samtools mpileup over both sorted BAMs. Intermediate BAM/index files
    are deleted before returning.

    NOTE: the original code dropped the quiet redirects on the truth-sort
    call only; all external commands are now uniformly silenced.

    Args:
        data_dir: Directory containing subreads.fa, draft.fa and truth.fa.

    Returns:
        FileRegion covering the generated pileup file.
    """
    subreads_file = os.path.join(data_dir, "subreads.fa")
    draft_file = os.path.join(data_dir, "draft.fa")
    truth_file = os.path.join(data_dir, "truth.fa")
    # Use the leaf directory name to keep per-sample filenames unique.
    suffix = os.path.basename(os.path.normpath(data_dir))
    subreads_sorted_bam = _align_sort_index(
        draft_file, subreads_file, "subreads2draft", suffix)
    truth_sorted_bam = _align_sort_index(
        draft_file, truth_file, "truth2draft", suffix)
    mpileup_file = "subreads_and_truth_{}.pileup".format(suffix)
    _run_quiet(["samtools", "mpileup", subreads_sorted_bam,
                truth_sorted_bam, "-s", "--reverse-del", "-o", mpileup_file])
    # Remove intermediate files
    for f in glob.glob("*_{}.*bam*".format(suffix)):
        os.remove(f)
    return FileRegion(start_pos=0, end_pos=None, file_path=mpileup_file)
def encode(sample_encoder, label_encoder, chunk_len, chunk_ovlp, data_dir):
    """Generate sample and label encoding chunks for one data directory.

    Args:
        sample_encoder: Callable mapping a FileRegion to (encoding, positions).
        label_encoder: Callable mapping a FileRegion to (labels, positions).
        chunk_len: Length of each chunk produced by the sliding window.
        chunk_ovlp: Overlap between consecutive chunks.
        data_dir: Directory with subreads.fa, draft.fa, truth.fa.

    Returns:
        (encoding_chunks, position_chunks, label_chunks); three empty lists
        if encoding fails for any reason (deliberate best-effort behavior).
    """
    region = create_pileup(data_dir)
    step = chunk_len - chunk_ovlp
    try:
        encoding, encoding_positions = sample_encoder(region)
        label, _label_positions = label_encoder(region)
        # Explicit raises instead of `assert cond, print(...)`: the original
        # pattern produced a None assertion message and disappears under -O.
        if len(encoding) != len(label):
            raise RuntimeError(
                "Encoding and label dimensions not as expected: "
                "{} {} {}".format(encoding.shape, label.shape, region))
        if len(encoding_positions) != len(encoding):
            raise RuntimeError(
                "Encoding and positions not as expected: "
                "{} {} {}".format(encoding.shape, encoding_positions.shape,
                                  region))
        encoding_chunks = sliding_window(encoding, chunk_len, step=step)
        position_chunks = sliding_window(encoding_positions, chunk_len,
                                         step=step)
        label_chunks = sliding_window(label, chunk_len, step=step)
        return (encoding_chunks, position_chunks, label_chunks)
    except Exception:
        # Best-effort: a failing sample yields empty chunk lists rather than
        # aborting the whole multiprocess run.
        return ([], [], [])
    finally:
        # Remove the intermediate pileup exactly once. The original removed
        # it on both the success and failure paths, which could raise
        # FileNotFoundError out of the except handler if a late failure
        # occurred after the first removal.
        if os.path.exists(region.file_path):
            os.remove(region.file_path)
def generate_hdf5(args):
    """Generate encodings in a multiprocess loop and save tensors to HDF5.

    Args:
        args: argparse.Namespace with data_dir, single_dir, output_file,
            threads, chunk_len and chunk_ovlp attributes.
    """
    # Collect per-sample directories: every subdirectory of each --data-dir
    # parent, plus any directories passed directly via --single-dir.
    data_dirs = []
    for data_dir in args.data_dir:
        for subdir in os.listdir(data_dir):
            subdir = os.path.abspath(os.path.join(data_dir, subdir))
            if os.path.isdir(subdir):
                data_dirs.append(subdir)
    data_dirs.extend(args.single_dir)
    # Validate directories
    validate_data_dirs(data_dirs)
    # Setup encoder for samples and labels.
    sample_encoder = SummaryEncoder(exclude_no_coverage_positions=True)
    label_encoder = HaploidLabelEncoder(exclude_no_coverage_positions=True)
    encode_func = partial(encode, sample_encoder, label_encoder,
                          args.chunk_len, args.chunk_ovlp)
    features = []
    labels = []
    positions = []
    print('Serializing {} pileup files...'.format(len(data_dirs)))
    # Context manager terminates the worker pool even on error (the original
    # leaked the pool: it was never closed or joined).
    with mp.Pool(args.threads) as pool:
        for label_idx, out in enumerate(pool.imap(encode_func, data_dirs)):
            if (label_idx + 1) % 100 == 0:
                print('Generated {} pileups'.format(label_idx + 1))
            (encoding_chunks, position_chunks, label_chunks) = out
            # Keep only full-length chunks so np.stack sees uniform shapes.
            if (encoding_chunks and position_chunks and label_chunks
                    and encoding_chunks[0].shape[0] == args.chunk_len
                    and label_chunks[0].shape[0] == args.chunk_len
                    and position_chunks[0].shape[0] == args.chunk_len):
                features += encoding_chunks
                labels += label_chunks
                positions += position_chunks
    print('Generated {} pileup files'.format(len(data_dirs)))
    # `with` guarantees the HDF5 file is closed even if a dataset write fails.
    with h5py.File(args.output_file, 'w') as h5_file:
        h5_file.create_dataset('features', data=np.stack(features, axis=0))
        h5_file.create_dataset('positions', data=np.stack(positions, axis=0))
        h5_file.create_dataset('labels', data=np.stack(labels, axis=0))
def build_parser():
    """Construct and return the command-line parser for this tool."""
    parser = argparse.ArgumentParser(
        description='Store encoded data in HDF5 format.')
    parser.add_argument(
        '-d', '--data-dir', nargs='+', default=[],
        help='Directory with folders containing subreads, draft, truth.')
    parser.add_argument(
        '-r', '--single-dir', nargs='+', default=[],
        help='Directory containing subreads, draft, truth.')
    parser.add_argument(
        '-o', '--output-file', type=str,
        help='Path to output HDF5 file.')
    parser.add_argument(
        '-t', '--threads', type=int, default=mp.cpu_count(),
        help='Threads to parallelize over.')
    parser.add_argument(
        '-c', '--chunk-len', type=int, default=1000,
        help='Length of chunks to be created from pileups.')
    parser.add_argument(
        '--chunk-ovlp', type=int, default=200,
        help='Length of overlaps between chunks.')
    return parser
if __name__ == '__main__':
    # Parse CLI options and run dataset generation.
    cli_args = build_parser().parse_args()
    generate_hdf5(cli_args)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
12131,
15127,
23929,
44680,
6234,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
... | 2.252055 | 3,892 |
from tabulate import tabulate
from .structs import STRUCTS
| [
6738,
7400,
5039,
1330,
7400,
5039,
198,
198,
6738,
764,
7249,
82,
1330,
19269,
18415,
50,
628
] | 3.588235 | 17 |