content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
"""
class Event:
def __init__(
self, relative_match_position, sample,
incomming_data, correlation, percentage):
self.data = {
"relative_match_position": relative_match_position,
"sample": sample,
"incoming_data": incomming_data,
"correlation": correlation,
"percentage": percentage,
}
def is_percentage_equal_or_bigger(self, percentage):
return percentage <= self.data["percentage"]
def __str__(self):
return str(self.data)
def get(self):
return self.data
"""
"""
def filter_events(events, percentage=45.0):
return [e for e in events
if e.is_percentage_equal_or_bigger(percentage) is True]
"""
| [
37811,
198,
4871,
8558,
25,
628,
220,
220,
220,
825,
11593,
15003,
834,
7,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
2116,
11,
3585,
62,
15699,
62,
9150,
11,
6291,
11,
198,
220,
220,
220,
220,
220,
220,
220,
22... | 2.264264 | 333 |
"""
User Entity Module
"""
# Standard Library
import datetime
# Third Party Library
from django.utils import timezone
from django.contrib.auth.models import User
from django.db.models.aggregates import Count
from django.contrib.auth.hashers import make_password
| [
37811,
198,
12982,
20885,
19937,
198,
37811,
198,
198,
2,
8997,
10074,
198,
11748,
4818,
8079,
198,
198,
2,
10467,
3615,
10074,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,... | 3.533333 | 75 |
import datetime
import os
from io import BytesIO
import boto3
from botocore.exceptions import ClientError
_S3_ACCESS_KEY = os.environ["AWSAccessKeyId"].replace("\r", "")
_S3_SECRET_KEY = os.environ["AWSSecretKey"].replace("\r", "")
_S3_BUCKET_NAME = "coronavirus-calculator-data"
DATESTRING_FORMAT_READABLE = "%A %d %B %Y, %H:%M %Z" # 'Sunday 30 November 2014'
def upload_file(data: bytes, object_name: str):
"""
Upload a file to an S3 bucket
:param data: Bytes to upload.
:param object_name: S3 object name.
:return: True if file was uploaded, else False
"""
buf = BytesIO(data)
s3_client = _configure_client()
try:
response = s3_client.put_object(
Body=buf, Bucket=_S3_BUCKET_NAME, Key=object_name
)
except ClientError as e:
print(e)
return False
return True
def download_file(object_name: str):
"""
Download a file from S3 bucket.
:param object_name: Name of object to download.
:return: Object bytes and date last modified.
"""
s3_client = _configure_client()
download = s3_client.get_object(Key=object_name, Bucket=_S3_BUCKET_NAME)
content = download["Body"].read()
last_modified = download["LastModified"].strftime(DATESTRING_FORMAT_READABLE)
return content, last_modified
| [
11748,
4818,
8079,
198,
11748,
28686,
198,
6738,
33245,
1330,
2750,
4879,
9399,
198,
198,
11748,
275,
2069,
18,
198,
6738,
10214,
420,
382,
13,
1069,
11755,
1330,
20985,
12331,
198,
198,
62,
50,
18,
62,
26861,
7597,
62,
20373,
796,
28... | 2.478343 | 531 |
#!/usr/bin/env python
# coding: utf-8
# In[12]:
import tensorflow as tf
import numpy as np
import keras
import pandas as pd
import os
import matplotlib.pyplot as plt
dropout_rate = 0.4
# In[2]:
# In[3]:
#image = tf.placeholder(tf.float32 , [batch_size,224,224,3])
#x = image_layer(image)
#print(x.shape)
# In[4]:
# In[5]:
#quest = tf.placeholder(tf.int32 , [batch_size ,q_len])
#temp = question_layer(512 , 256 , quest , batch_size)
#print(temp.shape)
# In[6]:
# In[7]:
#att = attention(x , temp , 512 , True)
# In[8]:
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
554,
58,
1065,
5974,
628,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
41927,
292,
198,
117... | 2.32766 | 235 |
"""
This project trains an AI to detect german traffic signs and sends the recognized signs to ros
TODO:
- interpolate ROI from CSV to new dimensions
- integrate ROS platform
Authors: Jan Fuesting
Last edited: 10.09.2018
"""
import os
from Misc import Misc
from Recognition import Recognition
from Training import Training
# Conflict ROS Kinetic and OpenCV
# https://stackoverflow.com/questions/43019951/after-install-ros-kinetic-cannot-import-opencv
class Main:
"""
Main class
"""
def __init__(self):
"""
Initialization
"""
self.misc = Misc()
self.recognition = Recognition()
self.training = Training()
def run(self):
"""
This method controls program sequence
:return:
"""
# Initialize system
self.misc.logger.debug("Program started")
dataset_path = self.misc.project_root + "/dataset"
if not os.path.exists(dataset_path):
os.makedirs(dataset_path)
# Getting and manipulating datasets
# self.training.download_pos_files(images=True, haar=True)
# self.training.download_neg_files()
# self.training.download_face_recognition_haar()
# self.training.manipulate_image()
# self.training.generate_description_traffic()
# self.training.generate_description_airplanes()
# Get camera image and find traffic signs
# self.recognition.face_recognition()
self.recognition.get_camera_image()
self.misc.logger.debug("Program finished")
main = Main()
main.run()
| [
37811,
198,
1212,
1628,
13404,
281,
9552,
284,
4886,
308,
2224,
4979,
5895,
290,
12800,
262,
8018,
5895,
284,
686,
82,
198,
198,
51,
3727,
46,
25,
198,
12,
39555,
378,
15107,
40,
422,
44189,
284,
649,
15225,
198,
12,
19386,
48263,
3... | 2.564103 | 624 |
# -*- coding: utf-8 -*-
"""
权限表
"""
from typing import Optional
from pydantic import BaseModel
class RoleCreate(BaseModel):
"""
创建角色字段
"""
role_id: int
role_name: str
permission_id: int
re_mark: Optional[str] = None
class RoleUpdate(BaseModel):
"""
角色更新字段
"""
role_name: Optional[str] = None
re_mark: Optional[str] = None
| [
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
30266,
225,
165,
247,
238,
26193,
101,
198,
37811,
198,
6738,
19720,
1330,
32233,
198,
198,
6738,
279,
5173,
5109,
1330,
7308,
17633,
628,
198,
487... | 1.984211 | 190 |
import pyiem.cscap_utils as util
drive = util.get_driveclient(util.get_config(), "cscap")
spr_client = util.get_spreadsheet_client(util.get_config())
res = drive.files().list(q="title contains 'Plot Identifiers'").execute()
for item in res["items"]:
if item["mimeType"] != "application/vnd.google-apps.spreadsheet":
continue
print(item["title"])
spreadsheet = util.Spreadsheet(spr_client, item["id"])
spreadsheet.get_worksheets()
sheet = spreadsheet.worksheets["Sheet 1"]
for col in ["AGRO", "SOIL", "GHG", "IPM_CSCAP", "IPM_USB"]:
sheet.add_column(col)
| [
11748,
12972,
26597,
13,
66,
1416,
499,
62,
26791,
355,
7736,
198,
198,
19472,
796,
7736,
13,
1136,
62,
19472,
16366,
7,
22602,
13,
1136,
62,
11250,
22784,
366,
66,
1416,
499,
4943,
198,
34975,
62,
16366,
796,
7736,
13,
1136,
62,
43... | 2.614035 | 228 |
from airflow import AirflowException
| [
6738,
45771,
1330,
3701,
11125,
16922,
628
] | 5.428571 | 7 |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import numpy as np
import pytest
import pandapower as pp
import pandapower.networks as nw
from pandapower.pd2ppc import _pd2ppc
from pandapower.pypower.makePTDF import makePTDF
from pandapower.pypower.makeLODF import makeLODF
from pandapower.test.loadflow.result_test_network_generator import result_test_network_generator_dcpp
from pandapower.test.toolbox import add_grid_connection, create_test_line, assert_net_equal
if __name__ == "__main__":
pytest.main([__file__, "-xs"])
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
357,
66,
8,
1584,
12,
42334,
416,
2059,
286,
15035,
741,
290,
39313,
403,
71,
30288,
5136,
329,
6682,
18963,
198,
2,
290,
6682,
4482,
8987,
357,
40,
6... | 3.013333 | 225 |
""" Class to organize QA for a full DESI production run
"""
from __future__ import print_function, absolute_import, division
import numpy as np
import glob, os
import warnings
from lvmspec.io import get_exposures
from lvmspec.io import get_files
from lvmspec.io import read_frame
from lvmspec.io import read_meta_frame
from lvmspec.io import specprod_root
from lvmutil.log import get_logger
# log = get_logger()
| [
37811,
5016,
284,
16481,
1195,
32,
329,
257,
1336,
22196,
40,
3227,
1057,
198,
37811,
198,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
4112,
62,
11748,
11,
7297,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
15095,
... | 3.051095 | 137 |
import io
import os
import nest_asyncio
import uvicorn
# from PIL import Image
from dotenv import load_dotenv
from fastapi import FastAPI, UploadFile, File, Request
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import base64
# from coreai.tichHop import Vgg16DetectFace
from rete import tinhluong
import numpy as np
load_dotenv()
# HOST = os.getenv("HOST")
app = FastAPI()
LIST_BANG_CAP = ["Cử nhân", "Thạc sĩ", "Tiến sĩ", "Phó giáo sư", "Giáo sư", "Khác"]
# vgg = Vgg16DetectFace()
# vgg.LoadModel()
origins = [
"http://localhost.tiangolo.com",
"https://localhost.tiangolo.com",
"http://localhost",
"http://localhost:3000",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
@app.get("/")
@app.post("/tinhLuong/")
# def getAndDeCodeImage(data):
# file_bytes = np.asarray(bytearray(data), dtype=np.uint8)
# img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
# return img
# def stringToRGB(base64_string):
# imgdata = base64.b64decode(str(base64_string))
# image = Image.open(io.BytesIO(imgdata))
# return cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)
# def encodeImage(image):
# retval, buffer = cv2.imencode('.jpg', image)
# jpg_as_text = base64.b64encode(buffer)
# return jpg_as_text
# @app.post("/deAndRecorg/")
# async def create_file(
# request: Request,
# data: Item2,
# ):
# img = stringToRGB(data.image.split(",")[1])
# data = vgg.predictFace(img)
# data['image'] = "data:image/jpg;base64," + encodeImage(data['image']).decode('utf-8')
# return data
# PORT = 8000
# ngrok_tunnel = ngrok.connect(PORT)
# print('Public URL:', ngrok_tunnel.public_url)
# nest_asyncio.apply()
# uvicorn.run(app, host=HOST, port=PORT)
# uvicorn.run(app, host=HOST, port=PORT)
# uvicorn.run(app, port=PORT)
| [
11748,
33245,
198,
11748,
28686,
198,
198,
11748,
16343,
62,
292,
13361,
952,
198,
11748,
334,
25531,
1211,
198,
2,
422,
350,
4146,
1330,
7412,
198,
6738,
16605,
24330,
1330,
3440,
62,
26518,
24330,
198,
6738,
3049,
15042,
1330,
12549,
... | 2.291469 | 844 |
import tkinter as tk
if __name__ == '__main__':
master = tk.Tk()
dict_entries = {
'item 1': int,
'item 2': str,
}
master.title('Hello World!')
i = 0
dict_tk_entry = {}
for key, val in dict_entries.items():
tk.Label(master, text=str(key)).grid(row=i)
dict_tk_entry[key] = tk.Entry(master)
dict_tk_entry[key].grid(row=i, column=1)
i += 1
# tk.Label(master, text="First").grid(row=0)
# tk.Label(master, text="Second").grid(row=1)
# e1 = tk.Entry(master)
# e2 = tk.Entry(master)
#
# e1.grid(row=0, column=1)
# e2.grid(row=1, column=1)
master.mainloop() | [
11748,
256,
74,
3849,
355,
256,
74,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
4958,
796,
256,
74,
13,
51,
74,
3419,
628,
220,
220,
220,
8633,
62,
298,
1678,
796,
1391,
198,
220,
220,
... | 2 | 332 |
#THIRD PARTY IMPORTS
from datetime import datetime
from views.utils import RequiredLoginViewMixin
from flask.globals import current_app, g
from flask.helpers import send_file
from flask.templating import render_template
from flask.views import MethodView
import os
import shutil
# LOCAL IMPORTS
| [
2,
4221,
46833,
16652,
56,
30023,
33002,
201,
198,
6738,
4818,
8079,
1330,
4818,
8079,
201,
198,
6738,
5009,
13,
26791,
1330,
20906,
47790,
7680,
35608,
259,
201,
198,
6738,
42903,
13,
4743,
672,
874,
1330,
1459,
62,
1324,
11,
308,
20... | 3.038095 | 105 |
from collections import deque
print(stock_availability(["choco", "vanilla", "banana"], "delivery", "caramel", "berry"))
print(stock_availability(["chocolate", "vanilla", "banana"], "delivery", "cookie","banana"))
print(stock_availability(["chocolate", "vanilla", "banana"], "sell"))
print(stock_availability(["chocolate", "vanilla", "banana"], "sell", 1, 1))
print(stock_availability(["chocolate", "chocolate", "banana"], "sell", "chocolate"))
print(stock_availability(["cookie", "chocolate", "banana"], "sell", "chocolate"))
print(stock_availability(["chocolate", "vanilla", "banana"], "sell", "cookie"))
| [
6738,
17268,
1330,
390,
4188,
628,
198,
198,
4798,
7,
13578,
62,
47274,
7,
14692,
354,
25634,
1600,
366,
10438,
5049,
1600,
366,
3820,
2271,
33116,
366,
12381,
6315,
1600,
366,
7718,
17983,
1600,
366,
8396,
48774,
198,
4798,
7,
13578,
... | 3.193717 | 191 |
from .fasthdl import module, In, Out, Reg, Wire, X # noqa
__version__ = "0.0.1"
| [
6738,
764,
69,
292,
400,
25404,
1330,
8265,
11,
554,
11,
3806,
11,
3310,
11,
14712,
11,
1395,
220,
1303,
645,
20402,
198,
198,
834,
9641,
834,
796,
366,
15,
13,
15,
13,
16,
1,
198
] | 2.277778 | 36 |
from urllib.parse import quote_plus
import click
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.exc import OperationalError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
def get_db_connection(config):
"""Configures db connection with config object.""" # TODO: URL-Parsing for special characters.
if config["DB"]["driver"].get() is None:
db_connector = f"{config['DB']['dialect'].get(str)}"
else:
db_connector = (
f"{config['DB']['dialect'].get(str)}+{config['DB']['driver'].get(str)}"
)
db = (
f"{db_connector}://{config['DB']['username'].get(str)}:{quote_plus(config['DB']['password'].get(str))}@"
f"{config['DB']['host'].get(str)}:{config['DB']['port'].get(int)}/{config['DB']['database'].get(str)}"
)
engine = create_engine(db)
try:
Base.metadata.create_all(bind=engine)
session = sessionmaker(bind=engine)
except OperationalError as e:
click.echo(f"Could not connect to '{db}'.\n" f"Details on error:\n" f"{e}")
raise click.Abort
connection = session()
return connection
| [
6738,
2956,
297,
571,
13,
29572,
1330,
9577,
62,
9541,
198,
198,
11748,
3904,
198,
6738,
44161,
282,
26599,
1330,
41146,
11,
29201,
11,
8708,
9218,
11,
34142,
11,
10903,
11,
2251,
62,
18392,
198,
6738,
44161,
282,
26599,
13,
41194,
13... | 2.480962 | 499 |
"""Core YAML Path classes."""
# Establish the version number common to all components
__version__ = "3.6.3"
from yamlpath.yamlpath import YAMLPath
from yamlpath.processor import Processor
| [
37811,
14055,
575,
2390,
43,
10644,
6097,
526,
15931,
198,
2,
10062,
17148,
262,
2196,
1271,
2219,
284,
477,
6805,
198,
834,
9641,
834,
796,
366,
18,
13,
21,
13,
18,
1,
198,
198,
6738,
331,
43695,
6978,
13,
88,
43695,
6978,
1330,
... | 3.315789 | 57 |
#=============================================================================
#
# Copyright (c) Kitware, Inc.
# All rights reserved.
# See LICENSE.txt for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the above copyright notice for more information.
#
#=============================================================================
"""
Test smtk.attribute.System.copyDefinition() method
Uses copyDefinitionTest.sbt in the SMTKTestData repo.
"""
import logging
import os
import sys
try:
import smtk
except ImportError:
print
print 'Not able to import smtk library. You might need to:'
print ' - Use the PYTHONPATH variable to point to the smtk python lib'
print ' - And/or use the LD_LIBRARY_PATH variable to point to the shiboken libraries'
print
sys.exit(-1)
logging.basicConfig(level=logging.DEBUG)
SBT_FILENAME = 'copyDefinitionTest.sbt'
SBI_FILENAME = 'copyDefinitionTest.sbi'
# ---------------------------------------------------------------------
if __name__ == '__main__':
# First (and) only argument is the path to the smtk data directory
if len(sys.argv) < 2:
print
print 'Test smtk.attribute.System.copyDefinition()'
print 'Usage: python %s path-to-SMTKTestData'
print
sys.exit(-1)
logging.debug('LD_LIBRARY_PATH = %s' % os.environ.get('LD_LIBRARY_PATH'))
logging.debug('PYTHONPATH = %s' % os.environ.get('PYTHONPATH'))
# Load attribute file into system
smtk_test_data = sys.argv[1]
att_folder = os.path.join(smtk_test_data, 'smtk', 'attribute')
att_path = os.path.join(att_folder, SBT_FILENAME)
logging.info('Reading %s' % att_path)
input_system = smtk.attribute.System()
reader = smtk.io.AttributeReader()
logger = smtk.io.Logger()
err = reader.read(input_system, att_path, logger)
if err:
logging.error("Unable to load template file")
logging.error(logger.convertToString())
sys.exit(-2)
err_count = 0
# Instantiate 2nd system
test_system = smtk.attribute.System()
# Copy SecondConcrete definition, which should copy alot of stuff
source_def = input_system.findDefinition('SecondConcrete')
test_system.copyDefinition(source_def, 0)
expected_types = [
'SecondConcrete', 'AnotherAbstractBase', 'CommonBase',
'FirstConcrete', 'PolyLinearFunction'
]
for def_type in expected_types:
defn = test_system.findDefinition(def_type)
if defn is None:
logging.error('Expected %s definition, found None' % def_type)
err_count += 1
# Add explicit test for conditional children
defn = test_system.findDefinition('SecondConcrete')
if defn:
i = defn.findItemPosition('ConditionalSelectionList')
item = defn.itemDefinition(i)
if item:
string_item = smtk.attribute.to_concrete(item)
list_one = string_item.conditionalItems('One')
if len(list_one) != 1:
msg = 'Expected \"One\" enum to have 1 conditional item, found %d' % \
len(list_one)
logging.error(msg)
err_count += 1
list_two = string_item.conditionalItems('Two')
if len(list_two) != 3:
msg = 'Expected \"Two\" enum to have 3 conditional items, found %d' % \
len(list_two)
logging.error(msg)
err_count += 1
else:
logging.error('Did not find ConditionalSelectionList item')
err_count += 1
# Note there is ALOT more that could & should be verified here
logging.debug('Writing system')
# Write data out FYI
writer = smtk.io.AttributeWriter()
err = writer.write(test_system, SBI_FILENAME, logger)
if err:
logging.error("Unable to write output file")
sys.exit(-3)
logging.info('Wrote %s' % SBI_FILENAME)
# Check error count
if err_count > 0:
sys.exit(err_count)
sys.exit(0)
| [
2,
23926,
25609,
28,
198,
2,
198,
2,
220,
15069,
357,
66,
8,
10897,
1574,
11,
3457,
13,
198,
2,
220,
1439,
2489,
10395,
13,
198,
2,
220,
4091,
38559,
24290,
13,
14116,
329,
3307,
13,
198,
2,
198,
2,
220,
770,
3788,
318,
9387,
... | 2.857988 | 1,352 |
from math import floor
from random import shuffle
from pandas import DataFrame
import numpy as np
from matplotlib import pyplot
from PIL import Image
BOT_USER = 'usr-71'
TRAIN_SET_PERCENT = 0.75
SCALE_X = 299
SCALE_Y = 299
| [
6738,
10688,
1330,
4314,
198,
6738,
4738,
1330,
36273,
198,
6738,
19798,
292,
1330,
6060,
19778,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
198,
6738,
350,
4146,
1330,
7412,
628,
198,
33,
2394,
... | 2.973684 | 76 |
# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
""" Array proxy base class
The proxy API is - at minimum:
* The object has a read-only attribute ``shape``
* read only ``is_proxy`` attribute / property set to True
* the object returns the data array from ``np.asarray(prox)``
* returns array slice from ``prox[<slice_spec>]`` where ``<slice_spec>`` is any
ndarray slice specification that does not use numpy 'advanced indexing'.
* modifying no object outside ``obj`` will affect the result of
``np.asarray(obj)``. Specifically:
* Changes in position (``obj.tell()``) of passed file-like objects will
not affect the output of from ``np.asarray(proxy)``.
* if you pass a header into the __init__, then modifying the original
header will not affect the result of the array return.
See :mod:`nibabel.tests.test_proxy_api` for proxy API conformance checks.
"""
from contextlib import contextmanager
from threading import RLock
import numpy as np
from .deprecated import deprecate_with_version
from .volumeutils import array_from_file, apply_read_scaling
from .fileslice import fileslice
from .keywordonly import kw_only_meth
from . import openers
"""This flag controls whether a new file handle is created every time an image
is accessed through an ``ArrayProxy``, or a single file handle is created and
used for the lifetime of the ``ArrayProxy``. It should be set to one of
``True``, ``False``, or ``'auto'``.
Management of file handles will be performed either by ``ArrayProxy`` objects,
or by the ``indexed_gzip`` package if it is used.
If this flag is set to ``True``, a single file handle is created and used. If
``False``, a new file handle is created every time the image is accessed. For
gzip files, if ``'auto'``, and the optional ``indexed_gzip`` dependency is
present, a single file handle is created and persisted. If ``indexed_gzip`` is
not available, behaviour is the same as if ``keep_file_open is False``.
If this is set to any other value, attempts to create an ``ArrayProxy`` without
specifying the ``keep_file_open`` flag will result in a ``ValueError`` being
raised.
.. warning:: Setting this flag to a value of ``'auto'`` will become deprecated
behaviour in version 2.4.0. Support for ``'auto'`` will be removed
in version 3.0.0.
"""
KEEP_FILE_OPEN_DEFAULT = False
class ArrayProxy(object):
""" Class to act as proxy for the array that can be read from a file
The array proxy allows us to freeze the passed fileobj and header such that
it returns the expected data array.
This implementation assumes a contiguous array in the file object, with one
of the numpy dtypes, starting at a given file position ``offset`` with
single ``slope`` and ``intercept`` scaling to produce output values.
The class ``__init__`` requires a spec which defines how the data will be
read and rescaled. The spec may be a tuple of length 2 - 5, containing the
shape, storage dtype, offset, slope and intercept, or a ``header`` object
with methods:
* get_data_shape
* get_data_dtype
* get_data_offset
* get_slope_inter
A header should also have a 'copy' method. This requirement will go away
when the deprecated 'header' propoerty goes away.
This implementation allows us to deal with Analyze and its variants,
including Nifti1, and with the MGH format.
Other image types might need more specific classes to implement the API.
See :mod:`nibabel.minc1`, :mod:`nibabel.ecat` and :mod:`nibabel.parrec` for
examples.
"""
# Assume Fortran array memory layout
order = 'F'
_header = None
@kw_only_meth(2)
def __init__(self, file_like, spec, mmap=True, keep_file_open=None):
"""Initialize array proxy instance
Parameters
----------
file_like : object
File-like object or filename. If file-like object, should implement
at least ``read`` and ``seek``.
spec : object or tuple
Tuple must have length 2-5, with the following values:
#. shape: tuple - tuple of ints describing shape of data;
#. storage_dtype: dtype specifier - dtype of array inside proxied
file, or input to ``numpy.dtype`` to specify array dtype;
#. offset: int - offset, in bytes, of data array from start of file
(default: 0);
#. slope: float - scaling factor for resulting data (default: 1.0);
#. inter: float - intercept for rescaled data (default: 0.0).
OR
Header object implementing ``get_data_shape``, ``get_data_dtype``,
``get_data_offset``, ``get_slope_inter``
mmap : {True, False, 'c', 'r'}, optional, keyword only
`mmap` controls the use of numpy memory mapping for reading data.
If False, do not try numpy ``memmap`` for data array. If one of
{'c', 'r'}, try numpy memmap with ``mode=mmap``. A `mmap` value of
True gives the same behavior as ``mmap='c'``. If `file_like`
cannot be memory-mapped, ignore `mmap` value and read array from
file.
keep_file_open : { None, 'auto', True, False }, optional, keyword only
`keep_file_open` controls whether a new file handle is created
every time the image is accessed, or a single file handle is
created and used for the lifetime of this ``ArrayProxy``. If
``True``, a single file handle is created and used. If ``False``,
a new file handle is created every time the image is accessed. If
``'auto'``, and the optional ``indexed_gzip`` dependency is
present, a single file handle is created and persisted. If
``indexed_gzip`` is not available, behaviour is the same as if
``keep_file_open is False``. If ``file_like`` is an open file
handle, this setting has no effect. The default value (``None``)
will result in the value of ``KEEP_FILE_OPEN_DEFAULT`` being used.
"""
if mmap not in (True, False, 'c', 'r'):
raise ValueError("mmap should be one of {True, False, 'c', 'r'}")
self.file_like = file_like
if hasattr(spec, 'get_data_shape'):
slope, inter = spec.get_slope_inter()
par = (spec.get_data_shape(),
spec.get_data_dtype(),
spec.get_data_offset(),
1. if slope is None else slope,
0. if inter is None else inter)
# Reference to original header; we will remove this soon
self._header = spec.copy()
elif 2 <= len(spec) <= 5:
optional = (0, 1., 0.)
par = spec + optional[len(spec) - 2:]
else:
raise TypeError('spec must be tuple of length 2-5 or header object')
# Copies of values needed to read array
self._shape, self._dtype, self._offset, self._slope, self._inter = par
# Permit any specifier that can be interpreted as a numpy dtype
self._dtype = np.dtype(self._dtype)
self._mmap = mmap
# Flags to keep track of whether a single ImageOpener is created, and
# whether a single underlying file handle is created.
self._keep_file_open, self._persist_opener = \
self._should_keep_file_open(file_like, keep_file_open)
self._lock = RLock()
def __del__(self):
"""If this ``ArrayProxy`` was created with ``keep_file_open=True``,
the open file object is closed if necessary.
"""
if hasattr(self, '_opener') and not self._opener.closed:
self._opener.close_if_mine()
self._opener = None
def __getstate__(self):
"""Returns the state of this ``ArrayProxy`` during pickling. """
state = self.__dict__.copy()
state.pop('_lock', None)
return state
def __setstate__(self, state):
"""Sets the state of this ``ArrayProxy`` during unpickling. """
self.__dict__.update(state)
self._lock = RLock()
def _should_keep_file_open(self, file_like, keep_file_open):
"""Called by ``__init__``.
This method determines how to manage ``ImageOpener`` instances,
and the underlying file handles - the behaviour depends on:
- whether ``file_like`` is an an open file handle, or a path to a
``'.gz'`` file, or a path to a non-gzip file.
- whether ``indexed_gzip`` is present (see
:attr:`.openers.HAVE_INDEXED_GZIP`).
An ``ArrayProxy`` object uses two internal flags to manage
``ImageOpener`` instances and underlying file handles.
- The ``_persist_opener`` flag controls whether a single
``ImageOpener`` should be created and used for the lifetime of
this ``ArrayProxy``, or whether separate ``ImageOpener`` instances
should be created on each file access.
- The ``_keep_file_open`` flag controls qwhether the underlying file
handle should be kept open for the lifetime of this
``ArrayProxy``, or whether the file handle should be (re-)opened
and closed on each file access.
The internal ``_keep_file_open`` flag is only relevant if
``file_like`` is a ``'.gz'`` file, and the ``indexed_gzip`` library is
present.
This method returns the values to be used for the internal
``_persist_opener`` and ``_keep_file_open`` flags; these values are
derived according to the following rules:
1. If ``file_like`` is a file(-like) object, both flags are set to
``False``.
2. If ``keep_file_open`` (as passed to :meth:``__init__``) is
``True``, both internal flags are set to ``True``.
3. If ``keep_file_open`` is ``False``, but ``file_like`` is not a path
to a ``.gz`` file or ``indexed_gzip`` is not present, both flags
are set to ``False``.
4. If ``keep_file_open`` is ``False``, ``file_like`` is a path to a
``.gz`` file, and ``indexed_gzip`` is present, ``_persist_opener``
is set to ``True``, and ``_keep_file_open`` is set to ``False``.
In this case, file handle management is delegated to the
``indexed_gzip`` library.
5. If ``keep_file_open`` is ``'auto'``, ``file_like`` is a path to a
``.gz`` file, and ``indexed_gzip`` is present, both internal flags
are set to ``True``.
6. If ``keep_file_open`` is ``'auto'``, and ``file_like`` is not a
path to a ``.gz`` file, or ``indexed_gzip`` is not present, both
internal flags are set to ``False``.
Note that a value of ``'auto'`` for ``keep_file_open`` will become
deprecated behaviour in version 2.4.0, and support for ``'auto'`` will
be removed in version 3.0.0.
Parameters
----------
file_like : object
File-like object or filename, as passed to ``__init__``.
keep_file_open : { 'auto', True, False }
Flag as passed to ``__init__``.
Returns
-------
A tuple containing:
- ``keep_file_open`` flag to control persistence of file handles
- ``persist_opener`` flag to control persistence of ``ImageOpener``
objects.
"""
if keep_file_open is None:
keep_file_open = KEEP_FILE_OPEN_DEFAULT
if keep_file_open not in ('auto', True, False):
raise ValueError('keep_file_open should be one of {None, '
'\'auto\', True, False}')
# file_like is a handle - keep_file_open is irrelevant
if hasattr(file_like, 'read') and hasattr(file_like, 'seek'):
return False, False
# if the file is a gzip file, and we have_indexed_gzip,
have_igzip = openers.HAVE_INDEXED_GZIP and file_like.endswith('.gz')
if keep_file_open == 'auto':
return have_igzip, have_igzip
elif keep_file_open:
return True, True
else:
return False, have_igzip
@property
@deprecate_with_version('ArrayProxy.header deprecated', '2.2', '3.0')
@property
@property
@property
@property
@property
@property
@property
@contextmanager
def _get_fileobj(self):
"""Create and return a new ``ImageOpener``, or return an existing one.
The specific behaviour depends on the value of the ``keep_file_open``
flag that was passed to ``__init__``.
Yields
------
ImageOpener
A newly created ``ImageOpener`` instance, or an existing one,
which provides access to the file.
"""
if self._persist_opener:
if not hasattr(self, '_opener'):
self._opener = openers.ImageOpener(
self.file_like, keep_open=self._keep_file_open)
yield self._opener
else:
with openers.ImageOpener(
self.file_like, keep_open=False) as opener:
yield opener
def get_unscaled(self):
""" Read of data from file
This is an optional part of the proxy API
"""
with self._get_fileobj() as fileobj, self._lock:
raw_data = array_from_file(self._shape,
self._dtype,
fileobj,
offset=self._offset,
order=self.order,
mmap=self._mmap)
return raw_data
def reshape(self, shape):
""" Return an ArrayProxy with a new shape, without modifying data """
size = np.prod(self._shape)
# Calculate new shape if not fully specified
from operator import mul
from functools import reduce
n_unknowns = len([e for e in shape if e == -1])
if n_unknowns > 1:
raise ValueError("can only specify one unknown dimension")
elif n_unknowns == 1:
known_size = reduce(mul, shape, -1)
unknown_size = size // known_size
shape = tuple(unknown_size if e == -1 else e for e in shape)
if np.prod(shape) != size:
raise ValueError("cannot reshape array of size {:d} into shape "
"{!s}".format(size, shape))
return self.__class__(file_like=self.file_like,
spec=(shape, self._dtype, self._offset,
self._slope, self._inter),
mmap=self._mmap)
def is_proxy(obj):
""" Return True if `obj` is an array proxy
"""
try:
return obj.is_proxy
except AttributeError:
return False
def reshape_dataobj(obj, shape):
""" Use `obj` reshape method if possible, else numpy reshape function
"""
return (obj.reshape(shape) if hasattr(obj, 'reshape')
else np.reshape(obj, shape))
| [
2,
795,
16436,
25,
532,
9,
12,
4235,
25,
21015,
12,
14171,
26,
12972,
12,
521,
298,
12,
28968,
25,
604,
26,
33793,
12,
8658,
82,
12,
14171,
25,
18038,
532,
9,
12,
198,
2,
25357,
25,
900,
10117,
28,
29412,
39747,
28,
19,
40379,
... | 2.469433 | 6,265 |
import pandas as pd
from .base import *
from tqdm import tqdm | [
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
764,
8692,
1330,
1635,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020
] | 2.818182 | 22 |
import json
import logging
import os
from typing import Any, Dict, Optional
import boto3
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.INFO)
| [
11748,
33918,
198,
11748,
18931,
198,
11748,
28686,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
32233,
198,
198,
11748,
275,
2069,
18,
198,
198,
25294,
30373,
796,
18931,
13,
1136,
11187,
1362,
3419,
198,
25294,
30373,
13,
2617,
497... | 3.145833 | 48 |
# -*- coding: utf-8 -*-
"""Script writes out numpy archive of nd AUC and peak latency array data"""
# Authors: Kambiz Tavabi <ktavabi@gmail.com>
# License: MIT
from os import path as op
import time
import numpy as np
from mne import read_evokeds
from badbaby import parameters as params
from badbaby import return_dataframes as rd
# Some parameters
data_dir = params.meg_dirs['mmn']
df, cdi_df = rd.return_simms_mmn_dfs()
subjects = df.Subject_ID.values.tolist()
agency = 'SIMMS'
analysis = 'Individual'
conditions = ['standard', 'Ba', 'Wa']
lpf = 30
erf_data = np.load(op.join(data_dir,
'%s_Analysis-%s_%d-ERF-data.npz'
% (agency, analysis, lpf)))
file_out = op.join(data_dir,
'%s_Analysis-%s_%d-DepMeas-data.npz'
% (agency, analysis, lpf))
# Loop over subjects and write ND data matrix
t0 = time.time()
for ci, cond in enumerate(conditions):
print(' %s...' % cond)
for si, subj in enumerate(subjects):
print(' %s' % subj)
evoked_file = op.join(data_dir, 'bad_%s' % subj, 'inverse',
'%s_%d-sss_eq_bad_%s-ave.fif'
% (analysis, lpf, subj))
evoked = read_evokeds(evoked_file, condition=cond,
baseline=(None, 0))
if len(evoked.info['bads']) > 0:
print(' Interpolating bad channels...')
evoked.interpolate_bads()
times = evoked.times
sfreq = evoked.info['sfreq']
ch_names = evoked.info['ch_names']
assert(all(np.asarray(ch_names) == np.asarray(params.vv_ch_order))) | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
7391,
6797,
503,
299,
32152,
15424,
286,
299,
67,
317,
9598,
290,
9103,
24812,
7177,
1366,
37811,
198,
198,
2,
46665,
25,
509,
4131,
528,
41489,
17914,
1279,... | 2.039457 | 811 |
# coding:UTF-8
'''
Date:20161030
@author: zhaozhiyong
'''
from math import pow
class node:
'''树的节点的类
'''
def split_tree(data, fea, value):
'''根据特征fea中的值value将数据集data划分成左右子树
input: data(list):数据集
fea(int):待分割特征的索引
value(float):待分割的特征的具体值
output: (set1,set2)(tuple):分割后的左右子树
'''
set_1 = []
set_2 = []
for x in data:
if x[fea] >= value:
set_1.append(x)
else:
set_2.append(x)
return (set_1, set_2)
def label_uniq_cnt(data):
'''统计数据集中不同的类标签label的个数
input: data(list):原始数据集
output: label_uniq_cnt(int):样本中的标签的个数
'''
label_uniq_cnt = {}
for x in data:
label = x[len(x) - 1] # 取得每一个样本的类标签label
if label not in label_uniq_cnt:
label_uniq_cnt[label] = 0
label_uniq_cnt[label] = label_uniq_cnt[label] + 1
return label_uniq_cnt
def cal_gini_index(data):
'''计算给定数据集的Gini指数
input: data(list):树中
output: gini(float):Gini指数
'''
total_sample = len(data) # 样本的总个数
if len(data) == 0:
return 0
label_counts = label_uniq_cnt(data) # 统计数据集中不同标签的个数
# 计算数据集的Gini指数
gini = 0
for label in label_counts:
gini = gini + pow(label_counts[label], 2)
gini = 1 - float(gini) / pow(total_sample, 2)
return gini
def build_tree(data):
'''构建树
input: data(list):训练样本
output: node:树的根结点
'''
# 构建决策树,函数返回该决策树的根节点
if len(data) == 0:
return node()
# 1、计算当前的Gini指数
currentGini = cal_gini_index(data)
bestGain = 0.0
bestCriteria = None # 存储最佳切分属性以及最佳切分点
bestSets = None # 存储切分后的两个数据集
feature_num = len(data[0]) - 1 # 样本中特征的个数
# 2、找到最好的划分
for fea in range(0, feature_num):
# 2.1、取得fea特征处所有可能的取值
feature_values = {} # 在fea位置处可能的取值
for sample in data: # 对每一个样本
feature_values[sample[fea]] = 1 # 存储特征fea处所有可能的取值
# 2.2、针对每一个可能的取值,尝试将数据集划分,并计算Gini指数
for value in feature_values.keys(): # 遍历该属性的所有切分点
# 2.2.1、 根据fea特征中的值value将数据集划分成左右子树
(set_1, set_2) = split_tree(data, fea, value)
# 2.2.2、计算当前的Gini指数
nowGini = float(len(set_1) * cal_gini_index(set_1) + \
len(set_2) * cal_gini_index(set_2)) / len(data)
# 2.2.3、计算Gini指数的增加量
gain = currentGini - nowGini
# 2.2.4、判断此划分是否比当前的划分更好
if gain > bestGain and len(set_1) > 0 and len(set_2) > 0:
bestGain = gain
bestCriteria = (fea, value)
bestSets = (set_1, set_2)
# 3、判断划分是否结束
if bestGain > 0:
right = build_tree(bestSets[0])
left = build_tree(bestSets[1])
return node(fea=bestCriteria[0], value=bestCriteria[1], \
right=right, left=left)
else:
return node(results=label_uniq_cnt(data)) # 返回当前的类别标签作为最终的类别标签
def predict(sample, tree):
'''对每一个样本sample进行预测
input: sample(list):需要预测的样本
tree(类):构建好的分类树
output: tree.results:所属的类别
'''
# 1、只是树根
if tree.results != None:
return tree.results
else:
# 2、有左右子树
val_sample = sample[tree.fea]
branch = None
if val_sample >= tree.value:
branch = tree.right
else:
branch = tree.left
return predict(sample, branch)
| [
2,
19617,
25,
48504,
12,
23,
198,
7061,
6,
198,
10430,
25,
5304,
940,
1270,
198,
31,
9800,
25,
1976,
3099,
8590,
5303,
88,
506,
198,
7061,
6,
198,
6738,
10688,
1330,
7182,
198,
198,
4871,
10139,
25,
198,
220,
220,
220,
705,
7061,
... | 1.369912 | 2,506 |
# Calculator using Python Tkinter
from tkinter import Tk, Entry, Button, StringVar
base = Tk()
calculator__pad = Calculator(base)
base.mainloop()
| [
2,
43597,
1262,
11361,
309,
74,
3849,
198,
6738,
256,
74,
3849,
1330,
309,
74,
11,
21617,
11,
20969,
11,
10903,
19852,
628,
220,
220,
220,
220,
220,
220,
220,
220,
198,
198,
8692,
796,
309,
74,
3419,
198,
198,
9948,
3129,
1352,
83... | 2.77193 | 57 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import textwrap
import unittest
from io import StringIO
from calmjs.parse import asttypes
from calmjs.parse.parsers.es5 import Parser
from calmjs.parse.parsers.es5 import parse
from calmjs.parse.parsers.es5 import read
from calmjs.parse.unparsers.es5 import pretty_print
from calmjs.parse.walkers import walk
from calmjs.parse.tests.parser import (
ParserCaseMixin,
build_node_repr_test_cases,
build_asi_test_cases,
build_syntax_error_test_cases,
build_regex_syntax_error_test_cases,
build_comments_test_cases,
)
ParsedNodeTypeTestCase = build_node_repr_test_cases(
'ParsedNodeTypeTestCase', parse, 'ES5Program')
# ASI - Automatic Semicolon Insertion
ParserToECMAASITestCase = build_asi_test_cases(
'ParserToECMAASITestCase', parse, pretty_print)
ECMASyntaxErrorsTestCase = build_syntax_error_test_cases(
'ECMASyntaxErrorsTestCase', parse)
ECMARegexSyntaxErrorsTestCase = build_regex_syntax_error_test_cases(
'ECMARegexSyntaxErrorsTestCase', parse)
ParsedNodeTypesWithCommentsTestCase = build_comments_test_cases(
'ParsedNodeTypeWithCommentsTestCase', parse, 'ES5Program')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
2420,
37150,
198,
11748,
555,
715,
395,
198,
6738,
33245,
1330,
10903,
9399,
198,
198,
6738,
... | 2.706818 | 440 |
from tkinter import *
root = Tk()
root.geometry("300x400")
btn_fill = Button(root, text="Button fill")
btn_fill.pack(fill=X)
btn_expand = Button(root, text="Button expand ")
btn_expand.pack(expand=YES)
btn_side = Button(root, text="Button side")
btn_side.pack(side=LEFT)
root.mainloop()
| [
6738,
256,
74,
3849,
1330,
1635,
198,
15763,
796,
309,
74,
3419,
198,
15763,
13,
469,
15748,
7203,
6200,
87,
7029,
4943,
198,
198,
46118,
62,
20797,
796,
20969,
7,
15763,
11,
2420,
2625,
21864,
6070,
4943,
198,
46118,
62,
20797,
13,
... | 2.589286 | 112 |
from sklearn import datasets
import matplotlib.pyplot as plt
import matplotlib.cm as cmx
import matplotlib.colors as colors
import numpy as np
from mlfromscratch.unsupervised_learning import PCA
if __name__ == "__main__":
main() | [
6738,
1341,
35720,
1330,
40522,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
13,
11215,
355,
12067,
87,
198,
11748,
2603,
29487,
8019,
13,
4033,
669,
355,
7577,
198,
11748,
299,
32152,
355... | 3.078947 | 76 |
"""
hubspot workflows api
"""
from hubspot3.base import BaseClient
from hubspot3.utils import get_log
WORKFLOWS_API_VERSION = "3"
class WorkflowsClient(BaseClient):
"""
The hubspot3 Workflows client uses the _make_request method to call the
API for data. It returns a python object translated from the json returned
"""
def __init__(self, *args, **kwargs):
"""initialize a workflows client"""
super(WorkflowsClient, self).__init__(*args, **kwargs)
self.log = get_log("hubspot3.workflows")
def get_all_workflow_ids(self, **options):
"""
Get all workflow IDs
:see: https://developers.hubspot.com/docs/methods/workflows/v3/get_workflows
"""
return self._call("workflows", **options)
def get_workflow_by_id(self, workflow_id: int = None, **options):
"""
Get workflow specified by ID
:see: https://developers.hubspot.com/docs/methods/workflows/v3/get_workflow
"""
if workflow_id is not None:
return self._call(f"workflows/{workflow_id}")
return None
| [
37811,
198,
40140,
20485,
670,
44041,
40391,
198,
37811,
198,
6738,
12575,
20485,
18,
13,
8692,
1330,
7308,
11792,
198,
6738,
12575,
20485,
18,
13,
26791,
1330,
651,
62,
6404,
198,
198,
33249,
3697,
22845,
62,
17614,
62,
43717,
796,
366... | 2.509091 | 440 |
import os
import io
import sys
import time
import string
import random
import pstats
import unittest
import cProfile
import itertools
import statistics
from unittest.mock import patch, MagicMock
import bucky3.statsd as statsd
if __name__ == '__main__':
unittest.main()
| [
198,
198,
11748,
28686,
198,
11748,
33245,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
4731,
198,
11748,
4738,
198,
11748,
279,
34242,
198,
11748,
555,
715,
395,
198,
11748,
269,
37046,
198,
11748,
340,
861,
10141,
198,
11748,
7869,
... | 3.097826 | 92 |
from collections import OrderedDict
if __name__ == '__main__':
main()
| [
6738,
17268,
1330,
14230,
1068,
35,
713,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.851852 | 27 |
# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# astroid is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
import sys
from logilab.common.testlib import TestCase, unittest_main, require_version
from astroid.node_classes import Assign
from astroid.manager import AstroidManager
from astroid.builder import AstroidBuilder
if __name__ == '__main__':
unittest_main()
| [
2,
6634,
5816,
12,
6390,
41605,
4146,
6242,
311,
13,
32,
13,
357,
40313,
11,
8782,
19240,
828,
477,
2489,
10395,
13,
198,
2,
2800,
2638,
1378,
2503,
13,
6404,
346,
397,
13,
8310,
14,
1377,
6920,
1462,
25,
32057,
31,
6404,
346,
397... | 3.470968 | 310 |
from weighted_random import random_choice
import requests
import random
import uuid
import json
import logging
from ipaddress import IPv4Address, AddressValueError
from mixpanel import Mixpanel
from constants import *
from typing import List, ClassVar, Any, Optional
import sys
import threading
from random_user import generate_random_user_properties
# Logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
def init_mixpannel_clients(mxp_tokens: List[str]) -> List[Mixpanel]:
"""
Return a list of mixpannel clients.
"""
projects: List[Mixpanel] = []
logger.info('Found %s Mixpannel tokens.', len(mxp_tokens))
for project_token in mxp_tokens:
mp = Mixpanel(project_token)
projects.append(mp)
logger.info('%s Mixpannel projects ready to go.', len(projects))
return projects
MXP_PROJECTS = init_mixpannel_clients(mxp_tokens=MIXPANNEL_TOKENS)
def generate_random_ip() ->str:
"""
Generate random IP address. Copied from
https://codereview.stackexchange.com/questions/200337/random-ip-address-generator
with some changes to generate valid looking IP addresses.
"""
while (True):
trials: int = 0
try:
trials += 1
# instances an IPv4Address object from those bits
# generates an integer with 32 random bits
bits = random.getrandbits(32)
addr = IPv4Address(bits)
except AddressValueError:
continue
if not addr.is_private or not addr.is_reserved:
break
ip_address = str(addr)
logger.info('Generated %s IP address after %s attempt', ip_address, trials)
return ip_address
class User(BaseShopper):
"""
A registered customer.
"""
@classmethod
users_pool: List[User] = []
class Visit(object):
"""
Simple customer of the website. This might be a registered user or a random unregistered user.
"""
user_journy: List[str] = []
user_cart: List[str] = []
def choose_requester(self) -> BaseShopper:
"""
Return a Shopper object
"""
self.is_registered = random_bool()
requester: BaseShopper
if self.is_registered and users_pool:
requester = random.choice(users_pool) # type: ignore
else:
requester = UnregisteredShopper()
return requester
def _visit_main_page(self):
"""
In main page, the user might visit an item page or drop.
"""
self.requester.visit('main page')
self._visit_item_page()
def _visit_item_page(self):
"""
In an item page, users can:
1. Add the item into the cart.
2. Return to main page.
3. Drop.
"""
requester_progressed = random_choice([(True, 70), (False, 30)])
if requester_progressed:
product = random_choice(SHOP_PRODUCTS)
self.requester.visit(
'Visit item page',
extra={
'item name': product
}
)
self._add_item_to_cart(product)
else:
requester_progressed = random_bool()
if requester_progressed:
# Let us assume that they need to go to home page.
self._visit_main_page()
if __name__ == '__main__':
start_script()
| [
6738,
26356,
62,
25120,
1330,
4738,
62,
25541,
198,
11748,
7007,
198,
11748,
4738,
198,
11748,
334,
27112,
198,
11748,
33918,
198,
11748,
18931,
198,
6738,
20966,
21975,
1330,
25961,
19,
20231,
11,
17917,
11395,
12331,
198,
6738,
5022,
35... | 2.349738 | 1,524 |
#!/usr/bin/env python
''' ODC-GEE Setup '''
from setuptools import setup, find_packages
with open('README.md', 'r', encoding='utf-8') as fh:
LONG_DESCRIPTION = fh.read()
setup(name='odc-gee',
version='2.25',
author='Andrew Lubawy',
author_email='andrew.m.lubawy@ama-inc.com',
description='Google Earth Engine indexing tools for Open Data Cube',
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
license='Apache-2.0',
url='https://github.com/ceos-seo/odc-gee',
project_urls={
'Bug Tracker': 'https://github.com/ceos-seo/odc-gee/issues'
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: GIS',
],
install_requires=[
"click-plugins>=1.1.1",
"click>=7.1.2",
"datacube>=1.8.3",
"earthengine-api>=0.1.24",
"numpy>=1.18.4",
"rasterio>=1.1.8",
],
packages=find_packages(exclude=['tests*']),
python_requires=">=3.6",
scripts=['scripts/index_gee', 'scripts/new_product'],)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
7061,
6,
440,
9697,
12,
38,
6500,
31122,
705,
7061,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
4480,
1280,
10786,
15675,
11682,
13,
9132,
3256,
705,
... | 2.17052 | 692 |
import os
import glob
import logging
import random
import geckodriver_autoinstaller
from selenium import webdriver
from selenium.webdriver.common.proxy import Proxy, ProxyType
from .deepl import Translator
from .srt_parser import wrap_line, save_srt
from .utils import get_proxies
# Check if the current version of geckodriver exists
geckodriver_autoinstaller.install()
INPUT_LANG = {
"auto": "Any language (detect)",
"bg": "Bulgarian",
"zh": "Chinese",
"cs": "Czech",
"da": "Danish",
"nl": "Dutch",
"en": "English",
"et": "Estonian",
"fi": "Finnish",
"fr": "French",
"de": "German",
"el": "Greek",
"hu": "Hungarian",
"it": "Italian",
"ja": "Japanese",
"lv": "Latvian",
"lt": "Lithuanian",
"pl": "Polish",
"pt": "Portuguese",
"ro": "Romanian",
"ru": "Russian",
"sk": "Slovak",
"sl": "Slovenian",
"es": "Spanish",
"sv": "Swedish",
}
OUTPUT_LANG = {
"bg": "Bulgarian",
"zh": "Chinese (simplified)",
"cs": "Czech",
"da": "Danish",
"nl": "Dutch",
"en": "English",
"et": "Estonian",
"fi": "Finnish",
"fr": "French",
"de": "German",
"el": "Greek",
"hu": "Hungarian",
"it": "Italian",
"ja": "Japanese",
"lv": "Latvian",
"lt": "Lithuanian",
"pl": "Polish",
"pt": "Portuguese",
"br": "Portuguese (Brazilian)",
"ro": "Romanian",
"ru": "Russian",
"sk": "Slovak",
"sl": "Slovenian",
"es": "Spanish",
"sv": "Swedish",
}
| [
11748,
28686,
198,
11748,
15095,
198,
11748,
18931,
198,
11748,
4738,
198,
11748,
4903,
694,
375,
38291,
62,
23736,
17350,
263,
198,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11321,... | 2.173851 | 696 |
'''
This is the worker module for the votes task handler.
'''
from google.appengine.ext import ndb
import json
import logging
import model.poll as Poll
import model.vote as Vote
import webapp2
USER_VOTES = {}
#Called when there is a new vote
#Add a New Vote
app = webapp2.WSGIApplication([
('/worker/process_vote', VoteHandler)
],debug = True)
| [
7061,
6,
198,
1212,
318,
262,
8383,
8265,
329,
262,
5690,
4876,
21360,
13,
198,
7061,
6,
198,
198,
6738,
23645,
13,
1324,
18392,
13,
2302,
1330,
299,
9945,
198,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
2746,
13,
30393,
355,
... | 3.025641 | 117 |
"""
A language to describe data, its interpretations and its transformations.
""" | [
37811,
198,
32,
3303,
284,
6901,
1366,
11,
663,
26146,
290,
663,
38226,
13,
198,
37811
] | 5.0625 | 16 |
import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance', config_dir="configs")
@pytest.fixture(scope="module", autouse=True)
@pytest.fixture(autouse=True)
| [
11748,
12972,
9288,
198,
6738,
49385,
13,
565,
5819,
1330,
6914,
18102,
2601,
5819,
198,
198,
565,
5819,
796,
6914,
18102,
2601,
5819,
7,
834,
7753,
834,
8,
198,
39098,
796,
13946,
13,
2860,
62,
39098,
10786,
39098,
3256,
4566,
62,
15... | 3.08642 | 81 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
# Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
"""
Health Calculation Process
This process calculates the health of a code base by using the code smells for
the project. It reads the csv file from code analyzer and the code_smell.toml
"""
import csv
import toml
import os
def health_function(_type, _smell , _cm, rows, switch_cs_data):
"""
For each transaction of code analyzer calculates the corresponding health
based on the code smell.
Args:
_type (str): type of code is either class or method
_smell (str): description of the code smell to evaluate
_cm (str) : code measure value
rows (int) : number of rows calculated for a specific code smell
switch_cs_data (int, float) : code smell data dictionary
Returns:
h (float): health of the transaction code analized
"""
if 'Ratio' in _smell: #For ratio measures , multiply by 100 and use float type
_cm = float(_cm) * 100
elif _cm == '-':
return 0
else:
_cm = int(_cm)
rw = 100 #Maximum reward for good code health
#health, Small Code Smell, Large Code Smell
h = scs = lcs = 0.00
#Weigth Small Code Smell, Weight Large Code Smell
wt_scs = wt_lcs = 1
#Check the type of code (Class or Method) then find the code smell ranges
if _type == "class":
if _smell == "Lines of Code":
scs_list = switch_cs_data.get(_type).get('SmallClass')
scs = scs_list[0]
wt_scs = scs_list[1]
lcs_list = switch_cs_data.get(_type).get('LargeClass')
lcs = lcs_list[0]
wt_lcs = lcs_list [1]
elif _smell == "Comment-to-Code Ratio":
scs_list = switch_cs_data.get('comments').get('CommentsToCodeRatioLower')
scs = scs_list[0] * 100
wt_scs = scs_list[1] * 100
lcs_list = switch_cs_data.get('comments').get('CommentsToCodeRatioUpper')
lcs = lcs_list[0] * 100
wt_lcs = lcs_list [1] * 100
elif _smell == "Number of Outgoing Invocations": #GOD class for Classes
lcs_list = switch_cs_data.get(_type).get('GodClass')
lcs = lcs_list[0]
wt_lcs = lcs_list [1]
elif _smell == "Number of Directly-Used Elements": #InappropiateIntimacy for Classes
lcs_list = switch_cs_data.get(_type).get('InappropriateIntimacy')
lcs = lcs_list[0]
wt_lcs = lcs_list [1]
elif _smell == "Number of Parameters":
return 0
else:
return 0
elif _type == "method":
if _smell == "Lines of Code":
scs_list = switch_cs_data.get(_type).get('SmallMethod')
scs = scs_list[0]
wt_scs = scs_list[1]
lcs_list = switch_cs_data.get(_type).get('LargeMethod')
lcs = lcs_list[0]
wt_lcs = lcs_list [1]
elif _smell == "Comment-to-Code Ratio":
scs_list = switch_cs_data.get('comments').get('CommentsToCodeRatioLower')
scs = scs_list[0] * 100
wt_scs = scs_list[1] * 100
lcs_list = switch_cs_data.get('comments').get('CommentsToCodeRatioUpper')
lcs = lcs_list[0] * 100
wt_lcs = lcs_list [1] * 100
elif _smell == "Number of Outgoing Invocations": #NO GOD class for Methods
return 0
elif _smell == "Number of Directly-Used Elements": #NO InappropiateIntimacy for Methods
return 0
elif _smell == "Number of Parameters":
lcs_list = switch_cs_data.get(_type).get('LargeParameterList')
lcs = lcs_list[0]
wt_lcs = lcs_list [1]
else:
return 0
#Fixes zero division if both code smells are zero
scs = scs * wt_scs # Multiply Code Smell by Weight
lcs = lcs * wt_lcs # Multiply Code Smell by Weight
if scs == 0 and lcs ==0:
return 0
rows[_smell] = rows[_smell] + 1 # Row counter per type of smell
if _cm < scs: #Condition for penalization when code metric is under small Code Smell (cm < scm)
h = rw - ((_cm - scs)**2) / (scs**2) * rw
return h
elif _cm <= lcs:
h = rw
return h
#Fixes zero division if large code smells is zero
elif _cm > lcs and lcs != 0: #Condition for penalization when code metric is over large Code Smell (cm > lcs)
h = rw - ((_cm - lcs)**2) / (lcs**2) * rw
if h < 0:
h = 0
return h
else:
return 100
def calculate_health(suse_config, csv_path):
"""
Opens the csv file from code analyzer that contains all the transactions of the
code base. A for loop traverses each transaction to call the health_function,
sums the results and gets the average heal for the code base.
Args:
suse_config (int, float) : code smell data dictionary
csv_path (str): Type of code is either class or method
Returns:
total_health (float): Total health of the code base
"""
if os.path.exists(csv_path):
with open(csv_path, newline='') as csvfile:
# Using csv Reader
reader = csv.reader(csvfile)
# CSV Header list:
# 0: Type of Smell, 1: Name, 2: Lines of Code, 3: Comment-to-Code Ratio
# 4: Number of Directly-Used Elements, 5: Number of Outgoing Invocations
# 6: Name of Owner Class, 7: Number of Parameters
head = next(reader)
# h is a DD with the necessary Header to count returned by health_function
h = {head[2]: 0, head[3]: 0.00, head[5]: 0, head[4]: 0,head[7]: 0}
rows = {head[2]: 0, head[3]: 0, head[5]: 0, head[4]: 0, head[7]: 0}
avg = {head[2]: 0.00, head[3]: 0.00, head[5]: 0, head[4]: 0, head[7]: 0.00}
lines = 0
for x in reader:
h[head[2]] = h[head[2]] + health_function(x[0].lower(), head[2], x[2], rows, suse_config)
h[head[3]] = h[head[3]] + health_function(x[0].lower(), head[3], x[3], rows, suse_config)
h[head[4]] = h[head[4]] + health_function(x[0].lower(), head[4], x[4], rows, suse_config)
h[head[5]] = h[head[5]] + health_function(x[0].lower(), head[5], x[5], rows, suse_config)
h[head[7]] = h[head[7]] + health_function(x[0].lower(), head[7], x[7], rows, suse_config)
lines = lines +1
if lines == 0:
total_health = -2
return (total_health) # Return -2 when file is empty
#Calculate average of each header
#Validates each measure has rows > 0
div = 0
if rows[head[2]] > 0:
avg[head[2]] = h[head[2]]/rows[head[2]]
div = div +1
if rows[head[3]]>0:
avg[head[3]] = h[head[3]]/rows[head[3]]
div = div +1
if rows[head[5]]>0:
avg[head[5]] = h[head[5]]/rows[head[5]]
div = div +1
if rows[head[4]]>0:
avg[head[4]] = h[head[4]]/rows[head[4]]
div = div +1
if rows[head[7]]>0:
avg[head[7]] = h[head[7]]/rows[head[7]]
div = div +1
#Validates number of code smells calculated > 0
if div > 0:
total_health = (avg[head[2]] + avg[head[3]] + avg[head[5]] + avg[head[4]] + avg[head[7]]) / div
else:
total_health = 0
return total_health
else:
print("File not found")
total_health = -1
return (total_health) # Return -1 when file is not found
| [
2,
15069,
1584,
8180,
10501,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330... | 2.263905 | 3,524 |
from persistence.models.models_base import Base
from sqlalchemy.orm import relationship
from sqlalchemy import Column, Integer, String, Boolean, ForeignKey
| [
6738,
30802,
13,
27530,
13,
27530,
62,
8692,
1330,
7308,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
2776,
198,
6738,
44161,
282,
26599,
1330,
29201,
11,
34142,
11,
10903,
11,
41146,
11,
8708,
9218,
628,
198
] | 4.27027 | 37 |
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'qr_to_text.ui'
##
## Created by: Qt User Interface Compiler version 5.15.0
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import (QCoreApplication, QDate, QDateTime, QMetaObject,
QObject, QPoint, QRect, QSize, QTime, QUrl, Qt)
from PySide2.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont,
QFontDatabase, QIcon, QKeySequence, QLinearGradient, QPalette, QPainter,
QPixmap, QRadialGradient)
from PySide2.QtWidgets import *
# setupUi
# retranslateUi
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
29113,
29113,
14468,
201,
198,
2235,
5178,
7560,
422,
3555,
12454,
2393,
705,
80,
81,
62,
1462,
62,
5239,
13,
9019,
6,
201,
198,
2235,
201,
198,
2235,
... | 3.030769 | 260 |
""" test maf_transform """
import pytest
import transform.mc3.mc3_maf_transform as mc3_maf_transform
from bmeg.ioutils import reader
import os
import contextlib
import shutil
import json
@pytest.fixture
def maf_file(request):
""" get the full path of the test fixture """
return os.path.join(request.fspath.dirname, 'source/mc3/tcga_test.maf')
@pytest.fixture
def gz_file(request):
""" get the full path of the test fixture """
return os.path.join(request.fspath.dirname, 'source/mc3/tcga_gz-test.maf.gz')
@pytest.fixture
def no_center_file(request):
""" get the full path of the test fixture """
return os.path.join(request.fspath.dirname, 'source/mc3/tcga_test-NO_CENTER.maf')
@pytest.fixture
def NO_BARCODE_file(request):
""" get the full path of the test fixture """
return os.path.join(request.fspath.dirname, 'source/mc3/tcga_test-NO_BARCODE.maf')
@pytest.fixture
def id_lookup_path(request):
""" get the full path of the test fixture """
return os.path.join(request.fspath.dirname, 'source/gdc/id_lookup.tsv')
@pytest.fixture
def project_lookup_path(request):
""" get the full path of the test fixture """
return os.path.join(request.fspath.dirname, 'source/gdc/project_lookup.tsv')
def test_simple(helpers, maf_file, emitter_directory, id_lookup_path, project_lookup_path):
""" simple test """
validate(helpers, maf_file, emitter_directory, id_lookup_path, project_lookup_path)
def test_gz(helpers, gz_file, emitter_directory, id_lookup_path, project_lookup_path):
""" simple test """
validate(helpers, gz_file, emitter_directory, id_lookup_path, project_lookup_path)
def test_no_center(helpers, no_center_file, emitter_directory, id_lookup_path, project_lookup_path):
""" 'Center column' renamed """
validate(helpers, no_center_file, emitter_directory, id_lookup_path, project_lookup_path)
def test_NO_BARCODE(helpers, NO_BARCODE_file, emitter_directory, id_lookup_path, project_lookup_path):
""" no barcode """
validate(helpers, NO_BARCODE_file, emitter_directory, id_lookup_path, project_lookup_path)
| [
198,
198,
37811,
1332,
285,
1878,
62,
35636,
37227,
198,
198,
11748,
12972,
9288,
198,
198,
11748,
6121,
13,
23209,
18,
13,
23209,
18,
62,
76,
1878,
62,
35636,
355,
36650,
18,
62,
76,
1878,
62,
35636,
198,
6738,
275,
28917,
13,
72,
... | 2.664151 | 795 |
import os
import torch
from torchvision.utils import make_grid
from tensorboardX import SummaryWriter
from dataloaders.utils import decode_seg_map_sequence, decode_confidence_map_sequence
# def visualize_image(self, writer, dataset, image, target, output,
# global_step, flag='imviz'):
# grid_image = make_grid(image[:3].clone().cpu().data, 3, normalize=True)
# writer.add_image(flag+'/Image', grid_image, global_step)
# grid_image = make_grid(decode_seg_map_sequence(torch.max(output[:3], 1)[1].detach().cpu().numpy(),
# dataset=dataset), 3, normalize=False, range=(0, 255))
# writer.add_image(flag+'/Predicted label', grid_image, global_step)
# grid_image = make_grid(decode_seg_map_sequence(torch.squeeze(target[:3], 1).detach().cpu().numpy(),
# dataset=dataset), 3, normalize=False, range=(0, 255))
# writer.add_image(flag+'/Groundtruth label', grid_image, global_step)
| [
11748,
28686,
198,
11748,
28034,
198,
6738,
28034,
10178,
13,
26791,
1330,
787,
62,
25928,
198,
6738,
11192,
273,
3526,
55,
1330,
21293,
34379,
198,
6738,
4818,
282,
1170,
364,
13,
26791,
1330,
36899,
62,
325,
70,
62,
8899,
62,
43167,
... | 2.215031 | 479 |
from pathlib import Path
KIDCONNECT_LOGIN="your_email@kidconnect.pl"
KIDCONNECT_PASSWORD="YourPassword12345"
IFTTT_KEY="Get it from https://ifttt.com/services/maker_webhooks/settings"
HISTORY_FILE=Path(__file__).parent.joinpath('history.json') # Where to store the news history (so you don't get double-notified)"
CONVERSATIONS={} # {id: title} map for tracked conversations. Get the ID from the KidConnect URL.
| [
6738,
3108,
8019,
1330,
10644,
198,
198,
42,
2389,
10943,
48842,
62,
25294,
1268,
2625,
14108,
62,
12888,
31,
38439,
8443,
13,
489,
1,
198,
42,
2389,
10943,
48842,
62,
47924,
54,
12532,
2625,
7120,
35215,
10163,
2231,
1,
198,
5064,
15... | 3.074074 | 135 |
from collections import OrderedDict
from datetime import date
from tempfile import NamedTemporaryFile
from django.conf import settings
from django.db.models import Q, Sum
from django.http import HttpResponse
from openpyxl import Workbook
from openpyxl.styles import Font
from openpyxl.utils import get_column_letter
from ..models import (
Availability, CorpContact, Corporation, Course, Section, Student, Teacher,
Training,
)
from ..utils import school_year_start
openxml_contenttype = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
EXPORT_FIELDS = [
# Student fields
('ID externe', 'student__ext_id'),
('Prénom', 'student__first_name'), ('Nom', 'student__last_name'),
('Titre', 'student__gender'),
('Classe', 'student__klass__name'),
('Filière', 'student__klass__section__name'),
('Rue élève', 'student__street'),
('NPA_élève', 'student__pcode'),
('Localité élève', 'student__city'),
('Tél élève', 'student__tel'),
('Email élève', 'student__email'),
('Date de naissance', 'student__birth_date'),
('No AVS', 'student__avs'),
# Stage fields
('Nom de la pratique professionnelle', 'availability__period__title'),
('Début', 'availability__period__start_date'), ('Fin', 'availability__period__end_date'),
('Remarques pratique professionnelle', 'comment'),
('Prénom référent', 'referent__first_name'), ('Nom référent', 'referent__last_name'),
('Courriel référent', 'referent__email'),
('Institution', 'availability__corporation__name'),
('ID externe Inst', 'availability__corporation__ext_id'),
('Rue Inst', 'availability__corporation__street'),
('NPA Inst', 'availability__corporation__pcode'),
('Ville Inst', 'availability__corporation__city'),
('Tél Inst', 'availability__corporation__tel'),
('Domaine', 'availability__domain__name'),
('Remarques Inst', 'availability__comment'),
('Civilité contact', 'availability__contact__civility'),
('Prénom contact', 'availability__contact__first_name'),
('Nom contact', 'availability__contact__last_name'),
('ID externe contact', 'availability__contact__ext_id'),
('Tél contact', 'availability__contact__tel'),
('Courriel contact', 'availability__contact__email'),
('Courriel contact - copie', None),
]
NON_ATTR_EXPORT_FIELDS = [
('Filière', 'period__section__name'),
('Nom de la pratique professionnelle', 'period__title'),
('Début', 'period__start_date'), ('Fin', 'period__end_date'),
('Institution', 'corporation__name'),
('Rue Inst', 'corporation__street'),
('NPA Inst', 'corporation__pcode'),
('Ville Inst', 'corporation__city'),
('Tél Inst', 'corporation__tel'),
('Domaine', 'domain__name'),
('Remarques Inst', 'comment'),
('Civilité contact', 'contact__civility'),
('Prénom contact', 'contact__first_name'),
('Nom contact', 'contact__last_name'),
('Tél contact', 'contact__tel'),
('Courriel contact', 'contact__email'),
('Courriel contact - copie', None),
]
GENERAL_EXPORT_FIELDS = [
('Num_Ele', 'ext_id'),
('Nom_Ele', 'last_name'),
('Prenom_Ele', 'first_name'),
('Genre_Ele', 'gender'),
('Rue_Ele', 'street'),
('NPA_Ele', 'pcode'),
('Ville_Ele', 'city'),
('DateNaissance_Ele', 'birth_date'),
('NOAVS_Ele', 'avs'),
('Canton_Ele', 'district'),
('Email_Ele', 'email'),
('Mobile_Ele', 'mobile'),
('Compte_RPN', 'login_rpn'),
('DispenseCG_Ele', 'dispense_ecg'),
('DispenseEPS_Ele', 'dispense_eps'),
('SoutienDYS_Ele', 'soutien_dys'),
('Classe_Ele', 'klass__name'),
('Filiere_Ele', 'klass__section__name'),
('MaitreDeClasseNom_Ele', 'klass__teacher__last_name'),
('MaitreDeClassePrenom_Ele', 'klass__teacher__first_name'),
('OptionASE_Ele', 'option_ase__name'),
('Num_Emp', 'corporation__ext_id'),
('Nom_Emp', 'corporation__name'),
('Rue_Emp', 'corporation__street'),
('NPA_Emp', 'corporation__pcode'),
('Ville_Emp', 'corporation__city'),
('Canton_Emp', 'corporation__district'),
('Secteur_Emp', 'corporation__sector'),
('Type_EMP', 'corporation__typ'),
('Tel_Emp', 'corporation__tel'),
('Num_Form', 'instructor__ext_id'),
('Titre_Form', 'instructor__civility'),
('Prenom_Form', 'instructor__first_name'),
('Nom_Form', 'instructor__last_name'),
('Tel_Form', 'instructor__tel'),
('Email_Form', 'instructor__email'),
('Num_Form2', 'instructor2__ext_id'),
('Titre_Form2', 'instructor2__civility'),
('Prenom_Form2', 'instructor2__first_name'),
('Nom_Form2', 'instructor2__last_name'),
('Tel_Form2', 'instructor2__tel'),
('Email_Form2', 'instructor2__email'),
('EmailCopie_Form', None),
]
def general_export(request):
"""
Export all current students data
"""
export_fields = OrderedDict(GENERAL_EXPORT_FIELDS)
export = OpenXMLExport('Exportation')
export.write_line(export_fields.keys(), bold=True) # Headers
# Data
query_keys = [f for f in export_fields.values() if f is not None]
query = Student.objects.filter(archived=False).order_by('klass__name', 'last_name', 'first_name')
for line in query.values(*query_keys):
values = []
for field in query_keys:
if field == 'gender':
values.append(('Madame', 'Monsieur')[line[field] == 'M'])
elif field in ('dispense_ecg', 'dispense_eps', 'soutien_dys'):
values.append('Oui' if line[field] is True else '')
else:
values.append(line[field])
export.write_line(values)
return export.get_http_response('general_export')
ORTRA_EXPORT_FIELDS = [
('Num_Ele', 'ext_id'),
('Nom_Ele', 'last_name'),
('Prenom_Ele', 'first_name'),
('Genre_Ele', 'gender'),
('Rue_Ele', 'street'),
('NPA_Ele', 'pcode'),
('Ville_Ele', 'city'),
('DateNaissance_Ele', 'birth_date'),
('Email_Ele', 'email'),
('Mobile_Ele', 'mobile'),
('Classe_Ele', 'klass__name'),
('Filiere_Ele', 'klass__section__name'),
('MaitreDeClasseNom_Ele', 'klass__teacher__last_name'),
('MaitreDeClassePrenom_Ele', 'klass__teacher__first_name'),
('OptionASE_Ele', 'option_ase__name'),
('Num_Emp', 'corporation__ext_id'),
('Nom_Emp', 'corporation__name'),
('Rue_Emp', 'corporation__street'),
('NPA_Emp', 'corporation__pcode'),
('Ville_Emp', 'corporation__city'),
('Tel_Emp', 'corporation__tel'),
('Titre_Form', 'instructor__civility'),
('Prenom_Form', 'instructor__first_name'),
('Nom_Form', 'instructor__last_name'),
('Tel_Form', 'instructor__tel'),
('Email_Form', 'instructor__email'),
]
def ortra_export(request):
"""
Export students data from sections ASAFE, ASEFE and ASSCFE
"""
export_fields = OrderedDict(ORTRA_EXPORT_FIELDS)
export = OpenXMLExport('Exportation')
export.write_line(export_fields.keys(), bold=True) # Headers
# Data
query_keys = [f for f in export_fields.values() if f is not None]
query = Student.objects.filter(Q(klass__name__contains='ASAFE') |
Q(klass__name__contains='ASEFE') |
Q(klass__name__contains='ASSCFE'),
archived=False).order_by('klass__name',
'last_name',
'first_name')
for line in query.values(*query_keys):
values = []
for field in query_keys:
if field == 'gender':
values.append(('Madame', 'Monsieur')[line[field] == 'M'])
else:
values.append(line[field])
export.write_line(values)
return export.get_http_response('ortra_export')
| [
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
6738,
4818,
8079,
1330,
3128,
198,
6738,
20218,
7753,
1330,
34441,
12966,
5551,
8979,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
... | 2.36413 | 3,312 |
import torch
import torch.nn as nn
from torch_scatter import scatter_add, scatter_max, scatter_mean, scatter_min, scatter_mul
from utils import decompose_graph
LATENT_SIZE = 32
class GlobalBlock(nn.Module):
"""Global block, f_g.
A block that updates the global features of each graph based on
the previous global features, the aggregated features of the
edges of the graph, and the aggregated features of the nodes of the graph.
"""
class EdgeBlock(nn.Module):
"""Edge block, f_e.
Update the features of each edge based on the previous edge features,
the features of the adjacent nodes, and the global features.
"""
class NodeBlock(nn.Module):
"""Node block, f_v.
Update the features of each node based on the previous node features,
the aggregated features of the received edges,
the aggregated features of the sent edges, and the global features.
"""
def __init__(self,
in_features,
out_features,
use_nodes=True,
use_sent_edges=False,
use_received_edges=True,
use_globals=True,
sent_edges_reducer=scatter_add,
received_edges_reducer=scatter_add,
custom_func=None):
"""Initialization of the NodeBlock module.
Args:
in_features: Input dimension.
If node, 2*edge(sent, received), and global are used, d_v+(2*d_e)+d_g.
h'_i = f_v(h_i, AGG(h_ij), AGG(h_ji), u)
out_features: Output dimension.
h'_i will have the dimension.
use_nodes: Whether to condition on node attributes.
use_sent_edges: Whether to condition on sent edges attributes.
use_received_edges: Whether to condition on received edges attributes.
use_globals: Whether to condition on the global attributes.
reducer: Aggregator. scatter_* [add, mul, max, min, mean]
"""
super(NodeBlock, self).__init__()
if not (use_nodes or use_sent_edges or use_received_edges or use_globals):
raise ValueError("At least one of use_received_edges, use_sent_edges, "
"use_nodes or use_globals must be True.")
self._use_nodes = use_nodes
self._use_sent_edges = use_sent_edges
self._use_received_edges = use_received_edges
self._use_globals = use_globals
self._sent_edges_reducer = sent_edges_reducer
self._received_edges_reducer = received_edges_reducer
# f_v() is a function: R^in_features -> R^out_features
if custom_func:
# Customized function can be used for self.net instead of deafult function.
# It is highly recommended to use nn.Sequential() type.
self.net = custom_func
else:
self.net = nn.Sequential(nn.Linear(in_features, LATENT_SIZE),
nn.ReLU(),
nn.Linear(LATENT_SIZE, out_features),
)
class NodeBlockInd(NodeBlock):
"""Node-level feature transformation.
Each node is considered independently. (No edge is considered.)
Args:
in_features: input dimension of node representations.
out_features: output dimension of node representations.
(node embedding size)
(N^v, d_v) -> (N^v, out_features)
NodeBlockInd(graph) -> updated graph
"""
class EdgeBlockInd(EdgeBlock):
"""Edge-level feature transformation.
Each edge is considered independently. (No node is considered.)
Args:
in_features: input dimension of edge representations.
out_features: output dimension of edge representations.
(edge embedding size)
(N^e, d_e) -> (N^e, out_features)
EdgeBlockInd(graph) -> updated graph
"""
class GlobalBlockInd(GlobalBlock):
"""Global-level feature transformation.
No edge/node is considered.
Args:
in_features: input dimension of global representations.
out_features: output dimension of global representations.
(global embedding size)
(1, d_g) -> (1, out_features)
GlobalBlockInd(graph) -> updated graph
"""
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
198,
6738,
28034,
62,
1416,
1436,
1330,
41058,
62,
2860,
11,
41058,
62,
9806,
11,
41058,
62,
32604,
11,
41058,
62,
1084,
11,
41058,
62,
76,
377,
198,
198,
6738,
3384,
44... | 2.275168 | 1,937 |
#!/usr/bin/python
# coding: utf-8
import logging
import os
from cattledb.core.models import MetricDefinition, EventDefinition, MetricType, EventSeriesType
METRICS = [
MetricDefinition("test", "test", MetricType.FLOATSERIES, True),
# Raw Metrics
MetricDefinition("rawph", "rph", MetricType.FLOATSERIES, False),
MetricDefinition("adcph", "aph", MetricType.FLOATSERIES, False),
MetricDefinition("rawtemp", "rtp", MetricType.FLOATSERIES, False),
MetricDefinition("adctemp", "atp", MetricType.FLOATSERIES, False),
MetricDefinition("rawact", "rac", MetricType.FLOATSERIES, False),
MetricDefinition("rawhum", "rhu", MetricType.FLOATSERIES, False),
# Stage 1
MetricDefinition("ph", "ph", MetricType.FLOATSERIES, True),
MetricDefinition("temp", "tmp", MetricType.FLOATSERIES, True),
MetricDefinition("act", "act", MetricType.FLOATSERIES, True),
MetricDefinition("hum", "hum", MetricType.FLOATSERIES, True),
MetricDefinition("act_index", "aci", MetricType.FLOATSERIES, True),
MetricDefinition("rawphuncorrected", "uph", MetricType.FLOATSERIES, True)
]
EVENTS = [
EventDefinition("test_daily", EventSeriesType.DAILY),
EventDefinition("test_monthly", EventSeriesType.MONTHLY),
EventDefinition("test_monthly_*", EventSeriesType.MONTHLY)
]
TESTING = False
DEBUG = False
ENGINE = "bigtable"
ENGINE_OPTIONS = {
"credentials": None,
"project_id": "proj1",
"instance_id": "inst1"
}
READ_ONLY = False
ADMIN = True
POOL_SIZE = 10
TABLE_PREFIX = "mycdb"
LOGGING_CONFIG = {
"version": 1,
"disable_existing_loggers": False,
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": "INFO",
"stream": "ext://sys.stdout"
}
},
"root": {
"level": "INFO",
"handlers": ["console"]
}
}
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
18931,
198,
11748,
28686,
198,
198,
6738,
269,
43535,
65,
13,
7295,
13,
27530,
1330,
3395,
1173,
36621,
11,
8558,
36621,
11,
3395,
1173,
603... | 2.450593 | 759 |
from .package import run, start, check
| [
6738,
764,
26495,
1330,
1057,
11,
923,
11,
2198,
198
] | 3.9 | 10 |
import csv
import logging
import os
from enum import Enum
from army_ant.util.text import textrank
from . import Index
logger = logging.getLogger(__name__)
| [
11748,
269,
21370,
198,
11748,
18931,
198,
11748,
28686,
198,
6738,
33829,
1330,
2039,
388,
198,
198,
6738,
5428,
62,
415,
13,
22602,
13,
5239,
1330,
2420,
43027,
198,
198,
6738,
764,
1330,
12901,
198,
198,
6404,
1362,
796,
18931,
13,
... | 3.18 | 50 |
from typing import List
from webdnn.backend.code_generator.allocator import MemoryLayout
from webdnn.backend.code_generator.injectors.buffer_injector import BufferInjector
from webdnn.backend.code_generator.injectors.kernel_name_injector import KernelNameInjector
from webdnn.backend.webgpu.generator import WebGPUDescriptorGenerator
from webdnn.backend.webgpu.kernel import GPUSize, Kernel
from webdnn.graph.axis import Axis
from webdnn.graph.operators.im2col import Im2Col
from webdnn.graph.order import OrderNHWC, OrderCNHW
template_CNHW = """
kernel void %%FUNC_NAME%%(device float * %%STATIC_BUFFER%%[[buffer(0)]],
device float * %%DYNAMIC_BUFFER%%[[buffer(1)]],
const device int * %%META_BUFFER%% [[buffer(2)]],
uint index[[thread_position_in_grid]],
uint num_threads[[threads_per_grid]])
{
const device float *im = %%LOAD_BUFFER(im2col_im)%%;
device float *col = %%LOAD_BUFFER(im2col_col)%%;
const int N = %%LOAD_BUFFER(im2col_N)%%;
const int C1 = %%LOAD_BUFFER(im2col_C1)%%;
const int H1 = %%LOAD_BUFFER(im2col_H1)%%;
const int W1 = %%LOAD_BUFFER(im2col_W1)%%;
const int H2 = %%LOAD_BUFFER(im2col_H2)%%;
const int W2 = %%LOAD_BUFFER(im2col_W2)%%;
const int KH = %%LOAD_BUFFER(im2col_KH)%%;
const int KW = %%LOAD_BUFFER(im2col_KW)%%;
const int DH = %%LOAD_BUFFER(im2col_DH)%%;
const int DW = %%LOAD_BUFFER(im2col_DW)%%;
const int SH = %%LOAD_BUFFER(im2col_SH)%%;
const int SW = %%LOAD_BUFFER(im2col_SW)%%;
const int PH = %%LOAD_BUFFER(im2col_PH)%%;
const int PW = %%LOAD_BUFFER(im2col_PW)%%;
for (int gid = index; gid < N*H2*W2*KH*KW*C1; gid += num_threads) {
const int w2 = gid % W2;
const int h2 = gid / W2 % H2;
const int n = gid / W2 / H2 % N;
const int c1 = gid / W2 / H2 / N % C1;
const int kw = gid / W2 / H2 / N / C1 % KW;
const int kh = gid / W2 / H2 / N / C1 / KW;
const int h1 = h2 * SH - PH + kh * DH;
const int w1 = w2 * SW - PW + kw * DW;
col[gid] = (h1 < 0 || h1 >= H1 || w1 < 0 || w1 >= W1) ? 0 : im[((n*H1+h1)*W1+w1)*C1+c1];
}
}
"""
@WebGPUDescriptorGenerator.register_handler(Im2Col)
| [
6738,
19720,
1330,
7343,
198,
198,
6738,
3992,
67,
20471,
13,
1891,
437,
13,
8189,
62,
8612,
1352,
13,
32332,
1352,
1330,
14059,
32517,
198,
6738,
3992,
67,
20471,
13,
1891,
437,
13,
8189,
62,
8612,
1352,
13,
259,
752,
669,
13,
2225... | 2.107807 | 1,076 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
import datetime
from django.contrib.auth.models import AbstractUser
from django.urls import reverse
from django.db import models
from six import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
@python_2_unicode_compatible
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
11,
4112,
62,
11748,
198,
11748,
4818,
8079,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
2753... | 3.298246 | 114 |
import nltk
import re
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.model_selection import cross_val_predict
#nltk.download('punkt')
from textblob import TextBlob
from DataLake import Mongo
obj = SentimentAnalysis()
obj.test()
#print(obj.cleanTweet('😂😂😂😬👀🙄👹😍😜😎 Gabriel é lindo'))
| [
11748,
299,
2528,
74,
198,
11748,
302,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
1341,
35720,
13,
30053,
62,
2302,
7861,
13,
5239,
1330,
2764,
38469,
7509,
198,
6738,
1341,
35720,
13,
2616,
425,
62,
24406,
274,
1330,
7854,
259,... | 2.794521 | 146 |
from flask import Flask, render_template, request, Markup, jsonify
import json, time
app = Flask(__name__)
@app.route('/')
@app.route('/heatmap')
@app.route('/parallel-coordinates')
@app.route('/getArticlesByAuthor/<author_name>')
if __name__ == '__main__':
app.run(port = 5000)
| [
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
2581,
11,
2940,
929,
11,
33918,
1958,
198,
198,
11748,
33918,
11,
640,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
628,
198,
31,
1324,
13,
38629,
10786,
14,
11537,
198,
198,
... | 2.621622 | 111 |
import os
import json
import subprocess
import logging
from .ninja import NinjaBackend
class Meson:
"""
Base class that handles data fetching and setting options for Meson.
"""
| [
11748,
28686,
198,
11748,
33918,
198,
11748,
850,
14681,
198,
11748,
18931,
198,
198,
6738,
764,
35073,
6592,
1330,
14152,
7282,
437,
628,
198,
4871,
14937,
261,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
7308,
1398,
326,
17105,... | 3.368421 | 57 |
score = None
count = 0
total = 0
average = 0
while score != 'done':
try:
number = input('Enter a number: ')
if number == 'done':
break
number = float(number)
total = total + number
count = count + 1
average = total/count
except:
print('Invalid input')
print(total, count, average)
| [
26675,
796,
6045,
201,
198,
9127,
796,
657,
201,
198,
23350,
796,
657,
201,
198,
23913,
796,
657,
201,
198,
201,
198,
4514,
4776,
14512,
705,
28060,
10354,
201,
198,
220,
220,
220,
1949,
25,
201,
198,
220,
220,
220,
220,
220,
220,
... | 2.154286 | 175 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'AppTemplateConfigList',
'GetAppTemplatesTemplateResult',
'GetAppTemplatesTemplateConfigListResult',
]
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
24118,
687,
10290,
357,
27110,
5235,
8,
16984,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760... | 3.267857 | 168 |
import FWCore.ParameterSet.Config as cms
dtCalibValidation = cms.EDAnalyzer("DTCalibValidation",
# Write the histos on file
OutputMEsInRootFile = cms.bool(False),
# Lable to retrieve 2D segments from the event
segment2DLabel = cms.untracked.string('dt2DSegments'),
OutputFileName = cms.string('residuals.root'),
# Lable to retrieve 4D segments from the event
segment4DLabel = cms.untracked.string('dt4DSegments'),
debug = cms.untracked.bool(False),
# Lable to retrieve RecHits from the event
recHits1DLabel = cms.untracked.string('dt1DRecHits'),
# Detailed analysis
detailedAnalysis = cms.untracked.bool(False)
)
| [
11748,
48849,
14055,
13,
36301,
7248,
13,
16934,
355,
269,
907,
198,
198,
28664,
9771,
571,
7762,
24765,
796,
269,
907,
13,
1961,
37702,
9107,
7203,
35,
4825,
282,
571,
7762,
24765,
1600,
198,
220,
220,
220,
1303,
19430,
262,
1554,
41... | 2.669355 | 248 |
pets = {
'Felix': {
'kind': 'cat',
'owner': 'Milton',
},
'Nelly': {
'kind': 'dog',
'owner': 'Stilton',
},
'Maurice': {
'kind': 'parrot',
'owner': 'Tyra',
},
}
for pet, pet_details in pets.items():
print(f"\n {pet_details['owner']}'s {pet_details['kind']}'s name is {pet}!") | [
79,
1039,
796,
1391,
628,
197,
6,
42493,
844,
10354,
1391,
198,
197,
197,
6,
11031,
10354,
705,
9246,
3256,
198,
197,
197,
6,
18403,
10354,
705,
44,
9044,
3256,
198,
197,
5512,
628,
197,
6,
45,
6148,
10354,
1391,
198,
197,
197,
6,... | 2.06993 | 143 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pathlib import Path
from counting_sequences import SequenceCounter, CutadaptMatch
import pypipegraph as ppg
import pandas as pd
import pytest
import collections
__author__ = "MarcoMernberger"
__copyright__ = "MarcoMernberger"
__license__ = "mit"
@pytest.fixture
@pytest.mark.usefixtures("new_pipegraph_no_qc")
@pytest.mark.usefixtures("new_pipegraph_no_qc")
@pytest.mark.usefixtures("new_pipegraph_no_qc")
@pytest.mark.usefixtures("new_pipegraph_no_qc")
@pytest.mark.usefixtures("new_pipegraph_no_qc")
@pytest.mark.usefixtures("new_pipegraph_no_qc")
@pytest.mark.usefixtures("new_pipegraph_no_qc")
@pytest.mark.usefixtures("new_pipegraph_no_qc")
@pytest.mark.usefixtures("new_pipegraph_no_qc")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
14143,
62,
3107,
3007,
1330,
45835,
31694,
11,
9712,
42552,
23850,
198,
11748,
... | 2.450479 | 313 |
from datetime import date
from fbprophet import Prophet
from fbprophet.plot import plot_plotly
from plotly import graph_objs as go
import streamlit as st
import yfinance as yf
START = "2010-01-01"
TODAY = date.today().strftime("%Y-%m-%d")
st.title("Stock Prediction App")
st.header('Parameters')
stocks = ("AAPL", "GOOG", "MSFT")
selected_stock = st.selectbox(
"Select dataset for prediction",
stocks
)
@st.cache
st.header('Stock Data')
data = load_data(selected_stock)
st.subheader("Raw data")
st.write(data.tail())
plot_raw_data()
n_years = st.slider("Years to predict:", 1, 5)
period = n_years * 365
df_train = data[['Date', 'Close']]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
st.subheader("Forecasted data")
st.write(forecast.tail())
st.write("Forecasted Time Series")
fig1 = plot_plotly(m, forecast, trend=True)
st.plotly_chart(fig1)
| [
6738,
4818,
8079,
1330,
3128,
198,
6738,
277,
65,
22930,
3202,
1330,
13583,
198,
6738,
277,
65,
22930,
3202,
13,
29487,
1330,
7110,
62,
29487,
306,
198,
6738,
7110,
306,
1330,
4823,
62,
672,
8457,
355,
467,
198,
11748,
4269,
18250,
35... | 2.644385 | 374 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.application.service import ServiceMaker
TwistedSOCKS = ServiceMaker(
"Twisted SOCKS", "twisted.tap.socks", "A SOCKSv4 proxy service.", "socks"
)
| [
2,
15069,
357,
66,
8,
40006,
24936,
46779,
13,
198,
2,
4091,
38559,
24290,
329,
3307,
13,
198,
198,
6738,
19074,
13,
31438,
13,
15271,
1330,
4809,
48890,
198,
198,
5080,
6347,
50,
11290,
50,
796,
4809,
48890,
7,
198,
220,
220,
220,
... | 3.105263 | 76 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 23 17:32:31 2021
@author: ifti
"""
from ipapy import UNICODE_TO_IPA
from ipapy import is_valid_ipa
from ipapy.ipachar import IPAConsonant
from ipapy.ipachar import IPAVowel
from ipapy.ipastring import IPAString
from IPATranscription import IPA
import importlib
from importlib import reload
def IPA_of_token(token):
'''
IPA_of_token() is a linguistic function to find the IPA of Pasto letter
parameter : it take a token which actaully a single pashto word
return : it will return the IPA of given pashto word from the lexicon
'''
# iterate over the each token
#print("token : {}".format(token))
ipa = []
temp =""
for char in token:
#print("char : {} , {} ".format(char ,IPA(char)))
temp = str(IPA(char)).replace("[", "")
temp = temp.replace("]", "")
temp = temp.replace(",", "")
if temp =="ʔ":
print("dump")
f = open("Datasets/not_available_ipa.txt","+w" ,encoding='utf-8')
f.write(char)
f.close()
# print(temp,len(temp))
# if more then IPA then we will use first for the time being
ipa.append(temp)
#print(ipa)
return ipa
def is_valid_syllable(cv):
'''
is_valid_syllable() is helper function of linguistic part
parameter : it will syllables
return : it will return the string to tell you it is valid syllable or not.
'''
if cv in ["V","VC","CV","CVC","CCV","CVCC","CCVC","CCCV","CCCVC"]:
return "Valid syllables"
else:
return "Not Valid syllables"
def make_syllables(IPA_list):
'''
make_syllables() is the function of linguistic part of the program and
it will make the syllable of the given IPA
paramter : it takes the list of ipa of the token ,
return : it will return the syllables of the ipa
'''
#=============================================================================
#reverse_list = reversed(IPA_list)
ipa_str = ""
cv_Form = ""
for char_ipa in range(0,len(IPA_list)):
#print("ipa :",char_ipa)
if IPA_list[char_ipa] =="None":
continue
if IPA_list[char_ipa] in ['əi','ə','u','ɑ','ā','ai','a','i','o','u','e','əi','A','E','I','U','O' ]:
cv_Form+="V"
ipa_str += IPA_list[char_ipa]
else:
#print(char_ipa)
cv_Form+="C"
ipa_str += IPA_list[char_ipa] + " "
print(cv_Form)
print(is_valid_syllable(cv_Form))
return ipa_str
# =============================================================================
if __name__ == "__main__":
print()
print(make_syllables(IPA_of_token("اړنګمن"))) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
26223,
4280,
2242,
1596,
25,
2624,
25,
3132,
33448,
198,
198,
31,
9800,
25,
611,
20259,
1... | 2.128755 | 1,398 |
#recursive approach
numTerms = int(input("How many terms of Fibonacci sequence to print? "))
# What are the first few terms of the fib seq?
# 0 1 1 2 3
# main method
# check if the number of terms is valid
if numTerms <= 0:
print("Please enter a positive integer")
else:
print("Fibonacci sequence:")
for i in range(numTerms):
print(fibonacci(i))
| [
2,
8344,
30753,
3164,
198,
22510,
15156,
907,
796,
493,
7,
15414,
7203,
2437,
867,
2846,
286,
41566,
261,
44456,
8379,
284,
3601,
30,
366,
4008,
198,
198,
2,
1867,
389,
262,
717,
1178,
2846,
286,
262,
12900,
33756,
30,
198,
2,
657,
... | 2.832061 | 131 |
import unittest
import pdf2images
import os
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
ASSETS_DIR = os.path.join(THIS_DIR, "assets")
| [
11748,
555,
715,
395,
198,
11748,
37124,
17,
17566,
198,
11748,
28686,
198,
198,
43559,
62,
34720,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
5305,
6978,
7,
834,
7753,
834,
4008,
198,
10705,
32716,
62,
34720,
796,
... | 2.625 | 56 |
import os
| [
11748,
28686,
628
] | 3.666667 | 3 |
VERSION = '8.0.0'
| [
43717,
796,
705,
23,
13,
15,
13,
15,
6,
198
] | 1.8 | 10 |
import string
import random
from flask_bcrypt import Bcrypt
import names
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import func
from sqlalchemy.ext.hybrid import hybrid_property, Comparator
from sqlalchemy.schema import MetaData
from sqlalchemy.orm import backref
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
import uuid
import json
import datetime
import six
from pebbles.utils import validate_ssh_pubkey, get_full_blueprint_config, get_blueprint_fields_from_config
MAX_PASSWORD_LENGTH = 100
MAX_EMAIL_LENGTH = 128
MAX_NAME_LENGTH = 128
MAX_VARIABLE_KEY_LENGTH = 512
MAX_VARIABLE_VALUE_LENGTH = 512
MAX_NOTIFICATION_SUBJECT_LENGTH = 255
db = SQLAlchemy()
convention = {
"ix": 'ix_%(column_0_label)s',
"uq": "uq_%(table_name)s_%(column_0_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"
}
db.Model.metadata = MetaData(naming_convention=convention)
bcrypt = Bcrypt()
NAME_ADJECTIVES = (
'happy',
'sad',
'bright',
'dark',
'blue',
'yellow',
'red',
'green',
'white',
'black',
'clever',
'witty',
'smiley',
)
group_banned_user = db.Table( # Secondary Table for many-to-many mapping
'groups_banned_users',
db.Column('group_id', db.String(32), db.ForeignKey('groups.id')),
db.Column('user_id', db.String(32), db.ForeignKey('users.id')), db.PrimaryKeyConstraint('group_id', 'user_id')
)
class NamespacedKeyValue(db.Model):
""" Stores key/value pair data, separated by namespaces
This model should be initialized by providing namespace and key as mandatory arguments.
It is highly recommended to have a schema for the JSON value field,
and provide it during model initialization.
"""
__tablename__ = 'namespaced_keyvalues'
namespace = db.Column(db.String(32), primary_key=True)
key = db.Column(db.String(128), primary_key=True)
_value = db.Column(db.Text)
_schema = db.Column(db.Text)
created_ts = db.Column(db.Float)
updated_ts = db.Column(db.Float)
@classmethod
def str_to_bool(cls, val):
""" Convert the string into boolean.
Useful when value comes from UI and becomes True even if False
By default, this function shall return False
"""
if val:
val = val.lower()
if val in ('true', u'true', '1'):
return True
return False
@hybrid_property
@schema.setter
@hybrid_property
@value.setter
| [
11748,
4731,
198,
11748,
4738,
198,
6738,
42903,
62,
15630,
6012,
1330,
347,
29609,
198,
11748,
3891,
198,
6738,
42903,
62,
25410,
282,
26599,
1330,
16363,
2348,
26599,
198,
6738,
44161,
282,
26599,
1330,
25439,
198,
6738,
44161,
282,
265... | 2.513222 | 1,021 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from pyscaffold import templates
__author__ = "Florian Wilhelm"
__copyright__ = "Blue Yonder"
__license__ = "new BSD"
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
198,
198,
6738,
279,
28349,
2001,
727,
1330,
24019,
198,
198,
834,
9800,
834,
796,
366,
26953,
666,
... | 2.608696 | 69 |
"""
VISIUALIZATION MODULE loading Parameter Matrices
CALL BY: <visiualize.py>
RETURN: Environment simulation (animated) & Plots
INFO: This Module can load a specific File Dump (cPickle) and visiualize the containig matrices onto a OpenAI Gym Environment
"""
# Some dependencies
import numpy as np
import matplotlib.pyplot as plt
import hickle as hkl
import gym
from .lif import I_syn_calc, I_gap_calc, U_neuron_calc
from .parameters import *
from .random_search_v2 import compute as compute_v2
from .random_search_v2 import observe
from .weights_nn import compute as compute_with_weights
# Initializing OpenAI Environments------------------------------------------------------
env = gym.make('CartPole-v0')
env.reset()
env_vis = []
#---------------------------------------------------------------------------------------
# Initialization----------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
# Append Function---------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
# Plot Function-----------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
# OpenAI Gym--------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
# Main Function-----------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
if __name__=="__main__":
main()
| [
37811,
198,
29817,
40,
25620,
14887,
6234,
33893,
11046,
25139,
2357,
6550,
45977,
198,
198,
34,
7036,
11050,
25,
220,
220,
220,
1279,
4703,
72,
723,
1096,
13,
9078,
29,
198,
198,
26087,
27064,
25,
220,
220,
220,
220,
9344,
18640,
357... | 5.81155 | 329 |
from django.db import transaction
from rest_framework.generics import get_object_or_404
from SocialNetwork_API.models import *
from SocialNetwork_API.services.base import BaseService
| [
6738,
42625,
14208,
13,
9945,
1330,
8611,
198,
198,
6738,
1334,
62,
30604,
13,
8612,
873,
1330,
651,
62,
15252,
62,
273,
62,
26429,
628,
198,
6738,
5483,
26245,
62,
17614,
13,
27530,
1330,
1635,
198,
6738,
5483,
26245,
62,
17614,
13,
... | 3.76 | 50 |
# Purple Damage Skin
PURPLE_DAMAGE_SKIN_ID = 2435432

if sm.addDamageSkin(PURPLE_DAMAGE_SKIN_ID):
    sm.chat("The Purple Damage Skin has been added to your account's damage skin collection.")
# sm.consumeItem(2435432)
| [
2,
17265,
8995,
17847,
198,
13138,
796,
895,
13,
2860,
22022,
42455,
7,
1731,
32182,
2624,
8,
198,
361,
1943,
25,
198,
220,
220,
220,
895,
13,
17006,
7203,
464,
17265,
8995,
17847,
468,
587,
2087,
284,
534,
1848,
338,
2465,
4168,
49... | 3.233333 | 60 |
# Read two whitespace-separated integers from one line of stdin.
a, b = map(int, input().split())

# Emit each arithmetic result on its own line, in the original order.
print(a + b)
print(a - b)
print(a * b)
print(a // b)
print(a % b)
# True division, rendered with exactly two decimal places.
print(format(a / b, ".2f"))
| [
64,
11,
275,
796,
5128,
22446,
35312,
3419,
198,
64,
796,
493,
7,
64,
8,
198,
65,
796,
493,
7,
65,
8,
198,
198,
4798,
7,
64,
10,
65,
8,
198,
4798,
7,
64,
12,
65,
8,
198,
4798,
7,
64,
9,
65,
8,
198,
4798,
7,
64,
1003,
6... | 1.777778 | 72 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
# %matplotlib inline
# %%
# %run notebook_setup
# %% [markdown]
# # Sampling
#
# `pymc3-ext` comes with some functions to make sampling more flexible in some cases and improve the default parameter choices for the types of problems encountered in astrophysics.
# These features are accessed through the `pymc3_ext.sample` function that behaves mostly like the `pymc3.sample` function with a couple of different arguments.
# The two main differences for all users is that the `pymc3_ext.sample` function defaults to a target acceptance fraction of `0.9` (which will be better for many models in astrophysics) and to adapting a full dense mass matrix (instead of diagonal).
# Therefore, if there are covariances between parameters, this method will generally perform better than the PyMC3 defaults.
#
# ## Correlated parameters
#
# A thorough discussion of this [can be found elsewhere online](https://dfm.io/posts/pymc3-mass-matrix/), but here is a simple demo where we sample a covariant Gaussian using `pymc3_ext.sample`.
#
# First, we generate a random positive definite covariance matrix for the Gaussian:
# %%
import numpy as np
ndim = 5
np.random.seed(42)
L = np.random.randn(ndim, ndim)
L[np.diag_indices_from(L)] = 0.1 * np.exp(L[np.diag_indices_from(L)])
L[np.triu_indices_from(L, 1)] = 0.0
cov = np.dot(L, L.T)
# %% [markdown]
# And then we can set up this model using PyMC3:
# %%
import pymc3 as pm
with pm.Model() as model:
pm.MvNormal("x", mu=np.zeros(ndim), chol=L, shape=ndim)
# %% [markdown]
# If we sample this using PyMC3 default sampling method, things don't go so well (we're only doing a small number of steps because we don't want it to take forever, but things don't get better if you run for longer!):
# %%
with model:
trace = pm.sample(tune=500, draws=500, chains=2, cores=2)
# %% [markdown]
# But, we can use `pymc3_ext.sample` as a drop in replacement to get much better performance:
# %%
import pymc3_ext as pmx
with model:
tracex = pmx.sample(tune=1000, draws=1000, chains=2, cores=2)
# %% [markdown]
# As you can see, this is substantially faster (even though we generated twice as many samples).
#
# We can compare the sampling summaries to confirm that the default method did not produce reliable results in this case, while the `pymc3_ext` version did:
# %%
import arviz as az
az.summary(trace).head()
# %%
az.summary(tracex).head()
# %% [markdown]
# In this particular case, you could get similar performance using the `init="adapt_full"` argument to the `sample` function in PyMC3, but the implementation in `pymc3-ext` is somewhat more flexible.
# Specifically, `pymc3_ext` implements a tuning procedure that it more similar to [the one implemented by the Stan project](https://mc-stan.org/docs/2_24/reference-manual/hmc-algorithm-parameters.html).
# The relevant parameters are:
#
# - `warmup_window`: The length of the initial "fast" window. This is called "initial buffer" in the Stan docs.
# - `adapt_window`: The length of the initial "slow" window. This is called "window" in the Stan docs.
# - `cooldown_window`: The length of the final "fast" window. This is called "term buffer" in the Stan docs.
#
# Unlike the Stan implementation, here we have support for updating the mass matrix estimate every `recompute_interval` steps based on the previous window and all the steps in the current window so far.
# This can improve warm up performance substantially so the default value is `1`, but this might be intractable for high dimensional models.
# To only recompute the estimate at the end of each window, set `recompute_interval=0`.
#
# If you run into numerical issues, you can try increasing `adapt_window` or use the `regularization_steps`and `regularization_variance` to regularize the mass matrix estimator.
# The `regularization_steps` parameter sets the effective number of steps that are used for regularization and `regularization_variance` is the effective variance for those steps.
# %% [markdown]
# ## Parameter groups
#
# If you are fitting a model with a large number of parameters, it might not be computationally or numerically tractable to estimate the full dense mass matrix.
# But, sometimes you might know something about the covariance structure of the problem that you can exploit.
# Perhaps some parameters are correlated with each other, but not with others.
# In this case, you can use the `parameter_groups` argument to exploit this structure.
#
# Here is an example where `x`, `y`, and `z` are all independent with different covariance structure.
# We can take advantage of this structure using `pmx.ParameterGroup` specifications in the `parameter_groups` argument.
# Note that by default each group will internally estimate a dense mass matrix, but here we specifically only estimate a diagonal mass matrix for `z`.
# %%
with pm.Model():
x = pm.MvNormal("x", mu=np.zeros(ndim), chol=L, shape=ndim)
y = pm.MvNormal("y", mu=np.zeros(ndim), chol=L, shape=ndim)
z = pm.Normal("z", shape=ndim) # Uncorrelated
tracex2 = pmx.sample(
tune=1000,
draws=1000,
chains=2,
cores=2,
parameter_groups=[
[x],
[y],
pmx.ParameterGroup([z], "diag"),
],
)
# %%
| [
2,
11420,
198,
2,
474,
929,
88,
353,
25,
198,
2,
220,
220,
474,
929,
88,
5239,
25,
198,
2,
220,
220,
220,
220,
2420,
62,
15603,
341,
25,
198,
2,
220,
220,
220,
220,
220,
220,
7552,
25,
764,
9078,
198,
2,
220,
220,
220,
220,
... | 3.162085 | 1,746 |
# old code from iris example
# from sklearn.neighbors import KNeighborsClassifier
# from sklearn.datasets import load_iris
# from sklearn.externals import joblib
#import all the needed imports
import numpy as np
import pandas as pd
import os
from sklearn.cross_validation import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.externals import joblib
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.metrics import confusion_matrix
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.learning_curve import learning_curve
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
# old code from iris example for reference
# if __name__ == "__main__":
# # Load Iris Data
# iris_data = load_iris()
# features = iris_data.data
# feature_names = iris_data.feature_names
# target = iris_data.target
# target_names = iris_data.target_names
#
# knn = KNeighborsClassifier(n_neighbors=3) # replace with your own ML model here
# knn.fit(features, target)
#
# joblib.dump(knn, 'models/iris_model.pkl')
if __name__ == "__main__":
    # Load the credit-card default dataset, drop rows with missing values
    # and the non-predictive ID column.
    df = pd.read_csv('default_of_credit_card_clients.csv')
    df = df.dropna()
    df = df.drop('ID', axis = 1)
    # Recode the numeric target: 0 -> "Paid", 1 -> "Default".
    df['default payment next month'] = df['default payment next month'].replace(to_replace=0, value="Paid")
    df['default payment next month'] = df['default payment next month'].replace(to_replace=1, value="Default")
    # Express the credit limit in thousands.
    df['LIMIT_BAL'] = df['LIMIT_BAL']/1000
    # Adds the pct_paid_1..pct_paid_6 columns (pct paid N is N months ago).
    # NOTE(review): percent_maker is defined elsewhere in this file and
    # presumably mutates df in place -- confirm against its definition.
    percent_maker(df)
    # Replace null and infinite values produced by the percentage columns.
    df = df.replace({None:0, np.inf:1})
    # Feature matrix used for modeling.
    features = df[['LIMIT_BAL', 'SEX', 'EDUCATION', 'MARRIAGE', 'AGE','pct_paid_1', 'pct_paid_2', 'pct_paid_3',
               'pct_paid_4', 'pct_paid_5', 'pct_paid_6']]
    feature_names = list(features.columns.values)
    target = df['default payment next month']
    target_names = ["Paid", "Default"]
    # Fit a random forest on the full dataset and persist it for serving.
    RF = RandomForestClassifier()
    RF.fit(features, target)
    joblib.dump(RF, 'models/credit_model.pkl')
| [
2,
1468,
2438,
422,
4173,
271,
1672,
198,
2,
422,
1341,
35720,
13,
710,
394,
32289,
1330,
509,
46445,
32289,
9487,
7483,
198,
2,
422,
1341,
35720,
13,
19608,
292,
1039,
1330,
3440,
62,
29616,
198,
2,
422,
1341,
35720,
13,
1069,
759,... | 2.747788 | 904 |
from PySide6.QtWidgets import QApplication, QMainWindow, QPushButton
from PySide6.QtGui import QScreen
from PySide6.QtCore import QSize
from config import t_max, t_min, t_norm, b_x, b_y
if __name__ == "__main__":
    # Build the Qt application (no CLI arguments forwarded), show the main
    # window, and hand control to the Qt event loop.
    app = QApplication([])
    mainWin = MainWindow()  # NOTE(review): MainWindow is defined elsewhere in this file
    mainWin.show()
    app.exec()
6738,
9485,
24819,
21,
13,
48,
83,
54,
312,
11407,
1330,
1195,
23416,
11,
1195,
13383,
27703,
11,
1195,
49222,
21864,
198,
6738,
9485,
24819,
21,
13,
48,
83,
8205,
72,
1330,
1195,
23901,
198,
6738,
9485,
24819,
21,
13,
48,
83,
14055... | 2.223684 | 152 |
import git
import git.exc
import os
from pathlib import Path
import subprocess
import sys
import collections
import enum
from typing import List, Dict, Set, Optional
from smartsquash.decorators import memorize_files_changed
from loguru import logger
def retrieve_commits(
    repo: git.Repo, target_branch: str, reverse: bool = True
) -> List[git.Commit]:
    """
    Retrieve commits that are only part of the currently active branch
    and are not in the target branch.

    Merge commits (more than one parent) are ignored on both branches.

    - git cherry could be used for this, but GitPython doesn't support it
    - Just run raw git command, if this becomes bottleneck

    :param repo: repository to inspect
    :param target_branch: name/rev of the branch to compare against
    :param reverse: if True (default), return commits oldest-first
    :return: commits unique to the active branch
    """
    # Hexshas are plain strings; the original annotation List[git.Commit]
    # was wrong. A set also makes the membership test below O(1) instead of
    # an O(n) list scan per commit.
    target_commit_shas: Set[str] = {
        commit.hexsha
        for commit in repo.iter_commits(rev=target_branch)
        if len(commit.parents) < 2  # ignore merge commits
    }

    commits: List[git.Commit] = [
        commit
        for commit in repo.iter_commits(rev=repo.active_branch)
        if commit.hexsha not in target_commit_shas
        and len(commit.parents) < 2  # ignore merge commits
    ]

    if reverse:
        commits.reverse()

    return commits
@memorize_files_changed
| [
11748,
17606,
198,
11748,
17606,
13,
41194,
198,
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
850,
14681,
198,
11748,
25064,
198,
11748,
17268,
198,
11748,
33829,
198,
6738,
19720,
1330,
7343,
11,
360,
713,
11,
5345,
11,
... | 2.825 | 400 |
import tensorflow as tf
from tensorflow.python.keras.layers import Input
# gpus = tf.config.list_physical_devices('GPU')
# for gpu in gpus:
# tf.config.experimental.set_memory_growth(gpu, True)
| [
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
11192,
273,
11125,
13,
29412,
13,
6122,
292,
13,
75,
6962,
1330,
23412,
198,
198,
2,
27809,
385,
796,
48700,
13,
11250,
13,
4868,
62,
42854,
62,
42034,
10786,
33346,
11537,
198,
2,
329,... | 2.6875 | 80 |
print( '<' + __name__ + ' name=\'' + __file__ + '\'>' )
import IceRayPy.core.material.instruction.label.coord3d.const
import IceRayPy.core.material.instruction.label.coord3d.dynamic
import IceRayPy.core.material.instruction.label.coord3d.temp
print( '</' + __name__ + ' name=\'' + __file__ + '\'>' )
| [
4798,
7,
705,
27,
6,
1343,
11593,
3672,
834,
1343,
705,
1438,
28,
59,
7061,
1343,
220,
220,
11593,
7753,
834,
1343,
705,
43054,
29,
6,
1267,
201,
198,
201,
198,
11748,
6663,
19591,
20519,
13,
7295,
13,
33665,
13,
8625,
2762,
13,
1... | 2.460938 | 128 |
#equations for constraints
import numpy as np
from calcModuleTP import ATransformMatrixTHETA as A_Theta, link2index
from calcModuleTP import ATransformMatrix as A_i | [
2,
4853,
602,
329,
17778,
198,
11748,
299,
32152,
220,
220,
220,
220,
220,
220,
220,
355,
45941,
198,
6738,
220,
220,
42302,
26796,
7250,
220,
220,
1330,
317,
41762,
46912,
4221,
20892,
355,
317,
62,
464,
8326,
11,
2792,
17,
9630,
1... | 2.875 | 64 |
import tkinter as tk
import random
board_layout = []
utilities = {}
turn = 'X'
radio_button = '0'
winner = ''
count = 0
def minimax(board, min_max):
    """Return the optimal (score, cell_index) for the current board state.

    Classic full-depth minimax over the tic-tac-toe game tree.

    :param board: 2-D list of cell marks; the original board is never
        modified (each branch explores a copy).
    :param min_max: True when it is the maximizing player's ('O') turn,
        False for the minimizing player ('X').
    :return: tuple (score, index) where score is 1 / -1 / 0 and index is
        the (row, col) of the best move, or None on a terminal board.

    NOTE(review): relies on win_draw() and empty_cells(), defined elsewhere
    in this file -- win_draw presumably returns 1 (O wins), -1 (X wins),
    0 (draw) and 2 (game still in progress); confirm against its definition.
    """
    result = win_draw(board)
    # Anything other than the "still playing" sentinel 2 is a terminal
    # state: return its score with no move to make.
    if result != 2:
        return result, None
    maximum = -1
    minimum = 1
    best_index = (0, 0)
    for index in empty_cells(board):
        # Copy every row so sibling branches start from an untouched board.
        new_board = [i.copy() for i in board]
        # puts in the board X or O according the turn
        new_board[index[0]][index[1]] = 'O' if min_max is True else 'X'
        # the recursive step: score this move from the opponent's perspective
        result = minimax(new_board, not min_max)[0]
        # computer turn
        if min_max is True:
            # short-circuit: 1 is the best possible outcome, stop searching
            if result == 1:
                return 1, index
            # Finds the maximum result out of the possible ways and its index (one step from the current board)
            if maximum <= result:
                maximum = result
                best_index = index
        # player turn
        else:
            # short-circuit: -1 is the minimizer's best outcome, stop searching
            if result == -1:
                return -1, index
            # Finds the minimum result out of the possible ways and its index (one step from the current board)
            if minimum >= result:
                minimum = result
                best_index = index
    # returns the result and the optimal index for whichever side moved
    return (maximum, best_index) if min_max is True else (minimum, best_index)
def labels_bind():
    """Enable clicking on the grid: each cell forwards clicks to player_step."""
    for idx, cell in enumerate(board_layout):
        # The default argument freezes the current index; the clicked label
        # is still looked up in board_layout at event time, as before.
        cell.bind("<Button-1>", func=lambda event, item=idx: player_step(board_layout[item]))
def labels_unbind():
    """Disable clicking on the grid by rebinding clicks to a no-op handler."""
    for cell in board_layout:
        # An identity lambda swallows the event, so clicks do nothing.
        cell.bind("<Button-1>", func=lambda x: x)
if __name__ == "__main__":
main()
| [
11748,
256,
74,
3849,
355,
256,
74,
198,
11748,
4738,
198,
3526,
62,
39786,
796,
17635,
198,
315,
2410,
796,
23884,
198,
15344,
796,
705,
55,
6,
198,
37004,
62,
16539,
796,
705,
15,
6,
198,
39791,
796,
10148,
198,
9127,
796,
657,
... | 2.454657 | 816 |
from gym.envs.registration import register
# Both custom environments share the same episode cap and reward threshold;
# only the id and entry point differ, so register them from one table.
_ENV_SPECS = (
    ('SimpleNavigation-v0', 'safe_il.envs:SimpleNavigation'),
    ('BoneDrilling2D-v0', 'safe_il.envs:BoneDrilling2D'),
)

for _env_id, _entry_point in _ENV_SPECS:
    register(
        id=_env_id,
        entry_point=_entry_point,
        max_episode_steps=10000,
        reward_threshold=200,
    )
| [
6738,
11550,
13,
268,
14259,
13,
2301,
33397,
1330,
7881,
198,
198,
30238,
7,
198,
220,
220,
220,
4686,
11639,
26437,
30575,
7065,
12,
85,
15,
3256,
198,
220,
220,
220,
5726,
62,
4122,
11639,
21230,
62,
346,
13,
268,
14259,
25,
2643... | 2.430657 | 137 |
from brownie import network, config, AuthenticatorProvider
from scripts.utils import get_account, is_network_local
from scripts.deploy import deploy
import pytest
| [
6738,
7586,
494,
1330,
3127,
11,
4566,
11,
31885,
26407,
29495,
198,
6738,
14750,
13,
26791,
1330,
651,
62,
23317,
11,
318,
62,
27349,
62,
12001,
198,
6738,
14750,
13,
2934,
1420,
1330,
6061,
198,
198,
11748,
12972,
9288,
628
] | 4.125 | 40 |
'''
Q:You are given the following information, but you may prefer to do some research for yourself.
1 Jan 1900 was a Monday.
Thirty days has September,
April, June and November.
All the rest have thirty-one,
Saving February alone,
Which has twenty-eight, rain or shine.
And on leap years, twenty-nine.
A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.
How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
'''
'''
final answer: 171
'''
# init constant
# Days per month of a non-leap year, indexed 0 (Jan) .. 11 (Dec).
# February is a -1 sentinel: presumably replaced with 28 or 29 by compute()
# (defined elsewhere in this file) depending on the year -- TODO confirm.
months = [
    31, # Jan
    -1, # Feb - this will be filled later depending on the year
    31, # Mar
    30, # Apr
    31, # May
    30, # Jun
    31, # Jul
    31, # Aug
    30, # Sep
    31, # Oct
    30, # Nov
    31, # Dec
]
# compute
if __name__ == '__main__':
print(" " + str(compute()) + " ")
| [
7061,
6,
198,
48,
25,
1639,
389,
1813,
262,
1708,
1321,
11,
475,
345,
743,
4702,
284,
466,
617,
2267,
329,
3511,
13,
198,
198,
16,
2365,
21489,
373,
257,
3321,
13,
198,
38856,
1528,
468,
2693,
11,
198,
16784,
11,
2795,
290,
3389,
... | 2.916388 | 299 |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import Finding, Agency, Grant, Grantee, User, Comment
# User needs the stock UserAdmin (password handling etc.); every other
# model is registered with the default ModelAdmin, in the original order.
admin.site.register(User, UserAdmin)
for _model in (Agency, Finding, Grant, Grantee, Comment):
    admin.site.register(_model)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
28482,
1330,
11787,
46787,
198,
6738,
764,
27530,
1330,
27063,
11,
7732,
11,
12181,
11,
12181,
1453,
11,
11787,
11,
18957,
198,
198,
... | 3.28 | 100 |
import unittest
import numpy as np
import nearest_correlation
from nearest_correlation import nearcorr
# References
# [1] 'Computing the nearest correlation matrix - a problem from finance': Higham, IMA Journal of Numerical Analysis (2002) 22, 329.343
# This test is taken from the example given in the
# NAG Mark 24 documentation for g02aa
# It originally appeared in [1]
# This example taken from [1]
# This uses the same input matrix as test_HighamExample2002
# but I made up the weights vector since I couldn't find an example. No idea if it makes sense or not
# Higham's MATLAB original was used as an oracle
# A single calculation that fails after 3 iterations should give the same result as three calculations
# that each perform 1 iteration, restarting where they left off
# Ensure that an exception is raised when a non-symmetric matrix is passed
# Ensure that an exception is raised when calculation does not converge befer maxiterations is exceeded
# Ensure that an exception is not raised when calculation does not converge befer maxiterations is exceeded
# and except_on_too_many_iterations = False
if __name__ == '__main__':
main() | [
11748,
555,
715,
395,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
16936,
62,
10215,
49501,
198,
6738,
16936,
62,
10215,
49501,
1330,
1474,
10215,
81,
198,
198,
2,
31458,
198,
2,
685,
16,
60,
705,
5377,
48074,
262,
16936,
16096,
17... | 3.681818 | 330 |
from pathlib import Path
from plmbr.version import version
# Recursively collect every Python and Markdown file under the project root.
_root = Path('.')
pys = list(_root.rglob('*.py'))
mds = list(_root.rglob('*.md'))
# Expected source-distribution artifact for the current package version.
sdist = Path('dist') / f'plmbr-{version}.tar.gz'
| [
6738,
3108,
8019,
1330,
10644,
198,
6738,
458,
76,
1671,
13,
9641,
1330,
2196,
198,
198,
79,
893,
796,
1351,
7,
15235,
10786,
2637,
737,
81,
4743,
672,
10786,
24620,
9078,
6,
4008,
198,
9132,
82,
796,
1351,
7,
15235,
10786,
2637,
73... | 2.434211 | 76 |
"""
Abstract base classes for construction asynchronous workers.
"""
from abc import ABCMeta, abstractmethod
from typing import Sequence, Tuple
from aiohttp.web import Application
class AbstractWorker(metaclass=ABCMeta):
"""
This base class provides most basic functionality for the worker.
"""
@property
@abstractmethod
@property
@abstractmethod
@abstractmethod
class AbstractStreamWorker(AbstractWorker):
"""
Base class for the worker, who operates on a stream of events.
"""
@property
@abstractmethod
@abstractmethod
@abstractmethod
class AbstractHttpServerWorker(AbstractWorker):
"""
Base for the HTTP server implementation on top of aiohttp.
"""
@property
@abstractmethod
@abstractmethod
@abstractmethod
@abstractmethod
@abstractmethod
@abstractmethod
@property
@abstractmethod
@abstractmethod
class HttpServerWorkerMixinMeta(ABCMeta):
"""
Ensures that mixin only applies to HttpServerWorker concrete classes.
"""
| [
37811,
198,
23839,
2779,
6097,
329,
5103,
39354,
3259,
13,
198,
37811,
198,
198,
6738,
450,
66,
1330,
9738,
48526,
11,
12531,
24396,
198,
6738,
19720,
1330,
45835,
11,
309,
29291,
198,
198,
6738,
257,
952,
4023,
13,
12384,
1330,
15678,
... | 3.066092 | 348 |
# -*- coding: utf-8 -*-
#
# ojar - [?] rune
# https://github.com/vesche/ojar
#
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
198,
2,
267,
9491,
532,
685,
26398,
35930,
198,
2,
3740,
1378,
12567,
13,
785,
14,
1158,
2395,
14,
78,
9491,
198,
2,
628
] | 2.025 | 40 |
import torch
import torch.nn as nn
from util.utils import positive_mask
import os
import math
import util.utils as utils
import torch.nn.functional as F
class NTXent(nn.Module):
    """
    The Normalized Temperature-scaled Cross Entropy Loss (NT-Xent),
    extended to four augmented views per example.
    Source: https://github.com/Spijkervet/SimCLR

    NOTE(review): __init__ is not visible in this chunk; self.margin,
    self.temperature, self.alpha, self.N, self.batch_size, self.mask,
    self.similarity_f and self.criterion are presumably set there --
    confirm. From the reshape below, self.N looks like 4 * batch_size
    (one positive logit per concatenated sample).
    """
    def forward(self, zx, zy, zx1, zy1, global_step):
        """
        zx: projection output of batch zx
        zy: projection output of batch zy
        zx1, zy1: projections of two further augmented views
        global_step: accepted but unused in this method
        :return: normalized loss (cross entropy averaged over self.N rows)
        """
        positive_samples, negative_samples = self.sample_no_dict(zx, zy, zx1, zy1)
        # Optional additive-margin variant: subtract a margin m (computed in
        # logit space) from every positive similarity.
        if self.margin:
            m = self.temperature * math.log(self.alpha / negative_samples.shape[1])
            positive_samples = ((positive_samples * self.temperature) - m) / self.temperature
        # Positives occupy column 0 of the logits, so the target class is 0
        # for every row.
        labels = torch.zeros(self.N).to(positive_samples.device).long()
        logits = torch.cat((positive_samples, negative_samples), dim=1)
        loss = self.criterion(logits, labels)
        loss /= self.N
        return loss
    def sample_no_dict(self, zx, zy, zx1, zy1):
        """
        Negative samples without dictionary

        Concatenates the four batches, builds the temperature-scaled
        pairwise similarity matrix, and splits it into per-row positive
        and negative logits.
        :return: (positive_samples with shape (self.N, 1), negative_samples)
        """
        # print(zx.shape)
        z = torch.cat((zx, zy, zx1,zy1), dim=0)
        sim = self.similarity_f(z.unsqueeze(1), z.unsqueeze(0)) / self.temperature
        # print(sim.shape,self.batch_size )
        # Splitting the matrix into 4 blocks so as to count number of positive and negative samples
        sim_left, sim_right = torch.chunk(sim, 2, dim=1)
        sim_lu,sim_ll = torch.chunk(sim_left, 2, dim=0)
        sim_ru,sim_rl = torch.chunk(sim_right, 2, dim=0)
        # print(sim_lu.shape,self.batch_size )
        # Extract positive samples from each block: the off-diagonals at
        # offset +/- batch_size pair each sample with its augmented view.
        #sim_xy = torch.diag(sim, self.batch_size)
        pos_1 = torch.diag(sim_lu, self.batch_size)
        pos_2 = torch.diag(sim_lu, -self.batch_size)
        pos_3 = torch.diag(sim_rl, self.batch_size)
        pos_4 = torch.diag(sim_rl, -self.batch_size)
        # sim_yx = torch.diag(sim, -self.batch_size)
        positive_samples = torch.cat((pos_1, pos_2, pos_3, pos_4), dim=0).reshape(self.N, 1)
        # Extract negative samples. NOTE(review): self.mask presumably drops
        # the self- and positive-pair entries -- confirm against __init__.
        neg_lu = sim_lu[self.mask].reshape(self.batch_size*2, 2*(self.batch_size-1) )
        neg_rl = sim_rl[self.mask].reshape(self.batch_size*2, 2*(self.batch_size-1))
        # Concatenating the extracted negatives from sim block left upper and right lower.
        neg_u = torch.cat((neg_lu, sim_ru), dim=1)
        neg_l = torch.cat((sim_ll, neg_rl), dim=1)
        negative_samples = torch.cat((neg_u, neg_l), dim=0)
        return positive_samples, negative_samples
class BarlowTwinsLoss(torch.nn.Module):
"""
loss function taken from https://github.com/IgorSusmelj/barlowtwins
paper: https://arxiv.org/abs/2103.03230
"""
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
7736,
13,
26791,
1330,
3967,
62,
27932,
198,
11748,
28686,
198,
11748,
10688,
198,
11748,
7736,
13,
26791,
355,
3384,
4487,
198,
11748,
28034,
13,
20471,
13,
45124,
35... | 2.2488 | 1,250 |
import datetime
import os
import requests
import re
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
if __name__ == '__main__':
civil_names = CivilNames()
civil_names.write_civil_file(civil_names.get_civil_names())
| [
11748,
4818,
8079,
198,
11748,
28686,
198,
11748,
7007,
198,
11748,
302,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
628,
198,
361,
11593,
3672,
834,
6624,
... | 2.987805 | 82 |
import gc
import prometheus_client
import sys
# Module-level GC metrics. Each Gauge uses set_function so its value is
# read lazily from the `gc` module at scrape time rather than being pushed.
enabled = prometheus_client.Gauge(
    'python_gc_enabled', 'Whether the garbage collector is enabled.')
enabled.set_function(gc.isenabled)
debug = prometheus_client.Gauge(
    'python_gc_debug', 'The debug flags currently set on the Python GC.')
debug.set_function(gc.get_debug)
count = prometheus_client.Gauge(
    'python_gc_count',
    'Count of objects tracked by the Python garbage collector, by generation.',
    ['generation'])
# NOTE(review): set_function_on_map_gauge is not defined in this chunk --
# presumably a module helper that attaches one callback result per
# 'generation' label value; confirm against its definition.
set_function_on_map_gauge(count, (0, 1, 2), gc.get_count)
thresholds = prometheus_client.Gauge(
    'python_gc_threshold',
    'GC thresholds by generation',
    ['generation'])
set_function_on_map_gauge(thresholds, (0, 1, 2), gc.get_threshold)
# gc.get_stats() only exists on Python 3.4+, so the remaining metrics are
# registered conditionally.
if sys.version_info >= (3, 4):
    # The following 3 metrics are gauges because they come from a
    # callback, but their values behave like counters (the values
    # returned by gc.get_stats() are counters).
    collections = prometheus_client.Gauge(
        'python_gc_collections_total',
        'Number of GC collections that occurred by generation',
        ['generation'])
    set_function_on_map_gauge(collections, (0, 1, 2), lambda: [
        x['collections'] for x in gc.get_stats()])
    collected = prometheus_client.Gauge(
        'python_gc_collected_total',
        'Number of garbage collected objects by generation',
        ['generation'])
    set_function_on_map_gauge(collected, (0, 1, 2), lambda: [
        x['collected'] for x in gc.get_stats()])
    uncollectables = prometheus_client.Gauge(
        'python_gc_uncollectables',
        'Number of uncollectable objects by generation',
        ['generation'])
    set_function_on_map_gauge(uncollectables, (0, 1, 2), lambda: [
        x['uncollectable'] for x in gc.get_stats()])
| [
11748,
308,
66,
198,
11748,
1552,
36916,
62,
16366,
198,
11748,
25064,
628,
198,
198,
25616,
796,
1552,
36916,
62,
16366,
13,
38,
559,
469,
7,
198,
220,
220,
220,
705,
29412,
62,
36484,
62,
25616,
3256,
705,
15354,
262,
15413,
22967,
... | 2.639175 | 679 |
import unittest
from ddt import ddt, data, unpack #installation of this library is required 'pip install ddt'
from pyunitreport import HTMLTestRunner
from selenium import webdriver
@ddt
if __name__ == "__main__":
unittest.main(verbosity = 2) | [
11748,
555,
715,
395,
198,
6738,
288,
28664,
1330,
288,
28664,
11,
1366,
11,
555,
8002,
1303,
17350,
341,
286,
428,
5888,
318,
2672,
705,
79,
541,
2721,
288,
28664,
6,
198,
6738,
12972,
20850,
13116,
1330,
11532,
14402,
49493,
198,
67... | 3.128205 | 78 |
import asyncio
import random
import typing
from aiozipkin.span import SpanAbc
from ....quru_logger import logger
from ....env import HEARTBEAT_INTERVAL
from ....words import RaftLog
from ..timer import Timer
from .base import BaseState, log_consistency_check, candidate_qualification
| [
11748,
30351,
952,
198,
11748,
4738,
198,
11748,
19720,
198,
198,
6738,
257,
952,
13344,
5116,
13,
12626,
1330,
49101,
4826,
66,
198,
198,
6738,
19424,
80,
14717,
62,
6404,
1362,
1330,
49706,
198,
6738,
19424,
24330,
1330,
11179,
7227,
... | 3.54321 | 81 |
import bpy; | [
11748,
275,
9078,
26
] | 2.75 | 4 |
# Reference Codes
# https://github.com/kentaroy47/vision-transformers-cifar10
# https://github.com/FrancescoSaverioZuppichini/ViT
# https://github.com/lucidrains/vit-pytorch
#Lib import
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import numpy as np
import torchvision
import torchvision.transforms as transforms
import os
import argparse
import pandas as pd
import csv
import time
from torchvision.utils import save_image
from timm.models import create_model
from models import *
from models.vit import ViT
from utils import progress_bar
from models.convmixer import ConvMixer
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.data import Mixup
from dataset import my_Cifar10
from distillation_loss import DistillationLoss
# from models.CIFAR10.custom_models_cifar10 import resnet50
import pdb
# parsers
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=1e-3, type=float, help='learning rate') # resnets.. 1e-3, Vit..1e-4?
parser.add_argument('--opt', default="adam")
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
parser.add_argument('--aug', action='store_true', help='use randomaug') # store_true : False
parser.add_argument('--amp', action='store_true', help='enable AMP training')
# parser.add_argument('--mixup', action='store_true', help='add mixup augumentations')
parser.add_argument('--net', type=str, default='vit')
parser.add_argument('--bs', type=int, default='256')
parser.add_argument('--size', type=int, default="32")
parser.add_argument('--classes', type=int, default="10")
parser.add_argument('--hidden_dim', type=int, default="512")
parser.add_argument('--encoder_blocks', type=int, default="6")
parser.add_argument('--mha_head_cnt', type=int, default="8")
parser.add_argument('--n_epochs', type=int, default='50')
parser.add_argument('--patch', default='4', type=int)
parser.add_argument('--convkernel', default='8', type=int)
parser.add_argument('--cos', action='store_false', help='Train with cosine annealing scheduling')
# Distillation parameters
parser.add_argument('--teacher-model', default='regnety_160', type=str, metavar='MODEL',
help='Name of teacher model to train (default: "regnety_160"')
parser.add_argument('--teacher-path', type=str, default='')
parser.add_argument('--distillation-type', default='hard', choices=['none', 'soft', 'hard'], type=str, help="")
parser.add_argument('--distillation-alpha', default=0.5, type=float, help="")
parser.add_argument('--distillation-tau', default=1.0, type=float, help="")
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
parser.add_argument('--cutmix', type=float, default=1.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# smooding
parser.add_argument('--smoothing', action='store_true', help='use smoothing')
# check quantization, not implemented for ViT
parser.add_argument('--is_quant', type=int, default=0, help='0: no quant or 1: quant')
# parser.add_argument('--dataset', default="cifar10")
args = parser.parse_args()
# Use wandb for visualize & debug
# User guide(Korean): https://greeksharifa.github.io/references/2020/06/10/wandb-usage/
# take in args
import wandb
watermark = "{}_lr{}".format(args.net, args.lr)
if args.amp:
watermark += "_useamp"
wandb.init(project="cifar10-challange",
name=watermark)
wandb.config.update(args)
# Use albumentations for image augmentations
# User guide(Korean): https://hoya012.github.io/blog/albumentation_tutorial/
print('aug: ', args.aug)
if args.aug:
import albumentations
bs = int(args.bs)
imsize = int(args.size)
use_amp = args.amp
if args.net=="vit_timm_large":
size = 384
elif args.net=="vit_timm_small" or args.net=="vit_timm_base":
size = 224
else:
size = imsize
# Load dataset
train_dataset, test_dataset, train_dataloader, test_dataloader = my_Cifar10(imageSize=size, aug=args.aug)
print('train_dataset', len(train_dataset))
print('test_dataset', len(test_dataset))
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# Check sample image
dataiter = iter(train_dataloader)
images, labels = dataiter.next()
print(images.shape)
img1 = images[0]
print('label', classes[labels[0]])
save_image(img1, "./visualize/cifar10_sample1_{}.png".format(classes[labels[0]]))
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
# Model
print('==> Building model..')
# net = VGG('VGG19')
if args.net=='res18':
net = ResNet18()
elif args.net=='vgg':
net = VGG('VGG19')
elif args.net=='res34':
net = ResNet34()
elif args.net=='res50':
net = ResNet50()
elif args.net=='res101':
net = ResNet101()
elif args.net=="convmixer":
# from paper, accuracy >96%. you can tune the depth and dim to scale accuracy and speed.
net = ConvMixer(256, 16, kernel_size=args.convkernel, patch_size=1, n_classes=10)
elif args.net=="vit":
# ViT for cifar10
net = ViT(
image_size = args.size,
patch_size = args.patch,
num_classes = args.classes,
dim = args.hidden_dim,
depth = args.encoder_blocks,
heads = args.mha_head_cnt,
mlp_dim = args.hidden_dim,
dropout = 0.1,
emb_dropout = 0.1,
distilled = False,
# teacher_model=None,
)
elif args.net=="deit":
# DeiT for cifar10
# load teacher model
teacher_model = ResNet50()
teacher_checkpoint = torch.load("checkpoint/res50-4-ckpt.t7")
teacher_model.load_state_dict(teacher_checkpoint['model'])
teacher_model.to(device)
teacher_model.eval()
# import timm
# teacher_model = None
# if args.distillation_type != 'none':
# assert args.teacher_path, 'need to specify teacher-path when using distillation'
# print(f"Creating teacher model: {args.teacher_model}")
# teacher_model = create_model(
# args.teacher_model,
# pretrained=False,
# num_classes=args.classes,
# global_pool='avg',
# )
# if args.teacher_path.startswith('https'):
# checkpoint = torch.hub.load_state_dict_from_url(
# args.teacher_path, map_location='cpu', check_hash=True)
# else:
# checkpoint = torch.load(args.teacher_path, map_location='cpu')
# teacher_model.load_state_dict(checkpoint['model'])
# teacher_model.to(device)
# teacher_model.eval()
net = ViT(
image_size = args.size,
patch_size = args.patch,
num_classes = args.classes,
dim = args.hidden_dim,
depth = args.encoder_blocks,
heads = args.mha_head_cnt,
mlp_dim = args.hidden_dim,
dropout = 0.1,
emb_dropout = 0.1,
distilled = True,
)
elif args.net=="vit_timm_large" or args.net=="vit_timm_base" or args.net=="vit_timm_small":
import timm
print("Available Vision Transformer Models: ")
print(timm.list_models("vit*"))
if args.net=="vit_timm_base":
net = timm.create_model("vit_base_patch16_224", pretrained=True)
elif args.net=="vit_timm_small":
net = timm.create_model("vit_small_patch16_224", pretrained=True)
elif args.net=="vit_timm_large":
net = timm.create_model("vit_large_patch16_384", pretrained=True)
net.head = nn.Linear(net.head.in_features, 10)
# # fix the seed for reproducibility
# seed = args.seed + utils.get_rank()
# torch.manual_seed(seed)
# np.random.seed(seed)
# # random.seed(seed)
best_acc = 0 # best test accuracy
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
net = net.to(device)
# if device == 'cuda':
# net = nn.DataParallel(net) # make parallel
# cudnn.benchmark = True
print('resume: ', args.resume)
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
checkpoint = torch.load('./checkpoint/{}-ckpt.t7'.format(args.net))
net.load_state_dict(checkpoint['net'])
best_acc = checkpoint['acc']
start_epoch = checkpoint['epoch']
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
mixup_fn = Mixup(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.classes)
# Loss is CE
# if args.net!="deit":
# criterion = nn.CrossEntropyLoss()
# else:
# if mixup_active:
# # smoothing is handled with mixup label transform
# criterion = SoftTargetCrossEntropy()
# elif args.smoothing:
# criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
# else:
# criterion = nn.CrossEntropyLoss()
criterion = nn.CrossEntropyLoss()
if args.net=="deit":
# wrap the criterion in our custom DistillationLoss, which
# just dispatches to the original criterion if args.distillation_type is 'none'
criterion = DistillationLoss(
criterion, teacher_model, args.distillation_type, args.distillation_alpha, args.distillation_tau
)
if args.opt == "adam":
optimizer = optim.Adam(net.parameters(), lr=args.lr)
elif args.opt == "sgd":
optimizer = optim.SGD(net.parameters(), lr=args.lr)
# use cosine or reduce LR on Plateau scheduling
if not args.cos:
from torch.optim import lr_scheduler
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=3, verbose=True, min_lr=1e-3*1e-5, factor=0.1)
else:
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.n_epochs)
if args.cos:
wandb.config.scheduler = "cosine"
else:
wandb.config.scheduler = "ReduceLROnPlateau"
##### Training
scaler = torch.cuda.amp.GradScaler(enabled=use_amp)
##### Validation
list_loss = []
list_acc = []
wandb.watch(net)
for epoch in range(start_epoch, args.n_epochs):
start = time.time()
trainloss = train(epoch)
val_loss, acc = test(epoch)
if args.cos:
scheduler.step(epoch-1)
list_loss.append(val_loss)
list_acc.append(acc)
# Log training..
wandb.log({'epoch': epoch, 'train_loss': trainloss, 'val_loss': val_loss, "val_acc": acc, "lr": optimizer.param_groups[0]["lr"],
"epoch_time": time.time()-start})
# Write out csv..
with open(f'log/log_{args.net}_patch{args.patch}.csv', 'w') as f:
writer = csv.writer(f, lineterminator='\n')
writer.writerow(list_loss)
writer.writerow(list_acc)
print(list_loss)
# writeout wandb
wandb.save("wandb_{}.h5".format(args.net)) | [
2,
20984,
44380,
198,
2,
3740,
1378,
12567,
13,
785,
14,
74,
298,
283,
726,
2857,
14,
10178,
12,
35636,
364,
12,
66,
361,
283,
940,
198,
2,
3740,
1378,
12567,
13,
785,
14,
6732,
1817,
1073,
50,
8770,
952,
57,
7211,
488,
5362,
14... | 2.516402 | 4,603 |
"""
Goes over a directory, looks for all matching filenames (csv's), picks one row and writes them to another file.
"""
import re, sys, os
import polygon2cog as p2c
import validation as v
if __name__ == "__main__":
# line index we want to collect
TARGET_LINE = 0
# priority list of file keys, the first one found is taken
file_keys = ["cluster_cogs", "controlPoints"]
regex_str = "_(?P<frameIndex>\d+)\.csv"
if len(sys.argv) >= 3:
src_dir = os.path.abspath(sys.argv[1])
out_dir = os.path.abspath(sys.argv[2])
if len(sys.argv) < 3 or not os.path.isdir(src_dir):
print('Usage: \n\
Argument 1: source directory with files to aggregate\n\
Argument 2: output directory')
frame_indices = [[] for i in range(len(file_keys))]
file_key = "notFound"
# search for file key
for i in range(len(file_keys)):
file_key_t = file_keys[i]
frame_indices[i] = p2c.findFrameIndices(re.compile(file_key_t + regex_str), 'frameIndex', src_dir)
file_key = file_keys[i]
# choose the first non-empty one
for i in range(len(frame_indices)):
if len(frame_indices[i]) > 0:
frame_indices = frame_indices[i]
file_key = file_keys[i]
break
frame_indices = sorted(frame_indices)
agg = []
for f in frame_indices:
path = os.path.join(src_dir, file_key + "_" + str(f) + ".csv")
lines = v.read_controlpoints(path)
row = lines[TARGET_LINE].T.tolist()[0]
row.insert(0, f)
agg.append(row)
print(agg)
if not os.path.isdir(out_dir):
os.mkdir(out_dir)
out_file = os.path.join(out_dir, 'aggregated.csv')
p2c.write_csv(out_file, agg)
| [
37811,
198,
38,
3028,
625,
257,
8619,
11,
3073,
329,
477,
12336,
1226,
268,
1047,
357,
40664,
338,
828,
11103,
530,
5752,
290,
6797,
606,
284,
1194,
2393,
13,
198,
37811,
198,
11748,
302,
11,
25064,
11,
28686,
198,
11748,
7514,
14520,... | 2.180348 | 804 |
from a_top_k import *
from b_top_k import *
import sys
import time
if __name__ == "__main__":
main()
| [
6738,
257,
62,
4852,
62,
74,
1330,
1635,
198,
6738,
275,
62,
4852,
62,
74,
1330,
1635,
198,
11748,
25064,
198,
11748,
640,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
198
] | 2.511628 | 43 |
import fishtank
from machine import Pin, I2C
import machine
import time
import ssd1306
from adafruit_mqtt import AdafruitMQTTClient
import passwords
try:
i2c = I2C(-1, scl=Pin(22), sda=Pin(21))
oled_width = 128
oled_height = 64
oled = ssd1306.SSD1306_I2C(oled_width, oled_height, i2c)
sensor = fishtank.FishtankSensor(4)
mqtt_client = AdafruitMQTTClient(passwords.adafruit_io_url,
passwords.adafruit_io_username,
passwords.adafruit_io_key)
webserver = fishtank.FishtankWebserver(sensor, oled, mqtt_client=mqtt_client)
webserver.start()
except Exception as e:
print("something has gone wrong %s\nrebooting in 30 seconds" % str(e))
time.sleep(30)
machine.reset()
| [
11748,
5916,
28451,
198,
6738,
4572,
1330,
13727,
11,
314,
17,
34,
198,
11748,
4572,
198,
11748,
640,
198,
11748,
264,
21282,
12952,
21,
198,
6738,
512,
1878,
4872,
62,
76,
80,
926,
1330,
1215,
1878,
4872,
49215,
15751,
11792,
198,
11... | 2.18232 | 362 |
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team and Jangwon Park
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Optional, Union
import torch
import numpy as np
from transformers import (
BasicTokenizer,
PreTrainedTokenizer,
Pipeline,
ModelCard,
is_tf_available,
is_torch_available
)
from transformers.pipelines import ArgumentHandler
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.getLogger(__name__)
class NerPipeline(Pipeline):
"""
Named Entity Recognition pipeline using ModelForTokenClassification head. See the
`named entity recognition usage <../usage.html#named-entity-recognition>`__ examples for more information.
This token recognition pipeline can currently be loaded from the :func:`~transformers.pipeline` method using
the following task identifier(s):
- "ner", for predicting the classes of tokens in a sequence: person, organisation, location or miscellaneous.
The models that this pipeline can use are models that have been fine-tuned on a token classification task.
See the list of available community models fine-tuned on such a task on
`huggingface.co/models <https://huggingface.co/models?search=&filter=token-classification>`__.
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to :obj:`-1`):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
on the associated CUDA device id.
"""
default_input_names = "sequences"
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
2864,
383,
12905,
2667,
32388,
3457,
13,
1074,
290,
449,
648,
26502,
3250,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
34... | 3.084746 | 1,121 |