blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id
7c20193a4a0fa3353bee23294fb620c713f1af01 | 0165bcf6a864351cbe5a339389262179f2095890 | /poemarket/node_modules/mongoose/node_modules/bson/node_modules/bson-ext/build/config.gypi | b46548f2e6034e6533e9042f855dd1a7bb6ef649 | ["Apache-2.0", "MIT"] | permissive | RaphaelHub/WebServices | 5b655fec0727ee7a5cfd748476fd03b2f866a412 | 6d26809d4d985b4bb8a439c2ab841ee9a4c323b2 | refs/heads/master | 2021-01-10T16:12:05.873749 | 2016-02-01T19:55:02 | 2016-02-01T19:55:02 | 44,159,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,819 | gypi |
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 1,
"host_arch": "x64",
"icu_small": "false",
"node_install_npm": "false",
"node_prefix": "/usr/local/Cellar/node/0.12.0",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_mdb": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_no_asm": 0,
"python": "/usr/local/opt/python/bin/python2.7",
"target_arch": "x64",
"uv_library": "static_library",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"nodedir": "/Users/bernhardfritz/.node-gyp/0.12.0",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"fallback_to_build": "true",
"module": "/Users/bernhardfritz/Documents/poemarket/node_modules/mongoose/node_modules/bson/node_modules/bson-ext/build/Release/bson.node",
"module_name": "bson",
"module_path": "/Users/bernhardfritz/Documents/poemarket/node_modules/mongoose/node_modules/bson/node_modules/bson-ext/build/Release",
"save_dev": "",
"browser": "",
"viewer": "man",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"shell": "/bin/bash",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"fetch_retries": "2",
"npat": "",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"spin": "true",
"cache_lock_retries": "10",
"cafile": "",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"access": "",
"json": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/bernhardfritz/.npm-init.js",
"userconfig": "/Users/bernhardfritz/.npmrc",
"node_version": "0.12.0",
"user": "501",
"editor": "vi",
"save": "",
"tag": "latest",
"global": "",
"optional": "true",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "Infinity",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"strict_ssl": "true",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/Users/bernhardfritz/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "npm/2.5.1 node/v0.12.0 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"init_version": "1.0.0",
"umask": "0022",
"git": "git",
"init_author_name": "",
"scope": "",
"onload_script": "",
"tmp": "/var/folders/8k/3y9_8gks17j02bb65r85x2f00000gn/T",
"unsafe_perm": "true",
"prefix": "/usr/local",
"link": ""
}
}
 | ["bernhard.e.fritz@gmail.com"] | bernhard.e.fritz@gmail.com |
1ba786707835a26c199af8b1c77a727eb200dbd1 | 6e9a9c0b8c5d28fdc2f1ad74242f47b8729741c9 | /TCN/tcnn.py | 77b041bbdf19003c28e052c5a94f37a31b30068d | ["MIT"] | permissive | h3dema/deepwifi | 8567d48e313f344c4432873db4dfd2ecaf026168 | 2df0f5b4de7c9cb4c1d26e5629fa689e2a6036e4 | refs/heads/master | 2020-06-20T07:24:44.113125 | 2019-11-13T21:51:33 | 2019-11-13T21:51:33 | 197,040,858 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,416 | py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module implements Temporal Convolutional Network
Making the TCN architecture non-causal allows it to take the future into consideration when making its prediction.
However, it is then no longer suitable for real-time applications.
To use a non-causal TCN, specify padding='valid' or padding='same' when initializing the TCN layers.
code based on:
* https://github.com/philipperemy/keras-tcn
* https://github.com/locuslab/TCN/
ref.:
* BAI, Shaojie; KOLTER, J. Zico; KOLTUN, Vladlen.
An empirical evaluation of generic convolutional and recurrent networks for sequence modeling.
arXiv preprint arXiv:1803.01271, 2018.
https://arxiv.org/pdf/1803.01271
* OORD, Aaron van den et al.
Wavenet: A generative model for raw audio. arXiv preprint arXiv:1609.03499, 2016.
https://arxiv.org/pdf/1609.03499.pdf
"""
# from typing import List
# from typing import Tuple
import logging
import keras.backend as K
import keras.layers
from keras import optimizers
# from keras.engine.base_layer import Layer
from keras.layers import Activation, Lambda
from keras.layers import Conv1D, SpatialDropout1D
from keras.layers import Dense, BatchNormalization
from keras.models import Input, Model
LOG = logging.getLogger('TCNN')
LOG.setLevel(logging.DEBUG)
def residual_block(x, dilation_rate,
nb_filters, kernel_size, padding,
dropout_rate=0,
activation='relu',
kernel_initializer='he_normal',
use_batch_norm=False):
    # type: (Layer, int, int, int, str, float, str, str, bool) -> Tuple[Layer, Layer]
"""Defines the residual block for the WaveNet TCN
:param x: The previous layer in the model
:param dilation_rate: The dilation power of 2 we are using for this residual block
:param nb_filters: The number of convolutional filters to use in this block
:param kernel_size: The size of the convolutional kernel
:param padding: The padding used in the convolutional layers, 'same' or 'causal'.
:param activation: The final activation used in o = Activation(x + F(x))
:param dropout_rate: Float between 0 and 1. Fraction of the input units to drop.
:param kernel_initializer: Initializer for the kernel weights matrix (Conv1D).
:param use_batch_norm: Whether to use batch normalization in the residual layers or not.
:return A tuple where the first element is the residual model layer, and the second
is the skip connection.
"""
prev_x = x
for k in range(2):
x = Conv1D(filters=nb_filters,
kernel_size=kernel_size,
dilation_rate=dilation_rate,
kernel_initializer=kernel_initializer,
padding=padding)(x)
if use_batch_norm:
# TODO:
# should be WeightNorm here, but using BatchNormalization instead
# check the original code in https://github.com/openai/weightnorm/tree/master
# but it works with Keras 1.x
# a ported version to Keras 2.x can be found in
# https://github.com/krasserm/weightnorm/tree/master/keras_2
# and it is also downloaded in the current TCN folder
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = SpatialDropout1D(rate=dropout_rate)(x)
# 1x1 conv to match the shapes (channel dimension).
prev_x = Conv1D(nb_filters, 1, padding='same')(prev_x)
res_x = keras.layers.add([prev_x, x])
res_x = Activation(activation)(res_x)
return res_x, x
def process_dilations(dilations):
def is_power_of_two(num):
return num != 0 and ((num & (num - 1)) == 0)
if all([is_power_of_two(i) for i in dilations]):
return dilations
else:
new_dilations = [2 ** i for i in dilations]
return new_dilations
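# Illustrative note (examples assumed, not from the repo): process_dilations
# leaves a power-of-two list untouched and exponentiates anything else, e.g.
#   process_dilations([1, 2, 4, 8])  ->  [1, 2, 4, 8]
#   process_dilations([1, 2, 3])     ->  [2, 4, 8]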
class TCN:
"""Creates a TCN layer.
Input shape:
A tensor of shape (batch_size, timesteps, input_dim).
Args:
nb_filters: The number of filters to use in the convolutional layers.
kernel_size: The size of the kernel to use in each convolutional layer.
dilations: The list of the dilations. Example is: [1, 2, 4, 8, 16, 32, 64].
nb_stacks : The number of stacks of residual blocks to use.
padding: The padding to use in the convolutional layers, 'causal' or 'same'.
use_skip_connections: Boolean. If we want to add skip connections from input to each residual block.
return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence.
activation: The activation used in the residual blocks o = Activation(x + F(x)).
dropout_rate: Float between 0 and 1. Fraction of the input units to drop.
name: Name of the model. Useful when having multiple TCN.
kernel_initializer: Initializer for the kernel weights matrix (Conv1D).
use_batch_norm: Whether to use batch normalization in the residual layers or not.
Returns:
A TCN layer.
"""
def __init__(self,
nb_filters=64,
kernel_size=2,
nb_stacks=1,
dilations=[1, 2, 4, 8, 16, 32],
padding='causal',
use_skip_connections=True,
dropout_rate=0.0,
return_sequences=False,
activation='linear',
name='tcn',
kernel_initializer='he_normal',
use_batch_norm=False):
self.name = name
self.return_sequences = return_sequences
self.dropout_rate = dropout_rate
self.use_skip_connections = use_skip_connections
self.dilations = dilations
self.nb_stacks = nb_stacks
self.kernel_size = kernel_size
self.nb_filters = nb_filters
self.activation = activation
self.padding = padding
self.kernel_initializer = kernel_initializer
self.use_batch_norm = use_batch_norm
if padding != 'causal' and padding != 'same':
raise ValueError("Only 'causal' or 'same' padding are compatible for this layer.")
if not isinstance(nb_filters, int):
LOG.info('An interface change occurred after the version 2.1.2.')
LOG.info('Before: tcn.TCN(x, return_sequences=False, ...)')
LOG.info('Now should be: tcn.TCN(return_sequences=False, ...)(x)')
LOG.info('The alternative is to downgrade to 2.1.2 (pip install keras-tcn==2.1.2).')
raise Exception()
def __call__(self, inputs):
x = inputs
# 1D FCN.
x = Conv1D(self.nb_filters, 1, padding=self.padding, kernel_initializer=self.kernel_initializer)(x)
skip_connections = []
for s in range(self.nb_stacks):
for d in self.dilations:
x, skip_out = residual_block(x,
dilation_rate=d,
nb_filters=self.nb_filters,
kernel_size=self.kernel_size,
padding=self.padding,
activation=self.activation,
dropout_rate=self.dropout_rate,
kernel_initializer=self.kernel_initializer,
use_batch_norm=self.use_batch_norm)
skip_connections.append(skip_out)
if self.use_skip_connections:
x = keras.layers.add(skip_connections)
if not self.return_sequences:
x = Lambda(lambda tt: tt[:, -1, :])(x)
return x
def get_opt(opt, lr, decay=0.0):
"""
Args:
opt: Optimizer name.
lr: Learning rate.
decay: Learning rate decay over each update.
"""
assert opt in ['adam', 'rmsprop', 'nadam'], '{} is not a valid optimizer'.format(opt)
if opt == 'adam':
return optimizers.Adam(lr=lr, clipnorm=1.0, decay=decay)
elif opt == 'rmsprop':
return optimizers.RMSprop(lr=lr, clipnorm=1.0, decay=decay)
elif opt == 'nadam':
return optimizers.Nadam(lr=lr, clipnorm=1.0, decay=decay)
else:
raise Exception('Only Adam, Nadam and RMSProp are available here')
# https://github.com/keras-team/keras/pull/11373
# It's now in Keras@master but still not available with pip.
# TODO remove later.
def accuracy(y_true, y_pred):
# reshape in case it's in shape (num_samples, 1) instead of (num_samples,)
if K.ndim(y_true) == K.ndim(y_pred):
y_true = K.squeeze(y_true, -1)
# convert dense predictions to labels
y_pred_labels = K.argmax(y_pred, axis=-1)
y_pred_labels = K.cast(y_pred_labels, K.floatx())
return K.cast(K.equal(y_true, y_pred_labels), K.floatx())
def compiled_tcn(num_feat, # type: int
num_classes, # type: int
nb_filters, # type: int
kernel_size, # type: int
dilations, # type: List[int]
nb_stacks, # type: int
max_len, # type: int
padding='causal', # type: str
use_skip_connections=True, # type: bool
return_sequences=True,
regression=False, # type: bool
dropout_rate=0.05, # type: float
name='tcn', # type: str,
kernel_initializer='he_normal', # type: str,
activation='linear', # type:str,
opt='adam',
lr=0.002,
decay=0.0,
use_batch_norm=False,
):
# type: (...) -> keras.Model
"""Creates a compiled TCN model for a given task (i.e. regression or classification).
Classification uses a sparse categorical loss. Please input class ids and not one-hot encodings.
Args:
num_feat: The number of features of your input, i.e. the last dimension of: (batch_size, timesteps, input_dim).
num_classes: The size of the final dense layer, how many classes (or values) we are predicting.
nb_filters: The number of filters to use in the convolutional layers.
kernel_size: The size of the kernel to use in each convolutional layer.
dilations: The list of the dilations. Example is: [1, 2, 4, 8, 16, 32, 64].
nb_stacks : The number of stacks of residual blocks to use.
max_len: The maximum sequence length, use None if the sequence length is dynamic.
padding: The padding to use in the convolutional layers.
use_skip_connections: Boolean. If we want to add skip connections from input to each residual block.
return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence.
regression: Whether the output should be continuous or discrete.
dropout_rate: Float between 0 and 1. Fraction of the input units to drop.
activation: The activation used in the residual blocks o = Activation(x + F(x)).
name: Name of the model. Useful when having multiple TCN.
kernel_initializer: Initializer for the kernel weights matrix (Conv1D).
opt: Optimizer name.
lr: Learning rate.
decay: Learning rate decay over each update.
use_batch_norm: Whether to use batch normalization in the residual layers or not.
Returns:
A compiled keras TCN.
"""
LOG.debug('num_feat={} num_classes={} nb_filters={} kernel_size={}'.format(num_feat, num_classes, nb_filters, kernel_size))
LOG.debug('nb_stacks={} max_len={} padding={}'.format(nb_stacks, max_len, padding))
LOG.debug('use_skip_connections={} return_sequences={} regression={}'.format(use_skip_connections, return_sequences, regression))
dilations = process_dilations(dilations)
input_layer = Input(shape=(max_len, num_feat))
LOG.debug('input_layer.shape={}'.format(input_layer.shape))
x = TCN(nb_filters, kernel_size, nb_stacks, dilations, padding,
use_skip_connections, dropout_rate, return_sequences,
activation, name, kernel_initializer, use_batch_norm)(input_layer)
LOG.debug('x.shape={}'.format(x.shape))
# obtain the optimizer object from Keras
optimizer = get_opt(opt, lr, decay)
# create regression or classification
if regression:
# regression
x = Dense(num_classes)(x)
x = Activation('linear')(x)
output_layer = x
model = Model(input_layer, output_layer)
model.compile(optimizer, loss='mean_squared_error')
else:
# classification
x = Dense(num_classes)(x)
x = Activation('softmax')(x)
output_layer = x
model = Model(input_layer, output_layer)
model.compile(optimizer, loss='sparse_categorical_crossentropy', metrics=[accuracy])
LOG.debug('model.x = {}'.format(input_layer.shape))
LOG.debug('model.y = {}'.format(output_layer.shape))
model.summary(print_fn=LOG.info)
LOG.debug('model.loss {}'.format(model.loss))
LOG.debug('opt.config {}'.format(model.optimizer.get_config()))
return model
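# Hedged usage sketch (an illustrative addition, not part of the upstream
# module): builds a small TCN classifier on dummy data. All shapes and
# hyperparameters below are arbitrary placeholders.
if __name__ == '__main__':
    import numpy as np
    demo_model = compiled_tcn(num_feat=1, num_classes=10, nb_filters=20,
                              kernel_size=6, dilations=[1, 2, 4, 8],
                              nb_stacks=1, max_len=100,
                              return_sequences=False)
    x_dummy = np.random.rand(4, 100, 1)              # (batch, timesteps, features)
    y_dummy = np.random.randint(0, 10, size=(4, 1))  # sparse class ids
    demo_model.train_on_batch(x_dummy, y_dummy)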
 | ["henriquemoura@hotmail.com"] | henriquemoura@hotmail.com |
e00c9ece66e8282d4197400082845d7a238a3b03 | f04dff97f154ca57ea4c94ada816e45ec19c41fd | /modul6/test2.py | fb98072d5265408c96ca1db39a58a55bb50a888b | [] | no_license | miriam1506/PEP21G02 | b049ee8c92fa99c97d8f955ddd565e60a82effa2 | a43b819b391a72dfaf23ff89c86f542472edff7d | refs/heads/master | 2023-06-08T11:17:05.539546 | 2021-06-26T14:49:44 | 2021-06-26T14:49:44 | 357,973,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,643 | py |
"""
A factory needs an iterable object to keep track of each employee's working schedule for each day.
Each employee has a string name and an object of type datetime that indicates when the employee started work.
Iterating the object will return a tuple with the name and the time the employee entered the factory.
1) 40p: Definition
a) 10p: Class with constructor that receives the date in the format you desire (representing the day)
b) 10p: Create method to add worker information when he/she enters the factory
- if worker is already in the factory a custom exception inheriting ValueError (exception: WorkStartError)
will be raised with message indicating employee name and current time
c) 10p: Create method to remove worker information when he/she leaves the factory
- if worker is not in the factory a custom exception inheriting ValueError (exception: WorkEndError)
will be raised with message indicating employee name and current time
c) 10p: Iterating the object will return tuple with name and time employee entered the factory
2) 40p: Execution:
a) 10p: Create instance of class with date format you selected.
b) 10p: Add the following employees with time of arrival:
- Joe: 09:01:20
- Ana: 09:03:15
- Tim: 09:04:25
- Tim: 09:04:30 - treat this exception
c) 10p: Remove the following employees:
- Joe
- Ana
- Tim
- Tim - treat this exception
d) 10p: Iterate the created object and save each value on a new line in a file
3) 20p: Documenting:
a) 5p: type hints for all arguments (optional for returned values)
a) 5p: module documentation
b) 5p: class documentation for all classes
c) 5p: method documentation for all methods
"""
from datetime import datetime
class WorkStartError(ValueError):
pass
class WorkEndError(ValueError):
pass
class TimeIter:
"""Iterator for working hours by name"""
def __init__(self, working_time: list):
self.working_time = working_time
def __iter__(self):
return self
def __next__(self):
if not self.working_time:
raise StopIteration
else:
return self.working_time.pop(0)
class TimeKeeper:
    """Keeps track of entering hours for employees"""
    def __init__(self, date: tuple):
        self.date = date
        self.ledger = {}  # per-instance ledger, so separate days don't share state
    def __iter__(self):
        entries = []
        for name, start in self.ledger.items():
            entries.append((name, start))
        return TimeIter(entries)
    def start_work(self, name: str, start: tuple):
        """add start work time; raise if the worker is already in the factory"""
        if self.ledger.get(name, None):
            raise WorkStartError(f'{name} already started work, current time: {datetime.now()}')
        self.ledger[name] = datetime(*self.date, *start)
    def remove_from_factory(self, name: str):
        """remove worker from the factory; raise if he/she is not inside"""
        if self.ledger.get(name) is None:
            raise WorkEndError(f'{name} is not in the factory, current time: {datetime.now()}')
        self.ledger.pop(name)
time = TimeKeeper((2021, 5, 6))
time.start_work('Joe', (9, 1, 20))
time.start_work('Ana', (9, 3, 15))
time.start_work('Tim', (9, 4, 25))
try:
time.start_work('Tim', (9, 4, 30))
except WorkStartError as e:
    print(e, '- handled WorkStartError')
time.remove_from_factory('Joe')
time.remove_from_factory('Ana')
time.remove_from_factory('Tim')
try:
time.remove_from_factory('Tim')
except WorkEndError as e:
    print(e, '- handled WorkEndError')
with open('timer.log', 'w') as file:
for date in time :
file.write(f'{date[0]}: {date[1]}\n')
 | ["miriam152000\"yahoo.com"] | miriam152000"yahoo.com |
2d249ca82fe60e427a1fdf541d6ceb66cbdf9509 | b97609a5f9525c4b248dca25e51fa50ca257973a | /scraper/linkedin_jobs_scraper.py | bc81d23a54d793aedf4e3dc44b770e04cc6fd0bd | [] | no_license | ahmadabdullah247/linkedin_analytics | 485cf82cbc58f879f6f46de58616c6d950f5b3ce | 4e11621ba89e0791d8f4b0bb150746af612fcf51 | refs/heads/main | 2023-03-13T15:57:03.216737 | 2021-03-15T16:52:29 | 2021-03-15T16:52:29 | 342,906,010 | 0 | 17 | null | null | null | null | UTF-8 | Python | false | false | 5,573 | py |
import requests
import urllib.request
import time
from bs4 import BeautifulSoup
import pandas as pd
from scraper.utils.helpers import *
from urllib.parse import urlencode, quote_plus
class LinkedInJobsScraper:
def __init__(self, num_jobs, query, config_path=None):
self.num_jobs = num_jobs
self.query = query
self.job_ids = [] ## list for holding per page job ids
self.scraper_config, self.credentials = read_config(config_path) ## loading configuration
self.scraper_logger = get_logger() ## get logger for logging system state
## connecting to mongo db cloud
self.mongo_collection = get_mongo_client(self.scraper_config, self.credentials)
#self.es_client = Elasticsearch(hosts=self.scraper_config['es_host'])
def search_jobs_ids(self, search_term):
for i in range(self.scraper_config['total_search_pages']):
# Set the URL you want to webscrape from
url = self.scraper_config['search_url'].format(search_term,i)
self.scraper_logger.info('Searching jobs in page {}/{}'.format(i+1, self.scraper_config['total_search_pages']))
# Connect to the URL
response = requests.get(url)
# Parse HTML and save to BeautifulSoup object
soup = BeautifulSoup(response.text, "html.parser")
self.scraper_logger.info('Extracting Job Ids from the page')
## extract job ids from the selected page
self.extract_job_ids(soup)
if len(self.job_ids) > 0:
self.scraper_logger.info('Found {} new jobs'.format(len(self.job_ids)))
self.fetch_job_info()
else:
self.scraper_logger.info('Found {} new jobs'.format(len(self.job_ids)))
def extract_job_ids(self, soup):
jobs = soup.findAll(self.scraper_config['job_title_element'],
attrs={"class":self.scraper_config['job_title_element_class']})
## iterating over job elements to extract job ids
for job in jobs:
self.job_ids.append('{}'.format(job[self.scraper_config['job_id_element_identifier']]))
def get_job_data(self, job_id):
url = self.scraper_config['li_jobs_api'].format(job_id)
# Connect to the URL
response = requests.get(url)
# Parse HTML and save to BeautifulSoup object
soup = BeautifulSoup(response.text, "html.parser")
job_info = {}
## find jd section
job_info['_id'] = job_id
if soup.find("h2",attrs={"class":self.scraper_config['job_title_class']}):
job_info['job_title'] = soup.find("h2",attrs={"class":self.scraper_config['job_title_class']}).text
else:
job_info['job_title'] = '<NOT_GIVEN>'
if soup.find("section",attrs={"class":"description"}):
job_info['description'] = soup.find("section",attrs={"class":"description"}).text
else:
job_info['description'] = '<NOT_GIVEN>'
if soup.find("span",attrs={"class":self.scraper_config['job_location_class']}):
job_info['location'] = soup.find("span",attrs={"class":self.scraper_config['job_location_class']}).text
else:
job_info['location'] = '<NOT_GIVEN>'
if soup.find("a",attrs={"class":self.scraper_config['employer_name_class']}):
job_info['employer_name'] = soup.find("a",attrs={"class":self.scraper_config['employer_name_class']}).text
else:
job_info['employer_name'] = '<NOT_GIVEN>'
if soup.find("span",attrs={"class":self.scraper_config['job_date_class']}):
job_info['date_posted'] = rel_time_to_absolute_datetime(soup.find("span",attrs={"class":self.scraper_config['job_date_class']}).text)
else:
job_info['date_posted'] = '<NOT_GIVEN>'
job_meta_ul = soup.find("ul",attrs={"class": self.scraper_config['job_meta_info_class'] })
if soup.find("span",attrs={"class": self.scraper_config['n_applicants_class'] }):
job_info['n_applicants'] = int(soup.find("span",attrs={"class": self.scraper_config['n_applicants_class'] }).text.split(' ')[0])
else:
job_info['n_applicants'] = 0
if job_meta_ul:
for item in job_meta_ul.findAll('li'):
key = item.find('h3').text.lower()
for index, meta_data in enumerate(item.findAll('span')):
if meta_data.text:
job_info['{}_{}'.format(key, index)] = meta_data.text
return job_info
def fetch_job_info(self):
total_jobs = len(self.job_ids)
while (len(self.job_ids)>0): ## iterate until no jobs left
self.scraper_logger.info('Fetching data for JOB[{}/{}]'.format((total_jobs - len(self.job_ids)), total_jobs))
job_id = self.job_ids.pop() ## get last job in queue
job_info = self.get_job_data(job_id)
if job_info:
## TODO: update status and dump to ES
self.scraper_logger.info('dumping to mongo')
#write_to_es(self.scraper_config['es_index'], job_info, self.es_client)
response = write_to_mongo(self.mongo_collection, job_info)
self.scraper_logger.info('[MongoDB] for new row insert: {}'.format(response))
            time.sleep(1)  ## sleep for 1 second
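# Hedged usage sketch (an illustrative addition, not part of the original
# file): the search term and config path are hypothetical placeholders, and
# running it requires the repo's config file plus MongoDB credentials.
if __name__ == '__main__':
    scraper = LinkedInJobsScraper(num_jobs=100, query='data engineer',
                                  config_path='config/scraper.yml')  # hypothetical path
    scraper.search_jobs_ids(scraper.query)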
 | ["sk28671@gmail.com"] | sk28671@gmail.com |
40bc9517c3d63c2e2959e5474bf675be29055349 | 3f039fdc60cf44aba4d0c490c70e8e7c13d7c237 | /demo_day5/args and kwargs.py | d1f012ab8d7a09c1546aaec76f20594a96bfd07c | [] | no_license | ashutoshgoy/s1_project | ad57498f0d4b21d2182e459b40f15018096edebb | 85d2cb0023713e4c3d9ee965ead98ee942bdb4bc | refs/heads/master | 2023-07-09T23:47:48.692843 | 2021-08-16T07:38:35 | 2021-08-16T07:38:35 | 396,674,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py |
def myFun(arg1, **kwargs):
print(f"greeting message is {arg1} ")
for key, value in kwargs.items():
print("%s == %s" % (key, value))
# Driver code
myFun("Hi", first='Geeks', mid='for', last='Geeks') | [
"ashutoshgoyal46@gmail.com"
] | ashutoshgoyal46@gmail.com |
a2715be509d8f2b3a8228c75fd162f124338d2c8 | 124d53974ed1298e4167c24aa53b871bdd49ce08 | /tests/emukit/quadrature/test_integral_bounds.py | a2e7021f1a7b2bd0a1abca799e51423d4d6b44c9 | ["Apache-2.0"] | permissive | bouhlelma/emukit | 0d3b1d46617d8988a5548c22e14e6a2c6cc60d45 | 1cfcb852d2a3e0ca4360201d56c33e1c27dab148 | refs/heads/master | 2020-05-02T21:57:08.559639 | 2019-03-15T11:56:41 | 2019-03-15T11:56:41 | 178,236,169 | 1 | 0 | Apache-2.0 | 2019-03-28T15:53:36 | 2019-03-28T15:53:36 | null | UTF-8 | Python | false | false | 871 | py |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
from emukit.quadrature.kernels.integral_bounds import IntegralBounds
def test_integral_bounds_values():
bounds = [(-1, 1), (-2, 0)]
lower_bounds = np.array([[-1, -2]])
upper_bounds = np.array([[1, 0]])
bounds = IntegralBounds(name='test_name', bounds=bounds)
res = bounds.get_lower_and_upper_bounds()
assert len(res) == 2
assert np.all(res[0] == lower_bounds)
assert np.all(res[1] == upper_bounds)
assert len(bounds.convert_to_list_of_continuous_parameters()) == 2
assert bounds.name == 'test_name'
def test_integral_bounds_wrong_bounds():
bounds_wrong = [(-1, 1), (0, -2)]
with pytest.raises(ValueError):
IntegralBounds(name='test_name', bounds=bounds_wrong)
 | ["noreply@github.com"] | bouhlelma.noreply@github.com |
692e0d8a07c7975f6faa00bc7961ee7689d3ef3b | 784f861ca472f9e50700f9d5e13ac0b54b22d59c | /Population model/new_pop.py | 2f7c810ab5f6e996c9696d59410545309ace7415 | [] | no_license | garbagetimeisfine/RosalinFranklin | b86c9479d54a1d10dd1c7b60146edc49cf9e1a4c | fbd1ef23452cb49893ccd7b901f2fc87127ed625 | refs/heads/master | 2021-01-10T11:11:58.516149 | 2016-03-07T17:59:15 | 2016-03-07T17:59:15 | 52,109,230 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py |
""" Takes an old generation and makes a new one """
import scipy
def generation(population):
N = len(population)
new_generation = []
for i in range(N):
mom = scipy.random.randint(N)
dad = scipy.random.randint(N)
mom_chr = scipy.random.randint(2)
dad_chr = 1 - mom_chr
offspring = (population[mom][mom_chr],population[dad][dad_chr])
new_generation.append(offspring)
return new_generation
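# Hedged usage sketch (an assumed example, not in the original file): seed a
# toy population of allele pairs and advance it a few generations.
if __name__ == '__main__':
    population = [('A', 'a') for _ in range(10)]
    for _ in range(5):
        population = generation(population)
    print(population)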
 | ["p.belenky@gmail.com"] | p.belenky@gmail.com |
5ed90045c773f997ddbcd1162308f9d38f3620ad | 77bd7aad5e137092155d48f675081d99cddd5835 | /model.py | 8a37a25017d8fc8a2f476f74847056f334ee9c5d | [] | no_license | MobileRoboticistsW21/Mask_RCNN_with_Optical_Flow | 325d8f9dfb895be9bc4c9c181f321bcf852a9d17 | f343ad3ddeb8a71f9d8617727055b954a5a6ff6d | refs/heads/main | 2023-04-16T15:19:41.852604 | 2021-04-16T13:37:31 | 2021-04-16T13:37:31 | 352,096,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106,222 | py |
"""
Mask R-CNN
The main Mask R-CNN model implementation.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import os
import sys
import glob
import random
import math
import datetime
import itertools
import json
import re
import logging
from collections import OrderedDict
import numpy as np
import scipy.misc
import tensorflow as tf
import keras
import keras.backend as K
import keras.layers as KL
import keras.initializers as KI
import keras.engine as KE
import keras.models as KM
import utils
# Requires TensorFlow 1.3+ and Keras 2.0.8+.
from distutils.version import LooseVersion
assert LooseVersion(tf.__version__) >= LooseVersion("1.3")
assert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')
############################################################
# Utility Functions
############################################################
def log(text, array=None):
"""Prints a text message. And, optionally, if a Numpy array is provided it
prints it's shape, min, and max values.
"""
if array is not None:
text = text.ljust(25)
text += ("shape: {:20} min: {:10.5f} max: {:10.5f}".format(
str(array.shape),
array.min() if array.size else "",
array.max() if array.size else ""))
print(text)
class BatchNorm(KL.BatchNormalization):
"""Batch Normalization class. Subclasses the Keras BN class and
hardcodes training=False so the BN layer doesn't update
during training.
Batch normalization has a negative effect on training if batches are small
so we disable it here.
"""
def call(self, inputs, training=None):
return super(self.__class__, self).call(inputs, training=False)
############################################################
# Resnet Graph
############################################################
# Code adopted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
def identity_block(input_tensor, kernel_size, filters, stage, block,
use_bias=True):
"""The identity_block is the block that has no conv layer at shortcut
# Arguments
input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
use_bias=use_bias)(input_tensor)
x = BatchNorm(axis=3, name=bn_name_base + '2a')(x)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(axis=3, name=bn_name_base + '2b')(x)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
use_bias=use_bias)(x)
x = BatchNorm(axis=3, name=bn_name_base + '2c')(x)
x = KL.Add()([x, input_tensor])
x = KL.Activation('relu', name='res'+str(stage)+block+'_out')(x)
return x
def conv_block(input_tensor, kernel_size, filters, stage, block,
strides=(2, 2), use_bias=True):
"""conv_block is the block that has a conv layer at shortcut
# Arguments
input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
Note that from stage 3, the first conv layer at main path is with subsample=(2,2)
And the shortcut should have subsample=(2,2) as well
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,
name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)
x = BatchNorm(axis=3, name=bn_name_base + '2a')(x)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(axis=3, name=bn_name_base + '2b')(x)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', use_bias=use_bias)(x)
x = BatchNorm(axis=3, name=bn_name_base + '2c')(x)
shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,
name=conv_name_base + '1', use_bias=use_bias)(input_tensor)
shortcut = BatchNorm(axis=3, name=bn_name_base + '1')(shortcut)
x = KL.Add()([x, shortcut])
x = KL.Activation('relu', name='res'+str(stage)+block+'_out')(x)
return x
def resnet_graph(input_image, architecture, stage5=False):
assert architecture in ["resnet50", "resnet101"]
# Stage 1
x = KL.ZeroPadding2D((3, 3))(input_image)
x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
x = BatchNorm(axis=3, name='bn_conv1')(x)
x = KL.Activation('relu')(x)
C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
# Stage 2
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
# Stage 3
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
# Stage 4
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
block_count = {"resnet50": 5, "resnet101": 22}[architecture]
for i in range(block_count):
x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98+i))
C4 = x
# Stage 5
if stage5:
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
else:
C5 = None
return [C1, C2, C3, C4, C5]
############################################################
# Proposal Layer
############################################################
def apply_box_deltas_graph(boxes, deltas):
"""Applies the given deltas to the given boxes.
boxes: [N, 4] where each row is y1, x1, y2, x2
deltas: [N, 4] where each row is [dy, dx, log(dh), log(dw)]
"""
# Convert to y, x, h, w
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
center_y = boxes[:, 0] + 0.5 * height
center_x = boxes[:, 1] + 0.5 * width
# Apply deltas
center_y += deltas[:, 0] * height
center_x += deltas[:, 1] * width
height *= tf.exp(deltas[:, 2])
width *= tf.exp(deltas[:, 3])
# Convert back to y1, x1, y2, x2
y1 = center_y - 0.5 * height
x1 = center_x - 0.5 * width
y2 = y1 + height
x2 = x1 + width
result = tf.stack([y1, x1, y2, x2], axis=1, name="apply_box_deltas_out")
return result
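# Hedged worked example (an illustrative addition, not part of the model):
# the same delta arithmetic as above in plain NumPy on one hand-checked box.
def _demo_apply_box_deltas_numpy():
    box = np.array([0., 0., 10., 10.])                   # y1, x1, y2, x2
    delta = np.array([0.1, 0.1, np.log(2.), np.log(2.)])
    h, w = box[2] - box[0], box[3] - box[1]              # 10, 10
    cy = box[0] + 0.5 * h + delta[0] * h                 # 6.0
    cx = box[1] + 0.5 * w + delta[1] * w                 # 6.0
    h, w = h * np.exp(delta[2]), w * np.exp(delta[3])    # 20, 20
    return np.array([cy - 0.5 * h, cx - 0.5 * w,
                     cy + 0.5 * h, cx + 0.5 * w])        # [-4., -4., 16., 16.]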
def clip_boxes_graph(boxes, window):
"""
boxes: [N, 4] each row is y1, x1, y2, x2
window: [4] in the form y1, x1, y2, x2
"""
# Split corners
wy1, wx1, wy2, wx2 = tf.split(window, 4)
y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)
# Clip
y1 = tf.maximum(tf.minimum(y1, wy2), wy1)
x1 = tf.maximum(tf.minimum(x1, wx2), wx1)
y2 = tf.maximum(tf.minimum(y2, wy2), wy1)
x2 = tf.maximum(tf.minimum(x2, wx2), wx1)
clipped = tf.concat([y1, x1, y2, x2], axis=1, name="clipped_boxes")
return clipped
class ProposalLayer(KE.Layer):
"""Receives anchor scores and selects a subset to pass as proposals
to the second stage. Filtering is done based on anchor scores and
non-max suppression to remove overlaps. It also applies bounding
    box refinement deltas to anchors.
Inputs:
rpn_probs: [batch, anchors, (bg prob, fg prob)]
rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
Returns:
Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]
"""
def __init__(self, proposal_count, nms_threshold, anchors,
config=None, **kwargs):
"""
anchors: [N, (y1, x1, y2, x2)] anchors defined in image coordinates
"""
super(ProposalLayer, self).__init__(**kwargs)
self.config = config
self.proposal_count = proposal_count
self.nms_threshold = nms_threshold
self.anchors = anchors.astype(np.float32)
def call(self, inputs):
# Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]
scores = inputs[0][:, :, 1]
# Box deltas [batch, num_rois, 4]
deltas = inputs[1]
deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])
# Base anchors
anchors = self.anchors
# Improve performance by trimming to top anchors by score
# and doing the rest on the smaller subset.
pre_nms_limit = min(10000, self.anchors.shape[0])
ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True, name="top_anchors").indices
scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
anchors = utils.batch_slice(ix, lambda x: tf.gather(anchors, x),
self.config.IMAGES_PER_GPU,
names=["pre_nms_anchors"])
# Apply deltas to anchors to get refined anchors.
# [batch, N, (y1, x1, y2, x2)]
boxes = utils.batch_slice([anchors, deltas],
lambda x, y: apply_box_deltas_graph(x, y),
self.config.IMAGES_PER_GPU,
names=["refined_anchors"])
# Clip to image boundaries. [batch, N, (y1, x1, y2, x2)]
height, width = self.config.IMAGE_SHAPE[:2]
window = np.array([0, 0, height, width]).astype(np.float32)
boxes = utils.batch_slice(boxes,
lambda x: clip_boxes_graph(x, window),
self.config.IMAGES_PER_GPU,
names=["refined_anchors_clipped"])
# Filter out small boxes
# According to Xinlei Chen's paper, this reduces detection accuracy
# for small objects, so we're skipping it.
# Normalize dimensions to range of 0 to 1.
normalized_boxes = boxes / np.array([[height, width, height, width]])
# Non-max suppression
def nms(normalized_boxes, scores):
indices = tf.image.non_max_suppression(
normalized_boxes, scores, self.proposal_count,
self.nms_threshold, name="rpn_non_max_suppression")
proposals = tf.gather(normalized_boxes, indices)
# Pad if needed
padding = self.proposal_count - tf.shape(proposals)[0]
proposals = tf.concat([proposals, tf.zeros([padding, 4])], 0)
return proposals
proposals = utils.batch_slice([normalized_boxes, scores], nms,
self.config.IMAGES_PER_GPU)
return proposals
def compute_output_shape(self, input_shape):
return (None, self.proposal_count, 4)
############################################################
# ROIAlign Layer
############################################################
def log2_graph(x):
"""Implementatin of Log2. TF doesn't have a native implemenation."""
return tf.math.log(x) / tf.math.log(2.0)
class PyramidROIAlign(KE.Layer):
"""Implements ROI Pooling on multiple levels of the feature pyramid.
Params:
- pool_shape: [height, width] of the output pooled regions. Usually [7, 7]
    - image_shape: [height, width, channels]. Shape of input image in pixels
Inputs:
- boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized
coordinates. Possibly padded with zeros if not enough
boxes to fill the array.
- Feature maps: List of feature maps from different levels of the pyramid.
Each is [batch, height, width, channels]
Output:
Pooled regions in the shape: [batch, num_boxes, height, width, channels].
The width and height are those specific in the pool_shape in the layer
constructor.
"""
def __init__(self, pool_shape, image_shape, **kwargs):
super(PyramidROIAlign, self).__init__(**kwargs)
self.pool_shape = tuple(pool_shape)
self.image_shape = tuple(image_shape)
def call(self, inputs):
# Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords
boxes = inputs[0]
# Feature Maps. List of feature maps from different level of the
# feature pyramid. Each is [batch, height, width, channels]
feature_maps = inputs[1:]
# Assign each ROI to a level in the pyramid based on the ROI area.
y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)
h = y2 - y1
w = x2 - x1
# Equation 1 in the Feature Pyramid Networks paper. Account for
# the fact that our coordinates are normalized here.
# e.g. a 224x224 ROI (in pixels) maps to P4
image_area = tf.cast(self.image_shape[0] * self.image_shape[1], tf.float32)
roi_level = log2_graph(tf.sqrt(h*w) / (224.0/tf.sqrt(image_area)))
roi_level = tf.minimum(5, tf.maximum(2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
roi_level = tf.squeeze(roi_level, 2)
# Loop through levels and apply ROI pooling to each. P2 to P5.
pooled = []
box_to_level = []
for i, level in enumerate(range(2, 6)):
ix = tf.where(tf.equal(roi_level, level))
level_boxes = tf.gather_nd(boxes, ix)
            # Box indices for crop_and_resize.
box_indices = tf.cast(ix[:, 0], tf.int32)
# Keep track of which box is mapped to which level
box_to_level.append(ix)
            # Stop gradient propagation to ROI proposals
level_boxes = tf.stop_gradient(level_boxes)
box_indices = tf.stop_gradient(box_indices)
# Crop and Resize
# From Mask R-CNN paper: "We sample four regular locations, so
# that we can evaluate either max or average pooling. In fact,
# interpolating only a single value at each bin center (without
# pooling) is nearly as effective."
#
# Here we use the simplified approach of a single value per bin,
# which is how it's done in tf.crop_and_resize()
# Result: [batch * num_boxes, pool_height, pool_width, channels]
pooled.append(tf.image.crop_and_resize(
feature_maps[i], level_boxes, box_indices, self.pool_shape,
method="bilinear"))
# Pack pooled features into one tensor
pooled = tf.concat(pooled, axis=0)
# Pack box_to_level mapping into one array and add another
# column representing the order of pooled boxes
box_to_level = tf.concat(box_to_level, axis=0)
box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)
box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],
axis=1)
# Rearrange pooled features to match the order of the original boxes
# Sort box_to_level by batch then box index
# TF doesn't have a way to sort by two columns, so merge them and sort.
sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]
ix = tf.nn.top_k(sorting_tensor, k=tf.shape(box_to_level)[0]).indices[::-1]
ix = tf.gather(box_to_level[:,2], ix)
pooled = tf.gather(pooled, ix)
# Re-add the batch dimension
pooled = tf.expand_dims(pooled, 0)
return pooled
def compute_output_shape(self, input_shape):
return input_shape[0][:2] + self.pool_shape + (input_shape[1][-1], )
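# Hedged worked example (an illustrative addition, not part of the model):
# the ROI-to-level assignment above (FPN Eq. 1) reproduced in NumPy.
def _demo_fpn_roi_level():
    image_area = 1024. * 1024.
    h = w = 224. / 1024.       # a 224x224 ROI in normalized coordinates
    level = np.log2(np.sqrt(h * w) / (224. / np.sqrt(image_area)))
    return int(np.clip(4 + np.round(level), 2, 5))       # -> 4, i.e. P4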
############################################################
# Detection Target Layer
############################################################
def detection_targets_graph(proposals, gt_boxes, gt_masks, config):
"""Generates detection targets for one image. Subsamples proposals and
generates target class IDs, bounding box deltas, and masks for each.
Inputs:
proposals: [N, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2, class_id)] in
normalized coordinates.
gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates
class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.
deltas: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (dy, dx, log(dh), log(dw))]
Class-specific bbox refinments.
masks: [TRAIN_ROIS_PER_IMAGE, height, width). Masks cropped to bbox
boundaries and resized to neural network output size.
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
# Assertions
asserts = [
tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],
name="roi_assertion"),
]
with tf.control_dependencies(asserts):
proposals = tf.identity(proposals)
# Remove proposals zero padding
non_zeros = tf.cast(tf.reduce_sum(tf.abs(proposals), axis=1), tf.bool)
proposals = tf.boolean_mask(proposals, non_zeros)
# TODO: Remove zero padding from gt_boxes and gt_masks
# Compute overlaps matrix [rpn_rois, gt_boxes]
    # 1. Tile GT boxes and repeat ROIs tensor. This
# allows us to compare every ROI against every GT box without loops.
    # TF doesn't have an equivalent to np.repeat() so simulate it
# using tf.tile() and tf.reshape.
rois = tf.reshape(tf.tile(tf.expand_dims(proposals, 1),
[1, 1, tf.shape(gt_boxes)[0]]), [-1, 4])
boxes = tf.tile(gt_boxes, [tf.shape(proposals)[0], 1])
# 2. Compute intersections
roi_y1, roi_x1, roi_y2, roi_x2 = tf.split(rois, 4, axis=1)
box_y1, box_x1, box_y2, box_x2, class_ids = tf.split(boxes, 5, axis=1)
y1 = tf.maximum(roi_y1, box_y1)
x1 = tf.maximum(roi_x1, box_x1)
y2 = tf.minimum(roi_y2, box_y2)
x2 = tf.minimum(roi_x2, box_x2)
intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)
# 3. Compute unions
roi_area = (roi_y2 - roi_y1) * (roi_x2 - roi_x1)
box_area = (box_y2 - box_y1) * (box_x2 - box_x1)
union = roi_area + box_area - intersection
# 4. Compute IoU and reshape to [rois, boxes]
iou = intersection / union
overlaps = tf.reshape(iou, [tf.shape(proposals)[0], tf.shape(gt_boxes)[0]])
    # Determine positive and negative ROIs
roi_iou_max = tf.reduce_max(overlaps, axis=1)
# 1. Positive ROIs are those with >= 0.5 IoU with a GT box
positive_roi_bool = (roi_iou_max >= 0.5)
positive_indices = tf.where(positive_roi_bool)[:, 0]
# 2. Negative ROIs are those with < 0.5 with every GT box
negative_indices = tf.where(roi_iou_max < 0.5)[:, 0]
# Subsample ROIs. Aim for 33% positive
# Positive ROIs
positive_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)
positive_indices = tf.random_shuffle(positive_indices)[:positive_count]
# Negative ROIs. Fill the rest of the batch.
negative_count = config.TRAIN_ROIS_PER_IMAGE - tf.shape(positive_indices)[0]
negative_indices = tf.random_shuffle(negative_indices)[:negative_count]
# Gather selected ROIs
positive_rois = tf.gather(proposals, positive_indices)
negative_rois = tf.gather(proposals, negative_indices)
# Assign positive ROIs to GT boxes.
positive_overlaps = tf.gather(overlaps, positive_indices)
roi_gt_box_assignment = tf.argmax(positive_overlaps, axis=1)
roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)
# Compute bbox refinement for positive ROIs
deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes[:,:4])
deltas /= config.BBOX_STD_DEV
# Assign positive ROIs to GT masks
# Permute masks to [N, height, width, 1]
transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)
# Pick the right mask for each ROI
roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)
# Compute mask targets
boxes = positive_rois
if config.USE_MINI_MASK:
        # Transform ROI coordinates from normalized image space
# to normalized mini-mask space.
y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)
gt_y1, gt_x1, gt_y2, gt_x2, _ = tf.split(roi_gt_boxes, 5, axis=1)
gt_h = gt_y2 - gt_y1
gt_w = gt_x2 - gt_x1
y1 = (y1 - gt_y1) / gt_h
x1 = (x1 - gt_x1) / gt_w
y2 = (y2 - gt_y1) / gt_h
x2 = (x2 - gt_x1) / gt_w
boxes = tf.concat([y1, x1, y2, x2], 1)
box_ids = tf.range(0, tf.shape(roi_masks)[0])
masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,
box_ids,
config.MASK_SHAPE)
# Remove the extra dimension from masks.
masks = tf.squeeze(masks, axis=3)
# Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with
# binary cross entropy loss.
masks = tf.round(masks)
# Append negative ROIs and pad bbox deltas and masks that
# are not used for negative ROIs with zeros.
rois = tf.concat([positive_rois, negative_rois], axis=0)
N = tf.shape(negative_rois)[0]
P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)
rois = tf.pad(rois, [(0, P), (0, 0)])
roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N+P), (0, 0)])
deltas = tf.pad(deltas, [(0, N+P), (0, 0)])
masks = tf.pad(masks, [[0, N+P], (0, 0), (0, 0)])
return rois, roi_gt_boxes[:, 4], deltas, masks
class DetectionTargetLayer(KE.Layer):
"""Subsamples proposals and generates target box refinment, class_ids,
and masks for each.
Inputs:
proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2, class_id)] in
normalized coordinates.
gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized
coordinates
target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, NUM_CLASSES,
(dy, dx, log(dh), log(dw), class_id)]
                    Class-specific bbox refinements.
target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width)
Masks cropped to bbox boundaries and resized to neural
network output size.
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
def __init__(self, config, **kwargs):
super(DetectionTargetLayer, self).__init__(**kwargs)
self.config = config
def call(self, inputs):
proposals = inputs[0]
gt_boxes = inputs[1]
gt_masks = inputs[2]
# Slice the batch and run a graph for each slice
# TODO: Optimize by supporting batch > 1
# TODO: Rename target_bbox to target_deltas for clarity
names = ["rois", "target_class_ids", "target_bbox", "target_mask"]
outputs = utils.batch_slice(
[proposals, gt_boxes, gt_masks],
lambda x, y, z: detection_targets_graph(x, y, z, self.config),
self.config.IMAGES_PER_GPU, names=names)
return outputs
def compute_output_shape(self, input_shape):
return [
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois
(None, 1), # class_ids
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas
(None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],
self.config.MASK_SHAPE[1]) # masks
]
def compute_mask(self, inputs, mask=None):
return [None, None, None, None]
############################################################
# Detection Layer
############################################################
def clip_to_window(window, boxes):
"""
window: (y1, x1, y2, x2). The window in the image we want to clip to.
boxes: [N, (y1, x1, y2, x2)]
"""
boxes[:, 0] = np.maximum(np.minimum(boxes[:, 0], window[2]), window[0])
boxes[:, 1] = np.maximum(np.minimum(boxes[:, 1], window[3]), window[1])
boxes[:, 2] = np.maximum(np.minimum(boxes[:, 2], window[2]), window[0])
boxes[:, 3] = np.maximum(np.minimum(boxes[:, 3], window[3]), window[1])
return boxes
def refine_detections(rois, probs, deltas, window, config):
"""Refine classified proposals and filter overlaps and return final
detections.
Inputs:
rois: [N, (y1, x1, y2, x2)] in normalized coordinates
probs: [N, num_classes]. Class probabilities.
deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific
bounding box deltas.
window: (y1, x1, y2, x2) in image coordinates. The part of the image
that contains the image excluding the padding.
Returns detections shaped: [N, (y1, x1, y2, x2, class_id, score)]
"""
# Class IDs per ROI
class_ids = np.argmax(probs, axis=1)
# Class probability of the top class of each ROI
class_scores = probs[np.arange(class_ids.shape[0]), class_ids]
# Class-specific bounding box deltas
deltas_specific = deltas[np.arange(deltas.shape[0]), class_ids]
# Apply bounding box deltas
# Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates
refined_rois = utils.apply_box_deltas(
rois, deltas_specific * config.BBOX_STD_DEV)
    # Convert coordinates to image domain
# TODO: better to keep them normalized until later
height, width = config.IMAGE_SHAPE[:2]
refined_rois *= np.array([height, width, height, width])
# Clip boxes to image window
refined_rois = clip_to_window(window, refined_rois)
    # Round and cast to int since we're dealing with pixels now
refined_rois = np.rint(refined_rois).astype(np.int32)
# TODO: Filter out boxes with zero area
# Filter out background boxes
keep = np.where(class_ids > 0)[0]
# Filter out low confidence boxes
if config.DETECTION_MIN_CONFIDENCE:
keep = np.intersect1d(
keep, np.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[0])
# Apply per-class NMS
pre_nms_class_ids = class_ids[keep]
pre_nms_scores = class_scores[keep]
pre_nms_rois = refined_rois[keep]
nms_keep = []
for class_id in np.unique(pre_nms_class_ids):
# Pick detections of this class
ixs = np.where(pre_nms_class_ids == class_id)[0]
# Apply NMS
class_keep = utils.non_max_suppression(
pre_nms_rois[ixs], pre_nms_scores[ixs],
config.DETECTION_NMS_THRESHOLD)
        # Map indices
class_keep = keep[ixs[class_keep]]
nms_keep = np.union1d(nms_keep, class_keep)
keep = np.intersect1d(keep, nms_keep).astype(np.int32)
# Keep top detections
roi_count = config.DETECTION_MAX_INSTANCES
top_ids = np.argsort(class_scores[keep])[::-1][:roi_count]
keep = keep[top_ids]
# Arrange output as [N, (y1, x1, y2, x2, class_id, score)]
# Coordinates are in image domain.
result = np.hstack((refined_rois[keep],
class_ids[keep][..., np.newaxis],
class_scores[keep][..., np.newaxis]))
return result
class DetectionLayer(KE.Layer):
"""Takes classified proposal boxes and their bounding box deltas and
returns the final detection boxes.
# TODO: Add support for batch_size > 1
Returns:
[batch, num_detections, (y1, x1, y2, x2, class_score)] in pixels
"""
def __init__(self, config=None, **kwargs):
super(DetectionLayer, self).__init__(**kwargs)
self.config = config
def call(self, inputs):
def wrapper(rois, mrcnn_class, mrcnn_bbox, image_meta):
# currently supports one image per batch
b = 0
_, _, window, _ = parse_image_meta(image_meta)
detections = refine_detections(
rois[b], mrcnn_class[b], mrcnn_bbox[b], window[b], self.config)
# Pad with zeros if detections < DETECTION_MAX_INSTANCES
gap = self.config.DETECTION_MAX_INSTANCES - detections.shape[0]
assert gap >= 0
if gap > 0:
detections = np.pad(detections, [(0, gap), (0, 0)],
'constant', constant_values=0)
# Cast to float32
# TODO: track where float64 is introduced
detections = detections.astype(np.float32)
# Reshape output
# [batch, num_detections, (y1, x1, y2, x2, class_score)] in pixels
return np.reshape(detections,
[1, self.config.DETECTION_MAX_INSTANCES, 6])
# Return wrapped function
return tf.py_func(wrapper, inputs, tf.float32)
def compute_output_shape(self, input_shape):
return (None, self.config.DETECTION_MAX_INSTANCES, 6)
# Region Proposal Network (RPN)
def rpn_graph(feature_map, anchors_per_location, anchor_stride):
"""Builds the computation graph of Region Proposal Network.
feature_map: backbone features [batch, height, width, depth]
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
Returns:
rpn_logits: [batch, H, W, 2] Anchor classifier logits (before softmax)
        rpn_probs: [batch, H, W, 2] Anchor classifier probabilities.
rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
    # TODO: check if stride of 2 causes alignment issues if the feature map
# is not even.
# Shared convolutional base of the RPN
shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',
strides=anchor_stride,
name='rpn_conv_shared')(feature_map)
# Anchor Score. [batch, height, width, anchors per location * 2].
x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
activation='linear', name='rpn_class_raw')(shared)
# Reshape to [batch, anchors, 2]
rpn_class_logits = KL.Lambda(
lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)
# Softmax on last dimension of BG/FG.
rpn_probs = KL.Activation("softmax", name="rpn_class_xxx")(rpn_class_logits)
# Bounding box refinement. [batch, H, W, anchors per location, depth]
# where depth is [x, y, log(w), log(h)]
x = KL.Conv2D(anchors_per_location*4, (1, 1), padding="valid",
activation='linear', name='rpn_bbox_pred')(shared)
# Reshape to [batch, anchors, 4]
rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)
return [rpn_class_logits, rpn_probs, rpn_bbox]
def build_rpn_model(anchor_stride, anchors_per_location, depth):
"""Builds a Keras model of the Region Proposal Network.
It wraps the RPN graph so it can be used multiple times with shared
weights.
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
depth: Depth of the backbone feature map.
Returns a Keras Model object. The model outputs, when called, are:
rpn_class_logits: [batch, anchors, 2] Anchor classifier logits (before softmax)
rpn_probs: [batch, anchors, 2] Anchor classifier probabilities.
rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
input_feature_map = KL.Input(shape=[None, None, depth],
name="input_rpn_feature_map")
outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)
return KM.Model([input_feature_map], outputs, name="rpn_model")
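# Illustrative usage sketch (not part of the original file; the 32x32 feature
# map and the depth of 256 below are assumptions chosen for demonstration):
#
#   import numpy as np
#   rpn = build_rpn_model(anchor_stride=1, anchors_per_location=3, depth=256)
#   fmap = np.random.rand(1, 32, 32, 256).astype(np.float32)
#   logits, probs, deltas = rpn.predict(fmap)
#   # logits: (1, 32*32*3, 2), probs: (1, 32*32*3, 2), deltas: (1, 32*32*3, 4)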
############################################################
# Feature Pyramid Network Heads
############################################################
def fpn_classifier_graph(rois, feature_maps,
image_shape, pool_size, num_classes):
"""Builds the computation graph of the feature pyramid network classifier
and regressor heads.
rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
coordinates.
feature_maps: List of feature maps from different layers of the pyramid,
[P2, P3, P4, P5]. Each has a different resolution.
image_shape: [height, width, depth]
pool_size: The width of the square feature map generated from ROI Pooling.
num_classes: number of classes, which determines the depth of the results
Returns:
logits: [batch, num_rois, NUM_CLASSES] classifier logits (before softmax)
probs: [batch, num_rois, NUM_CLASSES] classifier probabilities
bbox_deltas: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))]
Deltas to apply to proposal boxes
"""
# ROI Pooling
# Shape: [batch, num_boxes, pool_height, pool_width, channels]
x = PyramidROIAlign([pool_size, pool_size], image_shape,
name="roi_align_classifier")([rois] + feature_maps)
# Two 1024 FC layers (implemented with Conv2D for consistency)
x = KL.TimeDistributed(KL.Conv2D(1024, (pool_size, pool_size), padding="valid"),
name="mrcnn_class_conv1")(x)
x = KL.TimeDistributed(BatchNorm(axis=3), name='mrcnn_class_bn1')(x)
x = KL.Activation('relu')(x)
x = KL.Dropout(0.5)(x)
x = KL.TimeDistributed(KL.Conv2D(1024, (1, 1)),
name="mrcnn_class_conv2")(x)
x = KL.TimeDistributed(BatchNorm(axis=3),
name='mrcnn_class_bn2')(x)
x = KL.Activation('relu')(x)
shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),
name="pool_squeeze")(x)
# Classifier head
mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),
name='mrcnn_class_logits')(shared)
mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"),
name="mrcnn_class")(mrcnn_class_logits)
# BBox head
# [batch, boxes, num_classes * (dy, dx, log(dh), log(dw))]
x = KL.TimeDistributed(KL.Dense(num_classes*4, activation='linear'),
name='mrcnn_bbox_fc')(shared)
# Reshape to [batch, boxes, num_classes, (dy, dx, log(dh), log(dw))]
s = K.int_shape(x)
#mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name="mrcnn_bbox")(x)
if s[1] is None:
mrcnn_bbox = KL.Reshape((-1, num_classes, 4), name="mrcnn_bbox")(x)
else:
mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name="mrcnn_bbox")(x)
return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox
def build_fpn_mask_graph(rois, feature_maps,
image_shape, pool_size, num_classes):
"""Builds the computation graph of the mask head of Feature Pyramid Network.
rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
coordinates.
feature_maps: List of feature maps from different layers of the pyramid,
[P2, P3, P4, P5]. Each has a different resolution.
image_shape: [height, width, depth]
pool_size: The width of the square feature map generated from ROI Pooling.
num_classes: number of classes, which determines the depth of the results
Returns: Masks [batch, roi_count, height, width, num_classes]
"""
# ROI Pooling
# Shape: [batch, boxes, pool_height, pool_width, channels]
x = PyramidROIAlign([pool_size, pool_size], image_shape,
name="roi_align_mask")([rois] + feature_maps)
# Conv layers
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv1")(x)
x = KL.TimeDistributed(BatchNorm(axis=3),
name='mrcnn_mask_bn1')(x)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv2")(x)
x = KL.TimeDistributed(BatchNorm(axis=3),
name='mrcnn_mask_bn2')(x)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv3")(x)
x = KL.TimeDistributed(BatchNorm(axis=3),
name='mrcnn_mask_bn3')(x)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv4")(x)
x = KL.TimeDistributed(BatchNorm(axis=3),
name='mrcnn_mask_bn4')(x)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2,2), strides=2, activation="relu"),
name="mrcnn_mask_deconv")(x)
x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"),
name="mrcnn_mask")(x)
return x
############################################################
# Loss Functions
############################################################
def smooth_l1_loss(y_true, y_pred):
"""Implements Smooth-L1 loss.
y_true and y_pred are typically: [N, 4], but could be any shape.
"""
diff = K.abs(y_true - y_pred)
less_than_one = K.cast(K.less(diff, 1.0), "float32")
loss = (less_than_one * 0.5 * diff**2) + (1-less_than_one) * (diff - 0.5)
return loss
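# Hand-checked sketch of the piecewise behavior above (hypothetical values;
# in this graph-mode code the result is a symbolic tensor until evaluated):
#
#   y_true = K.constant([[0., 0., 0., 0.]])
#   y_pred = K.constant([[0.5, 2.0, 0.0, 1.0]])
#   loss = smooth_l1_loss(y_true, y_pred)
#   # |diff| < 1 -> 0.5 * diff**2, else |diff| - 0.5
#   # element-wise values: [0.125, 1.5, 0.0, 0.5]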
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
"""RPN anchor classifier loss.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
"""
# Squeeze last dim to simplify
rpn_match = tf.squeeze(rpn_match, -1)
# Get anchor classes. Convert the -1/+1 match to 0/1 values.
anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
# Positive and Negative anchors contribute to the loss,
# but neutral anchors (match value = 0) don't.
indices = tf.where(K.not_equal(rpn_match, 0))
# Pick rows that contribute to the loss and filter out the rest.
rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
anchor_class = tf.gather_nd(anchor_class, indices)
# Crossentropy loss
loss = K.sparse_categorical_crossentropy(target=anchor_class,
output=rpn_class_logits,
from_logits=True)
loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
"""Return the RPN bounding box loss graph.
config: the model config object.
target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
Uses 0 padding to fill in unused bbox deltas.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
"""
# Positive anchors contribute to the loss, but negative and
# neutral anchors (match value of 0 or -1) don't.
rpn_match = K.squeeze(rpn_match, -1)
indices = tf.where(K.equal(rpn_match, 1))
# Pick bbox deltas that contribute to the loss
rpn_bbox = tf.gather_nd(rpn_bbox, indices)
# Trim target bounding box deltas to the same length as rpn_bbox.
batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
target_bbox = batch_pack_graph(target_bbox, batch_counts,
config.IMAGES_PER_GPU)
# TODO: use smooth_l1_loss() rather than reimplementing here
# to reduce code duplication
diff = K.abs(target_bbox - rpn_bbox)
less_than_one = K.cast(K.less(diff, 1.0), "float32")
loss = (less_than_one * 0.5 * diff**2) + (1-less_than_one) * (diff - 0.5)
loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
def mrcnn_class_loss_graph(target_class_ids, pred_class_logits,
active_class_ids):
"""Loss for the classifier head of Mask RCNN.
target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
padding to fill in the array.
pred_class_logits: [batch, num_rois, num_classes]
active_class_ids: [batch, num_classes]. Has a value of 1 for
classes that are in the dataset of the image, and 0
for classes that are not in the dataset.
"""
target_class_ids = tf.cast(target_class_ids, 'int64')
# Find predictions of classes that are not in the dataset.
pred_class_ids = tf.argmax(pred_class_logits, axis=2)
# TODO: Update this line to work with batch > 1. Right now it assumes all
# images in a batch have the same active_class_ids
pred_active = tf.gather(active_class_ids[0], pred_class_ids)
# Loss
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=target_class_ids, logits=pred_class_logits)
# Erase losses of predictions of classes that are not in the active
# classes of the image.
loss = loss * pred_active
# Compute loss mean. Use only predictions that contribute
# to the loss to get a correct mean.
loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)
return loss
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
"""Loss for Mask R-CNN bounding box refinement.
target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
target_class_ids: [batch, num_rois]. Integer class IDs.
pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
"""
# Reshape to merge batch and roi dimensions for simplicity.
target_class_ids = K.reshape(target_class_ids, (-1,))
target_bbox = K.reshape(target_bbox, (-1, 4))
pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))
# Only positive ROIs contribute to the loss. And only
# the right class_id of each ROI. Get their indices.
positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]
positive_roi_class_ids = tf.cast(tf.gather(target_class_ids, positive_roi_ix), tf.int64)
indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)
# Gather the deltas (predicted and true) that contribute to loss
target_bbox = tf.gather(target_bbox, positive_roi_ix)
pred_bbox = tf.gather_nd(pred_bbox, indices)
# Smooth-L1 Loss
loss = K.switch(tf.size(target_bbox) > 0,
smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),
tf.constant(0.0))
loss = K.mean(loss)
loss = K.reshape(loss, [1, 1])
return loss
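# Indexing sketch for the gather above (hypothetical values): with
# target_class_ids = [0, 3, 0, 7], positive_roi_ix is [1, 3], indices is
# [[1, 3], [3, 7]], and tf.gather_nd picks, for each positive ROI, only the
# predicted deltas of its own ground-truth class:
#
#   pred = tf.reshape(tf.range(4 * 10 * 4, dtype=tf.float32), [4, 10, 4])
#   picked = tf.gather_nd(pred, tf.constant([[1, 3], [3, 7]]))  # shape [2, 4]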
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
"""Mask binary cross-entropy loss for the masks head.
target_masks: [batch, num_rois, height, width].
A float32 tensor of values 0 or 1. Uses zero padding to fill array.
target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
with values from 0 to 1.
"""
# Reshape for simplicity. Merge first two dimensions into one.
target_class_ids = K.reshape(target_class_ids, (-1,))
mask_shape = tf.shape(target_masks)
target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))
pred_shape = tf.shape(pred_masks)
pred_masks = K.reshape(pred_masks,
(-1, pred_shape[2], pred_shape[3], pred_shape[4]))
# Permute predicted masks to [N, num_classes, height, width]
pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])
# Only positive ROIs contribute to the loss. And only
# the class specific mask of each ROI.
positive_ix = tf.where(target_class_ids > 0)[:, 0]
positive_class_ids = tf.cast(tf.gather(target_class_ids, positive_ix), tf.int64)
indices = tf.stack([positive_ix, positive_class_ids], axis=1)
# Gather the masks (predicted and true) that contribute to loss
y_true = tf.gather(target_masks, positive_ix)
y_pred = tf.gather_nd(pred_masks, indices)
# Compute binary cross entropy. If no positive ROIs, then return 0.
# shape: [batch, roi, num_classes]
loss = K.switch(tf.size(y_true) > 0,
K.binary_crossentropy(target=y_true, output=y_pred),
tf.constant(0.0))
loss = K.mean(loss)
loss = K.reshape(loss, [1, 1])
return loss
############################################################
# Data Generator
############################################################
def load_image_gt(dataset, config, image_id, augment=False,
use_mini_mask=False):
"""Load and return ground truth data for an image (image, mask, bounding boxes).
augment: If true, apply random image augmentation. Currently, only
horizontal flipping is offered.
use_mini_mask: If False, returns full-size masks that are the same height
and width as the original image. These can be big, for example
1024x1024x100 (for 100 instances). Mini masks are smaller, typically,
224x224 and are generated by extracting the bounding box of the
object and resizing it to MINI_MASK_SHAPE.
Returns:
image: [height, width, 3]
image_meta: Image attributes packed into a 1D array. See compose_image_meta().
bbox: [instance_count, (y1, x1, y2, x2, class_id)]
mask: [height, width, instance_count]. The height and width are those
of the image unless use_mini_mask is True, in which case they are
defined in MINI_MASK_SHAPE.
"""
# Load image and mask
image = dataset.load_image(image_id)
mask, class_ids = dataset.load_mask(image_id)
shape = image.shape
image, window, scale, padding = utils.resize_image(
image,
min_dim=config.IMAGE_MIN_DIM,
max_dim=config.IMAGE_MAX_DIM,
padding=config.IMAGE_PADDING)
mask = utils.resize_mask(mask, scale, padding)
# Random horizontal flips.
if augment:
if random.randint(0, 1):
image = np.fliplr(image)
mask = np.fliplr(mask)
# Bounding boxes. Note that some boxes might be all zeros
# if the corresponding mask got cropped out.
# bbox: [num_instances, (y1, x1, y2, x2)]
bbox = utils.extract_bboxes(mask)
# Add class_id as the last value in bbox
bbox = np.hstack([bbox, class_ids[:, np.newaxis]])
# Active classes
# Different datasets have different classes, so track the
# classes supported in the dataset of this image.
active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
active_class_ids[class_ids] = 1
# Resize masks to smaller size to reduce memory usage
if use_mini_mask:
mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)
# Image meta data
image_meta = compose_image_meta(image_id, shape, window, active_class_ids)
return image, image_meta, bbox, mask
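# Usage sketch (dataset and config are assumed to be a Dataset subclass and a
# Config instance, as elsewhere in this file; image_id=0 is arbitrary):
#
#   image, image_meta, gt_boxes, gt_masks = load_image_gt(
#       dataset, config, image_id=0, augment=True,
#       use_mini_mask=config.USE_MINI_MASK)
#   # image: [H, W, 3], gt_boxes: [N, 5] with class_id last, gt_masks: [h, w, N]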
def build_detection_targets(rpn_rois, gt_boxes, gt_masks, config):
"""Generate targets for training Stage 2 classifier and mask heads.
Inputs:
rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.
gt_boxes: [instance count, (y1, x1, y2, x2, class_id)]
gt_masks: [height, width, instance count] Ground truth masks. Can be full
size or mini-masks.
Returns:
rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]
class_ids: [TRAIN_ROIS_PER_IMAGE]. Int class IDs.
bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, 5]. Rows are class-specific
bbox refinements [y, x, log(h), log(w), weight].
masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES]. Class specific masks cropped
to bbox boundaries and resized to neural network output size.
"""
assert rpn_rois.shape[0] > 0
assert gt_boxes.dtype == np.int32, "Expected int but got {}".format(gt_boxes.dtype)
assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format(gt_masks.dtype)
# It's common to add GT Boxes to ROIs but we don't do that here because
# according to XinLei Chen's paper, it doesn't help.
# Trim empty padding in gt_boxes and gt_masks parts
instance_ids = np.where(gt_boxes[:, 4] > 0)[0]
assert instance_ids.shape[0] > 0, "Image must contain instances."
gt_boxes = gt_boxes[instance_ids]
gt_masks = gt_masks[:, :, instance_ids]
# Compute areas of ROIs and ground truth boxes.
rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * (rpn_rois[:, 3] - rpn_rois[:, 1])
gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * (gt_boxes[:, 3] - gt_boxes[:, 1])
# Compute overlaps [rpn_rois, gt_boxes]
overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))
for i in range(overlaps.shape[1]):
gt = gt_boxes[i][:4]
overlaps[:,i] = utils.compute_iou(gt, rpn_rois, gt_box_area[i], rpn_roi_area)
# Assign ROIs to GT boxes
rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)
rpn_roi_iou_max = overlaps[np.arange(overlaps.shape[0]), rpn_roi_iou_argmax]
rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax] # GT box assigned to each ROI
# Positive ROIs are those with >= 0.5 IoU with a GT box.
fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]
# Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)
# TODO: To hard example mine or not to hard example mine, that's the question
# bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]
bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
# Subsample ROIs. Aim for 33% foreground.
# FG
fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)
if fg_ids.shape[0] > fg_roi_count:
keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)
else:
keep_fg_ids = fg_ids
# BG
remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]
if bg_ids.shape[0] > remaining:
keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
else:
keep_bg_ids = bg_ids
# Combine indices of ROIs to keep
keep = np.concatenate([keep_fg_ids, keep_bg_ids])
# Need more?
remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]
if remaining > 0:
# Looks like we don't have enough samples to maintain the desired
# balance. Reduce requirements and fill in the rest. This is
# likely different from the Mask RCNN paper.
# There is a small chance we have neither fg nor bg samples.
if keep.shape[0] == 0:
# Pick bg regions with easier IoU threshold
bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
assert bg_ids.shape[0] >= remaining
keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
assert keep_bg_ids.shape[0] == remaining
keep = np.concatenate([keep, keep_bg_ids])
else:
# Fill the rest with repeated bg rois.
keep_extra_ids = np.random.choice(keep_bg_ids, remaining, replace=True)
keep = np.concatenate([keep, keep_extra_ids])
assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \
"keep doesn't match ROI batch size {}, {}".format(
keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)
# Reset the gt boxes assigned to BG ROIs.
rpn_roi_gt_boxes[keep_bg_ids, :] = 0
# For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.
rois = rpn_rois[keep, :4]
roi_gt_boxes = rpn_roi_gt_boxes[keep]
class_ids = roi_gt_boxes[:,4].astype(np.int32)
roi_gt_assignment = rpn_roi_iou_argmax[keep]
# Class-aware bbox shifts. [y, x, log(h), log(w), weight]. Weight is 0 or 1 to
# determine if a bbox is included in the loss.
bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.NUM_CLASSES, 5), dtype=np.float32)
pos_ids = np.where(class_ids > 0)[0]
bboxes[pos_ids, class_ids[pos_ids], :4] = utils.box_refinement(rois[pos_ids], roi_gt_boxes[pos_ids, :4])
bboxes[pos_ids, class_ids[pos_ids], 4] = 1 # weight = 1 to influence the loss
# Normalize bbox refinements
bboxes[:, :, :4] /= config.BBOX_STD_DEV
# Generate class-specific target masks.
masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),
dtype=np.float32)
for i in pos_ids:
class_id = class_ids[i]
assert class_id > 0, "class id must be greater than 0"
gt_id = roi_gt_assignment[i]
class_mask = gt_masks[:, :, gt_id]
if config.USE_MINI_MASK:
# Create a mask placeholder, the size of the image
placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)
# GT box
gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id][:4]
gt_w = gt_x2 - gt_x1
gt_h = gt_y2 - gt_y1
# Resize mini mask to size of GT box
placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \
np.round(scipy.misc.imresize(class_mask.astype(float), (gt_h, gt_w),
interp='nearest') / 255.0).astype(bool)
# Place the mini mask in the placeholder
class_mask = placeholder
# Pick part of the mask and resize it
y1, x1, y2, x2 = rois[i][:4].astype(np.int32)
m = class_mask[y1:y2, x1:x2]
mask = scipy.misc.imresize(m.astype(float), config.MASK_SHAPE, interp='nearest') / 255.0
masks[i,:,:,class_id] = mask
return rois, class_ids, bboxes, masks
def build_rpn_targets(image_shape, anchors, gt_boxes, config):
"""Given the anchors and GT boxes, compute overlaps and identify positive
anchors and deltas to refine them to match their corresponding GT boxes.
anchors: [num_anchors, (y1, x1, y2, x2)]
gt_boxes: [num_gt_boxes, (y1, x1, y2, x2, class_id)]
Returns:
rpn_match: [N] (int32) matches between anchors and GT boxes.
1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
"""
# RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
# RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))
# Areas of anchors and GT boxes
gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * (gt_boxes[:, 3] - gt_boxes[:, 1])
anchor_area = (anchors[:, 2] - anchors[:, 0]) * (anchors[:, 3] - anchors[:, 1])
# Compute overlaps [num_anchors, num_gt_boxes]
# Each cell contains the IoU of an anchor and GT box.
overlaps = np.zeros((anchors.shape[0], gt_boxes.shape[0]))
for i in range(overlaps.shape[1]):
gt = gt_boxes[i][:4]
overlaps[:,i] = utils.compute_iou(gt, anchors, gt_box_area[i], anchor_area)
# Match anchors to GT Boxes
# If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.
# If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
# Neutral anchors are those that don't match the conditions above,
# and they don't influence the loss function.
# However, don't keep any GT box unmatched (rare, but happens). Instead,
# match it to the closest anchor (even if its max IoU is < 0.3).
#
# 1. Set negative anchors first. It gets overwritten if a gt box is matched to them.
anchor_iou_argmax = np.argmax(overlaps, axis=1)
anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
rpn_match[anchor_iou_max < 0.3] = -1
# 2. Set an anchor for each GT box (regardless of IoU value).
# TODO: If multiple anchors have the same IoU match all of them
gt_iou_argmax = np.argmax(overlaps, axis=0)
rpn_match[gt_iou_argmax] = 1
# 3. Set anchors with high overlap as positive.
rpn_match[anchor_iou_max >= 0.7] = 1
# Subsample to balance positive and negative anchors
# Don't let positives be more than half the anchors
ids = np.where(rpn_match == 1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
if extra > 0:
# Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# Same for negative proposals
ids = np.where(rpn_match == -1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE - np.sum(rpn_match == 1))
if extra > 0:
# Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# For positive anchors, compute shift and scale needed to transform them
# to match the corresponding GT boxes.
ids = np.where(rpn_match == 1)[0]
ix = 0 # index into rpn_bbox
# TODO: use box_refinement() rather than duplicating the code here
for i, a in zip(ids, anchors[ids]):
# Closest gt box (it might have IoU < 0.7)
gt = gt_boxes[anchor_iou_argmax[i], :4]
# Convert coordinates to center plus width/height.
# GT Box
gt_h = gt[2] - gt[0]
gt_w = gt[3] - gt[1]
gt_center_y = gt[0] + 0.5 * gt_h
gt_center_x = gt[1] + 0.5 * gt_w
# Anchor
a_h = a[2] - a[0]
a_w = a[3] - a[1]
a_center_y = a[0] + 0.5 * a_h
a_center_x = a[1] + 0.5 * a_w
# Compute the bbox refinement that the RPN should predict.
rpn_bbox[ix] = [
(gt_center_y - a_center_y) / a_h,
(gt_center_x - a_center_x) / a_w,
np.log(gt_h / a_h),
np.log(gt_w / a_w),
]
# Normalize
rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV
ix += 1
return rpn_match, rpn_bbox
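# Worked example of the delta encoding above (hand-computed, hypothetical
# boxes, before dividing by RPN_BBOX_STD_DEV):
#
#   anchor = [0, 0, 10, 10]   # center (5, 5), h=10, w=10
#   gt     = [2, 2, 12, 12]   # center (7, 7), h=10, w=10
#   deltas = [(7-5)/10, (7-5)/10, log(10/10), log(10/10)] = [0.2, 0.2, 0.0, 0.0]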
def generate_random_rois(image_shape, count, gt_boxes):
"""Generates ROI proposals similar to what a region proposal network
would generate.
image_shape: [Height, Width, Depth]
count: Number of ROIs to generate
gt_boxes: [N, (y1, x1, y2, x2, class_id)] Ground truth boxes in pixels.
Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.
"""
# placeholder
rois = np.zeros((count, 4), dtype=np.int32)
# Generate random ROIs around GT boxes (90% of count)
rois_per_box = int(0.9 * count / gt_boxes.shape[0])
for i in range(gt_boxes.shape[0]):
gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i,:4]
h = gt_y2 - gt_y1
w = gt_x2 - gt_x1
# random boundaries
r_y1 = max(gt_y1-h, 0)
r_y2 = min(gt_y2+h, image_shape[0])
r_x1 = max(gt_x1-w, 0)
r_x2 = min(gt_x2+w, image_shape[1])
# To avoid generating boxes with zero area, we generate double what
# we need and filter out the extra. If we get fewer valid boxes
# than we need, we loop and try again.
while True:
y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box*2, 2))
x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box*2, 2))
# Filter out zero area boxes
threshold = 1
y1y2 = y1y2[np.abs(y1y2[:,0] - y1y2[:,1]) >= threshold][:rois_per_box]
x1x2 = x1x2[np.abs(x1x2[:,0] - x1x2[:,1]) >= threshold][:rois_per_box]
if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:
break
# Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
# into x1, y1, x2, y2 order
x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
box_rois = np.hstack([y1, x1, y2, x2])
rois[rois_per_box*i:rois_per_box*(i+1)] = box_rois
# Generate random ROIs anywhere in the image (10% of count)
remaining_count = count - (rois_per_box * gt_boxes.shape[0])
# To avoid generating boxes with zero area, we generate double what
# we need and filter out the extra. If we get fewer valid boxes
# than we need, we loop and try again.
while True:
y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))
x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))
# Filter out zero area boxes
threshold = 1
y1y2 = y1y2[np.abs(y1y2[:,0] - y1y2[:,1]) >= threshold][:remaining_count]
x1x2 = x1x2[np.abs(x1x2[:,0] - x1x2[:,1]) >= threshold][:remaining_count]
if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:
break
# Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
# into x1, y1, x2, y2 order
x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
global_rois = np.hstack([y1, x1, y2, x2])
rois[-remaining_count:] = global_rois
return rois
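# Usage sketch (hypothetical image shape and count; gt_boxes as documented):
#
#   rois = generate_random_rois((1024, 1024, 3), 256, gt_boxes)
#   # rois: [256, 4] int32, roughly 90% clustered around GT boxes, 10% anywhere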
def data_generator(dataset, config, shuffle=True, augment=True, random_rois=0,
batch_size=1, detection_targets=False):
"""A generator that returns images and corresponding target class ids,
bounding box deltas, and masks.
dataset: The Dataset object to pick data from
config: The model config object
shuffle: If True, shuffles the samples before every epoch
augment: If True, applies image augmentation to images (currently only
horizontal flips are supported)
random_rois: If > 0 then generate proposals to be used to train the
network classifier and mask heads. Useful if training
the Mask RCNN part without the RPN.
batch_size: How many images to return in each call
detection_targets: If True, generate detection targets (class IDs, bbox
deltas, and masks). Typically for debugging or visualizations because
in training detection targets are generated by DetectionTargetLayer.
Returns a Python generator. Upon calling next() on it, the
generator returns two lists, inputs and outputs. The contents
of the lists differ depending on the received arguments:
inputs list:
- images: [batch, H, W, C]
- image_meta: [batch, size of image meta]
- rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)
- rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
- gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2, class_id)]
- gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width
are those of the image unless use_mini_mask is True, in which
case they are defined in MINI_MASK_SHAPE.
outputs list: Usually empty in regular training. But if detection_targets
is True then the outputs list contains target class_ids, bbox deltas,
and masks.
"""
b = 0 # batch item index
image_index = -1
image_ids = np.copy(dataset.image_ids)
error_count = 0
# Anchors
# [anchor_count, (y1, x1, y2, x2)]
anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
config.RPN_ANCHOR_RATIOS,
config.BACKBONE_SHAPES,
config.BACKBONE_STRIDES,
config.RPN_ANCHOR_STRIDE)
# Keras requires a generator to run indefinitely.
while True:
try:
# Increment index to pick next image. Shuffle if at the start of an epoch.
image_index = (image_index + 1) % len(image_ids)
if shuffle and image_index == 0:
np.random.shuffle(image_ids)
# Get GT bounding boxes and masks for image.
image_id = image_ids[image_index]
image, image_meta, gt_boxes, gt_masks = \
load_image_gt(dataset, config, image_id, augment=augment, use_mini_mask=config.USE_MINI_MASK)
# Skip images that have no instances. This can happen in cases
# where we train on a subset of classes and the image doesn't
# have any of the classes we care about.
if np.sum(gt_boxes) <= 0:
continue
# RPN Targets
rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors, gt_boxes, config)
# Mask R-CNN Targets
if random_rois:
rpn_rois = generate_random_rois(image.shape, random_rois, gt_boxes)
if detection_targets:
# Append two columns of zeros. TODO: needed?
rpn_rois = np.hstack([rpn_rois, np.zeros([rpn_rois.shape[0], 2], dtype=np.int32)])
rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask =\
build_detection_targets(rpn_rois, gt_boxes, gt_masks, config)
# Init batch arrays
if b == 0:
batch_image_meta = np.zeros((batch_size,)+image_meta.shape, dtype=image_meta.dtype)
batch_rpn_match = np.zeros([batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)
batch_rpn_bbox = np.zeros([batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)
batch_images = np.zeros((batch_size,)+image.shape, dtype=np.float32)
batch_gt_boxes = np.zeros((batch_size, config.MAX_GT_INSTANCES, 5), dtype=np.int32)
if config.USE_MINI_MASK:
batch_gt_masks = np.zeros((batch_size, config.MINI_MASK_SHAPE[0], config.MINI_MASK_SHAPE[1],
config.MAX_GT_INSTANCES))
else:
batch_gt_masks = np.zeros((batch_size, image.shape[0], image.shape[1], config.MAX_GT_INSTANCES))
if random_rois:
batch_rpn_rois = np.zeros((batch_size,rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)
if detection_targets:
batch_rois = np.zeros((batch_size,)+rois.shape, dtype=rois.dtype)
batch_mrcnn_class_ids = np.zeros((batch_size,)+mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)
batch_mrcnn_bbox = np.zeros((batch_size,)+mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)
batch_mrcnn_mask = np.zeros((batch_size,)+mrcnn_mask.shape, dtype=mrcnn_mask.dtype)
# If more instances than fits in the array, sub-sample from them.
if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:
ids = np.random.choice(np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)
gt_boxes = gt_boxes[ids]
gt_masks = gt_masks[:,:,ids]
# Add to batch
batch_image_meta[b] = image_meta
batch_rpn_match[b] = rpn_match[:, np.newaxis]
batch_rpn_bbox[b] = rpn_bbox
batch_images[b] = mold_image(image.astype(np.float32), config)
batch_gt_boxes[b,:gt_boxes.shape[0]] = gt_boxes
batch_gt_masks[b,:,:,:gt_masks.shape[-1]] = gt_masks
if random_rois:
batch_rpn_rois[b] = rpn_rois[:,:4]
if detection_targets:
batch_rois[b] = rois
batch_mrcnn_class_ids[b] = mrcnn_class_ids
batch_mrcnn_bbox[b] = mrcnn_bbox
batch_mrcnn_mask[b] = mrcnn_mask
b += 1
# Batch full?
if b >= batch_size:
inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,
batch_gt_boxes, batch_gt_masks]
outputs = []
if random_rois:
inputs.extend([batch_rpn_rois])
if detection_targets:
inputs.extend([batch_rois])
# Keras requires that output and targets have the same number of dimensions
batch_mrcnn_class_ids = np.expand_dims(batch_mrcnn_class_ids, -1)
outputs.extend([batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])
yield inputs, outputs
# start a new batch
b = 0
except (GeneratorExit, KeyboardInterrupt):
raise
except:
# Log it and skip the image
logging.exception("Error processing image {}".format(dataset.image_info[image_id]))
error_count += 1
if error_count > 5:
raise
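# Usage sketch (dataset/config assumed as above):
#
#   gen = data_generator(dataset, config, shuffle=True,
#                        batch_size=config.BATCH_SIZE)
#   inputs, outputs = next(gen)
#   images, image_meta, rpn_match, rpn_bbox, gt_boxes, gt_masks = inputs
#   # outputs == [] in regular training; the losses are added inside the model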
############################################################
# MaskRCNN Class
############################################################
class MaskRCNN():
"""Encapsulates the Mask RCNN model functionality.
The actual Keras model is in the keras_model property.
"""
def __init__(self, mode, config, model_dir):
"""
mode: Either "training" or "inference"
config: A Sub-class of the Config class
model_dir: Directory to save training logs and trained weights
"""
assert mode in ['training', 'inference']
self.mode = mode
self.config = config
self.model_dir = model_dir
self.set_log_dir()
self.keras_model = self.build(mode=mode, config=config)
def build(self, mode, config):
"""Build Mask R-CNN architecture.
input_shape: The shape of the input image.
mode: Either "training" or "inference". The inputs and
outputs of the model differ accordingly.
"""
assert mode in ['training', 'inference']
# Image size must be divisible by 2 multiple times
h, w = config.IMAGE_SHAPE[:2]
if h/2**6 != int(h/2**6) or w/2**6 != int(w/2**6):
raise Exception("Image size must be divisible by 2 at least 6 times "
"to avoid fractions when downscaling and upscaling. "
"For example, use 256, 320, 384, 448, 512, ... etc. ")
# Inputs
input_image = KL.Input(shape=config.IMAGE_SHAPE.tolist(), name="input_image")
input_image_meta = KL.Input(shape=[None], name="input_image_meta")
if mode == "training":
# RPN GT
input_rpn_match = KL.Input(shape=[None, 1], name="input_rpn_match", dtype=tf.int32)
input_rpn_bbox = KL.Input(shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32)
# GT Boxes (zero padded)
# [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2, class_id)] in image coordinates
input_gt_boxes = KL.Input(shape=[None, 5], name="input_gt_boxes", dtype=tf.int32)
# Normalize coordinates
h, w = K.shape(input_image)[1], K.shape(input_image)[2]
image_scale = K.cast(K.stack([h, w, h, w, 1], axis=0), tf.float32)
gt_boxes = KL.Lambda(lambda x: K.cast(x, tf.float32) / image_scale)(input_gt_boxes)
# GT Masks (zero padded)
# [batch, height, width, MAX_GT_INSTANCES]
if config.USE_MINI_MASK:
input_gt_masks = KL.Input(
shape=[config.MINI_MASK_SHAPE[0], config.MINI_MASK_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
else:
input_gt_masks = KL.Input(
shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
# Build the shared convolutional layers.
# Bottom-up Layers
# Returns a list of the last layers of each stage, 5 in total.
# C1 is discarded; stage 5 is created (stage5=True) because FPN builds P5 from C5.
_, C2, C3, C4, C5 = resnet_graph(input_image, "resnet101", stage5=True)
# Top-down Layers
# TODO: add assert to verify feature map sizes match what's in config
P5 = KL.Conv2D(256, (1, 1), name='fpn_c5p5')(C5)
P4 = KL.Add(name="fpn_p4add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),
KL.Conv2D(256, (1, 1), name='fpn_c4p4')(C4)])
P3 = KL.Add(name="fpn_p3add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),
KL.Conv2D(256, (1, 1), name='fpn_c3p3')(C3)])
P2 = KL.Add(name="fpn_p2add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),
KL.Conv2D(256, (1, 1), name='fpn_c2p2')(C2)])
# Attach 3x3 conv to all P layers to get the final feature maps.
P2 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p2")(P2)
P3 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p3")(P3)
P4 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p4")(P4)
P5 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p5")(P5)
# P6 is used for the 5th anchor scale in RPN. Generated by
# subsampling from P5 with stride of 2.
P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)
# Note that P6 is used in RPN, but not in the classifier heads.
rpn_feature_maps = [P2, P3, P4, P5, P6]
mrcnn_feature_maps = [P2, P3, P4, P5]
# Generate Anchors
self.anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
config.RPN_ANCHOR_RATIOS,
config.BACKBONE_SHAPES,
config.BACKBONE_STRIDES,
config.RPN_ANCHOR_STRIDE)
# RPN Model
rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
len(config.RPN_ANCHOR_RATIOS), 256)
# Loop through pyramid layers
layer_outputs = [] # list of lists
for p in rpn_feature_maps:
layer_outputs.append(rpn([p]))
# Concatenate layer outputs
# Convert from list of lists of level outputs to list of lists
# of outputs across levels.
# e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
output_names = ["rpn_class_logits", "rpn_class", "rpn_bbox"]
outputs = list(zip(*layer_outputs))
outputs = [KL.Concatenate(axis=1, name=n)(list(o))
for o, n in zip(outputs, output_names)]
rpn_class_logits, rpn_class, rpn_bbox = outputs
# Generate proposals
# Proposals are [N, (y1, x1, y2, x2)] in normalized coordinates.
proposal_count = config.POST_NMS_ROIS_TRAINING if mode == "training"\
else config.POST_NMS_ROIS_INFERENCE
rpn_rois = ProposalLayer(proposal_count=proposal_count,
nms_threshold=0.7,
name="ROI",
anchors=self.anchors,
config=config)([rpn_class, rpn_bbox])
if mode == "training":
# Class ID mask to mark class IDs supported by the dataset the image
# came from.
_, _, _, active_class_ids = KL.Lambda(lambda x: parse_image_meta_graph(x),
mask=[None, None, None, None])(input_image_meta)
if not config.USE_RPN_ROIS:
# Ignore predicted ROIs and use ROIs provided as an input.
input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],
name="input_roi", dtype=np.int32)
# Normalize coordinates to 0-1 range.
target_rois = KL.Lambda(lambda x: K.cast(x, tf.float32) / image_scale[:4])(input_rois)
else:
target_rois = rpn_rois
# Generate detection targets
# Subsamples proposals and generates target outputs for training
# Note that proposals, gt_boxes, and gt_masks might be zero padded
# Equally, returned rois and targets might be zero padded as well
rois, target_class_ids, target_bbox, target_mask =\
DetectionTargetLayer(config, name="proposal_targets")([
target_rois, gt_boxes, input_gt_masks])
# Network Heads
# TODO: verify that this handles zero padded ROIs
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
fpn_classifier_graph(rois, mrcnn_feature_maps, config.IMAGE_SHAPE,
config.POOL_SIZE, config.NUM_CLASSES)
mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,
config.IMAGE_SHAPE,
config.MASK_POOL_SIZE,
config.NUM_CLASSES)
# TODO: clean up (use tf.identify if necessary)
output_rois = KL.Lambda(lambda x: x * 1, name="output_rois")(rois)
# Losses
rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name="rpn_class_loss")(
[input_rpn_match, rpn_class_logits])
rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name="rpn_bbox_loss")(
[input_rpn_bbox, input_rpn_match, rpn_bbox])
class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name="mrcnn_class_loss")(
[target_class_ids, mrcnn_class_logits, active_class_ids])
bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name="mrcnn_bbox_loss")(
[target_bbox, target_class_ids, mrcnn_bbox])
mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name="mrcnn_mask_loss")(
[target_mask, target_class_ids, mrcnn_mask])
# Model
inputs = [input_image, input_image_meta,
input_rpn_match, input_rpn_bbox, input_gt_boxes, input_gt_masks]
if not config.USE_RPN_ROIS:
inputs.append(input_rois)
outputs = [rpn_class_logits, rpn_class, rpn_bbox,
mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,
rpn_rois, output_rois,
rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]
model = KM.Model(inputs, outputs, name='mask_rcnn')
else:
# Network Heads
# Proposal classifier and BBox regressor heads
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, config.IMAGE_SHAPE,
config.POOL_SIZE, config.NUM_CLASSES)
# Detections
# output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in image coordinates
detections = DetectionLayer(config, name="mrcnn_detection")(
[rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])
# Convert boxes to normalized coordinates
# TODO: let DetectionLayer return normalized coordinates to avoid
# unnecessary conversions
h, w = config.IMAGE_SHAPE[:2]
detection_boxes = KL.Lambda(lambda x: x[...,:4]/np.array([h, w, h, w]))(detections)
# Create masks for detections
mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,
config.IMAGE_SHAPE,
config.MASK_POOL_SIZE,
config.NUM_CLASSES)
model = KM.Model([input_image, input_image_meta],
[detections, mrcnn_class, mrcnn_bbox, mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],
name='mask_rcnn')
# Add multi-GPU support.
if config.GPU_COUNT > 1:
from parallel_model import ParallelModel
model = ParallelModel(model, config.GPU_COUNT)
return model
def find_last(self):
"""Finds the last checkpoint file of the last trained model in the
model directory.
Returns:
log_dir: The directory where events and weights are saved
checkpoint_path: the path to the last checkpoint file
"""
# Get directory names. Each directory corresponds to a model
dir_names = next(os.walk(self.model_dir))[1]
key = self.config.NAME.lower()
dir_names = filter(lambda f: f.startswith(key), dir_names)
dir_names = sorted(dir_names)
if not dir_names:
return None, None
# Pick last directory
dir_name = os.path.join(self.model_dir, dir_names[-1])
# Find the last checkpoint
checkpoints = next(os.walk(dir_name))[2]
checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints)
checkpoints = sorted(checkpoints)
if not checkpoints:
return dir_name, None
checkpoint = os.path.join(dir_name, checkpoints[-1])
return dir_name, checkpoint
def load_weights(self, filepath, by_name=False, exclude=None):
"""Modified version of the correspoding Keras function with
the addition of multi-GPU support and the ability to exclude
some layers from loading.
exlude: list of layer names to excluce
"""
import h5py
from keras.engine import topology
if exclude:
by_name = True
if h5py is None:
raise ImportError('`load_weights` requires h5py.')
f = h5py.File(filepath, mode='r')
if 'layer_names' not in f.attrs and 'model_weights' in f:
f = f['model_weights']
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
keras_model = self.keras_model
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
# Exclude some layers
if exclude:
layers = filter(lambda l: l.name not in exclude, layers)
if by_name:
topology.load_weights_from_hdf5_group_by_name(f, layers)
else:
topology.load_weights_from_hdf5_group(f, layers)
if hasattr(f, 'close'):
f.close()
# Update the log directory
self.set_log_dir(filepath)
def get_imagenet_weights(self):
"""Downloads ImageNet trained weights from Keras.
Returns path to weights file.
"""
from keras.utils.data_utils import get_file
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\
'releases/download/v0.2/'\
'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
return weights_path
def compile(self, learning_rate, momentum):
"""Gets the model ready for training. Adds losses, regularization, and
metrics. Then calls the Keras compile() function.
"""
# Optimizer object
optimizer = keras.optimizers.SGD(lr=learning_rate, momentum=momentum,
clipnorm=5.0)
# Add Losses
# First, clear previously set losses to avoid duplication
self.keras_model._losses = []
self.keras_model._per_input_losses = {}
loss_names = ["rpn_class_loss", "rpn_bbox_loss",
"mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
for name in loss_names:
layer = self.keras_model.get_layer(name)
if layer.output in self.keras_model.losses:
continue
self.keras_model.add_loss(tf.reduce_mean(layer.output, keep_dims=True))
# Add L2 Regularization
reg_losses = [keras.regularizers.l2(self.config.WEIGHT_DECAY)(w)
for w in self.keras_model.trainable_weights]
self.keras_model.add_loss(tf.add_n(reg_losses))
# Compile
self.keras_model.compile(optimizer=optimizer, loss=[None]*len(self.keras_model.outputs))
# Add metrics
for name in loss_names:
if name in self.keras_model.metrics_names:
continue
layer = self.keras_model.get_layer(name)
self.keras_model.metrics_names.append(name)
self.keras_model.metrics_tensors.append(tf.reduce_mean(layer.output,
keep_dims=True))
def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
"""Sets model layers as trainable if their names match
the given regular expression.
"""
# Print message on the first call (but not on recursive calls)
if verbose > 0 and keras_model is None:
log("Selecting layers to train")
keras_model = keras_model or self.keras_model
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
for layer in layers:
# Is the layer a model?
if layer.__class__.__name__ == 'Model':
print("In model: ", layer.name)
self.set_trainable(layer_regex, keras_model=layer, indent=indent+4)
continue
if not layer.weights:
continue
# Is it trainable?
trainable = bool(re.fullmatch(layer_regex, layer.name))
# Update layer. If layer is a container, update inner layer.
if layer.__class__.__name__ == 'TimeDistributed':
layer.layer.trainable = trainable
else:
layer.trainable = trainable
# Print trainable layer names
if trainable and verbose > 0:
log("{}{:20} ({})".format(" " * indent, layer.name,
layer.__class__.__name__))
def set_log_dir(self, model_path=None):
"""Sets the model log directory and epoch counter.
model_path: If None, or a format different from what this code uses
then set a new log directory and start epochs from 0. Otherwise,
extract the log directory and the epoch counter from the file
name.
"""
# Set date and epoch counter as if starting a new model
self.epoch = 0
now = datetime.datetime.now()
# If we have a model path with date and epochs use them
if model_path:
# Continue from where we left off. Get epoch and date from the file name
# A sample model path might look like:
# /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5
regex = r".*/\w+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})/mask\_rcnn\_\w+(\d{4})\.h5"
m = re.match(regex, model_path)
if m:
now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),
int(m.group(4)), int(m.group(5)))
self.epoch = int(m.group(6)) + 1
# Directory for training logs
self.log_dir = os.path.join(self.model_dir, "{}{:%Y%m%dT%H%M}".format(
self.config.NAME.lower(), now))
# Path to save after each epoch. Include placeholders that get filled by Keras.
self.checkpoint_path = os.path.join(self.log_dir, "mask_rcnn_{}_*epoch*.h5".format(
self.config.NAME.lower()))
self.checkpoint_path = self.checkpoint_path.replace("*epoch*", "{epoch:04d}")
def train(self, train_dataset, val_dataset, learning_rate, epochs, layers):
"""Train the model.
train_dataset, val_dataset: Training and validation Dataset objects.
learning_rate: The learning rate to train with
epochs: Number of training epochs. Note that previous training epochs
are considered to be done already, so this actually determines
the epochs to train in total rather than in this particular
call.
layers: Allows selecting which layers to train. It can be:
- A regular expression to match layer names to train
- One of these predefined values:
heads: The RPN, classifier and mask heads of the network
all: All the layers
3+: Train Resnet stage 3 and up
4+: Train Resnet stage 4 and up
5+: Train Resnet stage 5 and up
"""
assert self.mode == "training", "Create model in training mode."
# Pre-defined layer regular expressions
layer_regex = {
# all layers but the backbone
"heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# From Resnet stage 3 layers and up
"3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"5+": r"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# All layers
"all": ".*",
}
if layers in layer_regex.keys():
layers = layer_regex[layers]
# Data generators
train_generator = data_generator(train_dataset, self.config, shuffle=True,
batch_size=self.config.BATCH_SIZE)
val_generator = data_generator(val_dataset, self.config, shuffle=True,
batch_size=self.config.BATCH_SIZE)
# Callbacks
callbacks = [
keras.callbacks.TensorBoard(log_dir=self.log_dir,
histogram_freq=0, write_graph=True, write_images=False),
keras.callbacks.ModelCheckpoint(self.checkpoint_path,
verbose=0, save_weights_only=True),
]
# Common parameters to pass to fit_generator()
fit_kwargs = {
"steps_per_epoch": self.config.STEPS_PER_EPOCH,
"callbacks": callbacks,
"validation_data": next(val_generator),
"validation_steps": self.config.VALIDATION_STPES,
"max_queue_size": 100,
"workers": max(self.config.BATCH_SIZE // 2, 2),
"use_multiprocessing": True,
}
# Train
log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate))
log("Checkpoint Path: {}".format(self.checkpoint_path))
self.set_trainable(layers)
self.compile(learning_rate, self.config.LEARNING_MOMENTUM)
self.keras_model.fit_generator(
train_generator,
initial_epoch=self.epoch,
epochs=epochs,
**fit_kwargs
)
self.epoch = max(self.epoch, epochs)
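# Training sketch (datasets and paths are hypothetical; LEARNING_RATE is the
# usual Config attribute in this codebase, assumed to be defined):
#
#   model = MaskRCNN(mode="training", config=config, model_dir="./logs")
#   model.load_weights(model.get_imagenet_weights(), by_name=True)
#   model.train(train_dataset, val_dataset,
#               learning_rate=config.LEARNING_RATE,
#               epochs=40, layers="heads")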
def mold_inputs(self, images):
"""Takes a list of images and modifies them to the format expected
as an input to the neural network.
images: List of image matrices [height,width,depth]. Images can have
different sizes.
Returns 3 Numpy matrices:
molded_images: [N, h, w, 3]. Images resized and normalized.
image_metas: [N, length of meta data]. Details about each image.
windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the
original image (padding excluded).
"""
molded_images = []
image_metas = []
windows = []
for image in images:
# Resize image to fit the model expected size
# TODO: move resizing to mold_image()
molded_image, window, scale, padding = utils.resize_image(
image,
min_dim=self.config.IMAGE_MIN_DIM,
max_dim=self.config.IMAGE_MAX_DIM,
padding=self.config.IMAGE_PADDING)
molded_image = mold_image(molded_image, self.config)
# Build image_meta
image_meta = compose_image_meta(
0, image.shape, window,
np.zeros([self.config.NUM_CLASSES], dtype=np.int32))
# Append
molded_images.append(molded_image)
windows.append(window)
image_metas.append(image_meta)
# Pack into arrays
molded_images = np.stack(molded_images)
image_metas = np.stack(image_metas)
windows = np.stack(windows)
return molded_images, image_metas, windows
def unmold_detections(self, detections, mrcnn_mask, image_shape, window):
"""Reformats the detections of one image from the format of the neural
network output to a format suitable for use in the rest of the
application.
detections: [N, (y1, x1, y2, x2, class_id, score)]
mrcnn_mask: [N, height, width, num_classes]
image_shape: [height, width, depth] Original size of the image before resizing
window: [y1, x1, y2, x2] Box in the image where the real image is
excluding the padding.
Returns:
boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels
class_ids: [N] Integer class IDs for each bounding box
scores: [N] Float probability scores of the class_id
masks: [height, width, num_instances] Instance masks
"""
# How many detections do we have?
# Detections array is padded with zeros. Find the first class_id == 0.
zero_ix = np.where(detections[:,4] == 0)[0]
N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]
# Extract boxes, class_ids, scores, and class-specific masks
boxes = detections[:N, :4]
class_ids = detections[:N, 4].astype(np.int32)
scores = detections[:N, 5]
masks = mrcnn_mask[np.arange(N), :, :, class_ids]
# Filter out detections with zero area. Often only happens in early
# stages of training when the network weights are still a bit random.
exclude_ix = np.where((boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
if exclude_ix.shape[0] > 0:
boxes = np.delete(boxes, exclude_ix, axis=0)
class_ids = np.delete(class_ids, exclude_ix, axis=0)
scores = np.delete(scores, exclude_ix, axis=0)
masks = np.delete(masks, exclude_ix, axis=0)
N = class_ids.shape[0]
# Compute scale and shift to translate coordinates to image domain.
h_scale = image_shape[0] / (window[2] - window[0])
w_scale = image_shape[1] / (window[3] - window[1])
scale = min(h_scale, w_scale)
shift = window[:2] # y, x
scales = np.array([scale, scale, scale, scale])
shifts = np.array([shift[0], shift[1], shift[0], shift[1]])
# Translate bounding boxes to image domain
boxes = np.multiply(boxes - shifts, scales).astype(np.int32)
# Resize masks to original image size and set boundary threshold.
full_masks = []
for i in range(N):
# Convert neural network mask to full size mask
full_mask = utils.unmold_mask(masks[i], boxes[i], image_shape)
full_masks.append(full_mask)
full_masks = np.stack(full_masks, axis=-1)\
if full_masks else np.empty((0,) + masks.shape[1:3])
return boxes, class_ids, scores, full_masks
def detect(self, images, verbose=0):
"""Runs the detection pipeline.
images: List of images, potentially of different sizes.
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
assert self.mode == "inference", "Create model in inference mode."
if verbose:
log("Processing {} images".format(len(images)))
for image in images:
log("image", image)
# Mold inputs to format expected by the neural network
molded_images, image_metas, windows = self.mold_inputs(images)
if verbose:
log("molded_images", molded_images)
log("image_metas", image_metas)
# Run object detection
detections, mrcnn_class, mrcnn_bbox, mrcnn_mask, \
rois, rpn_class, rpn_bbox =\
self.keras_model.predict([molded_images, image_metas], verbose=0)
# Process detections
results = []
for i, image in enumerate(images):
final_rois, final_class_ids, final_scores, final_masks =\
self.unmold_detections(detections[i], mrcnn_mask[i],
image.shape, windows[i])
results.append({
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
})
return results
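# Inference sketch (the weights path and image file are hypothetical):
#
#   import skimage.io
#   model = MaskRCNN(mode="inference", config=config, model_dir="./logs")
#   model.load_weights("mask_rcnn_coco.h5", by_name=True)
#   results = model.detect([skimage.io.imread("example.jpg")], verbose=1)
#   r = results[0]  # r["rois"], r["class_ids"], r["scores"], r["masks"]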
def ancestor(self, tensor, name, checked=None):
"""Finds the ancestor of a TF tensor in the computation graph.
tensor: TensorFlow symbolic tensor.
name: Name of ancestor tensor to find
checked: For internal use. A list of tensors that were already
searched to avoid loops in traversing the graph.
"""
checked = checked if checked is not None else []
# Put a limit on how deep we go to avoid very long loops
if len(checked) > 500:
return None
# Convert name to a regex and allow matching a number prefix
# because Keras adds them automatically
if isinstance(name, str):
name = re.compile(name.replace("/", r"(\_\d+)*/"))
parents = tensor.op.inputs
for p in parents:
if p in checked:
continue
if bool(re.fullmatch(name, p.name)):
return p
checked.append(p)
a = self.ancestor(p, name, checked)
if a is not None:
return a
return None
def find_trainable_layer(self, layer):
"""If a layer is encapsulated by another layer, this function
digs through the encapsulation and returns the layer that holds
the weights.
"""
if layer.__class__.__name__ == 'TimeDistributed':
return self.find_trainable_layer(layer.layer)
return layer
def get_trainable_layers(self):
"""Returns a list of layers that have weights."""
layers = []
# Loop through all layers
for l in self.keras_model.layers:
# If layer is a wrapper, find inner trainable layer
l = self.find_trainable_layer(l)
# Include layer if it has weights
if l.get_weights():
layers.append(l)
return layers
def run_graph(self, images, outputs):
"""Runs a sub-set of the computation graph that computes the given
outputs.
outputs: List of tuples (name, tensor) to compute. The tensors are
symbolic TensorFlow tensors and the names are for easy tracking.
Returns an ordered dict of results. Keys are the names received in the
input and values are Numpy arrays.
"""
model = self.keras_model
# Organize desired outputs into an ordered dict
outputs = OrderedDict(outputs)
for o in outputs.values():
assert o is not None
# Build a Keras function to run parts of the computation graph
inputs = model.inputs
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
kf = K.function(model.inputs, list(outputs.values()))
# Run inference
molded_images, image_metas, windows = self.mold_inputs(images)
# TODO: support training mode?
# if TEST_MODE == "training":
# model_in = [molded_images, image_metas,
# target_rpn_match, target_rpn_bbox,
# gt_boxes, gt_masks]
# if not config.USE_RPN_ROIS:
# model_in.append(target_rois)
# if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
# model_in.append(1.)
# outputs_np = kf(model_in)
# else:
model_in = [molded_images, image_metas]
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
model_in.append(0.)
outputs_np = kf(model_in)
# Pack the generated Numpy arrays into a dict and log the results.
outputs_np = OrderedDict([(k, v) for k, v in zip(outputs.keys(), outputs_np)])
for k, v in outputs_np.items():
log(k, v)
return outputs_np
############################################################
# Data Formatting
############################################################
def compose_image_meta(image_id, image_shape, window, active_class_ids):
"""Takes attributes of an image and puts them in one 1D array. Use
parse_image_meta() to parse the values back.
image_id: An int ID of the image. Useful for debugging.
image_shape: [height, width, channels]
window: (y1, x1, y2, x2) in pixels. The area of the image where the real
image is (excluding the padding)
active_class_ids: List of class_ids available in the dataset from which
the image came. Useful if training on images from multiple datasets
where not all classes are present in all datasets.
"""
meta = np.array(
[image_id] + # size=1
list(image_shape) + # size=3
        list(window) +          # size=4 (y1, x1, y2, x2) in image coordinates
list(active_class_ids) # size=num_classes
)
return meta
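# Worked example (hypothetical values): image_id=7, image_shape=(128, 128, 3),
# window=(0, 0, 128, 128) and active_class_ids=[1, 1] (two classes, both
# active) produce [7, 128, 128, 3, 0, 0, 128, 128, 1, 1], a 1-D array of
# length 1 + 3 + 4 + num_classes.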
# Two functions (for Numpy and TF) to parse image_meta tensors.
def parse_image_meta(meta):
"""Parses an image info Numpy array to its components.
See compose_image_meta() for more details.
"""
image_id = meta[:, 0]
image_shape = meta[:, 1:4]
    window = meta[:, 4:8]   # (y1, x1, y2, x2) window of image in pixels
active_class_ids = meta[:, 8:]
return image_id, image_shape, window, active_class_ids
def parse_image_meta_graph(meta):
"""Parses a tensor that contains image attributes to its components.
See compose_image_meta() for more details.
meta: [batch, meta length] where meta length depends on NUM_CLASSES
"""
image_id = meta[:, 0]
image_shape = meta[:, 1:4]
window = meta[:, 4:8]
active_class_ids = meta[:, 8:]
return [image_id, image_shape, window, active_class_ids]
def mold_image(images, config):
"""Takes RGB images with 0-255 values and subtraces
the mean pixel and converts it to float. Expects image
colors in RGB order.
"""
return images.astype(np.float32) - config.MEAN_PIXEL
def unmold_image(normalized_images, config):
"""Takes a image normalized with mold() and returns the original."""
return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)
############################################################
# Miscellaneous Graph Functions
############################################################
def trim_zeros_graph(boxes):
    """Often boxes are represented with matrices of shape [N, 4] and
    are padded with zeros. This removes zero boxes.
    boxes: [N, 4] matrix of boxes.
    TODO: use this function to reduce code duplication
    """
    # Rows whose coordinates are all zero sum to zero; mask them out and
    # return both the trimmed boxes and the boolean mask.
    non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)
    boxes = tf.boolean_mask(boxes, non_zeros)
    return boxes, non_zeros
def batch_pack_graph(x, counts, num_rows):
"""Picks different number of values from each row
in x depending on the values in counts.
"""
outputs = []
for i in range(num_rows):
outputs.append(x[i, :counts[i]])
return tf.concat(outputs, axis=0)
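# Worked example: with x of shape [2, 3], counts=[1, 2] and num_rows=2, the
# result concatenates x[0, :1] and x[1, :2] into a 1-D tensor of 3 values.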
| [
"noreply@github.com"
] | MobileRoboticistsW21.noreply@github.com |
28ac291d1ae4422fbc31b029ac29566e60bb06d6 | aa01560e68a07033d4b24c4770966771349e2b4f | /src/jobs/migrations/0006_auto_20201209_1527.py | 6892aa2f3a57b13172292e19d88ab9cece02673e | [] | no_license | fluffcoding/solitaireHR | a0a357e1b19b955caae8df11ca92188cad79e217 | b97a29f9accc5b45cd62986b62673a6ba802771b | refs/heads/main | 2023-04-05T11:46:41.855323 | 2021-04-26T04:57:27 | 2021-04-26T04:57:27 | 322,067,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 933 | py | # Generated by Django 3.1.2 on 2020-12-09 15:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0005_jobapplication'),
]
operations = [
migrations.AddField(
model_name='jobapplication',
name='applied',
field=models.BooleanField(blank=True, default=True, null=True),
),
migrations.AddField(
model_name='jobapplication',
name='interviewed',
field=models.BooleanField(blank=True, null=True),
),
migrations.AddField(
model_name='jobapplication',
name='selected',
field=models.BooleanField(blank=True, null=True),
),
migrations.AddField(
model_name='jobapplication',
name='shortlisted',
field=models.BooleanField(blank=True, null=True),
),
]
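# This migration is applied like any other (assuming the app label "jobs"):
#   python manage.py migrate jobs 0006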
| [
"fluffcoding@gmail.com"
] | fluffcoding@gmail.com |
fa413eb6da07469734ae32798eb95a689d78e8c8 | d46d1e1bf1040ae5191d49511c697a4e7afc9b40 | /order/models.py | 0cf41f21c66dd00701fdc3611c1a7a7ddd04c563 | [] | no_license | aigerim955/DRF_last_project | acabd152b7d02bd8d78193c7480b5abecd9528a4 | a11c0879355b1e64ff8578517b6bd39c67263795 | refs/heads/master | 2023-04-19T17:06:39.214836 | 2021-05-15T07:54:51 | 2021-05-15T07:54:51 | 366,063,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | from django.db import models
from django.contrib.auth import get_user_model
from product.models import Product
ORDER_STATUS_CHOICES = (
('pending', 'Pending'),
    ('processing', 'Processing'),
    ('in_delivery', 'In delivery'),
('finished', 'Finished'),
('canceled', 'Canceled')
)
class OrderItem(models.Model):
product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='order_items')
quantity = models.DecimalField(max_digits=10, decimal_places=2)
price = models.DecimalField(max_digits=10, decimal_places=2)
class Order(models.Model):
user = models.ForeignKey(get_user_model(), on_delete=models.SET_NULL, related_name='orders', null=True)
created_at = models.DateTimeField(auto_now_add=True)
status = models.CharField(max_length=20, choices=ORDER_STATUS_CHOICES)
comment = models.TextField(blank=True)
address = models.CharField(max_length=255)
total = models.DecimalField(max_digits=10, decimal_places=2)
item = models.ManyToManyField(OrderItem)
| [
"akimbaeva.ai08@gmail.com"
] | akimbaeva.ai08@gmail.com |
238c62497aa6e0edfb73c50391541a606c36a8eb | 0bebd8f9000a77f8758a05ea9e1901363510f0e0 | /qualcomm.py | 74a3e6771dfed2c77299537650ab1a63047f44c2 | [] | no_license | PrabhaPandey/megathon_2k19 | a6fcc3fe8f73a3c3d341f5caa7719b57db27e5e4 | 104fcafbad1d497ff1ecede355dcc1127f91da41 | refs/heads/master | 2020-08-03T02:57:48.855659 | 2019-09-29T06:48:37 | 2019-09-29T06:48:37 | 211,605,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,097 | py | import re
import os
import nltk
from nltk.stem.snowball import SnowballStemmer
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
import csv
import numpy
import sys
from numpy import linalg as LA
docs = []
content = sys.argv[1]
csv_file = open(content, 'rb')
for line in csv_file.readlines():
# print(line)
page_content = line.decode().split(',')[1]
tokenizer = RegexpTokenizer(r'[a-zA-Z]+')
tokens = tokenizer.tokenize(page_content)
tokens = [x for x in tokens]
stemmer = SnowballStemmer('english')
words = []
for x in tokens:
words.append(stemmer.stem(x))
page_content = ' '.join(words)
docs.append(page_content)
abstract = []
abs = sys.argv[2]
csv_file = open(abs, 'rb')
for line in csv_file.readlines():
# print(line)
page_content = line.decode().split(',')[1]
tokenizer = RegexpTokenizer(r'[a-zA-Z]+')
tokens = tokenizer.tokenize(page_content)
tokens = [x for x in tokens]
stemmer = SnowballStemmer('english')
words = []
for x in tokens:
words.append(stemmer.stem(x))
page_content = ' '.join(words)
abstract.append(page_content)
WORD = re.compile(r'\w+')
def get_cosine(vec1, vec2):
intersection = set(vec1.keys()) & set(vec2.keys())
numerator = sum([vec1[x] * vec2[x] for x in intersection])
sum1 = sum([vec1[x]**2 for x in vec1.keys()])
sum2 = sum([vec2[x]**2 for x in vec2.keys()])
denominator = math.sqrt(sum1) * math.sqrt(sum2)
if not denominator:
return 0.0
else:
return float(numerator) / denominator
def text_to_vector(text):
words = WORD.findall(text)
return Counter(words)
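# Worked example: "a b b" vs "a b c" gives Counters {a:1, b:2} and
# {a:1, b:1, c:1}; the numerator is 1*1 + 2*1 = 3 and the denominator is
# sqrt(5) * sqrt(3), so the cosine is about 0.775.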
result_matrix = []
for d in docs:
cos_for_one_doc = []
for a in abstract:
vec1 = text_to_vector(d)
vec2 = text_to_vector(a)
cosine = get_cosine(vec1,vec2)
cos_for_one_doc.append(cosine)
result_matrix.append(cos_for_one_doc)
result_matrix = numpy.array(result_matrix).T.tolist()
a = numpy.asarray(result_matrix)
numpy.savetxt("similarity_matrix.csv", a, delimiter=",") | [
"prabha.pandey@students.iiit.ac.in"
] | prabha.pandey@students.iiit.ac.in |
5eff8ace561f9e24d707b69638d5cd318a521dd6 | 2224053e33ada6b64e3f43f0f97ea739ae9c418c | /DjangoEmail/venv/bin/sqlformat | 10c9801b2531a7ea1a83da84dc2344852ae9374c | [] | no_license | vishvajitrao/Django | b07fbf33fcd72daa7f2f7e7c901bb5d229ce5eff | f5969879687d20267df10a2d8cfbcabd912699eb | refs/heads/master | 2023-05-01T02:35:21.719487 | 2019-11-15T16:29:47 | 2019-11-15T16:29:47 | 219,923,712 | 0 | 0 | null | 2023-04-21T20:39:49 | 2019-11-06T06:05:15 | Python | UTF-8 | Python | false | false | 252 | #!/home/jiyo-india/Desktop/DjangoDemo/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"vishvajitrao@gmail.com"
] | vishvajitrao@gmail.com | |
cb51c98b1c6d352e88be15761c467964c0ef7eba | 51aa2894c317f60726fe9a778999eb7851b6be3e | /070_oop/006_enumerations/examples/enum_in_Python/003_enum_in_Python.py | 7807d992fe49805a35f9f5e1d57682d31b41b1e5 | [] | no_license | pranaymate/Python_Topics | dd7b288ab0f5bbee71d57080179d6481aae17304 | 33d29e0a5bf4cde104f9c7f0693cf9897f3f2101 | refs/heads/master | 2022-04-25T19:04:31.337737 | 2020-04-26T00:36:03 | 2020-04-26T00:36:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,540 | py | # Accessing Modes : Enum members can be accessed by two ways
# 1. By value :- In this method, the value of enum member is passed.
# 2. By name :- In this method, the name of enum member is passed.
# A member's value or name can also be accessed separately using the value or name attribute.
# Comparison : Enumerations support two types of comparisons
# 1. Identity :- These are checked using keywords is and is not.
# 2. Equality :- Equality comparisons of == and != types are also supported.
# Python code to demonstrate enumerations
# Access and comparison
# importing enum for enumerations
from __future__ import print_function
import enum
# creating enumerations using class
class Animal(enum.Enum):
dog = 1
cat = 2
lion = 3
# Accessing enum member using value
print ("The enum member associated with value 2 is : ", end="")
print (Animal(2))
# Accessing enum member using name
print ("The enum member associated with name lion is : ", end="")
print (Animal['lion'])
# Assigning enum member
mem = Animal.dog
# Displaying value
print ("The value associated with dog is : ", end="")
print (mem.value)
# Displaying name
print ("The name associated with dog is : ", end="")
print (mem.name)
# Comparison using "is"
if Animal.dog is Animal.cat:
print ("Dog and cat are same animals")
else:
print ("Dog and cat are different animals")
# Comparison using "!="
if Animal.lion != Animal.cat:
print ("Lions and cat are different")
else:
print ("Lions and cat are same") | [
"noreply@github.com"
] | pranaymate.noreply@github.com |
0f4e86f485a7e31a353b4a31d61e854e43b4ec0c | 05aa54fbdc2aba31d30fc6bac5fce14f251b12e7 | /Sutton_Barto/ch_6_temporal_difference.py | 2345959ac09c30e0220f6da956f36614f249dff5 | [] | no_license | adhish9899/Reinforcement | 01e716ddc8b698c645ae7a651a5c45ce28707f98 | 8e4183d698c66bd349b1b8fcc3411698327b4233 | refs/heads/master | 2020-07-30T10:18:07.366895 | 2020-06-21T12:39:39 | 2020-06-21T12:39:39 | 210,190,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,968 | py |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# world height
WORLD_HEIGHT = 4
# world width
WORLD_WIDTH = 12
# Probability of exploration (EPSILON)
EPSILON = 0.1
# Step Size (alpha)
ALPHA = 0.5
# Gamma for Q-learning and expected SARSA
GAMMA = 0.1
# all possible ACTIONS
ACTION_UP = 0
ACTION_DOWN = 1
ACTION_LEFT = 2
ACTION_RIGHT = 3
ACTIONS = [ACTION_UP, ACTION_DOWN, ACTION_LEFT, ACTION_RIGHT]
# initial state action pair values
START = [3,0]
GOAL = [3,11]
def step(state, action):
i,j = state
if action == ACTION_UP:
next_state = [max(0, i-1), j]
elif action == ACTION_DOWN:
next_state = [min(WORLD_HEIGHT - 1, i+1), j]
elif action == ACTION_LEFT:
next_state = [i, max(0, j-1)]
elif action == ACTION_RIGHT:
next_state = [i, min(WORLD_WIDTH - 1, j+1)]
else:
raise IndexError("Your action {} is not valid".format(action))
reward = -1
if (action == ACTION_DOWN and i == 2 and 1 <= j <= 10) or (action == ACTION_RIGHT and state == START):
reward = -100
next_state = START
return next_state, reward
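# Example: from START = [3, 0], ACTION_RIGHT steps into the cliff, so the
# agent receives reward -100 and is sent back to START; ACTION_UP from START
# moves to [2, 0] with the usual reward of -1.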
# reward for each action in each state
# action_rewards = np.zeros(WORLD_HEIGHT, WORLD_WIDTH, 4)
# action_rewards[:,:,:] = -1.0
# action_rewards[2, 1:11, ACTION_DOWN] = -100
# action_rewards[3, 0 , ACTION_RIGHT] = -100
# Choose an action based on epsilon greedy policy
def choose_action(state, q_value):
if np.random.binomial(1, EPSILON) == 1:
return np.random.choice(ACTIONS)
else:
values_ = q_value[state[0], state[1], :]
return np.random.choice([action_ for action_, value in enumerate(values_) if value == np.max(values_)])
# an episode with SARSA
# @q_value: values for the state-action pairs; will be updated in place
# @expected: if True, use the expected SARSA algorithm
# @step_size: step size for updating
# @return: total rewards within this episode
def sarsa(q_value, expected=False, step_size=ALPHA):
state = START
action = choose_action(state, q_value)
rewards = 0.0
while state != GOAL:
next_state, reward = step(state, action)
next_action = choose_action(next_state, q_value)
rewards += reward
if not expected:
target = q_value[next_state[0], next_state[1], next_action]
else:
# Calculate the expected value of new state
target = 0.0
q_next = q_value[next_state[0], next_state[1], :]
best_action = np.argwhere(q_next == np.max(q_next))
for action_ in ACTIONS:
if action_ in best_action:
target += ((1 - EPSILON)/len(best_action) + EPSILON/len(ACTIONS)) * q_value[next_state[0], next_state[1], action_]
else:
target += (EPSILON/len(ACTIONS)) * q_value[next_state[0], next_state[1], action_]
target *= GAMMA
#Updating current state action values
q_value[state[0], state[1], action] += step_size * (reward + target - q_value[state[0], state[1], action])
state = next_state
action = next_action
return rewards
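# Worked SARSA update with this file's constants (ALPHA = 0.5, GAMMA = 0.1):
# if reward = -1, Q(s,a) = 0 and Q(s',a') = -2, the target is
# -1 + 0.1 * (-2) = -1.2, so Q(s,a) moves to 0 + 0.5 * (-1.2 - 0) = -0.6.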
# an episode with Q-Learning
# @q_value: values for the state-action pairs; will be updated in place
# @step_size: step size for updating
# @return: total rewards within this episode
def q_learning(q_value, step_size=ALPHA):
state = START
rewards = 0.0
while state != GOAL:
action = choose_action(state, q_value)
next_state, reward = step(state, action)
rewards += reward
# Q Learning update
q_value[state[0], state[1], action] += step_size * (reward + GAMMA * np.max(q_value[next_state[0], next_state[1], :]) -
q_value[state[0], state[1], action])
state = next_state
return rewards
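# Unlike SARSA, the Q-learning target uses max_a Q(s', a) regardless of the
# action actually taken next, which makes it off-policy. With the same
# constants, if max_a Q(s', a) = -1 the update is
# 0 + 0.5 * (-1 + 0.1 * (-1) - 0) = -0.55.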
# Print optimal policy
def print_optimal_policy(q_value):
optimal_policy = []
for i in range(0, WORLD_HEIGHT):
optimal_policy.append([])
for j in range(0, WORLD_WIDTH):
if [i, j] == GOAL:
optimal_policy[-1].append("G")
continue
best_action = np.argmax(q_value[i, j, :])
if best_action == ACTION_UP:
optimal_policy[-1].append("U")
elif best_action == ACTION_LEFT:
optimal_policy[-1].append("L")
elif best_action == ACTION_DOWN:
optimal_policy[-1].append("D")
elif best_action == ACTION_RIGHT:
optimal_policy[-1].append("R")
for row in optimal_policy:
print(row)
# Use multiple runs instead of a single run and a sliding window
# However the optimal policy converges well with a single run
# SARSA converges to the safe path, while Q-learning converges to the optimal path
def figure_6_4():
# Episodes for each run
episodes = 500
# Independent run
runs = 50
rewards_sarsa = np.zeros(episodes)
rewards_q_learning = np.zeros(episodes)
for r in range(runs):
print(r)
q_sarsa = np.zeros((WORLD_HEIGHT, WORLD_WIDTH, 4))
q_q_learning = np.copy(q_sarsa)
for i in range(episodes):
rewards_sarsa[i] += sarsa(q_sarsa)
rewards_q_learning[i] += q_learning(q_q_learning)
# Averaging over independent runs
rewards_sarsa /= runs
rewards_q_learning /= runs
plt.plot(rewards_sarsa, label="SARSA")
plt.plot(rewards_q_learning, label="Q-LEARNING")
plt.xlabel("Episodes")
plt.ylabel("Sum of rewards during episode")
plt.ylim([-100,0])
plt.legend()
plt.savefig("figure_6_4.png")
plt.close("all")
# Display optimal policy
print("SARSA OPTIMAL POLICY")
print_optimal_policy(q_sarsa)
print("Q-LEARNING OPTIMAL POLICY")
print_optimal_policy(q_q_learning)
# THE ACTUAL EXPERIMENT USES 100,000 EPISODES AND 50,000 RUNS TO GET THE FULLY AVERAGED PERFORMANCE
def figure_6_6():
step_sizes = np.arange(0.1, 1.1, 0.1)
    episodes = 1000
runs = 10
ASY_SARSA = 0
ASY_EXPECTED_SARSA = 1
ASY_QLEARNING = 2
INT_SARSA = 3
INT_EXPECTED_SARSA = 4
INT_QLEARNING = 5
methods = range(0,6)
performances = np.zeros((6, len(step_sizes)))
for run in range(runs):
for ind, step_size in list(zip(range(0, len(step_sizes)), step_sizes)):
q_sarsa = np.zeros((WORLD_HEIGHT, WORLD_WIDTH, 4))
q_expected_sarsa = np.copy(q_sarsa)
q_q_learning = np.copy(q_sarsa)
            for ep in range(episodes):
sarsa_reward = sarsa(q_sarsa, expected=False, step_size=step_size)
sarsa_expected_reward = sarsa(q_expected_sarsa, expected=True, step_size=step_size)
q_learning_reward = q_learning(q_q_learning, step_size=step_size)
performances[ASY_SARSA, ind] += sarsa_reward
performances[ASY_EXPECTED_SARSA, ind] += sarsa_expected_reward
performances[ASY_QLEARNING, ind] += q_learning_reward
if ep < 100:
performances[INT_SARSA, ind] += sarsa_reward
performances[INT_EXPECTED_SARSA, ind] += sarsa_expected_reward
performances[INT_QLEARNING, ind] += q_learning_reward
    performances[:3, :] /= episodes * runs
performances[3:, :] /= 100 * runs
labels = ['Asymptotic Sarsa', 'Asymptotic Expected Sarsa', 'Asymptotic Q-Learning',
'Interim Sarsa', 'Interim Expected Sarsa', 'Interim Q-Learning']
for method, label in zip(methods, labels):
plt.plot(step_sizes, performances[method, :], label=label)
plt.xlabel('alpha')
plt.ylabel('reward per episode')
plt.legend()
plt.savefig('../images/figure_6_6.png')
plt.close()
if __name__ == "__main__":
figure_6_4()
figure_6_6()
| [
"adhish@niveshi.com"
] | adhish@niveshi.com |
be4dc6b82a739c6373f3f76ca9b40558b0e72d4b | f15449e438b0b799a3866ba21243924ce0e4fa2d | /survey/migrations/0026_auto__add_field_paper_step.py | d21b49cfb317dedb70e0a7dafc09a7da47aa375e | [] | no_license | xmduhan/qisite | 46af79d0e4d1af814298862cfaa18c6f7ddf3a74 | 2c9d7513c3e0cd483341dc457a8d289e5e174f20 | refs/heads/master | 2021-01-17T08:44:29.826082 | 2020-02-07T11:22:29 | 2020-02-07T11:22:29 | 14,419,020 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 22,819 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Paper.step'
db.add_column(u'survey_paper', 'step',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Paper.step'
db.delete_column(u'survey_paper', 'step')
models = {
u'account.user': {
'Meta': {'ordering': "['name']", 'object_name': 'User'},
'birthDate': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'userCreated'", 'null': 'True', 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'userModified'", 'null': 'True', 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'survey.branch': {
'Meta': {'object_name': 'Branch'},
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'branchCreated_set'", 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'branchModified_set'", 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'nextQuestion': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'fromBranch'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['survey.Question']"}),
'ord': ('django.db.models.fields.IntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'survey.custlist': {
'Meta': {'object_name': 'CustList'},
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'custListCreated_set'", 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'descrition': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'custListModified_set'", 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'survey.custlistitem': {
'Meta': {'object_name': 'CustListItem'},
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'custListItemCreated_set'", 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'custList': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'custListItem_set'", 'to': u"orm['survey.CustList']"}),
'defineInfo_set': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.DefineInfo']", 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'custListItemModified_set'", 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'survey.defineinfo': {
'Meta': {'object_name': 'DefineInfo'},
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'defineInfoCreated_set'", 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'defineInfoModified_set'", 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'ord': ('django.db.models.fields.IntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'survey.paper': {
'Meta': {'ordering': "['title']", 'object_name': 'Paper'},
'code': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'paperCreated_set'", 'null': 'True', 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inOrder': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'lookBack': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'paperModified_set'", 'null': 'True', 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'paging': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'questionNumStyle': ('django.db.models.fields.CharField', [], {'default': "'123'", 'max_length': '50'}),
'step': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'paperReversed_set'", 'null': 'True', 'to': u"orm['survey.Survey']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'T'", 'max_length': '10'})
},
u'survey.papercatalog': {
'Meta': {'object_name': 'PaperCatalog'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'paperCatalogCreated_set'", 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'paperCatalogModified_set'", 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'ord': ('django.db.models.fields.IntegerField', [], {}),
'paper_set': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['survey.Paper']", 'through': u"orm['survey.PaperCatalogPaper']", 'symmetrical': 'False'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.PaperCatalog']", 'null': 'True', 'blank': 'True'})
},
u'survey.papercatalogpaper': {
'Meta': {'object_name': 'PaperCatalogPaper'},
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'paperCatalogPaperCreated_set'", 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'paperCatalogPaperModified_set'", 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'ord': ('django.db.models.fields.IntegerField', [], {}),
'paper': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Paper']"}),
'paperCatalog': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.PaperCatalog']"})
},
u'survey.question': {
'Meta': {'ordering': "['ord']", 'object_name': 'Question'},
'branchNumStyle': ('django.db.models.fields.CharField', [], {'default': "'ABC'", 'max_length': '50'}),
'confused': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'contentLength': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questionCreated_set'", 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questionModified_set'", 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'nextQuestion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']", 'null': 'True', 'blank': 'True'}),
'ord': ('django.db.models.fields.IntegerField', [], {}),
'paper': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Paper']", 'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'valueMax': ('django.db.models.fields.FloatField', [], {'default': '10', 'null': 'True', 'blank': 'True'}),
'valueMin': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
u'survey.questioncatalog': {
'Meta': {'object_name': 'QuestionCatalog'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questionCatalogCreated_set'", 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questionCatalogModified_set'", 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'ord': ('django.db.models.fields.IntegerField', [], {}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.QuestionCatalog']", 'null': 'True', 'blank': 'True'}),
'question_set': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['survey.Question']", 'through': u"orm['survey.QuestionCatalogQuestion']", 'symmetrical': 'False'})
},
u'survey.questioncatalogquestion': {
'Meta': {'object_name': 'QuestionCatalogQuestion'},
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questionCatalogQuestionCreated_set'", 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questionCatalogQuestionModified_set'", 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'ord': ('django.db.models.fields.IntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}),
'questionCatalog': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.QuestionCatalog']"})
},
u'survey.resource': {
'Meta': {'object_name': 'Resource'},
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'resourceCreated_set'", 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'height': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'resourceModified_set'", 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}),
'resourceType': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'resourceUrl': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'width': ('django.db.models.fields.FloatField', [], {})
},
u'survey.sample': {
'Meta': {'object_name': 'Sample'},
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sampleCreated_set'", 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'finished': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ipAddress': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'isValid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sampleModified_set'", 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'paper': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Paper']"}),
'targetCust': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.TargetCust']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['account.User']", 'null': 'True', 'blank': 'True'})
},
u'survey.sampleitem': {
'Meta': {'object_name': 'SampleItem'},
'branch_set': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['survey.Branch']", 'symmetrical': 'False'}),
'content': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sampleItemCreated_set'", 'null': 'True', 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sampleItemModified_set'", 'null': 'True', 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}),
'sample': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Sample']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0'})
},
u'survey.survey': {
'Meta': {'object_name': 'Survey'},
'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'bonus': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'code': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveyCreated_set'", 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'custList': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['survey.CustList']", 'null': 'True', 'blank': 'True'}),
'endTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 9, 20, 0, 0)'}),
'fee': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'hardCost': ('django.db.models.fields.FloatField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ipLimit': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'lastSmsSendTime': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'macLimit': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveyModified_set'", 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'paper': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'survey_set'", 'null': 'True', 'to': u"orm['survey.Paper']"}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'paused': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pay': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'publishTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'resubmit': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '5'}),
'targetOnly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'validSampleLimit': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'viewResult': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'survey.targetcust': {
'Meta': {'object_name': 'TargetCust'},
'createBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'targetCustCreated_set'", 'to': u"orm['account.User']"}),
'createTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'defineInfo_set': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.DefineInfo']", 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modifyBy': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'targetCustModified_set'", 'to': u"orm['account.User']"}),
'modifyTime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'targetCust_set'", 'to': u"orm['survey.Survey']"}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['survey'] | [
"xmduhan@gmail.com"
] | xmduhan@gmail.com |
50599df0827e95c9355ad29846dfe137719ed10f | 892e88c974eb25d4d3a4c05c0054abc089c9fd06 | /guess.py | 4ad6867350b13c2f0a7789a16414f47f6868c173 | [] | no_license | SanketNalage/guess_number | 67b3c2a4194fff4987b18104226e14dbd0214e98 | 435e7f04bf1f5c80e10be8c1dfc312bfadd901ac | refs/heads/main | 2023-06-03T00:44:48.449125 | 2021-06-26T07:22:39 | 2021-06-26T07:22:39 | 380,437,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | from random import randint
start = 1
end = 1000
value = randint(start ,end)
print("The Computer choose a number Between ", start,"and", end)
guess = None
while guess != value:
text = input("Guess the number: ")
guess = int(text)
if guess < value:
print("The number is Higher")
elif guess > value:
print("The number is Lower")
print("Congratulation!!! You guessed the number. You Won 🔥💥")
| [
"sanketnalage88@gmail.com"
] | sanketnalage88@gmail.com |
5475fd8aff569d6a98572d9810a4cc37fac0e1ec | 3c3da21e91168dc4b57ecd6bd5b3804819ff940b | /Video 21/exceptionhandling.py | f99ea8993e93e6ccee0449066a018e4dfe049739 | [] | no_license | fakhtar/CodeIsLife | 99fc6cfd92d7b70fbec65baae2ffde04d29f1fb1 | 33c21e798129b70cb76be2db83b67c06229753ac | refs/heads/master | 2021-01-15T05:04:07.758966 | 2020-12-15T01:12:18 | 2020-12-15T01:12:18 | 242,886,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | try:
f = open('testfile.txt')
except FileNotFoundError as e:
print(e)
print('This is where other code would go if the file does not exist.')
else:
print(f.read())
f.close()
| [
"faisalakhtar@yahoo.com"
] | faisalakhtar@yahoo.com |
01d9dee49178a58b0de0cdc11cb1e3eaf6480bdf | 96be63c5761664f64bd9d85f44d122cf10ce8a47 | /Tarea 5/Tarea 5.py | 96ec2aba4156519fb801acfe1474d62fee0118c8 | [] | no_license | ivanrojo07/Python | b8a0be0ec3d821ebd796f4ff0c94c808131b9a89 | ea862d071f89af55807ce68e51d7878aa44c90ed | refs/heads/master | 2021-01-20T07:56:19.692316 | 2017-05-02T18:42:37 | 2017-05-02T18:42:37 | 85,637,914 | 0 | 0 | null | null | null | null | ISO-8859-3 | Python | false | false | 439 | py | # -*- coding: cp1252 -*-
#Rojo Orea Guillermo Iván
#cargaPalabras("words.txt") o escribir la dirección del directorio
def cargaPalabras(NombredelDocumento):
palabras=[]
lectura = open(NombredelDocumento)
lines=lectura.read().splitlines()
for i in range(len(lines)):
data = lines[i]
data = data.split()
palabras=(data)
return len(palabras)
lectura.close
print (cargaPalabras("words.txt"))
| [
"ivanrojo07@gmail.com"
] | ivanrojo07@gmail.com |
2724f23a8a000db05d4b6e6ee7a76e6dc88ed008 | 7673f49f24af38805ff3b4e972cadb217269ef02 | /projects/models.py | 3a748d6c1fb50ade765453881e7ae2c6a7dcb18d | [] | no_license | guerrerj/Django-Porfiolio-Learning-Project | f1eeee258c1b4e73a55b7248b4512310d09576a3 | e8d3166f36d27d3025548a1b7d7f39c5b39546b5 | refs/heads/master | 2022-12-26T18:59:12.466777 | 2020-09-24T23:45:03 | 2020-09-24T23:45:03 | 298,415,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | from django.db import models
from django.conf import settings
class Project(models.Model):
title = models.CharField(max_length=100)
description = models.TextField()
technology = models.CharField(max_length=20)
image = models.FilePathField(path="projects/static/img") #required full path
| [
"jose.guerrero10@yahoo.com"
] | jose.guerrero10@yahoo.com |
66f1b73f1c4b0ccaf6237b68812b26724d057818 | 391d7876b022d8959f78b0d99865f0adbe778aab | /cda_data.py | 4c0ac59212453e6650a236fd2e5449c6f5ee27af | [] | no_license | YongXie-ICMM/CDR-SR | 89665b683ed7ebeec338ef389fe2cf3eb1810735 | 5a11894fd1bfadfda3c3a48b32f214dd91096ef4 | refs/heads/master | 2023-04-17T02:03:48.937807 | 2019-06-28T03:18:09 | 2019-06-28T03:18:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,145 | py | # ==============================================================================
# Autor: Joseph Jiang
#
# cda_data.py: data management
# 1. train data: preprocess , batch API
# 2. test data: set5 and set14, test data API
# 3. cda generate data: gen data according to 2
# ==============================================================================
from PIL import Image
import numpy as NP
import os as OS
import tensorflow as tf
import random as Rand
# define color format to be used during init, training, test.
# 1: RGB 2: YCbCr 3: YCbCr, but Y for training only.
CDA_COLOR_FORMAT = 1
#----------------------------------------------------------------------------------------
# BMP file operation begin.
#----------------------------------------------------------------------------------------
#Get 9*9 block data
#---------------------------------------------------------
def getdatafromarray(img_arr, row_b, col_b, row_num, col_num):
# row * col * 1, ele contains RGB
red_arr = NP.zeros(81, 'uint8');
green_arr = NP.zeros(81, 'uint8');
blue_arr = NP.zeros(81, 'uint8');
row_start = row_b * 9
row_end = row_start + row_num
col_start = col_b * 9
col_end = col_start + col_num
index = 0;
for i in range(row_start, row_end):
for j in range(col_start, col_end):
color = img_arr[i][j] # (R, G, B)
#print("------------color----", color)
red_arr[index] = color[0]
green_arr[index] = color[1]
blue_arr[index] = color[2]
index += 1
#print("red_arr==", red_arr.shape, red_arr)
#print("green_arr==", red_arr.shape, green_arr)
#print("blue_arr==", red_arr.shape, blue_arr)
return red_arr, green_arr, blue_arr
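# Worked example: row_b=1, col_b=2 with row_num=col_num=9 reads the pixels in
# rows 9..17 and columns 18..26, i.e. the 9*9 block one block down and two
# blocks across.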
#Get 9*9 block data list from one bmp file
#flag: 1: LR 2: HR
#CDA_COLOR_FORMAT: 1 RGB 2: YCbCr
#---------------------------------------------------------
def getbmparray_orig(filename, flag):
    im_rgb = Image.open(filename)  # read the image
#print("getbmparray_orig---",filename, im_rgb.width, im_rgb.height)
#print("getbmparray_orig---mode------", im_rgb.mode, flag)
#print("getbmparray_orig--CDA_COLOR_FORMAT=", CDA_COLOR_FORMAT)
im = im_rgb
if CDA_COLOR_FORMAT == 2 or CDA_COLOR_FORMAT == 3:
im = im_rgb.convert("YCbCr")
#print("-getbmparray_orig-converted mode------", im.mode, flag)
#init variable
img_arr = ""
#print("mode------", im.mode, flag)
#Get HR image to array
if flag == 2 :
img_arr = NP.array(im)
#print("hr_img_arr---", img_arr.shape, img_arr.dtype)
else:
        orgsize = im.width, im.height
        scalesize = im.width * 3, im.height * 3
        # Image.resize() returns a new image (it does not modify in place),
        # so the results must be reassigned or the LR data would come out
        # identical to the HR data.
        im = im.resize(scalesize)
        im = im.resize(orgsize, Image.BICUBIC)
        #Get LR image to array
        img_arr = NP.array(im)
#print("lr_img_arr---", img_arr.shape, img_arr.dtype)
im.close()
return img_arr
def readfromfile_rgb(filename, flag, callback = ''):
#init variable
img_arr = ""
rb_num = 0
cb_num = 0
block_num = 0
img_arr = getbmparray_orig(filename, flag)
#print("lr_img_arr---", img_arr.shape, img_arr.dtype)
#cal block to read.
rb_num = int(img_arr.shape[0]/9)
cb_num = int(img_arr.shape[1]/9)
block_num = int(rb_num * cb_num)
#print("rb_num=", rb_num, "---cb_num=", cb_num)
#print("img_arr---read-", img_arr)
shape_result = [block_num, 243] # 81 * 3
result_data = NP.zeros(shape_result, 'uint8');
block_count = 0;
for rr in range(0, rb_num):
for cc in range(0, cb_num):
red, green, blue = getdatafromarray(img_arr, rr, cc, 9, 9)
            #color preprocessing
for kk in range(81):
ll = kk * 3
result_data[block_count][ll] = red[kk]
result_data[block_count][ll+1] = green[kk]
result_data[block_count][ll+2] = blue[kk]
block_count += 1
if callback != '' :
callback(rb_num, cb_num)
#print("result_data-----", result_data)
return result_data
# 1: yes 0 : no
def is_bmp_file(filename):
fnamelen = len(filename)
if fnamelen < 4 :
return 0
    file_name_ext = filename[fnamelen-4:fnamelen]
    file_name_ext = file_name_ext.lower()  # str.lower() returns a new string, so keep the result
    #print("fnamelen = ", file_name_ext)
if file_name_ext != ".bmp" :
#print("file ext ...")
return 0
    im_rgb = Image.open(filename)  # read the image
#print("getbmparray_orig---mode------", im_rgb.mode)
if im_rgb.mode != "RGB" :
#print("EEEE")
return 0
im_rgb.close()
return 1
#--------------------------------------------------------------------
# new add for overlaping process...
#--------------------------------------------------------------------
BMP_OVERLAPPED_PIX_NUM = 3
BMP_OVERLAPPED_POS_COEF = 6
def getdatafromarray_overlap(img_arr, row_b, col_b, row_num, col_num):
# row * col * 1, ele contains RGB
red_arr = NP.zeros(81, 'uint8');
green_arr = NP.zeros(81, 'uint8');
blue_arr = NP.zeros(81, 'uint8');
row_start = row_b * BMP_OVERLAPPED_POS_COEF
row_end = row_start + row_num
col_start = col_b * BMP_OVERLAPPED_POS_COEF
col_end = col_start + col_num
index = 0;
for i in range(row_start, row_end):
for j in range(col_start, col_end):
color = img_arr[i][j] # (R, G, B)
#print("------------color----", color)
red_arr[index] = color[0]
green_arr[index] = color[1]
blue_arr[index] = color[2]
index += 1
#print("red_arr==", red_arr.shape, red_arr)
#print("green_arr==", red_arr.shape, green_arr)
#print("blue_arr==", red_arr.shape, blue_arr)
return red_arr, green_arr, blue_arr
#Get 9*9 block data list from one bmp file
#flag: 1: LR 2: HR
#CDA_COLOR_FORMAT: 1 RGB 2: YCbCr
#---------------------------------------------------------
def readfromfile_rgb_overlap(filename, flag, callback = ''):
#init variable
img_arr = ""
rb_num = 0
cb_num = 0
block_num = 0
img_arr = getbmparray_orig(filename, flag)
#print("lr_img_arr---", img_arr.shape, img_arr.dtype)
#cal block to read.
rb_num = int((img_arr.shape[0] - 3)/BMP_OVERLAPPED_POS_COEF)
cb_num = int((img_arr.shape[1] - 3)/BMP_OVERLAPPED_POS_COEF)
block_num = int(rb_num * cb_num)
#print("rb_num=", rb_num, "---cb_num=", cb_num)
#print("img_arr---read-", img_arr)
shape_result = [block_num, 243] # 81 * 3
result_data = NP.zeros(shape_result, 'uint8');
block_count = 0;
for rr in range(0, rb_num):
for cc in range(0, cb_num):
red, green, blue = getdatafromarray_overlap(img_arr, rr, cc, 9, 9)
            #color preprocessing
for kk in range(81):
ll = kk * 3
result_data[block_count][ll] = red[kk]
result_data[block_count][ll+1] = green[kk]
result_data[block_count][ll+2] = blue[kk]
block_count += 1
#im.close()
if callback != '' :
callback(rb_num, cb_num)
#print("result_data-----", result_data)
return result_data
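# With a 9*9 block and a stride of BMP_OVERLAPPED_POS_COEF = 6, adjacent
# blocks overlap by 3 pixels. Example: a 99-pixel-high image yields
# rb_num = (99 - 3) / 6 = 16 rows of overlapping blocks.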
#----------------------------------------------------------------------------------------
# BMP file operation end.
#----------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------
# Training batch data generation begin
#----------------------------------------------------------------------------------------
#Global variable define
#---------------------------------------------------------
train_data_dir = OS.getcwd() + "/data/training/"
train_filelist = OS.listdir(train_data_dir)
train_file_num = len(train_filelist)
train_file_index = 0
preprocess_data_dir = OS.getcwd() + "/data/preprocess"
preprocess_data_dir_lr = preprocess_data_dir + "/lr"
preprocess_data_dir_hr = preprocess_data_dir + "/hr"
#for Generate batch npy data
g_batch_train_data_size = 100
g_batch_shape = [g_batch_train_data_size, 243] # 81 * 3
g_batch_data = NP.zeros(g_batch_shape, 'uint8');
g_batch_file_count = 0
g_batch_cur_index = 0
# for get data
g_batch_shape_file_num = 0
def check_train_dir_files():
global train_filelist
global train_file_num
#print("check_train_dir_files-----")
for i in range(train_file_num):
filename = train_data_dir + "/" + train_filelist[i]
flag = is_bmp_file(filename)
if flag == 0:
OS.remove(filename)
train_filelist = OS.listdir(train_data_dir)
train_file_num = len(train_filelist)
def get_batch_data_file_dir(flag) :
tmp_dir = preprocess_data_dir_lr
if flag == 2 :
tmp_dir = preprocess_data_dir_hr
return tmp_dir
def get_batch_data_file_name(index, flag) :
batch_dir = get_batch_data_file_dir(flag)
batch_file_name1 = batch_dir + "/batch" + str(index) + ".npy"
return batch_file_name1
# flag: 1: LR 2: HR
def save_batch_data_to_file(flag):
global g_batch_file_count
#if g_batch_file_count < 3 :
# print("batch----", g_batch_file_count)
# print(g_batch_data)
batch_file_name = get_batch_data_file_name(g_batch_file_count, flag)
NP.save(batch_file_name, g_batch_data)
g_batch_file_count += 1
# flag: 1: LR 2: HR
def save_bmp_array_by_batch(bmp_data_arr, flag):
global g_batch_cur_index
rnum = bmp_data_arr.shape[0]
for i in range(rnum) :
for j in range(243) : # pixel copy
g_batch_data[g_batch_cur_index][j] = bmp_data_arr[i][j]
if CDA_COLOR_FORMAT == 3: # Only input Y
if j % 3 != 0 : # CbCr set to 0
g_batch_data[g_batch_cur_index][j] = 0
g_batch_cur_index += 1
if g_batch_cur_index == g_batch_train_data_size :
save_batch_data_to_file(flag)
g_batch_cur_index = 0
#create the preprocess dir
def batch_dir_init() :
if not OS.path.exists(preprocess_data_dir):
OS.mkdir(preprocess_data_dir)
if OS.path.exists(preprocess_data_dir_lr):
#OS.rmdir(preprocess_data_dir_lr)
cmd = "rm -rf " + preprocess_data_dir_lr
OS.system(cmd)
if OS.path.exists(preprocess_data_dir_hr):
#OS.rmdir(preprocess_data_dir_hr)
cmd = "rm -rf " + preprocess_data_dir_hr
OS.system(cmd)
OS.mkdir(preprocess_data_dir_lr)
OS.mkdir(preprocess_data_dir_hr)
def get_train_data_from_specific_file_rgb(index, img_flag):
result = ""
if train_file_num > 0 :
if index < train_file_num :
filename = train_data_dir + train_filelist[index]
#print("-----get_train_data_from_specific_file_rgb--------", filename)
result = readfromfile_rgb(filename, img_flag)
return result
#generate training preprocess data
def gen_train_data_batch_npy_file(flag) :
global g_batch_file_count
global g_batch_cur_index
g_batch_file_count = 0
g_batch_cur_index = 0
#bmp data preprocess
bmp_data_arr = ""
#for i in range(1):
for i in range(train_file_num):
bmp_data_arr = get_train_data_from_specific_file_rgb(i, flag)
#print("bmp_data_arr.shape = ", bmp_data_arr.shape)
#print("bmp_data_arr : ", bmp_data_arr)
#print("------------------------------------------------")
save_bmp_array_by_batch(bmp_data_arr, flag)
#preprocess: convert bmp files to batch data used for training.
#---------------------------------------------------------
def gen_train_data_batch_npy_file_all():
print("-----gen_train_data_batch_npy_file_all-----")
check_train_dir_files()
batch_dir_init()
#LR batch init.
gen_train_data_batch_npy_file(1)
#HR batch init.
gen_train_data_batch_npy_file(2)
def load_train_batch_from_npy_file(index, flag) :
filename = get_batch_data_file_name(index, flag)
batach_data = NP.load(filename)
#print("load_train_batch_npy_file--------", index, filename)
#print(batach_data)
return batach_data
#
def generate_train_batch_random_index():
global g_batch_shape_file_num
if g_batch_shape_file_num == 0 :
batch_dir = get_batch_data_file_dir(1)
batch_filelist = OS.listdir(batch_dir)
g_batch_shape_file_num = len(batch_filelist)
if g_batch_shape_file_num <= 0 :
return -1
#print("g_batch_shape_file_num----", g_batch_shape_file_num)
index = Rand.randint(0, g_batch_shape_file_num - 1)
#print("index ----", index)
return index
# uint8 to float32
# NP.zeros(81, 'uint8');
def format_batch_data_to_float32(batch_data):
#print("-----------format_batch_data_to_float32---", batch_data)
#print("------------", batch_data.shape)
rnum = batch_data.shape[0]
cnum = batch_data.shape[1]
batch_float_arr = ""
    if rnum <= 0 or cnum <= 0:  # either dimension empty means no usable data
return batch_float_arr
    #-------------------------------------------------------------------
    # values must be kept small for training.
    # color channels in [0, 255] are scaled to [0, 1] by dividing by 255
    # (an earlier variant used color/256 - 0.5 for a [-0.5, 0.5] range)
    #-------------------------------------------------------------------
batch_float_arr = NP.zeros([rnum, cnum], 'float32')
for i in range(rnum):
for j in range(cnum) :
batch_float_arr[i][j] = batch_data[i][j]
            # RGB: scaled by 255, to be compatible with the training data.
            # YCbCr: training data using 255; 255 is better!!!
if CDA_COLOR_FORMAT == 1 :
batch_float_arr[i][j] = batch_float_arr[i][j]/255
if CDA_COLOR_FORMAT == 2:
batch_float_arr[i][j] = batch_float_arr[i][j]/255
#Y only.
if CDA_COLOR_FORMAT == 3:
batch_float_arr[i][j] = batch_float_arr[i][j]/255
if j % 3 != 0 :
batch_float_arr[i][j] = 0
#print("------batch_data----RGB--------")
#print(batch_data)
#print("------batch_data----float32--------")
#print(batch_float_arr)
return batch_float_arr
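# Example: a raw channel value of 128 becomes 128/255 ≈ 0.502 after scaling.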
def load_train_batch_random(flag) :
batch_data = ""
index = generate_train_batch_random_index()
if index < 0 :
return
batch_data = load_train_batch_from_npy_file(index, flag)
return format_batch_data_to_float32(batch_data)
#
# define API function for train use
#---------------------------------------------------------
def load_train_batch_random_lr_and_hr_old() :
batch_data = ""
index = generate_train_batch_random_index()
if index < 0 :
return
batch_data1 = load_train_batch_from_npy_file(index, 1)
batch_data2 = load_train_batch_from_npy_file(index, 2)
#print("--------batch_data1-------", batch_data1)
#print("--------batch_data2-------", batch_data2)
lr_data = format_batch_data_to_float32(batch_data1)
hr_data = format_batch_data_to_float32(batch_data2)
return lr_data, hr_data
def load_train_batch_random_lr_old():
return load_train_batch_random(1)
def load_train_batch_random_hr_old():
return load_train_batch_random(2)
#-----------------------------------------------------------------------------
# second times training data. based on previous
#-----------------------------------------------------------------------------
def generate_train_batch_random_pos(random_num):
pos = Rand.randint(0, random_num)
#print("position ----", pos, random_num)
return pos
def load_train_batch_random_s2(flag) :
global g_batch_train_data_size
global g_batch_shape
index1 = generate_train_batch_random_index()
index2 = generate_train_batch_random_index()
if index1 < 0 or index2 < 0:
return
#debug
#print("-load_train_batch_random_s2--", index1, index2)
#pos
random_num = 2 * g_batch_train_data_size - 1
batch_data1 = load_train_batch_from_npy_file(index1, flag)
batch_data2 = load_train_batch_from_npy_file(index2, flag)
#print("batch_data1--", batch_data1)
#print("batch_data2--", batch_data2)
bdata = ""
batch_data = NP.zeros(g_batch_shape, 'uint8');
for i in range(g_batch_train_data_size) :
pos = generate_train_batch_random_pos(random_num)
#print("pos --", pos)
if pos < g_batch_train_data_size :
bdata = batch_data1
else :
bdata = batch_data2
pos = pos - g_batch_train_data_size
for j in range(243) :
batch_data[i][j] = bdata[pos][j]
return format_batch_data_to_float32(batch_data)
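# The mixing rule above: each of the g_batch_train_data_size output rows is
# drawn from a random position across the two chosen batch files, so one
# shuffled batch blends blocks from two files.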
def load_train_batch_random_s2_lr_hr() :
global g_batch_train_data_size
index1 = generate_train_batch_random_index()
index2 = generate_train_batch_random_index()
if index1 < 0 or index2 < 0:
return
#print("-load_train_batch_random_s2_lr_hr--", index1, index2)
#pos
random_num = 2 * g_batch_train_data_size - 1
batch_data1 = load_train_batch_from_npy_file(index1, 1) #LR
batch_data2 = load_train_batch_from_npy_file(index2, 1)
batch_data3 = load_train_batch_from_npy_file(index1, 2) #HR
batch_data4 = load_train_batch_from_npy_file(index2, 2)
bdatalr = "" #tmp var
bdatahr = "" #tmp var
    batch_data_lr = NP.zeros(g_batch_shape, 'uint8')
    batch_data_hr = NP.zeros(g_batch_shape, 'uint8')
for i in range(g_batch_train_data_size) :
pos = generate_train_batch_random_pos(random_num)
if pos < g_batch_train_data_size :
bdatalr = batch_data1
bdatahr = batch_data3
else :
bdatalr = batch_data2
bdatahr = batch_data4
pos = pos - g_batch_train_data_size
for j in range(243) :
batch_data_lr[i][j] = bdatalr[pos][j]
batch_data_hr[i][j] = bdatahr[pos][j]
batch_data_lr_float32 = format_batch_data_to_float32(batch_data_lr)
batch_data_hr_float32 = format_batch_data_to_float32(batch_data_hr)
#print("batch_data_lr--", batch_data_lr)
#print("batch_data_lr_float32--", batch_data_lr_float32)
#print("batch_data_hr--", batch_data_hr)
#print("batch_data_hr--", batch_data_hr_float32)
return batch_data_lr_float32, batch_data_hr_float32
def load_train_batch_random_lr_and_hr() :
return load_train_batch_random_s2_lr_hr()
def load_train_batch_random_lr():
return load_train_batch_random_s2(1)
def load_train_batch_random_hr():
return load_train_batch_random_s2(2)
############test...
#print(load_train_batch_random_lr())
#print(load_train_batch_random_hr())
#load_train_batch_random_lr_and_hr()
#----------------------------------------------------------------------------------------
# Training batch data Generated end
#----------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------
# Test data function define begin
#----------------------------------------------------------------------------------------
test_data_dir = OS.getcwd() + "/data/test"
test_data_dir_set5 = test_data_dir + "/set5"
test_data_dir_set14 = test_data_dir + "/set14"
test_cur_file_dir = ""
test_cur_file_list = ""
test_cur_file_num = 0
test_cur_file_name = ""
test_cur_file_row_num = 0
test_cur_file_col_num = 0
test_cda_data_file_dir = OS.getcwd() + "/result_cda"
test_cda_data_file_dir_set5 = test_cda_data_file_dir + "/set5"
test_cda_data_file_dir_set14 = test_cda_data_file_dir + "/set14"
test_cda_data_file_cur_dir = ""
#print("train_imgfile_dir ====", test_data_dir)
#print("file ====", test_filelist)
#print("filenum ====", test_file_num)
#
# define function for get test file data
#---------------------------------------------------------
def test_data_directory_init(test_dir):
global test_cur_file_dir
global test_cur_file_list
global test_cur_file_num
global test_cur_file_name
global test_cur_file_row_num
global test_cur_file_col_num
print("test_data_directory_init-----", test_dir)
test_cur_file_dir = test_dir
test_cur_file_list = OS.listdir(test_dir)
test_cur_file_num = len(test_cur_file_list)
print("test_cur_filelist-----", test_cur_file_list)
print("test_cur_file_num-----", test_cur_file_num)
test_cur_file_name = ""
test_cur_file_row_num = 0
test_cur_file_col_num = 0
def test_cda_gen_data_directory_init(test_cda_dir):
if not OS.path.exists(test_cda_data_file_dir):
OS.mkdir(test_cda_data_file_dir)
if not OS.path.exists(test_cda_dir):
OS.mkdir(test_cda_dir)
global test_cda_data_file_cur_dir
test_cda_data_file_cur_dir = test_cda_dir
def set_bmp_info_callback(row_n, col_n):
global test_cur_file_row_num
global test_cur_file_col_num
test_cur_file_row_num = row_n
test_cur_file_col_num = col_n
#print("set_bmp_info_callback----", row_n, col_n, test_cur_file_row_num, test_cur_file_col_num)
def get_test_data_from_specific_file(index):
global test_cur_file_name
    global test_cur_file_list
test_cur_file_name = ""
#print("get_test_data_from_specific_file-----", index)
#print("get_test_data_from_specific_file-----", test_cur_file_list)
callback = set_bmp_info_callback
result = ""
if test_cur_file_num > 0 :
if index < test_cur_file_num :
test_cur_file_name = test_cur_file_list[index]
filename = test_cur_file_dir + "/" + test_cur_file_name
print("read: ", filename)
result = readfromfile_rgb(filename, 1, callback) # 1: lr img
return result
def get_test_data_from_specific_file_overlap(index):
global test_cur_file_name
    global test_cur_file_list
test_cur_file_name = ""
#print("get_test_data_from_specific_file_overlap-----", index)
#print("get_test_data_from_specific_file_overlap-----", test_cur_file_list)
callback = set_bmp_info_callback
result = ""
if test_cur_file_num > 0 :
if index < test_cur_file_num :
test_cur_file_name = test_cur_file_list[index]
filename = test_cur_file_dir + "/" + test_cur_file_name
print("overlap read: ", filename)
result = readfromfile_rgb_overlap(filename, 1, callback) # 1: lr img
return result
def check_test_dir_files(file_dir):
print("check_test_dir_files-----", file_dir)
tmp_file_list = OS.listdir(file_dir)
#print("tmp_file_list-----", tmp_file_list)
tmp_file_num = len(tmp_file_list)
for i in range(tmp_file_num):
filename = file_dir + "/" + tmp_file_list[i]
flag = is_bmp_file(filename)
#print("check_test_dir_files--flag---", flag)
if flag == 0:
#print("remove....", filename)
OS.remove(filename)
def test_data_set5_init():
#print("test_data_set5_init----")
check_test_dir_files(test_data_dir_set5)
test_data_directory_init(test_data_dir_set5)
test_cda_gen_data_directory_init(test_cda_data_file_dir_set5)
def test_data_set14_init():
#print("test_data_set5_init----")
check_test_dir_files(test_data_dir_set14)
test_data_directory_init(test_data_dir_set14)
test_cda_gen_data_directory_init(test_cda_data_file_dir_set14)
def get_test_file_num():
return test_cur_file_num
#def
def get_test_file_set(index):
batch_data = get_test_data_from_specific_file(index)
batch_data_float = format_batch_data_to_float32(batch_data)
return batch_data_float
def get_test_file_set_overlap(index):
batch_data = get_test_data_from_specific_file_overlap(index)
batch_data_float = format_batch_data_to_float32(batch_data)
return batch_data_float
#
# define function for save cda generate data to file.
#---------------------------------------------------------
def get_file_name_without_suffix(file_name):
#new_img.show()
#find position of .bmp
tmp_file_name = file_name
    tmp_file_name = tmp_file_name.lower()
pos_bmp_suffix = tmp_file_name.index(".bmp")
#print("-----------", tmp_file_name, pos_bmp_suffix)
#get substring
tmp_file_name = file_name[0:pos_bmp_suffix]
#print("-------get_file_name_without_suffix---------", tmp_file_name)
return tmp_file_name
# calculate the R G B values
# RGB : training data uses 256, to be compatible with the training data.
# YCbCr: training data uses 255; 255 works better!!!
def convert_cda_pixel_data_to_color(c1, c2, c3):
#RGB
if CDA_COLOR_FORMAT == 1 :
red = int((c1)*255)
green = int((c2)*255)
blue = int((c3)*255)
#YCbCr
if CDA_COLOR_FORMAT == 2:
red = int((c1)*255)
green = int((c2)*255)
blue = int((c3)*255)
#3: YCbCr, only Y used, other not used.
if CDA_COLOR_FORMAT == 3:
red = int((c1)*255)
green = 0
blue = 0
return red, green, blue
#data block 81 * float
def fill_data_block_to_bmp_array(data_block, row_b, col_b, pixels_per_row, img_arr):
#cal position in img_arr
img_row_i = row_b * 9
img_col_j = col_b * 9
#fill 9*9 block by 81 float variable
#data_block: 243 * float:
    #format: RGB -- RGB --- RGB
for i in range(81):
#print("----", data_block[3*i], data_block[3*i + 1], data_block[3*i + 2])
#print("RGB--", red, green, blue)
red, green, blue = convert_cda_pixel_data_to_color(data_block[3*i], data_block[3*i + 1], data_block[3*i + 2])
# save back to img_arr.
img_arr[img_row_i][img_col_j][0] = red
img_arr[img_row_i][img_col_j][1] = green
img_arr[img_row_i][img_col_j][2] = blue
img_col_j += 1
if (i + 1) % 9 == 0 :
img_row_i += 1
img_col_j = col_b * 9
    # for CDA_COLOR_FORMAT == 3 only:
def set_data_array_CbCr(img_arr) :
if CDA_COLOR_FORMAT != 3 :
return
filename = test_cur_file_dir + "/" + test_cur_file_name
bmp_arr = getbmparray_orig(filename, 2)
rnum = img_arr.shape[0]
cnum = img_arr.shape[1]
print("set_data_array_CbCr---", filename)
print("set_data_array_CbCr-bmp_arr.shape--", bmp_arr.shape)
print("set_data_array_CbCr---", rnum, cnum)
#copy CbCr back.
for i in range(rnum):
for j in range(cnum):
img_arr[i][j][1] = bmp_arr[i][j][1] #Cb
img_arr[i][j][2] = bmp_arr[i][j][2] #Cr
#data_arr: color data.
#CDA_COLOR_FORMAT: 1 RGB 2: YCbCr
#overlap: 0 -- no overlap 1 --- overlap
def save_data_array_to_bmp_file(img_arr, overlap = 0):
#if img_arr == "" :
# return
#print("save_data_array_to_bmp_file--CDA_COLOR_FORMAT=", CDA_COLOR_FORMAT)
# data is YCbCr, only Y is used, other should copy back.
if CDA_COLOR_FORMAT == 3:
set_data_array_CbCr(img_arr)
new_img_rgb = ""
#print("save_data_array_to_bmp_file----")
if CDA_COLOR_FORMAT == 1 : # data is RGB
new_img_rgb = Image.fromarray(img_arr, "RGB")
if CDA_COLOR_FORMAT == 2 or CDA_COLOR_FORMAT == 3: # data is YCbCr
new_img = Image.fromarray(img_arr, "YCbCr")
new_img_rgb = new_img.convert("RGB")
#print("-------convert--YCbCr--to-----RGB-----")
#print("-------test_cur_file_name-----------", test_cur_file_name)
tmp_file_name = get_file_name_without_suffix(test_cur_file_name)
tmp_file_name1 = test_cda_data_file_cur_dir + "/" + tmp_file_name + "_cda.bmp"
if overlap == 1:
tmp_file_name1 = test_cda_data_file_cur_dir + "/" + tmp_file_name + "_cda_overlap.bmp"
print("save_data_array_to_bmp_file-----------", tmp_file_name1)
new_img_rgb.save(tmp_file_name1)
#resul_data format: [none, 243] * float32
#block: 81
def save_cda_gen_data_to_bmp_file(result_data):
#print("--save_cda_gen_data_to_bmp_file---", test_cur_file_row_num, test_cur_file_col_num)
rb_num = test_cur_file_row_num
cb_num = test_cur_file_col_num
row_pixel_num = rb_num * 9
col_pixel_num = cb_num * 9
pixel_shape = [row_pixel_num, col_pixel_num, 3]
pixels_per_row = col_pixel_num
#print("pixel_shape---", pixel_shape)
img_arr = NP.zeros(pixel_shape, 'uint8')
#print("result_data---------", result_data)
index = 0
for row_b in range(rb_num):
for col_b in range(cb_num):
fill_data_block_to_bmp_array(result_data[index], row_b, col_b, pixels_per_row, img_arr)
index += 1
save_data_array_to_bmp_file(img_arr)
# overlapped process.
#--------------------------------------------------------------------------
# average a pixel that may have been covered by several overlapping 9*9 blocks
def fill_data_block_to_bmp_array_overlap_averaged(i, j, img_arr, avg_img_arr):
    rflag = int(i / BMP_OVERLAPPED_POS_COEF)
    rflag1 = int(i % BMP_OVERLAPPED_POS_COEF)
    colflag = int(j / BMP_OVERLAPPED_POS_COEF)
    colflag1 = int(j % BMP_OVERLAPPED_POS_COEF)
    # the first 3 rows/columns of every block after the first overlap the
    # previous block, so a pixel is covered by 1, 2 or 4 blocks;
    # divide the accumulated sum accordingly.
    divisor = 1
    if rflag != 0 and rflag1 < 3:  # overlapped along rows
        divisor *= 2
    if colflag != 0 and colflag1 < 3:  # overlapped along columns
        divisor *= 2
    for c in range(3):
        avg_img_arr[i][j][c] = int(img_arr[i][j][c] / divisor)
def fill_data_block_to_bmp_array_overlap(data_block, row_b, col_b, pixels_per_row, img_arr):
#cal position in img_arr
img_row_i = row_b * BMP_OVERLAPPED_POS_COEF
img_col_j = col_b * BMP_OVERLAPPED_POS_COEF
img_row_start = img_row_i
img_col_start = img_col_j
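    # contributions from overlapping blocks are accumulated (+=) here and averaged later
    # in fill_data_block_to_bmp_array_overlap_averaged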
#print("row_b =", row_b)
#print("col_b =", col_b)
#fill 9*9 block by 81 float variable
#data_block: 243 * float:
#formate: RGB -- RGB---RGB
for i in range(81):
# cal R G B value
red, green, blue = convert_cda_pixel_data_to_color(data_block[3*i], data_block[3*i + 1], data_block[3*i + 2])
#print("img_row_i---", img_row_i)
#print("img_col_j---", img_col_j)
# save back to img_arr.
img_arr[img_row_i][img_col_j][0] += red
img_arr[img_row_i][img_col_j][1] += green
img_arr[img_row_i][img_col_j][2] += blue
img_col_j += 1
if (i + 1) % 9 == 0 : # next line.
img_row_i += 1
img_col_j = img_col_start
#resul_data format: [none, 243] * float32
#block: 81
#overlap: 0 --- no overlap; 1 overlap.
def save_cda_gen_data_to_bmp_file_overlap(result_data):
#print("--save_cda_gen_data_to_bmp_file_overlap---", test_cur_file_row_num, test_cur_file_col_num)
rb_num = test_cur_file_row_num
cb_num = test_cur_file_col_num
row_pixel_num = rb_num * BMP_OVERLAPPED_POS_COEF + 3
col_pixel_num = cb_num * BMP_OVERLAPPED_POS_COEF + 3
pixel_shape = [row_pixel_num, col_pixel_num, 3]
pixels_per_row = col_pixel_num
#print("pixel_shape---", pixel_shape)
img_arr = NP.zeros(pixel_shape, 'uint8')
img_arr1 = NP.zeros(pixel_shape, 'float32')
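    # accumulate overlapping block contributions in float32 first, then average them into the uint8 image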
#print("pixel_shape-111--", img_arr1)
index = 0
for row_b in range(rb_num):
for col_b in range(cb_num):
fill_data_block_to_bmp_array_overlap(result_data[index], row_b, col_b, pixels_per_row, img_arr1)
index += 1
#print("img_arr1--222-", img_arr1)
    # un-overlap: average the accumulated sums into the final uint8 image
for i in range(row_pixel_num):
for j in range(col_pixel_num):
fill_data_block_to_bmp_array_overlap_averaged(i, j, img_arr1, img_arr)
save_data_array_to_bmp_file(img_arr, 1)
#######################test
#print("test-------------------------------")
#test_data_set5_init()
#print("test--------------------111-----------")
#data = get_test_file_set(0)
#save_cda_gen_data_to_bmp_file(data)
#----------------------------------------------------------------------------------------
# Test data function define end
#----------------------------------------------------------------------------------------
#test...
#----------------------------------------------------------
#print("-----gen_train_data_batch_npy_file_all-----")
#gen_train_data_batch_npy_file_all()
#load_train_batch_from_npy_file(0, 1)
#load_train_batch_from_npy_file(1)
#load_train_batch_from_npy_file(2)
#load_train_batch_random_lr()
#load_train_batch_random_hr()
#load_train_batch_random_lr_and_hr()
#------------------------------------------------------------------------
# Training batch data Generated end
#------------------------------------------------------------------------
#lr hr: shape like:
#shape_result = [block_num, 81]
#lr, hr = readfromfile('t1.bmp')
#hr = get_train_data_from_file(2, 1)
#print("---hr-------", hr.shape)
#print("----train_file_index-----", train_file_index)
#hr = get_train_data_from_next_file(1)
#print("----train_file_index-----", train_file_index)
#tttresult = [11, 81, 22]
#for i in range(len(tttresult)):
# tttresult[i] *= 10
#tttresult1 = [[10, 20], [40,60]]
#tttresult2 = [[110, 120], [140, 160]]
#tttresult3 = tttresult2 - tttresult1
#print("-----------tttresult3----------", tttresult3)
##print("-----------tttresult1----------", tttresult1, 0x1000000)
#tttttt = NP.reshape(tttresult1, 4)
#print("-----------tttresult1----------", tttttt)
#print("-----------tttresult1----------", tttttt[3], len(tttttt))
#get_hr_train_data()
#save test
#weghts = {
# 'b1' : tf.Variable(tf.zeros([3])),
# 'w2' :tf.Variable(tf.truncated_normal([3, 3], stddev=0.1))
#}
#tttresult1 = NP.array([(10, 20), (40,60)] )
#tttresult2 = NP.array([(110, 27), (140, 160)])
#t1 = tf.convert_to_tensor(tttresult1)
#t2 = tf.convert_to_tensor(tttresult2)
#t3 = t1 - t2
#t4 = tf.square(t3)
#t5 = tf.reduce_sum(t4)
#sess = tf.InteractiveSession()
#tf.global_variables_initializer().run()
#print("t1----", sess.run(t1))
#print("t2----", sess.run(t2))
#print("t3----", sess.run(t3))
#print("t4----", sess.run(t4))
#print("t5----", sess.run(t5))
#print(sess.run(weghts['b1']))
#print(sess.run(weghts['w2']))
#------------------------------------------------------------------------
# data output function define
#------------------------------------------------------------------------
#res = get_hr_train_data_specific(1)
#print("res---", res)
#save_test_data_to_bmp_file(res, 12, 13)
#tttresult1 = NP.array([(10, 20), (40,60)] )
#tttresult2 = NP.array([(110, 27), (140, 160)])
#filename11 = "/home/tcl/tensor/src/test/bmp/temp.npy"
#NP.save(filename11, tttresult1)
#tttresult3 = NP.load(filename11)
#print("tttresult3---", tttresult3)
#filehandle = open("/home/tcl/tensor/src/test/bmp/temp.bin", 'rb+')
#filehandle.write(tttresult1)
#filehandle.write(tttresult2)
#tttresult3 = filehandle.read(len(tttresult1))
#filehandle.seek(0)
#tttresult3 = filehandle.read()
#tttresult4 = filehandle.read(len(tttresult2))
#print("tttresult3---", tttresult3)
#print("tttresult4---", tttresult4)
#filehandle.close()
| [
"noreply@github.com"
] | YongXie-ICMM.noreply@github.com |
b1cee5cb9136edce4f25961c81ce6d0da67dc2b8 | 04d23af9762fc4deb787ada710a6b22d2d98924e | /api_prototype/oliver/test_10_dynamic_diffusion_constants.py | 668d0d379a8e3d0b7781b4757c8d7035127a338b | [] | no_license | marordyan/libMCellPP | 67a9c80c26a5cf6a9425844d600d818f37790a43 | 53855b7d679cf6390a281b4a28507d33647baea9 | refs/heads/master | 2021-01-12T09:21:26.420398 | 2016-12-10T21:51:34 | 2016-12-10T21:51:34 | 76,146,742 | 0 | 0 | null | 2016-12-11T01:04:25 | 2016-12-11T01:04:25 | null | UTF-8 | Python | false | false | 454 | py | import pymcell as m
# Make a model
model = m.create_model()
# Set timestep
model.dt = 0.1
###
# Box
###
# Create a box
box = model.create_simple_object(name="My box", type="CUBE", center=[0,0,0], radius=[1,1,1])
###
# Species
###
mol_A = model.create_species(name="A",dc=1)
###
# Run the simulation
###
n_iter = 100
for i_iter in range(0,n_iter):
    model.run_timestep() # advances by one timestep by default
    # Update the diffusion constant
mol_A.dc += 1 | [
"oernst@ucsd.edu"
] | oernst@ucsd.edu |
34c71705c40faa5424bbd00c7fcfc0754f02dac6 | 3f1e90cd8b5792db91375cd354fbe2f9ccec003f | /Virginia/Outputs/HDSR_plots2.py | 5cb65042405e8818ee8b5e34497f7c34f051fb70 | [] | no_license | drdeford/recom-VA | 4e192130adb6c813f7a418b1002371be5b995c65 | 3666ec2cb0e776a6a9d8d60d7341469407049387 | refs/heads/master | 2020-07-03T19:38:16.912855 | 2019-08-20T16:41:05 | 2019-08-20T16:41:05 | 202,026,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,876 | py | # sns.set_style('white')
import os
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# sns.set_style('darkgrid')
sns.set_style("darkgrid", {"axes.facecolor": ".97"})
def draw_plot(data, offset, edge_color, fill_color):
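    # draw one group of boxplots shifted horizontally by offset so several runs share the same x positions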
pos = 10*np.arange(data.shape[1])+offset
#bp = ax.boxplot(data, positions= pos, widths=0.3, patch_artist=True, manage_xticks=False)
bp = ax.boxplot(data, positions= pos,widths=.5, whis=[1,99],showfliers=False, patch_artist=True, manage_ticks=False,zorder=4)
for element in ['boxes', 'whiskers', 'medians', 'caps']:
plt.setp(bp[element], color=edge_color,zorder=4)
for patch in bp['boxes']:
patch.set(facecolor=fill_color,zorder=0)
num_elections = 4
election_names = [
"BVAP",
"LTGOV",
"GOV",
"AG"]
election_columns = [
["VAPBLACK", "nBVAP"],
["D_LTGOV", "R_LTGOV"],
["D_GOV", "R_GOV"],
["D_ATTGEN", "R_ATTGEN"],
["PRES12D", "PRES12R"],
]
newdir = "./Plots/Compare/"
datadir1= "./ReCOM_Enacted_uu/"
datadir2= "./ReCOM_Tree31_uutk3/"
datadir3= "./ReCOM_Tree99_uutk3/"
datadir1= "./FLIP_Enacted/"
datadir2= "./FLIP_Tree31/"
datadir3= "./FLIP_Tree99/"
os.makedirs(os.path.dirname(newdir + "init.txt"), exist_ok=True)
with open(newdir + "init.txt", "w") as f:
f.write("Created Folder")
max_steps = 10000000#20000#
step_size = 10000#2000#
ts = [x * step_size for x in range(1, int(max_steps / step_size) + 1)]
for elect in range(1):
a = []
b = []
c = []
for t in ts:
tempvotes = np.loadtxt(
datadir1 + election_names[elect] + "_" + str(t) + ".csv", delimiter=","
)
for s in range(step_size):
a.append(tempvotes[s, :])
tempvotes = np.loadtxt(
datadir2 + election_names[elect] + "_" + str(t) + ".csv", delimiter=","
)
for s in range(step_size):
b.append(tempvotes[s, :])
tempvotes = np.loadtxt(
datadir3 + election_names[elect] + "_" + str(t) + ".csv", delimiter=","
)
for s in range(step_size):
c.append(tempvotes[s, :])
a = np.array(a)
b = np.array(b)
c = np.array(c)
#medianprops = dict(color="black")
fig, ax = plt.subplots()
draw_plot(a,-2,'r',None)
draw_plot(b,0,'y',None)
draw_plot(c,2,'b',None)
plt.plot([],[],color='r',label='Enacted')
plt.plot([],[],color='y',label='Seed 31')
plt.plot([],[],color='b',label='Seed 99')
plt.legend()
#plt.xticks([5,10,15,20,25,30],[5,10,15,20,25,30])
plt.xticks([],[])
#plt.xlim([.5,34])
plt.xlabel("Indexed Districts")
plt.ylabel("BVAP %")
plt.legend()
plt.savefig("./Plots/HDSR2/FLIPseed_compare.png")
fig = plt.gcf()
fig.set_size_inches((12,8), forward=False)
fig.savefig("./Plots/HDSR2/FLIPseed_compare2.png", dpi=1000)
plt.close()
| [
"daryl.r.deford@gmail.com"
] | daryl.r.deford@gmail.com |
1527d595e17bc4979d4106893919773733bb7c3f | 3b8e281b4166f8c485b995b1d959e9765c57434b | /jobapp/urls.py | 6c595f32a7190bf03afaadd8b89d577258adb060 | [] | no_license | SinghalAyushh/Dignizant-job-portal | 90bec823819601048d5cd3605df13356c3b71222 | 93cb580d26e8638e57063058f649dadd230963c0 | refs/heads/master | 2023-04-12T18:53:04.008592 | 2021-05-04T11:16:45 | 2021-05-04T11:16:45 | 364,226,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,507 | py | from django.urls import path
from jobapp import views
app_name = "jobapp"
urlpatterns = [
path('', views.home_view, name='home'),
path('resume/', views.resume_view, name='resume'),
path('resumebuilder/', views.resumeMaker_view, name='builder'),
path('about/', views.about_view, name='about'),
path('jobs/', views.job_list_View, name='job-list'),
path('job/create/', views.create_job_View, name='create-job'),
path('job/<int:id>/', views.single_job_view, name='single-job'),
path('apply-job/<int:id>/', views.apply_job_view, name='apply-job'),
path('bookmark-job/<int:id>/', views.job_bookmark_view, name='bookmark-job'),
path('about/', views.single_job_view, name='about'),
path('contact/', views.single_job_view, name='contact'),
path('result/', views.search_result_view, name='search_result'),
path('dashboard/', views.dashboard_view, name='dashboard'),
path('dashboard/employer/job/<int:id>/applicants/', views.all_applicants_view, name='applicants'),
path('dashboard/employer/job/edit/<int:id>', views.job_edit_view, name='edit-job'),
path('dashboard/employer/applicant/<int:id>/', views.applicant_details_view, name='applicant-details'),
path('dashboard/employer/close/<int:id>/', views.make_complete_job_view, name='complete'),
path('dashboard/employer/delete/<int:id>/', views.delete_job_view, name='delete'),
path('dashboard/employee/delete-bookmark/<int:id>/', views.delete_bookmark_view, name='delete-bookmark'),
]
| [
"developer@apoyar.eu"
] | developer@apoyar.eu |
801ffd6735d148a588bc83c55473c3c5b913e66f | e0c46c108ba8683d8d845d29b1d44883411bc467 | /Plot_Module.py | f2518834e4a54590edd12ab62eafd9654911dff8 | [] | no_license | RisakaLogin/Compu_Bot | f3c91c232a7e1691fd394ace0464e06ae60f171e | a140db19fe80312d5023713f4760c18b92b7c33c | refs/heads/main | 2023-03-27T23:32:00.672102 | 2021-04-03T11:21:33 | 2021-04-03T11:21:33 | 354,272,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,297 | py | import numpy as np
import matplotlib.pyplot as plt
def gacha(num,x,y):
star5 = ["Keqing","Mona","Qiqi","Diluc","Jean","Amos' Bow","Skyward Harp","Lost prayer to the Sacred Winds","Skyward Atlas"
,"Primordial Jade Winged-Spear","Skyward Spine","Wolf's Gravestone","Skyward Pride","Skyward Blade","Aquila Favonia"]
star4 = ["Xinyan","Sucrose","Diona","Chongyun","Noelle","Bennett","Fischl","Ningguang","Xingqiu"
,"Beidou","Xiangling","Amber","Razor","Kaeya","Barbara","Lisa","Rust","Sacrificial Bow","The Stringless","Favonius Warbow"
,"Eye of Perception","Sacrificial Fragments","The Widsith","Favonius Codex","Favonius Lance","Dragon's Bane","Rainslasher"
,"Sacrificial Greatsword","The Bell","Favonius Greatsword","Lion's Roar","Sacrificial Sword","The Flute","Favonius Sword"]
star3 = ["Slingshot","Sharpshooter's Oath","Raven Bow","Emerald Orb","Thrilling Tales of Dragon Slayers","Magic Guide","Black Tassel","Debate Club","Bloodtainted Greatsword"
,"Ferrous Shadow","Skyrider Sword","Harbinger of Dawn","Cool Steel"]
rate4,rate5=0.051,0.006
count4,count5=x,y
reward4,reward5,reward=0,0,0
get = []
stack = []
find5star = []
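    # simulate the pity system: the 5-star rate jumps after 75 pulls without one and a
    # 5-star is guaranteed on the 90th pull; a 4-star is guaranteed within 10 pulls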
for i in range(num):
r=np.random.uniform(0,1)
if(count5<75):
if(r<rate5):
reward=5
get.append(5)
find5star.append(i)
else:
if count4<8:
if r<rate5+rate4:
reward=4
get.append(4)
else:
reward=0
get.append(3)
elif count4<9:
if r <rate5+0.511:
reward=4
get.append(4)
else:
reward=0
get.append(3)
else:
reward=4
get.append(4)
elif count5<89:
if r<0.324:
reward=5
get.append(5)
find5star.append(i)
else:
if count4<8:
if r<rate5+rate4:
reward=4
get.append(4)
else:
reward=0
get.append(3)
elif count4<9:
if r <rate5+0.511:
reward=4
get.append(4)
else:
reward=0
get.append(3)
else:
reward=4
get.append(4)
else:
reward=5
get.append(5)
find5star.append(i)
if reward==5:
reward5+=1
count4+=1
count5=0
elif reward==4:
reward4+=1
count5+=1
count4=0
else:
count4+=1
count5+=1
for e in get:
if e == 5:
findstar5 = "**[★5]** "+np.random.choice(star5)+"\n"
stack.append(findstar5)
    return find5star  # count only the 5-star items and return their pull indices
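# Example (assuming fresh pity counters): gacha(90, 0, 0) simulates 90 pulls and
# returns the indices of the pulls that produced a 5-star item.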
| [
"46447258+RisakaLogin@users.noreply.github.com"
] | 46447258+RisakaLogin@users.noreply.github.com |
14d4a2ea49a5d2efef1b9fe8d6229d541dfcbddb | 2e5bbd2995aa01931b4a19c094c197b649be2732 | /Douyu/pipelines.py | 7ab70d9d7c76a99f52085dee8c279e51fc53450c | [] | no_license | IvanReen/Douyu | fed077ce594d67a8ad1252e9924c964f52821c61 | 3e4144e49b4f455e0de3d139a322e72371579660 | refs/heads/master | 2022-07-08T10:45:40.541793 | 2018-09-16T08:02:29 | 2018-09-16T08:02:29 | 148,971,616 | 0 | 0 | null | 2022-06-28T06:58:28 | 2018-09-16T06:59:17 | Python | UTF-8 | Python | false | false | 676 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import os
import scrapy
from scrapy.pipelines.images import ImagesPipeline
from Douyu.settings import IMAGES_STORE as images_store
class DouyuPipeline(ImagesPipeline):
def get_media_requests(self, item, info):
image_link = item['imagelink']
yield scrapy.Request(image_link)
def item_completed(self, results, item, info):
image_path = [x['path'] for ok, x in results if ok]
        os.rename(images_store + image_path[0], item['nickname'] + '.jpg')
        # return the item so that later pipelines still receive it
        return item | [
"small_pupil@126.com"
] | small_pupil@126.com |
6dd430728751b0645ff790a549d4c38da9c49c9a | da8007b686dcb74a7611f841443f6782db9ebfbc | /physicsly.py | da8623e9f110f2a3ecc5368f1118e991115f79bd | [
"MIT"
] | permissive | YummyPotatoPie/Physicsly | 0729b0d36f90243e42afe235dbff637b923f71ce | b35f8cc639172359f1dca9c86a5932423984ee88 | refs/heads/master | 2021-07-08T19:53:46.871862 | 2020-10-18T22:18:04 | 2020-10-18T22:18:04 | 204,992,679 | 2 | 0 | MIT | 2019-08-28T21:35:58 | 2019-08-28T18:01:19 | Python | UTF-8 | Python | false | false | 152 | py | from astronomy import *
from relativistic import *
from mechanics import *
from thermal import *
from atmosphere import *
from electricity import * | [
"noreply@github.com"
] | YummyPotatoPie.noreply@github.com |
37b65e6858488a975b250f1f79e157d37747ae5c | d2380a4d4347a382188d8c0c6765249082807738 | /Scrapy/venv/bin/flask | fecc24f53f35361f8dbd24cf83fe47fd0d4ca8e4 | [] | no_license | fan-xin/DeepLearning | e2d1aa0508bda02745089dd7722b84385542c70f | 6d11d2934b49e982ae560b1243da7c5c225c7b14 | refs/heads/0523-paper | 2020-05-26T07:28:26.222770 | 2019-07-05T06:06:03 | 2019-07-05T06:06:03 | 188,149,211 | 0 | 0 | null | 2019-07-05T06:06:05 | 2019-05-23T02:44:50 | Python | UTF-8 | Python | false | false | 269 | #!/home/CORPUSERS/xp024975/work/Execrise/DeepLearning/Scrapy/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"xin.x.fan@sony.com"
] | xin.x.fan@sony.com | |
981ec36ebcb33c0e9e9f353d8d485118061ad5a6 | 78bc02b858f77459533458e026d7a6a1454f055a | /doutu.py | c6814d7f2bcbd765fea6dc42841641c09775ceec | [
"LicenseRef-scancode-mulanpsl-1.0-en",
"MulanPSL-1.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | wp3211111/python-study-spider | daa0e7a283b85387b9ec69fdbb3911de507be5a1 | 2f23da9851e11387280e01ae0404fa0007d80440 | refs/heads/master | 2023-05-04T03:19:02.000703 | 2021-05-17T09:08:56 | 2021-05-17T09:08:56 | 368,121,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,823 | py | import requests
import re
import os
headers = {
"Accept-Encoding": "Gzip", # 使用gzip压缩传输数据让访问更快
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:83.0) Gecko/20100101 Firefox/83.0",
}
# fetch the search page data
def get_text(keyword,page):
url = f'https://www.doutula.com/search?type=photo&more=1&keyword={keyword}&page={page}'
    # request the page
resp = requests.get(url,headers=headers)
    # strip all whitespace characters
text = re.sub('\s','',resp.text)
return text
def down_meme(keyword):
    # there are many memes; only fetch 10 pages here (close to 700 images in total)
pages = 10
num = 0
for page in range(1,pages+1):
text = get_text(keyword,page)
        # region of the page that contains the memes
search_result = re.findall(r'divclass="search-resultlist-group-item"(.*?)class="text-center"',text)[0]
        # meme image download URLs
meme_urls = re.findall(r'"data-original="(.*?)"',search_result)
        # download every meme on this page
for meme_url in meme_urls:
num += 1
            # meme file name
meme_name = re.findall(r'http://img.doutula.com/.*/(.*)',meme_url)[0]
meme_img = requests.get(meme_url)
            # meme content in bytes
meme = meme_img.content
            # save locally (create a folder named after the keyword if it does not exist)
if not os.path.exists(f'./{keyword}'):
os.mkdir(f'./{keyword}')
with open(f'./{keyword}/{meme_name}','wb') as f:
f.write(meme)
print(f'{num} 个 {keyword} 表情包已经下载...')
if __name__ == "__main__":
# keyword = '呵呵'
keyword = input('请输入你想查询的表情包:')
down_meme(keyword) | [
"robin@sixthnet.com"
] | robin@sixthnet.com |
1bea5339c89536f4280f5c5343143c9785817adb | dec1c68ec3d867c40f504ce2719fd43bd1142c7e | /git.py | e5a1496081b3583219a46fff91437a565dcce882 | [] | no_license | quanbanno2/first_upload | 7a01b45befc9b806d4c3faf8081e060f7a3cf65c | 60f5309f274f3af78b90daf1d278cb83a8e4c0a7 | refs/heads/master | 2020-04-01T17:39:53.068552 | 2018-11-05T08:24:19 | 2018-11-05T08:24:19 | 153,442,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | import unittest
class TestStringMethods(unittest.TestCase):
def test_upper(self):
        self.assertEqual('foo'.upper(),'FOO')
def test_isupper(self):
self.assertTrue('FOO'.isupper())
self.assertFalse('Foo'.isupper())
def test_split(self):
s='hello world'
self.assertEqual(s.split(),['hello','world'])
# check that s.split fails when the separator is not a string
with self.assertRaises(TypeError):
s.split(2)
# if __name__ =='__main__':
# unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(TestStringMethods)
unittest.TextTestRunner(verbosity=2).run(suite)
| [
"f821336459@163.com"
] | f821336459@163.com |
1082e266e9b610565c47360420abf56154db4303 | bedc852c913b5c174f8c360b2097ebf1053612de | /form.py | 6b18d98c1550f177e5dce8cf996748d85cc76bf1 | [] | no_license | Finian1007/Line-Investment-Chatbot | 856580919030b36ae71a53f6f9d548597cbd6110 | 309fed5d7f0efe2226308d2689dbc9c7d9ef9c87 | refs/heads/master | 2022-04-17T07:26:16.608232 | 2020-04-07T10:55:34 | 2020-04-07T10:55:34 | 253,731,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,191 | py |
def createForm(question, ans1text, ans1value, ans2text, ans2value, ans3text, ans3value, ans4text, ans4value, ans5text, ans5value) :
form = {
"type": "bubble",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "風險評估問券",
"weight": "bold",
"color": "#1DB446",
"size": "sm"
},
{
"type": "text",
"text": question,
"weight": "bold",
"size": "sm",
"margin": "md"
},
{
"type": "separator",
"margin": "xxl"
},
{
"type": "button",
"height": "xs",
"style": "link",
"color": "#398CCD",
"action": {
"type": "message",
"label": ans1text,
"text": ans1value
}
},
{
"type": "button",
"height": "xs",
"style": "link",
"color": "#398CCD",
"action": {
"type": "message",
"label": ans2text,
"text": ans2value
}
},
{
"type": "button",
"height": "xs",
"style": "link",
"color": "#398CCD",
"action": {
"type": "message",
"label": ans3text,
"text": ans3value
}
},
{
"type": "button",
"height": "xs",
"style": "link",
"color": "#398CCD",
"action": {
"type": "message",
"label": ans4text,
"text": ans4value
}
},
{
"type": "button",
"height": "xs",
"style": "link",
"color": "#398CCD",
"action": {
"type": "message",
"label": ans5text,
"text": ans5value
}
}
]
}
}
return form
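# Example (values taken from the form1 entry below): the returned dict is a LINE
# Flex Message bubble; wrap it in a FlexSendMessage before replying, e.g.
#   bubble = createForm('1.你的主要收入來源是:', '無固定收入', 'formA0', '非金融性資產收入', 'formA2',
#                       '金融性資產收入', 'formA4', '生產經營所得', 'formA6', '工資、勞務報酬', 'formA8')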
formDict = {
"form1": {
"question" : "1.你的主要收入來源是:",
"ans1" : {
"text" : "無固定收入",
"value" : "formA0"
},
"ans2" : {
"text" : "非金融性資產收入",
"value" : "formA2"
},
"ans3" : {
"text" : "金融性資產收入",
"value" : "formA4"
},
"ans4" : {
"text" : "生產經營所得",
"value" : "formA6"
},
"ans5" : {
"text" : "工資、勞務報酬",
"value" : "formA8"
},
},
"form2": {
"question" : "2.你的家庭就業狀況是:",
"ans1" : {
"text" : "未婚、暫無穩定收入",
"value" : "formB0"
},
"ans2" : {
"text" : "未婚、有穩定收入",
"value" : "formB2"
},
"ans3" : {
"text" : "與配偶無穩定收入或已退休",
"value" : "formB4"
},
"ans4" : {
"text" : "與配偶其中一人有穩定收入",
"value" : "formB6"
},
"ans5" : {
"text" : "與配偶皆有穩定收入",
"value" : "formB8"
},
},
"form3": {
"question" : "3.你主要想將自己的投資回報用於:",
"ans1" : {
"text" : "償還債務",
"value" : "formC0"
},
"ans2" : {
"text" : "本人養老或醫療",
"value" : "formC2"
},
"ans3" : {
"text" : "履行扶養或贍養義務",
"value" : "formC4"
},
"ans4" : {
"text" : "工作或證券以外的投資行為",
"value" : "formC6"
},
"ans5" : {
"text" : "改善生活",
"value" : "formC8"
},
},
"form4": {
"question" : "4.您的家庭可支配年收入(TWD)為:",
"ans1" : {
"text" : "250萬以下",
"value" : "formD0"
},
"ans2" : {
"text" : "250萬~500萬",
"value" : "formD2"
},
"ans3" : {
"text" : "500萬~2500萬",
"value" : "formD4"
},
"ans4" : {
"text" : "2500萬~5000萬",
"value" : "formD6"
},
"ans5" : {
"text" : "5000萬以上",
"value" : "formD8"
},
},
"form5": {
"question" : "5.你可用來投資的資產總額:",
"ans1" : {
"text" : "250萬以下",
"value" : "formE0"
},
"ans2" : {
"text" : "250萬~500萬",
"value" : "formE2"
},
"ans3" : {
"text" : "500萬~2500萬",
"value" : "formE4"
},
"ans4" : {
"text" : "2500萬~5000萬",
"value" : "formE6"
},
"ans5" : {
"text" : "5000萬以上",
"value" : "formE8"
},
},
"form6": {
"question" : "6.年家庭可支配收入中可投資比例為:",
"ans1" : {
"text" : "小於10%",
"value" : "formF2"
},
"ans2" : {
"text" : "10% ~ 25%",
"value" : "formF4"
},
"ans3" : {
"text" : "25% ~ 50%",
"value" : "formF6"
},
"ans4" : {
"text" : "大於50%",
"value" : "formF8"
},
"ans5" : {
"text" : "x",
"value" : "formF0"
},
},
"form7": {
"question" : "7.是否有未償還債務? 如有,性值為?",
"ans1" : {
"text" : "有,親朋間借款",
"value" : "formG2"
},
"ans2" : {
"text" : "有,信用卡等短期債務",
"value" : "formG4"
},
"ans3" : {
"text" : "有,房債等長期債務",
"value" : "formG6"
},
"ans4" : {
"text" : "無",
"value" : "formG8"
},
"ans5" : {
"text" : "x",
"value" : "formG0"
},
},
"form8": {
"question" : "8.你的投資知識可描述為:",
"ans1" : {
"text" : "無基本金融產品知識",
"value" : "formH0"
},
"ans2" : {
"text" : "有基本金融產品知識",
"value" : "formH3"
},
"ans3" : {
"text" : "有豐富金融產品知識",
"value" : "formH6"
},
"ans4" : {
"text" : "x",
"value" : "formH0"
},
"ans5" : {
"text" : "x",
"value" : "formH0"
},
},
"form9": {
"question" : "9.你的投資經驗可描述為:",
"ans1" : {
"text" : "無銀行儲蓄外的投資經驗",
"value" : "formI2"
},
"ans2" : {
"text" : "買過債券、保險等理財商品",
"value" : "formI4"
},
"ans3" : {
"text" : "參與過股票、基金等產品交易",
"value" : "formI6"
},
"ans4" : {
"text" : "參與過證券、期貨等產品交易",
"value" : "formI8"
},
"ans5" : {
"text" : "x",
"value" : "formI0"
},
},
"form10": {
"question" : "10.你有多少年金融性產品投資經驗",
"ans1" : {
"text" : "無經驗",
"value" : "formJ0"
},
"ans2" : {
"text" : "低於2年",
"value" : "formJ2"
},
"ans3" : {
"text" : "2 ~ 5年",
"value" : "formJ4"
},
"ans4" : {
"text" : "5 ~ 10年",
"value" : "formJ6"
},
"ans5" : {
"text" : "10年以上",
"value" : "formJ8"
},
},
"form11": {
"question" : "11.以下何者為你的投資態度:",
"ans1" : {
"text" : "厭惡風險,想有穩定收入",
"value" : "formK0"
},
"ans2" : {
"text" : "保守投資,願意承擔一定風險",
"value" : "formK3"
},
"ans3" : {
"text" : "求較高效益,願承擔較高風險",
"value" : "formK6"
},
"ans4" : {
"text" : "尋求高效益,願承擔一定損失",
"value" : "formK9"
},
"ans5" : {
"text" : "x",
"value" : "formK0"
},
},
"form12": {
"question" : "A: 10%收益,小風險 B: 30%收益,大風險",
"ans1" : {
"text" : "全部投資於A",
"value" : "formL2"
},
"ans2" : {
"text" : "都投資,但大部分A",
"value" : "formL4"
},
"ans3" : {
"text" : "都投資,但大部分B",
"value" : "formL6"
},
"ans4" : {
"text" : "全部投資於B",
"value" : "formL8"
},
"ans5" : {
"text" : "x",
"value" : "formL0"
},
},
"form13": {
"question" : "13.你認為自己能承受最大損失為",
"ans1" : {
"text" : "10%以內",
"value" : "formM0"
},
"ans2" : {
"text" : "10% ~ 30%",
"value" : "formM2"
},
"ans3" : {
"text" : "30% ~ 50%",
"value" : "formM4"
},
"ans4" : {
"text" : "大於50%",
"value" : "formM6"
},
"ans5" : {
"text" : "x",
"value" : "formM0"
},
},
"form14": {
"question" : "14.你是否為以下類型投資者",
"ans1" : {
"text" : "沒有任何風險承受度",
"value" : "no"
},
"ans2" : {
"text" : "不能接受投資損失",
"value" : "no"
},
"ans3" : {
"text" : "以上皆非",
"value" : "yes"
},
"ans4" : {
"text" : "x",
"value" : "no"
},
"ans5" : {
"text" : "x",
"value" : "no"
},
}
}
score = 0
def checkResult(result, yesno):
formAnswer = ''
if result <= 20 or yesno =='no':
formAnswer = '你的風險承受能力為C1,可購買(R1)型金融產品'
elif result <= 40:
formAnswer = '你的風險承受能力為C2,可購買(R1,R2)型金融產品'
elif result <=70:
formAnswer = '你的風險承受能力為C3,可購買(R1,R2,R3)型金融產品'
elif result <=85:
formAnswer = '你的風險承受能力為C4,可購買(R1,R2,R3,R4)型金融產品'
else:
formAnswer = '你的風險承受能力為C5,可購買(R1,R2,R3,R4,R5)型金融產品'
return formAnswer | [
"finian@zhanzhiyude-MacBook-Pro.local"
] | finian@zhanzhiyude-MacBook-Pro.local |
217f78fd7aefe4509966a90eea4f3d200ad8f52b | 457a1aefc5c5a5e670dbd721fed06976acf4560a | /dataset.py | acd3b98c7b85b3a9b520d45d6411260f2ff711cc | [] | no_license | DevHyung/nlp-AdhocTableSearch-benchmark | 66520fcb1802f0eb079351378503bca3d44db830 | e925eb0950a3141d522f0d51be92e12e49aaa8e5 | refs/heads/main | 2023-04-13T15:38:04.078761 | 2021-04-28T05:42:07 | 2021-04-28T05:42:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,325 | py | import itertools
import json
import os
import random
from math import ceil
from collections import defaultdict
from pathlib import Path
import re
import torch
from torch.utils.data import DataLoader, Dataset
from transformers import BertTokenizer, BertModel
from table_bert import Table, Column, TableBertModel
class Sample(object):
def __init__(self, query, positive_tables, negative_tables):
self.query = query
self.positive_tables = positive_tables
self.negative_tables = negative_tables
class QueryTableDataset(Dataset):
def __init__(self, data_dir: str = '.data', data_type: str = 'train',
query_tokenizer=None, table_tokenizer=None, max_query_length=7,
prepare=False, is_slice=False):
self.data_dir = data_dir
self.query_file = data_type + '.query'
self.table_file = data_type + '.table'
self.ids_file = data_type + '.pair'
        self.data_type = data_type # to distinguish train from test
self.is_slice = is_slice
if prepare:
self.prepare(data_dir, data_type, query_tokenizer, table_tokenizer, max_query_length)
self.data = torch.load(os.path.join(self.processed_folder, self.ids_file))
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
def prepare(self, data_dir, data_type, query_tokenizer, table_tokenizer, max_query_length):
if self._check_exists():
return
processed_dir = Path(self.processed_folder)
processed_dir.mkdir(exist_ok=True)
if not (query_tokenizer and table_tokenizer):
raise RuntimeError('Tokenizers are not found.' +
' You must set query_tokenizer and table_tokenizer')
print('Processing...')
query_dict = defaultdict()
pos_tables, neg_tables = defaultdict(list), defaultdict(list)
data = []
path = Path(data_dir + '/' + data_type + '.jsonl')
with open(path) as f:
for line in f.readlines():
if not line.strip():
break
                # parse the table metadata ( qid, tid, query, rel )
jsonStr = json.loads(line)
query = jsonStr['query']
qid = jsonStr['qid']
tid = jsonStr['docid']
# Query Encode
if qid not in query_dict:
                    # added 2020-04-23: add_special_tokens
query_tokenized = query_tokenizer.encode_plus(query,
max_length=max_query_length,
add_special_tokens=True,
padding='max_length',
truncation=True,
return_tensors="pt"
)
                    query_dict[qid] = query_tokenized # BERT **inputs: input_ids, token_type_ids, attention_mask
# Table Encode
caption_rep, column_reps = encode_tables(jsonStr, self.is_slice, query, table_tokenizer)
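                # bucket each (rel, table) pair as positive or negative for this query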
for (rel, column_rep) in column_reps:
if str(rel) == '0':
neg_tables[qid].append((column_rep, caption_rep))
else:
pos_tables[qid].append((column_rep, caption_rep))
for qid in query_dict:
if not pos_tables[qid]:
continue
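            # pair every positive table with every negative table to build training triples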
for t in itertools.product(pos_tables[qid], neg_tables[qid]):
data.append([query_dict[qid]] + list(itertools.chain.from_iterable(t)))
# Save
with open(os.path.join(processed_dir, self.ids_file), 'wb') as f:
torch.save(data, f)
print('Done!')
@property
def processed_folder(self):
return os.path.join(self.data_dir, 'processed')
def _check_exists(self):
return os.path.exists(os.path.join(self.processed_folder, self.ids_file))
def infer_column_type_from_row_values(numeric_idx_list, heading, body):
heading_type_dict = {k: 'text' for k in heading}
for n_idx in numeric_idx_list:
heading_type_dict[heading[n_idx]] = 'real'
for i, rows in enumerate(body):
try:
float(rows[n_idx].strip().replace('−', '-').replace(',', '').replace('–', '-'))
except:
heading_type_dict[heading[n_idx]] = 'text'
break
return heading_type_dict
def encode_tables(table_json, is_slice, query, table_tokenizer):
rel = table_json['rel']
html_pattern = re.compile(r'<\w+ [^>]*>([^<]+)</\w+>')
tag_pattern = re.compile(r'<.*?>')
link_pattern = re.compile(r'\[.*?\|.*?\]')
# Raw Json parsing ( Detail table information )
raw_json = json.loads(table_json['table']['raw_json'])
    textBeforeTable = raw_json['textBeforeTable'] # reserved for later use
    textAfterTable = raw_json['textAfterTable'] # reserved for later use
    title = raw_json['pageTitle']
    caption = re.sub(r'[^a-zA-Z0-9]', ' ', raw_json['title']).strip() # serves as the caption
tableOrientation = raw_json['tableOrientation'] # [HORIZONTAL, VERTICAL]
headerPosition = raw_json['headerPosition'] # ['FIRST_ROW', 'MIXED', 'FIRST_COLUMN', 'NONE’]
hasHeader = raw_json['hasHeader'] # [true, false]
keyColumnIndex = raw_json['keyColumnIndex']
    headerRowIndex = raw_json['headerRowIndex'] # 0 == first row, -1 == none
heading = []
body = []
    # the two orientations share the same data layout, so both are handled identically for now
    # TODO: verify individual tables against their original URLs later
    # row 0 acts as the header regardless of hasHeader / headerRowIndex
    # TODO: could keyColumnIndex be used when there is no header?
if tableOrientation.strip() == "HORIZONTAL":
# Col List -> Table
table_data = raw_json['relation']
col_cnt = len(table_data)
row_cnt = len(table_data[0])
for row in range(row_cnt):
tmp_row_data = []
for col in range(col_cnt):
tmp_row_data.append(table_data[col][row])
body.append(tmp_row_data)
# Header
for table_col in table_data:
heading.append(table_col[0])
elif tableOrientation.strip() == "VERTICAL":
# Col List -> Table
table_data = raw_json['relation']
col_cnt = len(table_data)
row_cnt = len(table_data[0])
for row in range(row_cnt):
tmp_row_data = []
for col in range(col_cnt):
tmp_row_data.append(table_data[col][row])
body.append(tmp_row_data)
# Header
for table_col in table_data:
heading.append(table_col[0])
else:
print(">>> Check the table data")
exit(-1)
    # Heading preprocessing + link removal
heading_str = ' '.join(heading)
if html_pattern.search(heading_str):
        if link_pattern.search(heading_str): # both HTML tags and links are present
heading = [re.sub(tag_pattern, '', column).strip() for column in heading]
for idx, column in enumerate(heading):
if link_pattern.search(column):
real_text = link_pattern.search(column).group().split('|')[-1][:-1].strip()
heading[idx] = real_text
else:
heading = [re.sub(html_pattern, '', column).strip() for column in heading]
    # Row preprocessing + link removal
cell_sum_str = ''
for rows in body:
cell_sum_str += ' '.join(rows)
if html_pattern.search(cell_sum_str):
        if link_pattern.search(cell_sum_str): # both are present
for i, rows in enumerate(body):
for j, cell in enumerate(rows):
if link_pattern.search(cell):
cell = re.sub(tag_pattern, '', cell).strip()
real_text = link_pattern.search(cell).group().split('|')[-1][:-1]
body[i][j] = real_text
else:
cell = re.sub(html_pattern, '', cell).strip()
body[i][j] = cell
else:
row_list = []
for rows in body:
row_list.append([re.sub(html_pattern, '', row).strip() for row in rows])
body = row_list
    # TODO: experiment with different ways of composing the context
    # TODO: compare whether adding the special tokens is actually necessary
    # TODO: make use of textBeforeTable / textAfterTable?
caption = " ".join(heading) + " " + title + " " + caption
caption_rep = table_tokenizer.tokenize(caption)
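    # optionally slice long tables into several sub-tables so each fits the model input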
if is_slice:
column_reps = slice_table(title, heading, body, caption, table_tokenizer, query, rel)
else:
column_reps = [(rel,
Table(id=caption,
header=[Column(h.strip(), 'text') for h in heading],
data=body
).tokenize(table_tokenizer))]
return caption_rep, column_reps
def slice_table( title, heading, datas, caption, table_tokenizer, query, rel):
table_rep_list = []
    min_row = 10  # each slice must keep at least this many rows
    max_table_nums = 10  # a table is split into at most this many slices
    # TODO: test with max_table_nums = 2, 5, 10
    if len(datas) <= min_row:  # the table has no more rows than the minimum
column_rep = Table(id=title,
header=[Column(h.strip(), 'text') for h in heading],
data=datas
).tokenize(table_tokenizer)
table_rep_list.append((rel, column_rep))
else:
row_n = max(min_row, ceil(len(datas) / max_table_nums))
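        # ceil-split the rows into consecutive chunks of row_n rows each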
slice_row_data = [datas[i * row_n:(i + 1) * row_n] for i in range((len(datas) + row_n - 1) // row_n)]
        if str(rel) == '0': # Negative
for rows in slice_row_data:
column_rep = Table(id=title,
header=[Column(h.strip(), 'text') for h in heading],
data=rows
).tokenize(table_tokenizer)
table_rep_list.append((rel, column_rep))
else: # Positive
query_tokens = [token.strip() for token in query.split(' ')]
            is_always_positive = False
            for token in query_tokens:
                if token in caption:
                    is_always_positive = True
                    break
            if is_always_positive: # a query token is contained in the caption
for rows in slice_row_data:
column_rep = Table(id=title,
header=[Column(h.strip(), 'text') for h in heading],
data=rows
).tokenize(table_tokenizer)
table_rep_list.append((rel, column_rep))
else:
for rows in slice_row_data:
column_rep = Table(id=title,
header=[Column(h.strip(), 'text') for h in heading],
data=rows
).tokenize(table_tokenizer)
modify_rel = '0'
                    # flatten the row data into a single string
                    cell_string_sum = ''
                    for row in rows:
                        cell_string_sum += ' '.join(row)
                    # check for overlap with the query tokens
for token in query_tokens:
if token in cell_string_sum:
modify_rel = '1'
break
table_rep_list.append((modify_rel, column_rep))
return table_rep_list
def query_table_collate_fn(batch):
query, pos_column, pos_caption, neg_column, neg_caption = zip(*batch)
input_ids, token_type_ids, attention_mask = [], [], []
for q in query:
input_ids.append(q["input_ids"].squeeze())
token_type_ids.append(q["token_type_ids"].squeeze())
attention_mask.append(q["attention_mask"].squeeze())
query = {"input_ids": torch.stack(input_ids),
"token_type_ids": torch.stack(token_type_ids),
"attention_mask": torch.stack(attention_mask)}
return query, pos_column, pos_caption, neg_column, neg_caption
class QueryTablePredictionDataset(Dataset):
def __init__(self, data_dir: str = '.data', data_type: str = 'test',
query_tokenizer=None, table_tokenizer=None, max_query_length=27,
prepare=False, is_slice=False):
self.data_dir = data_dir
self.query_file = data_type + '.query'
self.table_file = data_type + '.table'
self.ids_file = data_type + '.pair'
self.is_slice = is_slice
if prepare:
self.prepare(data_dir, data_type, query_tokenizer, table_tokenizer, max_query_length)
self.pair_ids = torch.load(os.path.join(self.processed_folder, self.ids_file))
def __len__(self):
return len(self.pair_ids)
def __getitem__(self, index):
return self.pair_ids[index]
def prepare(self, data_dir, data_type, query_tokenizer, table_tokenizer, max_query_length):
if self._check_exists():
return
processed_dir = Path(self.processed_folder)
processed_dir.mkdir(exist_ok=True)
if not (query_tokenizer and table_tokenizer):
raise RuntimeError('Tokenizers are not found.' +
' You must set query_tokenizer and table_tokenizer')
print('Processing...')
query_dict = defaultdict()
pairs = []
path = Path(data_dir + '/' + data_type + '.jsonl')
with open(path) as f:
for line in f.readlines():
if not line.strip():
break
                # parse the table metadata ( qid, tid, query, rel )
jsonStr = json.loads(line)
tid = jsonStr['docid']
query = jsonStr['query']
qid = jsonStr['qid']
rel = jsonStr['rel']
if qid not in query_dict:
                    # added 2020-04-23: add_special_tokens
query_tokenized = query_tokenizer.encode_plus(query,
max_length=max_query_length,
add_special_tokens=True,
padding='max_length',
truncation=True,
return_tensors="pt"
)
                    query_dict[qid] = query_tokenized # BERT **inputs: input_ids, token_type_ids, attention_mask
# Table Encode
caption_rep, column_reps = encode_tables(jsonStr, self.is_slice, query, table_tokenizer)
for column_rep in column_reps:
pairs.append([qid, query_dict[qid], tid, column_rep, caption_rep, rel])
# Save
with open(os.path.join(processed_dir, self.ids_file), 'wb') as f:
torch.save(pairs, f)
print('Done!')
@property
def processed_folder(self):
return os.path.join(self.data_dir, 'processed')
def _check_exists(self):
return os.path.exists(os.path.join(self.processed_folder, self.ids_file))
def query_table_prediction_collate_fn(batch):
qid, query, tid, column, caption, rel = zip(*batch)
input_ids, token_type_ids, attention_mask = [], [], []
for q in query:
input_ids.append(q["input_ids"].squeeze())
token_type_ids.append(q["token_type_ids"].squeeze())
attention_mask.append(q["attention_mask"].squeeze())
query = {"input_ids": torch.stack(input_ids),
"token_type_ids": torch.stack(token_type_ids),
"attention_mask": torch.stack(attention_mask)}
return query, column, caption, rel, qid, tid
if __name__ == "__main__":
query_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
bert_model = BertModel.from_pretrained('bert-base-uncased')
table_model = TableBertModel.from_pretrained('model/tabert_base_k3/model.bin')
table_tokenizer = table_model.tokenizer
dataset = QueryTableDataset(data_dir='data/1',
data_type='train',
query_tokenizer=query_tokenizer,
table_tokenizer=table_tokenizer,
prepare=True,
)
dataloader = DataLoader(dataset,
batch_size=2,
collate_fn=query_table_collate_fn)
for _ in range(1):
for d in dataloader:
print(d)
break | [
"khuphj@gmail.com"
] | khuphj@gmail.com |
b278b4862cd9d38dc0cdb0f2c03022f71c38a1d3 | c5e0d0fd765a6b387385c301aeafd243099cff68 | /test/test_script_utils.py | 50ffd239e91643aeb6dc9db197cf9ba00e515240 | [] | no_license | aschroed/pyscripts | ef1f9a1907b9d525a25e93320c74b2f3e346dedd | 33ba2eff4c39a7e90adb85fda41962a524bf9188 | refs/heads/master | 2021-09-14T07:33:02.611480 | 2018-05-09T19:50:01 | 2018-05-09T19:50:01 | 114,464,312 | 0 | 0 | null | 2018-05-09T19:50:02 | 2017-12-16T13:50:11 | Python | UTF-8 | Python | false | false | 8,742 | py | import pytest
from wrangling import script_utils as scu
@pytest.fixture
def eset_json():
return {
"schema_version": "2",
"accession": "4DNES4GSP9S4",
"award": "4871e338-b07d-4665-a00a-357648e5bad6",
"alternate_accessions": [],
"aliases": [
"ren:HG00512_repset"
],
"experimentset_type": "replicate",
"status": "released",
"experiments_in_set": [
"d4b0e597-8c81-43e3-aeda-e9842fc18e8f",
"8d10f11f-95a8-4b8d-8ff2-748ea8631a23"
],
"lab": "795847de-20b6-4f8c-ba8d-185215469cbf",
"public_release": "2017-06-30",
"uuid": "9eb40c13-cf85-487c-9819-71ef74a22dcc",
"documents": [],
"description": "Dilution Hi-C experiment on HG00512",
"submitted_by": "da4f53e5-4e54-4ae7-ad75-ba47316a8bfa",
"date_created": "2017-04-28T17:46:08.642218+00:00",
"replicate_exps": [
{
"replicate_exp": "d4b0e597-8c81-43e3-aeda-e9842fc18e8f",
"bio_rep_no": 1,
"tec_rep_no": 1
},
{
"replicate_exp": "8d10f11f-95a8-4b8d-8ff2-748ea8631a23",
"bio_rep_no": 2,
"tec_rep_no": 1
}
],
}
@pytest.fixture
def bs_embed_json():
return {
"lab": {
"display_title": "David Gilbert, FSU",
"uuid": "6423b207-8176-4f06-a127-951b98d6a53a",
"link_id": "~labs~david-gilbert-lab~",
"@id": "/labs/david-gilbert-lab/"
},
"display_title": "4DNBSLACJHX1"
}
@pytest.fixture
def profiles():
return {
"ExperimentSetReplicate": {
"title": "Replicate Experiments",
"description": "Experiment Set for technical/biological replicates.",
"properties": {
"tags": {"uniqueItems": "true", "description": "Key words that can tag an item - useful for filtering.", "type": "array", "ff_clear": "clone", "items": {"title": "Tag", "description": "A tag for the item.", "type": "string"}, "title": "Tags"}, # noqa: E501
"documents": {"uniqueItems": "true", "description": "Documents that provide additional information (not data file).", "type": "array", "default": [], "comment": "See Documents sheet or collection for existing items.", "title": "Documents", "items": {"title": "Document", "description": "A document that provides additional information (not data file).", "type": "string", "linkTo": "Document"}}, # noqa: E501
"notes": {"exclude_from": ["submit4dn", "FFedit-create"], "title": "Notes", "description": "DCIC internal notes.", "type": "string", "elasticsearch_mapping_index_type": {"title": "Field mapping index type", "description": "Defines one of three types of indexing available", "type": "string", "default": "analyzed", "enum": ["analyzed", "not_analyzed", "no"]}} # noqa: E501
}
},
"TreatmentChemical": {
"title": "Chemical Treatment",
"description": "A Chemical or Drug Treatment on Biosample.",
"properties": {
"documents": {"uniqueItems": "true", "description": "Documents that provide additional information (not data file).", "type": "array", "default": [], "comment": "See Documents sheet or collection for existing items.", "title": "Documents", "items": {"title": "Document", "description": "A document that provides additional information (not data file).", "type": "string", "linkTo": "Document"}}, # noqa: E501
"public_release": {"anyOf": [{"format": "date-time"}, {"format": "date"}], "exclude_from": ["submit4dn", "FFedit-create"], "description": "The date which the item was released to the public", "permission": "import_items", "type": "string", "comment": "Do not submit, value is assigned when released.", "title": "Public Release Date"}, # noqa: E501
}
}
}
def test_is_uuid():
uuids = [
'231111bc-8535-4448-903e-854af460b254',
'231111bc-8535-4448-903e-854af460b25',
'231111bc85354448903e854af460b254'
]
for i, u in enumerate(uuids):
if i == 0:
assert scu.is_uuid(u)
else:
assert not scu.is_uuid(u)
def test_find_uuids_from_eset(eset_json):
field2uuid = {
"award": "4871e338-b07d-4665-a00a-357648e5bad6",
"lab": "795847de-20b6-4f8c-ba8d-185215469cbf",
"uuid": "9eb40c13-cf85-487c-9819-71ef74a22dcc",
"submitted_by": "da4f53e5-4e54-4ae7-ad75-ba47316a8bfa"
}
exps = ["d4b0e597-8c81-43e3-aeda-e9842fc18e8f", "8d10f11f-95a8-4b8d-8ff2-748ea8631a23"]
for field, val in eset_json.items():
ulist = scu.find_uuids(val)
if field in field2uuid:
assert field2uuid[field] == ulist[0]
elif field in ["experiments_in_set", "replicate_exps"]:
for u in ulist:
assert u in exps
def test_filter_dict_by_value(eset_json):
to_filter = {
"schema_version": "2",
"accession": "4DNES4GSP9S4",
"aliases": ["ren:HG00512_repset"]
}
vals = list(to_filter.values())
included = scu.filter_dict_by_value(eset_json, vals)
assert len(included) == len(to_filter)
for f in to_filter.keys():
assert f in included
excluded = scu.filter_dict_by_value(eset_json, vals, include=False)
assert len(excluded) == len(eset_json) - len(to_filter)
for f in to_filter.keys():
assert f not in excluded
def test_has_field_value_check_for_field_only(eset_json):
fieldnames = ['schema_version', 'award', 'alternate_accessions']
for f in fieldnames:
assert scu.has_field_value(eset_json, f)
def test_has_field_value_no_it_doesnt(eset_json):
fieldnames = ['biosample', 'blah', 'bio_rep_no']
for f in fieldnames:
assert not scu.has_field_value(eset_json, f)
def test_has_field_value_check_for_field_and_value(eset_json):
fields_w_values = {
"schema_version": "2",
"accession": "4DNES4GSP9S4",
"aliases": "ren:HG00512_repset"
}
for f, v in fields_w_values.items():
assert scu.has_field_value(eset_json, f, v)
def test_has_field_value_check_for_field_w_item(bs_embed_json):
f = "lab"
v = "/labs/david-gilbert-lab/"
assert scu.has_field_value(bs_embed_json, f, v)
def test_get_types_that_can_have_field(mocker, profiles):
field = 'tags'
    mocker.patch('dcicutils.submit_utils.get_FDN', return_value=profiles)
    types_w_field = scu.get_types_that_can_have_field('conn', field)
assert 'ExperimentSetReplicate' in types_w_field
assert 'TreatmentChemical' not in types_w_field
def test_get_item_type_from_dict(eset_json):
eset_json['@type'] = ['ExperimentSetReplicate', 'ExperimentSet', 'Item']
es_ty = scu.get_item_type('blah', eset_json)
assert es_ty == 'ExperimentSetReplicate'
def test_get_item_type_from_id(mocker, connection):
    mocker.patch('dcicutils.submit_utils.get_FDN',
                 return_value={'@type': ['ExperimentSetReplicate']})
    result = scu.get_item_type(connection, 'blah')
assert result == 'ExperimentSetReplicate'
@pytest.fixture
def items_w_uuids():
return [
{'name': 'one', 'uuid': 'a'},
{'name': 'two', 'uuid': 'b'},
{'name': 'three', 'uuid': 'c'},
]
def test_get_item_ids_from_list(connection):
ids = ['a', 'b', 'c']
result = scu.get_item_ids_from_args(ids, connection)
    assert all(i in ids for i in result)
def test_get_item_ids_from_search(mocker, connection, items_w_uuids):
ids = ['a', 'b', 'c']
    mocker.patch('dcicutils.submit_utils.get_FDN', return_value=items_w_uuids)
    result = scu.get_item_ids_from_args('search', connection, True)
    assert all(i in ids for i in result)
def test_get_item_uuid_w_uuid(connection):
uid = '7868f960-50ac-11e4-916c-0800200c9a66'
result = scu.get_item_uuid(uid, connection)
assert result == uid
def test_get_item_uuid_w_atid(mocker, connection):
    atid = '/labs/test-lab'
    # mocker.patch is not a context manager, and Mock.called_with() is an
    # auto-created attribute that always passes; keep a handle on the mock
    # and verify with assert_called_with instead.
    mt = mocker.patch('dcicutils.submit_utils.get_FDN',
                      return_value={'uuid': 'test_uuid'})
    result = scu.get_item_uuid(atid, connection)
    mt.assert_called_with(atid, connection)
    assert result == 'test_uuid'
def test_get_item_uuid_not_found(mocker, connection):
    atid = '/labs/non-lab'
    mt = mocker.patch('dcicutils.submit_utils.get_FDN',
                      return_value={'status': 'error'})
    result = scu.get_item_uuid(atid, connection)
    mt.assert_called_with(atid, connection)
    assert result is None
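# Hedged addition (not in the original suite): a fixture-free sanity check for
# scu.find_uuids. It assumes, as the eset-based test above implies, that
# non-UUID strings inside a list are simply skipped.
def test_find_uuids_from_synthetic_list():
    uid = '231111bc-8535-4448-903e-854af460b254'
    assert scu.find_uuids([uid, 'not-a-uuid']) == [uid]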
| [
"andrew_schroeder@hms.harvard.edu"
] | andrew_schroeder@hms.harvard.edu |
151195b8c833253e8d92ab996736cc4af7056b96 | eeebaeef745bd59ee2b5679d3755e95abd483484 | /POS_Management/settings.py | 5174e10e175eac5779be7c58b9d98f3ffb4dfffc | [] | no_license | chandanbcsm012/Point-Of-Sale-POS | f478444dd97a23f4f71b43b86411881a2a11a6a3 | 1f2fef8fd47a954b71d8d7dd33633bba15470383 | refs/heads/master | 2020-06-19T12:47:03.339999 | 2019-07-13T11:39:26 | 2019-07-13T11:39:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,013 | py | """
Django settings for POS_Management project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')
STATIC_DIR = os.path.join(BASE_DIR, 'static')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'nu+sb#9e!crkcrrc!13%zo_8i_^6y0mn4swl-l71v7na!6d)3u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_bootstrap_breadcrumbs',
'view_breadcrumbs',
'django_countries',
'crispy_forms',
'widget_tweaks',
'customer',
'supplier',
'category',
'brand',
'product_type',
'product',
'purchase',
'purchase_product_details',
'sale',
'sale_product_details',
'tax_rate',
'bootstrap_select.apps.BootstrapSelectConfig',
'reports',
'mathfilters',
]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'user.middleware.LoginRequiredMiddleware',
]
ROOT_URLCONF = 'POS_Management.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'POS_Management.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': 'squarelogic$POS',
# 'USER': 'root',
# 'PASSWORD': 'Square@95',
# 'HOST': '127.0.0.1',
# 'PORT': '3306'
# }
# }
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': 'squarelogic$POS',
# 'USER': 'squarelogic',
# 'PASSWORD': 'mysqlsquare',
# 'HOST': 'squarelogic.mysql.pythonanywhere-services.com',
# }
# }
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql',
# 'NAME': 'posdb',
# 'USER': 'postgres',
# 'PASSWORD': 'Square@95',
# 'HOST': '127.0.0.1',
# 'PORT': '5432',
# }
# }
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
}
},
'loggers': {
'django.db.backends': {
'handlers': ['console'],
'level': 'DEBUG',
},
}
}
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
LOGIN_URL = '/user/login'
# STATIC_ROOT = os.path.join(BASE_DIR, "static")
STATICFILES_DIRS = [
STATIC_DIR,
]
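# Django searches STATICFILES_DIRS for assets during development; for a
# production `collectstatic` run, STATIC_ROOT (commented out above) must be
# set and must not overlap STATICFILES_DIRS.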
| [
"chandanbcsm012@gmail.com"
] | chandanbcsm012@gmail.com |
ee9975574c4cd6f83d4d818c6b2d0c88f19ce127 | 76c331693361509655785ddcbed7b42805eef39c | /test_xtrace_parser.py | 97c47c5a751d8684aa16b41bacce4e58bb66233f | [] | no_license | palvaro/callgraph_parsing | 53b2a7e459cee7b71f7d33ee0618e667cb99a4fd | 094a89170663a9e625d92816291d0f760cfb81ce | refs/heads/master | 2022-05-24T14:01:42.332639 | 2022-05-17T19:22:54 | 2022-05-17T19:22:54 | 197,433,870 | 1 | 0 | null | 2020-05-10T17:35:45 | 2019-07-17T17:27:41 | Jupyter Notebook | UTF-8 | Python | false | false | 1,001 | py | import json
import pytest
from xtrace_parser import XTraceParser
@pytest.fixture
def testdir():
return "xtrace_data/"
def test_xtrace_edges(testdir):
fnames = ["hdfs_trace.json", "ds_trace.json"]
for name in fnames:
ip_fpath = testdir + name
trace = XTraceParser(ip_fpath)
trace.process()
        with open(ip_fpath, "r") as f:
            json_data = json.load(f)
# Count the number of parent events that are also events in the trace
# to find the number of edges in the trace
trace_data = json_data[0]["reports"]
events = set(map(lambda x: x["EventID"], trace_data))
num_edges = 0
for span in trace_data:
if not "ParentEventID" in span:
continue
parents = set(span["ParentEventID"])
num_edges += len(parents.intersection(events))
# Check that the number of edges before and after processing match
        assert num_edges == len(trace.edges)
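# Hedged add-on (synthetic data, independent of XTraceParser): the same
# parent/event intersection logic as above, run on a tiny hand-made trace.
# Expected edges: A->B, A->C and B->C; the unknown parent "missing" is ignored.
def test_edge_count_logic_on_synthetic_spans():
    spans = [
        {"EventID": "A"},
        {"EventID": "B", "ParentEventID": ["A"]},
        {"EventID": "C", "ParentEventID": ["A", "B", "missing"]},
    ]
    events = set(map(lambda x: x["EventID"], spans))
    num_edges = 0
    for span in spans:
        if "ParentEventID" not in span:
            continue
        num_edges += len(set(span["ParentEventID"]).intersection(events))
    assert num_edges == 3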
| [
"kamala.ramas@gmail.com"
] | kamala.ramas@gmail.com |
0d04bd3854dda5ce09a0ee3aa7f1f60626f35220 | 0d5e4ad0a693492204aa6210c2de470b26732509 | /commands/eztv_mininova.py | f03143d557ebdcff904fff885f927ad0d6d242bd | [] | no_license | enlavin/tvscrap | 7d4ffe16a5af9f1747c021a0cc6bd187a5b0c91e | 28d9baf1a2b2db4321b59747e85f1302f92f3a98 | refs/heads/master | 2020-04-29T10:20:45.150974 | 2015-04-26T18:11:26 | 2015-04-26T18:11:26 | 18,444,784 | 1 | 1 | null | 2015-04-28T20:24:58 | 2014-04-04T16:18:24 | Python | UTF-8 | Python | false | false | 2,225 | py | # -*- coding: utf-8 -*-
# GNU General Public Licence (GPL)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
# Place, Suite 330, Boston, MA 02111-1307 USA
try:
import feedparser
except ImportError:
print "feedparser support not installed. Try easy_install feedparser."
import sys
sys.exit(1)
import re
from optparse import OptionParser
from db import Show, Episode
from lib.feed_command import FeedCommand
EZTV_MININOVA_RSS = "http://www.mininova.org/rss.xml?user=eztv"
class Command(FeedCommand):
def __init__(self, store):
super(Command, self).__init__(store)
self.rx_episode_size = re.compile(u'Size:\s+([0-9.]+)')
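        # Captures the numeric part of e.g. "Size: 350.12" from an entry
        # summary (the sample text is illustrative of the mininova markup).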
def _config_feed(self):
import feedparser
if getattr(self.options, "file"):
self.feed = feedparser.parse(self.options.file)
elif getattr(self.options, "url"):
self.feed = feedparser.parse(self.options.url)
else:
self.feed = feedparser.parse(EZTV_MININOVA_RSS)
if not self.feed["entries"]:
            raise Exception("Feed returned no entries")
def _iter_feed(self):
for entry in self.feed["entries"]:
try:
size = float(self.rx_episode_size.findall(entry["summary"])[0])
except IndexError:
print "File size not available. Skipping"
continue
except TypeError:
print "File size field corrupt. Skipping"
continue
yield {
"name": entry["title"],
"size": size,
"url_torrent": [entry['enclosures'][0]["href"]],
}
| [
"devnull@localhost"
] | devnull@localhost |
16f45f604368f5c17ae6948378384aecc986b6a4 | 37fece52e34ac15472fbfbb7d10b683d674e99a9 | /src/rotator/e2e/util/PoeticEdda.py | db64b3800ea12d2022150e289392065948ad3c77 | [] | no_license | bharathkeshavamurthy/Stormbreaker | 7aa8dcde48df66c15e56b66c29ae67b10a59250b | d28889c926de816f1e68f9a71c30467fbc4f63b9 | refs/heads/master | 2023-04-11T10:44:17.543345 | 2023-01-20T06:45:06 | 2023-01-20T06:45:06 | 480,673,653 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,383 | py | """
Poetic Edda: GPS TxRealm/RxRealm route (and received signal power) visualizations using Bokeh and the Google Maps API
This Python script encapsulates the operations involved in visualizing the routes traversed by the Tx and Rx
realm units on the Google Maps API [2D Maps | 3D Maps | Satellite | Hybrid | Earth | Roads].
Author: Bharath Keshavamurthy <bkeshava@purdue.edu | bkeshav1@asu.edu>
Organization: School of Electrical and Computer Engineering, Purdue University, West Lafayette, IN.
School of Electrical, Computer and Energy Engineering, Arizona State University, Tempe, AZ.
Copyright (c) 2021. All Rights Reserved.
"""
# Project Odin Route Post-Processing Engine (Codex Regius | Poetic Edda)
# The imports
import os
import json
import pandas
from bokeh.plotting import gmap
from bokeh.io import export_png
from bokeh.palettes import brewer
from dataclasses import make_dataclass
from bokeh.models import GMapOptions, ColumnDataSource, ColorBar, LinearColorMapper
"""
Data Object Setup
"""
gps_coordinates = list()
gps_coordinates_dataframe = None
gps_coordinate = make_dataclass('GPS_Coordinate', [('latitude', float), ('longitude', float)])
"""
TODO: Configurations-I | Input and Output File Locations
"""
visualization_mode = 'route' # Allowed modes = 'route', 'rx-power'
# Input directories and files
gps_log_files_dir = 'C:/Users/kesha/Workspaces/Odin/deployment/measurement-campaign/routes/gps-data/urban-stadium/'
# gps_log_files_dir = 'C:/Users/kesha/Workspaces/Odin/deployment/measurement-campaign/routes/gps-data/urban-campus-II/'
rx_power_matched_csv_file = 'urban-stadium-rx-power.csv'
# rx_power_matched_csv_file = 'urban-campus-II-rx-power.csv'
rx_power_matched_csv_dir = 'C:/Users/kesha/Workspaces/Odin/src/rotator/e2e/test/ArkAngel-VI/'
# Output directories and files
png_file_name = 'urban-stadium-route.png'
# png_file_name = 'urban-campus-II-route.png'
png_file_dir = 'C:/Users/kesha/Workspaces/Odin/src/rotator/e2e/test/ArkAngel-VI/'
"""
TODO: Configurations-II | Map Visualization Options
"""
map_type = 'hybrid' # Allowed types = 'satellite', 'terrain', 'hybrid', 'roadmap'
map_width, map_height, map_zoom_level, map_title = 3000, 5000, 20, 'Urban Stadium Route [Van]'
# map_width, map_height, map_zoom_level, map_title = 6300, 6300, 20, 'Urban Campus-II Route [Van]'
map_central = gps_coordinate(40.7640, -111.8479) # urban-stadium central <latitude, longitude> in degrees
# map_central = gps_coordinate(40.7640, -111.8515) # urban-campus-II central <latitude, longitude> in degrees
tx_location = gps_coordinate(40.766173670, -111.847939330) # <latitude, longitude> in degrees
tx_pin_size, tx_pin_alpha, tx_pin_color = 80, 1.0, 'red'
rx_pins_size, rx_pins_alpha, rx_pins_color = 30, 1.0, 'yellow'
color_palette, color_palette_index = 'RdYlGn', 11
# urban-campus-II
# color_bar_width, color_bar_height, color_bar_label_size, color_bar_orientation = 125, 6250, '125px', 'vertical'
# urban-stadium
color_bar_width, color_bar_height, color_bar_label_size, color_bar_orientation = 125, 3950, '125px', 'vertical'
color_bar_layout_location = 'right'
google_maps_api_key = 'AIzaSyDzb5CB4L9l42MyvSmzvaSZ3bnRINIjpUk'
png_file_export_timeout = 300 # In seconds [Selenium Requirements: <FireFox, GeckoDriver> | <Chromium, ChromeDriver>]
# Extraction: Read and Collect the JSON logs (GPS publishes/subscriptions corresponding to a certain realm) AND
# Collection: Create a Pandas Dataframe from a collection constituting the parsed GPS_Coordinate dataclass instances
if visualization_mode == 'route':
for gps_log_file in os.listdir(gps_log_files_dir):
with open(''.join([gps_log_files_dir, gps_log_file]), 'r') as gps_data:
gps_data_dict = json.loads(gps_data.read())
gps_coordinates.append(gps_coordinate(gps_data_dict['latitude']['component'],
gps_data_dict['longitude']['component']))
gps_coordinates_dataframe = pandas.DataFrame(gps_coordinates)
else:
gps_coordinates_dataframe = pandas.read_csv(''.join([rx_power_matched_csv_dir, rx_power_matched_csv_file]),
names=['latitude', 'longitude', 'rx-power'])
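# Hedged illustration (added; the coordinate values are invented): each GPS
# log consumed by the 'route' branch above is a JSON object shaped like
# {"latitude": {"component": 40.766173}, "longitude": {"component": -111.847939}};
# only the 'component' nesting is taken from the parsing code.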
# Visualization: Google Maps rendition of the specified route OR received signal power levels along the specified route
gps_coordinates_dataframe.drop(gps_coordinates_dataframe[gps_coordinates_dataframe['longitude'] <= -111.85].index,
inplace=True) # Specific to urban-stadium to drop the west-side parking-lot
google_maps_options = GMapOptions(lat=map_central.latitude, lng=map_central.longitude,
map_type=map_type, zoom=map_zoom_level)
figure = gmap(google_maps_api_key, google_maps_options, title=map_title, width=map_width, height=map_height)
figure_tx_point = figure.diamond([tx_location.longitude], [tx_location.latitude],
size=tx_pin_size, alpha=tx_pin_alpha, color=tx_pin_color)
if visualization_mode == 'route':
figure_rx_points = figure.circle('longitude', 'latitude', size=rx_pins_size, alpha=rx_pins_alpha,
color=rx_pins_color, source=ColumnDataSource(gps_coordinates_dataframe))
else:
palette = brewer[color_palette][color_palette_index]
color_mapper = LinearColorMapper(palette=palette, low=gps_coordinates_dataframe['rx-power'].min(),
high=gps_coordinates_dataframe['rx-power'].max())
color_bar = ColorBar(color_mapper=color_mapper, width=color_bar_width, height=color_bar_height,
major_label_text_font_size=color_bar_label_size,
label_standoff=color_palette_index, orientation=color_bar_orientation)
figure_rx_points = figure.circle('longitude', 'latitude', size=rx_pins_size, alpha=rx_pins_alpha,
color={'field': 'rx-power', 'transform': color_mapper},
source=ColumnDataSource(gps_coordinates_dataframe))
figure.add_layout(color_bar, color_bar_layout_location)
# Output image file export
export_png(figure, filename=''.join([png_file_dir, png_file_name]), timeout=png_file_export_timeout)
# The End
| [
"bkeshav1@asu.edu"
] | bkeshav1@asu.edu |
3fcdfddc6d13051a9dca15b880b1b4b6fe496fbc | d88397be1c6a31985bc2283280e743fd3b988dd1 | /nncf/hw_config.py | 1167773db5e48ea9112bf8784a671aa0ad028ed1 | [
"Apache-2.0"
] | permissive | sshyran/openvino-nncf-pytorch | f5e09066a216fa786927937a91a0e6742f347660 | fd02652950cd803a36f5283f5a5df999bb45433b | refs/heads/develop | 2023-04-18T06:58:54.646669 | 2021-03-12T15:41:39 | 2021-03-12T15:41:39 | 347,374,166 | 0 | 0 | Apache-2.0 | 2023-04-03T23:52:21 | 2021-03-13T13:11:32 | null | UTF-8 | Python | false | false | 11,105 | py | """
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
from enum import Enum
from pathlib import Path
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Set
from typing import Type
import addict as ad
import jstyleson as json
import warnings
from nncf.common.os import safe_open
from nncf.config import product_dict
from nncf.definitions import HW_CONFIG_RELATIVE_DIR
from nncf.definitions import NNCF_PACKAGE_ROOT_DIR
from nncf.dynamic_graph.operator_metatypes import OPERATOR_METATYPES
from nncf.hw_config_op_names import HWConfigOpName
from nncf.quantization.layers import AsymmetricQuantizer
from nncf.quantization.layers import QuantizationMode
from nncf.quantization.layers import QuantizerConfig
from nncf.quantization.layers import SymmetricQuantizer
class HWConfigType(Enum):
CPU = 'CPU'
GPU = 'GPU'
VPU = 'VPU'
@staticmethod
def from_str(config_value: str) -> 'HWConfigType':
if config_value == HWConfigType.CPU.value:
return HWConfigType.CPU
if config_value == HWConfigType.GPU.value:
return HWConfigType.GPU
if config_value == HWConfigType.VPU.value:
return HWConfigType.VPU
raise RuntimeError("Unknown HW config type string")
HW_CONFIG_TYPE_TARGET_DEVICE_MAP = {
'ANY': HWConfigType.CPU.value,
'CPU': HWConfigType.CPU.value,
'VPU': HWConfigType.VPU.value,
'GPU': HWConfigType.GPU.value,
'TRIAL': None
}
def get_metatypes_by_hw_config_name(hw_config_name: HWConfigOpName) -> List['OperatorMetatype']:
retval = []
for op_meta in OPERATOR_METATYPES.registry_dict.values(): # type: OperatorMetatype
if hw_config_name in op_meta.hw_config_names:
retval.append(op_meta)
return retval
class HWConfig(list):
QUANTIZATION_ALGORITHM_NAME = "quantization"
ATTRIBUTES_NAME = "attributes"
SCALE_ATTRIBUTE_NAME = "scales"
UNIFIED_TYPE_NAME = "unified"
ADJUST_PADDING_ATTRIBUTE_NAME = "adjust_padding"
TYPE_TO_CONF_NAME_DICT = {
HWConfigType.CPU: "cpu.json",
HWConfigType.VPU: "vpu.json",
HWConfigType.GPU: "gpu.json"
}
def __init__(self):
super().__init__()
self.registered_algorithm_configs = {}
self.target_device = None
@staticmethod
def get_path_to_hw_config(hw_config_type: HWConfigType):
return '/'.join([NNCF_PACKAGE_ROOT_DIR, HW_CONFIG_RELATIVE_DIR,
HWConfig.TYPE_TO_CONF_NAME_DICT[hw_config_type]])
@classmethod
def from_dict(cls, dct: dict):
# pylint:disable=too-many-nested-blocks,too-many-branches
hw_config = cls()
hw_config.target_device = dct['target_device']
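        # Each algorithm section under 'config' defines named "aliases"; every
        # field value is normalized to a list and expanded via product_dict,
        # so a single alias can denote a whole family of concrete configs.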
for algorithm_name, algorithm_configs in dct.get('config', {}).items():
hw_config.registered_algorithm_configs[algorithm_name] = {}
for algo_config_alias, algo_config in algorithm_configs.items():
for key, val in algo_config.items():
if not isinstance(val, list):
algo_config[key] = [val]
hw_config.registered_algorithm_configs[algorithm_name][algo_config_alias] = list(
product_dict(algo_config))
for op_dict in dct.get('operations', []):
for algorithm_name in op_dict:
if algorithm_name not in hw_config.registered_algorithm_configs:
continue
tmp_config = {}
for algo_and_op_specific_field_name, algorithm_configs in op_dict[algorithm_name].items():
if not isinstance(algorithm_configs, list):
algorithm_configs = [algorithm_configs]
tmp_config[algo_and_op_specific_field_name] = []
for algorithm_config in algorithm_configs:
if isinstance(algorithm_config, str): # Alias was supplied
tmp_config[algo_and_op_specific_field_name].extend(
hw_config.registered_algorithm_configs[algorithm_name][algorithm_config])
else:
for key, val in algorithm_config.items():
if not isinstance(val, list):
algorithm_config[key] = [val]
tmp_config[algo_and_op_specific_field_name].extend(list(product_dict(algorithm_config)))
op_dict[algorithm_name] = tmp_config
hw_config.append(ad.Dict(op_dict))
return hw_config
@classmethod
def from_json(cls, path):
file_path = Path(path).resolve()
with safe_open(file_path) as f:
json_config = json.load(f, object_pairs_hook=OrderedDict)
return HWConfig.from_dict(json_config)
@staticmethod
def get_quantization_mode_from_config_value(str_val: str):
if str_val == "symmetric":
return QuantizationMode.SYMMETRIC
if str_val == "asymmetric":
return QuantizationMode.ASYMMETRIC
raise RuntimeError("Invalid quantization type specified in HW config")
@staticmethod
def get_is_per_channel_from_config_value(str_val: str):
if str_val == "perchannel":
return True
if str_val == "pertensor":
return False
raise RuntimeError("Invalid quantization granularity specified in HW config")
@staticmethod
def get_qconf_from_hw_config_subdict(quantization_subdict: Dict):
bits = quantization_subdict["bits"]
mode = HWConfig.get_quantization_mode_from_config_value(quantization_subdict["mode"])
is_per_channel = HWConfig.get_is_per_channel_from_config_value(quantization_subdict["granularity"])
signedness_to_force = None
if 'level_low' in quantization_subdict and 'level_high' in quantization_subdict:
signedness_to_force = False
if mode == QuantizationMode.SYMMETRIC:
if quantization_subdict['level_low'] < 0 < quantization_subdict['level_high']:
signedness_to_force = True
true_level_low, true_level_high, _ = SymmetricQuantizer.calculate_level_ranges(bits, True)
else:
signedness_to_force = True
true_level_low, true_level_high, _ = AsymmetricQuantizer.calculate_level_ranges(bits)
            assert quantization_subdict['level_low'] == true_level_low, \
                "Invalid value of quantizer parameter `level_low`. " \
                "The parameter must be consistent with other parameters!"
            assert quantization_subdict['level_high'] == true_level_high, \
                "Invalid value of quantizer parameter `level_high`. " \
                "The parameter must be consistent with other parameters!"
return QuantizerConfig(num_bits=bits,
mode=mode,
per_channel=is_per_channel,
signedness_to_force=signedness_to_force)
@staticmethod
def is_qconf_list_corresponding_to_unspecified_op(qconf_list: Optional[List[QuantizerConfig]]):
return qconf_list is None
@staticmethod
def is_wildcard_quantization(qconf_list: Optional[List[QuantizerConfig]]):
# Corresponds to an op itself being specified in the HW config, but having no associated quantization
# configs specified
return qconf_list is not None and len(qconf_list) == 0
def get_metatype_vs_quantizer_configs_map(self, for_weights=False) -> Dict[Type['OperatorMetatype'],
Optional[List[QuantizerConfig]]]:
# 'None' for ops unspecified in HW config, empty list for wildcard quantization ops
retval = {k: None for k in OPERATOR_METATYPES.registry_dict.values()}
config_key = "weights" if for_weights else "activations"
for op_dict in self:
hw_config_op_name = op_dict.type # type: HWConfigOpName
metatypes = get_metatypes_by_hw_config_name(hw_config_op_name)
if not metatypes:
warnings.warn("Operation name {} in HW config is not registered in NNCF under any supported operation "
"metatype - will be ignored".format(hw_config_op_name))
if self.QUANTIZATION_ALGORITHM_NAME in op_dict:
allowed_qconfs = op_dict[self.QUANTIZATION_ALGORITHM_NAME][config_key]
else:
allowed_qconfs = []
qconf_list_with_possible_duplicates = []
for hw_config_qconf_dict in allowed_qconfs:
qconf_list_with_possible_duplicates.append(
self.get_qconf_from_hw_config_subdict(hw_config_qconf_dict))
qconf_list = list(OrderedDict.fromkeys(qconf_list_with_possible_duplicates))
for meta in metatypes:
retval[meta] = qconf_list
return retval
def _get_operations_with_attribute_values(self, attribute_name_per_its_value: Dict[str, Any]) -> \
Set[Type['OperatorMetatype']]:
result = set()
for op_dict in self:
if self.ATTRIBUTES_NAME not in op_dict:
continue
for attr_name, attr_value in attribute_name_per_its_value.items():
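                # op_dict is an addict.Dict, so indexing a missing attribute
                # returns an empty Dict instead of raising; the is_attr_set
                # check below is what actually filters out absent attributes.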
is_value_matched = op_dict[self.ATTRIBUTES_NAME][attr_name] == attr_value
is_attr_set = attr_name in op_dict[self.ATTRIBUTES_NAME]
if is_value_matched and is_attr_set:
hw_config_op_name = op_dict.type # type: HWConfigOpName
metatypes = get_metatypes_by_hw_config_name(hw_config_op_name)
if not metatypes:
warnings.warn(
"Operation name {} in HW config is not registered in NNCF under any supported "
"operation metatype - will be ignored".format(hw_config_op_name))
result.update(metatypes)
return result
def get_operations_with_unified_scales(self) -> Set[Type['OperatorMetatype']]:
return self._get_operations_with_attribute_values({self.SCALE_ATTRIBUTE_NAME: self.UNIFIED_TYPE_NAME})
def get_operations_with_adjusted_paddings(self) -> Set[Type['OperatorMetatype']]:
return self._get_operations_with_attribute_values({self.ADJUST_PADDING_ATTRIBUTE_NAME: True})
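# Hedged illustration (added; not part of NNCF itself): a minimal dict in the
# shape HWConfig.from_dict above consumes. "q8_tnr" is an alias expanded via
# product_dict and referenced by name from the per-operation entry; in a real
# config the operation type must be a valid HWConfigOpName.
_EXAMPLE_HW_CONFIG_DICT = {
    "target_device": "CPU",
    "config": {
        "quantization": {
            "q8_tnr": {"bits": 8,
                       "mode": "symmetric",
                       "granularity": "pertensor"}
        }
    },
    "operations": [
        {"type": "Convolution",
         "quantization": {"activations": ["q8_tnr"], "weights": ["q8_tnr"]}}
    ]
}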
| [
"noreply@github.com"
] | sshyran.noreply@github.com |
a95071156b455721a03968795e2f8b317dfe27a2 | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/surface/compute/instances/set_scheduling.py | 978e800fe7a236c88bdf266e3ed098e87d9fc1bf | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 9,785 | py | # -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for setting scheduling for virtual machine instances."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import instance_utils
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute.instances import flags
from googlecloudsdk.command_lib.compute.sole_tenancy import flags as sole_tenancy_flags
from googlecloudsdk.command_lib.compute.sole_tenancy import util as sole_tenancy_util
from googlecloudsdk.core.util import times
@base.ReleaseTracks(base.ReleaseTrack.GA)
class SetSchedulingInstances(base.SilentCommand):
"""Set scheduling options for Compute Engine virtual machines.
*${command}* is used to update scheduling options for VM instances.
You can only call this method on a VM instance that is stopped
(a VM instance in a `TERMINATED` state).
"""
detailed_help = {
'EXAMPLES':
"""
        To set an instance to be terminated during maintenance, run:
$ {command} example-instance --maintenance-policy=TERMINATE --zone=us-central1-b
"""
}
_support_host_error_timeout_seconds = False
_support_local_ssd_recovery_timeout = True
_support_max_run_duration = False
@classmethod
def Args(cls, parser):
parser.add_argument(
'--restart-on-failure',
action=arg_parsers.StoreTrueFalseAction,
help="""\
The instances will be restarted if they are terminated by Compute
Engine. This does not affect terminations performed by the user.
This option is mutually exclusive with --preemptible.
""")
flags.AddPreemptibleVmArgs(parser, is_update=True)
flags.AddProvisioningModelVmArgs(parser)
flags.AddInstanceTerminationActionVmArgs(parser, is_update=True)
flags.AddMaintenancePolicyArgs(parser)
sole_tenancy_flags.AddNodeAffinityFlagToParser(parser, is_update=True)
flags.INSTANCE_ARG.AddArgument(parser)
flags.AddMinNodeCpuArg(parser, is_update=True)
flags.AddLocalSsdRecoveryTimeoutArgs(parser)
def _Run(self, args):
"""Issues request necessary for setting scheduling options."""
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client
instance_ref = flags.INSTANCE_ARG.ResolveAsResource(
args,
holder.resources,
scope_lister=flags.GetInstanceZoneScopeLister(client))
scheduling_options = client.messages.Scheduling()
scheduling_options.automaticRestart = args.restart_on_failure
if args.IsSpecified('preemptible'):
scheduling_options.preemptible = args.preemptible
if self._support_host_error_timeout_seconds and hasattr(
args, 'host_error_timeout_seconds'):
scheduling_options.hostErrorTimeoutSeconds = args.host_error_timeout_seconds
if self._support_local_ssd_recovery_timeout and hasattr(
args, 'local_ssd_recovery_timeout') and args.IsSpecified(
'local_ssd_recovery_timeout'):
scheduling_options.localSsdRecoveryTimeout = client.messages.Duration(
seconds=args.local_ssd_recovery_timeout)
if (hasattr(args, 'provisioning_model') and
args.IsSpecified('provisioning_model')):
scheduling_options.provisioningModel = (
client.messages.Scheduling.ProvisioningModelValueValuesEnum(
args.provisioning_model))
cleared_fields = []
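    # Field names collected in cleared_fields are handed to IncludeFields
    # below, which makes apitools serialize them even when set to None --
    # that is what tells the API to clear the server-side setting.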
if (hasattr(args, 'instance_termination_action') and
args.IsSpecified('instance_termination_action')):
flags.ValidateInstanceScheduling(args, self._support_max_run_duration)
scheduling_options.instanceTerminationAction = (
client.messages.Scheduling.InstanceTerminationActionValueValuesEnum(
args.instance_termination_action))
elif args.IsSpecified('clear_instance_termination_action'):
scheduling_options.instanceTerminationAction = None
cleared_fields.append('instanceTerminationAction')
if args.IsSpecified('min_node_cpu'):
scheduling_options.minNodeCpus = int(args.min_node_cpu)
elif args.IsSpecified('clear_min_node_cpu'):
scheduling_options.minNodeCpus = None
cleared_fields.append('minNodeCpus')
if args.IsSpecified('maintenance_policy'):
scheduling_options.onHostMaintenance = (
client.messages.Scheduling.OnHostMaintenanceValueValuesEnum(
args.maintenance_policy))
if hasattr(args, 'max_run_duration') and args.IsSpecified(
'max_run_duration'
):
scheduling_options.maxRunDuration = client.messages.Duration(
seconds=args.max_run_duration
)
elif hasattr(args, 'clear_max_run_duration') and args.IsSpecified(
'clear_max_run_duration'
):
scheduling_options.maxRunDuration = None
cleared_fields.append('maxRunDuration')
if hasattr(args, 'termination_time') and args.IsSpecified(
'termination_time'
):
scheduling_options.terminationTime = times.FormatDateTime(
args.termination_time
)
elif hasattr(args, 'clear_termination_time') and args.IsSpecified(
'clear_termination_time'
):
scheduling_options.terminationTime = None
cleared_fields.append('terminationTime')
if instance_utils.IsAnySpecified(args, 'node', 'node_affinity_file',
'node_group'):
affinities = sole_tenancy_util.GetSchedulingNodeAffinityListFromArgs(
args, client.messages)
scheduling_options.nodeAffinities = affinities
elif args.IsSpecified('clear_node_affinities'):
scheduling_options.nodeAffinities = []
cleared_fields.append('nodeAffinities')
with holder.client.apitools_client.IncludeFields(cleared_fields):
request = client.messages.ComputeInstancesSetSchedulingRequest(
instance=instance_ref.Name(),
project=instance_ref.project,
scheduling=scheduling_options,
zone=instance_ref.zone)
return client.MakeRequests([(client.apitools_client.instances,
'SetScheduling', request)])
def Run(self, args):
return self._Run(args)
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class SetSchedulingInstancesBeta(SetSchedulingInstances):
"""Set scheduling options for Compute Engine virtual machines.
*${command}* is used to update scheduling options for VM instances.
You can only call this method on a VM instance that is stopped
(a VM instance in a `TERMINATED` state).
"""
_support_host_error_timeout_seconds = True
_support_max_run_duration = True
_support_local_ssd_recovery_timeout = True
@classmethod
def Args(cls, parser):
parser.add_argument(
'--restart-on-failure',
action=arg_parsers.StoreTrueFalseAction,
help="""\
The instances will be restarted if they are terminated by Compute
Engine. This does not affect terminations performed by the user.
This option is mutually exclusive with --preemptible.
""")
flags.AddPreemptibleVmArgs(parser, is_update=True)
flags.AddProvisioningModelVmArgs(parser)
flags.AddInstanceTerminationActionVmArgs(parser, is_update=True)
flags.AddMaintenancePolicyArgs(parser)
sole_tenancy_flags.AddNodeAffinityFlagToParser(parser, is_update=True)
flags.INSTANCE_ARG.AddArgument(parser)
flags.AddMinNodeCpuArg(parser, is_update=True)
flags.AddHostErrorTimeoutSecondsArgs(parser)
flags.AddMaxRunDurationVmArgs(parser, is_update=True)
flags.AddLocalSsdRecoveryTimeoutArgs(parser)
def Run(self, args):
return self._Run(args)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class SetSchedulingInstancesAlpha(SetSchedulingInstancesBeta):
"""Set scheduling options for Compute Engine virtual machines.
*${command}* is used to update scheduling options for VM instances.
You can only call this method on a VM instance that is stopped
(a VM instance in a `TERMINATED` state).
"""
_support_host_error_timeout_seconds = True
_support_local_ssd_recovery_timeout = True
_support_max_run_duration = True
@classmethod
def Args(cls, parser):
parser.add_argument(
'--restart-on-failure',
action=arg_parsers.StoreTrueFalseAction,
help="""\
The instances will be restarted if they are terminated by Compute
Engine. This does not affect terminations performed by the user.
This option is mutually exclusive with --preemptible.
""")
flags.AddPreemptibleVmArgs(parser, is_update=True)
flags.AddProvisioningModelVmArgs(parser)
flags.AddInstanceTerminationActionVmArgs(parser, is_update=True)
# Deprecated in Alpha
flags.AddMaintenancePolicyArgs(parser, deprecate=True)
sole_tenancy_flags.AddNodeAffinityFlagToParser(parser, is_update=True)
flags.INSTANCE_ARG.AddArgument(parser)
flags.AddMinNodeCpuArg(parser, is_update=True)
flags.AddHostErrorTimeoutSecondsArgs(parser)
flags.AddLocalSsdRecoveryTimeoutArgs(parser)
flags.AddMaxRunDurationVmArgs(parser, is_update=True)
| [
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
ce9cb63e73460a78cd8200a2aa9493e151d347bf | defed253075482cbad31e455dbb5d1d3ada41bf0 | /stewart.py | 94c6b0b591365893e852486c35371c9ea7a4774f | [
"MIT"
] | permissive | victorkich/Stewart-Platform-Environment | 5e9c6f2853102ec58f181d58fa2f8d72fe4b6e92 | 527a579ecfc2f98679e2a44e6becce198dc9bf48 | refs/heads/master | 2022-12-26T02:41:40.325155 | 2020-10-10T16:07:06 | 2020-10-10T16:07:06 | 258,905,533 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,955 | py | from matplotlib.animation import FuncAnimation
from mpl_toolkits.mplot3d import axes3d
from matplotlib import pyplot as plt
from matplotlib import cm
import numpy as np
import math
import time
# Create a figure and axes for plotting
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
class environment():
    ''' Stewart platform simulation environment with the plotting and
        kinematics helpers used below.
    '''
def __init__(self):
''' Initial constant setting
'''
self.w_atuador = 15
self.max_ganho = 5.0
self.z_joint_axes = 5.0
self.th_l = 2.1
self.th_e = 3.1
self.grande = 12.856
self.pequeno = 6.84
self.meio = 8.5285
self.actual_angles = np.array([0,0,0,0,0,0])
self.plate_cords = np.array(self.base())
self.joint_axes_cords = np.array(self.joint_axes())
def showPlot(self):
        # Register animate as the FuncAnimation callback and show the window
_ = FuncAnimation(fig, self.animate)
plt.show()
def base(self):
''' Calculates the hexagon corners
'''
base = [(-8.5285, 3.42, 0), (2.6, 9.848, 0),\
(8.5285, 6.428, 0), (8.5285, -6.428, 0),\
(2.6, -9.848, 0), (-8.5285, -3.42, 0),\
(-8.5285, 3.42, 0)]
return base
def joint_axes(self):
''' Calculates the coordinate points for the center of the servo axes.
'''
th_l = self.th_l
z_joint_axes = self.z_joint_axes
plate_cords = self.plate_cords
joint_axes_cords = []
joint_axes_cords.append((th_l*np.cos(math.radians(30))+plate_cords[0, 0], \
th_l*np.sin(math.radians(30))+plate_cords[0, 1], z_joint_axes))
joint_axes_cords.append((-th_l*np.cos(math.radians(30))+plate_cords[1, 0], \
-th_l*np.sin(math.radians(30))+plate_cords[1, 1], z_joint_axes))
joint_axes_cords.append((th_l*np.cos(math.radians(90))+plate_cords[2, 0], \
-th_l*np.sin(math.radians(90))+plate_cords[2, 1], z_joint_axes))
joint_axes_cords.append((th_l*np.cos(math.radians(90))+plate_cords[3, 0], \
th_l*np.sin(math.radians(90))+plate_cords[3, 1], z_joint_axes))
joint_axes_cords.append((-th_l*np.cos(math.radians(30))+plate_cords[4, 0], \
th_l*np.sin(math.radians(30))+plate_cords[4, 1], z_joint_axes))
joint_axes_cords.append((th_l*np.cos(math.radians(30))+plate_cords[5, 0], \
-th_l*np.sin(math.radians(30))+plate_cords[5, 1], z_joint_axes))
return joint_axes_cords
def actuator(self):
''' Calculates the start and end coordinates of all end_actuators.
'''
th_e = self.th_e
z_joint_axes = self.z_joint_axes
actual_angles = self.actual_angles
plate_cords = self.plate_cords
w_atuador = self.w_atuador
x, y, z = (self.joint_axes_cords[:, 0].copy(), self.joint_axes_cords[:, 1].copy(), self.joint_axes_cords[:, 2].copy())
z[0] = z_joint_axes + 5*np.sin(math.radians(actual_angles[0]))
x[0] = th_e*np.cos(math.radians(actual_angles[0]))*np.cos(math.radians(30)) + x[0]
y[0] = th_e*np.cos(math.radians(actual_angles[0]))*np.sin(math.radians(30)) + y[0]
z[1] = z_joint_axes + 5*np.sin(math.radians(actual_angles[1]))
x[1] = -th_e*np.cos(math.radians(actual_angles[1]))*np.cos(math.radians(30)) + x[1]
y[1] = -th_e*np.cos(math.radians(actual_angles[1]))*np.sin(math.radians(30)) + y[1]
z[2] = z_joint_axes + 5*np.sin(math.radians(actual_angles[2]))
x[2] = th_e*np.cos(math.radians(actual_angles[2]))*np.cos(math.radians(90)) + x[2]
y[2] = -th_e*np.cos(math.radians(actual_angles[2]))*np.sin(math.radians(90)) + y[2]
z[3] = z_joint_axes + 5*np.sin(math.radians(actual_angles[3]))
x[3] = th_e*np.cos(math.radians(actual_angles[3]))*np.cos(math.radians(90)) + x[3]
y[3] = th_e*np.cos(math.radians(actual_angles[3]))*np.sin(math.radians(90)) + y[3]
z[4] = z_joint_axes + 5*np.sin(math.radians(actual_angles[4]))
x[4] = -th_e*np.cos(math.radians(actual_angles[4]))*np.cos(math.radians(30)) + x[4]
y[4] = th_e*np.cos(math.radians(actual_angles[4]))*np.sin(math.radians(30)) + y[4]
z[5] = z_joint_axes + 5*np.sin(math.radians(actual_angles[5]))
x[5] = th_e*np.cos(math.radians(actual_angles[5]))*np.cos(math.radians(30)) + x[5]
y[5] = -th_e*np.cos(math.radians(actual_angles[5]))*np.sin(math.radians(30)) + y[5]
end = []
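        # For each leg, b is the horizontal offset between the servo-horn tip
        # and the corner above the base point; the fixed rod length w_atuador
        # then sets the corner height via w_atuador**2 = b**2 + dz**2.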
        for i in range(6):
b = math.sqrt(abs(x[i]-plate_cords[i,0])**2+abs(y[i]-plate_cords[i,1])**2)
z_atuador = z[i] + math.sqrt(w_atuador**2-b**2)
end.append([plate_cords[i,0], plate_cords[i,1], z_atuador])
        # Repeat the first corner so the plotted platform outline closes cleanly
        end.append(end[0])
start = np.array([x, y, z])
end = np.array(end)
return start, end
def step(self, action, delay, fraction):
''' Performs all steps to achieve the desired state of actions,
starting from the point of the current angles.
'''
actual = self.actual_angles
target = action
space = np.linspace(actual, target, num=fraction)
for i in range(fraction):
for j in range(6):
self.actual_angles[j] = space[i,j]
time.sleep(delay)
def animate(self, i):
''' Plot animation loop
'''
plate_cords = self.plate_cords
joint_axes_cords = self.joint_axes_cords
start_actuators, end_actuators = self.actuator()
ax.clear()
ax.scatter3D(end_actuators[:,0], end_actuators[:,1], end_actuators[:,2], color='y',\
linestyle='-', linewidth=3, label='Vertices')
ax.plot3D(end_actuators[:,0], end_actuators[:,1], end_actuators[:,2], color='b',\
linestyle='-', linewidth=3, label='Edges')
ax.scatter3D(plate_cords[:, 0], plate_cords[:, 1], plate_cords[:, 2]+10, color='y',\
linestyle='-', linewidth=3)
ax.plot3D(plate_cords[:, 0], plate_cords[:, 1], plate_cords[:, 2]+10, color='b',\
linestyle='-', linewidth=3)
ax.scatter3D(plate_cords[:, 0], plate_cords[:, 1], plate_cords[:, 2], color='y',\
linestyle='-', linewidth=3)
ax.plot3D(plate_cords[:, 0], plate_cords[:, 1], plate_cords[:, 2], color='b',\
linestyle='-', linewidth=3)
for i in range(6):
ax.plot3D([plate_cords[i, 0], plate_cords[i, 0]], [plate_cords[i, 1], plate_cords[i, 1]],\
[plate_cords[i, 2], plate_cords[i, 2]+10], color='b', linestyle='-', linewidth=3)
ax.scatter3D(start_actuators[0, :], start_actuators[1, :], start_actuators[2, :], color='g', linestyle='-', linewidth=3)
ax.scatter3D(joint_axes_cords[:, 0], joint_axes_cords[:, 1], joint_axes_cords[:, 2], color='r', linestyle='-', linewidth=3)
        for i in range(6):
haste_x = [start_actuators[0, i], plate_cords[i,0]]
haste_y = [start_actuators[1, i], plate_cords[i,1]]
haste_z = [start_actuators[2, i], end_actuators[i,2]]
ax.plot3D(haste_x, haste_y, haste_z, color='g', linestyle='-', linewidth=3)
eixo_x = [start_actuators[0, i], joint_axes_cords[i, 0]]
eixo_y = [start_actuators[1, i], joint_axes_cords[i, 1]]
eixo_z = [start_actuators[2, i], joint_axes_cords[i, 2]]
ax.plot3D(eixo_x, eixo_y, eixo_z, color='k', linestyle='-', linewidth=6)
ax.set_title('Stewart Platform', size=20)
ax.legend(loc=2, prop={'size':10})
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_xlim3d([-25, 25])
ax.set_ylim3d([-25, 25])
ax.set_zlim3d([-3, 40])
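# Hedged usage sketch (added; the original module only defines the class).
# Drives one interpolated motion on a worker thread so the matplotlib window
# stays responsive; the target angles below are arbitrary.
if __name__ == '__main__':
    from threading import Thread
    env = environment()
    target = np.array([20, -20, 20, -20, 20, -20])
    Thread(target=env.step, args=(target, 0.05, 50), daemon=True).start()
    env.showPlot()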
| [
"victorkich@yahoo.com.br"
] | victorkich@yahoo.com.br |
8ec24f1b1554727d877bc3dc9f4884c8b5a7f4f7 | eacb726dfb05071fa65877f44960826fb4561af0 | /sqlshare_rest/test/api/permissions.py | 94d9a5e8461143e0a4f8c64a1239dc5ff412df2c | [
"Apache-2.0"
] | permissive | uw-it-aca/sqlshare-rest | 4d629cf13d058b2168c07ad69e451584bf63af49 | e441ce9286a915586a68a0bfa3105f122d6ae18f | refs/heads/master | 2020-04-06T06:30:45.900372 | 2019-09-13T17:32:43 | 2019-09-13T17:32:43 | 31,608,784 | 0 | 1 | Apache-2.0 | 2019-09-13T17:32:44 | 2015-03-03T16:33:59 | Python | UTF-8 | Python | false | false | 30,130 | py | from django.test import TestCase
from unittest2 import skipIf
from django.db import connection
from django.core import mail
import json
from testfixtures import LogCapture
from sqlshare_rest.util.db import get_backend
from sqlshare_rest.test import missing_url
from django.test.utils import override_settings
from django.test.client import Client
from django.core.urlresolvers import reverse
from sqlshare_rest.test.api.base import BaseAPITest
from sqlshare_rest.dao.dataset import create_dataset_from_query, add_public_access
from sqlshare_rest.util.query_queue import process_queue
from sqlshare_rest.util.dataset_emails import send_new_emails
from sqlshare_rest.models import Query
from sqlshare_rest.util.db import is_sqlite3, is_mysql
from sqlshare_rest.models import Dataset, DatasetSharingEmail
@skipIf(missing_url("sqlshare_view_dataset_list"), "SQLShare REST URLs not configured")
@override_settings(MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.RemoteUserMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
),
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',),
SQLSHARE_QUERY_CACHE_DB="test_ss_query_db"
)
class DatasetPermissionsAPITest(BaseAPITest):
def setUp(self):
super(DatasetPermissionsAPITest, self).setUp()
# Try to cleanup from any previous test runs...
self.remove_users = []
self.client = Client()
try:
cursor = connection.cursor()
cursor.execute("DROP DATABASE test_ss_query_db")
except Exception as ex:
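            # Best-effort cleanup: the query-cache database may not exist yet.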
pass
def test_unauthenticated(self):
url = reverse("sqlshare_view_dataset_permissions", kwargs={"owner":"foo", "name":"bar"})
response = self.client.get(url)
self.assertEquals(response.status_code, 403)
def test_accounts(self):
owner = "permissions_user1"
dataset_name = "ds1"
other_user1 = "permissions_user2"
other_user2 = "permissions_user3"
other_user3 = "permissions_user4"
self.remove_users.append(owner)
self.remove_users.append(other_user1)
self.remove_users.append(other_user2)
self.remove_users.append(other_user3)
backend = get_backend()
backend.get_user(other_user1)
backend.get_user(other_user2)
backend.get_user(other_user3)
ds1 = create_dataset_from_query(owner, dataset_name, "SELECT(1)")
url = reverse("sqlshare_view_dataset", kwargs={ 'owner': owner,
'name': dataset_name})
owner_auth_headers = self.get_auth_header_for_username(owner)
user1_auth_headers = self.get_auth_header_for_username(other_user1)
user2_auth_headers = self.get_auth_header_for_username(other_user2)
user3_auth_headers = self.get_auth_header_for_username(other_user3)
# Test the default situation...
response = self.client.get(url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
response = self.client.get(url, **user1_auth_headers)
self.assertEquals(response.status_code, 403)
response = self.client.get(url, **user2_auth_headers)
self.assertEquals(response.status_code, 403)
response = self.client.get(url, **user3_auth_headers)
self.assertEquals(response.status_code, 403)
# Test the default state of the permissions api...
with LogCapture() as l:
permissions_url = reverse("sqlshare_view_dataset_permissions", kwargs={'owner':owner, 'name':dataset_name})
response = self.client.get(permissions_url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["is_public"], False)
self.assertEquals(data["is_shared"], False)
self.assertEquals(data["accounts"], [])
self.assertEquals(data["emails"], [])
self.assertTrue(self._has_log(l, owner, None, 'sqlshare_rest.views.dataset_permissions', 'INFO', 'GET dataset permissions; owner: permissions_user1; name: ds1'))
# Test round 1 of changes...
new_data = { "accounts": [ other_user1, other_user2 ] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **user1_auth_headers)
self.assertEquals(response.status_code, 403)
with LogCapture() as l:
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
self.assertEquals(response.content.decode("utf-8"), "")
self.assertTrue(self._has_log(l, owner, None, 'sqlshare_rest.views.dataset_permissions', 'INFO', 'PUT dataset permissions; owner: permissions_user1; name: ds1; set account: permissions_user2'))
self.assertTrue(self._has_log(l, owner, None, 'sqlshare_rest.views.dataset_permissions', 'INFO', 'PUT dataset permissions; owner: permissions_user1; name: ds1; set account: permissions_user3'))
response = self.client.get(permissions_url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["is_public"], False)
self.assertEquals(data["is_shared"], True)
self.assertEquals(data["emails"], [])
accounts = data["accounts"]
lookup = {}
for account in accounts:
lookup[account["login"]] = account
self.assertTrue(other_user1 in lookup)
self.assertTrue(other_user2 in lookup)
self.assertFalse(other_user3 in lookup)
self.assertEquals(lookup[other_user1]["login"], other_user1)
self.assertEquals(lookup[other_user2]["login"], other_user2)
# Make sure they can get the dataset...
response = self.client.get(url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["is_shared"], True)
response = self.client.get(url, **user1_auth_headers)
self.assertEquals(response.status_code, 200)
response = self.client.get(permissions_url, **user1_auth_headers)
self.assertEquals(response.status_code, 403)
response = self.client.get(url, **user2_auth_headers)
self.assertEquals(response.status_code, 200)
response = self.client.get(url, **user3_auth_headers)
self.assertEquals(response.status_code, 403)
# Test round 2 of changes... add a new user, drop a user
new_data = { "accounts": [ other_user3, other_user2 ] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **user1_auth_headers)
self.assertEquals(response.status_code, 403)
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
self.assertEquals(response.content.decode("utf-8"), "")
response = self.client.get(permissions_url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["is_public"], False)
self.assertEquals(data["is_shared"], True)
self.assertEquals(data["emails"], [])
accounts = data["accounts"]
lookup = {}
for account in accounts:
lookup[account["login"]] = account
self.assertTrue(other_user3 in lookup)
self.assertTrue(other_user2 in lookup)
self.assertFalse(other_user1 in lookup)
self.assertEquals(lookup[other_user3]["login"], other_user3)
self.assertEquals(lookup[other_user2]["login"], other_user2)
# Make sure they can get the dataset...
response = self.client.get(url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["is_shared"], True)
response = self.client.get(url, **user1_auth_headers)
self.assertEquals(response.status_code, 403)
response = self.client.get(url, **user2_auth_headers)
self.assertEquals(response.status_code, 200)
response = self.client.get(url, **user3_auth_headers)
self.assertEquals(response.status_code, 200)
# Test round 3 of changes... remove all acces
new_data = { "accounts": [] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **user1_auth_headers)
self.assertEquals(response.status_code, 403)
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
self.assertEquals(response.content.decode("utf-8"), "")
response = self.client.get(permissions_url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["is_public"], False)
self.assertEquals(data["is_shared"], False)
self.assertEquals(data["emails"], [])
self.assertEquals(data["accounts"], [])
# Make sure they can get the dataset...
response = self.client.get(url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["is_shared"], False)
response = self.client.get(url, **user1_auth_headers)
self.assertEquals(response.status_code, 403)
response = self.client.get(url, **user2_auth_headers)
self.assertEquals(response.status_code, 403)
response = self.client.get(url, **user3_auth_headers)
self.assertEquals(response.status_code, 403)
def test_emails(self):
owner = "email_permissions_user2"
dataset_name = "ds2"
self.remove_users.append(owner)
owner_auth_headers = self.get_auth_header_for_username(owner)
ds1 = create_dataset_from_query(owner, dataset_name, "SELECT(1)")
# Test the default state of the permissions api...
permissions_url = reverse("sqlshare_view_dataset_permissions", kwargs={'owner':owner, 'name':dataset_name})
response = self.client.get(permissions_url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["is_public"], False)
self.assertEquals(data["is_shared"], False)
self.assertEquals(data["accounts"], [])
self.assertEquals(data["emails"], [])
# Add 2 emails:
new_data = { "emails": [ "user1@example.com", "user2@example.com" ] }
with LogCapture() as l:
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
self.assertEquals(response.content.decode("utf-8"), "")
self.assertTrue(self._has_log(l, owner, None, 'sqlshare_rest.views.dataset_permissions', 'INFO', 'PUT dataset permissions; owner: email_permissions_user2; name: ds2'))
self.assertTrue(self._has_log(l, owner, None, 'sqlshare_rest.views.dataset_permissions', 'INFO', 'PUT dataset permissions; owner: email_permissions_user2; name: ds2; set email: user1@example.com'))
self.assertTrue(self._has_log(l, owner, None, 'sqlshare_rest.views.dataset_permissions', 'INFO', 'PUT dataset permissions; owner: email_permissions_user2; name: ds2; set email: user2@example.com'))
self.assertTrue(self._has_log(l, owner, None, 'sqlshare_rest.views.dataset_permissions', 'INFO', 'PUT dataset permissions finished; owner: email_permissions_user2; name: ds2'))
response = self.client.get(permissions_url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["is_public"], False)
self.assertEquals(data["is_shared"], True)
self.assertEquals(data["accounts"], [])
emails = data["emails"]
lookup = {}
for email in emails:
lookup[email] = True
self.assertEquals(lookup, { "user1@example.com": True, "user2@example.com": True })
# Change the 2 emails, keeping 1 the same...
new_data = { "emails": [ "user2@example.com", "user3@example.com" ] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
self.assertEquals(response.content.decode("utf-8"), "")
response = self.client.get(permissions_url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["is_public"], False)
self.assertEquals(data["is_shared"], True)
self.assertEquals(data["accounts"], [])
emails = data["emails"]
lookup = {}
for email in emails:
lookup[email] = True
self.assertEquals(lookup, { "user2@example.com": True, "user3@example.com": True })
# Drop all emails...
new_data = { "emails": [] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
self.assertEquals(response.content.decode("utf-8"), "")
response = self.client.get(permissions_url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["is_public"], False)
self.assertEquals(data["is_shared"], False)
self.assertEquals(data["accounts"], [])
self.assertEquals(data["emails"], [])
def test_send_emails(self):
owner = "email_permissions_user3"
dataset_name = "ds3"
self.remove_users.append(owner)
owner_obj = get_backend().get_user(owner)
owner_auth_headers = self.get_auth_header_for_username(owner)
ds1 = create_dataset_from_query(owner, dataset_name, "SELECT(1)")
# Add 2 emails:
permissions_url = reverse("sqlshare_view_dataset_permissions", kwargs={'owner':owner, 'name':dataset_name})
new_data = { "emails": [ "user1@example.com"] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
self.assertEquals(response.content.decode("utf-8"), "")
# empty out the memory outbox:
mail.outbox = []
# Now make sure we send 1 email
send_new_emails()
self.assertEquals(len(mail.outbox), 1)
obj = Dataset.objects.get(owner=owner_obj, name=dataset_name)
sharing = DatasetSharingEmail.objects.filter(dataset=obj)[0]
self.assertEquals(mail.outbox[0].to, ["user1@example.com"])
self.assertEquals(mail.outbox[0].from_email, "sqlshare-noreply@uw.edu")
self.assertTrue(mail.outbox[0].body.find(sharing.access_token) > 0)
new_data = { "emails": [ "user2@example.com"] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
# Make sure we send a new email
send_new_emails()
self.assertEquals(len(mail.outbox), 2)
new_data = { "emails": [ "user2@example.com", "user1@example.com"] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
# Make sure we send a replacement email for user1
send_new_emails()
self.assertEquals(len(mail.outbox), 3)
# Now make sure we don't send any more emails:
send_new_emails()
self.assertEquals(len(mail.outbox), 3)
def test_preview_table_permissions(self):
# We need to process the preview query - purge any existing queries
# to make sure we process ours.
Query.objects.all().delete()
owner = "permissions_preview_user1"
dataset_name = "ds4"
other_user1 = "permissions_preview_user2"
self.remove_users.append(owner)
self.remove_users.append(other_user1)
backend = get_backend()
backend.get_user(other_user1)
ds1 = create_dataset_from_query(owner, dataset_name, "SELECT(1)")
url = reverse("sqlshare_view_dataset", kwargs={ 'owner': owner,
'name': dataset_name})
permissions_url = reverse("sqlshare_view_dataset_permissions", kwargs={'owner':owner, 'name':dataset_name})
owner_auth_headers = self.get_auth_header_for_username(owner)
user1_auth_headers = self.get_auth_header_for_username(other_user1)
query = Query.objects.all()[0]
remove_pk = query.pk
process_queue()
new_data = { "accounts": [ other_user1 ] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
response = self.client.get(url, **user1_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["sample_data"], [[1]])
def test_preview_table_permissions_pre_process(self):
# We need to process the preview query - purge any existing queries
# to make sure we process ours.
Query.objects.all().delete()
owner = "permissions_preview_user5"
dataset_name = "ds5"
other_user1 = "permissions_preview_user6"
self.remove_users.append(owner)
self.remove_users.append(other_user1)
backend = get_backend()
backend.get_user(other_user1)
ds1 = create_dataset_from_query(owner, dataset_name, "SELECT(1)")
url = reverse("sqlshare_view_dataset", kwargs={ 'owner': owner,
'name': dataset_name})
permissions_url = reverse("sqlshare_view_dataset_permissions", kwargs={'owner':owner, 'name':dataset_name})
owner_auth_headers = self.get_auth_header_for_username(owner)
user1_auth_headers = self.get_auth_header_for_username(other_user1)
new_data = { "accounts": [ other_user1 ] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
# Test that we get a 200 while the preview is being built
response = self.client.get(url, **user1_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["sample_data_status"], "working")
query = Query.objects.all()[0]
remove_pk = query.pk
process_queue()
# Test that permission was added after the query is finished.
response = self.client.get(url, **user1_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["sample_data"], [[1]])
def test_preview_table_permissions_public(self):
# We need to process the preview query - purge any existing queries
# to make sure we process ours.
Query.objects.all().delete()
owner = "permissions_preview_user7"
dataset_name = "ds6"
other_user1 = "permissions_preview_user8"
self.remove_users.append(owner)
self.remove_users.append(other_user1)
backend = get_backend()
backend.get_user(other_user1)
ds1 = create_dataset_from_query(owner, dataset_name, "SELECT(1)")
url = reverse("sqlshare_view_dataset", kwargs={ 'owner': owner,
'name': dataset_name})
owner_auth_headers = self.get_auth_header_for_username(owner)
user1_auth_headers = self.get_auth_header_for_username(other_user1)
add_public_access(ds1)
# Test that we get a 200 while the preview is being built
response = self.client.get(url, **user1_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["sample_data_status"], "working")
query = Query.objects.all()[0]
remove_pk = query.pk
process_queue()
# Test that permission was added after the query is finished.
response = self.client.get(url, **user1_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["sample_data"], [[1]])
def test_public_to_shared(self):
owner = "permissions_xpublic_user1"
other_user1 = "permissions_xpublic_user2"
dataset_name = "ds7"
self.remove_users.append(owner)
self.remove_users.append(other_user1)
backend = get_backend()
backend.get_user(other_user1)
ds1 = create_dataset_from_query(owner, dataset_name, "SELECT(1)")
permissions_url = reverse("sqlshare_view_dataset_permissions", kwargs={'owner':owner, 'name':dataset_name})
add_public_access(ds1)
owner_auth_headers = self.get_auth_header_for_username(owner)
new_data = { "accounts": [ other_user1 ], "is_public": False }
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
response = self.client.get(permissions_url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["is_public"], False)
self.assertEquals(data["is_shared"], True)
self.assertEquals(data["emails"], [])
self.assertEquals(data["accounts"], [{'login': 'permissions_xpublic_user2'}])
def test_sharing_tokens(self):
owner = "permissions_token_user1"
other = "permissions_token_taker"
other2 = "permissions_token_taker2"
dataset_name = "ds8"
self.remove_users.append(owner)
self.remove_users.append(other)
self.remove_users.append(other2)
backend = get_backend()
owner_obj = backend.get_user(owner)
backend.get_user(other)
backend.get_user(other2)
ds1 = create_dataset_from_query(owner, dataset_name, "SELECT(1)")
owner_auth_headers = self.get_auth_header_for_username(owner)
other_auth_headers = self.get_auth_header_for_username(other)
other_auth_headers2 = self.get_auth_header_for_username(other2)
permissions_url = reverse("sqlshare_view_dataset_permissions", kwargs={'owner':owner, 'name':dataset_name})
new_data = { "emails": [ "test_user1@example.com" ] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
obj = Dataset.objects.get(owner=owner_obj, name=dataset_name)
sharing = DatasetSharingEmail.objects.filter(dataset=obj)[0]
email = sharing.email
access_token1 = sharing.access_token
self.assertEquals(email.email, "test_user1@example.com")
# Clear the emails, then put the same one back - make sure we get a
# different token
new_data = { "emails": [] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
obj = Dataset.objects.get(owner=owner_obj, name=dataset_name)
self.assertEquals(len(DatasetSharingEmail.objects.filter(dataset=obj)), 0)
new_data = { "emails": [ "test_user1@example.com" ] }
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
obj = Dataset.objects.get(owner=owner_obj, name=dataset_name)
sharing = DatasetSharingEmail.objects.filter(dataset=obj)[0]
email = sharing.email
self.assertEquals(email.email, "test_user1@example.com")
access_token2 = sharing.access_token
self.assertNotEqual(access_token1, access_token2)
# Make sure that token 1 doesn't give access
token1_url = reverse("sqlshare_token_access", kwargs={"token": access_token1})
response = self.client.post(token1_url, data={}, **other_auth_headers)
self.assertEquals(response.status_code, 404)
# Make sure that token 2 does give access
token2_url = reverse("sqlshare_token_access", kwargs={"token": access_token2})
response = self.client.post(token2_url, data={}, **other_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["owner"], "permissions_token_user1")
self.assertEquals(data["name"], "ds8")
        # the token is reusable - useful if, say, the share email went out to a mailing list:
response = self.client.post(token2_url, data={}, **other_auth_headers2)
self.assertEquals(response.status_code, 200)
# Make sure if we try to add the user a second time, nothing weird happens
token2_url = reverse("sqlshare_token_access", kwargs={"token": access_token2})
response = self.client.post(token2_url, data={}, **other_auth_headers)
self.assertEquals(response.status_code, 200)
# Make sure that if we add the owner this way, they don't end up in the list
token2_url = reverse("sqlshare_token_access", kwargs={"token": access_token2})
response = self.client.post(token2_url, data={}, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
        # Now, make sure the email is still in the permissions api document,
        # and that the 2 new users are in it as well.
response = self.client.get(permissions_url, **owner_auth_headers)
data = json.loads(response.content.decode("utf-8"))
accounts = list(map(lambda x: x["login"], data["accounts"]))
self.assertEquals(len(accounts), 2)
self.assertTrue(other in accounts)
self.assertTrue(other2 in accounts)
emails = data["emails"]
self.assertEquals(emails, ["test_user1@example.com"])
def test_flat_auth_list(self):
owner = "permissions_flat_user1"
dataset_name = "ds_flat1"
other_user1 = "permissions_flat_user2"
other_user2 = "permissions_flat_user3"
self.remove_users.append(owner)
self.remove_users.append(other_user1)
self.remove_users.append(other_user2)
backend = get_backend()
backend.get_user(other_user1)
backend.get_user(other_user2)
ds1 = create_dataset_from_query(owner, dataset_name, "SELECT(1)")
permissions_url = reverse("sqlshare_view_dataset_permissions", kwargs={'owner':owner, 'name':dataset_name})
new_data = { "authlist": [ other_user1, other_user2, "test@example.com", "not_email_but_whatever"] }
owner_auth_headers = self.get_auth_header_for_username(owner)
response = self.client.put(permissions_url, data=json.dumps(new_data), **owner_auth_headers)
self.assertEquals(response.status_code, 200)
self.assertEquals(response.content.decode("utf-8"), "")
response = self.client.get(permissions_url, **owner_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["is_public"], False)
self.assertEquals(data["is_shared"], True)
accounts = data["accounts"]
lookup = {}
for account in accounts:
lookup[account["login"]] = True
self.assertEquals(lookup, { "permissions_flat_user2": True, "permissions_flat_user3": True })
lookup = {}
emails = data["emails"]
for email in emails:
lookup[email] = True
self.assertEquals(lookup, { "test@example.com": True, "not_email_but_whatever": True })
# empty out the memory outbox:
mail.outbox = []
        # Flush any pending sharing emails (no count assertion here; this just exercises the code path)
send_new_emails()
# empty out the memory outbox:
mail.outbox = []
@classmethod
def setUpClass(cls):
super(DatasetPermissionsAPITest, cls).setUpClass()
def _run_query(sql):
cursor = connection.cursor()
try:
cursor.execute(sql)
except Exception as ex:
# Hopefully all of these will fail, so ignore the failures
pass
        # This is just an embarrassing list of things to clean up if something fails.
# It gets added to when something like this blocks one of my test runs...
_run_query("drop login permissions_preview_user8")
_run_query("drop login permissions_preview_user2")
_run_query("drop login permissions_preview_user5")
_run_query("drop login permissions_preview_user6")
_run_query("drop login permissions_preview_user7")
_run_query("drop login permissions_token_user1")
_run_query("drop login permissions_xpublic_user1")
_run_query("drop login permissions_user1")
_run_query("drop login email_permissions_user2")
| [
"pmichaud@uw.edu"
] | pmichaud@uw.edu |
9e99407c11dc865ab0eb7a0ee056e7e2a3ffb99d | 3ef7f119ca83ff17510628d4d16aad218fc41ace | /dogs/admin.py | 501c48c241fa8e13b62612650f761418813e248e | [] | no_license | JoshuaAaron/bit465-assignment5 | 301209e375ff97d7d84b3bbdd55d1be15e4e598e | fc76d59eb10a47c1ae7df66bf64d3baa38f564f9 | refs/heads/main | 2023-03-22T21:56:39.650866 | 2021-03-08T05:58:40 | 2021-03-08T05:58:40 | 345,541,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | from django.contrib import admin
# Register your models here.
from .models import Dog, Breed
admin.site.register(Dog)
admin.site.register(Breed) | [
"joshuaaaronmartinez@gmail.com"
] | joshuaaaronmartinez@gmail.com |
c9649b9d01cfd93d34088a0de17a5a1d8962e59f | 81fff22868d03aba33233c845aefedf38eb24a0e | /hyq/analysis.py | 86a54504ff317e12a5369e1bbdd1ddf84434753b | [] | no_license | gurbain/hyq_ml | 6c38c581eb1dd49db3c9c3bcd2d9a44e54af94bd | b927407202cd1bff66192d1fa8659d47f00a6f2b | refs/heads/master | 2020-03-22T11:31:41.638503 | 2019-10-08T14:01:31 | 2019-10-08T14:01:31 | 139,977,011 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,061 | py | import ConfigParser
import itertools
import matplotlib
import numpy as np
import os
import pickle
from tqdm import tqdm
from hyq.picker import *
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
plt.rc('text', usetex=True)
plt.rc('axes', facecolor='white')
plt.rc('savefig', facecolor='white')
plt.rc('figure', autolayout=True)
EXP_FOLDER = "/home/gabs48/src/quadruped/hyq/hyq_ml/data/"
def get_cols(i):
if 'axes.prop_cycle' in plt.rcParams:
cols = [p['color'] for p in plt.rcParams['axes.prop_cycle']]
cols[2], cols[3] = cols[3], cols[2]
return cols[i % len(cols)]
def get_blue():
return get_cols(0)
def get_red():
return get_cols(1)
def get_green():
return get_cols(2)
def get_yellow():
return get_cols(3)
def get_gray():
return get_cols(4)
def get_purple():
return get_cols(5)
def get_lines(i):
lines = ["-", "--", ":", "-."]
return lines[i % len(lines)]
def get_config_items(c):
d = {}
for sect in c.sections():
for (key, val) in c.items(sect):
d[str(sect).lower() + "_" + str(key).lower()] = val
return d
def get_folder():
# Find plotting config
conf = None
f = os.path.join(EXP_FOLDER, "plot_conf.pkl")
if os.path.isfile(f):
conf = pickle.load(open(f, "rb"))
subdirs = [f for f in os.listdir(EXP_FOLDER) if os.path.isdir(os.path.join(EXP_FOLDER, f))]
if conf is not None:
subdirs_mask = [s == conf[0].split("/")[-1] for s in subdirs]
else:
subdirs_mask = [False for _ in subdirs]
f = Picker(title="Select the experiment folder to analyze "
"(only the first will be plotted)",
options=subdirs, init_options=subdirs_mask).getIndex()
return EXP_FOLDER + subdirs[f[0]], conf
def get_data(folder):
# Read the folder datas
metrics_data = []
config_data = []
for subdir in tqdm(os.listdir(folder)):
config_filename = os.path.join(os.path.join(folder, subdir), "config.txt")
physics_filename = os.path.join(os.path.join(folder, subdir), "metrics.pkl")
if os.path.isfile(config_filename) and os.path.isfile(physics_filename):
config = ConfigParser.ConfigParser()
config.read(config_filename)
config_data.append(get_config_items(config))
d = pickle.load(open(physics_filename, "rb"))
metrics_data.append(d)
# Find changing parameter in config
changing_config = []
i = 0
for a, b in tqdm(itertools.combinations(config_data, 2)):
if i > 300000:
break
for key, value in a.iteritems():
if key in b:
if a[key] != b[key]:
if key not in changing_config:
changing_config.append(key)
else:
print " === ERROR: All the configuration files of the experiment " \
" directories must have the same fields!"
return -1
i += 1
# Mix all in a big dictionary
data = metrics_data
for i, d in enumerate(data):
for key in changing_config:
c = config_data[i][key]
if c.isdigit():
d[key] = float(c)
else:
d[key] = str(c)
# Add missing fields and cleanup others
clean_data(data)
return data, changing_config
def get_foot_data(folder):
# Read the folder datas
foot_data = []
config_data = []
for subdir in tqdm(os.listdir(folder)):
config_filename = os.path.join(os.path.join(folder, subdir), "config.txt")
foot_filename = os.path.join(os.path.join(folder, subdir), "feet.pkl")
if os.path.isfile(config_filename) and os.path.isfile(foot_filename):
config = ConfigParser.ConfigParser()
config.read(config_filename)
config_data.append(get_config_items(config))
d = pickle.load(open(foot_filename, "rb"))
foot_data.append({"f1": d[0], "f2": d[1], "t_train": d[2], "t_cl": d[3], "t_test": d[4]})
# Find changing parameter in config
changing_config = []
i = 0
for a, b in tqdm(itertools.combinations(config_data, 2)):
if i > 300000:
break
for key, value in a.iteritems():
if key in b:
if a[key] != b[key]:
if key not in changing_config:
changing_config.append(key)
else:
print " === ERROR: All the configuration files of the experiment " \
" directories must have the same fields!"
return -1
i += 1
# Mix all in a big dictionary
data = foot_data
for i, d in enumerate(data):
for key in changing_config:
c = config_data[i][key]
if c.isdigit():
d[key] = float(c)
else:
d[key] = str(c)
return foot_data, changing_config
def save_conf(folder, field_x, field_y, field_z):
with open(os.path.join(os.path.dirname(folder), "plot_conf.pkl"), "wb") as f:
pickle.dump([folder, field_x, field_y, field_z], f, protocol=2)
def get_fields(data, config_fields, conf):
fields = sorted(data[0].keys())
if conf is not None:
f_x_mask = [f == conf[1] for f in config_fields]
f_y_mask = [f == conf[2] for f in fields]
else:
f_x_mask = [False for _ in config_fields]
f_y_mask = [False for _ in fields]
f_x = config_fields[Picker(title="Select the Graph X-Axis (only one choice)",
options=config_fields, init_options=f_x_mask).getIndex()[0]]
    f_y_i = Picker(title="Select the Graph Y-Axis (multiple choices allowed)",
options=fields, init_options=f_y_mask).getIndex()
f_y = []
for i in f_y_i:
f_y.append(fields[i])
z_fields = ["yes", "no - average all"]
z_set = []
config_fields.remove(f_x)
for z in config_fields:
z_set.append(sorted(list(set([d[z] for d in data]))))
for x in itertools.product(*z_set):
opt = "no - select "
for i, z in enumerate(x):
opt += config_fields[i] + "=" + str(z) + " "
z_fields.append(opt)
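    # Example of the option strings built above (illustrative values): with
    # remaining config fields ["a", "b"] taking values {1, 2} and {"x"}, the
    # picker is offered "yes", "no - average all", "no - select a=1 b=x "
    # and "no - select a=2 b=x ".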
if conf is not None:
f_z_mask = [f == conf[3] for f in z_fields]
else:
f_z_mask = [False for _ in z_fields]
f_z_i = Picker(title="Do you want to plot multiple graphs?",
options=z_fields, init_options=f_z_mask).getIndex()[0]
if f_z_i == 0:
f_z = config_fields[Picker(title="Select the Graph Z field",
options=config_fields,
init_options=[False for _ in config_fields]).getIndex()[0]]
else:
f_z = z_fields[f_z_i]
return f_x, f_y, f_z
def clean_data(data):
    for d in data:
d["test_grf_steps"] = (d["test_lh_grf_steps"] + d["test_lf_grf_steps"] +
d["test_rf_grf_steps"] + d["test_rh_grf_steps"]) / 4
d["test_grf_step_len"] = (d["test_lh_grf_step_len"] + d["test_lf_grf_step_len"] +
d["test_rf_grf_step_len"] + d["test_rh_grf_step_len"]) / 4
d["cl_grf_steps"] = (d["cl_lh_grf_steps"] + d["cl_lf_grf_steps"] +
d["cl_rf_grf_steps"] + d["cl_rh_grf_steps"]) / 4
d["cl_grf_step_len"] = (d["cl_lh_grf_step_len"] + d["cl_lf_grf_step_len"] +
d["cl_rf_grf_step_len"] + d["cl_rh_grf_step_len"]) / 4
d["train_grf_steps"] = (d["train_lh_grf_steps"] + d["train_lf_grf_steps"] +
d["train_rf_grf_steps"] + d["train_rh_grf_steps"]) / 4
d["train_grf_step_len"] = (d["train_lh_grf_step_len"] + d["train_lf_grf_step_len"] +
d["train_rf_grf_step_len"] + d["train_rh_grf_step_len"]) / 4
for k in d.keys():
if "y_speed" in k:
d[k] = abs(d[k])
if "roll_range" in k or "pitch_range" in k:
d[k] = float(d[k]) % (2 * np.pi)
if "physics_init_impedance" == k:
imp = eval(d["physics_init_impedance"])
if imp is None:
d["physics_kp"] = np.nan
d["physics_kd"] = np.nan
else:
d["physics_kp"] = (imp[2] + imp[4]) / 2
d["physics_kd"] = (imp[3] + imp[5]) / 2
if "entropy" in k:
for i, name in enumerate(["perm", "svd", "app", "sample", "spectral"]):
d[k + "_" + name] = d[k][i]
if "snr_actuators" in k:
d[k + "_" + "mean_std"] = d[k][0]
d[k + "_" + "tgt_pred"] = 0.3 / d[k][1]
d["diff_dist"] = abs(d["test_dist"] - d["train_dist"])
d["diff_speed"] = abs(d["test_speed"] - d["train_speed"])
d["diff_nrmse"] = abs(d["test_nrmse"] - d["train_nrmse"])
d["diff_x_speed"] = abs(d["test_x_speed"] - d["train_x_speed"])
d["diff_y_speed"] = abs(d["test_y_speed"] - d["train_y_speed"])
d["diff_power"] = abs(d["test_power"] - d["train_power"])
d["diff_COT"] = abs(d["test_power"] - d["train_power"])
d["diff_grf_step_len"] = abs(d["test_grf_step_len"] - d["train_grf_step_len"])
d["diff_grf_steps"] = abs(d["test_grf_steps"] - d["train_grf_steps"])
d["diff_grf_max"] = abs(d["test_grf_max"] - d["train_grf_max"])
d["diff_z_range"] = abs(d["test_z_range"] - d["train_z_range"])
d["diff_pitch_range"] = abs(d["test_pitch_range"] - d["train_pitch_range"])
d["diff_roll_range"] = abs(d["test_roll_range"] - d["train_roll_range"])
d["test_stability"] = d["test_z_range"] + 0.5*np.tan(min(0.8, d["test_pitch_range"])) + 0.25*np.tan(min(0.8, d["test_roll_range"]))
d["train_stability"] = d["train_z_range"] + 0.5*np.tan(min(0.8, d["train_pitch_range"])) + 0.25*np.tan(min(0.8, d["train_roll_range"]))
d["cl_stability"] = d["cl_z_range"] + 0.5*np.tan(min(0.8, d["cl_pitch_range"])) + 0.25*np.tan(min(0.8, d["cl_roll_range"]))
def get_graph_data(data, field_x, field_y, field_z):
if field_z != "No Field":
x_list = [d[field_x] for d in data]
x_set = sorted(list(set(x_list)))
z_list = [d[field_z] for d in data]
z_set = sorted(list(set(z_list)))
n_sample = max(max([x_list.count(e) for e in x_set]),
max([z_list.count(e) for e in z_set]))
y_val = np.empty((len(x_set), len(z_set), n_sample))
y_val[:, :, :] = np.nan
        sampling_index = np.zeros((len(x_set), len(z_set)), dtype=int)  # int8 would overflow past 127 samples per cell
for d in data:
x_index = x_set.index(d[field_x])
z_index = z_set.index(d[field_z])
y_val[x_index, z_index, sampling_index[x_index, z_index]] = d[field_y]
sampling_index[x_index, z_index] += 1
y_av = np.nanmean(y_val, axis=2)
y_std = np.nanstd(y_val, axis=2)
return np.array(x_set), y_av, y_std, z_set
else:
x_list = [d[field_x] for d in data]
x_set = sorted(list(set(x_list)))
n_sample = max([x_list.count(e) for e in x_set])
y_val = np.empty((len(x_set), n_sample))
y_val[:, :] = np.nan
        sampling_index = np.zeros((len(x_set)), dtype=int)  # int8 would overflow past 127 samples per cell
for d in data:
x_index = x_set.index(d[field_x])
y_val[x_index, sampling_index[x_index]] = d[field_y]
sampling_index[x_index] += 1
        # Filter out the inconsistent values found via FFT peaks
if field_y == "train_grf_steps" or field_y == "test_grf_steps" or field_y == "cl_grf_steps":
y_val[y_val > 70] = np.nan
if field_y == "train_grf_step_len" or field_y == "test_grf_step_len" or field_y == "cl_grf_step_len":
y_val[y_val < 0.01] = np.nan
y_av = np.nanmean(y_val, axis=1)
y_std = np.nanstd(y_val, axis=1)
return np.array(x_set), y_av, y_std
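def nan_padding_example():
    # Minimal sketch (not called anywhere) of the NaN-padding trick used by
    # get_graph_data: samples are written into a fixed-size array pre-filled
    # with NaN, so np.nanmean/np.nanstd simply ignore the unused slots.
    y = np.full((2, 3), np.nan)
    y[0, :2] = [1.0, 3.0]  # two samples observed for the first x value
    y[1, 0] = 5.0          # a single sample for the second x value
    return np.nanmean(y, axis=1)  # -> array([2., 5.])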
def plot_graph(graph_data, field_x, field_y, field_z):
x_scale = "linear"
if 100 * (graph_data[0][1] - graph_data[0][0]) <= graph_data[0][-1] - graph_data[0][-2]:
x_scale = "log"
    if field_z != "No Field":
plt.figure(figsize=(10, 8), dpi=80)
for j in range(len(graph_data[3])):
plt.plot(graph_data[0], graph_data[1][:, j], linestyle=get_lines(j),
linewidth=2, color=get_cols(j),
label=str(field_z).replace("_", " ") + " = " +
str(graph_data[3][j]))
plt.fill_between(graph_data[0],
graph_data[1][:, j] - graph_data[2][:, j]/5.0,
graph_data[1][:, j] + graph_data[2][:, j]/5.0,
alpha=0.1, edgecolor=get_cols(j), facecolor=get_cols(j))
plt.title((str(field_y) + " depending on " + str(field_x) +
" with different " + str(field_z)).replace("_", " "))
plt.xscale(x_scale)
plt.legend()
plt.show()
else:
plt.figure(figsize=(10, 8), dpi=80)
plt.plot(graph_data[0], graph_data[1], linewidth=2)
plt.fill_between(graph_data[0], graph_data[1] - graph_data[2]/5.0,
graph_data[1] + graph_data[2]/5.0, alpha=0.1)
plt.title((str(field_y) + " depending on " +
str(field_x)).replace("_", " "))
plt.xscale(x_scale)
plt.legend()
plt.show()
if __name__ == "__main__":
folder, default_conf = get_folder()
data, data_config_fields = get_data(folder)
field_x, fields_y, field_z = get_fields(data, data_config_fields, default_conf)
save_conf(folder, field_x, fields_y, field_z)
    # get_fields can return several y fields, so plot each one in turn
    for field_y in fields_y:
        graph_data = get_graph_data(data, field_x, field_y, field_z)
        plot_graph(graph_data, field_x, field_y, field_z)
"gabrielurbain@gmail.com"
] | gabrielurbain@gmail.com |
92d43bb06a0698e103b176f73dfc4ffb25eff32b | fe9b3ca439596fe29efea098d61fd72bb8322f8c | /forcast/forcast/wsgi.py | ab5d39f9bd1a945834de9d268e6c0d29d8bf0c51 | [] | no_license | rakshit12/weather-app | 356ce9f648b8bd6c5e878073b62f2b126172900a | e69730fb4925ee916f271e76441d25eeef7132ff | refs/heads/main | 2023-02-14T17:08:06.657888 | 2021-01-04T16:59:11 | 2021-01-04T16:59:11 | 326,750,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
WSGI config for forcast project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'forcast.settings')
application = get_wsgi_application()
| [
"rakshitsaxena119@.com"
] | rakshitsaxena119@.com |
6472cba4c7ef3836b8f8cb5885e62a14c23ebc7a | e03c6b1f7b7bf895a24774d92d172ba487df1043 | /Coco/Capacitacion Analisis/utils/__init__.py | ec0dd83964a95b78f6a3aab4ccf4c561c858f498 | [] | no_license | CocoUrbina/Python | a3cec6b7d5f3387c5c9d72bf6ddafd7eb5994eb3 | f5ad88715eb06cc56911aafb42a00e609699b88f | refs/heads/main | 2023-03-05T22:19:52.446674 | 2021-02-18T02:36:44 | 2021-02-18T02:36:44 | 329,175,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40 | py | # coding: utf8
# Copyright: MathDecision | [
"Pmontenegro@coopenae.fi.cr"
] | Pmontenegro@coopenae.fi.cr |
14eea9b9d76993c047f29422479048d98914da2e | 9bf3f83dbc66ff727df17d8883eea1f492782eef | /knapi/theapi/models.py | d34df3fe49e0991ab950d4c762bed31c8e04b8f4 | [] | no_license | lee-van-oetz/knapi | 2db1153e9992c341a8d547c8f46cd00b063659b3 | 39b60ddb4be3f7c9df5824cdb7c949994b302f54 | refs/heads/master | 2021-01-23T04:10:08.190376 | 2017-03-27T05:38:57 | 2017-03-27T05:38:57 | 86,168,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,718 | py | from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
##########################
# Create your models here.
##########################
class KnapsackProblem(models.Model):
"""Specific knapsack problem model"""
created = models.DateTimeField(auto_now_add=True)
finished = models.DateTimeField(null=True)
task_json = models.TextField()
task_hash = models.CharField(max_length=8)
in_knapsack_json = models.TextField()
total_value = models.IntegerField(default=0)
total_weight = models.IntegerField(default=0)
@property
def seconds_took(self):
if self.finished:
return (self.finished - self.created).total_seconds()
else:
return 'Still running'
@property
def state(self):
return 'FINISHED' if self.finished else 'RUNNING'
class Meta:
ordering = ('created',)
class KnapsackProblemRequest(models.Model):
created = models.DateTimeField(auto_now_add=True)
knapsack_problem = models.ForeignKey(KnapsackProblem)
num_items = models.IntegerField()
capacity = models.IntegerField()
items = models.TextField()
@property
def time_elapsed(self):
if self.knapsack_problem.finished:
# solution was fetched instantly
if self.knapsack_problem.finished < self.created:
return 0
else:
return max(self.knapsack_problem.seconds_took, (self.knapsack_problem.finished - self.created).total_seconds())
else:
return (timezone.now() - self.created).total_seconds()
class Meta:
ordering = ('created',)
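
# Usage sketch (assumes a configured Django environment and a migrated
# database; values are illustrative):
#
#   problem = KnapsackProblem.objects.create(
#       task_json="{}", task_hash="deadbeef", in_knapsack_json="[]")
#   problem.state          # -> 'RUNNING' while `finished` is unset
#   problem.finished = timezone.now()
#   problem.save()
#   problem.seconds_took   # -> float seconds between created and finished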
| [
"jindrich.prokop@gmail.com"
] | jindrich.prokop@gmail.com |
0d655252b153700d41ac2c4ff37a319441630edc | 975dfd26c369f2f1bc57ddd0afa399a226a02525 | /PyBall/models/draft/home.py | 76d7d4e36055c6c8cf13424a9e73c09703f2a2b8 | [
"MIT"
] | permissive | a-hacker/PyBall | cc18ca36e2707a7a8c531b6c99a3017c687b6ca5 | ed88b28dceddf4c8b9f1370d931e4cfa74ce5fda | refs/heads/master | 2021-04-26T23:31:35.211559 | 2018-03-23T17:58:08 | 2018-03-23T17:58:08 | 124,009,734 | 0 | 0 | MIT | 2018-03-23T17:58:09 | 2018-03-06T02:39:38 | Python | UTF-8 | Python | false | false | 267 | py | from PyBall.models import BaseModel
class Home(BaseModel):
_fields = {
'city': {'default_value': None, 'field_type': str},
'country': {'default_value': None, 'field_type': str},
'state': {'default_value': None, 'field_type': str},
}
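
# Sketch of how a base class might consume the `_fields` mapping above
# (hypothetical; the real PyBall.models.BaseModel may differ): each entry
# pairs an attribute name with a default value and an expected type that
# incoming keyword data is coerced to.
class _BaseModelSketch(object):
    _fields = {}

    def __init__(self, **data):
        for name, spec in self._fields.items():
            value = data.get(name, spec['default_value'])
            if value is not None and not isinstance(value, spec['field_type']):
                value = spec['field_type'](value)
            setattr(self, name, value)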
| [
"bradley.hurley@gmail.com"
] | bradley.hurley@gmail.com |
0bcde7ba149facae00a9e3c782315ca4ab3be275 | 637d2b471ab26a683cf67b259c58d2f9318a1bf2 | /McUtils/Coordinerds/CoordinateSystems/ZMatrixToCartesian.py | 790c2e356f7ad998828695adb0844d7da096249b | [
"MIT"
] | permissive | McCoyGroup/McUtils | 9c1c3befcef88d6094961e23a894efb4d97c84b1 | c7c3910e7cb5105c65b01ecb17a6668d126b2063 | refs/heads/master | 2023-08-18T16:24:17.718849 | 2023-08-11T23:10:21 | 2023-08-11T23:10:21 | 188,920,933 | 0 | 2 | MIT | 2022-12-15T19:17:02 | 2019-05-27T23:29:24 | Python | UTF-8 | Python | false | false | 8,885 | py | from .CoordinateSystemConverter import CoordinateSystemConverter
from .CommonCoordinateSystems import CartesianCoordinates3D, ZMatrixCoordinates
from ...Numputils import *
import numpy as np
class ZMatrixToCartesianConverter(CoordinateSystemConverter):
"""
A converter class for going from ZMatrix coordinates to Cartesian coordinates
"""
@property
def types(self):
return (ZMatrixCoordinates, CartesianCoordinates3D)
def default_ordering(self, coordlist):
if coordlist.shape[-1] == 6:
ordering = coordlist[:, :, (0, 2, 4)]
coordlist = coordlist[:, :, (1, 3, 5)]
else:
r = np.arange(len(coordlist[0]))
ordering = np.broadcast_to(
np.array([r, np.roll(r, 1), np.roll(r, 2)]).T[np.newaxis],
coordlist.shape[:2] + (3,)
)
return ordering, coordlist
def convert_many(self,
coordlist,
ordering=None, origins=None, axes=None, use_rad=True,
return_derivs=False,
**kw
):
"""Expects to get a list of configurations
These will look like:
[
[dist, angle, dihedral]
...
]
and ordering will be
[
[pos, point, line, plane]
...
]
**For efficiency it is assumed that all configurations have the same length**
:param coordlist:
:type coordlist:
:param origins:
:type origins:
:param axes:
:type axes:
:param use_rad:
:type use_rad:
:param kw:
:type kw:
:param ordering:
:type ordering:
:param return_derivs:
:type return_derivs:
:return:
:rtype:
"""
# make sure we have the ordering stuff in hand
if ordering is None:
ordering, coordlist = self.default_ordering(coordlist)
else:
ordering = np.array(ordering)
coordlist = np.asarray(coordlist)
if np.min(ordering) > 0:
ordering = ordering - 1
dim_diff = coordlist.ndim - ordering.ndim
if dim_diff > 0:
missing = coordlist.shape[:dim_diff]
ordering = np.broadcast_to(ordering, missing + ordering.shape )
if ordering.shape[-1] > 3:
atom_ordering = ordering[:, :, 0]
ordering = ordering[:, 1:, 1:]
else:
atom_ordering = None
sysnum = len(coordlist)
coordnum = len(coordlist[0])
total_points = np.empty((sysnum, coordnum+1, 3))
if return_derivs is not True and return_derivs is not False and isinstance(return_derivs, int):
return_derivs = True
return_deriv_order = return_derivs
elif return_derivs:
return_deriv_order = 2
if return_derivs:
derivs = [
                None,  # no need to store a copy of total_points here...
np.zeros((sysnum, coordnum, 3, coordnum + 1, 3)),
np.zeros((sysnum, coordnum, 3, coordnum, 3, coordnum + 1, 3))
]
# first we put the origin whereever the origins are specified
if origins is None:
origins = [0, 0, 0]
origins = np.asarray(origins)
if len(origins.shape) < 2:
origins = np.broadcast_to(origins, (sysnum, 3))
total_points[:, 0] = origins
# set up the next points by just setting them along the x-axis by default
if axes is None:
axes = [1, 0, 0]
axes = np.asarray(axes)
if axes.ndim == 1:
axes = np.array([
axes,
[0, 1, 0]
]) # np.concatenate((np.random.uniform(low=.5, high=1, size=(2,)), np.zeros((1,)) ))])
if axes.ndim == 2:
axes = np.broadcast_to(axes[np.newaxis], (sysnum, 2, 3))
x_pts = origins + vec_normalize(axes[:, 0])
y_pts = origins + vec_normalize(axes[:, 1])
dists = coordlist[:, 0, 0]
if return_derivs:
der_stuff = cartesian_from_rad_derivatives(origins,
x_pts, y_pts, dists,
None, None,
0,
np.full((len(dists),), -1, dtype=int),
np.full((len(dists),), -1, dtype=int),
np.full((len(dists),), -1, dtype=int),
derivs,
order=return_deriv_order
)
total_points[:, 1] = der_stuff[0]
if return_deriv_order > 0:
derivs[1][np.arange(sysnum), :1, :, 1, :] = der_stuff[1]
if return_deriv_order > 1:
derivs[2][np.arange(sysnum), :1, :, :1, :, 1, :] = der_stuff[2]
else:
ref_points_1, _ = cartesian_from_rad(origins, x_pts, y_pts, dists, None, None)
total_points[:, 1] = ref_points_1
# print(">> z2c >> ordering", ordering[0])
        # iteratively build the rest of the coords, with one special case for i == 1
for i in range(1, coordnum):
# Get the distances away
ref_coords1 = ordering[:, i, 0] # reference atom numbers for first coordinate
refs1 = total_points[np.arange(sysnum), ref_coords1.astype(int)] # get the actual reference coordinates
dists = np.reshape(coordlist[:, i, 0], (sysnum, 1)) # pull the requisite distances
ref_coords2 = ordering[:, i, 1] # reference atom numbers for second coordinate
refs2 = total_points[np.arange(sysnum), ref_coords2.astype(int)] # get the actual reference coordinates for the angle
angle = coordlist[:, i, 1] # pull the requisite angle values
if not use_rad:
angle = np.deg2rad(angle)
if i == 1:
refs3 = y_pts
dihed = None
ref_coords3 = np.full((len(dists),), -1, dtype=int)
psi_flag = False
else:
ref_coords3 = ordering[:, i, 2] # reference atom numbers for dihedral ref coordinate
refs3 = total_points[np.arange(sysnum), ref_coords3.astype(int)] # get the actual reference coordinates for the dihed
dihed = coordlist[:, i, 2] # pull proper dihedral values
if not use_rad:
dihed = np.deg2rad(dihed)
if ordering.shape[-1] == 4:
raise ValueError("Unclear if there is a difference between tau and psi")
psi_flag = ordering[:, i, 3] == 1
# dihed[psi_flag] = -dihed[psi_flag]
else:
psi_flag = False
if return_derivs:
if ordering.shape[-1] == 4:
raise NotImplementedError("don't have derivatives for case with psi angles")
der_stuff = cartesian_from_rad_derivatives(
refs1, refs2, refs3,
dists, angle, dihed,
i,
ref_coords1,
ref_coords2,
ref_coords3,
derivs,
order=return_deriv_order
)
# crd, d1, d2 = stuff
total_points[:, i+1] = der_stuff[0]
if return_deriv_order > 0:
derivs[1][np.arange(sysnum), :i+1, :, i+1, :] = der_stuff[1]
if return_deriv_order > 1:
derivs[2][np.arange(sysnum), :i+1, :, :i+1, :, i+1, :] = der_stuff[2]
else:
ref_points_1, _ = cartesian_from_rad(refs1, refs2, refs3, dists, angle, dihed, psi=psi_flag)
total_points[:, i+1] = ref_points_1
if atom_ordering is not None:
rev_ord = atom_ordering#np.argsort(atom_ordering, axis=1)
total_points = total_points[np.arange(len(atom_ordering))[:, np.newaxis], rev_ord] #wat?
converter_opts = dict(use_rad=use_rad, ordering=ordering)
if return_derivs:
if return_deriv_order > 0:
converter_opts['derivs'] = derivs[1:][:return_deriv_order]
return total_points, converter_opts
def convert(self, coords, **kw):
"""dipatches to convert_many but only pulls the first"""
total_points, opts = self.convert_many(coords[np.newaxis], **kw)
return total_points[0], opts
__converters__ = [ ZMatrixToCartesianConverter() ] | [
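
# Hedged illustration (standalone sketch, not used by the converter above):
# the textbook step of placing one atom from (distance, angle, dihedral)
# internal coordinates relative to three reference points -- the core of the
# Z-matrix -> Cartesian conversion. Angles are in radians; the helper name
# is local to this sketch.
def _place_atom_sketch(a, b, c, r, theta, phi):
    # c is bonded to the new atom; theta is the angle b-c-new; phi is the
    # dihedral about the b-c axis measured with respect to a.
    bc = (c - b) / np.linalg.norm(c - b)
    ab = b - a
    n = np.cross(ab, bc)
    n /= np.linalg.norm(n)
    m = np.cross(n, bc)
    # Displacement expressed in the local orthonormal frame (bc, m, n).
    d = np.array([-r * np.cos(theta),
                  r * np.sin(theta) * np.cos(phi),
                  r * np.sin(theta) * np.sin(phi)])
    return c + d[0] * bc + d[1] * m + d[2] * n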
"b3m2a1@gmail.com"
] | b3m2a1@gmail.com |
1a254cdc13044408437afdc922e0f764e45c5795 | 0e9fad9c000430a735e10568644dc3e0c6a1de54 | /curriculum_ctvt/input_mistakes.py | f578c6927d70e2f14f213a7c2b373d114132797e | [] | no_license | pedal-edu/curriculum-ctvt | 54d489926f366b486a3e5663de444221a5924f92 | 2f8472627b9adceb90466f206f1131fdecc3a2e5 | refs/heads/master | 2023-04-01T20:17:58.542300 | 2021-03-25T13:54:51 | 2021-03-25T13:54:51 | 276,709,305 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | from pedal.core.commands import gently, explain
from pedal.cait.cait_api import *
def unnecessary_cast(needed_casts):
"""
Args:
needed_casts: List of casts that are necessary to this problem
Returns:
"""
message = "Converting to {} is unnecessary in this problem"
code = "ex_cast"
tldr = "Unnecessary Conversion"
known_casts = ["float", "int", "str"]
matches = find_matches("_cast_(___)")
for match in matches:
user_cast = match["_cast_"].id
if user_cast not in needed_casts and user_cast in known_casts:
return explain(message.format(user_cast), label=code, title=tldr)
return False
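
def unnecessary_round_sketch():
    # Companion sketch (not part of the original checker set): the same CAIT
    # pattern-matching approach as unnecessary_cast, flagging any call to
    # round(). The label/title strings here are made up for illustration.
    matches = find_matches("round(___)")
    if matches:
        return explain("Rounding is unnecessary in this problem",
                       label="ex_round", title="Unnecessary Rounding")
    return False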
| [
"acbart@vt.edu"
] | acbart@vt.edu |
1c459705148d5b935a6f2166345b8b6e897b9b97 | 763774bbcd6aa6adf64bde5fbe9521a937785362 | /tests/test_concise_keras.py | 6203e9afeee825596836ae6f6bce515552396ef0 | [
"MIT"
] | permissive | morphinggen/concise | 969075dfbed99071fae53b0cba637bb5c25e3359 | 12078d75f37fe176bb7d221134b8b14aeb48e11f | refs/heads/master | 2022-04-28T03:11:08.606943 | 2020-04-15T19:19:34 | 2020-04-15T19:19:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,079 | py | """
test_concise_keras
----------------------------------
Tests for `concise_keras` module
"""
import keras
from keras.models import model_from_json
from concise.legacy.models import single_layer_pos_effect as concise_model
from concise.eval_metrics import mse
from sklearn.linear_model import LinearRegression
import pytest
from tests.setup_concise_load_data import load_example_data
import numpy as np
def test_serialization():
c = concise_model(init_motifs=["TAATA", "TGCGAT"],
pooling_layer="sum",
n_splines=10,
)
js = c.to_json()
assert isinstance(model_from_json(js), keras.models.Model)
def test_serialization_disk(tmpdir):
param, X_feat, X_seq, y, id_vec = load_example_data()
dc = concise_model(pooling_layer="sum",
init_motifs=["TGCGAT", "TATTTAT"],
n_splines=10,
n_covariates=X_feat.shape[1],
seq_length=X_seq.shape[1],
**param)
dc.fit([X_seq, X_feat], y, epochs=1,
validation_data=([X_seq, X_feat], y))
fn = tmpdir.mkdir('data').join('test_keras.h5')
dc.save(str(fn))
dc = keras.models.load_model(str(fn))
assert isinstance(dc, keras.models.Model)
class TestKerasConciseBasic(object):
@classmethod
def setup_class(cls):
cls.data = load_example_data()
# pass
def test_no_error(self):
# test the nice print:
param, X_feat, X_seq, y, id_vec = self.data
dc = concise_model(pooling_layer="max",
n_covariates=X_feat.shape[1],
seq_length=X_seq.shape[1],
**param)
dc.fit([X_seq, X_feat], y, epochs=1,
validation_data=([X_seq, X_feat], y))
y_pred = dc.predict([X_seq, X_feat])
y_pred
def test_train_predict_no_X_feat(self):
# test the nice print:
param, X_feat, X_seq, y, id_vec = self.data
dc = concise_model(pooling_layer="max",
n_covariates=0,
seq_length=X_seq.shape[1],
**param)
dc.fit(X_seq, y, epochs=1,
validation_data=(X_seq, y))
y_pred = dc.predict(X_seq)
y_pred
@classmethod
def teardown_class(cls):
pass
class TestMultiTaskLearning(TestKerasConciseBasic):
"""
Test multi-task learning
"""
@classmethod
def setup_class(cls):
cls.data = load_example_data(num_tasks=3)
class TestConcisePrediction(object):
@classmethod
def setup_class(cls):
cls.data = load_example_data(trim_seq_len=1, standardize_features=False)
cls.data[0]["n_motifs"] = 1
cls.data[0]["motif_length"] = 1
cls.data[0]["step_size"] = 0.001
cls.data[0]["early_stop_patience"] = 3
def test_non_std(self):
# test the nice print:
param, X_feat, X_seq, y, id_vec = self.data
dc = concise_model(pooling_layer="max",
n_covariates=X_feat.shape[1],
lambd=0,
seq_length=X_seq.shape[1],
**param)
callback = keras.callbacks.EarlyStopping(patience=param["early_stop_patience"])
dc.fit([X_seq, X_feat], y, epochs=50,
callbacks=[callback],
validation_data=([X_seq, X_feat], y))
dc_coef = dc.layers[-1].get_weights()[0][-X_feat.shape[1]:, 0]
lm = LinearRegression()
lm.fit(X_feat, y)
# np.allclose(lm.coef_, dc_coef, atol=0.02)
# # weights has to be the same as for linear regression
# (dc_coef - lm.coef_) / lm.coef_
# they both have to predict the same
y_pred = dc.predict([X_seq, X_feat])
mse_lm = mse(y, lm.predict(X_feat))
mse_dc = mse(y, y_pred)
print("mse_lm")
print(mse_lm)
print("mse_dc")
print(mse_dc)
assert mse_dc < mse_lm + 0.01
| [
"zigaavsec@gmail.com"
] | zigaavsec@gmail.com |
a66f9c41196e481531527bb60b25dca5cff97b40 | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /lib/python2.7/site-packages/numba/tests/test_lists.py | 5e10c7df5ba329a8e6492e7b5fde5844df12789a | [
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 23,959 | py | from __future__ import print_function
from collections import namedtuple
import contextlib
import itertools
import math
import sys
from numba.compiler import compile_isolated, Flags
from numba import jit, types
import numba.unittest_support as unittest
from numba import testing
from .support import TestCase, MemoryLeakMixin, tag
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
Point = namedtuple('Point', ('a', 'b'))
def noop(x):
pass
def unbox_usecase(x):
"""
Expect a list of numbers
"""
res = 0
for v in x:
res += v
return res
def unbox_usecase2(x):
"""
Expect a list of tuples
"""
res = 0
for v in x:
res += len(v)
return res
def unbox_usecase3(x):
"""
Expect a (number, list of numbers) tuple.
"""
a, b = x
res = a
for v in b:
res += v
return res
def unbox_usecase4(x):
"""
Expect a (number, list of tuples) tuple.
"""
a, b = x
res = a
for v in b:
res += len(v)
return res
def create_list(x, y, z):
return [x, y, z]
def create_nested_list(x, y, z, a, b, c):
return [[x, y, z], [a, b, c]]
def list_comprehension1():
return sum([x**2 for x in range(10)])
def list_comprehension2():
return sum([x for x in range(10) if x % 2 == 0])
def list_comprehension3():
return sum([math.pow(x, 2) for x in range(10)])
def list_comprehension4():
return sum([x * y for x in range(10) for y in range(10)])
def list_comprehension5():
return [x * 2 for x in range(10)]
def list_comprehension6():
return [[x for x in range(y)] for y in range(3)]
def list_constructor(n):
return list(range(n))
def list_append(n):
l = []
l.append(42)
for i in range(n):
l.append(i)
return l
def list_append_heterogenous(n):
l = []
l.append(42.0)
for i in range(n):
l.append(i)
return l
def list_extend(n):
l = []
# A non-list iterable and a list
l.extend(range(n))
l.extend(l[:-1])
l.extend(range(n, 0, -1))
return l
def list_extend_heterogenous(n):
l = []
# Extend with various iterables, including lists, with different types
l.extend(range(n))
l.extend(l[:-1])
l.extend((5, 42))
l.extend([123.0])
return l
def list_pop0(n):
l = list(range(n))
res = 0
while len(l) > 0:
res += len(l) * l.pop()
return res
def list_pop1(n, i):
l = list(range(n))
x = l.pop(i)
return x, l
def list_len(n):
l = list(range(n))
return len(l)
def list_getitem(n):
l = list(range(n))
res = 0
# Positive indices
for i in range(len(l)):
res += i * l[i]
# Negative indices
for i in range(-len(l), 0):
res -= i * l[i]
return res
def list_setitem(n):
l = list(range(n))
res = 0
# Positive indices
for i in range(len(l)):
l[i] = i * l[i]
# Negative indices
for i in range(-len(l), 0):
l[i] = i * l[i]
for i in range(len(l)):
res += l[i]
return res
def list_getslice2(n, start, stop):
l = list(range(n))
return l[start:stop]
def list_getslice3(n, start, stop, step):
l = list(range(n))
return l[start:stop:step]
def list_setslice2(n, n_source, start, stop):
# Generic setslice with size change
l = list(range(n))
v = list(range(100, 100 + n_source))
l[start:stop] = v
return l
def list_setslice3(n, start, stop, step):
l = list(range(n))
v = l[start:stop:step]
for i in range(len(v)):
v[i] += 100
l[start:stop:step] = v
return l
def list_setslice3_arbitrary(n, n_src, start, stop, step):
l = list(range(n))
l[start:stop:step] = list(range(100, 100 + n_src))
return l
def list_delslice0(n):
l = list(range(n))
del l[:]
return l
def list_delslice1(n, start, stop):
l = list(range(n))
del l[start:]
del l[:stop]
return l
def list_delslice2(n, start, stop):
l = list(range(n))
del l[start:stop]
return l
def list_clear(n):
l = list(range(n))
l.clear()
return l
def list_copy(n):
l = list(range(n))
ll = l.copy()
l.append(42)
return l, ll
def list_iteration(n):
l = list(range(n))
res = 0
for i, v in enumerate(l):
res += i * v
return res
def list_contains(n):
l = list(range(n))
return (0 in l, 1 in l, n - 1 in l, n in l)
def list_index1(n, v):
l = list(range(n, 0, -1))
return l.index(v)
def list_index2(n, v, start):
l = list(range(n, 0, -1))
return l.index(v, start)
def list_index3(n, v, start, stop):
l = list(range(n, 0, -1))
return l.index(v, start, stop)
def list_remove(n, v):
l = list(range(n - 1, -1, -1))
l.remove(v)
return l
def list_insert(n, pos, v):
l = list(range(0, n))
l.insert(pos, v)
return l
def list_count(n, v):
l = []
for x in range(n):
l.append(x & 3)
return l.count(v)
def list_reverse(n):
l = list(range(n))
l.reverse()
return l
def list_add(m, n):
a = list(range(0, m))
b = list(range(100, 100 + n))
res = a + b
res.append(42) # check result is a copy
return a, b, res
def list_add_heterogenous():
a = [1]
b = [2.0]
c = a + b
d = b + a
# check result is a copy
a.append(3)
b.append(4.0)
return a, b, c, d
def list_add_inplace(m, n):
a = list(range(0, m))
b = list(range(100, 100 + n))
a += b
return a, b
def list_add_inplace_heterogenous():
a = [1]
b = [2.0]
a += b
b += a
return a, b
def list_mul(n, v):
a = list(range(n))
return a * v
def list_mul_inplace(n, v):
a = list(range(n))
a *= v
return a
def list_bool(n):
a = list(range(n))
return bool(a), (True if a else False)
def eq_usecase(a, b):
return list(a) == list(b)
def ne_usecase(a, b):
return list(a) != list(b)
def gt_usecase(a, b):
return list(a) > list(b)
def ge_usecase(a, b):
return list(a) >= list(b)
def lt_usecase(a, b):
return list(a) < list(b)
def le_usecase(a, b):
return list(a) <= list(b)
def identity_usecase(n):
a = list(range(n))
b = a
c = a[:]
return (a is b), (a is not b), (a is c), (a is not c)
def bool_list_usecase():
# Exercise getitem, setitem, iteration with bool values (issue #1373)
l = [False]
l[0] = True
x = False
for v in l:
x = x ^ v
return l, x
def reflect_simple(l, ll):
x = l.pop()
y = l.pop()
l[0] = 42.
l.extend(ll)
return l, x, y
def reflect_conditional(l, ll):
# `l` may or may not actually reflect a Python list
if ll[0]:
l = [11., 22., 33., 44.]
x = l.pop()
y = l.pop()
l[0] = 42.
l.extend(ll)
return l, x, y
def reflect_exception(l):
l.append(42)
raise ZeroDivisionError
def reflect_dual(l, ll):
l.append(ll.pop())
return l is ll
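
def reflection_demo(l):
    # Illustrative sketch (not exercised by the suite): when jitted with
    # @jit(nopython=True), mutations made here are written back to the
    # caller's Python list on return -- the behaviour that
    # TestListReflection below verifies.
    l.append(42)
    return len(l)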
class TestLists(MemoryLeakMixin, TestCase):
def test_create_list(self):
pyfunc = create_list
cr = compile_isolated(pyfunc, (types.int32, types.int32, types.int32))
cfunc = cr.entry_point
self.assertEqual(cfunc(1, 2, 3), pyfunc(1, 2, 3))
def test_create_nested_list(self):
pyfunc = create_nested_list
with self.assertTypingError():
cr = compile_isolated(pyfunc, (types.int32, types.int32, types.int32,
types.int32, types.int32, types.int32))
cfunc = cr.entry_point
self.assertEqual(cfunc(1, 2, 3, 4, 5, 6), pyfunc(1, 2, 3, 4, 5, 6))
@testing.allow_interpreter_mode
def test_list_comprehension(self):
list_tests = [list_comprehension1,
list_comprehension2,
list_comprehension3,
list_comprehension4,
list_comprehension5,
list_comprehension6]
for test in list_tests:
pyfunc = test
cr = compile_isolated(pyfunc, ())
cfunc = cr.entry_point
self.assertEqual(cfunc(), pyfunc())
def check_unary_with_size(self, pyfunc, precise=True):
cfunc = jit(nopython=True)(pyfunc)
# Use various sizes, to stress the allocation algorithm
for n in [0, 3, 16, 70, 400]:
eq = self.assertPreciseEqual if precise else self.assertEqual
eq(cfunc(n), pyfunc(n))
def test_constructor(self):
self.check_unary_with_size(list_constructor)
def test_append(self):
self.check_unary_with_size(list_append)
@tag('important')
def test_append_heterogenous(self):
self.check_unary_with_size(list_append_heterogenous, precise=False)
def test_extend(self):
self.check_unary_with_size(list_extend)
@tag('important')
def test_extend_heterogenous(self):
self.check_unary_with_size(list_extend_heterogenous, precise=False)
def test_pop0(self):
self.check_unary_with_size(list_pop0)
@tag('important')
def test_pop1(self):
pyfunc = list_pop1
cfunc = jit(nopython=True)(pyfunc)
for n in [5, 40]:
for i in [0, 1, n - 2, n - 1, -1, -2, -n + 3, -n + 1]:
expected = pyfunc(n, i)
self.assertPreciseEqual(cfunc(n, i), expected)
def test_pop_errors(self):
# XXX References are leaked when an exception is raised
self.disable_leak_check()
cfunc = jit(nopython=True)(list_pop1)
with self.assertRaises(IndexError) as cm:
cfunc(0, 5)
self.assertEqual(str(cm.exception), "pop from empty list")
with self.assertRaises(IndexError) as cm:
cfunc(1, 5)
self.assertEqual(str(cm.exception), "pop index out of range")
def test_insert(self):
pyfunc = list_insert
cfunc = jit(nopython=True)(pyfunc)
for n in [5, 40]:
indices = [0, 1, n - 2, n - 1, n + 1, -1, -2, -n + 3, -n - 1]
for i in indices:
expected = pyfunc(n, i, 42)
self.assertPreciseEqual(cfunc(n, i, 42), expected)
def test_len(self):
self.check_unary_with_size(list_len)
@tag('important')
def test_getitem(self):
self.check_unary_with_size(list_getitem)
@tag('important')
def test_setitem(self):
self.check_unary_with_size(list_setitem)
def check_slicing2(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
sizes = [5, 40]
for n in sizes:
indices = [0, 1, n - 2, -1, -2, -n + 3, -n - 1, -n]
for start, stop in itertools.product(indices, indices):
expected = pyfunc(n, start, stop)
self.assertPreciseEqual(cfunc(n, start, stop), expected)
def test_getslice2(self):
self.check_slicing2(list_getslice2)
def test_setslice2(self):
pyfunc = list_setslice2
cfunc = jit(nopython=True)(pyfunc)
sizes = [5, 40]
for n, n_src in itertools.product(sizes, sizes):
indices = [0, 1, n - 2, -1, -2, -n + 3, -n - 1, -n]
for start, stop in itertools.product(indices, indices):
expected = pyfunc(n, n_src, start, stop)
self.assertPreciseEqual(cfunc(n, n_src, start, stop), expected)
@tag('important')
def test_getslice3(self):
pyfunc = list_getslice3
cfunc = jit(nopython=True)(pyfunc)
for n in [10]:
indices = [0, 1, n - 2, -1, -2, -n + 3, -n - 1, -n]
steps = [4, 1, -1, 2, -3]
for start, stop, step in itertools.product(indices, indices, steps):
expected = pyfunc(n, start, stop, step)
self.assertPreciseEqual(cfunc(n, start, stop, step), expected)
@tag('important')
def test_setslice3(self):
pyfunc = list_setslice3
cfunc = jit(nopython=True)(pyfunc)
for n in [10]:
indices = [0, 1, n - 2, -1, -2, -n + 3, -n - 1, -n]
steps = [4, 1, -1, 2, -3]
for start, stop, step in itertools.product(indices, indices, steps):
expected = pyfunc(n, start, stop, step)
self.assertPreciseEqual(cfunc(n, start, stop, step), expected)
def test_setslice3_resize(self):
# XXX References are leaked when an exception is raised
self.disable_leak_check()
pyfunc = list_setslice3_arbitrary
cfunc = jit(nopython=True)(pyfunc)
# step == 1 => can resize
cfunc(5, 10, 0, 2, 1)
# step != 1 => cannot resize
with self.assertRaises(ValueError) as cm:
cfunc(5, 100, 0, 3, 2)
self.assertIn("cannot resize", str(cm.exception))
def test_delslice0(self):
self.check_unary_with_size(list_delslice0)
def test_delslice1(self):
self.check_slicing2(list_delslice1)
@tag('important')
def test_delslice2(self):
self.check_slicing2(list_delslice2)
def test_invalid_slice(self):
self.disable_leak_check()
pyfunc = list_getslice3
cfunc = jit(nopython=True)(pyfunc)
with self.assertRaises(ValueError) as cm:
cfunc(10, 1, 2, 0)
self.assertEqual(str(cm.exception), "slice step cannot be zero")
def test_iteration(self):
self.check_unary_with_size(list_iteration)
@tag('important')
def test_reverse(self):
self.check_unary_with_size(list_reverse)
def test_contains(self):
self.check_unary_with_size(list_contains)
def check_index_result(self, pyfunc, cfunc, args):
try:
expected = pyfunc(*args)
except ValueError:
with self.assertRaises(ValueError):
cfunc(*args)
else:
self.assertPreciseEqual(cfunc(*args), expected)
def test_index1(self):
self.disable_leak_check()
pyfunc = list_index1
cfunc = jit(nopython=True)(pyfunc)
for v in (0, 1, 5, 10, 99999999):
self.check_index_result(pyfunc, cfunc, (16, v))
def test_index2(self):
self.disable_leak_check()
pyfunc = list_index2
cfunc = jit(nopython=True)(pyfunc)
n = 16
for v in (0, 1, 5, 10, 99999999):
indices = [0, 1, n - 2, n - 1, n + 1, -1, -2, -n + 3, -n - 1]
for start in indices:
self.check_index_result(pyfunc, cfunc, (16, v, start))
def test_index3(self):
self.disable_leak_check()
pyfunc = list_index3
cfunc = jit(nopython=True)(pyfunc)
n = 16
for v in (0, 1, 5, 10, 99999999):
indices = [0, 1, n - 2, n - 1, n + 1, -1, -2, -n + 3, -n - 1]
for start, stop in itertools.product(indices, indices):
self.check_index_result(pyfunc, cfunc, (16, v, start, stop))
def test_remove(self):
pyfunc = list_remove
cfunc = jit(nopython=True)(pyfunc)
n = 16
for v in (0, 1, 5, 15):
expected = pyfunc(n, v)
self.assertPreciseEqual(cfunc(n, v), expected)
def test_remove_error(self):
self.disable_leak_check()
pyfunc = list_remove
cfunc = jit(nopython=True)(pyfunc)
with self.assertRaises(ValueError) as cm:
cfunc(10, 42)
self.assertEqual(str(cm.exception), "list.remove(x): x not in list")
def test_count(self):
pyfunc = list_count
cfunc = jit(nopython=True)(pyfunc)
for v in range(5):
self.assertPreciseEqual(cfunc(18, v), pyfunc(18, v))
@unittest.skipUnless(sys.version_info >= (3, 3),
"list.clear() needs Python 3.3+")
def test_clear(self):
self.check_unary_with_size(list_clear)
@unittest.skipUnless(sys.version_info >= (3, 3),
"list.copy() needs Python 3.3+")
def test_copy(self):
self.check_unary_with_size(list_copy)
def check_add(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
sizes = [0, 3, 50, 300]
for m, n in itertools.product(sizes, sizes):
expected = pyfunc(m, n)
self.assertPreciseEqual(cfunc(m, n), expected)
def test_add(self):
self.check_add(list_add)
def test_add_heterogenous(self):
pyfunc = list_add_heterogenous
cfunc = jit(nopython=True)(pyfunc)
expected = pyfunc()
self.assertEqual(cfunc(), expected)
def test_add_inplace(self):
self.check_add(list_add_inplace)
def test_add_inplace_heterogenous(self):
pyfunc = list_add_inplace_heterogenous
cfunc = jit(nopython=True)(pyfunc)
expected = pyfunc()
self.assertEqual(cfunc(), expected)
def check_mul(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
for n in [0, 3, 50, 300]:
for v in [1, 2, 3, 0, -1, -42]:
expected = pyfunc(n, v)
self.assertPreciseEqual(cfunc(n, v), expected)
def test_mul(self):
self.check_mul(list_mul)
def test_mul_inplace(self):
self.check_mul(list_mul_inplace)
@unittest.skipUnless(sys.maxsize >= 2**32,
"need a 64-bit system to test for MemoryError")
def test_mul_error(self):
self.disable_leak_check()
pyfunc = list_mul
cfunc = jit(nopython=True)(pyfunc)
# Fail in malloc()
with self.assertRaises(MemoryError):
cfunc(1, 2**58)
# Overflow size computation when multiplying by item size
with self.assertRaises(MemoryError):
cfunc(1, 2**62)
def test_bool(self):
pyfunc = list_bool
cfunc = jit(nopython=True)(pyfunc)
for n in [0, 1, 3]:
expected = pyfunc(n)
self.assertPreciseEqual(cfunc(n), expected)
def test_list_passing(self):
# Check one can pass a list from a Numba function to another
@jit(nopython=True)
def inner(lst):
return len(lst), lst[-1]
@jit(nopython=True)
def outer(n):
l = list(range(n))
return inner(l)
self.assertPreciseEqual(outer(5), (5, 4))
def _test_compare(self, pyfunc):
def eq(args):
self.assertIs(cfunc(*args), pyfunc(*args),
"mismatch for arguments %s" % (args,))
cfunc = jit(nopython=True)(pyfunc)
eq(((1, 2), (1, 2)))
eq(((1, 2, 3), (1, 2)))
eq(((1, 2), (1, 2, 3)))
eq(((1, 2, 4), (1, 2, 3)))
eq(((1.0, 2.0, 3.0), (1, 2, 3)))
eq(((1.0, 2.0, 3.5), (1, 2, 3)))
def test_eq(self):
self._test_compare(eq_usecase)
def test_ne(self):
self._test_compare(ne_usecase)
def test_le(self):
self._test_compare(le_usecase)
def test_lt(self):
self._test_compare(lt_usecase)
def test_ge(self):
self._test_compare(ge_usecase)
def test_gt(self):
self._test_compare(gt_usecase)
def test_identity(self):
pyfunc = identity_usecase
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(3), pyfunc(3))
def test_bool_list(self):
# Check lists of bools compile and run successfully
pyfunc = bool_list_usecase
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(), pyfunc())
class TestUnboxing(MemoryLeakMixin, TestCase):
"""
Test unboxing of Python lists into native Numba lists.
"""
@contextlib.contextmanager
def assert_type_error(self, msg):
with self.assertRaises(TypeError) as raises:
yield
if msg is not None:
self.assertRegexpMatches(str(raises.exception), msg)
def check_unary(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
def check(arg):
expected = pyfunc(arg)
got = cfunc(arg)
self.assertPreciseEqual(got, expected)
return check
def test_numbers(self):
check = self.check_unary(unbox_usecase)
check([1, 2])
check([1j, 2.5j])
def test_tuples(self):
check = self.check_unary(unbox_usecase2)
check([(1, 2), (3, 4)])
check([(1, 2j), (3, 4j)])
check([(), (), ()])
@tag('important')
def test_list_inside_tuple(self):
check = self.check_unary(unbox_usecase3)
check((1, [2, 3, 4]))
def test_list_of_tuples_inside_tuple(self):
check = self.check_unary(unbox_usecase4)
check((1, [(2,), (3,)]))
def test_errors(self):
# See #1545 and #1594: error checking should ensure the list is
# homogenous
msg = "can't unbox heterogenous list"
pyfunc = noop
cfunc = jit(nopython=True)(pyfunc)
lst = [1, 2.5]
with self.assert_type_error(msg):
cfunc(lst)
# The list hasn't been changed (bogus reflecting)
self.assertEqual(lst, [1, 2.5])
with self.assert_type_error(msg):
cfunc([1, 2j])
# Same when the list is nested in a tuple or namedtuple
with self.assert_type_error(msg):
cfunc((1, [1, 2j]))
with self.assert_type_error(msg):
cfunc(Point(1, [1, 2j]))
# Issue #1638: tuples of different size.
# Note the check is really on the tuple side.
lst = [(1,), (2, 3)]
with self.assertRaises(ValueError) as raises:
cfunc(lst)
self.assertEqual(str(raises.exception),
"size mismatch for tuple, expected 1 element(s) but got 2")
class TestListReflection(MemoryLeakMixin, TestCase):
"""
Test reflection of native Numba lists on Python list objects.
"""
def check_reflection(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
samples = [([1., 2., 3., 4.], [0.]),
([1., 2., 3., 4.], [5., 6., 7., 8., 9.]),
]
for dest, src in samples:
expected = list(dest)
got = list(dest)
pyres = pyfunc(expected, src)
with self.assertRefCount(got, src):
cres = cfunc(got, src)
self.assertPreciseEqual(cres, pyres)
self.assertPreciseEqual(expected, got)
self.assertEqual(pyres[0] is expected, cres[0] is got)
del pyres, cres
def test_reflect_simple(self):
self.check_reflection(reflect_simple)
def test_reflect_conditional(self):
self.check_reflection(reflect_conditional)
def test_reflect_exception(self):
"""
When the function exits with an exception, lists should still be
reflected.
"""
pyfunc = reflect_exception
cfunc = jit(nopython=True)(pyfunc)
l = [1, 2, 3]
with self.assertRefCount(l):
with self.assertRaises(ZeroDivisionError):
cfunc(l)
self.assertPreciseEqual(l, [1, 2, 3, 42])
@tag('important')
def test_reflect_same_list(self):
"""
When the same list object is reflected twice, behaviour should
be consistent.
"""
pyfunc = reflect_dual
cfunc = jit(nopython=True)(pyfunc)
pylist = [1, 2, 3]
clist = pylist[:]
expected = pyfunc(pylist, pylist)
got = cfunc(clist, clist)
self.assertPreciseEqual(expected, got)
self.assertPreciseEqual(pylist, clist)
self.assertPreciseEqual(sys.getrefcount(pylist), sys.getrefcount(clist))
def test_reflect_clean(self):
"""
When the list wasn't mutated, no reflection should take place.
"""
cfunc = jit(nopython=True)(noop)
# Use a complex, as Python integers can be cached
l = [12.5j]
ids = [id(x) for x in l]
cfunc(l)
self.assertEqual([id(x) for x in l], ids)
if __name__ == '__main__':
unittest.main()
| [
"wgyumg@mgail.com"
] | wgyumg@mgail.com |
92a176149b6adf9cc5a297e10f227ab9d00f6a35 | ceed0a361e951b72c1a0fd8c50f093d66d6bfc31 | /towel/__init__.py | 15d8d2ec20144e03ccef873dbdd4d9c90f102e2e | [
"BSD-2-Clause"
] | permissive | jensneuhaus/towel | 481914ea6c2a71021c890d8c3c74ff7659df4b88 | a91039d71e458c7d59457e472cfe5bd6cad6a492 | refs/heads/master | 2020-12-11T05:44:40.220543 | 2015-09-03T08:56:24 | 2015-09-03T08:56:45 | 49,214,911 | 0 | 0 | null | 2016-01-07T16:10:05 | 2016-01-07T16:06:30 | Python | UTF-8 | Python | false | false | 105 | py | """
Towel - Keeping you DRY since 2010
"""
VERSION = (0, 7, 0)
__version__ = '.'.join(map(str, VERSION))
| [
"mk@spinlock.ch"
] | mk@spinlock.ch |
06cf4963a42226f8051013efa857f39d4c892470 | 60ad284e56abbe6ed755147ac3b03baae5bffa96 | /main.py | 65046eb8a6364beaa6c42337ef019ac0b166b360 | [] | no_license | pooja-j-n/Human-Face-Detection-using-Boosting | ce50a9d1f37717113122d51d2f4db70954566757 | 198917b709a73c834a5d82042084e298b1fe08cf | refs/heads/master | 2020-04-15T08:03:14.887401 | 2019-01-07T23:36:45 | 2019-01-07T23:36:45 | 164,514,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,859 | py | import numpy as np
import time
import cv2
from boosting_classifier import Boosting_Classifier
from visualizer import Visualizer
from im_process import normalize
from utils import *
import pdb
import pickle
def main():
#flag for debugging
flag_subset = False
boosting_type = 'Ada' #'Real' or 'Ada'
training_epochs = 101 if not flag_subset else 20
act_cache_dir = 'wc_activations.npy' if not flag_subset else 'wc_activations_subset.npy'
chosen_wc_cache_dir = 'chosen_wcs.pkl' if not flag_subset else 'chosen_wcs_subset.pkl'
plot_haar_filter = 'haar_filters' if not flag_subset else 'haar_filters_subset'
plot_sc_errors = 'sc_errors' if not flag_subset else 'sc_errors_subset'
steps = [0, 10, 50, 100] if not flag_subset else [0, 10]
#data configurations
pos_data_dir = 'newface16'
neg_data_dir = 'nonface16'
image_w = 16
image_h = 16
data, labels = load_data(pos_data_dir, neg_data_dir, image_w, image_h, flag_subset)
#---HARD NEGATIVE MINING--
#putting non-faces into training data for hard-negative mining
'''for i in range(3):
negative_patches = pickle.load(open('wrong_patches_'+str(i)+'.pkl', 'rb'))
data = np.append(data, negative_patches, axis = 0)
labels = np.append(labels, np.full(len(negative_patches), -1))'''
#pdb.set_trace()
data = integrate_images(normalize(data))
#number of bins for boosting
num_bins = 25
#number of cpus for parallel computing
num_cores = 8 if not flag_subset else 1 #always use 1 when debugging
#create Haar filters
filters = generate_Haar_filters(4, 4, 16, 16, image_w, image_h, flag_subset)
print("Length of filters " + str(len(filters)))
#create visualizer to draw histograms, roc curves and best weak classifier accuracies
drawer = Visualizer([10, 20, 50, 100], [1, 10, 20, 50, 100])
#create boost classifier with a pool of weak classifier
boost = Boosting_Classifier(filters, data, labels, training_epochs, num_bins, drawer, num_cores, boosting_type, chosen_wc_cache_dir)
#calculate filter values for all training images
start = time.clock()
boost.calculate_training_activations(act_cache_dir, act_cache_dir)
end = time.clock()
print('%f seconds for activation calculation' % (end - start))
print("Start of train process")
boost.train(chosen_wc_cache_dir)
print("End of train process")
print("Plotting Haar Filters")
boost.display_haar_filters(chosen_wc_cache_dir, plot_haar_filter)
print("Plotting training error of strong classifier")
boost.draw_sc_errors(chosen_wc_cache_dir, plot_sc_errors)
#Histogram, ROC, weak classfier errors
boost.visualize(steps, chosen_wc_cache_dir)
print("------Face Detection---------")
original_img = cv2.imread('./Testing_Images/Face_2.jpg', cv2.IMREAD_GRAYSCALE)
result_img = boost.face_detection(original_img)
cv2.imwrite('Result_Face2_hardneg.png', result_img)
original_img = cv2.imread('./Testing_Images/Face_3.jpg', cv2.IMREAD_GRAYSCALE)
result_img = boost.face_detection(original_img)
cv2.imwrite('Result_Face3_hardneg.png', result_img)
#HARD NEGATIVE MINING
'''
print("------Hard Negative Mining---------")
image_names = ['Non_face_1', 'Non_Face_2', 'Non_face_3']
for img in image_names:
print('Testing_Images/' + img + '.jpg')
wrong_patches = []
for img in image_names:
original_img = cv2.imread('Testing_Images/' + img + '.jpg', cv2.IMREAD_GRAYSCALE)
wrong_patches.append(boost.get_hard_negative_patches(original_img))
wrong_patches_0 = wrong_patches[0].reshape(wrong_patches[0].shape[1:4])
wrong_patches_1 = wrong_patches[1].reshape(wrong_patches[1].shape[1:4])
wrong_patches_2 = wrong_patches[2].reshape(wrong_patches[2].shape[1:4])
pickle.dump(wrong_patches_0, open( 'wrong_patches_0.pkl', 'wb'))
pickle.dump(wrong_patches_1, open( 'wrong_patches_1.pkl', 'wb'))
pickle.dump(wrong_patches_2, open( 'wrong_patches_2.pkl', 'wb'))
'''
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | pooja-j-n.noreply@github.com |
38037bc06af16d3280a4b98da17a5261e4c36e7a | 8b0bf7bf3d6ff7fc027cd76cfd8e9653ecea3c8d | /bin/pip3 | fa444f3ae2c5aa972073f45910ab0d58e3dacc83 | [] | no_license | flacogabrielc/paradigmas_secondversion | fa604da31e501cb3f3bf4323f2706c9e0602433d | 672114d8afa91cc66f808addf13cf83dcb3be89c | refs/heads/master | 2021-01-14T19:55:49.619743 | 2020-02-24T13:59:16 | 2020-02-24T13:59:16 | 242,738,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | #!/Users/gabriel/Documents/flasky/UruSA/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"flacogabrielc@hotmail.com"
] | flacogabrielc@hotmail.com | |
380a77f8833270b64c71259e3e403e6f2347400e | 9dbbb9463d6b3ebf577a96b4619e15c705ebd664 | /assignment_2/cs231n/optim.py | 851d141869bf9fca42c7507bfb4dbec1387f1695 | [] | no_license | d-a-p/cs231_18_deep-learning | ff19bfa1e402d3478ebeadfb0af8ed6dc210551c | 562cb0c5bd1b67e8484facec05cbded8c48b3e32 | refs/heads/master | 2022-12-20T16:59:34.862181 | 2019-11-08T22:35:17 | 2019-11-08T22:35:17 | 133,770,631 | 0 | 0 | null | 2022-12-07T23:30:24 | 2018-05-17T06:43:47 | Jupyter Notebook | UTF-8 | Python | false | false | 5,856 | py | import numpy as np
"""
This file implements various first-order update rules that are commonly used
for training neural networks. Each update rule accepts current weights and the
gradient of the loss with respect to those weights and produces the next set of
weights. Each update rule has the same interface:
def update(w, dw, config=None):
Inputs:
- w: A numpy array giving the current weights.
- dw: A numpy array of the same shape as w giving the gradient of the
loss with respect to w.
- config: A dictionary containing hyperparameter values such as learning
rate, momentum, etc. If the update rule requires caching values over many
iterations, then config will also hold these cached values.
Returns:
- next_w: The next point after the update.
- config: The config dictionary to be passed to the next iteration of the
update rule.
NOTE: For most update rules, the default learning rate will probably not
perform well; however the default values of the other hyperparameters should
work well for a variety of different problems.
For efficiency, update rules may perform in-place updates, mutating w and
setting next_w equal to w.
"""
def sgd(w, dw, config=None):
"""
Performs vanilla stochastic gradient descent.
config format:
- learning_rate: Scalar learning rate.
"""
if config is None: config = {}
config.setdefault('learning_rate', 1e-2)
w -= config['learning_rate'] * dw
return w, config
def sgd_momentum(w, dw, config=None):
"""
Performs stochastic gradient descent with momentum.
config format:
- learning_rate: Scalar learning rate.
- momentum: Scalar between 0 and 1 giving the momentum value.
Setting momentum = 0 reduces to sgd.
- velocity: A numpy array of the same shape as w and dw used to store a
moving average of the gradients.
"""
if config is None: config = {}
config.setdefault('learning_rate', 1e-2)
config.setdefault('momentum', 0.9)
v = config.get('velocity', np.zeros_like(w))
next_w = None
v = config['momentum'] * v - config['learning_rate'] * dw
next_w = w + v
###########################################################################
# Implement the momentum update formula. Store the updated value in #
# the next_w variable. You should also use and update the velocity v. #
###########################################################################
config['velocity'] = v
return next_w, config
def rmsprop(w, dw, config=None):
"""
Uses the RMSProp update rule, which uses a moving average of squared
gradient values to set adaptive per-parameter learning rates.
config format:
- learning_rate: Scalar learning rate.
- decay_rate: Scalar between 0 and 1 giving the decay rate for the squared
gradient cache.
- epsilon: Small scalar used for smoothing to avoid dividing by zero.
- cache: Moving average of second moments of gradients.
"""
if config is None: config = {}
config.setdefault('learning_rate', 1e-2)
config.setdefault('decay_rate', 0.99)
config.setdefault('epsilon', 1e-8)
config.setdefault('cache', np.zeros_like(w))
dr = config['decay_rate']
prev_cache = config['cache']
next_w = None
cache = dr * prev_cache + (1-dr)* dw**2
next_w = w - config['learning_rate'] * dw / (np.sqrt(cache) + config['epsilon'])
###########################################################################
# Implement the RMSprop update formula, storing the next value of w #
# in the next_w variable. Don't forget to update cache value stored in #
# config['cache']. #
###########################################################################
config['cache'] = cache
return next_w, config
def adam(w, dw, config=None):
"""
Uses the Adam update rule, which incorporates moving averages of both the
gradient and its square and a bias correction term.
config format:
- learning_rate: Scalar learning rate.
- beta1: Decay rate for moving average of first moment of gradient.
- beta2: Decay rate for moving average of second moment of gradient.
- epsilon: Small scalar used for smoothing to avoid dividing by zero.
- m: Moving average of gradient.
- v: Moving average of squared gradient.
- t: Iteration number.
"""
if config is None: config = {}
config.setdefault('learning_rate', 1e-3)
config.setdefault('beta1', 0.9)
config.setdefault('beta2', 0.999)
config.setdefault('epsilon', 1e-8)
config.setdefault('m', np.zeros_like(w))
config.setdefault('v', np.zeros_like(w))
config.setdefault('t', 0)
B1 = config['beta1']
B2 = config['beta2']
    t = config['t'] + 1  # per the NOTE below: t must be incremented before any use
m = B1*config['m'] + (1-B1)*dw
mt = m / (1-B1**t)
v = B2*config['v'] + (1-B2)*(dw**2)
vt = v / (1-B2**t)
next_w = w - config['learning_rate'] * mt / (np.sqrt(vt) + config['epsilon'])
###########################################################################
# Implement the Adam update formula, storing the next value of w in #
# the next_w variable. Don't forget to update the m, v, and t variables #
# stored in config. #
# #
# NOTE: In order to match the reference output, please modify t _before_ #
# using it in any calculations. #
###########################################################################
config['t'] = t
config['m'] = m
config['v'] = v
return next_w, config
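# Bias-correction check (illustrative arithmetic only): at t=1 with beta1=0.9,
# m = 0.1*dw, an underestimate by exactly the factor (1 - beta1**1) = 0.1;
# dividing by that factor recovers mt = dw. The (1 - beta2**t) term corrects
# v the same way, which is why t is bumped before these formulas run.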
| [
"darshanparsanadap@gmail.com"
] | darshanparsanadap@gmail.com |
0c0f3727492283fb11e0d4ebc32b85337740b2e6 | 353d337eb066a1cb25217f5d9384e6eafb033f72 | /backend/env/bin/easy_install-3.8 | 7d82ddd7af9722fc6b2e56aaa8c6e0f61116d536 | [
"MIT"
] | permissive | KaustubhDighe/RedPlag | 9593089a669524510afd7e6d2aa831196180b55a | 6c6d7a26fcc5d23cf5bea26d3d1596a87f8dda7c | refs/heads/main | 2023-08-01T14:21:17.674453 | 2021-09-10T19:05:59 | 2021-09-10T19:05:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | 8 | #!/Users/amanyadav/Desktop/Repos/RedPlag/backend/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"amanyadav@cse.iitb.ac.in"
] | amanyadav@cse.iitb.ac.in |
f0ccfca09b01696b08adb4cc2e3765ed308e87fb | 5da98e4fe64ae1c87af75b22579b4cee053f03d3 | /main/views.py | 34cf04da51fbf670c4d505567dba6a83af497c61 | [] | no_license | pranav-nayak/mysite | 7c7893a1e52fd0f3497c09878ab445d2113be3fe | ec268eac40a8561d50c52bfbff393c35ff8917c4 | refs/heads/master | 2022-12-22T20:28:48.236431 | 2019-10-30T09:01:42 | 2019-10-30T09:01:42 | 218,214,899 | 0 | 0 | null | 2022-12-08T06:47:50 | 2019-10-29T06:00:45 | Python | UTF-8 | Python | false | false | 3,708 | py | from django.shortcuts import render, redirect
from django.http import HttpResponse
from .models import Tutorial, TutorialCategory, TutorialSeries
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import logout, authenticate, login
from django.contrib import messages
from .forms import NewUserForm
# Create your views here.
def single_slug(request, single_slug):
categories = [c.category_slug for c in TutorialCategory.objects.all()]
if single_slug in categories:
matching_series = TutorialSeries.objects.filter(tutorial_category__category_slug=single_slug)
series_urls = {}
for m in matching_series.all():
part_one = Tutorial.objects.filter(tutorial_series__tutorial_series=m.tutorial_series).earliest("tutorial_published")
series_urls[m] = part_one.tutorial_slug
return render(request=request,
template_name='main/category.html',
context={"tutorial_series": matching_series, "part_ones": series_urls})
tutorials = [t.tutorial_slug for t in Tutorial.objects.all()]
if single_slug in tutorials:
this_tutorial = Tutorial.objects.get(tutorial_slug=single_slug)
tutorials_from_series = Tutorial.objects.filter(tutorial_series__tutorial_series=this_tutorial.tutorial_series).order_by("tutorial_published")
this_tutorial_idx = list(tutorials_from_series).index(this_tutorial)
return render(request=request,
template_name='main/tutorial.html',
context={"tutorial": this_tutorial, "sidebar": tutorials_from_series, "this_tutorial_idx": this_tutorial_idx})
return HttpResponse(f"'{single_slug}' does not correspond to anything we know of!")
def homepage(request):
return render(request = request,
template_name = "main/categories.html",
context = {"categories": TutorialCategory.objects.all})
def home(request):
return HttpResponse("pythonprogramming.net homepage! Wow so <strong>#amaze.</strong>")
def register(request):
if request.method == "POST":
form = NewUserForm(request.POST)
if form.is_valid():
user = form.save()
username = form.cleaned_data.get('username')
messages.success(request, f"New account created: {username}")
login(request, user)
messages.info(request, f"You are now logged in as: {username}")
return redirect("main:homepage")
else:
for msg in form.error_messages:
messages.error(request, f"{msg}: {form.error_messages[msg]}")
form = NewUserForm
return render(request = request,
template_name = "main/register.html",
context = {"form": form})
def logout_request(request):
logout(request)
messages.info(request, "Logged out successfully!")
return redirect("main:homepage")
def login_request(request):
if request.method == "POST":
form = AuthenticationForm(request, data=request.POST)
if form.is_valid():
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('password')
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
messages.info(request, f"Successfully logged in as {username}!")
return redirect("main:homepage")
else:
messages.error(request, "Invalid username or password!")
else:
messages.error(request, "Invalid username or password!")
form = AuthenticationForm()
return render(request,
"main/login.html",
{"form": form})
def account(request):
messages.info(request, f"Not configured yet....")
return redirect("main:homepage")
| [
"prasanna@latlong.in"
] | prasanna@latlong.in |
e150203f31888e1bdb6f7e783e9d245225c5296f | ade0202e633089e0eeae0f27e07f053aa3ba1ba5 | /Gui/utils.py | 388d3890ec6710a6ede0f0a9585a17d2d245edae | [] | no_license | reemharel22/GraphiX | 0ef6cfca3f08065f172b2aca677d3aa02f674bb7 | 2f3a95bc13ed38f8f713b3a405410e70ecdad765 | refs/heads/master | 2021-05-23T12:31:25.167035 | 2021-01-03T09:16:07 | 2021-01-03T09:16:07 | 253,287,144 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,996 | py | import socket
from asyncore import *
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import numpy as np
import math
import GraphiX
# For connections:
class Handler(dispatcher):
def __init__(self, socket, asyncon):
dispatcher.__init__(self, socket)
self.asyncon = asyncon
def handle_read(self):
self.asyncon.msg = self.recv(4096)
class AsyncConn(dispatcher):
def __init__(self, port=12346):
self.port = port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
dispatcher.__init__(self)
connected = False
while not connected:
try:
self.set_socket(s)
self.msg = ""
self.accepted = False
self.bind(('127.0.0.1', self.port))
connected = True
except socket.error, e:
connected = False
print "Connecting to port {0} failed. Trying port {1}".format(self.port, self.port - 1)
self.port = self.port - 1
self.listen(5)
self.set_reuse_addr()
def handle_read(self):
print "reading"
data = self.recv(1024)
if data:
print data
def handle_write(self):
pass
def readable(self):
return True
def handle_accept(self):
self.accepted = True
socket, addr = self.accept()
Handler(socket, self)
def kys(self):
self.close()
# Static functions:
def area_ptc(x1, y1, x2, y2, x3, y3):
return (x2 - x1) * (y3 - y1) - (y2 - y1)*(x3 - x1)
def change_color_name(clr):
return clr.lower()
def guiname_to_dataname(s_name):
name = str(s_name.lower())
if name.__eq__("pressure"):
return name.upper()
elif name.__eq__("artificial viscosity"):
return "arti_vis".upper()
elif name.__eq__("density"):
return name.upper()
elif name.__eq__("internal energy"):
return "internal_e".upper()
elif name.__eq__("temperature"):
return name.upper()
elif name.__eq__("t_radiation"):
return name.upper()
elif name.__eq__("kria"):
return name.upper()
elif name.__eq__("pixt"):
return name.upper()
elif name.__eq__("epsp"):
return name.upper()
elif name.__eq__("shet_ng"):
return name.upper()
elif name.__eq__("thermal"):
return name.upper()
elif name.__eq__("fusion_rate"):
return name.upper()
elif name.__eq__("ra_in"):
return name.upper()
elif name.__eq__("tshock"):
return name.upper()
elif name.__eq__("roshock"):
return name.upper()
elif name.__eq__("epspc"):
return name.upper()
elif name.__eq__("epspd"):
return name.upper()
elif name.__eq__("phase"):
return name.upper()
def calculate_limits_aspect_ratio(x_min, x_max, y_min, y_max, h=650, w=1250):
delta_x = abs(x_max - x_min)
delta_y = abs(y_max - y_min)
height = h
width = w
if delta_x > (w/h) * delta_y:
y_max1 = height * delta_x / width + y_min
if y_max1 > y_max:
y_max = y_max1
else:
x_max1 = width * delta_y / height + x_min
if x_max1 > x_max:
x_max = x_max1
return x_min, x_max, y_min, y_max
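# Example with the default 1250x650 canvas (assumed inputs): limits x in [0, 10],
# y in [0, 1] satisfy delta_x > (w/h)*delta_y, so y_max is padded to
# 650*10/1250 = 5.2 and the returned box matches the widget's aspect ratio.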
def get_numbers_in_file(f_name):
tmp = f_name.split('.')
return str(int(tmp[len(tmp) - 1]))
def get_last_number_in_file(f_name):
tmp = f_name.split('.')
while file_exists(f_name):
tmp = f_name.split('.')
num = str(int(tmp[len(tmp) - 1]) + 1)
tmp[len(tmp) - 1] = num
f_name = ".".join(tmp)
return num
def file_exists(f_name):
if not os.path.isfile(f_name):
return False
return True
def parse_operator(str, data, max, min):
    str = str.lstrip(' ')  # lstrip returns a new string; the result must be kept
    # First we want the first argument before the operator itself, then we want the second argument after the operator
    # a OPERATOR b
    operator = find_operator(str)
    if not operator == "None":
        if str.count(operator) > 1:
            # we have the same operator twice; take everything before its first occurrence
            a = str.partition(operator)[0]
        else:
            a = str.split(operator)[0]
        b = str.split(operator)[1]
        if b != "":
            # done parsing
            return execute_operator(a, b, operator)
def find_operator(str):
if str.__contains__("/"):
        operator = "/"
elif str.__contains__("*"):
operator = "*"
elif str.__contains__("+"):
operator = "+"
elif str.__contains__("-"):
operator = "-"
elif str.__contains__("^"):
operator = "^"
else:
operator = "None"
return operator
def execute_operator(a, b, operator):
if operator.__eq__("/"):
return a / b
elif operator.__eq__("*"):
return a * b
elif operator.__eq__("+"):
return a + b
elif operator.__eq__("-"):
return a - b
elif operator.__eq__("^"):
return a ** b
else:
return a
def parse_execute_operator(char_arg, args_values=None):
arg = None
arg_type = None
# print parse_fortran_type('1.72223046614d26')
char_arg = str(char_arg) # casting into char for all types
if len(char_arg) > 0:
if 'E' in char_arg and arg is None:
try:
a = float(char_arg[:char_arg.index('E')])
b = float(char_arg[char_arg.index('E') + 1:])
arg = a * pow(10, b)
arg_type = "double"
except ValueError:
pass
elif 'e' in char_arg and arg is None:
try:
a = float(char_arg[:char_arg.index('e')])
b = float(char_arg[char_arg.index('e') + 1:])
arg = a * pow(10, b)
arg_type = "double"
except ValueError:
pass
if 'D' in char_arg and arg is None:
try:
a = float(char_arg[:char_arg.index('D')])
b = float(char_arg[char_arg.index('D') + 1:])
arg = a * pow(10, b)
arg_type = "double"
except ValueError:
pass
elif 'd' in char_arg and arg is None:
try:
a = float(char_arg[:char_arg.index('d')])
b = float(char_arg[char_arg.index('d') + 1:])
arg = a * pow(10, b)
arg_type = "double"
except ValueError:
pass
if '.' in char_arg and arg is None:
try:
a = float(char_arg)
arg = float(char_arg)
arg_type = "double"
except ValueError:
pass
calc = ['+', '-', '*', '/', '^', '(', ')']
try:
for c in calc:
if c in char_arg:
try:
# nsp = NumericStringParser() # complex expressions
# arg = nsp.eval(char_arg)
try:
arg = eval(char_arg, args_values)
arg_type_by_python = type(arg)
if arg_type_by_python == int:
arg_type = "integer"
if arg_type_by_python == float:
arg_type = "double"
except NameError or ValueError or SyntaxError:
arg = char_arg
arg_type = "char"
return arg, arg_type
except SyntaxError:
pass
except ValueError:
pass
if arg is None:
breaks = False
for c in char_arg:
try:
int(c)
except ValueError:
breaks = True
if breaks is False:
arg = int(char_arg)
arg_type = "integer"
if arg is None: # meaning all of the aboves didnt match
arg = char_arg
arg_type = "char"
return arg, arg_type
def remove_files(file_names):
for f in file_names:
if file_exists(f):
os.remove(f)
def convert_1d_to_2d_i(k, nx):
j = int(k / nx)
i = int(k - j * nx)
return int(i), int(j)
def position_to_cell(x, y, x_coord, y_coord, nx, ny):
x = np.float64(x)
y = np.float64(y)
return GraphiX.position_to_cell(x, y, x_coord, y_coord, nx, ny)
def get_max_min_coordinates(list_vertices):
xmax = -100000000.0
xmin = 100000000.0
ymax = -100000000.0
ymin = 100000000.0
for i in range(len(list_vertices["i"])):
x = float(list_vertices["x"][i])
y = float(list_vertices["y"][i])
xmin = min(x, xmin)
xmax = max(x, xmax)
ymin = min(y, ymin)
ymax = max(y, ymax)
return float(xmin), float(xmax), float(ymin), float(ymax)
def find_points_in_polygon(list_vertices, x_coord, y_coord):
xmin, xmax, ymin, ymax = get_max_min_coordinates(list_vertices)
list_points = []
e = (xmax - xmin) / 100
for j in range(len(x_coord)):
x = x_coord[j]
y = y_coord[j]
# print xmin, x, xmax, ymin, y, ymax
if x < xmin or x > xmax or y < ymin or y > ymax:
continue
else:
y2 = ymin - e
x2 = x
intersection = 0
k = len(list_vertices["i"]) - 1
for i in range(k):
if is_intersect(list_vertices["x"][i], list_vertices["y"][i],
list_vertices["x"][i + 1], list_vertices["y"][i + 1], x, y, x2, y2):
intersection = intersection + 1
if is_intersect(list_vertices["x"][0], list_vertices["y"][0],
list_vertices["x"][k], list_vertices["y"][k], x, y, x2, y2):
intersection = intersection + 1
if not intersection % 2 == 0:
list_points.append(j)
return list_points
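# The loop above is the even-odd (ray casting) rule: a ray is dropped from each
# grid point straight down past ymin and the point counts as inside exactly
# when it crosses an odd number of polygon edges. E.g. for a unit square, the
# point (0.5, 0.5) crosses only the bottom edge -- one crossing, so it is kept.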
def is_intersect(v1x1, v1y1, v1x2, v1y2, v2x1, v2y1, v2x2, v2y2):
v1x1, v1y1, v1x2, v1y2, v2x1, v2y1, v2x2, v2y2 = float(v1x1), float(v1y1), float(v1x2), float(v1y2), float(v2x1)\
, float(v2y1), float(v2x2), float(v2y2)
# See: http: // en.wikipedia.org / wiki / Linear_equation
a1 = v1y2 - v1y1
b1 = v1x1 - v1x2
c1 = (v1x2 * v1y1) - (v1x1 * v1y2)
d1 = (a1 * v2x1) + (b1 * v2y1) + c1
d2 = (a1 * v2x2) + (b1 * v2y2) + c1
if d1 > 0 and d2 > 0:
return False
if d1 < 0 and d2 < 0:
return False
a2 = v2y2 - v2y1
b2 = v2x1 - v2x2
c2 = (v2x2 * v2y1) - (v2x1 * v2y2)
# Calculate d1 and d2 again, this time using points of vector 1.
d1 = (a2 * v1x1) + (b2 * v1y1) + c2
d2 = (a2 * v1x2) + (b2 * v1y2) + c2
# Again,if both have the same sign ( and neither one is 0),
# no intersection is possible.
if d1 > 0 and d2 > 0:
return False
if d1 < 0 and d2 < 0:
return False
# If we get here, only two possibilities are left.Either the two vectors intersect in exactly
# one point or they are collinear, which means they intersect in any number of points from zero to infintie
if (a1 * b2) - (a2 * b1) == 0.0:
return True
return True
def get_real_path(f_name, f_name2):
path = f_name
if f_name.__contains__('~'):
path = os.path.expanduser('~') + f_name.split('~')[1]
elif f_name.__contains__('..'):
path = path
else:
splt = f_name2.split('/')[len(f_name2.split('/')) - 2]
path = os.path.abspath(os.path.join(f_name2 + "/../" + splt, f_name))
print path
return path
# def same_folder_different_file(f1, f2):
# st = f2
# if not f2.__contains__('~'):
# st = os.path.join(f1.split('/')[0:len(f1) - 2], f2)
# return st
| [
"reemharel22@gmail.com"
] | reemharel22@gmail.com |
9a5ec1a187e75627e6dcb81ca8146aa919e1183d | 69b4f343861f6fb366c8fbbe590376a1bdd0c658 | /Tests.py | 055c802f1a0a921b5024e5e83d059629edfe7772 | [] | no_license | freQuensy23-coder/CaptchServiceAPI | 81f8a705193b07892f65cdc05b84a8ac6961b286 | 85a8b3585a4c6e6b98ae5c11375567b9d4b4dbfa | refs/heads/main | 2023-03-13T14:43:45.044766 | 2021-03-02T19:03:22 | 2021-03-02T19:03:22 | 341,452,481 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | import unittest
from generator import generate_font_image, generate_random_word, do_image_dim
from PIL import Image
class Tester(unittest.TestCase):
def setUp(self) -> None:
pass
def test_get_font(self):
# TODO
for i in range(5555):
generate_font_image()
def test_generate_random_word(self):
for i in range(50):
print(str(generate_random_word()))
def test_do_image_dim(self):
im = Image.open("background.jpg")
do_image_dim(im, force=4096).show() | [
"you@example.com"
] | you@example.com |
a790801e0d32907e7ab4f399e4c1b336d7df7f4f | 9f65bbf608d48543093f91224055c9ed2299e150 | /migrations/versions/b964853843c0_.py | 33788e23944b4be5326f52b677b09f7e81daffb6 | [] | no_license | izowmart/Discover-flask-application | e1815a83174e21fdb8dcdbf9e88fbb4eaf8c650e | ce2df09d845e8a729cd932cd17f873cba2b86756 | refs/heads/master | 2020-04-27T08:08:16.820418 | 2019-03-06T17:25:20 | 2019-03-06T17:25:20 | 174,160,138 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,333 | py | """empty message
Revision ID: b964853843c0
Revises:
Create Date: 2019-03-04 13:23:05.433648
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b964853843c0'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(), nullable=False),
sa.Column('email', sa.String(), nullable=False),
sa.Column('password', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('articles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('author_id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=60), nullable=False),
sa.Column('body', sa.String(), nullable=False),
sa.Column('date_created', sa.DateTime(), nullable=True),
sa.Column('date_modified', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('articles')
op.drop_table('users')
# ### end Alembic commands ###
| [
"izowmart8@gmail.com"
] | izowmart8@gmail.com |
08238eddb1543c6910ab14ef02b443f07cda73b1 | 9e1ff492125867c73c6c76dc1da69cd2f161deb9 | /taxret/tax/models.py | 63507e59abd20a1a379907c87eae67735900f3d3 | [] | no_license | Akshobhya1234/Income-tax-management-system-dbms | 11dc44210131be6d6dab52087cf598d69044605f | 6079bff9c18cc849824ff1dfe85d922e709ea70b | refs/heads/master | 2021-09-22T08:26:39.435334 | 2021-09-09T09:22:27 | 2021-09-09T09:22:27 | 201,876,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,910 | py | # Create your models here.
from django.db import models
from datetime import date
from django.utils import timezone
from .choices import *
class user(models.Model):
pan = models.CharField(max_length=200,primary_key = True)
Year_of_filing = models.IntegerField()
Aadhar = models.IntegerField()
Mobile_no = models.IntegerField()
DOB = models.DateField()
#Age = models.IntegerField()
Fname = models.CharField(max_length=200)
Mname = models.CharField(max_length=200)
Lname = models.CharField(max_length=200)
email = models.EmailField(max_length=200)
@property
def Age(self):
return timezone.now().year - self.DOB.year
'''class Meta:
unique_together = (('pan','Year_of_filing'),)'''
def __str__(self):
return self.pan
class tax_on_capital_gain(models.Model):
Gain_category = models.CharField(max_length = 20, choices = GAIN_CAT )
Asset_type = models.CharField(max_length=20,choices = ASSET_TYPE)
Holding_period = models.IntegerField()
Tax_percentage = models.IntegerField(choices = TAX_PER)
pan = models.ForeignKey(user, on_delete = models.CASCADE, primary_key = True)
@property
    def toc(self):
        aa = 0  # default so a missing Capital_gain row cannot raise UnboundLocalError
        for i in Capital_gain.objects.all():
            if i.pan == self.pan:
                aa = i.Asset_amount
        if self.Holding_period > 10:
            return (self.Tax_percentage + 2) / 100 * aa
        else:
            return self.Tax_percentage / 100 * aa
def __str__(self):
return self.Asset_type
class Income_Tax_Slab(models.Model):
Age_Category = models.CharField(max_length=20, choices = AGE_CAT)
Income_Category = models.CharField(max_length = 20, choices = TAP)
pan = models.ForeignKey(user, on_delete = models.CASCADE,primary_key = True)
Tax_percentage = models.IntegerField(choices = TAP1)
#Year_of_filing = models.ForeignKey(user, on_delete = models.CASCADE)
class Meta:
unique_together = (('pan',),)
def __str__(self):
return self.Age_Category
class Salary(models.Model):
Standard_Deduction =models.IntegerField()
Special_allowance = models.IntegerField()
HRA = models.IntegerField( )
Basic_salary = models.IntegerField( )
pan = models.ForeignKey(user, on_delete = models.CASCADE, primary_key = True)
@property
def totinc(self):
return self.Special_allowance+self.Standard_Deduction+self.Basic_salary+self.HRA
@property
def tottax(self):
oi = Other_Income.objects.all()
its = Income_Tax_Slab.objects.all()
ded = Deduction.objects.all()
for i in oi:
if(i.pan== self.pan):
ot = i.oitot
for j in its:
if(j.pan == self.pan):
tp = j.Tax_percentage
for d in ded:
if(d.pan == self.pan):
de=d.totded1
return (self.totinc+ot-de)*tp/100
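    # Worked example (illustrative figures only): a salary total of 500000 plus
    # other income of 50000, less 150000 in deductions, at a 10% slab gives
    # (500000 + 50000 - 150000) * 10 / 100 = 40000 payable.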
'''class Meta:
unique_together = (('pan','Basic_salary'),)'''
def __str__(self):
return self.pan
class Capital_gain(models.Model):
Asset_amount = models.IntegerField()
pan = models.ForeignKey(user, on_delete = models.CASCADE,primary_key = True)
Asset_type = models.CharField(max_length = 20 , choices = ASSET_TYPE )
'''class Meta:
unique_together=(('pan','Asset_type'),)'''
def __str__(self):
return self.Asset_type
class Deduction(models.Model):
pan = models.ForeignKey(user, on_delete = models.CASCADE,primary_key = True)
Life_insurance = models.IntegerField()
PPF = models.IntegerField()
NSC = models.IntegerField( )
Tax_saving_fd = models.IntegerField( )
Stamp_duty_reg = models.IntegerField( )
EPF = models.IntegerField( )
ELSS = models.IntegerField( )
@property
def totded1(self):
return self.Life_insurance+self.PPF+self.NSC+self.Tax_saving_fd+self.Stamp_duty_reg+self.ELSS+self.EPF
'''class Meta:
unique_together = (('pan','Life_insurance'),)'''
'''def __str__(self):
return self.pan'''
class Other_Income(models.Model):
pan = models.ForeignKey(user, on_delete = models.CASCADE,primary_key = True)
Savings = models.IntegerField()
Rent = models.IntegerField()
FD = models.IntegerField( )
@property
def oitot(self):
return self.Savings+self.Rent+self.FD
class Meta:
unique_together = (('pan','Savings'),)
'''def __str__(self):
return self.pan'''
'''class taxcalc(models.Model):
pan = models.ForeignKey(user, on_delete = models.CASCADE)
@property
def totinc(self):'''
| [
"noreply@github.com"
] | Akshobhya1234.noreply@github.com |
438840da9cd1e29ccbe13f649deccc2aab5d1664 | a30f6dda1f5268dfd590ca72911b0f30af2b046b | /webpersonal/manage.py | 5d3413089186b56f96791903d744a7a23289134a | [] | no_license | elejandra/pag_web_django | 4b4ae09705bb96f3c32fe6a8866f28e82b32c189 | 0d9ba4ef5673f22b84721a5681248cc353e69c8d | refs/heads/master | 2020-08-17T10:53:19.545104 | 2019-10-18T01:40:01 | 2019-10-18T01:40:01 | 215,655,322 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webpersonal.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"alejandrams846@gmail.com"
] | alejandrams846@gmail.com |
069fa8a45804c401cd23fa1b7d09a769b2e3a3f8 | bfd729146d631b62e57f5ff634344770f2ed9fcd | /standard/testrun.py | 1d938a4f78ce03ed579d580c54ac952001ba401e | [] | no_license | vnerhus/VNet | f2d2212f5479273805049f80283de81631ebb8cd | 22cf145680dcf145f2551fa0c1cb48f24a5f143f | refs/heads/master | 2021-07-05T04:03:26.517095 | 2017-09-28T12:54:29 | 2017-09-28T12:54:29 | 105,148,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | from rnn import lstm
import os
# -- TEXT IMPORT -- #
def text_import(file_path):
input_file = os.path.join(file_path)
with open(input_file, "r", errors='ignore') as f:
text = f.read()
text = text[81:]
return text
text = text_import('./data/simpsons/moes_tavern_lines.txt')
network = lstm(text)
network.setHyperparameters(hyperparameters = {
"num_epochs": 200, # Number of training epochs
"learning_rate": 0.01, # Learning rate
"batch_size": 128, # Size of each batch
"sequence_length": 16, # Length of sequence
"embed_dim": 128, # Embedding dimension size
"lstm_size": 128, # Lstm size
"stack_size": 1, # Number of stacked LSTM-cells
})
network.train()
network.generate(prime_words=("homer_simpson:", "moe_szyslak:"))
| [
"vegard@nerhus.no"
] | vegard@nerhus.no |
4496abf9846c71906f8b0516bc7b92c442b777d8 | 5dfac67e3054ffe8acdc44c4f48c0fd9e0af845e | /fetch_points_api/site/routes.py | 3dec5da854db451eba6466faabf49e9bc7bd9435 | [] | no_license | zachPorras/fetch-api | be7e78fc41f6288c2e73db6cd2c354aa9771ce61 | 19cdbb40e1dfd036faedee28a831bcc02b667378 | refs/heads/main | 2023-08-25T03:55:01.086759 | 2021-10-16T03:29:42 | 2021-10-16T03:29:42 | 417,236,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,545 | py | from flask import Blueprint, render_template, request, jsonify, redirect
from flask.helpers import url_for
from fetch_points_api.forms import SpendPointsForm, AddTransactionsForm, CheckBalances
from fetch_points_api.models import db, Transactions, transactions_schema
from sqlalchemy.sql import func
from sqlalchemy import asc, update
site = Blueprint('site', __name__, template_folder='site_templates')
@site.route('/')
def home():
return render_template('index.html')
@site.route('/balances', methods=['GET'])
def balances():
balances = db.session.query(Transactions.partner_name, func.sum(Transactions.points))
balances = balances.group_by(Transactions.partner_name).all()
balances_dict = dict(balances)
return balances_dict
@site.route('/add_transactions', methods=['POST', 'PUT', 'GET'])
def add_transactions():
form = AddTransactionsForm()
if request.method == 'POST' and form.validate_on_submit:
points = form.points.data
partner_name = form.partner_name.data
new_transaction = Transactions(partner_name, points)
db.session.add(new_transaction)
db.session.commit()
return redirect(url_for('site.add_transactions', form = form))
return render_template('add_transactions.html', form = form)
@site.route('/spend_points', methods=['POST', 'PUT', 'GET', 'DELETE'])
def spend_points():
form = SpendPointsForm()
    if form.validate_on_submit():
        points = form.points.data
        print(f'points: {points}')
        # sort transactions by date, ascending
        spent_points = db.session.query(Transactions.partner_name, Transactions.points)
        spent_points = spent_points.order_by(asc(Transactions.timestamp)).all()
        first_partner_points = spent_points[0][1]
        print(f'first partner points before: {first_partner_points}')
        # if first_partner_points >= points:
# if point total <= oldest transaction, subtract from oldest transaction & delete if at zero
# updated_points = Transactions.query.order_by(Transactions.timestamp)
# print(updated_points)
# db.session.commit()
# print(spent_points)
# conditionals for whether point total will bring partner total to zero
# if point total is higher than oldest transaction, move on to deduct the rest from the next transaction
# if partner point total reaches zero, move on to next transaction
# return receipt of point transactions in each response
return render_template('spend_points.html', form = form) | [
"porraszach@gmail.com"
] | porraszach@gmail.com |
5d1fcd1864f93ef59776ad5386b3de41af5b51ff | f310507ed02a3bf9b182ddf51e6f1fc2ea4addf7 | /unifiedpost/api/routes.py | fcaca96703bd84740bf521e6699170e8b2859fc3 | [] | no_license | Aristekrat/unified_api | 970e813405a2ad1c993f3092f3265437a5cd3acb | 58caccfd022c57be1a3dfe9aeab132913214cf98 | refs/heads/master | 2023-02-18T08:58:20.350482 | 2020-11-28T14:39:35 | 2020-11-28T14:39:35 | 314,629,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | from aiohttp.web_app import Application
from .views import view_get_healthcheck
def setup_common_api_routes(app: Application):
app.router.add_get('/health', view_get_healthcheck)
| [
"porovozls@gmail.com"
] | porovozls@gmail.com |
823502ffd7540da3c869ebbc82a6e60a9a8ed019 | e71cd95491da86294b0a152cf474991292af71ba | /51-60/60_Prime_Pair_Sets/isPrime.py | 30c7cb2f67596446a1364cc1e0dd7c0891aa2832 | [] | no_license | omochibuster/project_euler | be2ba59a9b017716a7c1220129b297c2753ef0b3 | 1db5b665f583ef336406abf921538ee24fcfe182 | refs/heads/master | 2020-06-09T05:31:34.757743 | 2015-03-16T05:29:13 | 2015-03-16T05:29:13 | 31,703,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | from math import sqrt
def isPrime(number):
    if number < 2:  # covers 0, 1 and negatives (sqrt of a negative would raise)
        return False
    if number == 2:
        return True
    if number % 2 == 0:
        return False
    end = int(sqrt(number)) + 1
    for i in range(3, end, 2):
        if number % i == 0:
            return False
return True | [
"omochibuster@yahoo.co.jp"
] | omochibuster@yahoo.co.jp |
bf30ac927d4b6e2dbf1c220de3566e287eca76dd | f98edd76cc8432e84cd15bbb533a79270fd70f6e | /guessing_game/guessing_game.py | 45e88861c83943ea4c9f0a0b36f680c85e827517 | [] | no_license | crkubiak/pygames | 57c622f63a64b93a1cc1141a26092de52610bb35 | 53321aac15e03a609aaf74c6c00e4c9818c0cd14 | refs/heads/master | 2020-05-20T23:43:12.375591 | 2019-05-09T13:56:22 | 2019-05-09T13:56:22 | 185,809,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,439 | py | from random import randint
def guessing_game():
number_to_guess = randint(1,1000)
number_of_guesses = 0
solved = False
previous_guesses = []
name = input('What is your name: ') or 'PlayerOne'
print(f"Well {name}, let's play a game. I'm thinking of a number between 1 and 1000. I'll give you 9 guesses to get it right.")
print(number_to_guess)
while solved == False and number_of_guesses < 10:
if len(previous_guesses) > 0:
print(f'Previous guesses: {previous_guesses}')
guess = int(input('Guess a number: '))
if guess == number_to_guess and number_of_guesses == 0:
print(f'Holy cow!!! You guessed {guess} and got it on your first guess!!!')
solved = True
elif guess == number_to_guess:
number_of_guesses += 1
print(f'Bingo! You guessed {guess} and that was the right number. You took {number_of_guesses} guesses!')
solved = True
elif guess > number_to_guess:
number_of_guesses += 1
previous_guesses.append(guess)
print(f'Too high! Guesses: {number_of_guesses}')
else:
number_of_guesses += 1
previous_guesses.append(guess)
print(f'Too low! Guesses: {number_of_guesses}')
if number_of_guesses == 10 and solved == False:
print(f'Game over {name}!!! That was your last guess.')
guessing_game() | [
"crkubiak@gmail.com"
] | crkubiak@gmail.com |
6c2e15fe001ee7f4ada3747278a504be5e557b84 | f4b79529109fbb4055f334d0d9c7c96cb0710447 | /colour/examples/colorimetry/examples_photometry.py | 1a933c8490479fed65be2c0deaf6b89803b4c56e | [
"BSD-3-Clause"
] | permissive | trevorandersen/colour | 167381b3d03e506a270a8d2a519a164808995437 | 02b595b26313c4b4f55adc41d599f90c4c9edbcd | refs/heads/develop | 2021-07-15T04:48:19.585586 | 2021-01-23T23:51:44 | 2021-01-23T23:51:44 | 230,421,054 | 0 | 0 | BSD-3-Clause | 2019-12-28T12:54:20 | 2019-12-27T10:10:30 | null | UTF-8 | Python | false | false | 858 | py | # -*- coding: utf-8 -*-
"""
Showcases *Photometry* computations.
"""
import colour
from colour.utilities import message_box
message_box('"Photometry" Computations')
sd_light_source = colour.SDS_LIGHT_SOURCES['Neodimium Incandescent']
message_box(('Computing "Luminous Flux" for given spectral '
'distribution:\n'
'\n\t{0}'.format(sd_light_source.name)))
print(colour.luminous_flux(sd_light_source))
print('\n')
message_box(('Computing "Luminous Efficiency" for given spectral '
'distribution:\n'
'\n\t{0}'.format(sd_light_source.name)))
print(colour.luminous_efficiency(sd_light_source))
print('\n')
message_box(('Computing "Luminous Efficacy" for given spectral '
'distribution:\n'
'\n\t{0}'.format(sd_light_source.name)))
print(colour.luminous_efficacy(sd_light_source))
| [
"thomas.mansencal@gmail.com"
] | thomas.mansencal@gmail.com |
6e2a7b3cc1ed7d40b6b17934376a95dfec013ebe | 00b830e56b4a51b258774deb58d2fd5a7ca73675 | /download_time.py | 8c895f3165213fe485fbacc6560f8bb56973e703 | [] | no_license | NickCorneau/PythonAlgorithms | 11f79e4a5dd3cc739d7b452fdaeb4bdd4022c21d | 743616bd0c71bbfcc3ba25790a0c307c7ab5e161 | refs/heads/master | 2021-01-01T17:56:55.597115 | 2015-08-25T12:54:13 | 2015-08-25T12:54:13 | 40,202,630 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,377 | py |
def convert_seconds(s):
h = int(s // 3600)
m = int((s % 3600) // 60)
sec = s % 60
if (sec % 1 == 0):
sec = int(sec)
ho = ' hours'
mi = ' minutes'
se = ' seconds'
comma = ', '
if h == 1: ho = ho[:len(ho)-1]
if m == 1: mi = mi[:len(mi)-1]
if sec == 1: se = se[:len(se)-1]
answer = [h, ho, comma, m, mi, comma, sec, se]
answer = [str(x) for x in answer]
return ''.join(answer)
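# Example: convert_seconds(3661) -> '1 hour, 1 minute, 1 second'; note the
# singular forms, and that fractional seconds survive, e.g. 90.5 ->
# '0 hours, 1 minute, 30.5 seconds'.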
# Map each size/rate unit label to its size in bits ('b' = bits, 'B' = bytes).
UNIT_BITS = {
    'kb': 2 ** 10, 'kB': 2 ** 10 * 8,
    'Mb': 2 ** 20, 'MB': 2 ** 20 * 8,
    'Gb': 2 ** 30, 'GB': 2 ** 30 * 8,
    'Tb': 2 ** 40, 'TB': 2 ** 40 * 8,
}

def download_time(data1, type1, data2, type2):
    # data1/type1 describe the payload size, data2/type2 the link rate per second
    bit_size1 = float(data1) * UNIT_BITS[type1]
    bit_size2 = float(data2) * UNIT_BITS[type2]
    seconds = bit_size1 / bit_size2  # was misleadingly named download_speed
    return convert_seconds(seconds)
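# Example (assumed figures): download_time(1, 'GB', 10, 'Mb') treats the second
# pair as a per-second link rate: 8 * 2**30 bits / (10 * 2**20 bits/s)
# = 819.2 seconds, roughly 13 minutes 39 seconds.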
| [
"nicholascorneau@gmail.com"
] | nicholascorneau@gmail.com |
1ffa3df0ac91de114c1a556007ff7b48fdae3a68 | 36d067816e0c1c801898f3fcfe122edc746dfdb6 | /neutron/tests/unit/bigswitch/test_agent_scheduler.py | fe348eb1e040aee112633d4e19ba7bad5477ff1a | [
"Apache-2.0"
] | permissive | osrg/quantum | 9fd7b126167a15200d63277c36321d9b758d3680 | b78eea6146145793a7c61705a1602cf5e9ac3d3a | refs/heads/master | 2023-09-01T03:04:53.297582 | 2014-01-13T21:57:04 | 2014-01-20T21:49:55 | 2,808,163 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,126 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron.tests.unit.bigswitch import test_base
from neutron.tests.unit.openvswitch import test_agent_scheduler
class BigSwitchDhcpAgentNotifierTestCase(
test_agent_scheduler.OvsDhcpAgentNotifierTestCase,
test_base.BigSwitchTestBase):
plugin_str = ('%s.NeutronRestProxyV2' %
test_base.RESTPROXY_PKG_PATH)
def setUp(self):
self.setup_config_files()
self.setup_patches()
super(BigSwitchDhcpAgentNotifierTestCase, self).setUp()
| [
"kevin.benton@bigswitch.com"
] | kevin.benton@bigswitch.com |
25e09abc936c8a17a9c8354f57ee811964894c10 | 4217ffbf12a59e5bcb5cec0d068507c1dff4e88f | /feature/feature.py | 751adf3e71b3f85230c2b66a32949e0ca118801b | [] | no_license | hoangtheanhhp/vnsplitter | fc0547d86f52bc656dc5fa48d722ea6906f84d41 | 4ede8bd9c63222a03efde0fceb0ca878d86c7925 | refs/heads/master | 2020-05-02T09:03:36.833280 | 2019-03-26T19:59:22 | 2019-03-26T19:59:22 | 177,858,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,142 | py | # -*- coding: utf-8 -*-
import re
from map import CharMap
class Feature:
NONE_SPLITER_OFFSET = 100
NEXT_LOCAL_STEP = 15
MAX_NAME_LENGTH = 15
HALF_OFFSET_VECTOR = 7
CHAR_MAP = CharMap()
SPLITTER_CHAR = 200
def gen_feature_vector(self, str=None, pos_start=0, pos_end=0):
features = []
for i in xrange(pos_start, pos_end):
features.append(Feature.char2int(str, i))
return features
def gen_feature_matrix(self, str):
features_list = []
label_list = []
idx = 0
str_len = len(str)
while idx < str_len:
current_char = str[idx]
try:
next_char = str[idx+1]
            except IndexError:
next_char = '\n'
pos_start = idx - Feature.HALF_OFFSET_VECTOR
pos_end = idx + Feature.HALF_OFFSET_VECTOR + 1
if Feature.is_splitter_candidate(current_char):
if Feature.is_new_line_char(next_char):
features_list.append(self.gen_feature_vector(str, pos_start, pos_end))
label_list.append(1)
else:
features_list.append(self.gen_feature_vector(str, pos_start, pos_end))
label_list.append(0)
idx += 1
return features_list, label_list
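    # Illustrative use (assumed input): in u"Ha noi.\nViet Nam" the '.' is a
    # splitter candidate followed by a newline, so one window of
    # 2*HALF_OFFSET_VECTOR + 1 = 15 char codes around it is emitted with
    # label 1; a '.' followed by any other character would be labelled 0.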
@staticmethod
def char2int(str, idx = 0):
if idx <=0 or idx >= len(str):
return 0
elif Feature.is_new_line_char(str[idx]):
return Feature.CHAR_MAP.char2int[u' ']
else:
try:
return Feature.CHAR_MAP.char2int[str[idx]]
            except KeyError:
return Feature.CHAR_MAP.except_value
@staticmethod
def is_space_char(char):
return char == " "
@staticmethod
def is_splitter_candidate(char):
return char == u"." or char == u'!' or char == u'?'
@staticmethod
def is_new_line_char(char):
return char == "\n" or char == "\r"
@staticmethod
def is_3_dots(str, idx):
try:
return str[idx:idx+3] == "..."
except:
return False | [
"anhht@haposoft.com"
] | anhht@haposoft.com |
395fa81b18711e219bc6cd2cb0dbbacfb2042d17 | 6230dd7501bb504643cb3b8d8d18889f4bc9e292 | /web_frameworks/web_frameworks/settings.py | bbbc17a5d7ddd4c0c58cd449416a3ff2c7e94384 | [
"MIT"
] | permissive | Minkov/python-web-frameworks-2020-11 | f83a8560cbbcd06549bcacaca83de3af4824adc6 | 5857bb626792a9efe1f2d06677fa3779f5e2cc1d | refs/heads/main | 2023-01-21T07:02:46.141981 | 2020-12-01T18:30:20 | 2020-12-01T18:30:20 | 310,352,954 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,445 | py | """
Django settings for web_frameworks project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from os.path import join
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '73l^kfu(th-t&nk219%xvlg&29*5khenic!ji$(s-3r5-tc!ww'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'templates_advanced',
'resources',
'cbv',
'books',
'books_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'web_frameworks.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates']
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'web_frameworks.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication',
],
}
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATICFILES_DIRS = (
join(BASE_DIR, 'static'),
)
STATIC_ROOT = '/tmp/static'
MEDIA_URL = '/media/'
MEDIA_ROOT = join(BASE_DIR, 'media')
| [
"DonchoMinkov@gmail.com"
] | DonchoMinkov@gmail.com |
1a11c1156c151443fcc4da87cfe7a69120c4133e | aef5bb28678eb9ee6dd4038e8b874a2ca3d2b0e6 | /ml313/visualization.py | 563462952c84370106e8b1da1193455de00f517c | [
"BSD-3-Clause"
] | permissive | hubertgabrys/ml313 | aa19c533a1d0e27cf3b5313a7f9bfb150c46ec37 | efb92cea1f030cd3e3628713c406aea225edb65c | refs/heads/master | 2023-04-01T12:54:55.726914 | 2021-03-24T15:49:14 | 2021-03-24T15:49:14 | 166,210,094 | 2 | 1 | BSD-3-Clause | 2021-03-24T15:49:15 | 2019-01-17T10:51:33 | Python | UTF-8 | Python | false | false | 1,388 | py | import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import RobustScaler
def plot_roc_curve(df, column, y):
pipe = Pipeline(steps=[('scaler', RobustScaler()), ('clf', LogisticRegression())])
y_score = pipe.fit(df.loc[:, column].values.reshape(-1, 1), y).decision_function(
df.loc[:, column].values.reshape(-1, 1))
fpr, tpr, _ = metrics.roc_curve(y, y_score)
roc_auc = metrics.auc(fpr, tpr)
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange', lw=lw, label='AUC({}) = {:.2f}'.format(column, roc_auc))
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc="lower right")
plt.tight_layout()
plt.show()
return fpr, tpr, roc_auc
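# Sketch of a call (assumed data shapes): given a DataFrame `df` with a numeric
# column 'age' and a binary label array `y`,
#   fpr, tpr, auc_val = plot_roc_curve(df, 'age', y)
# fits the scaler+logistic pipeline on that single column, plots its ROC curve
# and returns the curve points along with the AUC.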
def plot_auc_vs_wavelet(df_auc, feat2flip, rownames, colnames):
arr_foo = df_auc.loc[feat2flip, 'AUC'].values.reshape(9, 154).T
fig, ax = plt.subplots(figsize=(30, 30))
plt.imshow(arr_foo)
plt.yticks(range(154), rownames)
plt.xticks(range(9), colnames, rotation='vertical')
ax.xaxis.tick_top()
ax.set_xlabel('Wavelet transformation')
ax.set_ylabel('Feature')
plt.colorbar()
plt.show()
| [
"hubert.gabrys@gmail.com"
] | hubert.gabrys@gmail.com |
9933cbc795b40b62702b6da4421655a58bf4437b | 1525f172ffc1ff5ab3fe9f5ed298d6fe02dca6c0 | /proj/dsc/mr_base.py | 04577340ca3516c1877c0084b785a7fd3d7e7bb0 | [] | no_license | PorkBBQ/jKM | f2f67504090d73a0ed3707b19ac61617e5ecb831 | ed7e9441ee73f058b3b5ee4f11084bb54f7bb92b | refs/heads/master | 2020-12-30T09:59:18.639839 | 2014-10-02T09:06:02 | 2014-10-02T09:06:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,544 | py | #!/home/biadmin/anaconda/bin/python
import sys
intermediate =[]
def main():
env='local' if len(sys.argv)==1 else 'mr'
localFile=r'D:\Josh\data\DSC01\heckle\web.log.2'
if env=='local':
mapper('local', localFile)
reducer('local')
elif env=='mr':
if sys.argv[1]=='-m':
mapper('mr')
elif sys.argv[1]=='-r':
reducer('mr')
def mapOut(env, line):
if env=='local':
intermediate.append(line)
elif env=='mr':
print(line)
def mapper(env, localFile=''):
if env=='local':
datafile = open(localFile)
it=datafile.readlines()
elif env=='mr':
it=sys.stdin
#------------------------------------------
for line in it:
line = line.strip()
mapOut(env, line)
#------------------------------------------
def reducer(env='local'):
if env=='local':
it=intermediate
elif env=='mr':
it=sys.stdin
#------------------------------------------
for line in it:
line = line.strip()
print(line)
#------------------------------------------
if __name__ == '__main__':
main()
'''
hadoop fs -rmr /tmp/dsc01_02a
hadoop jar $HADOOP_HOME/hadoop-streaming.jar \
-D mapred.job.name='dsc01_01a' \
-input /data/dsc01/heckle \
-input /data/dsc01/jeckle \
-output /tmp/dsc01_02a \
-file /home/biadmin/josh/script/dsc01/dsc01_02a.py \
-mapper "/home/biadmin/josh/script/dsc01/dsc01_02a.py -m" \
-reducer "/home/biadmin/josh/script/dsc01/dsc01_02a.py -r"
'''
| [
"gogoeowa@hotmail.com"
] | gogoeowa@hotmail.com |
49bed145881d38c6bea62960352ebda9dfbd4757 | 653661fed9e2a805506e33ac087a6cf33e0d0391 | /woo_commerce_ept/__manifest__.py | 5fa1a6ce70b18487b39835b3a2c29eefb751d4f9 | [] | no_license | chi-le/daan | 72e088343d42b8c373273df023848872da1eb759 | fde42306b832dd4b19d47b6146424b29288807b7 | refs/heads/master | 2023-05-08T00:24:50.959219 | 2021-04-07T04:28:54 | 2021-04-07T04:28:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,741 | py | # -*- coding: utf-8 -*-
{
'name': 'Odoo WooCommerce Connector',
'version': '13.0.36',
'license': 'OPL-1',
'category': 'Sale',
'summary': 'Odoo Woocommerce Connector helps you automate your vital business processes at Odoo by enabling '
'bi-directional data exchange between WooCommerce & Odoo.',
'author': 'Emipro Technologies Pvt. Ltd.',
'website': 'https://www.emiprotechnologies.com/',
'maintainer': 'Emipro Technologies Pvt. Ltd.',
'depends': ['auto_invoice_workflow_ept', 'common_connector_library'],
'data': ['security/group.xml',
'security/ir.model.access.csv',
'data/product_data.xml',
'data/ir_sequence.xml',
'data/ir_cron_data.xml',
'data/import_order_status_ept.xml',
'wizard/manual_queue_process_ept.xml',
'wizard/cron_configuration_ept.xml',
'views/instance_main_menu_view.xml',
'views/product_image_ept.xml',
'views/product_template_view.xml',
'wizard/woo_process_import_export_view.xml',
'views/web_templates.xml',
'views/sale_workflow_config.xml',
'wizard/res_config_view.xml',
'views/product_data_queue_ept_view.xml',
'views/product_data_queue_line_ept_view.xml',
'views/product_variant_view.xml',
'views/tags_ept.xml',
'views/product_attribute_view.xml',
'views/product_attribute_term_view.xml',
'views/product_category_view.xml',
'views/customer_data_queue_ept.xml',
'views/customer_data_queue_line_ept.xml',
'views/order_data_queue_ept.xml',
'views/order_data_queue_line_ept.xml',
'views/webhook_ept.xml',
'views/common_log_book_ept.xml',
'views/sale_order.xml',
'views/stock_picking_view.xml',
'views/res_partner.xml',
'views/payment_gateway.xml',
'views/account_move_view.xml',
'views/instance_view.xml',
'wizard/cancel_refund_order_wizard.xml',
'views/coupons_ept.xml',
'views/coupon_data_queue_ept.xml',
'views/coupon_data_queue_line_ept.xml',
'wizard/prepare_product_for_export.xml',
'data/change_type.xml'
],
'installable': True,
'auto_install': False,
'application': True,
'active': False,
'images': ['static/description/woocommerce-odoo-cover.gif'],
'live_test_url': 'https://www.emiprotechnologies.com/free-trial?app=woo-commerce-ept&version=13&edition=enterprise',
'price': 379.00,
'currency': 'EUR',
}
| [
"falinwa@falinwa.com"
] | falinwa@falinwa.com |
12dff4722892f3042a30723dc845bff0321cbf83 | c6f47e7e96c5a9f7f0f24026dffe60fbf5bb034d | /notebooks/pendigits/pendigits_dmkde_adp.py | 5a1d32c2b06c4dcb647eb5bb198c15006267ab63 | [] | no_license | Joaggi/anomaly-detection-density-matrix-kernel-density-estimation | 762b2a944cef2ea06172834e6f445f02a52a7f89 | 34c3eb16fde9f2aad4daaaf233947c362b0f5416 | refs/heads/master | 2023-04-08T06:53:16.742624 | 2022-11-09T21:39:50 | 2022-11-09T21:39:50 | 425,664,863 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,359 | py | current_path = ""
try:
import google.colab
IN_COLAB = True
except:
IN_COLAB = False
if IN_COLAB:
import os
import sys
sys.path.append('submodules/qmc/')
#sys.path.append('../../../../submodules/qmc/')
print(sys.path)
else:
    import os
    import sys
sys.path.append('submodules/qmc/')
sys.path.append('data/')
#sys.path.append('../../../../submodules/qmc/')
print(sys.path)
# %cd ../../
print(os.getcwd())
sys.path.append('scripts/')
import qmc.tf.layers as layers
import qmc.tf.models as models
import tensorflow as tf
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from experiments import experiments
from mlflow_create_experiment import mlflow_create_experiment
setting = {
"z_name_of_experiment": 'dmkde_adp-pendigits',
"z_run_name": "dmkde_adp",
"z_dataset": "pendigits",
"z_rff_components": 1000,
"z_num_samples": 10000,
"z_batch_size": 16,
"z_select_best_experiment": True,
"z_threshold": 0.0
}
prod_settings = {"z_gamma": [2**i for i in range(-9,6)]}
params_int = ["z_rff_components", "z_batch_size", "z_num_samples"]
params_float = ["z_gamma", "z_threshold"]
mlflow = mlflow_create_experiment(setting["z_name_of_experiment"])
experiments(setting, prod_settings, params_int, params_float, mlflow)
| [
"oabustosb@unal.edu.co"
] | oabustosb@unal.edu.co |
d3e8f077b7b108a38640896cd151fdd51a849809 | cdb3c89d4b4eeb0d632558e9dd8d6bb6870106f1 | /rules.py | ff88cd51aee46df0cbe2e6aa962ebaf5d6c96df2 | [] | no_license | eevelweezel/yahtzee | c55b00e6934895e040978040e4c0d8c9d4d2e83e | e01585a29034686123d7ee7678002c30a09df479 | refs/heads/master | 2016-09-05T22:39:14.800250 | 2014-09-12T17:01:43 | 2014-09-12T17:01:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,564 | py | #!/usr/bin/python
import sys
class Rules:
def __init__(self, list, text, s=0):
self.rule = text
self.roll = list
self.score = 0
self.score += self.rolldice()+int(s)
def rolldice(self):
if self.rule == '1' or self.rule == '2' or self.rule == '3' or self.rule == '4' or self.rule == '5' or self.rule == '6':
self.n = int(self.rule)
self.num()
elif self.rule == '3x':
self.n = int(3)
self.nx()
elif self.rule == '4x':
self.n = int(4)
self.nx()
elif self.rule == 'full_house':
self.full_house()
elif self.rule == 'small_straight':
self.n = int(4)
self.straight()
elif self.rule == 'large_straight':
self.n = int(5)
self.straight()
elif self.rule == 'chance':
self.chance()
else:
self.yahtzee()
return self.score
def num(self):
self.score += self.n*self.roll.count(self.n)
return self.score
def nx(self):
for r in self.roll:
e = self.roll.count(r)
if e >= self.n:
self.score += r*self.n
break
else:
self.score += 0
return self.score
def full_house(self):
check = 0
for r in self.roll:
if self.roll.count(r) == 3 or self.roll.count(r) == 2:
check += 0
else:
check += 1
if check == 0:
for r in self.roll:
self.score += r
else:
self.score += 0
return self.score
def straight(self):
check = 0
for i in self.roll:
if self.roll.count(i+1) == 1:
check += 1
else:
check += 0
if check >= (self.n - 1):
if self.n == 4:
self.score += 15
else:
self.score += 20
else:
self.score += 0
return self.score
def chance(self):
for r in self.roll:
self.score += r
return self.score
def yahtzee(self):
for r in self.roll:
if self.roll.count(r) == 5:
self.score = 50
break
else:
self.score = 0
break
return self.score
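
# Minimal usage sketch (not part of the original file): scores one fixed roll
# against two of the rule names handled by the class above.
if __name__ == '__main__':
    roll = [3, 3, 3, 2, 2]
    print(Rules(roll, '3').score)           # sum of threes -> 9
    print(Rules(roll, 'full_house').score)  # full house -> sum of dice = 13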
| [
"eevel.weezel@gmail.com"
] | eevel.weezel@gmail.com |
fb507339b8ae3105e5af2a6c601b4f0a256df611 | 590f5f37026d67f248dbd0130149928bba9e9d9e | testing3.py | 624c4ce08d022287ebd5c2ca2dc0c8775fb34f5e | [] | no_license | sant527/krishnacookbackend | ad2682ec17f2e0cff61f9f59a12573e186c96754 | 41b829047a8878bb87bd43dde1fff43733a7d537 | refs/heads/master | 2021-08-23T02:10:01.484585 | 2017-12-02T11:33:48 | 2017-12-02T11:33:48 | 112,837,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | # IPython scratch file: the %autoreload magics and their help text below only
# run inside IPython, so they are kept as comments to make this valid Python.
# %load_ext autoreload
# %autoreload 2
#
# %autoreload     Reload all modules (except those excluded by %aimport) automatically now.
# %autoreload 0   Disable automatic reloading.
# %autoreload 1   Reload all modules imported with %aimport every time before executing the Python code typed.
# %autoreload 2   Reload all modules (except those excluded by %aimport) every time before executing the Python code typed.
from ingredients.api.pagination import TypeofIngredientSerializer
typeofingredientserializer = TypeofIngredientSerializer()
print(repr(typeofingredientserializer))
from ingredients.api.pagination import TypeofIngredientSerializerother1
typeofingredientserializer = TypeofIngredientSerializerother1()
print(repr(typeofingredientserializer)) | [
"simharupa.rns@gmail.com"
] | simharupa.rns@gmail.com |
b0d008d96078ed20d61bcb6b6903c0f6450614c7 | cd554f3a215d0d30a3c6d68e1f3705357862696c | /title/urls.py | aa0dfd92b861683bcea93dbff8effb326cb0fa8c | [] | no_license | m1j0/musicpi | d30bbce3651782dfca1db9a25777b439c04c0468 | 9bf5b701a0d4ce874059ead2ed63d2c2b72a1658 | refs/heads/master | 2020-06-03T13:24:50.885808 | 2014-03-21T10:50:50 | 2014-03-21T10:50:50 | 17,976,432 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | from django.conf.urls import patterns, url
from django.conf import settings
from django.conf.urls.static import static
from title import views
urlpatterns = patterns(
'',
url(r'^$', views.TitleListView.as_view()),
url(r'add/$', views.CreateView.as_view())
) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| [
"Michael.Jope@googlemail.com"
] | Michael.Jope@googlemail.com |
f2af2af24857adaf4bb7b668cfd188325f254b77 | 0d8d5a1b720b5b6dab9e17ef695314159cd00c59 | /tests/test_SOMClustering.py | a85a36030d9b647f9d1e1eb7d4604fdce1384584 | [
"BSD-3-Clause"
] | permissive | mingx009/susi | 6c8e83d92c248e3f53901df4251c7b3885c9c001 | 77066154f9c1d8f44ee21e19b311fa141318f31f | refs/heads/master | 2022-11-30T00:25:09.939179 | 2020-07-28T18:48:50 | 2020-07-28T18:48:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,130 | py | """Test for susi.SOMClustering.
Usage:
python -m pytest tests/test_SOMClustering.py
"""
import pytest
import os
import sys
import numpy as np
from sklearn.datasets import make_biclusters
sys.path.insert(
0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import susi
X, _, _ = make_biclusters((100, 10), 3)
@pytest.mark.parametrize("n_rows,n_columns", [
(10, 10),
(12, 15),
])
def test_som_clustering_init(n_rows, n_columns):
som_clustering = susi.SOMClustering(
n_rows=n_rows, n_columns=n_columns)
assert som_clustering.n_rows == n_rows
assert som_clustering.n_columns == n_columns
@pytest.mark.parametrize(
"learning_rate_start,learning_rate_end,max_it,curr_it,mode,expected", [
(0.9, 0.1, 800, 34, "min", 0.8197609052582371),
(0.9, 0.1, 800, 34, "exp", 0.7277042846893071),
])
def test_calc_learning_rate(learning_rate_start, learning_rate_end, max_it,
curr_it, mode, expected):
som_clustering = susi.SOMClustering(
learning_rate_start=learning_rate_start,
learning_rate_end=learning_rate_end)
som_clustering.max_iterations_ = max_it
assert som_clustering.calc_learning_rate(curr_it, mode) == expected
@pytest.mark.parametrize(
"datapoint,som_array,distance_metric,expected", [
(np.array([0.3, 2.0, 1.0]),
np.array([[[0., 1.1, 2.1], [0.3, 2.1, 1.1]],
[[1., 2.1, 3.1], [-0.3, -2.1, -1.1]]]),
"euclidean",
np.array([[1.4525839, 0.14142136], [2.21585198, 4.64542786]])),
(np.array([0.3, 2.0, 1.0]),
np.array([[[0., 1.1, 2.1], [0.3, 2.1, 1.1]],
[[1., 2.1, 3.1], [-0.3, -2.1, -1.1]]]),
"manhattan",
np.array([[2.9, 2.9], [6.8, 6.8]])),
(np.array([0.3, 2.0, 1.0]),
np.array([[[0., 1.1, 2.1], [0.3, 2.1, 1.1]],
[[1., 2.1, 3.1], [-0.3, -2.1, -1.1]]]),
"mahalanobis",
np.array([[1.41421356, 1.41421356], [1.41421356, 1.41421356]])),
(np.array([0.3, 2.0, 1.0]),
np.array([[[0., 1.1, 2.1], [0.3, 2.1, 1.1]],
[[1., 2.1, 3.1], [-0.3, -2.1, -1.1]]]),
"tanimoto",
np.array([[0.5, 0.5], [0.8, 0.8]])),
])
def test_get_node_distance_matrix(datapoint, som_array, distance_metric,
expected):
som_clustering = susi.SOMClustering()
som_clustering.distance_metric = distance_metric
som_clustering.X_ = np.array([datapoint, datapoint])
som_clustering.n_rows = som_array.shape[0]
som_clustering.n_columns = som_array.shape[1]
som_clustering.init_unsuper_som()
assert np.allclose(som_clustering.get_node_distance_matrix(
datapoint, som_array), expected, rtol=1e-2)
@pytest.mark.parametrize(
"radius_max,radius_min,max_it,curr_it,mode,expected", [
(0.9, 0.1, 800, 34, "min", 0.8197609052582371),
(0.9, 0.1, 800, 34, "exp", 0.7277042846893071),
])
def test_calc_neighborhood_func(radius_max, radius_min, max_it, curr_it, mode,
expected):
som_clustering = susi.SOMClustering()
som_clustering.radius_max_ = radius_max
som_clustering.radius_min_ = radius_min
som_clustering.max_iterations_ = max_it
assert som_clustering.calc_neighborhood_func(curr_it, mode) == expected
@pytest.mark.parametrize("a_1,a_2,max_it,curr_it,mode,expected", [
(0.9, 0.1, 800, 34, "min", 0.8197609052582371),
(0.9, 0.1, 800, 34, "exp", 0.7277042846893071),
(0.9, 0.1, 800, 34, "expsquare", 0.8919084683204536),
(0.9, 0.1, 800, 34, "linear", 0.866),
(0.9, 0.1, 800, 34, "inverse", 0.026470588235294117),
(0.9, 0.1, 800, 34, "root", 0.9955321885817805),
(0.9, 0.1, 800, 34, "testerror", 0.7277042846893071),
])
def test_decreasing_rate(a_1, a_2, max_it, curr_it, mode, expected):
if mode == "testerror":
with pytest.raises(Exception):
assert susi.decreasing_rate(
a_1, a_2, max_it, curr_it, mode) == expected
else:
assert susi.decreasing_rate(
a_1, a_2, max_it, curr_it, mode) == expected
@pytest.mark.parametrize("X,init_mode", [
(np.array([[0., 1.1, 2.1], [0.3, 2.1, 1.1]]), "random"),
(np.array([[0., 1.1, 2.1], [0.3, 2.1, 1.1]]), "random_data"),
(np.array([[0., 1.1, 2.1], [0.3, 2.1, 1.1]]), "pca"),
(np.array([[0., 1.1, 2.1], [0.3, 2.1, 1.1]]), "rrandom"),
])
def test_init_unsuper_som(X, init_mode):
som_clustering = susi.SOMClustering(init_mode_unsupervised=init_mode)
som_clustering.X_ = X
if init_mode in ["random", "random_data", "pca"]:
som_clustering.init_unsuper_som()
# test type
assert isinstance(som_clustering.unsuper_som_, np.ndarray)
# test shape
n_rows = som_clustering.n_rows
n_columns = som_clustering.n_columns
assert som_clustering.unsuper_som_.shape == (n_rows, n_columns, X.shape[1])
else:
with pytest.raises(Exception):
som_clustering.init_unsuper_som()
@pytest.mark.parametrize("som_array,datapoint,expected", [
(np.array([[[0., 1.1, 2.1], [0.3, 2.1, 1.1]],
[[1., 2.1, 3.1], [-0.3, -2.1, -1.1]]]),
np.array([0.3, 2.0, 1.0]), (0, 1)),
])
def test_get_bmu(som_array, datapoint, expected):
som_clustering = susi.SOMClustering()
assert np.array_equal(som_clustering.get_bmu(datapoint, som_array),
expected)
@pytest.mark.parametrize(
"X,n_rows,n_columns,train_mode_unsupervised,random_state,expected", [
(np.array([[0., 0.1, 0.2], [2.3, 2.1, 2.1]]), 2, 2, "online", 42,
np.array([[[1.72651971, 1.60132149, 1.62625542],
[1.2091674, 1.15144991, 1.19887742]],
[[1.2091674, 1.15144991, 1.19887742],
[0.66132515, 0.67506535, 0.74631208]]])),
(np.array([[0., 0.1, 0.2], [2.3, 2.1, 2.1]]), 2, 2, "batch", 42,
np.array([[[1.68143473, 1.56211716, 1.5890113 ],
[1.15 , 1.1 , 1.15 ]],
[[1.15 , 1.1 , 1.15 ],
[0.61856527, 0.63788284, 0.7109887 ]]]))
])
def test_fit(X, n_rows, n_columns, train_mode_unsupervised, random_state,
expected):
som = susi.SOMClustering(
n_rows=n_rows,
n_columns=n_columns,
train_mode_unsupervised=train_mode_unsupervised,
random_state=random_state)
som.fit(X)
assert isinstance(som.unsuper_som_, np.ndarray)
assert som.unsuper_som_.shape == (n_rows, n_columns, X.shape[1])
assert np.allclose(som.unsuper_som_, expected, atol=1e-20)
with pytest.raises(Exception):
som = susi.SOMClustering(train_mode_unsupervised="alsdkf")
som.fit(X)
@pytest.mark.parametrize(
("n_rows,n_columns,random_state,neighborhood_func,bmu_pos,X,"
"mode,expected"), [
(2, 2, 42, 0.9, (0, 0),
np.array([[0., 0.1, 0.2, 0.3], [2.3, 2.1, 2.1, 2.5]]),
"pseudo-gaussian",
np.array([[[1.], [0.53940751]], [[0.53940751], [0.29096046]]])),
(2, 2, 42, 0.9, (0, 0),
np.array([[0., 0.1, 0.2, 0.3], [2.3, 2.1, 2.1, 2.5]]),
"mexican-hat",
np.array([[[1.], [-0.12652769]], [[-0.12652769], [-0.42746043]]])),
])
def test_get_nbh_distance_weight_matrix(n_rows, n_columns, random_state,
neighborhood_func, bmu_pos, X,
mode, expected):
som_clustering = susi.SOMClustering(
n_rows=n_rows, n_columns=n_columns,
nbh_dist_weight_mode=mode, random_state=random_state)
som_clustering.X_ = X
som_clustering.init_unsuper_som()
print(som_clustering.get_nbh_distance_weight_matrix(
neighborhood_func, bmu_pos)
)
print(expected)
assert np.allclose(som_clustering.get_nbh_distance_weight_matrix(
neighborhood_func, bmu_pos), expected, atol=1e-8)
@pytest.mark.parametrize(
("n_rows,n_columns,random_state,n_iter_unsupervised, X,learningrate,"
"neighborhood_func,bmu_pos,dp,expected"), [
(2, 2, 42, 2, np.array([[0., 0.1, 0.2], [2.3, 2.1, 2.1]]), 0.7, 0.4,
(1, 1), 1,
np.array([[[1.49058628, 1.61686991, 1.52492551],
[1.26125694, 0.91311475, 0.91310002]],
[[0.93121244, 1.34669682, 1.18486546],
[1.9329369 , 1.62053297, 1.83942631]]])),
])
def test_modify_weight_matrix_online(n_rows, n_columns, random_state,
n_iter_unsupervised, X, learningrate,
neighborhood_func, bmu_pos, dp, expected):
som_clustering = susi.SOMClustering(
n_rows=n_rows, n_columns=n_columns,
n_iter_unsupervised=n_iter_unsupervised, random_state=random_state)
som_clustering.fit(X)
assert np.allclose(susi.modify_weight_matrix_online(
som_array=som_clustering.unsuper_som_,
learningrate=learningrate,
dist_weight_matrix=som_clustering.get_nbh_distance_weight_matrix(
neighborhood_func, bmu_pos),
true_vector=som_clustering.X_[dp]), expected, atol=1e-8)
@pytest.mark.parametrize(
("X,nbh_func,bmus,expected"), [
(np.array([[0., 0.1, 0.2], [2.3, 2.1, 2.1]]), 0.4,
np.array([[1, 1], [1, 0]]),
np.array([[[2.20319823, 2.01582454, 2.02003332],
[0.09680177, 0.18417546, 0.27996668]],
[[2.20319823, 2.01582454, 2.02003332],
[0.09680177, 0.18417546, 0.27996668]]])),
])
def test_modify_weight_matrix_batch(X, nbh_func, bmus, expected):
som = susi.SOMClustering(
n_rows=2,
n_columns=2,
n_iter_unsupervised=5,
random_state=42)
som.fit(X)
# calculate distance weight matrix for all datapoints
dist_weight_block = np.zeros(
(len(X), som.n_rows, som.n_columns))
for i, bmu_pos in enumerate(bmus):
dist_weight_block[i] = som.get_nbh_distance_weight_matrix(
nbh_func, bmu_pos).reshape(
(som.n_rows, som.n_columns))
new_som = som.modify_weight_matrix_batch(
som_array=som.unsuper_som_,
dist_weight_matrix=dist_weight_block,
data=som.X_)
assert np.allclose(new_som, expected, atol=1e-8)
@pytest.mark.parametrize(
"n_rows,n_columns,X", [
(2, 2, np.array([[0., 0.1, 0.2], [2.3, 2.1, 2.1],
[2.3, 2.1, 2.1], [2.3, 2.1, 2.1]])),
])
def test_transform(n_rows, n_columns, X):
som_clustering = susi.SOMClustering(
n_rows=n_rows, n_columns=n_columns)
som_clustering.fit(X)
bmus = som_clustering.transform(X)
assert(len(bmus) == X.shape[0])
assert(len(bmus[0]) == 2)
@pytest.mark.parametrize(
"n_rows,n_columns,X", [
(2, 2, np.array([[0., 0.1, 0.2], [2.3, 2.1, 2.1],
[2.3, 2.1, 2.1], [2.3, 2.1, 2.1]])),
])
def test_fit_transform(n_rows, n_columns, X):
som_clustering = susi.SOMClustering(
n_rows=n_rows, n_columns=n_columns)
bmus = som_clustering.fit_transform(X)
assert(len(bmus) == X.shape[0])
assert(len(bmus[0]) == 2)
@pytest.mark.parametrize("som_array,X,n_jobs,expected", [
(np.array([[[0., 1.1, 2.1], [0.3, 2.1, 1.1]],
[[1., 2.1, 3.1], [-0.3, -2.1, -1.1]]]),
np.array([[0.3, 2.0, 1.0], [0.3, 2.0, 1.0],
[0.3, 2.0, 1.0], [1.2, 2.0, 3.4]]),
1, [(0, 1), (0, 1), (0, 1), (1, 0)]),
(np.array([[[0., 1.1, 2.1], [0.3, 2.1, 1.1]],
[[1., 2.1, 3.1], [-0.3, -2.1, -1.1]]]),
np.array([[0.3, 2.0, 1.0], [0.3, 2.0, 1.0],
[0.3, 2.0, 1.0], [1.2, 2.0, 3.4]]),
-1, [(0, 1), (0, 1), (0, 1), (1, 0)]),
])
def test_get_bmus(som_array, X, n_jobs, expected):
som_clustering = susi.SOMClustering(n_jobs=n_jobs)
assert np.array_equal(som_clustering.get_bmus(X, som_array), expected)
@pytest.mark.parametrize("som_array,X,n_jobs,expected", [
(np.array([[[0., 1.1, 2.1], [0.3, 2.1, 1.1]],
[[1., 2.1, 3.1], [-0.3, -2.1, -1.1]]]),
np.array([[0.3, 2.0, 1.0], [0.3, 2.0, 1.0],
[0.3, 2.0, 1.0], [1.2, 2.0, 3.4]]),
1, [(0, 1), (0, 1), (0, 1), (1, 0)]),
(np.array([[[0., 1.1, 2.1], [0.3, 2.1, 1.1]],
[[1., 2.1, 3.1], [-0.3, -2.1, -1.1]]]),
np.array([[0.3, 2.0, 1.0], [0.3, 2.0, 1.0],
[0.3, 2.0, 1.0], [1.2, 2.0, 3.4]]),
-1, [(0, 1), (0, 1), (0, 1), (1, 0)]),
])
def test_set_bmus(som_array, X, n_jobs, expected):
som_clustering = susi.SOMClustering(n_jobs=n_jobs)
som_clustering.set_bmus(X, som_array)
assert np.array_equal(som_clustering.bmus_, expected)
@pytest.mark.parametrize("n_rows,n_columns,som_array,X,node,expected", [
(3, 3, np.array([[[0., 1.1, 2.1], [0.3, 2.1, 1.1]],
[[1., 2.1, 3.1], [-0.3, -2.1, -1.1]]]),
np.array([[0.3, 2.0, 1.0], [0.3, 2.0, 1.0],
[0.3, 2.0, 1.0], [1.2, 2.0, 3.4]]),
np.array([0, 0]), []),
(3, 3, np.array([[[0., 1.1, 2.1], [0.3, 2.1, 1.1]],
[[1., 2.1, 3.1], [-0.3, -2.1, -1.1]]]),
np.array([[0.3, 2.0, 1.0], [0.3, 2.0, 1.0],
[0.3, 2.0, 1.0], [1.2, 2.0, 3.4]]),
np.array([0, 1]), [0, 1, 2]),
])
def test_get_datapoints_from_node(n_rows, n_columns, som_array, X, node,
expected):
som = susi.SOMClustering(n_rows=n_rows, n_columns=n_columns)
som.set_bmus(X, som_array)
assert(np.array_equal(som.get_datapoints_from_node(node), expected))
@pytest.mark.parametrize("n_rows,n_columns,mode", [
(3, 3, "mean"),
(10, 5, "median"),
(100, 3, "min"),
(30, 30, "max"),
])
def test_get_u_matrix(n_rows, n_columns, mode):
som = susi.SOMClustering(n_rows=n_rows, n_columns=n_columns)
som.fit(X)
u_matrix = som.get_u_matrix(mode=mode)
assert(isinstance(u_matrix, np.ndarray))
assert(u_matrix.shape == (n_rows*2-1, n_columns*2-1, 1))
def test_get_clusters():
som = susi.SOMClustering()
som.fit(X)
clusters = som.get_clusters(X)
assert(len(clusters) == len(X))
assert(len(clusters[0]) == 2)
| [
"felix.riese@kit.edu"
] | felix.riese@kit.edu |
8d545168dade82b694f437cd17c403e346372b8e | 87bfe0262f9603bd36f3560975f13980fc92993a | /blogs/admin.py | 132b87436ee602bcf22d0a46a142e94e82054b9e | [] | no_license | apoloa/BlogServer | 77a812ab2a74a66db61a13bea45417a0b92b0334 | 0677b0292e74331e430f634d7a9ee7b922a86e55 | refs/heads/master | 2021-06-10T22:18:41.063185 | 2016-12-28T20:21:41 | 2016-12-28T20:21:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | from django.contrib import admin
from blogs.models import Category, Blog
# Register your models here.
admin.site.register(Category)
@admin.register(Blog)
class BlogAdmin(admin.ModelAdmin):
list_display = ('name', 'owner')
| [
"a.whole.dev@gmail.com"
] | a.whole.dev@gmail.com |
9867fe19d328e3fb7a896205afc9498f7e784422 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /Z8REdTE5P57f4q7dK_20.py | 02025f57265a048945b02e93032e46722f6d5199 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py |
def collatz(n, r=[]):
if not r: r = [n]
if n == 1: return (len(r), max(*r))
n = n * 3 + 1 if n & 1 else n // 2
return collatz(n, r + [n])
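
# Quick demo (not part of the original file): 6 -> 3 -> 10 -> 5 -> 16 -> 8
# -> 4 -> 2 -> 1 gives a 9-term sequence whose peak value is 16.
if __name__ == "__main__":
    print(collatz(6))  # expected: (9, 16)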
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
ab076d87105dc2fb158144127713ff01fb4ab601 | c33b9f35bf5610675d1a13d2384f594729574919 | /week4/python/delaunayAnimation.py | f6e362e5f249cc1fd4d63c616e82d5034e14567b | [] | no_license | MarouaneMan/cv4faces_course | 172d13a63543be1d64a15ad10d660d782539bdb4 | 7f54f16b86d013a3b9273dc2d23c64966f32faf9 | refs/heads/master | 2023-04-14T14:22:31.186600 | 2021-04-06T18:05:28 | 2021-04-06T18:05:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,008 | py | #!/usr/bin/python
# Copyright 2017 BIG VISION LLC ALL RIGHTS RESERVED
#
# This code is made available to the students of
# the online course titled "Computer Vision for Faces"
# by Satya Mallick for personal non-commercial use.
#
# Sharing this code is strictly prohibited without written
# permission from Big Vision LLC.
#
# For licensing and other inquiries, please email
# spmallick@bigvisionllc.com
#
import cv2
import numpy as np
import random
# Check if a point is inside a rectangle
# Rect is an array of (x, y, w, h)
def rectContains(rect, point) :
if point[0] < rect[0] :
return False
elif point[1] < rect[1] :
return False
elif point[0] > rect[2] :
return False
elif point[1] > rect[3] :
return False
return True
# Draw a point on the image
def drawPoint(img, p, color ) :
cv2.circle( img, p, 2, color, -1, cv2.LINE_AA, 0 )
# Draw delaunay triangles
def drawDelaunay(img, subdiv, delaunayColor ) :
# Obtain the list of triangles.
# Each triangle is stored as vector of 6 coordinates
# (x0, y0, x1, y1, x2, y2)
triangleList = subdiv.getTriangleList();
size = img.shape
r = (0, 0, size[1], size[0])
# Will convert triangle representation to three vertices pt1, pt2, pt3
for t in triangleList :
pt1 = (t[0], t[1])
pt2 = (t[2], t[3])
pt3 = (t[4], t[5])
# Draw triangles that are completely inside the image
if rectContains(r, pt1) and rectContains(r, pt2) and rectContains(r, pt3) :
cv2.line(img, pt1, pt2, delaunayColor, 1, cv2.LINE_AA, 0)
cv2.line(img, pt2, pt3, delaunayColor, 1, cv2.LINE_AA, 0)
cv2.line(img, pt3, pt1, delaunayColor, 1, cv2.LINE_AA, 0)
# Draw voronoi diagram
def drawVoronoi(img, subdiv) :
# Get facets and centers
( facets, centers) = subdiv.getVoronoiFacetList([])
for i in range(0,len(facets)) :
ifacetArr = []
for f in facets[i] :
ifacetArr.append(f)
# Extract ith facet
ifacet = np.array(ifacetArr, np.int)
# Generate random color
color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
# Fill facet with a random color
cv2.fillConvexPoly(img, ifacet, color, cv2.LINE_AA, 0);
# Draw facet boundary
ifacets = np.array([ifacet])
cv2.polylines(img, ifacets, True, (0, 0, 0), 1, cv2.LINE_AA, 0)
# Draw centers.
cv2.circle(img, (centers[i][0], centers[i][1]), 3, (0, 0, 0), -1, cv2.LINE_AA, 0)
def findIndex(points, point):
diff = np.array(points) - np.array(point)
# Find the distance of point from all points
diffNorm = np.linalg.norm(diff, 2, 1)
# Find the index with minimum distance and return it
return np.argmin(diffNorm)
# write delaunay triangles to file
def writeDelaunay( subdiv, points, outputFileName ) :
# Obtain the list of triangles.
# Each triangle is stored as vector of 6 coordinates
# (x0, y0, x1, y1, x2, y2)
triangleList = subdiv.getTriangleList();
filePointer = open(outputFileName,'w')
# Will convert triangle representation to three vertices pt1, pt2, pt3
for t in triangleList :
pt1 = (t[0], t[1])
pt2 = (t[2], t[3])
pt3 = (t[4], t[5])
# Find the landmark corresponding to each vertex
landmark1 = findIndex(points,pt1)
landmark2 = findIndex(points,pt2)
landmark3 = findIndex(points,pt3)
filePointer.writelines("{} {} {}\n".format(landmark1, landmark2, landmark3 ))
filePointer.close()
if __name__ == '__main__':
# Define window name
win = "Delaunay Triangulation & Voronoi Diagram"
# Define colors for drawing.
delaunayColor = (255,255,255)
pointsColor = (0, 0, 255)
# Read in the image.
img = cv2.imread("../data/images/smiling-man.jpg");
# Rectangle to be used with Subdiv2D
size = img.shape
rect = (0, 0, size[1], size[0])
# Create an instance of Subdiv2D
subdiv = cv2.Subdiv2D(rect);
# Create an array of points.
points = [];
# Allocate space for voronoi Diagram
imgVoronoi = np.zeros(img.shape, dtype = img.dtype)
# Read in the points from a text file
with open("../data/images/smiling-man-delaunay.txt") as file :
for line in file :
x, y = line.split()
points.append((int(x), int(y)))
outputFileName = "results/smiling-man-delaunay.tri"
# Draw landmark points on the image
for p in points :
drawPoint(img, p, pointsColor )
# Insert points into subdiv
plotPoints = []
for p in points :
subdiv.insert(p)
plotPoints.append(p)
imgDelaunay = img.copy()
# Draw delaunay triangles and voronoi diagrams
drawDelaunay(imgDelaunay, subdiv, delaunayColor);
drawVoronoi(imgVoronoi,subdiv)
for pp in plotPoints :
drawPoint(imgDelaunay, pp, pointsColor)
# Display as an animation
imgDisplay = np.hstack([imgDelaunay, imgVoronoi])
cv2.imshow(win,imgDisplay)
cv2.waitKey(100)
writeDelaunay(subdiv, points, outputFileName)
print("Writing Delaunay triangles to {}".format(outputFileName))
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"guillaume.leurquin@gmail.com"
] | guillaume.leurquin@gmail.com |
d8d7c2533a436b336dde94b74fadb5d8c040b775 | d05c946e345baa67e7894ee33ca21e24b8d26028 | /general/data-cleaning-pandas/data_cleaning.py | 7e03b3efd348f785821adfca186f950771cfa799 | [
"MIT"
] | permissive | x4nth055/pythoncode-tutorials | 327255550812f84149841d56f2d13eaa84efd42e | d6ba5d672f7060ba88384db5910efab1768c7230 | refs/heads/master | 2023-09-01T02:36:58.442748 | 2023-08-19T14:04:34 | 2023-08-19T14:04:34 | 199,449,624 | 1,858 | 2,055 | MIT | 2023-08-25T20:41:56 | 2019-07-29T12:35:40 | Jupyter Notebook | UTF-8 | Python | false | false | 202 | py | import pandas as pd
# Config settings
pd.set_option('max_columns', None)
pd.set_option('max_rows', 12)
# Import CSV data
data_frames = pd.read_csv(r'simulated_data.csv')
print(data_frames.head(10))
| [
"fullclip@protonmail.com"
] | fullclip@protonmail.com |
c265626f1b03bc17a4b8cb3dd2b3a9af041ae4da | 90070847de299a2890fd446ca06aae8731651f31 | /integrationtests/features/steps/post.py | d979a8e1a11c714945eb03cd13da582b9137efcd | [] | no_license | jailtonurbano/ws-marcaponto-fatec-public | d89608ccfa02bf8bf3131c32392a193ff39e57c9 | 0d7a0ddec394b70c27c1340c3824bd7b991dfdd3 | refs/heads/master | 2023-01-30T18:59:54.243806 | 2020-12-16T10:33:53 | 2020-12-16T10:33:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,708 | py | import requests
from time import sleep
from behave import given, then, when, step
from helpers.json_helper import get_json_keys_on_list, return_json_from_url, find_correct_json, write_on_json_file
from helpers.json_helper import load_json_from_data_folder
@given('que eu carrego o arquivo JSON de nome "{name_json_file}"')
def load_json(context, name_json_file):
context.json_file = load_json_from_data_folder(name_json_file)
@when('envio o arquivo JSON "{json_file}", usando o método POST para a rota {url}')
def send_json_to_post_route(context, json_file, url):
url = str.replace(url, '"', '')
context.json_file = load_json_from_data_folder(json_file)
context.status = requests.post(url, json=context.json_file)
@then('devo ter um retorno "{code}" do content')
def request_on_route(context, code):
behave_status_code = int(code)
sleep(1)
    assert behave_status_code == context.status.status_code, f'The response code was not {code} but {context.status.status_code}'
@step('encontro o id do JSON com o atributo "{key}": "{value}"')
def find_id_by_key_and_value(context, key, value):
json_data = context.data_requested
json_data = find_correct_json(json_data, key, value)
context.id_found = json_data['id']
    assert context.id_found / 1 == context.id_found, "Id not found"
@step('atualizo a chave "{key}" com o valor "{value}" no JSON "{json_file}"')
def update_json(context, key, value, json_file):
if value == 'id_encontrado':
value = context.id_found
new_json = write_on_json_file(json_file, key, value)
else:
new_json = write_on_json_file(json_file, key, value)
    assert new_json[key] == value, 'Json was not updated'
| [
"heitor.amaral90@outlook.com"
] | heitor.amaral90@outlook.com |
52eac8460cf6c4abbb1e67403248a83c19b9b541 | 1eae6b0848bbe4893cf3cc90b4b7a4e6b8a9a2a8 | /app/migrations/0004_auto_20210516_0338.py | e6b5cf717b980b8e1167053a1cceaf95b68652e5 | [] | no_license | a1usha/django-newspaper | 77c9ef448f7e585464bbe12296ae818c1344bd81 | 025124840a67f8ce9c2a7098f02b6ad8ade46ef7 | refs/heads/master | 2023-04-12T08:19:16.744973 | 2021-05-19T03:21:30 | 2021-05-19T03:21:30 | 325,965,384 | 0 | 0 | null | 2021-05-19T03:21:31 | 2021-01-01T11:22:35 | JavaScript | UTF-8 | Python | false | false | 1,347 | py | # Generated by Django 3.1.3 on 2021-05-16 03:38
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('app', '0003_auto_20210515_0250'),
]
operations = [
migrations.AddField(
model_name='articletask',
name='article',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='app.article'),
),
migrations.AlterField(
model_name='basetask',
name='assignee',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='imagetask',
name='image',
field=models.ImageField(default='default.jpg', upload_to='uploads'),
),
migrations.AlterField(
model_name='newspaper',
name='page_size',
field=models.CharField(choices=[('tabloid', 'Tabloid - 280 x 430 mm (11.0" x 16.9")'), ('broadsheet', 'Broadsheet - 600 x 750 mm (23.5" x 29.5")'), ('berliner', 'Berliner - 315 x 470 mm (12.4" x 18.5")')], default='berliner', max_length=100),
),
]
| [
"aleksandr.ushaev@hotmail.com"
] | aleksandr.ushaev@hotmail.com |
1bd557eb3e9a1615190cdd7bab21f1d1d239008a | d283bcf8de30b8abebe971a3481f38c8f817a3e3 | /controler/logger.py | ad78e1b54c891cb11320b854801c918e4c8822a2 | [] | no_license | luonan211/autotest | 8e26a2cf98019a138f155d7597f71c74344ce21d | 251b036854f35e5c49e16412547439a826cca3da | refs/heads/master | 2023-03-27T05:06:26.526139 | 2021-03-28T16:46:56 | 2021-03-28T16:46:56 | 352,366,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,717 | py | # -*-coding:utf-8 -*-
# Works around the problem that a logger configured in a separate file cannot be debugged
import os
import logging
from logging.handlers import TimedRotatingFileHandler
from basepath import get_base_path
# log_path is the directory where log files are stored
log_path = os.path.join(get_base_path(), 'logs')
# Automatically create the logs folder if it does not exist
if not os.path.exists(log_path):
os.mkdir(log_path)
class MyLog:
def __init__(self):
        # Log file name
self.log_name = os.path.join(log_path, 'log')
self.logger = logging.getLogger()
self.logger.setLevel(logging.DEBUG)
        # Maximum number of log files to keep
self.backup_count = 30
        # Log output format
# self.formatter = logging.Formatter('[%(asctime)s]-%(filename)s-%(processName)s]-%(levelname)s: %(message)s')
self.formatter = logging.Formatter('%(asctime)s-%(filename)s[line:%(lineno)d]-%(levelname)s: %(message)s')
def __console(self, level, message):
        # Create a FileHandler for writing logs to a local file
        # fh = logging.FileHandler(self.log_name, 'a')  # append mode (Python 2 version)
        # fh = logging.FileHandler(self.log_name, 'a', encoding='utf-8')  # Python 3 version
        # Create a new log file every day, keeping at most backup_count of them
fh = TimedRotatingFileHandler(filename=self.log_name, when='D', interval=1,
backupCount=self.backup_count, delay=True, encoding='utf-8')
fh.setLevel(logging.DEBUG)
fh.setFormatter(self.formatter)
self.logger.addHandler(fh)
        # Create a StreamHandler for output to the console
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(self.formatter)
self.logger.addHandler(ch)
if level == 'info':
self.logger.info(message)
elif level == 'debug':
self.logger.debug(message)
elif level == 'warning':
self.logger.warning(message)
elif level == 'error':
self.logger.error(message)
        # These two lines avoid duplicated log output
self.logger.removeHandler(ch)
self.logger.removeHandler(fh)
        # Close the opened file
fh.close()
def debug(self, message):
self.__console('debug', message)
def info(self, message):
self.__console('info', message)
def warning(self, message):
self.__console('warning', message)
def error(self, message):
self.__console('error', message)
if __name__ == "__main__":
log = MyLog()
log.info("---测试开始----")
log.info("操作步骤1,2,3")
log.warning("----测试结束----") | [
"luonan211@163.com"
] | luonan211@163.com |
33abe524c60e0403ba655ab5a2611478e1986b30 | e3ec04b76eebedc26942cbbd2c7dde2758de7d30 | /temperature.py | 3c3c2aba0de090d4586f828f590f7028e169c5bb | [] | no_license | dontdiepls/temperature | 3e048196f3d3331ed0cd1ef122dde28689ec5697 | 0580081a40a4708f074a23dca303ccc796bc05a4 | refs/heads/master | 2020-06-13T13:25:21.663807 | 2019-07-01T17:54:22 | 2019-07-01T17:54:22 | 194,670,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | c = int(input("请输入摄氏温度: "))
f = c * 9 / 5 + 32
print(f"华氏温度是{f}")
| [
"qianqian1903@live.cn"
] | qianqian1903@live.cn |
6f68bfd6aeaca00bf8183c13f37d9ed9d3b5e400 | fa7cbf5ba86d148be7a9985f70e3df478d7bb31e | /newsproject/settings.py | cab40dc115006abcf4aa77d6fd87465d9ceecb39 | [] | no_license | BruceMWhealton/django-news-agg | 98e710e11b6293e6f3972965b73d71693002e05c | c2cc4c9f3646c4a383653d2ab6a234123d6988dd | refs/heads/master | 2023-01-14T18:10:13.334158 | 2016-02-15T16:46:44 | 2016-02-15T16:46:44 | 51,769,683 | 0 | 2 | null | 2022-12-21T05:50:33 | 2016-02-15T16:44:53 | Python | UTF-8 | Python | false | false | 3,193 | py | """
Django settings for newsproject project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'c46t!r15_en2hxgrqmbyliul^epy6q&qqddph1w)i9%e9o=(ji'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'news',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'newsproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'newsproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| [
"futurewavewebdevelopment@gmail.com"
] | futurewavewebdevelopment@gmail.com |
84cdd8514147ad1ea18d223d5299167d23b672e0 | e5d454570791c0ea09db5f62144249147e7bcb90 | /myvenv/bin/pilprint.py | 623cfd8ee5e76469bc04b9ca50219277c7503936 | [] | no_license | SowjanyaVallabhu/jango | 58ffe4517330cbd45dd37166f338bb92abfe328d | 7581bc745baf0491582f94ed10f4647f69fb361b | refs/heads/master | 2020-12-02T12:47:10.358645 | 2017-07-08T03:49:09 | 2017-07-08T03:49:09 | 96,594,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,620 | py | #!/home/tsuser/jango/myvenv/bin/python3.4
#
# The Python Imaging Library.
# $Id$
#
# print image files to postscript printer
#
# History:
# 0.1 1996-04-20 fl Created
# 0.2 1996-10-04 fl Use draft mode when converting.
# 0.3 2003-05-06 fl Fixed a typo or two.
#
from __future__ import print_function
import getopt
import os
import sys
import subprocess
VERSION = "pilprint 0.3/2003-05-05"
from PIL import Image
from PIL import PSDraw
letter = (1.0*72, 1.0*72, 7.5*72, 10.0*72)
def description(filepath, image):
title = os.path.splitext(os.path.split(filepath)[1])[0]
format = " (%dx%d "
if image.format:
format = " (" + image.format + " %dx%d "
return title + format % image.size + image.mode + ")"
if len(sys.argv) == 1:
print("PIL Print 0.3/2003-05-05 -- print image files")
print("Usage: pilprint files...")
print("Options:")
print(" -c colour printer (default is monochrome)")
print(" -d debug (show available drivers)")
print(" -p print via lpr (default is stdout)")
print(" -P <printer> same as -p but use given printer")
sys.exit(1)
try:
opt, argv = getopt.getopt(sys.argv[1:], "cdpP:")
except getopt.error as v:
print(v)
sys.exit(1)
printerArgs = [] # print to stdout
monochrome = 1 # reduce file size for most common case
for o, a in opt:
if o == "-d":
# debug: show available drivers
Image.init()
print(Image.ID)
sys.exit(1)
elif o == "-c":
# colour printer
monochrome = 0
elif o == "-p":
# default printer channel
printerArgs = ["lpr"]
elif o == "-P":
# printer channel
printerArgs = ["lpr", "-P%s" % a]
for filepath in argv:
try:
im = Image.open(filepath)
title = description(filepath, im)
if monochrome and im.mode not in ["1", "L"]:
im.draft("L", im.size)
im = im.convert("L")
if printerArgs:
p = subprocess.Popen(printerArgs, stdin=subprocess.PIPE)
fp = p.stdin
else:
fp = sys.stdout
ps = PSDraw.PSDraw(fp)
ps.begin_document()
ps.setfont("Helvetica-Narrow-Bold", 18)
ps.text((letter[0], letter[3]+24), title)
ps.setfont("Helvetica-Narrow-Bold", 8)
ps.text((letter[0], letter[1]-30), VERSION)
ps.image(letter, im)
ps.end_document()
if printerArgs:
fp.close()
except:
print("cannot print image", end=' ')
print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
| [
"tsuser@WISE.SVECW"
] | tsuser@WISE.SVECW |
ece8fced84649d422901181c4352ee541faa7bb4 | 2c64d2f57c455d890dc0bf68bde1a215d787c294 | /code_prep/abstract_structures/queue_stack/stack_of_plates.py | ba27dcb0283dead2d598688f8db63eb07697fe00 | [] | no_license | sanidhyamangal/interviews_prep | f933ab00b8501f900c5b730314527023c2b151b0 | b6d3b45094bbffe265cea1f3223557dad3c650af | refs/heads/master | 2023-03-16T07:22:12.425415 | 2018-06-10T15:27:40 | 2018-06-10T15:27:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,619 | py | # Implement a class that acts as a single stack made out of multiple stacks
# which each have a set capacity.
__author__ = 'tchaton'
class MultiStack():
def __init__(self, cap):
self.cap = cap
self.stacks = []
def push(self, item):
if len(self.stacks) and (len(self.stacks[-1]) < self.cap):
self.stacks[-1].append(item)
else:
self.stacks.append([item])
def pop(self):
if len(self.stacks) > 0:
if len(self.stacks[-1]) > 0:
return self.stacks[-1].pop()
else:
self._refound()
if len(self.stacks[-1]) > 0:
return self.stacks[-1].pop()
else:
return None
else:
return None
    def _refound(self):
        # Repack all remaining items into consecutive stacks of size `cap`
        arr = []
        h = []
        for stack in self.stacks:
            for v in stack:
                h.append(v)
                if len(h) == self.cap:
                    arr.append(h)
                    h = []
        if h:  # keep any leftover partial stack (silently dropped in the original)
            arr.append(h)
        self.stacks = arr
        if len(self.stacks) == 0:
            self.stacks = [[]]
def pop_at(self, value):
if len(self.stacks) >= value:
if len(self.stacks[value]) > 0:
return self.stacks[value].pop()
else:
self._refound()
return None
else:
print('Can t access an un-existing column')
def _print(self):
print(self.stacks)
import unittest
class Test(unittest.TestCase):
def test_multi_stack(self):
stack = MultiStack(3)
stack.push(11)
stack.push(22)
stack.push(33)
stack.push(44)
stack.push(55)
stack.push(66)
stack.push(77)
stack.push(88)
stack._print()
self.assertEqual(stack.pop(), 88)
stack._print()
self.assertEqual(stack.pop_at(1), 66)
stack._print()
self.assertEqual(stack.pop_at(0), 33)
stack._print()
self.assertEqual(stack.pop_at(1), 55)
stack._print()
self.assertEqual(stack.pop_at(1), 44)
stack._print()
self.assertEqual(stack.pop_at(1), None)
stack._print()
stack.push(99)
stack._print()
self.assertEqual(stack.pop(), 99)
stack._print()
self.assertEqual(stack.pop(), 77)
stack._print()
self.assertEqual(stack.pop(), 22)
stack._print()
self.assertEqual(stack.pop(), 11)
stack._print()
self.assertEqual(stack.pop(), None)
if __name__ == "__main__":
unittest.main()
| [
"thomaschaton84@gmail.com"
] | thomaschaton84@gmail.com |
d428c80a93dbddae7db51bb6608f216358aee192 | 1b9dda363f8228d053ce2422ec29a8024c361595 | /data_manager.py | 8b9b716b51dad03327a76e6c4ca19b817f60dc17 | [
"MIT"
] | permissive | mangye16/Cross-Modal-Re-ID-baseline | 8ccd6eea99c34fe35c2f908322f2b5326e5468dc | 910c650fcdda75984dfc9da3d9d46eb044c56969 | refs/heads/master | 2023-01-30T06:28:12.427603 | 2022-03-18T06:15:34 | 2022-03-18T06:15:34 | 161,232,372 | 312 | 86 | MIT | 2023-01-17T03:03:15 | 2018-12-10T20:26:14 | Python | UTF-8 | Python | false | false | 2,868 | py | from __future__ import print_function, absolute_import
import os
import numpy as np
import random
def process_query_sysu(data_path, mode = 'all', relabel=False):
if mode== 'all':
ir_cameras = ['cam3','cam6']
elif mode =='indoor':
ir_cameras = ['cam3','cam6']
file_path = os.path.join(data_path,'exp/test_id.txt')
files_rgb = []
files_ir = []
with open(file_path, 'r') as file:
ids = file.read().splitlines()
ids = [int(y) for y in ids[0].split(',')]
ids = ["%04d" % x for x in ids]
for id in sorted(ids):
for cam in ir_cameras:
img_dir = os.path.join(data_path,cam,id)
if os.path.isdir(img_dir):
new_files = sorted([img_dir+'/'+i for i in os.listdir(img_dir)])
files_ir.extend(new_files)
query_img = []
query_id = []
query_cam = []
for img_path in files_ir:
camid, pid = int(img_path[-15]), int(img_path[-13:-9])
query_img.append(img_path)
query_id.append(pid)
query_cam.append(camid)
return query_img, np.array(query_id), np.array(query_cam)
def process_gallery_sysu(data_path, mode = 'all', trial = 0, relabel=False):
random.seed(trial)
if mode== 'all':
rgb_cameras = ['cam1','cam2','cam4','cam5']
elif mode =='indoor':
rgb_cameras = ['cam1','cam2']
file_path = os.path.join(data_path,'exp/test_id.txt')
files_rgb = []
with open(file_path, 'r') as file:
ids = file.read().splitlines()
ids = [int(y) for y in ids[0].split(',')]
ids = ["%04d" % x for x in ids]
for id in sorted(ids):
for cam in rgb_cameras:
img_dir = os.path.join(data_path,cam,id)
if os.path.isdir(img_dir):
new_files = sorted([img_dir+'/'+i for i in os.listdir(img_dir)])
files_rgb.append(random.choice(new_files))
gall_img = []
gall_id = []
gall_cam = []
for img_path in files_rgb:
camid, pid = int(img_path[-15]), int(img_path[-13:-9])
gall_img.append(img_path)
gall_id.append(pid)
gall_cam.append(camid)
return gall_img, np.array(gall_id), np.array(gall_cam)
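
# Usage sketch (the dataset root below is an assumed example path):
# query_img, query_id, query_cam = process_query_sysu('SYSU-MM01/', mode='all')
# gall_img, gall_id, gall_cam = process_gallery_sysu('SYSU-MM01/', mode='all', trial=0)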
def process_test_regdb(img_dir, trial = 1, modal = 'visible'):
if modal=='visible':
input_data_path = img_dir + 'idx/test_visible_{}'.format(trial) + '.txt'
elif modal=='thermal':
input_data_path = img_dir + 'idx/test_thermal_{}'.format(trial) + '.txt'
with open(input_data_path) as f:
data_file_list = open(input_data_path, 'rt').read().splitlines()
# Get full list of image and labels
file_image = [img_dir + '/' + s.split(' ')[0] for s in data_file_list]
file_label = [int(s.split(' ')[1]) for s in data_file_list]
return file_image, np.array(file_label) | [
"noreply@github.com"
] | mangye16.noreply@github.com |
ebf99e53d9553605d31370984fa7943e122771ac | 7dfaf8c93b4747a36093f09c77eb4308677c2792 | /kafka-rockset-integration/write_data_into_kafka.py | 618c439bf54a30d7d22c4bcb233e35cbb7a89e7f | [
"Apache-2.0"
] | permissive | rockset/recipes | 8ef8da540b4d968a57a297df6402d733a0850522 | d9f6dd1b9674d4b2b0a88d51965338e9fafed893 | refs/heads/master | 2023-07-23T02:33:48.258053 | 2022-07-08T21:15:15 | 2022-07-08T21:15:15 | 161,720,797 | 22 | 12 | Apache-2.0 | 2023-07-20T13:11:12 | 2018-12-14T02:25:13 | JavaScript | UTF-8 | Python | false | false | 4,493 | py | """Write a data into Kafka"""
import datetime
import json
import random
import time
from kafka import KafkaProducer
from config import *
START_INVOICE_ID_FROM = 0
# Generate orders across these countries
# Duplicates are intentional to simulate non-uniform data
COUNTRIES = ['United States', 'United States', 'United States', 'China', 'China', 'India', 'India',
'United Kingdom', 'Canada']
# List of available product database with price
# Duplicates are intentional to simulate non-uniform data
STOCK_DATA = [
{"Description": "WHITE HANGING HEART T-LIGHT HOLDER", "UnitPrice": 2.55, "StockCode": 3001},
{"Description": "WHITE HANGING HEART T-LIGHT HOLDER", "UnitPrice": 2.55, "StockCode": 3001},
{"Description": "WHITE HANGING HEART T-LIGHT HOLDER", "UnitPrice": 2.55, "StockCode": 3001},
{"Description": "WHITE METAL LANTERN", "UnitPrice": 3.39, "StockCode": 3002},
{"Description": "WHITE METAL LANTERN", "UnitPrice": 3.39, "StockCode": 3002},
{"Description": "WHITE METAL LANTERN", "UnitPrice": 3.39, "StockCode": 3002},
{"Description": "CREAM CUPID HEARTS COAT HANGER", "UnitPrice": 2.75, "StockCode": 3003},
{"Description": "CREAM CUPID HEARTS COAT HANGER", "UnitPrice": 2.75, "StockCode": 3003},
{"Description": "KNITTED UNION FLAG HOT WATER BOTTLE", "UnitPrice": 3.39, "StockCode": 3004},
{"Description": "KNITTED UNION FLAG HOT WATER BOTTLE", "UnitPrice": 3.39, "StockCode": 3004},
{"Description": "RED WOOLLY HOTTIE WHITE HEART.", "UnitPrice": 3.39, "StockCode": 3005},
{"Description": "RED WOOLLY HOTTIE WHITE HEART.", "UnitPrice": 3.39, "StockCode": 3005},
{"Description": "SET 7 BABUSHKA NESTING BOXES", "UnitPrice": 7.65, "StockCode": 3006},
{"Description": "GLASS STAR FROSTED T-LIGHT HOLDER", "UnitPrice": 4.25, "StockCode": 3007},
{"Description": "HAND WARMER UNION JACK", "UnitPrice": 1.85, "StockCode": 3008},
{"Description": "HAND WARMER RED POLKA DOT", "UnitPrice": 1.85, "StockCode": 3009},
{"Description": "ASSORTED COLOUR BIRD ORNAMENT", "UnitPrice": 1.69, "StockCode": 3010},
{"Description": "ASSORTED COLOUR BIRD ORNAMENT", "UnitPrice": 1.69, "StockCode": 3010},
{"Description": "ASSORTED COLOUR BIRD ORNAMENT", "UnitPrice": 1.69, "StockCode": 3010},
{"Description": "ASSORTED COLOUR BIRD ORNAMENT", "UnitPrice": 1.69, "StockCode": 3010},
]
def write_orders():
"""
Generate orders per second based on predefined products
Returns:
None
"""
kafka_producer = KafkaProducer(bootstrap_servers=KAFKA_BOOTSTRAP_SERVER)
print('Writing records into Kafka. Kafka Server - {}, Topic - {}'.format(
','.join(KAFKA_BOOTSTRAP_SERVER),
KAFKA_TOPIC))
invoice_no = START_INVOICE_ID_FROM
while True:
# To generate orders or not for this second
if random.choice([True, False]):
# Pick no. of orders to generate per sec
for _ in range(
random.randint(MIN_ORDERS_PER_SEC, MAX_ORDERS_PER_SEC)): # Orders in a sec
invoice_no += 1
invoice_date = int(datetime.datetime.now().timestamp())
country = random.choice(COUNTRIES)
customer_id = random.randint(MIN_CUSTOMER_ID, MAX_CUSTOMER_ID)
# No. of products to include in the order
n_products = random.randint(MIN_PRODUCTS_PER_ORDER, MAX_PRODUCTS_PER_ORDER)
# Pick n_products randomly from STOCK_DATA
for product in random.sample(STOCK_DATA, n_products): # Product Selection
order = {
"InvoiceNo": invoice_no,
"InvoiceDate": invoice_date,
"CustomerID": customer_id,
"Country": country,
"StockCode": product['StockCode'],
"Description": product['Description'],
"Quantity": random.randint(MIN_PRODUCTS_PER_ORDER, MAX_PRODUCTS_PER_ORDER),
"UnitPrice": product['UnitPrice'],
}
kafka_producer.send(KAFKA_TOPIC, str.encode(json.dumps(order)))
# End Product Selection for loop
if invoice_no % 100 == 0:
print('{} records are written'.format(invoice_no))
# Ends Orders in a sec for loop
time.sleep(1)
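
# Companion consumer sketch (illustrative, not part of the original file; it
# reuses only the config names imported above):
# from kafka import KafkaConsumer
# consumer = KafkaConsumer(KAFKA_TOPIC, bootstrap_servers=KAFKA_BOOTSTRAP_SERVER)
# for message in consumer:
#     order = json.loads(message.value)
#     print(order['InvoiceNo'], order['Country'], order['UnitPrice'])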
def main():
write_orders()
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | rockset.noreply@github.com |
5e296195a00d54385ea5c73d4375ee4ca91ad7be | 092241f7f874d78fc280c3aabf64920b7adbdc25 | /run_bert_concat.py | 8900895222c9e80e9908427e3811b659bc9b506c | [] | no_license | d-eremeev/BoolQ-Question-Answering | d6600a24bf3324764024bb4ca93de80f172bd824 | 72c347852e309ad087cf9f57517b769712a7a2a4 | refs/heads/main | 2023-06-17T03:59:27.125981 | 2021-07-07T19:43:35 | 2021-07-07T19:43:35 | 383,907,370 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,664 | py | import os
import hydra
import logging
import transformers as ppb
from BERT.bert_concat import train_bert_concat
@hydra.main(config_path=r'configs\bert', config_name='bert_concat')
def run(cfg):
logger = logging.getLogger(__name__)
cfg_bert = cfg.bert
cfg_datasets = cfg.datasets
cfg_loaders = cfg.loaders
cfg_aug = cfg.augmentations
data_path = cfg_datasets['data_path']
train_path = os.path.join(data_path, cfg_datasets['train_filename'])
val_path = os.path.join(data_path, cfg_datasets['val_filename'])
test_path = os.path.join(data_path, cfg_datasets['test_filename'])
logger.info('Fine-tuning BERT ...')
train_bert_concat(logger=logger,
model_class=eval(cfg_bert['model_class']),
hidden_dropout_prob=cfg_bert['hidden_dropout_prob'],
tokenizer_class=eval(cfg_bert['tokenizer_class']),
pretrained_weights=cfg_bert['pretrained_weights'],
train_path=train_path,
val_path=val_path,
test_path=test_path,
cfg_datasets=cfg_loaders,
freeze_bert=cfg_bert['freeze_bert'],
epochs=cfg_bert['epochs'],
lr=cfg_bert['lr'],
cache_dir=cfg_datasets['cache_dir'],
augment=cfg_aug['augment'],
aug_steps=cfg_aug['aug_steps'],
enable_passage_aug=cfg_aug['enable_passage_aug'],
aug_batch_size=cfg_aug['aug_batch_size'])
logger.info('Done')
if __name__ == '__main__':
run() | [
"noreply@github.com"
] | d-eremeev.noreply@github.com |
287ccbc12cf46d63d2972071600fb8ed009446d4 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_13227.py | 4515d6bed05050aef381bac0d3fb8fb5bd8c6364 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53 | py | # Scrapy : spider which doesn't work
allowed_domains
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
b03dd50feb96af7aa08854250f80c3e5cc73d43f | a7d06a08c3fd7a7f6522c4b6eed9b90930c0ddc9 | /pytorch/detector/detectors/yolov2_slim/__init__.py | 129e1da98a1c1f4468307be458dc5aa46bccd5dd | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | sipeed/maix_train | 85beeddb1e12a05b8c2a8c4f202371a779b9765a | 99605f057d466a8be5d8491508a614bf27784ae2 | refs/heads/master | 2023-08-29T11:11:30.875495 | 2021-11-06T05:43:40 | 2021-11-06T05:43:40 | 310,764,365 | 67 | 41 | Apache-2.0 | 2020-12-11T02:14:06 | 2020-11-07T04:08:57 | Python | UTF-8 | Python | false | false | 70 | py |
from .train import Train
from .test import Test
framwork = "torch"
| [
"CZD666666@gmail.com"
] | CZD666666@gmail.com |
fd47c591e63c195e36920834ec580d354e780d3e | c8d431815df78a3838ca58d8e9f5b221ce0ddca1 | /Gymkana/Gymkana/asgi.py | 76045419a4320a7b4409f8aafca8f27961208bf8 | [] | no_license | pasanca7/Gymkana-formacion-django | a62a21c4a87434f4322b518e213508d9c69371de | f74c49f61cec674bd2520fc1aabbd9c88aa19f43 | refs/heads/master | 2023-08-10T20:59:07.541120 | 2021-09-17T11:19:08 | 2021-09-17T11:19:08 | 404,349,162 | 1 | 0 | null | 2021-09-17T11:19:09 | 2021-09-08T13:01:06 | Python | UTF-8 | Python | false | false | 391 | py | """
ASGI config for Gymkana project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Gymkana.settings')
application = get_asgi_application()
| [
"pabdecar@alum.us.es"
] | pabdecar@alum.us.es |
e87ab6118cff6287802446efa6e5b0769cb7256f | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/82/usersdata/205/42973/submittedfiles/decimal2bin.py | 1bdf2cb16de1f0f06f74293fae6aa792b5408320 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | # -*- coding: utf-8 -*-
n=int(input())  # read the binary digits as a base-10 integer, e.g. 1011
soma=0
i=0
while (n>0):
resto=n%10
soma=soma+resto*(2**i)
i=i+1
n=n//10
print(soma)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
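Despite the decimal2bin filename, the loop above reads the typed digits as a binary numeral and accumulates its decimal value. The same digit-peeling pass as a function, with a worked check (a sketch, not part of the submission):

def bin_digits_to_decimal(n):
    soma, i = 0, 0
    while n > 0:
        soma += (n % 10) * 2 ** i   # rightmost digit, weighted by 2**i
        i += 1
        n //= 10                    # drop the digit just consumed
    return soma

assert bin_digits_to_decimal(1011) == 11   # 1*8 + 0*4 + 1*2 + 1*1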
19336d3be69f6065ae890b7c90e28de999166652 | 564fe9c8409d9ff4ba5f88dd36c0743d417767fa | /opsgenie_swagger/models/contact.py | 8a11eaff8b7fc6180a2fc9809ec2522e53d44ade | [
"Apache-2.0"
] | permissive | criteo-forks/opsgenie-python-sdk | 28cf4b2e5eb5f10df582cfd6393a0e952dee5102 | 2a3924a0bd779eab47937925eb5d42ffbbd751d4 | refs/heads/master | 2020-04-05T23:09:41.002143 | 2019-04-12T13:37:22 | 2019-04-12T13:37:22 | 65,009,459 | 0 | 2 | null | 2016-08-05T10:08:55 | 2016-08-05T10:08:55 | null | UTF-8 | Python | false | false | 4,584 | py | # coding: utf-8
"""
OpsGenie REST API
OpsGenie OpenAPI Specification # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from opsgenie_swagger.models.contact_status import ContactStatus # noqa: F401,E501
class Contact(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'method': 'str',
'to': 'str',
'status': 'ContactStatus'
}
attribute_map = {
'id': 'id',
'method': 'method',
'to': 'to',
'status': 'status'
}
def __init__(self, id=None, method=None, to=None, status=None): # noqa: E501
"""Contact - a model defined in Swagger""" # noqa: E501
self._id = None
self._method = None
self._to = None
self._status = None
self.discriminator = None
if id is not None:
self.id = id
if method is not None:
self.method = method
if to is not None:
self.to = to
if status is not None:
self.status = status
@property
def id(self):
"""Gets the id of this Contact. # noqa: E501
:return: The id of this Contact. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Contact.
:param id: The id of this Contact. # noqa: E501
:type: str
"""
self._id = id
@property
def method(self):
"""Gets the method of this Contact. # noqa: E501
:return: The method of this Contact. # noqa: E501
:rtype: str
"""
return self._method
@method.setter
def method(self, method):
"""Sets the method of this Contact.
:param method: The method of this Contact. # noqa: E501
:type: str
"""
self._method = method
@property
def to(self):
"""Gets the to of this Contact. # noqa: E501
:return: The to of this Contact. # noqa: E501
:rtype: str
"""
return self._to
@to.setter
def to(self, to):
"""Sets the to of this Contact.
:param to: The to of this Contact. # noqa: E501
:type: str
"""
self._to = to
@property
def status(self):
"""Gets the status of this Contact. # noqa: E501
:return: The status of this Contact. # noqa: E501
:rtype: ContactStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this Contact.
:param status: The status of this Contact. # noqa: E501
:type: ContactStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Contact):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"c.chary@criteo.com"
] | c.chary@criteo.com |
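A minimal usage sketch for the generated model above (field values are invented; the import path assumes the package layout named in the file header):

from opsgenie_swagger.models.contact import Contact

contact = Contact(id='c-1', method='email', to='ops@example.com')
print(contact.to_dict())    # {'id': 'c-1', 'method': 'email', 'to': 'ops@example.com', 'status': None}
print(contact == Contact(id='c-1', method='email', to='ops@example.com'))  # True: __eq__ compares __dict__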
f85acdc337815cad6b5327b6734f33e04f597c1e | 30c6a4b4fb271e1b4c1544fc2705a026a4e6442c | /python/matchFaceId.py | e67767675eec17e7f89c839bca2da3f9d1c492a3 | [] | no_license | zedlen/jnmxFunctions | 7a9cc84d79555b6da5c7f0b95042aa93ab58ca14 | e7a517f2a94f8d98e9b38c47823152f072b4aef8 | refs/heads/master | 2022-03-27T19:17:42.004417 | 2019-11-26T04:47:36 | 2019-11-26T04:47:36 | 203,919,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,730 | py | from os import getenv
import pymysql
from pymysql.err import OperationalError
from google.cloud import storage
import tempfile
import numpy
import face_recognition
import json
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
CONNECTION_NAME = getenv(
'INSTANCE_CONNECTION_NAME',
'localhost')
DB_USER = getenv('MYSQL_USER', 'root')
DB_PASSWORD = getenv('MYSQL_PASSWORD', 'jurisnova@2019')
DB_NAME = getenv('MYSQL_DATABASE', 'jurisnova')
mysql_config = {
'user': DB_USER,
'password': DB_PASSWORD,
'db': DB_NAME,
'charset': 'utf8mb4',
'cursorclass': pymysql.cursors.DictCursor,
'autocommit': True
}
# Create SQL connection globally to enable reuse
# PyMySQL does not include support for connection pooling
mysql_conn = None
def __get_cursor():
"""
Helper function to get a cursor
PyMySQL does NOT automatically reconnect,
so we must reconnect explicitly using ping()
"""
try:
return mysql_conn.cursor()
except OperationalError:
mysql_conn.ping(reconnect=True)
return mysql_conn.cursor()
def validate(request):
global mysql_conn
# Initialize connections lazily, in case SQL access isn't needed for this
# GCF instance. Doing so minimizes the number of active SQL connections,
# which helps keep your GCF instances under SQL connection limits.
if not mysql_conn:
try:
mysql_conn = pymysql.connect(**mysql_config)
except OperationalError:
# If production settings fail, use local development ones
mysql_config['unix_socket'] = f'/cloudsql/{CONNECTION_NAME}'
mysql_conn = pymysql.connect(**mysql_config)
# Remember to close SQL resources declared while running this function.
# Keep any declared in global scope (e.g. mysql_conn) for later reuse.
with __get_cursor() as cursor:
request_json = request.get_json(silent=True)
if request_json and 'user_id' in request_json:
            user_id = request_json['user_id']
else:
raise ValueError("JSON is invalid, or missing a 'user_id' property")
sql = '''SELECT
u.photos_path,
u.user_id,
ui.file_name,
ui.image_type
FROM `users_info` u
JOIN users_image ui
ON
ui.user_id=u.user_id
WHERE
`is_valid` = %s
            AND u.user_id = %s
            AND ui.validated = 0;'''
        cursor.execute(sql, (0, user_id))
results = cursor.fetchall()
no_validated_users = []
for result in results:
found = False
for user in no_validated_users:
if user['user_id'] == result['user_id']:
user[result['image_type']] = result['file_name']
found = True
if found is False:
no_validated_users.append({
'user_id': result['user_id'],
result['image_type']: result['file_name'],
'path': result['photos_path']
})
storage_client = storage.Client()
for user in no_validated_users:
try:
with tempfile.NamedTemporaryFile(mode="wb") as jpg:
storage_client.download_blob_to_file('gs://jurisnovamx.appspot.com/'+user['path']+user['INITIAL_PHOTO'], jpg)
face_photo = face_recognition.load_image_file(jpg.name)
face_photo_encoding = face_recognition.face_encodings(face_photo)[0]
            except Exception:
                return json.dumps({'success': False, 'error': 'No face found in photo'})
try:
with tempfile.NamedTemporaryFile(mode="wb") as jpg:
storage_client.download_blob_to_file('gs://jurisnovamx.appspot.com/'+user['path']+user['INITIAL_ID_PHOTO'], jpg)
id_face_photo = face_recognition.load_image_file(jpg.name)
id_face_photo_encoding = face_recognition.face_encodings(id_face_photo)[0]
            except Exception:
                return json.dumps({'success': False, 'error': 'No face found in id photo'})
dist = numpy.linalg.norm(face_photo_encoding-id_face_photo_encoding)
dist = 1-dist
if dist>.50:
print('validated')
# TODO: Create user model
else:
                return json.dumps({'success': False, 'error': "Face photo doesn't match with id"})
        # TODO: Notify the user that their photos were not validated
    return json.dumps({'success': False, 'error': 'No user found'})
| [
"luis@MacBook-Air-de-Luis.local"
] | luis@MacBook-Air-de-Luis.local |
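validate() above measures similarity by hand with numpy.linalg.norm; the face_recognition package ships an equivalent helper. A sketch of the core match step in isolation (file names are placeholders; the 0.5 cutoff mirrors the 1 - dist > .50 test above, since both reduce to raw distance < 0.5):

import face_recognition

selfie = face_recognition.load_image_file('selfie.jpg')
id_photo = face_recognition.load_image_file('id_photo.jpg')
selfie_enc = face_recognition.face_encodings(selfie)[0]     # IndexError if no face found
id_enc = face_recognition.face_encodings(id_photo)[0]

distance = face_recognition.face_distance([selfie_enc], id_enc)[0]
print('match' if distance < 0.5 else 'no match')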
53ecfc18fea92742b8a446bc5099c3766772d8a8 | 4303e8701ad559a911d40498d0eae7f1033c63a5 | /__lib/RSSParse.py | 232b96827a9f4a7693ff434ad5862d918ed6ed25 | [] | no_license | swimclan/groundwire-predictor | 28f9d7738f0f8db0d652d7f17212c38978d7b8c2 | 22c29bccfffc09f7edf8fb1b80d5afd2fd85ed8c | refs/heads/master | 2020-03-13T01:39:31.860011 | 2017-10-01T21:57:01 | 2017-10-01T21:57:01 | 130,908,221 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 881 | py | import xml.etree.ElementTree as ET
import utils
def parse(doc):
ret = {}
rss = ET.fromstring(doc)
channel = rss.find('channel')
ret['copyright'] = channel.find('copyright').text
ret['description'] = channel.find('description').text
items = channel.findall('item')
news_items = []
for item in items:
news_items.append({
'description': item.find('description').text,
'guid': item.find('guid').text,
'link': item.find('link').text,
'pubDate': utils.parsePubDate(item.find('pubDate').text),
'title': item.find('title').text
})
ret['items'] = news_items
ret['language'] = channel.find('language').text
ret['lastBuildDate'] = channel.find('lastBuildDate').text
ret['link'] = channel.find('link').text
ret['title'] = channel.find('title').text
return ret
| [
"matthew.herron77@gmail.com"
] | matthew.herron77@gmail.com |
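A usage sketch for parse() with a minimal feed containing every element it reads (assumes the module above is importable as RSSParse and that the project-local utils.parsePubDate accepts an RFC 822 date string):

import RSSParse

doc = """<rss version="2.0"><channel>
  <copyright>Copyright 2017</copyright>
  <description>Market headlines</description>
  <language>en-us</language>
  <lastBuildDate>Sun, 01 Oct 2017 21:00:00 GMT</lastBuildDate>
  <link>http://example.com</link>
  <title>Example feed</title>
  <item>
    <description>First story</description>
    <guid>http://example.com/1</guid>
    <link>http://example.com/1</link>
    <pubDate>Sun, 01 Oct 2017 20:00:00 GMT</pubDate>
    <title>Story one</title>
  </item>
</channel></rss>"""

feed = RSSParse.parse(doc)
print(feed['title'], '-', len(feed['items']), 'item(s)')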