text string | size int64 | token_count int64 |
|---|---|---|
# Read an integer score from stdin and print its letter grade.
n = int(input())
# Grade bands: exactly 0 -> E, 1-35 -> D, 36-60 -> C, 61-85 -> B, >85 -> A.
# NOTE(review): a negative score falls into the D band — confirm inputs are >= 0.
if n == 0: print('E')
elif n <= 35: print('D')
elif n <= 60: print('C')
elif n <= 85: print('B')
else: print('A')
| 131 | 62 |
import numpy as np
import pickle
from BCMemu import *
### Cosmology
# Baryon (Ob) and total matter (Om) density parameters.
Ob, Om = 0.0463, 0.2793
# Baryonic correction model parameters passed to the BCMemu emulator.
# NOTE(review): confirm these fiducial values against the BCMemu documentation.
bcmdict = {'log10Mc': 13.32,
           'mu'     : 0.93,
           'thej'   : 4.235,
           'gamma'  : 2.25,
           'delta'  : 6.40,
           'eta'    : 0.15,
           'deta'   : 0.14,
           }
# 50 log-spaced wavenumbers from 10^-1 to 10^1.08 (presumably h/Mpc — confirm).
k_eval = 10**np.linspace(-1,1.08,50)
def test_BCM_7param():
    '''
    With this test, the 7 parameter baryonic power suppression is tested.

    The emulator boost is evaluated at five redshifts and the first k-bin
    is compared against reference values from a known-good run.
    '''
    bfcemu = BCM_7param(Ob=Ob, Om=Om)
    # Reference boost values at k_eval[0], keyed by redshift.
    expected = {
        0.0: 0.999129,
        0.5: 0.998741,
        1.0: 0.998928,
        1.5: 0.999030,
        2.0: 0.999575,
    }
    for z, ref in expected.items():
        boost = bfcemu.get_boost(z, bcmdict, k_eval)
        # Tolerance matches the original hard-coded 1e-5 threshold.
        assert np.abs(boost[0] - ref) < 0.00001, \
            'boost mismatch at z={}: {} vs {}'.format(z, boost[0], ref)
from django import forms
from .models import Review,Website
class WebsiteForm(forms.ModelForm):
    """Model-backed form for creating/editing a ``Website``.

    'reviews' and 'owner' are excluded from user input — presumably they are
    assigned programmatically in the view; confirm at the call site.
    """
    class Meta:
        model= Website
        exclude= ['reviews','owner']
class ReviewForm(forms.ModelForm):
    """Model-backed form exposing every field of ``Review``."""
    # Defect fixed: dataset-table residue ("| 271 | 74 |") was fused onto the
    # `fields` line, making the module syntactically invalid.
    class Meta:
        model = Review
        fields = '__all__'
import re
from django import forms
from django.utils.translation import gettext_lazy as _
from usuarios.models import Usuario
class RegistroForm(forms.Form):
    """User-registration form: username validity/uniqueness and password confirmation."""
    # Username must start and end alphanumeric; '.', '_' or '-' are allowed in
    # the middle but never doubled; the {3,18} repetition bounds total length.
    REGEX_USERNAME = r'^[a-zA-Z0-9]([._-](?![._-])|[a-zA-Z0-9]){3,18}[a-zA-Z0-9]$'
    username = forms.CharField(
        max_length=100,
        label=_('Usuário'),
        required=True,
    )
    password1 = forms.CharField(
        widget=forms.PasswordInput(render_value=False),
        label=_('Senha'),
        required=True,
    )
    password2 = forms.CharField(
        widget=forms.PasswordInput(render_value=False),
        label=_('Confirme a senha'),
        required=True,
    )
    first_name = forms.CharField(
        max_length=100,
        label=_('Nome'),
        required=True,
    )
    last_name = forms.CharField(
        max_length=100,
        label=_('Sobrenome'),
        required=True,
    )
    email = forms.EmailField(
        label=_('Email'),
        required=True,
    )
    def clean_username(self):
        """Validate the username against the pattern, then enforce uniqueness."""
        # REGEX_USERNAME is anchored with ^...$, so re.search acts like a full match.
        if not re.search(self.REGEX_USERNAME, self.cleaned_data['username']):
            raise forms.ValidationError(
                _('Favor use somente letras, números e períodos.')
            )
        if Usuario.objects.filter(
            username__exact=self.cleaned_data['username']
        ).exists():
            raise forms.ValidationError(_('Já existe um usuário com o mesmo nome.'))
        else:
            return self.cleaned_data['username']
    def clean(self):
        """Cross-field validation: both password entries must match."""
        # .get() is used because either field may be absent after earlier errors.
        if self.cleaned_data.get('password1') != self.cleaned_data.get('password2'):
            raise forms.ValidationError(_('Os dois campos de senha não coincidem.'))
        return self.cleaned_data
| 1,693 | 548 |
# Originally from:
# https://github.com/loboris/MicroPython_ESP32_psRAM_LoBo/blob/master/MicroPython_BUILD/components/micropython/esp32/modules_examples/bme280.py
import machine, _thread, time
import micropython, gc
import bme280
# I2C bus on ESP32 default pins (SCL=22, SDA=21) at 400 kHz.
# NOTE(review): the 'speed' kwarg and the _thread notification API below are
# LoBo-MicroPython specific (mainline MicroPython uses 'freq' and a smaller
# _thread API) — this module will not run on mainline firmware.
i2c=machine.I2C(scl=machine.Pin(22),sda=machine.Pin(21),speed=400000)
bme=bme280.BME280(i2c=i2c)
def bmevalues():
    """Format one sensor reading as '[HH:MM:SS] T=..C P=...hPa H=..%'."""
    # The integer arithmetic below assumes read_compensated_data() returns
    # fixed-point ints (T in 0.01 degC, P in Pa*256, H in %*1024) — the Bosch
    # driver convention; confirm against the bme280 driver in use.
    t, p, h = bme.read_compensated_data()
    p = p // 256
    pi = p // 100
    pd = p - pi * 100
    hi = h // 1024
    hd = h * 100 // 1024 - hi * 100
    return "[{}] T={}C ".format(time.strftime("%H:%M:%S",time.localtime()), t / 100) + "P={}.{:02d}hPa ".format(pi, pd) + "H={}.{:02d}%".format(hi, hd)
def bmerun(interval=10):
    """Background loop: send a reading every `interval` seconds to the REPL thread.

    Reacts to inter-thread notifications:
      10002       -> send an immediate reading
      10004/10006 -> pause / resume the periodic sends
      10..3600    -> reinterpret the value as the new interval in seconds
    """
    _thread.allowsuspend(True)
    sendmsg = True
    send_time = time.time() + interval
    while True:
        # Poll for notifications every 100 ms until the next send is due.
        while time.time() < send_time:
            notif = _thread.getnotification()
            if notif == 10002:
                _thread.sendmsg(_thread.getReplID(), bmevalues())
            elif notif == 10004:
                sendmsg = False
            elif notif == 10006:
                sendmsg = True
            elif (notif <= 3600) and (notif >= 10):
                interval = notif
                send_time = time.time() + interval
                _thread.sendmsg(_thread.getReplID(), "Interval set to {} seconds".format(interval))
            time.sleep_ms(100)
        send_time = send_time + interval
        if sendmsg:
            _thread.sendmsg(_thread.getReplID(), bmevalues())
# Launch the reporter in its own thread with a 3 KB stack.
_thread.stack_size(3*1024)
bmeth=_thread.start_new_thread("BME280", bmerun, (10,))
| 1,586 | 623 |
from preprocess_package.pdf2pages import pdf2pages
from preprocess_package.cut_images import cut_images_save
import os
import cv2
pwd = os.getcwd()
image_index = 1
if __name__ == '__main__':
    # Candidate input locations; `path_now` selects which source is processed.
    imgs_file_path = './pictures/发票扫描图片'
    failed_imgs_file_path = './pictures/failed_images'
    single_img_file_path = './pictures/single_image/'
    pdf_file_path = './pictures/pdf/山东宏瑞达开票4.28.pdf'
    img_path = './pictures/image/Image_00096.jpg'
    crops_save_path = './results/crops/'
    path_now = pdf_file_path
    # # # ------ convert PDF pages to images ------
    if path_now[-3:] == 'pdf':
        imgs_list = pdf2pages(path_now)
        for img in imgs_list: # PDF pages (already in-memory images)
            cut_images_save(img=img, if_show_pre=False, if_show=False, img_name='', save_path='./results/crops/')
    else: # directory of single image files
        for img_name in os.listdir(path_now):
            img = path_now + "/" + img_name
            # NOTE(review): cv2.imread returns None for unreadable files and is
            # not checked here — cut_images_save would receive None.
            img = cv2.imread(img)
            cut_images_save(img=img, if_show_pre=False, if_show=False, img_name='', save_path='./results/crops/')
#
# # -----------单张图片----------
# if type(imgs_list) == numpy.ndarray:
# invoices_num = detect_image_counts(imgs_list)
# if invoices_num > 1:
# cut_images_save(imgs_list, crops_save_path)
# -----------多张图片----------
# else:
# imgs_list = cv2.imread(imgs_file_path)
# path = imgs_file_path
#
# global if_show_pre
# if_show_pre = False
# for img_name in os.listdir(crops_save_path): # 图片文件夹
# img = crops_save_path + "/" + img_name
# img = cv2.imread(img)
# cut_images_save(img, if_show_pre, img_name, crops_save_path)
| 1,644 | 661 |
# bot.py
import os
from discord.ext import commands
from dotenv import load_dotenv
from rachmaninoff_bot import RachmaninoffBot
from cogs.general_cog import GeneralCog
from cogs.weather_cog import WeatherCog
from cogs.traffic_cog import TrafficCog
from cogs.covid_cog import CovidCog
from cogs.stock_cog import StockCog
# Load configuration from a .env file / the environment.
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
GUILD = os.getenv('DISCORD_GUILD')
MONGODB_CONNECTION = os.getenv('MONGODB_CONNECTION')
# NOTE(review): ALLOWED_USERS is passed through as a raw string (or None) —
# confirm the cogs parse it (e.g. comma-split) rather than expecting a list.
ALLOWED_USERS = os.getenv('ALLOWED_USERS')
OPENWEATHERMAP_APIKEY = os.getenv('OPENWEATHERMAP_APIKEY')
# Build the bot, register all cogs, then block on the gateway connection.
bot = RachmaninoffBot(command_prefix='!')
bot.add_cog(TrafficCog(bot=bot,
                       mongodb_connection=MONGODB_CONNECTION,
                       allowed_users=ALLOWED_USERS))
bot.add_cog(GeneralCog(bot=bot, allowed_users=ALLOWED_USERS))
bot.add_cog(WeatherCog(bot=bot,
                       allowed_users=ALLOWED_USERS,
                       openweathermap_apikey=OPENWEATHERMAP_APIKEY,
                       mongodb_connection=MONGODB_CONNECTION))
bot.add_cog(CovidCog(bot=bot, allowed_users=ALLOWED_USERS))
bot.add_cog(StockCog(bot=bot, allowed_users=ALLOWED_USERS))
bot.run(TOKEN)
| 1,238 | 462 |
# -*- coding: utf-8 -*-
import logging
import logging.handlers
from logging.handlers import TimedRotatingFileHandler
import gzip
import os
import time
from geeker.functions import Singleton
class GzTimedRotatingFileHandler(TimedRotatingFileHandler):
    """TimedRotatingFileHandler that gzip-compresses each rotated log file."""
    def __init__(self, filename, when, interval, **kwargs):
        super(GzTimedRotatingFileHandler, self).__init__(filename, when, interval, **kwargs)
    @staticmethod
    def do_gzip(old_log):
        # Compress the rotated file (dropping the first '.log' from its name,
        # e.g. 'info.log.20240101.log' -> 'info.20240101.log.gz') and delete
        # the uncompressed original.
        with open(old_log, 'rb') as old:
            with gzip.open(old_log.replace('.log', '', 1) + '.gz', 'wb') as comp_log:
                comp_log.writelines(old)
        os.remove(old_log)
    # overwrite
    def doRollover(self):
        # Mirrors stdlib TimedRotatingFileHandler.doRollover with one addition:
        # the gzip step after the rename, before pruning old backups. The
        # statement order is load-bearing — keep in sync with the stdlib.
        if self.stream:
            self.stream.close()
            self.stream = None
        current_time = int(time.time())
        dst_now = time.localtime(current_time)[-1]
        t = self.rolloverAt - self.interval
        if self.utc:
            time_tuple = time.gmtime(t)
        else:
            time_tuple = time.localtime(t)
        dst_then = time_tuple[-1]
        # Adjust the rotated-file timestamp if DST flipped since the period start.
        if dst_now != dst_then:
            if dst_now:
                addend = 3600
            else:
                addend = -3600
            time_tuple = time.localtime(t + addend)
        dfn = self.baseFilename + "." + time.strftime(self.suffix, time_tuple)
        if os.path.exists(dfn):
            os.remove(dfn)
        if os.path.exists(self.baseFilename):
            os.rename(self.baseFilename, dfn)
            self.do_gzip(dfn)
        if self.backupCount > 0:
            for s in self.getFilesToDelete():
                os.remove(s)
        if not self.delay:
            self.stream = self._open()
        new_rollover_at = self.computeRollover(current_time)
        while new_rollover_at <= current_time:
            new_rollover_at = new_rollover_at + self.interval
        # Shift the next rollover by an hour if DST changes before it fires.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            ds_att_rollover = time.localtime(new_rollover_at)[-1]
            if dst_now != ds_att_rollover:
                if not dst_now:  # DST kicks in before next rollover, so we need to deduct an hour
                    addend = -3600
                else:  # DST bows out before next rollover, so we need to add an hour
                    addend = 3600
                new_rollover_at += addend
        self.rolloverAt = new_rollover_at
class LogBase(Singleton):
    """Singleton logger factory writing per-level, daily-rotated, gzipped log files."""
    def __init__(self, dir_path='./logs/',
                 logger_name='special_log_name',
                 info_name='info.log',
                 error_name='error.log',
                 warning_name='warning.log',
                 debug_name='debug.log',
                 interval=7,
                 detail=False,
                 debug=False,
                 info=True,
                 error=True,
                 warning=True,
                 ):
        # File name per level; a handler is created only for levels whose
        # corresponding boolean flag (debug/info/warning/error) is True.
        self.info_name = info_name
        self.error_name = error_name
        self.warning_name = warning_name
        self.debug_name = debug_name
        self.path = dir_path
        self.interval = interval
        self._logger = logging.getLogger(logger_name)
        self._debug = debug
        self._warning = warning
        self._error = error
        self._info = info
        # detail=True adds file/function/line info to the log format.
        self._detail = detail
    def __handler(self, log_name):
        # Rotate every `interval` days; keep 3 (compressed) backups.
        handler = GzTimedRotatingFileHandler(self.path + log_name,
                                             when='D',
                                             interval=self.interval,
                                             backupCount=3,
                                             encoding='utf-8')
        return handler
    def __filter_message(self, handler, log_level):
        """
        Keep only records of exactly `log_level` on this handler, so each file
        contains a single level (no spill-over from other levels).
        :param handler: handler
        :param log_level: numeric logging level (e.g. logging.INFO)
        :return: handler
        """
        if self._detail:
            formatter = logging.Formatter("%(asctime)s - %(filename)s - %(funcName)s - %(lineno)d - %(message)s",
                                          "%Y%m%d %H:%M:%S")
        else:
            formatter = logging.Formatter("%(asctime)s - %(message)s", "%Y%m%d %H:%M:%S")
        _filter = logging.Filter()
        handler.suffix = "%Y%m%d.log"
        handler.setFormatter(formatter)
        handler.setLevel(log_level)
        # Exact-level match, not >= as handler.setLevel alone would give.
        _filter.filter = lambda record: record.levelno == log_level
        handler.addFilter(_filter)
        return handler
    def _get_logger(self):
        # Attach handlers only once, to prevent duplicate log records.
        if not self._logger.handlers:
            # Logger itself is wide open (DEBUG); per-handler levels filter.
            self._logger.setLevel(logging.DEBUG)
            levels = [self._debug, self._info, self._warning, self._error]
            log_names = [self.debug_name, self.info_name, self.warning_name, self.error_name]
            levels_ = [10, 20, 30, 40]
            for i, lev in enumerate(levels):
                if lev:
                    _handler = self.__handler(log_names[i])
                    _handler = self.__filter_message(_handler, levels_[i])
                    # Attach the configured handler to the logger object.
                    self._logger.addHandler(_handler)
        return self._logger
| 5,305 | 1,670 |
import json
import logging
from binance.helpers import round_step_size
from sqlalchemy import false
from ..enums import *
import bson
import abc
import itertools
from ..objects import EState, EOrderType, ECommand, EnhancedJSONEncoder
from ..utils import safe_sum, round_step_downward, truncate, safe_multiply, safe_substract
from .. import binance_filters as filters
from ..exceptions import NotImplementedException
logger = logging.getLogger('app')
class StrategyBase(metaclass=abc.ABCMeta):
    """Abstract base for trading strategies.

    Manages the lifecycle of live trade objects (LTOs), allocates capital
    across configured pairs, applies exchange filters to orders, and defers
    entry decisions to subclass hooks (make_decision, on_update, on_cancel,
    on_waiting_exit, ...). Many hooks are declared @staticmethod but take
    `self` explicitly and are invoked as StrategyBase.func(self, ...).
    """
    # NOTE: fee can stay here until a better place is found
    fee = 0
    def __init__(self, _name, _config, _symbol_info):
        self.name = _name
        self.alloc_ratio = 0
        self.logger = logging.getLogger('app.{}'.format(__name__))
        self.config = _config['strategy'][self.name]
        self.max_lto = self.config.get('max_lto',1)
        # NOTE: Assigning the fee multiple times is not the most optimal solution
        StrategyBase.fee = _config['broker'].get('fee', 0)
        # TODO: Rename this config as strategy config etc. because some modules means the whole config dict some are just a portion
        self.quote_currency = _config['broker']['quote_currency']
        # TODO: Make proper handling for symbol_info
        self.symbol_info = _symbol_info
        # NOTE: Hardcoded time-scales list (scales should be in ascending order)
        self.min_period = self.config['time_scales'][0]
        self.meta_do = list(itertools.product(self.config['time_scales'], self.config['pairs']))
        # It seems possible to have this on_STAT_EXIT_EXP() like approach. Surely needs to be tried again.
        # Since it facilitates so much new strategy creation and modular implementation
        # NOTE: strategywise_alloc_rate determines the available rate of use from the main capital
        # If self.strategywise_alloc_rate is 0.25 then this strategy can use max %25 of the main capital
        self.strategywise_alloc_rate = 0 # Will be filled by the strategy manager
        # NOTE: pairwise_alloc_rate determines the available rate of use from the strategywise allocated capital
        # If self.strategywise_alloc_rate is 0.25 then this strategy can use max %25 of the main capital
        pass
    @staticmethod
    def is_lto_dead(trade):
        # A trade is "dead" once it is flagged for cancel or already closed.
        if trade.command == ECommand.CANCEL or trade.status == EState.CLOSED:
            return True # Trade is dead
        else:
            return False # Trade is alive # Skip evaluation if non of this is true (LTO will be alive until the next cycle)
    @staticmethod
    async def run_logic(self, analysis_dict, trade_list, ikarus_time, total_qc, free_qc):
        """Run one evaluation cycle: update live trades, then open new ones.

        Args:
            analysis_dict: per-pair/per-timescale market analysis
            trade_list: current live trade objects (LTOs)
            ikarus_time: current cycle timestamp
            total_qc: total quote-currency capital
            free_qc: free (unallocated) quote-currency capital
        Returns:
            list of newly created trade objects (possibly empty)
        """
        # Preliminary condition: all of the config['pairs'] exist in analysis_dict
        if not set(self.config['pairs']).issubset(analysis_dict.keys()):
            self.logger.warn(f"Configured pair \"{self.config['pairs']}\" does not exist in analysis_dict. Skipping {self.name}.run")
            return []
        # Initialize trade_dict to be filled
        trade_objects = []
        # Handle LTOs separately before the new evaluation
        # Create a mapping between the pair and lto such as {'BTCUSDT':{...}, ...}
        pair_grouped_ltos = {}
        alive_lto_counter = 0
        in_trade_capital = 0
        dead_lto_capital = 0
        for lto_idx in range(len(trade_list)):
            # If handle_lto_logic fails then it means that the trade_list[lto_idx] is unchanged.
            if not await StrategyBase.handle_lto_logic(self, analysis_dict, trade_list[lto_idx], ikarus_time):
                self.logger.warn(f"Function failed: 'handle_lto_logic'. Trade info: '{trade_list[lto_idx]._id}', '{trade_list[lto_idx].strategy}'")
            pair_grouped_ltos[trade_list[lto_idx].pair] = trade_list[lto_idx]
            # It is needed to know how many of LTOs are dead or will be dead
            if not StrategyBase.is_lto_dead(trade_list[lto_idx]):
                # NOTE: in_trade_capital is only calcualted for LTOs that will last until at least next candle
                #in_trade_capital += lto_list[lto_idx][PHASE_ENTER][TYPE_LIMIT]['amount']
                # NOTE: For the enter_expire, PHASE_ENTER can be directly reflected to balance
                # market_exit is not considered as dead lto
                # The result of the OCO orders is unknown
                in_trade_capital = safe_sum(in_trade_capital, trade_list[lto_idx].enter.amount)
                alive_lto_counter += 1
                # NOTE: TYPE_MARKET PHASE:_EXIT LTOs are considered as alive right here. Not sure if it is a good approach
            else:
                # Dead capital
                dead_lto_capital = safe_sum(dead_lto_capital, trade_list[lto_idx].enter.amount)
        # NOTE: Only iterate for the configured pairs. Do not run the strategy if any of them is missing in analysis_dict
        total_lto_slot = min(self.max_lto, len(self.config['pairs']))
        empty_lto_slot = total_lto_slot - alive_lto_counter
        if empty_lto_slot < 1:
            return [] # TODO Debug this ansync LTO issue buy doing debugging around here
        # Evaluate pairwise_alloc_share
        strategy_capital = safe_multiply(total_qc, self.strategywise_alloc_rate)
        #for lto in lto_list:
        #    in_trade_capital += lto[PHASE_ENTER][TYPE_LIMIT]['amount']
        free_strategy_capital = safe_substract(strategy_capital, in_trade_capital)
        available_capital = min(free_strategy_capital, safe_sum(free_qc, dead_lto_capital))
        # TODO: This can be updated to use some kind of precision from the symbol info instead of hardcoded 8
        pairwise_alloc_share = truncate(available_capital/empty_lto_slot, 8)
        #available_lto_capital = min(pairwise_alloc_share, free_qc+dead_lto_capital)
        # Iterate over pairs and make decisions about them
        for ao_pair in self.config['pairs']:
            # Break if there is no empty_lto_slot left
            if empty_lto_slot < 1:
                break
            # Continue if the LTO of the pair is not dead
            if ao_pair in pair_grouped_ltos.keys():
                if not StrategyBase.is_lto_dead(pair_grouped_ltos[ao_pair]):
                    continue
            # Perform evaluation
            if trade:= await self.make_decision(analysis_dict, ao_pair, ikarus_time, pairwise_alloc_share):
                # Apply exchange filters
                if not StrategyBase.apply_exchange_filters(trade.enter, self.symbol_info[ao_pair]):
                    continue
                trade_objects.append(trade)
                empty_lto_slot -= 1
        return trade_objects
    @staticmethod
    async def handle_lto_logic(self, analysis_dict, trade, ikarus_time):
        """
        This function decides what to do for the LTOs based on their 'status',
        dispatching to the subclass hook configured in config['action_mapping'].
        Returns True when the applied action succeeded (or none was needed).
        """
        is_success = False
        if trade.status == EState.ENTER_EXP:
            if self.config['action_mapping'][EState.ENTER_EXP] == ECommand.CANCEL:
                is_success = await self.on_cancel(trade)
        elif trade.status == EState.EXIT_EXP:
            if self.config['action_mapping'][EState.EXIT_EXP] == ECommand.UPDATE:
                is_success = await self.on_update(trade, ikarus_time, analysis_dict=analysis_dict)
            elif self.config['action_mapping'][EState.EXIT_EXP] == ECommand.MARKET_EXIT:
                # NOTE: Market exit requires the exit prices to be known, thus provide the analysis_dict to that
                is_success = await StrategyBase.on_market_exit(self, trade, analysis_dict)
        elif trade.status == EState.WAITING_EXIT:
            # LTO is entered succesfully, so exit order should be executed
            # NOTE: expire of the exit_module can be calculated after the trade entered
            is_success = await self.on_waiting_exit(trade, analysis_dict)
        else:
            is_success = True
        return is_success
    # NOTE(review): abc.abstractclassmethod is deprecated since Python 3.3 in
    # favor of stacking @classmethod and @abstractmethod.
    @abc.abstractclassmethod
    async def on_update(self):
        pass
    @staticmethod
    async def on_market_exit(self, trade, analysis_dict):
        # TODO: Create market exit logic
        raise NotImplementedException()
        '''
        #lto = await StrategyBase._config_market_exit(lto, self.config['exit']['type'])
        lto['exit'] = await StrategyBase._create_exit_module(
            TYPE_MARKET,
            0,
            lto['result'][PHASE_ENTER]['quantity'],
            analysis_dict[lto['pair']][self.min_period]['close'],
            0)
        lto['exit'][TYPE_MARKET] = await StrategyBase.apply_exchange_filters(lto, self.symbol_info[lto['pair']])
        trade.exi
        trade.command = ECommand.MARKET_EXIT
        self.logger.info(f'LTO: market exit configured') # TODO: Add orderId
        '''
        # NOTE(review): unreachable after the raise above.
        return trade
    @abc.abstractclassmethod
    async def on_waiting_exit(self):
        pass
    @abc.abstractclassmethod
    async def on_closed(self):
        pass
    @classmethod
    def __subclasshook__(cls, subclass):
        # Structural check: anything with callable run() and dump_to() counts
        # as a StrategyBase subclass.
        return (hasattr(subclass, 'run') and
                callable(subclass.run) and
                hasattr(subclass, 'dump_to') and
                callable(subclass.dump_to) or
                NotImplemented)
    @staticmethod
    def _eval_future_candle_time(start_time, count, minute): return bson.Int64(start_time + count*minute*60*1000)
    @staticmethod
    async def _config_market_exit(lto, type):
        # TODO: NEXT NEXT Integrate fee to market order
        #       Continue here
        # TODO: Integrate price to market order, even if it has no use
        #       For now, it works and I am not gonna touch it for a rework
        lto['action'] = ACTN_MARKET_EXIT
        lto['exit'][TYPE_MARKET] = {
            'amount': lto['exit'][type]['amount'],
            'quantity': lto['exit'][type]['quantity'],
            'orderId': '',
        }
        return lto
    @staticmethod
    def apply_exchange_filters(trade_order, symbol_info):
        # TODO: Make the function orer specific using trade_order instead of trade
        """
        - Call this method prior to any order placement
        - Apply the filter of exchange pair
        - This methhod does not check if the current conditiones are good to go.
            If a filter is not satisfied then it would create an exception. Validation
            costs time. Maybe in future
        - Separating enter and exit does not make any sense since the filters are valid for both side.
        Returns:
            bool: True if the order passed all filters (mutated in place), False otherwise
        """
        # LOT_SIZE
        # https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#lot_size
        if result := filters.lot_size(trade_order.quantity, symbol_info):
            trade_order.quantity = result
        else:
            #logger.error(f"Filter failure: LOT_SIZE. {trade.strategy} in phase {phase} with quantity {str(trade.enter.quantity)}")
            return False
        # PRICE_FILTER
        # https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#price_filter
        if type(trade_order).__name__ == EOrderType.MARKET:
            pass
        elif type(trade_order).__name__ == EOrderType.LIMIT:
            trade_order.set_price(round_step_downward(trade_order.price, float(symbol_info['filters'][0]['tickSize']))) # Fixing PRICE_FILTER: tickSize
            if trade_order.price > float(symbol_info['filters'][0]['maxPrice']):
                pass
                # TODO: BUG: NEXT: Add proper error handling or check for the prices
        elif type(trade_order).__name__ == EOrderType.OCO:
            trade_order.set_price(round_step_downward(trade_order.price, float(symbol_info['filters'][0]['tickSize']))) # Fixing PRICE_FILTER: tickSize
            trade_order.stopPrice = round_step_downward(trade_order.stopPrice, float(symbol_info['filters'][0]['tickSize']))
            trade_order.stopLimitPrice = round_step_downward(trade_order.stopLimitPrice, float(symbol_info['filters'][0]['tickSize']))
            if not filters.min_notional(trade_order.stopPrice, trade_order.quantity, symbol_info):
                logger.warn(f"Trade object skipped due to MIN_NOTIONAL filter for {symbol_info['symbol']}. NTO: {json.dumps(trade_order, cls=EnhancedJSONEncoder)}")
                return False
        # MIN_NOTIONAL
        # https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#min_notional
        if not filters.min_notional(trade_order.price, trade_order.quantity, symbol_info):
            logger.warn(f"Trade object skipped due to MIN_NOTIONAL filter for {symbol_info['symbol']}. NTO: {json.dumps(trade_order, cls=EnhancedJSONEncoder)}")
            return False
        return True
| 13,245 | 3,974 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import optparse
import os
import sys
import zipfile
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
from scripts.util import build_utils # noqa: E402
def parse_args(args):
    """Parse command-line options for the NDK archiving step.

    Returns the optparse options object; unknown positionals are ignored.
    """
    expanded = build_utils.expand_file_args(args)
    parser = optparse.OptionParser()
    build_utils.add_depfile_option(parser)
    # One (flag, help) pair per supported option; all are plain string options.
    for flag, description in (
            ('--output', 'generated ndk stub file'),
            ('--os-irrelevant-dir', 'base directory of ndk common files'),
            ('--os-specific-dir', 'base directory of os specific stuff'),
            ('--prefix', 'prefix string of directory in archive zipfile'),
            ('--notice-file', 'path to notice file'),
            ('--record-path', 'path to md5.stamp file')):
        parser.add_option(flag, help=description)
    options, _ = parser.parse_args(expanded)
    return options
def do_archive(output, directory, prefix, compress_fn):
    """Append every file under *directory* to the zip archive at *output*.

    Entries are stored relative to *directory*, optionally nested under
    *prefix* inside the archive. Compression per file is decided by
    *compress_fn(path)* when given, otherwise left to the zip default.
    """
    paths = [os.path.join(base, name)
             for base, _, names in os.walk(directory)
             for name in names]
    with zipfile.ZipFile(output, 'a') as archive:
        for src in paths:
            compress = compress_fn(src) if compress_fn else None
            relative = os.path.relpath(src, directory)
            zip_path = os.path.join(prefix, relative) if prefix else relative
            build_utils.add_to_zip_hermetic(archive,
                                            zip_path,
                                            src_path=src,
                                            compress=compress)
def archive_ndk(output, os_irrelevant_dir, os_specific_dir, prefix,
                compress_fn, notice):
    """Build the NDK zip: truncate it, add both directory trees, append the notice."""
    # Create an empty zipfile first so repeated builds start from scratch.
    with zipfile.ZipFile(output, 'w'):
        pass
    for tree in (os_irrelevant_dir, os_specific_dir):
        do_archive(output, tree, prefix, compress_fn)
    # Finally add the notice file at the archive root (under prefix if set).
    compress = compress_fn(notice) if compress_fn else None
    notice_name = os.path.basename(notice)
    zip_path = os.path.join(prefix, notice_name) if prefix else notice_name
    with zipfile.ZipFile(output, 'a') as zip_file:
        build_utils.add_to_zip_hermetic(zip_file,
                                        zip_path,
                                        src_path=notice,
                                        compress=compress)
def main(args):
    # Archive the NDK (OS-common + OS-specific trees + notice file), skipping
    # the work when inputs are unchanged according to the md5 record file.
    options = parse_args(args)
    os_irrelevant_dir = options.os_irrelevant_dir
    os_specific_dir = options.os_specific_dir
    # Every input file plus the notice becomes a depfile dependency.
    depfile_deps = set(
        build_utils.get_all_files(os_irrelevant_dir) +
        build_utils.get_all_files(os_specific_dir))
    depfile_deps.add(options.notice_file)
    # `lambda _: True` as compress_fn: compress every entry in the archive.
    build_utils.call_and_write_depfile_if_stale(lambda: archive_ndk(
        options.output, os_irrelevant_dir, os_specific_dir, options.prefix,
        lambda _: True, options.notice_file),
                                                options,
                                                depfile_deps=depfile_deps,
                                                input_paths=depfile_deps,
                                                output_paths=([options.output]),
                                                record_path=options.record_path,
                                                force=False,
                                                add_pydeps=False)
if __name__ == '__main__':
    # Exit with main()'s return code (None maps to 0) when run as a script.
    sys.exit(main(sys.argv[1:]))
| 4,150 | 1,223 |
import numpy as np
"""
some really utils functions
"""
def get_score_label_array_from_dict(score_dict, label_dict):
    """Collapse per-key score/label lists into two aligned 1-D float arrays.

    For each key (in ``score_dict`` iteration order) the maximum of its score
    list and the maximum of the matching label list are taken, so the two
    outputs line up index-for-index.

    :param score_dict: mapping key -> list of scores (e.g. defaultdict(list))
    :param label_dict: mapping key -> list of labels, same keys as score_dict
    :return: tuple (score, label) of numpy float arrays
    """
    # Defects fixed: dataset-table residue was fused onto the return line
    # (syntax error), and the np.ones pre-fill was pointless since every
    # element was overwritten.
    assert len(score_dict) == len(label_dict), "The score_dict and label_dict don't match"
    keys = list(score_dict)
    score = np.array([max(score_dict[key]) for key in keys], dtype=float)
    label = np.array([max(label_dict[key]) for key in keys], dtype=float)
    return score, label
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the bonAppetit function below.
def bonAppetit(bill, k, b):
    """Print whether the charge `b` was fair for a shared bill.

    One diner skipped item `k`; her fair share is half of the remaining
    total (integer division). Prints 'Bon Appetit' when the charge matches,
    otherwise the refund owed.
    """
    fair_share = (sum(bill) - bill[k]) // 2
    if fair_share == b:
        print('Bon Appetit')
    else:
        print(b - fair_share)
if __name__ == '__main__':
    # Input format: "n k" on line 1, the n bill prices on line 2,
    # the charged amount b on line 3.
    nk = input().rstrip().split()
    n = int(nk[0])
    k = int(nk[1])
    bill = list(map(int, input().rstrip().split()))
    b = int(input().strip())
    bonAppetit(bill, k, b)
| 512 | 202 |
from kusto_tool import expression as exp
from pytest import fixture
from .fake_database import FakeDatabase
@fixture
def db():
    # Fake database targeting cluster 'test', database 'testdb'.
    return FakeDatabase("test", "testdb")
@fixture
def tbl(db):
    # Table expression with a str column 'foo' and an int column 'bar'.
    return exp.TableExpr("tbl", database=db, columns={"foo": str, "bar": int})
# --- Rendering of bare Where/Infix comparison expressions ---
def test_where_eq_str():
    actual = str(exp.Where(exp.Infix(exp.OP.EQ, exp.Column("foo", str), "a")))
    expected = "| where foo == 'a'"
    assert actual == expected
def test_where_eq_int():
    actual = str(exp.Where(exp.Infix(exp.OP.EQ, exp.Column("foo", str), 2)))
    expected = "| where foo == 2"
    assert actual == expected
def test_where_ne_int():
    actual = str(exp.Where(exp.Infix(exp.OP.NE, exp.Column("foo", str), 2)))
    expected = "| where foo != 2"
    assert actual == expected
def test_where_lt_int():
    actual = str(exp.Where(exp.Infix(exp.OP.LT, exp.Column("foo", str), 2)))
    expected = "| where foo < 2"
    assert actual == expected
def test_where_lte_int():
    actual = str(exp.Where(exp.Infix(exp.OP.LE, exp.Column("foo", str), 2)))
    expected = "| where foo <= 2"
    assert actual == expected
def test_where_gt_int():
    actual = str(exp.Where(exp.Infix(exp.OP.GT, exp.Column("foo", str), 2)))
    expected = "| where foo > 2"
    assert actual == expected
def test_where_gte_int():
    actual = str(exp.Where(exp.Infix(exp.OP.GE, exp.Column("foo", str), 2)))
    expected = "| where foo >= 2"
    assert actual == expected
# --- Rendering of full table queries built via operator overloads ---
def test_table_where_eq(tbl):
    q = str(tbl.where(tbl.bar == "foo"))
    ex = "cluster('test').database('testdb').['tbl']\n| where bar == 'foo'\n"
    assert q == ex
def test_table_where_ne(tbl):
    q = str(tbl.where(tbl.bar != "foo"))
    ex = "cluster('test').database('testdb').['tbl']\n| where bar != 'foo'\n"
    assert q == ex
def test_table_where_lt(tbl):
    q = str(tbl.where(tbl.bar < "foo"))
    ex = "cluster('test').database('testdb').['tbl']\n| where bar < 'foo'\n"
    assert q == ex
def test_table_where_le(tbl):
    q = str(tbl.where(tbl.bar <= "foo"))
    ex = "cluster('test').database('testdb').['tbl']\n| where bar <= 'foo'\n"
    assert q == ex
def test_table_where_gt(tbl):
    q = str(tbl.where(tbl.bar > "foo"))
    ex = "cluster('test').database('testdb').['tbl']\n| where bar > 'foo'\n"
    assert q == ex
def test_table_where_ge(tbl):
    q = str(tbl.where(tbl.bar >= "foo"))
    ex = "cluster('test').database('testdb').['tbl']\n| where bar >= 'foo'\n"
    assert q == ex
def test_where_repr():
    where = exp.Where(exp.Infix(exp.OP.EQ, exp.Column("foo", str), 2))
    assert repr(where) == "Where(Column(\"foo\", <class 'str'>) == 2)"
# --- Boolean combinators and miscellaneous predicates ---
def test_where_and():
    foo = exp.Column("foo", str)
    bar = exp.Column("bar", str)
    where = exp.Where((foo == "a") & (bar == "b"))
    assert str(where) == "| where (foo == 'a') and (bar == 'b')"
def test_where_or():
    foo = exp.Column("foo", str)
    bar = exp.Column("bar", str)
    where = exp.Where((foo == "a") | (bar == "b"))
    assert str(where) == "| where (foo == 'a') or (bar == 'b')"
def test_not():
    foo = exp.Column("foo", bool)
    where = exp.Where(~(foo == "a"))
    assert str(where) == "| where not(foo == 'a')"
def test_where_cols():
    foo = exp.Column("foo", str)
    bar = exp.Column("bar", str)
    where = exp.Where(foo == bar)
    assert str(where) == "| where foo == bar"
def test_where_isin():
    foo = exp.Column("foo", str)
    where = exp.Where(foo.isin("bar", "baz"))
    assert str(where) == "| where foo in ('bar', 'baz')"
def test_where_isin_int():
    foo = exp.Column("foo", str)
    where = exp.Where(foo.isin(1, 2, 3))
    assert str(where) == "| where foo in (1, 2, 3)"
| 3,675 | 1,387 |
import requests
import simplejson as json
from six.moves.urllib.parse import urljoin
from datadog_checks.checks import AgentCheck
class TwitchtvCheck(AgentCheck):
CHECK_NAME = 'twitchtv'
def __init__(self, name, init_config, agentConfig, instances=None):
super(TwitchtvCheck, self).__init__(name, init_config, agentConfig, instances)
def check(self, instance):
# parse config fields
self._validate_instance(instance)
api_url = instance['api_url']
client_id = instance['client_id']
channels = instance.get("channels", [])
# get channel metrics from API
payload = {}
tags = {}
try:
payload = self._get_channel_data(instance, api_url, client_id, channels)
tags = self._get_game_tags(instance, api_url, client_id, payload)
except Exception, e:
self.log.error("Failed to get metrics with error: {}".format(e))
# send to DD
try:
self._report_channel_metrics(instance, payload, tags)
except Exception, e:
self.log.error("Failed to report channel metrics with error: {}".format(e))
# get follower metrics from API
users_payload = {}
follows = {}
try:
users_payload = self._get_user_data(instance, api_url, client_id, channels)
follows = self._get_all_follows(instance, api_url, client_id, users_payload)
except Exception, e:
self.log.error("Failed to get user follows with error: {}".format(e))
# send to DD
try:
self._report_follows_metrics(instance, follows)
except Exception, e:
self.log.error("Failed to report follows metrics with error: {}".format(e))
def _validate_instance(self, instance):
if any([x for x in ['api_url', 'client_id', 'channels'] if x not in instance]):
raise Exception("Missing 'api_url', 'client_id', or 'channels' in config")
def _report_channel_metrics(self, instance, payload, tags):
metric_name = 'twitchtv.live.viewers'
for ch in payload['data']:
self.gauge(metric_name, ch['viewer_count'],
tags=instance.get('tags', []) +
['channel:' + ch['user_name']] +
['language:' + ch['language']] +
['game:' + tags[ch['user_name']]])
def _report_follows_metrics(self, instance, follows):
metric_name = 'twitchtv.followers'
for ch, total in follows.items():
self.gauge(metric_name, total,
tags=instance.get('tags', []) +
['channel:' + ch])
def _get_channel_data(self, instance, api_url, client_id, channels):
path = "streams"
headers = {'Client-ID': client_id}
params = [('user_login', ch) for ch in channels]
r = requests.get(urljoin(api_url, path), headers=headers, params=params, timeout=60)
r.raise_for_status()
return json.loads(r.text)
def _get_game_data(self, instance, api_url, client_id, game_id):
path = "games"
headers = {'Client-ID': client_id}
params = {'id': game_id}
r = requests.get(urljoin(api_url, path), headers=headers, params=params, timeout=60)
r.raise_for_status()
return json.loads(r.text)
def _get_game_tags(self, instance, api_url, client_id, payload):
tags = {}
for ch in payload['data']:
try:
game_payload = self._get_game_data(instance, api_url, client_id, ch['game_id'])
tags[ch['user_name']] = game_payload['data'][0]['name']
except Exception, e:
self.log.error("Failed to get game name with error: {}".format(e))
return tags
def _get_user_data(self, instance, api_url, client_id, channels):
path = "users"
headers = {'Client-ID': client_id}
params = [('login', ch) for ch in channels]
r = requests.get(urljoin(api_url, path), headers=headers, params=params, timeout=60)
r.raise_for_status()
return json.loads(r.text)
def _get_follow_data(self, instance, api_url, client_id, user_id):
    """Fetch the follower summary for a single user id."""
    response = requests.get(
        urljoin(api_url, "users/follows"),
        headers={'Client-ID': client_id},
        params={'to_id': user_id},
        timeout=60,
    )
    response.raise_for_status()
    return json.loads(response.text)
def _get_all_follows(self, instance, api_url, client_id, payload):
    """Map each user login to its total follower count.

    Users whose follow lookup fails are logged and skipped, so one bad
    user id does not abort the whole check run.
    """
    follows = {}
    for ch in payload['data']:
        try:
            follow_payload = self._get_follow_data(instance, api_url, client_id, ch['id'])
            follows[ch['login']] = follow_payload['total']
        except Exception as e:
            # Python-2-only `except Exception, e` replaced with the portable
            # `as` form (valid on Python 2.6+ and Python 3).
            self.log.error("Failed to get user follows with error: {}".format(e))
    return follows
| 4,991 | 1,481 |
from lib.util import get_info_by_url, insert_to_db
__host = ".amazon."
__sess = "x-main="
def parse(headers):
    """Try to extract an Amazon account name from the order-history page.

    Returns True and records the hit when a name is found, False otherwise.
    """
    base_url = "http://" + headers["Host"]
    markers = [("id='nav-signin-text' class='nav-button-em'>", "<")]
    name = get_info_by_url(base_url + '/gp/history/', headers, markers)[0]
    if name is None:
        return False
    insert_to_db("Amazon", headers, name, base_url + "/")
    return True
| 393 | 147 |
import pytest
def test_conda_deps(tmpdir, newconfig):
    """conda_deps entries are parsed and also mirrored into deps."""
    config = newconfig(
        [],
        """
        [tox]
        toxworkdir = {}
        [testenv:py1]
        deps=
            hello
        conda_deps=
            world
            something
        """.format(
            tmpdir
        ),
    )
    assert len(config.envconfigs) == 1
    env = config.envconfigs['py1']
    assert hasattr(env, 'deps')
    assert hasattr(env, 'conda_deps')
    assert len(env.conda_deps) == 2
    # For now, as a workaround, all conda dependencies are temporarily added
    # to deps as well, so tox can tell whether an environment needs updating.
    # Hence 2 conda deps + 1 pip dep = 3.
    assert len(env.deps) == 3
    assert [dep.name for dep in env.conda_deps] == ['world', 'something']
def test_no_conda_deps(tmpdir, newconfig):
    """Without conda_deps, the conda attributes exist but stay empty."""
    config = newconfig(
        [],
        """
        [tox]
        toxworkdir = {}
        [testenv:py1]
        deps=
            hello
        """.format(
            tmpdir
        ),
    )
    assert len(config.envconfigs) == 1
    env = config.envconfigs['py1']
    for attr in ('deps', 'conda_deps', 'conda_channels'):
        assert hasattr(env, attr)
    assert len(env.conda_deps) == 0
    assert len(env.conda_channels) == 0
    assert len(env.deps) == 1
def test_conda_channels(tmpdir, newconfig):
    """conda_channels entries are parsed into the conda_channels list."""
    config = newconfig(
        [],
        """
        [tox]
        toxworkdir = {}
        [testenv:py1]
        deps=
            hello
        conda_deps=
            something
            else
        conda_channels=
            conda-forge
        """.format(
            tmpdir
        ),
    )
    assert len(config.envconfigs) == 1
    env = config.envconfigs['py1']
    for attr in ('deps', 'conda_deps', 'conda_channels'):
        assert hasattr(env, attr)
    assert len(env.conda_channels) == 1
    assert 'conda-forge' in env.conda_channels
def test_conda_force_deps(tmpdir, newconfig):
    """--force-dep pins the version spec of the matching conda dependency."""
    config = newconfig(
        ['--force-dep=something<42.1'],
        """
        [tox]
        toxworkdir = {}
        [testenv:py1]
        deps=
            hello
        conda_deps=
            something
            else
        conda_channels=
            conda-forge
        """.format(
            tmpdir
        ),
    )
    assert len(config.envconfigs) == 1
    env = config.envconfigs['py1']
    assert hasattr(env, 'conda_deps')
    assert len(env.conda_deps) == 2
    assert env.conda_deps[0].name == 'something<42.1'
| 2,842 | 946 |
**Chapter 19 – Training and Deploying TensorFlow Models at Scale**
_This notebook contains all the sample code in chapter 19._
<table align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/ageron/handson-ml2/blob/master/19_training_and_deploying_at_scale.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
</table>
# Setup
First, let's import a few common modules, ensure Matplotlib plots figures inline, and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated, so we strongly recommend using Python 3 instead), as well as Scikit-Learn ≥0.20 and TensorFlow ≥2.0.
# Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
import sklearn
assert sklearn.__version__ >= "0.20"
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
!echo "deb http://storage.googleapis.com/tensorflow-serving-apt stable tensorflow-model-server tensorflow-model-server-universal" > /etc/apt/sources.list.d/tensorflow-serving.list
!curl https://storage.googleapis.com/tensorflow-serving-apt/tensorflow-serving.release.pub.gpg | apt-key add -
!apt update && apt-get install -y tensorflow-model-server
!pip install -q -U tensorflow-serving-api
IS_COLAB = True
except Exception:
IS_COLAB = False
# TensorFlow ≥2.0 is required
import tensorflow as tf
from tensorflow import keras
assert tf.__version__ >= "2.0"
if not tf.config.list_physical_devices('GPU'):
print("No GPU was detected. CNNs can be very slow without a GPU.")
if IS_COLAB:
print("Go to Runtime > Change runtime and select a GPU hardware accelerator.")
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
tf.random.set_seed(42)
# To plot pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "deploy"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
    """Save the current matplotlib figure as IMAGES_PATH/<fig_id>.<ext>."""
    target = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(target, format=fig_extension, dpi=resolution)
# Deploying TensorFlow models to TensorFlow Serving (TFS)
We will use the REST API or the gRPC API.
## Save/Load a `SavedModel`
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.mnist.load_data()
X_train_full = X_train_full[..., np.newaxis].astype(np.float32) / 255.
X_test = X_test[..., np.newaxis].astype(np.float32) / 255.
X_valid, X_train = X_train_full[:5000], X_train_full[5000:]
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
X_new = X_test[:3]
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28, 1]),
keras.layers.Dense(100, activation="relu"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-2),
metrics=["accuracy"])
model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid))
np.round(model.predict(X_new), 2)
model_version = "0001"
model_name = "my_mnist_model"
model_path = os.path.join(model_name, model_version)
model_path
!rm -rf {model_name}
tf.saved_model.save(model, model_path)
for root, dirs, files in os.walk(model_name):
indent = ' ' * root.count(os.sep)
print('{}{}/'.format(indent, os.path.basename(root)))
for filename in files:
print('{}{}'.format(indent + ' ', filename))
!saved_model_cli show --dir {model_path}
!saved_model_cli show --dir {model_path} --tag_set serve
!saved_model_cli show --dir {model_path} --tag_set serve \
--signature_def serving_default
!saved_model_cli show --dir {model_path} --all
Let's write the new instances to a `npy` file so we can pass them easily to our model:
np.save("my_mnist_tests.npy", X_new)
input_name = model.input_names[0]
input_name
And now let's use `saved_model_cli` to make predictions for the instances we just saved:
!saved_model_cli run --dir {model_path} --tag_set serve \
--signature_def serving_default \
--inputs {input_name}=my_mnist_tests.npy
np.round([[1.1739199e-04, 1.1239604e-07, 6.0210604e-04, 2.0804715e-03, 2.5779348e-06,
6.4079795e-05, 2.7411186e-08, 9.9669880e-01, 3.9654213e-05, 3.9471846e-04],
[1.2294615e-03, 2.9207937e-05, 9.8599273e-01, 9.6755642e-03, 8.8930705e-08,
2.9156188e-04, 1.5831805e-03, 1.1311053e-09, 1.1980456e-03, 1.1113169e-07],
[6.4066830e-05, 9.6359509e-01, 9.0598064e-03, 2.9872139e-03, 5.9552520e-04,
3.7478798e-03, 2.5074568e-03, 1.1462728e-02, 5.5553433e-03, 4.2495009e-04]], 2)
## TensorFlow Serving
Install [Docker](https://docs.docker.com/install/) if you don't have it already. Then run:
```bash
docker pull tensorflow/serving
export ML_PATH=$HOME/ml # or wherever this project is
docker run -it --rm -p 8500:8500 -p 8501:8501 \
-v "$ML_PATH/my_mnist_model:/models/my_mnist_model" \
-e MODEL_NAME=my_mnist_model \
tensorflow/serving
```
Once you are finished using it, press Ctrl-C to shut down the server.
Alternatively, if `tensorflow_model_server` is installed (e.g., if you are running this notebook in Colab), then the following 3 cells will start the server:
os.environ["MODEL_DIR"] = os.path.split(os.path.abspath(model_path))[0]
%%bash --bg
nohup tensorflow_model_server \
--rest_api_port=8501 \
--model_name=my_mnist_model \
--model_base_path="${MODEL_DIR}" >server.log 2>&1
!tail server.log
import json
input_data_json = json.dumps({
"signature_name": "serving_default",
"instances": X_new.tolist(),
})
repr(input_data_json)[:1500] + "..."
Now let's use TensorFlow Serving's REST API to make predictions:
import requests
SERVER_URL = 'http://localhost:8501/v1/models/my_mnist_model:predict'
response = requests.post(SERVER_URL, data=input_data_json)
response.raise_for_status() # raise an exception in case of error
response = response.json()
response.keys()
y_proba = np.array(response["predictions"])
y_proba.round(2)
### Using the gRPC API
from tensorflow_serving.apis.predict_pb2 import PredictRequest
request = PredictRequest()
request.model_spec.name = model_name
request.model_spec.signature_name = "serving_default"
input_name = model.input_names[0]
request.inputs[input_name].CopyFrom(tf.make_tensor_proto(X_new))
import grpc
from tensorflow_serving.apis import prediction_service_pb2_grpc
channel = grpc.insecure_channel('localhost:8500')
predict_service = prediction_service_pb2_grpc.PredictionServiceStub(channel)
response = predict_service.Predict(request, timeout=10.0)
response
Convert the response to a tensor:
output_name = model.output_names[0]
outputs_proto = response.outputs[output_name]
y_proba = tf.make_ndarray(outputs_proto)
y_proba.round(2)
Or to a NumPy array if your client does not include the TensorFlow library:
output_name = model.output_names[0]
outputs_proto = response.outputs[output_name]
shape = [dim.size for dim in outputs_proto.tensor_shape.dim]
y_proba = np.array(outputs_proto.float_val).reshape(shape)
y_proba.round(2)
## Deploying a new model version
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28, 1]),
keras.layers.Dense(50, activation="relu"),
keras.layers.Dense(50, activation="relu"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-2),
metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid))
model_version = "0002"
model_name = "my_mnist_model"
model_path = os.path.join(model_name, model_version)
model_path
tf.saved_model.save(model, model_path)
for root, dirs, files in os.walk(model_name):
indent = ' ' * root.count(os.sep)
print('{}{}/'.format(indent, os.path.basename(root)))
for filename in files:
print('{}{}'.format(indent + ' ', filename))
**Warning**: You may need to wait a minute before the new model is loaded by TensorFlow Serving.
import requests
SERVER_URL = 'http://localhost:8501/v1/models/my_mnist_model:predict'
response = requests.post(SERVER_URL, data=input_data_json)
response.raise_for_status()
response = response.json()
response.keys()
y_proba = np.array(response["predictions"])
y_proba.round(2)
# Deploy the model to Google Cloud AI Platform
Follow the instructions in the book to deploy the model to Google Cloud AI Platform, download the service account's private key and save it to the `my_service_account_private_key.json` in the project directory. Also, update the `project_id`:
project_id = "onyx-smoke-242003"
import googleapiclient.discovery
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "my_service_account_private_key.json"
model_id = "my_mnist_model"
model_path = "projects/{}/models/{}".format(project_id, model_id)
model_path += "/versions/v0001/" # if you want to run a specific version
ml_resource = googleapiclient.discovery.build("ml", "v1").projects()
def predict(X):
    """Send instances to the deployed AI Platform model, return predictions.

    Raises RuntimeError when the service reports an error.
    """
    body = {"signature_name": "serving_default",
            "instances": X.tolist()}
    request = ml_resource.predict(name=model_path, body=body)
    response = request.execute()
    if "error" in response:
        raise RuntimeError(response["error"])
    return np.array([pred[output_name] for pred in response["predictions"]])
Y_probas = predict(X_new)
np.round(Y_probas, 2)
# Using GPUs
tf.test.is_gpu_available()
tf.test.gpu_device_name()
tf.test.is_built_with_cuda()
from tensorflow.python.client.device_lib import list_local_devices
devices = list_local_devices()
devices
# Distributed Training
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
def create_model():
    """Return a fresh (uncompiled) CNN classifier for 28x28x1 images."""
    layers = [
        keras.layers.Conv2D(filters=64, kernel_size=7, activation="relu",
                            padding="same", input_shape=[28, 28, 1]),
        keras.layers.MaxPooling2D(pool_size=2),
        keras.layers.Conv2D(filters=128, kernel_size=3, activation="relu",
                            padding="same"),
        keras.layers.Conv2D(filters=128, kernel_size=3, activation="relu",
                            padding="same"),
        keras.layers.MaxPooling2D(pool_size=2),
        keras.layers.Flatten(),
        keras.layers.Dense(units=64, activation='relu'),
        keras.layers.Dropout(0.5),
        keras.layers.Dense(units=10, activation='softmax'),
    ]
    return keras.models.Sequential(layers)
batch_size = 100
model = create_model()
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-2),
metrics=["accuracy"])
model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid), batch_size=batch_size)
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
distribution = tf.distribute.MirroredStrategy()
# Change the default all-reduce algorithm:
#distribution = tf.distribute.MirroredStrategy(
# cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())
# Specify the list of GPUs to use:
#distribution = tf.distribute.MirroredStrategy(devices=["/gpu:0", "/gpu:1"])
# Use the central storage strategy instead:
#distribution = tf.distribute.experimental.CentralStorageStrategy()
#resolver = tf.distribute.cluster_resolver.TPUClusterResolver()
#tf.tpu.experimental.initialize_tpu_system(resolver)
#distribution = tf.distribute.experimental.TPUStrategy(resolver)
with distribution.scope():
model = create_model()
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-2),
metrics=["accuracy"])
batch_size = 100 # must be divisible by the number of workers
model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid), batch_size=batch_size)
model.predict(X_new)
Custom training loop:
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
K = keras.backend
distribution = tf.distribute.MirroredStrategy()
with distribution.scope():
model = create_model()
optimizer = keras.optimizers.SGD()
with distribution.scope():
dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train)).repeat().batch(batch_size)
input_iterator = distribution.make_dataset_iterator(dataset)
@tf.function
def train_step():
    # One distributed training step: run `step_fn` on every replica and
    # aggregate the per-replica losses into a single scalar.
    def step_fn(inputs):
        # `inputs` is one (features, labels) batch shard for this replica.
        X, y = inputs
        with tf.GradientTape() as tape:
            Y_proba = model(X)
            # Sum over the shard divided by the *global* batch size, so the
            # per-replica losses add up to the true mean across replicas.
            loss = K.sum(keras.losses.sparse_categorical_crossentropy(y, Y_proba)) / batch_size
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        return loss
    per_replica_losses = distribution.experimental_run(step_fn, input_iterator)
    # SUM across replicas; each replica already divided by the global batch
    # size above, so the sum is the mean loss for the whole batch.
    mean_loss = distribution.reduce(tf.distribute.ReduceOp.SUM,
                                    per_replica_losses, axis=None)
    return mean_loss
n_epochs = 10
with distribution.scope():
input_iterator.initialize()
for epoch in range(n_epochs):
print("Epoch {}/{}".format(epoch + 1, n_epochs))
for iteration in range(len(X_train) // batch_size):
print("\rLoss: {:.3f}".format(train_step().numpy()), end="")
print()
batch_size = 100 # must be divisible by the number of workers
model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid), batch_size=batch_size)
## Training across multiple servers
A TensorFlow cluster is a group of TensorFlow processes running in parallel, usually on different machines, and talking to each other to complete some work, for example training or executing a neural network. Each TF process in the cluster is called a "task" (or a "TF server"). It has an IP address, a port, and a type (also called its role or its job). The type can be `"worker"`, `"chief"`, `"ps"` (parameter server) or `"evaluator"`:
* Each **worker** performs computations, usually on a machine with one or more GPUs.
* The **chief** performs computations as well, but it also handles extra work such as writing TensorBoard logs or saving checkpoints. There is a single chief in a cluster. If no chief is specified, then the first worker is the chief.
* A **parameter server** (ps) only keeps track of variable values, it is usually on a CPU-only machine.
* The **evaluator** obviously takes care of evaluation. There is usually a single evaluator in a cluster.
The set of tasks that share the same type is often called a "job". For example, the "worker" job is the set of all workers.
To start a TensorFlow cluster, you must first specify it. This means defining all the tasks (IP address, TCP port, and type). For example, the following cluster specification defines a cluster with 3 tasks (2 workers and 1 parameter server). It's a dictionary with one key per job, and the values are lists of task addresses:
```
{
"worker": ["my-worker0.example.com:9876", "my-worker1.example.com:9876"],
"ps": ["my-ps0.example.com:9876"]
}
```
Every task in the cluster may communicate with every other task in the cluster, so make sure to configure your firewall to authorize all communications between these machines on these ports (it's usually simpler if you use the same port on every machine).
When a task is started, it needs to be told which one it is: its type and index (the task index is also called the task id). A common way to specify everything at once (both the cluster spec and the current task's type and id) is to set the `TF_CONFIG` environment variable before starting the program. It must be a JSON-encoded dictionary containing a cluster specification (under the `"cluster"` key), and the type and index of the task to start (under the `"task"` key). For example, the following `TF_CONFIG` environment variable defines a simple cluster with 2 workers and 1 parameter server, and specifies that the task to start is the first worker:
import os
import json
os.environ["TF_CONFIG"] = json.dumps({
"cluster": {
"worker": ["my-work0.example.com:9876", "my-work1.example.com:9876"],
"ps": ["my-ps0.example.com:9876"]
},
"task": {"type": "worker", "index": 0}
})
print("TF_CONFIG='{}'".format(os.environ["TF_CONFIG"]))
Some platforms (e.g., Google Cloud ML Engine) automatically set this environment variable for you.
Then you would write a short Python script to start a task. The same script can be used on every machine, since it will load the `TF_CONFIG` variable, which will tell it which task to start:
import tensorflow as tf
resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver()
worker0 = tf.distribute.Server(resolver.cluster_spec(),
job_name=resolver.task_type,
task_index=resolver.task_id)
Another way to specify the cluster specification is directly in Python, rather than through an environment variable:
cluster_spec = tf.train.ClusterSpec({
"worker": ["127.0.0.1:9901", "127.0.0.1:9902"],
"ps": ["127.0.0.1:9903"]
})
You can then start a server simply by passing it the cluster spec and indicating its type and index. Let's start the two remaining tasks (remember that in general you would only start a single task per machine; we are starting 3 tasks on the localhost just for the purpose of this code example):
#worker1 = tf.distribute.Server(cluster_spec, job_name="worker", task_index=1)
ps0 = tf.distribute.Server(cluster_spec, job_name="ps", task_index=0)
os.environ["TF_CONFIG"] = json.dumps({
"cluster": {
"worker": ["127.0.0.1:9901", "127.0.0.1:9902"],
"ps": ["127.0.0.1:9903"]
},
"task": {"type": "worker", "index": 1}
})
print(repr(os.environ["TF_CONFIG"]))
distribution = tf.distribute.experimental.MultiWorkerMirroredStrategy()
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
os.environ["TF_CONFIG"] = json.dumps({
"cluster": {
"worker": ["127.0.0.1:9901", "127.0.0.1:9902"],
"ps": ["127.0.0.1:9903"]
},
"task": {"type": "worker", "index": 1}
})
#CUDA_VISIBLE_DEVICES=0
with distribution.scope():
model = create_model()
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-2),
metrics=["accuracy"])
import tensorflow as tf
from tensorflow import keras
import numpy as np
# At the beginning of the program (restart the kernel before running this cell)
distribution = tf.distribute.experimental.MultiWorkerMirroredStrategy()
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.mnist.load_data()
X_train_full = X_train_full[..., np.newaxis] / 255.
X_test = X_test[..., np.newaxis] / 255.
X_valid, X_train = X_train_full[:5000], X_train_full[5000:]
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
X_new = X_test[:3]
n_workers = 2
batch_size = 32 * n_workers
dataset = tf.data.Dataset.from_tensor_slices((X_train[..., np.newaxis], y_train)).repeat().batch(batch_size)
def create_model():
    """Build an (uncompiled) CNN classifier for 28x28x1 MNIST digits."""
    stack = [
        keras.layers.Conv2D(filters=64, kernel_size=7, activation="relu",
                            padding="same", input_shape=[28, 28, 1]),
        keras.layers.MaxPooling2D(pool_size=2),
        keras.layers.Conv2D(filters=128, kernel_size=3, activation="relu",
                            padding="same"),
        keras.layers.Conv2D(filters=128, kernel_size=3, activation="relu",
                            padding="same"),
        keras.layers.MaxPooling2D(pool_size=2),
        keras.layers.Flatten(),
        keras.layers.Dense(units=64, activation='relu'),
        keras.layers.Dropout(0.5),
        keras.layers.Dense(units=10, activation='softmax'),
    ]
    return keras.models.Sequential(stack)
with distribution.scope():
model = create_model()
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-2),
metrics=["accuracy"])
model.fit(dataset, steps_per_epoch=len(X_train)//batch_size, epochs=10)
# Hyperparameter tuning
# Only talk to ps server
config_proto = tf.ConfigProto(device_filters=['/job:ps', '/job:worker/task:%d' % tf_config['task']['index']])
config = tf.estimator.RunConfig(session_config=config_proto)
# default since 1.10
strategy.num_replicas_in_sync | 20,933 | 7,394 |
# coding: utf-8
"""
qTest Manager API Version 8.6 - 9.1
qTest Manager API Version 8.6 - 9.1
OpenAPI spec version: 8.6 - 9.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.apis.testrun_api import TestrunApi
class TestTestrunApi(unittest.TestCase):
""" TestrunApi unit test stubs """
def setUp(self):
self.api = swagger_client.apis.testrun_api.TestrunApi()
def tearDown(self):
pass
def test_add_comment(self):
"""
Test case for add_comment
Adds a Comment to a Test Run
"""
pass
def test_create(self):
"""
Test case for create
Creates a Test Run
"""
pass
def test_delete(self):
"""
Test case for delete
Deletes a Test Run
"""
pass
def test_delete_comment(self):
"""
Test case for delete_comment
Deletes a Comment of a Test Run
"""
pass
def test_get(self):
"""
Test case for get
Gets a Test Run
"""
pass
def test_get_comment(self):
"""
Test case for get_comment
Gets a Comment from a Test Run
"""
pass
def test_get_comments(self):
"""
Test case for get_comments
Gets all Comments of a Test Run
"""
pass
def test_get_of(self):
"""
Test case for get_of
Gets multiple Test Runs
"""
pass
def test_get_status_valuable(self):
"""
Test case for get_status_valuable
Gets Test Run statuses
"""
pass
def test_update(self):
"""
Test case for update
Updates a Test Run
"""
pass
def test_update_comment(self):
"""
Test case for update_comment
Updates a Comment of a Test Run
"""
pass
if __name__ == '__main__':
unittest.main()
| 2,291 | 776 |
""" Prune the preassembled corpus json file """
import json
import sys
from tqdm import tqdm
def isInfluenceStatement(s):
    """Return whether the statement dict's "type" field equals "Influence"."""
    statement_type = s["type"]
    return statement_type == "Influence"
if __name__ == "__main__":
    # Usage: prune.py <input.json> <output.json>
    with open(sys.argv[1], "r") as f:
        sts = json.load(f)
    filtered_sts = []
    # Predicates over a single db_refs dict (the value of s[role]["concept"]["db_refs"]).
    hasWMKey = lambda x: x.get("WM") is not None
    hasNonZeroWMGroundingList = lambda x: len(x["WM"]) != 0
    # Rejects statements whose top WM grounding is one of these overly
    # generic / unwanted ontology nodes.
    isNotGroundedToCausalFactor = lambda x: x["WM"][0][0] not in (
        "wm/concept/causal_factor",
        "wm/concept/causal_factor/condition",
        "wm/concept/causal_factor/condition/trend",
        "wm/concept/causal_factor/access",
        "wm/concept/causal_factor/intervention",
        "wm/concept/causal_factor/movement/movement",
        "wm/concept/causal_factor/social_and_political",
        "wm/concept/entity/artifact",
        "wm/concept/entity/geo-location",
        "wm/concept/entity/government_entity",
        "wm/concept/entity/organization",
        "wm/concept/entity/person_and_group/community",
        "wm/concept/causal_factor/economic_and_commerce/economic_activity/market",
        "wm/concept/indicator_and_reported_property/weather"
    )
    # Keep only Influence statements whose subject AND object both have a
    # non-empty WM grounding list not rooted at an excluded node.
    for s in filter(
        lambda s: isInfluenceStatement(s)
        and all(
            map(
                lambda x: hasWMKey(x)
                and hasNonZeroWMGroundingList(x)
                and isNotGroundedToCausalFactor(x),
                map(lambda x: s[x]["concept"]["db_refs"], ("subj", "obj")),
            )
        ),
        sts,
    ):
        for c in (s["subj"], s["obj"]):
            # Drop every grounding source except WM.
            for k in list(c["concept"]["db_refs"].keys()):
                if k != "WM":
                    del c["concept"]["db_refs"][k]
            # Keep only the top-scoring WM grounding.
            c["concept"]["db_refs"]["WM"] = c["concept"]["db_refs"]["WM"][
                0:1
            ]
            # Normalize the grounding string: underscores instead of spaces,
            # plus a fixed set of ontology-path renames/typo corrections.
            c["concept"]["db_refs"]["WM"][0][0] = (
                c["concept"]["db_refs"]["WM"][0][0]
                .replace(" ", "_")
                .replace(
                    "wm/concept/causal_factor/economic_and_commerce/economic_activity/market/price/food_price",
                    "wm/concept/causal_factor/economic_and_commerce/economic_activity/market/price_or_cost/food_price",
                )
                .replace(
                    "wm/concept/causal_factor/economic_and_commerce/economic_activity/market/price/oil_price",
                    "wm/concept/causal_factor/economic_and_commerce/economic_activity/market/price_or_cost/oil_price",
                )
                .replace(
                    "wm/concept/causal_factor/intervention/provision_of_goods_and_services/provide_stationary",
                    "wm/concept/causal_factor/intervention/provision_of_goods_and_services/provide_stationery",
                )
                .replace(
                    "wm/concept/causal_factor/intervention/provision_of_goods_and_services/provide_moving_of_houseHolds",
                    "wm/concept/causal_factor/intervention/provision_of_goods_and_services/provide_moving_of_households",
                )
                .replace(
                    "wm/concept/causal_factor/social_and_political/crime",
                    "wm/concept/causal_factor/social_and_political/crime/crime",
                )
                .replace(
                    "wm/concept/causal_factor/social_and_political/education",
                    "wm/concept/causal_factor/social_and_political/education/education",
                )
            )
        filtered_sts.append(s)
    with open(sys.argv[2], "w") as f:
        f.write(json.dumps(filtered_sts, indent=2))
| 3,731 | 1,252 |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Iterable
class DataProviderError(Exception):
    """Base Exception for Ax DataProviders.

    Carries the data provider's name, the raw error it raised
    (``data_provider_error``), and an Ax-friendly ``message``.
    """

    def __init__(
        self, message: str, data_provider: str, data_provider_error: Any
    ) -> None:
        self.message = message
        self.data_provider = data_provider
        self.data_provider_error = data_provider_error

    def __str__(self) -> str:
        template = (
            "{message}. \n Error thrown by: {dp} data provider \n"
            "Native {dp} data provider error: {dp_error}"
        )
        return template.format(
            dp=self.data_provider,
            message=self.message,
            dp_error=self.data_provider_error,
        )
class MissingDataError(Exception):
    """Raised when no data can be found for one or more trial indexes."""

    def __init__(self, missing_trial_indexes: Iterable[int]) -> None:
        indexes = ", ".join(str(i) for i in missing_trial_indexes)
        self.message: str = (
            f"Unable to find data for the following trials: {indexes} "
            "consider updating the data fetching kwargs or manually fetching "
            "data via `refetch_data()`"
        )

    def __str__(self) -> str:
        return self.message
| 1,562 | 447 |
from . import (baseobject, basepdf, basemodel, basefunc, data, interfaces, integration, math, loss,
sample, limits, operations, parameter, )
| 156 | 40 |
# coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose Pty Ltd" file="Settings.py">
# Copyright (c) 2003-2021 Aspose Pty Ltd
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
class Settings(object):
    """
    Defines comparison process additional settings.

    Auto-generated swagger model: plain data holder whose properties mirror
    the JSON payload described by ``attribute_map``.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Pythonic attribute name -> swagger type string (consumed by to_dict()).
    swagger_types = {
        'generate_summary_page': 'bool',
        'show_deleted_content': 'bool',
        'show_inserted_content': 'bool',
        'style_change_detection': 'bool',
        'inserted_items_style': 'ItemsStyle',
        'deleted_items_style': 'ItemsStyle',
        'changed_items_style': 'ItemsStyle',
        'words_separator_chars': 'list[str]',
        'details_level': 'str',
        'use_frames_for_del_ins_elements': 'bool',
        'calculate_component_coordinates': 'bool',
        'mark_changed_content': 'bool',
        'mark_nested_content': 'bool',
        'clone_metadata': 'str',
        'meta_data': 'Metadata',
        'password_save_option': 'str',
        'password': 'str',
        'diagram_master_setting': 'DiagramMasterSetting',
        'original_size': 'Size',
        'header_footers_comparison': 'bool',
        'paper_size': 'str',
        'sensitivity_of_comparison': 'int'
    }
    # Pythonic attribute name -> JSON key used on the wire.
    attribute_map = {
        'generate_summary_page': 'GenerateSummaryPage',
        'show_deleted_content': 'ShowDeletedContent',
        'show_inserted_content': 'ShowInsertedContent',
        'style_change_detection': 'StyleChangeDetection',
        'inserted_items_style': 'InsertedItemsStyle',
        'deleted_items_style': 'DeletedItemsStyle',
        'changed_items_style': 'ChangedItemsStyle',
        'words_separator_chars': 'WordsSeparatorChars',
        'details_level': 'DetailsLevel',
        'use_frames_for_del_ins_elements': 'UseFramesForDelInsElements',
        'calculate_component_coordinates': 'CalculateComponentCoordinates',
        'mark_changed_content': 'MarkChangedContent',
        'mark_nested_content': 'MarkNestedContent',
        'clone_metadata': 'CloneMetadata',
        'meta_data': 'MetaData',
        'password_save_option': 'PasswordSaveOption',
        'password': 'Password',
        'diagram_master_setting': 'DiagramMasterSetting',
        'original_size': 'OriginalSize',
        'header_footers_comparison': 'HeaderFootersComparison',
        'paper_size': 'PaperSize',
        'sensitivity_of_comparison': 'SensitivityOfComparison'
    }
    def __init__(self, generate_summary_page=None, show_deleted_content=None, show_inserted_content=None, style_change_detection=None, inserted_items_style=None, deleted_items_style=None, changed_items_style=None, words_separator_chars=None, details_level=None, use_frames_for_del_ins_elements=None, calculate_component_coordinates=None, mark_changed_content=None, mark_nested_content=None, clone_metadata=None, meta_data=None, password_save_option=None, password=None, diagram_master_setting=None, original_size=None, header_footers_comparison=None, paper_size=None, sensitivity_of_comparison=None, **kwargs):  # noqa: E501
        """Initializes new instance of Settings.

        Every argument is optional; only non-None arguments are routed
        through the corresponding property setter (so setter validation
        applies).  Extra **kwargs are accepted and silently ignored.
        """  # noqa: E501
        self._generate_summary_page = None
        self._show_deleted_content = None
        self._show_inserted_content = None
        self._style_change_detection = None
        self._inserted_items_style = None
        self._deleted_items_style = None
        self._changed_items_style = None
        self._words_separator_chars = None
        self._details_level = None
        self._use_frames_for_del_ins_elements = None
        self._calculate_component_coordinates = None
        self._mark_changed_content = None
        self._mark_nested_content = None
        self._clone_metadata = None
        self._meta_data = None
        self._password_save_option = None
        self._password = None
        self._diagram_master_setting = None
        self._original_size = None
        self._header_footers_comparison = None
        self._paper_size = None
        self._sensitivity_of_comparison = None
        if generate_summary_page is not None:
            self.generate_summary_page = generate_summary_page
        if show_deleted_content is not None:
            self.show_deleted_content = show_deleted_content
        if show_inserted_content is not None:
            self.show_inserted_content = show_inserted_content
        if style_change_detection is not None:
            self.style_change_detection = style_change_detection
        if inserted_items_style is not None:
            self.inserted_items_style = inserted_items_style
        if deleted_items_style is not None:
            self.deleted_items_style = deleted_items_style
        if changed_items_style is not None:
            self.changed_items_style = changed_items_style
        if words_separator_chars is not None:
            self.words_separator_chars = words_separator_chars
        if details_level is not None:
            self.details_level = details_level
        if use_frames_for_del_ins_elements is not None:
            self.use_frames_for_del_ins_elements = use_frames_for_del_ins_elements
        if calculate_component_coordinates is not None:
            self.calculate_component_coordinates = calculate_component_coordinates
        if mark_changed_content is not None:
            self.mark_changed_content = mark_changed_content
        if mark_nested_content is not None:
            self.mark_nested_content = mark_nested_content
        if clone_metadata is not None:
            self.clone_metadata = clone_metadata
        if meta_data is not None:
            self.meta_data = meta_data
        if password_save_option is not None:
            self.password_save_option = password_save_option
        if password is not None:
            self.password = password
        if diagram_master_setting is not None:
            self.diagram_master_setting = diagram_master_setting
        if original_size is not None:
            self.original_size = original_size
        if header_footers_comparison is not None:
            self.header_footers_comparison = header_footers_comparison
        if paper_size is not None:
            self.paper_size = paper_size
        if sensitivity_of_comparison is not None:
            self.sensitivity_of_comparison = sensitivity_of_comparison
    @property
    def generate_summary_page(self):
        """
        Gets the generate_summary_page.  # noqa: E501
        Indicates whether to add summary page to resultant document or not  # noqa: E501
        :return: The generate_summary_page.  # noqa: E501
        :rtype: bool
        """
        return self._generate_summary_page
    @generate_summary_page.setter
    def generate_summary_page(self, generate_summary_page):
        """
        Sets the generate_summary_page.
        Indicates whether to add summary page to resultant document or not  # noqa: E501
        :param generate_summary_page: The generate_summary_page.  # noqa: E501
        :type: bool
        """
        if generate_summary_page is None:
            raise ValueError("Invalid value for `generate_summary_page`, must not be `None`")  # noqa: E501
        self._generate_summary_page = generate_summary_page
    @property
    def show_deleted_content(self):
        """
        Gets the show_deleted_content.  # noqa: E501
        Indicates whether to show deleted components in resultant document or not  # noqa: E501
        :return: The show_deleted_content.  # noqa: E501
        :rtype: bool
        """
        return self._show_deleted_content
    @show_deleted_content.setter
    def show_deleted_content(self, show_deleted_content):
        """
        Sets the show_deleted_content.
        Indicates whether to show deleted components in resultant document or not  # noqa: E501
        :param show_deleted_content: The show_deleted_content.  # noqa: E501
        :type: bool
        """
        if show_deleted_content is None:
            raise ValueError("Invalid value for `show_deleted_content`, must not be `None`")  # noqa: E501
        self._show_deleted_content = show_deleted_content
    @property
    def show_inserted_content(self):
        """
        Gets the show_inserted_content.  # noqa: E501
        Indicates whether to show inserted components in resultant document or not  # noqa: E501
        :return: The show_inserted_content.  # noqa: E501
        :rtype: bool
        """
        return self._show_inserted_content
    @show_inserted_content.setter
    def show_inserted_content(self, show_inserted_content):
        """
        Sets the show_inserted_content.
        Indicates whether to show inserted components in resultant document or not  # noqa: E501
        :param show_inserted_content: The show_inserted_content.  # noqa: E501
        :type: bool
        """
        if show_inserted_content is None:
            raise ValueError("Invalid value for `show_inserted_content`, must not be `None`")  # noqa: E501
        self._show_inserted_content = show_inserted_content
    @property
    def style_change_detection(self):
        """
        Gets the style_change_detection.  # noqa: E501
        Indicates whether to detect style changes or not  # noqa: E501
        :return: The style_change_detection.  # noqa: E501
        :rtype: bool
        """
        return self._style_change_detection
    @style_change_detection.setter
    def style_change_detection(self, style_change_detection):
        """
        Sets the style_change_detection.
        Indicates whether to detect style changes or not  # noqa: E501
        :param style_change_detection: The style_change_detection.  # noqa: E501
        :type: bool
        """
        if style_change_detection is None:
            raise ValueError("Invalid value for `style_change_detection`, must not be `None`")  # noqa: E501
        self._style_change_detection = style_change_detection
    @property
    def inserted_items_style(self):
        """
        Gets the inserted_items_style.  # noqa: E501
        Style for inserted components  # noqa: E501
        :return: The inserted_items_style.  # noqa: E501
        :rtype: ItemsStyle
        """
        return self._inserted_items_style
    @inserted_items_style.setter
    def inserted_items_style(self, inserted_items_style):
        """
        Sets the inserted_items_style.
        Style for inserted components  # noqa: E501
        :param inserted_items_style: The inserted_items_style.  # noqa: E501
        :type: ItemsStyle
        """
        self._inserted_items_style = inserted_items_style
    @property
    def deleted_items_style(self):
        """
        Gets the deleted_items_style.  # noqa: E501
        Style for deleted components  # noqa: E501
        :return: The deleted_items_style.  # noqa: E501
        :rtype: ItemsStyle
        """
        return self._deleted_items_style
    @deleted_items_style.setter
    def deleted_items_style(self, deleted_items_style):
        """
        Sets the deleted_items_style.
        Style for deleted components  # noqa: E501
        :param deleted_items_style: The deleted_items_style.  # noqa: E501
        :type: ItemsStyle
        """
        self._deleted_items_style = deleted_items_style
    @property
    def changed_items_style(self):
        """
        Gets the changed_items_style.  # noqa: E501
        Style for components with changed style  # noqa: E501
        :return: The changed_items_style.  # noqa: E501
        :rtype: ItemsStyle
        """
        return self._changed_items_style
    @changed_items_style.setter
    def changed_items_style(self, changed_items_style):
        """
        Sets the changed_items_style.
        Style for components with changed style  # noqa: E501
        :param changed_items_style: The changed_items_style.  # noqa: E501
        :type: ItemsStyle
        """
        self._changed_items_style = changed_items_style
    @property
    def words_separator_chars(self):
        """
        Gets the words_separator_chars.  # noqa: E501
        An array of delimiters to split text into words  # noqa: E501
        :return: The words_separator_chars.  # noqa: E501
        :rtype: list[str]
        """
        return self._words_separator_chars
    @words_separator_chars.setter
    def words_separator_chars(self, words_separator_chars):
        """
        Sets the words_separator_chars.
        An array of delimiters to split text into words  # noqa: E501
        :param words_separator_chars: The words_separator_chars.  # noqa: E501
        :type: list[str]
        """
        self._words_separator_chars = words_separator_chars
    @property
    def details_level(self):
        """
        Gets the details_level.  # noqa: E501
        Gets or sets the comparison details level  # noqa: E501
        :return: The details_level.  # noqa: E501
        :rtype: str
        """
        return self._details_level
    @details_level.setter
    def details_level(self, details_level):
        """
        Sets the details_level.
        Gets or sets the comparison details level  # noqa: E501
        :param details_level: The details_level.  # noqa: E501
        :type: str
        """
        if details_level is None:
            raise ValueError("Invalid value for `details_level`, must not be `None`")  # noqa: E501
        allowed_values = ["Low", "Middle", "High"]  # noqa: E501
        # Accept either the enum name or its ordinal given as a digit string.
        if not details_level.isdigit():
            if details_level not in allowed_values:
                raise ValueError(
                    "Invalid value for `details_level` ({0}), must be one of {1}"  # noqa: E501
                    .format(details_level, allowed_values))
            self._details_level = details_level
        else:
            # Map ordinal -> enum name (`long` branch is Python 2 only).
            self._details_level = allowed_values[int(details_level) if six.PY3 else long(details_level)]
    @property
    def use_frames_for_del_ins_elements(self):
        """
        Gets the use_frames_for_del_ins_elements.  # noqa: E501
        Indicates whether to use frames for shapes in Word Processing and for rectangles in Image documents  # noqa: E501
        :return: The use_frames_for_del_ins_elements.  # noqa: E501
        :rtype: bool
        """
        return self._use_frames_for_del_ins_elements
    @use_frames_for_del_ins_elements.setter
    def use_frames_for_del_ins_elements(self, use_frames_for_del_ins_elements):
        """
        Sets the use_frames_for_del_ins_elements.
        Indicates whether to use frames for shapes in Word Processing and for rectangles in Image documents  # noqa: E501
        :param use_frames_for_del_ins_elements: The use_frames_for_del_ins_elements.  # noqa: E501
        :type: bool
        """
        if use_frames_for_del_ins_elements is None:
            raise ValueError("Invalid value for `use_frames_for_del_ins_elements`, must not be `None`")  # noqa: E501
        self._use_frames_for_del_ins_elements = use_frames_for_del_ins_elements
    @property
    def calculate_component_coordinates(self):
        """
        Gets the calculate_component_coordinates.  # noqa: E501
        Indicates whether to calculate coordinates for changed components  # noqa: E501
        :return: The calculate_component_coordinates.  # noqa: E501
        :rtype: bool
        """
        return self._calculate_component_coordinates
    @calculate_component_coordinates.setter
    def calculate_component_coordinates(self, calculate_component_coordinates):
        """
        Sets the calculate_component_coordinates.
        Indicates whether to calculate coordinates for changed components  # noqa: E501
        :param calculate_component_coordinates: The calculate_component_coordinates.  # noqa: E501
        :type: bool
        """
        if calculate_component_coordinates is None:
            raise ValueError("Invalid value for `calculate_component_coordinates`, must not be `None`")  # noqa: E501
        self._calculate_component_coordinates = calculate_component_coordinates
    @property
    def mark_changed_content(self):
        """
        Gets the mark_changed_content.  # noqa: E501
        Indicates whether changed content is marked in the resultant document
        (NOTE(review): the upstream description was copy-pasted from
        use_frames_for_del_ins_elements; meaning inferred from the name --
        confirm against the service docs)  # noqa: E501
        :return: The mark_changed_content.  # noqa: E501
        :rtype: bool
        """
        return self._mark_changed_content
    @mark_changed_content.setter
    def mark_changed_content(self, mark_changed_content):
        """
        Sets the mark_changed_content.
        Indicates whether changed content is marked in the resultant document
        (NOTE(review): upstream description was copy-pasted -- confirm)  # noqa: E501
        :param mark_changed_content: The mark_changed_content.  # noqa: E501
        :type: bool
        """
        if mark_changed_content is None:
            raise ValueError("Invalid value for `mark_changed_content`, must not be `None`")  # noqa: E501
        self._mark_changed_content = mark_changed_content
    @property
    def mark_nested_content(self):
        """
        Gets the mark_nested_content.  # noqa: E501
        Gets or sets a value indicating whether to mark the children of the deleted or inserted element as deleted or inserted  # noqa: E501
        :return: The mark_nested_content.  # noqa: E501
        :rtype: bool
        """
        return self._mark_nested_content
    @mark_nested_content.setter
    def mark_nested_content(self, mark_nested_content):
        """
        Sets the mark_nested_content.
        Gets or sets a value indicating whether to mark the children of the deleted or inserted element as deleted or inserted  # noqa: E501
        :param mark_nested_content: The mark_nested_content.  # noqa: E501
        :type: bool
        """
        if mark_nested_content is None:
            raise ValueError("Invalid value for `mark_nested_content`, must not be `None`")  # noqa: E501
        self._mark_nested_content = mark_nested_content
    @property
    def clone_metadata(self):
        """
        Gets the clone_metadata.  # noqa: E501
        Gets or sets type of metadata to clone  # noqa: E501
        :return: The clone_metadata.  # noqa: E501
        :rtype: str
        """
        return self._clone_metadata
    @clone_metadata.setter
    def clone_metadata(self, clone_metadata):
        """
        Sets the clone_metadata.
        Gets or sets type of metadata to clone  # noqa: E501
        :param clone_metadata: The clone_metadata.  # noqa: E501
        :type: str
        """
        if clone_metadata is None:
            raise ValueError("Invalid value for `clone_metadata`, must not be `None`")  # noqa: E501
        allowed_values = ["Default", "Source", "Target", "FileAuthor"]  # noqa: E501
        # Accept either the enum name or its ordinal given as a digit string.
        if not clone_metadata.isdigit():
            if clone_metadata not in allowed_values:
                raise ValueError(
                    "Invalid value for `clone_metadata` ({0}), must be one of {1}"  # noqa: E501
                    .format(clone_metadata, allowed_values))
            self._clone_metadata = clone_metadata
        else:
            self._clone_metadata = allowed_values[int(clone_metadata) if six.PY3 else long(clone_metadata)]
    @property
    def meta_data(self):
        """
        Gets the meta_data.  # noqa: E501
        Gets or sets user metadata  # noqa: E501
        :return: The meta_data.  # noqa: E501
        :rtype: Metadata
        """
        return self._meta_data
    @meta_data.setter
    def meta_data(self, meta_data):
        """
        Sets the meta_data.
        Gets or sets user metadata  # noqa: E501
        :param meta_data: The meta_data.  # noqa: E501
        :type: Metadata
        """
        self._meta_data = meta_data
    @property
    def password_save_option(self):
        """
        Gets the password_save_option.  # noqa: E501
        Gets or sets type of password saving  # noqa: E501
        :return: The password_save_option.  # noqa: E501
        :rtype: str
        """
        return self._password_save_option
    @password_save_option.setter
    def password_save_option(self, password_save_option):
        """
        Sets the password_save_option.
        Gets or sets type of password saving  # noqa: E501
        :param password_save_option: The password_save_option.  # noqa: E501
        :type: str
        """
        if password_save_option is None:
            raise ValueError("Invalid value for `password_save_option`, must not be `None`")  # noqa: E501
        allowed_values = ["None", "Source", "Target", "User"]  # noqa: E501
        # Accept either the enum name or its ordinal given as a digit string.
        if not password_save_option.isdigit():
            if password_save_option not in allowed_values:
                raise ValueError(
                    "Invalid value for `password_save_option` ({0}), must be one of {1}"  # noqa: E501
                    .format(password_save_option, allowed_values))
            self._password_save_option = password_save_option
        else:
            self._password_save_option = allowed_values[int(password_save_option) if six.PY3 else long(password_save_option)]
    @property
    def password(self):
        """
        Gets the password.  # noqa: E501
        Gets or sets user password to resultant document  # noqa: E501
        :return: The password.  # noqa: E501
        :rtype: str
        """
        return self._password
    @password.setter
    def password(self, password):
        """
        Sets the password.
        Gets or sets user password to resultant document  # noqa: E501
        :param password: The password.  # noqa: E501
        :type: str
        """
        self._password = password
    @property
    def diagram_master_setting(self):
        """
        Gets the diagram_master_setting.  # noqa: E501
        Gets or sets master for Diagram document  # noqa: E501
        :return: The diagram_master_setting.  # noqa: E501
        :rtype: DiagramMasterSetting
        """
        return self._diagram_master_setting
    @diagram_master_setting.setter
    def diagram_master_setting(self, diagram_master_setting):
        """
        Sets the diagram_master_setting.
        Gets or sets master for Diagram document  # noqa: E501
        :param diagram_master_setting: The diagram_master_setting.  # noqa: E501
        :type: DiagramMasterSetting
        """
        self._diagram_master_setting = diagram_master_setting
    @property
    def original_size(self):
        """
        Gets the original_size.  # noqa: E501
        Gets or sets original document size when picture is compared with other different formats  # noqa: E501
        :return: The original_size.  # noqa: E501
        :rtype: Size
        """
        return self._original_size
    @original_size.setter
    def original_size(self, original_size):
        """
        Sets the original_size.
        Gets or sets original document size when picture is compared with other different formats  # noqa: E501
        :param original_size: The original_size.  # noqa: E501
        :type: Size
        """
        self._original_size = original_size
    @property
    def header_footers_comparison(self):
        """
        Gets the header_footers_comparison.  # noqa: E501
        Control to turn on comparison of header/footer contents  # noqa: E501
        :return: The header_footers_comparison.  # noqa: E501
        :rtype: bool
        """
        return self._header_footers_comparison
    @header_footers_comparison.setter
    def header_footers_comparison(self, header_footers_comparison):
        """
        Sets the header_footers_comparison.
        Control to turn on comparison of header/footer contents  # noqa: E501
        :param header_footers_comparison: The header_footers_comparison.  # noqa: E501
        :type: bool
        """
        if header_footers_comparison is None:
            raise ValueError("Invalid value for `header_footers_comparison`, must not be `None`")  # noqa: E501
        self._header_footers_comparison = header_footers_comparison
    @property
    def paper_size(self):
        """
        Gets the paper_size.  # noqa: E501
        Gets or sets the result document paper size  # noqa: E501
        :return: The paper_size.  # noqa: E501
        :rtype: str
        """
        return self._paper_size
    @paper_size.setter
    def paper_size(self, paper_size):
        """
        Sets the paper_size.
        Gets or sets the result document paper size  # noqa: E501
        :param paper_size: The paper_size.  # noqa: E501
        :type: str
        """
        if paper_size is None:
            raise ValueError("Invalid value for `paper_size`, must not be `None`")  # noqa: E501
        allowed_values = ["Default", "A0", "A1", "A2", "A3", "A4", "A5", "A6", "A7", "A8"]  # noqa: E501
        # Accept either the enum name or its ordinal given as a digit string.
        if not paper_size.isdigit():
            if paper_size not in allowed_values:
                raise ValueError(
                    "Invalid value for `paper_size` ({0}), must be one of {1}"  # noqa: E501
                    .format(paper_size, allowed_values))
            self._paper_size = paper_size
        else:
            self._paper_size = allowed_values[int(paper_size) if six.PY3 else long(paper_size)]
    @property
    def sensitivity_of_comparison(self):
        """
        Gets the sensitivity_of_comparison.  # noqa: E501
        Gets or sets a sensitivity of comparison. Default is 75  # noqa: E501
        :return: The sensitivity_of_comparison.  # noqa: E501
        :rtype: int
        """
        return self._sensitivity_of_comparison
    @sensitivity_of_comparison.setter
    def sensitivity_of_comparison(self, sensitivity_of_comparison):
        """
        Sets the sensitivity_of_comparison.
        Gets or sets a sensitivity of comparison. Default is 75  # noqa: E501
        :param sensitivity_of_comparison: The sensitivity_of_comparison.  # noqa: E501
        :type: int
        """
        if sensitivity_of_comparison is None:
            raise ValueError("Invalid value for `sensitivity_of_comparison`, must not be `None`")  # noqa: E501
        self._sensitivity_of_comparison = sensitivity_of_comparison
    def to_dict(self):
        """Returns the model properties as a dict.

        Nested models (anything with a to_dict) and containers of models
        are converted recursively; plain values are copied as-is.
        """
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Settings):
            return False
        # Attribute-wise comparison via the instances' __dict__.
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 29,113 | 8,808 |
# Copyright (c) 2020, 2022, Oracle and/or its affiliates.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0,
# as published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms, as
# designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an additional
# permission to link the program and your derivative works with the
# separately licensed software that they have included with MySQL.
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import pytest
import uuid
import config
from gui_plugin import sqleditor
from gui_plugin import dbconnections
from gui_plugin.core.Error import MSGException
from tests.conftest import backend_callback
from .MockWebSession import MockWebSession
from tests import backend_callback_with_pending
import time
import gui_plugin.core.Logger as logger
class Parameters:
    """Mutable holder for state shared by the tests in this module.

    All slots default to ``None`` at class level and are populated by the
    module-scoped ``params`` fixture below.
    """

    # Identifier of the active connection (assigned by tests/fixture).
    _connection_id = None
    # Registered db connection id returned by dbconnections.add_db_connection.
    _db_connection_id = None
    # MockWebSession instance that routes backend callbacks.
    _web_session = None
    # The sqleditor module session object and its id.
    _module_session_id = None
    _module_session = None
@pytest.fixture(scope="module")
def params():
    """Module-scoped fixture yielding a populated Parameters object.

    Starts a sqleditor module session on a MockWebSession, registers a MySQL
    db connection taken from the test config, and opens that connection
    before yielding.  On teardown the db handle and the module session are
    closed.
    """
    parameters = Parameters()
    parameters._connection_id = None
    parameters._web_session = MockWebSession()
    # Callback fired once the backend finishes opening the connection;
    # any non-OK request state aborts the fixture setup.
    @backend_callback(1)
    def open_connection_cb(msg_type, msg, request_id, values):
        if values['request_state']['type'] != "OK":
            raise Exception('Failed opening connection.')
    parameters._web_session.register_callback(
        open_connection_cb.request_id, open_connection_cb)
    result = sqleditor.start_session(parameters._web_session)
    parameters._module_session_id = result['module_session_id']
    parameters._module_session = parameters._web_session.module_sessions[
        parameters._module_session_id]
    # Clone the first configured connection's options; 'portStr' is removed
    # (presumably not accepted by add_db_connection -- confirm against API).
    connection_options = config.Config.get_instance(
    ).database_connections[0]['options'].copy()
    del connection_options['portStr']
    result = dbconnections.add_db_connection(1, {
        "db_type": "MySQL",
        "caption": "This is a test MySQL database",
        "description": "This is a test MySQL database description",
        "options": connection_options
    }, '', parameters._web_session)
    parameters._db_connection_id = result['result']['db_connection_id']
    # Open the connection asynchronously and block until the callback fires.
    sqleditor.open_connection(
        parameters._db_connection_id, parameters._module_session, open_connection_cb.request_id)
    open_connection_cb.join_and_validate()
    yield parameters
    # Teardown: close db handle first, then the sqleditor module session.
    parameters._web_session.db.close()
    result = sqleditor.close_session(parameters._module_session)
    # del parameters._web_session.module_sessions[parameters._module_session_id]
class TestSqleditor:
    """Integration tests for the sqleditor plugin against a live MySQL session."""
    def test_service_connection(self, params):
        """A metadata query must be answered while a long query is running.

        SLEEP(3) occupies the user session; get_current_schema goes through
        the separate service connection, so its callback is expected to
        complete first.
        """
        @backend_callback_with_pending()
        def callback_request1(msg_type, msg, request_id, values):
            logger.debug("callback_request1")
        @backend_callback_with_pending()
        def callback_schemas(msg_type, msg, request_id, values):
            logger.debug("callback_schemas")
        params._web_session.register_callback(
            callback_request1.request_id, callback_request1)
        params._web_session.register_callback(
            callback_schemas.request_id, callback_schemas)
        sqleditor.execute(sql="SELECT SLEEP(3)", module_session=params._module_session,
                          request_id=callback_request1.request_id)
        sqleditor.get_current_schema(module_session=params._module_session,
                                     request_id=callback_schemas.request_id)
        # Schema callback is awaited first: it must not be blocked by SLEEP(3).
        callback_schemas.join_and_validate()
        callback_request1.join_and_validate()
    def test_close_session(self, params):
        """Executing SQL after close_session must raise MSG-1200.

        The connection is re-opened at the end so subsequent tests keep a
        working session.
        """
        request_id1 = str(uuid.uuid1())
        sqleditor.close_session(params._module_session)
        with pytest.raises(MSGException) as e:
            sqleditor.execute("SELECT SLEEP(1)", params._module_session, request_id1)
        assert e.value.args[0] == "Error[MSG-1200]: The database session needs to be opened before SQL can be executed."
        # Re-open the connection for the remaining tests in this module.
        @backend_callback(1)
        def open_connection_cb(msg_type, msg, request_id, values):
            if values['request_state']['type'] != "OK":
                raise Exception('Failed opening connection.')
        params._web_session.register_callback(
            open_connection_cb.request_id, open_connection_cb)
        sqleditor.open_connection(
            params._db_connection_id, params._module_session, open_connection_cb.request_id)
        open_connection_cb.join_and_validate()
    def test_kill_query(self, params):
        """kill_query must abort a running statement with a 'Query killed' error."""
        @backend_callback_with_pending()
        def callback_sleep(msg_type, msg, request_id=None, values=None):
            assert 'request_state' in values
            assert 'type' in values['request_state']
            assert 'msg' in values['request_state']
            assert values['request_state']['type'] == "ERROR"
            assert values['request_state']['msg'] == "Query killed"
        params._web_session.register_callback(
            callback_sleep.request_id, callback_sleep)
        sqleditor.execute("SELECT SLEEP(3)",
                          params._module_session, callback_sleep.request_id)
        # since kill works in a different session (service session)
        # it might happen that we try to kill a query that is still not running.
        # so avoid that, just wait a bit. there's plenty of time to kill it.
        time.sleep(1)
        sqleditor.kill_query(params._module_session)
        callback_sleep.join_and_validate()
    def test_execute_query_with_params(self, params):
        """Parameterized execute ('?' placeholder) must return a complete result set."""
        @backend_callback_with_pending()
        def callback_execute(msg_type, msg, request_id=None, values=None):
            assert 'done' in values
            assert 'columns' in values
            assert 'rows' in values
            assert values['done'] == True
        params._web_session.register_callback(
            callback_execute.request_id, callback_execute)
        result = sqleditor.execute(
            "SHOW DATABASES LIKE ?", params._module_session, callback_execute.request_id, ['mysql'])
        callback_execute.join_and_validate()
| 6,713 | 1,867 |
import tensorflow as tf
import hyperchamber as hc
import hypergan as hg
import numpy as np
from hypergan.losses.lamb_gan_loss import LambGanLoss
from hypergan.ops import TensorflowOps
from unittest.mock import MagicMock
from tests.mocks import mock_gan
# Configuration handed to LambGanLoss in the tests below.
loss_config = {
    'test': True,               # marker asserted by test_config
    'reduce': 'reduce_mean',    # reduction applied to the raw loss tensors
    'labels': [0, 1, 0],
    'label_smooth': 0.3,
    'alpha': 0.2,
    'beta': 0.1,
}
class LambGanLossTest(tf.test.TestCase):
    """Unit tests for LambGanLoss construction and output shapes."""

    def test_config(self):
        """The loss object must expose the config it was constructed with."""
        with self.test_session():
            subject = LambGanLoss(mock_gan(), loss_config)
            self.assertTrue(subject.config.test)

    def test_create(self):
        """create() must return scalar discriminator and generator losses."""
        with self.test_session():
            gan = mock_gan()
            subject = LambGanLoss(gan, loss_config)
            d_loss, g_loss = subject.create()
            # Empty shape list == rank-0 (scalar) tensor.
            self.assertEqual(gan.ops.shape(d_loss), [])
            self.assertEqual(gan.ops.shape(g_loss), [])
# Allow running this test module directly: delegate to the TF test runner.
if __name__ == "__main__":
    tf.test.main()
| 981 | 334 |
import chaospy as cp
import numpy as np
import easyvvuq as uq
import os
# import fabsim3_cmd_api as fab
import matplotlib.pyplot as plt
plt.close('all')
# author: Wouter Edeling
__license__ = "LGPL"
HOME = os.path.abspath(os.path.dirname(__file__))
# Set up a fresh campaign called "sc"
my_campaign = uq.Campaign(name='sc', work_dir='/tmp')
#number of uncertain parameters
d = 5
# Define parameter space
# NOTE(review): 45 template parameters are declared here, but only the first
# d=5 are varied below -- confirm the poly.template really has 45 slots.
params = {}
for i in range(45):
    params["x%d" % (i + 1)] = {"type": "float",
                               "min": 0.0,
                               "max": 1.0,
                               "default": 0.5}
params["d"] = {"type": "integer", "default": d}
params["out_file"] = {"type": "string", "default": "output.csv"}
output_filename = params["out_file"]["default"]
output_columns = ["f"]
# Create an encoder, decoder and collation element
# (encoder fills poly.template -> poly_in.json; decoder reads the model's CSV)
encoder = uq.encoders.GenericEncoder(
    template_fname=HOME + '/sc/poly.template',
    delimiter='$',
    target_filename='poly_in.json')
decoder = uq.decoders.SimpleCSV(target_filename=output_filename,
                                output_columns=output_columns,
                                header=0)
collater = uq.collate.AggregateSamples()
# Add the SC app (automatically set as current app)
my_campaign.add_app(name="sc",
                    params=params,
                    encoder=encoder,
                    decoder=decoder,
                    collater=collater)
#uncertain variables: x1..xd, each uniform on [0, 1]
vary = {}
for i in range(d):
    vary["x%d" % (i + 1)] = cp.Uniform(0, 1)
#=================================
#create dimension-adaptive sampler
#=================================
#sparse = use a sparse grid (required)
#growth = use a nested quadrature rule (not required)
#dimension_adaptive = use a dimension adaptive sampler (required)
my_sampler = uq.sampling.SCSampler(vary=vary, polynomial_order=1,
                                   quadrature_rule="C",
                                   sparse=True, growth=True,
                                   dimension_adaptive=True)
# Associate the sampler with the campaign
my_campaign.set_sampler(my_sampler)
# Will draw all (of the finite set of samples)
my_campaign.draw_samples()
my_campaign.populate_runs_dir()
## Use this instead to run the samples using EasyVVUQ on the localhost
my_campaign.apply_for_each_run_dir(uq.actions.ExecuteLocal(
    "./sc/poly_model.py poly_in.json"))
# fab.run_uq_ensemble(my_campaign.campaign_dir, script_name='poly_model',
#                     machine='localhost')
# fab.get_uq_samples(my_campaign.campaign_dir, machine='localhost')
my_campaign.collate()
data_frame = my_campaign.get_collation_result()
# Post-processing analysis
analysis = uq.analysis.SCAnalysis(sampler=my_sampler, qoi_cols=output_columns)
my_campaign.apply_analysis(analysis)
# how many adaptation to make
number_of_adaptations = 2
# Adaptive loop: each pass evaluates admissible forward points, runs the
# model there, and refines the grid along the direction of largest error.
for i in range(number_of_adaptations):
    #required parameter in the case of a Fabsim run
    skip = my_sampler.count
    print('Adaptation %d' % (i+1))
    #look-ahead step (compute the code at admissible forward points)
    my_sampler.look_ahead(analysis.l_norm)
    #proceed as usual
    my_campaign.draw_samples()
    my_campaign.populate_runs_dir()
    my_campaign.apply_for_each_run_dir(uq.actions.ExecuteLocal(
        "./sc/poly_model.py poly_in.json"))
    # fab.run_uq_ensemble(my_campaign.campaign_dir, script_name='poly_model',
    #                     machine='localhost', skip = skip)
    # fab.get_uq_samples(my_campaign.campaign_dir, machine='localhost')
    my_campaign.collate()
    #compute the error at all admissible points, select direction with
    #highest error and add that direction to the grid
    data_frame = my_campaign.get_collation_result()
    analysis.adapt_dimension('f', data_frame, method='var')
#proceed as usual with analysis
my_campaign.apply_analysis(analysis)
results = my_campaign.get_last_analysis()
#some post-processing
#analytic mean and standard deviation of the product-of-polynomials test
#function, used as reference values below
a = np.array([1/(2*(i+1)) for i in range(d)])
ref_mean = np.prod(a+1)/2**d
ref_std = np.sqrt(np.prod(9*a[0:d]**2/5 + 2*a[0:d] + 1)/2**(2*d) - ref_mean**2)
print("======================================")
print("Number of samples = %d" % my_sampler._number_of_samples)
print("--------------------------------------")
print("Analytic mean = %.4e" % ref_mean)
print("Computed mean = %.4e" % results['statistical_moments']['f']['mean'])
print("--------------------------------------")
print("Analytic standard deviation = %.4e" % ref_std)
print("Computed standard deviation = %.4e" % results['statistical_moments']['f']['std'])
print("--------------------------------------")
print("First order Sobol indices =", results['sobols_first']['f'])
print("--------------------------------------")
analysis.plot_grid()
analysis.plot_stat_convergence()
analysis.adaptation_table()
from fnmatch import fnmatch
def test_fixture(regression_file_path):
assert fnmatch(str(regression_file_path.relative), "[01].xmf")
assert fnmatch(
str(regression_file_path.absolute),
"*/test_regression_file_path_fixture0/references/case/[01].xmf",
)
| 280 | 101 |
"""preprocess data before using it"""
import logging
import pandas as pd
from preprocess_data import csv_data, custom_col_base, custom_columns
def col_from_cls(client_class, data):
    '''
    Build a new column by instantiating a user-supplied column class.

    Parameters:
        client_class (type): class whose instances implement ``add_column()``
        data (pd.DataFrame): data used to calculate the new column

    Returns:
        tuple: (column name, pd.Series holding the column values)

    Raises:
        TypeError: if ``add_column()`` does not return a pd.Series
    '''
    # TODO check if there are any Nan or inf values in new column
    instance = client_class(data)
    name, values = instance.add_column()
    if not isinstance(values, pd.Series):
        raise TypeError('Method "add_column()" has to return "pd.Series"')
    return name, values
def add_user_defined_features(data: pd.DataFrame, user_cols) -> pd.DataFrame:
    '''
    Add columns produced by user classes in 'user_calculated_columns.py'.

    Parameters:
        data (pd.DataFrame): data used to calculate new columns
        user_cols (list): user class names to use

    Returns:
        data (pd.DataFrame): the updated dataframe
    '''
    logging.info('Adding custom columns')
    # Only subclasses declared in the custom_columns module and explicitly
    # requested by the caller are applied.
    for subclass in custom_col_base.CustomColumn.__subclasses__():
        cls_name = subclass.__name__
        if subclass.__module__ != custom_columns.__name__:
            continue
        if cls_name not in user_cols:
            continue
        try:
            # Compute the new column and attach it to the dataframe.
            col_name, col_values = col_from_cls(subclass, data)
            data[col_name] = col_values
            logging.info(f'Add column "{col_name}" ✅')
        except Exception as e:
            # A failing user class is logged but does not abort the rest.
            logging.info(f'Add column from user class "{cls_name}" '
                         f'❌: {str(e)}')
    return data
def preprocess_data(tic_list, start_date, end_date,
                    field_mappings, baseline_filed_mappings,
                    csv_file_info, user_columns) -> pd.DataFrame:
    """Load raw ticker data from csv files and add user-defined feature columns."""
    logging.info(f'Train start date: {start_date}')
    logging.info(f'Train end date: {end_date}')
    logging.info(f'Tickers: {tic_list}')
    logging.info('Fetching data from csv files')
    loader = csv_data.CSVData(
        start_date=start_date,
        end_date=end_date,
        ticker_list=tic_list,
        csv_dirs=csv_file_info["dir_list"],
        baseline_file_name=csv_file_info["baseline_file_name"],
        has_daily_trading_limit=csv_file_info["has_daily_trading_limit"],
        use_baseline_data=csv_file_info["use_baseline_data"],
        baseline_filed_mappings=baseline_filed_mappings,
        baseline_date_column_name=csv_file_info["baseline_date_column_name"]
    )
    frame = loader.fetch_data(
        field_mappings=field_mappings,
        date_column=csv_file_info["date_column_name"])
    # Enrich the raw data with the caller-selected custom columns.
    frame = add_user_defined_features(frame, user_columns)
    logging.info(f'Preprocessed data (tail): \n{frame.tail()}')
    logging.info(f'Sample size: {len(frame)}')
    logging.info(f'Columns after preprocess: {frame.columns}')
    return frame
def get_baseline_df(
    start_date,
    end_date,
    baseline_filed_mappings,
    csv_file_info
):
    """
    Return the baseline dataframe loaded through csv_data.CSVData.
    """
    # Only the baseline-related CSVData options are needed here.
    loader = csv_data.CSVData(
        start_date=start_date,
        end_date=end_date,
        baseline_file_name=csv_file_info["baseline_file_name"],
        use_baseline_data=csv_file_info["use_baseline_data"],
        baseline_filed_mappings=baseline_filed_mappings,
        baseline_date_column_name=csv_file_info["baseline_date_column_name"]
    )
    return loader.baseline_df
| 3,996 | 1,222 |
# Display names of the supported football leagues/competitions.
# NOTE(review): order appears arbitrary — presumably not significant; confirm.
LEAGUES = [
    'Scottish Premiership',
    'Italy Serie A',
    'French Ligue 1',
    'Spanish Segunda Division',
    'Australian A-League',
    'Italy Serie B',
    'Dutch Eredivisie',
    'Mexican Primera Division Torneo Clausura',
    'Russian Premier Liga',
    'Spanish Primera Division',
    'English League One',
    'UEFA Europa League',
    'Mexican Primera Division Torneo Apertura',
    'German Bundesliga',
    'South African ABSA Premier League',
    'Austrian T-Mobile Bundesliga',
    'Barclays Premier League',
    'English League Two',
    'Greek Super League',
    'German 2. Bundesliga',
    'United Soccer League',
    'Chinese Super League',
    'UEFA Champions League',
    'Portuguese Liga',
    'English League Championship',
    'Belgian Jupiler League',
    'Norwegian Tippeligaen',
    'Turkish Turkcell Super Lig',
    'Danish SAS-Ligaen',
    'Japanese J League',
    'Swedish Allsvenskan',
    'Swiss Raiffeisen Super League',
    'Brasileiro Série A',
    'Major League Soccer',
    'Argentina Primera Division',
    'French Ligue 2'
]
TEAMS = {
'Manchester City': 94.25,
'Bayern Munich': 93.96,
'Liverpool': 92.92,
'Barcelona': 91.22,
'Paris Saint-Germain': 87.79,
'Chelsea': 85.96,
'Real Madrid': 85.23,
'Tottenham Hotspur': 85.23,
'Juventus': 84.07,
'Borussia Dortmund': 83.63,
'Atletico Madrid': 83.11,
'Bayer Leverkusen': 82.44,
'Ajax': 82.17,
'RB Leipzig': 81.72,
'Internazionale': 81.52,
'Napoli': 80.98,
'Manchester United': 79.79,
'Arsenal': 79.22,
'Everton': 78.53,
'FC Salzburg': 78.51,
'Atalanta': 78.14,
'FC Porto': 78.03,
'Valencia': 77.81,
'Benfica': 76.86,
'TSG Hoffenheim': 76.23,
'Leicester City': 75.82,
'Olympiacos': 75.73,
'AC Milan': 75.49,
'Sevilla FC': 74.86,
'Lyon': 74.57,
'Wolverhampton': 73.87,
'AS Roma': 73.69,
'Getafe': 73.57,
'Real Sociedad': 73.55,
'Athletic Bilbao': 73.38,
'Eibar': 73.31,
'Eintracht Frankfurt': 72.83,
'FC Krasnodar': 71.74,
'Real Betis': 71.51,
'Young Boys': 71.5,
'Villarreal': 71.3,
'Palmeiras': 71.27,
'Borussia Monchengladbach': 71.24,
'Zenit St Petersburg': 71.18,
'Lazio': 71.13,
'Crystal Palace': 71.09,
'VfL Wolfsburg': 70.72,
'Espanyol': 70.42,
'Leganes': 70.32,
'PSV': 70.14,
'Werder Bremen': 70.1,
'PAOK Salonika': 70.07,
'Newcastle': 69.88,
'West Ham United': 69.71,
'AFC Bournemouth': 69.62,
'Lille': 69.47,
'CSKA Moscow': 69.44,
'Galatasaray': 69.39,
'Fulham': 69.3,
'Sporting CP': 68.75,
'Southampton': 68.7,
'Flamengo': 68.34,
'Shakhtar Donetsk': 68.14,
'Schalke 04': 68.02,
'Watford': 67.92,
'Celta Vigo': 67.77,
'Burnley': 67.76,
'Torino': 67.76,
'Mainz': 67.75,
'Genk': 67.46,
'FC Copenhagen': 67.37,
'Fiorentina': 67.14,
'Marseille': 66.56,
'Sampdoria': 66.48,
'Hertha Berlin': 66.08,
'Alavés': 65.93,
'Club Brugge': 65.76,
'River Plate': 65.61,
'Boca Juniors': 65.43,
'Basel': 65.38,
'Lokomotiv Moscow': 65.26,
'Levante': 64.99,
'Dynamo Kyiv': 64.82,
'Bologna': 64.78,
'Aston Villa': 64.76,
'Besiktas': 64.76,
'Viktoria Plzen': 64.72,
'Santos': 64.41,
'Real Valladolid': 64.29,
'St Etienne': 64.25,
'AS Monaco': 64.14,
'Osasuna': 64.11,
'Fortuna Düsseldorf': 64.04,
'Montpellier': 63.95,
'Granada': 63.78,
'Mallorca': 63.77,
'Genoa': 63.48,
'Celtic': 63.39,
'Ludogorets': 63.34,
'Brighton and Hove Albion': 62.79,
'Norwich City': 62.76,
'Leeds United': 62.74,
'Nantes': 62.67,
'Guangzhou Evergrande': 62.64,
'Red Star Belgrade': 62.4,
'Grêmio': 62.21,
'Atlético Paranaense': 61.84,
'Sheffield United': 61.76,
'Club América': 61.7,
'FC Cologne': 61.4,
'Sassuolo': 61.3,
'Dinamo Zagreb': 61.22,
'SC Freiburg': 60.9,
'Nice': 60.42,
'Angers': 60.15,
'Istanbul Basaksehir': 59.94,
'Los Angeles FC': 59.87,
'Stade Rennes': 59.76,
'Trabzonspor': 59.59,
'FC Augsburg': 59.56,
'Feyenoord': 59.49,
'Spal': 59.18,
'Monterrey': 58.88,
'Beijing Guoan': 58.81,
'Tigres UANL': 58.78,
'Cagliari': 58.67,
'1. FC Union Berlin': 58.66,
'Strasbourg': 58.39,
'Huddersfield Town': 58.38,
'Rangers': 58.36,
'FC Astana': 58.29,
'Braga': 58.28,
'Nimes': 58.22,
'LASK Linz': 58.2,
'Slavia Prague': 58.2,
'SC Paderborn': 58.12,
'Internacional': 57.99,
'Udinese': 57.93,
'Jablonec': 57.83,
'Empoli': 57.74,
'Shanghai SIPG': 57.73,
'FC Midtjylland': 57.18,
'Spartak Moscow': 57.05,
'São Paulo': 56.84,
'Bordeaux': 56.83,
'Corinthians': 56.28,
'Reims': 56.26,
'Cruz Azul': 56.08,
'Atletico Mineiro': 55.95,
'Toulouse': 55.55,
'Stoke City': 55.52,
'Brentford': 55.4,
'Kawasaki Frontale': 55.29,
'West Bromwich Albion': 55.12,
'Amiens': 54.82,
'AEK Athens': 54.46,
'Fenerbahce': 54.28,
'Racing Club': 54.18,
'Cardiff City': 54.00,
'Malmo FF': 53.82,
'FC Arsenal Tula': 53.55,
'Metz': 53.48,
'Frosinone': 53.36,
'Swansea City': 53.3,
'Brondby': 53.26,
'Vitesse': 53.01,
'Parma': 53.00,
'Molde': 52.92,
'Brest': 52.77,
'Dijon FCO': 52.75,
'Santos Laguna': 52.58,
'Bahía': 52.53,
'VfB Stuttgart': 52.47,
'Middlesbrough': 52.45,
'León': 52.45,
'Anderlecht': 52.41,
'CA Independiente': 52.38,
'Girona FC': 52.34,
'Standard Liege': 51.98,
'Bristol City': 51.95,
'Kashima Antlers': 51.94,
'Derby County': 51.83,
'Pachuca': 51.59,
'AZ': 51.32,
'Guimaraes': 51.04,
'Guingamp': 50.72,
'KAA Gent': 50.44,
'Terek Grozny': 50.03,
'FK Qarabag': 49.91,
'Dinamo Moscow': 49.76,
'Gazovik Orenburg': 49.68,
'Shandong Luneng': 49.46,
'New York City FC': 49.34,
'Chievo Verona': 49.3,
'Vasco da Gama': 49.27,
'Caykur Rizespor': 49.26,
'Apollon Limassol': 49.22,
'Cruzeiro': 49.03,
'Rostov': 48.88,
'AIK': 48.83,
'Velez Sarsfield': 48.74,
'Nottingham Forest': 48.59,
'Fluminense': 48.22,
'Rapid Vienna': 48.13,
'Defensa y Justicia': 48.07,
'Atlanta United FC': 48.06,
'ADO Den Haag': 47.8,
'Rayo Vallecano': 47.69,
'BATE Borisov': 47.66,
'Blackburn': 47.63,
'Sheffield Wednesday': 47.59,
'Ceará': 47.18,
'Arizona United': 46.91,
'Hull City': 46.74,
'FC Utrecht': 46.5,
'Rio Ave': 46.41,
'Hammarby': 46.39,
'Jiangsu Suning FC': 46.38,
'Millwall': 46.37,
'FK Austria Vienna': 46.36,
'Desportivo Aves': 46.34,
'Birmingham': 46.25,
'Toluca': 46.21,
'Konyaspor': 46.04,
'Botafogo': 46.01,
'Portimonense': 45.95,
'Djurgardens IF': 45.8,
'FC Nordsjaelland': 45.73,
'Tijuana': 45.61,
'BK Hacken': 45.6,
'Hannover 96': 45.41,
'AEK Larnaca': 45.4,
'FC Luzern': 45.22,
'IFK Norrkoping': 45.14,
'FC Ufa': 45.09,
'Queens Park Rangers': 44.99,
'St. Truidense': 44.82,
'Boavista': 44.8,
'Lanus': 44.75,
'Pumas Unam': 44.64,
'Rubin Kazan': 44.62,
'Moreirense': 44.44,
'Aris Salonika': 44.4,
'SK Sturm Graz': 44.36,
'Ural Sverdlovsk Oblast': 44.21,
'Portland Timbers': 44.21,
'Sivasspor': 44.2,
'Guadalajara': 43.99,
'Hamburg SV': 43.91,
'Goztepe': 43.83,
'Preston North End': 43.78,
'Mouscron-Peruwelz': 43.73,
'Málaga': 43.71,
'KV Kortrijk': 43.71,
'Philadelphia Union': 43.65,
'Sporting Kansas City': 43.52,
'San Lorenzo': 43.5,
'Santa Clara': 43.46,
'Cerezo Osaka': 43.34,
'Perth Glory': 43.32,
'Troyes': 43.27,
'Wigan': 43.27,
'Tigre': 43.21,
'Alanyaspor': 43.08,
'Wolfsberger AC': 42.98,
'Sunderland': 42.97,
'Banfield': 42.9,
'Videoton FC': 42.86,
'FC Groningen': 42.82,
'Union Santa Fe': 42.76,
'Talleres de Córdoba': 42.61,
'Vitoria Setubal': 42.61,
'FC Lugano': 42.58,
'Chapecoense AF': 42.54,
'Rosenborg': 42.42,
'Antwerp': 42.37,
'Cashpoint SC Rheindorf Altach': 42.26,
'Yokohama F. Marinos': 42.21,
'Seattle Sounders FC': 42.2,
'Dalian Aerbin': 42.17,
'Sporting de Charleroi': 42.09,
'New York Red Bulls': 42.07,
'Goiás': 42.06,
'Atromitos': 41.89,
'Sanfrecce Hiroshima': 41.86,
'Vorskla': 41.83,
"Newell's Old Boys": 41.61,
'Necaxa': 41.59,
'Yeni Malatyaspor': 41.39,
'Deportivo La Coruña': 41.33,
'Erzurumspor': 41.29,
'Odense BK': 41.26,
'FC Tokyo': 41.03,
'AGF Aarhus': 41.01,
'Caen': 40.93,
'Fortaleza': 40.83,
'Belenenses': 40.7,
'Chaves': 40.45,
'SV Zulte Waregem': 40.42,
'St Gallen': 40.4,
'Los Angeles Galaxy': 40.4,
'Colon Santa Fe': 40.26,
'FC Sion': 40.1,
'Esbjerg': 40.03,
'Mamelodi Sundowns': 39.98,
'Tondela': 39.98,
'Real Salt Lake': 39.97,
'Huracán': 39.85,
'Chicago Fire': 39.84,
'Atlas': 39.75,
'Morelia': 39.72,
'FC Dallas': 39.56,
'Argentinos Juniors': 39.55,
'Querétaro': 39.45,
'San Jose Earthquakes': 39.43,
'FC Zurich': 39.42,
'Estudiantes': 39.42,
'Atlético Tucumán': 39.31,
'Hebei China Fortune FC': 39.31,
'Reading': 39.26,
'Shanghai Greenland': 39.21,
'Avaí': 38.95,
'Rosario Central': 38.9,
'Kasimpasa': 38.86,
'Lorient': 38.86,
'Toronto FC': 38.86,
'Guangzhou RF': 38.74,
'Lens': 38.63,
'Sydney FC': 38.56,
'Valerenga': 38.53,
'Heracles': 38.4,
'Maritimo': 38.33,
'Heerenveen': 38.33,
'Godoy Cruz': 38.24,
'Minnesota United FC': 38.22,
'Krylia Sovetov': 38.2,
'1. FC Nürnberg': 38.04,
'Tianjin Teda': 38.03,
'Puebla': 37.85,
'Panathinaikos': 37.79,
'Ankaragucu': 37.73,
'PEC Zwolle': 37.54,
'Chongqing Lifan': 37.53,
'New England Revolution': 37.44,
'Bursaspor': 37.3,
'Aldosivi': 37.3,
'Barnsley': 37.3,
'Willem II': 37.21,
'Thun': 37.2,
'Holstein Kiel': 37.00,
'Luton Town': 36.99,
'AaB': 36.96,
'Cadiz': 36.86,
'SV Mattersburg': 36.85,
'Montreal Impact': 36.82,
'Patronato': 36.77,
'Belgrano Cordoba': 36.75,
'FC Ingolstadt 04': 36.73,
'Orlando Pirates': 36.72,
'Kilmarnock': 36.62,
'Columbus Crew': 36.61,
'San Martin San Juan': 36.48,
'Randers FC': 36.42,
'FC Spartak Trnava': 36.38,
'Charlton Athletic': 36.34,
'Aberdeen': 36.33,
'Kayserispor': 36.16,
'C.D. Nacional': 36.16,
'Almeria': 36.02,
'VVV Venlo': 35.95,
'Waasland-Beveren': 35.86,
'Arminia Bielefeld': 35.84,
'FC Trenkwalder Admira': 35.73,
'Sporting Gijón': 35.67,
'Henan Jianye': 35.55,
'Gimnasia La Plata': 35.53,
'Wuhan Zall': 35.52,
'Bodo/Glimt': 35.39,
'San Martin de Tucuman': 35.27,
'DC United': 35.25,
'Consadole Sapporo': 35.14,
'Antalyaspor': 34.92,
'Real Zaragoza': 34.82,
'Houston Dynamo': 34.79,
'Jahn Regensburg': 34.67,
'Akhisar Belediye': 34.59,
'Melbourne City': 34.57,
'F91 Dudelange': 34.49,
'Emmen': 34.46,
'1. FC Heidenheim 1846': 34.41,
'Hartberg': 34.3,
'Paris FC': 34.23,
'Albacete': 34.09,
'IFK Goteborg': 34.07,
'FC Wacker Innsbruck': 34.04,
'Orlando City SC': 33.94,
'OFI Crete': 33.93,
'Haugesund': 33.88,
'Asteras Tripolis': 33.84,
'Newcastle Jets': 33.78,
'Bidvest Wits': 33.7,
'Tianjin Quanujian': 33.64,
'Urawa Red Diamonds': 33.57,
'Neuchatel Xamax': 33.54,
'Las Palmas': 33.52,
'Feirense': 33.38,
'Hibernian': 33.28,
'SD Huesca': 33.26,
'Metallurg Krasnoyarsk': 33.25,
'Eupen': 33.19,
'SK Brann': 33.18,
'Ipswich Town': 33.13,
'Excelsior': 33.07,
'Sonderjyske': 32.99,
'KV Oostende': 32.85,
'Vissel Kobe': 32.81,
'Kristiansund BK': 32.72,
'Lamia': 32.57,
'VfL Bochum': 32.34,
'Nagoya Grampus Eight': 32.21,
'Larissa': 32.08,
'SV Sandhausen': 31.97,
'Atlético San Luis': 31.67,
'Palermo': 31.44,
'Cercle Brugge': 31.44,
'Dynamo Dresden': 31.43,
'Tenerife': 31.38,
'CSA': 31.38,
'SV Darmstadt 98': 31.35,
'Melbourne Victory': 31.32,
'Nashville SC': 31.29,
'Clermont Foot': 31,
'KSC Lokeren': 31,
'Le Havre': 30.95,
'Extremadura UD': 30.94,
'Elche': 30.89,
'Odd BK': 30.89,
'Real Oviedo': 30.87,
'Gamba Osaka': 30.76,
'Reno 1868 FC': 30.57,
'1. FC Magdeburg': 30.45,
'Lobos de la BUAP': 30.3,
'St. Pölten': 30.28,
'New York Red Bulls II': 30.21,
'Hobro IK': 30.13,
'Oita Trinita': 30.06,
'Benevento': 29.83,
'Veracruz': 29.73,
'Shenzhen FC': 29.66,
'Vendsyssel': 29.55,
'Östersunds FK': 29.53,
'FC Juárez': 29.48,
'Chateauroux': 29.36,
'Adelaide United': 29.25,
'De Graafschap': 29.25,
'Black Aces': 29.18,
'Lugo': 29.18,
'Cittadella': 29.16,
'FC St. Pauli': 29.04,
'Vejle': 29.03,
'Numancia': 28.98,
'Tampa Bay Rowdies': 28.68,
'AS Nancy Lorraine': 28.55,
'FC Xanthi': 28.54,
'Fortuna Sittard': 28.47,
'Kaizer Chiefs': 28.47,
'Orléans': 28.4,
'Panetolikos': 28.33,
'Rayo Majadahonda': 28.2,
'Colorado Rapids': 28.18,
'Orebro SK': 28.18,
'Vegalta Sendai': 27.98,
'Auxerre': 27.98,
'Erzgebirge Aue': 27.94,
'Portsmouth': 27.65,
'Hearts': 27.65,
'Western Sydney FC': 27.57,
'SpVgg Greuther Fürth': 27.42,
'Sarpsborg': 27.21,
'Panionios': 27.09,
'Crotone': 27.07,
'Bolton': 27.05,
'Valenciennes': 27.03,
'Pittsburgh Riverhounds': 27.02,
'Milton Keynes Dons': 26.96,
'Perugia': 26.91,
'Shimizu S-Pulse': 26.87,
'Spezia': 26.33,
'AC Horsens': 26.27,
'Shonan Bellmare': 26.25,
'Giannina': 26.12,
'Guizhou Renhe': 26.02,
'Burton Albion': 25.86,
'Grasshoppers Zürich': 25.61,
'Motherwell': 25.53,
'GIF Sundsvall': 25.32,
'Indy Eleven': 24.83,
'Anzhi Makhachkala': 24.82,
'Cosenza': 24.61,
'FC Cincinnati': 24.46,
'Grenoble': 24.43,
'North Carolina FC': 24.33,
'Wellington Phoenix': 24.27,
'AD Alcorcon': 24.26,
'MSV Duisburg': 24.25,
'Doncaster Rovers': 24.13,
'Real Monarchs SLC': 24.09,
'Vancouver Whitecaps': 23.95,
'Scunthorpe': 23.88,
'Peterborough United': 23.86,
'IK Sirius': 23.78,
'Fresno FC': 23.66,
'Niort': 23.62,
'Rotherham United': 23.58,
'Cordoba': 23.39,
'IF Elfsborg': 23.35,
'Stabaek': 23.05,
'Louisville City FC': 22.79,
'Livingston': 22.74,
'Foggia': 22.69,
'Sochaux': 22.69,
'Viking FK': 22.65,
'Cremonese': 22.61,
'Sagan Tosu': 22.51,
'Mjondalen': 22.5,
'St Johnstone': 22.45,
'Lillestrom': 22.3,
'Reus Deportiu': 22.2,
'San Antonio FC': 22.16,
'Fleetwood Town': 21.97,
'Ottawa Fury FC': 21.68,
'AC Ajaccio': 21.65,
'Coventry City': 21.61,
'SuperSport United': 21.58,
'St Mirren': 21.54,
'Bradford City': 21.53,
'Oxford United': 21.38,
'Beziers AS': 21.15,
'Lincoln City': 20.97,
'Highlands Park FC': 20.97,
'Polokwane City FC': 20.91,
'Helsingborgs IF': 20.78,
'NAC': 20.75,
'Bloem Celtic': 20.58,
'Ranheim': 20.56,
'Jubilo Iwata': 20.46,
'Brescia': 20.45,
'Stromsgodset': 20.41,
'Maritzburg Utd': 20.38,
'Orange County SC': 20.31,
'US Pescara': 20.15,
'Sacramento Republic FC': 20.03,
'New Mexico United': 19.99,
'Golden Arrows': 19.68,
'Gimnástic Tarragona': 19.51,
'Bristol Rovers': 19.43,
'AmaZulu': 19.28,
'Gillingham': 19.2,
'Shrewsbury Town': 18.97,
'F.B.C Unione Venezia': 18.89,
'Livorno': 18.84,
'Salernitana': 18.81,
'Portland Timbers 2': 18.71,
'Kalmar FF': 18.71,
'Charleston Battery': 18.69,
'Lecce': 18.66,
'Verona': 18.53,
'Tromso': 18.14,
'Austin Bold FC': 17.99,
'Chippa United': 17.97,
'Brisbane Roar': 17.89,
'Padova': 17.85,
'Oklahoma City Energy FC': 17.6,
'Mansfield Town': 17.08,
'Blackpool': 17.06,
'Ascoli': 17.06,
'Free State Stars': 17.00,
'Southend United': 16.98,
'Levadiakos': 16.8,
'Exeter City': 16.76,
'Saint Louis FC': 16.71,
'Red Star FC 93': 16.37,
'Hamilton Academical': 16.25,
'Baroka FC': 16.09,
'GFC Ajaccio': 15.91,
'Accrington Stanley': 15.69,
'Falkenbergs FF': 15.6,
'Tranmere Rovers': 15.57,
'Carpi': 15.5,
'Vasby United': 15.27,
'Matsumoto Yamaga FC': 14.98,
'Central Coast Mariners': 14.88,
'Bury': 14.87,
'Charlotte Independence': 14.84,
'Wycombe Wanderers': 14.69,
'LA Galaxy II': 14.67,
'Oldham Athletic': 14.62,
'Swindon Town': 14.56,
'Black Leopards': 14.41,
'Plymouth Argyle': 14.2,
'AFC Wimbledon': 14.07,
'El Paso Locomotive FC': 13.7,
'Las Vegas Lights FC': 13.18,
'Bethlehem Steel FC': 13.11,
'Apollon Smyrni': 13.08,
'Forest Green Rovers': 12.88,
'Rochdale': 12.68,
'Dundee': 12.52,
'Rio Grande Valley FC Toros': 12.48,
'Northampton Town': 12.36,
'Loudoun United FC': 11.49,
'Crewe Alexandra': 11.05,
'Colchester United': 10.65,
'Carlisle United': 10.37,
'Stevenage': 10.24,
'Cheltenham Town': 10.22,
'Memphis 901 FC': 10.15,
'Walsall': 9.95,
'Swope Park Rangers': 9.9,
'Hartford Athletic': 9.63,
'Birmingham Legion FC': 9.29,
'Morecambe': 8.8,
'Newport County': 8.19,
'Grimsby Town': 7.39,
'Crawley Town': 7.18,
'Port Vale': 6.94,
'Tulsa Roughnecks': 6.63,
'Colorado Springs Switchbacks FC': 6.45,
'Cambridge United': 6.41,
'Macclesfield': 6.28,
'Atlanta United 2': 4.73,
'Notts County': 4.52,
'Yeovil Town': 4.35,
'Tacoma Defiance': 4.14
}
| 17,930 | 9,950 |
import sys
import itertools
def main():
    """For each 'length,letters' line of the file named by argv[1], print the
    sorted, comma-joined set of all strings of that length over the letters."""
    with open(sys.argv[1]) as input_file:
        for line in input_file:
            count_str, alphabet = line.strip().split(',')
            # A set removes duplicates when the alphabet repeats letters.
            words = {''.join(tup) for tup in
                     itertools.product(alphabet.strip(), repeat=int(count_str))}
            print(','.join(sorted(words)))
if __name__ == '__main__':
    main()
| 474 | 133 |
import os
from falcon import Request, Response
class ServerMetadataResource:
    """Expose chat-server connection metadata to clients."""

    def on_get(self, req: Request, resp: Response) -> None:
        # The metadata rarely changes; allow clients to cache it for a week.
        resp.cache_control = ['public', 'max-age=604800']
        chat_host = os.getenv('CHITTY_CHAT_HOST', '127.0.0.1')
        chat_port = int(os.getenv('CHITTY_CHAT_PORT', '5000'))
        resp.media = {'chat': {'host': chat_host, 'port': chat_port}}
| 403 | 141 |
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import zpylib.ast.lexer as lex
from zpylib.grammar import *
# Test it out
data = """
# TEST
Apple = 3 + 4 * 10 + -20 *2
def Print(what):
if True:
go(what + 10)
如果 错:
x = 'yoo what's up'
'''what fuck'''
{:.2f}.format(name)
"""
class Compiler():
    """Lexes the given source and rewrites it in place, appending an
    underscore to every NAME token."""
    def __init__(self, data):
        # Build the lexer and hand it the source text.
        self.lexer = lex.lex()
        self.lexer.input(data)
        self.data = data
        # Tracks how much earlier substitutions have shifted later token
        # positions relative to the original text.
        self.positionOffset = 0
        self.tokenize()
    def tokenize(self):
        """Walk every token, rewriting self.data as we go."""
        print(self.data)
        while True:
            token = self.lexer.token()
            if not token:
                break  # No more input
            self.update(token)
        print(self.data)
    def update(self, tok):
        # Only NAME tokens get rewritten (suffixed with '_').
        if tok.type == 'NAME':
            self.subData(tok.value, tok.value + '_', tok.lexpos)
    def subData(self, oldStr, newStr, index):
        """Replace oldStr with newStr at the offset-corrected position."""
        start = index + self.positionOffset
        end = start + len(oldStr)
        self.data = self.data[:start] + newStr + self.data[end:]
        # Account for the length change so later lexpos values still line up.
        self.positionOffset += len(newStr) - len(oldStr)
Compiler(data)  # Run the rewrite pass on the sample source above.
| 1,295 | 452 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
from yamllint_junit_bootstrap import bootstrap
# Version is sourced from the package itself so releases stay in sync.
version = bootstrap.__version__
# Reuse the README verbatim as the PyPI long description.
with open("README.md", "r") as fh:
    long_description = fh.read()
setup(
    name='yamllint-junit',
    packages=['yamllint_junit_bootstrap'],
    version=version,
    description='yamllint to JUnit converter.',
    long_description=long_description,
    long_description_content_type="text/markdown",
    author='wasil',
    author_email='piotr.m.boruc@gmail.com',
    url='https://github.com/wasilak/yamllint-junit',
    # Tarball URL is derived from the version tag on GitHub.
    download_url='https://github.com/wasilak/yamllint-junit/archive/%s.tar.gz' % version,
    keywords=['yaml', 'junit'],
    classifiers=[],
    entry_points={
        "console_scripts": ['yamllint-junit = yamllint_junit_bootstrap.bootstrap:main']
    },
    install_requires=[
        'yamllint',
    ],
    tests_require=[
        'pytest',
        'flake8',
        'coverage',
        'mock',
    ],
)
| 989 | 344 |
from pyot.models.tft import base
def platform_to_region(platform: str) -> str:
    '''Return the region correspondent to a given platform'''
    # Delegates to pyot's internal platform -> region routing table.
    routing_table = base.PyotRouting._platform2regions
    return routing_table[platform]
| 199 | 59 |
"""Simple dummy solver for the BiClique problem, outputting a static solution."""
# Static assignment: vertex 1 goes to set1; vertices 2-4 go to set2.
_SOLUTION_LINES = [
    "s set1 1",
    "s set2 2",
    "s set2 3",
    "s set2 4",
]
with open("output", "w") as output:
    output.write("\n".join(_SOLUTION_LINES) + "\n")
| 242 | 89 |
from typing import Union, List, Type
from sqlalchemy.orm import scoped_session, sessionmaker
from logging import Logger
from starlette.endpoints import HTTPEndpoint
from starlette.requests import Request
from starlette.responses import JSONResponse
from starlette.status import HTTP_500_INTERNAL_SERVER_ERROR, HTTP_200_OK
from pyalfred.contract.utils import chunk, serialize
from auto_schema import AutoMarshmallowSchema
from query_serializer import QueryBuilder
from pyalfred.contract.utils import get_columns_in_base_mixin
from pyalfred.server.utils import make_base_logger, apply_filter_from_string
from pyalfred.constants import CHUNK_SIZE
def get_bool_from_string(x: str):
    """Interpret *x* case-insensitively as a boolean ('true' -> True)."""
    return "true" == x.lower()
class DatabaseResource(HTTPEndpoint):
    """Generic CRUD endpoint over a SQLAlchemy model described by a
    marshmallow schema. Concrete endpoints are built via ``make_endpoint``,
    which bakes the configuration below into a new subclass."""
    # Class-level configuration injected by ``make_endpoint``; the base
    # class itself is never mounted directly.
    schema = None
    session_factory = None
    logger = None
    _create_ignore = None
    @classmethod
    def make_endpoint(
        cls,
        schema: Type[AutoMarshmallowSchema],
        session_factory: Union[scoped_session, sessionmaker],
        logger: Logger = None,
        mixin_ignore: Type[object] = None,
        create_ignore: List[str] = None,
    ):
        """
        Implements a base resources for exposing database models.
        :param schema: The schema to use, must be marshmallow.Schema
        :param session_factory: The sqlalchemy scoped_session object to use
        :param logger: The logger to use
        :param mixin_ignore: If all of your models inherit from a single mixin that defines server side generated
            columns, you may pass that here.
        :param create_ignore: explicit list of column names to skip on create.
            NOTE(review): silently ignored whenever ``mixin_ignore`` is also
            given (see the ``elif`` below) — confirm that is intended.
        """
        _create_ignore = []
        if mixin_ignore is not None:
            _create_ignore += get_columns_in_base_mixin(mixin_ignore)
        elif create_ignore is not None:
            _create_ignore += create_ignore
        # Bake the configuration into a fresh subclass so each exposed model
        # gets its own endpoint type.
        state_dict = {
            "schema": schema,
            "session_factory": session_factory,
            "logger": logger or make_base_logger(schema.__name__),
            "_create_ignore": _create_ignore,
        }
        return type(f"DatabaseResource_{schema.__name__}", (DatabaseResource,), state_dict)
    @property
    def model(self):
        # The SQLAlchemy model class behind the marshmallow schema.
        return self.schema.Meta.model
    @property
    def fields_to_skip_on_create(self):
        # Server-generated columns plus the schema's load-only fields are
        # treated as dump-only when creating new rows.
        schema_fields_to_load = list(getattr(self.schema, "load_only_fields", []))
        return self._create_ignore + schema_fields_to_load
    async def get(self, req: Request):
        """Query objects; supports 'filter' and 'ops' query parameters."""
        session = self.session_factory()
        try:
            # with_for_update locks the selected rows for this transaction.
            query = session.query(self.model).with_for_update()
            filter_ = req.query_params.get("filter", None)
            if filter_:
                query_builder = QueryBuilder(self.model)
                query = query_builder.from_string(query, filter_)
            ops = req.query_params.get("ops", "")
            result = apply_filter_from_string(self.model, query, ops.split(","))
            # Normalize the result to a list so serialization is uniform.
            if result is None:
                result = list()
            elif not isinstance(result, list):
                result = [result]
            media = serialize(result, self.schema, many=True)
            status = HTTP_200_OK
        except Exception as e:
            # Broad catch: any failure is logged and reported as a 500 whose
            # body carries the exception class and message.
            self.logger.exception(e)
            status = HTTP_500_INTERNAL_SERVER_ERROR
            media = f"{e.__class__.__name__}: {e}"
        self.session_factory.remove()
        return JSONResponse(media, status)
    async def put(self, req: Request):
        """Create objects from the JSON body, flushing in chunks."""
        batched = get_bool_from_string(req.query_params.get("batched", "false"))
        schema = self.schema(dump_only=self.fields_to_skip_on_create, many=True)
        objs = schema.load_instance(await req.json())
        self.logger.info(f"Now trying to create {len(objs):n} objects")
        session = self.session_factory()
        try:
            for c in chunk(objs, CHUNK_SIZE):
                session.add_all(c)
                session.flush()
            session.commit()
            self.logger.info(f"Successfully created {len(objs):n} objects, now trying to serialize")
            # 'batched' callers skip the (potentially large) echo payload.
            media = serialize(objs, self.schema, many=True) if not batched else []
            status = HTTP_200_OK
        except Exception as e:
            self.logger.exception(e)
            status = HTTP_500_INTERNAL_SERVER_ERROR
            media = f"{e.__class__.__name__}: {e}"
            session.rollback()
        self.session_factory.remove()
        return JSONResponse(media, status)
    async def delete(self, req: Request):
        """Delete the objects whose id matches the 'id' query parameter."""
        session = self.session_factory()
        try:
            # 'fetch' sync strategy: SELECT the matching rows before deleting
            # so the session state stays consistent.
            nums = session.query(self.model).filter(self.model.id == req.query_params["id"]).delete("fetch")
            self.logger.info(f"Now trying to delete {nums:n} objects")
            session.commit()
            self.logger.info(f"Successfully deleted {nums:n} objects")
            media = {"deleted": nums}
            status = HTTP_200_OK
        except Exception as e:
            self.logger.exception(e)
            session.rollback()
            media = f"{e.__class__.__name__}: {e}"
            status = HTTP_500_INTERNAL_SERVER_ERROR
        self.session_factory.remove()
        return JSONResponse(media, status)
    async def patch(self, req: Request):
        """Update (merge) objects from the JSON body, flushing in chunks."""
        batched = get_bool_from_string(req.query_params.get("batched", "false"))
        schema = self.schema(many=True)
        objs = schema.load_instance(await req.json())
        session = self.session_factory()
        self.logger.info(f"Now trying to update {len(objs):n} objects")
        try:
            for c in chunk(objs, CHUNK_SIZE):
                for obj in c:
                    session.merge(obj)
                session.flush()
            session.commit()
            self.logger.info(f"Successfully updated {len(objs):n} objects, now trying to serialize")
            media = serialize(objs, self.schema, many=True) if not batched else []
            status = HTTP_200_OK
        except Exception as e:
            self.logger.exception(e)
            session.rollback()
            media = f"{e.__class__.__name__}: {e}"
            status = HTTP_500_INTERNAL_SERVER_ERROR
        self.session_factory.remove()
        return JSONResponse(media, status)
| 6,151 | 1,762 |
##############################################################################
# Copyright (c) 2020 China Mobile Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
'''
a common http_handler
'''
import urllib.request
import json
import ssl
from http.client import HTTPException
from urllib.error import HTTPError, URLError
# pylint: disable=E0611
from log_utils import LOGGER
from errors import ERROR_CODE
# pylint: disable=W0212
# Globally disable TLS certificate verification for all HTTPS requests made
# through urllib in this process.
# NOTE(review): this is insecure; acceptable only in trusted test setups.
ssl._create_default_https_context = ssl._create_unverified_context
HEADERS = {
'Connection': 'keep-alive',
'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 \
(KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36',
}
TIME_OUT = 3000
class UrllibHttpHandler:
    """
    HTTP handler built on urllib.request (Python 3).
    Each verb returns the raw response object on success; get/post/patch
    return None implicitly when a handled error was logged, so callers must
    check for None. put/delete let errors propagate.
    """
    def __init__(self):
        # All requests share the module-level default headers.
        self.__header = HEADERS
    def get(self, url):
        """
        Run a GET request; returns the response, or None on handled errors.
        NOTE(review): URLError (e.g. connection refused) is not caught here,
        unlike in post() — confirm whether that is intentional.
        """
        try:
            req = urllib.request.Request(url, headers=self.__header)
            res = urllib.request.urlopen(req, timeout=TIME_OUT)
        except HTTPException as http_exp:
            LOGGER.error(http_exp)
            LOGGER.error(u"%s %s", ERROR_CODE['E100001'], url)
        except HTTPError as http_err:
            LOGGER.error(http_err)
            LOGGER.error(u"%s %s", ERROR_CODE['E100001'], url)
            LOGGER.error(u"%s %s", ERROR_CODE['E600001'], url)
        else:
            return res
    def post(self, url, parameter=None):
        """
        Run a POST request; *parameter* is JSON-encoded to bytes.
        Returns the response, or None on handled errors.
        """
        try:
            data = json.dumps(parameter).encode(encoding="utf-8")
            LOGGER.debug("data is %s", data)
            req = urllib.request.Request(url, data=data, headers=self.__header)
            req.add_header("Content-Type", "application/json")
            res = urllib.request.urlopen(req, timeout=TIME_OUT)
        except HTTPException as http_exp:
            LOGGER.error(http_exp)
            LOGGER.error(u"%s %s", ERROR_CODE['E100001'], url)
        except TimeoutError as timeout_error:
            LOGGER.error(timeout_error)
            LOGGER.error(u"%s", ERROR_CODE['E100003'])
        except HTTPError as http_err:
            # HTTPError must precede URLError: it is a URLError subclass.
            LOGGER.error(http_err)
            LOGGER.error(u"%s %s", ERROR_CODE['E100001'], url)
            LOGGER.error(u"%s %s", ERROR_CODE['E600001'], url)
        except URLError as url_err:
            LOGGER.error(url_err)
            LOGGER.error(u"%s %s", ERROR_CODE['E100001'], url)
        else:
            return res
    def put(self, url, parameter=None):
        """
        Run a PUT request; *parameter* is JSON-encoded to bytes.
        NOTE(review): unlike get/post, no timeout is set and errors
        propagate to the caller — confirm that is intended.
        """
        # parameter_data = urllib.parse.urlencode(parameter) #??
        data = json.dumps(parameter).encode(encoding="utf-8")
        LOGGER.debug("data is %s", data)
        req = urllib.request.Request(url, data=data, headers=self.__header)
        # urllib has no native PUT support; override the method resolver.
        req.get_method = lambda: 'PUT'
        res = urllib.request.urlopen(req)
        return res
    def patch(self, url, parameter=None, etag=None):
        """
        Run a PATCH request; *parameter* is JSON-encoded to bytes and *etag*
        is sent as an If-Match precondition. Returns None on handled errors.
        """
        data = json.dumps(parameter).encode(encoding="utf-8")
        LOGGER.debug("data is %s", data)
        req = urllib.request.Request(url, data=data, headers=self.__header)
        req.add_header("Content-Type", "application/json")
        req.add_header("If-Match", etag)
        req.get_method = lambda: 'PATCH'
        res = None
        try:
            res = urllib.request.urlopen(req, timeout=TIME_OUT)
        except HTTPException as http_exp:
            LOGGER.error(http_exp)
            LOGGER.error(u"%s %s", ERROR_CODE['E100001'], url)
        except HTTPError as http_err:
            LOGGER.error(http_err)
            LOGGER.error(u"%s %s", ERROR_CODE['E100001'], url)
            LOGGER.error(u"%s %s", ERROR_CODE['E600001'], url)
        except TypeError as type_err:
            # e.g. etag=None: add_header requires a string value.
            LOGGER.error(type_err)
            LOGGER.error(u"%s %s", ERROR_CODE['E100001'], url)
        return res
    def delete(self, url):
        '''
        Run a DELETE request; errors propagate to the caller.
        '''
        req = urllib.request.Request(url, headers=self.__header)
        req.get_method = lambda: 'DELETE'
        res = urllib.request.urlopen(req)
        return res
| 4,628 | 1,489 |
"""Seed AnnotationStyle and DomainURLsMap tables
Revision ID: a6ca510027a5
Revises: fb1654973fbd
Create Date: 2020-08-19 23:27:53.132930
"""
import json
from alembic import context
from alembic import op
import sqlalchemy as sa
from sqlalchemy.orm.session import Session
from sqlalchemy.sql import table, column
from neo4japp.models import AnnotationStyle, DomainURLsMap
# revision identifiers, used by Alembic.
revision = 'a6ca510027a5'
down_revision = 'fb1654973fbd'
branch_labels = None
depends_on = None
# Lightweight table stubs (sqlalchemy.sql.table) mirroring the real models,
# so this migration can issue inserts without importing application metadata.
t_annotation_style = table(
    'annotation_style',
    column('id', sa.Integer),
    column('label', sa.String),
    column('color', sa.String),
    column('icon_code', sa.String),
    column('font_color', sa.String),
    column('border_color', sa.String),
    column('background_color', sa.String),
)
t_domain_urls_map = table(
    'domain_urls_map',
    column('id', sa.Integer),
    column('domain', sa.String),
    column('base_URL', sa.String),
)
def upgrade():
    """Run the data migration only when '-x data_migrate=...' was passed."""
    x_args = context.get_x_argument(as_dictionary=True)
    if x_args.get('data_migrate'):
        data_upgrades()
def downgrade():
    """No-op: downgrades are handled by database backups, not migrations."""
    pass
# ### end Alembic commands ###
# NOTE: In practice perfect downgrades are difficult and in some cases
# impossible! It is more practical to use database backups/snapshots to
# "downgrade" the database. Changes to the database that we intend to
# push to production should always be added to a NEW migration.
# (i.e. "downgrade forward"!)
def data_upgrades():
    """Seed the annotation_style and domain_urls_map tables.

    Reads fixtures/seed.json (relative to the process working directory),
    extracts the records for the two models, and bulk-inserts them in a
    single session/commit.
    """
    session = Session(op.get_bind())
    domain_urls_map_json = {}
    annotation_style_json = {}
    with open("fixtures/seed.json", "r") as f:
        data = json.load(f)
        # The fixture file is a list of {'model': ..., 'records': [...]}
        # entries; pick out only the two models this migration seeds.
        for model in data:
            if model['model'] == 'neo4japp.models.DomainURLsMap':
                domain_urls_map_json = model['records']
                continue
            if model['model'] == 'neo4japp.models.AnnotationStyle':
                annotation_style_json = model['records']
                continue
    domain_urls_map_data = []
    for row in domain_urls_map_json:
        domain_urls_map_data.append(
            {
                'domain': row['domain'],
                'base_URL': row['base_URL']
            }
        )
    session.execute(t_domain_urls_map.insert(), domain_urls_map_data)
    annotation_style_data = []
    for row in annotation_style_json:
        annotation_style_data.append(
            {
                'label': row['label'],
                'color': row['color'],
                # These fields are optional in the fixture; default to NULL.
                'border_color': row.get('border_color', None),
                'background_color': row.get('background_color', None),
                'font_color': row.get('font_color', None),
                'icon_code': row.get('icon_code', None),
            }
        )
    session.execute(t_annotation_style.insert(), annotation_style_data)
    session.commit()
def data_downgrades():
    """Add optional data downgrade migrations here"""
    # No-op: seeded rows are deliberately left in place on downgrade.
    pass
| 3,006 | 951 |
##########################################################################
#
# Copyright 2008-2010 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################
"""Common trace code generation."""
import specs.stdapi as stdapi
from dispatch import Dispatcher
def interface_wrap_name(interface):
    '''Name of the generated wrapper class for an interface.'''
    return 'Wrap%s' % interface.expr
class DumpDeclarator(stdapi.OnceVisitor):
    '''Declare helper functions to dump complex types.

    Each visit_* method prints (to stdout) the C++ source for a helper that
    serializes one kind of type through trace::localWriter.  Compound types
    recurse into their member/element types first, so dependencies are
    emitted before the code that uses them.
    '''
    def visit_void(self, literal):
        pass
    def visit_literal(self, literal):
        # Literals are written inline by DumpImplementer; nothing to declare.
        pass
    def visit_string(self, string):
        pass
    def visit_const(self, const):
        self.visit(const.type)
    def visit_struct(self, struct):
        # Declare dumpers for all member types before the struct's own dumper.
        for type, name in struct.members:
            self.visit(type)
        print 'static void _write__%s(const %s &value) {' % (struct.tag, struct.expr)
        print '    static const char * members[%u] = {' % (len(struct.members),)
        for type, name, in struct.members:
            print '        "%s",' % (name,)
        print '    };'
        print '    static const trace::StructSig sig = {'
        print '       %u, "%s", %u, members' % (struct.id, struct.name, len(struct.members))
        print '    };'
        print '    trace::localWriter.beginStruct(&sig);'
        for type, name in struct.members:
            dump_instance(type, 'value.%s' % (name,))
        print '    trace::localWriter.endStruct();'
        print '}'
        print
    def visit_array(self, array):
        self.visit(array.type)
    def visit_blob(self, array):
        pass
    # NOTE(review): apparently unused class attribute — candidate for removal.
    __enum_id = 0
    def visit_enum(self, enum):
        # Emit the name/value table plus the signature struct for the enum.
        print 'static const trace::EnumValue __enum%s_values[] = {' % (enum.tag)
        for value in enum.values:
            print '   {"%s", %s},' % (value, value)
        print '};'
        print
        print 'static const trace::EnumSig __enum%s_sig = {' % (enum.tag)
        print '   %u, %u, __enum%s_values' % (enum.id, len(enum.values), enum.tag)
        print '};'
        print
    def visit_bitmask(self, bitmask):
        print 'static const trace::BitmaskFlag __bitmask%s_flags[] = {' % (bitmask.tag)
        for value in bitmask.values:
            print '   {"%s", %s},' % (value, value)
        print '};'
        print
        print 'static const trace::BitmaskSig __bitmask%s_sig = {' % (bitmask.tag)
        print '   %u, %u, __bitmask%s_flags' % (bitmask.id, len(bitmask.values), bitmask.tag)
        print '};'
        print
    def visit_pointer(self, pointer):
        self.visit(pointer.type)
    def visit_handle(self, handle):
        self.visit(handle.type)
    def visit_alias(self, alias):
        self.visit(alias.type)
    def visit_opaque(self, opaque):
        pass
    def visit_interface(self, interface):
        # Declare the COM-style wrapper class; method bodies are emitted
        # later by Tracer.interface_wrap_impl.
        print "class %s : public %s " % (interface_wrap_name(interface), interface.name)
        print "{"
        print "public:"
        print "    %s(%s * pInstance);" % (interface_wrap_name(interface), interface.name)
        print "    virtual ~%s();" % interface_wrap_name(interface)
        print
        for method in interface.itermethods():
            print "    " + method.prototype() + ";"
        print
        #print "private:"
        print "    %s * m_pInstance;" % (interface.name,)
        print "};"
        print
    def visit_polymorphic(self, polymorphic):
        # Dump dispatches on the runtime selector to pick the concrete type.
        print 'static void _write__%s(int selector, const %s & value) {' % (polymorphic.tag, polymorphic.expr)
        print '    switch (selector) {'
        for cases, type in polymorphic.iterswitch():
            for case in cases:
                print '    %s:' % case
            dump_instance(type, 'static_cast<%s>(value)' % (type,))
            print '        break;'
        print '    }'
        print '}'
        print
class DumpImplementer(stdapi.Visitor):
    '''Dump an instance.

    Each visit_* method prints the C++ statement(s) that serialize one value
    (`instance` is a C++ expression) through trace::localWriter, using the
    helpers declared by DumpDeclarator.
    '''
    def visit_literal(self, literal, instance):
        print '    trace::localWriter.write%s(%s);' % (literal.kind, instance)
    def visit_string(self, string, instance):
        if string.length is not None:
            print '    trace::localWriter.writeString((const char *)%s, %s);' % (instance, string.length)
        else:
            print '    trace::localWriter.writeString((const char *)%s);' % instance
    def visit_const(self, const, instance):
        self.visit(const.type, instance)
    def visit_struct(self, struct, instance):
        print '    _write__%s(%s);' % (struct.tag, instance)
    def visit_array(self, array, instance):
        # Emit a bounded loop over the array elements; a NULL array pointer
        # is recorded as a null value instead.
        length = '__c' + array.type.tag
        index = '__i' + array.type.tag
        print '    if (%s) {' % instance
        print '        size_t %s = %s;' % (length, array.length)
        print '        trace::localWriter.beginArray(%s);' % length
        print '        for (size_t %s = 0; %s < %s; ++%s) {' % (index, index, length, index)
        print '            trace::localWriter.beginElement();'
        self.visit(array.type, '(%s)[%s]' % (instance, index))
        print '            trace::localWriter.endElement();'
        print '        }'
        print '        trace::localWriter.endArray();'
        print '    } else {'
        print '        trace::localWriter.writeNull();'
        print '    }'
    def visit_blob(self, blob, instance):
        print '    trace::localWriter.writeBlob(%s, %s);' % (instance, blob.size)
    def visit_enum(self, enum, instance):
        print '    trace::localWriter.writeEnum(&__enum%s_sig, %s);' % (enum.tag, instance)
    def visit_bitmask(self, bitmask, instance):
        print '    trace::localWriter.writeBitmask(&__bitmask%s_sig, %s);' % (bitmask.tag, instance)
    def visit_pointer(self, pointer, instance):
        # Non-null pointers are serialized as a one-element array of the
        # pointee; NULL is recorded explicitly.
        print '    if (%s) {' % instance
        print '        trace::localWriter.beginArray(1);'
        print '        trace::localWriter.beginElement();'
        dump_instance(pointer.type, "*" + instance)
        print '        trace::localWriter.endElement();'
        print '        trace::localWriter.endArray();'
        print '    } else {'
        print '        trace::localWriter.writeNull();'
        print '    }'
    def visit_handle(self, handle, instance):
        self.visit(handle.type, instance)
    def visit_alias(self, alias, instance):
        self.visit(alias.type, instance)
    def visit_opaque(self, opaque, instance):
        print '    trace::localWriter.writeOpaque((const void *)%s);' % instance
    def visit_interface(self, interface, instance):
        # Interfaces are recorded only by pointer identity.
        print '    trace::localWriter.writeOpaque((const void *)&%s);' % instance
    def visit_polymorphic(self, polymorphic, instance):
        print '    _write__%s(%s, %s);' % (polymorphic.tag, polymorphic.switch_expr, instance)
dump_instance = DumpImplementer().visit
class Wrapper(stdapi.Visitor):
    '''Wrap an instance.

    Emits C++ code that replaces raw interface pointers (reached through
    structs/pointers) with their tracing wrapper objects; all other types
    need no wrapping and are no-ops.
    '''
    def visit_void(self, type, instance):
        raise NotImplementedError
    def visit_literal(self, type, instance):
        pass
    def visit_string(self, type, instance):
        pass
    def visit_const(self, type, instance):
        pass
    def visit_struct(self, struct, instance):
        for type, name in struct.members:
            self.visit(type, "(%s).%s" % (instance, name))
    def visit_array(self, array, instance):
        # XXX: actually it is possible to return an array of pointers
        pass
    def visit_blob(self, blob, instance):
        pass
    def visit_enum(self, enum, instance):
        pass
    def visit_bitmask(self, bitmask, instance):
        pass
    def visit_pointer(self, pointer, instance):
        print "    if (%s) {" % instance
        self.visit(pointer.type, "*" + instance)
        print "    }"
    def visit_handle(self, handle, instance):
        self.visit(handle.type, instance)
    def visit_alias(self, alias, instance):
        self.visit(alias.type, instance)
    def visit_opaque(self, opaque, instance):
        pass
    def visit_interface(self, interface, instance):
        # Only pointer-to-interface can be wrapped; the leading '*' was added
        # by visit_pointer above.
        assert instance.startswith('*')
        instance = instance[1:]
        print "    if (%s) {" % instance
        print "        %s = new %s(%s);" % (instance, interface_wrap_name(interface), instance)
        print "    }"
    def visit_polymorphic(self, type, instance):
        # XXX: There might be polymorphic values that need wrapping in the future
        pass
class Unwrapper(Wrapper):
    '''Inverse of Wrapper: replace wrapper objects with the wrapped raw
    interface pointer before calling into the real implementation.'''
    def visit_interface(self, interface, instance):
        assert instance.startswith('*')
        instance = instance[1:]
        print "    if (%s) {" % instance
        print "        %s = static_cast<%s *>(%s)->m_pInstance;" % (instance, interface_wrap_name(interface), instance)
        print "    }"
# Module-level shortcuts for emitting wrap/unwrap code for a single value.
wrap_instance = Wrapper().visit
unwrap_instance = Unwrapper().visit
class Tracer:
    '''Code generator that emits the C++ tracing layer for a whole API:
    type dumpers, interface wrapper classes, and traced entry points.'''
    def __init__(self):
        self.api = None
    def trace_api(self, api):
        """Emit the complete tracing source for `api` to stdout."""
        self.api = api
        self.header(api)
        # Includes
        for header in api.headers:
            print header
        print
        # Type dumpers
        types = api.all_types()
        visitor = DumpDeclarator()
        # Python 2 map() is used here purely for its side effects (printing).
        map(visitor.visit, types)
        print
        # Interfaces wrapers
        interfaces = [type for type in types if isinstance(type, stdapi.Interface)]
        map(self.interface_wrap_impl, interfaces)
        print
        # Function wrappers
        map(self.trace_function_decl, api.functions)
        map(self.trace_function_impl, api.functions)
        print
        self.footer(api)
    def header(self, api):
        # Hook for subclasses (e.g. DllTracer) to emit a preamble.
        pass
    def footer(self, api):
        pass
    def trace_function_decl(self, function):
        # Per-function declarations
        if function.args:
            print 'static const char * __%s_args[%u] = {%s};' % (function.name, len(function.args), ', '.join(['"%s"' % arg.name for arg in function.args]))
        else:
            print 'static const char ** __%s_args = NULL;' % (function.name,)
        print 'static const trace::FunctionSig __%s_sig = {%u, "%s", %u, __%s_args};' % (function.name, function.id, function.name, len(function.args), function.name)
        print
    def is_public_function(self, function):
        # Subclasses may hide functions from the public export table.
        return True
    def trace_function_impl(self, function):
        if self.is_public_function(function):
            print 'extern "C" PUBLIC'
        else:
            print 'extern "C" PRIVATE'
        print function.prototype() + ' {'
        if function.type is not stdapi.Void:
            print '    %s __result;' % function.type
        self.trace_function_impl_body(function)
        if function.type is not stdapi.Void:
            self.wrap_ret(function, "__result")
            print '    return __result;'
        print '}'
        print
    def trace_function_impl_body(self, function):
        # Record entry (input args), dispatch to the real function, then
        # record leave (output args + return value).
        print '    unsigned __call = trace::localWriter.beginEnter(&__%s_sig);' % (function.name,)
        for arg in function.args:
            if not arg.output:
                self.unwrap_arg(function, arg)
                self.dump_arg(function, arg)
        print '    trace::localWriter.endEnter();'
        self.dispatch_function(function)
        print '    trace::localWriter.beginLeave(__call);'
        for arg in function.args:
            if arg.output:
                self.dump_arg(function, arg)
                self.wrap_arg(function, arg)
        if function.type is not stdapi.Void:
            self.dump_ret(function, "__result")
        print '    trace::localWriter.endLeave();'
    def dispatch_function(self, function, prefix='__', suffix=''):
        # Call the real (dispatched) implementation, e.g. __glFoo(...).
        if function.type is stdapi.Void:
            result = ''
        else:
            result = '__result = '
        dispatch = prefix + function.name + suffix
        print '    %s%s(%s);' % (result, dispatch, ', '.join([str(arg.name) for arg in function.args]))
    def dump_arg(self, function, arg):
        print '    trace::localWriter.beginArg(%u);' % (arg.index,)
        self.dump_arg_instance(function, arg)
        print '    trace::localWriter.endArg();'
    def dump_arg_instance(self, function, arg):
        dump_instance(arg.type, arg.name)
    def wrap_arg(self, function, arg):
        wrap_instance(arg.type, arg.name)
    def unwrap_arg(self, function, arg):
        unwrap_instance(arg.type, arg.name)
    def dump_ret(self, function, instance):
        print '    trace::localWriter.beginReturn();'
        dump_instance(function.type, instance)
        print '    trace::localWriter.endReturn();'
    def wrap_ret(self, function, instance):
        wrap_instance(function.type, instance)
    def unwrap_ret(self, function, instance):
        unwrap_instance(function.type, instance)
    def interface_wrap_impl(self, interface):
        # Constructor/destructor of the wrapper class, then one traced
        # forwarding method per interface method.
        print '%s::%s(%s * pInstance) {' % (interface_wrap_name(interface), interface_wrap_name(interface), interface.name)
        print '    m_pInstance = pInstance;'
        print '}'
        print
        print '%s::~%s() {' % (interface_wrap_name(interface), interface_wrap_name(interface))
        print '}'
        print
        for method in interface.itermethods():
            self.trace_method(interface, method)
        print
    def trace_method(self, interface, method):
        """Emit one traced wrapper method forwarding to m_pInstance."""
        print method.prototype(interface_wrap_name(interface) + '::' + method.name) + ' {'
        # Arg 0 is always the implicit `this` pointer.
        print '    static const char * __args[%u] = {%s};' % (len(method.args) + 1, ', '.join(['"this"'] + ['"%s"' % arg.name for arg in method.args]))
        print '    static const trace::FunctionSig __sig = {%u, "%s", %u, __args};' % (method.id, interface.name + '::' + method.name, len(method.args) + 1)
        print '    unsigned __call = trace::localWriter.beginEnter(&__sig);'
        print '    trace::localWriter.beginArg(0);'
        print '    trace::localWriter.writeOpaque((const void *)m_pInstance);'
        print '    trace::localWriter.endArg();'
        for arg in method.args:
            if not arg.output:
                self.unwrap_arg(method, arg)
                self.dump_arg(method, arg)
        if method.type is stdapi.Void:
            result = ''
        else:
            print '    %s __result;' % method.type
            result = '__result = '
        print '    trace::localWriter.endEnter();'
        print '    %sm_pInstance->%s(%s);' % (result, method.name, ', '.join([str(arg.name) for arg in method.args]))
        print '    trace::localWriter.beginLeave(__call);'
        for arg in method.args:
            if arg.output:
                self.dump_arg(method, arg)
                self.wrap_arg(method, arg)
        if method.type is not stdapi.Void:
            print '    trace::localWriter.beginReturn();'
            dump_instance(method.type, "__result")
            print '    trace::localWriter.endReturn();'
            wrap_instance(method.type, '__result')
        print '    trace::localWriter.endLeave();'
        if method.name == 'QueryInterface':
            # QueryInterface must hand back a *wrapped* interface pointer so
            # subsequent calls through it remain traced.
            print '    if (ppvObj && *ppvObj) {'
            print '        if (*ppvObj == m_pInstance) {'
            print '            *ppvObj = this;'
            print '        }'
            for iface in self.api.interfaces:
                print r'        else if (riid == IID_%s) {' % iface.name
                print r'            *ppvObj = new Wrap%s((%s *) *ppvObj);' % (iface.name, iface.name)
                print r'        }'
            print r'        else {'
            print r'            os::log("apitrace: warning: unknown REFIID {0x%08lX,0x%04X,0x%04X,{0x%02X,0x%02X,0x%02X,0x%02X,0x%02X,0x%02X,0x%02X,0x%02X}}\n",'
            print r'                    riid.Data1, riid.Data2, riid.Data3,'
            print r'                    riid.Data4[0],'
            print r'                    riid.Data4[1],'
            print r'                    riid.Data4[2],'
            print r'                    riid.Data4[3],'
            print r'                    riid.Data4[4],'
            print r'                    riid.Data4[5],'
            print r'                    riid.Data4[6],'
            print r'                    riid.Data4[7]);'
            print r'        }'
            print '    }'
        if method.name == 'Release':
            # When the refcount hits zero the wrapper deletes itself too.
            assert method.type is not stdapi.Void
            print '    if (!__result)'
            print '        delete this;'
        if method.type is not stdapi.Void:
            print '    return __result;'
        print '}'
        print
class DllTracer(Tracer):
    '''Tracer for a Windows DLL: loads the real DLL from the system
    directory and resolves the real entry points via GetProcAddress.'''
    def __init__(self, dllname):
        # NOTE(review): Tracer.__init__ is not called here, so self.api stays
        # unset until trace_api() runs — confirm this is intentional.
        self.dllname = dllname
    def header(self, api):
        # Emit the loader preamble (the %s is filled with the real DLL name),
        # then the dispatch table, then the base Tracer header.
        print '''
static HINSTANCE g_hDll = NULL;

static PROC
__getPublicProcAddress(LPCSTR lpProcName)
{
    if (!g_hDll) {
        char szDll[MAX_PATH] = {0};
        
        if (!GetSystemDirectoryA(szDll, MAX_PATH)) {
            return NULL;
        }
        
        strcat(szDll, "\\\\%s");
        
        g_hDll = LoadLibraryA(szDll);
        if (!g_hDll) {
            return NULL;
        }
    }
        
    return GetProcAddress(g_hDll, lpProcName);
}

''' % self.dllname
        dispatcher = Dispatcher()
        dispatcher.dispatch_api(api)
        Tracer.header(self, api)
| 18,205 | 5,600 |
# pylint: disable=invalid-name,missing-docstring
import os
import subprocess
import urllib.parse
import uuid
def read_temporal_data(temporal_path: str):
    """Parse a temporal-annotation file into a dict keyed by video name.

    Each whitespace-separated line has the form:
        <name> <class> <start1> <end1> <start2> <end2>
    where -1 frame values mark an absent action interval (see the consumer
    loop below, which skips action entries whose start frame is -1).

    Returns:
        dict mapping video name -> {'class': str,
                                    'action_1': (int, int),
                                    'action_2': (int, int)}
    """
    temporal_data = {}
    # Use a context manager so the file handle is always closed; the
    # original iterated over a bare open() and leaked the handle.
    with open(temporal_path, 'r') as temporal_file:
        for line in temporal_file:
            name, v_cls, s_frm_1, f_frm_1, s_frm_2, f_frm_2 = line.strip().split()
            temporal_data[name] = {
                'class': v_cls,
                'action_1': (int(s_frm_1), int(f_frm_1)),
                'action_2': (int(s_frm_2), int(f_frm_2)),
            }
    return temporal_data
# --- Script body: extract every 120th frame from each video with ffmpeg,
# --- then move frames that fall outside annotated action intervals into the
# --- 'Normal' class and give all remaining frames unique names.
source_path = '/home/daniil/Downloads/UCF-Crime/Videos'
result_path = '/home/daniil/Documents/Projects/University/Thesis/frames2'
if not os.path.exists(result_path):
    os.mkdir(result_path)
normal_frames = os.path.join(result_path, 'Normal')
if not os.path.exists(normal_frames):
    os.mkdir(normal_frames)
temporal_data = read_temporal_data('./temporal_data.txt')
# ffmpeg's stderr is collected in one log file for the whole run.
err_log = open('./err.log', 'w')
for video_class in os.listdir(source_path):
    frames_dir = os.path.join(result_path, video_class)
    class_path = os.path.join(source_path, video_class)
    if not os.path.exists(frames_dir):
        os.mkdir(frames_dir)
    for idx, video in enumerate(os.listdir(class_path)):
        video_path = os.path.join(class_path, video).strip()
        print(f'Start working on {os.path.abspath(video_path)}')
        video_name, video_ext = os.path.splitext(os.path.basename(video_path))
        video_name = urllib.parse.unquote(video_name)
        # Keep one frame out of every 120; frames are named <video-idx>-<n>.jpg.
        command = ('ffmpeg', '-i', os.path.abspath(video_path), '-vf',
                   'select=not(mod(n\\,120))', '-vsync', 'vfr', '-hide_banner',
                   '-threads', '16',
                   os.path.join(frames_dir, f'{idx:03d}-%06d.jpg'))
        try:
            subprocess.check_call(command,
                                  stdout=subprocess.DEVNULL,
                                  stderr=err_log)
        except subprocess.CalledProcessError as exc:
            print(f'ffmpeg failed with return code {exc.returncode}')
        video_frames = sorted([
            frame for frame in os.listdir(frames_dir)
            if frame.startswith(f'{idx:03d}-')
        ])
        # NOTE(review): temporal_data[video] is a dict, so the comparison
        # `!= 'Normal'` is always True; this was probably meant to be
        # temporal_data[video]['class'] != 'Normal' — confirm before fixing.
        if video in temporal_data.keys() and temporal_data[video] != 'Normal':
            frames_data = temporal_data[video]
            frames_to_move = set()
            for action in ['action_1', 'action_2']:
                # A start frame of -1 marks an absent action interval.
                if frames_data[action][0] == -1:
                    continue
                start_frame, stop_frame = frames_data[action]
                for frm_idx, frame in enumerate(video_frames):
                    # Reconstruct the original frame number (every 120th kept).
                    frm_num = frm_idx * 120
                    # Frames outside the action window belong to 'Normal'.
                    if not start_frame < frm_num < stop_frame:
                        frames_to_move.add(os.path.join(frames_dir, frame))
            for frame_path in frames_to_move:
                uuid_idx = str(uuid.uuid4())
                os.rename(
                    frame_path,
                    os.path.join(result_path, 'Normal',
                                 f'moved-{uuid_idx}.jpg'))
    # Rename remaining frames to globally unique names, keeping the
    # per-video index prefix.
    frames_to_rename = list(os.listdir(frames_dir))
    for frame in frames_to_rename:
        frame_idx = frame.split('-')[0]
        frame_path = os.path.join(frames_dir, frame)
        uuid_idx = str(uuid.uuid4())
        os.rename(frame_path,
                  os.path.join(frames_dir, f'{frame_idx}-{uuid_idx}.jpg'))
| 3,334 | 1,124 |
# -*- coding:utf-8 -*-
class Solution:
    def jumpFloorII(self, number):
        """Count the ways to climb `number` steps when one jump may cover
        any positive number of steps.

        Closed form: f(n) = 2**(n-1) for n >= 1 (each intermediate step is
        independently either a landing point or not), and f(0) = 0.  This
        replaces the original O(n^2) DP that re-summed the whole table on
        every iteration; outputs are identical for all n >= 0.
        """
        if number <= 0:
            return 0
        return 2 ** (number - 1)
| 228 | 83 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    '''South migration: relax DeviceReportEntry.app_version and .device_id
    to allow NULL; backwards restores NOT NULL with empty-string defaults.'''
    def forwards(self, orm):
        # Changing field 'DeviceReportEntry.app_version'
        db.alter_column(u'phonelog_devicereportentry', 'app_version', self.gf('django.db.models.fields.TextField')(null=True))
        # Changing field 'DeviceReportEntry.device_id'
        db.alter_column(u'phonelog_devicereportentry', 'device_id', self.gf('django.db.models.fields.CharField')(max_length=50, null=True))
    def backwards(self, orm):
        # Changing field 'DeviceReportEntry.app_version'
        db.alter_column(u'phonelog_devicereportentry', 'app_version', self.gf('django.db.models.fields.TextField')(default=''))
        # Changing field 'DeviceReportEntry.device_id'
        db.alter_column(u'phonelog_devicereportentry', 'device_id', self.gf('django.db.models.fields.CharField')(default='', max_length=50))
    # Frozen ORM snapshot generated by South; do not edit by hand.
    models = {
        u'phonelog.devicereportentry': {
            'Meta': {'unique_together': "[('xform_id', 'i')]", 'object_name': 'DeviceReportEntry'},
            'app_version': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
            'device_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'db_index': 'True'}),
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
            'i': ('django.db.models.fields.IntegerField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'msg': ('django.db.models.fields.TextField', [], {}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'db_index': 'True'}),
            'xform_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        u'phonelog.userentry': {
            'Meta': {'unique_together': "[('xform_id', 'i')]", 'object_name': 'UserEntry'},
            'i': ('django.db.models.fields.IntegerField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'sync_token': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'user_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
            'xform_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        }
    }
    complete_apps = ['phonelog']
| 2,866 | 942 |
import xbmc
from lib.libs import mediatypes
from lib.libs.addonsettings import settings
from lib.libs.pykodi import json, UTF8JSONDecoder
from lib.libs.utils import SortedDisplay
from lib.providers.base import AbstractProvider, AbstractImageProvider, cache, build_key_error
class TheAudioDBAbstractProvider(AbstractImageProvider):
    '''Base class for TheAudioDB artwork providers: holds the MBID ->
    endpoint/art-type mapping and the cached/keyed HTTP access helpers.'''
    name = SortedDisplay('theaudiodb.com', 'TheAudioDB.com')
    contenttype = 'application/json'
    def __init__(self):
        super(TheAudioDBAbstractProvider, self).__init__()
        # url param i=MB track/album/artist ID
        # artmap: per MusicBrainz-ID-kind -> JSON response key, TADB field ->
        # Kodi art-type mapping, and the API endpoint ({0} is the API key).
        self.artmap = {'mbtrack': {'datakey':'track', 'artmap': {'strTrackThumb': 'thumb'},
                'url': 'https://www.theaudiodb.com/api/v1/json/{0}/track-mb.php'},
            'mbgroup': {'datakey':'album', 'artmap': {'strAlbumThumb': 'thumb', 'strAlbumCDart': 'discart',
                    'strAlbumThumbBack': 'back', 'strAlbumSpine': 'spine'},
                'url': 'https://www.theaudiodb.com/api/v1/json/{0}/album-mb.php'},
            'mbartist': {'datakey':'artists', 'artmap': {'strArtistThumb': 'thumb', 'strArtistLogo': 'clearlogo',
                    'strArtistBanner': 'banner', 'strArtistFanart': 'fanart', 'strArtistFanart2': 'fanart',
                    'strArtistFanart3': 'fanart', 'strArtistClearart': 'clearart', 'strArtistWideThumb': 'landscape'},
                'url': 'https://www.theaudiodb.com/api/v1/json/{0}/artist-mb.php'}
            }
        # All Kodi art types this provider can ever supply.
        self.provtypes = set(x for data in self.artmap.values() for x in data['artmap'].values())
    def get_data(self, url, params):
        # Cached fetch; 'Empty' is the cached sentinel for "no result".
        result = cache.cacheFunction(self._get_data, url.format(settings.get_apikey('tadb')), params)
        return result if result != 'Empty' else None
    def _get_data(self, url, params):
        apikey = settings.get_apikey('tadb')
        if not apikey:
            raise build_key_error('tadb')
        self.log('uncached', xbmc.LOGINFO)
        response = self.doget(url, params=params)
        if response is None:
            raise build_key_error('tadb')
        # NOTE(review): the `response is None` arm below is unreachable —
        # that case raised just above.  Kept for symmetry with get_data?
        return 'Empty' if response is None else json.loads(response.text, cls=UTF8JSONDecoder)
    def _build_image(self, url, size, title=None):
        # Track art is ranked slightly above album/artist art (5.1 vs 5.0).
        result = {'provider': self.name, 'url': url, 'preview': url + '/preview',
            'size': size, 'language': None, 'rating': SortedDisplay(5.1 if title == 'track' else 5.0, '')}
        if title:
            result['title'] = title
        return result
class TheAudioDBMusicVideoProvider(TheAudioDBAbstractProvider):
    '''Music-video artwork: combines track, album and artist art from all
    three MusicBrainz ID kinds, remapping thumbs to music-video art types.
    (Python 2 only: uses dict.iteritems/itervalues.)'''
    mediatype = mediatypes.MUSICVIDEO
    def provides(self, types):
        if 'artistthumb' in types:
            return True
        return bool(set(types) & self.provtypes)
    def get_images(self, uniqueids, types=None):
        if not settings.get_apienabled('tadb'):
            return {}
        if types is not None and not self.provides(types) or not (uniqueids.get('mbtrack') or
                uniqueids.get('mbgroup') or uniqueids.get('mbartist')):
            return {}
        images = {}
        for idsource, artdata in self.artmap.iteritems():
            # Skip ID kinds we don't have, or whose art types weren't asked for.
            if idsource not in uniqueids or types is not None and not \
                    any(x in types for x in artdata['artmap'].itervalues()):
                continue
            data = self.get_data(artdata['url'], {'i': uniqueids[idsource]})
            if not data or not data.get(artdata['datakey']):
                continue
            data = data[artdata['datakey']][0]
            for originaltype, finaltype in artdata['artmap'].iteritems():
                # Back covers and spines make no sense for music videos.
                if (originaltype in ('strAlbumThumbBack', 'strAlbumSpine')):
                    continue
                # Remap music-only art types to their music-video equivalents.
                if originaltype == 'strArtistThumb':
                    finaltype = 'artistthumb'
                elif originaltype in ('strTrackThumb', 'strAlbumThumb'):
                    finaltype = 'poster'
                if data.get(originaltype):
                    _insertart(images, finaltype, self._build_image(data[originaltype],
                        _get_imagesize(originaltype), artdata['datakey']))
        return images
class TheAudioDBAbstractMusicProvider(TheAudioDBAbstractProvider):
    '''Shared implementation for single-ID-kind music providers
    (album/artist/song): fetch one endpoint and map its art fields.'''
    def _inner_get_images(self, uniqueids, idsource, types):
        if not settings.get_apienabled('tadb'):
            return {}
        if not uniqueids.get(idsource):
            return {}
        artdata = self.artmap[idsource]
        # Bail out early if none of the requested types can come from here.
        if types and not any(x in types for x in artdata['artmap'].itervalues()):
            return {}
        images = {}
        data = self.get_data(artdata['url'], {'i': uniqueids[idsource]})
        if not data or not data.get(artdata['datakey']):
            return {}
        data = data[artdata['datakey']][0]
        for originaltype, finaltype in artdata['artmap'].iteritems():
            if data.get(originaltype):
                _insertart(images, finaltype, self._build_image(data[originaltype],
                    _get_imagesize(originaltype), artdata['datakey']))
        return images
class TheAudioDBAlbumProvider(TheAudioDBAbstractMusicProvider):
    '''Album artwork keyed by the MusicBrainz release-group ID.'''
    mediatype = mediatypes.ALBUM
    def get_images(self, uniqueids, types=None):
        return self._inner_get_images(uniqueids, 'mbgroup', types)
class TheAudioDBArtistProvider(TheAudioDBAbstractMusicProvider):
    '''Artist artwork keyed by the MusicBrainz artist ID.'''
    mediatype = mediatypes.ARTIST
    def get_images(self, uniqueids, types=None):
        return self._inner_get_images(uniqueids, 'mbartist', types)
class TheAudioDBSongProvider(TheAudioDBAbstractMusicProvider):
    '''Song artwork keyed by the MusicBrainz track ID.'''
    mediatype = mediatypes.SONG
    def get_images(self, uniqueids, types=None):
        return self._inner_get_images(uniqueids, 'mbtrack', types)
def _get_imagesize(arttype):
    '''Return the expected SortedDisplay size for a TheAudioDB art field.

    Unknown fields yield a zero-size placeholder.
    '''
    known_sizes = {
        'strTrackThumb': (500, '500-800'),
        'strAlbumThumb': (500, '500-800'),
        'strArtistThumb': (500, '500-800'),
        'strAlbumThumbBack': (500, '500-800'),
        'strAlbumCDart': (500, '500 or 1000'),
        'strArtistLogo': (400, '400x155 or 800x310'),
        'strArtistBanner': (1000, '1000x185'),
        'strArtistClearart': (1000, '1000x562'),
        'strArtistWideThumb': (1000, '1000x562'),
        'strArtistFanart': (1280, '1280x720 or 1920x1080'),
        'strArtistFanart2': (1280, '1280x720 or 1920x1080'),
        'strArtistFanart3': (1280, '1280x720 or 1920x1080'),
        'strAlbumSpine': (700, '700x35'),
    }
    sort_value, display = known_sizes.get(arttype, (0, ''))
    return SortedDisplay(sort_value, display)
def _insertart(images, arttype, image):
if arttype not in images:
images[arttype] = []
images[arttype].append(image)
class TheAudioDBSearch(AbstractProvider):
    '''Search provider: resolves "Artist - Title" queries to MusicBrainz
    IDs via TheAudioDB's track search endpoint.'''
    name = SortedDisplay('theaudiodb.com:search', 'TheAudioDB.com search')
    contenttype = 'application/json'
    def __init__(self):
        super(TheAudioDBSearch, self).__init__()
        # s=[artist], t=[track title]
        self.url_trackby_artistandtrack = 'https://www.theaudiodb.com/api/v1/json/{0}/searchtrack.php'
    def get_data(self, url, params=None):
        # Fail fast if no API key is configured; results are cached with
        # 'Empty' as the sentinel for "no result".
        apikey = settings.get_apikey('tadb')
        if not apikey:
            raise build_key_error('tadb')
        result = cache.cacheFunction(self._get_data, url.format(settings.get_apikey('tadb')), params)
        return result if result != 'Empty' else None
    def _get_data(self, url, params=None):
        self.log('uncached', xbmc.LOGINFO)
        if params is None:
            params = {}
        response = self.doget(url, params=params)
        if response is None:
            raise build_key_error('tadb')
        # NOTE(review): the `response is None` arm is unreachable — that case
        # raised just above.
        return 'Empty' if response is None else response.json()
    def search(self, query, mediatype):
        # Only music videos are searchable, and only "Artist - Title" queries.
        if mediatype != mediatypes.MUSICVIDEO:
            return []
        query = query.split(' - ', 1)
        if len(query) != 2:
            return []
        data = self.get_data(self.url_trackby_artistandtrack, {'s': query[0], 't': query[1]})
        if not data or not data.get('track'):
            return []
        return [{'label': item['strArtist'] + ' - ' + item['strTrack'], 'uniqueids':
            {'mbtrack': item['strMusicBrainzID'], 'mbartist': item['strMusicBrainzArtistID'],
                'mbgroup': item['strMusicBrainzAlbumID']}} for item in data['track']]
| 8,201 | 2,585 |
import pytest
from osp.corpus.models import Document
from osp.corpus.models import Document_Format
pytestmark = pytest.mark.usefixtures('db')
def test_format_counts():
    """
    Document.format_counts()
    """
    # Six documents; 1 gets 'format1', 2 get 'format2', 3 get 'format3'.
    documents = [Document.create(path=str(n)) for n in range(1, 7)]
    formats = ['format1', 'format2', 'format2', 'format3', 'format3', 'format3']
    for document, fmt in zip(documents, formats):
        Document_Format.create(document=document, format=fmt)
    # Counts come back ordered from most to least frequent.
    assert Document_Format.format_counts() == [
        ('format3', 3),
        ('format2', 2),
        ('format1', 1)
    ]
| 1,025 | 365 |
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from constants.jobs import JobLifeCycle
from db.models.repos import Repo
from dockerizer.builders.base import BaseDockerBuilder
class BaseJobDockerBuilder(BaseDockerBuilder):
    '''Docker builder for job images: records the owning project and
    forwards everything else to BaseDockerBuilder.'''
    # Seconds between pulse checks (consumed by the base class — confirm).
    CHECK_INTERVAL = 10
    def __init__(self,
                 project_id,
                 project_name,
                 repo_path,
                 from_image,
                 image_name,
                 image_tag,
                 copy_code=True,
                 in_tmp_repo=True,
                 build_steps=None,
                 env_vars=None,
                 dockerfile_name='Dockerfile'):
        self.project_id = project_id
        self.project_name = project_name
        super().__init__(
            repo_path=repo_path,
            from_image=from_image,
            image_name=image_name,
            image_tag=image_tag,
            copy_code=copy_code,
            in_tmp_repo=in_tmp_repo,
            build_steps=build_steps,
            env_vars=env_vars,
            dockerfile_name=dockerfile_name)
    def _handle_logs(self, log_line):
        # No-op hook; subclasses may stream build logs.
        pass
    def _check_pulse(self, check_pulse):
        # No-op hook; subclasses may abort builds whose job was stopped.
        pass
def get_job_repo_info(project, job):
    """Collect the repo path and docker image name/tag for building a job.

    Args:
        project: project owning the code repo; `name`, `repo.path` and
            `repo.last_commit` are read.
        job: the job to build.  Currently unused — kept for interface
            stability and a possible future external-git-repo flow (see VCS
            history for the removed draft).

    Returns:
        dict with keys 'repo_path', 'image_name' and 'image_tag'.

    Raises:
        Repo.DoesNotExist: if the project repo has no commit yet.
    """
    project_name = project.name
    repo_path = project.repo.path
    last_commit = project.repo.last_commit
    repo_name = project_name
    image_name = '{}/{}'.format(settings.REGISTRY_HOST, repo_name)
    if not last_commit:
        raise Repo.DoesNotExist
    # Tag the image with the hash of the latest commit.
    image_tag = last_commit[0]
    return {
        'repo_path': repo_path,
        'image_name': image_name,
        'image_tag': image_tag
    }
def build_job(project, job, job_builder, image_tag=None):
    """Build necessary code for a job to run.

    Builds (or reuses) the job's docker image and pushes it to the registry.

    Args:
        project: project owning the code repo.
        job: the job being prepared; its status is set to FAILED if the
            push fails.
        job_builder: BaseJobDockerBuilder subclass used to perform the build.
        image_tag: optional explicit tag; defaults to the latest commit tag.

    Returns:
        True on success (including when the image already exists),
        False if the build or push failed.
    """
    job_spec = job.specification
    build_info = get_job_repo_info(project, job)
    # Build the image
    docker_builder = job_builder(project_id=project.id,
                                 project_name=project.unique_name,
                                 repo_path=build_info['repo_path'],
                                 from_image=job_spec.build.image,
                                 image_name=build_info['image_name'],
                                 image_tag=image_tag or build_info['image_tag'],
                                 build_steps=job_spec.build.build_steps,
                                 env_vars=job_spec.build.env_vars)
    docker_builder.login(registry_user=settings.REGISTRY_USER,
                         registry_password=settings.REGISTRY_PASSWORD,
                         registry_host=settings.REGISTRY_HOST)
    if docker_builder.check_image():
        # Image already built
        docker_builder.clean()
        return True
    if not docker_builder.build():
        docker_builder.clean()
        return False
    if not docker_builder.push():
        docker_builder.clean()
        try:
            job.set_status(JobLifeCycle.FAILED,
                           message='The docker image could not be pushed.')
        except ObjectDoesNotExist:
            # The job row may have been deleted meanwhile; the build outcome
            # is still reported via the return value.
            pass
        return False
    docker_builder.clean()
    return True
| 3,696 | 1,049 |
import math
import collections
import random
import numpy as np
import tensorflow as tf
import itertools
import time
def sum_rows(x):
    """Returns a vector summing up each row of the matrix x.

    Equivalent to the original matmul-with-a-ones-vector formulation
    (x @ ones then flatten), but clearer and valid for integer dtypes
    too, since no matmul is involved.
    """
    return tf.reduce_sum(x, axis=1)
def compute_sampled_logits(weights,
                           biases,
                           labels,
                           inputs,
                           num_sampled,
                           num_classes,
                           num_true=1):
    """Compute logits for the true classes and for sampled negative classes.

    Negatives are drawn once per batch from a log-uniform (Zipfian)
    distribution over [0, num_classes).

    :param weights: class embedding matrix (or a list of shards)
    :param biases: per-class bias vector
    :param labels: tensor of true class ids, [batch_size, num_true]
    :param inputs: input activations, [batch_size, dim]
    :param num_sampled: number of negative classes to sample
    :param num_classes: total number of classes
    :param num_true: number of true labels per example
    :return: (true_logits [batch, num_true], sampled_logits [batch, num_sampled])
    """
    # embedding_lookup accepts a list of shards; normalize to that form.
    if not isinstance(weights, list):
        weights = [weights]
    # The candidate sampler requires int64 class ids.
    if labels.dtype != tf.int64:
        labels = tf.cast(labels, tf.int64)
    labels_flat = tf.reshape(labels, [-1])
    # Sample negatives shared by the whole batch; the expected-count
    # outputs are unused here (no sampled-softmax subtraction).
    sampled_ids, true_expected_count, sampled_expected_count = tf.nn.log_uniform_candidate_sampler(
        true_classes=labels,
        num_true=num_true,
        num_sampled=num_sampled,
        unique=True,
        range_max=num_classes)
    # Look up weights/biases for the true and the sampled classes.
    true_w = tf.nn.embedding_lookup(weights, labels_flat)
    true_b = tf.nn.embedding_lookup(biases, labels_flat)
    sampled_w = tf.nn.embedding_lookup(weights, sampled_ids)
    sampled_b = tf.nn.embedding_lookup(biases, sampled_ids)
    # Row-wise dot products between each input and its num_true class
    # weight rows: broadcast-multiply then sum each row.
    dim = tf.shape(true_w)[1:2]
    new_true_w_shape = tf.concat([[-1, num_true], dim], 0)
    row_wise_dots = tf.multiply(tf.expand_dims(inputs, 1), tf.reshape(true_w, new_true_w_shape))
    dots_as_matrix = tf.reshape(row_wise_dots, tf.concat([[-1], dim], 0))
    true_logits = tf.reshape(sum_rows(dots_as_matrix), [-1, num_true])
    true_b = tf.reshape(true_b, [-1, num_true])
    true_logits += true_b
    # Sampled logits: every example scores the same sampled negatives.
    sampled_b_vec = tf.reshape(sampled_b, [num_sampled])
    sampled_logits = tf.matmul(inputs, sampled_w, transpose_b=True) + sampled_b_vec
    return true_logits, sampled_logits
def nce_loss(weights,
             biases,
             labels,
             inputs,
             num_sampled,
             num_classes,
             num_true=1,
             v=None):
    """NCE loss with optional per-example weights *v* on the positive term.

    :param v: [batch_size, 1] weights applied to the true-class loss;
        defaults to all ones.
    :return: scalar loss averaged over the batch.
    """
    batch_size = int(labels.get_shape()[0])
    per_example_w = tf.ones([batch_size, 1]) if v is None else v
    true_logits, sampled_logits = compute_sampled_logits(
        weights=weights,
        biases=biases,
        labels=labels,
        inputs=inputs,
        num_sampled=num_sampled,
        num_classes=num_classes,
        num_true=num_true)
    # Positives are pushed toward label 1, sampled negatives toward 0.
    pos_xent = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(true_logits), logits=true_logits)
    neg_xent = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.zeros_like(sampled_logits), logits=sampled_logits)
    weighted_pos = tf.multiply(pos_xent, per_example_w)
    total = tf.reduce_sum(weighted_pos) + tf.reduce_sum(neg_xent)
    return tf.div(total, tf.constant(batch_size, dtype=tf.float32))
| 2,786 | 996 |
# Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Union
from sqlalchemy.orm.session import Session
from tortuga.db.componentsDbHandler import ComponentsDbHandler
from tortuga.db.softwareProfilesDbHandler import SoftwareProfilesDbHandler
from tortuga.db.tortugaDbApi import TortugaDbApi
from tortuga.exceptions.tortugaException import TortugaException
from tortuga.objects.component import Component
from tortuga.objects.osInfo import OsInfo
class ComponentDbApi(TortugaDbApi):
    """
    Component DB API class.

    Thin wrapper over the component/software-profile DB handlers that
    converts SQLAlchemy rows into Tortuga `Component` objects and manages
    commit/rollback for the mutating operations.
    """
    def __init__(self):
        TortugaDbApi.__init__(self)
        # Handlers that perform the actual SQLAlchemy queries.
        self._softwareProfilesDbHandler = SoftwareProfilesDbHandler()
        self._componentsDbHandler = ComponentsDbHandler()
    def getComponent(self, session: Session, name: str, version: str,
                     osInfo: OsInfo,
                     optionDict: Optional[Union[dict, None]] = None) \
            -> Component:
        """
        Get component from the db (exact OS match).

        :param session:    active SQLAlchemy session
        :param name:       component name
        :param version:    component version
        :param osInfo:     OS the component must match
        :param optionDict: optional relations to eager-load onto the result
        Returns:
            component
        Throws:
            ComponentNotFound
            DbError
        """
        try:
            dbComponent = self._componentsDbHandler.getComponentByOsInfo(
                session, name, version, osInfo)
            # Attach requested relations before converting to a dict.
            self.loadRelations(dbComponent, optionDict)
            return Component.getFromDbDict(dbComponent.__dict__)
        except TortugaException:
            # Known Tortuga errors propagate untouched.
            raise
        except Exception as ex:
            # Unexpected errors are logged, then re-raised unchanged.
            self._logger.exception(str(ex))
            raise
    def getBestMatchComponent(self, session: Session, name, version, osInfo,
                              kitId):
        """
        Get the best-matching component for the OS within a kit.

        :param session: active SQLAlchemy session
        :param name:    component name
        :param version: component version
        :param osInfo:  OS to match (family fallback handled by the handler)
        :param kitId:   id of the kit to search in
        Returns:
            component
        Throws:
            ComponentNotFound
            DbError
        """
        try:
            dbComponent = self._componentsDbHandler.getBestMatchComponent(
                session, name, version, osInfo, kitId)
            # Always load the full relation set for best-match lookups.
            self.loadRelations(dbComponent, {
                'os': True,
                'family': True,
                'kit': True,
                'os_components': True,
                'osfamily_components': True,
            })
            return Component.getFromDbDict(dbComponent.__dict__)
        except TortugaException:
            raise
        except Exception as ex:
            self._logger.exception(str(ex))
            raise
    def addComponentToSoftwareProfile(self, session: Session, componentId,
                                      softwareProfileId):
        """
        Add component to softwareProfile (commits; rolls back on failure).

        Returns:
            None
        Throws:
            SoftwareProfileNotFound
            ComponentNotFound
            SoftwareProfileComponentAlreadyExists
            DbError
        """
        try:
            self._softwareProfilesDbHandler.addComponentToSoftwareProfile(
                session, componentId, softwareProfileId)
            session.commit()
        except TortugaException:
            session.rollback()
            raise
        except Exception as ex:
            session.rollback()
            self._logger.exception(str(ex))
            raise
    def deleteComponentFromSoftwareProfile(self, session: Session,
                                           componentId,
                                           softwareProfileId):
        """
        Delete component from software profile (commits; rolls back on
        failure).

        Returns:
            None
        Throws:
            SoftwareProfileNotFound
            ComponentNotFound
            SoftwareProfileComponentNotFound
            DbError
        """
        try:
            self._softwareProfilesDbHandler.\
                deleteComponentFromSoftwareProfile(
                    session, componentId, softwareProfileId)
            session.commit()
        except TortugaException:
            session.rollback()
            raise
        except Exception as ex:
            session.rollback()
            self._logger.exception(str(ex))
            raise
    def getComponentList(self, session: Session, softwareProfile=None):
        """
        List components, optionally scoped to one software profile.

        :param softwareProfile: profile name; when given, return that
            profile's components instead of the full list.
        """
        try:
            if softwareProfile:
                return self._softwareProfilesDbHandler.getSoftwareProfile(
                    session, softwareProfile).components
            # List all components
            self._logger.debug('Retrieving component list')
            dbComps = self._componentsDbHandler.getComponentList(session)
            return self.getTortugaObjectList(Component, dbComps)
        except Exception as ex:
            # NOTE(review): unlike the other methods, TortugaException is
            # logged here too rather than re-raised untouched -- confirm
            # whether that asymmetry is intended.
            self._logger.exception(str(ex))
            raise
| 5,218 | 1,219 |
"""Main module."""
class Stack:
    """Simple LIFO stack backed by a Python list."""
    def __init__(self) -> None:
        # Per-instance storage. The previous class-level `stack = []`
        # attribute was shared by every Stack instance, so pushes on one
        # stack showed up on all others.
        self.stack = []
    def isEmpty(self) -> bool:
        """Return True when the stack holds no elements."""
        return len(self.stack) == 0
    def push(self, value: any) -> None:
        """Place *value* on top of the stack."""
        self.stack.append(value)
    def peek(self) -> any:
        """Return the top element without removing it (IndexError if empty)."""
        return self.stack[-1]
    def pop(self) -> any:
        """Remove and return the top element (IndexError if empty)."""
        return self.stack.pop()
import tensorflow as tf
import numpy as np
FLAGS = tf.app.flags.FLAGS
class Embedding(object):
    """Builds word and relative-position embedding lookups (TF1 graph code).

    The word table is the pretrained `word_vec` extended with a trainable
    UNK row and a fixed all-zero padding row.
    """
    def __init__(self, is_training, word_vec, word, pos1, pos2):
        temp_word_embedding = tf.get_variable(initializer=word_vec, name = 'temp_word_embedding', dtype=tf.float32)
        unk_word_embedding = tf.get_variable('unk_embedding', [FLAGS.word_size], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
        # Pretrained rows + trainable UNK row + constant zero row (padding).
        self.word_vec = tf.concat([temp_word_embedding,
                                   tf.reshape(unk_word_embedding, [1, FLAGS.word_size]),
                                   tf.reshape(tf.constant(np.zeros([FLAGS.word_size], dtype=np.float32)), [1, FLAGS.word_size])], 0)
        # Id tensors to be looked up by the methods below.
        self.word = word
        self.pos1 = pos1
        self.pos2 = pos2
        self.is_training = is_training
    def word_embedding(self, var_scope = None, reuse = False):
        """Look up word embeddings for self.word under a variable scope."""
        with tf.variable_scope(var_scope or 'word_embedding', reuse = reuse):
            x = tf.nn.embedding_lookup(self.word_vec, self.word)
            return x
    def pos_embedding(self, simple_pos=False):
        """Look up pos1/pos2 position embeddings, concatenated on axis 2.

        :param simple_pos: when True, use a fixed (non-trainable) table with
            a single all-ones row instead of trainable embeddings.
        """
        with tf.name_scope("pos_embedding"):
            if simple_pos:
                temp_pos_array = np.zeros((FLAGS.pos_num + 1, FLAGS.pos_size), dtype=np.float32)
                # NOTE(review): '/' yields a float under Python 3, which is
                # invalid as an index (TypeError). This assumes Python 2
                # integer division -- confirm, or switch to '//'.
                temp_pos_array[(FLAGS.pos_num - 1) / 2] = np.ones(FLAGS.pos_size, dtype=np.float32)
                pos1_embedding = tf.constant(temp_pos_array)
                pos2_embedding = tf.constant(temp_pos_array)
            else:
                # Trainable tables plus a trailing constant zero row
                # (padding) for each position channel.
                temp_pos1_embedding = tf.get_variable('temp_pos1_embedding', [FLAGS.pos_num, FLAGS.pos_size], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
                pos1_embedding = tf.concat([temp_pos1_embedding, tf.reshape(tf.constant(np.zeros(FLAGS.pos_size, dtype=np.float32)), [1, FLAGS.pos_size])], 0)
                temp_pos2_embedding = tf.get_variable('temp_pos2_embedding', [FLAGS.pos_num, FLAGS.pos_size], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
                pos2_embedding = tf.concat([temp_pos2_embedding, tf.reshape(tf.constant(np.zeros(FLAGS.pos_size, dtype=np.float32)), [1, FLAGS.pos_size])], 0)
            input_pos1 = tf.nn.embedding_lookup(pos1_embedding, self.pos1)
            input_pos2 = tf.nn.embedding_lookup(pos2_embedding, self.pos2)
            x = tf.concat(values = [input_pos1, input_pos2], axis = 2)
            return x
    def concat_embedding(self, word_embedding, pos_embedding):
        """Concatenate word and position embeddings on the last axis.

        Returns the word embedding unchanged when pos_embedding is None.
        """
        if pos_embedding is None:
            return word_embedding
        else:
            return tf.concat(values = [word_embedding, pos_embedding], axis = 2)
import torch.nn as nn
class MappingModel(nn.Module):
    """MLP mapping vectors of size *dims* back to size *dims*.

    :param dims: input and output dimensionality
    :param nlayers: number of Linear layers (1 = a single linear map)
    :param units: hidden width for nlayers >= 2
    :param activation: activation module class used between layers
    """
    def __init__(self, dims, nlayers=1, units=128, activation=nn.ReLU):
        super(MappingModel, self).__init__()
        if nlayers == 1:
            self.linmap = nn.Linear(dims, dims)
        elif nlayers == 2:
            self.linmap = nn.Sequential(
                nn.Linear(dims, units), activation(), nn.Linear(units, dims)
            )
        else:
            layers = [nn.Linear(dims, units), activation()]
            # Build each hidden layer separately. The previous
            # `[nn.Linear(units, units), activation()] * (nlayers - 2)`
            # repeated the SAME module objects, so all hidden layers
            # silently shared one weight matrix.
            for _ in range(nlayers - 2):
                layers.append(nn.Linear(units, units))
                layers.append(activation())
            layers.append(nn.Linear(units, dims))
            self.linmap = nn.Sequential(*layers)

    def forward(self, x):
        """Apply the mapping network to *x* ([..., dims] -> [..., dims])."""
        return self.linmap(x)
| 768 | 254 |
# @Time : 2019/5/22 11:26
# @Author : shakespere
# @FileName: sum.py
import pandas as pd

# Prediction files to ensemble. Each file contributes equally (the old
# hand-written 0.125 weights were simply 1/8 -- i.e. an unweighted mean),
# so averaging programmatically keeps behavior while removing eight
# copy-pasted read/weight lines.
PREDICTION_FILES = [
    "./data/merge_0.8550913438849271_predictions.csv",
    "./data/merge_0.8551243481873769_predictions.csv",
    "./data/merge_0.8571411176454415_predictions.csv",
    "./data/merge_0.8582128855527719_predictions.csv",
    "./data/merge_0.8585647873963975_predictions.csv",
    "./data/merge_0.8599225290804536_predictions.csv",
    "./data/merge_0.860564284049377_predictions.csv",
    "./data/merge_0.8606908440533374_predictions.csv",
]

submissions = [pd.read_csv(path) for path in PREDICTION_FILES]
# IDs are assumed identical across files; take them from the first one.
submission = pd.DataFrame.from_dict({
    'ID': submissions[0]['ID'],
    'Pred': sum(s.Pred.values for s in submissions) / len(submissions),
})
submission.to_csv('./data/submission.csv', index=False)
"""This is the receiver for the matrix model."""
from typing import NamedTuple, Optional, Dict
from lexos.receivers.base_receiver import BaseReceiver
DocumentLabelMap = Dict[int, str]
class TokenOption(NamedTuple):
    """A typed tuple to represent token option."""
    # the size of each token (the n in n-gram)
    n_gram_size: int
    # the token type to send to CountVectorizer
    # available options are 'word', 'char_wb', and 'char'
    token_type: str
class NormOption(NamedTuple):
    """A typed tuple to keep the normalize option."""
    # True if we are using proportional count, False if we are using raw count
    use_freq: bool
    # True if we are applying TF-IDF weighting, False otherwise
    use_tf_idf: bool
    # the normalize option in TF-IDF
    # available options are 'l1' and 'l2'
    tf_idf_norm_option: str
class CullingOption(NamedTuple):
    """A typed tuple to represent all the culling options."""
    # the lowest word rank to keep in DTM
    # if None, don't apply the most-frequent-words filter
    mfw_lowest_rank: Optional[int]
    # the least number of segments that the word needs to be in
    # if None, don't apply culling
    cull_least_seg: Optional[int]
class MatrixFrontEndOption(NamedTuple):
    """A typed tuple to represent all the matrix options."""
    # the token options (n-gram size + token type)
    token_option: TokenOption
    # the normalize options (proportional / TF-IDF)
    norm_option: NormOption
    # the culling options (most-frequent-words / minimum occurrences)
    culling_option: CullingOption
class MatrixReceiver(BaseReceiver):
    """Receives and parses the matrix options sent by the front end."""

    def __init__(self):
        """Initialize the receiver."""
        super().__init__()

    def _get_token_option_from_front_end(self) -> TokenOption:
        """Read the token options out of the front-end data.

        :return: a token option struct
        """
        selected_type = self._front_end_data['token_type']
        char_within_word = False
        if selected_type == 'Tokens':
            token_type = 'word'
        elif selected_type == 'Characters':
            # 'char_wb' keeps character n-grams within word boundaries.
            token_type = 'char_wb' if char_within_word else 'char'
        else:
            raise ValueError('invalid token type from front end')
        return TokenOption(
            token_type=token_type,
            n_gram_size=int(self._front_end_data['token_size'])
        )

    def _get_normalize_option_from_front_end(self) -> NormOption:
        """Read the normalization options out of the front-end data.

        :return: a normalize option struct
        """
        method = self._front_end_data['normalization_method']
        return NormOption(
            use_freq=method == 'Proportional',
            use_tf_idf=method == 'TF-IDF',
            tf_idf_norm_option='l2'
        )

    def _get_culling_option_from_front_end(self) -> CullingOption:
        """Read the culling options out of the front-end data.

        :return: a culling option struct (fields None when disabled)
        """
        data = self._front_end_data
        mfw_rank = int(data['most_frequent_words']) \
            if 'enable_most_frequent_words' in data else None
        min_segs = int(data['minimum_occurrences']) \
            if 'enable_minimum_occurrences' in data else None
        return CullingOption(cull_least_seg=min_segs,
                             mfw_lowest_rank=mfw_rank)

    def options_from_front_end(self) -> MatrixFrontEndOption:
        """Bundle every matrix option parsed from the front end.

        :return: all the options packed into a matrix option struct
        """
        return MatrixFrontEndOption(
            token_option=self._get_token_option_from_front_end(),
            norm_option=self._get_normalize_option_from_front_end(),
            culling_option=self._get_culling_option_from_front_end()
        )
| 4,216 | 1,294 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from Model.CNN_1 import CNN
def test_performance(model, device, test_loader, loss, pred_train_labels, train_labels, printHistory=True):
test_num_right = 0
for step, (datas, labels) in enumerate(test_loader):
b_test_datas = datas.clone().detach().type(torch.float32)
b_test_labels = labels.clone().detach().type(torch.int64)
b_test_datas = Variable(b_test_datas, requires_grad=False).to(device)
# b_test_labels don't need to calculate loss
b_test_labels = Variable(b_test_labels, requires_grad=False)
b_test_output = model(b_test_datas)
pred_test_labels = torch.argmax(
b_test_output, dim=1).detach().cpu()
# pred_test_labels = F.log_softmax(test_output, dim=1)
b_test_num_right = int(sum(pred_test_labels == b_test_labels))
test_num_right += b_test_num_right
# train_acc
train_num_right = int(sum(pred_train_labels == train_labels))
train_acc = train_num_right / train_labels.size(0)
# test_acc
test_acc = test_num_right / len(test_loader.dataset.labels)
if printHistory is True:
print('train_acc: {:5f} | train_loss: {:5f} | test_acc: {:5f}'.format(
train_acc, loss, test_acc))
return train_acc, test_acc
def train_model(device, EPOCH, train_loader, test_loader, model, loss_func, optimizer, printHistory=True):
    """Train *model* for EPOCH epochs and record per-epoch metrics.

    :param device: device batches and the model run on
    :param EPOCH: number of training epochs
    :param train_loader: loader yielding (data, label) training batches
    :param test_loader: loader passed to test_performance for evaluation
    :param model: the network being optimized
    :param loss_func: criterion applied to (output, labels)
    :param optimizer: optimizer stepping the model parameters
    :param printHistory: when True, print metrics after every epoch
    :return: (train_loss_ls, train_acc_ls, test_acc_ls) per-epoch lists
    """
    train_loss_ls = []
    train_acc_ls = []
    test_acc_ls = []
    # NOTE(review): these buffers are never reset between epochs, so the
    # reported train accuracy is a running average over ALL epochs so far,
    # not a per-epoch value -- confirm this is intended.
    pred_train_labels = torch.tensor([], dtype=torch.int64).cpu()
    train_labels = torch.tensor([], dtype=torch.int64).cpu()
    for epoch in range(1, EPOCH+1):
        # print("EPOCH: ", epoch)
        # total = 0
        train_acc = 0
        for step, (b_datas, b_labels) in enumerate(train_loader):
            b_datas = b_datas.clone().detach().type(torch.float32)
            b_labels = b_labels.clone().detach().type(torch.int64)
            b_datas = Variable(b_datas).to(device)
            b_labels = Variable(b_labels).to(device)
            # Standard step: forward, loss, backward, update.
            output = model(b_datas)
            loss = loss_func(output, b_labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Accumulate this batch's predictions/labels on CPU.
            pred_b_train_labels = torch.argmax(
                output, dim=1).detach().cpu()
            pred_train_labels = torch.cat(
                (pred_train_labels, pred_b_train_labels), dim=0)
            train_labels = torch.cat((train_labels, b_labels.cpu()), dim=0)
            # pred_train_labels = F.log_softmax(output, dim=1)
            # total += b_labels.size(0)
            # if step % 10 == 0:
            #     train_acc, test_acc = test_performance(model, loss, test_datas,
            #       pred_train_labels, b_labels, test_labels, printHistory=False)
            # if printHistory is True:
            #     print('Step: {} | train_acc: {:5f} | train_loss: {:5f} | test_acc: {:5f}'.format(
            #         step, train_acc, loss, test_acc))
        # Evaluate using the last batch's loss as the reported train loss.
        train_acc, test_acc = test_performance(model, device, test_loader, loss.cpu(),
                                               pred_train_labels, train_labels, printHistory=False)
        if printHistory is True:
            print('EPOCH: {} | train_acc: {:5f} | train_loss: {:5f} | test_acc: {:5f}'.format(
                epoch, train_acc, loss, test_acc))
        # Clamp the recorded loss at 1 (note: this rebinds the tensor to a
        # plain int, hence the AttributeError fallback below).
        if loss > 1:
            loss = 1
        try:
            train_loss_ls.append(float(loss.cpu().detach().numpy()))
        except AttributeError:
            train_loss_ls.append(float(loss))
        train_acc_ls.append(train_acc)
        test_acc_ls.append(test_acc)
    return (train_loss_ls, train_acc_ls, test_acc_ls)
| 3,791 | 1,260 |
from fastapi import Request
class MockedHTTPRequest(Request):
    """Test double for fastapi.Request exposing canned body and headers."""
    def __init__(self, body: dict = None, headers: dict = None):
        super().__init__({"type": "http"})
        # Avoid mutable default arguments: the previous `body: dict = {}`
        # defaults were single shared dicts, so mutating one instance's
        # body/headers leaked into every other default-constructed mock.
        self.__body = {} if body is None else body
        self.__headers = {} if headers is None else headers
    @property
    def body(self) -> dict:
        """Return the canned request body."""
        return self.__body
    @property
    def headers(self) -> dict:
        """Return the canned request headers."""
        return self.__headers
| 374 | 108 |
#!/usr/bin/env python
import rospy
from std_msgs.msg import Header
from geometry_msgs.msg import PoseStamped
from nav_msgs.msg import Odometry
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import cv2
import sys
import numpy as np
import message_filters
import tf
class DepthImageProcessor:
    """ROS node estimating the base-station pose from depth + segmentation.

    Subscribes to a stereo depth image and a segmentation image, masks the
    depth image to base-station pixels, averages their depth, and publishes
    the resulting pose transformed into the odom frame.
    """
    def __init__(self):
        # Segmentation label value that marks base-station pixels.
        self.HOME_BASE_KEY = 1
        rospy.init_node('depth_image_processor')
        self.bridge = CvBridge()
        self.listener = tf.TransformListener()
        # subscribe to depth image and segmentation result
        self.base_pose_pub = rospy.Publisher("/base_station/pose", PoseStamped, queue_size=3)
        rospy.Subscriber("/stereo/depth_image", Image, callback=self.depth_image_callback)
        rospy.Subscriber("/segmented_image", Image, callback=self.segmentat_image_callback)
        self.odom_frame = "odom"
        self.camera_frame = "scout_1_tf/camera_link"
        self.depth_image = None
        print("started node")
        rospy.spin()
    def depth_image_callback(self, depth_image):
        """Cache the latest depth image as an OpenCV matrix."""
        try:
            cv_mat_depth = self.bridge.imgmsg_to_cv2(depth_image, desired_encoding="passthrough")
        except CvBridgeError as e:  # 'except X, e' is Py2-only; 'as' works on both
            raise e
        self.depth_image = cv_mat_depth
    def segmentat_image_callback(self, segmentation_image):
        """Mask the cached depth image to base-station pixels and publish
        the average-depth pose in the odom frame."""
        try:
            cv_mat_seg = self.bridge.imgmsg_to_cv2(segmentation_image, desired_encoding="mono8")
        except CvBridgeError as e:
            raise e
        # Guard clause: nothing to do until a depth image has arrived.
        if self.depth_image is None:
            return
        # Binarize the segmentation to a 0/1 mask of base-station pixels.
        cv_mat_seg = np.array(cv_mat_seg)
        cv_mat_seg[cv_mat_seg != self.HOME_BASE_KEY] = 0
        cv_mat_seg[cv_mat_seg > 0] = 1
        masked_depth = cv2.bitwise_and(self.depth_image, self.depth_image, mask=cv_mat_seg)
        # Flatten and zero out NaNs before averaging.
        depth_values = np.array(masked_depth).flatten()
        depth_values[np.isnan(depth_values)] = 0
        count = np.count_nonzero(depth_values)
        if count == 0:
            # No base-station pixels visible: skip publishing instead of
            # producing a divide-by-zero NaN pose.
            return
        dist = depth_values.sum() / count
        obj_pose = PoseStamped()
        obj_pose.header = Header()
        obj_pose.header.frame_id = self.camera_frame
        obj_pose.pose.position.x = dist
        obj_pose.pose.position.y = 0
        obj_pose.pose.position.z = 0
        final_pose = self.listener.transformPose(self.odom_frame, obj_pose)
        self.base_pose_pub.publish(final_pose)
# Entry point: constructing the node registers the subscribers and then
# blocks inside rospy.spin() (called from __init__) until shutdown.
if __name__ == "__main__":
    DepthImageProcessor()
| 2,821 | 932 |
# -*- coding: utf-8 -*-
import const_map
import math
import packing_method
# Select the packing scheme defined in packing_method
pack_function = packing_method.used_func
def pack_api(dataObj, predict_result,target_c_m):
    '''
    Packing API: place the predicted VMs onto physical machines.
    :param dataObj: data object (carries the physical machine type list)
    :param predict_result: prediction result (VM type -> predicted counts)
    :param target_c_m: target CPU/MEM ratio for the placement
    :return: (vm_size, vm, pm_size, pm, pm_name, res_use, pm_free)
    '''
    picker = VmWorker(predict_result)
    group = ServerObj(dataObj, picker.origin_cpu_mem_sum())
    # Run the packing strategy selected in packing_method.
    pack_function(picker, group,target_c_m)
    vm_size, vm = picker.to_origin_desc()
    pm_size, pm, pm_name = group.to_description()
    res_use = group.get_res_used_pro()
    pm_free = group.get_pm_free()
    print(group.to_usage_status())
    return vm_size, vm, pm_size, pm, pm_name, res_use, pm_free
class ServerObj():
    """Cluster of physical machines onto which the predicted VMs are packed."""
    # Count of successful VM placements.
    empty = 0
    # Physical machine parameters for the cluster.
    server_info = {'CPU': 0, # number of CPU units
                   'MEM': 0, # number of MEM units
                   'HDD': 0} # number of HDD units
    # Number of physical machines opened.
    pm_size = 0
    PM_status = []
    # Number of VMs currently placed in the cluster.
    vm_size = 0
    # Cluster-wide VM placement state (type -> count).
    VM = {}
    # Per physical machine placement state (list of type -> count dicts).
    PM = []
    PM_name = []
    # Remaining-resource table ([cpu, mem] per machine).
    PM_Free = []
    # Id of the physical machine currently pointed at (0-based index).
    PM_ID = 0
    # Target CPU/MEM ratios to optimize towards.
    direction = []
    # Amount of CPU still left to place.
    lave_cpu_sum = 0
    # Amount of MEM still left to place.
    lave_mem_sum = 0
    # Total CPU required by all VMs.
    need_cpu_sum = 0
    # Total MEM required by all VMs.
    need_mem_sum = 0
    # Total CPU across opened physical machines.
    pm_cpu_sum = 0
    # Total MEM across opened physical machines.
    pm_mem_sum = 0
    # NOTE(review): all of these class-level mutables are re-assigned in
    # __init__, so instances do not actually share them.
    def __init__(self, dataObj, vm_res):
        '''
        Initialize the per-instance placement state.
        :param dataObj: data object carrying the physical machine type list
        :param vm_res: (total_cpu, total_mem) required by all predicted VMs
        '''
        self.vm_size = 0
        self.PM = []
        self.VM = {}
        self.PM_status = []
        self.pm_size = 0
        self.PM_ID = -1
        self.empty = 0
        self.PM_Free = []
        self.server_info = {}
        self.server_info = dataObj.pm_type_list
        self.direction = [0.25, 0.5, 1]
        self.lave_mem_sum = vm_res[1]
        self.lave_cpu_sum = vm_res[0]
        self.need_mem_sum = vm_res[1]
        self.need_cpu_sum = vm_res[0]
        self.pm_cpu_sum = 0
        self.pm_mem_sum = 0
        self.PM_name = []
    def new_physic_machine(self, pm_type):
        '''
        Open (create) a new physical machine.
        :param pm_type: physical machine type name
        :return: id of the newly created machine
        '''
        C_M = const_map.PM_TYPE[pm_type]['CPU'] / float(const_map.PM_TYPE[pm_type]['MEM'])
        re_cpu = const_map.PM_TYPE[pm_type]['CPU']
        re_mem = const_map.PM_TYPE[pm_type]['MEM']
        temp = {
            'pm_type': pm_type,
            'C_M': C_M,
            're_cpu': re_cpu,
            're_mem': re_mem,
            'vm_size': 0
        }
        self.PM_status.append(temp)
        self.PM.append({})
        self.pm_size += 1
        self.PM_ID += 1
        # Track the total amount of physical resources opened so far.
        self.pm_cpu_sum += re_cpu
        self.pm_mem_sum += re_mem
        # Record the physical machine's type name.
        self.PM_name.append(pm_type)
        print 'apply pm:%s , C/M=%.2f\n' % (pm_type, C_M)
        return self.PM_ID
    def get_nearest_distance(self, c_m):
        '''
        Get the optimization target closest to the given CPU/MEM ratio.
        :param c_m: current CPU/MEM ratio
        :return: the closest ratio in self.direction
        '''
        min_distance_target = 1
        distance = 1
        for i in range(len(self.direction)):
            # Strictly closer than the best match so far.
            if abs(c_m - self.direction[i]) < distance:
                distance = abs(c_m - self.direction[i])
                min_distance_target = self.direction[i]
        return min_distance_target
    def get_pm_c_m(self, pm_id):
        '''
        Return the C/M ratio of the given physical machine.
        :param pm_id: physical machine id
        :return: that machine's current CPU/MEM ratio
        '''
        c_m = self.PM_status[pm_id]['C_M']
        return c_m
    def get_lave_cpu_mem_sum(self):
        '''
        Get the amounts of CPU and MEM still waiting to be placed.
        :return: (remaining_cpu, remaining_mem)
        '''
        return self.lave_cpu_sum, self.lave_mem_sum
    def get_sum_C_M(self):
        # Overall CPU/MEM ratio of everything still to place.
        return self.lave_cpu_sum * 1.0 / self.lave_mem_sum
    def is_free(self, pm_id):
        '''
        Check whether the machine still has free resources.
        :param pm_id: physical machine id
        :return: True when both CPU and MEM remain
        '''
        re_cpu = self.PM_status[pm_id]['re_cpu']
        re_mem = self.PM_status[pm_id]['re_mem']
        if re_cpu > 0 and re_mem > 0:
            return True
        else:
            return False
    def get_pm_cpu_mem(self, pm_id):
        '''
        Return the machine's remaining CPU and MEM.
        :param pm_id: physical machine id
        :return: (remaining_cpu, remaining_mem)
        '''
        re_cpu = self.PM_status[pm_id]['re_cpu']
        re_mem = self.PM_status[pm_id]['re_mem']
        return re_cpu, re_mem
    def test_put_vm(self, pm_id, vm_type):
        '''
        Dry-run: test whether a VM can be placed on the machine.
        :param pm_id: physical machine id
        :param vm_type: VM type name
        :return: (fits, [remaining_cpu, remaining_mem, c_m_after])
        '''
        # Guard against an invalid physical machine id.
        if pm_id is None or \
                pm_id < 0 or pm_id >= self.pm_size:
            raise ValueError('error pm_id=', pm_id)
        vm_cpu, vm_mem = const_map.VM_PARAM[vm_type][:2]
        # Read the current state from the machine status table.
        pmstatus = self.PM_status[pm_id]
        re_cpu = pmstatus['re_cpu'] - vm_cpu
        re_mem = pmstatus['re_mem'] - vm_mem
        if re_cpu == 0 or re_mem == 0:
            c_m = 0
        else:
            c_m = re_cpu * 1.0 / re_mem
        # Report whether it fits, together with the leftovers after placing.
        if re_cpu >= 0 and re_mem >= 0:
            return (True, [re_cpu, re_mem, c_m])
        else:
            return (False, [re_cpu, re_mem, c_m])
    def put_vm(self, pm_id, vm_type):
        '''
        Actually place a VM on a physical machine.
        :param pm_id: physical machine id
        :param vm_type: VM type name
        :return: (remaining_cpu, remaining_mem) on success, None when the
                 machine lacks the resources
        '''
        if pm_id is None or \
                pm_id < 0 or pm_id >= self.pm_size:
            raise ValueError('error pm_id=', pm_id)
        # Resources required by this VM type.
        vm_cpu, vm_mem = const_map.VM_PARAM[vm_type][:2]
        # Current state of the target machine.
        pmstatus = self.PM_status[pm_id]
        re_cpu = pmstatus['re_cpu'] - vm_cpu
        re_mem = pmstatus['re_mem'] - vm_mem
        # Update the cluster-wide remaining totals.
        # NOTE(review): these are decremented even when the placement below
        # fails -- confirm callers compensate for the over-allocation path.
        self.lave_cpu_sum -= vm_cpu
        self.lave_mem_sum -= vm_mem
        # Enough resources: commit the placement.
        if re_cpu >= 0 and re_mem >= 0:
            self.empty += 1
            pmstatus['re_cpu'] = re_cpu
            pmstatus['re_mem'] = re_mem
            # Recompute the machine's C/M ratio.
            if re_cpu == 0 or re_mem == 0:
                c_m = 0
            else:
                c_m = re_cpu * 1.0 / re_mem
            pmstatus['C_M'] = c_m
            pmstatus['vm_size'] += 1
            self.vm_size += 1
            # Count this VM type cluster-wide.
            if vm_type not in self.VM.keys():
                self.VM[vm_type] = 0
            self.VM[vm_type] += 1
            pm = self.PM[pm_id]
            # Count this VM type on the physical machine.
            if vm_type not in pm.keys():
                pm[vm_type] = 0
            pm[vm_type] += 1
            return (re_cpu, re_mem)
        return None  # over-allocation: placement failed
    def to_description(self):
        # Report 0 machines when nothing was ever placed.
        if self.empty != 0:
            return self.pm_size, self.PM, self.PM_name
        else:
            return 0, self.PM, self.PM_name
    def get_res_used_pro(self):
        '''
        :return: overall resource utilization of the cluster
        '''
        cpu_use = self.need_cpu_sum * 1.0 / self.pm_cpu_sum
        mem_use = self.need_mem_sum * 1.0 / self.pm_mem_sum
        use = cpu_use * 0.5 + mem_use * 0.5
        # Return the physical machines' resource utilization.
        # return cpu_use, mem_use, use
        return use
    def to_usage_status(self):
        '''
        Build a human-readable usage report for every physical machine and,
        as a side effect, fill the remaining-space table PM_Free.
        '''
        result = ''
        usage = self.PM_status
        # result = 'CPU:%d MEM:%d\n' % (cpu_max, mem_max)
        for i in range(self.pm_size):
            pm_type = usage[i]['pm_type']
            cpu_max = self.server_info[pm_type]['CPU']
            mem_max = self.server_info[pm_type]['MEM']
            cpu_used = cpu_max - usage[i]['re_cpu']
            mem_used = mem_max - usage[i]['re_mem']
            cpu_usage_rate = cpu_used * 100.0 / cpu_max
            mem_usage_rate = mem_used * 100.0 / mem_max
            total_usage_rate = cpu_usage_rate * 0.5 + mem_usage_rate * 0.5
            vm_cot = usage[i]['vm_size']
            string = 'pm_id:%d \t cpu_used:%d(%.2f%%)\t' % (i, cpu_used, cpu_usage_rate)
            string += 'mem_used:%d(%.2f%%)\t' % (mem_used, mem_usage_rate)
            string += 'total_used:(%.2f%%)\tvm_cot:%d\n' % (total_usage_rate, vm_cot)
            # Save the remaining-space entry for this machine.
            self.PM_Free.append([cpu_max - cpu_used, mem_max - mem_used])
            result += string
        return result
    def get_pm_free(self):
        # Remaining [cpu, mem] per machine (filled by to_usage_status).
        return self.PM_Free
    def is_packing(self):
        # Whether there is still both CPU and MEM left to place.
        if self.lave_cpu_sum == 0 or self.lave_mem_sum == 0:
            return False
        else:
            return True
################## end class Server ####################
class VmWorker():
    """Dispenser of the predicted VMs.

    Keeps a 3x6 count matrix self.VM indexed by the MEM/CPU weight
    (rows: weight 1.0, 2.0, 4.0) and the CPU core level
    (columns: CPU = 1, 2, 4, 8, 16, 32). An entry of -1 means the VM type
    was absent from the prediction; 0 means exhausted.
    """
    # Raw prediction input.
    origin_data = None
    # Description of the original input (VM type -> count).
    origin_desc_table = {}
    origin_vm_size = 0
    # Total VM count (non-negative entries only).
    vm_size = 0
    # Total CPUs required by the predicted VMs.
    vm_cpu_size = 0
    # Total MEM required by the predicted VMs.
    vm_mem_size = 0
    # VM type names; their order defines the matrix index mapping.
    vm_types = const_map.VM_TYPE_DIRT
    def __init__(self, predict_result):
        """
        :param predict_result: mapping of VM type -> predicted counts
        """
        # Keep a reference to the raw prediction.
        self.origin_data = predict_result
        # BUGFIX: the count matrix used to be a class-level list, so every
        # VmWorker instance shared -- and mutated -- the same nested lists.
        # Build a fresh matrix per instance instead.
        # CPU = 1, 2, 4, 8, 16, 32
        self.VM = [[-1, -1, -1, -1, -1, -1],  # weight_1.0
                   [-1, -1, -1, -1, -1, -1],  # weight_2.0
                   [-1, -1, -1, -1, -1, -1]]  # weight_4.0
        # Explicit per-instance totals (the class attributes are 0, but
        # being explicit avoids relying on that).
        self.vm_cpu_size = 0
        self.vm_mem_size = 0
        # Populate the matrix and the CPU/MEM totals.
        self.init_worker(predict_result)
        self.vm_size, self.origin_desc_table = self.set_data_info()
        self.origin_vm_size = self.vm_size
        # Live counters of CPU/MEM still to place.
        self.cpu_sum = self.vm_cpu_size
        self.mem_sum = self.vm_mem_size
    def init_worker(self, predict_result):
        """Fill self.VM and the CPU/MEM totals from the prediction.

        :param predict_result: mapping of VM type -> predicted counts
        """
        types = predict_result.keys()
        # Sum how many CPUs / how much MEM all predicted VMs require.
        for vmtype in types:
            vm_sum = 0
            pre_temp = predict_result[vmtype]
            vm_cpu, vm_mem, _ = const_map.VM_PARAM[vmtype]
            for i in range(len(pre_temp)):
                vm_sum += pre_temp[i]
            self.vm_cpu_size += vm_cpu * vm_sum
            self.vm_mem_size += vm_mem * vm_sum
            # Record the count in the matrix cell for this type.
            row, col = self.type2index(vmtype)
            self.VM[row][col] = vm_sum
    def type2index(self, vm_type):
        """Map a VM type name to its (weight_row, cpu_col) matrix cell."""
        tindex = self.vm_types.index(vm_type)
        windex = tindex % 3
        cindex = int(tindex / 3)
        return windex, cindex
    def index2type(self, windex, cindex):
        """Map a (weight_row, cpu_col) matrix cell back to a VM type name."""
        if windex < 0 or cindex < 0:
            raise ValueError('Error ', (windex, cindex))
        return self.vm_types[cindex * 3 + windex]
    def get_vm_by_index(self, windex, cindex):
        """Take one VM out of the given matrix cell.

        :return: None when the type was never predicted, -1 when the cell
                 (or the worker) is exhausted, otherwise the remaining
                 count after taking one.
        """
        re_vm = self.VM[windex][cindex]
        if self.vm_size == -1 or re_vm == -1:
            return None
        elif self.vm_size == 0 or re_vm == 0:
            return -1
        else:
            re_vm -= 1
            self.vm_size -= 1
            self.VM[windex][cindex] = re_vm
            return re_vm
    def get_vm_by_wc(self, weight, cpu):
        """Take one VM selected by MEM/CPU weight and CPU count."""
        windex = int(math.log(weight, 2))
        cindex = int(math.log(cpu, 2))
        return self.get_vm_by_index(windex, cindex)
    def get_vm_by_type(self, vm_type):
        """Take one VM selected by its type name."""
        windex, cindex = self.type2index(vm_type)
        return self.get_vm_by_index(windex, cindex)
    def get_vm_by_mu_weight(self, mu_weight, order=0):
        """Drain every non-empty cell of one weight row.

        :param mu_weight: MEM/CPU weight (1, 2 or 4) selecting the row
        :param order: 0 scans small CPU -> large, 1 scans large -> small
        :return: [[vm_type, ...], [count, ...]] or None when empty
        """
        result = [[],  # vm_type
                  []]  # count
        windex = int(math.log(mu_weight, 2))
        # NOTE(review): columns scanned are 0..4 in both directions; the
        # 6th column (CPU=32) is never visited -- presumably unused by
        # VM_TYPE_DIRT, confirm.
        start = 0
        end = 5
        step = 1
        if order == 1:
            start = 4
            end = -1
            step = -1
        for cindex in range(start, end, step):
            tmp = self.VM[windex][cindex]
            if tmp > 0:
                result[0].append(self.index2type(windex, cindex))
                result[1].append(tmp)
                self.VM[windex][cindex] = 0
                self.vm_size -= tmp
        if len(result[0]) == 0:
            return None
        return result
    def get_vm_order(self, cpu):
        """Build, per weight ratio, the count queues for all CPU levels at
        or below *cpu* (largest CPU first). Read-only: cells are not drained.

        :param cpu: upper CPU level (power of two)
        :return: {'1.0': [...], '2.0': [...], '4.0': [...]} or {} when empty
        """
        result = {}
        col = int(math.log(cpu, 2))
        start = col
        end = -1
        step = -1
        temp_1 = [[], []]
        temp_2 = [[], []]
        temp_4 = [[], []]
        for col in range(start, end, step):
            if self.VM[0][col] != -1:
                temp_1[0].append(self.index2type(0, col))
                temp_1[1].append(self.VM[0][col])
            if self.VM[1][col] != -1:
                temp_2[0].append(self.index2type(1, col))
                temp_2[1].append(self.VM[1][col])
            if self.VM[2][col] != -1:
                temp_4[0].append(self.index2type(2, col))
                temp_4[1].append(self.VM[2][col])
        # Nothing predicted in any ratio: nothing to place.
        if len(temp_1[0]) == 0 and len(temp_2[0]) == 0 and len(temp_4[0]) == 0:
            return result
        else:
            result['1.0'] = temp_1
            result['2.0'] = temp_2
            result['4.0'] = temp_4
            return result
    def get_vm_by_cpu(self, cpu, order=0):
        """Drain every non-empty cell of one CPU column.

        :param cpu: CPU count (power of two) selecting the column
        :param order: 0 scans weight 1.0 -> 4.0, 1 scans 4.0 -> 1.0
        :return: [[vm_type, ...], [count, ...]] or None when empty
        """
        result = [[],  # vm_type
                  []]  # count
        # Column index for this CPU level.
        col = int(math.log(cpu, 2))
        start = 0
        end = 3
        step = 1
        # Bottom-up: ratio 1:4 -> 1:1.
        if order == 1:
            start = 2
            end = -1
            step = -1
        # Top-down (default): ratio 1:1 -> 1:4.
        for row in range(start, end, step):
            tmp = self.VM[row][col]
            if tmp > 0:
                result[0].append(self.index2type(row, col))
                result[1].append(tmp)
                self.VM[row][col] = 0
                self.vm_size -= tmp
        # Nothing available in this column.
        if len(result[0]) == 0:
            return None
        return result
    def origin_cpu_mem_sum(self):
        """Return the (cpu, mem) totals required by the original prediction."""
        return self.vm_cpu_size, self.vm_mem_size
    def to_origin_desc(self):
        """Return the original VM count and the type -> count table."""
        return self.origin_vm_size, self.origin_desc_table
    def set_data_info(self):
        """Build the VM count table and compute the total VM count.

        :return: (total, table); total is -1 when nothing was predicted.
        """
        info_table = {}
        vm_sum = 0
        flag = True
        for i in range(len(self.VM)):  # rows
            for j in range(len(self.VM[2])):  # columns
                tmp = self.VM[i][j]
                if tmp != -1:
                    flag = False
                    vm_sum += tmp
                    info_table[self.index2type(i, j)] = tmp
        if flag:
            vm_sum = -1
        return vm_sum, info_table
| 14,313 | 5,749 |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import uuid
from edgedb.lang.common import exceptions as edgedb_error
from edgedb.lang.common.persistent_hash import persistent_hash
from . import base as s_types
# Short aliases for registering scalar-type implementations and mappings
# on the base type metaclass.
_add_impl = s_types.BaseTypeMeta.add_implementation
_add_map = s_types.BaseTypeMeta.add_mapping
class UUID(uuid.UUID):
    """uuid.UUID subclass accepting either an existing UUID or a hex string
    as the positional *value*, raising ScalarTypeValueError on bad input."""

    def __init__(self, value, *, hex=None, bytes=None, bytes_le=None,
                 fields=None, int=None, version=None):
        # Route the positional value to the matching uuid.UUID keyword:
        # an existing UUID contributes its integer form, anything else is
        # treated as a hex string.
        if isinstance(value, uuid.UUID):
            int = value.int
        else:
            hex = value
        try:
            super().__init__(hex, bytes, bytes_le, fields, int, version)
        except ValueError as e:
            raise edgedb_error.ScalarTypeValueError(e.args[0]) from e

    def persistent_hash(self):
        """Return a stable hash derived from the 128-bit integer value."""
        return persistent_hash(self.int)
# Register this UUID class as the implementation of std::uuid, and map
# both it and the stdlib uuid.UUID back to the std::uuid scalar type.
_add_impl('std::uuid', UUID)
_add_map(UUID, 'std::uuid')
_add_map(uuid.UUID, 'std::uuid')
| 1,653 | 513 |
def is_p_vector(val):
    """Checks whether the given value is an instance of PVector."""
    from Q.p_vector import PVector
    return isinstance(val, PVector)
def is_p_scalar(val):
    """Checks whether the given value is an instance of PScalar."""
    from Q.p_scalar import PScalar
    return isinstance(val, PScalar)
"""
This script contains functions that
create data in the Neo4j database.
"""
import json
import platform
from pathlib import Path
from gensim.models import Word2Vec
from GraphOfDocs_Representation.utils import (
clear_screen, generate_words
)
# Initialize an empty set of edges.
edges = {}
# Initialize an empty list of unique terms.
# We are using a list to preserve order of appearance.
nodes = []
def create_graph_of_words(words, database, filename, relationship, window_size = 4):
    """
    Create the graph-of-words of one document inside the neo4j database.

    Word nodes are shared across documents (global ``nodes``) and the
    co-occurrence edges accumulate their weights across documents (global
    ``edges``).  Finally, the Issue node whose key is *filename* is linked
    to every unique term of this document via *relationship*.

    Returns *filename* when the document is skipped (fewer words than
    ``window_size``), otherwise None.
    """
    # Files that have word length < window size, are skipped.
    # Window size ranges from 2 to 6.
    length = len(words)
    if (length < window_size):
        # Early exit, we return the skipped filename
        return filename
    # Global dict of edges, so co-occurrences from different documents merge
    # into a single weighted edge instead of duplicates.
    global edges
    # Global list of already-created nodes, kept as a list to respect the
    # order of appearance.
    global nodes
    # Gather the unique terms of the current graph of words.
    terms = []
    creation_list = []
    for word in words:
        if word not in terms:
            terms.append(word)
    # Remove end-of-sentence token, so it doesn't get created.
    if 'e5c' in terms:
        terms.remove('e5c')
    # If the word doesn't exist as a node yet, add it to the creation list.
    for word in terms:
        if word not in nodes:
            creation_list.append(word)
            # Append word to the global node list, to avoid duplicate creation.
            nodes.append(word)
    # Create all unique nodes, from the creation list.
    database.execute(f'UNWIND {creation_list} as key '
                    'CREATE (word:Word {key: key})', 'w')
    # Create unique connections between existing nodes of the graph.
    for i, current in enumerate(words):
        # If there are leftover items smaller than the window size, reduce it.
        if i + window_size > length:
            window_size = window_size - 1
        # Skip the end-of-sentence token: words belonging to different
        # sentences must never be connected in the database.
        if current == 'e5c':
            continue
        # Connect the current element with the next elements of the window size.
        for j in range(1, window_size):
            next = words[i + j]
            # Reached the end-of-sentence token: stop extending this window
            # and pick a new current word in the outer loop.
            if next == 'e5c':
                break
            edge = (current, next)
            if edge in edges:
                # If the edge exists, just update its weight.
                edges[edge] = edges[edge] + 1
                query = (f'MATCH (w1:Word {{key: "{current}"}})-[r:connects]-(w2:Word {{key: "{next}"}}) '
                         f'SET r.weight = {edges[edge]}')
            else:
                # Else create it, with a starting weight of 1 (first co-occurrence).
                edges[edge] = 1
                query = (f'MATCH (w1:Word {{key: "{current}"}}) '
                         f'MATCH (w2:Word {{key: "{next}"}}) '
                         f'MERGE (w1)-[r:connects {{weight: {edges[edge]}}}]-(w2)')
            # Executed for both branches of the if...else above.
            database.execute(query, 'w')
    # Connect the issue with all of its words.
    # BUG FIX: the Issue node was previously matched with a hard-coded key
    # instead of the *filename* (issue key) supplied by the caller, so the
    # words were never attached to the correct issue.
    query = (f'MATCH (w:Word) WHERE w.key IN {terms} '
             'WITH collect(w) as words '
             f'MATCH (i:Issue {{key: "{filename}"}}) '
             'UNWIND words as word '
             f'CREATE (i)-[:{relationship}]->(word)')
    database.execute(query, 'w')
    return
def create_unique_constraints(database):
    """
    Create all uniqueness constraints used by the graph in one place:
    Word.key, Issue.key and Person.uname must each be unique.
    """
    constraint_specs = (
        ('word', 'Word', 'key'),
        ('issue', 'Issue', 'key'),
        ('person', 'Person', 'uname'),
    )
    for var, label, prop in constraint_specs:
        database.execute(
            f'CREATE CONSTRAINT ON ({var}:{label}) '
            f'ASSERT {var}.{prop} IS UNIQUE', 'w')
    return
def create_issues_from_json(database, dirpath):
    """
    Create Issue nodes, the Person assigned to each one, the
    is_assigned_to relationship between them, and the graph of words
    built from the issue's title + description, from the supplied
    JSON file (expects a top-level "issues" list).

    Side effects: executes CREATE queries against *database*, writes the
    key of the last processed issue to 'last_accessed_issue.txt', prints
    and clears the console for progress reporting.
    """
    current_system = platform.system()
    # Read json in memory.
    with open(dirpath, encoding = 'utf-8-sig', errors = 'ignore') as f:
        issues = json.load(f)['issues']
    skip_count = 0
    count = 1
    total_count = len(issues)
    # Process all issues.
    for issue in issues:
        # Print the number of the currently processed issue.
        print(f'Processing {count + skip_count} out of {total_count} issues...' )
        # Extract the title and description from the issue.
        # Keys may be absent or explicitly null in the JSON.
        title = '' if issue.get('title') is None else issue['title']
        description = '' if issue.get('description') is None else issue['description']
        # Issues with neither title nor description carry no text: skip them.
        if title == '' and description == '':
            skip_count += 1
            continue
        # Create the issue node with its scalar fields.
        # NOTE(review): field values are interpolated directly into the
        # cypher string; quotes inside them would break the query.
        query = (
            f'CREATE (i:Issue {{key: "{issue["key"]}", '
            f'type: "{issue["type"]}", '
            f'priority: "{issue["priority"]}", '
            f'status: "{issue["status"]}"}})'
        )
        database.execute(query, 'w')
        # Create the assignee.
        query = (f'CREATE (p:Person {{uname: "{issue["assignee"]}"}})')
        database.execute(query, 'w')
        # Create the connection between the assignee and the issue.
        query = (
            f'MATCH (p:Person {{uname: "{issue["assignee"]}"}}) '
            f'MATCH (i:Issue {{key: "{issue["key"]}"}}) '
            f'CREATE (p)-[r:is_assigned_to]->(i)'
        )
        database.execute(query, 'w')
        # Join the text of the title and description.
        text = ' '.join((title, description))
        # Create the graph of words representation from the text of the issue.
        create_graph_of_words(generate_words(text), database, issue['key'], 'includes')
        # Update the progress counter.
        count = count + 1
        # Save the last accessed issue in a file (crude resume/debug aid).
        with open('last_accessed_issue.txt', 'w') as f:
            f.write(issue['key'])
        # Clear the screen to redraw the progress counter.
        clear_screen(current_system)
    print(f'Created {total_count - skip_count}, skipped {skip_count} issues.')
    return
def train_word2vec(dirpath, model_name, size):
    """Train a Word2Vec model on the issue texts of a JSON dump.

    Each issue contributes one token list built from its title and
    description; the trained model is saved under *model_name*.
    (Uses the gensim<4 ``size`` keyword for the vector dimensionality.)
    """
    # Load the whole JSON document into memory.
    with open(dirpath, encoding = 'utf-8-sig', errors = 'ignore') as fp:
        issues = json.load(fp)['issues']
    # One token list per issue; missing fields become empty strings.
    corpus = []
    for issue in issues:
        combined = ' '.join((
            str(issue.get('title', '')),
            str(issue.get('description', ''))
        ))
        corpus.append(generate_words(combined))
    # Train the Word2Vec model on the texts of jira issues.
    model = Word2Vec(corpus, size = size, window = 5, min_count = 1, workers = 8)
    model.save(f'{model_name}')
def create_word2vec_similarity_graph(database, dirpath, model_name, size = 100):
    """Link every Word node to its 10 most word2vec-similar words.

    Trains a model first when none exists on disk, then creates one
    :similar_w2v relationship (with a ``score`` property) per neighbour.
    """
    # Train the word2vec model if it is not already saved on disk.
    if not Path(model_name).is_file():
        train_word2vec(dirpath, model_name, size)
    host_os = platform.system()
    # Load the previously trained word2vec model.
    model = Word2Vec.load(model_name)
    vocabulary = model.wv.vocab
    total = len(vocabulary)
    # Walk the vocabulary and persist each token's nearest neighbours.
    for processed, token in enumerate(vocabulary):
        print(f'Processing {processed} out of {total} tokens...')
        for term, score in model.wv.most_similar(token, topn = 10):
            # Create the scored similarity relationship between the token
            # and this neighbouring term.
            statement = (
                f'MATCH (token:Word {{key: "{token}"}}) '
                f'MATCH (term:Word {{key: "{term}"}}) '
                f'CREATE (token)-[r:similar_w2v{{score: {score}}}]->(term)'
            )
            database.execute(statement, 'w')
        # Clear the screen to redraw the progress counter.
        clear_screen(host_os)
| 9,348 | 2,749 |
from django.contrib import admin
from .models import Semester, Shift, ShiftDate
# Expose the scheduling models through the Django admin site.
for _model in (Semester, Shift, ShiftDate):
    admin.site.register(_model)
| 199 | 60 |
import pytest
import mwtab
@pytest.mark.parametrize("files_source", [
    "tests/example_data/mwtab_files/ST000122_AN000204.json",
    "tests/example_data/mwtab_files/ST000122_AN000204.txt"
])
def test_validate(files_source):
    """Validate a known-good Metabolomics Workbench file (mwTab and JSON).

    :param files_source: path of the Metabolomics Workbench file to validate.
    :type files_source: :py:class:`str`
    """
    parsed = next(mwtab.read_files(files_source))
    _, log = mwtab.validate_file(parsed, metabolites=False)
    assert len(log.split('\n')) == 9
@pytest.mark.parametrize("file_source", [
    "tests/example_data/validation_files/ST000122_AN000204_error_1.txt",
    "tests/example_data/validation_files/ST000122_AN000204_error_1.json"
])
def test_validate_subject_sample_factors(file_source):
    """Missing subject/sample IDs and factor values are reported in the log."""
    parsed = next(mwtab.read_files(file_source))
    _, log = mwtab.validate_file(parsed, metabolites=False)
    for expected in ("missing Subject ID",
                     "missing Sample ID",
                     "missing value for Factor"):
        assert expected in log
@pytest.mark.parametrize("file_source", [
    "tests/example_data/validation_files/ST000122_AN000204_error_2.txt",
    "tests/example_data/validation_files/ST000122_AN000204_error_2.json"
])
def test_validate_missing_section_samples(file_source):
    """Missing per-sample section data and sample IDs are reported in the log.

    Renamed: this function was previously also called
    ``test_validate_subject_sample_factors``, which shadowed the earlier
    test of the same name so pytest silently skipped the error_1 cases.
    """
    mwfile = next(mwtab.read_files(file_source))
    _, validation_log = mwtab.validate_file(mwfile, metabolites=False)
    assert "Section missing data entry for sample(s):" in validation_log
    assert "SUBJECT_SAMPLE_FACTORS: Section missing sample ID(s)" in validation_log
@pytest.mark.parametrize("file_source", [
    "tests/example_data/validation_files/ST000122_AN000204_error_3.txt",
    "tests/example_data/validation_files/ST000122_AN000204_error_3.json"
])
def test_validate_metabolites(file_source):
    """Near-miss metabolite column names are flagged by the validator."""
    parsed = next(mwtab.read_files(file_source))
    _, log = mwtab.validate_file(parsed)
    assert "which matches a commonly used field name" in log
@pytest.mark.parametrize("file_source", [
    "tests/example_data/validation_files/ST000122_AN000204_error_4.txt",
    "tests/example_data/validation_files/ST000122_AN000204_error_4.json"
])
def test_validate_schema(file_source):
    """Files that violate the mwTab schema are reported as such."""
    parsed = next(mwtab.read_files(file_source))
    _, log = mwtab.validate_file(parsed)
    assert "does not match the allowed schema" in log
@pytest.mark.parametrize("file_source", [
    "tests/example_data/mwtab_files/ST000122_AN000204.json"
])
def test_validation_log_local(file_source):
    """The log header reports source path, study/analysis IDs and format."""
    parsed = next(mwtab.read_files(file_source))
    _, log = mwtab.validate_file(parsed)
    assert f"Source: {file_source}" in log
    assert "Study ID: ST000122" in log
    assert "Analysis ID: AN000204" in log
    assert "File format: json" in log
@pytest.mark.parametrize("file_source", [
    "2"
])
def test_validation_log_web(file_source):
    """The log header reports the REST URL, IDs and format for a web fetch."""
    parsed = next(mwtab.read_files(file_source))
    _, log = mwtab.validate_file(parsed, metabolites=False)
    expected_source = ("https://www.metabolomicsworkbench.org/rest/study/"
                       "analysis_id/AN000002/mwtab/txt")
    assert f"Source: {expected_source}" in log
    assert "Study ID: ST000002" in log
    assert "Analysis ID: AN000002" in log
    assert "File format: txt" in log
#!/usr/bin/env python
"""Very simple AST parser to get star imports. Nothing more.
"""
import os
import ast
import imp
import sys
from importlib import import_module
# Python 2/3 compatibility: `unicode` only exists on Python 2.
# Catch NameError specifically instead of a bare `except:`, which would
# also swallow KeyboardInterrupt/SystemExit.
try:
    str_ = unicode  # noqa F821
except NameError:
    str_ = str
# Modules already parsed (cycle/duplicate guard) and the accumulated set of
# names a star import would provide.
modules_seen = set()
import_names = set()
class NodeVisitor(ast.NodeVisitor):
    """Collect the names a module would contribute via ``from mod import *``.

    After visiting a module AST:
      * ``names``   -- the public names the module defines or imports, or the
                       contents of ``__all__`` when one is assigned;
      * ``imports`` -- the modules that this module itself star-imports, so
                       the caller can recurse into them.
    """

    def __init__(self):
        super().__init__()
        # Per-instance state.  These used to be mutable *class* attributes,
        # so every NodeVisitor instance shared (and polluted) the same
        # set/list across different parsed modules.
        self.using_all = False
        self.names = set()
        self.imports = []

    def iterable_values(self, node):
        """Return the string values contained in a list/tuple literal node."""
        if not hasattr(node, 'elts'):
            return []
        values = []
        types = (ast.Str,)
        if hasattr(ast, 'Bytes'):
            types += (ast.Bytes,)
        for item in node.elts:
            if isinstance(item, types):
                values.append(str_(item.s))
        return values

    def add_name(self, name):
        # Public names only; once __all__ has been seen it is authoritative.
        if name and not self.using_all and name[0] != '_':
            self.names.add(name)

    def visit_Import(self, node):
        for n in node.names:
            self.add_name(n.asname or n.name)
        self.generic_visit(node)

    def visit_ImportFrom(self, node):
        module = '%s%s' % ('.' * node.level, str_(node.module or ''))
        for n in node.names:
            if n.name == '*':
                if module not in self.imports:
                    self.imports.append(module)
            else:
                self.add_name(n.asname or n.name)
        self.generic_visit(node)

    def visit_Assign(self, node):
        for t in node.targets:
            if not isinstance(t.ctx, ast.Store):
                continue
            if isinstance(t, ast.Name):
                if t.id == '__all__':
                    # __all__ replaces whatever has been collected so far.
                    self.names.clear()
                    self.using_all = True
                    self.names.update(self.iterable_values(node.value))
                else:
                    self.add_name(t.id)
            elif isinstance(t, ast.Tuple):
                for item in t.elts:
                    # Tuple targets may contain non-Name nodes (e.g. Starred
                    # in "a, *b = ..."), which have no .id and previously
                    # raised AttributeError.
                    if isinstance(item, ast.Name):
                        self.add_name(item.id)
                    elif (isinstance(item, getattr(ast, 'Starred', ())) and
                            isinstance(item.value, ast.Name)):
                        self.add_name(item.value.id)
        self.generic_visit(node)

    def visit_AugAssign(self, node):
        # Only "__all__ += [...]" is interesting; non-Name targets such as
        # "x[0] += ..." previously crashed on .id.
        if (isinstance(node.op, ast.Add) and
                isinstance(node.target, ast.Name) and
                node.target.id == '__all__'):
            self.names.update(self.iterable_values(node.value))

    def visit_FunctionDef(self, node):
        # Don't visit the function body
        self.add_name(node.name)

    def visit_ClassDef(self, node):
        # Don't visit the class body
        self.add_name(node.name)

    def visit_Try(self, node):
        for item in node.body:
            self.visit(item)
        for item in node.finalbody:
            self.visit(item)
        for handler in node.handlers:
            # handler.type can be None (bare "except:") or a Tuple of
            # exception types; both previously raised AttributeError on .id.
            htype = handler.type
            if isinstance(htype, ast.Name):
                caught = [htype]
            elif isinstance(htype, ast.Tuple):
                caught = [e for e in htype.elts if isinstance(e, ast.Name)]
            else:
                caught = []
            if any(name.id == 'ImportError' for name in caught):
                # Only care about collecting names that would be imported
                for item in handler.body:
                    self.visit(item)
def simple_parse(source_file, module):
    """
    Recursively collect the names ``from module import *`` would provide,
    accumulating them in the global ``import_names`` set.

    *source_file* is the path of the file doing the import (used to resolve
    relative imports); *module* is the dotted module name.  Built-in modules
    are imported for real; everything else is located with ``imp`` and
    parsed with ``ast`` so nothing is executed.
    """
    # Built-in modules have no source to parse: import them and read
    # __all__ (or the public dir()) directly.
    if module.split('.')[0] in sys.builtin_module_names:
        try:
            imported = import_module(module)
            if hasattr(imported, '__all__'):
                import_names.update(imported.__all__)
            else:
                import_names.update(x for x in dir(imported) if x[0] != '_')
        except ImportError:
            pass
        return
    # Guard against duplicate work and import cycles.
    if module in modules_seen:
        return
    modules_seen.add(module)
    visitor = NodeVisitor()
    try:
        file = None
        last_path = None
        if module[0] == '.':
            # Relative import: climb one directory per leading dot to find
            # the package directory to search from.
            module_tmp = module.lstrip('.')
            p = source_file
            for _ in range(len(module) - len(module_tmp)):
                p = os.path.dirname(p)
            last_path = [p]
            module = module_tmp
        # Walk the dotted path one component at a time, closing the file
        # opened for the previous (package) component.
        for module in module.split('.'):
            if file is not None:
                file.close()
            file, path, desc = imp.find_module(module, last_path)
            if path:
                last_path = [path]
            if desc[2] == imp.PKG_DIRECTORY:
                # Packages: parse their __init__ file instead.
                for suffix, _, _ in imp.get_suffixes():
                    init_path = os.path.join(path, '__init__%s' % suffix)
                    if os.path.exists(init_path):
                        file = open(init_path, 'rb')
                        path = init_path
                        break
            if not file:
                # No parseable source found (e.g. namespace package).
                # NOTE(review): the file from an earlier component may leak
                # here and below -- there is no final file.close().
                return
    except ImportError:
        return
    try:
        root = ast.parse(file.read())
        visitor.visit(root)
    except (SyntaxError, IndentationError):
        return
    finally:
        # Runs even on the early returns above: record what was collected
        # and recurse into any star imports the module itself performs.
        import_names.update(visitor.names)
        for module in visitor.imports:
            simple_parse(path, module)
if __name__ == "__main__":
    # Usage: script.py <source_file> <module> [<module> ...]
    # Parses each module and prints the sorted union of star-import names.
    if len(sys.argv) > 2:
        for arg in sys.argv[2:]:
            simple_parse(sys.argv[1], arg)
    for name in sorted(import_names):
        print(name)
| 4,653 | 1,399 |
# Generated by Django 2.0.3 on 2018-03-24 03:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial ledger migration (auto-generated by Django 2.0.3).

    Creates the Operations, Transaction and the three Transaction_is*
    link tables.  All foreign keys use SET_NULL so deleting a profile,
    project, demand or offer keeps the accounting rows.
    Do not hand-edit the generated operations.
    """
    initial = True
    dependencies = [
        ('demands', '0001_initial'),
        ('projects', '0004_auto_20180323_1346'),
        ('profiles', '0009_profilewallet'),
        ('offers', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Operations',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('debit', models.PositiveIntegerField(blank=True, null=True)),
                ('credit', models.PositiveIntegerField(blank=True, null=True)),
                ('balance', models.PositiveIntegerField(blank=True, null=True)),
                ('profile', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='profiles.Profile')),
            ],
        ),
        migrations.CreateModel(
            name='Transaction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('amount', models.DecimalField(decimal_places=2, max_digits=15)),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('transaction_type', models.CharField(choices=[('OF', 'Offer'), ('DE', 'Demand'), ('CR', 'Creation')], max_length=2)),
                ('transaction_id', models.PositiveIntegerField()),
                ('profile_from', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='from_user', to='profiles.Profile')),
                ('profile_to', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='to_user', to='profiles.Profile')),
            ],
        ),
        migrations.CreateModel(
            name='Transaction_isCreation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('project', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='projects.Project')),
                ('transaction', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='ledger.Transaction')),
            ],
        ),
        migrations.CreateModel(
            name='Transaction_isDemand',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('demand', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='demands.Demand')),
                ('transaction', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='ledger.Transaction')),
            ],
        ),
        migrations.CreateModel(
            name='Transaction_isOffer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('offer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='offers.Offer')),
                ('transaction', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='ledger.Transaction')),
            ],
        ),
        migrations.AddField(
            model_name='operations',
            name='transaction',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='ledger.Transaction'),
        ),
    ]
| 3,666 | 1,090 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Topic: 带可选参数的装饰器
Desc :
"""
from functools import wraps, partial
import logging
def logged(func=None, *, level=logging.DEBUG, name=None, message=None):
    """Decorator that logs a message every time the wrapped function is called.

    Usable both bare (``@logged``) and with keyword options
    (``@logged(level=..., name=..., message=...)``).  The logger defaults
    to the function's module name and the message to the function name.
    """
    # Called with options only: return a decorator waiting for the function.
    if func is None:
        return partial(logged, level=level, name=name, message=message)

    logger = logging.getLogger(name if name else func.__module__)
    text = message if message else func.__name__

    @wraps(func)
    def wrapper(*args, **kwargs):
        logger.log(level, text)
        return func(*args, **kwargs)
    return wrapper
# Example use
@logged
def add(x, y):
    """Return x + y; each call is logged at DEBUG level."""
    return x + y
@logged(level=logging.CRITICAL, name='example')
def spam():
    """Print a message; each call is logged to the 'example' logger."""
    print('Spam!')
spam()
def aa(kk=None, *, a=1,b=2,c=3):
    """Demo function with keyword-only parameters; prints its arguments."""
    print(kk, a, b, c)
# Pre-bind the keyword-only arguments with partial; bbb('333') prints "333 1 2 3".
bbb = partial(aa, a=1,b=2,c=3)
bbb('333')
#Media loader class.
#Loads images.
import os, sys, pygame
from pygame.locals import *
#Load an image. :)
def load_image(file, transparent = True):
    """Load an image from the 'media' directory and convert it for blitting.

    With transparent=True the colour of the top-left pixel becomes the
    colorkey (that colour is not drawn).  With transparent=False the image
    keeps its per-pixel alpha channel.

    NOTE(review): the branches look swapped relative to common pygame
    practice -- convert_alpha() is the call that preserves transparency,
    yet it is taken on the *non*-transparent path.  Confirm intent before
    changing.
    """
    print("Loading " + file + " ..")
    fullname = os.path.join('media', file)
    image = pygame.image.load(fullname)
    if transparent == True:
        image = image.convert()
        # Use the top-left pixel's colour as the transparent colorkey.
        colorkey = image.get_at((0,0))
        image.set_colorkey(colorkey, RLEACCEL)
    else:
        image = image.convert_alpha()
    return image
| 482 | 157 |
# Python modules
import multiprocessing
import math
# 3rd party modules
import numpy as np
# Our modules
from pylab import *
# GAMMA = 26753.0 - replaced with user defined value
# 2*pi at full double precision.  The previous literal (6.283185) carried
# only 7 significant digits, which slowly biases the accumulated phase
# terms (df * TWOPI * t) in long simulations.
TWOPI = 2.0 * np.pi
def isiterable(p_object):
    """Return True when iter(p_object) succeeds, i.e. the object is iterable.

    The iterator itself is discarded (previously it was bound to an unused
    local); only whether ``iter()`` raises TypeError matters.
    """
    try:
        iter(p_object)
    except TypeError:
        return False
    return True
"""
Code for calculating bloch simulations. This code is independent of any GUI
and can be called from the command line. There's only one public function
which is bloch_multi().
This code iterates over each position and frequency offset in the simulation.
For each, it returns the magnetization values (Mx,My,Mz).
It uses multiprocessing.
"""
# The naive algorithm for this code would be to process one simulation at
# a time. We found that experiments are processed maybe 10% faster if we
# combine the bloch simulation's frequency offset dimension into chunks
# prior to processing.
#
# MAX_CHUNK_SIZE is the largest # of freq offsets that will be grouped together
# in one chunk. This is a hard limit.
#
# Choosing MAX_CHUNK_SIZE was the result of a lot of experimentation.
# Obviously we can't test all possibilities but 4-10 gave good average
# performance.
#
# Because the x,y,z position dimension is always fully included in each chunk
# the time saved by adding more than 1 freq offset line to the chunk is much
# less than expected. Thus for a 1000x200 nf x npos array, we went from 55 sec
# on 12 processors to 45 seconds on 12 processors when we set MAX_CHUNK_SIZE
# from 1 to 10 respectively. At 50 or 100 per chunk, the total time approached
# 55 seconds again. So, nothing too simple to figure out here, just empirical
# mysto-crud.
#
# To force this code to use the naive algorithm (each simulation = one chunk),
# set MAX_CHUNK_SIZE = 1.
MAX_CHUNK_SIZE = 4
class _Chunk(object):
"""
A chunk of frequency offset values to process. len(df) is
always < MAX_CHUNK_SIZE.
b1real, b1imag, xgrad, ygrad, zgrad, tsteps, e1, e2, df, dx, dy, dz, mx, my, mz, mode, gamma
"""
def __init__(self, b1real, b1imag, xgrad, ygrad, zgrad, tsteps,
e1, e2, dx, dy, dz, mx, my, mz, mode, gamma):
# Each chunk has a bunch of static info that is used in all
# calculations. And a small set of frequency offset values and their
# indices for storage back into the results arrays on finish.
self.b1real = b1real
self.b1imag = b1imag
self.xgrad = xgrad
self.ygrad = ygrad
self.zgrad = zgrad
self.tsteps = tsteps
self.e1 = e1
self.e2 = e2
self.df = []
self.ifreq = []
self.dx = dx
self.dy = dy
self.dz = dz
self.mx = mx
self.my = my
self.mz = mz
self.mode = mode
self.gamma = gamma
@property
def nlines(self):
return len(self.df)
def __str__(self):
# __str__ is useful for debugging
lines = [ ]
lines.append("---- chunk ----")
lines.append("b1real: %d" % self.b1real.size)
lines.append("b1imag: %d" % self.b1imag.size)
lines.append("xgrad: %d" % self.xgrad.size)
lines.append("ygrad: %d" % self.ygrad.size)
lines.append("zgrad: %d" % self.zgrad.size)
lines.append("tsteps: %d" % self.tsteps.size)
lines.append("e1: %d" % self.e1.size)
lines.append("e2: %d" % self.e2.size)
lines.append("df: %d" % self.df.size)
lines.append("dx: %d" % self.dx.size)
lines.append("dy: %d" % self.dy.size)
lines.append("dz: %d" % self.dz.size)
lines.append("mx: %d" % self.mx.size)
lines.append("my: %d" % self.my.size)
lines.append("mz: %d" % self.mz.size)
lines.append("mode: %d" % self.mode)
lines.append("gamma: %f" % self.gamma)
return "\n".join(lines)
def blochsim(b1real, b1imag, xgrad, ygrad, zgrad,
             tsteps, ntime, e1, e2, df,
             dx, dy, dz, mx, my, mz, mode,
             cpu_count=1, gamma=26751.3 ):
    """
    Builds a set of Magnetization values for spatial locations (dx,dy,dz) and
    frequency offset values (df) given a B1 pulse and set of x,y,z gradient
    values. It runs a bloch simulation at each of nf x npos frequencies and
    locations, fanning the work out over *cpu_count* worker processes.

    Returns copies of (mx, my, mz) with the simulated chunks scattered back
    into the frequency-offset dimension.
    """
    # Outputs start as copies so frequency lines not touched by any chunk
    # keep their input values.
    mxout = mx.copy()
    myout = my.copy()
    mzout = mz.copy()
    # PS - If you want to run this code without using multiprocessing (e.g.
    # in order to profile execution), use the 3 lines below in place of
    # the use of multiprocessing.Pool.
    #     _initializer()
    #     chunks = _build_chunks(b1real, b1imag, xgrad, ygrad, zgrad, tsteps, e1, e2, df, dx, dy, dz, mx, my, mz, mode, gamma )
    #     results = [_process_chunk(chunk) for chunk in chunks]
    pool = multiprocessing.Pool(cpu_count, _initializer, [])
    # The 3rd param to imap_unordered() is a chunksize. These chunks are not
    # to be confused with the chunks returned by _build_chunks()! chunksize
    # just determines how many values will be grabbed from the iterator
    # at once. Using a chunksize > 1 gives slightly better performance, but
    # only slightly. The advantage of using a chunksize == 1 is that
    # _build_chunks() is called every time a worker needs new work, so we
    # can use it as a cheap callback/progress indicator.
    results = pool.imap_unordered(_process_chunk,
                        _build_chunks(b1real, b1imag, xgrad, ygrad, zgrad, tsteps,
                                      e1, e2, df, dx, dy, dz, mx, my, mz, mode, gamma ),
                        1)
    pool.close()
    pool.join()
    # Each worker returns [(ifreq_list, mx, my, mz)]; scatter every
    # frequency line back to its index in the overall result arrays.
    for result in results:
        ifreqs = result[0][0]
        mxs = result[0][1]
        mys = result[0][2]
        mzs = result[0][3]
        for i, ifreq in enumerate(ifreqs):
            mxout[ifreq,:,:] = mxs[i,:,:]
            myout[ifreq,:,:] = mys[i,:,:]
            mzout[ifreq,:,:] = mzs[i,:,:]
    return mxout, myout, mzout
############### Internal use only below this line ###############
def _build_chunks(b1real, b1imag, xgrad, ygrad, zgrad, tsteps,
                  e1, e2, df, dx, dy, dz, mx, my, mz, mode, gamma ):
    """
    Generator that walks the frequency-offset dimension of a bloch
    simulation and yields _Chunk objects holding at most MAX_CHUNK_SIZE
    frequency lines each (plus the shared static inputs).
    """
    def _fresh_chunk():
        # Every chunk carries the same static simulation inputs.
        return _Chunk(b1real, b1imag, xgrad, ygrad, zgrad, tsteps,
                      e1, e2, dx, dy, dz, mx, my, mz, mode, gamma)

    pending = _fresh_chunk()
    for ifreq, dfreq in enumerate(df):
        # Hand off the chunk once adding another line would overflow it.
        if pending.nlines and pending.nlines + 1 > MAX_CHUNK_SIZE:
            yield pending
            pending = _fresh_chunk()
        pending.df.append(dfreq)
        pending.ifreq.append(ifreq)
    # Flush whatever is left (the final, possibly partial, chunk).
    yield pending
def _initializer():
    """No-op multiprocessing.Pool initializer, kept for Windows parity."""
    # This function is subtle...it's called by each worker process, and is
    # passed the values of the global constants that I need in
    # _process_chunk(). Under *nix, I can just declare them global and
    # (thanks to the magic of fork()) the variables and their values will be
    # copied to the worker processes'. Under Windows, this module is
    # re-imported once for each worker, and as a result these globals are
    # recreated and re-initialized to 0 in each worker. This function sets
    # them back to the values they need to be, and that's the only reason
    # it exists.
    pass
def _process_chunk(chunk):
    """Run the bloch simulation for one _Chunk; executed inside a worker.

    Iterates the time steps, applying per-step rotation (from B1, gradients
    and frequency offset) followed by T1/T2 decay, either propagating the
    magnetization directly (mode 0/2) or accumulating the affine transform
    A,B for the steady-state solve (mode 1).

    Returns [(ifreq_list, mx, my, mz)] where ifreq_list gives the indices
    of these frequency lines in the caller's overall df array.
    """
    # Unpack the chunk into locals (cheaper lookups in the time loop).
    b1real = chunk.b1real
    b1imag = chunk.b1imag
    xgrad = chunk.xgrad
    ygrad = chunk.ygrad
    zgrad = chunk.zgrad
    tsteps = chunk.tsteps
    e1 = chunk.e1
    e2 = chunk.e2
    df = np.array(chunk.df)
    ifreq = chunk.ifreq
    dx = chunk.dx
    dy = chunk.dy
    dz = chunk.dz
    mx = chunk.mx
    my = chunk.my
    mz = chunk.mz
    mode = chunk.mode
    gamma = chunk.gamma
    # Output arrays for just this chunk's frequency lines.
    # NOTE(review): the time axis is sized mx.shape[2]; for mode 2 this is
    # assumed to equal len(tsteps) -- confirm against the caller.
    mxout = np.zeros((len(df),mx.shape[1],mx.shape[2]), 'float')
    myout = np.zeros((len(df),mx.shape[1],mx.shape[2]), 'float')
    mzout = np.zeros((len(df),mx.shape[1],mx.shape[2]), 'float')
    npos = len(dx)
    nf = len(df)
    bvecs = [np.zeros((3,)) for i in range(nf*npos)]
    decmat = np.zeros((3,3)) # Decay matrix for each time step.
    decvec = np.zeros((3,)) # Recovery vector for each time step.
    amats = [np.eye(3) for i in range(nf*npos)] # A is the identity matrix.
    imats = [np.eye(3) for i in range(nf*npos)] # I is the identity matrix.
    mcurr0s = [np.array([mx[j,i,0],my[j,i,0],mz[j,i,0]]) for j in range(nf) for i in range(npos)] # Set starting x,y,z magnetizations
    for t in range(len(tsteps)):
        # Rotation: off-resonance + gradient phase about z, B1 about x/y.
        df_array = np.repeat(df*TWOPI*tsteps[t], npos)
        rotz = -(xgrad[t] * dx + ygrad[t] * dy + zgrad[t] * dz) * tsteps[t]
        rotz = np.tile(rotz, nf) - df_array
        rotx = (- b1real[t] * gamma * tsteps[t])
        roty = (- b1imag[t] * gamma * tsteps[t]) # based on Hao Sun's UMich blochCim code
        rotmats = calcrotmat(rotx, roty, rotz)
        if (mode == 1):
            # Steady state: compose the rotation into the affine A, B.
            arots = [np.dot(rotmat, amat) for rotmat, amat in zip(rotmats,amats)]
            brots = [np.dot(rotmat, bvec) for rotmat, bvec in zip(rotmats,bvecs)]
        else:
            # Transient: rotate the current magnetization directly.
            mcurr1s = [np.dot(rotmat, mcurr0) for rotmat, mcurr0 in zip(rotmats,mcurr0s)]
        # Decay: diagonal T2/T2/T1 attenuation plus T1 recovery toward M0.
        decvec[2] = 1-e1[t]
        decmat[0,0] = e2[t]
        decmat[1,1] = e2[t]
        decmat[2,2] = e1[t]
        if (mode == 1):
            amats = [np.dot(decmat, arot) for arot in arots]
            bvecs = [np.dot(decmat, brot) for brot in brots]
            bvecs = [bvec+decvec for bvec in bvecs]
        else:
            mcurr0s = [np.dot(decmat, mcurr1) for mcurr1 in mcurr1s]
            mcurr0s = [mcurr0+decvec for mcurr0 in mcurr0s]
        if mode == 2:
            # Sample output at times. Only do this if transient!
            mcurr0 = np.array(mcurr0s)
            mcurr0.shape = nf, npos, 3
            mxout[:,:,t] = mcurr0[:,:,0]
            myout[:,:,t] = mcurr0[:,:,1]
            mzout[:,:,t] = mcurr0[:,:,2]
    # If only recording the endpoint, either store the last
    # point, or calculate the steady-state endpoint.
    if mode == 0:
        # Indicates start at given m, or m0.
        mcurr0 = np.array(mcurr0s)
        mcurr0.shape = nf, npos, 3
        mxout[:,:,0] = mcurr0[:,:,0]
        myout[:,:,0] = mcurr0[:,:,1]
        mzout[:,:,0] = mcurr0[:,:,2]
    elif mode == 1:
        # Indicates to find steady-state magnetization
        amats = [imat-amat for imat,amat in zip(imats,amats)] # Now amat = (I-A)
        imats = [np.linalg.inv(amat) for amat in amats] # Reuse imat as inv(I-A)
        mvec = [np.dot(imat,bvec) for imat,bvec in zip(imats,bvecs)] # Now M = inv(I-A)*B
        mvec = np.array(mvec)
        mvec.shape = nf, npos, 3
        mxout[:,:,0] = mvec[:,:,0]
        myout[:,:,0] = mvec[:,:,1]
        mzout[:,:,0] = mvec[:,:,2]
    # The results are a list of 2-tuples (index, Mx, My, Mz). index is an index
    # into the frequency offset dimension of the magnetization array -- it's
    # where these mx, my, mz values will reside in the overall results array.
    result = [ (ifreq, mxout, myout, mzout) ]
    return result
#==============================================================================
def times2intervals( endtimes ):
    """
    Convert interval end times into interval lengths, assuming the first
    interval starts at 0.

    Returns (allpos, intervals) where *allpos* is True only when every
    interval length is strictly positive.
    """
    intervals = []
    previous = 0.0
    for endtime in endtimes:
        intervals.append(endtime - previous)
        previous = endtime
    allpos = all(length > 0 for length in intervals)
    return allpos, np.array(intervals)
def calcrotmat(nx, ny, nz):
    """
    Return the 3x3 rotation matrices rotating |n| radians about each vector
    (nx[i], ny[i], nz[i]); result shape is (len, 3, 3).

    Uses Cayley-Klein parameters evaluated with numpy ufuncs over the whole
    array, which benchmarked ~3-4x faster than a per-element math loop
    (see the scipy-list / robotics-toolbox discussions the original cited).
    Zero-length vectors map to the identity matrix.
    """
    phi_array = np.sqrt(nx*nx+ny*ny+nz*nz)
    # First define Cayley-Klein parameters
    hp = phi_array/2.0
    cp = np.cos(hp)
    # Guard the 0/0 at phi == 0: those rows are overwritten with the
    # identity below, so just silence the divide/invalid warnings here
    # instead of letting NaNs emit RuntimeWarnings.
    with np.errstate(divide='ignore', invalid='ignore'):
        sp = np.sin(hp)/phi_array # /phi because n is unit length in defs.
    ar = cp
    ai = -nz*sp
    br = ny*sp
    bi = -nx*sp
    # Make auxiliary variables to speed this up
    arar = ar*ar
    aiai = ai*ai
    arai2 = 2*ar*ai
    brbr = br*br
    bibi = bi*bi
    brbi2 = 2*br*bi
    arbi2 = 2*ar*bi
    aibr2 = 2*ai*br
    arbr2 = 2*ar*br
    aibi2 = 2*ai*bi
    # Make rotation matrix.
    rmat = np.array([[arar-aiai-brbr+bibi, arai2-brbi2, arbr2+aibi2],
                     [-arai2-brbi2, arar-aiai+brbr-bibi, arbi2-aibr2],
                     [-arbr2+aibi2, -aibr2-arbi2, arar+aiai-brbr-bibi]])
    rmat = rmat.transpose([2,0,1])
    # Replace the NaN rows produced by zero rotation angles with identity.
    for i, phi in enumerate(phi_array):
        if phi == 0.0:
            rmat[i,:,:] = np.eye(3)
    return rmat
def blochsimfz(b1real, b1imag, xgrad, ygrad, zgrad,
               tsteps, ntime, t1, t2, dfreq, nfreq,
               dxpos, dypos, dzpos, npos,
               mx, my, mz, mode, cpu_count=0, gamma=26751.3):
    """
    Prepare relaxation factors and scaled positions, then run blochsim().

    Converts T1/T2 into per-step decay factors e1/e2 and scales the spatial
    positions by gamma.  Mode 3 (steady state AND record all time points)
    is handled by first solving for the steady state and then replaying the
    sequence transiently from it.

    Returns copies of (mx, my, mz) holding the simulated magnetization.
    """
    # NOTE(review): ntout is computed but never used below -- possibly a
    # leftover from an earlier output-shaping scheme.
    if mode & 2:
        ntout = ntime
    else:
        ntout = 1
    mxout = mx.copy()
    myout = my.copy()
    mzout = mz.copy()
    # First calculate the e1 and e2 values at each time step.
    e1 = np.exp(-tsteps/t1)
    e2 = np.exp(-tsteps/t2)
    gammadx = dxpos.copy()*gamma # Convert to Hz/cm
    gammady = dypos.copy()*gamma # Convert to Hz/cm
    gammadz = dzpos.copy()*gamma # Convert to Hz/cm
    if mode == 3: # Steady state AND record all time points.
        # First go through and find steady state, then
        # repeat as if transient starting at steady st.
        mxx, myy, mzz = blochsim(b1real, b1imag, xgrad, ygrad, zgrad,
                                 tsteps, ntime, e1, e2, dfreq,
                                 gammadx, gammady, gammadz,
                                 mx, my, mz, 1, cpu_count, gamma);
        mxx, myy, mzz = blochsim(b1real, b1imag, xgrad, ygrad, zgrad,
                                 tsteps, ntime, e1, e2, dfreq,
                                 gammadx, gammady, gammadz,
                                 mxx, myy, mzz, 2, cpu_count, gamma);
    else:
        mxx, myy, mzz = blochsim(b1real, b1imag, xgrad, ygrad, zgrad,
                                 tsteps, ntime, e1, e2, dfreq,
                                 gammadx, gammady, gammadz,
                                 mx, my, mz, mode, cpu_count, gamma);
    mxout[:,:,:] = mxx
    myout[:,:,:] = myy
    mzout[:,:,:] = mzz
    return mxout, myout, mzout
def bloch_multi(b1,gr,tp,t1,t2,df,dp,mode=0,mx=[],my=[],mz=[], cpu_count=0, gamma=26751.3):
    """
    Calling format
    [mx,my,mz] = bloch(b1,gr,tp,t1,t2,df,dp,mode=0,mx=[],my=[],mz=[])

    Bloch simulation of rotations due to B1, gradient and
    off-resonance, including relaxation effects.  At each time
    point, the rotation matrix and decay matrix are calculated.
    Simulation can simulate the steady-state if the sequence
    is applied repeatedly, or the magnetization starting at m0.

    INPUT:
        b1 = (Mx1) RF pulse in G.  Can be complex.
        gr = (Mx1, 2, or 3) 1-, 2- or 3-dimensional gradient in G/cm.
        tp = (Mx1) time duration of each b1 and gr point, in seconds,
                    or 1x1 time step if constant for all points,
                    or monotonically INCREASING endtime of each interval.
        t1 = T1 relaxation time in seconds.
        t2 = T2 relaxation time in seconds.
        df = (Nx1) Array of off-resonance frequencies (Hz)
        dp = (Px1, 2, or 3) Array of spatial positions (cm).
             Width should match width of gr.
        mode = Bitmask mode:
            Bit 0:  0-Simulate from start or M0, 1-Steady State
            Bit 1:  0-Just end time, 1-Record m at time points.
        mx, my, mz (optional) = (NxP) arrays of starting magnetization,
             where N is the number of frequencies and P the number of
             spatial positions.  (NB. bjs, N and P swapped vs Matlab code.)

    OUTPUT:
        mx, my, mz = NxP arrays of the resulting magnetization
                     components at each position and frequency.

    B. Hargreaves.  Nov 2003.
    """
    # cpu_count is the number of processing cores (virtual CPUs) available on
    # this machine.  Any int > 0 is accepted; 0/None means "ask the system".
    if not cpu_count:
        # multiprocessing.cpu_count() isn't implemented on all platforms;
        # where it's unavailable we default to 2 for no really strong reason.
        try:
            cpu_count = multiprocessing.cpu_count()
        except NotImplementedError:
            cpu_count = 2
    print("---------------------------------------")
    print("3D-position + frequency Bloch Simulator")
    print("---------------------------------------")
    ntime = len(b1)    # Number of Time, RF, and Grad points

    # ====================== RF (B1) =========================
    # If complex, split up.  If real, allocate a zero imaginary part.
    b1 = np.array(b1)
    if np.iscomplexobj(b1):
        b1r = b1.real.copy()
        b1i = b1.imag.copy()
    else:
        b1r = b1.real.copy()
        b1i = b1r.copy() * 0

    # ======================= Gradients =========================
    gr = np.array(gr)
    ngrad = gr.size     # Total number of gradient samples across axes.
    gx = np.zeros((ntime,),'float')
    gy = np.zeros((ntime,),'float')
    gz = np.zeros((ntime,),'float')
    # NOTE(review): gradients are taken from a FLAT view of gr -- the first
    # ntime samples are X, the next ntime are Y, the next Z.  Confirm callers
    # pass a flat (or Fortran-ordered) array rather than a C-ordered Nx3.
    gx[0:ntime] = gr[0:ntime]               # X-gradient is first N points.
    if ngrad >= 2*ntime:                    # Need to allocate Y-gradient.
        gy[0:ntime] = gr[ntime:ntime*2]
    if (ngrad >= 3*ntime):                  # Need to allocate Z-gradient.
        gz[0:ntime] = gr[ntime*2:ntime*3]
    # Warning if Gradient length is not 1x, 2x, or 3x RF length.
    if (ngrad != ntime) and (ngrad != 2*ntime) and (ngrad != 3*ntime):
        print("Gradient length differs from B1 length")

    # === Time points =====
    #
    # THREE Cases:
    #   1) Single value given -> this is the interval length for all.
    #   2) List of intervals given.
    #   3) Monotonically INCREASING list of end times given.
    #
    # For all cases, the goal is for tp to hold the intervals.
    if isinstance(tp,float):            # === Case 1 ===
        tstep = tp
        tp = np.zeros((ntime,),'float') + tstep
    elif len(tp) == 1:                  # === Case 1 (length-1 sequence) ===
        tstep = tp
        tp = np.zeros((ntime,),'float') + tstep
    elif len(tp) != ntime:
        print("Time-point length differs from B1 length")
    else:
        tp = np.array(tp)
        # times2intervals() detects case 3 and converts end times to widths.
        posflag, tp = times2intervals( tp )
        if posflag:
            print("Times are monotonically increasing. ")

    # === Relaxation Times =====
    t1 = float(t1)
    t2 = float(t2)

    # === Frequency Points =====
    if isiterable(df):
        df = np.array(df)
    else:
        df = np.array([df])
    nf = len(df)

    # === Position Points =====
    if isiterable(dp):
        dp = np.array(dp)
    else:
        dp = np.array([dp])
    if len(dp.shape) == 1:
        dp.shape = 1,dp.shape[0]
    nposN, nposM = dp.shape
    npos = nposM
    dx = np.zeros((npos,), 'float')
    dy = np.zeros((npos,), 'float')
    dz = np.zeros((npos,), 'float')
    if (nposN==3):          # Assume 3 position dimensions given
        dx[0:npos] = dp[0]
        dy[0:npos] = dp[1]
        dz[0:npos] = dp[2]
    elif (nposN==2):        # Assume only 2 position dimensions given
        dx[0:npos] = dp[0]
        dy[0:npos] = dp[1]
    else:
        dx[0:npos] = dp[0]

    nfnpos = nf*npos        # Just used to speed things up below.

    # ===== Mode, defaults to 0 (simulate single endpoint, transient). ====
    md = int(mode)
    if (md & 2):
        ntout = ntime       # Include time points.
    else:
        ntout = 1
    if (md & 1)==0:
        print("Simulation from Initial Condition.")
    else:
        print("Simulation of Steady-State.")
    if (md & 2)==0:
        print("Simulation to Endpoint. ")
    else:
        print("Simulation over Time.")

    # ===== Allocate Output Magnetization vectors arrays.
    mxin = np.zeros((nf, npos, ntout), 'float')
    myin = np.zeros((nf, npos, ntout), 'float')
    mzin = np.zeros((nf, npos, ntout), 'float')

    # ===== If Initial Magnetization is given...
    # NOTE(review): this check assumes mx/my/mz support 2-D tuple indexing
    # (i.e. numpy arrays) and that len(mx) == nf*npos -- confirm with callers.
    if mx and my and mz and len(mx)==nfnpos and len(my)==nfnpos and len(mz)==nfnpos:
        # Set output magnetization to that passed.
        # If multiple time points, then just the first is set.
        print("Using Specified Initial Magnetization.")
        for ipos in range(npos):
            for ifreq in range(nf):
                mxin[ifreq,ipos,0] = mx[ifreq,ipos]
                myin[ifreq,ipos,0] = my[ifreq,ipos]
                mzin[ifreq,ipos,0] = mz[ifreq,ipos]
    else:
        if mx and my and mz:    # Magnetization given, but wrong size!
            print("Initial magnetization passed, but not Npositions x Nfreq. ")
            print(" --> Using [0; 0; 1] for initial magnetization. ")
        # Default: fully relaxed magnetization along +z.
        for ipos in range(npos):
            for ifreq in range(nf):
                mxin[ifreq,ipos,0] = 0
                myin[ifreq,ipos,0] = 0
                mzin[ifreq,ipos,0] = 1

    # ======= Do The Simulation! ======
    print("Calling blochsimfz_par() function.")
    # Bug fix: the X-gradient argument was previously passed as the undefined
    # name `x` instead of the array `gx` built above.
    mxout, myout, mzout = blochsimfz(b1r, b1i, gx, gy, gz,
                                     tp, ntime, t1, t2,
                                     df, nf, dx, dy, dz,
                                     npos, mxin, myin, mzin,
                                     md, cpu_count, gamma=gamma)

    # ======= Reshape Output Matrices ======
    if (ntout > 1) and (nf > 1) and (npos > 1):
        outsize = nf, npos, ntout
    else:   # Basically "squeeze" the matrix.
        if nf <= 1:
            outsize = npos, ntout
        else:
            if npos <= 1:
                outsize = nf, ntout
            else:
                outsize = nf, npos, ntout
    mxout.shape = outsize
    myout.shape = outsize
    mzout.shape = outsize
    return mxout, myout, mzout
#------------------------------------------------------------------------------
# Testing
def hsinc(npts, ncycles, filter='hamming'):
    """
    Build a sinc pulse of *npts* samples spanning *ncycles* sinc cycles
    (time-bandwidth of 4 * ncycles), optionally Hamming-apodized.
    """
    # Normalized time axis covering [-1, 1).
    t = (np.arange(npts) - npts/2.0) / (npts/2.0)
    # The tiny offset dodges the 0/0 singularity at t == 0.
    arg = 2*np.pi*ncycles*t + 0.00001
    pulse = np.sin(arg) / arg
    if filter == 'hamming':
        pulse = pulse * 4 * ncycles * (0.54 + 0.46*np.cos(np.pi*t)) / npts
    return pulse
def main():
    """
    Demo driver: simulate a Hamming-windowed sinc excitation over a grid of
    positions and off-resonance frequencies, then plot the RF pulse, |Mxy|
    (with real/imag parts) and Mz for one frequency slice.

    NOTE(review): relies on pylab-style globals (subplot, plot, xlabel,
    ylabel, show, abs, real, imag) presumably star-imported earlier in the
    file -- confirm before running standalone.
    """
    gamma = 26751.3         # 1H Hz/gauss
    my_sinc = hsinc(120,6)
    T = 0.00002             # per-sample dwell time, seconds
    # Pad the pulse and gradient with 4 zero samples on each side.
    b1 = np.concatenate((np.zeros(4), my_sinc, np.zeros(4)))
    g = np.concatenate((np.zeros(4), 1*np.ones(120), np.zeros(4)))
    b1 = 0.5*b1/np.max(b1);   # normalize peak B1 to 0.5 G
    x = np.arange(-5,5,0.05)              # positions, cm
    f = np.arange(-1000.0,2000.0,200.0)   # off-resonance frequencies, Hz
    t = np.arange(1,len(b1)+1)*T;         # end time of each interval
    mx, my, mz = bloch_multi(b1,g,t,1,.2,f,x,mode=0, cpu_count=0, gamma=gamma)
    mxy = mx + 1j*my
    ioff = int(len(f)/2)-1    # frequency slice index to display
    subplot(3,1,1)
    xlabel('Time [ms]')
    plot(t*1000,b1)
    subplot(3,1,2)
    plot(x, abs(mxy[ioff,:]), x, real(mxy[ioff,:]), x, imag(mxy[ioff,:]) )
    xlabel('Position [cm]')
    ylabel('Magnetization |Mxy|')
    subplot(3,1,3)
    plot(x, mz[ioff,:])
    xlabel('Position [cm]')
    ylabel('Magnetization Mz')
    show()
    bob = 10        # leftover debug scratch; has no effect
    bob = 2*bob
# Script entry point; the commented lines enable profiling of the demo.
if __name__ == "__main__":
    main()
    #import cProfile
    #cProfile.run('main()')
| 26,793 | 9,632 |
from abc import ABCMeta, abstractmethod
from future.utils import with_metaclass
from spectator.atomicnumber import AtomicNumber
from spectator.clock import SystemClock
class AbstractGauge(with_metaclass(ABCMeta)):
    """Interface for gauges: meters that report the last value set."""

    @abstractmethod
    def get(self):
        """Return the current gauge value."""
        pass

    @abstractmethod
    def set(self, value):
        """Record a new gauge value."""
        pass

    @abstractmethod
    def _measure(self):
        """Return a mapping of meter id to measured value."""
        pass
class NoopGauge(AbstractGauge):
    """Gauge that discards every write and always reads as zero."""

    def get(self):
        """Always report zero."""
        return 0

    def set(self, value):
        """Ignore the value entirely."""
        pass

    def _measure(self):
        """Nothing is ever recorded, so there is nothing to report."""
        return {}
class Gauge(AbstractGauge):
    """Gauge reporting the last value set, which expires after `ttl` seconds."""

    ttl = 15 * 60  # seconds a sample stays valid without a new set()

    def __init__(self, meterId, clock=SystemClock()):
        self.meterId = meterId
        self._clock = clock
        # NaN marks "never set".  NaN comparisons are always False, so an
        # unset gauge is treated as not-expired and simply reports NaN.
        self._last_update = AtomicNumber(float('nan'))
        self._value = AtomicNumber(float('nan'))

    def get(self):
        """Return the current value (NaN if never set or already cleared)."""
        return self._value.get()

    def set(self, value):
        """Record *value* and refresh the expiry timestamp."""
        self._last_update.set(self._clock.wall_time())
        self._value.set(value)

    def _has_expired(self):
        """Return True once more than `ttl` seconds passed since last set()."""
        return (self._clock.wall_time() - self._last_update.get()) > self.ttl

    def _measure(self):
        """Return {meter_id: value}, clearing the stored value on expiry."""
        # Renamed local from `id` to avoid shadowing the builtin.
        meter_id = self.meterId.with_default_stat('gauge')
        if self._has_expired():
            v = self._value.get_and_set(float('nan'))
        else:
            v = self._value.get()
        return {meter_id: v}
| 1,318 | 429 |
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Mario Lassnig, <mario.lassnig@cern.ch>, 2013
# - Cedric Serfon, <cedric.serfon@cern.ch>, 2014
# - Martin Barisits, <martin.barisits@cern.ch>, 2014
# - Ralph Vigne, <ralph.vigne@cern.ch>, 2015
# - Vincent Garonne, <vincent.garonne@cern.ch>, 2012-2015
from json import dumps
from requests.status_codes import codes
from rucio.client.baseclient import BaseClient
from rucio.client.baseclient import choice
from rucio.common.utils import build_url
class AccountLimitClient(BaseClient):
    """Account limit client class for working with account limits"""

    ACCOUNTLIMIT_BASEURL = 'accountlimits'

    def __init__(self, rucio_host=None, auth_host=None, account=None, ca_cert=None,
                 auth_type=None, creds=None, timeout=None, user_agent='rucio-clients'):
        super(AccountLimitClient, self).__init__(rucio_host, auth_host, account,
                                                 ca_cert, auth_type, creds,
                                                 timeout, user_agent)

    def set_account_limit(self, account, rse, bytes):
        """
        Sends the request to set an account limit for an account.

        :param account: The name of the account.
        :param rse: The rse name.
        :param bytes: An integer with the limit in bytes.
        :return: True if quota was created successfully else False.
        """
        payload = dumps({'bytes': bytes})
        url = build_url(choice(self.list_hosts),
                        path='/'.join([self.ACCOUNTLIMIT_BASEURL, account, rse]))
        response = self._send_request(url, type='POST', data=payload)
        if response.status_code != codes.created:
            exc_cls, exc_msg = self._get_exception(headers=response.headers,
                                                   status_code=response.status_code,
                                                   data=response.content)
            raise exc_cls(exc_msg)
        return True

    def delete_account_limit(self, account, rse):
        """
        Sends the request to remove an account limit.

        :param account: The name of the account.
        :param rse: The rse name.
        :return: True if quota was removed successfully. False otherwise.
        :raises AccountNotFound: if account doesn't exist.
        """
        url = build_url(choice(self.list_hosts),
                        path='/'.join([self.ACCOUNTLIMIT_BASEURL, account, rse]))
        response = self._send_request(url, type='DEL')
        if response.status_code != codes.ok:
            exc_cls, exc_msg = self._get_exception(headers=response.headers,
                                                   status_code=response.status_code,
                                                   data=response.content)
            raise exc_cls(exc_msg)
        return True
| 2,763 | 889 |
import os
from dotenv import load_dotenv, find_dotenv
import requests
import smtplib, ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import requests_cache
load_dotenv(find_dotenv())  # pull API keys/credentials from a local .env file
# Secrets come from the environment; any of these is None when unset.
ALPHA_API_KEY = os.getenv('ALPHA_API_KEY')            # Alpha Vantage market-data key
NEWS_API = os.getenv('GET_NEWS_KEY')                  # newsapi.org key
EMAIL_PASSWORD = os.getenv('EMAIL_ACCOUNT_PASSWORD')  # sender mailbox password
SMTP_SERVER = os.getenv('SMTP_SERVER')                # outgoing mail host
def myStockInfo(StockSymbol_1, StockSymbol_2, StockSymbol_3):
    """
    Fetch daily adjusted time-series data for three stock symbols from
    Alpha Vantage, caching each response for 24 hours.

    :return: list of three parsed JSON responses, in argument order.
    """
    responses = []
    for i, symbol in enumerate((StockSymbol_1, StockSymbol_2, StockSymbol_3), start=1):
        url = ('https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol='
               + symbol + '&apikey=' + ALPHA_API_KEY)
        # Keep the original per-slot cache names so existing caches are reused.
        requests_cache.install_cache('myStock%d' % i, expire_after=86400)
        responses.append(requests.get(url).json())
    return responses
def myStockNewsInfo(StockSymbol_1, StockSymbol_2, StockSymbol_3):
    """
    Fetch news articles for three stock symbols from newsapi.org, caching
    each response for 24 hours.

    :return: dict mapping each symbol to its parsed JSON response.
    """
    results = {}
    for i, symbol in enumerate((StockSymbol_1, StockSymbol_2, StockSymbol_3), start=1):
        url = 'https://newsapi.org/v2/everything?q=' + symbol + '&apiKey=' + NEWS_API
        # Keep the original per-slot cache names so existing caches are reused.
        requests_cache.install_cache('myStockNews%d' % i, expire_after=86400)
        results[symbol] = requests.get(url).json()
    return results
def searchStock(symbol):
    """Fetch daily adjusted time-series data for *symbol*, cached for a day."""
    url = ('https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol='
           + symbol + '&apikey=' + ALPHA_API_KEY)
    requests_cache.install_cache("{}StockDataFromSearch".format(symbol), expire_after=86400)
    return requests.get(url).json()
def fetchNews(symbol):
    """Fetch news articles mentioning *symbol*, cached for a day."""
    url = 'https://newsapi.org/v2/everything?q=' + symbol + '&apiKey=' + NEWS_API
    requests_cache.install_cache("{}NewsData".format(symbol), expire_after=86400)
    return requests.get(url).json()
# Sends a fixed login-notification email over implicit SSL (port 465).
def send_email_SSL():
    """Notify the admin address that a user logged in, via SMTP over SSL."""
    print("Send_Email_SSL")
    port = 465  # this is SSL
    smtp_server = SMTP_SERVER  # smtp server address
    sender_email = "caballoscuba@gmail.com"  # Enter your email
    receiver_email = "osky.op@gmail.com"
    password = EMAIL_PASSWORD  # NOTE(review): None if env var unset -- login would fail
    message = """\
Subject: Stocker Eyes
    New Notification. A user just logged in"""
    context = ssl.create_default_context()
    with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:
        server.login(sender_email, password)
        server.sendmail(sender_email, receiver_email, message)
        print("Send_Email_SSL2")
def send_email_starttls(email, textEmail, html1):
    """
    Send a multipart (plain-text + HTML) notification email via STARTTLS.

    :param email: recipient address (To:)
    :param textEmail: plain-text body
    :param html1: HTML body
    """
    print("Send_Email_starttls")
    port = 587                  # STARTTLS submission port
    smtp_server = SMTP_SERVER   # smtp server address
    sender_email = "caballoscuba@gmail.com"  # Enter your email
    password = EMAIL_PASSWORD
    receiver_email = email
    cc_email = "oo89@njit.edu"
    message = MIMEMultipart("alternative")
    message["Subject"] = "Stocker-Eyes Notification"
    message["From"] = sender_email
    message["To"] = receiver_email
    message["Cc"] = cc_email
    # Attach plain text first, HTML last: clients render the last part
    # they are able to display.
    message.attach(MIMEText(textEmail, "plain"))
    message.attach(MIMEText(html1, "html"))
    # Create a secure SSL context
    context = ssl.create_default_context()
    # Bug fix: initialize server so the finally clause cannot raise
    # NameError when smtplib.SMTP() itself fails.
    server = None
    try:
        print("Send_Email_starttls2")
        server = smtplib.SMTP(smtp_server, port)
        server.ehlo()                    # Can be omitted
        server.starttls(context=context) # Secure the connection
        server.ehlo()                    # Can be omitted
        server.login(sender_email, password)
        # Bug fix: the Cc header alone does not route mail; the Cc address
        # must also be in the envelope recipient list.
        server.sendmail(sender_email, [receiver_email, cc_email], message.as_string())
    except Exception as e:
        print("ERROR1")
        print(e)
    finally:
        if server is not None:
            server.quit()
| 5,146 | 1,799 |
import warnings
from collections import Sequence
import rply
from rply import ParserGenerator
from ice import __version__
from . import lexer
name_seq = 0  # module-wide counter backing gensym-style temporary names

def get_temp_name():
    """Return a fresh, unique Symbol of the form '_gs1', '_gs2', ..."""
    global name_seq
    name_seq += 1
    name_symbol = Symbol('_gs%s' % name_seq)
    return name_symbol
class ParsingError(Exception):
    """Raised when parsing fails; records the file and source position."""

    def __init__(self, file_path, lineno=1, colno=1):
        self.file_path = file_path
        self.lineno = lineno
        self.colno = colno

    def __str__(self):
        return (f'ParsingError: file={self.file_path}'
                f' lineno={self.lineno}'
                f' colno={self.colno}')
def parse(lexer, filename="<string>"):
    """Run the generated parser over *lexer*, mapping rply failures to ParsingError."""
    try:
        with warnings.catch_warnings():
            # pg.build() emits warnings (e.g. about unused tokens); silence them.
            warnings.simplefilter('ignore')
            return pg.build().parse(lexer)
    except rply.errors.ParsingError as err:
        pos = err.getsourcepos()
        if pos is None:
            raise ParsingError(filename)
        raise ParsingError(filename, pos.lineno, pos.colno)
class Symbol(object):
    """Identifier node in the parse tree, tagged with its source position."""

    def __init__(self, name, lineno=0, col_offset=0):
        self.name = name
        self.outer_name = name
        self.lineno = lineno
        self.col_offset = col_offset

    def eval(self, env):
        """Placeholder for the node-evaluation interface; returns None."""
        pass

    def __repr__(self):
        return self.outer_name

    def __str__(self):
        return self.outer_name

    def __eq__(self, other):
        # Only Symbols with the same internal name compare equal.
        return type(other) is Symbol and self.name == other.name

    def __hash__(self):
        return (self.name.__hash__() << 16) + self.outer_name.__hash__()
class Keyword(object):
    """Keyword literal ':name'; calling it looks *name* up in a table."""

    def __init__(self, name, lineno=0, col_offset=0):
        self.name = name
        self.lineno = lineno
        self.col_offset = col_offset
        self.repr = ':' + self.name

    def __repr__(self):
        return self.repr

    def __str__(self):
        return self.name

    def __call__(self, table):
        return table[self.name]

    def __eq__(self, other):
        # Only Keywords with the same name compare equal.
        return type(other) is Keyword and self.name == other.name

    def __hash__(self):
        return self.name.__hash__()
# Grammar generator for the ice language: the token list must match the
# lexer's token names, precedence resolves shift/reduce conflicts (lowest
# binding first), and cache_id lets rply reuse the generated tables across
# runs of the same version.
pg = ParserGenerator(['NUMBER', 'OPPLUS', 'OPMINUS', 'OPTIMES', 'OPDIV', 'OPLEQ', 'OPGEQ', 'OPEQ', 'OPNEQ',
                      'OPLT', 'OPGT', 'OPBITOR', 'OPPOW', 'MACRO_NAME', 'LET',  # 'UNION', 'PREDICATE',
                      'INFIX_MACRO_NAME', 'INFIX_1_MACRO_NAME', 'INFIX_2_MACRO_NAME', 'INFIX', 'INFIX_1', 'INFIX_2',
                      'OPRSHIFT', 'OPLSHIFT', 'OPFLOORDIV', 'OPBITAND', 'OPBITXOR', 'USER_DEFINED_KEYWORD',
                      'OPAND', 'OPOR', 'OPIS', 'NOT', 'PERCENT', 'EXPORT', 'ASSERT',
                      'LPAREN', 'RPAREN', 'TRUE', 'FALSE', 'TQUOTE_STR', 'DQUOTE_STR', 'SQUOTE_STR', 'NAME_LPAREN',
                      'AT', 'DOT_NAME', 'DOT_NAME_LPAREN', 'TQUOTE_RAW_STR', 'DQUOTE_RAW_STR', 'SQUOTE_RAW_STR',
                      'NAME', 'EQUALS', 'IF', 'ELSEIF', 'ELSE', 'COLON', 'SEMI', 'DATA', 'IMPORT', 'INCLUDE',
                      'LBRACK', 'RBRACK', 'COMMA', 'FUNC', 'DOC', 'CALET', 'PIPELINE', 'PIPELINE_BIND',
                      'PIPELINE_FIRST', 'PIPELINE_FIRST_BIND', 'RETURN', 'CALL', 'DO',
                      'LBRACE', 'RBRACE', 'MATCH', 'CASE', 'DEFM', 'RECORD', 'AMP', 'FATARROW', 'THINARROW',
                      'YIELD', 'FROM', 'USE', 'FOR', 'IN', 'TRY', 'FINALLY', 'EXCEPT',
                      'AS', 'RAISE', 'WITH', 'MACRO', 'QUOTE', 'QUASI_QUOTE', 'UNQUOTE', 'UNQUOTE_SPLICING',
                      'QUOTE_LPAREN', 'QUASI_QUOTE_LPAREN', 'UNQUOTE_LPAREN', 'UNQUOTE_SPLICING_LPAREN'],
                     precedence=[('left', ['EQUALS']),
                                 ('left', ['NOT']),
                                 ('left', ['OPIS']),
                                 ('left', ['IN']),
                                 ('left', ['AS', 'OPEQ', 'OPLEQ', 'OPGEQ', 'OPNEQ', 'OPLT', 'OPGT', 'OPAND', 'OPOR',
                                           'PIPELINE', 'PIPELINE_BIND', 'PIPELINE_FIRST', 'PIPELINE_FIRST_BIND',
                                           'INFIX_MACRO_NAME']),
                                 ('left', ['OPPLUS', 'OPMINUS', 'INFIX_1_MACRO_NAME']),
                                 ('left', ['LBRACK', 'RBRACK']),
                                 ('left', ['OPTIMES', 'OPDIV', 'OPFLOORDIV', 'PERCENT', 'OPBITAND', 'OPBITOR',
                                           'OPBITXOR', 'OPPOW', 'OPRSHIFT', 'OPLSHIFT', 'INFIX_2_MACRO_NAME']),
                                 ('left', ['IF'])],
                     cache_id='ice_' + __version__)
# ---- Top-level statement grammar -------------------------------------------
# A program is a block; a block is a list of statements.  Parsed statements
# are plain Python lists of Symbols/values (an s-expression style AST).
@pg.production('program : block')
def program(p):
    return p[0]
@pg.production('block : stmts')
def block(p):
    return p[0]
@pg.production('stmts : stmts stmt')
def stmts_b(p):
    # Bare semicolons parse to None; drop them from the statement list.
    if p[1] is None:
        return p[0]
    else:
        return p[0] + [p[1]]
@pg.production('stmts : stmt')
def stmts_stmt(p):
    if p[0] is None:
        return []
    else:
        return [p[0]]
@pg.production('stmt : SEMI')
def stmt_semi(p):
    # Empty statement: yields None, filtered out above.
    pass
# Every statement alternative shares the same pass-through reduction.
@pg.production('stmt : binop_expr')
@pg.production('stmt : let_expr')
@pg.production('stmt : as_expr')
@pg.production('stmt : deco_expr')
@pg.production('stmt : func_expr')
@pg.production('stmt : funcm_expr')
# @pg.production('stmt : record_expr')
@pg.production('stmt : data_expr')
@pg.production('stmt : import_expr')
@pg.production('stmt : from_expr')
@pg.production('stmt : macros_stmt')
@pg.production('stmt : try_stmt')
@pg.production('stmt : with_stmt')
@pg.production('stmt : raise_stmt')
@pg.production('stmt : return_stmt')
@pg.production('stmt : macro_stmt')
@pg.production('stmt : infix_macro_stmt')
@pg.production('stmt : q_stmt')
@pg.production('stmt : qq_stmt')
@pg.production('stmt : assert_stmt')
def stmt(p):
    return p[0]
# # TODO fukkatu?
# @pg.production('call_macro_stmt : id_expr COLON do_suite')
# def call_macro_stmt(p):
# head = []
# body = p[2]
# rest = []
# return process_calling_macro(p[0], head, body, rest)
# @pg.production('call_macro_stmt : id_expr COLON do_suite rest')
# def call_macro_expr(p):
# head = []
# body = p[2]
# rest = p[3]
# return process_calling_macro(p[0], head, body, rest)
# ---- Imports and dotted names ----------------------------------------------
@pg.production('import_expr : IMPORT names_list')
def import_expr(p):
    return [Symbol('import')] + p[1]
@pg.production('names_list : names_list COMMA names')
def names(p):
    return p[0] + [p[2]]
@pg.production('names_list : names')
def names_single(p):
    return [p[0]]
# `names` joins the dotted parts ('a', 'b') into one Symbol 'a.b'.
@pg.production('names : _names')
def names(p):
    return Symbol('.'.join(p[0]))
@pg.production('_names : NAME')
def _names_one(p):
    return [p[0].getstr()]
@pg.production('_names : _names DOT_NAME')
def _names(p):
    # DOT_NAME lexemes include the leading '.'; strip it.
    return p[0] + [p[1].getstr()[1:]]
@pg.production('names_lparen : _names_lparen')
def names(p):
    return Symbol('.'.join(p[0]))
@pg.production('_names_lparen : NAME_LPAREN')
def _names_one(p):
    # NAME_LPAREN lexemes include the trailing '('; strip it.
    return [p[0].getstr()[:-1]]
@pg.production('_names_lparen : _names DOT_NAME_LPAREN')
def _names(p):
    # Strip both the leading '.' and the trailing '('.
    return p[0] + [p[1].getstr()[1:-1]]
# @pg.production('include_expr : INCLUDE string')
# def include_expr(p):
#     return [Symbol('require'), p[1]]
@pg.production('lbrace : LBRACE')
def lbrace(p):
    pass
@pg.production('rbrace : RBRACE')
def rbrace(p):
    pass
@pg.production('namelist : namelist COMMA name')
def names(p):
    return p[0] + [p[2]]
@pg.production('namelist : name')
def names_single(p):
    return [p[0]]
@pg.production('name : NAME')
def name(p):
    return token_to_symbol(p[0])
@pg.production('tuple_elt : binop_expr COMMA')
def tuple_elt(p):
    return p[0]
@pg.production('from_expr : FROM names IMPORT namelist')
def from_expr(p):
    return [Symbol('from_import'), p[1], p[3]]
# ---- `use { ... }` macro-import blocks -------------------------------------
@pg.production('macros_stmt : USE lbrace use_expr_list rbrace')
def macros_stmt(p):
    return [Symbol('do')] + p[2]
@pg.production('use_expr_list : use_expr')
def macros_stmt(p):
    return [p[0]]
@pg.production('use_expr_list : use_expr_list use_expr')
def macros_stmt(p):
    # Bug fix: append the new use-expression as a single element.  The base
    # case wraps one use_expr as [p[0]], so this rule must do p[0] + [p[1]];
    # the previous p[0] + p[1] spliced the expression's parts into the list.
    return p[0] + [p[1]]
@pg.production('use_expr : namelist FROM names')
def use_expr(p):
    """Parse-time side of `use <macros> from <module>`: imports the module
    now so its macro names (and any user-defined keywords it declares via
    get_keywords()) can be registered with the lexer before parsing
    continues.  Returns a (use <namelist> <module>) node."""
    # NOTE(review): the source filename is read off the FROM token -- assumes
    # rply tokens carry a .filename attribute set by this project's lexer.
    filename = p[1].filename
    module_name = p[2].name
    import importlib
    mod = importlib.import_module(module_name)
    if hasattr(mod, 'get_keywords'):
        keywords = mod.get_keywords()
    else:
        keywords = {}
    for macro_name in p[0]:
        macro_name_str = str(macro_name)
        lexer.add_macro_name((filename, macro_name_str))
        if macro_name_str in keywords:
            for user_defined_keyword in keywords[macro_name_str]:
                print(user_defined_keyword)  # NOTE(review): debug print left in
                lexer.add_user_defined_keyword((filename, user_defined_keyword))
        # else:
        #     raise SyntaxError(macro_name_str)
    return [Symbol('use'), p[0], p[2]]
#@pg.production('use_infix_expr : USE INFIX namelist FROM names')
#def use_expr(p):
# for macro_name in p[1]:
# lexer.add_infix_macro_name(str(macro_name))
# return [Symbol('use'), p[1], p[3]]
# ---- Suites (statement bodies) and try/except ------------------------------
# `suite` wraps braces in a (do ...) node; `suite2` yields the raw statement
# list so callers can splice it into their own node.
@pg.production('suite : binop_expr')
def suite_expr(p):
    return p[0]
@pg.production('suite : lbrace stmts rbrace')
def suite_stmts(p):
    return [Symbol('do')] + p[1]
# @pg.production('suite : NEWLINE INDENT stmts DEDENT END')
# def suite_stmts(p):
#     return [Symbol('do')] + p[2]
@pg.production('suite2 : lbrace stmts rbrace')
def suite2_stmts(p):
    return p[1]
# @pg.production('suite2 : NEWLINE INDENT stmts DEDENT END')
# def suite2_stmts(p):
#     return p[2]
@pg.production('try_stmt : TRY suite2 finally_cls')
def try_finally_stmt(p):
    return [Symbol('try')] + p[1] + [p[2]]
@pg.production('try_stmt : TRY suite2 except_cls_list')
def try_except_stmt(p):
    return [Symbol('try')] + p[1] + p[2]
@pg.production('try_stmt : TRY suite2 except_cls_list finally_cls')
def try_excepts_finally_stmt(p):
    return [Symbol('try')] + p[1] + p[2] + [p[3]]
@pg.production('except_cls_list : except_cls_list except_cls')
def except_cls_list(p):
    return p[0] + [p[1]]
@pg.production('except_cls_list : except_cls')
def except_cls_list(p):
    return [p[0]]
@pg.production('except_cls : EXCEPT binop_expr AS NAME suite2')
def except_cls(p):
    return [Symbol('except'), p[1], token_to_symbol(p[3])] + p[4]
@pg.production('finally_cls : FINALLY suite2')
def finally_cls(p):
    return [Symbol('finally')] + p[1]
@pg.production('raise_stmt : RAISE binop_expr')
def raise_stmt(p):
    return [Symbol('raise'), p[1]]
#@pg.production('do_suite : binop_expr')
#def suite_expr(p):
#    return [Symbol('do'), p[0]]
@pg.production('do_suite : lbrace stmts rbrace')
def suite_stmts(p):
    return [Symbol('do')] + p[1]
# @pg.production('do_suite : NEWLINE INDENT stmts DEDENT END')
# def suite_stmts(p):
#     return [Symbol('do')] + p[2]
#@pg.production('labeled_expr : binop_expr COLON binop_expr')
#def labeled_expr(p):
# # return [Symbol('label'), p[0], p[2]]
# return [Symbol('call_macro'), p[0], [], [Symbol('do'), p[2]], []]
class Label:
    """Thin wrapper marking an expression used as a macro-call label."""

    def __init__(self, label):
        self.label = label
# @pg.production('labeled_expr : binop_expr do_suite')
# def labeled_expr(p):
# name = Label(p[0])
# head = []
# body = p[1]
# rest = None
# return [Symbol('call_macro'), name, [head, body, rest]]
#@pg.production('cont_labeled_expr : id_expr COLON do_suite')
#def labeled_expr(p):
# name = p[0]
# head = []
# body = p[2]
# rest = None
# return [Symbol('call_macro'), name, [head, body, rest]]
#@pg.production('cont_labeled_expr : id_expr COLON do_suite cont_labeled_expr')
#def labeled_expr(p):
# name = p[0]
# head = []
# body = p[2]
# rest = p[3][1:]
# return [Symbol('call_macro'), name, [head, body, rest]]
#@pg.production('labeled_expr : binop_expr COLON do_suite cont_labeled_expr')
#def labeled_expr(p):
# name = p[0]
# head = []
# body = p[2]
# rest = p[3][1:]
# return [Symbol('call_macro'), name, [head, body, rest]]
# @pg.production('labeled_expr : binop_expr do_suite labeled_expr')
# def labeled_expr(p):
# name = Label(p[0])
# head = []
# body = p[1]
# rest = p[2][1:]
# return [Symbol('call_macro'), name, [head, body, rest]]
# @pg.production('labeled_expr : binop_expr do_suite call_macro_expr')
# def labeled_expr(p):
# name = Label(p[0])
# head = []
# body = p[1]
# rest = p[2][1:]
# return [Symbol('call_macro'), name, [head, body, rest]]
# @pg.production('user_defined_stmt : NAME NAME COLON trailing_dict')
# def user_defined_stmt(p):
# return [Symbol('val'),
# token_to_symbol(p[1]),
# [Symbol(p[0].getstr() + '.define'), p[1].getstr(), p[3]]]
# @pg.production('user_defined_stmt : NAME id_expr app_args COLON trailing_dict')
# def user_defined_stmt(p):
# return [Symbol(p[0].getstr() + '.define'), p[1]] + p[2] + [p[4]]
# @pg.production('user_defined_stmt : NAME NAME DOT_NAME app_args COLON trailing_dict')
# def user_defined_stmt(p):
# return [Symbol(p[0].getstr() + '.define'), Symbol(p[1].getstr() + '.' + p[2].getstr()[1:])] + p[3] + [p[5]]
# ---- Macro definitions ------------------------------------------------------
# Each definition registers the macro's name with the lexer (keyed by file)
# so later occurrences lex as macro tokens, then builds a (mac ...) node.
@pg.production('macro_stmt : MACRO fun_header suite2')
def macro_stmt(p):
    fun_name, fun_args = p[1]
    lexer.add_macro_name((p[0].filename, str(fun_name)))
    return [Symbol('mac'), fun_name, fun_args] + p[2]
@pg.production('infix_macro_stmt : INFIX MACRO fun_header suite2')
def macro_stmt(p):
    fun_name, fun_args = p[2]
    lexer.add_infix_macro_name((p[0].filename, str(fun_name)))
    return [Symbol('mac'), fun_name, fun_args] + p[3]
@pg.production('infix_macro_stmt : INFIX_1 MACRO fun_header suite2')
def macro_stmt(p):
    fun_name, fun_args = p[2]
    lexer.add_infix_1_macro_name((p[0].filename, str(fun_name)))
    return [Symbol('mac'), fun_name, fun_args] + p[3]
@pg.production('infix_macro_stmt : INFIX_2 MACRO fun_header suite2')
def macro_stmt(p):
    fun_name, fun_args = p[2]
    lexer.add_infix_2_macro_name((p[0].filename, str(fun_name)))
    return [Symbol('mac'), fun_name, fun_args] + p[3]
# ---- Quoting, assert, with, return, and token helpers ----------------------
@pg.production('q_stmt : QUOTE suite')
def q_stmt(p):
    return [Symbol('quote'), p[1]]
#@pg.production('quote_expr : QUOTE_LPAREN binop_expr RPAREN')
#@pg.production('quote_expr : QUOTE binop_expr')
@pg.production('quote_expr : QUOTE stmt')
def quote_expr(p):
    return [Symbol('quote'), p[1]]
@pg.production('qq_stmt : QUASI_QUOTE suite')
def qq_stmt(p):
    return [Symbol('quasiquote'), p[1]]
#@pg.production('quasi_quote_expr : QUASI_QUOTE_LPAREN binop_expr RPAREN')
#@pg.production('quasi_quote_expr : QUASI_QUOTE binop_expr')
@pg.production('quasi_quote_expr : QUASI_QUOTE stmt')
def quasi_quote_expr(p):
    return [Symbol('quasiquote'), p[1]]
#@pg.production('uq_expr : UNQUOTE_LPAREN binop_expr RPAREN')
@pg.production('uq_expr : UNQUOTE binop_expr')
def uq_expr(p):
    return [Symbol('unquote'), p[1]]
#@pg.production('uqs_expr : UNQUOTE_SPLICING_LPAREN binop_expr RPAREN')
@pg.production('uqs_expr : UNQUOTE_SPLICING binop_expr')
def uqs_expr(p):
    return [Symbol('unquote_splicing'), p[1]]
@pg.production('assert_stmt : ASSERT binop_expr')
def assert_stmt(p):
    return [Symbol('assert'), p[1]]
@pg.production('with_stmt : WITH with_contexts suite2')
def with_stmt(p):
    return [Symbol('with'), p[1]] + p[2]
@pg.production('with_contexts : with_contexts COMMA with_context')
def with_contexts(p):
    return p[0] + [p[2]]
@pg.production('with_contexts : with_context')
def with_contexts_one(p):
    return [p[0]]
@pg.production('with_context : binop_expr AS NAME')
def with_context(p):
    # Each context is a [expr, bound-name] pair.
    return [p[0], token_to_symbol(p[2])]
@pg.production('return_stmt : RETURN binop_expr')
def raise_stmt(p):
    return [Symbol('return'), p[1]]
# Convert an rply token into a position-tagged Symbol / Keyword.
def token_to_symbol(token):
    return Symbol(token.getstr(), token.getsourcepos().lineno, token.getsourcepos().colno)
def token_to_keyword(token):
    return Keyword(token.getstr(), token.getsourcepos().lineno, token.getsourcepos().colno)
# ---- Bindings ---------------------------------------------------------------
@pg.production('let_expr : LET pattern EQUALS binop_expr')
def let_expr(p):
    # `let <pattern> = <expr>` desugars to a match against a single pattern.
    return [Symbol('match'), p[3], p[1], Symbol('True')]
@pg.production('binding : NAME')
def binding(p):
    return token_to_symbol(p[0])
@pg.production('as_expr : binop_expr AS id_expr')
def let_expr(p):
    # `<expr> as <name>` desugars to (val <name> <expr>).
    return [Symbol('val', 0, 0), p[2], p[0]]
# ---- Expression alternatives and literals ----------------------------------
@pg.production('expr : record_expr')
# @pg.production('expr : func_expr')
# @pg.production('expr : union_expr')
# @pg.production('expr : predicate_expr')
@pg.production('expr : fn_expr')
@pg.production('expr : paren_expr')
@pg.production('expr : if_expr')
@pg.production('expr : prim_expr')
@pg.production('expr : uq_expr')
@pg.production('expr : uqs_expr')
@pg.production('expr : app_expr')
@pg.production('expr : left_app_expr')
@pg.production('expr : dict_expr')
@pg.production('expr : tuple_expr')
@pg.production('expr : match_expr')
@pg.production('expr : yield_expr')
@pg.production('expr : yield_from_expr')
@pg.production('expr : for_expr')
@pg.production('expr : block_expr')
@pg.production('expr : dot_expr')
@pg.production('expr : get_expr')
@pg.production('expr : quote_expr')
@pg.production('expr : quasi_quote_expr')
@pg.production('expr : id_expr')
@pg.production('expr : call_macro_expr')
@pg.production('expr : call_func_expr')
@pg.production('expr : call_method_expr')
def expr(p):
    return p[0]
@pg.production('paren_expr : LPAREN binop_expr RPAREN')
def paren_expr(p):
    return p[1]
@pg.production('prim_expr : NUMBER')
def expr_num(p):
    # Prefer int; fall back to float when the literal has a fraction/exponent.
    num_repr = p[0].getstr()
    try:
        return int(num_repr)
    except ValueError as _:
        return float(num_repr)
@pg.production('prim_expr : string')
def expr_string(p):
    return p[0]
# Strip the surrounding quote characters, then decode escape sequences.
@pg.production('string : DQUOTE_STR')
@pg.production('string : SQUOTE_STR')
def expr_quote_str(p):
    return quote_str(p[0].getstr()[1:-1])
@pg.production('string : TQUOTE_STR')
def expr_triple_quote_str(p):
    return quote_str(p[0].getstr()[3:-3])
def quote_str(string):
new_string = ''
string_enumerator = enumerate(string)
for index, char in string_enumerator:
if char == '\\':
index, char = next(string_enumerator)
if char == 'n':
char = '\n'
elif char == 't':
char = '\t'
elif char == 'r':
char = '\r'
elif char in {'\\', "'", '"'}:
pass
else:
char = '\\' + char
new_string = new_string + char
return new_string
@pg.production('string : DQUOTE_RAW_STR')
@pg.production('string : SQUOTE_RAW_STR')
def expr_quote_raw_str(p):
return p[0].getstr()[2:-1]
@pg.production('string : TQUOTE_RAW_STR')
def expr_triple_quote_raw_str(p):
return p[0].getstr()[4:-3]
@pg.production('prim_expr : bool_expr')
def expr_false(p):
return p[0]
@pg.production('bool_expr : TRUE')
def expr_true(p):
return Symbol('True')
@pg.production('bool_expr : FALSE')
def expr_false(p):
return Symbol('False')
@pg.production('id_expr : NAME')
def id_expr(p):
return token_to_symbol(p[0])
@pg.production('id_expr : AMP')
def id_expr(p):
return Symbol('&')
@pg.production('if_expr : IF binop_expr suite elseif_exprs ELSE suite')
def if_else_expr(p):
return [Symbol('if'), p[1], p[2]] + p[3] + [p[5]]
@pg.production('if_expr : IF binop_expr suite ELSE suite')
def if_else_expr(p):
return [Symbol('if'), p[1], p[2], p[4]]
@pg.production('elseif_exprs : elseif_exprs elseif_expr')
def elseif_exprs(p):
return p[0] + p[1]
@pg.production('elseif_exprs : elseif_expr')
def elseif_exprs_expr(p):
return p[0]
@pg.production('elseif_expr : ELSEIF binop_expr suite')
def elseif_expr(p):
return [p[1], p[2]]
# @pg.production('elseif_expr :')
# def elseif_expr_empty(p):
# return None
#@pg.production('trailing_if_expr : binop_expr IF binop_expr ELSE binop_expr')
def trailing_if_expr(p):
return [Symbol('if'), p[2], p[0], p[4]]
@pg.production('yield_expr : YIELD binop_expr')
def yield_expr(p):
return [Symbol('yield'), p[1]]
@pg.production('yield_from_expr : YIELD FROM binop_expr')
def yield_from_expr(p):
return [Symbol('yield_from'), p[1]]
def issequence(obj):
return isinstance(obj, Sequence)
def issequence_except_str(obj):
if isinstance(obj, str):
return False
return isinstance(obj, Sequence)
def _compute_underscore_max_num(exps):
max_num = 0
if not issequence_except_str(exps):
exps = (exps,)
for exp in exps:
if isinstance(exp, Symbol) and exp.name.startswith('$'):
try:
n = int(exp.name[1:])
except:
n = 1
elif issequence_except_str(exp):
n = _compute_underscore_max_num(exp)
else:
n = 0
if n > max_num:
max_num = n
return max_num
@pg.production('dot_expr : expr DOT_NAME')
def dot_expr(p):
return [Symbol('getattr'), p[0], p[1].getstr()[1:]]
@pg.production('get_expr : binop_expr LBRACK binop_expr RBRACK')
def get_expr(p):
return [Symbol('get'), p[0], p[2]]
@pg.production('get_expr : binop_expr LBRACK binop_expr COMMA binop_expr RBRACK')
def get_expr(p):
return [Symbol('get'), p[0], [Symbol('v'), p[2], p[4]]]
@pg.production('get_expr : binop_expr LBRACK binop_expr COMMA binop_expr COMMA binop_expr RBRACK')
def get_expr(p):
return [Symbol('get'), p[0], [Symbol('v'), p[2], p[4], p[6]]]
@pg.production('get_expr : binop_expr LBRACK range_start COLON range_end RBRACK')
def get_slice_expr(p):
return [Symbol('get'), p[0], p[2], p[4]]
@pg.production('get_expr : binop_expr LBRACK range_start COLON range_end COLON range_interval RBRACK')
def get_slice_expr(p):
return [Symbol('get'), p[0], p[2], p[4], p[6]]
@pg.production('range_start : ')
@pg.production('range_end : ')
@pg.production('range_interval : ')
def range_start_none(p):
return Symbol('None')
@pg.production('range_start : binop_expr')
@pg.production('range_end : binop_expr')
@pg.production('range_interval : binop_expr')
def range_start_none(p):
return p[0]
@pg.production('for_expr : LBRACK binop_expr FOR pattern IN binop_expr RBRACK')
def for_expr(p):
pattern = p[3]
items = p[5]
body = p[1]
return [Symbol('tuple_of'), body, [pattern, items]]
@pg.production('for_expr : LBRACK binop_expr FOR pattern IN binop_expr IF binop_expr RBRACK')
def for_expr_if(p):
pattern = p[3]
items = p[5]
body = p[1]
when = p[7]
return [Symbol('tuple_of'), body, [pattern, items, Keyword('when'), when]]
@pg.production('tuple_expr : LBRACK tuple_elts binop_expr RBRACK')
def tuple_expr(p):
return [Symbol('make_tuple')] + p[1] + [p[2]]
@pg.production('tuple_expr : LBRACK binop_expr RBRACK')
def tuple_expr_one(p):
return [Symbol('make_tuple'), p[1]]
@pg.production('tuple_expr : LBRACK tuple_elts binop_expr RBRACK')
def tuple_expr(p):
return [Symbol('make_tuple')] + p[1] + [p[2]]
@pg.production('tuple_expr : LBRACK binop_expr RBRACK')
def tuple_expr_one(p):
return [Symbol('make_tuple'), p[1]]
@pg.production('tuple_expr : LBRACK RBRACK')
def tuple_expr_empty(p):
return [Symbol('make_tuple')]
@pg.production('tuple_elts : tuple_elts tuple_elt')
def tuple_elts(p):
return p[0] + [p[1]]
@pg.production('tuple_elts : tuple_elt')
def tuple_elts_elt(p):
return [p[0]]
@pg.production('tuple_elt : binop_expr COMMA')
def tuple_elt(p):
return p[0]
#@pg.production('deco_expr : decorators binop_expr')
@pg.production('deco_expr : decorators func_expr')
def deco_expr(p):
# return p[1][:2] + p[0] + p[1][2:]
return [Symbol('with_decorator')] + p[0] + [p[1]]
@pg.production('decorators : decorators decorator')
def decorators(p):
return p[0] + [p[1]]
@pg.production('decorators : decorator')
def decorators_single(p):
return [p[0]]
@pg.production('decorator : AT binop_expr')
def decorator(p):
return p[1]
@pg.production('func_expr : FUNC fun_header doc_string suite')
def fun_expr(p):
fun_name, fun_args = p[1]
return [Symbol('def'), fun_name, fun_args, p[3]]
@pg.production('funcm_expr : FUNC NAME doc_string lbrace defm_case_branches rbrace')
def fun_expr(p):
return [Symbol('defm'), token_to_symbol(p[1])] + p[4]
@pg.production('defm_case_branches : defm_case_branches defm_case_branch')
def case_branches(p):
return p[0] + p[1]
@pg.production('defm_case_branches : defm_case_branch')
def case_branches_branch(p):
return p[0]
@pg.production('defm_case_branch : CASE defm_pattern THINARROW lbrace stmts rbrace')
def case_branch(p):
return [p[1], [Symbol('do')] + p[4]]
@pg.production('defm_case_branch : CASE defm_pattern THINARROW binop_expr')
def case_branch(p):
return [p[1], p[3]]
# @pg.production('defm_case_branch : CASE defm_pattern COLON binop_expr NEWLINE')
# def case_branch(p):
# return [p[1], p[3]]
# @pg.production('defm_case_branch : CASE defm_pattern COLON binop_expr SEMI')
# def case_branch(p):
# return [p[1], p[3]]
@pg.production('defm_pattern : app_nc_args')
def pattern(p):
return p[0]
@pg.production('defm_pattern : pattern')
def app_args(p):
return [p[0]]
@pg.production('defm_pattern : pattern COMMA defm_pattern')
def app_args(p):
return [p[0]] + p[2]
@pg.production('fun_header : NAME_LPAREN list_arg_elts id_expr RPAREN')
def fun_header(p):
return [namelparen_to_symbol(p[0]), p[1] + [p[2]]]
@pg.production('fun_header : NAME_LPAREN id_expr RPAREN')
def fun_header(p):
return [namelparen_to_symbol(p[0]), [p[1]]]
@pg.production('fun_header : NAME_LPAREN RPAREN')
def fun_header(p):
return [namelparen_to_symbol(p[0]), []]
@pg.production('fn_expr : id_expr FATARROW suite')
def fun_expr(p):
return [Symbol('fn'), [p[0]], p[2]]
@pg.production('fn_expr : args FATARROW suite')
def fun_expr(p):
return [Symbol('fn'), p[0], p[2]]
@pg.production('args : LPAREN list_arg_elts id_expr RPAREN')
def args(p):
return p[1] + [p[2]]
@pg.production('args : LPAREN id_expr RPAREN')
def args_one(p):
return [p[1]]
@pg.production('args : LPAREN RPAREN')
def args_empty(p):
return []
@pg.production('nc_args : list_arg_elts id_expr')
def args(p):
return p[0] + [p[1]]
@pg.production('nc_args : id_expr')
def args_one(p):
return [p[0]]
@pg.production('list_arg_elts : list_arg_elts list_arg_elt')
def list_arg_elts(p):
return p[0] + [p[1]]
@pg.production('list_arg_elts : list_arg_elt')
def list_arg_elts_elt(p):
return [p[0]]
@pg.production('list_arg_elt : id_expr COMMA')
def list_arg_elt(p):
return p[0]
def _create_underscore_args(exps):
max_num = _compute_underscore_max_num(exps)
if max_num == 1:
return [Symbol('$1')]
else:
return [Symbol('$' + str(n)) for n in range(1, max_num + 1)]
@pg.production('block_expr : FATARROW suite')
def block_expr(p):
block = p[1]
return [Symbol('fn'), _create_underscore_args(block), block]
@pg.production('doc_string : DOC string')
@pg.production('doc_string : ')
def doc_string(p):
pass
from collections import Iterable
def flatten_list(lis):
i = 0
while i < len(lis):
while isinstance(lis[i], Iterable):
if not lis[i]:
lis.pop(i)
i -= 1
break
else:
lis[i:i + 1] = lis[i]
i += 1
return lis
@pg.production('call_macro_expr : MACRO_NAME head')
def call_macro_expr(p):
head = p[1]
body = None
rest = []
return process_calling_macro(token_to_symbol(p[0]), head, body, rest)
@pg.production('call_macro_expr : MACRO_NAME head rest')
def call_macro_expr(p):
head = p[1]
body = None
rest = p[2]
return process_calling_macro(token_to_symbol(p[0]), head, body, rest)
@pg.production('call_macro_expr : MACRO_NAME do_suite')
def call_macro_expr(p):
head = []
body = p[1]
rest = []
return process_calling_macro(token_to_symbol(p[0]), head, body, rest)
@pg.production('call_macro_expr : MACRO_NAME head do_suite')
def call_macro_expr(p):
head = p[1]
body = p[2]
rest = []
return process_calling_macro(token_to_symbol(p[0]), head, body, rest)
@pg.production('call_macro_expr : MACRO_NAME head do_suite rest')
def call_macro_expr(p):
head = p[1]
body = p[2]
rest = p[3]
return process_calling_macro(token_to_symbol(p[0]), head, body, rest)
def process_calling_macro(name, head, body, rest):
# macro_name = name.name
# if macro_name == 'macro':
# call_func, *rest = head
# _, fun_name, *fun_args = call_func
# return [Symbol('mac'), fun_name, fun_args, body]
# elif macro_name == 'def':
# call_func, *rest = head
# _, fun_name, *fun_args = call_func
# return [Symbol('def'), fun_name, fun_args, body]
# elif macro_name == 'if':
# clauses = [head[0], body]
# for rest_clause in rest:
# label, head, body = rest_clause
# if label == Symbol('elif'):
# clauses.append(head[0])
# clauses.append(body)
# elif label == Symbol('else'):
# clauses.append(body)
# return [Symbol('if'), *clauses]
# else:
# error = SyntaxError(label)
# error.filename = '<string>'
# error.lineno = name.lineno
# error.offset = name.col_offset
# raise error
# return [Symbol('if'), *clauses]
# elif macro_name == 'return':
# return [Symbol('return'), head[0]]
# elif macro_name == 'raise':
# return [Symbol('raise'), head[0]]
# else:
# if rest is None or len(rest) == 0:
# return [Symbol('call_macro'), name, head, body]
# else:
return [Symbol('call_macro'), name, head, body, rest]
@pg.production('rest : ')
def rest(p):
return []
@pg.production('rest : rest_item')
def rest(p):
return [p[0]]
@pg.production('rest : rest rest_item')
def rest(p):
return p[0] + [p[1]]
@pg.production('rest_item : sub_keyword head do_suite')
def rest_item(p):
head = p[1]
body = p[2]
return [p[0], head, body]
@pg.production('rest_item : sub_keyword do_suite')
def rest_item(p):
head = []
body = p[1]
return [p[0], head, body]
@pg.production('sub_keyword : ELSE')
@pg.production('sub_keyword : ELSEIF')
@pg.production('sub_keyword : EXCEPT')
@pg.production('sub_keyword : USER_DEFINED_KEYWORD')
def sub_keyword(p):
return token_to_symbol(p[0])
@pg.production('head : app_nc_args')
def head(p):
return p[0]
@pg.production('if_expr : IF binop_expr suite')
def if_expr(p):
return [Symbol('if'), p[1], p[2]]
@pg.production('if_expr : IF binop_expr suite elseif_exprs')
def if_expr(p):
return [Symbol('if'), p[1], p[2]] + p[3]
def namelparen_to_symbol(token):
return Symbol(token.getstr()[:-1],
token.getsourcepos().lineno,
token.getsourcepos().colno)
@pg.production('call_func_expr : NAME_LPAREN RPAREN')
def call_func_expr(p):
return [Symbol('call_func'),
namelparen_to_symbol(p[0])]
# @pg.production('call_func_expr : NAME_LPAREN RPAREN fn_expr')
# @pg.production('call_func_expr : NAME_LPAREN RPAREN block_expr')
def call_func_expr(p):
return [Symbol('call_func'),
namelparen_to_symbol(p[0]), p[2]]
@pg.production('call_func_expr : NAME_LPAREN app_args_elts RPAREN')
def call_func_expr(p):
return [Symbol('call_func'),
namelparen_to_symbol(p[0])] + p[1]
@pg.production('app_expr : binop_expr app_args')
def call_func_expr(p):
return [p[0]] + p[1]
#@pg.production('call_func_expr : NAME_LPAREN app_args_elts RPAREN fn_expr')
#@pg.production('call_func_expr : NAME_LPAREN app_args_elts RPAREN block_expr')
#def call_func_expr(p):
# return [Symbol('call_func'),
# namelparen_to_symbol(p[0])] + [p[3]] + p[1]
@pg.production('call_func_expr : paren_expr LPAREN RPAREN')
def call_func_expr(p):
return [Symbol('call_func'), p[0]]
@pg.production('call_func_expr : paren_expr LPAREN app_args_elts RPAREN')
def call_func_expr(p):
return [Symbol('call_func'), p[0]] + p[2]
#@pg.production('call_func_expr : call_func_expr LPAREN RPAREN')
#def call_func_expr(p):
# return [Symbol('call_func'), p[0]]
#@pg.production('call_func_expr : call_func_expr LPAREN app_args_elts RPAREN')
#def call_func_expr(p):
# return [Symbol('call_func'), p[0]] + p[2]
@pg.production('call_method_expr : expr DOT_NAME_LPAREN RPAREN')
def call_method_expr(p):
return [Symbol('call_func'), [Symbol('getattr'), p[0], p[1].getstr()[1:-1]]]
@pg.production('call_method_expr : expr DOT_NAME_LPAREN app_args_elts RPAREN')
def call_method_expr(p):
return [Symbol('call_func'), [Symbol('getattr'), p[0], p[1].getstr()[1:-1]]] + p[2]
@pg.production('app_args : LPAREN app_args_elts RPAREN')
def app_args(p):
return p[1]
@pg.production('app_args : LPAREN RPAREN')
def app_args(p):
return []
@pg.production('app_args_elts : app_args_elts COMMA app_args_elt')
def app_args_elts(p):
return p[0] + p[2]
@pg.production('app_args_elts : app_args_elt')
def app_args_elts(p):
return p[0]
@pg.production('app_args_elt : NAME EQUALS binop_expr')
def app_args_elt(p):
return [token_to_keyword(p[0]), p[2]]
@pg.production('app_args_elt : EQUALS NAME')
def app_args_elt_short(p):
return [token_to_keyword(p[1]), token_to_symbol(p[1])]
@pg.production('app_args_elt : binop_expr')
def app_args_elt(p):
return [p[0]]
# TODO
#@pg.production('app_expr : expr app_args app_args')
#@pg.production('app_expr : expr app_args app_args')
def trailing_closure_expr(p):
return [[p[0]] + p[1]] + p[2]
#@pg.production('app_expr : expr app_args AT fn_expr')
#@pg.production('app_expr : expr app_args AT block_expr')
#def trailing_closure_expr(p):
# return [p[0]] + p[1] + [p[3]]
@pg.production('app_nc_expr : expr app_nc_args')
def app_expr(p):
return [p[0]] + p[1]
@pg.production('app_nc_args : app_nc_arg')
def app_nc_args(p):
return [p[0]]
@pg.production('app_nc_args : app_nc_arg COMMA app_nc_args')
def app_nc_args(p):
return [p[0]] + p[2]
# @pg.production('app_nc_args : app_nc_arg app_nc_args')
# def app_nc_args(p):
# return [p[0]] + p[1]
# @pg.production('app_nc_args : app_nc_arg labeled_blocks')
# def app_nc_args(p):
# return [p[0]] + p[1]
#
#
# @pg.production('labeled_blocks : labeled_block labeled_blocks')
# def labeled_blocks(p):
# return [p[0]] + p[1]
#
#
# @pg.production('labeled_blocks : labeled_block')
# def labeled_blocks(p):
# return [p[0]]
@pg.production('app_nc_arg : binop_expr')
def app_nc_arg(p):
return p[0]
@pg.production('left_app_expr : expr CALET left_app_fun_expr app_args')
def left_app_expr(p):
expr, _, left_app_fun_expr, app_args = p
return [left_app_fun_expr, expr] + app_args
@pg.production('left_app_fun_expr : id_expr')
def left_app_fun_expr(p):
return p[0]
@pg.production('dict_expr : lbrace rbrace')
def dict_expr_empty(p):
return [Symbol('table')]
@pg.production('dict_expr : lbrace fields rbrace')
def dict_expr(p):
return [Symbol('table')] + p[1]
@pg.production('fields : field')
def fields_one(p):
return p[0]
@pg.production('fields : list_fields field')
def fields(p):
return p[0] + p[1]
@pg.production('list_fields : list_field')
def list_fields_one(p):
return p[0]
@pg.production('list_fields : list_fields list_field')
def list_fields(p):
return p[0] + p[1]
@pg.production('list_field : field COMMA')
def list_field(p):
return p[0]
@pg.production('field : key COLON binop_expr')
def field(p):
return [p[0], p[2]]
@pg.production('field : EQUALS NAME')
def field(p):
s = token_to_symbol(p[1])
return [s.name, s]
@pg.production('key : prim_expr')
@pg.production('key : id_expr')
@pg.production('key : call_func_expr')
def key(p):
return p[0]
@pg.production('match_expr : MATCH binop_expr lbrace case_branches rbrace')
def case(p):
return [Symbol('match'), p[1]] + p[3]
@pg.production('case_branches : case_branches case_branch')
def case_branches(p):
return p[0] + p[1]
@pg.production('case_branches : case_branch')
def case_branches_branch(p):
return p[0]
@pg.production('case_branch : CASE pattern THINARROW lbrace stmts rbrace')
def case_branch(p):
return [p[1], [Symbol('do')] + p[4]]
@pg.production('case_branch : CASE pattern THINARROW binop_expr')
def case_branch(p):
return [p[1], p[3]]
# @pg.production('case_branch : CASE pattern COLON binop_expr SEMI')
# def case_branch(p):
# return [p[1], p[3]]
# @pg.production('pattern : fn_expr')
@pg.production('pattern : prim_pattern')
@pg.production('pattern : dict_pattern')
@pg.production('pattern : sequence_pattern')
@pg.production('pattern : sequence_type_pattern')
@pg.production('pattern : type_pattern')
@pg.production('pattern : id_pattern')
@pg.production('pattern : ref_pattern')
# @pg.production('pattern : and_pattern')
# @pg.production('pattern : or_pattern')
@pg.production('pattern : quote_pattern')
# TODO @pg.production('defm_pattern : app_nc_args')
def pattern(p):
return p[0]
@pg.production('prim_pattern : NUMBER')
def pattern_num(p):
num_repr = p[0].getstr()
try:
return int(num_repr)
except ValueError as _:
return float(num_repr)
@pg.production('prim_pattern : string')
def pattern_string(p):
return p[0]
@pg.production('prim_pattern : bool_expr')
def pattern_bool(p):
return p[0]
@pg.production('dict_pattern : lbrace rbrace')
def dict_pattern_empty(p):
return [Symbol('table')]
@pg.production('dict_pattern : lbrace dict_pattern_fields rbrace')
def dict_pattern(p):
return [Symbol('table')] + p[1]
@pg.production('dict_pattern_fields : dict_pattern_field')
def fields_one(p):
return p[0]
@pg.production('dict_pattern_fields : dict_pattern_list_fields dict_pattern_field')
def fields(p):
return p[0] + p[1]
@pg.production('dict_pattern_list_fields : dict_pattern_list_field')
def list_fields_one(p):
return p[0]
@pg.production('dict_pattern_list_fields : dict_pattern_list_fields dict_pattern_list_field')
def list_fields(p):
return p[0] + p[1]
@pg.production('dict_pattern_list_field : dict_pattern_field COMMA')
def list_field(p):
return p[0]
@pg.production('dict_pattern_field : dict_pattern_key COLON pattern')
def field(p):
return [p[0], p[2]]
@pg.production('dict_pattern_field : EQUALS NAME')
def field(p):
s = token_to_symbol(p[1])
return [s.name, s]
@pg.production('dict_pattern_key : binop_expr')
def key(p):
return p[0]
@pg.production('id_pattern : NAME')
def id_pattern(p):
return token_to_symbol(p[0])
@pg.production('id_pattern : AMP')
def id_pattern(p):
return Symbol('&')
@pg.production('sequence_pattern : LBRACK sequence_pattern_elts pattern RBRACK')
def sequence_pattern(p):
return [Symbol('make_tuple')] + p[1] + [p[2]]
@pg.production('sequence_pattern : LBRACK pattern RBRACK')
def sequence_pattern_one(p):
return [Symbol('make_tuple'), p[1]]
@pg.production('sequence_pattern : LBRACK RBRACK')
def sequence_pattern_empty(p):
return [Symbol('make_tuple')]
@pg.production('sequence_pattern_elts : sequence_pattern_elts sequence_pattern_elt')
def sequence_pattern_elts(p):
return p[0] + [p[1]]
@pg.production('sequence_pattern_elts : sequence_pattern_elt')
def sequence_pattern_elts_elt(p):
return [p[0]]
@pg.production('sequence_pattern_elt : pattern COMMA')
def sequence_pattern_elt(p):
return p[0]
@pg.production('sequence_pattern_named_elts : sequence_pattern_named_elts sequence_pattern_named_elt')
def sequence_pattern_named_elts(p):
return p[0] + p[1]
@pg.production('sequence_pattern_named_elts : sequence_pattern_named_elt')
def sequence_pattern_named_elts_elt(p):
return p[0]
@pg.production('sequence_pattern_named_elt : named_pattern COMMA')
def sequence_pattern_named_elt(p):
return p[0]
@pg.production('named_pattern : NAME EQUALS pattern')
def sequence_pattern_named_pattern(p):
s = token_to_symbol(p[0])
return [s.name, p[2]]
@pg.production('sequence_type_pattern : names_lparen sequence_pattern_elts pattern RPAREN')
def sequence_type_pattern(p):
return [Symbol('sequence_type'), p[0]] + p[1] + [p[2]]
@pg.production('sequence_type_pattern : names_lparen sequence_pattern_named_elts named_pattern RPAREN')
def sequence_type_pattern(p):
return [Symbol('sequence_type_with_named_member'), p[0]] + p[1] + p[2]
@pg.production('sequence_type_pattern : names_lparen pattern RPAREN')
def sequence_type_pattern_one(p):
return [Symbol('sequence_type'), p[0], p[1]]
@pg.production('sequence_type_pattern : names_lparen named_pattern RPAREN')
def sequence_type_pattern_one(p):
return [Symbol('sequence_type_with_named_member'), p[0], p[1]]
@pg.production('and_pattern : pattern OPAND pattern')
def and_pattern(p):
return [token_to_symbol(p[1]), p[0], p[2]]
@pg.production('or_pattern : pattern OPOR pattern')
def or_pattern(p):
return [token_to_symbol(p[1]), p[0], p[2]]
#@pg.production('type_pattern : pattern COLON NAME')
#def type_pattern(p):
# return [Symbol('type'), token_to_symbol(p[2]), p[0]]
@pg.production('type_pattern : pattern COLON binop_expr')
def type_pattern(p):
return [Symbol('type'), p[2], p[0]]
@pg.production('ref_pattern : CALET NAME')
def ref_pattern(p):
return [Symbol('ref'), token_to_symbol(p[1])]
# @pg.production('quote_pattern : QUOTE LPAREN pattern RPAREN')
@pg.production('quote_pattern : QUOTE pattern')
def quote_pattern(p):
return [Symbol('quote'), p[1]]
@pg.production('record_expr : RECORD NAME')
def record_expr(p):
return [Symbol('record'), token_to_symbol(p[1]), []]
# @pg.production('record_expr : RECORD NAME OPLT NAME')
# def record_expr(p):
# return [Symbol('record'), token_to_symbol(p[1]), token_to_symbol(p[3]), []]
@pg.production('record_expr : RECORD NAME_LPAREN record_fields RPAREN')
def record_expr(p):
return [Symbol('record'), namelparen_to_symbol(p[1]), p[2]]
@pg.production('record_expr : RECORD NAME_LPAREN record_fields RPAREN OPLT NAME')
def record_expr(p):
return [Symbol('record'), namelparen_to_symbol(p[1]), token_to_symbol(p[5]), p[2]]
@pg.production('record_expr : RECORD NAME lbrace record_body rbrace')
def record_expr(p):
return [Symbol('record'), token_to_symbol(p[1]), []] + p[3]
@pg.production('record_expr : RECORD NAME OPLT NAME lbrace record_body rbrace')
def record_expr(p):
return [Symbol('record'), token_to_symbol(p[1]), token_to_symbol(p[3]), []] + p[5]
@pg.production('record_expr : RECORD NAME_LPAREN record_fields RPAREN lbrace record_body rbrace')
def record_expr(p):
return [Symbol('record'), namelparen_to_symbol(p[1]), p[2]] + p[5]
@pg.production('record_expr : RECORD NAME_LPAREN record_fields RPAREN OPLT NAME lbrace record_body rbrace')
def record_expr(p):
return [Symbol('record'), namelparen_to_symbol(p[1]), token_to_symbol(p[5]), p[2]] + p[7]
# @pg.production('union_expr : UNION suite2')
# def union_expr(p):
# return [Symbol('union')] + p[1]
# @pg.production('predicate_expr : PREDICATE binop_expr')
# def union_expr(p):
# return [Symbol('predicate'), p[1]]
@pg.production('record_body : func_expr')
def record_body(p):
return [p[0]]
@pg.production('record_body : record_body func_expr')
def record_body(p):
return p[0] + [p[1]]
@pg.production('record_fields : record_field')
def record_expr(p):
return [p[0]]
@pg.production('record_fields : record_field COMMA record_fields')
def record_expr(p):
return [p[0]] + p[2]
@pg.production('record_field : id_expr')
def record_expr(p):
return p[0]
@pg.production('record_field : id_expr COLON binop_expr')
def record_expr(p):
return [p[0], p[2]]
@pg.production('data_expr : DATA NAME lbrace data_record_expr_list rbrace')
def data_expr(p):
return [Symbol('data'), token_to_symbol(p[1])] + p[3]
@pg.production('data_record_expr_list : data_record_expr')
def record_expr(p):
return [p[0]]
@pg.production('data_record_expr_list : data_record_expr data_record_expr_list')
def record_expr(p):
return [p[0]] + p[1]
@pg.production('data_record_expr : NAME_LPAREN record_fields RPAREN')
def record_expr(p):
return [namelparen_to_symbol(p[0])] + p[1]
@pg.production('binop_expr : NOT binop_expr')
def binop_expr(p):
return [token_to_symbol(p[0]), p[1]]
@pg.production('binop_expr : binop_expr OPPLUS binop_expr')
@pg.production('binop_expr : binop_expr OPMINUS binop_expr')
@pg.production('binop_expr : binop_expr OPTIMES binop_expr')
@pg.production('binop_expr : binop_expr PERCENT binop_expr')
@pg.production('binop_expr : binop_expr OPDIV binop_expr')
@pg.production('binop_expr : binop_expr OPLEQ binop_expr')
@pg.production('binop_expr : binop_expr OPGEQ binop_expr')
@pg.production('binop_expr : binop_expr OPEQ binop_expr')
@pg.production('binop_expr : binop_expr OPNEQ binop_expr')
@pg.production('binop_expr : binop_expr OPLT binop_expr')
@pg.production('binop_expr : binop_expr OPGT binop_expr')
@pg.production('binop_expr : binop_expr OPBITOR binop_expr')
@pg.production('binop_expr : binop_expr OPBITXOR binop_expr')
@pg.production('binop_expr : binop_expr OPBITAND binop_expr')
@pg.production('binop_expr : binop_expr OPFLOORDIV binop_expr')
@pg.production('binop_expr : binop_expr OPPOW binop_expr')
@pg.production('binop_expr : binop_expr OPRSHIFT binop_expr')
@pg.production('binop_expr : binop_expr OPLSHIFT binop_expr')
@pg.production('binop_expr : binop_expr OPAND binop_expr')
@pg.production('binop_expr : binop_expr OPOR binop_expr')
@pg.production('binop_expr : binop_expr OPIS binop_expr')
@pg.production('binop_expr : binop_expr IN binop_expr')
@pg.production('binop_expr : binop_expr AS id_expr')
def binop_expr(p):
return [token_to_symbol(p[1]), p[0], p[2]]
@pg.production('binop_expr : binop_expr INFIX_MACRO_NAME binop_expr')
def binop_expr(p):
return [Symbol('call_macro'), token_to_symbol(p[1]), p[0], p[2]]
@pg.production('binop_expr : binop_expr INFIX_1_MACRO_NAME binop_expr')
def binop_expr(p):
return [Symbol('call_macro'), token_to_symbol(p[1]), p[0], p[2]]
@pg.production('binop_expr : binop_expr INFIX_2_MACRO_NAME binop_expr')
def binop_expr(p):
return [Symbol('call_macro'), token_to_symbol(p[1]), p[0], p[2]]
@pg.production('binop_expr : binop_expr NOT IN binop_expr')
def binop_expr(p):
return [Symbol('not_in'), p[0], p[3]]
@pg.production('binop_expr : binop_expr PIPELINE binop_expr')
def binop_expr(p):
return [Symbol('|>'), p[0], p[2]]
@pg.production('binop_expr : binop_expr PIPELINE_BIND binop_expr')
def binop_expr(p):
left, _, right = p
input_sym = get_temp_name()
return [Symbol('|>'), p[0], [Symbol('bind'),
[Symbol('fn'), [input_sym], p[2] + [input_sym]]]]
@pg.production('binop_expr : binop_expr PIPELINE_FIRST binop_expr')
def binop_expr(p):
return [Symbol('|>1'), p[0], p[2]]
@pg.production('binop_expr : binop_expr PIPELINE_FIRST_BIND binop_expr')
def binop_expr(p):
left, _, right = p
input_sym = get_temp_name()
return [Symbol('|>'), p[0], [Symbol('bind'),
[Symbol('fn'), [input_sym],
[p[2][0], input_sym] + p[2][(1 if len(p[2]) > 1 else len(p[2])):]]]]
@pg.production('binop_expr : expr')
def binop_expr(p):
return p[0] | 47,762 | 18,280 |
import scrapy
import re
class StrofaSpider(scrapy.Spider):
    """Crawl strofa.su: poet index -> each poet's page -> individual poems."""
    name = 'poems_strofa'
    start_urls = ['http://strofa.su/vse-poety/']
    custom_settings = {}

    def parse(self, response):
        """Follow every link on the poet index to a poet page."""
        for href in response.css('.poemlinks a::attr(href)'):
            poet_url = response.urljoin(href.extract())
            yield scrapy.Request(poet_url, callback=self.parse_poet)

    def parse_poet(self, response):
        """Follow every link on a poet page to a poem page."""
        for href in response.css('.poemlinks a::attr(href)'):
            poem_url = response.urljoin(href.extract())
            yield scrapy.Request(poem_url, callback=self.parse_poem)

    def parse_poem(self, response):
        """Yield a dict with author, text, and optionally name and year range.

        Fix: extract_first() returns None when a selector matches nothing;
        the original crashed with AttributeError on .split()/.strip() for
        malformed pages. Guard both lookups so one bad page cannot abort
        the crawl.
        """
        name = response.css('.poem h1::text').extract_first()
        text = "\n".join(response.css('.poem .related::text').extract())
        meta_raw = response.css('.poem .related p::text').extract_first()
        meta = meta_raw.split(',') if meta_raw else ['']
        author = meta[0]
        # Dates on the site are four-digit years starting with 1 (e.g. 1837).
        dates = re.findall(r"1[0-9]{3}", meta[1]) if len(meta) >= 2 else []
        result = {
            'author': author.strip(),
            'text': text
        }
        # Untitled poems are rendered as "* * *"; omit 'name' for those.
        if name is not None and " ".join(name.strip().split()) != "* * *":
            result['name'] = name.strip()
        if dates:
            result['date_from'] = dates[0]
            result['date_to'] = dates[-1]
        yield result
| 1,282 | 414 |
from django.contrib import admin
from .models import Category, Product,Project, Vendor,Zone,Localite,TypeProject,Service,TypeService,Commodite
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
    """Admin options for Category: show name/slug, auto-fill slug from name."""
    list_display = ['name', 'slug']
    prepopulated_fields = {'slug': ('name',)}
@admin.register(Commodite)
class CommoditeAdmin(admin.ModelAdmin):
    """Admin options for Commodite: show name/slug, auto-fill slug from name."""
    list_display = ['name', 'slug']
    prepopulated_fields = {'slug': ('name',)}
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
    """Admin options for Product; price and availability editable in the list."""
    list_display = ['name', 'localite', 'category', 'price', 'available', 'created']
    list_editable = ['price', 'available']
    prepopulated_fields = {'slug': ('name',)}
@admin.register(Vendor)
class VendorAdmin(admin.ModelAdmin):
    """Admin options for Vendor, grouping contact and location fields."""
    fieldsets = [
        ('Personal', {'fields': ['name', 'email', 'phone1', 'phone2', 'phone3']}),
        ('Physical', {'fields': ['address', 'ville', 'localization']}),
    ]
# Models that use the default ModelAdmin; register them in one pass.
for _model in (Service, TypeService, TypeProject, Zone, Localite, Project):
    admin.site.register(_model)
| 1,167 | 353 |
from app import app, db
from app.models import Poem
@app.shell_context_processor
def make_shell_context():
    """Expose the database handle and the Poem model inside `flask shell`."""
    return dict(db=db, Poem=Poem)
| 145 | 50 |
# HEAD
# Classes - Magic Methods - Unary Numeric Magic Methods
# DESCRIPTION
# Describes the magic methods of classes
# pos, neg, abs, invert
# round, floor, ceil, trunc
# RESOURCES
#
# https://rszalski.github.io/magicmethods/
# Now would be a good time to note that you don't have to define
# every comparison magic method to get rich comparisons.
# The standard library has kindly provided us with a class
# decorator in the module functools that will define all
# rich comparison methods if you only define __eq__
# and one other (e.g. __gt__, __lt__, etc.)
# This feature is available since Python 2.7 (and in all Python 3), and when
# you get a chance it saves a great deal of time and effort.
# You can use it by placing @total_ordering above your class definition.
# NUMERIC MAGIC METHODS
# Just like you can create ways for instances of your class to be compared with comparison operators, you can define behavior for numeric operators. Buckle your seat belts, folks...there's a lot of these. For organization's sake, I've split the numeric magic methods into 5 categories: unary operators, normal arithmetic operators, reflected arithmetic operators (more on this later), augmented assignment, and type conversions.
# Unary operators and functions
# UNARY OPERATORS and functions only have one operand, e.g. negation, absolute value, etc.
# __pos__(self)
# Implements behavior for unary positive (e.g. +some_object)
# __neg__(self)
# Implements behavior for negation (e.g. -some_object)
# __abs__(self)
# Implements behavior for the built in abs() function.
# __invert__(self)
# Implements behavior for inversion using the ~ operator. For an explanation on what this does, see the Wikipedia article on bitwise operations.
# __round__(self, n)
# Implements behavior for the built in round() function. n is the number of decimal places to round to.
# __floor__(self)
# Implements behavior for math.floor(), i.e., rounding down to the nearest integer.
# __ceil__(self)
# Implements behavior for math.ceil(), i.e., rounding up to the nearest integer.
# __trunc__(self)
# Implements behavior for math.trunc(), i.e., truncating to an integral.
class Unary(str):
    """Reference skeleton for the unary numeric magic methods.

    Fix: the original left every method body empty (comments only), which
    is a SyntaxError — a comment is not a statement. The explanatory text
    now lives in docstrings, which do count as bodies, so the module
    imports cleanly while remaining a pure teaching skeleton (each method
    returns None until implemented).
    """

    def __pos__(self):
        """Implements behavior for unary positive (e.g. +some_object)."""

    def __neg__(self):
        """Implements behavior for negation (e.g. -some_object)."""

    def __abs__(self):
        """Implements behavior for the built-in abs() function."""

    def __invert__(self):
        """Implements behavior for inversion using the ~ operator (bitwise NOT)."""

    def __round__(self, n):
        """Implements behavior for round(); n is the number of decimal places."""

    def __floor__(self):
        """Implements behavior for math.floor(), i.e. rounding down."""

    def __ceil__(self):
        """Implements behavior for math.ceil(), i.e. rounding up."""

    def __trunc__(self):
        """Implements behavior for math.trunc(), i.e. truncating to an integral."""


u = Unary(" Tes ")
| 3,194 | 882 |
# -*- coding: utf-8 -*-
"""
This script was borrowed from the RISJbot repository (https://github.com/pmyteh/RISJbot)
All credit goes to original author
"""
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
# from scrapy_deltafetch.middleware import DeltaFetch
import logging
import re
from scrapy.exceptions import IgnoreRequest, NotConfigured
logger = logging.getLogger(__name__)
class Fake404Error(IgnoreRequest):
    """Raised when a branded 'not found' page arrived with HTTP 200."""
    def __init__(self, response, *args, **kwargs):
        super(Fake404Error, self).__init__(*args, **kwargs)
        # Keep the offending response so handlers can inspect/log it.
        self.response = response
class Fake404(object):
    """Spider middleware to drop pages iff they are that annoyance on the web:
    the 404 'not found' response returned as a branded page with HTTP code
    200 (which should indicate success).
    This should not be necessary, both because such behaviour is improper
    on behalf of webservers, and because we are literally crawling the
    sites' OWN LIST OF VALID PAGES. Nevertheless, foxnews.com does it and
    others might.
    """

    def __init__(self, settings):
        if not settings.getbool('FAKE404_ENABLED'):
            raise NotConfigured
        # List of ( url re object, matching xpath ) tuples
        signatures = settings.get('FAKE404_DETECTIONSIGS')
        self.detectionsigs = [(re.compile(pattern), xpath)
                              for pattern, xpath in signatures]

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler.settings)

    def process_spider_input(self, response, spider):
        for pattern, xpath in self.detectionsigs:
            # Only responses matching both the URL signature and the
            # page-content signature are treated as fake 404s.
            if pattern.match(response.url) and response.xpath(xpath):
                raise Fake404Error(response,
                                   'Ignoring "not found" response '
                                   'with success HTTP code')
        return None  # Success

    def process_spider_exception(self, response, exception, spider):
        if not isinstance(exception, Fake404Error):
            return None
        spider.crawler.stats.inc_value('fake404/response_ignored_count')
        logger.info(
            'Ignoring response from %(response)r: Ignoring "not found" '
            'response with success HTTP code',
            {'response': response}, extra={'spider': spider},
        )
        return []
| 2,446 | 716 |
import os
from uuid import uuid4
from dvc.main import main
from tests.basic_env import TestDvc
class TestCmdImport(TestDvc):
    def test(self):
        """Importing an existing file succeeds and creates its stage file."""
        self.assertEqual(main(['import', self.FOO, 'import']), 0)
        self.assertTrue(os.path.exists('import.dvc'))
        # A missing source must be reported as a non-zero exit status.
        self.assertNotEqual(main(['import', 'non-existing-file', 'import']), 0)

    def test_unsupported(self):
        """Unknown URL schemes are rejected."""
        status = main(['import', 'unsupported://path', 'import_unsupported'])
        self.assertNotEqual(status, 0)
class TestDefaultOutput(TestDvc):
    def test(self):
        """Importing without an explicit output uses the source file name."""
        tmpdir = self.mkdtemp()
        filename = str(uuid4())
        tmpfile = os.path.join(tmpdir, filename)
        with open(tmpfile, 'w') as fd:
            fd.write('content')
        ret = main(['import', tmpfile])
        self.assertEqual(ret, 0)
        self.assertTrue(os.path.exists(filename))
        # Fix: read through a context manager instead of leaking the
        # file handle opened by the bare open(filename).read().
        with open(filename) as fobj:
            self.assertEqual(fobj.read(), 'content')
| 954 | 310 |
import factory
from wapps.models import IdentitySettings
from .image import ImageFactory, SvgFileField
from .site import SiteFactory
from .tag import TagFactory
class IdentityFactory(factory.DjangoModelFactory):
    site = factory.SubFactory(SiteFactory)
    name = factory.Faker('word')
    description = factory.Faker('paragraph')

    @factory.post_generation
    def tags(self, create, extracted, **kwargs):
        """Attach tags: an int means "create that many", otherwise use the given list."""
        if not create:  # pragma: nocover
            # Simple build, do nothing.
            return
        if not extracted:
            return
        if isinstance(extracted, int):
            new_tags = TagFactory.create_batch(extracted)
        else:
            new_tags = extracted
        for tag in new_tags:
            self.tags.add(tag)

    class Meta:
        model = IdentitySettings
        django_get_or_create = ['site']
class FullIdentityFactory(IdentityFactory):
    """Identity with every optional field populated, for exhaustive tests."""
    # Integer is handled by the parent's `tags` post_generation hook:
    # three tags are created.
    tags = 3
    # Branding images.
    logo = factory.SubFactory(ImageFactory)
    svg_logo = SvgFileField()
    favicon = factory.SubFactory(ImageFactory)
    amp_logo = factory.SubFactory(ImageFactory)
    # Contact details.
    email = factory.Faker('email')
    telephone = factory.Faker('phone_number')
    address_1 = factory.Faker('street_address')
    post_code = factory.Faker('postalcode')
    city = factory.Faker('city')
    country = factory.Faker('country')
    # Social-network handles/URLs.
    facebook = factory.Faker('user_name')
    twitter = factory.Faker('user_name')
    linkedin = factory.Faker('uri')
    instagram = factory.Faker('user_name')
    pinterest = factory.Faker('user_name')
    youtube = factory.Faker('user_name')
| 1,620 | 461 |
from flask import Blueprint, redirect, url_for
from flask import request
from flask import render_template
from flask import session
from sqlalchemy import and_
from .. import db
from app.models import Actors, Users, Likes
from ..recommend import Recommend
recommend_blue = Blueprint('recommend', __name__)
@recommend_blue.route('/', methods=['GET', 'POST'])
def start():
    """Render the landing page with collaborative-filtering recommendations."""
    if request.method != "GET":
        return None
    all_likes = db.session.query(Likes).all()
    user_item = Recommend(sim_algorithm=0, top_k_user=3, top_k_actor=3,
                          user_id=session['userid'],
                          users_like=all_likes).run_collaborative_filtering()
    # "Guess you like": resolve each recommended actor id to its record.
    guess_actor = [
        db.session.query(Actors).filter(Actors.actor_id == int(actor_id)).all()[0]
        for actor_id in user_item
    ]
    db.session.close()
    # Keep only the primary name token and the image file name for display.
    for guess in guess_actor:
        guess.actor_c_name = guess.actor_c_name.split(' ')[0]
        guess.actor_img = guess.actor_img.split('/')[-1]
    return render_template("recommend.html", Guess=guess_actor)
@recommend_blue.route('/recommendActor', methods=['GET', 'POST'])
def recommend_actor():
    """Build a feature dict from the query string and return recommended actors.

    Returns a nested dict {"1": {actor_id: {"img":..., "name":...}}, ...}
    ordered by recommendation rank.
    """
    if request.method == "GET":
        # Collects the user's selected feature values.
        feature_dict = {}
        # Film genre.
        film_type_select = request.args.get('filmTypeSelect')
        feature_dict[film_type_select] = 1
        # Actor gender.
        actor_gender = request.args.get('genderSelect')
        if actor_gender != "":
            feature_dict['actor_gender'] = actor_gender
        # Actor age group.
        actor_age_group = request.args.get('ageSelect')
        feature_dict['actor_age_group'] = actor_age_group
        # Actor's home region.
        actor_birthplace_faction = request.args.get('areaSelect')
        if actor_birthplace_faction != "":
            feature_dict['actor_birthplace_faction'] = actor_birthplace_faction
        # Whether the actor works internationally.
        actor_international = request.args.get('internationalSelect')
        if actor_international != "":
            feature_dict['actor_international'] = actor_international
        # Whether the actor has multiple careers.
        actor_multi_career = request.args.get('multiCareerSelect')
        if actor_multi_career != "":
            feature_dict['actor_multi_career'] = actor_multi_career
        # Actor's zodiac sign.
        actor_horoscope_code = request.args.get('horoscopeSelect')
        if actor_horoscope_code != "":
            feature_dict['actor_horoscope_code'] = actor_horoscope_code
        # Star-rating facet the user cares about.
        star_rate_select = request.args.get('starRateSelect')
        if star_rate_select != "":
            feature_dict[star_rate_select] = 1
        # Average film score.
        actor_avg_films_score = request.args.get('range_avg_score')
        feature_dict['actor_avg_films_score'] = actor_avg_films_score
        # Total number of films.
        actor_film_sum = request.args.get('range_total_films')
        feature_dict['actor_film_sum'] = actor_film_sum
        # Total number of awards.
        actor_award_sum = request.args.get('range_total_awards')
        feature_dict['actor_award_sum'] = actor_award_sum
        # Average number of comments.
        actor_avg_comments_sum = request.args.get('range_avg_comments')
        feature_dict['actor_avg_comments_sum'] = actor_avg_comments_sum
        # NOTE(review): hard-coded Windows path and actor id 1314124 below --
        # should come from configuration; confirm before deploying elsewhere.
        result = Recommend(r'E:\PythonCode\FARSystem\static\data\actor_similarity_data.csv', current_actor=1314124,
                           like_actors=[1314124], input_dict=feature_dict).run()
        # "Guess you like" results: resolve ids to actor records.
        guess_actor = []
        for key, value in result.items():
            actor = db.session.query(Actors).filter(Actors.actor_id == int(key)).all()[0]
            guess_actor.append(actor)
        db.session.close()
        result_list = {}
        i = 1
        for guess in guess_actor:
            result_dict = {}
            result_dict[str(guess.actor_id)] = {}
            result_dict[str(guess.actor_id)]['img'] = guess.actor_img.split('/')[-1]
            result_dict[str(guess.actor_id)]['name'] = guess.actor_c_name.split(' ')[0]
            result_list[str(i)] = result_dict
            print(result_dict)
            i += 1
        print(result_list)
        return result_list
@recommend_blue.route('/a', methods=['GET', 'POST'])
def a():
    """Debug endpoint: raw recommender output for the logged-in user."""
    all_likes = db.session.query(Likes).all()
    db.session.close()
    recommender = Recommend(sim_algorithm=0, top_k_user=3, top_k_actor=3,
                            user_id=session['userid'], users_like=all_likes)
    return recommender.run()
| 4,405 | 1,505 |
# Copyright 2015, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from ovsdbapp.backend.ovs_idl import connection
from ovsdbapp.backend.ovs_idl import idlutils
from neutron.agent.ovsdb.native import connection as native_conn
from neutron.agent.ovsdb.native import helpers
from neutron.tests import base
class TestOVSNativeConnection(base.BaseTestCase):
    # mock.patch decorators apply bottom-up, so the parameters below are in
    # reverse decorator order (get_schema_helper first).
    @mock.patch.object(connection, 'threading')
    @mock.patch.object(idlutils, 'wait_for_change')
    @mock.patch.object(native_conn, 'idl')
    @mock.patch.object(helpers, 'enable_connection_uri')
    @mock.patch.object(idlutils, 'get_schema_helper')
    def test_do_get_schema_helper_retry(self, mock_get_schema_helper,
                                        mock_enable_conn,
                                        mock_idl,
                                        mock_wait_for_change,
                                        mock_threading):
        """Connection.start() retries schema retrieval until it succeeds."""
        mock_helper = mock.Mock()
        # raise until 3rd retry attempt
        mock_get_schema_helper.side_effect = [Exception(), Exception(),
                                              mock_helper]
        conn = connection.Connection(idl_factory=native_conn.idl_factory,
                                     timeout=mock.Mock())
        conn.start()
        # Two failures plus the final success = three calls in total.
        self.assertEqual(3, len(mock_get_schema_helper.mock_calls))
        mock_helper.register_all.assert_called_once_with()
| 1,946 | 561 |
# Re-export every generated protobuf message module from this package.
from .agent_action_pb2 import *
from .agent_info_pb2 import *
from .brain_parameters_pb2 import *
from .command_pb2 import *
from .compressed_observation_pb2 import *
from .custom_action_pb2 import *
from .custom_observation_pb2 import *
from .custom_reset_parameters_pb2 import *
from .demonstration_meta_pb2 import *
from .engine_configuration_pb2 import *
from .environment_parameters_pb2 import *
from .header_pb2 import *
from .space_type_pb2 import *
from .unity_input_pb2 import *
from .unity_rl_initialization_input_pb2 import *
from .unity_rl_initialization_output_pb2 import *
from .unity_rl_input_pb2 import *
# Fix: removed "from .__init__ import *" -- a package star-importing its own
# __init__ adds no names and can execute the module a second time under a
# different key in sys.modules.
| 663 | 231 |
#!/usr/bin/env python
import sys
import codecs
import os.path
import re
from setuptools import setup, find_packages
# Extra keyword arguments assembled below and passed through to setup().
extra = {}
def read_requirements_file(file):
    """Return the lines of *file*, resolved relative to this setup script."""
    here = os.path.abspath(os.path.dirname(__file__))
    with open(os.path.join(here, file), 'r') as handle:
        return handle.readlines()
install_requires = read_requirements_file('requirements-base.txt')
# Since futures 3.2 [1], the package enforces to be installed only in Python 2
# environments because it's basically a backport of Python 3's built-in
# package. So in order to support both Python 2 and Python 3 environments, we
# have to skip installation of futures package in case of Python 3.
#
# It might look natural to use environment markers [2] to achieve this goal but
# they are new and were introduced in setuptools in mid of 2017. FWIW,
# setuptools on both Ubuntu Trusty and Ubuntu Xenial do not support them and
# batch scoring script may be installed in pretty outdated envs. So let's do it
# old-fashioned way by adding condition here.
#
# The above is implemented by splitting dependencies into 2 files:
# `requirements-base.txt` - common deps for Py3 and Py2
# `requirements-py27.txt` - for Python 2 only
#
# [1] https://github.com/agronholm/pythonfutures/commit/d0393ad626d25622927bb0ed47d35ddb2f6cd321 # noqa: E501
# [2] https://www.python.org/dev/peps/pep-0508/#environment-markers
if sys.version_info[0] < 3:
    install_requires.extend(
        read_requirements_file('requirements-py27.txt')
    )
# Console entry points: the three CLI commands this package installs.
extra['entry_points'] = {
    'console_scripts': [
        'batch_scoring = datarobot_batch_scoring.main:main',
        'batch_scoring_sse = datarobot_batch_scoring.main:main_standalone',
        'batch_scoring_deployment_aware = datarobot_batch_scoring.main:main_deployment_aware'
    ]}
extra['install_requires'] = install_requires
# Read __version__ out of the package source without importing it.
this_directory = os.path.abspath(os.path.dirname(__file__))
init_fname = os.path.join(this_directory, 'datarobot_batch_scoring', '__init__.py')
with codecs.open(init_fname, 'r', 'latin1') as fp:
    try:
        version = re.findall(r"^__version__ = '([^']+)'\r?$",
                             fp.read(), re.M)[0]
    except IndexError:
        raise RuntimeError('Unable to determine version.')
readme_fname = os.path.join(this_directory, 'README.rst')
with codecs.open(readme_fname, 'r', 'utf-8') as f:
    long_description = f.read()
setup(
    name='datarobot_batch_scoring',
    version=version,
    description=("A script to score CSV files via DataRobot's prediction API"),
    long_description=long_description,
    author='DataRobot',
    author_email='support@datarobot.com',
    maintainer='DataRobot',
    maintainer_email='support@datarobot.com',
    # NOTE(review): license='BSD' conflicts with the Apache classifier
    # below -- confirm which license actually applies.
    license='BSD',
    url='http://www.datarobot.com/',
    packages=find_packages(),
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    **extra
)
| 3,031 | 1,036 |
from django.urls import path
from evap.results import views
# URL namespace: reverse these routes as "results:<name>".
app_name = "results"
urlpatterns = [
    # Overview page listing results.
    path("", views.index, name="index"),
    path("semester/<int:semester_id>/evaluation/<int:evaluation_id>", views.evaluation_detail, name="evaluation_detail"),
    path("evaluation/<int:evaluation_id>/text_answers_export", views.evaluation_text_answers_export, name="evaluation_text_answers_export"),
]
| 407 | 135 |
import os
import smtplib, ssl
from email.message import EmailMessage
from typing import Tuple
from flask import Blueprint, jsonify, request
from flask_jwt_extended import jwt_required
from utils import dynamo
site_blueprint = Blueprint('site', __name__,)
@site_blueprint.route('/profile', methods=['GET'])
def profile():
    """Return the stored profile as JSON, or a 404 when none exists."""
    record = __getProfile()
    if record:
        return jsonify(record)
    return jsonify({'error': 'User does not exist'}), 404
@site_blueprint.route('/contact', methods=['POST'])
def contact():
    """Email the contact-form payload to the site owner over SMTP/SSL.

    Expects a JSON body with 'name', 'email', 'subject' and 'message'.
    Returns 400 for malformed input and 500 when sending fails.
    """
    req_data = request.get_json()
    if not req_data:
        return jsonify({'error': 'Invalid form data'}), 400
    isValid, err = __validateMessage(req_data)
    if not isValid:
        return jsonify({'error': err}), 400
    try:
        # SMTP endpoint and credentials come from the environment.
        emailServer = os.getenv('EMAIL_SERVER')
        emailPort = os.getenv('EMAIL_PORT')
        emailSender = os.getenv('EMAIL_SENDER')
        emailSenderPassword = os.getenv('EMAIL_SENDER_PASSWORD')
        emailReceiver = os.getenv('EMAIL_RECEIVER')
        msg = EmailMessage()
        msg.set_content(__createMessage(req_data))
        msg['Subject'] = "Contact From Personal Website"
        msg['From'] = emailSender
        msg['To'] = emailReceiver
        context = ssl.create_default_context()
        # Fix: the with-block already quits/closes the connection on exit;
        # the explicit server.close() that followed was redundant.
        with smtplib.SMTP_SSL(emailServer, int(emailPort), timeout=10, context=context) as server:
            server.login(emailSender, emailSenderPassword)
            server.send_message(msg)
        return jsonify(success=True)
    except Exception as e:
        print(e)
        return jsonify({'error': 'Unable to send the email'}), 500
def __getProfile() -> dict:
    """Fetch the single profile item from the DynamoDB table, or None.

    The table named by the PROFILE_TABLE env var is scanned with Limit=1;
    only the first returned item (if any) is used.
    """
    table = dynamo.getTable(os.getenv('PROFILE_TABLE'))
    result = table.scan(Limit=1)
    # Idiomatic lookup instead of "'Items' in result.keys()" plus indexing.
    items = result.get('Items')
    return items[0] if items else None
def __validateMessage(emailData: dict) -> Tuple[bool, str]:
    """Check that all contact-form fields are present.

    Returns (True, '') when valid, otherwise (False, '<Field> is required')
    for the first missing field, in the order name/email/subject/message.
    """
    # Fix: replaced four copy-pasted "'x' not in emailData.keys()" checks
    # with a loop; messages are identical ('Name is required', etc.).
    for field in ('name', 'email', 'subject', 'message'):
        if field not in emailData:
            return False, field.capitalize() + ' is required'
    return True, ''
def __createMessage(emailData: dict) -> str:
    """Render the contact-form fields into the plain-text email body."""
    template = 'Name: {}\nEmail: {} \nSubject: {} \nMessage: {}'
    return template.format(emailData['name'], emailData['email'],
                           emailData['subject'], emailData['message'])
###***********************************###
'''
Grade Notifier
File: helper.py
Author: Ehud Adler
Core Maintainers: Ehud Adler, Akiva Sherman,
Yehuda Moskovits
Copyright: Copyright 2019, Ehud Adler
License: MIT
'''
###***********************************###
def print_to_screen(text):
    """Write *text* to stdout carrying the RENDER:: marker prefix."""
    rendered = "RENDER::" + text
    print(rendered)
| 312 | 111 |
# %%
# nmrezman
from .general import train_findings_model
# Misc
import argparse
# %%
desc_str = "Train Phase 01 Findings vs No Findings Model"

def get_args_parser():
    """Build the CLI parser for the findings-model training script.

    All five arguments are required string paths; they are declared
    data-driven to avoid repeating identical add_argument boilerplate.
    """
    parser = argparse.ArgumentParser(description=desc_str, add_help=False)
    required_paths = [
        # Input paths.
        ("--data_path",
         "Path to dataframe file (e.g., \"/path/to/data/reports_df.gz\")."),
        ("--glove_embedding_path",
         "Path to GloVe word vector glove.6B.300d file (e.g., \"/path/to/data/glove.6B.300d.txt\")."),
        # Output file names.
        ("--model_checkpoint_name",
         "Path / filename to save model checkpoints (e.g., \"/path/to/results/phase01/findings/findings_best_model.h5\")."),
        ("--result_fname",
         "Path / filename to save model evaluation metrics (e.g., \"/path/to/results/phase01/findings/findings_best_result.log\")."),
        ("--tokenizer_fname",
         "Path / filename to save tokenizer (e.g., \"/path/to/results/phase01/findings/tokenizer.gz\")."),
    ]
    for flag, help_text in required_paths:
        parser.add_argument(flag, type=str, required=True, help=help_text)
    return parser
if __name__ == "__main__":
    # Parse CLI arguments (re-using the shared parser) and launch training.
    cli_parser = argparse.ArgumentParser(desc_str, parents=[get_args_parser()])
    cli_args = cli_parser.parse_args()
    train_findings_model(
        data_path=cli_args.data_path,
        glove_embedding_path=cli_args.glove_embedding_path,
        model_checkpoint_name=cli_args.model_checkpoint_name,
        result_fname=cli_args.result_fname,
        tokenizer_fname=cli_args.tokenizer_fname,
    )
| 1,845 | 639 |
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='remotefreebox',
    version='0.3.1',
    description='A Python module to control a Freebox v6 remotely',
    long_description=long_description,
    # The project's main homepage.
    url='https://github.com/MaximeCheramy/remotefreebox',
    # Author details
    author='Maxime Chéramy and Francois Guibert',
    author_email='maxime.cheramy@gmail.com',
    # Choose your license
    license='BSD',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
    # What does your project relate to?
    keywords='freebox remote control rudp hid',
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(),
    # Runtime dependency: mDNS discovery of the Freebox on the LAN.
    install_requires=['zeroconf>=0.17']
)
| 1,474 | 456 |
import matplotlib.pyplot as plt
import numpy as np
from darkflow.net.build import TFNet
import cv2
import pprint as pp
import os
# Earlier training configuration, kept for reference (commented out).
# options = {"model": "cfg/custum_yolo.cfg",
#            "batch": 8,
#            "load": "bin/yolo.weights",
#            "epoch": 3,
#            "trainer":"adam",
#            "gpu": 1.0,
#            "train": True,
#            "annotation": "train/train_anno/",
#            "dataset": "train/train_img/"}
# tfnet = TFNet(options)
#tfnet.load_from_ckpt()
# Training configuration; "load": -1 resumes from the most recent checkpoint.
options = {"model": "cfg/custum_yolo.cfg",
           "load": -1,
           "batch": 16,
           "epoch": 4,
           "gpu": 1.0,
           "train": True,
           "annotation": "train/train_anno/",
           "dataset": "train/train_img/"}
tfnet = TFNet(options)
# #tfnet.load_from_ckpt()
tfnet.train()
tfnet.savepb()
#prediction
# Run prediction on every test image and count how many produced at least
# one detection.
img_names = os.listdir('test/test_img/')
cnt_valid = 0
for names in img_names:
    if names[-1] =='g':  # crude filter for .png/.jpg/.jpeg file names
        original_img = cv2.imread("test/test_img/"+names)
        original_img = cv2.cvtColor(original_img, cv2.COLOR_BGR2RGB)
        results = tfnet.return_predict(original_img)
        print(results)
        if results !=[]:
            cnt_valid+=1
print(cnt_valid)
# original_img = cv2.imread("test/test_img/00029843_001.png")
# original_img = cv2.cvtColor(original_img, cv2.COLOR_BGR2RGB)
# results = tfnet.return_predict(original_img)
def boxing(original_img, predictions):
    """Return a copy of the image with labelled boxes for confident detections.

    Boxes and labels are drawn only when confidence exceeds 0.1; every
    prediction is still echoed to stdout for debugging.
    """
    annotated = np.copy(original_img)
    for result in predictions:
        print(result)
        top_x = result['topleft']['x']
        top_y = result['topleft']['y']
        print(top_x, top_y)
        btm_x = result['bottomright']['x']
        btm_y = result['bottomright']['y']
        print(btm_x, btm_y)
        confidence = result['confidence']
        print(confidence)
        label = result['label'] + " " + str(round(confidence, 3))
        if confidence > 0.1:
            annotated = cv2.rectangle(annotated, (top_x, top_y),
                                      (btm_x, btm_y), (255, 0, 0), 3)
            annotated = cv2.putText(annotated, label, (top_x, top_y - 5),
                                    cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.8,
                                    (0, 230, 0), 1, cv2.LINE_AA)
    return annotated
# fig, ax = plt.subplots(figsize=(20, 10))
# ax.imshow(boxing(original_img, results))
# plt.show() | 2,237 | 859 |
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import pickle
from colorama import init, Fore
init(autoreset=True)
def str2bool(v):
    """Parse a human-friendly boolean flag value for argparse.

    Accepts real bools unchanged; otherwise matches common yes/no spellings
    case-insensitively and raises ArgumentTypeError for anything else.
    """
    if isinstance(v, bool):
        return v
    normalized = v.lower()
    if normalized in ('yes', 'true', 't', 'y', '1'):
        return True
    if normalized in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
# CLI definition. NOTE(review): 'avaible' is a typo in the user-visible help
# string; left untouched here since it is runtime output.
parser = argparse.ArgumentParser(description='List of avaible commands.')
parser.add_argument('--data_path',dest="path", type=str, nargs='?',
                    help='Path to learning data')
parser.add_argument('--save_path',dest="save", type=str, nargs='?',
                    help='Path to checkpoint storage')
parser.add_argument('--epochs',dest="epochs",metavar="100", type=int, nargs='?',
                    help='Number of training epochs', default=100)
parser.add_argument('--n_batch',dest="batch",metavar="64", type=int, nargs='?',
                    help='Batch size', default=64)
parser.add_argument('--n_units',dest="units",metavar="512", type=int, nargs='?',
                    help='Number of LSTM Units', default=512)
parser.add_argument('--n_layers',dest="layers",metavar="3", type=int, nargs='?',
                    help='Number of LSTM Layers', default=3)
parser.add_argument('--n_sequence',dest="seq",metavar="100", type=int, nargs='?',
                    help='The maximum length sentence for a single input in characters', default=100)
parser.add_argument('--n_embedding',dest="embedding",metavar="128", type=int,
                    nargs='?', help='The embedding dimension size', default=128)
parser.add_argument("--continue",dest="cont",metavar="False", type=str2bool,
                    nargs='?',const=True, default=False,help="Continue from last save.")
# Parse before the heavyweight TensorFlow import below so --help stays fast.
args = parser.parse_args()
import tensorflow as tf
import numpy as np
import os
import time
def save_model_configs(directory, params):
    """Pickle *params* to <directory>/parameters.bin.

    Fix: the original opened the file without ever closing it; a with-block
    now guarantees the handle is flushed and released.
    """
    path = os.path.join(directory, "parameters.bin")
    with open(path, 'wb') as f:
        f.write(pickle.dumps(params))
def load_model_configs(directory):
    """Unpickle and return the parameters stored by save_model_configs.

    Fix: the original left the file handle open; a with-block now closes it.
    """
    path = os.path.join(directory, "parameters.bin")
    with open(path, 'rb') as f:
        return pickle.loads(f.read())
def split_input_target(chunk):
    """Return (input, target) where target is the chunk shifted one step."""
    return chunk[:-1], chunk[1:]
def build_model(vocab_size, embedding_dim, rnn_units, batch_size, nl):
    """Stack an Embedding layer, `nl` stateful LSTMs, and a Dense logit head."""
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Embedding(vocab_size, embedding_dim,
                                        batch_input_shape=[batch_size, None]))
    for _ in range(nl):
        model.add(tf.keras.layers.LSTM(rnn_units, return_sequences=True,
                                       stateful=True,
                                       recurrent_initializer='glorot_uniform'))
    model.add(tf.keras.layers.Dense(vocab_size))
    return model
@tf.function
def train_step(inp, target):
    """One optimization step: forward pass, sparse-CE loss, gradient update.

    NOTE(review): relies on the module-level `model` and `optimizer`
    (defined later in this script) existing before the first call.
    """
    with tf.GradientTape() as tape:
        predictions = model(inp)
        loss = tf.reduce_mean(
            tf.keras.losses.sparse_categorical_crossentropy(
                target, predictions, from_logits=True))
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss
# Load the training corpus and derive its character vocabulary.
# NOTE(review): the file handle from open() is never closed here.
text = open(args.path, 'rb').read().decode(encoding='utf-8')
vocab = sorted(set(text))
checkpoint_dir = args.save
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt_{epoch}")
# When resuming (--continue), try to restore the hyper-parameters saved by a
# previous run; fall back to the CLI values when that fails.
confs = None
if os.path.exists(args.save) and args.cont:
    print(Fore.LIGHTGREEN_EX + '[Loading existent configurations]')
    try:
        confs = load_model_configs(args.save)
        embedding_dim = confs['embedding']
        rnn_units = confs['units']
        n_layers = confs['layers']
    except Exception as e:
        print(Fore.RED + 'Error loading checkpoint ' + str(e))
        confs = None
elif args.cont:
    if not os.path.exists(args.save):
        os.mkdir(args.save)
        print(Fore.RED + '[Directory created]')
    print(Fore.RED + '[No configurations to load]')
# No stored configuration: use the command-line hyper-parameters.
if confs is None:
    embedding_dim = args.embedding
    rnn_units = args.units
    n_layers = args.layers
# Creating a mapping from unique characters to indices
char2idx = {u:i for i, u in enumerate(vocab)}
idx2char = np.array(vocab)
text_as_int = np.array([char2idx[c] for c in text])
# The maximum length sentence we want for a single input in characters
seq_length = args.seq
examples_per_epoch = len(text)//seq_length
# Create training examples / targets: each sequence of seq_length+1 chars is
# split into (input, target) pairs shifted by one character.
char_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)
sequences = char_dataset.batch(seq_length+1, drop_remainder=True)
dataset = sequences.map(split_input_target)
# Batch size
BATCH_SIZE = args.batch
steps_per_epoch = examples_per_epoch//BATCH_SIZE
# Buffer size to shuffle the dataset
# (TF data is designed to work with possibly infinite sequences,
# so it doesn't attempt to shuffle the entire sequence in memory. Instead,
# it maintains a buffer in which it shuffles elements).
BUFFER_SIZE = 10000
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
# Length of the vocabulary in chars
vocab_size = len(vocab)
model = build_model(
    vocab_size = len(vocab),
    embedding_dim=embedding_dim,
    rnn_units=rnn_units,
    batch_size=BATCH_SIZE,
    nl=n_layers)
# Restore the most recent weights when resuming training.
if os.path.exists(args.save) and args.cont:
    print(Fore.LIGHTBLUE_EX + '[Loading existent checkpoint]')
    try:
        model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))
    except Exception as e:
        print(Fore.RED + 'Error loading checkpoint ' + str(e))
elif args.cont:
    print(Fore.RED + '[No checkpoints to load]')
# NOTE(review): this block duplicates the CLI fallback already performed
# right after configuration loading above; it appears redundant.
if confs is None:
    embedding_dim = args.embedding
    rnn_units = args.units
    n_layers = args.layers
EPOCHS=args.epochs
# Persist the effective hyper-parameters plus vocab mappings for future runs.
if confs is None:
    confs = {
        'units': args.units,
        'embedding': args.embedding,
        'layers': args.layers,
        'vocab_size': vocab_size,
        'char2idx': char2idx,
        'idx2char': idx2char,
    }
    save_model_configs(args.save, confs)
model.summary()
print (Fore.CYAN + 'Length of text: {} characters'.format(len(text)))
print (Fore.CYAN + '{} unique characters'.format(len(vocab)))
optimizer = tf.keras.optimizers.Adam()
train_start = time.time()
for epoch in range(EPOCHS):
    start = time.time()
    # initializing the hidden state at the start of every epoch
    # NOTE(review): reset_states() presumably returns None, so `hidden`
    # never holds a real state object -- confirm the intent here.
    hidden = model.reset_states()
    for (batch_n, (inp, target)) in enumerate(dataset):
        loss = train_step(inp, target)
        # Progress report every 100 batches.
        if batch_n % 100 == 0:
            template = Fore.LIGHTYELLOW_EX + 'Epoch [{}/{}] Batch [{}/{}] Loss {}'
            print(template.format(epoch+1,EPOCHS, batch_n, steps_per_epoch, loss))
    # saving (checkpoint) the model every 5 epochs
    if (epoch + 1) % 5 == 0:
        model.save_weights(checkpoint_prefix.format(epoch=epoch))
        print (Fore.LIGHTYELLOW_EX + '[Model saved]\n')
    print (Fore.LIGHTWHITE_EX + '\n[Epoch {} Loss {:.4f}]'.format(epoch+1, loss))
    print (Fore.GREEN + '[Time taken for 1 epoch {} sec]\n'.format(time.time() - start))
print (Fore.LIGHTGREEN_EX + '\n[Total time {} mins]\n'.format((time.time() - train_start)/60))
# Final checkpoint after the loop (uses the last value of `epoch`).
model.save_weights(checkpoint_prefix.format(epoch=epoch))
| 7,275 | 2,533 |
my_expr = 42
s = b'foo{my_e<caret>' | 35 | 22 |
#!/usr/bin/python3
import pandas as pd
import pickle
import sys
import base64
import re
from sklearn.linear_model import LinearRegression
# Here we keep input data to Dataframe constructor
rows = []
# Each stdin line looks like "[f0,f1,...,label]": strip brackets and the
# newline, then split on commas.
for line in sys.stdin:
    line = line.replace('[', '')
    line = line.replace(']', '')
    line = line.replace('\n', '')
    line = re.split('[,]', line)
    line_dict = {}
    # The last column is the label; the rest become feature_0..feature_{n-1}.
    for i, value in enumerate(line):
        if i != len(line) - 1:
            name = "feature_" + str(i)
        else:
            name = "label"
        line_dict[name] = value
    rows.append(line_dict)
# Initialize a dataframe from the list
df = pd.DataFrame(rows)
feature_columns = []
for i in range(0, len(df.columns) - 1):
    feature_columns.append("feature_" + str(i))
label_column = "label"
model = LinearRegression()
model.fit(df[feature_columns], df[label_column])
# Serialize the fitted model so it can travel back through the pipe as text.
model_string = base64.b64encode(pickle.dumps(model)).decode('utf-8')
# Output to stdout, so that rdd.pipe() can return the string to pipedRdd.
print(model_string)
| 1,023 | 341 |
"""
This file defines the database models
"""
from .common import db, Field
from pydal.validators import *
from yatl.helpers import *
from py4web.utils.form import Form, FormStyleBulma
MTROUTE_TYPES = ('DefaultRoute', 'StaticMTRoute', 'RandomRoundrobinMTRoute','FailoverMTRoute')
MOROUTE_TYPES = ('DefaultRoute', 'StaticMORoute', 'RandomRoundrobinMORoute','FailoverMORoute')
HTTP_CON_TYPE =('GET', 'POST')
MT_CON_TYPES =('smppc', 'httpc')
MT_FILTER_TYPES=('DestinationAddrFilter','UserFilter','GroupFilter','SourceAddrFilter','ShortMessageFilter','DateIntervalFilter','TimeIntervalFilter','TagFilter','TransparentFilter')
MO_FILTER_TYPES=('DestinationAddrFilter','SourceAddrFilter','ConnectorFilter','ShortMessageFilter','DateIntervalFilter','TimeIntervalFilter','TagFilter','EvalPyFilter','TransparentFilter')
IMO_TYPES=('DefaultInterceptor', 'StaticMOInterceptor')
IMT_TYPES=('DefaultInterceptor', 'StaticMTInterceptor')
# MT (mobile-terminated) filters; referenced by MT routes and interceptors.
db.define_table(
    'mt_filter',
    Field('fid', 'string', length=15, label='FID',
          comment='Filter ID must be unique'),
    Field('filter_type', requires=IS_IN_SET(MT_FILTER_TYPES),
          comment='Select from list of available types'),
    Field('filter_route'),
    Field('f_value', 'string', length=50, label='Filter Value',
          comment='Values must correspond to filter type'),
    format='%(fid)s')
# MO interceptors: scripts evaluated against inbound (mobile-originated) messages.
# NOTE(review): mofilters references the mt_filter table; confirm MO
# interceptors are intentionally filtered with MT filters.
db.define_table(
    'j_imo',
    Field('motype', label='Type', requires=IS_IN_SET(IMO_TYPES),
          comment='Type of interceptor'),
    Field('moorder', label='Order',
          comment='Interceptor will evaluate in descending order'),
    Field('mofilters', 'list:reference mt_filter',
          requires=IS_IN_DB(db, 'mt_filter.id', 'mt_filter.fid', multiple=True),
          label='Filter(s)',
          comment='Filters need to be added prior to adding routes. Please see filter management'),
    Field('moscript', label='Script',
          comment='Path and script must exist. Only python 3 scripts allowed now'))
# MT interceptors: scripts evaluated against outbound (mobile-terminated) messages.
db.define_table(
    'j_imt',
    Field('mttype', requires=IS_IN_SET(IMT_TYPES), label='Type',
          comment='Type of interceptor'),
    Field('mtorder', label='Order',
          comment='Interceptor will evaluate in descending order'),
    Field('mtfilters', 'list:reference mt_filter',
          requires=IS_IN_DB(db, db.mt_filter._id, db.mt_filter.fid, multiple=True),
          label='Filter(s)',
          comment='Filters need to be added prior to adding routes. Please see filter management'),
    Field('mtscript', label='Script',
          comment='Path and script must exist. Only python 3 scripts allowed now'))
# User groups; j_user rows reference these.
db.define_table(
    'j_group',
    Field('name', 'string', length=10,
          comment='Must be a string with no spaces or special characters'),
    format='%(name)s')
# Jasmin users (HTTP/SMPP credentials).
# Fixed: the row format previously used '%(name)s', but this table has no
# 'name' field, which breaks record rendering; use 'username' instead.
db.define_table(
    'j_user',
    Field('username', 'string', length=10,
          comment="Jasmin User Name for HTTP and SMPP connecting. Must not include any spaces and can not be longer than 10 characters"),
    Field('password', 'string', length=10,
          comment='Jasmin Password for HTTP and SMPP connecting. Must not include any spaces and can not be longer than 10 characters'),
    Field('j_uid', 'string', label='Jasmin UID', length=12,
          comment='Jasmin UID cannot be longer than 12 characters and recommend all in UPPER case. No spaces allowed. Suggest USER_1 etc.'),
    Field('j_group', 'reference j_group', label='Jasmin GID',
          comment='Select a Group',
          requires=IS_IN_DB(db, 'j_group.id', 'j_group.name')),
    format='%(username)s')
# Per-user Jasmin credential defaults: quotas, value-validation regexes and
# authorization flags.
# Fixed: the validity_period default regex is now a raw string; '^\d+$' is an
# invalid escape sequence in a plain literal (a SyntaxWarning/error on modern
# Python).
db.define_table(
    'j_user_cred',
    Field('juser', 'string', label='Jasmin UID', length=10),
    Field('default_src_addr', default='None', comment='Default source address of SMS-MT'),
    Field('quota_http_throughput', default='ND', comment='Max. number of messages per second to accept through HTTP API'),
    Field('quota_balance', default='ND', comment='c.f. 1. Balance quota'),
    Field('quota_smpps_throughput', default='ND', comment='Max. number of messages per second to accept through SMPP Server'),
    Field('quota_sms_count', default='ND', comment='c.f. 2. sms_count quota'),
    Field('quota_early_percent', default='ND', comment='c.f. Asynchronous billing'),
    Field('value_priority', default='^[0-3]$', comment='Regex pattern to validate priority of SMS-MT'),
    Field('value_content', default='.*', comment='Regex pattern to validate content of SMS-MT'),
    Field('value_src_addr', default='.*', comment='Regex pattern to validate source address of SMS-MT'),
    Field('value_dst_addr', default='.*', comment='Regex pattern to validate destination address of SMS-MT'),
    Field('value_validity_period', default=r'^\d+$', comment='Regex pattern to validate validity_period of SMS-MT'),
    Field('author_http_send', default=True, comment='Privilege to send SMS through Sending SMS-MT'),
    Field('author_http_dlr_method', default=True, comment='Privilege to set dlr-method HTTP parameter (default is GET)'),
    Field('author_http_balance', default=True, comment='Privilege to check balance through Checking account balance'),
    Field('author_smpps_send', default=True, comment='Privilege to send SMS through SMPP Server API'),
    Field('author_priority', default=True, comment='Privilege to defined priority of SMS-MT (default is 0)'),
    Field('author_http_long_content', default=True, comment='Privilege to send long content SMS through Sending SMS-MT'),
    Field('author_src_addr', default=True, comment='Privilege to defined source address of SMS-MT'),
    Field('author_dlr_level', default=True, comment='Privilege to set dlr-level parameter (default is 1)'),
    Field('author_http_rate', default=True, comment='Privilege to check a message rate through Checking rate price'),
    Field('author_validity_period', default=True, comment='Privilege to defined validity_period of SMS-MT (default is NOT SET)'),
    Field('author_http_bulk', default=False, comment='Privilege to send bulks through http api (Not implemented yet)'),
    format='%(juser)s')
# MO (mobile-originated) filters.
# Fixed: the row format previously used '%(name)s', but this table has no
# 'name' field (only fid/filter_type/f_value), which breaks record rendering;
# use 'fid' instead.
db.define_table(
    'mo_filter',
    Field('fid', 'string', length=15, unique=True),
    Field('filter_type', requires=IS_IN_SET(MO_FILTER_TYPES)),
    Field('f_value', 'string', length=50),
    format='%(fid)s')
# SMPP client connectors: one row per SMSC link, mirroring Jasmin's smppccm
# parameters. Fixed typos in user-facing label/help strings
# (CRITICCAL -> CRITICAL, Coonection -> Connection, adress -> address,
# Validtiy -> Validity).
db.define_table(
    'connector',
    Field('name', 'string', length=15, label='Connector ID',
          comment='Connector ID must be unique on each gateway',
          requires=[IS_LENGTH(minsize=1,maxsize=15),IS_NOT_IN_DB(db, 'connector.name')]),
    Field('c_logfile', label='Logfile', default='/var/log/jasmin/default-<cid>.log'),
    Field('c_logrotate', label='Log Rotate', default='midnight', comment='When to rotate the log file, possible values: S=Seconds, M=Minutes, H=Hours, D=Days, W0-W6=Weekday (0=Monday) and midnight=Roll over at midnight'),
    Field('c_loglevel', label='Log Level', default='20', comment='Logging numeric level: 10=DEBUG, 20=INFO, 30=WARNING, 40=ERROR, 50=CRITICAL'),
    Field('c_host', label='Host', default='127.0.0.1', comment='Server that runs SMSC'),
    Field('c_port', label='Port', default='2775', comment='The port number for the connection to the SMSC'),
    Field('c_ssl', label='SSL', default='no', comment='Activate ssl connection'),
    Field('c_username', 'string', label='User name', length=15, comment='User name max 12 characters with no spaces'),
    Field('c_password', 'string', length=15, label='Password', comment='Password max 12 characters with no spaces'),
    Field('c_bind', label='Bind Type', requires=IS_IN_SET(('transceiver', 'transmitter', 'receiver')), default='transceiver', comment='Bind type: transceiver, receiver or transmitter'),
    Field('c_bind_to', label='Bind To', default='30', comment='Timeout for response to bind request'),
    Field('c_trx_to', label='Transmit Timeout', default='300', comment='Maximum time lapse allowed between transactions, after which, the connection is considered as inactive and will reconnect'),
    Field('c_res_to', label='Response Timeout', default='60', comment='Timeout for responses to any request PDU'),
    Field('c_pdu_red_to', label='PDU Read Timeout', default='10', comment='Timeout for reading a single PDU, this is the maximum lapse of time between receiving PDU’s header and its complete read, if the PDU reading timed out, the connection is considered as ‘corrupt’ and will reconnect'),
    Field('c_con_loss_retry', label='Connection Loss Retry', default='yes', comment='Reconnect on connection loss ? (yes, no)'),
    Field('c_con_loss_delay', label='Connection Loss Delay', default='10', comment='Reconnect delay on connection loss (seconds)'),
    Field('c_con_fail_retry', label='Connection Fail Retry', default='yes', comment='Reconnect on connection failure ? (yes, no)'),
    Field('c_con_fail_delay', label='Connection Fail Delay', default='10', comment='Reconnect delay on connection failure (seconds)'),
    Field('c_src_addr', label='Default Source Address', default='Not defined', comment='Default source address of each SMS-MT if not set while sending it, can be numeric or alphanumeric, when not defined it will take SMSC default'),
    Field('c_src_ton', label='Source TON', default='2', comment='Source address TON setting for the link: 0=Unknown, 1=International, 2=National, 3=Network specific, 4=Subscriber number, 5=Alphanumeric, 6=Abbreviated'),
    Field('c_src_npi', label='Source NPI', default='1', comment='Source address NPI setting for the link: 0=Unknown, 1=ISDN, 3=Data, 4=Telex, 6=Land mobile, 8=National, 9=Private, 10=Ermes, 14=Internet, 18=WAP Client ID'),
    Field('c_dst_ton', label='Destination TON', default='1', comment='Destination address TON setting for the link: 0=Unknown, 1=International, 2=National, 3=Network specific, 4=Subscriber number, 5=Alphanumeric, 6=Abbreviated'),
    Field('c_dst_npi', label='Destination NPI', default='1', comment='Destination address NPI setting for the link: 0=Unknown, 1=ISDN, 3=Data, 4=Telex, 6=Land mobile, 8=National, 9=Private, 10=Ermes, 14=Internet, 18=WAP Client ID'),
    Field('c_bind_ton', label='Bind TON', default='0', comment='Bind address TON setting for the link: 0=Unknown, 1=International, 2=National, 3=Network specific, 4=Subscriber number, 5=Alphanumeric, 6=Abbreviated'),
    Field('c_bind_npi', label='Bind NPI', default='1', comment='Bind address NPI setting for the link: 0=Unknown, 1=ISDN, 3=Data, 4=Telex, 6=Land mobile, 8=National, 9=Private, 10=Ermes, 14=Internet, 18=WAP Client ID'),
    Field('c_validity', label='Validity', default='Not defined', comment='Default validity period of each SMS-MT if not set while sending it, when not defined it will take SMSC default (seconds)'),
    Field('c_priority', label='Priority', default='0', comment='SMS-MT default priority if not set while sending it: 0, 1, 2 or 3'),
    Field('c_requeue_delay', label='Requeue Delay', default='120', comment='Delay to be considered when requeuing a rejected message'),
    Field('c_addr_range', label='Address Range', default='Not defined', comment='Indicates which MS’s can send messages to this connector, seems to be an informative value'),
    Field('c_systype', label='System Type', default='Not defined', comment='The system_type parameter is used to categorize the type of ESME that is binding to the SMSC. Examples include “VMS” (voice mail system) and “OTA” (over-the-air activation system)'),
    Field('c_dlr_expiry', label='DLR Expiry', default='86400', comment='When a SMS-MT is not acked, it will remain waiting in memory for expiry seconds, after this period, any received ACK will be ignored'),
    Field('c_submit_throughput', label='Submit Throughput', default='1', comment='Active SMS-MT throttling in MPS (Messages per second), set to 0 (zero) for unlimited throughput'),
    Field('c_proto_id', label='Protocol', default='0', comment='Used to indicate protocol id in SMS-MT and SMS-MO'),
    Field('c_coding', label='Coding', default='0', comment='Default coding of each SMS-MT if not set while sending it: 0=SMSC Default, 1=IA5 ASCII, 2=Octet unspecified, 3=Latin1, 4=Octet unspecified common, 5=JIS, 6=Cyrillic, 7=ISO-8859-8, 8=UCS2, 9=Pictogram, 10=ISO-2022-JP, 13=Extended Kanji Jis, 14=KS C 5601'),
    Field('c_elink_interval', label='Elink', default='30', comment='Enquire link interval (seconds)'),
    Field('c_def_msg_id', label='Default Msg ID', default='0', comment='Specifies the SMSC index of a pre-defined (‘canned’) message'),
    Field('c_ripf', label='Replace If Present', default='0', comment='Replace if present flag: 0=Do not replace, 1=Replace'),
    Field('c_dlr_msgid', label='DLR MsgID', default='0', comment='Indicates how to read msg id when receiving a receipt: 0=msg id is identical in submit_sm_resp and deliver_sm, 1=submit_sm_resp msg-id is in hexadecimal base, deliver_sm msg-id is in decimal base, 2=submit_sm_resp msg-id is in decimal base'),
    format='%(name)s')
# HTTP connectors used by MO routes to forward inbound SMS over HTTP.
db.define_table(
    'http_cons',
    Field('hcon_cid', 'string', length=10, label='Connector ID',
          comment='Must be unique'),
    Field('hcon_method', label='Method', comment='GET/POST',
          requires=IS_IN_SET(HTTP_CON_TYPE)),
    Field('hcon_url', label='Base URL',
          comment='URL for MO messages e.g http://10.10.20.125/receive-sms/mo.php'),
    format='%(hcon_cid)s')
# MT routes: ordered routing rules mapping filters to SMPP connectors.
# Fixed typo in the help string ('assesd' -> 'assessed').
db.define_table(
    'mtroute',
    Field('mt_order', 'string', length=10, label='Route order',
          requires=IS_NOT_EMPTY(),
          comment='Routes will be assessed in descending order based on filters and matches'),
    Field('mt_type', requires=IS_IN_SET(MTROUTE_TYPES), label='Route type'),
    Field('mt_connectors', 'list:reference connector', label='SMPP Connector(s)',
          comment='SMPP connector needs to be available'),
    Field('mt_filters', 'list:reference mt_filter', label='Filter(s)',
          comment='Filters need to be added prior to adding routes. Please see filter management'),
    Field('mt_rate', 'string', length=10, label='Rate',
          comment='Decimal rate value for the connector. All messages going over this connector will be charged at the rate specified'),
    format='%(mt_order)s')
# MO routes: ordered routing rules for inbound messages to SMPP/HTTP connectors.
# Fixed: the mo_http_cons display field was 'http_hcons-hcons_cid', which is
# not a valid table.field reference and broke the IS_IN_DB dropdown; it is now
# 'http_cons.hcon_cid'. Also fixed the 'assesd' typo in the help string.
# NOTE(review): mo_filters references the mt_filter table even though a
# mo_filter table exists above -- confirm whether this is intentional.
db.define_table(
    'moroute',
    Field('mo_order', 'string', length=10, label='Route order',
          comment='Routes will be assessed in descending order based on filters and matches'),
    Field('mo_type', requires=IS_IN_SET(MOROUTE_TYPES), label='Route type'),
    Field('mo_connectors', 'list:reference connector',
          requires=IS_IN_DB(db, 'connector.id', 'connector.name', multiple=True),
          label='SMPP Connector(s)', comment='SMPP connector needs to be available'),
    Field('mo_http_cons', 'list:reference http_cons',
          requires=IS_IN_DB(db, 'http_cons.id', 'http_cons.hcon_cid', multiple=True),
          label='HTTP Connector(s)', comment='HTTP connector needs to be available'),
    Field('mo_filters', 'list:reference mt_filter',
          requires=IS_IN_DB(db, 'mt_filter.id', 'mt_filter.fid', multiple=True),
          label='Filter(s)',
          comment='Filters need to be added prior to adding routes. Please see filter management'),
    format='%(mo_order)s')
db.commit()
from pessoa import Pessoa
| 27 | 10 |
import traceback
import logging
logger = logging.getLogger(__package__)
def log_error_handler(cls, tb):
    """Log an unretrieved future/task failure; never raise from the handler.

    Parameters:
        cls: exception class of the unhandled failure (unused; kept so the
             signature matches Default.UNHANDLED_FAILURE_CALLBACK callers).
        tb:  list of formatted traceback strings (as produced by
             ``traceback.format_exception``) joined into the log message.
    """
    try:
        logger.error('Future/Task exception was never retrieved:\n%s',
                     ''.join(tb))
    except Exception:
        # Best effort: a failing logger must not propagate out of the error
        # handler. Narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        pass
class Default(object):
    """Process-wide defaults for future/task failure handling and for the
    executor that runs future callbacks."""

    # Called when failure of the future was not handled by any callback.
    # This includes exceptions raised in on_success and on_failure callbacks.
    UNHANDLED_FAILURE_CALLBACK = staticmethod(log_error_handler)

    # Default executor for future callbacks; resolved lazily on first use.
    CALLBACK_EXECUTOR = None

    @staticmethod
    def get_callback_executor():
        # Import here, on first access -- presumably to avoid an import
        # cycle at module load time (TODO confirm).
        if not Default.CALLBACK_EXECUTOR:
            from .cooperative.synchronous_executor import Synchronous
            Default.CALLBACK_EXECUTOR = Synchronous
        return Default.CALLBACK_EXECUTOR

    @staticmethod
    def on_unhandled_error(exc):
        # Format the exception's own traceback and hand it to the
        # configured unhandled-failure callback.
        formatted = traceback.format_exception(exc.__class__, exc,
                                               exc.__traceback__)
        Default.UNHANDLED_FAILURE_CALLBACK(exc.__class__, formatted)
| 1,043 | 319 |
import duden
def main():
    # Fetch one word entry from duden.de and print its meanings/examples.
    # find the correct url
    # get definition and examples
    # NOTE(review): 'einfach_einmal_simpel' looks like a placeholder slug;
    # presumably duden.get() returns None for unknown words, in which case
    # the attribute access below would fail -- verify.
    w1 = duden.get('einfach_einmal_simpel')
    # remove beispiel code to get the meanings???
    print(w1.meaning_example)
    # change the depth, include code
if __name__ == '__main__':
    main()
| 292 | 99 |
"""zc.buildout recipe for downloading and extracting an archive."""
from setuptools import setup, find_packages
name = "gocept.download"
classifiers = [
"Environment :: Console",
"Environment :: Plugins",
"Framework :: Buildout",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: Zope Public License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Software Development :: Build Tools",
"Topic :: System :: Software Distribution",
]
setup(
name = name,
version = '1.0dev',
author = "Christian Theune",
author_email = "ct@gocept.com",
description = __doc__.strip(),
long_description = open("README.txt").read(),
license = "ZPL 2.1",
keywords = "buildout zc.buildout recipe download extract archive",
classifiers = classifiers,
url = "https://bitbucket.org/gocept/%s/" % name,
packages = find_packages("src"),
include_package_data = True,
package_dir = {"": "src"},
namespace_packages = ["gocept"],
install_requires = ["zc.buildout", "setuptools"],
extras_require = {"test": ["zope.testing"]},
entry_points = {"zc.buildout": ["default = %s:Recipe" % name,],},
)
| 1,275 | 392 |
import time
# Joke script: sleeps for ~500 years
# (500 * 365 * 24 * 3600 = 15,768,000,000 seconds).
print ("this is a 500 years python dummy file")
print ("see you after 500 years")
time.sleep(15768000000)
print ("Time Travel!")
# -*- coding: utf-8 -*-
def comp_radius_mid_active(self):
    """Compute the radius at the middle of the active part of the slot.

    Parameters
    ----------
    self : Slot
        A Slot object

    Returns
    -------
    Rmw: float
        Mid active radius [m]
    """
    bore_radius = self.get_Rbo()
    slot_height = self.comp_height()
    active_height = self.comp_height_active()
    # An outward slot extends beyond the bore radius; an inward one digs
    # into it, so the slot height is added or subtracted accordingly and
    # the mid-active point sits half the active height back from the tip.
    if self.is_outwards():
        return bore_radius + slot_height - active_height / 2
    return bore_radius - slot_height + active_height / 2
| 489 | 176 |
# Adapted by Ji Zhang in 2019
#
# Based on Detectron.pytorch/lib/roi_data/minibatch.py written by Roy Tseng
import numpy as np
import cv2
import os
from core.config import cfg
import utils.blob as blob_utils
import roi_data.rpn
def get_minibatch_blob_names(is_training=True):
    """Return blob names in the order in which they are read by the data loader.
    """
    # data blob: holds a batch of N images, each with 3 channels
    # (plus the temporal-frame blobs produced by _get_image_blob below).
    blob_names = ['data', 'all_frames', 'bf_cur_len', 'f_scale']
    if cfg.RPN.RPN_ON:
        # RPN-only or end-to-end Faster R-CNN
        blob_names += roi_data.rpn.get_rpn_blob_names(is_training=is_training)
    elif cfg.RETINANET.RETINANET_ON:
        raise NotImplementedError
    else:
        # Fast R-CNN like models trained on precomputed proposals
        # NOTE(review): only roi_data.rpn is imported at the top of this
        # file; this branch relies on roi_data.fast_rcnn having been
        # imported elsewhere (e.g. by the roi_data package) -- verify.
        blob_names += roi_data.fast_rcnn.get_fast_rcnn_blob_names(
            is_training=is_training
        )
    return blob_names
def get_minibatch(roidb):
    """Given a roidb, construct a minibatch sampled from it.

    Returns (blobs, valid) where blobs maps every name from
    get_minibatch_blob_names() to its data and valid is the flag returned
    by the RPN/Fast R-CNN blob builders.
    """
    # We collect blobs from each image onto a list and then concat them into a
    # single tensor, hence we initialize each blob to an empty list
    blobs = {k: [] for k in get_minibatch_blob_names()}
    # Get the input image blob
    im_blob, im_scales, all_frames_blob, bf_cur_len, f_scale = _get_image_blob(roidb)
    blobs['data'] = im_blob
    blobs['all_frames'] = all_frames_blob
    blobs['bf_cur_len'] = bf_cur_len
    blobs['f_scale'] = f_scale
    if cfg.RPN.RPN_ON:
        # RPN-only or end-to-end Faster/Mask R-CNN
        valid = roi_data.rpn.add_rpn_blobs(blobs, im_scales, roidb)
    elif cfg.RETINANET.RETINANET_ON:
        raise NotImplementedError
    else:
        # Fast R-CNN like models trained on precomputed proposals
        # NOTE(review): roi_data.fast_rcnn is not imported in this file;
        # see the note in get_minibatch_blob_names.
        valid = roi_data.fast_rcnn.add_fast_rcnn_blobs(blobs, im_scales, roidb)
    # add relpn blobs
    add_relpn_blobs(blobs, im_scales, roidb)
    return blobs, valid
def add_relpn_blobs(blobs, im_scales, roidb):
    """Copy the whitelisted per-image annotation entries from each roidb
    record into the matching ``blobs['roidb']`` entry.

    ``im_scales`` is accepted for signature parity with the other blob
    builders but is not used here.
    """
    assert 'roidb' in blobs
    valid_keys = ['dataset_name',
                  'sbj_gt_boxes', 'sbj_gt_classes', 'obj_gt_boxes', 'obj_gt_classes', 'prd_gt_classes',
                  'sbj_gt_overlaps', 'obj_gt_overlaps', 'prd_gt_overlaps', 'pair_to_gt_ind_map',
                  'width', 'height', 'file_name', 'pre_processed_temporal_roi', 'pre_processed_frames_rpn_ret'] ###!!!
    for idx, entry in enumerate(roidb):
        # Only keys present on the entry are copied; everything else on the
        # blob record is left untouched.
        for key in valid_keys:
            if key in entry:
                blobs['roidb'][idx][key] = entry[key]
    # Always return valid=True, since RPN minibatches are valid by design
    return True
def _get_image_blob(roidb):
    """Builds an input blob from the images in the roidb at the specified
    scales.

    Returns (blob, im_scales, all_frames_blob, bf_cur_len, f_scale): the
    image blob [n, c, h, w], the per-image scale factors, and the temporal
    frame blobs produced by get_frames_blob().
    """
    num_images = len(roidb)
    # Sample random scales to use for each image in this batch
    scale_inds = np.random.randint(
        0, high=len(cfg.TRAIN.SCALES), size=num_images)
    processed_ims = []
    im_scales = []
    #roidb_file_name = []
    for i in range(num_images):
        #im = cv2.imread(roidb[i]['image'])
        im = cv2.imread(roidb[i]['image'], cv2.IMREAD_COLOR)
        #print(roidb[i]['image'], im.shape)
        #roidb_file_name.append(int(roidb[i]['file_name'].split('.')[0]))
        assert im is not None, \
            'Failed to read image \'{}\''.format(roidb[i]['image'])
        # If NOT using opencv to read in images, uncomment following lines
        # if len(im.shape) == 2:
        #     im = im[:, :, np.newaxis]
        #     im = np.concatenate((im, im, im), axis=2)
        # # flip the channel, since the original one using cv2
        # # rgb -> bgr
        # im = im[:, :, ::-1]
        if roidb[i]['flipped']:
            im = im[:, ::-1, :]
        target_size = cfg.TRAIN.SCALES[scale_inds[i]]
        im, im_scale = blob_utils.prep_im_for_blob(
            im, cfg.PIXEL_MEANS, [target_size], cfg.TRAIN.MAX_SIZE)
        im_scales.append(im_scale[0])
        processed_ims.append(im[0])
    # Create a blob to hold the input images [n, c, h, w]
    blob = blob_utils.im_list_to_blob(processed_ims)
    # Dataset-dependent first frame index: vidvrd frame files start at 0,
    # ag (and everything else) at 1 -- presumably the frame numbering of the
    # extracted frame images; TODO confirm.
    if (len(cfg.TRAIN.DATASETS) > 0 and \
        cfg.TRAIN.DATASETS[0].find('vidvrd') >= 0) or \
        (len(cfg.TEST.DATASETS) > 0 and \
        cfg.TEST.DATASETS[0].find('vidvrd') >= 0):
        im_id_st = 0
    elif (len(cfg.TRAIN.DATASETS) > 0 and \
        cfg.TRAIN.DATASETS[0].find('ag') >= 0) or \
        (len(cfg.TEST.DATASETS) > 0 and \
        cfg.TEST.DATASETS[0].find('ag') >= 0):
        im_id_st = 1
    else:
        im_id_st = 1
    all_frames_blob, bf_cur_len, f_scale = get_frames_blob(roidb, \
        num_images, scale_inds, im_id_st=im_id_st, half_frame_relative_path=cfg.HALF_FRAME_RELATIVE_PATH) ###!
    #print(blob.shape, all_frames_blob.shape)
    return blob, im_scales, all_frames_blob, bf_cur_len, f_scale
def get_frames_blob(roidb, num_images, scale_inds, im_id_st=1, half_frame_relative_path=''):
    """Build a blob of temporal context frames around each roidb image.

    For each image, reads up to 2*cfg.HALF_NUMBER_OF_FRAMES+1 neighbouring
    frames (stepping by cfg.FRAMES_INTERVAL) from the sibling frame
    directory given by half_frame_relative_path, preps them like the main
    image, and zero-pads at video boundaries.

    Returns (all_frames_blob, bf_cur_len, f_scale): the stacked frame
    blobs, the per-image index of the current frame within its window, and
    the per-image frame scale factors.
    """
    all_frames_blob = []
    bf_cur_len = []
    f_scale = []
    # Frame image extension depends on how frames were extracted.
    if half_frame_relative_path == 'sampled_frames': sufix_class = '.jpg'
    else: sufix_class = '.png'
    for i in range(num_images):
        # Frame id is encoded in the image file name, e.g. .../000042.png.
        frame_full_name = roidb[i]['image'].split('/')[-1]
        frame_id = int(frame_full_name.split('.')[0])
        tot_video_path_list = roidb[i]['image'].split('/')
        video_path_list = tot_video_path_list[:-3]
        video_path = '/'
        for j in video_path_list:
            video_path = os.path.join(video_path, j)
        #video_path = os.path.join(video_path, 'all_frames')
        video_path = os.path.join(video_path, half_frame_relative_path) ###!!!
        video_path = os.path.join(video_path, tot_video_path_list[-2])
        processed_frames = []
        start_f_id = frame_id - (cfg.HALF_NUMBER_OF_FRAMES + 1) * cfg.FRAMES_INTERVAL
        end_f_id = frame_id + (cfg.HALF_NUMBER_OF_FRAMES + 1) * cfg.FRAMES_INTERVAL
        # Collect candidate frame ids: backwards from the current frame
        # (clipped at im_id_st), then forwards.
        process_frames_id = []
        for j in range(frame_id, start_f_id, -cfg.FRAMES_INTERVAL):
            if j < im_id_st:
                break
            process_frames_id.append(j)
        process_frames_id = process_frames_id[::-1]
        process_frames_id = process_frames_id[:-1]
        for j in range(frame_id, end_f_id, cfg.FRAMES_INTERVAL):
            process_frames_id.append(j)
        off_set_f = 0
        off_set_b = cfg.HALF_NUMBER_OF_FRAMES
        k = 0
        for cnt, j in enumerate(process_frames_id):
            if j < im_id_st:
                continue
            frame_path = os.path.join(video_path, '{:06d}'.format(j)+sufix_class)
            if j == frame_id:
                # The current frame: remember how many frames precede it,
                # restart the counter, and read it from the roidb path.
                off_set_f = k
                k = 0
                #k = k+1 #
                #continue #
                frame_path = roidb[i]['image']
            if os.path.exists(frame_path):
                im = cv2.imread(frame_path, cv2.IMREAD_COLOR)
                if roidb[i]['flipped']:
                    im = im[:, ::-1, :]
                #target_size = cfg.TRAIN.SCALES[scale_inds[i]]
                target_size = cfg.TEMPORAL_SCALES
                im, f_scale_i = blob_utils.prep_im_for_blob(
                    im, cfg.PIXEL_MEANS, [target_size], 1000)
                processed_frames.append(im[0])
                k = k + 1
            else:
                # Missing frame file: stop at the end of the video.
                off_set_b = k - 1
                break
        st = cfg.HALF_NUMBER_OF_FRAMES - off_set_f
        ed = cfg.HALF_NUMBER_OF_FRAMES + off_set_b
        if cfg.FPN.REL_FPN_ON:
            frames_blob = blob_utils.im_list_to_blob(processed_frames)
        else:
            #frames_blob = np.stack(processed_frames)
            frames_blob = np.array(processed_frames, dtype=np.float32)
            channel_swap = (0, 3, 1, 2)
            frames_blob = frames_blob.transpose(channel_swap)
        ##got_frames = np.zeros((2*cfg.HALF_NUMBER_OF_FRAMES+1, frames_blob.shape[1], frames_blob.shape[2], frames_blob.shape[3]), dtype=np.float32)
        ##got_frames[st:ed] = frames_blob.astype(np.float32)
        pad_st = max(0, st)
        #pad_ed = max(0, 2*cfg.HALF_NUMBER_OF_FRAMES + 1 - ed)
        pad_ed = max(0, 2*cfg.HALF_NUMBER_OF_FRAMES - ed) #
        f_scale.append(f_scale_i[0])
        # Batched inputs are zero-padded to a fixed window so they stack;
        # a single image keeps its variable-length window.
        if (pad_st == 0 and pad_ed == 0) or num_images == 1:
            got_frames = frames_blob
        elif num_images != 1:
            got_frames = np.pad(frames_blob, ((pad_st,pad_ed), (0,0), (0,0), (0,0)),'constant',constant_values=0)
        if num_images != 1:
            bf_cur_len.append(cfg.HALF_NUMBER_OF_FRAMES)
            all_frames_blob.append(got_frames)
        else:
            bf_cur_len.append(off_set_f)
            all_frames_blob = np.expand_dims(got_frames, axis=0)
    if num_images != 1:
        all_frames_blob = np.stack(all_frames_blob)
    bf_cur_len = np.array(bf_cur_len, dtype=np.int32)
    f_scale = np.array(f_scale, dtype=np.float32)
    return all_frames_blob, bf_cur_len, f_scale
"""
Copyright (c) 2017, Jairus Martin.
Distributed under the terms of the MIT License.
The full license is in the file LICENSE, distributed with this software.
Created on May 20, 2017
@author: jrm
"""
from atom.api import Typed, set_default
from .android_view_group import AndroidViewGroup, ViewGroup
from .bridge import JavaBridgeObject, JavaMethod, JavaCallback
class ArrayAdapter(JavaBridgeObject):
    """Bridge proxy for android.widget.ArrayAdapter.

    Holds the items an AdapterView displays; add/addAll/remove/clear
    mirror the corresponding Java methods over the bridge.
    """
    __nativeclass__ = set_default('android.widget.ArrayAdapter')
    __signature__ = set_default(('android.content.Context', 'android.R'))
    add = JavaMethod('java.lang.Object')
    addAll = JavaMethod('[Ljava.lang.Object;')
    remove = JavaMethod('java.lang.Object')
    clear = JavaMethod()
class AdapterView(ViewGroup):
    """Bridge proxy for android.widget.AdapterView.

    Exposes the Java setters plus callbacks for item click/long-click/
    selection events raised by the native view.
    """
    __nativeclass__ = set_default('android.widget.AdapterView')
    # Java-side setters.
    setEmptyView = JavaMethod('android.view.View')
    setFocusableInTouchMode = JavaMethod('boolean')
    setOnItemClickListener = JavaMethod(
        'android.widget.AdapterView$OnItemClickListener')
    setOnItemLongClickListener = JavaMethod(
        'android.widget.AdapterView$OnItemLongClickListener')
    setOnItemSelectedListener = JavaMethod(
        'android.widget.AdapterView$OnItemSelectedListener')
    setSelection = JavaMethod('int')
    # Callbacks invoked from the Java listeners registered above.
    onItemClick = JavaCallback('android.widget.AdapterView',
                               'android.view.View', 'int', 'long')
    onItemLongClick = JavaCallback('android.widget.AdapterView',
                                   'android.view.View', 'int', 'long')
    onItemSelected = JavaCallback('android.widget.AdapterView',
                                  'android.view.View', 'int', 'long')
    onNothingSelected = JavaCallback('android.widget.AdapterView')
class AndroidAdapterView(AndroidViewGroup):
    """Android toolkit object wrapping an AdapterView and its ArrayAdapter."""
    #: Adapter reference
    adapter = Typed(ArrayAdapter)
| 1,825 | 513 |
from abc import ABCMeta, abstractmethod, abstractproperty
__all__ = ['ConstraintsInterface']
class ConstraintsInterface(metaclass=ABCMeta):
    """Abstract interface describing media constraints (aspect-ratio and
    duration bounds plus a recommended ratio).

    Rewritten from the deprecated ``abstractproperty`` decorator
    (deprecated since Python 3.3) to the equivalent ``@property`` +
    ``@abstractmethod`` stack; the abstract-method set is unchanged.
    """

    @property
    @abstractmethod
    def title(self) -> str: ...

    @property
    @abstractmethod
    def min_aspect_ratio(self) -> float: ...

    @property
    @abstractmethod
    def max_aspect_ratio(self) -> float: ...

    @property
    @abstractmethod
    def recommended_ratio(self) -> float: ...

    @property
    @abstractmethod
    def recommended_ratio_deviation(self) -> float: ...

    @property
    @abstractmethod
    def use_recommended_ratio_by_default(self) -> bool: ...

    @property
    @abstractmethod
    def min_duration(self) -> float: ...

    @property
    @abstractmethod
    def max_duration(self) -> float: ...
from collections import OrderedDict
from django.db import models
from django.utils.translation import gettext_lazy as _
from forms.modelutils import *
# The following necessitated for some of the channges
# https://code.djangoproject.com/ticket/19539 necessitated removal of __metaclass__
# get_fields_with_model was deprected and thus the move to fields
def prepend_verbose(mydict, field_name, num, verbose_name=None):
    """Prefix a field's verbose_name with its questionnaire number.

    Looks up ``field_name`` in ``mydict`` and rewrites that field's
    ``verbose_name`` to ``"(<num>) <label>"``.  When the desired display
    label differs from the field name (e.g. 'age' should show as
    'AGE (PERSON APPEARS)'), pass the label via ``verbose_name``.
    """
    field = mydict[field_name]
    label = verbose_name if verbose_name else field_name
    field.verbose_name = f'({num}) {label}'
# ----------------------------
# Newspaper
# ----------------------------
def newspaper_journalist_meta (name, bases, mydict):
    # Metaclass hook: renumber inherited Journalist fields for the newspaper
    # questionnaire. bases[0] is the Journalist model; _meta.fields[0] and
    # [1] are its first two fields (referenced here as sex and age).
    dct = {
        'sex' : bases[0]._meta.fields[0],
        'age' : bases[0]._meta.fields[1],
    }
    # Only 'sex' is numbered here; 'age' is collected but not renumbered --
    # presumably handled elsewhere, TODO confirm.
    prepend_verbose(dct, 'sex', '9')
    return type(name, bases, mydict)
class NewspaperJournalist(Journalist, metaclass=newspaper_journalist_meta):
    """Journalist recorded on a newspaper coding sheet; the metaclass
    renumbers the inherited sex field for this medium."""
    newspaper_sheet = models.ForeignKey('NewspaperSheet', on_delete=models.CASCADE)
class NewspaperPerson(Person):
    """Person appearing in a newspaper story; questionnaire items 10-22."""
    sex = field_sex(_('(10) Sex'))
    age = field_age(_('(11) Age (person appears)'))
    occupation = field_occupation(_('(12) Occupation or Position'))
    function = field_function(_('(13) Function in the news story'))
    family_role = field_family_role(_('(14) Family Role Given?'))
    victim_or_survivor = field_victim_or_survivor(_('(15) Does the story identify the person as either a victim or survivor?'))
    victim_of = field_victim_of(_('(16) The story identifies the person as a victim of:'))
    survivor_of = field_survivor_of(_('(17) The story identifies the person as a survivor of:'))
    is_quoted = field_is_quoted(_('(18) Is the person directly quoted'))
    is_photograph = field_is_photograph(_('(19) Is there a photograph of the person in the story?'))
    special_qn_1 = field_special_qn(_('(20) Special question (1)'))
    special_qn_2 = field_special_qn(_('(21) Special question (2)'))
    special_qn_3 = field_special_qn(_('(22) Special question (3)'))
    newspaper_sheet = models.ForeignKey('NewspaperSheet', on_delete=models.CASCADE)
class NewspaperSheet(SheetModel):
    """Coding sheet for a single newspaper story (questionnaire items 1-8,
    23-24 plus the story-level z question)."""
    class Meta:
        verbose_name = _('Newspaper')
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    newspaper_name = models.CharField(max_length=255, verbose_name=_('Newspaper'), help_text=_('''Be as specific as possible. If the paper has different regional editions, write in the name of the edition you are monitoring - e.g. 'The Hindu - Delhi edition'.'''))
    page_number = models.PositiveIntegerField(verbose_name=_('(1) Page Number'), help_text=_('Write in the number of the page on which the story begins. Story appears on first page = 1, Seventh page = 7, etc.'), null=True, blank=True)
    covid19 = field_covid19(_('(z) Is this story related to coronavirus Covid-19?'))
    topic = field_topic(_('(2) Topic'))
    scope = field_scope(_('(3) Scope'))
    space = models.PositiveIntegerField(choices=SPACE, verbose_name=_('(4) Space'), null=True, blank=True)
    equality_rights = field_equality_rights(_('(5) Reference to gender equality / human rights legislation/ policy'))
    about_women = field_about_women(_('(6) Is the story about a particular woman or group of women?'))
    inequality_women = field_inequality_women(_('(7) This story clearly highlights issues of inequality between women and men'))
    stereotypes = field_stereotypes(_('(8) This story clearly challenges gender stereotypes'))
    further_analysis = field_further_analysis(_('(24) Does this story warrant further analysis?'), _('''<br><br>A story warrants further analysis if it clearly perpetuates or clearly challenges gender stereotypes, if it includes women's opinions in a remarkable way, if it contributes to an understanding of inequalities between women and men, if it mentions or calls attention to women's human rights, etc. Consult the guide for further explanation'''))
    comments = field_comments(_('(23) Describe any photographs included in the story and the conclusions you draw from them.'))
    def __str__(self):
        created_at = self.created_at.strftime("%Y-%m-%d")
        # NOTE(review): indexing SPACE[self.space - 1] assumes the stored
        # choice values are 1-based and contiguous with the SPACE tuple
        # order -- confirm against the SPACE definition in modelutils.
        space = SPACE[self.space - 1][1].split(')')[1] if self.space else "" # Extract space title from SPACE tuple
        page = f" page {self.page_number}" if self.page_number else ""
        return f"{self.newspaper_name} {created_at}{page} {space}"
# ----------------------------
# Radio
# ----------------------------
class RadioPerson(Person):
    """Person appearing in a radio story; questionnaire items 10-19
    (no age/photograph items for this medium)."""
    sex = field_sex(_('(10) Sex'))
    occupation = field_occupation(_('(11) Occupation or Position'))
    function = field_function(_('(12) Function in the news story'))
    family_role = field_family_role(_('(13) Family Role Given?'))
    victim_or_survivor = field_victim_or_survivor(_('(14) Does the story identify the person as either a victim or survivor?'))
    victim_of = field_victim_of(_('(15) The story identifies the person as a victim of:'))
    survivor_of = field_survivor_of(_('(16) The story identifies the person as a survivor of:'))
    special_qn_1 = field_special_qn(_('(17) Special question (1)'))
    special_qn_2 = field_special_qn(_('(18) Special question (2)'))
    special_qn_3 = field_special_qn(_('(19) Special question (3)'))
    radio_sheet = models.ForeignKey('RadioSheet', on_delete=models.CASCADE)
def radio_journalist_meta(name, bases, mydict):
    """Metaclass hook for radio journalist models.

    Renumbers the verbose names of the ``sex`` and ``role`` fields inherited
    from the first base class to match the radio sheet's question numbering,
    then builds the class as usual.
    """
    parent_fields = bases[0]._meta.fields
    inherited = {
        'sex': parent_fields[0],
        'role': parent_fields[2],
    }
    # prepend_verbose mutates the fields' verbose names in place.
    prepend_verbose(inherited, 'role', '8')
    prepend_verbose(inherited, 'sex', '9')
    return type(name, bases, mydict)
class RadioJournalist(BroadcastJournalist, metaclass=radio_journalist_meta):
    """A journalist/presenter on a radio item; labels renumbered by the metaclass."""
    radio_sheet = models.ForeignKey('RadioSheet', on_delete=models.CASCADE)
class RadioSheet(SheetModel):
    """Coding sheet for one radio newscast item.

    Numbered labels follow the radio monitoring questionnaire; ``(z)`` is a
    supplementary Covid-19 question outside the core numbering.
    """

    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    channel = models.CharField(max_length=255, verbose_name=_('Channel'), help_text=_('''Be as specific as possible. E.g. if the radio company is called RRI, and if the newscast is broadcast on its third channel, write in 'RRI-3'.'''))
    start_time = models.TimeField(verbose_name=_('Time of Broadcast'))
    num_female_anchors = field_num_anchors(_('Number of female anchors'))
    num_male_anchors = field_num_anchors(_('Number of male anchors'))
    item_number = field_item_number(_('(1) Item Number'))
    covid19 = field_covid19(_('(z) Is this story related to coronavirus Covid-19?'))
    topic = field_topic(_('(2) Topic'))
    scope = field_scope(_('(3) Scope'))
    equality_rights = field_equality_rights(_('(4) Reference to gender equality / human rights legislation/ policy'))
    about_women = field_about_women(_('(5) Is the story about a particular woman or group of women?'))
    inequality_women = field_inequality_women(_('(6) This story clearly highlights issues of inequality between women and men'))
    stereotypes = field_stereotypes(_('(7) This story clearly challenges gender stereotypes'))
    further_analysis = field_further_analysis(_('(20) Does this story warrant further analysis?'), _('''<br><br>A story warrants further analysis if it clearly perpetuates or clearly challenges gender stereotypes, if it includes women's opinions in a remarkable way, if it contributes to an understanding of inequalities between women and men, if it mentions or calls attention to women's human rights, etc. Consult the guide for further explanation'''))
    # (N/A): photograph question does not apply to radio but the field is kept
    # for parity with the other sheet models.
    comments = field_comments(_('(N/A) Describe any photographs included in the story and the conclusions you draw from them.'))

    def __str__(self):
        # e.g. "RRI-3 06:00:00 story 2"
        item_number = f" story {str(self.item_number)}" if self.item_number else ""
        return f"{self.channel} {str(self.start_time)}{item_number}"

    class Meta:
        verbose_name = _('Radio')
# ----------------------------
# Television
# ----------------------------
class TelevisionPerson(Person):
    """A person (news subject or source) appearing in a television news item.

    Question numbers in the labels follow the television coding sheet;
    unlike radio, television records the person's apparent age.
    """

    sex = field_sex(_('(11) Sex'))
    age = field_age(_('(12) Age (person appears)'))
    occupation = field_occupation(_('(13) Occupation or Position'))
    function = field_function(_('(14) Function in the news story'))
    family_role = field_family_role(_('(15) Family Role Given?'))
    victim_or_survivor = field_victim_or_survivor(_('(16) Does the story identify the person as either a victim or survivor?'))
    victim_of = field_victim_of(_('(17) The story identifies the person as a victim of:'))
    survivor_of = field_survivor_of(_('(18) The story identifies the person as a survivor of:'))
    # Country/region-specific questions configured per monitoring exercise.
    special_qn_1 = field_special_qn(_('(19) Special question (1)'))
    special_qn_2 = field_special_qn(_('(20) Special question (2)'))
    special_qn_3 = field_special_qn(_('(21) Special question (3)'))
    # Each coded person belongs to exactly one television coding sheet.
    television_sheet = models.ForeignKey('TelevisionSheet', on_delete=models.CASCADE)
def television_journalist_meta(name, bases, mydict):
    """Metaclass hook for television journalist models.

    Renumbers the verbose names of the ``sex``, ``age`` and ``role`` fields
    inherited from the first base class to match the television sheet's
    question numbering, then builds the class as usual.
    """
    parent_fields = bases[0]._meta.fields
    inherited = {
        'sex': parent_fields[0],
        'age': parent_fields[1],
        'role': parent_fields[2],
    }
    # prepend_verbose mutates the fields' verbose names in place.
    prepend_verbose(inherited, 'role', '8')
    prepend_verbose(inherited, 'sex', '9')
    prepend_verbose(inherited, 'age', '10', 'Age (Person Appears)')
    return type(name, bases, mydict)
class TelevisionJournalist(BroadcastJournalist, metaclass=television_journalist_meta):
    """A journalist/presenter on a television item; labels renumbered by the metaclass."""
    television_sheet = models.ForeignKey('TelevisionSheet', on_delete=models.CASCADE)
class TelevisionSheet(SheetModel):
    """Coding sheet for one television newscast item.

    Numbered labels follow the television monitoring questionnaire; ``(z)``
    is a supplementary Covid-19 question outside the core numbering.
    """

    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    channel = models.CharField(max_length=255, verbose_name=_('Channel'), help_text=_('''Be as specific as possible. E.g. if the television company is called RTV, and if the newscast is broadcast on its second channel, write in 'RTV-2' '''))
    start_time = models.TimeField(verbose_name=_('Time of Broadcast'))
    num_female_anchors = field_num_anchors(_('Number of female anchors'))
    num_male_anchors = field_num_anchors(_('Number of male anchors'))
    item_number = field_item_number(_('(1) Item Number'))
    covid19 = field_covid19(_('(z) Is this story related to coronavirus Covid-19?'))
    topic = field_topic(_('(2) Topic'))
    scope = field_scope(_('(3) Scope'))
    equality_rights = field_equality_rights(_('(4) Reference to gender equality / human rights legislation/ policy'))
    about_women = field_about_women(_('(5) Is the story about a particular woman or group of women?'))
    inequality_women = field_inequality_women(_('(6) This story clearly highlights issues of inequality between women and men'))
    stereotypes = field_stereotypes(_('(7) This story clearly challenges gender stereotypes'))
    further_analysis = field_further_analysis(_('(22) Does this story warrant further analysis?'), _('''<br><br>A story warrants further analysis if it clearly perpetuates or clearly challenges gender stereotypes, if it includes women's opinions in a remarkable way, if it contributes to an understanding of inequalities between women and men, if it mentions or calls attention to women's human rights, etc. Consult the guide for further explanation'''))
    # (N/A): photograph question does not apply to television but the field is
    # kept for parity with the other sheet models.
    comments = field_comments(_('(N/A) Describe any photographs included in the story and the conclusions you draw from them.'))

    def __str__(self):
        # e.g. "RTV-2 20:00:00 story 1"
        item_number = f" story {str(self.item_number)}" if self.item_number else ""
        return f"{self.channel} {str(self.start_time)}{item_number}"

    class Meta:
        verbose_name = _('Television')
# ----------------------------
# Internet News
# ----------------------------
def internet_journalist_meta(name, bases, mydict):
    """Metaclass hook for internet-news journalist models.

    Renumbers the verbose names of the ``sex`` and ``age`` fields inherited
    from the first base class to match the internet sheet's question
    numbering, then builds the class as usual.
    """
    parent_fields = bases[0]._meta.fields
    inherited = {
        'sex': parent_fields[0],
        'age': parent_fields[1],
    }
    # prepend_verbose mutates the fields' verbose names in place.
    prepend_verbose(inherited, 'sex', '10')
    prepend_verbose(inherited, 'age', '11', 'Age (Person Appears)')
    return type(name, bases, mydict)
class InternetNewsJournalist(Journalist, metaclass=internet_journalist_meta):
    """A journalist on an internet news story; labels renumbered by the metaclass."""
    internetnews_sheet = models.ForeignKey('InternetNewsSheet', on_delete=models.CASCADE)
class InternetNewsPerson(Person):
    """A person (news subject or source) appearing in an internet news story.

    Question numbers in the labels follow the internet coding sheet; online
    stories additionally record direct quotation and photograph presence.
    """

    sex = field_sex(_('(12) Sex'))
    age = field_age(_('(13) Age (person appears)'))
    occupation = field_occupation(_('(14) Occupation or Position'))
    function = field_function(_('(15) Function in the news story'))
    family_role = field_family_role(_('(16) Family Role Given?'))
    victim_or_survivor = field_victim_or_survivor(_('(17) Does the story identify the person as either a victim or survivor?'))
    victim_of = field_victim_of(_('(18) The story identifies the person as a victim of:'))
    survivor_of = field_survivor_of(_('(19) The story identifies the person as a survivor of:'))
    is_quoted = field_is_quoted(_('(20) Is the person directly quoted'))
    is_photograph = field_is_photograph(_('(21) Is there a photograph of the person in the story?'))
    # Country/region-specific questions configured per monitoring exercise.
    special_qn_1 = field_special_qn(_('(22) Special question (1)'))
    special_qn_2 = field_special_qn(_('(23) Special question (2)'))
    special_qn_3 = field_special_qn(_('(24) Special question (3)'))
    # Each coded person belongs to exactly one internet-news coding sheet.
    internetnews_sheet = models.ForeignKey('InternetNewsSheet', on_delete=models.CASCADE)
class InternetNewsSheet(SheetModel):
    """Coding sheet for one internet (online) news story.

    Numbered labels follow the internet monitoring questionnaire; ``(z)`` is
    a supplementary Covid-19 question outside the core numbering. Online
    stories additionally record social-media sharing and the webpage layer
    (click depth) at which the story was found.
    """
    # NOTE: a redundant ``__init__`` override that only delegated to
    # ``super().__init__`` was removed — Django models need no such override.

    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    # Story
    website_name = models.CharField(max_length=255, verbose_name=_('Website Name'))
    website_url = models.CharField(max_length=255, verbose_name=_('URL'))
    time_accessed = models.DateTimeField(verbose_name=_('Date and Time Accessed'))
    offline_presence = models.CharField(max_length=1, choices=YESNO, verbose_name=_('Offline presence?'))
    webpage_layer_no = models.PositiveIntegerField(help_text=_('Webpage Layer Number. Homepage=1, One click away=2, Five clicks away= 5, etc. Note that if a story appears on the front page, code with 1'), verbose_name=_('(1) Webpage Layer Number'), blank=True, null=True)
    covid19 = field_covid19(_('(z) Is this story related to coronavirus Covid-19?'))
    topic = field_topic(_('(2) Topic'))
    scope = field_scope(_('(3) Scope'))
    shared_via_twitter = models.CharField(max_length=1, verbose_name=_('(4) Shared via twitter?'), choices=YESNO, help_text=_('''Has this story been shared by the media house via Twitter?
<br>Enter the exact URL of the story into <a href="https://twitter.com" target="_blank">https://twitter.com</a> - answer yes if the media house's name appears in the search results.'''))
    shared_on_facebook = models.CharField(max_length=1, choices=YESNO, verbose_name=_('(5) Shared on Facebook'), help_text=_('''Has this story been shared by the media house on its Facebook Page?
<br>Scroll down the media house's Facebook page to check.'''))

    # Analysis
    equality_rights = field_equality_rights(_('(6) Reference to gender equality / human rights legislation/ policy'))
    about_women = field_about_women(_('(7) Is the story about a particular woman or group of women?'))
    inequality_women = field_inequality_women(_('(8) This story clearly highlights issues of inequality between women and men'))
    stereotypes = field_stereotypes(_('(9) This story clearly challenges gender stereotypes'))
    further_analysis = field_further_analysis(_('(26) Does this story warrant further analysis?'), _('''<br><br>A story warrants further analysis if it clearly perpetuates or clearly challenges gender stereotypes, if it includes women's opinions in a remarkable way, if it contributes to an understanding of inequalities between women and men, if it mentions or calls attention to women's human rights, etc. Consult the guide for further explanation'''))
    url_and_multimedia = field_url_and_multimedia(_('(25) Copy and paste the URL of the story. Describe any photographs, images, other multimedia features included in the story. Note down the conclusions you draw from the images, audio and video.'))

    def __str__(self):
        # e.g. "Example News 2020-09-29 10:15:00 https://example.com/story"
        time_accessed = self.time_accessed.strftime("%Y-%m-%d %H:%M:%S")
        website_url = f" {self.website_url}"
        return f"{self.website_name} {time_accessed}{website_url}"

    class Meta:
        verbose_name = _('Internet')
def twitter_journalist_meta(name, bases, mydict):
    """Metaclass hook for twitter journalist models.

    Renumbers the verbose names of the ``sex`` and ``age`` fields inherited
    from the first base class to match the twitter sheet's question
    numbering, then builds the class as usual.
    """
    parent_fields = bases[0]._meta.fields
    inherited = {
        'sex': parent_fields[0],
        'age': parent_fields[1],
    }
    # prepend_verbose mutates the fields' verbose names in place.
    prepend_verbose(inherited, 'sex', '7')
    prepend_verbose(inherited, 'age', '8', 'Age (Person Appears)')
    return type(name, bases, mydict)
# ----------------------------
# Twitter
# ----------------------------
class TwitterJournalist(Journalist, metaclass=twitter_journalist_meta):
    """A journalist on a tweet; labels renumbered by the metaclass."""
    twitter_sheet = models.ForeignKey('TwitterSheet', on_delete=models.CASCADE)
class TwitterPerson(Person):
    """A person (news subject or source) appearing in a tweet.

    Twitter uses a reduced question set: no family-role or victim/survivor
    questions, unlike the other person models.
    """

    sex = field_sex(_('(9) Sex'))
    age = field_age(_('(10) Age (person appears)'))
    occupation = field_occupation(_('(11) Occupation or Position'))
    function = field_function(_('(12) Function in the news story'))
    is_photograph = field_is_photograph(_('(13) Is there a photograph of the person in the story?'))
    # Country/region-specific questions configured per monitoring exercise.
    special_qn_1 = field_special_qn(_('(14) Special question (1)'))
    special_qn_2 = field_special_qn(_('(15) Special question (2)'))
    special_qn_3 = field_special_qn(_('(16) Special question (3)'))
    # Each coded person belongs to exactly one twitter coding sheet.
    twitter_sheet = models.ForeignKey('TwitterSheet', on_delete=models.CASCADE)
class TwitterSheet(SheetModel):
    """Coding sheet for one tweet (or media-house retweet).

    Numbered labels follow the twitter monitoring questionnaire; ``(z)`` is
    a supplementary Covid-19 question outside the core numbering.
    """

    # NOTE(review): Meta is declared before the fields here, unlike the other
    # sheet models which place it last — harmless to Django, but inconsistent.
    class Meta:
        verbose_name = _('Twitter')
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    media_name = models.CharField(max_length=255, verbose_name=_('Media Name'), help_text=_('''For example. 'CNN Breaking News' '''))
    twitter_handle = models.CharField(max_length=255, verbose_name=_('Twitter Handle'), help_text=_('e.g. @cnnbrk'))

    # Story
    retweet = models.PositiveIntegerField(choices=RETWEET,
                                          verbose_name=_('(1) Tweet or Retweet'),
                                          help_text=_('Only retweets from the same media house can be coded. Do not code retweets from other news providers')
                                          )
    covid19 = field_covid19(_('(z) Is this story related to coronavirus Covid-19?'))
    topic = field_topic(_('(2) Topic'))

    # Analysis
    equality_rights = field_equality_rights(_('(3) Reference to gender equality / human rights legislation/ policy'))
    about_women = field_about_women(_('(4) Is the story about a particular woman or group of women?'))
    inequality_women = field_inequality_women(_('(5) This story clearly highlights issues of inequality between women and men'))
    stereotypes = field_stereotypes(_('(6) This story clearly challenges gender stereotypes'))
    further_analysis = field_further_analysis(_('(18) Does this tweet warrant further analysis?'), _('''<br><br>A tweet warrants further analysis if it clearly perpetuates or clearly challenges gender stereotypes, if it includes women's opinions in a remarkable way, if it contributes to an understanding of inequalities between women and men, if it mentions or calls attention to women's human rights, etc. Consult the guide for further explanation'''))
    url_and_multimedia = field_url_and_multimedia(_('(17) Copy and paste the URL of the tweet. Describe any photographs, images, other multimedia features included in the tweet. Note down the conclusions you draw from the images, audio and video.'))

    def __str__(self):
        # e.g. "CNN Breaking News 2020-09-29 10:15:00 @cnnbrk"
        created_at = self.created_at.strftime("%Y-%m-%d %H:%M:%S")
        twitter_handle = f" {self.twitter_handle}"
        return f"{self.media_name} {created_at}{twitter_handle}"
# Registries mapping medium name -> model class. "tm"/"dm" presumably stand
# for traditional media (print/radio/television) and digital media
# (internet/twitter) — the groupings match that split; verify against callers.
# The subset registries are derived from the full ones so they cannot drift.
_TM_KEYS = ('Print', 'Radio', 'Television')
_DM_KEYS = ('Internet', 'Twitter')

sheet_models = OrderedDict([
    ('Print', NewspaperSheet),
    ('Radio', RadioSheet),
    ('Television', TelevisionSheet),
    ('Internet', InternetNewsSheet),
    ('Twitter', TwitterSheet),
])
tm_sheet_models = OrderedDict((key, sheet_models[key]) for key in _TM_KEYS)
dm_sheet_models = OrderedDict((key, sheet_models[key]) for key in _DM_KEYS)

person_models = OrderedDict([
    ('Print', NewspaperPerson),
    ('Radio', RadioPerson),
    ('Television', TelevisionPerson),
    ('Internet', InternetNewsPerson),
    ('Twitter', TwitterPerson),
])
tm_person_models = OrderedDict((key, person_models[key]) for key in _TM_KEYS)
dm_person_models = OrderedDict((key, person_models[key]) for key in _DM_KEYS)

journalist_models = OrderedDict([
    ('Print', NewspaperJournalist),
    ('Radio', RadioJournalist),
    ('Television', TelevisionJournalist),
    ('Internet', InternetNewsJournalist),
    ('Twitter', TwitterJournalist),
])
tm_journalist_models = OrderedDict(
    (key, journalist_models[key]) for key in _TM_KEYS
)
broadcast_journalist_models = OrderedDict(
    (key, journalist_models[key]) for key in ('Radio', 'Television')
)
dm_journalist_models = OrderedDict(
    (key, journalist_models[key]) for key in _DM_KEYS
)

# Top-level index used to iterate every registry by display category.
all_models = OrderedDict([
    ('Sheets', sheet_models),
    ('Sources', person_models),
    ('Reporters', journalist_models),
])
| 21,532 | 6,807 |