id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
8062938 | _base_ = [
    # mmdetection base configs: Cascade Mask R-CNN model, COCO instance
    # segmentation dataset, 1x training schedule, default runtime settings.
    '../../configs/_base_/models/cascade_mask_rcnn_r50_fpn.py',
    '../../configs/_base_/datasets/coco_instance.py',
    '../../configs/_base_/schedules/schedule_1x.py',
    '../../configs/_base_/default_runtime.py'
]
# Cascade Mask R-CNN with a Swin-S backbone (depths [2, 2, 18, 2]) replacing
# the base config's ResNet-50, specialised to a single foreground class
# ('module', i.e. photovoltaic modules).
model = dict(
    backbone=dict(
        _delete_=True,  # drop the inherited ResNet-50 backbone entirely
        type='SwinTransformer',
        embed_dims=96,
        depths=[2, 2, 18, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.2,
        patch_norm=True,
        out_indices=(0, 1, 2, 3),
        with_cp=False,
        convert_weights=True,
        # Fixed PEP8 E251 spacing (was `frozen_stages = -1`); -1 freezes no
        # backbone stages.
        frozen_stages=-1,
        init_cfg=None),
    # FPN input channels must match the Swin stage output widths (96 * 2**i).
    neck=dict(in_channels=[96, 192, 384, 768]),
    roi_head=dict(
        # Three cascade stages with progressively tighter box-regression
        # target standard deviations.
        bbox_head=[
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=1,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.1, 0.1, 0.2, 0.2]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=1,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.05, 0.05, 0.1, 0.1]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=1,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.033, 0.033, 0.067, 0.067]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
        ],
        mask_head=dict(num_classes=1)))
# AdamW replaces the inherited SGD optimiser (_delete_=True discards the base
# settings).  Weight decay is disabled for positional-embedding tables and
# normalisation layers, as is customary for Swin fine-tuning.
optimizer = {
    '_delete_': True,
    'type': 'AdamW',
    'lr': 0.0001,
    'betas': (0.9, 0.999),
    'weight_decay': 0.05,
    'paramwise_cfg': {
        'custom_keys': {
            'absolute_pos_embed': {'decay_mult': 0.},
            'relative_position_bias_table': {'decay_mult': 0.},
            'norm': {'decay_mult': 0.},
        },
    },
}
# Step LR decay at epochs 27 and 33 (the standard mmdet 3x milestones).
lr_config = dict(step=[27, 33])
# NOTE(review): max_epochs=100 with decay steps at 27/33 leaves the LR at its
# final value for ~67 epochs — confirm this long tail is intended.
runner = dict(type='EpochBasedRunner', max_epochs=100)
dataset_type = 'CocoDataset'
data_root = 'solarppdetect/data/'
# Single detection category.
classes = ('module',)
# augmentation strategy originates from DETR / Sparse RCNN
# ImageNet channel statistics used to normalise input images (RGB order).
img_norm_cfg = {
    'mean': [123.675, 116.28, 103.53],
    'std': [58.395, 57.12, 57.375],
    'to_rgb': True,
}
# Training pipeline: random flip plus the DETR / Sparse R-CNN AutoAugment
# scheme — either a plain multi-scale resize, or resize -> random crop ->
# resize again.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='AutoAugment',
         policies=[
             [
                 # Policy 1: plain multi-scale resize.
                 dict(type='Resize',
                      img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
                                 (608, 1333), (640, 1333), (672, 1333), (704, 1333),
                                 (736, 1333), (768, 1333), (800, 1333)],
                      multiscale_mode='value',
                      keep_ratio=True)
             ],
             [
                 # Policy 2: coarse resize, absolute-range random crop, then a
                 # second resize (override=True replaces the earlier scale).
                 dict(type='Resize',
                      img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                      multiscale_mode='value',
                      keep_ratio=True),
                 dict(type='RandomCrop',
                      crop_type='absolute_range',
                      crop_size=(384, 600),
                      allow_negative_crop=True),
                 dict(type='Resize',
                      img_scale=[(480, 1333), (512, 1333), (544, 1333),
                                 (576, 1333), (608, 1333), (640, 1333),
                                 (672, 1333), (704, 1333), (736, 1333),
                                 (768, 1333), (800, 1333)],
                      multiscale_mode='value',
                      override=True,
                      keep_ratio=True)
             ]
         ]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
# Test-time pipeline: single scale (1333, 800), no flip augmentation.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
# Dataset wiring for the solar-panel project; the inherited COCO data config
# is discarded (_delete_=True) and rebuilt from scratch.
data = dict(
    _delete_=True,
    samples_per_gpu=1,
    workers_per_gpu=1,
    train=dict(
        type='CocoDataset',
        classes=('module',),
        ann_file='via_project_25Mar2021_20h43m_coco.json',
        data_root='solarppdetect/data/train2/',
        pipeline=train_pipeline),
    val=dict(
        type='CocoDataset',
        # explicitly add your class names to the field `classes`
        classes=('module',),
        ann_file='via_project_16Mar2021_10h16m_coco.json',
        data_root='solarppdetect/data/test/',
        pipeline=test_pipeline),
    test=dict(
        type='CocoDataset',
        # explicitly add your class names to the field `classes`
        classes=('module',),
        # NOTE(review): unlike train/val, this ann_file repeats the data_root
        # prefix — confirm the loader does not join the two paths twice.
        ann_file='solarppdetect/data/test/via_project_16Mar2021_10h16m_coco.json',
        data_root='solarppdetect/data/test/',
        pipeline=test_pipeline)
) | StarcoderdataPython |
9772191 | <filename>python/emnes/apu/square_wave.py
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2019 <NAME>
#
# See LICENSE at the root of this project for more info.
class SquareWave:
    """Square-wave (pulse) channel sequencer of the NES APU.

    ``emulate`` is expected to be called once per CPU cycle; ``output``
    yields the current level (0 or 1) of the selected 8-step duty sequence.
    """

    # The four selectable 8-step duty sequences of the 2A03 pulse channels
    # (12.5%, 25%, 50% and the negated 25% duty cycle).
    DUTY_WAVEFORM = [
        [0, 1, 0, 0, 0, 0, 0, 0],
        [0, 1, 1, 0, 0, 0, 0, 0],
        [0, 1, 1, 1, 1, 0, 0, 0],
        [1, 1, 0, 0, 1, 1, 1, 1],
    ]

    __slots__ = (
        "_waveform_period_low",   # low bits of the timer period
        "_waveform_period_high",  # high bits of the timer period
        "_duty_cycle_index",      # which DUTY_WAVEFORM row is selected (0-3)
        "_duty_step",             # current position (0-7) in the duty sequence
        "_waveform_counter",      # down-counter that clocks the duty step
    )

    def __init__(self):
        # All registers start cleared; callers program them via
        # update_period() and the duty_cycle_index property.
        self._waveform_period_low = 0
        self._waveform_period_high = 0
        self._waveform_counter = 0
        self._duty_cycle_index = 0
        self._duty_step = 0

    @property
    def waveform_period(self):
        # The period is always combined from the two values.
        # If one wanted to implement a vibrato, they would modify
        # the timer low bits only so that the period can oscillate
        # without resetting the _waveform_counter, since that
        # would restart the cycle counting until the next duty
        # step.
        # NOTE(review): the halves are combined with a plain OR, so
        # _waveform_period_high must already be shifted into its final bit
        # position by whoever writes it — confirm at the call sites.
        return self._waveform_period_low | self._waveform_period_high

    @property
    def duty_cycle_index(self):
        """Index (0-3) into DUTY_WAVEFORM selecting the duty sequence."""
        return self._duty_cycle_index

    @duty_cycle_index.setter
    def duty_cycle_index(self, index):
        self._duty_cycle_index = index

    @property
    def output(self):
        """Current waveform level (0 or 1) at the present duty step."""
        return self.DUTY_WAVEFORM[self._duty_cycle_index][self._duty_step]

    def update_period(self, low=None, high=None, duty=None):
        """Update timer halves and/or the duty step; None leaves a field as-is.

        NOTE(review): ``duty`` overwrites the *step* within the sequence
        (``_duty_step``), not the duty-cycle index — confirm this is the
        intended register semantic at the call sites.
        """
        if low is not None:
            self._waveform_period_low = low
        if high is not None:
            self._waveform_period_high = high
        if duty is not None:
            self._duty_step = duty

    def emulate(self):
        """Advance the sequencer by one CPU cycle."""
        self._waveform_counter -= 1
        if self._waveform_counter < 0:
            # This counter is clocked at every second CPU cycle, but the
            # emulate method is called on every cycle, so we'll double
            # the counter.
            self._waveform_counter = (self.waveform_period + 1) * 2
            # The sequencer steps downward through the waveform (mod 8).
            self._duty_step = (self._duty_step - 1) % 8
| StarcoderdataPython |
11212073 | from django.test import TestCase
from contracts.mommy_recipes import get_contract_recipe
from itertools import cycle
class ContractTestCase(TestCase):
    """Unit tests for the Contract model helper methods."""

    def test_readable_business_size(self):
        """Business-size codes map to their human-readable descriptions."""
        business_sizes = ('O', 'S')
        contract1, contract2 = get_contract_recipe().make(
            _quantity=2, business_size=cycle(business_sizes))
        self.assertEqual(contract1.get_readable_business_size(),
                         'other than small business')
        self.assertEqual(contract2.get_readable_business_size(),
                         'small business')

    def test_get_education_code(self):
        """Known education levels map to codes; unknown levels yield None."""
        c = get_contract_recipe().make()
        self.assertEqual(c.get_education_code('Bachelors'), 'BA')
        # Fixed: the original passed a stray second argument (None), which
        # assertIsNone would have treated as the failure *message*, not as
        # an expected value.
        self.assertIsNone(c.get_education_code('Nursing'))

    def test_normalize_rate(self):
        """Currency formatting ('$', thousands comma, trailing comma) is
        stripped and the rate returned as a float."""
        c = get_contract_recipe().make()
        self.assertEqual(c.normalize_rate('$1,000.00,'), 1000.0)
| StarcoderdataPython |
9652077 | <reponame>girder/slicer_package_manager
# -*- coding: utf-8 -*-
from girder_client import GirderClient
# One-shot CI bootstrap script: provision a local Girder server with an admin
# user, an assetstore, and the slicer_package_manager plugin.
if __name__ == "__main__":
    login = 'admin'
    password = '<PASSWORD>'  # placeholder left by repository anonymisation
    gc = GirderClient(apiUrl='http://localhost:8080/api/v1')
    # Create an admin user & authenticate
    gc.createUser(
        login=login,
        email='<EMAIL>',
        firstName='admin',
        lastName='admin',
        password=password,
        admin=True
    )
    gc.authenticate(username=login, password=password)
    # Create an assetstore (type 0 corresponds to a filesystem assetstore —
    # NOTE(review): confirm against the Girder assetstore type constants)
    gc.post('assetstore', parameters={
        'name': 'TestAssetstore',
        'type': 0,
        'root': '/home/circleci/project/assetstore'
    })
    # Enable the 'slicer_package_manager' plugin
    gc.put('system/plugins', parameters={
        "plugins": '["slicer_package_manager"]'
    })
    # Restart the server so the enabled plugin is loaded
    gc.put('system/restart')
| StarcoderdataPython |
1776019 | from agent_synth_game import AgentSynthGame
from adviser_framework import AdviserFramework
from models.corridor import corridor_directions_mdp
from models.misc import *
from models.office_generator import *
def running_example():
    """Two-robot corridor hand-over scenario (the paper's running example).

    Robot A must eventually reach the right end, robot B the left end, and
    neither may occupy the critical section together with the other.
    """
    robot_a = AgentSynthGame(
        mdp=corridor_directions_mdp(r_id='A', init_state='end_l_fr'),
        formula='F(era) & G!(crita & critb)')
    robot_b = AgentSynthGame(
        mdp=corridor_directions_mdp(r_id='B', init_state='end_r_fl'),
        formula='F(elb) & G!(critb & crita)')
    framework = AdviserFramework([robot_a, robot_b])
    return framework.complete_strategy_synthesis('results/running_example.p',
                                                 verbose=True)
def scalable_running_example(n_agents):
    """Corridor scenario generalised to ``n_agents`` robots.

    Each agent n must eventually reach er<n> while never being in its
    critical section simultaneously with any other agent.  The formula is
    built with str.join instead of the original fragile append-then-slice
    (``ltlf[:-3]``) construction; the resulting strings are identical.
    """
    assert n_agents > 1
    agents = []
    for n in range(n_agents):
        # Pairwise mutual-exclusion clauses against every other agent.
        pairwise = ' | '.join('(crit%i & crit%i)' % (n, m)
                              for m in range(n_agents) if m != n)
        ltlf = 'F er%i & G!(%s )' % (n, pairwise)
        print(ltlf)
        agents.append(AgentSynthGame(
            mdp=corridor_directions_mdp(r_id='%i' % n, init_state='end_l_fr'),
            formula=ltlf))
    framework = AdviserFramework(agents)
    return framework.complete_strategy_synthesis(
        'results/scalable_running_example_%i.p' % n_agents, verbose=True)
def office_10x5(n_agents, n_bins):
    """Bin-visiting scenario on the 5x10 office grid.

    Agent n must eventually visit every one of its ``n_bins`` bins
    (bin0<n> .. bin<n_bins-1><n>).
    """
    agents = []
    for n in range(n_agents):
        # Conjunction of one reachability goal per bin.
        ltlf = ' & '.join('(F bin%i%i)' % (i, n) for i in range(n_bins))
        agents.append(AgentSynthGame(
            mdp=office_5x10_mdp(r_id='%i' % n, n_bins=n_bins),
            formula=ltlf))
    framework = AdviserFramework(agents)
    return framework.complete_strategy_synthesis(
        'results/office_10x5_%i_%i.p' % (n_agents, n_bins), verbose=True)
def office_safe_spillage_10x5(n_bin_agents, n_bins, n_clean_agents):
    """Bin agents plus cleaning agents on the 5x10 office grid (safety version).

    Bin agents must visit all their bins; each cleaning agent must reach its
    office while *no* bin agent is ever allowed into that office (G !off...).
    """
    assert n_clean_agents <= 5, 'office layout only allows for max 5 cleaning agents.'
    agents = []
    # BIN AGENTS
    for n in range(n_bin_agents):
        ltlf = ''
        for i in range(n_bins):
            ltlf += '(F bin%i%i) & ' % (i, n)
        ltlf = ltlf[:-3]  # drop the trailing ' & '
        agents.append(AgentSynthGame(mdp=office_spillage_5x10_mdp(r_id='%i' % n, n_bins=n_bins, is_bin=True, n_cleaners=n_clean_agents), formula=ltlf))
    # SPILLAGE (CLEANING) AGENTS — indexed after the bin agents
    for n in range(n_bin_agents, n_bin_agents + n_clean_agents):
        ltlf = '(F off%i%i) & ' % (n - n_bin_agents, n)
        for i in range(n_bin_agents):
            ltlf += '(G !off%i%i) & ' % (n - n_bin_agents, i)
        ltlf = ltlf[:-3]  # drop the trailing ' & '
        agents.append(AgentSynthGame(mdp=office_spillage_5x10_mdp(r_id='%i' % n, n_bins=n_bins, is_bin=False, n_cleaners=n_clean_agents), formula=ltlf))
    framework = AdviserFramework(agents)
    # NOTE(review): shares its results filename with office_fair_spillage_10x5
    # — running one variant overwrites the other's output.
    return framework.complete_strategy_synthesis('results/office_spillage_10x5_%i_%i_%i.p' % (n_bin_agents, n_bins, n_clean_agents), verbose=True)
def office_fair_spillage_10x5(n_bin_agents, n_bins, n_clean_agents):
    """Bin agents plus cleaning agents on the 5x10 office grid (fairness version).

    Like the safe variant, but each cleaning agent only needs to *eventually*
    be in its office while no bin agent is there at that moment (F (off & !off...)),
    rather than excluding bin agents globally.
    """
    assert n_clean_agents <= 5, 'office layout only allows for max 5 cleaning agents.'
    agents = []
    # BIN AGENTS
    for n in range(n_bin_agents):
        ltlf = ''
        for i in range(n_bins):
            ltlf += '(F bin%i%i) & ' % (i, n)
        ltlf = ltlf[:-3]  # drop the trailing ' & '
        agents.append(AgentSynthGame(mdp=office_spillage_5x10_mdp(r_id='%i' % n, n_bins=n_bins, is_bin=True, n_cleaners=n_clean_agents), formula=ltlf))
    # SPILLAGE (CLEANING) AGENTS — indexed after the bin agents
    for n in range(n_bin_agents, n_bin_agents + n_clean_agents):
        ltlf = '(F (off%i%i &' % (n - n_bin_agents, n)
        for i in range(n_bin_agents):
            ltlf += '!off%i%i & ' % (n - n_bin_agents, i)
        ltlf = ltlf[:-3] + '))'  # drop trailing ' & ' and close both parens
        agents.append(AgentSynthGame(mdp=office_spillage_5x10_mdp(r_id='%i' % n, n_bins=n_bins, is_bin=False, n_cleaners=n_clean_agents), formula=ltlf))
    framework = AdviserFramework(agents)
    # NOTE(review): verbose=False here, unlike the other experiments, and the
    # results filename is shared with office_safe_spillage_10x5 — confirm both.
    return framework.complete_strategy_synthesis('results/office_spillage_10x5_%i_%i_%i.p' % (n_bin_agents, n_bins, n_clean_agents), verbose=False)
def office_crit_10x5(n_agents, n_doors):
    """Critical-door scenario on the 5x10 office grid.

    Agent n must eventually reach bin<n> while, for every door d, never
    occupying door d simultaneously with any other agent.
    """
    agents = []
    for n in range(n_agents):
        ltlf = 'F bin%i & ' % n
        for d in range(n_doors):
            ltlf += 'G!('
            for m in range(n_agents):
                if m == n:
                    continue
                ltlf += '(door%i%i & door%i%i) & ' % (d, n, d, m)
            ltlf = ltlf[:-3] + ') & '  # trim trailing ' & ' of the inner conjunction
        ltlf = ltlf[:-3]  # trim trailing ' & ' of the outer conjunction
        agents.append(AgentSynthGame(mdp=office_critical_doors_5x10_mdp(r_id='%i' % n, n_doors=n_doors), formula=ltlf))
    framework = AdviserFramework(agents)
    return framework.complete_strategy_synthesis('results/office_10x5_%i_%i.p' % (n_agents, n_doors), verbose=True)
def office_crit_5x5(n_agents, n_doors):
    """Critical-door scenario on the 5x5 office grid.

    Agent n must eventually reach bin<n> while, for every door d, never
    occupying door d simultaneously with any other agent.
    """
    agents = []
    for n in range(n_agents):
        ltlf = 'F bin%i & ' % n
        for d in range(n_doors):
            ltlf += 'G!('
            for m in range(n_agents):
                if m == n:
                    continue
                ltlf += '(door%i%i & door%i%i) & ' % (d, n, d, m)
            ltlf = ltlf[:-3] + ') & '  # trim trailing ' & ' of the inner conjunction
        ltlf = ltlf[:-3]  # trim trailing ' & ' of the outer conjunction
        agents.append(AgentSynthGame(mdp=office_critical_doors_5x5_mdp(r_id='%i' % n, n_doors=n_doors), formula=ltlf))
    framework = AdviserFramework(agents)
    # Fixed: results were written to 'results/office_10x5_%i_%i.p' (copied from
    # the 10x5 variant), silently overwriting that experiment's output.
    return framework.complete_strategy_synthesis('results/office_5x5_%i_%i.p' % (n_agents, n_doors), verbose=True)
def switch_test():
    """Two agents sharing switches; both formulas require switch 0 to stay off.

    NOTE(review): agent '1' also constrains `on0` rather than `on1` — confirm
    this is intentional and not a copy-paste slip.
    """
    agents = [
        AgentSynthGame(mdp=switch_mdp('0', 'off'), formula='G !on0'),
        AgentSynthGame(mdp=switch_mdp('1', 'off'), formula='G !on0')
    ]
    framework = AdviserFramework(agents)
    return framework.complete_strategy_synthesis('results/switch_test.p', verbose=True)
if __name__ == '__main__':
    # Default experiment when executed as a script.
    running_example()
| StarcoderdataPython |
3356733 | <gh_stars>1-10
"""Multipurpose module that contains important functions ranging from managing parameters of
generated galaxies to extracting information from relevant files.
"""
import csv
import math
import numpy as np
from .. import defaults
def read_results(project_path, g_parameters, fish):
    """Aggregate per-fit CSV results into residual and pull statistics.

    Args:
        project_path: pathlib.Path-like root of the project; results are read
            from ``project_path / defaults.RESULTS_DIR``.
        g_parameters: galaxy-parameters object exposing ``fit_params`` and the
            true values in ``params`` (project type).
        fish: Fisher-matrix object exposing ``image`` and
            ``covariance_matrix`` keyed by (param, param) (project type).

    Returns:
        Tuple ``(pulls, residuals, biases, pull_means, res_stds, pull_mins,
        pull_maxs, redchis)`` where the dict entries are keyed by fit
        parameter and ``redchis`` lists every fit's reduced chi-squared.
    """
    orig_image = fish.image
    mins = defaults.get_minimums(g_parameters, orig_image)
    maxs = defaults.get_maximums(g_parameters, orig_image)
    residuals = {}
    pulls = {}
    redchis = []  # list containing values of reduced chi2 for each fit.
    results_dir = project_path.joinpath(defaults.RESULTS_DIR)
    # Read results from results_dir's files.
    for fit_file in results_dir.iterdir():
        # newline='' is how the csv module expects files to be opened; the
        # unused enumerate() index from the original loop was dropped.
        with open(fit_file, newline='') as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                redchis.append(float(row['redchi']))
                for param in g_parameters.fit_params:
                    residuals.setdefault(param, [])
                    pulls.setdefault(param, [])
                    residual = (float(row[param]) -
                                float(g_parameters.params[param]))
                    # Pull: residual in units of the Fisher-predicted sigma.
                    pull = (residual /
                            math.sqrt(fish.covariance_matrix[param, param]))
                    residuals[param].append(residual)
                    pulls[param].append(pull)
    biases = {param: np.mean(residuals[param]) for param in residuals}
    pull_means = {param: np.mean(pulls[param]) for param in residuals}
    res_stds = {param: np.std(residuals[param]) for param in residuals}
    # Prior boundaries expressed in pull units, per parameter.
    pull_mins = {param: ((mins[param] - float(g_parameters.params[param])) /
                         math.sqrt(fish.covariance_matrix[param, param])) for
                 param in residuals}
    pull_maxs = {param: ((maxs[param] - float(g_parameters.params[param])) /
                         math.sqrt(fish.covariance_matrix[param, param])) for
                 param in residuals}
    return pulls, residuals, biases, pull_means, res_stds, pull_mins, pull_maxs, redchis
| StarcoderdataPython |
5032137 | """
Tests for the Django management commands.
"""
from django.core import management
from django.test import TestCase
try:
# No name 'timezone' in module 'django.utils'
# pylint: disable=E0611
from django.utils import timezone as datetime
except ImportError:
from datetime import datetime
from mock import patch
from async import schedule
from async.api import health, deschedule
from async.models import Job
# Using the global statement
# pylint: disable = W0603
# Per-test list recording the order in which queued jobs actually executed;
# each test re-initialises it before flushing the queue.
ORDER = None
def _dummy(order=None, error=None):
    """Basic dummy function we can use to test the queue execution.

    Appends ``order`` (when truthy) to the module-level ORDER list so tests
    can observe execution order, and raises when ``error`` is given so tests
    can observe failure behaviour.
    """
    if order:
        ORDER.append(order)
    if error:
        raise Exception(error)
class TestFlushQueue(TestCase):
    """Test the flush_queue management command.
    """

    def test_empty_queue(self):
        """Make sure we don't get any errors if the queue is empty.
        """
        management.call_command('flush_queue')
        self.assertEqual(Job.objects.all().count(), 0)

    def test_asap_tasks(self):
        """Make sure that tasks scheduled for immediate execution
        are run.
        """
        schedule(_dummy)
        self.assertEqual(Job.objects.filter(executed=None).count(), 1)
        management.call_command('flush_queue')
        self.assertEqual(Job.objects.filter(executed=None).count(), 0)

    def test_queue_fails_on_error(self):
        """Make sure that the queue flushing stops on the first error.
        """
        schedule(_dummy, kwargs={'error': "Error"})
        schedule(_dummy)
        self.assertEqual(Job.objects.filter(executed=None).count(), 2)
        with self.assertRaises(Exception):
            management.call_command('flush_queue')
        # Both jobs are still unexecuted after the failed flush; a second
        # flush then executes the good job, leaving only the failing one.
        self.assertEqual(Job.objects.filter(executed=None).count(), 2)
        management.call_command('flush_queue')
        self.assertEqual(Job.objects.filter(executed=None).count(), 1)

    def test_scheduled_runs_first_when_added_first(self):
        """Make sure that the scheduled job is always run first.
        """
        global ORDER
        ORDER = []
        schedule(_dummy, args=[1], run_after=datetime.now())
        schedule(_dummy, args=[2])
        management.call_command('flush_queue')
        self.assertEqual(ORDER, [1, 2])

    def test_scheduled_runs_first_when_added_last(self):
        """Make sure that the scheduled job is always run first.
        """
        global ORDER
        ORDER = []
        schedule(_dummy, args=[2])
        schedule(_dummy, args=[1], run_after=datetime.now())
        management.call_command('flush_queue')
        self.assertEqual(ORDER, [1, 2])

    def test_scheduled_runs_last_when_has_higher_priority(self):
        """The lowest priority scheduled job runs before the highest
        priority non-scheduled job.
        """
        # NOTE(review): the assertion expects the non-scheduled job (1) to run
        # before the scheduled one (2), which matches the method name but
        # appears to contradict this docstring — confirm the intended order.
        global ORDER
        ORDER = []
        schedule(_dummy, args=[1], priority=5)
        schedule(_dummy, args=[2], priority=1, run_after=datetime.now())
        management.call_command('flush_queue')
        self.assertEqual(ORDER, [1, 2])

    def test_flush_queue_with_jobs_limit(self):
        """Make sure that the number of job run is the same
        as the input jobs limit.
        """
        for _ in range(5):
            schedule(_dummy)
        management.call_command('flush_queue', jobs=2)
        self.assertEqual(Job.objects.filter(executed=None).count(), 3)

    def test_flush_queue_without_jobs_limit_limit_at_300_by_default(self):
        """Make sure that the number of job run by default is 300.
        """
        for _ in range(305):
            schedule(_dummy)
        management.call_command('flush_queue')
        self.assertEqual(Job.objects.filter(executed=None).count(), 5)

    def test_flush_queue_with_cancelled_jobs__should_not_be_executed(self):
        """Cancelled (descheduled) jobs must not be executed by a flush.
        """
        for _ in range(5):
            job = schedule(_dummy)
            deschedule(job.name)
        management.call_command('flush_queue')
        # All five jobs remain unexecuted and all five are marked cancelled.
        self.assertEqual(Job.objects.filter(executed=None).count(), 5)
        self.assertEqual(Job.objects.filter(cancelled=None).count(), 0)
class TestHealth(TestCase):
    """Make sure the health command runs without any errors.
    """

    def test_health(self):
        """Execute the queue_health command and check its serialised output.
        """
        print( health())
        print( management.call_command('queue_health'))
        # Patch the command's json `dumps` so that it asserts the command
        # serialises exactly the structure returned by api.health().
        with patch(
                'async.management.commands.queue_health.dumps',
                lambda x: self.assertEqual(x, health())):
            management.call_command('queue_health')

    def test_health_with_options(self):
        """Execute the command with a valid and an invalid algorithm option.
        """
        with patch(
                'async.management.commands.queue_health.dumps',
                lambda x: self.assertEqual(x, health())):
            management.call_command('queue_health', algorithm='rough')
        # NOTE(review): asserting on BaseException is very broad — it would
        # also match KeyboardInterrupt/SystemExit; confirm the concrete
        # exception type the command raises for an unknown algorithm.
        self.assertRaises(BaseException, management.call_command,
                          'queue_health', algorithm='hello')
| StarcoderdataPython |
4876537 | from pytest import fixture
from ..hiff_star import HiffStar
# (genome, expected fitness) pairs: both the all-ones and all-zeros 24-bit
# genomes are expected to score 36 on this HIFF* instance.
__SAMPLES = [
    ([1] * 24, 36),
    ([0] * 24, 36)
]
@fixture
def hiff_star() -> HiffStar:
    """A HiffStar instance built from three 8-bit blocks."""
    return HiffStar(blocks=[8, 8, 8])


def test_fitness(hiff_star: HiffStar, helpers):
    """Fitness of the sample genomes matches the expected HIFF* scores."""
    helpers.check_samples(__SAMPLES, hiff_star)
| StarcoderdataPython |
9775280 | <filename>src/tweet-pipeline.py
"""
Copyright 2021 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import argparse
import apache_beam as beam
from apache_beam import pvalue
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.io.gcp.internal.clients import bigquery
def frequency_schema(field_name):
    """Build a two-column BigQuery schema dict: *field_name* (STRING) and
    'count' (NUMERIC), both NULLABLE."""
    name_field = {'name': field_name, 'type': 'STRING', 'mode': 'NULLABLE'}
    count_field = {'name': 'count', 'type': 'NUMERIC', 'mode': 'NULLABLE'}
    return {'fields': [name_field, count_field]}
class RemoveUrls(beam.DoFn):
    """Strip URLs (http/https tokens) from the song and artist fields."""

    def process(self, tweet):
        # Raw string: '\S' in a plain literal is an invalid escape sequence
        # (DeprecationWarning; SyntaxWarning on Python 3.12+).
        pattern = r'http\S+'
        tweet['song'] = re.sub(pattern, '', tweet['song'], flags=re.IGNORECASE)
        tweet['artist'] = re.sub(pattern, '', tweet['artist'], flags=re.IGNORECASE)
        yield tweet
class RemoveHashtags(beam.DoFn):
    """Strip hashtag tokens from the song field (artist is left untouched)."""

    def process(self, tweet):
        # Raw string: '\S' in a plain literal is an invalid escape sequence.
        pattern = r'#\S+'
        tweet['song'] = re.sub(pattern, '', tweet['song'])
        yield tweet
class RemoveSource(beam.DoFn):
    """Strip source attributions like 'on #station' / 'at @app' from both fields."""

    def process(self, tweet):
        # Raw string: '\S' in a plain literal is an invalid escape sequence.
        pattern = r'(on|at) (#|@)\S+'
        tweet['song'] = re.sub(pattern, '', tweet['song'], flags=re.IGNORECASE)
        tweet['artist'] = re.sub(pattern, '', tweet['artist'], flags=re.IGNORECASE)
        yield tweet
class RemoveExtraChars(beam.DoFn):
    """Delete stray punctuation/decoration characters and trim whitespace."""

    # One translation table deleting every nuisance character in a single
    # pass (equivalent to chained .replace(ch, '') calls).
    _DELETE_TABLE = str.maketrans('', '', '":;|“”`~()[]♫')

    def process(self, tweet):
        tweet['artist'] = tweet['artist'].translate(self._DELETE_TABLE).strip()
        tweet['song'] = tweet['song'].translate(self._DELETE_TABLE).strip()
        yield tweet
class RemoveStopPatterns(beam.DoFn):
    """Remove boilerplate phrases ('via', 'tune in', '(Official Audio)', ...)
    from the song and artist fields, case-insensitively."""

    def process(self, tweet):
        stop_patterns = [
            'Listen to this track and more',
            'via',
            'official video',
            'tune in',
            'listen live',
            'listen now',
            '♫ at',
            'Listen:',
            '#listen',
            # Raw strings: '\(' in a plain literal is an invalid escape
            # sequence (DeprecationWarning; SyntaxWarning on Python 3.12+).
            r'\(Official Music Video\)',
            r'\(Official Lyric Video\)',
            r'\(Official Audio\)',
        ]
        # NOTE(review): short patterns such as 'via' also match inside words
        # (e.g. 'trivial') — confirm whether word boundaries are wanted.
        for pattern in stop_patterns:
            tweet['song'] = re.sub(pattern, '', tweet['song'], flags=re.IGNORECASE)
            tweet['artist'] = re.sub(pattern, '', tweet['artist'], flags=re.IGNORECASE)
        yield tweet
class ParseTweet(beam.DoFn):
    """Split a '#nowplaying' line into date_time, raw_tweet, song and artist.

    Expects '<timestamp> #nowplaying <song> by <artist>' or
    '<artist> - <song>' shaped lines (guaranteed upstream by ValidateTweet).
    """

    def process(self, text):
        tweet = {}
        # Everything before the '#nowplaying' tag is the timestamp.
        split_ts_from_tweet = re.split('#nowplaying', text, flags=re.IGNORECASE)
        tweet['date_time'] = split_ts_from_tweet[0].strip()
        tweet['raw_tweet'] = split_ts_from_tweet[1].strip()
        tweet['song'] = ''
        tweet['artist'] = ''
        if re.search('by', tweet['raw_tweet'], flags=re.IGNORECASE):
            # NOTE(review): 'by' matches inside words too (e.g. 'Ruby'), and
            # split() has no maxsplit, so a second 'by' truncates the artist
            # — confirm whether maxsplit=1 / word boundaries are wanted.
            split_tweet = re.split('by', tweet['raw_tweet'], flags=re.IGNORECASE)
            tweet['song'] = split_tweet[0].strip()
            tweet['artist'] = split_tweet[1].strip()
        elif '-' in tweet['raw_tweet']:
            # '<artist> - <song>' form.
            split_tweet = tweet['raw_tweet'].split('-')
            tweet['song'] = split_tweet[1].strip()
            tweet['artist'] = split_tweet[0].strip()
        yield tweet
class ValidateTweet(beam.DoFn):
    """Route lines matching '<Mon> <dd> <hh:mm:ss> #nowplaying <s1> by|- <s2>'
    to the main 'tweets' output and everything else to the rejects tag."""

    OUTPUT_TAG_REJECTS = 'tag_tweet_rejects'
    # Raw string: the original plain literal contained invalid escape
    # sequences ('\w', '\s', '\d' → DeprecationWarning).  The s1/s2 named
    # groups only enforce non-empty halves; extraction happens in ParseTweet.
    PATTERN = re.compile(
        r'^(\w{3,}\s+\d{2}\s+\d{2}:\d{2}:\d{2})\s+#nowplaying\s+(?P<s1>.*)\s+(by|-)\s+(?P<s2>.*)',
        re.IGNORECASE)

    def process(self, tweet):
        if self.PATTERN.match(tweet):
            yield tweet
        else:
            yield pvalue.TaggedOutput(self.OUTPUT_TAG_REJECTS, tweet)
def run(argv=None, save_main_session=True):
    """Main entry point for the pipeline.

    Reads raw '#nowplaying' tweet lines from --input, writes unparseable
    lines to --output, and writes song/artist frequency tables to BigQuery
    (--bqproject / --bqdataset, tables 'songs' and 'artists').
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--input',
        dest='input',
        help='Input file to process')
    parser.add_argument(
        '--output',
        dest='output',
        help='Destination for the output file')
    parser.add_argument(
        '--bqdataset',
        dest='bqdataset',
        help='BigQuery target dataset')
    parser.add_argument(
        '--bqproject',
        dest='bqproject',
        help='GCP bq project')
    args, beam_args = parser.parse_known_args()
    beam_options = PipelineOptions(beam_args)
    beam_options.view_as(SetupOptions).save_main_session = save_main_session
    with beam.Pipeline(options=beam_options) as p:
        lines = p | ReadFromText(args.input)
        # Split the input into parseable tweets (main output) and rejects.
        filtered_tweets = (
            lines
            | beam.ParDo(ValidateTweet()).with_outputs(
                ValidateTweet.OUTPUT_TAG_REJECTS, main='tweets'
            ))
        (filtered_tweets[ValidateTweet.OUTPUT_TAG_REJECTS] | 'Write rejects' >> WriteToText(args.output))
        # Cleaning chain producing {date_time, raw_tweet, song, artist} dicts.
        songs_and_artists = (
            filtered_tweets['tweets']
            | 'Parse Tweet' >> beam.ParDo(ParseTweet())
            | 'Remove URLs' >> beam.ParDo(RemoveUrls())
            | 'Remove Source' >> beam.ParDo(RemoveSource())
            | 'Remove Song Hashtags' >> beam.ParDo(RemoveHashtags())
            | 'Remove stop patterns' >> beam.ParDo(RemoveStopPatterns())
            | 'Remove extra chars' >> beam.ParDo(RemoveExtraChars())
        )
        # Word-count style aggregation over the song field.
        song_frequency = (
            songs_and_artists
            | 'Extract song field' >> beam.Map(lambda tweet: (tweet['song'], 1))
            | 'Group and sum songs' >> beam.CombinePerKey(sum)
            | 'Map songs to dict' >> beam.Map(lambda element: {'song': element[0], 'count': element [1]})
        )
        # Same aggregation over the artist field.
        artist_frequency = (
            songs_and_artists
            | 'Extract artist field' >> beam.Map(lambda tweet: (tweet['artist'], 1))
            | 'Group and sum artists' >> beam.CombinePerKey(sum)
            | 'Map artists to dict' >> beam.Map(lambda element: {'artist': element[0], 'count': element [1]})
        )
        freq_schema_song = frequency_schema('song')
        (song_frequency
         | 'Write song frequency to BQ' >> beam.io.WriteToBigQuery(
             bigquery.TableReference(projectId=args.bqproject, datasetId=args.bqdataset, tableId='songs'),
             schema=freq_schema_song,
             write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE,
             create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED))
        freq_schema_artist = frequency_schema('artist')
        (artist_frequency
         | 'Write artist frequency to BQ' >> beam.io.WriteToBigQuery(
             bigquery.TableReference(projectId=args.bqproject, datasetId=args.bqdataset, tableId='artists'),
             schema=freq_schema_artist,
             write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE,
             create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED))
if __name__ == '__main__':
    # Script entry point.
    run()
| StarcoderdataPython |
9613234 | <gh_stars>1-10
#!/usr/bin/python
# -* coding: utf-8 *-
import time
import serial
import os, sys
class USBtin(object):
ser = None
hw_ver = None
fw_ver = None
def __init__(self, port):
# configure the serial connections (the parameters differs on the device you are connecting to)
self.ser = serial.Serial(
port='/dev/ttyACM0',
baudrate=19200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=1
)
self.ser.open()
if not self.ser.isOpen():
print 'Fehler!'
# clear state, close connection
self.ser.write("\rC\r")
time.sleep(0.1)
self.ser.flushInput()
self.ser.flushOutput()
self.command("C")
# reset complete
def getVersions(self):
# debug
self.fw_ver = self.command("v")
self.hw_ver = self.command("V")
#print 'HW: %s, FW: %s' % (self.hw_ver, self.fw_ver)
def openCAN(self, speed=10000, mode='LISTENONLY'):
speedChar = '0'
if speed == 10000: speedChar = '0'
elif speed == 20000: speedChar = '1'
elif speed == 50000: speedChar = '2'
elif speed == 100000: speedChar = '3'
elif speed == 125000: speedChar = '4'
elif speed == 250000: speedChar = '5'
elif speed == 500000: speedChar = '6'
elif speed == 800000: speedChar = '7'
elif speed == 1000000: speedChar = '8'
# open connection
# Speed 10 kbaud
reply = self.command("S%s" % speedChar)
# LISTENONLY
reply = self.command("L")
def closeCAN(self):
self.ser.flushInput()
self.ser.flushOutput()
self.command("C")
time.sleep(0.1)
self.ser.flushInput()
self.ser.flushOutput()
self.command("C")
def readline(self):
reply = ''
c = ''
while c != '\r':
c = self.ser.read(1)
if c and c != '\r':
reply = reply + c
if c == '\x07':
print 'BELL'
break
return reply
def command(self, cmd):
if not cmd.endswith('\r'):
cmd = cmd + '\r'
self.ser.write(cmd)
reply = self.readline()
print 'DEBUG: reply to %s was %s' % (cmd[:-1], ['%s ' % hex(ord(c)) for c in reply])
return reply
| StarcoderdataPython |
1626702 | <gh_stars>10-100
import spacy
import pytest
# Wired into nlp.config["initialize"]["lookups"] so that languages whose
# lemmatizer rules need the `lexeme_norm` table load it at initialization.
INIT_LOOKUPS_CONFIG = {
    "@misc": "spacy.LookupsDataLoader.v1",
    "lang": "${nlp.lang}",
    "tables": ["lexeme_norm"],
}
# Session-scoped fixtures: one blank spaCy pipeline per language/lemmatizer
# mode.  "*_lookup_nlp" / "*_pos_lookup_nlp" fixtures use table-based
# lemmatization; the others use the language's default mode.  Fixtures that
# set nlp.config["initialize"]["lookups"] additionally load the
# `lexeme_norm` table via INIT_LOOKUPS_CONFIG.


@pytest.fixture(scope="session")
def ca_lookup_nlp():
    nlp = spacy.blank("ca")
    nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
    nlp.initialize()
    return nlp


@pytest.fixture(scope="session")
def cs_nlp():
    nlp = spacy.blank("cs")
    nlp.config["initialize"]["lookups"] = INIT_LOOKUPS_CONFIG
    nlp.add_pipe("lemmatizer")
    nlp.initialize()
    return nlp


@pytest.fixture(scope="session")
def da_nlp():
    nlp = spacy.blank("da")
    nlp.config["initialize"]["lookups"] = INIT_LOOKUPS_CONFIG
    nlp.add_pipe("lemmatizer")
    nlp.initialize()
    return nlp


@pytest.fixture(scope="session")
def de_nlp():
    nlp = spacy.blank("de")
    nlp.config["initialize"]["lookups"] = INIT_LOOKUPS_CONFIG
    nlp.add_pipe("lemmatizer")
    nlp.initialize()
    return nlp


@pytest.fixture(scope="session")
def en_nlp():
    nlp = spacy.blank("en")
    nlp.config["initialize"]["lookups"] = INIT_LOOKUPS_CONFIG
    nlp.add_pipe("lemmatizer")
    nlp.initialize()
    return nlp


@pytest.fixture(scope="session")
def en_lookup_nlp():
    nlp = spacy.blank("en")
    nlp.config["initialize"]["lookups"] = INIT_LOOKUPS_CONFIG
    nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
    nlp.initialize()
    return nlp


@pytest.fixture(scope="session")
def fr_lookup_nlp():
    nlp = spacy.blank("fr")
    nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
    nlp.initialize()
    return nlp


@pytest.fixture(scope="session")
def grc_nlp():
    nlp = spacy.blank("grc")
    nlp.add_pipe("lemmatizer")
    nlp.initialize()
    return nlp


@pytest.fixture(scope="session")
def hr_nlp():
    nlp = spacy.blank("hr")
    nlp.add_pipe("lemmatizer")
    nlp.initialize()
    return nlp


@pytest.fixture(scope="session")
def it_lookup_nlp():
    nlp = spacy.blank("it")
    nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
    nlp.initialize()
    return nlp


@pytest.fixture(scope="session")
def ga_pos_lookup_nlp():
    nlp = spacy.blank("ga")
    nlp.add_pipe("lemmatizer", config={"mode": "pos_lookup"})
    nlp.initialize()
    return nlp


@pytest.fixture(scope="session")
def lb_nlp():
    nlp = spacy.blank("lb")
    nlp.config["initialize"]["lookups"] = INIT_LOOKUPS_CONFIG
    nlp.add_pipe("lemmatizer")
    nlp.initialize()
    return nlp


@pytest.fixture(scope="session")
def lt_nlp():
    nlp = spacy.blank("lt")
    nlp.add_pipe("lemmatizer")
    nlp.initialize()
    return nlp


@pytest.fixture(scope="session")
def mk_lookup_nlp():
    nlp = spacy.blank("mk")
    nlp.config["initialize"]["lookups"] = INIT_LOOKUPS_CONFIG
    nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
    nlp.initialize()
    return nlp


@pytest.fixture(scope="session")
def nl_nlp():
    nlp = spacy.blank("nl")
    nlp.add_pipe("lemmatizer")
    nlp.initialize()
    return nlp


@pytest.fixture(scope="session")
def nl_lookup_nlp():
    nlp = spacy.blank("nl")
    nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
    nlp.initialize()
    return nlp


@pytest.fixture(scope="session")
def ro_nlp():
    nlp = spacy.blank("ro")
    nlp.add_pipe("lemmatizer")
    nlp.initialize()
    return nlp


@pytest.fixture(scope="session")
def ru_lookup_nlp():
    nlp = spacy.blank("ru")
    nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
    nlp.initialize()
    return nlp


@pytest.fixture(scope="session")
def sr_nlp():
    nlp = spacy.blank("sr")
    nlp.add_pipe("lemmatizer")
    nlp.initialize()
    return nlp


@pytest.fixture(scope="session")
def sv_nlp():
    nlp = spacy.blank("sv")
    nlp.add_pipe("lemmatizer")
    nlp.initialize()
    return nlp


@pytest.fixture(scope="session")
def sv_lookup_nlp():
    nlp = spacy.blank("sv")
    nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
    nlp.initialize()
    return nlp


@pytest.fixture(scope="session")
def tr_nlp():
    nlp = spacy.blank("tr")
    nlp.add_pipe("lemmatizer")
    nlp.initialize()
    return nlp
| StarcoderdataPython |
3536961 | <gh_stars>0
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'oip_ui.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """pyuic5-generated UI for the OIP tool window ('oip_ui.ui').

    NOTE(review): this class is generated code ("All changes made in this
    file will be lost!") — edit the .ui file in Qt Designer and regenerate
    rather than hand-editing this module.
    """
    def setupUi(self, Form):
        """Build the widget tree and layouts on *Form* (a plain QWidget)."""
        Form.setObjectName("Form")
        Form.resize(600, 300)
        self.verticalLayout = QtWidgets.QVBoxLayout(Form)
        self.verticalLayout.setObjectName("verticalLayout")
        # --- header row: controller address, device address, DID refresh ---
        self.header = QtWidgets.QWidget(Form)
        self.header.setObjectName("header")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.header)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.label = QtWidgets.QLabel(self.header)
        self.label.setObjectName("label")
        self.horizontalLayout.addWidget(self.label)
        self.myAddrLineEdit = QtWidgets.QLineEdit(self.header)
        self.myAddrLineEdit.setObjectName("myAddrLineEdit")
        self.horizontalLayout.addWidget(self.myAddrLineEdit)
        self.setMyAddress = QtWidgets.QPushButton(self.header)
        self.setMyAddress.setObjectName("setMyAddress")
        self.horizontalLayout.addWidget(self.setMyAddress)
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.label_2 = QtWidgets.QLabel(self.header)
        self.label_2.setObjectName("label_2")
        self.horizontalLayout.addWidget(self.label_2)
        self.devieAddrLineEdit = QtWidgets.QLineEdit(self.header)
        self.devieAddrLineEdit.setObjectName("devieAddrLineEdit")
        self.horizontalLayout.addWidget(self.devieAddrLineEdit)
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem1)
        self.syncDIDPushButton = QtWidgets.QPushButton(self.header)
        self.syncDIDPushButton.setObjectName("syncDIDPushButton")
        self.horizontalLayout.addWidget(self.syncDIDPushButton)
        self.verticalLayout.addWidget(self.header)
        # --- tab widget: basic configuration / image read / upgrade ---
        self.tabWidget = QtWidgets.QTabWidget(Form)
        self.tabWidget.setObjectName("tabWidget")
        self.basicTab = QtWidgets.QWidget()
        self.basicTab.setObjectName("basicTab")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.basicTab)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        # "operation" group: command/type combos, DID selector, send button
        self.operationGroup = QtWidgets.QGroupBox(self.basicTab)
        self.operationGroup.setObjectName("operationGroup")
        self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.operationGroup)
        self.horizontalLayout_4.setObjectName("horizontalLayout_4")
        self.cmdComboBox = QtWidgets.QComboBox(self.operationGroup)
        self.cmdComboBox.setObjectName("cmdComboBox")
        self.horizontalLayout_4.addWidget(self.cmdComboBox)
        self.typeComboBox = QtWidgets.QComboBox(self.operationGroup)
        self.typeComboBox.setMinimumSize(QtCore.QSize(30, 0))
        self.typeComboBox.setObjectName("typeComboBox")
        self.horizontalLayout_4.addWidget(self.typeComboBox)
        self.didLabel = QtWidgets.QLabel(self.operationGroup)
        self.didLabel.setObjectName("didLabel")
        self.horizontalLayout_4.addWidget(self.didLabel)
        self.didcomboBox = QtWidgets.QComboBox(self.operationGroup)
        self.didcomboBox.setEnabled(True)
        self.didcomboBox.setMinimumSize(QtCore.QSize(100, 0))
        self.didcomboBox.setMaximumSize(QtCore.QSize(200, 16777215))
        self.didcomboBox.setEditable(True)
        self.didcomboBox.setObjectName("didcomboBox")
        self.horizontalLayout_4.addWidget(self.didcomboBox)
        self.sendPushButton = QtWidgets.QPushButton(self.operationGroup)
        self.sendPushButton.setMaximumSize(QtCore.QSize(80, 16777215))
        self.sendPushButton.setObjectName("sendPushButton")
        self.horizontalLayout_4.addWidget(self.sendPushButton)
        spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_4.addItem(spacerItem2)
        self.verticalLayout_2.addWidget(self.operationGroup)
        # "reply" group: holds response data display
        self.dataGroup = QtWidgets.QGroupBox(self.basicTab)
        self.dataGroup.setMinimumSize(QtCore.QSize(0, 80))
        self.dataGroup.setObjectName("dataGroup")
        self.verticalLayout_2.addWidget(self.dataGroup)
        self.tabWidget.addTab(self.basicTab, "")
        # image tab: preview label plus read button
        self.imageTab = QtWidgets.QWidget()
        self.imageTab.setObjectName("imageTab")
        self.layoutWidget = QtWidgets.QWidget(self.imageTab)
        self.layoutWidget.setGeometry(QtCore.QRect(10, 0, 403, 242))
        self.layoutWidget.setObjectName("layoutWidget")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.layoutWidget)
        self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.imageLabel = QtWidgets.QLabel(self.layoutWidget)
        self.imageLabel.setMinimumSize(QtCore.QSize(320, 240))
        self.imageLabel.setStyleSheet("background:rgb(0, 255, 0)")
        self.imageLabel.setObjectName("imageLabel")
        self.horizontalLayout_2.addWidget(self.imageLabel)
        self.readImagePushButton = QtWidgets.QPushButton(self.layoutWidget)
        self.readImagePushButton.setObjectName("readImagePushButton")
        self.horizontalLayout_2.addWidget(self.readImagePushButton)
        self.tabWidget.addTab(self.imageTab, "")
        # upgrade tab: currently empty placeholder
        self.updateTab = QtWidgets.QWidget()
        self.updateTab.setObjectName("updateTab")
        self.tabWidget.addTab(self.updateTab, "")
        self.verticalLayout.addWidget(self.tabWidget)
        self.retranslateUi(Form)
        self.tabWidget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Install the translated user-visible strings on the widgets."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.label.setText(_translate("Form", "抄控器地址"))
        self.myAddrLineEdit.setText(_translate("Form", "1"))
        self.setMyAddress.setText(_translate("Form", "设置"))
        self.label_2.setText(_translate("Form", "设备地址"))
        self.syncDIDPushButton.setText(_translate("Form", "刷新did列表"))
        self.operationGroup.setTitle(_translate("Form", "操作"))
        self.didLabel.setText(_translate("Form", "DID"))
        self.sendPushButton.setText(_translate("Form", "发送"))
        self.dataGroup.setTitle(_translate("Form", "回复"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.basicTab), _translate("Form", "基本配置"))
        self.imageLabel.setText(_translate("Form", "TextLabel"))
        self.readImagePushButton.setText(_translate("Form", "readImage"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.imageTab), _translate("Form", "图像读取"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.updateTab), _translate("Form", "升级"))
| StarcoderdataPython |
4829764 | <reponame>joedomino874/hummingbot<gh_stars>1-10
#!/usr/bin/env python
from collections import namedtuple
import logging
import time
import aiohttp
import asyncio
import ujson
import pandas as pd
from typing import (
Any,
AsyncIterable,
Dict,
List,
Optional,
)
import websockets
from websockets.exceptions import ConnectionClosed
from hummingbot.core.data_type.order_book import OrderBook
from hummingbot.core.data_type.order_book_row import OrderBookRow
from hummingbot.core.data_type.order_book_tracker_data_source import OrderBookTrackerDataSource
from hummingbot.core.data_type.order_book_tracker_entry import (
OrderBookTrackerEntry
)
from hummingbot.core.data_type.order_book_message import (
OrderBookMessage,
OrderBookMessageType,
)
from hummingbot.core.utils.async_utils import safe_gather
from hummingbot.logger import HummingbotLogger
from hummingbot.connector.exchange.bitfinex import (
BITFINEX_REST_URL,
BITFINEX_WS_URI,
ContentEventType,
)
from hummingbot.connector.exchange.bitfinex.bitfinex_utils import (
join_paths,
convert_to_exchange_trading_pair,
convert_from_exchange_trading_pair,
)
from hummingbot.connector.exchange.bitfinex.bitfinex_active_order_tracker import BitfinexActiveOrderTracker
from hummingbot.connector.exchange.bitfinex.bitfinex_order_book import BitfinexOrderBook
from hummingbot.connector.exchange.bitfinex.bitfinex_order_book_message import \
BitfinexOrderBookMessage
from hummingbot.connector.exchange.bitfinex.bitfinex_order_book_tracker_entry import \
BitfinexOrderBookTrackerEntry
# Type alias for lists of raw order-book dictionaries returned by helpers.
BOOK_RET_TYPE = List[Dict[str, Any]]
# HTTP status code treated as success for REST calls.
RESPONSE_SUCCESS = 200
NaN = float("nan")
# Quote assets considered "USD-like" for volume conversion.
MAIN_FIAT = ("USD", "USDC", "USDS", "DAI", "PAX", "TUSD", "USDT")
# Field layout of the Bitfinex v2 ticker REST response.
Ticker = namedtuple(
    "Ticker",
    "bid bid_size ask ask_size daily_change daily_change_percent last_price volume high low"
)
# Field layouts of raw book rows and trade rows from the v2 API.
BookStructure = namedtuple("Book", "price count amount")
TradeStructure = namedtuple("Trade", "id mts amount price")
# n0-n9 are undocumented by the exchange; revisit if Bitfinex ever documents them.
ConfStructure = namedtuple("Conf", "n0 n1 n2 min max n5 n6 n7 n8 n9")
class BitfinexAPIOrderBookDataSource(OrderBookTrackerDataSource):
    """Order book tracker data source backed by the Bitfinex v2 REST and WebSocket APIs."""

    # Timeouts / pacing constants (seconds unless noted).
    MESSAGE_TIMEOUT = 30.0
    STEP_TIME_SLEEP = 1.0
    REQUEST_TTL = 60 * 30
    TIME_SLEEP_BETWEEN_REQUESTS = 5.0
    CACHE_SIZE = 1
    # Bitfinex only accepts a book "len" of 1, 25 or 100 entries per side.
    SNAPSHOT_LIMIT_SIZE = 100
    _logger: Optional[HummingbotLogger] = None
    @classmethod
    def logger(cls) -> HummingbotLogger:
        """Lazily create and cache the class-level logger."""
        if cls._logger is None:
            cls._logger = logging.getLogger(__name__)
        return cls._logger
    def __init__(self, trading_pairs: Optional[List[str]] = None):
        super().__init__(trading_pairs)
        self._trading_pairs: Optional[List[str]] = trading_pairs
        # Dictionary that maps Order IDs to book entries (i.e. price, amount, and update_id the
        # way it is stored in Hummingbot order book, usually timestamp)
        self._tracked_book_entries: Dict[int, OrderBookRow] = {}
    @staticmethod
    async def fetch_trading_pairs() -> List[str]:
        """Return all exchange trading pairs in Hummingbot format; [] on any failure."""
        try:
            async with aiohttp.ClientSession() as client:
                async with client.get("https://api-pub.bitfinex.com/v2/conf/pub:list:pair:exchange", timeout=10) as response:
                    if response.status == 200:
                        data = await response.json()
                        trading_pair_list: List[str] = []
                        for trading_pair in data[0]:
                            # change the following line accordingly
                            converted_trading_pair: Optional[str] = \
                                convert_from_exchange_trading_pair(trading_pair)
                            if converted_trading_pair is not None:
                                trading_pair_list.append(converted_trading_pair)
                            else:
                                logging.getLogger(__name__).info(f"Could not parse the trading pair "
                                                                 f"{trading_pair}, skipping it...")
                        return trading_pair_list
        except Exception:
            # Do nothing if the request fails -- there will be no autocomplete available
            pass
        return []
    @staticmethod
    def _convert_volume(raw_prices: Dict[str, Any]) -> BOOK_RET_TYPE:
        """Attach a USD-equivalent volume ("USDVolume") to each raw price entry.

        NOTE(review): mutates the caller's *raw_prices* dict (entries quoted in
        MAIN_FIAT are deleted from it during the first pass).
        """
        converters = {}
        prices = []
        # First pass: pairs quoted directly in a USD-like fiat seed the converters.
        for price in [v for v in raw_prices.values() if v["quoteAsset"] in MAIN_FIAT]:
            raw_symbol = f"{price['baseAsset']}-{price['quoteAsset']}"
            symbol = f"{price['baseAsset']}{price['quoteAsset']}"
            prices.append(
                {
                    **price,
                    "symbol": symbol,
                    "USDVolume": price["volume"] * price["price"]
                }
            )
            converters[price["baseAsset"]] = price["price"]
            del raw_prices[raw_symbol]
        # Second pass: derive USD volume through known base/quote conversion rates.
        for raw_symbol, item in raw_prices.items():
            symbol = f"{item['baseAsset']}{item['quoteAsset']}"
            if item["baseAsset"] in converters:
                prices.append(
                    {
                        **item,
                        "symbol": symbol,
                        "USDVolume": item["volume"] * converters[item["baseAsset"]]
                    }
                )
                if item["quoteAsset"] not in converters:
                    converters[item["quoteAsset"]] = item["price"] / converters[item["baseAsset"]]
                continue
            if item["quoteAsset"] in converters:
                prices.append(
                    {
                        **item,
                        "symbol": symbol,
                        "USDVolume": item["volume"] * item["price"] * converters[item["quoteAsset"]]
                    }
                )
                if item["baseAsset"] not in converters:
                    converters[item["baseAsset"]] = item["price"] * converters[item["quoteAsset"]]
                continue
            # No conversion path known: keep the entry but mark volume unusable.
            prices.append({
                **item,
                "symbol": symbol,
                "volume": NaN})
        return prices
    @staticmethod
    def _prepare_snapshot(pair: str, raw_snapshot: List[BookStructure]) -> Dict[str, Any]:
        """
        Return structure of three elements:
            symbol: traded pair symbol
            bids: List of OrderBookRow for bids
            asks: List of OrderBookRow for asks
        """
        update_id = time.time()
        # Positive amounts are bids, negative amounts are asks (abs() for size).
        bids = [OrderBookRow(i.price, i.amount, update_id) for i in raw_snapshot if i.amount > 0]
        asks = [OrderBookRow(i.price, abs(i.amount), update_id) for i in raw_snapshot if i.amount < 0]
        return {
            "symbol": pair,
            "bids": bids,
            "asks": asks,
        }
    def _prepare_trade(self, raw_response: str) -> Optional[Dict[str, Any]]:
        """Parse one raw trade frame into a dict; None for heartbeats or parse errors."""
        *_, content = ujson.loads(raw_response)
        if content == ContentEventType.HEART_BEAT:
            return None
        try:
            trade = TradeStructure(*content)
        except Exception as err:
            self.logger().error(err)
            self.logger().error(raw_response)
        else:
            return {
                "id": trade.id,
                "mts": trade.mts,
                "amount": trade.amount,
                "price": trade.price,
            }
    async def _get_response(self, ws: websockets.WebSocketClientProtocol) -> AsyncIterable[str]:
        """Yield raw messages from *ws*, closing it on timeout or disconnect."""
        try:
            while True:
                msg: str = await asyncio.wait_for(ws.recv(), timeout=self.MESSAGE_TIMEOUT)
                yield msg
        except asyncio.TimeoutError:
            self.logger().warning("WebSocket ping timed out. Going to reconnect...")
            return
        except ConnectionClosed:
            return
        finally:
            await ws.close()
    def _generate_delete_message(self, symbol: str, price: float, amount: float):
        """Build a DIFF message that removes the price level from the book.

        Per the Bitfinex book channel protocol a delete (count == 0) carries
        amount 1 for the bid side and -1 for the ask side — presumably why
        `amount == 1` selects "bids" here; confirm against the channel docs.
        """
        side_key = "bids" if amount == 1 else "asks"
        timestamp = time.time()
        msg = {
            "symbol": symbol,
            side_key: OrderBookRow(price, 0, timestamp),    # 0 amount will force the order to be deleted
            "update_id": time.time()  # Assume every update is incremental
        }
        return BitfinexOrderBookMessage(
            message_type=OrderBookMessageType.DIFF,
            content=msg,
            timestamp=timestamp)
    def _generate_add_message(self, symbol: str, price: float, amount: float):
        """Build a DIFF message that adds/updates the price level in the book."""
        side_key = "bids" if amount > 0 else "asks"
        timestamp = time.time()
        msg = {
            "symbol": symbol,
            side_key: OrderBookRow(price, abs(amount), timestamp),
            "update_id": timestamp  # Assume every update is incremental
        }
        return BitfinexOrderBookMessage(
            message_type=OrderBookMessageType.DIFF,
            content=msg,
            timestamp=timestamp)
    def _parse_raw_update(self, pair: str, raw_response: str) -> OrderBookMessage:
        """
        Parses raw update, if price for a tracked order identified by ID is 0, then order is deleted
        Returns OrderBookMessage
        """
        *_, content = ujson.loads(raw_response)
        # A book update frame is a flat [price, count, amount] triple.
        if isinstance(content, list) and len(content) == 3:
            price = content[0]
            count = content[1]
            amount = content[2]
            if count > 0:
                return self._generate_add_message(pair, price, amount)
            else:
                return self._generate_delete_message(pair, price, amount)
        return None
    @classmethod
    async def get_last_traded_prices(cls, trading_pairs: List[str]) -> Dict[str, float]:
        """Fetch last traded prices for all *trading_pairs* concurrently."""
        tasks = [cls.get_last_traded_price(t_pair) for t_pair in trading_pairs]
        results = await safe_gather(*tasks)
        return {t_pair: result for t_pair, result in zip(trading_pairs, results)}
    @classmethod
    async def get_last_traded_price(cls, trading_pair: str) -> float:
        """Fetch the last traded price for one pair from the REST ticker endpoint."""
        async with aiohttp.ClientSession() as client:
            # https://api-pub.bitfinex.com/v2/ticker/tBTCUSD
            ticker_url: str = join_paths(BITFINEX_REST_URL, f"ticker/{convert_to_exchange_trading_pair(trading_pair)}")
            resp = await client.get(ticker_url)
            resp_json = await resp.json()
            ticker = Ticker(*resp_json)
            return float(ticker.last_price)
    async def get_trading_pairs(self) -> List[str]:
        """
        Get a list of active trading pairs
        (if the market class already specifies a list of trading pairs,
        returns that list instead of all active trading pairs)
        :returns: A list of trading pairs defined by the market class,
        or all active trading pairs from the rest API
        """
        if not self._trading_pairs:
            try:
                self._trading_pairs = await self.fetch_trading_pairs()
            except Exception:
                msg = "Error getting active exchange information. Check network connection."
                self._trading_pairs = []
                self.logger().network(
                    "Error getting active exchange information.",
                    exc_info=True,
                    app_warning_msg=msg
                )
        return self._trading_pairs
    async def get_snapshot(self, client: aiohttp.ClientSession, trading_pair: str) -> Dict[str, Any]:
        """Fetch a full order book snapshot for *trading_pair* over REST."""
        request_url: str = f"{BITFINEX_REST_URL}/book/{convert_to_exchange_trading_pair(trading_pair)}/P0"
        # by default it's = 50, 25 asks + 25 bids.
        # set 100: 100 asks + 100 bids
        # Exchange only allow: 1, 25, 100 (((
        params = {
            "len": self.SNAPSHOT_LIMIT_SIZE
        }
        async with client.get(request_url, params=params) as response:
            response: aiohttp.ClientResponse = response
            if response.status != RESPONSE_SUCCESS:
                raise IOError(f"Error fetching Bitfinex market snapshot for {trading_pair}. "
                              f"HTTP status is {response.status}.")
            raw_data: Dict[str, Any] = await response.json()
            return self._prepare_snapshot(trading_pair, [BookStructure(*i) for i in raw_data])
    async def get_new_order_book(self, trading_pair: str) -> OrderBook:
        """Build and return a fresh OrderBook initialized from a REST snapshot."""
        async with aiohttp.ClientSession() as client:
            snapshot: Dict[str, Any] = await self.get_snapshot(client, trading_pair)
            snapshot_timestamp: float = time.time()
            snapshot_msg: OrderBookMessage = BitfinexOrderBook.snapshot_message_from_exchange(
                snapshot,
                snapshot_timestamp
            )
            active_order_tracker: BitfinexActiveOrderTracker = BitfinexActiveOrderTracker()
            bids, asks = active_order_tracker.convert_snapshot_message_to_order_book_row(snapshot_msg)
            order_book = self.order_book_create_function()
            order_book.apply_snapshot(bids, asks, snapshot_msg.update_id)
            return order_book
    async def get_tracking_pairs(self) -> Dict[str, OrderBookTrackerEntry]:
        """Create an initialized tracker entry (book + tracker) per trading pair."""
        result: Dict[str, OrderBookTrackerEntry] = {}
        trading_pairs: List[str] = await self.get_trading_pairs()
        number_of_pairs: int = len(trading_pairs)
        async with aiohttp.ClientSession() as client:
            for idx, trading_pair in enumerate(trading_pairs):
                try:
                    snapshot: Dict[str, Any] = await self.get_snapshot(client, trading_pair)
                    snapshot_timestamp: float = time.time()
                    snapshot_msg: OrderBookMessage = BitfinexOrderBook.snapshot_message_from_exchange(
                        snapshot,
                        snapshot_timestamp
                    )
                    order_book: OrderBook = self.order_book_create_function()
                    active_order_tracker: BitfinexActiveOrderTracker = BitfinexActiveOrderTracker()
                    order_book.apply_snapshot(
                        snapshot_msg.bids,
                        snapshot_msg.asks,
                        snapshot_msg.update_id
                    )
                    result[trading_pair] = BitfinexOrderBookTrackerEntry(
                        trading_pair, snapshot_timestamp, order_book, active_order_tracker
                    )
                    self.logger().info(
                        f"Initialized order book for {trading_pair}. "
                        f"{idx+1}/{number_of_pairs} completed."
                    )
                    # Pace the REST requests so we do not hit rate limits.
                    await asyncio.sleep(self.STEP_TIME_SLEEP)
                except IOError:
                    self.logger().network(
                        f"Error getting snapshot for {trading_pair}.",
                        exc_info=True,
                        app_warning_msg=f"Error getting snapshot for {trading_pair}. "
                                        "Check network connection."
                    )
                except Exception:
                    self.logger().error(
                        f"Error initializing order book for {trading_pair}. ",
                        exc_info=True
                    )
        return result
    async def listen_for_trades(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
        """Subscribe to the trades channel and push trade messages to *output* forever."""
        while True:
            try:
                trading_pairs: List[str] = await self.get_trading_pairs()
                # NOTE(review): each iteration of this for-loop opens a new socket and
                # the `async for` below never returns while the socket is healthy, so
                # only the first pair is effectively streamed — confirm intent.
                for trading_pair in trading_pairs:
                    async with websockets.connect(BITFINEX_WS_URI) as ws:
                        payload: Dict[str, Any] = {
                            "event": "subscribe",
                            "channel": "trades",
                            "symbol": convert_to_exchange_trading_pair(trading_pair),
                        }
                        await ws.send(ujson.dumps(payload))
                        await asyncio.wait_for(ws.recv(), timeout=self.MESSAGE_TIMEOUT)  # response
                        await asyncio.wait_for(ws.recv(), timeout=self.MESSAGE_TIMEOUT)  # subscribe info
                        await asyncio.wait_for(ws.recv(), timeout=self.MESSAGE_TIMEOUT)  # snapshot
                        async for raw_msg in self._get_response(ws):
                            msg = self._prepare_trade(raw_msg)
                            if msg:
                                msg_book: OrderBookMessage = BitfinexOrderBook.trade_message_from_exchange(
                                    msg,
                                    metadata={"symbol": f"{trading_pair}"}
                                )
                                output.put_nowait(msg_book)
            except Exception as err:
                self.logger().error(err)
                self.logger().network(
                    "Unexpected error with WebSocket connection.",
                    exc_info=True,
                    app_warning_msg="Unexpected error with WebSocket connection. "
                                    f"Retrying in {int(self.MESSAGE_TIMEOUT)} seconds. "
                                    "Check network connection."
                )
                await asyncio.sleep(5)
    async def listen_for_order_book_diffs(self,
                                          ev_loop: asyncio.BaseEventLoop,
                                          output: asyncio.Queue):
        """Subscribe to the book channel; emit an initial snapshot, then diffs."""
        while True:
            try:
                trading_pairs: List[str] = await self.get_trading_pairs()
                for trading_pair in trading_pairs:
                    async with websockets.connect(BITFINEX_WS_URI) as ws:
                        payload: Dict[str, Any] = {
                            "event": "subscribe",
                            "channel": "book",
                            "prec": "P0",
                            "symbol": convert_to_exchange_trading_pair(trading_pair),
                        }
                        await ws.send(ujson.dumps(payload))
                        await asyncio.wait_for(ws.recv(), timeout=self.MESSAGE_TIMEOUT)  # response
                        await asyncio.wait_for(ws.recv(), timeout=self.MESSAGE_TIMEOUT)  # subscribe info
                        raw_snapshot = await asyncio.wait_for(ws.recv(), timeout=self.MESSAGE_TIMEOUT)  # snapshot
                        snapshot = self._prepare_snapshot(trading_pair, [BookStructure(*i) for i in ujson.loads(raw_snapshot)[1]])
                        snapshot_timestamp: float = time.time()
                        snapshot_msg: OrderBookMessage = BitfinexOrderBook.snapshot_message_from_exchange(
                            snapshot,
                            snapshot_timestamp
                        )
                        output.put_nowait(snapshot_msg)
                        async for raw_msg in self._get_response(ws):
                            msg = self._parse_raw_update(trading_pair, raw_msg)
                            if msg is not None:
                                output.put_nowait(msg)
            except Exception as err:
                self.logger().error(err)
                self.logger().network(
                    "Unexpected error with WebSocket connection.",
                    exc_info=True,
                    app_warning_msg="Unexpected error with WebSocket connection. "
                                    f"Retrying in {int(self.MESSAGE_TIMEOUT)} seconds. "
                                    "Check network connection."
                )
                await asyncio.sleep(5)
    async def listen_for_order_book_snapshots(self,
                                              ev_loop: asyncio.BaseEventLoop,
                                              output: asyncio.Queue):
        """Periodically poll REST snapshots for all pairs, once per hour boundary."""
        while True:
            trading_pairs: List[str] = await self.get_trading_pairs()
            try:
                async with aiohttp.ClientSession() as client:
                    for trading_pair in trading_pairs:
                        try:
                            snapshot: Dict[str, Any] = await self.get_snapshot(client, trading_pair)
                            snapshot_timestamp: float = time.time()
                            snapshot_msg: OrderBookMessage = BitfinexOrderBook.snapshot_message_from_exchange(
                                snapshot,
                                snapshot_timestamp
                            )
                            output.put_nowait(snapshot_msg)
                            self.logger().debug(f"Saved order book snapshot for {trading_pair}")
                            await asyncio.sleep(self.TIME_SLEEP_BETWEEN_REQUESTS)
                        except asyncio.CancelledError:
                            raise
                        except Exception as err:
                            # NOTE(review): logging.error() is given a stray positional
                            # arg with no %s placeholder in the message — the record
                            # will fail to format. Consider
                            # self.logger().error("Listening snapshots: %s", err).
                            self.logger().error("Listening snapshots", err)
                            self.logger().network(
                                "Unexpected error with HTTP connection.",
                                exc_info=True,
                                app_warning_msg="Unexpected error with HTTP connection. "
                                                f"Retrying in {self.TIME_SLEEP_BETWEEN_REQUESTS} sec."
                                                "Check network connection."
                            )
                            await asyncio.sleep(self.TIME_SLEEP_BETWEEN_REQUESTS)
                # Sleep until the top of the next hour before refreshing again.
                this_hour: pd.Timestamp = pd.Timestamp.utcnow().replace(
                    minute=0, second=0, microsecond=0
                )
                next_hour: pd.Timestamp = this_hour + pd.Timedelta(hours=1)
                delta: float = next_hour.timestamp() - time.time()
                await asyncio.sleep(delta)
            except asyncio.CancelledError:
                raise
            except Exception as err:
                # NOTE(review): same logging misuse as above — stray positional arg.
                self.logger().error("Listening snapshots", err)
                self.logger().error("Unexpected error", exc_info=True)
                await asyncio.sleep(self.TIME_SLEEP_BETWEEN_REQUESTS)
| StarcoderdataPython |
3340959 |
import json
import os
import numpy as np
import csv
# File names inside the dataset directory.
REFERENCE_CSV = "referenceSW.csv"       # VIA export that holds the real file sizes
TARGET_CSV = "convertannotationsSW.csv" # annotations whose file_size must be filled in
RESULT_CSV = "convertannotationsSW1.csv"
# Raw string so the Windows backslashes are taken literally.
DATASET_DIR = r'F:\MaskRCNN\Mask_RCNN\myproject'
# Column order expected by the VIA annotation format.
FIELDNAMES = ['filename', 'file_size', 'file_attributes', 'region_count',
              'region_id', 'region_shape_attributes', 'region_attributes']


def load_file_sizes(path):
    """Return a {filename: file_size} mapping read from a VIA reference CSV.

    :param path: path of a CSV with at least 'filename' and 'file_size' columns.
    """
    with open(path, mode='r', newline='') as csv_file:
        return {row['filename']: row['file_size']
                for row in csv.DictReader(csv_file)}


def fill_file_sizes(rows, sizes):
    """Set each row's 'file_size' from *sizes* (keyed by 'filename'), in place.

    :param rows: list of annotation row dicts (mutated).
    :param sizes: mapping of filename -> file size string.
    :returns: the same list, for convenience.
    :raises KeyError: if a row's filename is missing from *sizes*.
    """
    for row in rows:
        row['file_size'] = sizes[row['filename']]
    return rows


def main():
    """Copy real file sizes from the reference CSV into the annotation CSV."""
    sizes = load_file_sizes(os.path.join(DATASET_DIR, REFERENCE_CSV))
    print(sizes)
    with open(os.path.join(DATASET_DIR, TARGET_CSV), mode='r', newline='') as csv_file:
        rows = fill_file_sizes(list(csv.DictReader(csv_file)), sizes)
    # newline='' is required when writing CSV: the csv module emits its own
    # \r\n line endings, and omitting it produced blank rows on Windows.
    with open(os.path.join(DATASET_DIR, RESULT_CSV), mode='w', newline='') as csv_file:
        writer = csv.DictWriter(csv_file, fieldnames=FIELDNAMES)
        writer.writeheader()
        writer.writerows(rows)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
from distutils.core import setup, Extension
import os, numpy
# Locate NumPy's C headers so the extension can include numpy/*.h.
numpy_include = os.path.join(os.path.dirname(numpy.__file__), "core", "include", "numpy")
print(numpy_include)
# Native extension wrapping the Dynamsoft Barcode Reader C library.
# NOTE(review): linking assumes libDynamsoftBarcodeReader is already on the
# linker search path — confirm the expected install location.
module_dbr = Extension('dbr',
                       sources=['dbr.c'],
                       include_dirs=[numpy_include],
                       libraries=['DynamsoftBarcodeReader'])
setup(name='DynamsoftBarcodeReader',
      version='1.0',
      description='Python barcode extension',
      ext_modules=[module_dbr])
| StarcoderdataPython |
from .github import GithubFeed
| StarcoderdataPython |
from itertools import zip_longest
from typing import List, Tuple, Optional, Dict, Set
import requests
import hashlib
from .logger import logger
from .constants import REPO_PATH
import subprocess
import shlex
import tempfile
import textwrap
from pathlib import Path
from distutils.version import Version
def vercmp(v1: str, v2: str) -> int:
    """Compare two pacman-style version strings.

    Returns 1 if v1 is newer, -1 if older, 0 if equal — RPM/pacman vercmp
    semantics, including epoch ("epoch~version") and pkgrel ("version-rel")
    handling.

    Copyright 2016-2020 <NAME>
    SPDX-License-Identifier: MIT
    """
    def cmp(a: int, b: int) -> int:
        # Classic three-way compare: -1 / 0 / 1.
        return (a > b) - (a < b)
    def split(v: str) -> Tuple[str, str, Optional[str]]:
        # Split "epoch~version-release" into its three components;
        # epoch defaults to "0", release to None.
        if "~" in v:
            e, v = v.split("~", 1)
        else:
            e, v = ("0", v)
        r: Optional[str] = None
        if "-" in v:
            v, r = v.rsplit("-", 1)
        else:
            v, r = (v, None)
        return (e, v, r)
    # Character classes used when segmenting a version string.
    digit, alpha, other = range(3)
    def get_type(c: str) -> int:
        assert c
        if c.isdigit():
            return digit
        elif c.isalpha():
            return alpha
        else:
            return other
    def parse(v: str) -> List[Tuple[int, Optional[str]]]:
        # Break the version into (separator_count, run) pairs, where each run
        # is a maximal sequence of same-class characters (digits or letters).
        parts: List[Tuple[int, Optional[str]]] = []
        seps = 0
        current = ""
        for c in v:
            if get_type(c) == other:
                if current:
                    parts.append((seps, current))
                    current = ""
                seps += 1
            else:
                if not current:
                    current += c
                else:
                    if get_type(c) == get_type(current):
                        current += c
                    else:
                        parts.append((seps, current))
                        current = c
        # Trailing run, or None if the string ended on a separator.
        parts.append((seps, current or None))
        return parts
    def rpmvercmp(v1: str, v2: str) -> int:
        # Segment-by-segment comparison following rpm's rules:
        # more separators win; missing segment loses to digits but beats alpha;
        # digits beat alpha; otherwise compare numerically / lexically.
        for (s1, p1), (s2, p2) in zip_longest(
            parse(v1), parse(v2), fillvalue=(0, None)
        ):
            if s1 is not None and s2 is not None:
                ret = cmp(s1, s2)
                if ret != 0:
                    return ret
            if p1 is None and p2 is None:
                return 0
            if p1 is None:
                if get_type(p2) == alpha:
                    return 1
                return -1
            elif p2 is None:
                if get_type(p1) == alpha:
                    return -1
                return 1
            t1 = get_type(p1)
            t2 = get_type(p2)
            if t1 != t2:
                if t1 == digit:
                    return 1
                elif t2 == digit:
                    return -1
            elif t1 == digit:
                ret = cmp(int(p1), int(p2))
                if ret != 0:
                    return ret
            elif t1 == alpha:
                ret = cmp(p1, p2)
                if ret != 0:
                    return ret
        return 0
    # Compare epoch first, then version, then (when both present) release.
    e1, v1, r1 = split(v1)
    e2, v2, r2 = split(v2)
    ret = rpmvercmp(e1, e2)
    if ret == 0:
        ret = rpmvercmp(v1, v2)
        if ret == 0 and r1 is not None and r2 is not None:
            ret = rpmvercmp(r1, r2)
    return ret
def version_is_newer_than(v1: str, v2: str) -> bool:
    """Return True when *v1* sorts strictly after *v2* under :func:`vercmp`."""
    return vercmp(v1, v2) > 0
class VersionSort(Version):
    """VersionSort, compare two version using ``>`` and ``<``
    using the previously defined version comparing functions.
    This inherits from ``distutils.version.Version``, when it is
    depreciated, compatibity code should be added.
    This uses `version_is_newer_than` to check whether the version
    passed here is newer than the other, to which comparing to.
    This is helpful when using ``list.sort(key=VersionSort)``.
    Examples
    ========
    >>> a = VersionSort("1.0.0")
    >>> b = VersionSort("2.0.0")
    >>> a > b
    False
    >>> a < b
    True
    >>> c = ['1.0.0','1.1.0','1.0.1','1.0.2','1.0.3','1.1.3','1.1.2']
    >>> c.sort(key=VersionSort)
    >>> c
    ['1.0.0', '1.0.1', '1.0.2', '1.0.3', '1.1.0', '1.1.2', '1.1.3']
    """
    def parse(self, vstring):
        # Called by Version.__init__; keep the raw string — vercmp does the parsing.
        self.version = vstring
    def __str__(self):
        # BUG FIX: previously returned the nonexistent attribute ``self.versions``,
        # so str()/print() raised AttributeError.
        return self.version
    def _cmp(self, other):
        """Three-way compare used by Version's rich-comparison operators."""
        if isinstance(other, str):
            other = VersionSort(other)
        elif not isinstance(other, VersionSort):
            return NotImplemented
        if version_is_newer_than(self.version, other.version):
            return 1
        # BUG FIX: equal versions previously fell through to -1, which made
        # ``VersionSort(x) == VersionSort(x)`` evaluate to False.
        if version_is_newer_than(other.version, self.version):
            return -1
        return 0
def find_checksum_from_file(fname, hashtype, info):
    """Hash the packaged file *fname* under the repo for *info* and return its hex digest."""
    package_dir = get_repo_path(info) / info["name"]
    digest = hashlib.new(hashtype)
    with open(package_dir / fname, "rb") as fileobj:
        while True:
            chunk = fileobj.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def find_checksum(url, hashtype):
    """Download *url* and return its *hashtype* hex digest."""
    logger.info("Finding checksum for URL: %s", url)
    logger.info("Hash type: %s", hashtype)
    response = requests.get(url)
    response.raise_for_status()
    # hashlib.new accepts the initial data directly.
    digest = hashlib.new(hashtype, response.content)
    return digest.hexdigest()
def get_repo_path(info):
    """Return the local checkout directory for this package's repository."""
    repo_dirname = info["repo"] + "-packages"
    return REPO_PATH / repo_dirname
def run_command(command, cwd):
    """Run *command* (a shell-style string) in *cwd* and return its decoded stdout.

    The string is tokenised with :func:`shlex.split` and executed WITHOUT a
    shell.  Previously the tokenised argv list was passed together with
    ``shell=True``; on POSIX that executes only the first token and silently
    drops the remaining arguments — a real bug, now fixed.

    :raises Exception: if the command writes anything to stderr.
    """
    argv = shlex.split(command)
    proc = subprocess.Popen(
        argv,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        cwd=cwd,
    )
    stdout, stderr = proc.communicate()
    if stderr:
        raise Exception(stderr.decode())
    return stdout.decode()
class PKGBUILD:
    """An utility class to get data from the content of a ``PKGBUILD``
    (variables are resolved by actually sourcing the file with bash).

    Examples
    --------
    >>> a = PKGBUILD(open('./PKGBUILD').read())
    >>> a.pkgrel
    1
    >>> a.pkgver
    1.2.3
    """
    def __init__(self, content) -> None:
        # Raw text of the PKGBUILD file.
        self.content = content
    def __getattr__(self, var):
        # Any unknown attribute is resolved as a PKGBUILD variable lookup.
        att = self.get_variable_from_pkgbuild(var)
        if not att:
            raise AttributeError(f"No attribute {att} in PKGBUILD")
        else:
            return att
    def check_variable_is_array(self, variable):
        """Return True when *variable* is declared as a bash array in the PKGBUILD."""
        content = self.content
        base = f"#!/bin/bash\n{content}\n"
        # `declare -p` prints "declare -a ..." only for array variables.
        base += f"declare -p {variable} 2> /dev/null | grep -q 'declare \-a' && echo 1 || echo 0\n"
        with tempfile.TemporaryDirectory() as tmpdirname:
            # tmpdirname = Path(".")
            with open(Path(tmpdirname) / "var.sh", "w", encoding="utf-8") as f:
                f.write(base)
            out = run_command(
                f"bash {Path(tmpdirname).as_posix()}/var.sh", cwd=tmpdirname
            )
            # NOTE(review): the cleanup is redundant — TemporaryDirectory removes
            # the file anyway (and the f-string has no placeholder).
            run_command(f"rm var.sh", cwd=tmpdirname)
            return bool(int(out))
    def get_variable_from_pkgbuild(self, variable):
        """Source the PKGBUILD with bash and print *variable* (one line per array item).

        Returns a string for scalars, or a list of strings for array variables
        (with any "prefix::value" source entries reduced to the value part).
        """
        content = self.content
        base = f"#!/bin/bash\n{content}\n"
        base += f"isarray=$(declare -p {variable} 2> /dev/null | grep -q 'declare \-a' && echo true || echo false)\n"
        with tempfile.TemporaryDirectory() as tmpdirname:
            # tmpdirname = Path(".")
            tmpdirname = Path(tmpdirname)
            with open(Path(tmpdirname) / "test.sh", "w", encoding="utf-8") as f:
                # NOTE(review): `[ $isarray=true ]` is a single-word test and is
                # always true in bash; it presumably should be
                # `[ "$isarray" = true ]` — the array branch therefore always
                # runs. Confirm before changing, since `declare -n` makes the
                # array loop work for scalars too.
                f.write(
                    base
                    + f"declare -n tempvar={variable}\n"
                    + textwrap.dedent(
                        """\
                        if [ $isarray=true ]
                        then
                            for i in "${!tempvar[@]}"; do
                                printf "${tempvar[i]}\\n"
                            done
                        else
                            printf "${variable}\\n"
                        fi
                        """
                    )
                )
            out = run_command(
                f"bash {Path(tmpdirname).as_posix()}/test.sh", cwd=tmpdirname
            )
            run_command(f"rm test.sh", cwd=tmpdirname)
            # Drop the trailing newline emitted by the final printf.
            out = out[:-1]
            if self.check_variable_is_array(variable):
                out = out.split("\n")
                for i, j in enumerate(out):
                    # Source entries may look like "name::url" — keep the url part.
                    if "::" in j:
                        out[i] = j.split("::")[1]
                return out
            return out
5169880 | <reponame>specter119/py4cytoscape
# -*- coding: utf-8 -*-
"""Functions common to test suites.
"""
"""License:
Copyright 2020 The Cytoscape Consortium
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from py4cytoscape import *
import os
import functools
# NOTE(review): a module-level function named __init__ taking `self` is unusual
# and is never callable as a constructor here — presumably a leftover from a
# class-to-module conversion. Confirm nothing imports it before removing.
def __init__(self):
    pass
def load_test_session(session_filename=None):
    """Open a Cytoscape session file, or a new default session when None.

    (Removed a dead no-op: the original reassigned session_filename to itself.)
    """
    open_session(session_filename)
def load_test_network(network_name, make_current=True):
    """Import a network file into Cytoscape and return (network SUID, view SUID).

    :param network_name: path of the network file to import.
    :param make_current: when True, make the imported network current; when
        False, restore the previously-current network (if any) afterwards.
    """
    if make_current:
        imported = import_network_from_file(network_name)
        set_current_network(imported['networks'][0])
    else:
        try:
            cur_suid = get_network_suid()
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. No current network -> nothing to restore.
            cur_suid = None
        imported = import_network_from_file(network_name)
        if cur_suid: set_current_network(cur_suid)
    return imported['networks'][0], imported['views'][0]
def test_select_nodes(node_list):
    """Select the given nodes by their COMMON name, or clear the node selection for []."""
    if node_list:
        select_nodes(node_list, by_col='COMMON')
    else:
        clear_selection(type='nodes')
def clean_session_file(session_filename):
    """Delete the session file if it exists; silently do nothing otherwise."""
    file_is_present = os.path.isfile(session_filename)
    if file_is_present:
        os.remove(session_filename)
def skip_for_ui():
    """Return True when PY4CYTOSCAPE_SKIP_UI_TESTS is set to 'TRUE' (case-insensitive)."""
    flag = os.environ.get('PY4CYTOSCAPE_SKIP_UI_TESTS', 'FALSE')
    return flag.upper() == 'TRUE'
def show_test_progress():
    """Return True unless PY4CYTOSCAPE_SHOW_TEST_PROGRESS is set to something other than 'TRUE'."""
    flag = os.environ.get('PY4CYTOSCAPE_SHOW_TEST_PROGRESS', 'TRUE')
    return flag.upper() == 'TRUE'
# Decorators inspired by https://realpython.com/primer-on-python-decorators/
def print_entry_exit(func):
    """Decorator: print entry, exit and exceptions of *func* when progress display is on."""
    @functools.wraps(func)
    def _traced(*args, **kwargs):
        if not show_test_progress():
            return func(*args, **kwargs)
        print(f"Into {func.__name__}()")
        try:
            value = func(*args, **kwargs)
        except Exception as e:
            print(f"{func.__name__!r} exception {e!r}")
            raise
        print(f"Out of {func.__name__!r}")
        return value
    return _traced
| StarcoderdataPython |
4938421 | <filename>user-based-authorization/app.py
import os
import requests
from flask import Flask, request, redirect, session, url_for, render_template
from flask_bootstrap import Bootstrap
from requests_oauthlib import OAuth2Session
# Application and extension setup.
app = Flask(__name__)
Bootstrap(app)
app.config['DEBUG'] = True
# NOTE(review): hard-coded secret key and DEBUG=True are acceptable for a
# demo but must not ship to production; load SECRET_KEY from the environment.
app.config['SECRET_KEY'] = 'supersecretkey'
# Flag for development. Avoids errors if the OAuth redirect uri is not https.
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
# OAuth2 client credentials and Ingest endpoints (replace the placeholders).
client_id = 'YOUR_CLIENT_ID'
client_secret = 'YOUR_CLIENT_SECRET'
authorize_url = 'https://login.ingest.io/authorize'
token_url = 'https://login.ingest.io/token'
# -- functions --
def get_videos():
    """ Returns a list of 'published' and 'scheduled' videos.
    """
    auth_header = 'Bearer {}'.format(session['token'])
    with requests.Session() as sess:
        sess.headers['Authorization'] = auth_header
        sess.headers['Accept'] = 'application/vnd.ingest.v1+json'
        sess.params['status'] = 'published,scheduled'
        resp = sess.get('https://api.ingest.io/videos')
    # Raise on HTTP errors; otherwise hand back the decoded JSON payload.
    if not resp.ok:
        resp.raise_for_status()
    return resp.json()
def get_video(id):
    """ Returns a video by ID
    """
    endpoint = 'https://api.ingest.io/videos/{}'.format(id)
    with requests.Session() as sess:
        sess.headers['Accept'] = 'application/vnd.ingest.v1+json'
        sess.headers['Authorization'] = 'Bearer {}'.format(session['token'])
        resp = sess.get(endpoint)
    # Raise on HTTP errors; otherwise hand back the decoded JSON payload.
    if not resp.ok:
        resp.raise_for_status()
    return resp.json()
def get_video_play_url(video):
    """ Find playback_url for video target named 'high', in case there isn't
    one, returns the first target on the list. Change this to the specific
    target name you wish to play. Returns '' when the video has no targets.
    """
    targets = video['targets']
    if not targets:  # idiomatic truthiness instead of ``len(...) > 0``
        return ''
    preferred = next((target for target in targets
                      if target['name'] == 'high'), targets[0])
    return preferred['playback_url']
def logout_user():
    """ Clears session and revokes user token.

    Bug fix: the original popped 'logged_in' *before* calling the revoke
    endpoint, so a failed revoke left the session half-cleared (token kept,
    logged_in gone). All session keys are now cleared only after a
    successful revoke.
    """
    with requests.Session() as sess:
        sess.headers['Authorization'] = 'Bearer {}'.format(session['token'])
        sess.headers['Accept'] = 'application/vnd.ingest.v1+json'
        resp = sess.delete('https://api.ingest.io/users/me/revoke')
    if resp.ok:
        session.pop('logged_in', None)
        session.pop('oauth_token', None)
        session.pop('token', None)
        session.pop('oauth_state', None)
    # No-op on success; raises requests.HTTPError on failure.
    resp.raise_for_status()
# --- routes ---
@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
@app.route("/login")
def login():
    """Kick off the OAuth2 flow, requesting only the 'read_videos' scope
    since that is all this app needs."""
    oauth = OAuth2Session(client_id, scope='read_videos')
    auth_url, state = oauth.authorization_url(authorize_url)
    # Persist the state token; the callback checks it to prevent CSRF
    # during OAuth2 step 2.
    session['oauth_state'] = state
    return redirect(auth_url)
@app.route("/oauth/ingest", methods=['GET'])
def callback():
    """OAuth2 step 2: exchange the authorization response for an access token
    and mark the user as logged in."""
    oauth = OAuth2Session(client_id, state=session['oauth_state'])
    token = oauth.fetch_token(
        token_url,
        client_secret=client_secret,
        authorization_response=request.url,
    )
    session['oauth_token'] = token
    session['token'] = token['access_token']
    session['logged_in'] = True
    return redirect(url_for('index'))
@app.route('/logout')
def logout():
    """Log the user out if a token is present, then return to the index.

    Bug fix: ``session['token']`` raised KeyError for visitors who were
    never logged in; ``session.get`` tolerates a missing key.
    """
    if session.get('token'):
        logout_user()
    return redirect(url_for('index'))
@app.route('/videos')
def show_videos():
    """List the user's videos, or bounce anonymous visitors to the index.

    Bug fix: ``session['logged_in']`` raised KeyError when the key was
    never set; ``session.get`` treats a missing key as "not logged in".
    """
    if session.get('logged_in'):
        videos = get_videos()
        return render_template('videos.html', videos=videos)
    return redirect(url_for('index'))
@app.route('/videos/<id>')
def show_video(id):
    """Show a single video with its playback URL, or bounce anonymous
    visitors to the index.

    Bug fix: ``session['logged_in']`` raised KeyError when the key was
    never set; ``session.get`` treats a missing key as "not logged in".
    """
    if session.get('logged_in'):
        video = get_video(id)
        play_url = get_video_play_url(video)
        return render_template('video.html', video=video, play_url=play_url)
    return redirect(url_for('index'))
if __name__ == '__main__':
    # Run the Flask development server (not suitable for production).
    app.run()
| StarcoderdataPython |
6454632 | # -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import os
import sys
from fabric.api import env
from fabric.colors import red
from fabric.context_managers import hide
from fabric.context_managers import settings
def load_nailgun_deploy_parser(operation_parser):
    """Register the 'deploy' sub-command and its options on *operation_parser*."""
    deploy_parser = operation_parser.add_parser('deploy')
    # Default fuel-web checkout: the parent of the directory holding this file.
    this_dir = os.path.dirname(__file__)
    repo_default = os.path.realpath(os.path.join(this_dir, '..'))
    deploy_parser.add_argument(
        '-d', '--fuelweb-dir',
        type=str,
        default=repo_default,
        help="Path to fuel-web repository "
             "(if not set '{0}' will be used)".format(repo_default),
    )
    deploy_parser.add_argument(
        '--synconly',
        action='store_true',
        help="Synchronize source and restart service "
             "without masternode configuration",
    )
def load_nailgun_revert_parser(operation_parser):
    """Register the bare 'revert' sub-command (it takes no extra options)."""
    operation_parser.add_parser('revert')
def load_nailgun_parser(subparsers):
    """Attach the 'nailgun' target together with its deploy/revert operations."""
    nailgun = subparsers.add_parser(
        'nailgun', help="Processing nailgun operations"
    )
    ops = nailgun.add_subparsers(
        dest='command',
        help="Deploy or revert nailgun development environment on masternode"
    )
    # Register every nailgun operation on the shared sub-parser.
    for register in (load_nailgun_deploy_parser, load_nailgun_revert_parser):
        register(ops)
if __name__ == '__main__':
    # Build the top-level CLI: master-node address plus per-target actions.
    parser = argparse.ArgumentParser()
    default_masternode_addr = '10.20.0.2'
    parser.add_argument(
        '-m', '--masternode-addr',
        help="Master node address ('{0}' by default)".format(
            default_masternode_addr
        ),
        default=default_masternode_addr
    )
    subparsers = parser.add_subparsers(
        dest='action',
        help="Targets to be developed on master node"
    )
    load_nailgun_parser(subparsers)
    params = parser.parse_args()
    # Configuring fabric global params
    env.host_string = params.masternode_addr
    env.user = 'root'
    # Loading configurator by action value
    # NOTE(review): __import__('configurator.x') returns the top-level
    # 'configurator' package; getattr then fetches the submodule attribute.
    action_module = __import__('configurator.{0}'.format(params.action))
    processor = getattr(action_module, params.action)
    # Executing action
    try:
        with settings(hide('running', 'stdout')):
            processor.action(params)
    except Exception as e:
        print(red("Configuration failed: {0}".format(e)))
        # Exiting with general error code
        sys.exit(1)
| StarcoderdataPython |
3588252 | <filename>src/opera/parser/tosca/v_1_3/policy_definition.py
from ..entity import Entity
from ..list import List
from ..map import Map
from ..reference import Reference, ReferenceXOR
from ..string import String
from ..void import Void
from .trigger_definition import TriggerDefinition
class PolicyDefinition(Entity):
    """TOSCA v1.3 policy definition entity.

    Declaratively describes the schema of a ``policy`` block: which keys are
    allowed, how each value is parsed, and which keys are mandatory.
    """
    # Key name -> parser/validator used by the Entity framework.
    ATTRS = dict(
        type=Reference("policy_types"),
        description=String,
        metadata=Map(String),
        properties=Map(Void),
        targets=List(ReferenceXOR(("topology_template", "node_templates"), ("topology_template", "groups"))),
        triggers=Map(TriggerDefinition),
    )
    # Keys that must be present for the definition to validate.
    REQUIRED = {"type"}
| StarcoderdataPython |
5057082 | # -*- coding: utf-8 -*-
#import random
import numpy as np
from parameter import params
class Connection:
    """A NEAT connection gene: a weighted, optionally disabled synapse
    between two neurons, stamped with a historical innovation number.

    Bug fix: ``createInnovationNumber`` referenced the nonexistent class
    attribute ``Connection.globalInnovationNumber`` (the declared attribute
    is ``global_innovation``), so every auto-numbered construction raised
    AttributeError. The counter name is now used consistently.
    """

    # Next innovation number to hand out; shared by all connections.
    global_innovation = 0

    def __init__(self, in_neuron=None, out_neuron=None, weight=0.0, enabled=True, innov_no=0):
        """
        Create a simple base gene i.e. a connection/synapse.

        Args:
            in_neuron: source neuron id/reference.
            out_neuron: destination neuron id/reference.
            weight: synaptic weight.
            enabled: whether the connection is expressed in the phenotype.
            innov_no: explicit innovation number; pass ``None`` to draw a
                fresh one from the global counter.
        """
        self.in_neuron = in_neuron
        self.out_neuron = out_neuron
        self.weight = weight
        self.enabled = enabled
        # ``is None`` (identity) instead of ``== None`` per PEP 8.
        self.innovation = Connection.createInnovationNumber() if innov_no is None else innov_no

    def mutate(self, mutationRate):
        """Mutate the weight: with probability ``mutationRate`` nudge it by a
        tenth of a uniform draw, otherwise re-draw it uniformly from
        [params['w_min'], params['w_max']]."""
        if np.random.random() < mutationRate:
            # Small perturbation of the current value.
            pertubationValue = (np.random.uniform(params['w_min'], params['w_max'])) / 10
            self.weight += pertubationValue
        else:
            # Replace the weight entirely.
            self.weight = np.random.uniform(params['w_min'], params['w_max'])
        # NOTE(review): the clamp below was left commented out in the
        # original, so weights may drift outside [w_min, w_max].
        # self.weight = min(1, max(-1, self.weight))

    @staticmethod
    def createInnovationNumber():
        """Return the next global innovation number and advance the counter."""
        curInnovationNumber = Connection.global_innovation
        Connection.global_innovation += 1
        return curInnovationNumber

    def __repr__(self):
        string = "Connection {}: w = {} ({} -> {}) {}"
        return string.format(self.innovation, self.weight, self.in_neuron, self.out_neuron,
                             "Enabled" * self.enabled or "Disabled")
3460266 | import numpy as np
class Grid:
    """Minimal 3-D grid-world: an agent starts at the origin and a win cell
    sits in the far x/y corner.

    Bug fixes over the original (which did not even parse):
    - ``__init__`` opened ``path_to_config`` although the parameter is
      ``config_path``, and ``json`` was never imported;
    - ``np.zeros(x, y, z, ...)`` is an invalid signature (shape must be a
      tuple);
    - ``self.grid[agent_pos]`` indexed with an undefined name and a list;
    - ``step``/``reset``/``reward`` were syntactically broken or truncated —
      they are kept as explicit stubs so the class stays importable.
    """

    def __init__(self, config_path):
        """Load grid dimensions from a JSON config file and place the agent.

        The config must contain a ``dims`` key with three integers [x, y, z].
        """
        import json  # local import: the module header only brings in numpy

        with open(config_path, 'r') as f:
            config = json.load(f)
        self.dims = config['dims']
        x, y, z = self.dims
        self.agent_pos = [0, 0, 0]
        # Goal cell in the far x/y corner (the original gave no z component).
        self.win_pos = [x - 1, y - 1]
        # Occupancy grid; 1 marks the agent's current cell.
        self.grid = np.zeros((x, y, z), dtype=np.int8)
        self.grid[tuple(self.agent_pos)] = 1

    def step(self, action):
        """Advance one step. TODO: the original body was truncated
        (``self.move(`` with no matching call); implement the move logic."""
        raise NotImplementedError("Grid.step was incomplete in the original source")

    def reset(self):
        """Reset the episode. TODO: the original body was truncated."""
        raise NotImplementedError("Grid.reset was incomplete in the original source")

    def reward(self):
        """Compute the reward. TODO: the original body was missing entirely."""
        raise NotImplementedError("Grid.reward was incomplete in the original source")
1658527 | # -*- coding: utf-8 -*-
"""
Created on Fri May 14 11:41:40 2021
@author: hatta
"""
from implementation_functions import *
import six
import sys
sys.modules['sklearn.externals.six'] = six
import mlrose
import pandas as pd
import numpy as np
from prince import FAMD #Factor analysis of mixed data
from aif360.metrics import BinaryLabelDatasetMetric
from sklearn.model_selection import train_test_split
from sklearn.metrics import silhouette_score
import matplotlib.cm as cm
import matplotlib.pyplot as plt
'-----------------------------------------------------------------------------'
# import the dataset and get initial statistics
# SKIP THIS BLOCK IF YOU ARE ALREADY IMPORTING A DATAFRAME FROM A CSV (except sensitive attr and decision label definition)
dataset_orig, privileged_groups, unprivileged_groups = aif_data("german", 2, False)
# Define sensitive attributes and decision label names for subgroup label function
# Note: Sensitive attribute(s) must be always given as a list
sens_attr = ['age', 'sex']
decision_label = 'credit'
# Favorable / unfavorable outcome values of the decision label.
fav_l = 1
unfav_l = 0
# Initial disparities in the full original dataset
metric_orig = BinaryLabelDatasetMetric(dataset_orig,
                                       unprivileged_groups=unprivileged_groups,
                                       privileged_groups=privileged_groups)
print("Disparate impact (of original labels) between unprivileged and privileged groups = %f" % metric_orig.disparate_impact())
print("Difference in statistical parity (of original labels) between unprivileged and privileged groups = %f" % metric_orig.statistical_parity_difference())
print("Individual fairness metric that measures how similar the labels are for similar instances = %f" % metric_orig.consistency())
'----------------------------------------------------------------------------'
# Creating the synthetic sub-class label column and num-cat columns identification
orig_df, num_list, cat_list = preprocess(dataset_orig, sens_attr, decision_label)
# The list of sub-group sizes in the dataset (to monitor the dist. of sub-groups)
# NOTE(review): the result is discarded; useful only in an interactive session.
orig_df['sub_labels'].value_counts()
# check correlation of the columns
# res = orig_df.apply(lambda x : pd.factorize(x)[0] if (x.dtype == 'O') else x).corr(method='pearson', min_periods=1)
# check the correlation of features to class labels
# res.loc[res.iloc[:,58].abs() > 0.25, 'important_columns'] = res.iloc[:,58]
# plot heatmap
# plt.figure(figsize=(16,12))
# _ = sns.heatmap(res)
'----------------------------------------------------------------------------'
# Train-test split WITH stratification on the synthetic sub-labels so every
# subgroup is represented proportionally in both splits.
X = orig_df.loc[:, orig_df.columns != decision_label]
y = orig_df.loc[:, orig_df.columns == decision_label].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
                                                    random_state=42, shuffle=True,
                                                    stratify=X['sub_labels'])
# Check class imbalance in the splitted training set
print(X_train['sub_labels'].value_counts())
print(X_test['sub_labels'].value_counts())
# Partial feature scaling (of numerical variables only)
X_train, X_test = scale(X_train, X_test)
num_list, cat_list = type_lists(X_train)
# Calculate the base fairness metrics that can be obtained from the original dataset.
# Find the privileged and unprivileged subgroups based on the X_train's original labels.
dataset_metrics, aggr_metrics, priv_gr, unpriv_gr = aif_dataset_metrics(X_train, y_train,
                                                                        sens_attr, fav_l, unfav_l)
'----------------------------------------------------------------------------'
# Getting the baseline performance results from the imbalanced dataset
# Note: the function is created based on the assump. that the X's have sub_labels
# Instantiate the desired classifier obj to train the classification models
classifier = LogisticRegression()
# classifier = RandomForestClassifier()
# classifier = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1,
#                                         max_depth=1, random_state=0)
baseline_stats, cm, ratio_table = baseline_metrics(classifier, X_train, X_test,
                                                   y_train, y_test, sens_attr,
                                                   fav_l, unfav_l)
'-----------------------------------------------------------------------------'
# Keep the subgroup labels to append them back later
keep_sub_l = X_train['sub_labels']
# Required drops for the GERMAN dataset (THIS DF CREATION IS A MUST):
# sensitive attributes and sub-labels must not be used as model features.
X_train_new = X_train.drop(['age', 'sex', 'sub_labels'], axis=1)
# Get the idx of categ and numeric columns again due to the column drops above
num_list, cat_list = type_lists(X_train_new)
'-----------------------------------------------------------------------------'
'''POSSIBLE PRE-PROCESSING OPTIONS'''
# Gower dist to get distance matrix (for k-medoids on mixed data)
import gower
cat = [True if X_train_new[x].dtype == 'object' else False for x in X_train_new.columns]
gd = gower.gower_matrix(X_train_new, cat_features = cat)
# OR (can be used for both fuzzy c-means and k-medoids):
# Optional dimensionality reduction for big datasets with FAMD
X_train_new['sub_labels'] = keep_sub_l
famd = FAMD(n_components=2, random_state = 42)
famd.fit(X_train_new.drop('sub_labels', axis=1))
X_train_reduc = famd.transform(X_train_new)
# plotting the reduced dimensions
# ax = famd.plot_row_coordinates(X_train_new,
#      color_labels=['sub-labels {}'.format(t) for t in X_train_new['sub_labels']] )
# X_train_red = famd.partial_row_coordinates(X_train_new)
# famd.explained_inertia_
# ax = famd.plot_partial_row_coordinates(X_train_new,
#      color_labels=['sub-labels {}'.format(t) for t in X_train_new['sub_labels']])
# Delete the subgroup label column again if dimensionality reduction is used
X_train_new = X_train_new.drop(['sub_labels'], axis=1)
'----------------------------------------------------------------------------'
'''K-MEDOIDS'''
from sklearn_extra.cluster import KMedoids
# Note: when the metric is precomputed, the object doesn't return any cluster centers.
# Find the number of clusters based on inertia (elbow) and silhouette score.
# If Gower's distance is used, the metric param should be 'precomputed'.
costs = []
n_clusters = []
clusters_assigned = []
silhouette_scores = []
from tqdm import tqdm
for i in tqdm(range(2, 10)):
    try:
        cluster = KMedoids(n_clusters=i, metric='euclidean', method='pam',
                           random_state=0).fit(X_train_reduc)
        clusters = cluster.predict(X_train_reduc)
        costs.append(cluster.inertia_)
        n_clusters.append(i)
        clusters_assigned.append(clusters)
        silhouette_val = silhouette_score(X_train_reduc, clusters,
                                          metric='euclidean')
        silhouette_scores.append(silhouette_val)
    except Exception:
        # Bug fix: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit during the sweep.
        print(f"Can't cluster with {i} clusters")
plt.scatter(x=n_clusters, y=costs)
plt.plot(n_clusters, costs)
plt.show()
plt.scatter(x=n_clusters, y=silhouette_scores)
plt.plot(n_clusters, silhouette_scores)
plt.show()
# predict cluster labels (3 or 6 clusts)
numc = 3
model = KMedoids(n_clusters=numc, metric='euclidean', method='pam',
                 random_state=0).fit(X_train_reduc)
clusters = model.predict(X_train_reduc)
centroids_df = X_train_new.iloc[model.medoid_indices_]
cluster_centroids = centroids_df.reset_index(drop=True)
cluster_centroids = cluster_centroids.to_numpy(np.float64)
# Backfill cluster centers when the fitted model did not expose any
# (e.g. with a precomputed metric).
if len(model.cluster_centers_) == 0:
    model.cluster_centroids_ = cluster_centroids
    model.cluster_centers_ = cluster_centroids
else:
    pass
'--------------------------------------------------------'
# If Gower's distance is used, visualize clusters on the distance matrix:
plot_tsne(gd, clusters, 60, 80)
# If FAMD is used, visualize clusters on the reduced coordinates:
plot_tsne(X_train_reduc, clusters, 50, 100)
'----------------------------------------------------------------------------'
# Putting the required label info back to the dataframe before oversampling
X_train_new['cluster_labels'] = clusters
X_train_new['cluster_labels'] = X_train_new['cluster_labels'].astype('object')
X_train_new['sub_labels'] = keep_sub_l
# Also put the original decision labels so that they are also oversampled
X_train_new['class_labels'] = y_train
# Cluster datasets in their original form (one dataframe per cluster id).
existing_clust = {}
for h in range(len(X_train_new['cluster_labels'].unique())):
    existing_clust[h] = X_train_new.loc[X_train_new['cluster_labels']==h]
# checking the subgroup counts in each cluster dataset
for item in existing_clust:
    print(existing_clust[item]['sub_labels'].value_counts())
# fixing the cluster memberships in each df if a sample from a subgroup is alone
fixed_clusters = fix_memberships(X_train_new, model)
for item in fixed_clusters:
    print(fixed_clusters[item]['sub_labels'].value_counts())
# transform the data types of all the columns to numeric for SMOTE
for df in fixed_clusters:
    for i in range(len(fixed_clusters[df].columns)):
        fixed_clusters[df].iloc[:,i] = fixed_clusters[df].iloc[:,i].astype('float')
# Over-sampling of each cluster
oversampled_clusters, unique_subl = oversample(fixed_clusters)
for item in oversampled_clusters:
    print(oversampled_clusters[item]['sub_labels'].value_counts())
# Deleting sensitive attributes and subgroup labels from test set is required
# to apply the implemented solutions (sens. attr. are not used to satisfy the
# disparate treatment in the functions)
test_sublabels = X_test['sub_labels']
X_test_n = X_test.drop(['age', 'sex','sub_labels'], axis=1)
num_list, cat_list = type_lists(X_test_n)
'----------------------------------------------------------------------------'
# Predicting the test sets based on strategy 1: one model per cluster,
# every cluster model scores the whole test set.
X_test_pred1 = predict_whole_set(classifier, oversampled_clusters, X_test_n)
'----------------------------------------------'
# Predicting the test sets based on strategy 2: each test sample is scored
# by the model of its nearest cluster.
# If Gower's distance is used:
# test_set = pd.concat([centroids_df, X_test_n], axis=0)
# cats = [True if X_test_n[x].dtype == 'object' else False for x in X_test_n.columns]
# gd_test = gower.gower_matrix(test_set, cat_features = cats)
# pairwise_dist = gd_test[numc:, 0:numc]
# If FAMD is used:
X_test_reduc = famd.transform(X_test_n)
dists = kmed_dists(X_test_reduc, model.cluster_centers_)
X_test_pred2 = predict_w_clusters(classifier, oversampled_clusters, X_test_n,
                                  dists, unique_subl, test_sublabels)
'----------------------------------------------'
# Predicting the test sets based on strategy 3: distance-weighted vote over
# all cluster models.
'''K-PROTOTYPES'''
X_test_pred3 = predict_w_weights_kmed(classifier, oversampled_clusters, dists,
                                      X_test_n, unique_subl, test_sublabels)
'----------------------------------------------------------------------------'
'''The metrics table creation for given dataset'''
# Protected attributes and groups must be defined based on the dataset and
# preferences to calculate fairness and performance metrics
metrics_table1, cm1, ratio_t1 = metrics_calculate(X_test, X_test_pred1, y_test, sens_attr,
                                                  fav_l, unfav_l)
metrics_table2, cm2, ratio_t2 = metrics_calculate(X_test, X_test_pred2, y_test, sens_attr,
                                                  fav_l, unfav_l)
metrics_table3, cm3, ratio_t3 = metrics_calculate(X_test, X_test_pred3, y_test, sens_attr,
                                                  fav_l, unfav_l)
# if all results are needed to be placed in one dataframe
all_results = pd.concat([baseline_stats, metrics_table1, metrics_table2,
                         metrics_table3], axis=0)
112695 | from mainframe import MainFrame
# Parse the binary transaction log and print summary statistics.
with open("txnlog.dat", mode='rb') as file:  # b is important -> binary
    fileContent = file.read()
m = MainFrame(fileContent)
# Aggregate totals per record type (0x00 debit, 0x01 credit, 0x02/0x03 autopay start/end).
print("Total amount of dollars in debits:", m.amountTotal(m.recordTypes[0x00]))
print("Total amount of dollars in credits:", m.amountTotal(m.recordTypes[0x01]))
print("Total autopays started:", m.countRecordType(m.recordTypes[0x02]))
print("Total autopays ended:", m.countRecordType(m.recordTypes[0x03]))
print("UserID 2456938384156277127 balance:", m.userBalanceById(2456938384156277127))
m.printAllRecords()
1867528 | <reponame>leelige/mindspore
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Yolo train."""
import os
import time
import datetime
import mindspore as ms
from mindspore.context import ParallelMode
from mindspore.nn.optim.momentum import Momentum
from mindspore import Tensor
from mindspore import context
from mindspore.communication.management import init, get_rank, get_group_size
from mindspore.train.callback import ModelCheckpoint, RunContext
from mindspore.train.callback import _InternalCallbackParam, CheckpointConfig
from mindspore.profiler.profiling import Profiler
from src.yolo import YOLOV3, YoloWithLossCell, TrainingWrapper
from src.logger import get_logger
from src.util import AverageMeter, get_param_groups
from src.lr_scheduler import get_lr
from src.yolo_dataset import create_yolo_dataset
from src.initializer import default_recurisive_init, load_yolo_params
from model_utils.config import config
from model_utils.moxing_adapter import moxing_wrapper
from model_utils.device_adapter import get_device_id, get_device_num
ms.set_seed(1)
def set_default():
    """Resolve derived config values, set the device context, optionally
    start a profiler, and initialize distributed training and logging.

    Returns:
        The Profiler instance when ``config.need_profiler`` is set,
        otherwise None.
    """
    # Cosine-annealing needs t_max >= max_epoch.
    if config.lr_scheduler == 'cosine_annealing' and config.max_epoch > config.t_max:
        config.t_max = config.max_epoch
    config.lr_epochs = list(map(int, config.lr_epochs.split(',')))
    # Derive COCO dataset paths from the configured data directory.
    config.data_root = os.path.join(config.data_dir, 'train2017')
    config.ann_file = os.path.join(config.data_dir, 'annotations/instances_train2017.json')
    config.data_val_root = os.path.join(config.data_dir, 'val2017')
    config.ann_val_file = os.path.join(config.data_dir, 'annotations/instances_val2017.json')
    device_id = int(os.getenv('DEVICE_ID', '0'))
    context.set_context(mode=context.GRAPH_MODE,
                        device_target=config.device_target, save_graphs=False, device_id=device_id)
    if config.need_profiler:
        profiler = Profiler(output_path=config.outputs_dir, is_detail=True, is_show_op_path=True)
    else:
        profiler = None
    # init distributed
    if config.is_distributed:
        if config.device_target == "Ascend":
            init()
        else:
            init("nccl")
        config.rank = get_rank()
        config.group_size = get_group_size()
    # select for master rank save ckpt or all rank save, compatible for model parallel
    config.rank_save_ckpt_flag = 0
    if config.is_save_on_master:
        if config.rank == 0:
            config.rank_save_ckpt_flag = 1
    else:
        config.rank_save_ckpt_flag = 1
    # logger: one timestamped output directory per run
    config.outputs_dir = os.path.join(config.ckpt_path,
                                      datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))
    config.logger = get_logger(config.outputs_dir, config.rank)
    config.logger.save_args(config)
    return profiler
def convert_training_shape(args_training_shape):
    """Turn a scalar training-shape argument into a square [side, side] list."""
    side = int(args_training_shape)
    return [side, side]
def modelarts_pre_process():
    '''ModelArts pre-process: extract the dataset zip exactly once per server
    (device 0 of each 8-device group does the unzip; the others wait on a
    file-system lock), then fix up the checkpoint output directory.'''
    def unzip(zip_file, save_dir):
        # Extract zip_file into save_dir, printing coarse progress.
        import zipfile
        s_time = time.time()
        if not os.path.exists(os.path.join(save_dir, config.modelarts_dataset_unzip_name)):
            zip_isexist = zipfile.is_zipfile(zip_file)
            if zip_isexist:
                fz = zipfile.ZipFile(zip_file, 'r')
                data_num = len(fz.namelist())
                print("Extract Start...")
                print("unzip file num: {}".format(data_num))
                # Print roughly every 1% of files.
                data_print = int(data_num / 100) if data_num > 100 else 1
                i = 0
                for file in fz.namelist():
                    if i % data_print == 0:
                        print("unzip percent: {}%".format(int(i * 100 / data_num)), flush=True)
                    i += 1
                    fz.extract(file, save_dir)
                print("cost time: {}min:{}s.".format(int((time.time() - s_time) / 60),
                                                     int(int(time.time() - s_time) % 60)))
                print("Extract Done.")
            else:
                print("This is not zip.")
        else:
            print("Zip has been extracted.")
    if config.need_modelarts_dataset_unzip:
        zip_file_1 = os.path.join(config.data_path, config.modelarts_dataset_unzip_name + ".zip")
        save_dir_1 = os.path.join(config.data_path)
        sync_lock = "/tmp/unzip_sync.lock"
        # Each server contains 8 devices as most.
        if get_device_id() % min(get_device_num(), 8) == 0 and not os.path.exists(sync_lock):
            print("Zip file path: ", zip_file_1)
            print("Unzip file save dir: ", save_dir_1)
            unzip(zip_file_1, save_dir_1)
            print("===Finish extract data synchronization===")
            try:
                # Create the lock file to signal completion to sibling devices.
                os.mknod(sync_lock)
            except IOError:
                pass
        # All devices (including the extractor) wait for the lock to appear.
        while True:
            if os.path.exists(sync_lock):
                break
            time.sleep(1)
        print("Device: {}, Finish sync unzip data from {} to {}.".format(get_device_id(), zip_file_1, save_dir_1))
    config.save_checkpoint_dir = os.path.join(config.output_path, config.save_checkpoint_dir)
@moxing_wrapper(pre_process=modelarts_pre_process)
def run_train():
    """Train YOLOv3: build network/optimizer/dataset, run the manual training
    loop with periodic logging and (on the designated rank) checkpointing."""
    profiler = set_default()
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=get_device_id())
    loss_meter = AverageMeter('loss')
    context.reset_auto_parallel_context()
    parallel_mode = ParallelMode.STAND_ALONE
    degree = 1
    if config.is_distributed:
        parallel_mode = ParallelMode.DATA_PARALLEL
        degree = get_group_size()
    context.set_auto_parallel_context(parallel_mode=parallel_mode, gradients_mean=True, device_num=degree)
    network = YOLOV3(is_training=True)
    # default is kaiming-normal
    default_recurisive_init(network)
    load_yolo_params(config, network)
    network = YoloWithLossCell(network)
    if config.training_shape:
        config.multi_scale = [convert_training_shape(config.training_shape)]
    # NOTE(review): this is a self-assignment and has no effect — confirm
    # whether a conversion/validation was intended here.
    if config.resize_rate:
        config.resize_rate = config.resize_rate
    ds, data_size = create_yolo_dataset(image_dir=config.data_root, anno_path=config.ann_file, is_training=True,
                                        batch_size=config.per_batch_size, max_epoch=config.max_epoch,
                                        device_num=config.group_size, rank=config.rank, config=config)
    config.logger.info('Finish loading dataset')
    config.steps_per_epoch = int(data_size / config.per_batch_size / config.group_size)
    if config.ckpt_interval <= 0:
        config.ckpt_interval = config.steps_per_epoch
    lr = get_lr(config)
    opt = Momentum(params=get_param_groups(network),
                   learning_rate=Tensor(lr),
                   momentum=config.momentum,
                   weight_decay=config.weight_decay,
                   loss_scale=config.loss_scale)
    network = TrainingWrapper(network, opt, config.loss_scale)
    network.set_train()
    if config.rank_save_ckpt_flag:
        # checkpoint save: configure the callback machinery manually since
        # this loop does not go through Model.train().
        ckpt_max_num = config.max_epoch * config.steps_per_epoch // config.ckpt_interval
        ckpt_config = CheckpointConfig(save_checkpoint_steps=config.ckpt_interval,
                                       keep_checkpoint_max=ckpt_max_num)
        save_ckpt_path = os.path.join(config.outputs_dir, 'ckpt_' + str(config.rank) + '/')
        ckpt_cb = ModelCheckpoint(config=ckpt_config,
                                  directory=save_ckpt_path,
                                  prefix='{}'.format(config.rank))
        cb_params = _InternalCallbackParam()
        cb_params.train_network = network
        cb_params.epoch_num = ckpt_max_num
        cb_params.cur_epoch_num = 1
        run_context = RunContext(cb_params)
        ckpt_cb.begin(run_context)
    old_progress = -1
    t_end = time.time()
    data_loader = ds.create_dict_iterator(output_numpy=True, num_epochs=1)
    for i, data in enumerate(data_loader):
        images = data["image"]
        images = Tensor.from_numpy(images)
        batch_y_true_0 = Tensor.from_numpy(data['bbox1'])
        batch_y_true_1 = Tensor.from_numpy(data['bbox2'])
        batch_gt_box0 = Tensor.from_numpy(data['gt_box1'])
        batch_gt_box1 = Tensor.from_numpy(data['gt_box2'])
        loss = network(images, batch_y_true_0, batch_y_true_1, batch_gt_box0, batch_gt_box1)
        loss_meter.update(loss.asnumpy())
        if config.rank_save_ckpt_flag:
            # ckpt progress
            cb_params.cur_step_num = i + 1  # current step number
            cb_params.batch_num = i + 2
            ckpt_cb.step_end(run_context)
        if i % config.log_interval == 0:
            time_used = time.time() - t_end
            epoch = int(i / config.steps_per_epoch)
            fps = config.per_batch_size * (i - old_progress) * config.group_size / time_used
            if config.rank == 0:
                config.logger.info(
                    'epoch[{}], iter[{}], {}, fps:{:.2f} imgs/sec, lr:{}'.format(epoch, i, loss_meter, fps, lr[i]))
            t_end = time.time()
            loss_meter.reset()
            old_progress = i
        if (i + 1) % config.steps_per_epoch == 0 and config.rank_save_ckpt_flag:
            cb_params.cur_epoch_num += 1
        # Stop after a few steps when profiling so the trace stays small.
        if config.need_profiler and profiler is not None:
            if i == 10:
                profiler.analyse()
                break
    config.logger.info('==========end training===============')
if __name__ == "__main__":
    # Script entry point: launch training.
    run_train()
| StarcoderdataPython |
1878374 | # MODULE: phy-components
# CLASS: phy-mii-transceiver
from sim_core import *
from components import *
from base_components import *
class phy_mii_transceiver(component_object):
    """Simics component modelling a generic MII PHY transceiver.

    Exposes an upward 'mac' connector toward the MAC and a downward,
    hot-pluggable 'eth' connector toward an ethernet link.
    """
    classname = 'phy-mii-transceiver'
    basename = 'phy'
    description = "PHY component representing a general MII transceiver"
    # Connector declarations consumed by the Simics component framework.
    connectors = {
        'mac' : {'type' : 'phy', 'direction' : 'up', 'empty_ok' : False,
                 'hotplug' : False, 'multi' : False},
        'eth' : {'type' : 'ethernet-link', 'direction' : 'down',
                 'empty_ok' : True, 'hotplug' : True, 'multi' : False}}

    def __init__(self, parse_obj):
        component_object.__init__(self, parse_obj)
        # Attribute defaults; both may only be changed before configuration.
        self.phy_id = 0
        self.mii_addr = 0

    def get_phy_id(self, idx):
        # Attribute getter for 'phy_id'.
        return self.phy_id

    def set_phy_id(self, val, idx):
        # The PHY ID is baked into MII registers 2/3 in add_objects(),
        # so it must not change once the object is configured.
        if self.obj.configured:
            return Sim_Set_Illegal_Value
        self.phy_id = val
        return Sim_Set_Ok

    def get_mii_address(self, idx):
        # Attribute getter for 'mii_address'.
        return self.mii_addr

    def set_mii_address(self, val, idx):
        # The MII bus address feeds the 'mac' connector info, so it is
        # likewise frozen after configuration.
        if self.obj.configured:
            return Sim_Set_Illegal_Value
        self.mii_addr = val
        return Sim_Set_Ok

    def add_objects(self):
        # Create the underlying mii-transceiver object and seed its
        # 32 MII registers; registers 2/3 carry the 32-bit PHY ID.
        self.o.phy = pre_obj('phy$', 'mii-transceiver')
        self.o.phy.registers = [0] * 32
        self.o.phy.registers[2] = (self.phy_id >> 16) & 0xffff
        self.o.phy.registers[3] = self.phy_id & 0xffff

    def add_connector_info(self):
        self.connector_info['mac'] = [self.o.phy, self.mii_addr]
        self.connector_info['eth'] = []

    def connect_phy(self, connector, mac):
        # Wire the MAC side.
        self.o.phy.mac = mac

    def connect_ethernet_link(self, connector, link):
        # Attach the ethernet link (hot-pluggable).
        self.o.phy.link = link

    def disconnect_ethernet_link(self, connector):
        # Detach the ethernet link.
        self.o.phy.link = None

# Register the component class and its configuration attributes with Simics.
register_component_class(phy_mii_transceiver,
                         [['phy_id', Sim_Attr_Optional, 'i', 'PHY ID'],
                          ['mii_address', Sim_Attr_Optional, 'i',
                           'PHY address on MII bus']])
| StarcoderdataPython |
8177158 | <filename>testcase_skills/views_ex/tdd_with_post_views.py
from django.shortcuts import render
from django.http import HttpResponse
# reference : https://wikidocs.net/11061
# To make the test pass with minimal code, the view is changed to simply echo
# back the value received in the POST request.
def home_page(request):
    """Render the home page, echoing back a POSTed 'item_text' value if any."""
    # Earlier TDD iteration: echo the raw POST value directly.
    # if request.method == 'POST':
    #     return HttpResponse(request.POST['item_text'])
    # return render(request, 'home.html')
    # Because the test_uses_home_template test issues a GET request, the
    # 'item_text' key does not exist and the version below fails on KeyError.
    # The view is therefore adjusted to handle a missing key:
    # return render(request, 'home.html', {
    #     'new_item_text': request.POST['item_text']
    # })
    return render(request, 'home.html', {
        'new_item_text': request.POST.get('item_text', '')
    })
1704515 | <reponame>talonchandler/polaris
def main():
    """Demonstrate the main functionality of polaris.

    Generates and visualizes a spatio-angular phantom (a Spang object),
    specifies a microscope (a MultiMicroscope object), generates data (a
    Data object) using the phantom and the microscope, and reconstructs
    the object using a pseudo-inverse solution.
    """
    from polaris import data, spang, phantom
    from polaris.micro import multi
    import numpy as np

    # Make output folder
    folder = './helix/'
    import os
    if not os.path.exists(folder):
        os.makedirs(folder)

    # Generate phantom
    vox_dim = (130,130,130) # nm (voxel edge lengths)

    # helix phantom
    px = (64,64,64)
    # px = (64,128,64)
    # px = (64,64,128)
    # px = (64,128,256)
    phant = phantom.three_helix(vox_dim=vox_dim, px=px)

    # bead phantom - try varying orientation and kappa
    # px = (32,32,32)
    # phant = phantom.bead(orientation=[1,0,0], kappa=30, vox_dim=vox_dim, px=px)

    # Mask for fast rendering: only voxels with non-zero density are drawn.
    mask = phant.density() > 0
    roi = [[20,20,20],[39,39,39]]

    # uncomment and try "interact=True" to interact with the phantom
    phant.visualize(folder+'phantom/', mask=mask, interact=False, video=True,
                    n_frames=180, scale=3, viz_type=['ODF','Peak','Density'],
                    compress=True, mag=1, skip_n=2, roi=roi)
    # phant.visualize(folder+'phantom/', mask=mask, interact=False, video=True,
    #                 n_frames=36, scale=3, viz_type=['Density'],
    #                 tiff=True, mag=4, skip_n=2, roi=None)
    # phant.save_stats(folder+'phantom/')
    phant.save_summary(folder+'phantom.pdf', mask=mask)

    # Specify microscope
    # try "det_nas = [0.8, 0.8]" for symmetric diSPIM
    data1 = data.Data(g=np.zeros(phant.f.shape[0:3]+(4,2)), vox_dim=vox_dim,
                      det_nas=[1.1, 0.71])
    m = multi.MultiMicroscope(phant, data1, n_samp=1.33, lamb=525,
                              spang_coupling=True)

    # Calculate system matrix
    m.calc_H()

    # Generate data using forward model
    # set "snr" to a positive number to simulate Poisson noise
    data1.g = m.fwd(phant.f, snr=None)

    # Save data
    # data1.save_mips(folder+'data.pdf')
    # try "diSPIM_format=False" for a 5D ImageJ hyperstack (pol = c, view = t)
    data1.save_tiff(folder+'data/', diSPIM_format=True)

    # Calculate pseudoinverse solution
    # set "eta" to a positive number for Tikhonov regularization
    phant.f = m.pinv(data1.g, eta=0)

    # Calculate reconstruction statistics and save
    # (re-threshold the mask since the reconstruction is not exactly sparse)
    mask = phant.density() > 0.1
    phant.visualize(folder+'phantom-recon/', mask=mask, interact=False, video=True,
                    n_frames=180, viz_type=['ODF','Peak', 'Density'],
                    skip_n=2, scale=3, roi=roi, mag=1)
    # phant.save_stats(folder+'phantom-recon/')
    phant.save_summary(folder+'phantom-recon.pdf', mask=mask)

if __name__ == '__main__':
    main()
| StarcoderdataPython |
46665 | <filename>wt/pygardena/rest_api.py
import requests
from urllib.parse import urljoin
class RestAPI(requests.Session):
    """A ``requests.Session`` specialised for the Gardena smart-garden API.

    Every relative request path is resolved against :attr:`base_url`, and
    all requests carry a JSON ``Accept`` header.
    """

    # Root of the Gardena service; change this to target another deployment.
    base_url = "https://sg-api.dss.husqvarnagroup.net/sg-1/"

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        self.headers["Accept"] = "application/json"

    def request(self, method, url, *args, **kw):
        # Resolve relative endpoint names against the service root.
        absolute = urljoin(self.base_url, url)
        return super().request(method, absolute, *args, **kw)

    def post_sessions(self, email_address, password):
        """Open a session on the server and return the decoded JSON reply."""
        payload = {
            'sessions': {
                'email': email_address,
                'password': password,
            }
        }
        return self.post('sessions', json=payload).json()

    def get_locations(self, user_id):
        """Return all locations associated with *user_id*."""
        return self.get("locations", params={'user_id': user_id}).json()

    def get_devices(self, location_id):
        """Return the devices registered at location *location_id*."""
        return self.get('devices', params={'locationId': location_id}).json()

    def post_command(self, device, command_name, parameters=None):
        """Send a named command (with optional parameters) to *device*."""
        body = {'name': command_name}
        if parameters is not None:
            body['parameters'] = parameters
        endpoint = 'devices/{device.id}/abilities/{device.category}/command'.format(device=device)
        return self.post(endpoint, params={
            'locationId': device.location.id
        }, json=body)
3425855 | <filename>examples/seq2seq/distillation.py
#!/usr/bin/env python
import argparse
import gc
import os
import sys
from pathlib import Path
from typing import List
import pytorch_lightning as pl
import torch
from torch import nn
from torch.nn import functional as F
from finetune import SummarizationModule, TranslationModule
from finetune import main as ft_main
from make_student import create_student_by_copying_alternating_layers, get_layers_to_supervise
from transformers import AutoModelForSeq2SeqLM, MBartTokenizer, T5ForConditionalGeneration
from transformers.modeling_bart import shift_tokens_right
from utils import calculate_bleu, check_output_dir, freeze_params, label_smoothed_nll_loss, use_task_specific_params
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import generic_train # noqa
class SummarizationDistiller(SummarizationModule):
    """Supports T5, Bart, Pegasus and other models that inherit from Bart.

    Distills a (frozen) teacher seq2seq model into a smaller student by
    blending three losses: a KL cross-entropy against the teacher logits,
    the student's own LM loss, and an optional hidden-state MSE.
    """

    # Names used when logging the individual loss components.
    loss_names = ["loss", "ce_loss", "mlm_loss", "hid_loss_enc", "hid_loss_dec"]

    def __init__(self, hparams):
        """Build the student (copied from the teacher unless --student is
        given) and set up the distillation bookkeeping: which layers are
        supervised, which teacher parts can be deleted, and loss weights."""
        assert Path(hparams.data_dir).exists()
        self.output_dir = Path(hparams.output_dir)
        self.output_dir.mkdir(exist_ok=True)

        save_dir = self.output_dir.joinpath("student")

        hparams.model_name_or_path = str(save_dir)  # Tell lightning we are training the student
        teacher = AutoModelForSeq2SeqLM.from_pretrained(hparams.teacher).eval()
        use_task_specific_params(teacher, hparams.task)  # We copy good generation parameters to student by default
        if hparams.student is not None:
            # A pre-built student checkpoint was supplied.
            student = AutoModelForSeq2SeqLM.from_pretrained(hparams.student)
            use_task_specific_params(student, hparams.task)
            e_layer_ids, d_layer_ids = None, None
        else:
            # Build the student by copying alternating layers of the teacher.
            student, e_layer_ids, d_layer_ids = create_student_by_copying_alternating_layers(
                teacher, e=hparams.student_encoder_layers, d=hparams.student_decoder_layers, save_path=save_dir
            )

        if hparams.length_penalty != -1:
            student.config.length_penalty = hparams.length_penalty
        hparams.tokenizer_name = hparams.teacher  # Use teacher's tokenizer
        super().__init__(hparams, model=student, config=student.config)
        assert (
            student.config.model_type == teacher.config.model_type
        ), f"teacher, student model types should be the same, got {student.config.model_type} != {teacher.config.model_type}"

        # Layer counts are stored differently for T5 vs Bart-family configs.
        if student.config.model_type == "t5":
            student_encoder_layers = len(student.get_encoder().block)
            student_decoder_layers = len(student.get_decoder().block)
            teacher_encoder_layers = len(teacher.get_encoder().block)
            teacher_decoder_layers = len(teacher.get_decoder().block)
        else:
            student_encoder_layers = student.config.encoder_layers
            student_decoder_layers = student.config.decoder_layers
            teacher_encoder_layers = teacher.config.encoder_layers
            teacher_decoder_layers = teacher.config.decoder_layers

        self.different_base_models = not (hparams.student is None or hparams.teacher == hparams.student)
        # Hidden-state supervision only makes sense for same-base models.
        self.do_calc_hidden_loss = (not self.different_base_models) and hparams.alpha_hid > 0
        self.different_encoder = self.different_base_models or (student_encoder_layers != teacher_encoder_layers)
        # self.different_encoder determines whether we need to run the teacher encoder
        self.teacher = teacher
        freeze_params(self.teacher)

        if not self.different_encoder:  # To save RAM, delete teacher encoder and freeze student encoder.
            try:
                del self.teacher.model.encoder
            except AttributeError:  # T5
                del self.teacher.encoder

        if e_layer_ids is None:
            e_layer_ids = list(range(student_encoder_layers))
        if d_layer_ids is None:
            d_layer_ids = list(range(student_decoder_layers))

        self.e_layer_ids, self.d_layer_ids = e_layer_ids, d_layer_ids  # type: List[int], List[int]

        if self.do_calc_hidden_loss:  # Intermediate supervision: Decide which layers to supervise
            if hparams.supervise_forward:
                self.e_matches = get_layers_to_supervise(
                    n_student=len(self.e_layer_ids), n_teacher=teacher_encoder_layers
                )
                self.d_matches = get_layers_to_supervise(
                    n_student=len(self.d_layer_ids), n_teacher=teacher_decoder_layers
                )
            else:  # student layer should emulate hidden states of the teacher layer it was copied from
                self.e_matches = self.e_layer_ids
                self.d_matches = self.d_layer_ids
        else:
            self.e_matches = None
            self.d_matches = None

        # Distillation hyper-parameters.
        self.ce_loss_fct = nn.KLDivLoss(reduction="batchmean")
        self.temperature = 2.0

        self.alpha_mlm = hparams.alpha_mlm
        self.alpha_ce = hparams.alpha_ce
        self.alpha_hid = hparams.alpha_hid
        gc.collect()
        torch.cuda.empty_cache()

    def calc_ce_loss(self, mask, s_logits, t_logits):
        """Copy pasted from distillbert (transformers/examples/distillation/)"""
        # mask has False at padding_idx
        sel_mask = mask[:, :, None].expand_as(s_logits)
        vocab_size = s_logits.size(-1)
        s_logits_slct = torch.masked_select(s_logits, sel_mask)  # (bs * seq_length * voc_size) modulo the 1s in mask
        t_logits_slct = torch.masked_select(t_logits, sel_mask)  # (bs * seq_length * voc_size) modulo the 1s in mask
        s_logits_slct = s_logits_slct.view(-1, vocab_size)  # (bs * seq_length, voc_size) modulo the 1s in mask
        t_logits_slct = t_logits_slct.view(-1, vocab_size)  # (bs * seq_length, voc_size) modulo the 1s in mask
        assert t_logits_slct.size() == s_logits_slct.size()
        # Temperature-scaled KL divergence between student and teacher
        # distributions; the T^2 factor keeps gradient magnitudes comparable.
        loss_ce = (
            self.ce_loss_fct(
                F.log_softmax(s_logits_slct / self.temperature, dim=-1),
                F.softmax(t_logits_slct / self.temperature, dim=-1),
            )
            * (self.temperature) ** 2
        )
        return loss_ce

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """Add the base summarization args plus the distillation flags."""
        SummarizationModule.add_model_specific_args(parser, root_dir)
        add_distill_args(parser)
        return parser

    def _step(self, batch: dict) -> tuple:
        """Compute the loss for a batch"""
        pad_token_id = self.tokenizer.pad_token_id
        input_ids, src_mask, labels = batch["input_ids"], batch["attention_mask"], batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(labels)
        else:
            decoder_input_ids = shift_tokens_right(labels, pad_token_id)

        # noinspection PyCallingNonCallable
        student_outputs = self(
            input_ids,
            attention_mask=src_mask,
            decoder_input_ids=decoder_input_ids,
            output_hidden_states=self.do_calc_hidden_loss,
            output_attentions=False,
            use_cache=False,
        )
        lm_logits = student_outputs.logits

        # Same cross entropy vs. label smoothing logic as finetune.py
        assert lm_logits.shape[-1] == self.model.config.vocab_size
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            loss_fct = torch.nn.CrossEntropyLoss(ignore_index=pad_token_id)
            student_lm_loss = loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), labels.view(-1))
        else:
            lprobs = F.log_softmax(lm_logits, dim=-1)
            student_lm_loss, _ = label_smoothed_nll_loss(
                lprobs, labels, self.hparams.label_smoothing, ignore_index=pad_token_id
            )

        def zero_tensor():
            # Zero loss placeholder on the same device/dtype as the LM loss.
            return torch.tensor(0.0).type_as(student_lm_loss)

        teacher_enc_outputs = student_outputs.encoder_last_hidden_state  # use this unless self.different_base_models
        hid_loss_enc, hid_loss_dec = zero_tensor(), zero_tensor()
        if self.different_encoder:  # compute encoder hidden state loss
            all_teacher_encoder_outputs = self.teacher.get_encoder()(
                input_ids,
                attention_mask=src_mask,
                output_hidden_states=self.do_calc_hidden_loss,
            )
            if self.different_base_models:
                teacher_enc_outputs = all_teacher_encoder_outputs.last_hidden_state
            elif self.do_calc_hidden_loss:
                hid_loss_enc = self.calc_hidden_loss(
                    src_mask,
                    student_outputs.encoder_hidden_states,
                    all_teacher_encoder_outputs.hidden_states,
                    self.e_matches,
                    normalize_hidden=self.hparams.normalize_hidden,
                )

        teacher_outputs = self.teacher(
            input_ids,
            attention_mask=src_mask,
            encoder_outputs=(teacher_enc_outputs,),
            decoder_input_ids=decoder_input_ids,
            output_hidden_states=self.do_calc_hidden_loss,
            use_cache=False,  # since we are not passing labels, never let this default to True
        )
        dec_mask = decoder_input_ids.ne(pad_token_id)
        loss_ce = self.calc_ce_loss(dec_mask, lm_logits, teacher_outputs.logits)
        if self.do_calc_hidden_loss:  # Intermediate supervision of decoder hidden states
            hid_loss_dec = self.calc_hidden_loss(
                dec_mask,
                student_outputs.decoder_hidden_states,
                teacher_outputs.decoder_hidden_states,
                self.d_matches,
                normalize_hidden=self.hparams.normalize_hidden,
            )

        blended_loss = (
            self.alpha_ce * loss_ce
            + self.alpha_mlm * student_lm_loss
            + self.hparams.alpha_hid * (hid_loss_enc + hid_loss_dec)
        )
        return blended_loss, loss_ce, student_lm_loss, hid_loss_enc, hid_loss_dec

    @staticmethod
    def calc_hidden_loss(attention_mask, hidden_states, hidden_states_T, matches, normalize_hidden):
        """MSE(student_hid, teacher_hid[matches]). Called "Intermediate supervision" in paper. Inspired by TinyBERT."""
        msg = "expected list or tuple for hidden_states, got tensor of shape: "
        assert not isinstance(hidden_states, torch.Tensor), f"{msg}{hidden_states.shape}"
        assert not isinstance(hidden_states_T, torch.Tensor), f"{msg}{hidden_states_T.shape}"
        mask = attention_mask.to(hidden_states[0])
        valid_count = mask.sum() * hidden_states[0].size(-1)
        # Stack the student layers and the matched teacher layers so the MSE
        # is computed across all supervised layers at once.
        student_states = torch.stack([hidden_states[i] for i in range(len(matches))])
        teacher_states = torch.stack([hidden_states_T[j] for j in matches])
        assert student_states.shape == teacher_states.shape, f"{student_states.shape} != {teacher_states.shape}"
        if normalize_hidden:
            student_states = F.layer_norm(student_states, student_states.shape[1:])
            teacher_states = F.layer_norm(teacher_states, teacher_states.shape[1:])
        mse = F.mse_loss(student_states, teacher_states, reduction="none")
        # Average only over non-padding positions.
        masked_mse = (mse * mask.unsqueeze(0).unsqueeze(-1)).sum() / valid_count
        return masked_mse
def add_distill_args(parser):
    """Register the distillation-specific command-line flags on *parser*.

    NOTE: when --student is given and the teacher/student base models
    differ, both models must still share one tokenizer (set through
    --tokenizer_name).  You can, e.g., distill t5_large into t5_small,
    but not bart into t5: with different tokenizers the output spaces
    differ, so the logits are not comparable.
    """
    # Teacher checkpoint to distill from.
    parser.add_argument("--teacher", type=str)
    # Loss-blending weights: KL vs. teacher, student LM loss, hidden MSE.
    parser.add_argument("--alpha_ce", type=float, default=0.8)
    parser.add_argument("--alpha_mlm", type=float, default=0.2)
    parser.add_argument("--alpha_hid", type=float, default=0.0, required=False)
    # Optional pre-built student checkpoint (otherwise copied from teacher).
    parser.add_argument("--student", type=str, required=False)
    parser.add_argument("--student_decoder_layers", type=int, default=12, required=False)
    parser.add_argument("--student_encoder_layers", type=int, default=12, required=False)
    parser.add_argument("--no_teacher", action="store_true", default=False)
    # -1 keeps the teacher's length penalty.
    parser.add_argument("--length_penalty", type=float, default=-1)
    parser.add_argument("--supervise_forward", action="store_true", default=False)
    parser.add_argument("--normalize_hidden", action="store_true", default=False)
class TranslationDistiller(SummarizationDistiller):
    """Supports T5, mBART, Marian, other models that inherit from Bart."""

    mode = "translation"
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        """Set up the distiller for translation: requires src/tgt languages."""
        super().__init__(hparams, **kwargs)
        assert hparams.src_lang is not None
        assert hparams.tgt_lang is not None
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            # mBART starts decoding from the target-language code token.
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]

    def calc_generative_metrics(self, preds, target) -> dict:
        """Score generated translations against references with BLEU."""
        return calculate_bleu(preds, target)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """Add the base translation args plus the distillation flags."""
        TranslationModule.add_model_specific_args(parser, root_dir)
        add_distill_args(parser)
        return parser
def create_module(args):
    """Instantiate the lightning module matching *args*: translation vs.
    summarization, with or without a distillation teacher."""
    is_translation = "translation" in args.task
    if args.no_teacher:
        module_cls = TranslationModule if is_translation else SummarizationModule
    else:
        # Distill with a teacher model.
        module_cls = TranslationDistiller if is_translation else SummarizationDistiller
    args.setup_cls: str = module_cls.__name__
    print(f"using module {args.setup_cls}")
    return module_cls(args)
def distill_main(args):
    """Entry point: prepare and validate the output dir, then fine-tune."""
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)
    return ft_main(args, model=create_module(args))
if __name__ == "__main__":
    # Build the full CLI (trainer flags + model flags + distillation flags),
    # then run distillation training.
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    distill_main(args)
| StarcoderdataPython |
4903781 | import os
import logging
from tqdm import tqdm
from lxml import etree
from typing import List
from xylose.scielodocument import Journal, Issue
from documentstore_migracao.utils import files, xml, string, xylose_converter
from documentstore_migracao.export.sps_package import SPS_Package
from documentstore_migracao import config
logger = logging.getLogger(__name__)
def convert_article_xml(file_xml_path):
    """Convert one article XML file to SPS 1.9 and write the result to
    CONVERSION_PATH, embedding the detected languages in the file name."""
    obj_xmltree = xml.loadToXML(file_xml_path)
    obj_xml = obj_xmltree.getroot()

    obj_xml.set("specific-use", "sps-1.9")
    obj_xml.set("dtd-version", "1.1")

    xml_sps = SPS_Package(obj_xmltree)

    # Convert the body from AM to SPS.
    xml_sps.transform_body()

    # Convert pub-date to SPS 1.9.
    xml_sps.transform_pubdate()

    # Build the scielo-id in the converted XML.
    xml_sps.create_scielo_id()

    # Output name: <original-name>.<lang1-lang2...>.<ext>
    languages = "-".join(xml_sps.languages)
    _, fname = os.path.split(file_xml_path)
    fname, fext = fname.rsplit(".", 1)

    new_file_xml_path = os.path.join(
        config.get("CONVERSION_PATH"), "%s.%s.%s" % (fname, languages, fext)
    )

    xml.objXML2file(new_file_xml_path, xml_sps.xmltree, pretty=True)
def convert_article_ALLxml():
    """Convert every XML file found in SOURCE_PATH.

    Per-file failures are logged but deliberately not re-raised, so a
    single bad file does not stop the whole batch.
    """
    logger.info("Iniciando Conversão do xmls")
    list_files_xmls = files.xml_files_list(config.get("SOURCE_PATH"))
    for file_xml in tqdm(list_files_xmls):

        try:
            convert_article_xml(os.path.join(config.get("SOURCE_PATH"), file_xml))

        except Exception as ex:
            logger.error(file_xml)
            logger.exception(ex)
            # raise  # intentionally swallowed: keep converting remaining files
def conversion_journal_to_bundle(journal: dict) -> dict:
    """Transform a Journal object (xylose) into the data format
    equivalent to the one persisted by Kernel in a MongoDB database."""
    _journal = Journal(journal)
    _bundle = xylose_converter.journal_to_kernel(_journal)
    return _bundle
def conversion_journals_to_kernel(journals: list) -> list:
    """Transform a list of non-normalized journals into a list of
    journals in Kernel format."""
    # Lazy %-style logging args: the message is only interpolated when the
    # INFO level is enabled (the original formatted the string eagerly).
    logger.info("Convertendo %d periódicos para formato Kernel", len(journals))
    return [conversion_journal_to_bundle(journal) for journal in journals]
def conversion_issues_to_xylose(issues: List[dict]) -> List[Issue]:
    """Convert a list of issues in JSON format into a list of issues in
    xylose format."""
    return [Issue({"issue": issue}) for issue in issues]
def conversion_issues_to_kernel(issues: List[Issue]) -> List[dict]:
    """Convert a list of issues in xylose format into a list of issues
    in Kernel format."""
    return [xylose_converter.issue_to_kernel(issue) for issue in issues]
| StarcoderdataPython |
6643669 | <reponame>GeoffreyXue/TypeRacerStats<filename>TypeRacerStats/Core/user_config.py
import sys
import discord
from discord.ext import commands
sys.path.insert(0, '')
from TypeRacerStats.config import BOT_ADMIN_IDS, MAIN_COLOR, HELP_BLACK
from TypeRacerStats.file_paths import UNIVERSES_FILE_PATH
from TypeRacerStats.Core.Common.accounts import load_accounts, update_accounts, check_banned_status
from TypeRacerStats.Core.Common.aliases import get_aliases
from TypeRacerStats.Core.Common.errors import Error
from TypeRacerStats.Core.Common.formatting import href_universe
from TypeRacerStats.Core.Common.prefixes import get_prefix, load_prefixes, update_prefixes
from TypeRacerStats.Core.Common.requests import fetch
from TypeRacerStats.Core.Common.supporter import get_supporter, check_dm_perms
from TypeRacerStats.Core.Common.urls import Urls
class UserConfig(commands.Cog):
    """Configuration commands: per-server bot prefix, TypeRacer account
    linking, universe selection, and the `desslejusted` toggle."""

    def __init__(self, bot):
        self.bot = bot

    @commands.cooldown(1, 1, commands.BucketType.default)
    @commands.command(aliases=get_aliases('setprefix'))
    @commands.check(lambda ctx: ctx.message.author.guild_permissions.
                    administrator and check_banned_status(ctx))
    async def setprefix(self, ctx, *args):
        """Show the current server prefix (no args) or set a new one."""
        if len(args) == 0:
            prefix = get_prefix(self.bot, ctx.message)
            await ctx.send(embed=discord.Embed(
                color=discord.Color(HELP_BLACK),
                title=f"The prefix is `{prefix}`",
                description=f"`{prefix}setprefix [prefix]`\n`{prefix}help`"))
            return
        elif len(args) > 1:
            await ctx.send(embed=Error(ctx, ctx.message).parameters(
                f"{ctx.invoked_with} <prefix>"))
            return

        prefix = args[0]
        if len(prefix) > 14:
            # Fixed: the mention here had been corrupted to an "<EMAIL>"
            # placeholder (a SyntaxError inside the f-string); restored to
            # mention the invoking user, matching the other commands.
            await ctx.send(
                f"<@{ctx.message.author.id}>",
                embed=Error(ctx, ctx.message).incorrect_format(
                    '`prefix` can not be longer than 14 characters'))
            return

        prefixes = load_prefixes()
        prefixes[str(ctx.guild.id)] = prefix
        update_prefixes(prefixes)

        await ctx.send(embed=discord.Embed(title=f"Updated prefix to {prefix}",
                                           color=discord.Color(0)))
        return

    @commands.cooldown(1, 3, commands.BucketType.default)
    @commands.check(
        lambda ctx: check_dm_perms(ctx, 4) and check_banned_status(ctx))
    @commands.command(aliases=get_aliases('register'))
    async def register(self, ctx, *args):
        """Link the invoking Discord account to a TypeRacer username."""
        user_id = str(ctx.message.author.id)
        MAIN_COLOR = get_supporter(user_id)  # local shadow: supporter color
        # A trailing '*' on the invocation (bot admins only) also reports
        # the total number of registered users.
        show_user_count = ctx.invoked_with[
            -1] == '*' and ctx.message.author.id in BOT_ADMIN_IDS

        if len(args) != 1:
            await ctx.send(content=f"<@{user_id}>",
                           embed=Error(ctx, ctx.message).parameters(
                               f"{ctx.invoked_with} [typeracer_username]"))
            return

        player = args[0].lower()
        urls = [Urls().get_user(player, 'play')]
        try:
            # Result discarded on purpose: we only care whether the profile
            # fetch succeeds, i.e. whether the username exists.
            await fetch(urls, 'json')
        except Exception:
            await ctx.send(
                content=f"<@{user_id}>",
                embed=Error(ctx, ctx.message).missing_information(
                    '`typeracer_username` must be a TypeRacer username'))
            return

        accounts = load_accounts()
        try:
            accounts[user_id]['main'] = player
        except KeyError:
            # First registration: create the account record with defaults.
            accounts.update({
                user_id: {
                    'main': player,
                    'alts': [],
                    'desslejusted': False,
                    'speed': 'lag',
                    'universe': 'play'
                }
            })
        update_accounts(accounts)

        user_count = ''
        if show_user_count:
            user_count = f"\n{len(accounts)} users registered"

        await ctx.send(embed=discord.Embed(
            color=discord.Color(MAIN_COLOR),
            description=(f"<@{user_id}> has been linked to [**{player}**]"
                         f"({Urls().user(player, 'play')}){user_count}")))
        return

    @commands.cooldown(1, 1, commands.BucketType.default)
    @commands.check(
        lambda ctx: check_dm_perms(ctx, 4) and check_banned_status(ctx))
    @commands.command(aliases=get_aliases('setuniverse'))
    async def setuniverse(self, ctx, *args):
        """Set the TypeRacer universe for the linked account (default: play)."""
        user_id = str(ctx.message.author.id)
        MAIN_COLOR = get_supporter(user_id)  # local shadow: supporter color
        invalid = False

        if len(args) > 1:
            await ctx.send(
                content=f"<@{user_id}>",
                embed=Error(
                    ctx,
                    ctx.message).parameters(f"{ctx.invoked_with} [universe]"))
            return
        if len(args) == 0:
            args = ('play', )

        universe = args[0].lower()
        # Reject names that are too long or not in the known-universes file.
        if len(universe) > 50:
            invalid = True
        else:
            with open(UNIVERSES_FILE_PATH, 'r') as txtfile:
                universes = txtfile.read().split('\n')
            if not universe in universes:
                invalid = True
        if invalid:
            await ctx.send(content=f"<@{user_id}>",
                           embed=Error(ctx, ctx.message).incorrect_format(
                               ('`universe` must be a [TypeRacer universe]'
                                '(http://typeracerdata.com/universes)')))
            return

        accounts = load_accounts()
        try:
            accounts[user_id]['universe'] = universe
        except KeyError:
            await ctx.send(
                content=f"<@{user_id}>",
                embed=Error(ctx, ctx.message).missing_information((
                    'Discord account must be linked to TypeRacer account with '
                    f"`{get_prefix(ctx, ctx.message)}register [typeracer_username]`"
                )))
            return
        update_accounts(accounts)

        await ctx.send(embed=discord.Embed(
            color=discord.Color(MAIN_COLOR),
            description=
            (f"<@{user_id}> has been linked to the {href_universe(universe)} universe"
             )))
        return

    @commands.cooldown(1, 1, commands.BucketType.default)
    @commands.check(
        lambda ctx: check_dm_perms(ctx, 4) and check_banned_status(ctx))
    @commands.command(aliases=get_aliases('toggledessle'))
    async def toggledessle(self, ctx, *args):
        """Flip the `desslejusted` flag on the linked account."""
        user_id = str(ctx.message.author.id)
        MAIN_COLOR = get_supporter(user_id)  # local shadow: supporter color

        if len(args) != 0:
            await ctx.send(
                content=f"<@{user_id}>",
                embed=Error(ctx,
                            ctx.message).parameters(f"{ctx.invoked_with}"))
            return

        accounts = load_accounts()
        try:
            cur = accounts[user_id]['desslejusted']
            accounts[user_id]['desslejusted'] = not cur
        except KeyError:
            await ctx.send(
                content=f"<@{user_id}>",
                embed=Error(ctx, ctx.message).missing_information((
                    'Discord account must be linked to TypeRacer account with '
                    f"`{get_prefix(ctx, ctx.message)}register [typeracer_username]`"
                )))
            return
        update_accounts(accounts)

        await ctx.send(embed=discord.Embed(
            color=discord.Color(MAIN_COLOR),
            description=(
                f"<@{user_id}> has been set to `desslejusted` **{not cur}**")))
        return
def setup(bot):
    """discord.py extension entry point: register the UserConfig cog."""
    bot.add_cog(UserConfig(bot))
| StarcoderdataPython |
1944145 | import numpy as np
def get_neighbors(edge_index, node_i):
    """Return the target nodes of every edge whose source is *node_i*.

    *edge_index* is a (2, E) array: row 0 holds edge sources, row 1 targets.
    """
    sources, targets = edge_index[0], edge_index[1]
    return targets[sources == node_i]
def create_distance_matrix(X, num_nodes):
    """Return the (num_nodes, num_nodes) matrix of pairwise Euclidean
    distances between the first *num_nodes* rows of *X*.

    Vectorized with numpy broadcasting instead of the original O(n^2)
    Python double loop; the result is still symmetric with a zero diagonal.
    """
    pts = np.asarray(X, dtype=float)[:num_nodes]
    if pts.ndim == 1:
        # Scalar coordinates: treat each entry as a 1-D point.
        pts = pts[:, None]
    diff = pts[:, None, :] - pts[None, :, :]
    return np.linalg.norm(diff, axis=-1)
def filter_adj(row, col, edge_attr, mask):
    """Keep only the edges selected by the boolean *mask*.

    Returns the masked ``row`` and ``col`` index arrays, plus the masked
    ``edge_attr`` (or ``None`` when no attributes were supplied).
    """
    kept_attr = edge_attr[mask] if edge_attr is not None else None
    return row[mask], col[mask], kept_attr
4998757 | import pandas as pd
import numpy as np
from pathlib import Path
import os
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib import rcParams
from sklearn.linear_model import LogisticRegression
import scipy.stats as spst
import sys
import re
import collections
from routines import *
from matching import *
import bootstrapped.bootstrap as bs
import bootstrapped.stats_functions as bs_stats
def clear_imgname(imname):
    """Strip the "_dark"/"_light" clothing-colour tags from a filename."""
    for tag in ("_dark", "_light"):
        imname = imname.replace(tag, "")
    return imname
def compute_votes(im1, im2, est):
    """Count how many votes each image of the pair received in *est*.

    Restricts *est* to the rows for this image pair, then counts the rows
    whose ``num`` matches each image.  BUGFIX: the original computed ``v2``
    from ``im1.num`` (copy-paste error), so both counts were identical.
    """
    pair = est.loc[est.pairname == im1.name + im2.name]
    v1 = pair.loc[pair.num == im1.num].shape[0]
    v2 = pair.loc[pair.num == im2.num].shape[0]
    return v1, v2
def plot_image_pair(im1, im2, est):
    """Show both images of a pair side by side, titling each with its true
    height/weight and the vote count it received in *est*."""
    v1, v2 = compute_votes(im1, im2, est)
    plt.subplot(1,2,1)
    plt.imshow(mpimg.imread('_all_/'+im1.name))
    plt.title("True: %.2lf cm, %.2lf kg\nVotes: %d" % (im1.h_true, im1.w_true, v1), fontsize=5)
    plt.subplot(1,2,2)
    plt.imshow(mpimg.imread('_all_/'+im2.name))
    plt.title("True: %.2lf cm, %.2lf kg\nVotes: %d" % (im2.h_true, im2.w_true, v2), fontsize=5)
    # NOTE(review): axis('off') applies only to the second (current) subplot.
    plt.axis('off')
def get_group(f):
    """Classify a filename by colour tag: 'l' (light), 'd' (dark) or 's'.

    "_light" takes precedence when both tags appear, matching the original
    check order.
    """
    if "_light" in f:
        return "l"
    return "d" if "_dark" in f else "s"
def assign_color_groups(est, images):
    """Annotate the estimates frame with pair keys, colour groups and votes.

    Adds three columns to *est*: ``pairname`` (concatenated filenames of the
    pair), ``group`` (two-letter colour code derived from both filenames),
    and ``vote`` (index label of the image whose ``num`` matches).  Prints
    debug information along the way.
    """
    est["pairname"] = [x.file1+x.file2 for _, x in est.iterrows()]
    est["group"] = [get_group(x.file1) + get_group(x.file2) for _, x in est.iterrows()]
    # Debug: estimate nums that have no matching image (expected empty).
    print(set(est.num.values) - set(images.num.values))
    est["vote"] = [images.loc[images.num == x.num].index.values[0] for _, x in est.iterrows()]
    #est["votegroup"] = [x.vote.split("_")[-2][0] for _, x in est.iterrows()]
    # Debug: number of estimates per colour group.
    print(est.groupby("group").apply(lambda x: x.shape[0]))
    return est
def compute_stats(est, imgpairs):
    """Compute per-pair vote shares and bootstrap confidence intervals.

    NOTE(review): this function calls sys.exit(1) before its final return,
    so the ``return imgpairs, g, gdv`` line is unreachable and the whole
    program terminates here.  This looks like a debugging leftover; confirm
    the intended control flow before removing it.
    """
    # Start every pair at a 50/50 split, then overwrite with observed shares.
    imgpairs["votes1"], imgpairs["votes2"] = 0.5, 0.5
    imgpairs["group"] = ""
    # CIs based on the per-images vote distribution.
    for ip, p in imgpairs.iterrows():
        cest = est.loc[est.pairname == p.img1+p.img2]
        if (cest.shape[0] > 0):
            imgpairs.loc[ip,"group"] = cest.group.values[0]
            imgpairs.loc[ip,"votes1"] = cest.loc[cest.vote == p.img1].shape[0]/cest.shape[0]
            imgpairs.loc[ip,"votes2"] = cest.loc[cest.vote == p.img2].shape[0]/cest.shape[0]
    #estcnt = est.groupby("pairname").apply(lambda x: )
    # Bootstrap a CI of the mean votes1 share for each colour group.
    gdv = {}
    for g in np.unique(imgpairs.group.values):
        cres = imgpairs.loc[imgpairs.group == g]
        gdv[g] = bs.bootstrap(cres.votes1.values, stat_func=bs_stats.mean, alpha=0.05, num_iterations=10000)
    g = np.sort(np.unique(imgpairs.group.values))
    # Drop the empty-string group (pairs that had no estimates).
    if (g[0] == ""):
        g = g[1:]
    print("Count of images per group:", imgpairs.groupby("group").apply(lambda x: x.shape[0]))

    # CIs based on separate resampling of votes for each image.
    means = collections.defaultdict(list)
    allgroups = np.unique(imgpairs.group.values)
    for count in range(10000):
        if (count % 100 == 0):
            print(count)
        cmeans = collections.defaultdict(list)
        for ip, p in imgpairs.iterrows():
            cest = est.loc[est.pairname == p.img1+p.img2]
            if (cest.shape[0] == 0):
                continue
            # Resample 40 votes (with replacement) for this pair.
            cmeans[cest.group.values[0]].append(np.mean(np.random.choice(cest.vote == p.img1, 40, replace=True)))
        for group in allgroups:
            means[group].append(np.mean(cmeans[group]))
    for group in allgroups:
        pCI = np.percentile(means[group], [0, 95])
        print("%s: %.3lf (%.3lf, %.3lf)" % (group, np.mean(imgpairs.loc[imgpairs.group == group, "votes1"]), pCI[0], pCI[1]))
    # NOTE(review): exits the whole program; the return below is dead code.
    sys.exit(1)
    return imgpairs, g, gdv
def compute_vote_change(img4, ig):
    """For a dark/dark ('dd') pair, measure how the votes shift when either
    person is swapped into light clothing.

    Returns ``None`` for any other group code, matching the original
    behaviour.  The input frame is not mutated.
    """
    by_group = img4.copy().set_index('group')
    if ig == "dd":
        # Gain for person 1 when they turn light, plus the gain for
        # person 2 when they turn light.
        gain1 = by_group.loc["ld", "vote1"] - by_group.loc["dd", "vote1"]
        gain2 = by_group.loc["dl", "vote2"] - by_group.loc["dd", "vote2"]
        return gain1 + gain2
def plot_pair_results(imgpairs):
    # NOTE(review): the two lines below are broken debug leftovers --
    # `plt.imgshpw` is a typo for `plt.imshow`, and `im` is never defined,
    # so calling this function raises an error. Confirm intent before fixing.
    plt.subplot(2,4,1)
    plt.imgshpw(im)
    # Pair key with the colour tags stripped from both filenames.
    imgpairs["pairname"] = [clear_imgname(x.img1)+clear_imgname(x.img2) for _, x in imgpairs.iterrows()]
    print(imgpairs[["img1","img2"]])
if __name__ == "__main__":
    # The data folder is the single CLI argument.
    folder = sys.argv[1]
    est = pd.read_csv(folder+os.sep+'est.csv', index_col=0)
    workers = pd.read_csv(folder+os.sep+'workers.csv', index_col=0)
    images = pd.read_csv(folder+os.sep+'images.csv', index_col=0)
    imgpairs = pd.read_csv(folder+os.sep+'matched_pairs.csv', index_col=0)

    # plot settings (fonttype 42 embeds TrueType fonts in PDF/PS output)
    sns.set(style="whitegrid", palette="bright", color_codes=True)
    rcParams['pdf.fonttype'] = 42
    rcParams['ps.fonttype'] = 42
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['Tahoma']
    rcParams['figure.autolayout'] = True

    # set the random seed to make results reproducable
    np.random.seed(239)

    est = assign_color_groups(est, images)
    # NOTE(review): compute_stats calls sys.exit(1) before returning, and its
    # (unreachable) return value is a 3-tuple, not a DataFrame -- confirm the
    # intended control flow before relying on the lines below.
    imgpairs = compute_stats(est, imgpairs)
    # NOTE(review): plot_change is not defined in this file; presumably it
    # comes from the star imports of `routines`/`matching` -- verify.
    plot_change(imgpairs, "dd")
| StarcoderdataPython |
6422807 | #!/usr/bin/env python
"""Various methods reused in main scripts"""
import sys, os, subprocess, struct, numpy, math, multiprocessing
# Supported point-cloud file extensions.
PC_FILE_FORMATS = ['las','laz']
# An octree node has 8 children (one per octant).
OCTTREE_NODE_NUM_CHILDREN = 8
# PostgreSQL database/table names used to store the extents.
DB_NAME = 'pc_extents'
DB_TABLE_RAW = 'extent_raw'
DB_TABLE_POTREE = 'extent_potree'
DB_TABLE_POTREE_DIST = 'potree_dist'
def shellExecute(command, showOutErr = False):
    """ Execute the command in the SHELL and shows both stdout and stderr"""
    # NOTE: Python 2 print statements -- this module is Python 2 code.
    print command
    # Run through the shell and capture both output streams.
    (out,err) = subprocess.Popen(command, shell = True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    r = '\n'.join((out,err))
    if showOutErr:
        print r
    return r
# Check the LAStools is installed and that it is in PATH before libLAS
# NOTE: this runs at import time -- merely importing the module shells out
# to `lasinfo` and raises if LAStools is not found.
if shellExecute('lasinfo -version').count('LAStools') == 0:
    raise Exception("LAStools is not found!. Please check that it is in PATH and that it is before libLAS")
def getUserName():
    """Return the current OS user name (`whoami` output, newline stripped)."""
    return os.popen('whoami').read().replace('\n','')
def getConnectString(dbName = None, userName= None, password = None, dbHost = None, dbPort = None, cline = False):
    """ Gets the connection string to be used by psycopg2 (if cline is False)
    or by psql (if cline is True).

    In command-line (psql) mode the password is not embedded in the string:
    it is exported through the PGPASSWORD environment variable instead.
    Empty or None parameters are omitted from the result.
    """
    parts = []
    if cline:
        if dbName is not None and dbName != '':
            parts.append(" " + dbName)
        if userName is not None and userName != '':
            parts.append(" -U " + userName)
        if password is not None and password != '':
            # psql reads the password from the environment, not the CLI.
            os.environ['PGPASSWORD'] = password
        if dbHost is not None and dbHost != '':
            parts.append(" -h " + dbHost)
        if dbPort is not None and dbPort != '':
            parts.append(" -p " + dbPort)
    else:
        for key, value in (("dbname", dbName), ("user", userName),
                           ("password", password), ("host", dbHost),
                           ("port", dbPort)):
            if value is not None and value != '':
                parts.append(" " + key + "=" + value)
    return ''.join(parts)
def getPCFileDetails(absPath):
    """ Get the details (count numPoints and extent) of a LAS/LAZ file (using LAStools, hence it is fast)

    Parses the text output of `lasinfo` line by line.  Returns a 13-tuple:
    (count, minX, minY, minZ, maxX, maxY, maxZ, scaleX, scaleY, scaleZ,
    offsetX, offsetY, offsetZ); any field not found in the output stays None.
    """
    count = None
    (minX, minY, minZ, maxX, maxY, maxZ) = (None, None, None, None, None, None)
    (scaleX, scaleY, scaleZ) = (None, None, None)
    (offsetX, offsetY, offsetZ) = (None, None, None)
    # -nc -nv -nco suppress the per-point checks so lasinfo returns quickly.
    command = 'lasinfo ' + absPath + ' -nc -nv -nco'
    for line in shellExecute(command).split('\n'):
        if line.count('min x y z:'):
            [minX, minY, minZ] = line.split(':')[-1].strip().split(' ')
            minX = float(minX)
            minY = float(minY)
            minZ = float(minZ)
        elif line.count('max x y z:'):
            [maxX, maxY, maxZ] = line.split(':')[-1].strip().split(' ')
            maxX = float(maxX)
            maxY = float(maxY)
            maxZ = float(maxZ)
        elif line.count('number of point records:'):
            count = int(line.split(':')[-1].strip())
        elif line.count('scale factor x y z:'):
            [scaleX, scaleY, scaleZ] = line.split(':')[-1].strip().split(' ')
            scaleX = float(scaleX)
            scaleY = float(scaleY)
            scaleZ = float(scaleZ)
        elif line.count('offset x y z:'):
            [offsetX, offsetY, offsetZ] = line.split(':')[-1].strip().split(' ')
            offsetX = float(offsetX)
            offsetY = float(offsetY)
            offsetZ = float(offsetZ)
    return (count, minX, minY, minZ, maxX, maxY, maxZ, scaleX, scaleY, scaleZ, offsetX, offsetY, offsetZ)
def getFileSize(absPath):
    """ Get the size of a file (in megabytes, as reported by ``du -sm``).

    Symbolic links are dereferenced so the size of the link target is
    measured.  Returns None if the ``du`` output cannot be parsed.
    """
    # Resolve links once instead of duplicating the du invocation (the
    # original repeated the same popen/parse pipeline in both branches).
    path = os.readlink(absPath) if os.path.islink(absPath) else absPath
    try:
        # NOTE(review): path is interpolated into a shell command unquoted;
        # paths containing spaces or shell metacharacters will break.
        return int(os.popen('du -sm ' + path).read().split('\t')[0])
    except ValueError:
        return None
6518823 | from typing import List
from ..agent import Agent
from ..env_cmd import CmdEnv
from UTILS.colorful import *
from UTILS.tensor_ops import dir2rad, np_softmax, reg_rad_at, reg_rad, repeat_at
from .maneuver import maneuver_angle_to_ms, maneuver_angle_to_ms3d, maneuver_speed_to_ms, maneuver_vip
import copy
import random
import numpy as np
import time
from .tools import distance_matrix
from .base import Baseclass, Special, Drone, Vip, Plane, MS
from .missile_policy import MS_policy
class Emergent():
    # Emergency-evasion logic.  (Translated) Checks whether emergency evasion
    # is triggered; prepare_escape points the nose toward the target, while
    # emergent_escape makes a sharp ~80-degree turn away.
    def check_escape_condition(self):
        """Set each friendly plane's `step_state` for this tick.

        VIPs escalate from 'follow' to 'vip_180_escape' / 'vip_90_escape' as
        tracking missiles close in; with no tracking missiles they may switch
        to 'advantage_vip_escape'.  Drones switch to 'prepare_escape' when a
        tracking missile is within `prepare_escape_distance`, and to
        'reverse_escape' when two closely-spaced missiles from the same
        launcher (fired at long range, at different times) are detected.
        """
        for p in self.my_planes:
            if p.is_vip:
                p.step_state = 'follow'
                # Missiles currently attacking p that are still tracking a target.
                active_ms = [self.find_ms_by_id(ms['ID']) for ms in p.attacked_by_ms]
                active_ms = list(filter(lambda ms: ms.tracking_target, active_ms))
                if len(active_ms) >= 1:
                    dis = [self.get_dis(p.ID, ms.ID) for ms in active_ms]
                    min_ms_dis = min(dis)
                    # Escalating evasion: the 25 km check runs after the 45 km
                    # one, so the closer threshold wins when both apply.
                    if min_ms_dis <= 45e3:
                        p.step_state = 'vip_180_escape'
                    if min_ms_dis <= 25e3:
                        p.step_state = 'vip_90_escape'
                    # if min_ms_dis <= p.prepare_escape_distance:  # 2e3: # 2000ok 1500ok
                    # if min_ms_dis <= 30e3:  # 2e3: # 2000ok 1500ok
                    #     p.step_state = 'vip_prepare_escape'
                    # high priority
                    # if min_ms_dis <= p.escape_distance:  # 2e3: # 2000ok 1500ok
                    #     p.step_state = 'emergent_escape'
                    # for ms in active_ms:
                    #     ft = ms.flying_time
                    #     pass
                else:
                    # No missile threat: retreat while we hold the advantage.
                    overall_adv, _ = self.has_overall_adv()
                    if overall_adv:
                        p.step_state = 'advantage_vip_escape'
            if p.is_drone:
                active_ms = [self.find_ms_by_id(ms['ID']) for ms in p.attacked_by_ms]
                active_ms = list(filter(lambda ms: ms.tracking_target, active_ms))
                if len(active_ms) >= 1:
                    dis = [self.get_dis(p.ID, ms.ID) for ms in active_ms]
                    min_ms_dis = min(dis)
                    if min_ms_dis <= p.prepare_escape_distance:  # 2e3: # 2000ok 1500ok
                        p.step_state = 'prepare_escape'
                if len(active_ms) >= 2:
                    # (Translated) Find the pair of missiles closest to each other.
                    min_dis = 9999e3
                    ms1_tmp = None
                    ms2_tmp = None
                    for ms1 in active_ms:
                        for ms2 in active_ms:
                            if ms1 is ms2: continue
                            distance = self.get_dis(ms1.ID, ms2.ID) #ms1_tmp =
                            if distance < min_dis:
                                ms1_tmp = ms1
                                ms2_tmp = ms2
                                min_dis = distance
                    # (Translated) Got the closest pair; check: distance < 3 km,
                    # launched by the same plane, and different flying times.
                    # NOTE: the Chinese identifiers below are ordinary local
                    # boolean flags (pair-close / same-launcher / times-differ /
                    # both-launched-beyond-45km).
                    距离小于3km = (min_dis < 3e3) and (min_dis>50)
                    同一个飞机发射 = (ms1_tmp.LauncherID==ms2_tmp.LauncherID)
                    flyingtime不同 = (ms1_tmp.flying_time != ms2_tmp.flying_time)
                    导弹initdistance = (ms1_tmp.distance[0] > 45e3) and (ms2_tmp.distance[0] > 45e3)
                    if 距离小于3km and 同一个飞机发射 and flyingtime不同 and (not 导弹initdistance):
                        print('bk')
                    if 距离小于3km and 同一个飞机发射 and flyingtime不同 and 导弹initdistance:
                        p.step_state = 'reverse_escape'
    def abort_attack_cmd(self, uav, force=False):
        """Filter `self.cmd_list`, dropping this UAV's pending attack commands
        when `confirm_abort` decides aborting is warranted (always, if *force*).
        """
        # NOTE(review): this inner helper is never called in this method --
        # looks like leftover code; confirm before removing.
        def get_nearest_op_with_ms(p):
            dis = np.array([p.get_dis(op.ID) for op in self.op_planes if op.OpLeftWeapon>0])
            if len(dis)>0:
                return self.op_planes[np.argmin(dis)]
            else:
                return None
        def confirm_abort(uav, target_id):
            # VIP: abort only when the nearest threat is within 30 km.
            if uav.is_vip:
                _, threat_distance = uav.get_nearest_threat()
                if threat_distance > 30e3:
                    return False
                else:
                    return True
            if force: return True
            # (Translated) Drone case: keep attacking only if the target is
            # still in the attack radar and the nearest missile is > 6 km away.
            op = self.find_plane_by_id(target_id)
            ms = uav.nearest_ms()
            if ms is None: return False # do not abort
            distance_to_ms = self.get_dis(ms.ID, uav.ID)
            ## (translated debug print) decide whether to abort: subject, target,
            ## evading-missile name/distance, and the abort verdict
            if (op in uav.in_attack_radar) and (distance_to_ms>6e3):
                return False
            else:
                return True
        # Rebuild the command list without this UAV's aborted attack commands.
        # NOTE(review): cmd_list_new is stored on self although it is only
        # used as a scratch variable here.
        self.cmd_list_new = []
        for cmd in self.cmd_list:
            abort_condition = ('CmdAttackControl' in cmd) and (cmd['CmdAttackControl']['HandleID'] == uav.ID) and confirm_abort(uav, cmd['CmdAttackControl']['TgtID'])
            if abort_condition:
                pass
                ## print('warning')
            else:
                self.cmd_list_new.append(cmd)
        self.cmd_list = self.cmd_list_new
    def reverse_escape_cmd(self, uav):
        """Drone-only: head straight at the nearest missile while it is far
        (> 8 km), then turn directly away; cancels any pending attack first.
        """
        assert uav.is_drone
        ms = uav.nearest_ms()
        distance_to_ms = self.get_dis(ms.ID, uav.ID)
        if distance_to_ms>8e3:
            goto_location = maneuver_angle_to_ms(uav, 0)
        else:
            goto_location = maneuver_angle_to_ms(uav, 180)
        self.abort_attack_cmd(uav, force=True)
        self.cmd_list.append(self.check_and_make_linepatrolparam(
            uav.ID,
            goto_location,
            uav.MaxSpeed,
            uav.MaxAcc,
            uav.MaxOverload
        ))
    # (Translated) Handle the "prepare emergency evasion" command.
    def prepare_escape_cmd(self, uav):
        """Drone-only evasive maneuver at the plane's own escape angle/speed."""
        # STATE_TRANS
        if uav.is_vip:
            # VIPs are handled by the vip_* escape commands instead.
            assert False
            # goto_location = maneuver_angle_to_ms(uav, 110)
            # self.abort_attack_cmd(uav)
            # self.cmd_list.append(self.check_and_make_linepatrolparam(
            #     uav.ID,
            #     goto_location,
            #     uav.MaxSpeed,
            #     uav.MaxAcc,
            #     uav.MaxOverload
            # ))
        else:
            goto_location = maneuver_angle_to_ms3d(uav, uav.escape_angle)
            speed = maneuver_speed_to_ms(uav)
            self.abort_attack_cmd(uav)
            self.cmd_list.append(self.check_and_make_linepatrolparam(
                uav.ID,
                goto_location,
                speed,
                # uav.MaxSpeed,
                uav.MaxAcc,
                uav.MaxOverload
            ))
    # # (Translated) Handle the "emergency evasion" command.
    # def emergent_escape_cmd(self, uav):
    #     uav.persistent_state = 'emergent_escape'
    #     goto_location = maneuver_angle_to_ms(uav, self.escape_angle)
    #     self.cmd_list.append(self.check_and_make_linepatrolparam(
    #         uav.ID,
    #         goto_location,
    #         uav.MaxSpeed,
    #         uav.MaxAcc,
    #         uav.MaxOverload
    #     ))
    def vip_90_escape_cmd(self, uav):
        """VIP 90-degree evasive turn at max speed; cancels pending attacks."""
        # def adjust_angle(uav):
        #     Angle = 0
        #     if uav.X > 145000 or uav.X < -145000 or uav.Y > 145000 or uav.Y < -145000:
        #         Angle = 100
        #     return Angle
        # # STATE_TRANS
        # angle = adjust_angle(uav)
        goto_location = maneuver_vip(uav, 90)
        self.abort_attack_cmd(uav)
        self.cmd_list.append(self.check_and_make_linepatrolparam(
            uav.ID,
            goto_location,
            uav.MaxSpeed,
            uav.MaxAcc,
            uav.MaxOverload
        ))
    def vip_0_escape_cmd(self, uav):
        """VIP heads straight toward the missile (angle 0) at max speed."""
        # def adjust_angle(uav):
        #     Angle = 0
        #     if uav.X > 145000 or uav.X < -145000 or uav.Y > 145000 or uav.Y < -145000:
        #         Angle = 100
        #     return Angle
        # # STATE_TRANS
        # angle = adjust_angle(uav)
        goto_location = maneuver_angle_to_ms(uav, 0)
        self.abort_attack_cmd(uav)
        self.cmd_list.append(self.check_and_make_linepatrolparam(
            uav.ID,
            goto_location,
            uav.MaxSpeed,
            uav.MaxAcc,
            uav.MaxOverload
        ))
    def vip_goaway_when_has_advantage(self, p):
        """Send the VIP to the retreat point returned by has_overall_adv()."""
        _, goto_location = self.has_overall_adv()
        self.abort_attack_cmd(p)
        self.cmd_list.append(self.check_and_make_linepatrolparam(
            p.ID,
            goto_location,
            p.MaxSpeed,
            p.MaxAcc,
            p.MaxOverload,
            force_old_way=True
        ))
| StarcoderdataPython |
3415175 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddlenlp.transformers import T5ForConditionalGeneration, T5Tokenizer
class Demo:
    """Small demo wrapper around a T5 conditional-generation model."""

    def __init__(self, model_name_or_path="t5-base", max_predict_len=5):
        # Tokenizer first, then the (slow-to-load) model weights.
        self.tokenizer = T5Tokenizer.from_pretrained(model_name_or_path)
        print("Loading the model parameters, please wait...")
        self.model = T5ForConditionalGeneration.from_pretrained(
            model_name_or_path)
        self.model.eval()
        self.max_predict_len = max_predict_len
        print("Model loaded.")

    # prediction function
    @paddle.no_grad()
    def generate(self, inputs, max_predict_len=None):
        """Generate and print the model's decoded label for *inputs*."""
        if max_predict_len is None:
            max_predict_len = self.max_predict_len
        token_ids = self.tokenizer(inputs)["input_ids"]
        batch = paddle.to_tensor([token_ids], dtype="int64")
        generated = self.model.generate(batch, max_length=max_predict_len)[0][0]
        decoded = self.tokenizer.decode(
            generated, skip_special_tokens=True).strip()
        print(f"input text: {inputs}")
        print(f"label: {decoded}")
        print("=" * 50)
if __name__ == "__main__":
    # Per-task generation length: each key matches the task prefix of an
    # input string (text.split()[0]); the value caps the decoded label length.
    label_length_map = {
        "cola": 4,
        "sst2": 1,
        "mrpc": 5,
        "stsb": 5,
        "qqp": 5,
        "mnli": 4,
        "qnli": 5,
        "rte": 5,
    }
    demo = Demo(model_name_or_path="t5-base")
    # GLUE-style prompts: "<task> sentence: <text>".
    input_text_list = [
        "sst2 sentence: contains no wit , only labored gags ",
        "sst2 sentence: that loves its characters and communicates something rather beautiful about human nature ",
        "cola sentence: Mickey looked it up.",
        "sst2 sentence: remains utterly satisfied to remain the same throughout ",
        "sst2 sentence: a well-made and often lovely depiction of the mysteries of friendship "
    ]
    for text in input_text_list:
        # Look up the cap by the task prefix; raises KeyError for unknown tasks.
        max_predict_len = label_length_map[text.split()[0]]
        demo.generate(text, max_predict_len=max_predict_len)
    # Expected output of the loop above:
    # input text: sst2 sentence: contains no wit , only labored gags
    # label: negative
    # ==================================================
    # input text: sst2 sentence: that loves its characters and communicates something rather beautiful about human nature
    # label: positive
    # ==================================================
    # input text: cola sentence: Mickey looked it up.
    # label: acceptable
    # ==================================================
    # input text: sst2 sentence: remains utterly satisfied to remain the same throughout
    # label: positive
    # ==================================================
    # input text: sst2 sentence: a well-made and often lovely depiction of the mysteries of friendship
    # label: positive
    # ==================================================
| StarcoderdataPython |
377667 | <gh_stars>1-10
# heuristic monster types lists
# NOTE(review): membership is a hand-tuned heuristic over monster names
# (presumably NetHack); semantics are inferred from the list names --
# confirm against the call sites.
ONLY_RANGED_SLOW_MONSTERS = ['floating eye', 'blue jelly', 'brown mold', 'gas spore', 'acid blob']
EXPLODING_MONSTERS = ['yellow light', 'gas spore', 'flaming sphere', 'freezing sphere', 'shocking sphere']
INSECTS = ['giant ant', 'killer bee', 'soldier ant', 'fire ant', 'giant beetle', 'queen bee']
WEAK_MONSTERS = ['lichen', 'newt', 'shrieker', 'grid bug']
WEIRD_MONSTERS = ['leprechaun', 'nymph']
def is_monster_faster(agent, monster):
    """Heuristically decide whether *monster* outruns the agent.

    The monster record is a 5-tuple whose fourth element carries the
    monster's name in its ``mname`` attribute.
    """
    _, y, x, mon, _ = monster
    # TODO: implement properly (name-substring heuristic, not real speed stats)
    fast_keywords = ('bat', 'dog', 'cat', 'kitten', 'pony', 'horse', 'bee', 'fox')
    return any(keyword in mon.mname for keyword in fast_keywords)
def imminent_death_on_melee(agent, monster):
    """Return True when melee-ing *monster* risks killing the agent.

    Dangerous monsters require a larger HP safety margin (16) than
    ordinary ones (8).
    """
    hp_threshold = 16 if is_dangerous_monster(monster) else 8
    return agent.blstats.hitpoints <= hp_threshold
def is_dangerous_monster(monster):
    """Return True for monsters treated as high-threat: pets (by name
    substring) and anything listed in INSECTS."""
    _, y, x, mon, _ = monster
    pet_keywords = ('dog', 'cat', 'kitten', 'pony', 'horse')
    is_pet = any(keyword in mon.mname for keyword in pet_keywords)
    # 'mumak' in mon.mname or 'orc' in mon.mname or 'rothe' in mon.mname \
    # or 'were' in mon.mname or 'unicorn' in mon.mname or 'elf' in mon.mname or 'leocrotta' in mon.mname \
    # or 'mimic' in mon.mname
    return is_pet or mon.mname in INSECTS
def consider_melee_only_ranged_if_hp_full(agent, monster):
    """Return True when *monster* is a passive-damage type ('brown mold' /
    'blue jelly') and the agent is at full hit points.

    The name check runs first so the agent's stats are only read when the
    monster type matches (preserves the original short-circuit order).
    """
    if monster[3].mname not in ('brown mold', 'blue jelly'):
        return False
    return agent.blstats.hitpoints == agent.blstats.max_hitpoints
| StarcoderdataPython |
8027339 | import config
import models
import tensorflow as tf
import numpy as np
import os
import shutil
import json
import pdb
import time
import socket
import datetime
import copy
import sys
os.environ['CUDA_VISIBLE_DEVICES']='0'
#Input training files from benchmarks/FB15K/ folder.
# strftime format used for the `last_modified` stamp in the JSON store.
TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
# Valid task names; index 0 is a placeholder so MENU.index(task) yields
# 1..3 for the real tasks ("" would only match an empty task string).
MENU = ["","train+test","train","test"]
# Names of Config attributes that may be overridden per experiment.
PARAMS = {"test_flag",
          "in_path",
          "out_path",
          "bern",
          "hidden_size",
          "ent_size",
          "rel_size",
          "train_times",
          "margin",
          "nbatches",
          "negative_ent",
          "negative_rel",
          "workThreads",
          "alpha",
          "lmbda",
          "log_on",
          "exportName",
          "importName",
          "export_steps",
          "opt_method",
          "optimizer",
          "test_link_prediction",
          "test_triple_classification",}
# Subset of PARAMS echoed in the emailed report (drops workThreads and
# the test_* switches).
REPORT_PARAMS = {"test_flag",
                 "in_path",
                 "out_path",
                 "bern",
                 "hidden_size",
                 "ent_size",
                 "rel_size",
                 "train_times",
                 "margin",
                 "nbatches",
                 "negative_ent",
                 "negative_rel",
                 "alpha",
                 "lmbda",
                 "log_on",
                 "exportName",
                 "importName",
                 "export_steps",
                 "opt_method",
                 "optimizer",}
def obj_to_dict(obj):
    """Return a dict of *obj*'s public, non-callable instance attributes.

    Dunder-prefixed attributes and callables (e.g. functions stored on the
    instance) are skipped.
    """
    # Bug fix: the original tested callable(key) -- dict keys here are
    # strings, which are never callable, so the filter was a no-op.
    # The intent (skip callables) requires testing the *value*.
    return {key: value for key, value in obj.__dict__.items()
            if not key.startswith('__') and not callable(value)}
class XpManager:
    """Manages a collection of experiment definitions backed by a JSON file.

    Attributes beyond `file`, `size` and `xp_list` (e.g. `default_parameters`,
    `mail_recepients`) are loaded dynamically from the JSON store.
    """

    def __init__(self, file=None):
        self._load(file)

    def filter_default_params(self, xp):
        """Drop parameters of *xp* that equal the shared default_parameters."""
        # Bug fix: iterate over a snapshot -- popping from a dict while
        # iterating its live items() view raises RuntimeError on Python 3.
        for pk, pv in list(xp['parameters'].items()):
            try:
                if self.default_parameters[pk] == pv:
                    xp['parameters'].pop(pk)
            except (AttributeError, KeyError):
                # No defaults loaded, or this key has no default: keep it.
                continue

    def write(self, xp):
        """Insert/replace *xp* in the store and persist it to disk."""
        # Reload first so concurrent edits to the json file are not clobbered.
        self._load(self.file)
        self.filter_default_params(xp)
        self.xp_list[xp["log"]["title"]] = xp
        # Record which machine produced this experiment.
        xp["log"]["host"] = socket.gethostname()
        self._dump()

    def _load(self, file=None):
        """(Re)load manager state from *file* (defaults to the current store)."""
        if file is None:
            file = self.file
        self.file = file
        # Safe defaults so `size`/`xp_list` always exist, even when the store
        # is missing or unreadable (the original only set them when the file
        # was absent, crashing later on a corrupt store).
        self.size = getattr(self, 'size', 0)
        self.xp_list = getattr(self, 'xp_list', {})
        if os.path.exists(self.file):
            try:
                with open(self.file) as fh:
                    state = json.load(fh)
                for k, v in state.items():
                    setattr(self, k, v)
            except Exception:
                # Corrupt/unreadable store: keep the defaults set above.
                pass

    def _dump(self, file=None):
        """Serialise the manager (minus the backing-file path) to JSON."""
        if file is None:
            file = self.file
        # Bug fix: the original updated the module-level `xp_man` global here
        # instead of `self`, so dumping any other instance corrupted state.
        self.size = len(self.xp_list)
        state = obj_to_dict(self)
        state.pop('file', None)
        state['last_modified'] = time.strftime(TIME_FORMAT, time.localtime())
        with open(file, "w") as fh:
            json.dump(state, fh, sort_keys=True, indent=4)
# class ExpManager Ends
def report(xp):
    """Build a human-readable summary of experiment *xp* (corpus stats,
    performance, hyperparameters, paths), print it and email it.

    Expects xp to carry the 'params_task', 'corpus', 'perf' and 'log'
    sections populated by the driver script below.
    """
    # NOTE(review): the local `report` shadows the function name; harmless
    # here but confusing.
    report = ""
    report += ("Corpus is {}\n".format(xp["params_task"]["kb"]))
    report += ("There are {} relations and {} entities.\n"
               .format(xp["corpus"]["total_rel"],xp["corpus"]["total_ent"]))
    report += ("There are {} train/ {} test/ {} valid triples.\n"
               .format(xp["corpus"]['total_train'],xp["corpus"]['total_test'],xp["corpus"]['total_valid']))
    # NOTE(review): 180123 looks like a hard-coded "current best" MR;
    # 180123/1000 is integer division under Python 2 (prints 180.00).
    report += ("Experiment {} achieved {:.2f}k average MR. (current best {:.2f}k)\n"
               .format(xp["log"]["title"], xp["perf"]["avg_MR"]/1000, 180123/1000))
    if xp["params_task"]["model"] in ['TransE', 'DistMult']:
        # Only echo the hyperparameters that matter for the report.
        report += ("Hyperparameters for the {} model were:\n{}\n"
                   .format(xp["params_task"]["model"], "\n".join([key+" : "+str(value) for key,value in xp["parameters"].items() if key in REPORT_PARAMS])))
    else:
        report += "Some baseline was used in this experiment, no deep learning.\n"
    report += "\n"
    report += ("Learned embeddings are stored at {}\n".format(xp["log"]["embeddings_path"]))
    report += ("Detailed results:\nMR: head {:.0f} / tail {:.0f} / avg {:.1f}\n"
               .format(xp["perf"]["head_MR"], xp["perf"]["tail_MR"], xp["perf"]["avg_MR"]))
    report += ("MRR: head {:.3f} / tail {:.3f} / avg {:.3f}\n"
               .format(xp["perf"]["head_MRR"], xp["perf"]["tail_MRR"], xp["perf"]["avg_MRR"]))
    report += ("Config files train and test.py are saved alongside model in {}\n"
               .format(os.path.abspath(xp["log"]["embeddings_path"])))
    report += ("Train Time: {}\nTest Time: {}\n".format(xp["log"]["time_train"],xp["log"]["time_test"]))
    print(report)
    save_via_email(xp["log"]["title"] + " {:.2f}k".format(xp["perf"]["avg_MR"]/1000) , report)
def save_via_email(title, message):
    """Email *message* to the configured recipients by shelling out to
    the Unix `mail` command, prefixing the subject with the hostname."""
    # runs 'echo "MESSAGE" | mail -s "TITLE" <EMAIL>'
    import os, socket
    title = socket.gethostname().upper()+": "+ title
    # NOTE(review): title/message are interpolated into a shell command
    # unquoted -- any double quote/backtick/$ in them breaks the command
    # (shell-injection risk); consider subprocess with an argument list.
    # Recipients come from the JSON store via the module-level xp_man.
    os.system('echo "Experiment log :\n{}" | '
              'mail -s "{}" {}'
              .format(message, title, xp_man.mail_recepients))
# ---- Command-line driver (Python 2: raw_input, list-returning dict.keys) ----
# Module-level manager; also read by save_via_email for the recipient list.
xp_man = XpManager('./xps.json')
if len(sys.argv) < 2:
    # No experiment name given: list the available ones and exit.
    print("# all experiences:\n#\n")
    #List all the xps
    nx = xp_man.xp_list.keys()
    nx.sort()
    for n in nx:
        # Names starting with '_' (e.g. "_template") are hidden.
        if not n.startswith('_'):
            print(n)
    xp_man._dump()
    exit(1)
xp_title = sys.argv[1]
xp = {}
# Start from the shared defaults; per-experiment values override below.
active_params = copy.deepcopy(xp_man.default_parameters)
try:
    xp = xp_man.xp_list[xp_title]
    xp["log"]={}
    xp["log"]["title"]=xp_title
    # Clean the params
    xp["params_task"]["task"] = xp["params_task"]["task"].lower().replace(" ", "")
    print("The {} for {} begins...".format(xp["params_task"]["task"],xp["log"]["title"]))
    xp["log"]["title"] = xp_title
    print(json.dumps(xp,sort_keys=True, indent=4,))
    # Set current xp parameter values
    active_params.update(xp['parameters'])
except:
    # In case the xp is not in json (Shouldn't happen!)
    # NOTE(review): this bare except also hides KeyErrors from malformed
    # entries, not just missing titles.
    print("{} is not in file {}.\n".format(xp_title, xp_man.file))
    if raw_input("Create new experience {}?[y/n]".format(xp_title)) == 'y':
        xp=xp_man.xp_list["_template"].copy()
        xp["log"]={}
        xp["log"]["title"]=xp_title
        xp_man.write(xp)
        print("The {} is created from the template. Please modify it in {}".format(xp_title, xp_man.file))
    exit(1)
try:
    # Map the task string to its MENU index (1..3).
    ans = MENU.index(xp["params_task"]["task"])
except:
    # NOTE(review): xp['task'] is likely meant to be xp["params_task"]["task"];
    # as written this error path raises KeyError itself.
    print("Task \"{}\" is unknown {}".format(xp['task'],MENU))
    exit(1)
# ---- Configure the OpenKE backend from the merged parameter set ----
con = config.Config()
# Save the default parameter value from config.py
default_params = {}
for x in PARAMS:
    default_params[x]=getattr(con, x)
########
# Set active parameter values
# Pass the params to openke
for k,v in active_params.items():
    setattr(con, k, v)
# The in_path must ends in /(slash)
con.in_path = os.path.abspath("data/"+xp["params_task"]["kb"])+"/"
xp["log"]["in_path"] = con.in_path
# If it's train + test or test only
if "test" in MENU[ans]:
    con.set_test_link_prediction(True)
try:
    xp["params_task"]["xp_path_base"] = os.path.abspath(xp["params_task"]["xp_path_base"])
except:
    # Key absent: fall back to the local "models" directory.
    xp["params_task"]["xp_path_base"]= os.path.abspath("models".format())
xp["log"]["embeddings_path"] = os.path.abspath("{}/{}".format(xp["params_task"]["xp_path_base"],xp["log"]["title"]))
if not os.path.exists(xp["log"]["embeddings_path"]):
    os.makedirs(xp["log"]["embeddings_path"])
else:
    # Existing output dir: require the user to retype the title to overwrite.
    print("{} exists already!".format(xp["log"]["title"]))
    if raw_input("Input \"{}\" to confirm the override and to continue: ".format(xp["log"]["title"])) != xp["log"]["title"]:
        exit(1)
if "train" in MENU[ans]:
    #Models will be exported via tf.Saver() automatically.
    con.set_export_files("{}/model.vec.tf".format(xp["log"]["embeddings_path"]), 0)
    #Model parameters will be exported to json files automatically.
    con.set_out_files("{}/embeddings.vec.json".format(xp["log"]["embeddings_path"]))
else:
    # Test-only: restore a previously trained model instead.
    con.set_import_files("{}/model.vec.tf".format(xp["log"]["embeddings_path"]))
print(active_params)
#Initialize experimental settings.
con.init()
#Get the corpus info (totals of relations and entities etc.)
xp["corpus"] = {}
xp["corpus"]["total_ent"] = con.lib.getEntityTotal()
xp["corpus"]["total_rel"] = con.lib.getRelationTotal()
xp["corpus"]["total_train"] = con.lib.getTrainTotal()
xp["corpus"]["total_test"] = con.lib.getTestTotal()
xp["corpus"]["total_valid"] = con.lib.getValidTotal()
#Set the knowledge embedding model
con.set_model(getattr(models, xp["params_task"]["model"])) # models.TransE
#Train the model.
# Train+ test or Train only
if "train" in MENU[ans]:
    t0 = time.time()
    con.run()
    xp["log"]["time_train"] = str(datetime.timedelta(seconds=time.time()-t0))
#To test models after training needs "set_test_flag(True)".
# Train+ test or Test only
if "test" in MENU[ans]:
    t0 = time.time()
    con.test()
    xp["log"]["time_test"] = str(datetime.timedelta(seconds=time.time()-t0))
    # Get the performance
    xp["perf"] = {}
    xp["perf"]["head_MR"] = con.get_head_mr()
    xp["perf"]["tail_MR"] = con.get_tail_mr()
    xp["perf"]["avg_MR"] = (xp["perf"]["head_MR"] + xp["perf"]["tail_MR"])/2
    xp["perf"]["head_MRR"] = con.get_head_mrr()
    xp["perf"]["tail_MRR"] = con.get_tail_mrr()
    xp["perf"]["avg_MRR"] = (xp["perf"]["head_MRR"] + xp["perf"]["tail_MRR"])/2
# Archive the run log, report results and persist the experiment record.
# NOTE(review): report() reads xp["perf"] and xp["log"]["time_test"], which
# only exist after the test phase -- a train-only run will fail here; confirm
# whether these three calls should live inside the "test" branch.
shutil.copy2("log.log", "{}/".format(xp["log"]["embeddings_path"]))
report(xp)
xp_man.write(xp)
| StarcoderdataPython |
6472284 | <filename>class/my_func.py
def k2c(temp):
    """Convert a temperature from Kelvin to Celsius.

    Parameters
    ----------
    temp: int or float
        temperature in Kelvin

    Returns
    -------
    int or float
        the same temperature expressed in Celsius
    """
    kelvin_offset = 273.15
    return temp - kelvin_offset
def f2c(temp):
    """Convert Fahrenheit to Celsius by going through Kelvin."""
    return k2c(f2k(temp))
def f2k(temp=32):
    """Convert a temperature from Fahrenheit to Kelvin.

    Parameters
    ----------
    temp: int or float, optional
        temperature in Fahrenheit (default 32, the freezing point)

    Returns
    -------
    float
        the temperature in Kelvin
    """
    # Bug fix: the original used (5/9), which is integer division (== 0)
    # under Python 2, making the function return 273.15 for every input.
    # 5.0/9.0 gives the correct factor on both Python 2 and 3.
    return (temp - 32) * (5.0 / 9.0) + 273.15
| StarcoderdataPython |
9778285 | <reponame>variable/todobackend<gh_stars>0
# encoding: utf-8
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
| StarcoderdataPython |
1791951 | import re
def wordsinline(line):
    """Return the number of word tokens (runs of word characters) in *line*."""
    # Bug fix: use a raw string -- "\w" in a plain literal is an invalid
    # escape sequence (SyntaxWarning on modern Python); r"\w+" is the
    # correct spelling of the regex.
    return len(re.findall(r"\w+", line))
# Interactive loop: echo the word count of each input line until any error.
# NOTE: Python 2 code (raw_input and the print statements below).
while (True):
    try:
        s = raw_input("type something: ")
        print wordsinline(s)
    except:
        # NOTE(review): bare except treats *any* error -- including EOF and
        # Ctrl-C (KeyboardInterrupt) -- as "finished".
        print "you finished"
quit() | StarcoderdataPython |
4840731 | <filename>setup.py
#!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# NOTE: The configuration for the package, including the name, version, and
# other information are set in the setup.cfg file.
import os
import sys
import tempfile

from setuptools import setup
from setuptools.command.develop import develop
# First provide helpful messages if contributors try and run legacy commands
# for tests or docs.
TEST_HELP = """
Note: running tests is no longer done using 'python setup.py test'. Instead
you will need to run:
    tox -e test
If you don't already have tox installed, you can install it with:
    pip install tox
If you only want to run part of the test suite, you can also use pytest
directly with::
    pip install -e .[test]
    pytest
For more information, see:
  http://docs.astropy.org/en/latest/development/testguide.html#running-tests
"""
# Intercept the legacy 'setup.py test' invocation with guidance, then exit.
if 'test' in sys.argv:
    print(TEST_HELP)
    sys.exit(1)
DOCS_HELP = """
Note: building the documentation is no longer done using
'python setup.py build_docs'. Instead you will need to run:
    tox -e build_docs
If you don't already have tox installed, you can install it with:
    pip install tox
You can also build the documentation with Sphinx directly using::
    pip install -e .[docs]
    cd docs
    make html
For more information, see:
  http://docs.astropy.org/en/latest/install.html#builddocs
"""
# Same interception for the legacy docs-building commands.
if 'build_docs' in sys.argv or 'build_sphinx' in sys.argv:
    print(DOCS_HELP)
    sys.exit(1)
# Template written into the package's version.py by setuptools_scm; the
# literal {version} placeholder is filled in at build time.
VERSION_TEMPLATE = """
# Note that we need to fall back to the hard-coded version if either
# setuptools_scm can't be imported or setuptools_scm can't determine the
# version, so we catch the generic 'Exception'.
try:
    from setuptools_scm import get_version
    version = get_version(root='..', relative_to=__file__)
except Exception:
    version = '{version}'
""".lstrip()
# These are based on jupyter_core.paths
def jupyter_config_dir():
    """Get the Jupyter config directory for this platform and user.

    Returns JUPYTER_CONFIG_DIR if defined, else ~/.jupyter
    """
    env = os.environ
    # Bug fix: the original called get_home_dir(), which is not defined in
    # this file (it lives in jupyter_core.paths, where this helper was
    # adapted from) and raised NameError.
    home_dir = os.path.realpath(os.path.expanduser('~'))
    if env.get('JUPYTER_NO_CONFIG'):
        # Bug fix: the original called the undefined _mkdtemp_once(); use a
        # real temporary directory so this branch no longer raises NameError.
        # NOTE(review): unlike jupyter_core's _mkdtemp_once this is not cached
        # per-process -- confirm whether repeated calls must return the same dir.
        return tempfile.mkdtemp(prefix='jupyter-clean-cfg')
    if env.get('JUPYTER_CONFIG_DIR'):
        return env['JUPYTER_CONFIG_DIR']
    return os.path.join(home_dir, '.jupyter')
def user_dir():
    """Return the per-user Jupyter data directory for this platform.

    macOS: ~/Library/Jupyter; Windows: %APPDATA%/jupyter (falling back to
    the config dir); elsewhere: $XDG_DATA_HOME/jupyter or
    ~/.local/share/jupyter.
    """
    homedir = os.path.expanduser('~')
    # Next line will make things work even when /home/ is a symlink to
    # /usr/home as it is on FreeBSD, for example
    homedir = os.path.realpath(homedir)
    if sys.platform == 'darwin':
        return os.path.join(homedir, 'Library', 'Jupyter')
    elif os.name == 'nt':
        appdata = os.environ.get('APPDATA', None)
        if appdata:
            return os.path.join(appdata, 'jupyter')
        else:
            return os.path.join(jupyter_config_dir(), 'data')
    else:
        # Linux, non-OS X Unix, AIX, etc.
        # Bug fix: `env` and `home` were undefined here (NameError on every
        # non-mac, non-Windows platform); use os.environ and the homedir
        # computed above.
        xdg = os.environ.get("XDG_DATA_HOME", None)
        if not xdg:
            xdg = os.path.join(homedir, '.local', 'share')
        return os.path.join(xdg, 'jupyter')
class DevelopCmd(develop):
    """`pip install -e` hook that symlinks the shared Jupyter template
    directories (nbconvert/voila 'jdaviz-default') into the active
    environment's jupyter data directory instead of copying them."""
    # (data-dir subpath, template name) pairs to link.
    prefix_targets = [
        ("nbconvert/templates", 'jdaviz-default'),
        ("voila/templates", 'jdaviz-default')
    ]
    def run(self):
        target_dir = os.path.join(sys.prefix, 'share', 'jupyter')
        # NOTE(review): substring test on the prefix *path* is a heuristic
        # for user-site installs, as the TODO admits.
        if '--user' in sys.prefix: # TODO: is there a better way to find out?
            target_dir = user_dir()
        target_dir = os.path.join(target_dir)
        for prefix_target, name in self.prefix_targets:
            source = os.path.join('share', 'jupyter', prefix_target, name)
            target = os.path.join(target_dir, prefix_target, name)
            target_subdir = os.path.dirname(target)
            if not os.path.exists(target_subdir):
                os.makedirs(target_subdir)
            # Relative link so the environment stays relocatable.
            rel_source = os.path.relpath(os.path.abspath(
                source), os.path.abspath(target_subdir))
            try:
                os.remove(target)
            except:
                # NOTE(review): bare except swallows more than the intended
                # "target does not exist" case (e.g. permission errors).
                pass
            print(rel_source, '->', target)
            os.symlink(rel_source, target)
        # Continue with the normal develop-install steps.
        super(DevelopCmd, self).run()
# WARNING: all files generated during setup.py will not end up in the source
# distribution
# Collect every file under share/jupyter/ as a (directory, files) data_files
# entry so non-develop installs copy the templates into place.
data_files = []
# Add all the templates
for (dirpath, dirnames, filenames) in os.walk('share/jupyter/'):
    if filenames:
        data_files.append((dirpath, [os.path.join(dirpath, filename)
                                     for filename in filenames]))
# Remaining metadata comes from setup.cfg; the version is derived from git
# via setuptools_scm and written into jdaviz/version.py.
setup(data_files=data_files, cmdclass={'develop': DevelopCmd},
      use_scm_version={'write_to': os.path.join('jdaviz', 'version.py'),
                       'write_to_template': VERSION_TEMPLATE})
| StarcoderdataPython |
5067139 | from django.apps import AppConfig
class FieldswidgetcwappConfig(AppConfig):
name = 'FieldsWidgetCWapp'
| StarcoderdataPython |
57147 | <filename>Python code/Lesson2.py<gh_stars>0
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.distance import cdist
np.random.seed(0)
means = [[2, 2], [8, 3], [3, 6]]
cov = [[1, 0], [0, 1]]
N = 500
K = 3
X0 = np.random.multivariate_normal(means[0], cov, N)
X1 = np.random.multivariate_normal(means[1], cov, N)
X2 = np.random.multivariate_normal(means[2], cov, N)
X = np.concatenate((X0, X1, X2), axis = 0)
original_label = np.asarray([0]*N + [1]*N + [2]*N).T
def kmeans_display(X, label):
X0 = X[label == 0, :]
X1 = X[label == 1, :]
X2 = X[label == 2, :]
plt.plot(X0[:, 0], X0[:, 1], 'b^', markersize = 4, alpha = .8)
plt.plot(X1[:, 0], X1[:, 1], 'go', markersize = 4, alpha = .8)
plt.plot(X2[:, 0], X2[:, 1], 'rs', markersize = 4, alpha = .8)
plt.axis('equal')
plt.plot()
plt.show()
def kmeans_init_centers(X, k):
# randomly pick k rows of X as initial centers
return X[np.random.choice(X.shape[0], k, replace=False)]
def kmeans_assign_labels(X, centers):
# calculate pairwise distances btw data and centers
D = cdist(X, centers)
# return index of the closest center
return np.argmin(D, axis = 1)
def kmeans_update_centers(X, labels, K):
centers = np.zeros((K, X.shape[1]))
for k in range(K):
# collect all points assigned to the k-th cluster
Xk = X[labels == k, :]
# take average
centers[k,:] = np.mean(Xk, axis = 0)
return centers
def has_converged(centers, new_centers):
# return True if two sets of centers are the same
return (set([tuple(a) for a in centers]) ==
set([tuple(a) for a in new_centers]))
def kmeans(X, K):
centers = [kmeans_init_centers(X, K)]
labels = []
it = 0
while True:
labels.append(kmeans_assign_labels(X, centers[-1])) # n x 1 (index)
new_centers = kmeans_update_centers(X, labels[-1], K)
if has_converged(centers[-1], new_centers):
break
centers.append(new_centers)
it += 1
# kmeans_display(X, original_label)
# kmeans(X, K)
| StarcoderdataPython |
255561 | <reponame>maestro-hybrid-cloud/ceilometer
#
# Copyright 2013 eNovance <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from sqlalchemy import MetaData, Table, Column, Index
from sqlalchemy import String, Text
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
table = Table('alarm', meta, autoload=True)
type = Column('type', String(50), default='threshold')
type.create(table, populate_default=True)
rule = Column('rule', Text())
rule.create(table)
for row in table.select().execute().fetchall():
query = []
if row.matching_metadata is not None:
matching_metadata = json.loads(row.matching_metadata)
for key in matching_metadata:
query.append({'field': key,
'op': 'eq',
'value': matching_metadata[key]})
rule = {
'meter_name': row.meter_name,
'comparison_operator': row.comparison_operator,
'threshold': row.threshold,
'statistic': row.statistic,
'evaluation_periods': row.evaluation_periods,
'period': row.period,
'query': query
}
table.update().where(table.c.id == row.id).values(rule=rule).execute()
index = Index('ix_alarm_counter_name', table.c.meter_name)
index.drop(bind=migrate_engine)
table.c.meter_name.drop()
table.c.comparison_operator.drop()
table.c.threshold.drop()
table.c.statistic.drop()
table.c.evaluation_periods.drop()
table.c.period.drop()
table.c.matching_metadata.drop()
| StarcoderdataPython |
4973513 | <reponame>cclauss/discovery-artifact-manager<gh_stars>100-1000
#!/usr/bin/python2.7
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom template tags and filters for Google APIs code generator.
These are Django template filters for reformatting blocks of code.
"""
__author__ = '<EMAIL> (<NAME>)'
import contextlib
import hashlib
import logging
import os
import re
import string
import textwrap
import threading
import django.template as django_template # pylint: disable=g-bad-import-order
from googleapis.codegen import utilities
from googleapis.codegen.filesys import files
register = django_template.Library()
# NOTE: Do not edit this text unless you understand the ramifications.
_LICENSE_TEXT = """
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
# Names of the parameters used for formatting generated code. These are keys
# to the actual values, which are stored in the generation context.
_LANGUAGE = '_LANGUAGE'
_LINE_BREAK_INDENT = '_LINE_BREAK_INDENT'
_LINE_WIDTH = '_LINE_WIDTH'
_PARAMETER_INDENT = '_PARAMETER_INDENT'
_LEVEL_INDENT = '_LEVEL_INDENT'
_COMMENT_START = '_COMMENT_START'
_COMMENT_CONTINUE = '_COMMENT_CONTINUE'
_COMMENT_END = '_COMMENT_END'
_DOC_COMMENT_START = '_DOC_COMMENT_START'
_DOC_COMMENT_CONTINUE = '_DOC_COMMENT_CONTINUE'
_DOC_COMMENT_END = '_DOC_COMMENT_END'
# The begin/end tags are parts of a doc comment that surround the text, but
# are not really part of the comment tags
_DOC_COMMENT_BEGIN_TAG = '_DOC_COMMENT_BEGIN_TAG'
_DOC_COMMENT_END_TAG = '_DOC_COMMENT_END_TAG'
_LITERAL_QUOTE_START = '_LITERAL_QUOTE_START'
_LITERAL_QUOTE_END = '_LITERAL_QUOTE_END'
_LITERAL_ESCAPE = '_LITERAL_ESCAPE'
_LITERAL_FLOAT_SUFFIX = '_LITERAL_FLOAT_SUFFIX'
_CURRENT_INDENT = '_CURRENT_INDENT'  # The actual indent we are at
_CURRENT_LEVEL = '_CURRENT_LEVEL'  # The current indent level we are at
_PARAMETER_DOC_INDENT = '_PARAMETER_DOC_INDENT'
_IMPORT_REGEX = '_IMPORT_REGEX'
_IMPORT_TEMPLATE = '_IMPORT_TEMPLATE'
_BOOLEAN_LITERALS = '_BOOLEAN_LITERALS'
# The name of the context variable holding a file writer for the 'write' tag to
# use. The file writer is a method with the signature func(path, content).
FILE_WRITER = '_FILE_WRITER'
# Baseline formatting values. Lookups via _GetFromContext consult the render
# context first, then _language_defaults for the current language, then this
# dict — so every key here acts as the last-resort fallback.
_defaults = {
    _LINE_BREAK_INDENT: 2,
    _LINE_WIDTH: 40,
    _PARAMETER_INDENT: 4,
    _LEVEL_INDENT: 2,
    _COMMENT_START: '# ',
    _COMMENT_CONTINUE: '# ',
    _COMMENT_END: '',
    _DOC_COMMENT_START: '# ',
    _PARAMETER_DOC_INDENT: 6,
    _DOC_COMMENT_CONTINUE: '# ',
    _DOC_COMMENT_END: '',
    _DOC_COMMENT_BEGIN_TAG: '',
    _DOC_COMMENT_END_TAG: '',
    _IMPORT_REGEX: r'^\s*import\s+(?P<import>[a-zA-Z0-9.]+)',
    _IMPORT_TEMPLATE: 'import %s',
    _LITERAL_QUOTE_START: '"',
    _LITERAL_QUOTE_END: '"',
    # Pairs of (raw character, escaped form) applied to string literals.
    _LITERAL_ESCAPE: [
        ('\\', '\\\\'),
        ('"', '\\"'),
        ('\n', '\\n'),
        ('\t', '\\t'),
        ('\r', '\\r'),
        ('\f', '\\f'),
        ],
    _LITERAL_FLOAT_SUFFIX: '',
    _BOOLEAN_LITERALS: ('false', 'true')
    }
# Per-language overrides of _defaults. Any key missing for a language falls
# back to _defaults; the render context can override both.
_language_defaults = {
    'cpp': {
        _LINE_WIDTH: 80,
        _PARAMETER_INDENT: 4,
        _LEVEL_INDENT: 2,
        _COMMENT_START: '// ',
        _COMMENT_CONTINUE: '// ',
        _COMMENT_END: '',
        _DOC_COMMENT_START: '/** ',
        _DOC_COMMENT_CONTINUE: ' * ',
        _DOC_COMMENT_END: ' */',
        _IMPORT_REGEX: r'^#include\s+(?P<import>[\<\"][a-zA-Z0-9./_\-]+[\>\"])',
        _IMPORT_TEMPLATE: '#include %s'
        },
    'csharp': {
        _LINE_WIDTH: 120,
        _PARAMETER_INDENT: 4,
        _LEVEL_INDENT: 4,
        _COMMENT_START: '// ',
        _COMMENT_CONTINUE: '// ',
        _COMMENT_END: '',
        _DOC_COMMENT_START: '/// ',
        _DOC_COMMENT_CONTINUE: '/// ',
        _DOC_COMMENT_BEGIN_TAG: '<summary>',
        _DOC_COMMENT_END_TAG: '</summary>',
        },
    'dart': {
        _LEVEL_INDENT: 2,
        _LINE_WIDTH: 100,
        _COMMENT_START: '/* ',
        _COMMENT_CONTINUE: ' * ',
        _COMMENT_END: ' */',
        _DOC_COMMENT_START: '/** ',
        _PARAMETER_DOC_INDENT: 6,
        # E.g. #import('dart:json');
        _IMPORT_REGEX: r'^#\s*import\s+\(\'(?P<import>[a-zA-Z0-9:.]+)\'\);',
        _IMPORT_TEMPLATE: """#import('%s');""",
        # Dart string interpolation means '$' must also be escaped.
        _LITERAL_ESCAPE: _defaults[_LITERAL_ESCAPE] + [('$', '\\$')]
        },
    'go': {
        _LINE_WIDTH: 120,
        _PARAMETER_INDENT: 4,
        _LEVEL_INDENT: 8,
        _COMMENT_START: '// ',
        _COMMENT_CONTINUE: '// ',
        _COMMENT_END: '',
        _DOC_COMMENT_START: '// ',
        _DOC_COMMENT_CONTINUE: '// '
        },
    'java': {
        _LINE_WIDTH: 100,
        _COMMENT_START: '/* ',
        _COMMENT_CONTINUE: ' * ',
        _COMMENT_END: ' */',
        _DOC_COMMENT_START: '/** ',
        _PARAMETER_DOC_INDENT: 6,
        _IMPORT_REGEX: r'^\s*import\s+(?P<import>[a-zA-Z0-9.]+);',
        _IMPORT_TEMPLATE: 'import %s;'
        },
    'javascript': {
        _LINE_WIDTH: 80,
        _COMMENT_START: '/* ',
        _COMMENT_CONTINUE: ' * ',
        _COMMENT_END: ' */',
        _DOC_COMMENT_START: '/** ',
        },
    'objc': {
        _LINE_WIDTH: 80,
        _COMMENT_START: '// ',
        _COMMENT_CONTINUE: '// ',
        _COMMENT_END: '',
        _DOC_COMMENT_START: '// ',
        _DOC_COMMENT_CONTINUE: '// ',
        _DOC_COMMENT_END: '',
        _LITERAL_QUOTE_START: '@"',
        _BOOLEAN_LITERALS: ('NO', 'YES'),
        _IMPORT_TEMPLATE: '#import %s',
        },
    'php': {
        _LINE_WIDTH: 80,
        _COMMENT_START: '/* ',
        _COMMENT_CONTINUE: ' * ',
        _COMMENT_END: ' */',
        _DOC_COMMENT_START: '/** ',
        _LITERAL_QUOTE_START: '\'',
        _LITERAL_QUOTE_END: '\'',
        _LITERAL_ESCAPE: [
            ('\\', '\\\\'),  # Really is \ => \\
            ('\'', '\\\''),  # ' => \'
            ]
        },
    'python': {
        _LINE_WIDTH: 80,
        _COMMENT_START: '# ',
        _COMMENT_CONTINUE: '# ',
        _COMMENT_END: '# ',
        _DOC_COMMENT_START: '"""',
        _DOC_COMMENT_CONTINUE: '"""',
        _LITERAL_QUOTE_START: '\'',
        _LITERAL_QUOTE_END: '\'',
        _BOOLEAN_LITERALS: ('False', 'True'),
        },
    }
_TEMPLATE_GLOBALS = threading.local()
_TEMPLATE_GLOBALS.current_context = None
def GetCurrentContext():
return _TEMPLATE_GLOBALS.current_context
@contextlib.contextmanager
def SetCurrentContext(ctxt):
  """Install ctxt as this thread's current render context for a with-block.

  Restores the previously installed context on exit. (The earlier version
  unconditionally reset the slot to None, which clobbered the outer context
  when SetCurrentContext calls were nested.)

  Args:
    ctxt: (Context) the Django context to expose via GetCurrentContext().

  Yields:
    Nothing; used purely for its side effect on _TEMPLATE_GLOBALS.
  """
  # getattr: the slot may not exist yet in a thread that never rendered.
  previous = getattr(_TEMPLATE_GLOBALS, 'current_context', None)
  _TEMPLATE_GLOBALS.current_context = ctxt
  try:
    yield
  finally:
    _TEMPLATE_GLOBALS.current_context = previous
def _GetCurrentLanguage(ctxt=None, default=None):
  """Determine which output language the current render targets.

  Precedence: the value stored by the {% language %} tag, then the context's
  language_model, then the supplied default.

  Args:
    ctxt: (Context|None) an explicit context; falls back to the thread's
      current render context (or an empty dict) when None.
    default: value returned when no language can be determined.

  Returns:
    (str|None) the language name, or default.
  """
  context = ctxt if ctxt is not None else (GetCurrentContext() or {})
  if _LANGUAGE in context:
    # Respect the language explicitly set by the language node.
    return context[_LANGUAGE]
  model = context.get('language_model')
  if model and model.language:
    return model.language
  logging.debug('no language set in context or language model')
  return default
class CachingTemplateLoader(object):
  """A template loader that caches templates under stable directories."""

  # Variation directories matching this are development versions that must
  # never be cached, e.g. "java/dev/" or "java/1.0dev".
  UNSTABLE_VARIATION_PATTERN = re.compile(r'^[^/]+/[^/]*dev/')

  def __init__(self):
    # Maps absolute template path -> compiled django Template.
    self._cache = {}

  def GetTemplate(self, template_path, template_dir):
    """Get a compiled django template.

    Args:
      template_path: Full path to the template.
      template_dir: The root of the template path.

    Returns:
      A compiled django template.
    """
    relpath = os.path.relpath(template_path, template_dir)
    skip_cache = (self.UNSTABLE_VARIATION_PATTERN.match(relpath) or
                  os.environ.get('NOCACHE'))
    if skip_cache:
      # Bypass the cache when testing requests it or the variation is
      # an unstable development version.
      return self._LoadTemplate(template_path)
    cached = self._cache.get(template_path)
    if cached:
      return cached
    try:
      compiled = self._LoadTemplate(template_path)
    except django_template.TemplateSyntaxError as err:
      # Prefix the relative path so the error identifies the template.
      raise django_template.TemplateSyntaxError('%s: %s' % (relpath, err))
    self._cache[template_path] = compiled
    return compiled

  def _LoadTemplate(self, template_path):
    # Read and compile the template; templates are stored as UTF-8.
    source = files.GetFileContents(template_path).decode('utf-8')
    return django_template.Template(source)
# Process-wide singleton loader shared by all renders in this module.
_TEMPLATE_LOADER = CachingTemplateLoader()
def _RenderToString(template_path, context):
  """Render the template file at template_path against a django Context.

  NOTE: This routine is essentially a copy of what is in django_helpers.
  We duplicate it here rather than call that one to avoid a mutual recursion
  in the strange django loading process.

  Args:
    template_path: (str) Path to file.
    context: (Context) A django Context.

  Returns:
    (str) The expanded template.
  """
  # FRAGILE: this relies on template_dir being passed in to the
  # context (in generator.py)
  template_dir = context.get('template_dir', '')
  template = _TEMPLATE_LOADER.GetTemplate(template_path, template_dir)
  return template.render(context)
def _GetFromContext(context, *variables):
  """Safely get something from the context.

  Look for a variable (or an alternate variable) in the context. If it is not
  in the context, look in language-specific or overall defaults.

  Args:
    context: (Context|None) The Django render context
    *variables: (str) varargs list of variable names

  Returns:
    The requested value from the context or the defaults, or None when no
    container defines any of the requested names.
  """
  if context is None:
    # GetCurrentContext() may itself be None outside a render; fall back to
    # an empty dict so lookup still reaches the defaults instead of crashing
    # (matches the `or {}` guard used in _GetCurrentLanguage).
    context = GetCurrentContext() or {}
  containers = [context]
  current_language = _GetCurrentLanguage(context)
  if current_language and current_language in _language_defaults:
    containers.append(_language_defaults[current_language])
  containers.append(_defaults)
  # Use a non-reproducible default value to allow real non-truthy values.
  sentinel = object()
  for container in containers:
    for name in variables:
      value = container.get(name, sentinel)
      if value is not sentinel:
        return value
  return None
def _GetArgFromToken(token):
"""Split out a single argument word from django tag token.
When the Django parser encounters a tag of the form {% tag x %}, the tag
processor is handed a single token containing 'tag x'. We split that apart
and return just the 'x'.
Args:
token: (django.template.Token) the token holding this tag
Returns:
(str) The argument word contained in the token.
Raises:
TemplateSyntaxError: if the token has no argument.
"""
try:
_, arg = token.split_contents()
except ValueError:
raise django_template.TemplateSyntaxError(
'tag requires a single argument: %s' % token.contents)
return arg
#
# Basic Filters
#
def _DivideIntoBlocks(lines, prefix):
"""Dole out the input text in blocks separated by blank lines.
A "blank line" in this case means a line that is actually zero length or
just is the comment prefix. The common prefix, along with any spaces trailing
the prefix are removed from each line.
Args:
lines: list of str
prefix: a commmon prefix to remove from each line
Yields:
list of (list of str)
"""
block = []
prefix = prefix.rstrip()
for line in lines:
if line.startswith(prefix):
line = line[len(prefix):].strip()
if not line:
if block:
yield block
block = []
continue
block.append(line)
if block:
yield block
def _ExtractCommentPrefix(line):
"""Examine a line of text and extract what would be a comment prefix.
The pattern we are looking for is ' *[^ ::punctuation::]*'. This covers most
programming languages in common use. Fortran and Basic are obviously not
supported. :-)
Args:
line: (str) a sample line
Returns:
(str) The comment prefix
"""
# look for spaces followed by a comment tag and break after that.
got_tag = False
prefix_length = 0
# collect the prefix pattern
for c in line:
if c == ' ':
if got_tag:
break
prefix_length += 1
elif c in string.punctuation:
got_tag = True
prefix_length += 1
else:
break
return line[:prefix_length]
# We disable the bad function name warning because we use Django style names
# rather than Google style names
@register.filter
def java_comment_fragment(value, indent):  # pylint: disable=g-bad-name
  """Template filter to wrap lines into Java comment style.

  Take a single long string and break it so that subsequent lines are prefixed
  by an appropriate number of spaces and then a ' * '. The filter invocation
  should begin on a line that is already indented sufficiently. This is
  typically used after we have written the lead-in for a comment. E.g.

  | // NOTE: The leading / is indented 4 spaces.
  | /**
  |  * {{ variable|java_comment_fragment:4 }}
  |  */

  Args:
    value: (str) the string to wrap
    indent: (int) the number of spaces to indent the block.

  Returns:
    The rewrapped string.
  """
  prefix = '%s * ' % (' ' * (indent or 0))
  wrapper = textwrap.TextWrapper(
      width=_language_defaults['java'][_LINE_WIDTH],
      replace_whitespace=False,
      initial_indent=prefix,
      subsequent_indent=prefix)
  wrapped = wrapper.fill(value)
  # The caller already emitted the first line's indentation, so drop it.
  if wrapped.startswith(prefix):
    return wrapped[len(prefix):]
  return wrapped
@register.filter
def java_parameter_wrap(value):  # pylint: disable=g-bad-name
  """Template filter to wrap lines of parameter documentation.

  Breaks a single long string so that continuation lines are indented to the
  Java parameter-doc column and preceded by ' * '.

  Args:
    value: (str) the string to wrap

  Returns:
    the rewrapped string.
  """
  # TODO(user): add 'parameter_doc' option to the DocCommentBlock
  doc_indent = _language_defaults['java'][_PARAMETER_DOC_INDENT]
  continuation = ' * %s ' % (' ' * doc_indent)
  wrapper = textwrap.TextWrapper(
      width=_language_defaults['java'][_LINE_WIDTH],
      replace_whitespace=False,
      initial_indent='',
      subsequent_indent=continuation)
  return wrapper.fill(value)
# We disable the bad function name warning because we use Django style names
# rather than Google style names (disable-msg=C6409)
@register.filter
def block_comment(value):  # pylint: disable=g-bad-name
  """Template filter to line wrap a typical block comment.

  Take a block of text where each line has a common comment prefix, divide it
  into multiple sections, line wrap each section and string them back together.
  Sections are defined as blank lines or lines containing only the comment
  prefix.

  Example template usage:
    /**{% filter block_comment %}
     * wwelrj wlejrwerl jrl (very long line ...) rwrwr.
     *
     * more text
     * and more
     * {% endfilter %}
     */

  Args:
    value: (str) a block of text to line wrap.

  Returns:
    (str) the wrapped text.
  """
  if not value:
    return ''
  lines = value.split('\n')
  # Ignore a leading blank line while figuring out the comment tag. This allows
  # us to put the filter tag above the content, rather than flush left before
  # it. It makes the template easier to read.
  leading_blank = False
  if not lines[0]:
    leading_blank = True
    # Safe: value is non-empty, so an empty lines[0] implies len(lines) >= 2.
    comment_prefix = _ExtractCommentPrefix(lines[1])
  else:
    comment_prefix = _ExtractCommentPrefix(lines[0])
  # TODO(user): Default is for backwards-compatibility; remove when safe to
  # do so.
  language = _GetCurrentLanguage(default='java')
  line_width = _language_defaults[language][_LINE_WIDTH]
  wrapper = textwrap.TextWrapper(width=line_width,
                                 replace_whitespace=False,
                                 initial_indent=('%s ' % comment_prefix),
                                 subsequent_indent=('%s ' % comment_prefix))
  wrapped_blocks = []
  for block in _DivideIntoBlocks(lines, comment_prefix):
    wrapped_blocks.append(wrapper.fill(' '.join(block)))
  # Re-emit the leading blank line (if any) and join sections with a
  # prefix-only separator line so paragraph breaks survive rewrapping.
  ret = ''
  if leading_blank:
    ret = '\n'
  return ret + ('\n%s\n' % comment_prefix).join(wrapped_blocks)
@register.filter
def noblanklines(value):  # pylint: disable=g-bad-name
  """Template filter to remove blank lines."""
  kept = [line for line in value.split('\n') if line.strip()]
  return '\n'.join(kept)
@register.filter
def collapse_blanklines(value):  # pylint: disable=g-bad-name
  """Template filter to collapse successive blank lines into a single one."""
  collapsed = []
  previous_blank = False
  for line in value.split('\n'):
    is_blank = not line.strip()
    if is_blank and previous_blank:
      # Drop every blank line after the first in a run.
      continue
    collapsed.append(line)
    previous_blank = is_blank
  return '\n'.join(collapsed)
# Deliberate abort signal for template expansion; raised by the {% halt %} tag.
class Halt(Exception):
  """The exception raised when a 'halt' tag is encountered."""
  pass
@register.simple_tag
def halt():  # pylint: disable=g-bad-name
  """A tag which raises a Halt exception.

  Usage:
    {% if some_condition %}{% halt %}{% endif %}

  The exception propagates out of template rendering; the caller is expected
  to catch Halt and treat the expansion as intentionally aborted.

  Raises:
    Halt: always
  """
  raise Halt()
#
# Tags for programming language concepts
#
class LanguageNode(django_template.Node):
  """Node for language setting."""

  def __init__(self, language):
    # language: (str) key into _language_defaults, e.g. 'java'.
    self._language = language

  def render(self, context):  # pylint: disable=g-bad-name
    """Render the 'language' tag.

    For the language setting we render nothing, but we take advantage of being
    passed the context to set language specific things there, so they are
    usable later.

    Args:
      context: (Context) the render context.

    Returns:
      An empty string.
    """
    # We emit source code, not HTML, so Django autoescaping must be off.
    context.autoescape = False
    context[_LANGUAGE] = self._language
    per_language_defaults = _language_defaults.get(self._language)
    if per_language_defaults:
      context.update(per_language_defaults)
    # Reset indentation tracking for the new language scope.
    context[_CURRENT_INDENT] = 0
    context[_CURRENT_LEVEL] = 0
    return ''
@register.tag(name='language')
def DoLanguage(unused_parser, token):
  """Specify the language we are emitting code in.

  Usage:
    {% language java %}

  Args:
    unused_parser: (parser) the Django parser context.
    token: (django.template.Token) the token holding this tag

  Returns:
    a LanguageNode
  """
  return LanguageNode(_GetArgFromToken(token))
class IndentNode(django_template.Node):
  """A node which indents its contents based on indent nesting levels.

  The interior text is re-indented by the existing indent + the indent nesting
  level * the LEVEL_INDENT
  """

  def __init__(self, nodelist, levels):
    # nodelist: child nodes to render inside the indented scope.
    # levels: number of LEVEL_INDENT steps to add (tag argument).
    self._nodelist = nodelist
    self._levels = int(levels)

  def render(self, context):  # pylint: disable=g-bad-name
    """Reindent the block inside the tag scope."""
    current_indent = context.get(_CURRENT_INDENT, 0)
    current_indent_level = context.get(_CURRENT_LEVEL, 0)
    # How much extra indent will this level add
    extra = (_GetFromContext(context, _LEVEL_INDENT) * self._levels)
    # Set the new effective indent of this block. Tags which wrap text to
    # the line limit must use this value to determine their actual indentation.
    # NOTE: must happen BEFORE rendering the children, and be restored after,
    # so nested width-sensitive tags see the correct values.
    context[_CURRENT_INDENT] = current_indent + extra
    context[_CURRENT_LEVEL] = current_indent_level + self._levels
    lines = self._nodelist.render(context)
    context[_CURRENT_INDENT] = current_indent
    context[_CURRENT_LEVEL] = current_indent_level
    # We only have to prefix the lines in this row by the extra indent, because
    # the outer scope will be adding its own indent as well.
    prefix = ' ' * extra
    def _PrefixNonBlank(s):
      # Blank lines stay blank rather than gaining trailing whitespace.
      x = s.rstrip()
      if x:
        x = '%s%s' % (prefix, x)
      return x
    return '\n'.join([_PrefixNonBlank(line) for line in lines.split('\n')])
@register.tag(name='indent')
def DoIndent(parser, token):
  """Increase the indent level for indenting.

  Usage:
    {% indent [levels] %} text... {% endindent %}

  Increase the indent on all lines of text by levels * LEVEL_INDENT.

  Args:
    parser: (parser) the Django parser context.
    token: (django.template.Token) the token holding this tag

  Returns:
    a IndentNode
  """
  pieces = token.split_contents()
  # Default to a single level when the tag carries no (usable) argument.
  levels = pieces[1] if len(pieces) == 2 else 1
  nodelist = parser.parse(('endindent',))
  parser.delete_first_token()
  return IndentNode(nodelist, levels)
class CollapsedNewLinesNode(django_template.Node):
  """A node which collapses 3 or more newlines into 2 newlines."""

  def __init__(self, nodelist):
    self._nodelist = nodelist

  def render(self, context):  # pylint: disable=g-bad-name
    """Collapses newline inside the tag scope."""
    rendered = self._nodelist.render(context)
    # Any run of two-or-more newlines becomes exactly two (one blank line).
    return re.sub(r'\n(\n)+', '\n\n', rendered)
@register.tag(name='collapsenewlines')
def DoCollapseNewLines(parser, unused_token):
  """Collapses 3 or more newlines into 2 newlines.

  Usage:
    {% collapsenewlines %}
    ...
    {% endcollapsenewlines %}

  Args:
    parser: (parser) the Django parser context.
    unused_token: (django.template.Token) the token holding this tag

  Returns:
    a CollapsedNewLinesNode
  """
  nodelist = parser.parse(('endcollapsenewlines',))
  parser.delete_first_token()
  return CollapsedNewLinesNode(nodelist)
# Sentinel strings emitted by {% eol %} / {% sp %} while inside a noblank or
# noeol scope; NoBlankNode swaps them back to '\n' / ' ' after cleaning, so
# explicitly requested whitespace survives the blank-stripping pass.
EOL_MARKER = '\x00eol\x00'
SPACE_MARKER = '\x00sp\x00'
# Context key holding the stack of NoBlankNode renders currently in progress.
NOBLANK_STACK = '___noblank__stack___'
@register.simple_tag(takes_context=True)
def eol(context):  # pylint:disable=g-bad-name
  """Emit an end-of-line that survives noblank/noeol whitespace cleaning."""
  # Inside a noblock node, return special marker
  if context.get(NOBLANK_STACK):
    return EOL_MARKER
  return '\n'
@register.simple_tag(takes_context=True)
def sp(context):  # pylint:disable=g-bad-name
  """Emit a space that survives noblank/noeol whitespace cleaning."""
  # Inside a noblock node, return special marker
  if context.get(NOBLANK_STACK):
    return SPACE_MARKER
  return ' '
class NoBlankNode(django_template.Node):
  """Node that strips blank lines (and, for noeol, line endings) from output."""

  def __init__(self, nodelist, recurse=False, noeol=False):
    # nodelist: child nodes whose rendered text will be cleaned.
    # recurse: if True, also clean text rendered by nested TemplateNodes.
    # noeol: if True, additionally strip trailing EOLs ({% noeol %} variant).
    self.nodelist = nodelist
    self.recurse = recurse
    self.noeol = noeol

  def _CleanText(self, text):
    # Drop lines that are empty or whitespace-only; keepends=True preserves
    # the newline characters on the surviving lines.
    lines = [line for line in text.splitlines(True)
             if line.strip()]
    if self.noeol:
      # Remove whitespace at the end of a source line, so that invisible
      # whitespace is not significant (users should use {%sp%} in that
      # situation).. The text passed in here doesn't necessarily end with a
      # newline, so take care not to strip out whitespace unless it does.
      def Clean(s):
        if s.endswith('\n'):
          return s.rstrip()
        return s
      lines = [Clean(line) for line in lines]
    text = ''.join(lines)
    return text

  def _ReplaceMarkers(self, text):
    # Restore whitespace that was explicitly requested via {% eol %}/{% sp %}.
    return text.replace(EOL_MARKER, '\n').replace(SPACE_MARKER, ' ')

  def render(self, context):  # pylint:disable=g-bad-name
    """Render the node."""
    # Track nesting depth in the context so eol/sp know to emit markers.
    stack = context.get(NOBLANK_STACK)
    if stack is None:
      stack = context[NOBLANK_STACK] = [self]
    else:
      stack.append(self)
    try:
      output = []
      for n in self.nodelist:
        text = n.render(context)
        # TemplateNode output is left intact unless recurse was requested.
        if not isinstance(n, TemplateNode) or self.recurse:
          text = self._CleanText(text)
        output.append(text)
      text = ''.join(output)
      # Only replace markers if we are the last node in the stack.
      if len(stack) == 1:
        text = self._ReplaceMarkers(text)
      return text
    finally:
      stack.pop()
@register.tag(name='noblank')
def DoNoBlank(parser, token):
  """Suppress all empty lines unless explicitly added."""
  args = token.split_contents()
  if len(args) > 2:
    raise django_template.TemplateSyntaxError(
        'noblank expects at most one argument')
  recurse = False
  if len(args) == 2:
    if args[1] not in ('recurse', 'norecurse'):
      raise django_template.TemplateSyntaxError(
          'argument to noblank must be either "norecurse" '
          '(the default) or "recurse"')
    recurse = args[1] == 'recurse'
  nodelist = parser.parse(('endnoblank',))
  parser.delete_first_token()
  return NoBlankNode(nodelist, recurse=recurse)
@register.tag(name='noeol')
def DoNoEol(parser, token):
  """Suppress line endings (and blank lines) unless explicitly added.

  Like {% noblank %} but also strips trailing whitespace/newlines from each
  line (NoBlankNode with noeol=True); use {% eol %} to emit one deliberately.
  """
  args = token.split_contents()
  if len(args) > 2:
    raise django_template.TemplateSyntaxError(
        'noeol expects at most one argument')
  if len(args) == 2:
    recursearg = args[1]
    if recursearg not in ('recurse', 'norecurse'):
      raise django_template.TemplateSyntaxError(
          'argument to noeol must be either "norecurse" '
          '(the default) or "recurse"')
    recurse = recursearg == 'recurse'
  else:
    recurse = False
  nodelist = parser.parse(('endnoeol',))
  parser.delete_first_token()
  return NoBlankNode(nodelist, recurse=recurse, noeol=True)
class DocCommentNode(django_template.Node):
  """Node for comments which should be formatted as doc-style comments."""

  def __init__(self, text=None, nodelist=None, comment_type=None,
               wrap_blocks=True):
    # text: literal comment text; ignored when nodelist is supplied.
    # nodelist: child nodes whose rendered output becomes the comment text.
    # comment_type: 'doc' selects the doc-comment delimiters from the context.
    # wrap_blocks: reflow paragraphs vs. honoring existing line breaks.
    self._text = text
    self._nodelist = nodelist
    self._comment_type = comment_type
    self._wrap_blocks = wrap_blocks

  def render(self, context):  # pylint: disable=g-bad-name
    """Render the node."""
    the_text = self._text
    if self._nodelist:
      the_text = self._nodelist.render(context)
    return self.RenderText(the_text, context)

  def RenderText(self, text, context):  # pylint: disable=g-bad-name
    """Format text according to the context.

    The strategy is to divide the text into blocks (on blank lines), then
    to format the blocks individually, then reassemble.

    Args:
      text: (str) The text to format.
      context: (django_template.Context) The rendering context.

    Returns:
      The rendered comment.
    """
    if self._comment_type == 'doc':
      # Doc comments fall back to the plain comment delimiters when a
      # language defines no doc-specific ones.
      start_prefix = _GetFromContext(context, _DOC_COMMENT_START,
                                     _COMMENT_START)
      continue_prefix = _GetFromContext(context, _DOC_COMMENT_CONTINUE,
                                        _COMMENT_CONTINUE)
      comment_end = _GetFromContext(context, _DOC_COMMENT_END, _COMMENT_END)
      begin_tag = _GetFromContext(context, _DOC_COMMENT_BEGIN_TAG)
      end_tag = _GetFromContext(context, _DOC_COMMENT_END_TAG)
    else:
      start_prefix = _GetFromContext(context, _COMMENT_START)
      continue_prefix = _GetFromContext(context, _COMMENT_CONTINUE)
      comment_end = _GetFromContext(context, _COMMENT_END)
      begin_tag = ''
      end_tag = ''
    # Wrap to the width remaining after the current indentation.
    available_width = (_GetFromContext(context, _LINE_WIDTH) -
                       context.get(_CURRENT_INDENT, 0))
    return _WrapInComment(
        text,
        wrap_blocks=self._wrap_blocks,
        start_prefix=start_prefix,
        continue_prefix=continue_prefix,
        comment_end=comment_end,
        begin_tag=begin_tag,
        end_tag=end_tag,
        available_width=available_width)
def _WrapInComment(text, wrap_blocks, start_prefix,
                   continue_prefix, comment_end, begin_tag,
                   end_tag, available_width):
  """Lay text out as a comment, emitting a one-liner when everything fits.

  Args:
    text: (str) the comment text.
    wrap_blocks: (bool) if True, reflow blank-line-separated paragraphs;
      otherwise keep the existing line breaks, wrapping only long lines.
    start_prefix: (str) delimiter opening the comment.
    continue_prefix: (str) delimiter prefixed to continuation lines.
    comment_end: (str) delimiter closing the comment ('' if none).
    begin_tag: (str) doc-comment wrapper opening tag (e.g. C# '<summary>').
    end_tag: (str) doc-comment wrapper closing tag.
    available_width: (int) columns remaining at the current indent.

  Returns:
    (str) the formatted comment.
  """
  # If the text has no EOL and is short, it may be a one-liner,
  # though still not necessarily because of other comment overhead.
  if len(text) < available_width and '\n' not in text:
    one_line = '%s%s%s%s%s' % (start_prefix, begin_tag, text, end_tag,
                               comment_end)
    if len(one_line) < available_width:
      return one_line
  wrapper = textwrap.TextWrapper(width=available_width,
                                 replace_whitespace=False,
                                 initial_indent=continue_prefix,
                                 subsequent_indent=continue_prefix)
  text = '%s%s%s' % (begin_tag, text, end_tag)
  continue_rstripped = continue_prefix.rstrip()
  if wrap_blocks:
    # Generator of paragraphs; separator lines carry only the bare prefix.
    blocks = _DivideIntoBlocks(text.split('\n'), '')
    block_joiner = '\n%s\n' % continue_rstripped
  else:
    # One single-line "block" per source line; here blocks is a real list,
    # so the del below is valid.
    blocks = [[l] for l in text.split('\n')]
    # Eliminate spurious blanks at beginning and end,
    # for compatibility with wrap_blocks behavior.
    for idx in (0, -1):
      if blocks and not blocks[idx][0]:
        del blocks[idx]
    block_joiner = '\n'
  wrapped_blocks = []
  for block in blocks:
    t = ' '.join(block)
    if not t.strip():
      # The text wrapper won't apply an indent to an empty string
      wrapped_blocks.append(continue_rstripped)
    else:
      wrapped_blocks.append(wrapper.fill(t))
  ret = ''
  if start_prefix != continue_prefix:
    # Multi-line delimiter style (e.g. '/*' ... ' *'): opener on its own line.
    ret += '%s\n' % start_prefix.rstrip()
  ret += block_joiner.join(wrapped_blocks)
  if comment_end:
    ret += '\n%s' % comment_end
  return ret
class CommentIfNode(DocCommentNode):
  """Node for comments which should only appear if they have text.

  A CommentIf is a pair of a comment style and a variable name. If the variable
  has a value, then a comment will be emitted for it, otherwise nothing is
  emitted.
  """

  def __init__(self, variable_name, comment_type=None):
    super(CommentIfNode, self).__init__(comment_type=comment_type)
    self._variable_name = variable_name

  def render(self, context):  # pylint: disable=g-bad-name
    """Render the node."""
    try:
      text = django_template.resolve_variable(self._variable_name, context)
      if text:
        return self.RenderText(text, context)
    except django_template.base.VariableDoesNotExist:
      # A missing variable is treated the same as an empty one: no comment.
      pass
    return ''
@register.tag(name='comment_if')
def DoCommentIf(unused_parser, token):
  """If a variable has content, emit it as a comment."""
  return CommentIfNode(_GetArgFromToken(token))
@register.tag(name='doc_comment_if')
def DoDocCommentIf(unused_parser, token):
  """If a variable has content, emit it as a document compatible comment."""
  return CommentIfNode(_GetArgFromToken(token), comment_type='doc')
@register.tag(name='doc_comment')
def DoDocComment(parser, token):
  """A block tag for documentation comments.

  Example usage:
    {% doc_comment noblock %}
    With the noblock parameter, line returns will be considered hard returns
    and kept in the output, although long lines will be wrapped.
    Without noblock, contiguous non-empty lines will be wrapped together as
    paragraphs.
    {% enddoc_comment %}

  Args:
    parser: (Parser): A django template parser.
    token: (str): Token passed into the parser.

  Returns:
    (DocCommentNode) A template node.
  """
  args = token.split_contents()
  if len(args) > 2:
    raise django_template.TemplateSyntaxError(
        'doc_comment expects at most one argument')
  wrap_blocks = True
  if len(args) == 2:
    if args[1] not in ('block', 'noblock'):
      raise django_template.TemplateSyntaxError(
          'argument to doc_comment (wrap_blocks) '
          'must be either "block" (the default) or "noblock"')
    wrap_blocks = args[1] == 'block'
  nodelist = parser.parse(('enddoc_comment',))
  parser.delete_first_token()
  return DocCommentNode(nodelist=nodelist, comment_type='doc',
                        wrap_blocks=wrap_blocks)
class CamelCaseNode(django_template.Node):
  """Node for camel casing a variable value."""

  def __init__(self, variable_name):
    super(CamelCaseNode, self).__init__()
    self._variable_name = variable_name

  def render(self, context):  # pylint: disable=g-bad-name
    # Resolve the variable and emit its CamelCased form; a missing or empty
    # variable renders as the empty string.
    try:
      text = django_template.resolve_variable(self._variable_name, context)
      if text:
        return utilities.CamelCase(text)
    except django_template.base.VariableDoesNotExist:
      pass
    return ''
@register.tag(name='camel_case')
def DoCamelCase(unused_parser, token):
  """Emit the CamelCased value of a single context variable."""
  variable_name = _GetArgFromToken(token)
  return CamelCaseNode(variable_name)
class ParameterGetterChainNode(django_template.Node):
  """Node for returning the parameter getter chain of methods.

  The parameter getter chain here refers to the sequence of getters necessary
  to return the specified parameter. For example, for parameter xyz this method
  could return: ".getParent1().getParent2().getParent1().getXyz()".
  The chain is as long as the number of ancestors of the specified parameter.
  """

  def __init__(self, variable_name):
    super(ParameterGetterChainNode, self).__init__()
    self._variable_name = variable_name

  def render(self, context):  # pylint: disable=g-bad-name
    """Render the node."""
    try:
      prop = django_template.resolve_variable(self._variable_name, context)
    except django_template.base.VariableDoesNotExist:
      # No such property in the context: emit nothing.
      return ''
    lang_model = prop.language_model
    parent_pointer = prop.data_type.parent
    getter_chain_list = []
    # Walk from the property's immediate parent up to (but excluding) the
    # root, emitting one getter per ancestor.
    while parent_pointer.parent:
      # Append a getter for an ancestor of the property.
      getter_chain_list.append(
          lang_model.ToPropertyGetterMethodWithDelim(
              parent_pointer.safeClassName))
      # Move the pointer up one level
      parent_pointer = parent_pointer.parent
    # Now append a final getter for the original property.
    getter_chain_list.append(
        lang_model.ToPropertyGetterMethodWithDelim(
            str(prop.GetTemplateValue('wireName'))))
    return ''.join(getter_chain_list)
@register.tag(name='param_getter_chain')
def DoParameterGetterChain(unused_parser, token):
  """Emit the chained getter expression for the named parameter variable."""
  variable_name = _GetArgFromToken(token)
  return ParameterGetterChainNode(variable_name)
class ImportsNode(django_template.Node):
  """Node for outputting language specific imports."""

  def __init__(self, nodelist, element):
    # nodelist: block body containing hand-written import statements.
    # element: (str) context variable name of the element that may carry an
    #   importManager.
    self._nodelist = nodelist
    self._element = element

  def render(self, context):  # pylint: disable=g-bad-name
    """Render the node."""
    explicit_import_text = self._nodelist.render(context)
    # Look for an importManager on the element. If we find one:
    # - scan the import text for import statements
    # - add each to the manager
    # - get the complete import set
    import_lists = None
    try:
      import_manager = django_template.resolve_variable(
          '%s.importManager' % self._element, context)
      import_regex = _GetFromContext(context, _IMPORT_REGEX)
      for line in explicit_import_text.split('\n'):
        match_obj = re.match(import_regex, line)
        if match_obj:
          import_manager.AddImport(match_obj.group('import'))
      import_lists = import_manager.ImportLists()
    except django_template.base.VariableDoesNotExist:
      # Element has no importManager; fall through to the literal text.
      pass
    import_template = _GetFromContext(context, _IMPORT_TEMPLATE)
    if import_lists:
      ret_lists = []
      for import_list in import_lists:
        ret_lists.append(
            '\n'.join([import_template % x for x in import_list]))
      # Each import should be on its own line and each group of imports should
      # be separated by a new line.
      return '\n\n'.join([ret_list for ret_list in ret_lists if ret_list])
    else:
      # We could not find the import lists from an import manager, revert to
      # the original text
      return explicit_import_text.strip()
@register.tag(name='imports')
def Imports(parser, token):
  """If an element has importLists emit them, else emit existing imports.

  Usage:
    {% imports element %} ...hand-written imports... {% endimports %}
  """
  element = _GetArgFromToken(token)
  nodelist = parser.parse(('endimports',))
  parser.delete_first_token()
  return ImportsNode(nodelist, element)
class ParameterListNode(django_template.Node):
  """Node for parameter_list blocks."""

  def __init__(self, nodelist, separator):
    super(ParameterListNode, self).__init__()
    self._nodelist = nodelist
    self._separator = separator

  def render(self, context):  # pylint: disable=g-bad-name
    """Render the node."""
    rendered = self._nodelist.render(context)
    # Split on parameter-begin markers, strip the end markers and trailing
    # whitespace between parameters, and drop parameters that rendered empty.
    pieces = [piece.rstrip().replace(ParameterNode.END, '')
              for piece in rendered.split(ParameterNode.BEGIN)]
    return self._separator.join(piece for piece in pieces if piece)
class ParameterNode(django_template.Node):
  """Node for parameter tags."""
  # Markers so the parameter_list can find me.
  BEGIN = chr(1)
  END = chr(2)

  def __init__(self, nodelist):
    super(ParameterNode, self).__init__()
    self._nodelist = nodelist

  def render(self, context):  # pylint: disable=g-bad-name
    """Render the node."""
    # Attach markers so the enclosing parameter_list can find me
    return self.BEGIN + self._nodelist.render(context).strip() + self.END
@register.tag(name='parameter_list')
def DoParameterList(parser, token):
  """Gather a list of parameter declarations and join them with ','.

  Gathers all 'parameter' nodes until the 'end_parameter_list' tag and joins
  them together with a ', ' separator. Extra white space between nodes is
  removed, but other text is left intact, joined to the end of the preceding
  parameter node. Blank parameters are omitted from the list.

  Usage:
    foo({% parameter_list separator %}{% for p in method.parameters %}
        {{ p.type }} {{ p.name }}
        {% endfor %}
        {% end_parameter_list %})

  Args:
    parser: (parser) the Django parser context.
    token: (django.template.Token) the token holding this tag

  Returns:
    a ParameterListNode
  """
  pieces = token.split_contents()
  # Default separator when the tag carries no (usable) argument.
  separator = pieces[1] if len(pieces) == 2 else ', '
  nodelist = parser.parse(('end_parameter_list',))
  parser.delete_first_token()
  return ParameterListNode(nodelist, separator)
@register.tag(name='parameter')
def DoParameter(parser, unused_token):
  """A single parameter inside a parameter_list block.

  See DoParameterList for a description.

  Args:
    parser: (parser) the Django parser context.
    unused_token: (django.template.Token) the token holding this tag
  Returns:
    a ParameterNode
  """
  body = parser.parse(('end_parameter',))
  parser.delete_first_token()
  return ParameterNode(body)
#
# Tags which include language specific templates
#
class TemplateNode(django_template.Node):
  """Django template Node holding data for writing a per language template.

  The TemplateNode is a variation of an include template that allows for
  per language lookup. The node
  * Looks up the template name w.r.t. the template_dir variable of the current
    context. The calling application must make sure template_dir is valid.
  * evaluates a variable in the current context and binds that value to a
    specific variable in the context
  * renders the template
  * restores the context.
  See individual tag definitions for usage.
  """
  def __init__(self, template_name, bindings):
    """Construct the TemplateNode.

    Args:
      template_name: (str) the name of the template file. This will be resolved
        relative to the 'template_dir' element of the context.
      bindings: (dict) maps names of variables to be bound in the invoked
        template, to the variable from the calling template containing the
        value that should be bound.
    """
    # NOTE(review): the django_template.Node base __init__ is never invoked
    # here -- confirm the base class tolerates that.
    self._template_name = template_name
    self._bindings = bindings
  def render(self, context): # pylint: disable=g-bad-name
    """Render the node."""
    template_path = os.path.join(context['template_dir'], self._template_name)
    # Collect new additions to the context.
    newvars = {}
    # iteritems: this module targets Python 2.
    for target, source in self._bindings.iteritems():
      try:
        newvars[target] = django_template.resolve_variable(source, context)
      except django_template.base.VariableDoesNotExist:
        raise django_template.TemplateSyntaxError(
            'can not resolve %s when calling template %s' % (
                source, self._template_name))
    # Push new variables onto the context stack
    context.update(newvars)
    # Render the result
    try:
      return _RenderToString(template_path, context).rstrip()
    except django_template.TemplateDoesNotExist:
      # Re-raise with the fully resolved path for easier debugging.
      raise django_template.TemplateDoesNotExist(template_path)
    finally:
      # Pop the context stack, undoing the update() above.
      context.pop()
  @classmethod
  def CreateTemplateNode(cls, token, template, bound_variable):
    """Helper function to create a TemplateNode by parsing a tag.

    Args:
      token: (django.template.Token) the token holding this tag
      template: (str) The template name
      bound_variable: (str) the name of a variable to set in the context when
        we invoke the template.
    Returns:
      a TemplateNode
    """
    variable_name = _GetArgFromToken(token)
    return cls(template, {bound_variable: variable_name})
@register.tag(name='call_template')
def CallTemplate(unused_parser, token):
  """Interpret a template with an additional set of variable bindings.

  Evaluates the template named 'template_name.tmpl' with the variables 'name1',
  'name2', etc., bound to the values of the variables 'val1', 'val2'.

  Usage -- either:
    {% call_template template_name name1=val1 name2=val2 %}
  or (for backwards compatibility):
    {% call_template template_name name1 val1 name2 val2 %}
  Mixing the two styles is not allowed.

  Args:
    unused_parser: (parser) the Django parser context.
    token: (django.template.Token) the token holding this tag
  Returns:
    a TemplateNode
  Raises:
    django_template.TemplateSyntaxError: on a missing template name, mixed
      binding styles, or an odd number of positional key/value tokens.
  """
  contents = token.split_contents()
  if len(contents) < 2:
    raise django_template.TemplateSyntaxError(
        'tag requires at least 1 argument, the called template')
  unused_tag, template = contents[:2]
  template_path = '%s.tmpl' % template
  toks = contents[2:]
  if not toks:
    return TemplateNode(template_path, {})
  has_equals = set('=' in t for t in toks)
  # Either all arguments should contain a '=', or none should.
  if len(has_equals) != 1:
    raise django_template.TemplateSyntaxError(
        'use either name1=value1 name2=value2 syntax, '
        'or name1 value1 name2 value2 syntax, but not both')
  if has_equals.pop():
    # If the actual key/value pairs are malformed, let it explode later.
    bindings = dict(tok.split('=', 1) for tok in toks)
  else:
    if len(toks) % 2 != 0:
      raise django_template.TemplateSyntaxError(
          'odd number of keys and values found')
    bindings = dict(zip(toks[0::2], toks[1::2]))
  # Consistency fix: reuse the already-computed template_path instead of
  # re-deriving '%s.tmpl' % template a second time.
  return TemplateNode(template_path, bindings)
@register.tag(name='emit_parameter_doc')
def DoEmitParameterDoc(unused_parser, token):
  """Emit a parameter definition through a language specific template.

  Evaluates a template named '_parameter.tmpl' with the variable 'parameter'
  bound to the value named in the tag.

  Usage:
    {% emit_parameter_doc parameter %}

  Args:
    unused_parser: (parser) the Django parser context
    token: (django.template.Token) the token holding this tag
  Returns:
    a TemplateNode
  """
  return TemplateNode.CreateTemplateNode(
      token, '_parameter.tmpl', 'parameter')
@register.tag(name='copyright_block')
def DoCopyrightBlock(parser, unused_token):
  """Emit a copyright block through a language specific template.

  Emits a copyright and license block. The copyright text is pulled from the
  variable api.copyright at rendering time.

  Usage:
    {% copyright_block %}
  Args:
    parser: (parser) the Django parser context.
    unused_token: (django.template.Token) the token holding this tag
  Returns:
    a DocCommentNode
  """
  # Build a synthetic node list -- the api.copyright variable, a newline, and
  # the static license text -- and wrap it as a doc comment.
  return DocCommentNode(nodelist=django_template.NodeList([
      django_template.base.VariableNode(parser.compile_filter('api.copyright')),
      django_template.base.TextNode('\n'),
      django_template.base.TextNode(_LICENSE_TEXT)
      ]))
class LiteralStringNode(django_template.Node):
  """Django template Node holding a string to be written as a literal."""
  def __init__(self, text):
    """Construct the LiteralStringNode.

    Args:
      text: (list) the variable names containing the text being represented.
    """
    self._variables = text
  def render(self, context): # pylint: disable=g-bad-name
    """Render the node as a quoted, language-escaped string literal."""
    resolve = django_template.resolve_variable
    texts = []
    for v in self._variables:
      try:
        texts.append(resolve(v, context))
      except django_template.base.VariableDoesNotExist:
        # Unresolvable variables are silently skipped rather than failing.
        pass
    text = ''.join(texts)
    # Apply the current language's escape table (special -> replacement).
    for special, replacement in _GetFromContext(context, _LITERAL_ESCAPE):
      text = text.replace(special, replacement)
    start = _GetFromContext(context, _LITERAL_QUOTE_START)
    end = _GetFromContext(context, _LITERAL_QUOTE_END)
    return start + text + end
@register.tag(name='literal')
def DoLiteralString(unused_parser, token):
  """Emit variables as a string literal, escaped for the current language.

  A variable foo containing 'ab<newline>c' would be emitted as "ab\\nc"
  (with no literal newline character). Multiple variables are concatenated.

  Usage:
    {% literal somevar anothervar %}

  Args:
    unused_parser: (parser) the Django parser context
    token: (django.template.Token) the token holding this tag and arguments
  Returns:
    a LiteralStringNode
  """
  # Everything after the tag name is a variable to concatenate.
  return LiteralStringNode(token.split_contents()[1:])
class DataContextNode(django_template.Node):
  """A Django Template Node for resolving context lookup and validation."""
  def __init__(self, variable):
    self._variable = variable  # name of the template variable to resolve
  def render(self, context): # pylint: disable=g-bad-name
    """Make sure this is actually a DataValue-like object and render it."""
    resolve = django_template.resolve_variable
    data = resolve(self._variable, context)
    # Duck-typed check: anything exposing GetLanguageModel() and .value is
    # treated as a DataValue.
    if hasattr(data, 'GetLanguageModel') and hasattr(data, 'value'):
      model = data.GetLanguageModel()
      # TODO(user): Fix the fact that Arrays don't know their language
      # model.
      try:
        return model.RenderDataValue(data)
      except ValueError as e:
        raise django_template.TemplateSyntaxError(
            'Variable (%s) with value (%s) is not an accepted DataValue '
            'type (%s) as exhibited by: ValueError(%s).' %
            (self._variable, data.value, data.data_type, e))
    else:
      raise django_template.TemplateSyntaxError(
          '(%s) is not a DataValue object.' % self._variable)
@register.tag(name='value_of')
def GetValueOf(unused_parser, token):
  """Appropriately wrap a DataValue object for eventual rendering."""
  variable_name = _GetArgFromToken(token)
  return DataContextNode(variable_name)
class BoolNode(django_template.Node):
  """A node for outputting bool values."""
  def __init__(self, variable):
    self._variable = variable  # template variable whose truthiness is emitted
  def render(self, context): # pylint:disable=g-bad-name
    """Render the variable as the current language's boolean literal."""
    data = bool(django_template.resolve_variable(self._variable, context))
    # _BOOLEAN_LITERALS maps False/True to the per-language spellings.
    return _GetFromContext(context, _BOOLEAN_LITERALS)[data]
@register.tag(name='bool')
def DoBoolTag(unused_parser, token):
  """Emit a variable's truthiness as a language-specific boolean literal."""
  return BoolNode(_GetArgFromToken(token))
class DivChecksumNode(django_template.Node):
  """A node for calculating a sha-1 checksum for HTML contents."""
  def __init__(self, id_nodes, body_nodes):
    self._id_nodes = id_nodes  # nodes rendering the div's id attribute
    self._body_nodes = body_nodes  # nodes rendering the div's inner HTML
  def render(self, context): # pylint:disable=g-bad-name
    """Wrap the rendered body in a div carrying its SHA-1 checksum."""
    body = self._body_nodes.render(context)
    element_id = self._id_nodes.render(context)
    # NOTE(review): hashlib.sha1 requires bytes on Python 3; hashing the
    # rendered str only works on Python 2 -- confirm the target runtime.
    checksum = hashlib.sha1(body).hexdigest()
    return ('<div id="%s" checksum="%s">%s</div>' %
            (element_id, checksum, body))
@register.tag(name='checksummed_div')
def DoDivChecksumTag(parser, unused_token):
  """Wraps HTML in a div with its checksum as an attribute.

  The block up to {% divbody %} renders the div's id; the block from there
  to {% endchecksummed_div %} renders the div's body.
  """
  id_section = parser.parse(('divbody',))
  parser.delete_first_token()
  body_section = parser.parse(('endchecksummed_div',))
  parser.delete_first_token()
  return DivChecksumNode(id_section, body_section)
class WriteNode(django_template.Node):
  """A node which writes its contents to a file.

  A Node which evaluates its children and writes that result to a file rather
  than into the current output document. This node does not open files
  directly. Instead, it requires that a file writing method is passed to us
  via the evaluation context. It must be under the key
  template_objects.FILE_WRITER, and be a method with the signature
  func(path, content).
  """
  def __init__(self, nodelist, path_variable):
    self._nodelist = nodelist  # child nodes producing the file content
    self._path_variable = path_variable  # variable holding the output path
  def render(self, context): # pylint: disable=g-bad-name
    """Render the 'write' tag.

    Evaluate the file name, evaluate the content, find the writer, ship it.

    Args:
      context: (Context) the render context.
    Returns:
      An empty string.
    Raises:
      ValueError: If the file writer method can not be found.
    """
    path = django_template.resolve_variable(self._path_variable, context)
    content = self._nodelist.render(context)
    file_writer = _GetFromContext(context, FILE_WRITER)
    if not file_writer:
      # BUG FIX: the key was previously passed as a second positional
      # argument (ValueError('..."%s"...', FILE_WRITER)) so it was never
      # interpolated into the message. Format it explicitly instead.
      raise ValueError(
          '"write" called in a context where "%s" is not defined.'
          % FILE_WRITER)
    file_writer(path, content)
    return ''
@register.tag(name='write')
def DoWrite(parser, token):
  """Construct a WriteNode.

  write is a block tag which diverts the rendered content to a file rather
  than into the current output document.

  Usage:
    {% write file_path_variable %} ... {% endwrite %}

  Args:
    parser: (parser) the Django parser context.
    token: (django.template.Token) the token holding this tag
  Returns:
    a WriteNode
  Raises:
    django_template.TemplateSyntaxError: if the tag does not have exactly
      one argument.
  """
  try:
    unused_tag_name, path = token.split_contents()
  except ValueError:
    # Robustness/consistency fix: a malformed tag previously escaped as a
    # bare ValueError; report it as a template syntax error like the other
    # tags in this module do.
    raise django_template.TemplateSyntaxError(
        'write tag requires exactly 1 argument, the output file path')
  nodelist = parser.parse(('endwrite',))
  parser.delete_first_token()
  return WriteNode(nodelist, path)
| StarcoderdataPython |
11363768 | import pickle
from typing import List, Optional, Union
import numpy as np
import pandas as pd
from spacy.language import Language
from spacy.tokens import Doc, Span, Token
from edsnlp.pipelines.core.matcher import GenericMatcher
from edsnlp.utils.filter import get_spans
from .endlinesmodel import EndLinesModel
from .functional import _get_label, build_path
class EndLines(GenericMatcher):
    """
    spaCy Pipeline to detect whether a newline character should
    be considered a space (ie introduced by the PDF).

    The pipeline will add the extension `end_line` to spans
    and tokens. The `end_line` attribute is a boolean or `None`,
    set to `True` if the pipeline predicts that the new line
    is an end line character. Otherwise, it is set to `False`
    if the new line is classified as a space. If no classification
    has been done over that token, it will remain `None`.

    Parameters
    ----------
    nlp : Language
        spaCy nlp pipeline to use for matching.
    end_lines_model : Optional[Union[str, EndLinesModel]], by default None
        A trained model instance, or a path to a pickled one.
        If None, the bundled default model is used.
    """

    def __init__(
        self,
        nlp: Language,
        end_lines_model: Optional[Union[str, EndLinesModel]],
        **kwargs,
    ):
        # Match every run of newline characters on the raw text; excluded
        # tokens are kept so PDF artefacts are still seen by the matcher.
        super().__init__(
            nlp,
            terms=None,
            attr="TEXT",
            regex=dict(
                new_line=r"\n+",
            ),
            ignore_excluded=False,
            **kwargs,
        )
        if not Token.has_extension("end_line"):
            Token.set_extension("end_line", default=None)
        if not Span.has_extension("end_line"):
            Span.set_extension("end_line", default=None)
        self._read_model(end_lines_model)

    def _read_model(self, end_lines_model: Optional[Union[str, EndLinesModel]]):
        """
        Load the newline classification model.

        Parameters
        ----------
        end_lines_model : Optional[Union[str, EndLinesModel]]
            Model instance, path to a pickled model, or None for the
            bundled default model.

        Raises
        ------
        TypeError
            If `end_lines_model` is of an unsupported type.
        """
        if end_lines_model is None:
            path = build_path(__file__, "base_model.pkl")
            # NOTE: pickle.load is only safe on trusted model files.
            with open(path, "rb") as inp:
                self.model = pickle.load(inp)
        # Idiom fix: `isinstance` instead of `type(...) ==` (also accepts
        # subclasses, which is backward-compatible here).
        elif isinstance(end_lines_model, str):
            with open(end_lines_model, "rb") as inp:
                self.model = pickle.load(inp)
        elif isinstance(end_lines_model, EndLinesModel):
            self.model = end_lines_model
        else:
            raise TypeError(
                "type(`end_lines_model`) should be one of {None, str, EndLinesModel}"
            )

    @staticmethod
    def _spacy_compute_a3a4(token: Token) -> str:
        """Compute the A3/A4 categorical feature for a token.

        Parameters
        ----------
        token : Token

        Returns
        -------
        str
            One of UPPER, S_UPPER, LOWER, ENUMERATION, DIGIT, STRONG_PUNCT,
            SOFT_PUNCT or OTHER.
        """
        if token.is_upper:
            return "UPPER"
        # Shape "Xx..." => capitalized word (sentence-case).
        elif token.shape_.startswith("Xx"):
            return "S_UPPER"
        elif token.shape_.startswith("x"):
            return "LOWER"
        # A digit adjacent to punctuation is treated as a list enumeration.
        elif (token.is_digit) & (
            (token.doc[max(token.i - 1, 0)].is_punct)
            | (token.doc[min(token.i + 1, len(token.doc) - 1)].is_punct)
        ):
            return "ENUMERATION"
        elif token.is_digit:
            return "DIGIT"
        elif (token.is_punct) & (token.text in [".", ";", "..", "..."]):
            return "STRONG_PUNCT"
        elif (token.is_punct) & (token.text not in [".", ";", "..", "..."]):
            return "SOFT_PUNCT"
        else:
            return "OTHER"

    @staticmethod
    def _compute_length(doc: Doc, start: int, end: int) -> int:
        """Compute the character length of doc[start:end] without spaces.

        Parameters
        ----------
        doc : Doc
        start : int
        end : int

        Returns
        -------
        int
        """
        length = 0
        for t in doc[start:end]:
            length += len(t.text)
        return length

    def _get_df(self, doc: Doc, new_lines: List[Span]) -> pd.DataFrame:
        """Build the feature DataFrame consumed by the classifier.

        Parameters
        ----------
        doc : Doc
        new_lines : List[Span]
            Newline spans found by the matcher.

        Returns
        -------
        pd.DataFrame
            One row per newline span with features A1-A4, BLANK_LINE,
            length, B1 (standardized length) and B2 (coefficient of
            variation).
        """
        data = []
        for i, span in enumerate(new_lines):
            start = span.start
            end = span.end
            max_index = len(doc) - 1
            # Tokens immediately around the newline (clamped to doc bounds).
            a1_token = doc[max(start - 1, 0)]
            a2_token = doc[min(start + 1, max_index)]
            a1 = a1_token.orth
            a2 = a2_token.orth
            a3 = self._spacy_compute_a3a4(a1_token)
            a4 = self._spacy_compute_a3a4(a2_token)
            blank_line = "\n\n" in span.text
            if i > 0:
                start_previous = new_lines[i - 1].start + 1
            else:
                start_previous = 0
            # Length of the line, i.e. the text between the previous newline
            # (exclusive) and this one.
            length = self._compute_length(
                doc, start=start_previous, end=start
            )
            data_dict = dict(
                span_start=start,
                span_end=end,
                A1=a1,
                A2=a2,
                A3=a3,
                A4=a4,
                BLANK_LINE=blank_line,
                length=length,
            )
            data.append(data_dict)
        df = pd.DataFrame(data)
        mu = df["length"].mean()
        sigma = df["length"].std()
        # std() is NaN for a single row; fall back to 1 to avoid NaN features.
        if np.isnan(sigma):
            sigma = 1
        # NOTE(review): mu == 0 (all-empty lines) would still divide by zero
        # here -- confirm upstream guarantees non-empty lines.
        cv = sigma / mu
        df["B1"] = (df["length"] - mu) / sigma
        df["B2"] = cv
        return df

    def __call__(self, doc: Doc) -> Doc:
        """
        Predict for each new line if it's an end of line or a space.

        Parameters
        ----------
        doc: spaCy Doc object

        Returns
        -------
        doc: spaCy Doc object, with each new line annotated
        """
        matches = self.process(doc)
        new_lines = get_spans(matches, "new_line")
        if len(new_lines) > 0:
            df = self._get_df(doc=doc, new_lines=new_lines)
            df = self.model.predict(df)
            spans = []
            for span, prediction in zip(new_lines, df.PREDICTED_END_LINE):
                span.label_ = _get_label(prediction)
                span._.end_line = prediction
                spans.append(span)
                for t in span:
                    t._.end_line = prediction
                    # Newlines classified as spaces are excluded downstream.
                    if not prediction:
                        t._.excluded = True
            doc.spans["new_lines"] = spans
        return doc
| StarcoderdataPython |
1961823 | from flask import Flask, render_template, redirect
from flask_pymongo import PyMongo
import scraper
# Flask app
app = Flask(__name__)
# App's connection to MongoDB: local instance, database "mars".
db_name = 'mars'
mongo_uri = 'mongodb://localhost:27017/' + db_name
app.config['MONGO_URI'] = mongo_uri
mongo = PyMongo(app)
# Section marker below is a bare string expression; it has no runtime effect.
""" App Routes """
@app.route('/')
def index():
    """Home page displaying all scraped Mars data."""
    # Most recently inserted document (sorted by insertion timestamp).
    last_doc = mongo.db.mars.find().sort([('last_modified', -1)]).limit(1)
    try:
        return render_template('index.html', mars=last_doc[0])  # pass data to and render home page
    except Exception:
        # Fix: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt. `Exception` keeps the scrape-on-empty-database
        # fallback (indexing an empty cursor raises IndexError) intact.
        return scrape()  # scrape data if database is empty
@app.route('/scrape')
def scrape():
    """Scrape fresh data, store it in MongoDB, and re-render the home page."""
    fresh_data = scraper.scrape_all()
    mongo.db.mars.insert_one(fresh_data)
    return index()
# Run the development server only when executed directly (not on import).
if __name__ == '__main__':
    app.run()
4849715 | <reponame>levitsky/biolccc
from pyteomics import biolccc
# Deriving a new ChemicalBasis instance from a predefined one.
myChemicalBasis = biolccc.ChemicalBasis(
    biolccc.RP_ACN_FA_ROD)
# Changing the bind energy of a chemical group.
# Zeroing E and the C-terminal amide removes their retention contribution.
myChemicalBasis.chemicalGroups()['E'].setBindEnergy(0.0)
myChemicalBasis.chemicalGroups()['-NH2'].setBindEnergy(0.0)
print "The bind energy of E is", \
    myChemicalBasis.chemicalGroups()['E'].bindEnergy()
print "The bind energy of -NH2 is", \
    myChemicalBasis.chemicalGroups()['-NH2'].bindEnergy()
# Adding a new chemical group. The energy is not valid
# (0.40 is a placeholder bind energy, not a calibrated value).
myChemicalBasis.addChemicalGroup(
    biolccc.ChemicalGroup(
        'Hydroxyproline', # full name
        'hoP', # label
        0.40, # bind energy
        97.1167+15.9994, # average mass
        97.05276+15.9994915)) # monoisotopic mass
# Setting a new type of model. Without a massive recalibration
# it will ruin the accuracy of prediction.
myChemicalBasis.setModel(biolccc.CHAIN);
peptide = "Ac-PEhoPTIDE-NH2"
RT = biolccc.calculateRT(peptide,
    myChemicalBasis,
    biolccc.standardChromoConditions)
monoisotopicMass = biolccc.calculateMonoisotopicMass(
    peptide, myChemicalBasis)
print 'The retention time of', peptide, 'is', RT
print 'The monoisotopic mass of', peptide, 'is', monoisotopicMass,'Da'
8197340 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: ss
# @Time: 2020/8/4 4:25 下午
# @File: redis_queue.py
import redis
from setting import REDIS
from lib.common.utils import serialize, deserialize
class RedisQueue(object):
    """FIFO message queue backed by a Redis list.

    Messages are serialized on put (RPUSH) and deserialized on get (LPOP /
    BLPOP), so any picklable object can be queued.
    """
    def __init__(self, queue_name):
        # Redis key under which the list is stored.
        self.queue_name = queue_name
        # One client per queue instance; connection settings come from the
        # project-level REDIS config.
        self.__redis = redis.StrictRedis(host=REDIS['HOST'], port=REDIS['PORT'], password=REDIS['PASSWORD'],
                                         db=REDIS['DB'], socket_connect_timeout=5,
                                         socket_timeout=10, max_connections=10)
    def __len__(self):
        # Queue length == length of the backing Redis list.
        return self.__redis.llen(self.key)
    @property
    def key(self):
        """Redis key for this queue."""
        return self.queue_name
    def clear(self):
        """Delete the whole queue."""
        self.__redis.delete(self.key)
    def consume(self, **kwargs):
        """Yield messages until the queue is empty (or an error occurs)."""
        kwargs.setdefault('block', True)
        kwargs.setdefault('timeout', 1)  # Default: block, time out after one second.
        try:
            while True:
                msg = self.get(**kwargs)
                if msg is None:
                    break
                yield msg
        except Exception as e:
            # NOTE(review): errors are printed and swallowed so consumers stop
            # quietly; consider logging and/or re-raising instead.
            print(e)
            return
    def get(self, block=False, timeout=None):
        """Pop one message; returns None when empty (or on timeout)."""
        if block:
            if timeout is None:
                timeout = 0  # BLPOP: 0 means block forever
            msg = self.__redis.blpop(self.key, timeout=timeout)
            if msg is not None:
                # BLPOP returns a (key, value) tuple.
                msg = msg[1]
        else:
            msg = self.__redis.lpop(self.key)
        if msg is not None:
            msg = deserialize(msg)
        return msg
    def put(self, msg):
        """Append one serialized message to the tail of the queue."""
        self.__redis.rpush(self.key, serialize(msg))
# Manual smoke test: enqueue a few messages, then poll and print them.
if __name__ == '__main__':
    queue = RedisQueue(queue_name="bj:myqueue")
    queue.put({'a': 1})
    queue.put({'a': 1})
    queue.put({'a': 1})
    queue.put({'a': 1})
    queue.put({'a': 1})
    queue.put({'a': 1})
    queue.put({'a': 1})
    # NOTE(review): intentional infinite polling loop for manual testing; the
    # print(1111) is a liveness marker.
    while 1:
        print(1111)
        for msg in queue.consume():
            print(msg)
8167351 | #
# Copyright (C) 2014 eNovance SAS <<EMAIL>>
#
# Author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import unittest
from hardware import generate
class TestGenerate(unittest.TestCase):
    """Tests for hardware.generate: range/value expansion and host model
    generation (is_included, _generate_values, _generate_range, generate,
    generate_dict)."""

    def test_is_included_same(self):
        a = {'a': 1}
        self.assertTrue(generate.is_included(a, a))

    def test_is_included_different(self):
        a = {'a': 1}
        b = {'a': 2}
        # Idiom fix: assertFalse reports failures more clearly than
        # assertTrue(not ...).
        self.assertFalse(generate.is_included(a, b))

    def test_is_included_more(self):
        a = {'a': 1, 'b': 2}
        b = {'a': 1, 'b': 2, 'c': 3}
        self.assertTrue(generate.is_included(a, b))

    def test_generate_ips(self):
        model = '192.168.1.10-12'
        self.assertEqual(list(generate._generate_values(model)),
                         ['192.168.1.10',
                          '192.168.1.11',
                          '192.168.1.12'])

    def test_generate_names(self):
        model = 'host10-12'
        self.assertEqual(list(generate._generate_values(model)),
                         ['host10', 'host11', 'host12'])

    def test_generate_nothing(self):
        # A value with no range expands to itself.
        model = 'host'
        result = generate._generate_values(model)
        self.assertEqual(next(result),
                         'host')

    def test_generate_range(self):
        self.assertEqual(list(generate._generate_range('10-12')),
                         ['10', '11', '12'])

    def test_generate_range_zero(self):
        # Zero-padding is preserved.
        self.assertEqual(list(generate._generate_range('001-003')),
                         ['001', '002', '003'])

    def test_generate_range_colon(self):
        # Colon concatenates multiple sub-ranges.
        self.assertEqual(list(generate._generate_range('1-3:10-12')),
                         ['1', '2', '3', '10', '11', '12'])

    def test_generate_range_colon_reverse(self):
        self.assertEqual(list(generate._generate_range('100-100:94-90')),
                         ['100', '94', '93', '92', '91', '90'])

    def test_generate_range_invalid(self):
        # Non-range strings pass through untouched.
        self.assertEqual(list(generate._generate_range('D7-H.1.0.0')),
                         ['D7-H.1.0.0'])

    def test_generate_norange(self):
        model = {'gw': '192.168.1.1'}
        self.assertEqual(
            generate.generate(model),
            [{'gw': '192.168.1.1'}]
        )

    def test_generate(self):
        model = {'gw': '192.168.1.1',
                 'ip': '192.168.1.10-12',
                 'hostname': 'host10-12'}
        self.assertEqual(
            generate.generate(model),
            [{'gw': '192.168.1.1', 'ip': '192.168.1.10', 'hostname': 'host10'},
             {'gw': '192.168.1.1', 'ip': '192.168.1.11', 'hostname': 'host11'},
             {'gw': '192.168.1.1', 'ip': '192.168.1.12', 'hostname': 'host12'}]
        )

    def test_generate_with_zeros(self):
        model = {'gw': '192.168.1.1',
                 'ip': '192.168.1.1-6',
                 'hostname': 'ceph001-006'}
        self.assertEqual(
            generate.generate(model),
            [{'gw': '192.168.1.1', 'ip': '192.168.1.1', 'hostname': 'ceph001'},
             {'gw': '192.168.1.1', 'ip': '192.168.1.2', 'hostname': 'ceph002'},
             {'gw': '192.168.1.1', 'ip': '192.168.1.3', 'hostname': 'ceph003'},
             {'gw': '192.168.1.1', 'ip': '192.168.1.4', 'hostname': 'ceph004'},
             {'gw': '192.168.1.1', 'ip': '192.168.1.5', 'hostname': 'ceph005'},
             {'gw': '192.168.1.1', 'ip': '192.168.1.6', 'hostname': 'ceph006'},
             ]
        )

    def test_generate_253(self):
        # Two third-octet values x 253 fourth-octet values.
        result = generate.generate({'hostname': '10.0.1-2.2-254'})
        self.assertEqual(
            len(result),
            2 * 253,
            result)

    def test_generate_invalid(self):
        # An unexpandable field must not multiply the result.
        result = generate.generate({'hostname': '10.0.1-2.2-254',
                                    'version': 'D7-H.1.0.0'})
        self.assertEqual(
            len(result),
            2 * 253,
            result)

    def test_generate_list(self):
        result = generate.generate({'hostname': ['hosta', 'hostb', 'hostc']})
        self.assertEqual(
            result,
            [{'hostname': 'hosta'},
             {'hostname': 'hostb'},
             {'hostname': 'hostc'}]
        )

    def test_generate_none(self):
        # A model with no ranges at all is returned as a single entry.
        model = {'gateway': '10.66.6.1',
                 'ip': '10.66.6.100',
                 'netmask': '255.255.255.0',
                 'gateway-ipmi': '10.66.6.1',
                 'ip-ipmi': '10.66.6.110',
                 'netmask-ipmi': '255.255.255.0',
                 'hostname': 'hp-grid'
                 }
        result = generate.generate(model)
        self.assertEqual(result, [model])

    def test_generate_deeper(self):
        # Only keys carrying the prefix are expanded; others are copied.
        model = {'=cmdb':
                 {'gw': False,
                  '=ip': '192.168.1.10-12',
                  '=hostname': 'host10-12'}}
        self.assertEqual(
            generate.generate(model, prefix='='),
            [{'cmdb':
              {'gw': False,
               'ip': '192.168.1.10',
               'hostname': 'host10'}},
             {'cmdb':
              {'gw': False,
               'ip': '192.168.1.11',
               'hostname': 'host11'}},
             {'cmdb':
              {'gw': False,
               'ip': '192.168.1.12',
               'hostname': 'host12'}}]
        )

    def test_generate_hosts(self):
        # Expanded hosts are merged with pre-existing per-host entries.
        model = OrderedDict([('host10', {'foo': 'bar'}),
                             ('=host10-12',
                              {'=cmdb':
                               {'gw': ['192.168.1.1', '192.168.1.2'],
                                '=ip': '192.168.1.10-12'}})])
        self.assertEqual(
            generate.generate_dict(model, prefix='='),
            {'host10':
             {'cmdb':
              {'gw': ['192.168.1.1', '192.168.1.2'],
               'ip': '192.168.1.10'},
              'foo': 'bar'},
             'host11':
             {'cmdb':
              {'gw': ['192.168.1.1', '192.168.1.2'],
               'ip': '192.168.1.11'}},
             'host12':
             {'cmdb':
              {'gw': ['192.168.1.1', '192.168.1.2'],
               'ip': '192.168.1.12'}}}
        )

    def test_generate_tuple(self):
        # Tuple values are opaque: copied as-is into every generated entry.
        disk_struct = (
            {'size_gb': 50,
             'raid_level': '1+0',
             'disk_type': 'hdd',
             'interface_type': 'sas',
             'volume_name': 'root_volume',
             'is_root_volume': 'true'},
            {'size_gb': 100,
             'number_of_physical_disks': 3,
             'raid_level': '5',
             'disk_type': 'hdd',
             'interface_type': 'sas',
             'volume_name': 'data_volume'}
        )
        model = {
            'hostname': 'node1-2',
            'logical_disks': disk_struct
        }
        result = generate.generate(model)
        self.assertEqual(len(result), 2)
        self.assertEqual(result[0]['logical_disks'], disk_struct)
        self.assertEqual(result[1]['logical_disks'], disk_struct)
class TestMerge(unittest.TestCase):
    """Tests for generate.merge, an in-place recursive dictionary merge."""

    def test_merge(self):
        left = {'a': 1}
        generate.merge(left, {'b': 2})
        self.assertEqual(left['b'], 2)

    def test_merge_identical(self):
        left = {'a': 1}
        generate.merge(left, {'a': 2})
        self.assertEqual(left['a'], 2)

    def test_merge_subdict(self):
        left = {'a': {'b': 2}}
        generate.merge(left, {'a': {'c': 3}})
        self.assertEqual(left['a']['c'], 3)

    def test_merge_lists(self):
        left = {'a': [1, 2]}
        generate.merge(left, {'a': [3, 4]})
        self.assertEqual(left['a'], [1, 2, 3, 4])
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
# test_generate.py ends here
| StarcoderdataPython |
381497 | from txtai.embeddings import Embeddings
from txtai.pipeline import Similarity
from txtai.ann import ANN
import os
import json
import numpy as np
import pandas as pd
import logging
import pickle
from gamechangerml.src.text_handling.corpus import LocalCorpus
import torch
logger = logging.getLogger(__name__)
class SentenceEncoder(object):
    """
    Handles text encoding and creating of ANNOY index
    for the initial search

    Args:
        encoder_model (str): Model name supported by huggingface
            and txtai to generate the document embeddings
        use_gpu (bool): Boolean to check if a GPU would be used
    """
    def __init__(self, encoder_model=None, use_gpu=False):
        if encoder_model:
            self.encoder_model = encoder_model
        else:
            # Default sentence-transformer checkpoint.
            self.encoder_model = "sentence-transformers/msmarco-distilbert-base-v2"
        # Only honor use_gpu when CUDA is actually available.
        if use_gpu and torch.cuda.is_available():
            self.use_gpu = use_gpu
        else:
            self.use_gpu = False
        self.embedder = Embeddings(
            {"method": "transformers", "path": self.encoder_model, "gpu": self.use_gpu}
        )
    def _index(self, corpus, index_path, overwrite=False):
        """
        Builds an embeddings index.

        Args:
            corpus: list of (id, text|tokens, tags)
            index_path: Path of where to store and reference
                existing index
            overwrite: Boolean check to predict whether if an
                existing index will be overwritten
        """
        # Transform documents to embeddings vectors
        ids, dimensions, stream = self.embedder.model.index(corpus)
        # Load streamed embeddings back to memory
        embeddings = np.empty((len(ids), dimensions), dtype=np.float32)
        with open(stream, "rb") as queue:
            for x in range(embeddings.shape[0]):
                embeddings[x] = pickle.load(queue)
        # Remove temporary file
        os.remove(stream)
        # Keep (text, paragraph_id) pairs for the searchable dataframe.
        all_text = []
        for para_id, text, _ in corpus:
            all_text.append([text, para_id])
        df = pd.DataFrame(all_text, columns=["text", "paragraph_id"])
        embedding_path = os.path.join(index_path, "embeddings.npy")
        dataframe_path = os.path.join(index_path, "data.csv")
        ids_path = os.path.join(index_path, "doc_ids.txt")
        # Load new data: if an index already exists and we are not
        # overwriting, append the new embeddings/rows/ids to the old ones.
        if os.path.isfile(embedding_path) and (overwrite is False):
            old_embed_path = os.path.join(index_path, "embeddings.npy")
            old_dataframe_path = os.path.join(index_path, "data.csv")
            old_ids_path = os.path.join(index_path, "doc_ids.txt")
            # Load existing embeddings
            old_embeddings = np.load(old_embed_path)
            with open(old_ids_path, "r") as fp:
                old_ids = fp.readlines()
            old_ids = [doc_id[:-1] for doc_id in old_ids]
            # NOTE(review): the original comments claimed duplicate document
            # ids are removed here, but no deduplication is performed -- old
            # and new embeddings/ids are simply concatenated. Confirm whether
            # dedup was intended.
            embeddings = np.vstack((old_embeddings, embeddings))
            # Append new dataframe
            old_df = pd.read_csv(old_dataframe_path)
            df = pd.concat([old_df, df])
            logger.debug(f"New ID Length = {len(ids)}")
            logger.debug(f"Old ID Length = {len(old_ids)}")
            logger.debug(f"New ID Length = {len(ids)}")
            ids = old_ids + ids
            logger.debug(f"Merged ID Length = {len(ids)}")
        # Store embeddings and document index
        # for future reference
        np.save(embedding_path, embeddings)
        with open(ids_path, "w") as fp:
            fp.writelines([i + "\n" for i in ids])
        # Save data csv
        csv_path = os.path.join(index_path, "data.csv")
        df.to_csv(csv_path, index=False)
        # Normalize embeddings
        self.embedder.normalize(embeddings)
        # Save embeddings metadata
        self.embedder.config["ids"] = ids
        self.embedder.config["dimensions"] = dimensions
        # Create embeddings index
        self.embedder.embeddings = ANN.create(self.embedder.config)
        # Build the index
        self.embedder.embeddings.index(embeddings)
    def index_documents(
        self, corpus_path, index_path, min_token_len=10, overwrite=False
    ):
        """
        Create the index and accompanying dataframe to perform text
        and paragraph id search

        Args:
            corpus_path (str): Folder path containing JSON files having
                GAMECHANGER format
            index_path (str): Folder path to where the index of the document
                would be stored
            min_token_len (int): Minimum paragraph token length to index
            overwrite (bool): Whether to replace an existing index
        """
        logging.info(f"Indexing documents from {corpus_path}")
        corp = LocalCorpus(
            corpus_path, return_id=True, min_token_len=min_token_len, verbose=True
        )
        # Each corpus item is (tokens, paragraph_id); join tokens into text.
        self._index(
            [(para_id, " ".join(tokens), None) for tokens, para_id in corp],
            index_path,
            overwrite=overwrite,
        )
        self.embedder.save(index_path)
class SentenceSearcher(object):
    """
    Imports the text index generated by the SentenceEncoder and
    performs the search functionality. Initial set of documents
    are first retrieved through an Annoy index then reranked with
    the similarity model.

    Args:
        index_path (str): Path to index directory generated by the
            SentenceEncoder
        sim_model (str): Model name supported by huggingface
            and txtai to calculate similarity between query and document
    """
    def __init__(self, index_path, sim_model=None):
        if sim_model:
            self.sim_model = sim_model
        else:
            # Default zero-shot NLI model used for reranking.
            self.sim_model = "valhalla/distilbart-mnli-12-3"
        self.embedder = Embeddings()
        self.embedder.load(index_path)
        self.similarity = Similarity(self.sim_model)
        # Paragraph text lookup table written by SentenceEncoder._index.
        self.data = pd.read_csv(os.path.join(index_path, "data.csv"))
    def search(self, query, n_returns=10):
        """
        Search the index and perform a similarity scoring reranker at
        the topn returned documents

        Args:
            query (str): Query text to search in documents
            n_returns (int): Number of documents to return

        Returns:
            results (list): List of dicts with keys 'score', 'id' and
                'text', reranked by similarity with the query
        """
        # Stage 1: ANN retrieval of candidate paragraphs.
        retrieved = self.embedder.search(query, limit=n_returns)
        doc_ids = []
        doc_texts = []
        for doc_id, score in retrieved:
            doc_ids.append(doc_id)
            # Resolve the paragraph text from the stored dataframe.
            text = self.data[self.data["paragraph_id"]
                             == doc_id].iloc[0]["text"]
            doc_texts.append(text)
        # Stage 2: rerank candidates with the cross-similarity model.
        results = []
        for idx, score in self.similarity(query, doc_texts):
            doc = {}
            doc["score"] = score
            doc["id"] = doc_ids[idx]
            doc["text"] = doc_texts[idx]
            results.append(doc)
        return results
| StarcoderdataPython |
6517701 | <reponame>KwanHoo/Data-Structure__Algorithm
# 2072 _ 홀수만 더하기
# 10개의 수 입력 받아 그 중 홀수만 더한 값을 출력
# input
# 1) T:테스트케이스 수
# 2) 테스트 케이스 입력,,,,
# output
# #1 200
# #2 208 ..
# SWEA 2072: for each of T test cases, read a line of integers and
# print the sum of the odd ones as "#<case> <sum>".
T = int(input())
sum_list = []
for _ in range(T):
    numbers = list(map(int, input().split()))
    # n % 2 == 1 matches positive and negative odd numbers in Python.
    sum_list.append(sum(n for n in numbers if n % 2 == 1))
for case, total in enumerate(sum_list, start=1):
    print(f'#{case} {total}')
| StarcoderdataPython |
4949780 | <reponame>Jamesforelock/data-visualizer<gh_stars>0
# @author <NAME> <<EMAIL>>
class SurfaceServiceException(Exception):
    # Marker exception for surface-service failures; carries no extra state.
    pass
| StarcoderdataPython |
6660857 | <reponame>violatingcp/QUASAR<gh_stars>1-10
import os
import sys
import mplhep
class DataHandler(object):
    """Minimal holder for a single constructor argument.

    NOTE(review): placeholder class — ``arg`` is stored but never used in
    this file; confirm intended responsibilities before extending.
    """
    def __init__(self, arg):
        super(DataHandler, self).__init__()
        self.arg = arg  # opaque payload; semantics not visible here
def open_dataframe(directory, option):
    """Load a dataframe from *directory* according to *option*. Not implemented.

    The original stub had no body (a SyntaxError); raising
    NotImplementedError keeps the module importable while preserving the
    intended signature.
    """
    raise NotImplementedError("open_dataframe is not implemented yet")
def normalize_data(data):
    """Normalize *data* for model consumption. Not implemented.

    The original stub had no body (a SyntaxError); raising
    NotImplementedError keeps the module importable while preserving the
    intended signature.
    """
    raise NotImplementedError("normalize_data is not implemented yet")
def load_to_torch():
    """Load prepared data into torch tensors. Not implemented.

    The original stub had no body (a SyntaxError); raising
    NotImplementedError keeps the module importable while preserving the
    intended signature.
    """
    raise NotImplementedError("load_to_torch is not implemented yet")
class AnomalyDetector(object):
    """Skeleton for an anomaly-detection experiment.

    The original class did not compile: ``super(ClassName, ...)`` referenced
    an undefined name, ``__init__`` read an undefined ``model`` variable,
    and ``write``/``add_model`` had no bodies (SyntaxError). This version
    keeps the same attribute surface but is importable; unimplemented
    operations raise NotImplementedError.
    """

    def __init__(self, model=None, data=None, loss=None, outputdir=None, plotdir=None):
        # All parameters default to None so the original no-argument
        # construction ``AnomalyDetector()`` still works.
        super(AnomalyDetector, self).__init__()
        self.model = model
        self.data = data
        self.loss = loss
        self.outputdir = outputdir
        self.plotdir = plotdir

    def _make_parentdir(self):
        # Create the configured output directory.
        # NOTE(review): the original called os.mkdir() with no argument
        # (a TypeError); using self.outputdir is the apparent intent —
        # confirm.
        os.mkdir(self.outputdir)

    def write(self):
        """Persist results to ``outputdir``. Not implemented yet."""
        raise NotImplementedError("write is not implemented yet")

    def add_model(self):
        """Attach a model to the detector. Not implemented yet."""
        raise NotImplementedError("add_model is not implemented yet")
| StarcoderdataPython |
3408726 | import logging
from django.conf import settings
from django.shortcuts import render
logger = logging.getLogger(__name__)
def index(request):
    """Render the landing page using the project-wide default context."""
    return render(request, 'index.html', settings.DEFAULT_CONTEXT)
| StarcoderdataPython |
192399 | <reponame>eabderh/imploder
import sys
import importlib
from exporter import Export
globalize = Export().top()
def implode():
    """Reload and re-export every module flagged with ``__IMPLODE__ = True``.

    Iterates over a snapshot of ``sys.modules`` because
    ``importlib.reload`` can import new modules and mutate ``sys.modules``
    mid-iteration, which would raise ``RuntimeError: dictionary changed
    size during iteration`` when walking the live values view.
    Modules without a ``__spec__`` (e.g. ``__main__``) are globalized
    without reloading.
    """
    for module in list(sys.modules.values()):
        items = module.__dict__.items()
        if ('__IMPLODE__', True) in items:
            print('IMPLODE - ' + module.__name__)
            if module.__spec__ is not None:
                importlib.reload(module)
            globalize.module(module)
def impload(module):
    """Reload *module* and re-export its globals through the globalizer."""
    importlib.reload(module)
    globalize.module(module)
| StarcoderdataPython |
1854281 | <gh_stars>0
import pandas as pd
def create_db(data, save_path):
    """Flatten customer baskets into a fixed-width product table and save as CSV.

    Each entry of *data* is a dict with a 'basket' list of products, each
    product carrying a 'goodsId'. Rows are customers (by position in
    *data*), columns are 'prod0'..'prodN' where N+1 is the largest basket
    size; shorter baskets are padded with -1.
    """
    max_order = len(max(data, key=lambda entry: len(entry['basket']))['basket'])

    def _padded(products):
        # Right-pad with -1 so every row has exactly max_order columns.
        return products + [-1] * (max_order - len(products))

    rows = {
        customer_id: _padded([int(product['goodsId']) for product in basket['basket']])
        for customer_id, basket in enumerate(data)
    }
    frame = pd.DataFrame.from_dict(
        rows,
        columns=['prod{}'.format(i) for i in range(max_order)],
        orient='index',
    )
    frame.to_csv(save_path)
9707708 | <reponame>aubreychen9012/cAAE
import tensorflow as tf
def lrelu(x, alpha):
    """Leaky ReLU: identity for x > 0, slope *alpha* for x < 0."""
    positive = tf.nn.relu(x)
    negative = tf.nn.relu(-x)
    return positive - alpha * negative
def dense(x, n1, n2, name):
    """
    Create a fully-connected layer: out = x @ W + b.

    :param x: input tensor of shape [batch_size, n1]
    :param n1: number of input neurons
    :param n2: number of output neurons
    :param name: variable-scope name for the layer's weights and bias
    :return: tensor of shape [batch_size, n2]
    """
    with tf.variable_scope(name, reuse=None):
        # Weights drawn from N(0, 0.01); bias starts at zero.
        w = tf.get_variable("weights", shape=[n1, n2],
                            initializer=tf.random_normal_initializer(mean=0., stddev=0.01))
        b = tf.get_variable("bias", shape=[n2], initializer=tf.constant_initializer(0.0))
        return tf.add(tf.matmul(x, w), b, name='matmul')
| StarcoderdataPython |
1743867 | import psycopg2
import json
import sys
import psycopg2.extras
def connect_postgres(logger):
    """Open a verified-SSL connection to the IBM Cloud PostgreSQL database.

    Credentials are read from db/key.json. On any failure a warning is
    logged and None is returned (callers must check for a falsy result).
    """
    try:
        with open('db/key.json', 'r') as f:
            key = json.load(f)
        return psycopg2.connect(
            host=key["host"],
            port=key["port"],
            user=key["user"],
            password=key["password"],
            sslmode="verify-full",
            sslrootcert="db/decoded_crt.txt",
            database="ibmclouddb")
    except Exception as inst:
        logger.warning(f'Unable to connect to database \n {type(inst)}, {inst.args}, {inst}')
class Db():
    """
    Thin wrapper around a PostgreSQL connection obtained from
    connect_postgres(); opens a fresh connection per statement.
    """
    def __init__(self, logger):
        """
        Store the logger used for connection/execution warnings.
        (The actual connection is opened lazily in execute().)
        """
        self.logger = logger
    def execute(self, sql, value, fetch):
        """
        Execute *sql* with parameter tuple *value* on a fresh connection.

        *fetch* selects the result handling (it is an int, not a bool):
          1 -> fetch and return all rows (read-only query)
          2 -> fetch all rows, commit, and return them
          anything else -> just commit (no result expected)
        Returns None on error or when nothing is fetched; errors are
        printed and logged, never raised. Cursor and connection are always
        closed in the finally block.
        """
        conn = connect_postgres(self.logger)
        # If the connection failed (conn is None) the method silently
        # returns None.
        if conn:
            # DictCursor lets callers address result columns by name.
            cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
            try:
                cur.execute(sql, value)
                if fetch == 1:
                    result = cur.fetchall()
                    return result
                elif fetch == 2:
                    result = cur.fetchall()
                    conn.commit()
                    return result
                else:
                    conn.commit()
            except Exception as inst:
                print(inst)
                self.logger.warning(f'Unable to execute command:, {sql}, {type(inst)}, {inst.args}, {inst}')
            finally:
                cur.close()
                conn.close()
    def execute_from_file(self, filename, fetch):
        """
        Takes a .sql file with sql commands and executes them,
        passing *fetch* through to self.execute.
        NOTE(review): not implemented — the body is only this docstring.
        """
| StarcoderdataPython |
282784 | # Copyright 2015 The Shaderc Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import expect
from glslc_test_framework import inside_glslc_testsuite
from placeholder import FileShader, StdinShader
@inside_glslc_testsuite('StdInOut')
class VerifyStdinWorks(expect.ValidObjectFile):
    """Tests that glslc compiles a shader read from standard input.

    No filename means no extension to deduce the stage from, so the
    stage is forced with -fshader-stage=vertex.
    """
    shader = StdinShader('#version 140\nvoid main() { }')
    glslc_args = ['-c', '-fshader-stage=vertex', shader]
@inside_glslc_testsuite('StdInOut')
class VerifyStdoutWorks(
    expect.ReturnCodeIsZero, expect.StdoutMatch, expect.StderrMatch):
    """Tests that glslc writes compiled output to standard output via '-o -'."""
    shader = FileShader('#version 140\nvoid main() {}', '.vert')
    glslc_args = [shader, '-o', '-']
    # We expect SOME stdout, we just do not care what.
    expected_stdout = True
    expected_stderr = ''
| StarcoderdataPython |
class Solution(object):
    def alertNames(self, keyName, keyTime):
        """
        Return the sorted names of workers who used their key-card three
        or more times within any one-hour window (LeetCode 1604).

        :type keyName: List[str]
        :type keyTime: List[str]  # "HH:MM", all within one day
        :rtype: List[str]

        Fix over the original string-comparison version: the special-case
        branch for hour 23 required strictly later timestamps
        (``t2 > time``), so three uses at the exact same time (e.g. three
        entries at "23:00") were not flagged. Converting to minutes and
        checking ``times[i+2] - times[i] <= 60`` handles duplicates and
        every hour uniformly.
        """
        # Group use times (in minutes since midnight) per worker.
        times_by_name = {}
        for name, stamp in zip(keyName, keyTime):
            minutes = int(stamp[:2]) * 60 + int(stamp[3:])
            times_by_name.setdefault(name, []).append(minutes)

        flagged = []
        for name, times in times_by_name.items():
            times.sort()
            # Three uses fit in one hour iff some window of three
            # consecutive sorted times spans at most 60 minutes.
            if any(times[i + 2] - times[i] <= 60 for i in range(len(times) - 2)):
                flagged.append(name)
        return sorted(flagged)
| StarcoderdataPython |
116955 | from rest_framework import generics, status
from apple import serializers
class ReceiptTypeQueryView(generics.CreateAPIView):
    """
    post:
    Get the type of an Apple receipt. If the provided receipt data
    corresponds to a receipt that is valid in either the production or
    test environments, then the `environment` key of the response will
    be given as `PRODUCTION` or `SANDBOX`, respectively.

    An invalid receipt will cause a 400 response.
    """
    serializer_class = serializers.ReceiptTypeSerializer

    def create(self, *args, **kwargs):
        """
        Override the parent class' ``create`` method to set the status
        code of the response to 200.
        """
        # CreateAPIView returns 201 by default; this endpoint only
        # queries, so 200 is the appropriate status.
        response = super().create(*args, **kwargs)
        response.status_code = status.HTTP_200_OK
        return response

    def perform_create(self, serializer):
        """
        Override the method to not save the serializer. There is no data
        to save, we just use the ``POST`` request so we can submit a
        large body.
        """
        pass
| StarcoderdataPython |
9716122 | from edna.ingest.streaming.BaseTwitterIngest import BaseTwitterIngest
from edna.serializers.EmptySerializer import EmptyStringSerializer
import requests
from typing import List, Dict
class TwitterFilteredIngest(BaseTwitterIngest):
    """Streams tweets from the Twitter v2 filtered-stream endpoint.

    On construction, any filter rules left on the server from a previous
    run are deleted and replaced with the supplied ones.

    Attributes:
        base_url (str): The endpoint for the streaming or filter request.
    Raises:
        Exception: Raised when filters can't be retrieved, deleted, or added
    """
    base_url = "https://api.twitter.com/2/tweets/search/stream?"

    def __init__(self, serializer: EmptyStringSerializer, bearer_token: str, filters: List[str], tweet_fields: List[str] = None, user_fields: List[str] = None, media_fields: List[str] = None,
                 poll_fields: List[str] = None, place_fields: List[str] = None, *args, **kwargs):
        """Initializes the ingest with the authenticating `bearer_token`,
        the streaming `filters`, and the field lists used to populate the
        received tweet objects.

        Args:
            serializer (EmptyStringSerializer): An empty serializer for convention.
            bearer_token (str): The authenticating v2 bearer token from a Twitter Developer account.
            filters (List[str]): List of filters to apply during streaming.
            tweet_fields (List[str], optional): List of tweet fields to retrieve. Defaults to None.
            user_fields (List[str], optional): List of user fields to retrieve. Defaults to None.
            media_fields (List[str], optional): List of media fields to retrieve. Defaults to None.
            poll_fields (List[str], optional): List of poll fields to retrieve. Defaults to None.
            place_fields (List[str], optional): List of place fields to retrieve. Defaults to None.
        """
        super().__init__(serializer, bearer_token, tweet_fields, user_fields, media_fields, poll_fields, place_fields, *args, **kwargs)
        # Start from a clean slate: drop stale server-side rules, then
        # install the requested ones.
        self.deleteAllFilters(self.getFilters())
        self.setFilters(filters)

    def getFilters(self):
        """Fetch the filter rules currently registered on the server.

        Raises:
            Exception: If the rules cannot be retrieved.
        Returns:
            (Dict): The server's JSON response describing existing rules.
        """
        resp = requests.get(
            "https://api.twitter.com/2/tweets/search/stream/rules", headers=self.headers
        )
        if resp.status_code != 200:
            raise Exception(
                "Cannot get filters (HTTP {}): {}".format(resp.status_code, resp.text)
            )
        return resp.json()

    def deleteAllFilters(self, filters: Dict[str, str]):
        """Delete every rule listed in *filters* (as returned by getFilters).

        Args:
            filters (Dict[str, str]): Rule listing; a None/empty listing is a no-op.
        Raises:
            Exception: If the rules cannot be deleted.
        """
        if filters is None or "data" not in filters:
            return None
        rule_ids = [rule["id"] for rule in filters["data"]]
        resp = requests.post(
            "https://api.twitter.com/2/tweets/search/stream/rules",
            headers=self.headers,
            json={"delete": {"ids": rule_ids}}
        )
        if resp.status_code != 200:
            raise Exception(
                "Cannot delete filters (HTTP {}): {}".format(
                    resp.status_code, resp.text
                )
            )

    def setFilters(self, filters: List[str]):
        """Register *filters* as the active streaming rules.

        Args:
            filters (List[str]): Rule values to install.
        Raises:
            Exception: If the rules cannot be added (server returns non-201).
        """
        rules = [{"value": item} for item in filters]
        resp = requests.post(
            "https://api.twitter.com/2/tweets/search/stream/rules",
            headers=self.headers,
            json={"add": rules},
        )
        if resp.status_code != 201:
            raise Exception(
                "Cannot add filters (HTTP {}): {}".format(resp.status_code, resp.text)
            )
| StarcoderdataPython |
# Jinja2-templated state-machine method: the rendered {{ transition }}
# name delegates to the current state object and returns self so
# transition calls can be chained. (Not valid Python until rendered.)
def {{ transition }}(self):
    self.current.{{ transition }}(self)
    return self
| StarcoderdataPython |
3212094 | <gh_stars>0
import attr
# Minimal attrs demo: @attr.s generates __init__/__repr__/__eq__ from the
# attr.ib() declarations.
@attr.s
class SomeClass(object):
    # Plain scalar default.
    a_number = attr.ib(default=42)
    # Factory gives each instance a fresh list, avoiding the shared
    # mutable-default pitfall.
    list_of_numbers = attr.ib(default=attr.Factory(list))
# Smoke-check the generated defaults.
a = SomeClass()
assert a.a_number == 42
assert isinstance(a.list_of_numbers, list)
| StarcoderdataPython |
61917 | <reponame>harshabakku/live-back-testing-trader
# -*- coding: utf-8 -*-
'''
Author: www.backtest-rookies.com
MIT License
Copyright (c) 2017 backtest-rookies.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
# SQN ranking system
# 1.6 – 1.9 Below average
# 2.0 – 2.4 Average
# 2.5 – 2.9 Good
# 3.0 – 5.0 Excellent
# 5.1 – 6.9 Superb
# 7.0 – Holy Grail?
import backtrader as bt
from datetime import datetime
class firstStrategy(bt.Strategy):
    # RSI mean-reversion: buy 100 shares when the 21-period RSI(SMA)
    # drops below 30, sell when it rises above 70.
    def __init__(self):
        # 21-period RSI computed over a simple moving average of closes.
        self.rsi = bt.indicators.RSI_SMA(self.data.close, period=21)
    def next(self):
        # At most one open position at a time: enter when oversold...
        if not self.position:
            if self.rsi < 30:
                self.buy(size=100)
        else:
            # ...exit when overbought.
            if self.rsi > 70:
                self.sell(size=100)
def printTradeAnalysis(analyzer):
    '''
    Print the TradeAnalyzer results in a nice two-row table.

    Args:
        analyzer: result of bt.analyzers.TradeAnalyzer.get_analysis().

    Fix: the original divided by ``total_closed`` unconditionally, so a
    run with no closed trades raised ZeroDivisionError; the strike rate
    now falls back to 0.0 in that case.
    '''
    # Get the results we are interested in
    total_open = analyzer.total.open
    total_closed = analyzer.total.closed
    total_won = analyzer.won.total
    total_lost = analyzer.lost.total
    win_streak = analyzer.streak.won.longest
    lose_streak = analyzer.streak.lost.longest
    pnl_net = round(analyzer.pnl.net.total, 2)
    # Guard against ZeroDivisionError when no trade has closed yet.
    strike_rate = (total_won / total_closed) * 100 if total_closed else 0.0
    # Designate the rows
    h1 = ['Total Open', 'Total Closed', 'Total Won', 'Total Lost']
    h2 = ['Strike Rate', 'Win Streak', 'Losing Streak', 'PnL Net']
    r1 = [total_open, total_closed, total_won, total_lost]
    r2 = [strike_rate, win_streak, lose_streak, pnl_net]
    # Width the format to the longer header row.
    header_length = max(len(h1), len(h2))
    # Print the rows
    print_list = [h1, r1, h2, r2]
    row_format = "{:<15}" * (header_length + 1)
    print("Trade Analysis Results:")
    for row in print_list:
        print(row_format.format('', *row))
def printSQN(analyzer):
    """Print the System Quality Number rounded to two decimals."""
    print('SQN: {}'.format(round(analyzer.sqn, 2)))
#Variable for our starting cash
startcash = 100000
#Create an instance of cerebro
cerebro = bt.Cerebro()
#Add our strategy
cerebro.addstrategy(firstStrategy)
#Get Apple data from Yahoo Finance.
data = bt.feeds.YahooFinanceData(
    dataname='AAPL',
    fromdate = datetime(2009,1,1),
    todate = datetime(2017,1,1),
    buffered= True
    )
#Add the data to Cerebro
cerebro.adddata(data)
# Set our desired cash start
cerebro.broker.setcash(startcash)
# Add the analyzers we are interested in
# ("ta" feeds printTradeAnalysis, "sqn" feeds printSQN below)
cerebro.addanalyzer(bt.analyzers.TradeAnalyzer, _name="ta")
cerebro.addanalyzer(bt.analyzers.SQN, _name="sqn")
# Run over everything
strategies = cerebro.run()
firstStrat = strategies[0]
# print the analyzers
printTradeAnalysis(firstStrat.analyzers.ta.get_analysis())
printSQN(firstStrat.analyzers.sqn.get_analysis())
#Get final portfolio Value
portvalue = cerebro.broker.getvalue()
#Print out the final result
print('Final Portfolio Value: ${}'.format(portvalue))
#Finally plot the end results
cerebro.plot(style='candlestick')
9727450 | import csv
# Load the whole CSV into memory; `with` guarantees the file handle is
# closed (the original left it open for the life of the program).
with open('Metro_Nashville_Schools.csv', 'r', newline='', encoding='utf-8') as fileObject:
    schoolData = list(csv.reader(fileObject))

inputSchoolName = input("What's the name of the school? ")

found = False
# Row 0 is the header, so data rows start at index 1.
for school in range(1, len(schoolData)):
    if inputSchoolName.lower() in schoolData[school][3].lower():  # column 3 has the school name
        found = True
        # Sum the per-grade enrollments; the grade enrollments are in
        # columns 6 through 20 and blank cells mean "no students".
        totalEnrollment = 0
        for grade in range(6, 21):
            enrollment = schoolData[school][grade]
            # only add the column if it isn't empty
            if enrollment != '':
                totalEnrollment += int(enrollment)  # csv.reader yields strings, so convert
        print(schoolData[school][3] + ' has a total of ' + str(totalEnrollment) + ' students.')
        # Category percentages live in columns 22-31. Guard against
        # division by zero when a school reports no enrollment at all
        # (the original crashed with ZeroDivisionError).
        if totalEnrollment > 0:
            for category in range(22, 32):
                if schoolData[school][category] != '':
                    # fraction of total enrollment as a percent, 1 decimal place
                    numerator = float(schoolData[school][category])
                    value = round(numerator / totalEnrollment * 100, 1)
                    print(schoolData[0][category] + ': ' + str(value) + '%')
        print()  # blank line separates multiple matching schools
if not found:
    print("Didn't find that school")
| StarcoderdataPython |
9619789 | <filename>BAMF_Detect/modules/xtremerat.py
from common import Modules, load_yara_rules, PEParseModule, ModuleMetadata
from struct import unpack
import pefile
class xtreme(PEParseModule):
    """Config extractor for the Xtreme RAT family.

    Pulls the RC4-encrypted "XTREME" RT_RCDATA resource out of a PE
    sample, decrypts it, and parses the version-specific fixed-offset
    layout into a config dict.

    NOTE(review): this module is Python 2 code (``except ValueError, e``
    syntax; ``range(256)`` mutated as a list in rc4crypt) and will not
    run unmodified on Python 3.
    """
    def __init__(self):
        md = ModuleMetadata(
            module_name="xtreme",
            bot_name="Xtreme",
            description="RAT...TO THE EXTREME",
            authors=["kevthehermit"], # https://github.com/kevthehermit/RATDecoders/blob/master/Xtreme.py
            version="1.0.0",
            date="March 25, 2015",
            references=[]
        )
        PEParseModule.__init__(self, md)
        # yara rules are loaded lazily in _generate_yara_rules()
        self.yara_rules = None
        pass
    def _generate_yara_rules(self):
        # Lazy-load and cache the detection rules.
        if self.yara_rules is None:
            self.yara_rules = load_yara_rules("xtreme.yara")
        return self.yara_rules
    @staticmethod
    def run(data):
        """Extract + decrypt the config resource and dispatch to the
        version-specific parser based on the decrypted blob's length.
        Returns a config dict, None for unrecognized layouts, or {} when
        no config resource is present."""
        key = "<KEY>"
        codedConfig = xtreme.configExtract(data)
        if codedConfig is not None:
            rawConfig = xtreme.rc4crypt(codedConfig, key)
            # Blob length identifies the builder version family.
            if len(rawConfig) == 0xe10:
                config = None
            elif len(rawConfig) == 0x1390 or len(rawConfig) == 0x1392:
                config = xtreme.v29(rawConfig)
            elif len(rawConfig) == 0x5Cc:
                config = xtreme.v32(rawConfig)
            elif len(rawConfig) == 0x7f0:
                config = xtreme.v35(rawConfig)
            else:
                config = None
            return config
        else:
            return {}
    @staticmethod
    def rc4crypt(data, key): # modified for bad implemented key length
        """RC4 where the key schedule only ever uses the first 6 key
        bytes (key[i % 6]), matching the malware's broken implementation."""
        x = 0
        box = range(256)
        for i in range(256):
            x = (x + box[i] + ord(key[i % 6])) % 256
            box[i], box[x] = box[x], box[i]
        x = 0
        y = 0
        out = []
        for char in data:
            x = (x + 1) % 256
            y = (y + box[x]) % 256
            box[x], box[y] = box[y], box[x]
            out.append(chr(ord(char) ^ box[(box[x] + box[y]) % 256]))
        return ''.join(out)
    @staticmethod
    def configExtract(rawData):
        """Return the raw bytes of the RT_RCDATA resource named "XTREME",
        or None if the sample is not a PE / has no such resource."""
        try:
            pe = pefile.PE(data=rawData)
            try:
                rt_string_idx = [
                    entry.id for entry in
                    pe.DIRECTORY_ENTRY_RESOURCE.entries].index(pefile.RESOURCE_TYPE['RT_RCDATA'])
            except ValueError, e:
                return None
            except AttributeError, e:
                return None
            rt_string_directory = pe.DIRECTORY_ENTRY_RESOURCE.entries[rt_string_idx]
            for entry in rt_string_directory.directory.entries:
                if str(entry.name) == "XTREME":
                    data_rva = entry.directory.entries[0].data.struct.OffsetToData
                    size = entry.directory.entries[0].data.struct.Size
                    data = pe.get_memory_mapped_image()[data_rva:data_rva+size]
                    return data
        except:
            return None
    @staticmethod
    def v29(rawConfig):
        """Parse the 2.9-family config layout (fixed UTF-16 string
        offsets; C2 ports are little-endian uint32s at the blob start)."""
        dict = {}
        dict["ID"] = xtreme.getUnicodeString(rawConfig, 0x9e0)
        dict["Group"] = xtreme.getUnicodeString(rawConfig, 0xa5a)
        dict["Version"] = xtreme.getUnicodeString(rawConfig, 0xf2e) # use this to recalc offsets
        dict["Mutex"] = xtreme.getUnicodeString(rawConfig, 0xfaa)
        dict["Install Dir"] = xtreme.getUnicodeString(rawConfig, 0xb50)
        dict["Install Name"] = xtreme.getUnicodeString(rawConfig, 0xad6)
        dict["HKLM"] = xtreme.getUnicodeString(rawConfig, 0xc4f)
        dict["HKCU"] = xtreme.getUnicodeString(rawConfig, 0xcc8)
        dict["Custom Reg Key"] = xtreme.getUnicodeString(rawConfig, 0xdc0)
        dict["Custom Reg Name"] = xtreme.getUnicodeString(rawConfig, 0xe3a)
        dict["Custom Reg Value"] = xtreme.getUnicodeString(rawConfig, 0xa82)
        dict["ActiveX Key"] = xtreme.getUnicodeString(rawConfig, 0xd42)
        dict["Injection"] = xtreme.getUnicodeString(rawConfig, 0xbd2)
        dict["FTP Server"] = xtreme.getUnicodeString(rawConfig, 0x111c)
        dict["FTP UserName"] = xtreme.getUnicodeString(rawConfig, 0x1210)
        dict["FTP Password"] = xtreme.getUnicodeString(rawConfig, 0x128a)
        dict["FTP Folder"] = xtreme.getUnicodeString(rawConfig, 0x1196)
        dict["Domain1"] = str(xtreme.getUnicodeString(rawConfig, 0x50)+":"+str(unpack("<I",rawConfig[0:4])[0]))
        dict["Domain2"] = str(xtreme.getUnicodeString(rawConfig, 0xca)+":"+str(unpack("<I",rawConfig[4:8])[0]))
        dict["Domain3"] = str(xtreme.getUnicodeString(rawConfig, 0x144)+":"+str(unpack("<I",rawConfig[8:12])[0]))
        dict["Domain4"] = str(xtreme.getUnicodeString(rawConfig, 0x1be)+":"+str(unpack("<I",rawConfig[12:16])[0]))
        dict["Domain5"] = str(xtreme.getUnicodeString(rawConfig, 0x238)+":"+str(unpack("<I",rawConfig[16:20])[0]))
        dict["Domain6"] = str(xtreme.getUnicodeString(rawConfig, 0x2b2)+":"+str(unpack("<I",rawConfig[20:24])[0]))
        dict["Domain7"] = str(xtreme.getUnicodeString(rawConfig, 0x32c)+":"+str(unpack("<I",rawConfig[24:28])[0]))
        dict["Domain8"] = str(xtreme.getUnicodeString(rawConfig, 0x3a6)+":"+str(unpack("<I",rawConfig[28:32])[0]))
        dict["Domain9"] = str(xtreme.getUnicodeString(rawConfig, 0x420)+":"+str(unpack("<I",rawConfig[32:36])[0]))
        dict["Domain10"] = str(xtreme.getUnicodeString(rawConfig, 0x49a)+":"+str(unpack("<I",rawConfig[36:40])[0]))
        dict["Domain11"] = str(xtreme.getUnicodeString(rawConfig, 0x514)+":"+str(unpack("<I",rawConfig[40:44])[0]))
        dict["Domain12"] = str(xtreme.getUnicodeString(rawConfig, 0x58e)+":"+str(unpack("<I",rawConfig[44:48])[0]))
        dict["Domain13"] = str(xtreme.getUnicodeString(rawConfig, 0x608)+":"+str(unpack("<I",rawConfig[48:52])[0]))
        dict["Domain14"] = str(xtreme.getUnicodeString(rawConfig, 0x682)+":"+str(unpack("<I",rawConfig[52:56])[0]))
        dict["Domain15"] = str(xtreme.getUnicodeString(rawConfig, 0x6fc)+":"+str(unpack("<I",rawConfig[56:60])[0]))
        dict["Domain16"] = str(xtreme.getUnicodeString(rawConfig, 0x776)+":"+str(unpack("<I",rawConfig[60:64])[0]))
        dict["Domain17"] = str(xtreme.getUnicodeString(rawConfig, 0x7f0)+":"+str(unpack("<I",rawConfig[64:68])[0]))
        dict["Domain18"] = str(xtreme.getUnicodeString(rawConfig, 0x86a)+":"+str(unpack("<I",rawConfig[68:72])[0]))
        dict["Domain19"] = str(xtreme.getUnicodeString(rawConfig, 0x8e4)+":"+str(unpack("<I",rawConfig[72:76])[0]))
        dict["Domain20"] = str(xtreme.getUnicodeString(rawConfig, 0x95e)+":"+str(unpack("<I",rawConfig[76:80])[0]))
        return dict
    @staticmethod
    def v32(rawConfig):
        """Parse the 3.2-family config layout (different fixed offsets,
        five C2 slots, adds message-box fields)."""
        dict = {}
        dict["ID"] = xtreme.getUnicodeString(rawConfig, 0x1b4)
        dict["Group"] = xtreme.getUnicodeString(rawConfig, 0x1ca)
        dict["Version"] = xtreme.getUnicodeString(rawConfig, 0x2bc)
        dict["Mutex"] = xtreme.getUnicodeString(rawConfig, 0x2d4)
        dict["Install Dir"] = xtreme.getUnicodeString(rawConfig, 0x1f8)
        dict["Install Name"] = xtreme.getUnicodeString(rawConfig, 0x1e2)
        dict["HKLM"] = xtreme.getUnicodeString(rawConfig, 0x23a)
        dict["HKCU"] = xtreme.getUnicodeString(rawConfig, 0x250)
        dict["ActiveX Key"] = xtreme.getUnicodeString(rawConfig, 0x266)
        dict["Injection"] = xtreme.getUnicodeString(rawConfig, 0x216)
        dict["FTP Server"] = xtreme.getUnicodeString(rawConfig, 0x35e)
        dict["FTP UserName"] = xtreme.getUnicodeString(rawConfig, 0x402)
        dict["FTP Password"] = xtreme.getUnicodeString(rawConfig, 0x454)
        dict["FTP Folder"] = xtreme.getUnicodeString(rawConfig, 0x3b0)
        dict["Domain1"] = str(xtreme.getUnicodeString(rawConfig, 0x14)+":"+str(unpack("<I",rawConfig[0:4])[0]))
        dict["Domain2"] = str(xtreme.getUnicodeString(rawConfig, 0x66)+":"+str(unpack("<I",rawConfig[4:8])[0]))
        dict["Domain3"] = str(xtreme.getUnicodeString(rawConfig, 0xb8)+":"+str(unpack("<I",rawConfig[8:12])[0]))
        dict["Domain4"] = str(xtreme.getUnicodeString(rawConfig, 0x10a)+":"+str(unpack("<I",rawConfig[12:16])[0]))
        dict["Domain5"] = str(xtreme.getUnicodeString(rawConfig, 0x15c)+":"+str(unpack("<I",rawConfig[16:20])[0]))
        dict["Msg Box Title"] = xtreme.getUnicodeString(rawConfig, 0x50c)
        dict["Msg Box Text"] = xtreme.getUnicodeString(rawConfig, 0x522)
        return dict
    @staticmethod
    def v35(rawConfig):
        """Parse the 3.5-family config layout (same fields as v32 with
        slightly shifted offsets)."""
        dict = {}
        dict["ID"] = xtreme.getUnicodeString(rawConfig, 0x1b4)
        dict["Group"] = xtreme.getUnicodeString(rawConfig, 0x1ca)
        dict["Version"] = xtreme.getUnicodeString(rawConfig, 0x2d8)
        dict["Mutex"] = xtreme.getUnicodeString(rawConfig, 0x2f0)
        dict["Install Dir"] = xtreme.getUnicodeString(rawConfig, 0x1f8)
        dict["Install Name"] = xtreme.getUnicodeString(rawConfig, 0x1e2)
        dict["HKLM"] = xtreme.getUnicodeString(rawConfig, 0x23a)
        dict["HKCU"] = xtreme.getUnicodeString(rawConfig, 0x250)
        dict["ActiveX Key"] = xtreme.getUnicodeString(rawConfig, 0x266)
        dict["Injection"] = xtreme.getUnicodeString(rawConfig, 0x216)
        dict["FTP Server"] = xtreme.getUnicodeString(rawConfig, 0x380)
        dict["FTP UserName"] = xtreme.getUnicodeString(rawConfig, 0x422)
        dict["FTP Password"] = xtreme.getUnicodeString(rawConfig, 0x476)
        dict["FTP Folder"] = xtreme.getUnicodeString(rawConfig, 0x3d2)
        dict["Domain1"] = str(xtreme.getUnicodeString(rawConfig, 0x14)+":"+str(unpack("<I",rawConfig[0:4])[0]))
        dict["Domain2"] = str(xtreme.getUnicodeString(rawConfig, 0x66)+":"+str(unpack("<I",rawConfig[4:8])[0]))
        dict["Domain3"] = str(xtreme.getUnicodeString(rawConfig, 0xb8)+":"+str(unpack("<I",rawConfig[8:12])[0]))
        dict["Domain4"] = str(xtreme.getUnicodeString(rawConfig, 0x10a)+":"+str(unpack("<I",rawConfig[12:16])[0]))
        dict["Domain5"] = str(xtreme.getUnicodeString(rawConfig, 0x15c)+":"+str(unpack("<I",rawConfig[16:20])[0]))
        dict["Msg Box Title"] = xtreme.getUnicodeString(rawConfig, 0x52c)
        dict["Msg Box Text"] = xtreme.getUnicodeString(rawConfig, 0x542)
        return dict
    @staticmethod
    def getString(buf,pos):
        """Read a NUL-terminated ASCII string starting at *pos*;
        returns None for an empty string."""
        out = ""
        for c in buf[pos:]:
            if ord(c) == 0:
                break
            out += c
        if out == "":
            return None
        else:
            return out
    @staticmethod
    def getUnicodeString(buf,pos):
        """Read a UTF-16-ish string at *pos*: collect bytes until two
        consecutive non-printable bytes, then strip the NUL padding.
        Returns None for an empty string."""
        out = ""
        for i in range(len(buf[pos:])):
            if not (ord(buf[pos+i]) >= 32 and ord(buf[pos+i]) <= 126) and not (ord(buf[pos+i+1]) >= 32 and ord(buf[pos+i+1]) <= 126):
                out += "\x00"
                break
            out += buf[pos+i]
        if out == "":
            return None
        else:
            return out.replace("\x00", "")
    def get_bot_information(self, file_data):
        """Framework entry point: run the extractor on *file_data* and
        normalize Domain entries into a 'c2s' list of tcp:// URIs."""
        results = {}
        uri_path = None
        domain = None
        s = xtreme.run(file_data)
        if s is not None:
            results = s
            for key in s.keys():
                s[key] = s[key].decode("ascii", errors="replace")
        c2s = set()
        # ":0" means an empty host with port 0, i.e. an unused C2 slot.
        for key in [i for i in results.keys() if i.startswith("Domain") and results[i] != ":0"]:
            c2s.add("tcp://" + results[key])
        if len(c2s) > 0:
            results['c2s'] = []
            for c2 in c2s:
                results['c2s'].append({"c2_uri": c2})
        return results
# Register this decoder instance with the global module registry.
Modules.list.append(xtreme())
11308558 | <gh_stars>0
# Advent of Code 2021 day 2 (part 2): steer the submarine using an "aim".
commands = [line.split(" ") for line in open("day2.input").read().split("\n")]

horizontal = 0
depth = 0
aim = 0
for command in commands:
    direction = command[0]
    if direction == 'forward':
        units = int(command[1])
        horizontal += units
        depth += aim * units
    elif direction == 'down':
        aim += int(command[1])
    elif direction == 'up':
        aim -= int(command[1])
print(horizontal, depth, aim, horizontal * depth)
| StarcoderdataPython |
3331666 | <reponame>D2KLab/touringrec<filename>SetupScript/RNN/ds_manipulation.py
import numpy as np
import pandas as pd
import math
import torch
def reference_to_str(df):
    """Cast the 'reference' column to string, in place, and return *df*.

    Replaces the original row-wise ``df.apply(lambda x: str(x['reference']),
    axis=1)`` — a Python-level loop over every row — with the vectorized
    ``Series.astype(str)``, which produces the same values.
    """
    df['reference'] = df['reference'].astype(str)
    return df
def remove_single_clickout_actions(df):
    """Drop clickout rows that belong to single-action sessions.

    A session consisting only of a clickout carries no interaction
    history to learn from. Prints the row count before and after.
    """
    print('Initial size: ' + str(df.shape[0]))
    counts = df.groupby('session_id').size().reset_index(name='n_actions')
    df = df.merge(counts, left_on='session_id', right_on='session_id', how="left")
    single_clickout = (df["action_type"] == "clickout item") & (df['n_actions'] == 1)
    df = df.drop(df[single_clickout].index)
    print('Final size: ' + str(df.shape[0]))
    # Remove the helper column before handing the frame back.
    del df['n_actions']
    return df
def remove_nonitem_actions(df):
    """Keep only the four item-related action types, dropping everything else."""
    item_actions = (
        'interaction item image',
        'interaction item deals',
        'clickout item',
        'search for item',
    )
    return df.drop(df[~df['action_type'].isin(item_actions)].index)
def reduce_df(df, dim):
    """Return a DataFrame holding only the first *dim* rows of *df*."""
    return pd.DataFrame(df.head(dim))
def get_clickout_data(action, clickout_dict, impression_dict):
    """Record a clickout row's clicked reference and impression list.

    Side effects: stores the reference and the '|'-split impressions
    under the row's session id in the two dicts. Returns the reference.
    """
    session = action.session_id
    clickout_dict[session] = action.reference
    impression_dict[session] = action.impressions.split('|')
    return action.reference
def get_list_session_interactions(group, session_dict):
    """Record the last 200 references of a session group in *session_dict*
    and return all of its references joined with spaces."""
    references = list(group.reference.values)
    # Cap stored history at the 200 most recent interactions.
    session_dict[group.session_id.values[0]] = references[-200:]
    return " ".join(references)
def get_training_input(df_train):
    """Split the training frame into per-session interaction histories and
    final-clickout targets.

    Returns (session_dict, clickout_dict, impression_dict, train_corpus):
    per-session reference lists, the clicked reference of each session's
    last clickout, that clickout's impression list, and the list of all
    session histories.

    NOTE(review): the dicts are filled as a side effect of the .apply
    calls below — the 'result'/'hotel_list' columns exist only to drive
    those side effects.
    """
    clickout_dict = {}
    impression_dict = {}
    session_dict = {}
    # Step number of each session's final clickout.
    df_train['step_max'] = df_train[df_train['action_type'] == 'clickout item'].groupby(['session_id'])['step'].transform(max)
    # Side effect: records clicked reference + impressions per session.
    df_train['result'] = df_train[df_train['step'] == df_train['step_max']].apply(lambda x: get_clickout_data(x, clickout_dict, impression_dict), axis = 1)
    # Side effect: records each session's reference history.
    df_train_corpus = df_train.groupby('session_id').apply(lambda x: get_list_session_interactions(x, session_dict)).reset_index(name = 'hotel_list')
    # Drop the target clickout rows so they are not part of the history.
    df_train = df_train.drop(df_train.index[(df_train['step'] == df_train['step_max']) & (df_train["action_type"] == "clickout item")])
    train_corpus = list(session_dict.values())
    return session_dict, clickout_dict, impression_dict, train_corpus
def get_clickout_data_test(action, clickout_dict, impression_dict):
    """Record a test-set clickout whose reference is NaN (the row to predict).

    For such rows, stores the step number and the '|'-split impressions
    under the session id; rows with a real (numeric) reference are left
    untouched. Always returns the row's reference.
    """
    reference = action.reference
    if not math.isnan(float(reference)):
        return reference
    clickout_dict[action.session_id] = action.step
    impression_dict[action.session_id] = action.impressions.split('|')
    return reference
def get_test_input(df_test):
    """Extract the prediction targets and interaction histories from the
    test frame.

    Returns (test_sessions_dict, test_step_clickout_dict,
    test_impression_dict, test_corpus): per-session reference lists, the
    step of the clickout to predict (the one whose reference is NaN),
    its impression list, and the list of all session histories.

    NOTE(review): like get_training_input, the dicts are filled as a
    side effect of the .apply calls.
    """
    #Creating a NaN column for item recommendations
    df_test['item_recommendations'] = np.nan
    test_step_clickout_dict = {}
    test_impression_dict = {}
    test_sessions_dict = {}
    # Step number of each session's final clickout.
    df_test['step_max'] = df_test[df_test['action_type'] == 'clickout item'].groupby(['session_id'])['step'].transform(max)
    # Side effect: records step + impressions for NaN-reference clickouts.
    df_test['result'] = df_test[df_test['step'] == df_test['step_max']].apply(lambda x: get_clickout_data_test(x, test_step_clickout_dict, test_impression_dict), axis = 1)
    # Drop the target clickout rows before collecting the histories.
    df_test = df_test.drop(df_test.index[(df_test['step'] == df_test['step_max']) & (df_test["action_type"] == "clickout item")])
    # Side effect: records each session's reference history.
    df_test_corpus = df_test.groupby('session_id').apply(lambda x: get_list_session_interactions(x, test_sessions_dict)).reset_index(name = 'hotel_list')
    test_corpus = list(test_sessions_dict.values())
    return test_sessions_dict, test_step_clickout_dict, test_impression_dict, test_corpus
def get_batched_sessions(session_dict, category_dict, batchsize):
    """Partition the session ids of *session_dict* that also appear in
    *category_dict* into consecutive batches of at most *batchsize*.

    Returns a list of lists; the final batch may be shorter. Dict
    insertion order determines the batching order.
    """
    eligible = [sid for sid in session_dict if sid in category_dict]
    return [eligible[i:i + batchsize] for i in range(0, len(eligible), batchsize)]
1779580 | # -*- coding: utf-8 -*-
# Scrapy settings for wsl_spider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'wsl_spider'
SPIDER_MODULES = ['wsl_spider.spiders']
NEWSPIDER_MODULE = 'wsl_spider.spiders'
# Only errors are logged, to keep crawl output quiet
LOG_LEVEL = 'ERROR'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = (
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 '
    '(KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36'
)
# Obey robots.txt rules
# NOTE(review): robots.txt is deliberately ignored -- confirm this is intended
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# 3-second politeness delay between requests to the same site
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'wsl_spider.middlewares.WslSpiderSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'wsl_spider.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
# Pipelines run in ascending priority order: dedupe, year filter, renames,
# then persistence to MongoDB.
ITEM_PIPELINES = {
    'wsl_spider.pipelines.DuplicatesPipeline': 100,
    'wsl_spider.pipelines.FilterByYearPipeline': 200,
    'wsl_spider.pipelines.RenameCourseTermPipeline': 300,
    'wsl_spider.pipelines.RenameCourseSchoolPipeline': 310,
    'wsl_spider.pipelines.RenameCourseLangPipeline': 320,
    'wsl_spider.pipelines.MongoPipeline': 400,
}
MONGO_URI = 'mongodb://localhost:27017/'
MONGO_DB = 'syllabus'
# Change the name of the output collection here
# spring 2018
raw = 'raw_'
year = '2018'
entireYear = 'entire_' + year
# For targeting a single semester
# term = 'spr_first_half_'
# termYear = term + year
# Resulting collection name: 'raw_entire_2018_courses_all'
MONGO_COLLECTION = raw + entireYear + "_courses_all"
MONGO_STATS_COLLECTION = "stats"
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
from PyQt5 import QtWidgets as qwid
from extras.orders_async import *
from ordermanager_interface import OrderManager
from utils import log
# Constants
# NOTE(review): DEFAULT_INSTRUMENT is not referenced anywhere in this file --
# confirm whether OrderManager is expected to consume it.
DEFAULT_INSTRUMENT = 'BTC-PERPETUAL'
# Logging
logger = log.setup_custom_logger(__name__)
class Window(qwid.QMainWindow):
    """Main application window; wires demo buttons to a live OrderManager."""

    def __init__(self, ordermanager=None):
        # OrderManager setup process: use the provided manager, or build a
        # default one (which uses 'BTC-PERPETUAL' as instrument).
        self.om = ordermanager
        if not ordermanager:
            self.om = OrderManager()  # by default uses 'BTC-PERPETUAL' as instrument
        Order.om = self.om
        # BuySellButton.om = self.om # TODO

        # window init call
        super().__init__()
        self.setGeometry(50, 50, 500, 300)
        self.setWindowTitle('PyQt boiiiiiiiii')

        # Layouts
        # NOTE(review): QMainWindow normally requires a central widget;
        # setLayout() on the main window itself may be ignored -- confirm the
        # buttons actually render.
        mktlayout = qwid.QVBoxLayout(self)

        # Buttons
        self.butts = []
        for p in range(3):
            b = qwid.QPushButton(f"Butt for Layout{p}", self)
            # BUGFIX: bind the loop variable as a default argument. The
            # original `lambda: print(p)` late-binds `p`, so every button
            # printed 2. `checked` absorbs the bool emitted by `clicked`.
            b.clicked.connect(lambda checked=False, p=p: print(p))
            mktlayout.addWidget(b)
        self.setLayout(mktlayout)
        self.show()

    def create_button(self, p):
        """Create a button at (100*p, 100*p) that prints `p`, p+1 times."""
        butt = qwid.QPushButton(f"Butt on{p}", self)
        butt.move(p * 100, p * 100)

        def printp():
            # `p` is bound per create_button call, so there is no
            # late-binding issue in this closure.
            for i in range(p + 1):
                print(p)

        butt.clicked.connect(lambda: printp())
        self.butts.append(butt)

    def go(self):
        # NOTE(review): `self.button` is never assigned in this class (only
        # `self.butts` exists), so this raises AttributeError -- confirm
        # intent before calling.
        self.button.setText("CHANGEDDDDD")
        print("blerp")
# Script entry point: build the Qt application, a live OrderManager, and the
# main window, then enter the Qt event loop.
# (A trailing dataset-extraction artifact on the exec_() line was removed.)
app = qwid.QApplication([])
omlol = OrderManager(path_to_keyfile='../../deribit_keys.txt')  # TODO: DELETE LOL
win = Window(omlol)
app.exec_()
from pytd.pytdutils.pytdout.textblock import TextBlock
from typing import List
import time
import random
# Number of distinct text blocks being cycled through.
tex_number = 3
Text1 = """ Wellcome magnificent people
    Glad to see you all here today
    I hope that we would understand each other
"""
Text2 = """ In the middle of the night
    i felt so strongly in my crotch
    that i want to pee..
"""
Text3 = """ Magnificent !!!!
    Truly, Magnificent!
"""
textBlocks: List[TextBlock] = list ()
writenText: List[TextBlock] = list ()
textBlocks.append (TextBlock (Text1))
textBlocks.append (TextBlock (Text2))
textBlocks.append (TextBlock (Text3))
# Repeatedly write randomly-chosen blocks, one per second, remembering the
# order in which they were written.
for rep in range (50):
    for i in range (tex_number):
        key = random.randint (0, tex_number - 1)
        textBlocks[key].write ()
        writenText.append ( textBlocks[key] )
        time.sleep (1)
# Then delete everything that was written, most recent first.
# (A trailing dataset-extraction artifact on the final line was removed.)
for i in range ( len (writenText) ):
    toDeleteTB = writenText.pop ()
    toDeleteTB.delete ()
# Reference:
# http://code.activestate.com/recipes/278258/
def sumList(L):
    """Return the left-to-right `+`-fold of L (numbers, strings, lists...).

    Raises TypeError on an empty list, since reduce has no initial value.
    """
    from functools import reduce  # `reduce` is not a builtin in Python 3
    return reduce(lambda x, y: x + y, L)
def avgList(L):
    """Return the arithmetic mean of the numbers in L as a float."""
    from functools import reduce  # `reduce` is not a builtin in Python 3
    return reduce(lambda x, y: x + y, L) / (len(L) * 1.0)
def normList(L, normalizeTo=1):
    """Scale every value in L so that the largest becomes `normalizeTo`."""
    peak = max(L)
    return [item / (peak * 1.0) * normalizeTo for item in L]
def normListSumTo(L, sumTo=1):
    """Scale every value in L so that the values sum to `sumTo`."""
    from functools import reduce  # `reduce` is not a builtin in Python 3
    total = reduce(lambda x, y: x + y, L)
    return [x / (total * 1.0) * sumTo for x in L]
def accumList(L, normalizeTo=None):
    """Return the running (cumulative) sums of L.

    Examples:
        [1, 2, 3, 4, 5]          -> [1, 3, 6, 10, 15]
        [0.25, 0.25, 0.25, 0.25] -> [0.25, 0.50, 0.75, 1.00]

    normalizeTo: when truthy, first rescale L (via normListSumTo) so that the
    final cumulative value equals this number.
    """
    source = normListSumTo(L, sumTo=normalizeTo) if normalizeTo else L[:]
    running = [source[0]]
    for value in source[1:]:
        running.append(running[-1] + value)
    return running
def findIndex(sortedList, x, indexBuffer=0):
    """Given a sortedList and value x, return the index i where

        sortedList[i-1] <= x < sortedList[i]

    which means `sortedList.insert(findIndex(sortedList, x), x)` keeps the
    list sorted. `indexBuffer` is the offset of the current slice within the
    original list, used by the recursion.
    """
    if len(sortedList) == 2:
        if x == sortedList[-1]:
            return indexBuffer + 2
        elif x >= sortedList[0]:
            return indexBuffer + 1
        else:
            # x sorts before both elements: insert at the front of this slice.
            # (The original fell through here and implicitly returned None.)
            return indexBuffer
    else:
        L = len(sortedList)
        # BUGFIX: use integer division -- in Python 3, `/` yields a float,
        # which is not a valid slice index (this is Python 2-era code).
        mid = L // 2
        firstHalf = sortedList[:mid + 1]
        secondHalf = sortedList[mid:]
        if secondHalf[-1] <= x:
            return indexBuffer + len(sortedList)
        elif x < firstHalf[0]:
            return indexBuffer
        else:
            # Recurse into whichever half brackets x; the halves share their
            # boundary element so no candidate position is lost.
            if firstHalf[-1] < x:
                return findIndex(secondHalf, x, indexBuffer=mid + indexBuffer)
            else:
                return findIndex(firstHalf, x, indexBuffer=indexBuffer)
def randomPickList(L):
    """Randomly pick an index of L, weighted by each value's share of the
    total (all values are assumed to be non-negative numbers)."""
    import random  # BUGFIX: `random` is never imported at module level in this file
    return findIndex(accumList(L, 1), random.random())
def deepList(LString):
    """Given a string representation of a nested list tree, return the
    contents of the deepest (innermost) lists.

    Examples:
        '[[1,[2, 2a]],[[3,3b],4]]'                  -> ['2, 2a', '3,3b']
        '[[[1,[2, 2a]],[[3,3b],4]],6]'              -> ['2, 2a', '3,3b']
        '[[[[a1,a2],out],o1],[o2,o3]]'              -> ['a1,a2', 'o2,o3']
        '[[[[[a1,a2], out], [o1,o2]],[o3,o4]],[o5,o6]]'
                                                    -> ['a1,a2', 'o1,o2', 'o3,o4', 'o5,o6']

    A segment that begins right after a '[' and contains at least one ']'
    was the body of an innermost list; its text before the first ']' is kept.
    """
    pieces = [segment.split(']') for segment in LString.split('[')]
    collected = [parts[0] for parts in pieces if len(parts) > 1]
    # '[]' and similar produce a single empty string; normalize to [].
    if collected == ['']:
        collected = []
    return collected
def getListStartsWith(aList, startsWith, isStrip=1):
    """Return the suffix of aList starting at the first item that starts
    with `startsWith`.

    For L = ['abcdef', 'kkddff', 'xyz', '0wer']:
        getListStartsWith(L, 'kk') -> ['kkddff', 'xyz', '0wer']
        getListStartsWith(L, 'xy') -> ['xyz', '0wer']

    isStrip: when truthy, items are stripped of surrounding whitespace
    before the prefix test (the returned items keep their whitespace).
    If nothing matches, the whole list is returned (as a copy).
    """
    probe = [item.strip() for item in aList] if isStrip else aList[:]
    for i, item in enumerate(probe):
        if item.startswith(startsWith):
            # BUGFIX: return at the FIRST match, as the docstring examples
            # imply; the original kept scanning and used the LAST match.
            return aList[i:]
    return aList[0:]
def rezip(aList):
    """Transpose a list of rows into a list of columns.

    d = [[1, 5, 8, 3], [2, 2, 3, 9], [3, 2, 4, 6]]
    rezip(d) -> [[1, 2, 3], [5, 2, 2], [8, 3, 4], [3, 9, 6]]

    Unlike zip(*aList), the result contains lists, and the column count is
    taken from the first row (shorter later rows raise IndexError).
    """
    width = len(aList[0])
    return [[row[col] for row in aList] for col in range(width)]
def sumInList(complexList):
    """Given [[a1,b1,c1], [a2,b2,c2], [a3,b3,c3]], return [a, b, c] where
    a = a1+a2+a3 (column-wise `+`-fold), etc."""
    from functools import reduce  # `reduce` is not a builtin in Python 3
    d = rezip(complexList)
    return [reduce(lambda x, y: x + y, z) for z in d]
def avgInList(complexList):
    """Given [[a1,b1,c1], [a2,b2,c2], [a3,b3,c3]], return [a, b, c] where
    a = mean(a1, a2, a3) as a float, etc."""
    from functools import reduce  # `reduce` is not a builtin in Python 3
    d = rezip(complexList)
    return [reduce(lambda x, y: x + y, z) / (len(z) * 1.0) for z in d]
## requires positive values (0 counts)
def max_value_in_list(list):
    """Return the largest value in `list`, or the sentinel -1 when empty.

    NOTE: per the module comment, this "requires positive values (0 counts)":
    any value below -1 can never beat the sentinel. The parameter shadows the
    builtin `list`; the name is kept to preserve the public signature.
    """
    # The original also tracked max_index here, but never used it -- removed.
    max_value = -1
    for value in list:
        if value > max_value:
            max_value = value
    return max_value
def max_index_in_list(list):
    """Return the index of the largest value (first occurrence on ties);
    0 for an empty list. Values are expected to be >= 0 (sentinel is -1)."""
    best_value = -1
    best_index = 0
    for position, value in enumerate(list):
        if value > best_value:
            best_value = value
            best_index = position
    return best_index
def min_value_in_list(list):
    """Return the smallest value in `list`, or the sentinel 99999999 when
    empty (values at or above the sentinel cannot be detected).

    The parameter shadows the builtin `list`; kept for signature stability.
    """
    # The original declared min_index here but never used it -- removed.
    min_value = 99999999
    for value in list:
        if value < min_value:
            min_value = value
    return min_value
from django.test import TestCase
from consent.models import Consent
import random
import string
from django.db import models
class ConsentCleanintTestCase(TestCase):
    """Checks that over-long Consent CharField values are truncated to their
    declared max_length when saved."""

    def __init__(self, method_name):
        self.long_displayname = self._create_long_string(Consent.SP_DISPLAYNAME_LENGTH + 1)
        self.shortened_displayname = self.long_displayname[:Consent.SP_DISPLAYNAME_LENGTH]
        super().__init__(method_name)

    @staticmethod
    def _create_long_string(length):
        """Build a random ASCII-letter string of the requested length."""
        return ''.join(random.choice(string.ascii_letters) for _ in range(length))

    def test_save(self):
        # A uid longer than UID_MAX_LENGTH should be stored truncated,
        # so the record is retrievable by the truncated key.
        uid = self._create_long_string(2048)
        truncated_uid = uid[:Consent.UID_MAX_LENGTH]
        consent = Consent(
            uid=uid,
            displayname=self._create_long_string(2048),
            consentid=self._create_long_string(2048),
            entityID=self._create_long_string(2048),
            sp_displayname=self._create_long_string(2048),
        )
        consent.save()
        stored = Consent.objects.get(uid=truncated_uid)
        # Every CharField value must fit within its declared max_length.
        for field in stored._meta.fields:
            if isinstance(field, models.CharField):
                full = "{}:{}".format(field.attname, getattr(stored, field.attname))
                clipped = "{}:{}".format(field.attname, getattr(stored, field.attname)[:field.max_length])
                self.assertEqual(clipped, full)
        return None
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'GetSchedulerPolicyResult',
'AwaitableGetSchedulerPolicyResult',
'get_scheduler_policy',
]
@pulumi.output_type
class GetSchedulerPolicyResult:
    """
    A collection of values returned by getSchedulerPolicy.
    """
    def __init__(__self__, id=None, memory_oversubscription_enabled=None, preemption_config=None, scheduler_algorithm=None):
        # Defensive type checks: these values arrive from the provider RPC layer.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if memory_oversubscription_enabled and not isinstance(memory_oversubscription_enabled, bool):
            raise TypeError("Expected argument 'memory_oversubscription_enabled' to be a bool")
        pulumi.set(__self__, "memory_oversubscription_enabled", memory_oversubscription_enabled)
        if preemption_config and not isinstance(preemption_config, dict):
            raise TypeError("Expected argument 'preemption_config' to be a dict")
        pulumi.set(__self__, "preemption_config", preemption_config)
        if scheduler_algorithm and not isinstance(scheduler_algorithm, str):
            raise TypeError("Expected argument 'scheduler_algorithm' to be a str")
        pulumi.set(__self__, "scheduler_algorithm", scheduler_algorithm)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="memoryOversubscriptionEnabled")
    def memory_oversubscription_enabled(self) -> bool:
        # Boolean cluster setting; semantics defined by Nomad's operator API
        # (see the link in get_scheduler_policy's docstring).
        return pulumi.get(self, "memory_oversubscription_enabled")
    @property
    @pulumi.getter(name="preemptionConfig")
    def preemption_config(self) -> Mapping[str, bool]:
        # Mapping of name -> enabled flags; semantics defined by Nomad's
        # operator API.
        return pulumi.get(self, "preemption_config")
    @property
    @pulumi.getter(name="schedulerAlgorithm")
    def scheduler_algorithm(self) -> str:
        # Name of the scheduler algorithm currently configured.
        return pulumi.get(self, "scheduler_algorithm")
class AwaitableGetSchedulerPolicyResult(GetSchedulerPolicyResult):
    # Awaitable wrapper: `__await__` is a generator that never actually
    # yields (the `if False` makes it a generator function) and immediately
    # returns a plain GetSchedulerPolicyResult copy of this object.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetSchedulerPolicyResult(
            id=self.id,
            memory_oversubscription_enabled=self.memory_oversubscription_enabled,
            preemption_config=self.preemption_config,
            scheduler_algorithm=self.scheduler_algorithm)
def get_scheduler_policy(opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSchedulerPolicyResult:
    """
    Retrieve the cluster's [scheduler configuration](https://www.nomadproject.io/api-docs/operator#sample-response-3).
    ## Example Usage
    ```python
    import pulumi
    import pulumi_nomad as nomad
    global_ = nomad.get_scheduler_policy()
    ```
    """
    # This data source takes no user-supplied arguments.
    __args__ = dict()
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Invoke the provider function and unpack its typed result.
    __ret__ = pulumi.runtime.invoke('nomad:index/getSchedulerPolicy:getSchedulerPolicy', __args__, opts=opts, typ=GetSchedulerPolicyResult).value
    return AwaitableGetSchedulerPolicyResult(
        id=__ret__.id,
        memory_oversubscription_enabled=__ret__.memory_oversubscription_enabled,
        preemption_config=__ret__.preemption_config,
        scheduler_algorithm=__ret__.scheduler_algorithm)
# lib/muchos/config/decorators.py
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections.abc import Iterable
from functools import wraps
from pydoc import locate
# struct to hold information about ansible vars defined via decorators.
# var_name indicates the desired variable name
# class_name indicates the class name where the variable was defined
# property_name indicates the class property/function where the variable was
# defined
class _ansible_var(object):
def __init__(self, var_name, class_name, property_name, module_name):
self.var_name = var_name
self.class_name = class_name
self.property_name = property_name
self.module_name = module_name
def __str__(self):
return 'var_name={}, class_name={}, property_name={}, module_name={}'.format(
self.var_name, self.class_name, self.property_name, self.module_name
)
# each entry of _ansible_vars will contain a list of _ansible_var instances,
# keyed by the ansible variable scope it belongs to
_ansible_vars = dict(
    host=[],   # ansible hosts inventory variables
    play=[],   # ansible group/all variables
    extra=[]   # ansible extra variables
)
def get_ansible_vars(var_type, class_in_scope):
    """Return the registered variables of `var_type` whose declaring class is
    `class_in_scope` or one of its ancestors (i.e. variables for the complete
    class hierarchy)."""
    candidates = _ansible_vars.get(var_type)
    return [var for var in candidates
            if issubclass(class_in_scope, locate(var.module_name + "." + var.class_name))]
# ansible hosts inventory variables
def ansible_host_var(name=None):
    """Decorator: register the decorated property as an ansible hosts
    inventory variable (optionally under an explicit `name`)."""
    return ansible_var_decorator('host', name)
# ansible group/all variables
def ansible_play_var(name=None):
    """Decorator: register the decorated property as an ansible group/all
    (play) variable (optionally under an explicit `name`)."""
    return ansible_var_decorator('play', name)
# ansible extra variables
def ansible_extra_var(name=None):
    """Decorator: register the decorated property as an ansible extra
    variable (optionally under an explicit `name`)."""
    return ansible_var_decorator('extra', name)
def ansible_var_decorator(var_type, name):
    """Build a decorator that records the decorated function in
    `_ansible_vars[var_type]`.

    Supports both the bare form (@ansible_host_var -- `name` is then the
    function itself) and the called form (@ansible_host_var('my_name')).
    """
    def _decorator(func):
        _ansible_vars[var_type].append(_ansible_var(
            var_name=name if isinstance(name, str) else func.__name__,
            class_name=func.__qualname__.split('.')[0],
            property_name=func.__name__,
            module_name=func.__module__))
        return func
    # Bare usage: `name` is actually the decorated function.
    return _decorator(name) if callable(name) else _decorator
def default(val):
    """Decorator factory: substitute `val` when the wrapped call raises, or
    returns None / an empty string.

    BUGFIX: only `Exception` subclasses are caught now -- the original bare
    `except` also swallowed SystemExit and KeyboardInterrupt.
    """
    def _default(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                res = func(*args, **kwargs)
            except Exception:
                return val
            if res is None or (isinstance(res, str) and len(res) == 0):
                return val
            return res
        return wrapper
    return _default
def required(func):
    """Decorator: raise ConfigMissingError when the wrapped call yields a
    missing/empty value (None, 0, '' or any empty container).

    BUGFIX: the original evaluated `len(res)` unconditionally, which raised
    TypeError for unsized truthy values such as non-zero ints.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        res = func(*args, **kwargs)
        if res in [None, 0, ''] or (hasattr(res, '__len__') and len(res) == 0):
            raise ConfigMissingError(func.__name__)
        return res
    return wrapper
def is_valid(validators):
    """Decorator factory: after the wrapped call, apply every validator to
    the result and raise when any of them does not return True.

    `validators` may be a single callable or an iterable of callables.
    """
    if not isinstance(validators, Iterable):
        validators = [validators]
    def _validate(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            res = func(*args, **kwargs)
            failures = [check for check in validators if check(res) is not True]
            if failures:
                raise Exception("{}={} checked validation {}".format(
                    func.__name__, res,
                    [str(v) for v in failures]))
            return res
        return wrapper
    return _validate
class ConfigMissingError(Exception):
    """Raised when a required configuration value is absent or empty."""
    def __init__(self, name):
        message = "{} is missing from the configuration".format(name)
        super().__init__(message)
# coding=utf-8
import pytest
from sqlalchemy.exc import IntegrityError
import marcottievents.models.common.personnel as mcp
import marcottievents.models.common.match as mcm
def test_match_generic_insert(session, match_data):
    """A match built from the fixture data gets the documented defaults."""
    match = mcm.Matches(**match_data)
    session.add(match)
    match_from_db = session.query(mcm.Matches).one()
    # Defaults: two 45-minute halves, no extra time, no attendance recorded.
    assert match_from_db.first_half_length == 45
    assert match_from_db.second_half_length == 45
    assert match_from_db.first_extra_length == 0
    assert match_from_db.second_extra_length == 0
    assert match_from_db.attendance == 0
    assert match_from_db.phase == 'matches'
def test_match_negative_time_error(session, match_data):
    """Negative period lengths must violate the table's integrity checks."""
    for field in ['first_half_length', 'second_half_length', 'first_extra_length', 'second_extra_length']:
        new_match_data = dict(match_data, **{field: -1})
        match = mcm.Matches(**new_match_data)
        with pytest.raises(IntegrityError):
            session.add(match)
            session.commit()
        # Reset the failed transaction before trying the next field.
        session.rollback()
def test_match_negative_attendance_error(session, match_data):
    """A negative attendance must violate the table's integrity checks."""
    new_match_data = dict(match_data, **{'attendance': -1})
    match = mcm.Matches(**new_match_data)
    with pytest.raises(IntegrityError):
        session.add(match)
        session.commit()
    # Reset the failed transaction so later fixtures stay usable.
    session.rollback()
def test_match_conditions_insert(session, match_data, match_condition_data):
    """Conditions attached to a match round-trip and render the expected repr."""
    match_condition_data['match'] = mcm.Matches(**match_data)
    match_conditions = mcm.MatchConditions(**match_condition_data)
    session.add(match_conditions)
    match_from_db = session.query(mcm.Matches).one()
    conditions_from_db = session.query(mcm.MatchConditions).one()
    # The repr embeds the generated match id plus fixture values.
    assert repr(conditions_from_db) == "<MatchCondition(id={}, kickoff=19:30, temp=15.0, " \
                                      "humid=68.0, kickoff_weather=Partly Cloudy)>".format(match_from_db.id)
def test_match_conditions_temp_error(session, match_data, match_condition_data):
    """Kickoff temperatures outside the permitted range are rejected."""
    match_condition_data['match'] = mcm.Matches(**match_data)
    # One value below and one above the accepted range.
    for out_of_range in [-20.0, 55.0]:
        match_condition_data['kickoff_temp'] = out_of_range
        match_conditions = mcm.MatchConditions(**match_condition_data)
        with pytest.raises(IntegrityError):
            session.add(match_conditions)
            session.commit()
        session.rollback()
def test_match_conditions_humid_error(session, match_data, match_condition_data):
    """Kickoff humidity outside the permitted range is rejected."""
    match_condition_data['match'] = mcm.Matches(**match_data)
    # One value below and one above the accepted range.
    for out_of_range in [-1.0, 102.0]:
        match_condition_data['kickoff_humidity'] = out_of_range
        match_conditions = mcm.MatchConditions(**match_condition_data)
        with pytest.raises(IntegrityError):
            session.add(match_conditions)
            session.commit()
        session.rollback()
def test_match_lineup_generic_insert(session, match_data, person_data, position_data):
    """A bare lineup entry defaults to non-starter/non-captain and links its FKs."""
    lineup = mcm.MatchLineups(
        match=mcm.Matches(**match_data),
        player=mcp.Players(**person_data['player'][1]),
        position=position_data[1]
    )
    session.add(lineup)
    lineup_from_db = session.query(mcm.MatchLineups).one()
    match_from_db = session.query(mcm.Matches).one()
    player_from_db = session.query(mcp.Players).one()
    assert lineup_from_db.is_starting is False
    assert lineup_from_db.is_captain is False
    # Foreign keys must point at the rows that were persisted above.
    assert lineup_from_db.match_id == match_from_db.id
    assert lineup_from_db.player_id == player_from_db.id
def test_lineup_designate_captain(session, match_data, person_data, position_data):
    """Exactly one lineup entry -- the chosen index -- is flagged captain."""
    capn_indx = 1
    lineups = [
        mcm.MatchLineups(
            match=mcm.Matches(**match_data),
            player=mcp.Players(**plyr),
            position=pos,
            is_starting=True,
            is_captain=(j == capn_indx))
        for j, (plyr, pos) in enumerate(zip(person_data['player'], position_data))
    ]
    session.add_all(lineups)
    # Look the captain up by his position name, which is unique per fixture.
    capn_position = position_data[capn_indx]
    lineup_from_db = session.query(mcm.MatchLineups).join(mcp.Positions).filter(
        mcp.Positions.name == capn_position.name).all()
    assert len(lineup_from_db) == 1
    assert lineup_from_db[0].is_captain is True
    # Everyone else must not be captain.
    other_lineup_from_db = session.query(mcm.MatchLineups).join(mcp.Positions).filter(
        mcp.Positions.name != capn_position.name).all()
    for others in other_lineup_from_db:
        assert others.is_captain is False
def test_lineup_designate_starter(session, match_data, person_data, position_data):
    """Exactly one lineup entry -- the chosen index -- is flagged starter."""
    starter_indx = 0
    lineups = [
        mcm.MatchLineups(
            match=mcm.Matches(**match_data),
            player=mcp.Players(**plyr),
            position=pos,
            is_starting=(j == starter_indx))
        for j, (plyr, pos) in enumerate(zip(person_data['player'], position_data))
    ]
    session.add_all(lineups)
    # Look the starter up by his position name, which is unique per fixture.
    starter_position = position_data[starter_indx]
    lineup_from_db = session.query(mcm.MatchLineups).join(mcp.Positions).filter(
        mcp.Positions.name == starter_position.name).all()
    assert len(lineup_from_db) == 1
    assert lineup_from_db[0].is_starting is True
    assert lineup_from_db[0].is_captain is False
    # Everyone else must be neither starter nor captain.
    other_lineup_from_db = session.query(mcm.MatchLineups).join(mcp.Positions).filter(
        mcp.Positions.name != starter_position.name).all()
    for others in other_lineup_from_db:
        assert others.is_starting is False
        assert others.is_captain is False
__author__ = 'rbalda'
from core.modules import BaseClass
import copy
class OptionsUpdate(BaseClass):
    """Flags user-controlled input flowing into WordPress update_option()."""
    name = "WordPress Options Update"
    severity = "High"
    # Sink functions this check looks for.
    functions = [
        "update_option",
    ]
    blacklist = []
    def build_pattern(self, content, file):
        """Build a regex that matches any sink function whose first argument
        is one of the known tainted inputs (built-in sources plus variables
        discovered in `content`)."""
        sources = copy.deepcopy(self.user_input)
        discovered = self.get_input_variables(self, content)
        if discovered:
            sources.extend(discovered)
        return r"((" + '|'.join(self.functions) + ")\s{0,}\(\s{0,}(" + '|'.join(sources) + ").*)"
# -*- coding: utf-8 -*-
import furl
import urllib
import urlparse
import bson.objectid
import httplib as http
import itsdangerous
from werkzeug.local import LocalProxy
from weakref import WeakKeyDictionary
from flask import request, make_response
from framework.flask import redirect
from website import settings
from .model import Session
def add_key_to_url(url, scheme, key):
    """Return `url` with the given view-only `key` appended to its query
    parameters (used to redirect the user back with the key preserved).

    `scheme` optionally overrides the URL scheme (e.g. from the referrer).
    """
    query = request.args.to_dict()
    query['view_only'] = key
    replacements = {'query': urllib.urlencode(query)}
    if scheme:
        replacements['scheme'] = scheme
    parsed_url = urlparse.urlparse(url)
    if parsed_url.fragment:
        # A fragment here means someone put a literal '#' in the URL that the
        # server should see; WSGI auto-unescapes it, so re-encode the hash
        # (%23) and shove the fragment back into the path.
        replacements['path'] = '{}%23{}'.format(parsed_url.path, parsed_url.fragment)
        replacements['fragment'] = ''
    parsed_redirect_url = parsed_url._replace(**replacements)
    return urlparse.urlunparse(parsed_redirect_url)
def prepare_private_key():
    """`before_request` handler that checks the Referer header to see if the user
    is requesting from a view-only link. If so, reappend the view-only key.
    NOTE: In order to ensure the execution order of the before_request callbacks,
    this is attached in website.app.init_app rather than using
    @app.before_request.
    """
    # Nothing to do for non-GET requests.
    if request.method != 'GET':
        return
    # Nothing to do if the view-only key is already in the query string.
    key_from_args = request.args.get('view_only', '')
    if key_from_args:
        return
    # Recover the view-only key (and scheme) from the referring page's query
    # string, for users who are not logged in.
    if request.referrer:
        referrer_parsed = urlparse.urlparse(request.referrer)
        scheme = referrer_parsed.scheme
        key = urlparse.parse_qs(
            urlparse.urlparse(request.referrer).query
        ).get('view_only')
        if key:
            key = key[0]
    else:
        scheme = None
        key = None
    # Re-append the key and redirect; authenticated users don't need it.
    if key and not session.is_authenticated:
        new_url = add_key_to_url(request.url, scheme, key)
        return redirect(new_url, code=http.TEMPORARY_REDIRECT)
def get_session():
    """Return the Session bound to the current request, creating (and
    registering) a fresh one if none exists yet."""
    session = sessions.get(request._get_current_object())
    if not session:
        session = Session()
        set_session(session)
    return session
def set_session(session):
    """Bind `session` to the current request object.
    Keyed by the concrete request so the entry disappears with the request
    (`sessions` is a WeakKeyDictionary)."""
    sessions[request._get_current_object()] = session
def create_session(response, data=None):
    """Create or update the current session, persist it, and (when a
    `response` is given) attach the signed session-id cookie to it.

    Returns `response` unchanged apart from the cookie.
    """
    current_session = get_session()
    if current_session:
        # Reuse the existing session: merge in `data` and re-sign its id.
        current_session.data.update(data or {})
        current_session.save()
        cookie_value = itsdangerous.Signer(settings.SECRET_KEY).sign(current_session._id)
    else:
        # No session yet: mint a new ObjectId-based id and register it.
        session_id = str(bson.objectid.ObjectId())
        session = Session(_id=session_id, data=data or {})
        session.save()
        cookie_value = itsdangerous.Signer(settings.SECRET_KEY).sign(session_id)
        set_session(session)
    if response is not None:
        response.set_cookie(settings.COOKIE_NAME, value=cookie_value)
    return response
# Per-request session registry; weak keys let entries die with their request.
sessions = WeakKeyDictionary()
# Proxy that always resolves to the current request's session.
session = LocalProxy(get_session)
# Request callbacks
# NOTE: This gets attached in website.app.init_app to ensure correct callback
# order
def before_request():
    """Authenticate the incoming request before routing.

    Tried in order: CAS ticket validation, CAS OAuth bearer token,
    HTTP Basic auth, and finally the signed session cookie.
    """
    from framework import sentry
    from framework.auth import cas
    from framework.auth.core import User
    from framework.auth import authenticate
    from framework.routing import json_renderer
    # Central Authentication Server Ticket Validation and Authentication
    ticket = request.args.get('ticket')
    if ticket:
        service_url = furl.furl(request.url)
        service_url.args.pop('ticket')
        # Attempt authn with CAS, and return a proper redirect response
        return cas.make_response_from_ticket(ticket=ticket, service_url=service_url.url)
    # Central Authentication Server OAuth Bearer Token
    authorization = request.headers.get('Authorization')
    if authorization and authorization.startswith('Bearer '):
        client = cas.get_client()
        try:
            access_token = cas.parse_auth_header(authorization)
            cas_resp = client.profile(access_token)
        except cas.CasError as err:
            sentry.log_exception()
            # NOTE: We assume that the request is an AJAX request
            return json_renderer(err)
        if cas_resp.authenticated:
            user = User.load(cas_resp.user)
            return authenticate(user, access_token=access_token, response=None)
        return make_response('', http.UNAUTHORIZED)
    if request.authorization:
        # TODO: Fix circular import
        from framework.auth.core import get_user
        # BUGFIX: the password argument had been scrubbed to an invalid
        # placeholder; restore it from the Basic-auth credentials, mirroring
        # the username line above.
        user = get_user(
            email=request.authorization.username,
            password=request.authorization.password
        )
        # Create empty session
        # TODO: Shouldn't need to create a session for Basic Auth
        session = Session()
        if user:
            session.data['auth_user_username'] = user.username
            session.data['auth_user_id'] = user._primary_key
            session.data['auth_user_fullname'] = user.fullname
        else:
            # Invalid key: Not found in database
            session.data['auth_error_code'] = http.FORBIDDEN
        set_session(session)
        return
    cookie = request.cookies.get(settings.COOKIE_NAME)
    if cookie:
        try:
            session_id = itsdangerous.Signer(settings.SECRET_KEY).unsign(cookie)
            session = Session.load(session_id) or Session(_id=session_id)
            set_session(session)
            return
        except Exception:
            # Bad/forged cookie or unloadable session: fall through and treat
            # the request as anonymous. (Was a bare `except`, which also
            # swallowed SystemExit/KeyboardInterrupt.)
            pass
def after_request(response):
    """Persist the session after the view runs, but only for logged-in users."""
    if session.data.get('auth_user_id'):
        session.save()
    return response
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# KSP_SUICIDEBURN.py
# Github:@WeDias
# MIT License
# Copyright (c) 2020 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import krpc
from time import sleep
print('=' * 170)
print('''
888 d8P .d8888b. 8888888b. .d8888b. 888 888 8888888 .d8888b. 8888888 8888888b. 8888888888 888888b. 888 888 8888888b. 888b 888
888 d8P d88P Y88b 888 Y88b d88P Y88b 888 888 888 d88P Y88b 888 888 "Y88b 888 888 "88b 888 888 888 Y88b 8888b 888
888 d8P Y88b. 888 888 Y88b. 888 888 888 888 888 888 888 888 888 888 .88P 888 888 888 888 88888b 888
888d88K "Y888b. 888 d88P "Y888b. 888 888 888 888 888 888 888 8888888 8888888K. 888 888 888 d88P 888Y88b 888
8888888b "Y88b. 8888888P" "Y88b. 888 888 888 888 888 888 888 888 888 "Y88b 888 888 8888888P" 888 Y88b888
888 Y88b "888 888 "888 888 888 888 888 888 888 888 888 888 888 888 888 888 888 T88b 888 Y88888
888 Y88b Y88b d88P 888 Y88b d88P Y88b. .d88P 888 Y88b d88P 888 888 .d88P 888 888 d88P Y88b. .d88P 888 T88b 888 Y8888
888 Y88b "Y8888P" 888 "Y8888P" "Y88888P" 8888888 "Y8888P" 8888888 8888888P" 8888888888 8888888P" "Y88888P" 888 T88b 888 Y888
BY: WEDIAS
''')
print('=' * 170)
# Connect to the active vessel through the kRPC server
conn = krpc.connect()
vessel = conn.space_center.active_vessel
# Constants and parameters of the body currently being orbited
constante_gravitacional = 6.674184 * 10 ** -11
massa_planeta = vessel.orbit.body.mass
raio_planeta = vessel.orbit.body.equatorial_radius
nome_astro = vessel.orbit.body.name
tem_atmosfera = vessel.orbit.body.has_atmosphere
tamanho_atmosfera = 0
if tem_atmosfera:
    tamanho_atmosfera = vessel.orbit.body.atmosphere_depth
try:
    # Try to point the ship retrograde via SAS; if the probe core does not
    # support the retrograde SAS mode, fall back to manual piloting below.
    vessel.control.sas = True
    vessel.control.rcs = True
    sleep(0.1)
    vessel.control.sas_mode = vessel.control.sas_mode.retrograde
    print('NAVE COM SAS_RETROGRADE, PILOTO AUTOMATICO ATIVADO')
    print('SAS = [ONLINE]\nRCS = [ONLINE]')
    # 10-second countdown before engaging the autopilot
    for c in range(10, 0, - 1):
        print(f'\rCONTAGEM REGRESSIVA: {c}', end='')
        sleep(1)
    print('\rPILOTO AUTIOMATICO = [ONLINE]')
except:
    print('NAVE SEM SAS_RETROGRADE, PILOTO SEMI-AUTOMATICO ATIVADO. PILOTO MANTENHA A NAVE NO RETROGRADE')
    print('PILOTO AUTOMATICO = [OFFLINE]')
print('\rSUICIDEBURN = [ONLINE]')
# Landing instructions | on-board computer main loop
gear = caindo = False
while True:
    # Telemetry data for this tick
    altura_mar = vessel.flight().mean_altitude
    altura_chao = vessel.flight().surface_altitude
    periastro = vessel.orbit.periapsis_altitude
    vel_superficie = vessel.flight(vessel.orbit.body.reference_frame).speed
    retrograde = vessel.flight().retrograde
    # Local gravity from Newton's law at the current altitude
    aceleracao_gravidade = constante_gravitacional * massa_planeta / (raio_planeta + altura_mar) ** 2
    twr = vessel.available_thrust / (vessel.mass * aceleracao_gravidade)
    aceleracao = twr * aceleracao_gravidade
    # Suicide-burn timing: compare time until impact with the time needed
    # to cancel the current surface velocity at full thrust.
    tempo_ate_chao = altura_chao / vel_superficie
    tempo_de_queima = vel_superficie / aceleracao
    situacao = vessel.situation.name
    print(f'\rACELERACAO:{aceleracao:.2f}M/S^2 | ALTURA:{altura_chao:.2f}M | VELOCIDADE:{vel_superficie:.2f}M/S | TEMPOATECHAO:{int(tempo_ate_chao)}S | TEMPODEQUEIMA:{int(tempo_de_queima)}S | SITU:{situacao.upper()}', end='')
    # Parachute deployment inside the lower quarter of the atmosphere
    if tem_atmosfera and altura_mar <= (tamanho_atmosfera / 4):
        vessel.control.parachutes = True
    # Landing legs and lights shortly before touchdown
    # NOTE(review): `gear` is never set to True, so this branch re-fires
    # on every tick once tempo_ate_chao <= 3 -- confirm intent.
    if tempo_ate_chao <= 3 and not gear:
        vessel.control.gear = True
        vessel.control.lights = True
    # Stage separation when the current stage has no thrust left
    if vessel.available_thrust == 0 and vessel.control.current_stage != 0:
        print(f'\rSEPARAÇÃO DE ESTÁGIO')
        vessel.control.activate_next_stage()
    # Engine throttle control: first lower the periapsis, then wait for the
    # fall ("caindo"), then fire when the burn time catches the fall time.
    if not caindo and situacao in ['orbiting', 'sub_orbital', 'flying']:
        if tem_atmosfera:
            vessel.control.brakes = True
            if periastro >= (tamanho_atmosfera - 10000):
                vessel.control.throttle = 1
            else:
                vessel.control.throttle = 0
                caindo = True
        else:
            if periastro <= 0:
                sleep(1)
                vessel.control.throttle = 0
                caindo = True
            else:
                vessel.control.throttle = 1
    elif tempo_ate_chao <= tempo_de_queima + 0.5:
        if tem_atmosfera:
            # With an atmosphere, only burn inside its lower quarter.
            if tempo_ate_chao <= tempo_de_queima + 0.5 and altura_mar <= (tamanho_atmosfera / 4):
                vessel.control.throttle = 1
            else:
                vessel.control.throttle = 0
        else:
            vessel.control.throttle = 1
    # Successful landing!
    else:
        vessel.control.throttle = 0
    if vessel.situation.name in ['landed', 'splashed']:
        vessel.auto_pilot.engage()
        vessel.auto_pilot.target_pitch_and_heading(90, 90)
        print(f'\nA NAVE "{vessel.name.upper()}" POUSOU COM SUCESSO EM {nome_astro.upper()}')
        if vessel.recoverable:
            input('PRESSIONE ENTER PARA VOLTAR E RECUPERAR A NAVE')
            vessel.recover()
        else:
            input('PRESSIONE ENTER PARA VOLTAR')
        vessel.auto_pilot.disengage()
        vessel.control.sas = True
        break
| StarcoderdataPython |
1905291 | <filename>app/submission/throttles.py
from rest_framework.throttling import UserRateThrottle
class RunThrottle(UserRateThrottle):
    """Per-user rate limit for the "run" action.

    The actual rate is configured under DEFAULT_THROTTLE_RATES["run"]
    in the DRF settings.
    """
    scope = "run"
class RunRCThrottle(UserRateThrottle):
    """Per-user rate limit keyed by the "run_rc" scope in DRF settings."""
    scope = "run_rc"
class SubmitThrottle(UserRateThrottle):
    """Per-user rate limit keyed by the "submit" scope in DRF settings."""
    scope = "submit"
| StarcoderdataPython |
9767137 | <reponame>openprocurement/openregistry.concierge
# -*- coding: utf-8 -*-
from openregistry.concierge.utils import (
create_certain_condition,
create_filter_condition
)
def test_create_filter_condition():
    """create_filter_condition joins alias and status clauses with &&."""
    aliases = ['loki', 'anotherLoki']
    statuses = ['pending', 'verification']
    alias_clause = '(doc.lotType == "loki" || doc.lotType == "anotherLoki")'
    status_clause = '(doc.status == "pending" || doc.status == "verification")'
    # Both lists supplied -> both clauses, joined with &&.
    assert create_filter_condition(aliases, statuses) == alias_clause + ' && ' + status_clause
    # Only aliases -> just the alias clause.
    assert create_filter_condition(aliases, []) == alias_clause
    # Only statuses -> just the status clause.
    assert create_filter_condition([], statuses) == status_clause
    # Nothing handled -> empty condition string.
    assert create_filter_condition([], []) == ''
def test_create_certain_condition():
    """create_certain_condition joins per-value comparisons with the operator."""
    values = ['loki', 'anotherLoki']
    expected = '(variable == "loki" && variable == "anotherLoki")'
    assert create_certain_condition('variable', values, '&&') == expected
| StarcoderdataPython |
4997631 | import sys
import numpy as np
from keras.datasets import mnist, fashion_mnist
import fire
sys.path.append(".")
from numpynn.layers import *
from numpynn.activations import *
from numpynn.initializers import *
from numpynn.models import Model
from numpynn.optimizers import SGD
from numpynn.losses import *
from numpynn.regularizers import *
def data(scale=(0, 1)):
    """Load MNIST, carve a validation split off the training set, and
    flatten + rescale all images.

    :param scale: (low, high) target range for pixel values.  The default
        used to be the mutable list [0, 1]; a tuple accepts the same values
        while avoiding the shared-mutable-default pitfall.
    :return: ((x_train, y_train), (x_val, y_val), (x_test, y_test))
    """
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # Swap in fashion_mnist.load_data() above to train on Fashion-MNIST.
    n = 50000  # first 50k images for training, remaining 10k for validation
    x_train, x_val = x_train[:n], x_train[n:]
    y_train, y_val = y_train[:n], y_train[n:]
    x_train = preprocess(x_train, scale)
    x_val = preprocess(x_val, scale)
    x_test = preprocess(x_test, scale)
    return (x_train, y_train), (x_val, y_val), (x_test, y_test)
def preprocess(x, scale):
    """Rescale pixel values into `scale` and flatten each image to a vector."""
    rescaled = normalize(x, scale)
    return rescaled.reshape(rescaled.shape[0], -1)
def normalize(x, scale):
    """Linearly map 8-bit pixel values (0..255) onto [scale[0], scale[1]]."""
    lo, hi = scale
    src_min, src_max = 0, 255
    span = src_max - src_min
    return ((hi - lo) * (x - src_min)) / span + lo
def sigmoid_mse():
    """Build a 784-30-10 all-sigmoid network trained with quadratic (MSE) cost.

    Returns the Model plus a config dict (optimizer, loss, pixel scale).
    """
    net_in = Input(784)
    hidden = Dense(
        30,
        activation=Sigmoid,
        kernel_initializer=RandomNormal(),
        bias_initializer=Zeros(),
    )(net_in)
    net_out = Dense(
        10,
        activation=Sigmoid,
        kernel_initializer=RandomNormal(),
        bias_initializer=Zeros(),
    )(hidden)
    cfg = {
        "optimizer": SGD(lr=3.0, momentum=0.0),
        "loss": MSE,
        "scale": [0, 1],
    }
    return Model(inputs=net_in, outputs=net_out), cfg
def sigmoid_crossentropy():
    """Build a 784-30-30-30-10 sigmoid network with cross-entropy loss.

    Each layer's weights are drawn from N(0, 1/sqrt(fan_in)) and carry a
    small L2 penalty.  Returns the Model plus its training config dict.
    """
    net_in = Input(784)
    x = net_in
    # Three identical 30-unit hidden layers; only the fan-in (and hence the
    # initializer's std dev) differs for the first one.
    for fan_in in (784, 30, 30):
        x = Dense(
            30,
            activation=Sigmoid,
            kernel_initializer=RandomNormal(0, 1 / (fan_in ** 0.5)),
            bias_initializer=Zeros(),
            kernel_regularizer=L2(5e-5),
        )(x)
    net_out = Dense(
        10,
        activation=Sigmoid,
        kernel_initializer=RandomNormal(0, 1 / (30 ** 0.5)),
        bias_initializer=Zeros(),
        kernel_regularizer=L2(5e-5),
    )(x)
    cfg = {
        "optimizer": SGD(lr=0.1, momentum=0.2),
        "loss": CrossEntropy,
        "scale": [0, 1],
    }
    return Model(inputs=net_in, outputs=net_out), cfg
def softmax_loglikelihood():
    """Build a 784-30 sigmoid hidden layer with a softmax output trained
    with the log-likelihood loss.  Returns the Model and its config dict.
    """
    net_in = Input(784)
    hidden = Dense(
        30,
        activation=Sigmoid,
        kernel_initializer=RandomNormal(0, 1 / (784 ** 0.5)),
        bias_initializer=Zeros(),
        kernel_regularizer=L2(5e-5),
    )(net_in)
    # A Dropout(0.5) layer can be inserted here for regularization experiments.
    net_out = Dense(
        10,
        activation=Softmax,
        kernel_initializer=RandomNormal(0, 1 / (30 ** 0.5)),
        bias_initializer=Zeros(),
        kernel_regularizer=L2(5e-5),
    )(hidden)
    cfg = {
        "optimizer": SGD(lr=0.5, momentum=0.2),
        "loss": LogLikelihood,
        "scale": [0, 1],
    }
    return Model(inputs=net_in, outputs=net_out), cfg
def relu_mse():
    """Build a 784-30-10 all-ReLU network trained with MSE.

    Returns the Model plus its training config dict.
    """
    net_in = Input(784)
    hidden = Dense(
        30,
        activation=ReLU,
        kernel_initializer=RandomNormal(0, 1 / (784 ** 0.5)),
        bias_initializer=Zeros(),
        kernel_regularizer=L2(5e-5),
    )(net_in)
    # A Dropout(0.5) layer can be inserted here for regularization experiments.
    net_out = Dense(
        10,
        activation=ReLU,
        kernel_initializer=RandomNormal(0, 1 / (30 ** 0.5)),
        bias_initializer=Zeros(),
        kernel_regularizer=L2(5e-5),
    )(hidden)
    cfg = {
        "optimizer": SGD(lr=0.25, momentum=0.2),
        "loss": MSE,
        "scale": [0, 1],
    }
    return Model(inputs=net_in, outputs=net_out), cfg
def train(model, cfg):
    """Compile and fit on the full training split, then print test accuracy."""
    train_set, val_set, (x_test, y_test) = data(cfg["scale"])
    model.compile(optimizer=cfg["optimizer"], loss=cfg["loss"], n_classes=10)
    model.fit(train_set[0], train_set[1], batch_size=10, n_epochs=30, val_data=val_set)
    print("Accuracy:", model.evaluate(x_test, y_test))
def small_train(model, cfg):
    """Like train(), but on a 10k/2k subset and without test evaluation."""
    (x_train, y_train), (x_val, y_val), _ = data(cfg["scale"])
    x_train, y_train = x_train[:10000], y_train[:10000]
    x_val, y_val = x_val[:2000], y_val[:2000]
    model.compile(optimizer=cfg["optimizer"], loss=cfg["loss"], n_classes=10)
    model.fit(x_train, y_train, batch_size=10, n_epochs=30, val_data=(x_val, y_val))
def main(model_nm="softmax_loglikelihood", action="train"):
    # Name-based dispatch: model_nm selects one of the builder functions
    # above, action selects train/small_train; both resolved via globals().
    model, cfg = globals()[model_nm]()
    globals()[action](model, cfg)
if __name__ == "__main__":
    # Expose main() as a command-line interface via python-fire.
    fire.Fire(main)
| StarcoderdataPython |
5160821 | <gh_stars>1-10
#/usr/bin/env python
# vim:ts=4:sw=4:et:ff=unix
__doc__=\
'''Steam Workshop subscribed mods.
Format for SteamModList.txt:
Repeated groups of three lines:
    esp name
    Mod name
    Unknown integer (always 1?)
'''
import os.path
from files import DumbModCollection, Mod, DataFile
class SteamWorkshopMods(DumbModCollection):
    """Mod collection backed by Steam Workshop subscriptions.

    Entries are read from SteamModList.txt in the game's user-data
    directory; see the module docstring for the file format.
    """
    SUBSCRIPTIONS_FILE = 'SteamModList.txt'
    def __init__(self, game):
        super(SteamWorkshopMods, self).__init__(game)
        # Full path to the subscriptions manifest inside the user-data dir.
        self.subscriptions_path = os.path.join(game.user_data_path, self.SUBSCRIPTIONS_FILE)
    def parse_install(self):
        """Register a Mod plus its .esp and .bsa data files per subscription."""
        for (esp, name, subs_flag) in self.read_subscriptions():
            esp = os.path.join('Data', esp)
            # Assume the archive sits next to the plugin: foo.esp -> foo.bsa.
            bsa = esp[:-4] + '.bsa'
            # TODO Some of these fields don't make sense for Steam. What do we do with them?
            mod = Mod(name, esp, None, '1.0', None)
            self.add_mod(mod)
            self.add_data_file(DataFile(esp, [mod]))
            self.add_data_file(DataFile(bsa, [mod]))
    def read_subscriptions(self):
        '''Read subscriptions file and generate (esp_name, mod_name, subscription int).
        '''
        # Lines come in groups of three; buffer the first two, then yield
        # once the integer line completes the triple.  A trailing
        # incomplete group is silently dropped.
        esp_name = mod_name = None
        with open(self.subscriptions_path) as subs_fd:
            for ln in subs_fd.readlines():
                ln = ln.strip()
                if esp_name is None:
                    esp_name = ln
                elif mod_name is None:
                    mod_name = ln
                else:
                    yield (esp_name, mod_name, int(ln))
                    esp_name = mod_name = None
if __name__ == '__main__':
pass
| StarcoderdataPython |
9715271 | from __future__ import absolute_import, unicode_literals
# The edit_handlers module extends Page with some additional attributes required by
# tuiuiuadmin (namely, base_form_class and get_edit_handler). Importing this within
# tuiuiuadmin.models ensures that this happens in advance of running tuiuiuadmin's
# system checks.
from tuiuiu.tuiuiuadmin import edit_handlers # NOQA
| StarcoderdataPython |
6669859 | <filename>__init__.py
from flask import Flask
from flask import jsonify
from flask import abort
from flask import make_response
from flask import request
from flask import url_for
from flask.ext.pymongo import PyMongo
import logging
from logging.handlers import RotatingFileHandler
import datetime
app = Flask(__name__)
# Rotating file log: up to 3 backups of 40 KB each, so logs stay bounded.
handler = RotatingFileHandler('logs/blogservice.log', maxBytes=40960, backupCount=3)
handler.setLevel(logging.DEBUG)
app.logger.addHandler(handler)
# Route werkzeug's request log into the same rotating file.
log = logging.getLogger('werkzeug')
log.setLevel(logging.DEBUG)
log.addHandler(handler)
# connect to mongo with defaults
mongo = PyMongo(app)
# When True, the epr/dpr helpers below also echo messages to stdout.
debug = True
def epr(s):
app.logger.error(s)
if(debug):
print s
def dpr(s):
app.logger.debug(s)
if(debug):
print s
def make_public_blog(blog):
    """Return a copy of *blog* suitable for JSON output.

    Adds a 'uri' link when an 'id' field is present and skips Mongo's
    internal '_id' (not JSON serializable); all other fields are copied
    through unchanged.
    """
    new_blog = {}
    for field in blog:
        if field == 'id':
            new_blog['uri'] = url_for(
                'get_blog',
                blog_id=blog['id'],
                _external=True
            )
        if field == '_id':
            # Drop Mongo's ObjectId.  Note the 'id' field itself is still
            # copied through by the else branch below.
            pass
        else:
            try:
                new_blog[field] = blog[field]
            except Exception as e:
                # Log and keep going so one bad field can't break the response.
                epr("{} - {}".format(field, e))
    return new_blog
@app.errorhandler(404)
def not_found(error):
    """JSON 404 response (the Flask default would return HTML)."""
    epr("404 not found")
    body = jsonify({'error': 'not found'})
    return make_response(body, 404)
@app.errorhandler(400)
def bad_request(error):
    """JSON 400 response for malformed requests."""
    body = jsonify({'error': 'bad request'})
    return make_response(body, 400)
@app.errorhandler(409)
def duplicate_resource(error):
    """JSON 409 response when a blog id already exists."""
    body = jsonify({'error': 'duplicate resource id'})
    return make_response(body, 409)
@app.errorhandler(500)
def internal_server_error(error):
    """JSON 500 response for unexpected failures."""
    body = jsonify({'error': 'internal server error'})
    return make_response(body, 500)
@app.route('/blog/api/v1.0/blogs', methods=['GET'])
def get_blogs():
    """List all blogs, oldest first; 404 when the collection is empty."""
    blogs = []
    cursor = mongo.db.blogs.find()
    # Ascending created-date == oldest first.
    cursor.sort("created-date", 1)
    for blog in cursor:
        dpr("Found blog {}".format(blog))
        blogs.append(blog)
    cursor.close()
    if len(blogs) != 0:
        return jsonify({'blogs': [make_public_blog(blog) for blog in blogs]})
    else:
        abort(404)
@app.route('/blog/api/v1.0/blogs', methods=['POST'])
def create_blog():
    """Create a blog post from the JSON body.

    400 when the body is missing or lacks a required field
    (id/title/content/tags); 409 when the user-supplied id already exists.
    """
    if not request.json:
        epr("Not request json")
        abort(400)
    if 'title' not in request.json or 'id' not in request.json or 'content'not in request.json or 'tags' not in request.json:
        epr("Incomplete data")
        abort(400)
    blog = {
        'id': request.json['id'],
        'title': request.json['title'],
        'content': request.json['content'],
        'description': request.json.get('description', ""),
        'tags': request.json.get('tags', ""),
        'last-update': datetime.datetime.utcnow(),
        'created-date': datetime.datetime.utcnow(),
        'author': request.json.get('author', "")
    }
    # Reject duplicate ids before inserting.
    cursor = mongo.db.blogs.find({'id': blog['id']}).limit(1)
    if cursor.count() > 0:
        epr("Duplicate blog id")
        cursor.close()
        abort(409)
    cursor.close()
    mongo.db.blogs.insert(blog)
    dpr("Blog inserted")
    return jsonify({'blog': make_public_blog(blog)}), 201
@app.route('/blog/api/v1.0/blog/<string:blog_id>', methods=['GET'])
def get_blog(blog_id):
    """Fetch a single blog by its public id; 404 when missing."""
    # NOTE(review): this scans the whole collection and filters in Python;
    # mongo.db.blogs.find_one({'id': blog_id}) would let MongoDB do the work.
    cursor = mongo.db.blogs.find()
    blog = [blog for blog in cursor if blog['id'] == blog_id]
    cursor.close()
    if len(blog) == 0:
        epr("Could not find blog")
        abort(404)
    dpr("Found blog {}".format(blog))
    return jsonify({'blog': make_public_blog(blog[0])})
@app.route('/blog/api/v1.0/blog/<string:blog_id>', methods=['PUT'])
def update_blog(blog_id):
    """Partially update a blog; fields absent from the body keep their values.

    404 when the blog does not exist; 400 when the body is missing or any
    supplied field is not a (Python 2) unicode string.
    """
    # NOTE(review): full-collection scan; find_one({'id': blog_id}) would be cheaper.
    cursor = mongo.db.blogs.find()
    blog = [blog for blog in cursor if blog['id'] == blog_id]
    cursor.close()
    if len(blog) == 0:
        epr("Could not find blog with id {}".format(blog_id))
        abort(404)
    else:
        blog = blog[0]
    if not request.json:
        epr("Invalid json")
        abort(400)
    # Validate each optional field's type before applying anything.
    if 'title' in request.json and type(request.json['title']) != unicode:
        epr("Did not provide title in request (or title not unicode)")
        abort(400)
    if 'description' in request.json and type(request.json['description']) != unicode:
        epr("Did not provide description in request (or description not unicode)")
        abort(400)
    if 'content' in request.json and type(request.json['content']) != unicode:
        epr("Did not provide content in request (or content not unicode)")
        abort(400)
    if 'tags' in request.json and type(request.json['tags']) != unicode:
        epr("Did not provide tags in request (or tags not unicode)")
        abort(400)
    if 'author' in request.json and type(request.json['author']) != unicode:
        epr("Did not provide author in request (or author not unicode)")
        abort(400)
    # Merge: request value wins, otherwise keep the stored value.
    for fieldname in ['title', 'description', 'content', 'tags', 'author']:
        blog[fieldname] = request.json.get(fieldname, blog[fieldname])
    result = mongo.db.blogs.update_one(
        {"id": blog_id},
        {
            "$set": {
                "title": blog["title"],
                "description": blog["description"],
                "content": blog["content"],
                "tags": blog["tags"],
                "author": blog["author"],
                "last-update": datetime.datetime.utcnow(),
            }
        }
    )
    dpr(result)
    dpr("Updated blog id {}".format(blog_id))
    return jsonify({'blog': make_public_blog(blog)})
@app.route('/blog/api/v1.0/blog/<string:blog_id>', methods=['DELETE'])
def delete_blog(blog_id):
    """Delete a blog by its public id; 404 when it does not exist."""
    blog = mongo.db.blogs.find_one_or_404({'id': blog_id})
    # NOTE(review): find_one_or_404 already aborts with 404 when nothing
    # matches, so this length check looks redundant -- confirm intent.
    if len(blog) == 0:
        epr("Blog with id {} not found".format(blog_id))
        abort(404)
    mongo.db.blogs.delete_one({"id": blog_id})
    dpr("Blog id {} deleted".format(blog_id))
    return jsonify({'result': True})
if __name__ == '__main__':
    # Serve on all interfaces, port 6541.
    app.run(host='0.0.0.0', port=6541)
    # app.run(debug=True,host='0.0.0.0',port=6541)
| StarcoderdataPython |
8140150 | from Player import Player
import numpy as np
class FuziyPlayer(Player):
    """Connect Four player: depth-limited minimax with alpha-beta pruning,
    plus hand-written tactical checks for immediate wins and blocks.

    Board convention used throughout: 6 rows x 7 columns, row 5 is the
    bottom; 0 marks an empty cell, 1 and 2 are the two players.
    """
    def name(self):
        """Display name of this player."""
        return "Fuziy Player"
    def max_value(self, board, action, alpha, beta, player_code, p):
        """Maximizing half of alpha-beta; p is the remaining search depth."""
        if p == 0:
            result = self.evaluate(player_code, board), action
            return result
        sucessors = self.sucessores(player_code, board)
        for s in sucessors:
            mv, ac = self.min_value(s['board'], s['action'], alpha, beta, player_code, p-1)
            if (mv > alpha):
                alpha = mv
                action = ac
            if (alpha >= beta):
                # Beta cutoff: the minimizer already has a better option.
                return alpha, action
        return alpha, action
    def min_value(self, board, action, alpha, beta, player_code, p):
        """Minimizing half of alpha-beta; p is the remaining search depth."""
        if p == 0:
            result = self.evaluate(player_code, board), action
            return result
        sucessors = self.sucessores(player_code, board)
        for s in sucessors:
            mv, ac = self.max_value(s['board'], s['action'], alpha, beta, player_code, p-1)
            if (mv < beta):
                beta = mv
                action = ac
            if (beta <= alpha):
                # Alpha cutoff: the maximizer already has a better option.
                return beta, action
        return beta, action
    def move(self, player_code, board):
        """Choose a column to play: alpha-beta result, overridden by
        emergency blocks and immediate win/block detection."""
        _, action = self.max_value(board, None, -999999, 999999, player_code, 5)
        if (self.emergency(board, player_code)):
            # The enemy has a 3-in-a-row somewhere: try to occupy the square
            # that would complete it.
            sucessores = self.sucessores(self.enemy(player_code), board)
            for s in sucessors if False else sucessores:
                result = self.evaluate(self.enemy(player_code), s['board'])
                if (result > 70000):
                    print("EMERGENCY")
                    return None, s['action']
        near_lost, defence_position = self.next_move(self.enemy(player_code), board)
        if near_lost:
            print("BLOQUEIO APENAS")
            return None, defence_position
        near_win, win_position = self.next_move(player_code, board)
        if near_win:
            print("VITORIA APENAS")
            return None, win_position
        if action is None:
            # Fallback: play the first column that still has an empty cell.
            for i in range(6):
                for j in range(7):
                    if board[i,j] == 0:
                        return None, j
        return None, action
    def sucessores(self, player_code, board):
        """All boards reachable in one move, as {'board': ..., 'action': col}."""
        sucessors = []
        for i in range(0,7):
            b = self.movement(player_code, board, i)
            if(b is not None):
                sucessors.append({'board':b, 'action':i})
        return sucessors
    def enemy(self, player):
        """Return the opponent's player code (1 <-> 2)."""
        if player == 1:
            return 2
        else:
            return 1
    def evaluate(self, player, board):
        """Static evaluation: weighted run counts for us minus the enemy.

        Runs of 4, 3 and 2 weigh 100000, 1000 and 1 respectively; board[::-1]
        flips rows so np.diag also covers anti-diagonals.
        """
        lines = self.count_row_line(player, board)
        cols = self.count_row_column(player, board)
        diags = self.count_row_diag(player, board)
        diags2 = self.count_row_diag(player, board[::-1])
        possible_path = lines['2'] + cols['2'] + diags['2'] + diags2['2']
        near_to_win = lines['3'] + cols['3'] + diags['3'] + diags2['3']
        almost_win = lines['4'] + cols['4'] + diags['4'] + diags2['4']
        win = 100000*almost_win + 1000*near_to_win + possible_path
        enemy = self.enemy(player)
        enemy_lines = self.count_row_line(enemy, board)
        enemy_cols = self.count_row_column(enemy, board)
        enemy_digs = self.count_row_diag(enemy, board)
        enemy_digs2 = self.count_row_diag(enemy, board[::-1])
        possible_path_lost = enemy_lines['2'] + enemy_cols['2'] + enemy_digs['2'] + enemy_digs2['2']
        near_to_lost = enemy_lines['3'] + enemy_cols['3'] + enemy_digs['3'] + enemy_digs2['3']
        almost_lost = enemy_lines['4'] + enemy_cols['4'] + enemy_digs['4'] + enemy_digs2['4']
        lost = 100000*almost_lost + 1000*near_to_lost + possible_path_lost
        return (win - lost)
    def count_row_line(self, player, board):
        """Count horizontal runs per row; keys '2'/'3'/'4' are run lengths."""
        retorno = {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0, '6': 0}
        for i in range(6):
            counter = 0
            for j in range(6):
                if ((board[i, j] == player) and (board[i, j] == board[i, j + 1])):
                    counter = counter + 1
                else:
                    counter = 0
                if (counter==1):
                    retorno['2'] = retorno['2'] + 1
                if (counter==2):
                    retorno['3'] = retorno['3'] + 1
                if (counter==3):
                    retorno['4'] = retorno['4'] + 1
        return retorno
    def count_row_column(self, player, board):
        """Count vertical runs per column; keys '2'/'3'/'4' are run lengths."""
        retorno = {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0, '6': 0}
        for i in range(7):
            counter = 0
            for j in range(5):
                if ((board[j, i] == player) and (board[j,i] == board[j+1,i])):
                    counter = counter + 1
                else:
                    counter = 0
                if (counter==1):
                    retorno['2'] = retorno['2'] + 1
                if (counter==2):
                    retorno['3'] = retorno['3'] + 1
                if (counter==3):
                    retorno['4'] = retorno['4'] + 1
        return retorno
    def count_row_diag(self, player, board):
        """Count runs on diagonals k=-2..3 (only those long enough for 4)."""
        retorno = {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0, '6': 0}
        for k in range(-2,4):
            counter = 0
            x = np.diag(board, k=k)
            for i in range(0,len(x)-1):
                if ((x[i] == player) and (x[i] == x[i+1])):
                    counter = counter + 1
                else:
                    counter = 0
                if (counter==1):
                    retorno['2'] = retorno['2'] + 1
                if (counter==2):
                    retorno['3'] = retorno['3'] + 1
                if (counter==3):
                    retorno['4'] = retorno['4'] + 1
        return retorno
    def count_last_line(self, player, board):
        """Count the player's pieces on the bottom row.

        NOTE(review): iterates only columns 0-5 of the 7-column row, and the
        method is not called anywhere in this file.
        """
        counter = 0
        for i in range(6):
            if (board[5, i] == player):
                counter = counter + 1
        return counter
    def emergency(self, board, player_code):
        """True when the enemy has any 3-in-a-row (run of length 3)."""
        enemy = self.enemy(player_code)
        enemy_lines = self.count_row_line(enemy, board)
        enemy_cols = self.count_row_column(enemy, board)
        enemy_digs = self.count_row_diag(enemy, board)
        enemy_digs2 = self.count_row_diag(enemy, board[::-1])
        if (enemy_cols['3'] > 0 or enemy_lines['3'] > 0 or enemy_digs['3'] > 0 or enemy_digs2['3']> 0):
            return True
        return False
    def next_move(self, player, board):
        """Detect a column that immediately completes 4-in-a-row for *player*.

        Returns (True, column) when such a move exists, else (False, 0).
        Horizontal patterns are enumerated by hand per column position; for
        rows above the bottom (i != 5) the gap must be supported from below
        (board[i+1, ...] != 0) so the dropped piece actually lands there.
        """
        next_position = 0
        # Horizontal patterns (XX_X, X_XX, etc.)
        for i in range(6):
            stay = 0
            for j in range(6):
                if i == 5:
                    if j == 3:
                        if ((board[i, j-3] == player) and (board[i, j-2] == player) and (board[i, j-1] == 0) and (board[i, j] == player)):
                            stay += 1
                            next_position = j-1
                            return True, next_position
                        if ((board[i, j-3] == player) and (board[i, j-2] == 0) and (board[i, j-1] == player) and (board[i, j] == player)):
                            stay += 1
                            next_position = j-2
                            return True, next_position
                        if ((board[i, j+3] == player) and (board[i, j+2] == player) and (board[i, j+1] == 0) and (board[i, j] == player)):
                            stay += 1
                            next_position = j+1
                            return True, next_position
                        if ((board[i, j+3] == player) and (board[i, j+2] == 0) and (board[i, j+1] == player) and (board[i, j] == player)):
                            stay += 1
                            next_position = j+2
                            return True, next_position
                    if j == 4:
                        if ((board[i, j-3] == player) and (board[i, j-2] == player) and (board[i, j-1] == 0) and (board[i, j] == player)):
                            stay += 1
                            next_position = j-1
                            return True, next_position
                        if ((board[i, j-3] == player) and (board[i, j-2] == 0) and (board[i, j-1] == player) and (board[i, j] == player)):
                            stay += 1
                            next_position = j-2
                            return True, next_position
                        if ((board[i, j+2] == player) and (board[i, j+1] == 0) and (board[i, j] == player) and (board[i, j-1] == player)):
                            stay += 1
                            next_position = j+1
                            return True, next_position
                    if j >= 5:
                        if ((board[i, j-1] == 0) and (board[i, j-2] == player) and (board[i, j-3] == player) and (board[i, j] == player)):
                            stay += 1
                            next_position = j-1
                            return True, next_position
                        if ((board[i, j-1] == player) and (board[i, j-2] == 0) and (board[i, j-3] == player) and (board[i, j] == player)):
                            stay += 1
                            next_position = j-2
                            return True, next_position
                else:
                    if j == 3:
                        if ((board[i, j-3] == player) and (board[i, j-2] == player) and (board[i, j-1] == 0) and (board[i+1, j-1] != 0) and (board[i, j] == player)):
                            stay += 1
                            next_position = j-1
                            return True, next_position
                        if ((board[i, j-3] == player) and (board[i, j-2] == 0) and (board[i+1, j-2] != 0) and (board[i, j-1] == player) and (board[i, j] == player)):
                            stay += 1
                            next_position = j-2
                            return True, next_position
                        if ((board[i, j+3] == player) and (board[i, j+2] == player) and (board[i, j+1] == 0) and (board[i+1, j+1] != 0) and (board[i, j] == player)):
                            stay += 1
                            next_position = j+1
                            return True, next_position
                        if ((board[i, j+3] == player) and (board[i, j+2] == 0) and (board[i+1, j+2] != 0) and (board[i, j+1] == player) and (board[i, j] == player)):
                            stay += 1
                            next_position = j+2
                            return True, next_position
                    if j == 4:
                        if ((board[i, j-3] == player) and (board[i, j-2] == player) and (board[i, j-1] == 0) and (board[i+1, j-1] != 0) and (board[i, j] == player)):
                            stay += 1
                            next_position = j-1
                            return True, next_position
                        if ((board[i, j-3] == player) and (board[i, j-2] == 0) and (board[i+1, j-2] != 0) and (board[i, j-1] == player) and (board[i, j] == player)):
                            stay += 1
                            next_position = j-2
                            return True, next_position
                        if ((board[i, j+2] == player) and (board[i, j+1] == 0) and (board[i+1, j+1] != 0) and (board[i, j] == player) and (board[i, j-1] == player)):
                            stay += 1
                            next_position = j+1
                            return True, next_position
                    if j >= 5:
                        if ((board[i, j-1] == 0) and (board[i+1, j-1] != 0) and (board[i, j-2] == player) and (board[i, j-3] == player) and (board[i, j] == player)):
                            stay += 1
                            next_position = j-1
                            return True, next_position
                        if ((board[i, j-1] == player) and (board[i, j-2] == 0) and (board[i+1, j-2] != 0) and (board[i, j-3] == player) and (board[i, j] == player)):
                            stay += 1
                            next_position = j-2
                            return True, next_position
        # Vertical patterns: three stacked pieces with an empty cell on top.
        for i in range(7):
            end = 0
            for j in range(5):
                if ((board[j, i] == player) and (board[j+1,i] == player)):
                    end += 1
                else:
                    end = 0
                if (end >= 2):
                    if j >= 2:
                        if board[j-2,i] == 0:
                            next_position = i
                            return True, next_position
        return False, next_position
    def movement(self, player, board, column):
        """Simulate dropping a piece in *column*.

        Returns the resulting board copy, or None when the column is full.
        """
        result_board = np.matrix(board)
        for i in range(5,-2,-1):
            if (board[i,column] == 0):
                break
        if(i < 0):
            return None
        result_board[i, column] = player
        return result_board
| StarcoderdataPython |
4836056 |
class PathValidator(object):
    """Helper class to test if the path is valid for the query and grid."""

    @staticmethod
    def is_valid_path(grid, query, path):
        """Return True iff *path* runs from query[0] to query[1] through
        free, in-bounds cells using only legal single-step transitions."""
        if path[0] != query[0] or path[-1] != query[1]:
            return False
        # `range` instead of the Python-2-only `xrange`, so the module also
        # works under Python 3 (identical semantics here).
        for i in range(1, len(path)):
            if (not PathValidator.is_valid_transition(path[i - 1], path[i]) or
                    not PathValidator.is_valid_waypoint(path[i], grid)):
                return False
        return True

    @staticmethod
    def is_valid_waypoint(wp, grid):
        """Return True iff the waypoint lies inside the grid and its cell is
        free (a truthy grid cell marks an obstacle)."""
        return 0 <= wp.x < grid.shape[0] and 0 <= wp.y < grid.shape[1] and not grid[wp.x, wp.y]

    @staticmethod
    def is_valid_transition(wp_a, wp_b):
        """Return True iff moving wp_a -> wp_b is a legal single step.

        The allowed (dx, dy) moves depend on the orientation change mod 4
        and on wp_a's orientation parity: even orientations use the
        `vertical` table, odd ones the `horizontal` table.
        """
        change_or = (wp_b.orientation - wp_a.orientation + 4) % 4
        change_xy = (wp_b.x - wp_a.x, wp_b.y - wp_a.y)
        vertical = {
            0: [(0, 1), (0, -1)],
            1: [(-1, -1), (1, 1)],
            2: [],
            3: [(-1, 1), (1, -1)]
        }
        horizontal = {
            0: [(1, 0), (-1, 0)],
            1: [(-1, 1), (1, -1)],
            2: [],
            3: [(-1, -1), (1, 1)]
        }
        return change_xy in (vertical[change_or] if wp_a.orientation % 2 == 0 else horizontal[change_or])
| StarcoderdataPython |
5188967 | import rospy
from geometry_msgs.msg import Twist
from keyboard.msg import Key
vel = Twist()
def callback(command):
if command.code == 97:
print 'yes w'
vel.linear.x = 5
elif command.code =='s':
vel.linear.x = 0
elif command.code == 'a':
vel.linear.y = 5
elif command.code == 'd':
vel.linear.y = 0
print 'vel:',vel
def main():
    # Node setup: subscribe to keyboard keydown events and republish the
    # shared velocity command at 10 Hz until shutdown.
    rospy.init_node('Polaris_control_node',anonymous=True)
    rospy.Subscriber('/keyboard/keydown',Key,callback)
    vel_pub = rospy.Publisher('/polaris/cmd_vel',Twist,queue_size=10)
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        vel_pub.publish(vel)
        rate.sleep()
if __name__ == '__main__':
    main()
6611865 | # Generated by Django 2.0.5 on 2018-06-14 10:24
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.0.5 (see header comment).  Alters two
    # UserProfile fields: caps description at 500 chars and changes the
    # image field's default/upload location.  Generated migrations should
    # normally not be hand-edited.
    dependencies = [
        ('accounts', '0005_auto_20180614_1815'),
    ]
    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='description',
            field=models.CharField(default='', max_length=500),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='image',
            field=models.ImageField(blank=True, default='enigma/media/profile_image/80ac7d053444b25f4de44baf53ca21e7_2.jpg', upload_to='profile_image'),
        ),
    ]
| StarcoderdataPython |
6612226 | <gh_stars>10-100
#
# Copyright (c) 2019, 2020 Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime as dt
import io
import os
import random
from fdk import constants
from fdk import headers as hs
from fdk import log
from collections import namedtuple
class InvokeContext(object):
    """Per-invocation context object handed to an Fn function handler.

    Holds app/function identity, request metadata, headers and the
    response headers accumulated via SetResponseHeaders.
    """
    def __init__(self, app_id, app_name, fn_id, fn_name, call_id,
                 content_type="application/octet-stream",
                 deadline=None, config=None,
                 headers=None, request_url=None,
                 method="POST", fn_format=None,
                 tracing_context=None):
        """
        Request context here to be a placeholder
        for request-specific attributes
        :param app_id: Fn App ID
        :type app_id: str
        :param app_name: Fn App name
        :type app_name: str
        :param fn_id: Fn App Fn ID
        :type fn_id: str
        :param fn_name: Fn name
        :type fn_name: str
        :param call_id: Fn call ID
        :type call_id: str
        :param content_type: request content type
        :type content_type: str
        :param deadline: request deadline
        :type deadline: str
        :param config: an app/fn config
        :type config: dict
        :param headers: request headers
        :type headers: dict
        :param request_url: request URL
        :type request_url: str
        :param method: request method
        :type method: str
        :param fn_format: function format
        :type fn_format: str
        :param tracing_context: tracing context
        :type tracing_context: TracingContext
        """
        self.__app_id = app_id
        self.__fn_id = fn_id
        self.__call_id = call_id
        # Fall back to empty containers so lookups never hit None.
        self.__config = config if config else {}
        self.__headers = headers if headers else {}
        self.__http_headers = {}
        self.__deadline = deadline
        self.__content_type = content_type
        self._request_url = request_url
        self._method = method
        self.__response_headers = {}
        self.__fn_format = fn_format
        self.__app_name = app_name
        self.__fn_name = fn_name
        # NOTE(review): `tracing_context if tracing_context else None` is
        # equivalent to just `tracing_context` for falsy inputs -- confirm.
        self.__tracing_context = tracing_context if tracing_context else None
        log.log("request headers. gateway: {0} {1}"
                .format(self.__is_gateway(), headers))
        if self.__is_gateway():
            # Gateway invocations wrap the original HTTP headers; unwrap
            # them into Fn headers (True) and raw HTTP headers (False).
            self.__headers = hs.decap_headers(headers, True)
            self.__http_headers = hs.decap_headers(headers, False)
    def AppID(self):
        return self.__app_id
    def AppName(self):
        return self.__app_name
    def FnID(self):
        return self.__fn_id
    def FnName(self):
        return self.__fn_name
    def CallID(self):
        return self.__call_id
    def Config(self):
        return self.__config
    def Headers(self):
        return self.__headers
    def HTTPHeaders(self):
        return self.__http_headers
    def Format(self):
        return self.__fn_format
    def TracingContext(self):
        return self.__tracing_context
    def Deadline(self):
        # When no deadline was supplied, synthesize one: now (UTC, local
        # offset applied) plus the configured default, as an ISO string.
        if self.__deadline is None:
            now = dt.datetime.now(dt.timezone.utc).astimezone()
            now += dt.timedelta(0, float(constants.DEFAULT_DEADLINE))
            return now.isoformat()
        return self.__deadline
    def SetResponseHeaders(self, headers, status_code):
        """Merge *headers* into the response headers (keys lower-cased);
        gateway responses get them re-wrapped with the status code."""
        log.log("setting headers. gateway: {0}".format(self.__is_gateway()))
        if self.__is_gateway():
            headers = hs.encap_headers(headers, status=status_code)
        for k, v in headers.items():
            self.__response_headers[k.lower()] = v
    def GetResponseHeaders(self):
        return self.__response_headers
    def RequestURL(self):
        return self._request_url
    def Method(self):
        return self._method
    def __is_gateway(self):
        # An invocation came through the HTTP gateway when the Fn intent
        # header is present and set to the HTTP-request intent.
        return (constants.FN_INTENT in self.__headers
                and self.__headers.get(constants.FN_INTENT)
                == constants.INTENT_HTTP_REQUEST)
class TracingContext(object):
    """Placeholder for tracing-specific attributes of one invocation.

    Wraps the Zipkin B3 identifiers plus the Fn app/function identity read
    from the environment, and produces py_zipkin ``ZipkinAttrs``.
    """

    def __init__(self, is_tracing_enabled, trace_collector_url,
                 trace_id, span_id, parent_span_id,
                 is_sampled, flags):
        """
        :param is_tracing_enabled: tracing enabled flag
        :type is_tracing_enabled: bool
        :param trace_collector_url: APM Trace Collector Endpoint URL
        :type trace_collector_url: str
        :param trace_id: Trace ID
        :type trace_id: str
        :param span_id: Span ID
        :type span_id: str
        :param parent_span_id: Parent Span ID
        :type parent_span_id: str
        :param is_sampled: whether spans should be emitted
        :type is_sampled: int (0 or 1)
        :param flags: debug flags
        :type flags: int (0 or 1)
        """
        self.__is_tracing_enabled = is_tracing_enabled
        self.__trace_collector_url = trace_collector_url
        self.__trace_id = trace_id
        self.__span_id = span_id
        self.__parent_span_id = parent_span_id
        self.__is_sampled = is_sampled
        self.__flags = flags
        # App/function identity comes from the standard Fn environment.
        self.__app_name = os.environ.get(constants.FN_APP_NAME)
        self.__app_id = os.environ.get(constants.FN_APP_ID)
        self.__fn_name = os.environ.get(constants.FN_NAME)
        self.__fn_id = os.environ.get(constants.FN_ID)
        self.__zipkin_attrs = self.__create_zipkin_attrs(is_tracing_enabled)

    def is_tracing_enabled(self):
        """Return whether tracing is enabled."""
        return self.__is_tracing_enabled

    def trace_collector_url(self):
        """Return the APM trace collector endpoint URL."""
        return self.__trace_collector_url

    def trace_id(self):
        """Return the trace id."""
        return self.__trace_id

    def span_id(self):
        """Return the span id."""
        return self.__span_id

    def parent_span_id(self):
        """Return the parent span id."""
        return self.__parent_span_id

    def is_sampled(self):
        """Return True when spans should be emitted."""
        return bool(self.__is_sampled)

    def flags(self):
        """Return the zipkin debug flags."""
        return self.__flags

    def zipkin_attrs(self):
        """Return the py_zipkin-compatible ZipkinAttrs namedtuple."""
        return self.__zipkin_attrs

    # helper specific to py_zipkin
    def __create_zipkin_attrs(self, is_tracing_enabled):
        """Build a py_zipkin-style ZipkinAttrs tuple from this context.

        The fnLb sends the parent span id in the span id slot, so when
        tracing is enabled the span id is shifted down to the parent slot
        and a fresh span id is minted for this invocation.
        """
        ZipkinAttrs = namedtuple(
            "ZipkinAttrs",
            "trace_id, span_id, parent_span_id, is_sampled, flags"
        )
        span = self.__span_id
        parent = self.__parent_span_id
        if is_tracing_enabled:
            parent = span
            span = generate_id()
        return ZipkinAttrs(self.__trace_id, span, parent,
                           bool(self.__is_sampled), self.__flags)

    def service_name(self, override=None):
        """Return the lower-cased service name for spans.

        Defaults to ``"<app name>::<fn name>"`` built from the environment
        (either part may render as the literal "None" when the variable is
        unset), unless *override* is given.
        """
        if override is not None:
            return override.lower()
        return "{}::{}".format(self.__app_name, self.__fn_name).lower()

    def annotations(self):
        """Return the standard FaaS identification annotations for spans."""
        return {
            "generatedBy": "faas",
            "appName": self.__app_name,
            "appID": self.__app_id,
            "fnName": self.__fn_name,
            "fnID": self.__fn_id,
        }
def generate_id():
    """Return a random 64-bit identifier as a 16-digit, zero-padded,
    lower-case hex string (zipkin trace/span id format)."""
    return format(random.getrandbits(64), "016x")
def context_from_format(format_def: str, **kwargs) -> (
        InvokeContext, io.BytesIO):
    """
    Creates an invoke context from the incoming request.

    :param format_def: function format
    :type format_def: str
    :param kwargs: request-specific map of parameters
    :return: invoke context and request body data
    :rtype: tuple

    NOTE(review): for any format other than ``constants.HTTPSTREAM`` this
    function falls through and implicitly returns ``None`` (pre-existing
    behaviour, preserved here).
    """
    app_id = os.environ.get(constants.FN_APP_ID)
    fn_id = os.environ.get(constants.FN_ID)
    app_name = os.environ.get(constants.FN_APP_NAME)
    fn_name = os.environ.get(constants.FN_NAME)
    # the tracing enabled env variable is passed as a "0" or "1" string
    # and therefore needs to be converted appropriately.
    is_tracing_enabled = os.environ.get(constants.OCI_TRACING_ENABLED)
    is_tracing_enabled = (
        bool(int(is_tracing_enabled))
        if is_tracing_enabled is not None
        else False
    )
    trace_collector_url = os.environ.get(constants.OCI_TRACE_COLLECTOR_URL)
    if format_def == constants.HTTPSTREAM:
        data = kwargs.get("data")
        headers = kwargs.get("headers")
        # zipkin tracing http headers
        trace_id = span_id = parent_span_id = is_sampled = trace_flags = None
        if is_tracing_enabled:
            # we generate the trace_id / span_id when tracing is enabled
            # but the corresponding zipkin header is missing.
            trace_id = headers.get(constants.X_B3_TRACEID)
            trace_id = generate_id() if trace_id is None else trace_id
            span_id = headers.get(constants.X_B3_SPANID)
            parent_span_id = headers.get(constants.X_B3_PARENTSPANID)
            span_id = generate_id() if span_id is None else span_id
            # is_sampled should arrive as "0"/"1", but legacy senders use
            # "False"/"True". int("True") would raise ValueError, so both
            # spellings are handled explicitly; a missing header means
            # "sampled".
            is_sampled = headers.get(constants.X_B3_SAMPLED)
            if is_sampled is None:
                is_sampled = 1
            elif str(is_sampled).strip().lower() in ("true", "false"):
                is_sampled = int(str(is_sampled).strip().lower() == "true")
            else:
                is_sampled = int(is_sampled)
            # not currently used but is defined by the zipkin headers standard
            trace_flags = headers.get(constants.X_B3_FLAGS)
        # The tracing context is created even when tracing is disabled so
        # customer code never fails on a missing attribute; such an "empty"
        # context does not emit spans because is_sampled stays None.
        tracing_context = TracingContext(
            is_tracing_enabled,
            trace_collector_url,
            trace_id,
            span_id,
            parent_span_id,
            is_sampled,
            trace_flags
        )
        method = headers.get(constants.FN_HTTP_METHOD)
        request_url = headers.get(constants.FN_HTTP_REQUEST_URL)
        deadline = headers.get(constants.FN_DEADLINE)
        call_id = headers.get(constants.FN_CALL_ID)
        content_type = headers.get(constants.CONTENT_TYPE)
        ctx = InvokeContext(
            app_id, app_name, fn_id, fn_name, call_id,
            content_type=content_type,
            deadline=deadline,
            config=os.environ,
            headers=headers,
            method=method,
            request_url=request_url,
            fn_format=constants.HTTPSTREAM,
            tracing_context=tracing_context,
        )
        return ctx, data
| StarcoderdataPython |
1872363 | <reponame>gabykyei/GC_BlockChain_T_Rec
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import subprocess
from setuptools import setup, find_packages
<<<<<<< HEAD
setup(
name='sawtooth-signing',
version=subprocess.check_output(
['../bin/get_version']).decode('utf-8').strip(),
description='Sawtooth Signing Library',
author='<NAME>',
url='https://github.com/hyperledger/sawtooth-core',
=======
def bump_version(version):
(major, minor, patch) = version.split('.')
patch = str(int(patch) + 1)
return ".".join([major, minor, patch])
def auto_version(default, strict):
output = subprocess.check_output(['git', 'describe', '--dirty'])
parts = output.decode('utf-8').strip().split('-', 1)
parts[0] = parts[0][1:] # strip the leading 'v'
if len(parts) == 2:
parts[0] = bump_version(parts[0])
if default != parts[0]:
msg = "setup.py and (bumped?) git describe versions differ: " \
"{} != {}".format(default, parts[0])
if strict:
print >> sys.stderr, "ERROR: " + msg
sys.exit(1)
else:
print >> sys.stderr, "WARNING: " + msg
print >> sys.stderr, "WARNING: using setup.py version {}".format(
default)
parts[0] = default
if len(parts) == 2:
return "-git".join([parts[0], parts[1].replace("-", ".")])
else:
return parts[0]
def version(default):
if 'VERSION' in os.environ:
if os.environ['VERSION'] == 'AUTO_STRICT':
version = auto_version(default, strict=True)
elif os.environ['VERSION'] == 'AUTO':
version = auto_version(default, strict=False)
else:
version = os.environ['VERSION']
else:
version = default + "-dev1"
return version
if os.name == 'nt':
extra_compile_args = ['/EHsc']
libraries = ['json-c', 'cryptopp-static']
include_dirs = ['deps/include', 'deps/include/cryptopp']
library_dirs = ['deps/lib']
elif sys.platform == 'darwin':
os.environ["CC"] = "clang++"
extra_compile_args = ['-std=c++11']
libraries = ['json-c', 'cryptopp']
include_dirs = ['/usr/local/include']
library_dirs = ['/usr/local/lib']
else:
extra_compile_args = ['-std=c++11']
libraries = ['json-c', 'cryptopp']
include_dirs = []
library_dirs = []
ecdsamod = Extension('_ECDSARecoverModule',
['sawtooth_signing/ECDSA/ECDSARecoverModule.i',
'sawtooth_signing/ECDSA/ECDSARecover.cc'],
swig_opts=['-c++'],
extra_compile_args=extra_compile_args,
include_dirs=include_dirs,
libraries=libraries,
library_dirs=library_dirs)
setup(
name='sawtooth-signing',
version=version('0.7.2'),
description='Validator service for Sawtooth Lake distributed ledger from ',
author='<NAME>, Intel Labs',
url='http://www.intel.com',
>>>>>>> 0-7
packages=find_packages(),
install_requires=[
"secp256k1",
],
data_files=[],
entry_points={})
| StarcoderdataPython |
12805279 | <filename>cantools/database/utils.py
# Utility functions.
import binascii
from decimal import Decimal
from collections import namedtuple
from cantools.database.can.signal import NamedSignalValue
try:
    # Prefer the C-accelerated bitstruct implementation when available.
    import bitstruct.c
except ImportError:
    import bitstruct
# Container for the compiled pack/unpack formats of one message:
# big_endian / little_endian are compiled bitstruct formats and
# padding_mask has a 1-bit at every position not covered by any signal.
Formats = namedtuple('Formats',
                     [
                         'big_endian',
                         'little_endian',
                         'padding_mask'
                     ])
def format_or(items):
    """Join *items* into a human-readable alternative list.

    ``[1]`` -> ``'1'``; ``[1, 2, 3]`` -> ``'1, 2 or 3'``.
    """
    strings = [str(item) for item in items]
    if len(strings) == 1:
        return strings[0]
    return '{} or {}'.format(', '.join(strings[:-1]), strings[-1])
def format_and(items):
    """Join *items* into a human-readable conjunction list.

    ``[1]`` -> ``'1'``; ``[1, 2, 3]`` -> ``'1, 2 and 3'``.
    """
    strings = [str(item) for item in items]
    if len(strings) == 1:
        return strings[0]
    return '{} and {}'.format(', '.join(strings[:-1]), strings[-1])
def start_bit(data):
    """Return the DBC-style start bit of *data*.

    Little-endian signals are located by their LSB's sawtooth index
    (``data.start`` unchanged); big-endian signals by the network bit
    number of their MSB.
    """
    if data.byte_order != 'big_endian':
        return data.start
    return 8 * (data.start // 8) + (7 - (data.start % 8))
def _encode_field(field, data, scaling):
    """Convert one signal's physical value from *data* into its raw integer.

    String and NamedSignalValue inputs are mapped through the signal's
    choice table; numeric inputs are inverse-scaled when *scaling* is set,
    otherwise passed through unchanged.
    """
    value = data[field.name]
    if isinstance(value, str):
        return field.choice_string_to_number(value)
    elif isinstance(value, NamedSignalValue):
        return field.choice_string_to_number(str(value))
    elif scaling:
        if field.is_float:
            return (value - field.offset) / field.scale
        else:
            # Decimal avoids float rounding error in the inverse scaling;
            # to_integral() rounds half-even (banker's rounding) by default.
            value = (Decimal(value) - Decimal(field.offset)) / Decimal(field.scale)
            return int(value.to_integral())
    else:
        return value
def _decode_field(field, value, decode_choices, scaling):
    """Convert one raw signal *value* to its decoded representation.

    When *decode_choices* is set and the raw value has a named choice, the
    choice is returned. Otherwise the value is scaled (kept as an int when
    both scale and offset are whole numbers and the signal is not float),
    or returned unchanged when *scaling* is falsy.
    """
    if decode_choices:
        try:
            return field.choices[value]
        except (KeyError, TypeError):
            # No choices table, or no entry for this raw value.
            pass

    if not scaling:
        return value

    def _is_whole(x):
        return isinstance(x, int) or (isinstance(x, float) and x.is_integer())

    scaled = field.scale * value + field.offset
    if field.is_float or not _is_whole(field.scale) or not _is_whole(field.offset):
        return scaled
    return int(scaled)
def encode_data(data, fields, formats, scaling):
    """Pack the signal values in *data* into one integer word.

    The big-endian and little-endian signals are packed separately with the
    pre-compiled formats and OR-ed together; the little-endian bytes are
    reversed first so both halves line up in the same integer.
    """
    if not fields:
        return 0
    raw_values = {
        field.name: _encode_field(field, data, scaling)
        for field in fields
    }
    packed_be = formats.big_endian.pack(raw_values)
    packed_le = formats.little_endian.pack(raw_values)[::-1]
    return (int(binascii.hexlify(packed_be), 16)
            | int(binascii.hexlify(packed_le), 16))
def decode_data(data, fields, formats, decode_choices, scaling):
    """Unpack *data* bytes into a mapping of signal name to decoded value.

    Big-endian signals are unpacked from the bytes as-is; little-endian
    signals from the byte-reversed data, mirroring ``encode_data``.
    """
    raw = formats.big_endian.unpack(bytes(data))
    raw.update(formats.little_endian.unpack(bytes(data[::-1])))
    return {
        field.name: _decode_field(field, raw[field.name],
                                  decode_choices, scaling)
        for field in fields
    }
def create_encode_decode_formats(datas, number_of_bytes):
    """Build compiled bitstruct pack/unpack formats for a set of signals.

    Returns a ``Formats`` namedtuple holding a compiled big-endian format,
    a compiled little-endian format and a combined padding mask whose set
    bits mark positions not covered by any signal in either layout.
    """
    # Total message size in bits.
    format_length = (8 * number_of_bytes)
    def get_format_string_type(data):
        # bitstruct type codes: f = float, s = signed int, u = unsigned int.
        if data.is_float:
            return 'f'
        elif data.is_signed:
            return 's'
        else:
            return 'u'
    def padding_item(length):
        # 'p<n>' skips n bits when packing; mask bits are 1 for padding.
        fmt = 'p{}'.format(length)
        padding_mask = '1' * length
        return fmt, padding_mask, None
    def data_item(data):
        # One (format, mask, name) entry for a real signal; mask bits are 0.
        fmt = '{}{}'.format(get_format_string_type(data),
                            data.length)
        padding_mask = '0' * data.length
        return fmt, padding_mask, data.name
    def fmt(items):
        return ''.join([item[0] for item in items])
    def names(items):
        return [item[2] for item in items if item[2] is not None]
    def padding_mask(items):
        try:
            return int(''.join([item[1] for item in items]), 2)
        except ValueError:
            # No items -> empty bit string -> no padding.
            return 0
    def create_big():
        items = []
        start = 0
        # Select BE fields
        be_datas = [data for data in datas if data.byte_order == "big_endian"]
        # Ensure BE fields are sorted in network order
        sorted_datas = sorted(be_datas, key = lambda data: sawtooth_to_network_bitnum(data.start))
        for data in sorted_datas:
            # Insert explicit padding for any gap before this field.
            padding_length = (start_bit(data) - start)
            if padding_length > 0:
                items.append(padding_item(padding_length))
            items.append(data_item(data))
            start = (start_bit(data) + data.length)
        if start < format_length:
            length = format_length - start
            items.append(padding_item(length))
        return fmt(items), padding_mask(items), names(items)
    def create_little():
        items = []
        end = format_length
        # Walk LE fields from the highest start bit downwards.
        for data in datas[::-1]:
            if data.byte_order == 'big_endian':
                continue
            padding_length = end - (data.start + data.length)
            if padding_length > 0:
                items.append(padding_item(padding_length))
            items.append(data_item(data))
            end = data.start
        if end > 0:
            items.append(padding_item(end))
        value = padding_mask(items)
        if format_length > 0:
            # The LE mask was built MSB-first; pack it and byte-reverse so
            # it lines up with the message's little-endian byte order.
            length = len(''.join([item[1] for item in items]))
            value = bitstruct.pack('u{}'.format(length), value)
            value = int(binascii.hexlify(value[::-1]), 16)
        return fmt(items), value, names(items)
    big_fmt, big_padding_mask, big_names = create_big()
    little_fmt, little_padding_mask, little_names = create_little()
    # Prefer the C-accelerated compiler; fall back to the pure-Python
    # implementation when the extension is missing or rejects the format.
    try:
        big_compiled = bitstruct.c.compile(big_fmt, big_names)
    except Exception as e:
        big_compiled = bitstruct.compile(big_fmt, big_names)
    try:
        little_compiled = bitstruct.c.compile(little_fmt, little_names)
    except Exception as e:
        little_compiled = bitstruct.compile(little_fmt, little_names)
    return Formats(big_compiled,
                   little_compiled,
                   big_padding_mask & little_padding_mask)
def sawtooth_to_network_bitnum(sawtooth_bitnum):
    '''Convert a SawTooth bit number to the Network bit number.

    Byte     |    0   |    1   |
    Sawtooth | 7 .. 0 | 15 .. 8|
    Network  | 0 .. 7 | 8 .. 15|
    '''
    byte_base = 8 * (sawtooth_bitnum // 8)
    return byte_base + 7 - (sawtooth_bitnum % 8)
def cdd_offset_to_dbc_start_bit(cdd_offset, bit_length, byte_order):
    '''Convert a CDD/c-style field bit offset to the DBC start-bit convention.

    Big-endian fields are located by their MSBit's sawtooth index (clamped
    to the field's first byte, allowing fields smaller or larger than
    8 bits); little-endian fields by their LSBit's sawtooth index, which is
    the offset unchanged.
    '''
    if byte_order != "big_endian":
        return cdd_offset
    msb_in_byte = (cdd_offset % 8) + bit_length - 1
    return 8 * (cdd_offset // 8) + min(7, msb_in_byte)
| StarcoderdataPython |
9711415 | # https://www.oreilly.com/library/view/programming-computer-vision/9781449341916/ch04.html
import numpy as np
import cv2
import glob
''' Dimensions of the chessboard '''
# NOTE(review): for an 8x10-square board the full inner-corner grid would
# be 7x9; the detector is asked for (6,8) here -- confirm the intended
# board geometry.
chessboard_pattern = (8,10)
chessboard_internal_pattern = (6,8)
chessboard_pattern_size_mm = 34
''' Path to the image to undistort '''
distorted_image_1 = './images/img_014.jpg'
''' Defining the world coordinates for 3D points. Object points are (0,0,0), (1,0,0), (2,0,0), ..., (6,8,0) '''
objp = np.zeros((chessboard_internal_pattern[0] * chessboard_internal_pattern[1], 3), np.float32)
objp[:,:2] = np.mgrid[0:chessboard_internal_pattern[0], 0:chessboard_internal_pattern[1]].T.reshape(-1, 2)
''' Scaling the object points by the pattern size '''
# Left disabled: calibration extrinsics are then expressed in "square"
# units rather than millimetres (intrinsics are unaffected).
# objp = objp * chessboard_pattern_size_mm
''' Arrays to store object points and image points from all the images '''
objpoints = [] # 3d points for chessboard images (world coordinate frame)
imgpoints = [] # 2d points for chessboard images (camera coordinate frame)
''' Path of chessboard images used for caliberation '''
image_list = glob.glob('./images/*.jpg')
''' Termination Criteria '''
# cornerSubPix refinement stops after 30 iterations or when corners move
# by less than 0.001 px, whichever comes first.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
for image in image_list:
    img = cv2.imread(image)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, chessboard_internal_pattern, cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_NORMALIZE_IMAGE)
    # If corners are found, add object points, image points (after refining them)
    if ret == True:
        objpoints.append(objp)
        # Refine pixel coordinates for given 2d points
        corners2 = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
        imgpoints.append(corners2)
        # Draw and display the chessboard corners
        cv2.drawChessboardCorners(img, chessboard_internal_pattern, corners2, ret)
        cv2.imshow('img', img)
        if image == distorted_image_1:
            cv2.imwrite('outputs/chess.png', img)
        cv2.waitKey(1)
cv2.destroyAllWindows()
########## CAMERA CALIBRATION #####################
# NOTE(review): `gray` here is the last image processed by the loop above;
# all calibration images are assumed to share its resolution.
ret, cameraMatrix, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
print('Camera Calibrated: ', ret)
print("Camera matrix : \n", cameraMatrix)
print("Distortion coefficient: \n", dist)
# print("Rotation Vectors: \n", rvecs)
# print("Translation Vectors: \n", tvecs)
img = cv2.imread(distorted_image_1)
cv2.imwrite('outputs/original.png', img)
h, w = img.shape[:2]
# alpha=1 keeps all source pixels (black borders possible); roi is the
# all-valid-pixel rectangle usable for cropping.
newCameraMatrix, roi = cv2.getOptimalNewCameraMatrix(cameraMatrix, dist, (w,h), 1, (w,h))
##################### Undistort Image ####################
''' Sample 1'''
dst = cv2.undistort(img, cameraMatrix, dist, None, newCameraMatrix)
x, y, w, h = roi
# Crop left disabled so the full undistorted frame is saved.
# dst = dst[y:y+h, x:x+w]
cv2.imwrite('outputs/undistorted_calibresult.png', dst)
##################### Undistort Image with Remapping ####################
mapx, mapy = cv2.initUndistortRectifyMap(cameraMatrix, dist, None, newCameraMatrix, (w,h), 5)
dst = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR)
x, y, w, h = roi
dst = dst[y:y+h, x:x+w]
cv2.imwrite('outputs/undistorted_calibresult_mapping.png', dst)
##################### Reprojection error ##################
# Mean per-image L2 distance between detected corners and corners
# re-projected through the estimated intrinsics/extrinsics; lower is better.
mean_error = 0
for i in range(len(objpoints)):
    imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], cameraMatrix, dist)
    error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2)/len(imgpoints2)
    mean_error += error
print( "total error: {}".format(mean_error/len(objpoints)) )
| StarcoderdataPython |
4889835 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 27 10:23:27 2021
@author: denis
"""
import os
from Artificial_Image_Simulator import Artificial_Image_Simulator
# CCD operation parameters forwarded to the simulator.
# NOTE(review): the semantics of em_mode, em_gain, preamp, hss, binn,
# t_exp and ccd_temp are defined by the Artificial_Image_Simulator
# package (not visible here) -- confirm units against its documentation.
dic = {
    "em_mode": 0,
    "em_gain": 1,
    "preamp": 1,
    "hss": 1,
    "binn": 1,
    "t_exp": 1,
    "ccd_temp": -70,
    "image_size": 200,
}
# Configure the simulator: photometric mode, channel 1, a Gaussian star
# PSF (std 8 px) at pixel (512, 512), 500-ADU bias level, and a 5700 K
# star spectrum sampled over 350-1150 nm in 50 nm steps.
ais = Artificial_Image_Simulator(
    ccd_operation_mode=dic,
    channel=1,
    gaussian_std=8,
    star_coordinates=[512, 512],
    bias_level=500,
    sparc4_operation_mode="phot",
    image_dir=os.path.join("..", "FITS"),
    star_wavelength_interval=(350, 1150, 50),
    star_temperature=5700,
)
# Apply the spectral responses in optical order (atmosphere, telescope,
# instrument), then write one simulated image to image_dir.
ais.apply_atmosphere_spectral_response()
ais.apply_telescope_spectral_response()
ais.apply_sparc4_spectral_response()
ais.create_random_image(1)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.