index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
993,300 | 489e23507577d8d270a22634981390e4c82f8f3b | # -*- coding: utf-8 -*-
# @Time : 2021-08-02 17:13
# @Author : zxl
# @FileName: 004_6.py
class Solution:
    """LeetCode 4 style solution: median of two sorted arrays via a
    k-th-smallest search over both arrays."""

    def findKthNum(self,nums1,nums2,k):
        """Return the k-th smallest (1-based) element of the two sorted
        arrays merged, without actually merging them.

        Maintains active windows [i1, j1] and [i2, j2] on each array and
        repeatedly discards a prefix or suffix, re-basing k as elements
        below the answer are dropped.
        """
        # Degenerate cases: one array empty -> answer comes from the other.
        if len(nums1) == 0:
            return nums2[k-1]
        if len(nums2) == 0:
            return nums1[k-1]
        i1 = 0
        j1 = len(nums1)-1
        i2 = 0
        j2 = len(nums2)-1
        while i1<=j1 and i2<=j2:
            m1 = (i1+j1)//2
            m2 = (i2+j2)//2
            if m1-i1+m2-i2+2<=k:
                # The two mid-prefixes together hold at most k elements, so
                # the smaller mid cannot exceed the answer: discard its
                # prefix and shrink k by the number of elements dropped.
                if nums1[m1]<nums2[m2]:
                    k -= (m1 - i1 + 1)
                    i1=m1+1
                else:
                    k -= (m2 - i2 + 1)
                    i2 = m2+1
            else:
                # Too many candidates at or below the mids: the larger mid
                # cannot be at or below the answer, discard its suffix.
                if nums1[m1]>nums2[m2]:
                    j1 = m1-1
                else:
                    j2 = m2-1
        # Exactly one window is exhausted; the answer is the k-th element
        # of the remaining window.
        if i1>j1:
            return nums2[i2+k-1]
        if i2>j2:
            return nums1[i1+k-1]

    def findMedianSortedArrays(self, nums1 , nums2 ) -> float:
        """Return the median of the two sorted arrays.

        Note: for an odd total length the single middle element is
        returned as-is (may be an int); for an even total the mean of the
        two middle elements is returned as a float.
        """
        m = len(nums1)
        n = len(nums2)
        if (m+n)%2 == 1:
            # Odd combined length: the middle element.
            k = (m+n)//2+1
            num = self.findKthNum(nums1,nums2,k)
            return num
        else:
            # Even combined length: average of the two middle elements.
            k1 = (m+n)//2
            k2 = k1+1
            num1 = self.findKthNum(nums1,nums2,k1)
            num2 = self.findKthNum(nums1,nums2,k2)
            return (num1+num2)/2
# Smoke test: with nums2 empty, the k-th element comes straight from nums1,
# so the median of [2] and [] is the lone element 2.
obj = Solution()
nums1, nums2 = [2], []
ans = obj.findMedianSortedArrays(nums1, nums2)
print(ans)
993,301 | c55ab49cd5041e8f7e5ecc6055bcdf06396e3b94 | import re
states = "Mississippi Alabama Texas Massachusetts Kansas"

# (pattern, flags) pairs applied in order; matches are collected pass by
# pass, so the resulting list is ordered pattern-major (exactly like the
# original four separate loops).
_searches = [
    ('xas$', 0),        # ends in "xas"
    ('^K.*s$', re.I),   # K...s, case-insensitive
    ('^M.*s$', 0),      # M...s, case-sensitive
    ('a$', 0),          # ends in "a"
]

statesArr = states.split()
statesList = []
for _pattern, _flags in _searches:
    for _name in statesArr:
        if re.search(_pattern, _name, _flags):
            statesList.append(_name)

for _name in statesList:
    print(_name)
print(states)
|
993,302 | 7db82b35ce2559f4bf9a461ea58b2d04778ddb38 | # -*- coding: utf-8 -*-
"""
The generator class and related utility functions.
"""
from commando.util import getLoggerWithNullHandler
from fswrap import File, Folder
from hyde.exceptions import HydeException
from hyde.model import Context, Dependents
from hyde.plugin import Plugin
from hyde.template import Template
from hyde.site import Resource
from contextlib import contextmanager
from datetime import datetime
from shutil import copymode
import sys
logger = getLoggerWithNullHandler('hyde.engine')
class Generator(object):
    """
    Generates output from a node or resource.
    """

    def __init__(self, site):
        """Bind the generator to `site`, load plugins, build the context."""
        super(Generator, self).__init__()
        self.site = site
        self.generated_once = False
        self.deps = Dependents(site.sitepath)
        self.waiting_deps = {}
        self.create_context()
        self.template = None
        Plugin.load_all(site)
        self.events = Plugin.get_proxy(self.site)

    def create_context(self):
        """(Re)build the template context dict from the site configuration."""
        site = self.site
        self.__context__ = dict(site=site)
        if hasattr(site.config, 'context'):
            site.context = Context.load(site.sitepath, site.config.context)
            self.__context__.update(site.context)

    @contextmanager
    def context_for_resource(self, resource):
        """
        Context manager that intializes the context for a given
        resource and rolls it back after the resource is processed.
        """
        self.__context__.update(
            resource=resource,
            node=resource.node,
            time_now=datetime.now())
        yield self.__context__
        self.__context__.update(resource=None, node=None)

    def context_for_path(self, path):
        """
        Return a copy of the context primed with the resource at `path`,
        or an empty dict when the path maps to no known resource.
        """
        resource = self.site.resource_from_path(path)
        if not resource:
            return {}
        # BUG FIX: the original did `ctx = self.__context__.copy` (missing
        # call parentheses, so ctx was the bound method object) and then
        # `ctx.resource = resource`, an attribute write that fails on a
        # dict. Copy the dict and store the resource under its key.
        ctx = self.__context__.copy()
        ctx['resource'] = resource
        return ctx

    def load_template_if_needed(self):
        """
        Loads and configures the template environment from the site
        configuration if it's not done already.
        """
        class GeneratorProxy(object):
            """
            An interface to templates and plugins for
            providing restricted access to the methods.
            """

            def __init__(self, preprocessor=None, postprocessor=None,
                         context_for_path=None):
                self.preprocessor = preprocessor
                self.postprocessor = postprocessor
                self.context_for_path = context_for_path

        if not self.template:
            logger.info("Generating site at [%s]" % self.site.sitepath)
            self.template = Template.find_template(self.site)
            logger.debug("Using [%s] as the template",
                         self.template.__class__.__name__)
            logger.info("Configuring the template environment")
            preprocessor = self.events.begin_text_resource
            postprocessor = self.events.text_resource_complete
            proxy = GeneratorProxy(context_for_path=self.context_for_path,
                                   preprocessor=preprocessor,
                                   postprocessor=postprocessor)
            self.template.configure(self.site,
                                    engine=proxy)
            self.events.template_loaded(self.template)

    def initialize(self):
        """
        Start Generation. Perform setup tasks and inform plugins.
        """
        logger.debug("Begin Generation")
        self.events.begin_generation()

    def load_site_if_needed(self):
        """
        Checks if the site requires a reload and loads if
        necessary.
        """
        self.site.reload_if_needed()

    def finalize(self):
        """
        Generation complete. Inform plugins and cleanup.
        """
        logger.debug("Generation Complete")
        self.events.generation_complete()

    def get_dependencies(self, resource):
        """
        Gets the dependencies for a given resource.
        """
        rel_path = resource.relative_path
        deps = self.deps[rel_path] if rel_path in self.deps \
            else self.update_deps(resource)
        return deps

    def update_deps(self, resource):
        """
        Updates the dependencies for the given resource.
        """
        if not resource.source_file.is_text:
            return []
        rel_path = resource.relative_path
        self.waiting_deps[rel_path] = []
        deps = []
        if hasattr(resource, 'depends'):
            user_deps = resource.depends
            for dep in user_deps:
                deps.append(dep)
                dep_res = self.site.content.resource_from_relative_path(dep)
                if dep_res:
                    # If the dependency is itself mid-resolution, queue this
                    # resource so it picks up the deps once they are known.
                    if dep_res.relative_path in self.waiting_deps.keys():
                        self.waiting_deps[
                            dep_res.relative_path].append(rel_path)
                    else:
                        deps.extend(self.get_dependencies(dep_res))
        if resource.uses_template and not resource.simple_copy:
            deps.extend(self.template.get_dependencies(rel_path))
        deps = list(set(deps))
        if None in deps:
            deps.remove(None)
        self.deps[rel_path] = deps
        for path in self.waiting_deps[rel_path]:
            self.deps[path].extend(deps)
        return deps

    def has_resource_changed(self, resource):
        """
        Checks if the given resource has changed since the
        last generation.
        """
        logger.debug("Checking for changes in %s" % resource)
        self.load_template_if_needed()
        self.load_site_if_needed()
        target = File(self.site.config.deploy_root_path.child(
            resource.relative_deploy_path))
        if not target.exists or target.older_than(resource.source_file):
            logger.debug("Found changes in %s" % resource)
            return True
        if resource.source_file.is_binary:
            logger.debug("No Changes found in %s" % resource)
            return False
        if self.site.config.needs_refresh() or \
                not target.has_changed_since(self.site.config.last_modified):
            logger.debug("Site configuration changed")
            return True
        deps = self.get_dependencies(resource)
        if not deps or None in deps:
            logger.debug("No changes found in %s" % resource)
            return False
        content = self.site.content.source_folder
        layout = Folder(self.site.sitepath).child_folder('layout')
        logger.debug("Checking for changes in dependents:%s" % deps)
        for dep in deps:
            if not dep:
                return True
            source = File(content.child(dep))
            if not source.exists:
                source = File(layout.child(dep))
            if not source.exists:
                return True
            if target.older_than(source):
                return True
        logger.debug("No changes found in %s" % resource)
        return False

    def generate_all(self, incremental=False):
        """
        Generates the entire website
        """
        logger.info("Reading site contents")
        self.load_template_if_needed()
        self.template.clear_caches()
        self.initialize()
        self.load_site_if_needed()
        self.events.begin_site()
        logger.info("Generating site to [%s]" %
                    self.site.config.deploy_root_path)
        self.__generate_node__(self.site.content, incremental)
        self.events.site_complete()
        self.finalize()
        self.generated_once = True

    def generate_node_at_path(self, node_path=None, incremental=False):
        """
        Generates a single node. If node_path is non-existent or empty,
        generates the entire site.
        """
        if not self.generated_once and not incremental:
            return self.generate_all()
        self.load_template_if_needed()
        self.load_site_if_needed()
        node = None
        if node_path:
            node = self.site.content.node_from_path(node_path)
        self.generate_node(node, incremental)

    @contextmanager
    def events_for(self, obj):
        """Fire begin/complete site+node events around the first generation."""
        if not self.generated_once:
            self.events.begin_site()
            if isinstance(obj, Resource):
                self.events.begin_node(obj.node)
        yield
        if not self.generated_once:
            if isinstance(obj, Resource):
                self.events.node_complete(obj.node)
            self.events.site_complete()
            self.generated_once = True

    def generate_node(self, node=None, incremental=False):
        """
        Generates the given node. If node is invalid, empty or
        non-existent, generates the entire website.
        """
        if not node or not self.generated_once and not incremental:
            return self.generate_all()
        self.load_template_if_needed()
        self.initialize()
        self.load_site_if_needed()
        try:
            with self.events_for(node):
                self.__generate_node__(node, incremental)
                self.finalize()
        except HydeException:
            # Fall back to a full rebuild on any generation failure.
            self.generate_all()

    def generate_resource_at_path(self, resource_path=None,
                                  incremental=False):
        """
        Generates a single resource. If resource_path is non-existent or empty,
        generates the entire website.
        """
        if not self.generated_once and not incremental:
            return self.generate_all()
        self.load_template_if_needed()
        self.load_site_if_needed()
        resource = None
        if resource_path:
            resource = self.site.content.resource_from_path(resource_path)
        self.generate_resource(resource, incremental)

    def generate_resource(self, resource=None, incremental=False):
        """
        Generates the given resource. If resource is invalid, empty or
        non-existent, generates the entire website.
        """
        if not resource or not self.generated_once and not incremental:
            return self.generate_all()
        self.load_template_if_needed()
        self.initialize()
        self.load_site_if_needed()
        try:
            with self.events_for(resource):
                self.__generate_resource__(resource, incremental)
        except HydeException:
            # Fall back to a full rebuild on any generation failure.
            self.generate_all()

    def refresh_config(self):
        """Reload the site configuration and context when it has changed."""
        if self.site.config.needs_refresh():
            logger.debug("Refreshing configuration and context")
            self.site.refresh_config()
            self.create_context()

    def __generate_node__(self, node, incremental=False):
        """Generate every resource under `node`, firing node events."""
        self.refresh_config()
        for node in node.walk():
            logger.debug("Generating Node [%s]", node)
            self.events.begin_node(node)
            for resource in sorted(node.resources):
                self.__generate_resource__(resource, incremental)
            self.events.node_complete(node)

    def __generate_resource__(self, resource, incremental=False):
        """Render or copy one resource into the deploy tree."""
        self.refresh_config()
        if not resource.is_processable:
            logger.debug("Skipping [%s]", resource)
            return
        if incremental and not self.has_resource_changed(resource):
            logger.debug("No changes found. Skipping resource [%s]", resource)
            return
        logger.debug("Processing [%s]", resource)
        with self.context_for_resource(resource) as context:
            target = File(self.site.config.deploy_root_path.child(
                resource.relative_deploy_path))
            target.parent.make()
            if resource.simple_copy:
                logger.debug("Simply Copying [%s]", resource)
                resource.source_file.copy_to(target)
            elif resource.source_file.is_text:
                self.update_deps(resource)
                if resource.uses_template:
                    logger.debug("Rendering [%s]", resource)
                    try:
                        text = self.template.render_resource(resource,
                                                             context)
                    except Exception as e:
                        # Message fix: the original adjacent string literals
                        # concatenated to "...processingtemplate:".
                        HydeException.reraise("Error occurred when processing "
                                              "template: [%s]: %s" %
                                              (resource, repr(e)),
                                              sys.exc_info())
                else:
                    text = resource.source_file.read_all()
                text = self.events.begin_text_resource(
                    resource, text) or text
                text = self.events.text_resource_complete(
                    resource, text) or text
                target.write(text)
                copymode(resource.source_file.path, target.path)
            else:
                logger.debug("Copying binary file [%s]", resource)
                self.events.begin_binary_resource(resource)
                resource.source_file.copy_to(target)
                self.events.binary_resource_complete(resource)
|
993,303 | 02a308146213a9f6d54c9c917dc1e0e18864198d | from unittest.mock import Mock
import pytest
from hypothesis import strategies
from hypothesis.strategies import text as _text
@pytest.fixture
def mock_consumer(mocker):
    """Replace giap's real consumer with a spec'd Mock for the test.

    Patches ``giap.core.get_consumer`` to return a ``Mock`` constrained to
    the ``ConsumerInterface`` spec, and returns that mock so tests can
    assert on the calls made through it.
    """
    from giap.consumer import ConsumerInterface
    mock = Mock(spec=ConsumerInterface)
    mocker.patch("giap.core.get_consumer", return_value=mock)
    return mock
def text():
    """Strategy producing non-empty strings of ASCII letters only."""
    from string import ascii_letters
    letters_only = _text(alphabet=ascii_letters, min_size=1)
    return letters_only


# Monkey-patch hypothesis so every use of strategies.text in this suite
# draws from the restricted ASCII-letter alphabet above.
strategies.text = text
|
993,304 | 7be2e7469d435563c4d948d872e1ad952e4cf0be | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 6 16:53:15 2019
@author: haoqi
RNN model for emotion list to beh score
"""
import torch
import torch.nn as nn
import pdb
# Run on GPU when available, CPU otherwise.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class Emotion_Seq2Beh_Model(nn.Module):
    """GRU regressor mapping an emotion sequence to behaviour scores.

    Input layout is (seq_len, batch, 6) — the GRU default (batch_first
    is False); output is (batch, 5), scored from the last time step only
    (many-to-one sequence problem).
    """

    def __init__(self):
        super(Emotion_Seq2Beh_Model, self).__init__()
        self.num_of_emotions = 6   # input features per time step
        self.num_of_behs = 5       # behaviour scores predicted
        self.hidden_sz = 128
        self.num_layers = 2
        self.gru1 = nn.GRU(input_size=self.num_of_emotions, hidden_size=self.hidden_sz, num_layers=self.num_layers)
        self.fc_out = nn.Sequential(
            nn.Linear(self.hidden_sz, int(self.hidden_sz/2)),
            nn.ReLU(),
            nn.Linear(int(self.hidden_sz/2), self.num_of_behs)
        )

    def forward(self, x_input):
        """Run the GRU over the sequence and score the final time step."""
        n_batch = x_input.shape[1]
        state0 = self.initHidden(n_batch)
        seq_out, _hidden = self.gru1(x_input, state0)
        # Many-to-one: only the last step's hidden output is scored.
        return self.fc_out(seq_out[-1])

    def initHidden(self, batch_sz):
        """Fresh all-zero hidden state of shape (layers, batch, hidden)."""
        shape = (self.num_layers, batch_sz, self.hidden_sz)
        return torch.zeros(*shape, device=device)
|
993,305 | 0bd53bffb57edb3835545e7698c2ee5ec3a50103 | import pandas as pd
import numpy as np
import sys
import matplotlib.pyplot as plt
from matplotlib import ticker
from matplotlib.ticker import AutoMinorLocator
class rvjitter(object):
    """
    Predicting RV jitter due to stellar oscillations, in terms of fundamental stellar properties.
    Example 1:
    #Generate MC samples using the model F=F(L, M, T)
    import RVJitter
    target = RVJitter.rvjitter(lumi=12.006, lumierr=1.131, mass=1.304, masserr=0.064, teff=4963.00, tefferr=80.000)
    sigmarv, sigmarvperr, sigmarvmerr, mcsigmarv = target.rv()
    Example 2:
    #Generate MC samples using the model F=F(L, M, T) and plot out.
    import RVJitter
    target = RVJitter.rvjitter(lumi=12.006, lumierr=1.131, mass=1.304, masserr=0.064, teff=4963.00, tefferr=80.000)
    sigmarv, sigmarvperr, sigmarvmerr, mcsigmarv = target.plot(figshow=True, figsave=True, figname='jitter.png')
    Example 3:
    #Generate MC samples using the model F=F(L, T, g) and plot out.
    import RVJitter
    target = RVJitter.rvjitter(lumi=12.006, lumierr=1.131, teff=4963.00, tefferr=80.000, logg=3.210, loggerr=0.006)
    sigmarv, sigmarvperr, sigmarvmerr, mcsigmarv = target.plot(figshow=True, figsave=True, figname='jitter.png')
    Example 4:
    #Generate MC samples using the model F=F(T, g) and plot out.
    import RVJitter
    target = RVJitter.rvjitter(teff=4963.00, tefferr=80.000, logg=3.210, loggerr=0.006)
    sigmarv, sigmarvperr, sigmarvmerr, mcsigmarv = target.plot(figshow=True, figsave=True, figname='jitter.png')
    Example 5:
    #Generate MC samples using the model F=F(L, T) and plot out. Note
    import RVJitter
    target = RVJitter.rvjitter(lumi=12.006, lumierr=1.131, teff=4963.00, tefferr=80.000, Lgiant=False)
    sigmarv, sigmarvperr, sigmarvmerr, mcsigmarv = target.plot(figshow=True, figsave=True, figname='jitter.png')
    """

    def __init__(self, lumi=None, lumierr=None, mass=None, masserr=None, teff=None, tefferr=None, logg=None, loggerr=None, Lgiant=None, CorFact=None):
        # Solar reference values and Monte-Carlo configuration.
        self.teffsun = 5777.
        self.gravsun = 10**4.44
        self.nsample = int(100000)   # number of MC draws
        self.loggthreshold = 3.5     # dwarf/subgiant vs giant boundary
        # The variable "CorFact" denotes a correction factor used to convert the RV jitter due to
        # only stellar oscillations to the jitter due to both stellar oscilations and granulation.
        # A correction factor of 1.6 is recommended.
        if CorFact is not None:
            self.CorFact = CorFact
        #else:
        #    self.CorFact = 1.6
        # Keep only fully supplied (value, uncertainty) pairs; the model
        # selection in rv() is driven by hasattr() checks on these.
        if (lumi is not None) & (lumierr is not None):
            self.lumi = lumi
            self.lumierr = lumierr
        if (mass is not None) & (masserr is not None):
            self.mass = mass
            self.masserr = masserr
        if (teff is not None) & (tefferr is not None):
            self.teff = teff
            self.tefferr = tefferr
        if (logg is not None) & (loggerr is not None):
            self.grav = 10**logg
            # NOTE(review): standard error propagation for g = 10**logg gives
            # sigma_g = g * ln(10) * sigma_logg; this expression
            # (loggerr / ln(10) / logg) differs -- confirm intent.
            self.graverr = loggerr/np.log(10)/logg
        if Lgiant is not None:
            self.Lgiant=Lgiant
        # Check a target is either either a dwarf/subgiant or giant.
        if hasattr(self,'Lgiant'):
            if self.Lgiant==True: logg=2.44
            if self.Lgiant==False: logg=4.44 #only used for representing either a dwarf/subgiant or giant.
        elif hasattr(self,'grav'): logg = np.log10(self.grav)
        elif hasattr(self,'lumi') & hasattr(self,'mass') & hasattr(self,'teff'):
            logg = np.log10(self.gravsun)-np.log10(self.lumi)+np.log10(self.mass)+4.*np.log10(self.teff/self.teffsun)
        else:
            print('Input data does not apply to any of the four models')
            raise sys.exit()  # sys.exit() itself raises SystemExit
        # Read in fitted parameters and their uncertainties.
        rms = pd.read_csv('fitparamsrms.csv')
        # Floor tiny fitted-parameter uncertainties at 0.01.
        rms.loc[np.where(rms['std']<0.005)[0], 'std'] = 0.01
        # NOTE(review): loggthreshold (3.5) already looks like a log g value,
        # so comparing logg <= log10(3.5) ~ 0.544 may be unintended -- verify
        # whether `logg <= self.loggthreshold` was meant.
        if logg<=np.log10(self.loggthreshold):
            # Giant-branch coefficients for the four fitted models.
            self.lmt_alpha = rms[rms.parameter=='RV_RMS_All_Giant_LMT_alpha'].iloc[0]['value']
            self.lmt_beta = rms[rms.parameter=='RV_RMS_All_Giant_LMT_beta'].iloc[0]['value']
            self.lmt_gamma = rms[rms.parameter=='RV_RMS_All_Giant_LMT_gamma'].iloc[0]['value']
            self.lmt_delta = rms[rms.parameter=='RV_RMS_All_Giant_LMT_delta'].iloc[0]['value']
            self.lmt_alpha_sig = rms[rms.parameter=='RV_RMS_All_Giant_LMT_alpha'].iloc[0]['std']
            self.lmt_beta_sig = rms[rms.parameter=='RV_RMS_All_Giant_LMT_beta'].iloc[0]['std']
            self.lmt_gamma_sig = rms[rms.parameter=='RV_RMS_All_Giant_LMT_gamma'].iloc[0]['std']
            self.lmt_delta_sig = rms[rms.parameter=='RV_RMS_All_Giant_LMT_delta'].iloc[0]['std']
            self.ltg_alpha = rms[rms.parameter=='RV_RMS_All_Giant_LTg_alpha'].iloc[0]['value']
            self.ltg_beta = rms[rms.parameter=='RV_RMS_All_Giant_LTg_beta'].iloc[0]['value']
            self.ltg_delta = rms[rms.parameter=='RV_RMS_All_Giant_LTg_gamma'].iloc[0]['value']
            self.ltg_epsilon = rms[rms.parameter=='RV_RMS_All_Giant_LTg_delta'].iloc[0]['value']
            self.ltg_alpha_sig = rms[rms.parameter=='RV_RMS_All_Giant_LTg_alpha'].iloc[0]['std']
            self.ltg_beta_sig = rms[rms.parameter=='RV_RMS_All_Giant_LTg_beta'].iloc[0]['std']
            self.ltg_delta_sig = rms[rms.parameter=='RV_RMS_All_Giant_LTg_gamma'].iloc[0]['std']
            self.ltg_epsilon_sig = rms[rms.parameter=='RV_RMS_All_Giant_LTg_delta'].iloc[0]['std']
            self.tg_alpha = rms[rms.parameter=='RV_RMS_All_Giant_Tg_alpha'].iloc[0]['value']
            self.tg_delta = rms[rms.parameter=='RV_RMS_All_Giant_Tg_beta'].iloc[0]['value']
            self.tg_epsilon = rms[rms.parameter=='RV_RMS_All_Giant_Tg_gamma'].iloc[0]['value']
            self.tg_alpha_sig = rms[rms.parameter=='RV_RMS_All_Giant_Tg_alpha'].iloc[0]['std']
            self.tg_delta_sig = rms[rms.parameter=='RV_RMS_All_Giant_Tg_beta'].iloc[0]['std']
            self.tg_epsilon_sig = rms[rms.parameter=='RV_RMS_All_Giant_Tg_gamma'].iloc[0]['std']
            self.lt_alpha = rms[rms.parameter=='RV_RMS_All_Giant_LT_alpha'].iloc[0]['value']
            self.lt_beta = rms[rms.parameter=='RV_RMS_All_Giant_LT_beta'].iloc[0]['value']
            self.lt_delta = rms[rms.parameter=='RV_RMS_All_Giant_LT_gamma'].iloc[0]['value']
            self.lt_alpha_sig = rms[rms.parameter=='RV_RMS_All_Giant_LT_alpha'].iloc[0]['std']
            self.lt_beta_sig = rms[rms.parameter=='RV_RMS_All_Giant_LT_beta'].iloc[0]['std']
            self.lt_delta_sig = rms[rms.parameter=='RV_RMS_All_Giant_LT_gamma'].iloc[0]['std']
        else:
            # Dwarf/subgiant coefficients for the four fitted models.
            self.lmt_alpha = rms[rms.parameter=='RV_RMS_All_Dwarf_LMT_alpha'].iloc[0]['value']
            self.lmt_beta = rms[rms.parameter=='RV_RMS_All_Dwarf_LMT_beta'].iloc[0]['value']
            self.lmt_gamma = rms[rms.parameter=='RV_RMS_All_Dwarf_LMT_gamma'].iloc[0]['value']
            self.lmt_delta = rms[rms.parameter=='RV_RMS_All_Dwarf_LMT_delta'].iloc[0]['value']
            self.lmt_alpha_sig = rms[rms.parameter=='RV_RMS_All_Dwarf_LMT_alpha'].iloc[0]['std']
            self.lmt_beta_sig = rms[rms.parameter=='RV_RMS_All_Dwarf_LMT_beta'].iloc[0]['std']
            self.lmt_gamma_sig = rms[rms.parameter=='RV_RMS_All_Dwarf_LMT_gamma'].iloc[0]['std']
            self.lmt_delta_sig = rms[rms.parameter=='RV_RMS_All_Dwarf_LMT_delta'].iloc[0]['std']
            self.ltg_alpha = rms[rms.parameter=='RV_RMS_All_Dwarf_LTg_alpha'].iloc[0]['value']
            self.ltg_beta = rms[rms.parameter=='RV_RMS_All_Dwarf_LTg_beta'].iloc[0]['value']
            self.ltg_delta = rms[rms.parameter=='RV_RMS_All_Dwarf_LTg_gamma'].iloc[0]['value']
            self.ltg_epsilon = rms[rms.parameter=='RV_RMS_All_Dwarf_LTg_delta'].iloc[0]['value']
            self.ltg_alpha_sig = rms[rms.parameter=='RV_RMS_All_Dwarf_LTg_alpha'].iloc[0]['std']
            self.ltg_beta_sig = rms[rms.parameter=='RV_RMS_All_Dwarf_LTg_beta'].iloc[0]['std']
            self.ltg_delta_sig = rms[rms.parameter=='RV_RMS_All_Dwarf_LTg_gamma'].iloc[0]['std']
            self.ltg_epsilon_sig = rms[rms.parameter=='RV_RMS_All_Dwarf_LTg_delta'].iloc[0]['std']
            self.tg_alpha = rms[rms.parameter=='RV_RMS_All_Dwarf_Tg_alpha'].iloc[0]['value']
            self.tg_delta = rms[rms.parameter=='RV_RMS_All_Dwarf_Tg_beta'].iloc[0]['value']
            self.tg_epsilon = rms[rms.parameter=='RV_RMS_All_Dwarf_Tg_gamma'].iloc[0]['value']
            self.tg_alpha_sig = rms[rms.parameter=='RV_RMS_All_Dwarf_Tg_alpha'].iloc[0]['std']
            self.tg_delta_sig = rms[rms.parameter=='RV_RMS_All_Dwarf_Tg_beta'].iloc[0]['std']
            self.tg_epsilon_sig = rms[rms.parameter=='RV_RMS_All_Dwarf_Tg_gamma'].iloc[0]['std']
            self.lt_alpha = rms[rms.parameter=='RV_RMS_All_Dwarf_LT_alpha'].iloc[0]['value']
            self.lt_beta = rms[rms.parameter=='RV_RMS_All_Dwarf_LT_beta'].iloc[0]['value']
            self.lt_delta = rms[rms.parameter=='RV_RMS_All_Dwarf_LT_gamma'].iloc[0]['value']
            self.lt_alpha_sig = rms[rms.parameter=='RV_RMS_All_Dwarf_LT_alpha'].iloc[0]['std']
            self.lt_beta_sig = rms[rms.parameter=='RV_RMS_All_Dwarf_LT_beta'].iloc[0]['std']
            self.lt_delta_sig = rms[rms.parameter=='RV_RMS_All_Dwarf_LT_gamma'].iloc[0]['std']

    def rv(self):
        """Monte-Carlo the fitted scaling relation and return
        (median jitter, +err, -err, MC samples); model choice follows
        which stellar parameters were supplied."""
        # Run Monte Carlo simulation
        # Model 1: rvjitter = rvjitter(L, M, T)
        if hasattr(self,'lumi') & hasattr(self,'mass') & hasattr(self,'teff'):
            np.random.seed(seed=1) #makes the random numbers predictable
            mclumi = self.lumi+np.random.randn(self.nsample)*self.lumierr
            np.random.seed(seed=2)
            mcmass = self.mass+np.random.randn(self.nsample)*self.masserr
            np.random.seed(seed=3)
            mcteff = self.teff+np.random.randn(self.nsample)*self.tefferr
            np.random.seed(seed=4)
            mcalpha = self.lmt_alpha+np.random.randn(self.nsample)*self.lmt_alpha_sig
            np.random.seed(seed=5)
            mcbeta = self.lmt_beta+np.random.randn(self.nsample)*self.lmt_beta_sig
            np.random.seed(seed=6)
            mcgamma = self.lmt_gamma+np.random.randn(self.nsample)*self.lmt_gamma_sig
            np.random.seed(seed=7)
            mcdelta = self.lmt_delta+np.random.randn(self.nsample)*self.lmt_delta_sig
            # Compute the jitter samples
            if hasattr(self,'CorFact'):
                mcsigmarv = self.CorFact * mcalpha * mclumi**mcbeta * mcmass**mcgamma * (mcteff/self.teffsun)**mcdelta
            else:
                mcsigmarv = 1.93 * mcalpha * mclumi**mcbeta * mcmass**mcgamma * (mcteff/self.teffsun)**mcdelta
        # Model 2: rvjitter = rvjitter(L, T, g)
        elif hasattr(self,'lumi') & hasattr(self,'teff') & hasattr(self,'grav'):
            np.random.seed(seed=8) #makes the random numbers predictable
            mclumi = self.lumi+np.random.randn(self.nsample)*self.lumierr
            np.random.seed(seed=9)
            mcteff = self.teff+np.random.randn(self.nsample)*self.tefferr
            np.random.seed(seed=10)
            mcgrav = self.grav+np.random.randn(self.nsample)*self.graverr
            np.random.seed(seed=11)
            mcalpha = self.ltg_alpha+np.random.randn(self.nsample)*self.ltg_alpha_sig
            np.random.seed(seed=12)
            mcbeta = self.ltg_beta+np.random.randn(self.nsample)*self.ltg_beta_sig
            np.random.seed(seed=13)
            mcdelta = self.ltg_delta+np.random.randn(self.nsample)*self.ltg_delta_sig
            np.random.seed(seed=14)
            mcepsilon = self.ltg_epsilon+np.random.randn(self.nsample)*self.ltg_epsilon_sig
            # Compute the jitter samples
            if hasattr(self,'CorFact'):
                mcsigmarv = self.CorFact * mcalpha * mclumi**mcbeta * (mcteff/self.teffsun)**mcdelta * (mcgrav/self.gravsun)**mcepsilon
            else:
                mcsigmarv = 1.93 * mcalpha * mclumi**mcbeta * (mcteff/self.teffsun)**mcdelta * (mcgrav/self.gravsun)**mcepsilon
        # Model 3: rvjitter = rvjitter(T, g)
        elif hasattr(self,'teff') & hasattr(self,'grav'):
            np.random.seed(seed=15)
            mcteff = self.teff+np.random.randn(self.nsample)*self.tefferr
            np.random.seed(seed=16)
            mcgrav = self.grav+np.random.randn(self.nsample)*self.graverr
            np.random.seed(seed=17)
            mcalpha = self.tg_alpha+np.random.randn(self.nsample)*self.tg_alpha_sig
            np.random.seed(seed=18)
            mcdelta = self.tg_delta+np.random.randn(self.nsample)*self.tg_delta_sig
            np.random.seed(seed=19)
            mcepsilon = self.tg_epsilon+np.random.randn(self.nsample)*self.tg_epsilon_sig
            # Compute the jitter samples
            if hasattr(self,'CorFact'):
                mcsigmarv = self.CorFact * mcalpha * (mcteff/self.teffsun)**mcdelta * (mcgrav/self.gravsun)**mcepsilon
            else:
                mcsigmarv = 2.01 * mcalpha * (mcteff/self.teffsun)**mcdelta * (mcgrav/self.gravsun)**mcepsilon
        # Model 4: rvjitter = rvjitter(L, T)
        elif hasattr(self,'lumi') & hasattr(self,'teff'):
            np.random.seed(seed=20) #makes the random numbers predictable
            mclumi = self.lumi+np.random.randn(self.nsample)*self.lumierr
            np.random.seed(seed=21)
            mcteff = self.teff+np.random.randn(self.nsample)*self.tefferr
            np.random.seed(seed=22)
            mcalpha = self.lt_alpha+np.random.randn(self.nsample)*self.lt_alpha_sig
            np.random.seed(seed=23)
            mcbeta = self.lt_beta+np.random.randn(self.nsample)*self.lt_beta_sig
            np.random.seed(seed=24)
            mcdelta = self.lt_delta+np.random.randn(self.nsample)*self.lt_delta_sig
            # Compute the jitter samples
            if hasattr(self,'CorFact'):
                mcsigmarv = self.CorFact * mcalpha * mclumi**mcbeta * (mcteff/self.teffsun)**mcdelta
            else:
                mcsigmarv = 1.87 * mcalpha * mclumi**mcbeta * (mcteff/self.teffsun)**mcdelta
        else:
            print('Input data does not apply to any of the four models')
            raise SystemExit
        # get rid of crazy simulated samples
        mcsigmarv = mcsigmarv[np.isfinite(mcsigmarv)]
        # First pass: estimate a robust sigma, then clip 10-sigma outliers.
        sigmarv=np.median(mcsigmarv)
        sigmarvperr=np.percentile(mcsigmarv,84.1)-sigmarv
        sigmarvmerr=sigmarv-np.percentile(mcsigmarv,15.9)
        sigmarverr = np.sqrt((sigmarvperr**2+sigmarvmerr**2)/2.)
        mcsigmarv = mcsigmarv[np.where(abs(mcsigmarv-sigmarv)<10*sigmarverr)[0]]
        # Compute median RV jitter and uncertainties.
        sigmarv=np.median(mcsigmarv)
        sigmarvperr=np.percentile(mcsigmarv,84.1)-sigmarv
        sigmarvmerr=sigmarv-np.percentile(mcsigmarv,15.9)
        self.sigmarv=sigmarv
        self.sigmarvperr=sigmarvperr
        self.sigmarvmerr=sigmarvmerr
        self.mcsigmarv=mcsigmarv
        return self.sigmarv, self.sigmarvperr, self.sigmarvmerr, self.mcsigmarv

    def plot(self, figshow=None, figsave=None, figname=None):
        """Plot Monte Carlo simulations of RV jitter"""
        self.rv()
        fig, ax = plt.subplots(1,1, figsize=(8,6))
        ax.tick_params(which='major', labelsize=20, direction='in', top=True, right=True, length=6, width=1.4)
        ax.tick_params(which='minor', labelsize=20, direction='in', top=True, right=True, length=3, width=1.4)
        for axis in ['top','bottom','left','right']: ax.spines[axis].set_linewidth(2.0)
        bins = np.linspace(min(self.mcsigmarv)*0.99, max(self.mcsigmarv)*1.01, num=100)
        posty, postx, patches = ax.hist(self.mcsigmarv, bins=bins, ec='b', color='gray', density=True)
        # Vertical lines: median (solid) and +/- 1-sigma percentiles (dashed).
        ax.plot([self.sigmarv, self.sigmarv], [0, max(posty)], 'r')
        ax.plot([self.sigmarv+self.sigmarvperr, self.sigmarv+self.sigmarvperr], [0, max(posty)], '--r')
        ax.plot([self.sigmarv-self.sigmarvmerr, self.sigmarv-self.sigmarvmerr], [0, max(posty)], '--r')
        minorLocator = AutoMinorLocator()
        ax.xaxis.set_minor_locator(minorLocator)
        minorLocator = AutoMinorLocator()
        ax.yaxis.set_minor_locator(minorLocator)
        ax.set_xlabel(r'$\sigma_{\rm rms, rv}\ [\rm m/s]$', fontsize=20)
        ax.set_ylabel('Probability Density', fontsize=20)
        ax.annotate(r'$\sigma_{\rm rms,\ RV}$', xy=(0.45, 0.9), xycoords="axes fraction", fontsize=18)
        ax.annotate(r'= {:.2f} +{:.2f} -{:.2f} [m/s]'.format(self.sigmarv, self.sigmarvperr, self.sigmarvmerr), xy=(0.58, 0.9), xycoords="axes fraction", fontsize=15)
        plt.tight_layout()
        # Conditional-expression statement: save under figname when given,
        # otherwise under the default 'rvjitter.png'.
        if figsave==True: plt.savefig(figname) if figname is not None else plt.savefig('rvjitter.png')
        if figshow==True: plt.show()
        plt.close('all')
        return self.sigmarv, self.sigmarvperr, self.sigmarvmerr, self.mcsigmarv
|
993,306 | f93223848962ef9a69e14b228a07d0ace00fd961 | import os
# Package import
import docker
# Local import
import config
# Module-level docker client, plus the directory containing this file
# (used to mount ./prometheus/ as the container's config volume).
docker_client = docker.from_env()
dir_name = os.path.dirname(os.path.abspath(__file__))
def start():
    '''Launches the prometheus container'''
    # Idempotent: bail out if a container with the configured name exists.
    if getContainer() is not None:
        print('Prometheus container already exists')
        return
    # Create prometheus server.
    prometheus_container = docker_client.containers.run(
        'prom/prometheus:v2.8.0',
        command=['--config.file=/etc/prometheus/prometheus.yml',
                 '--storage.tsdb.path=/prometheus',
                 '--web.console.libraries=/usr/share/prometheus/console_libraries',
                 '--web.console.templates=/usr/share/prometheus/consoles'],
        detach=True,
        name=config.PROMETHEUS_NAME,
        ports={'9090': config.PROMETHEUS_PORT},  # <inside-port>:<outside-port>
        remove=True,  # container is auto-removed once stopped
        volumes={dir_name+'/prometheus/': {'bind': '/etc/prometheus/', 'mode': 'rw'},
                 'prometheus_data': {'bind':'/prometheus', 'mode': 'rw'}})
    print('Created prometheus instance')
    # Create and connect prometheus to network.
    network = docker_client.networks.create(
        config.MONITORING_NETWORK_NAME,
        attachable=True,
        driver='bridge',
        internal=True,  # private network.
    )
    network.connect(prometheus_container)
    print('Created prometheus network')
    return prometheus_container
def getContainer():
    '''Return the running prometheus container if it exists, else None.'''
    try:
        container = docker_client.containers.get(config.PROMETHEUS_NAME)
    except Exception:
        # Not found / daemon unreachable -- treat both as "no container".
        return None
    return container
def getNetwork():
    '''Returns the prometheus monitoring network, else None.'''
    try:
        # BUG FIX: start() creates the network under
        # config.MONITORING_NETWORK_NAME; the original looked it up under
        # config.PROMETHEUS_NAME (the container name) and so could never
        # find it, which in turn broke stop()'s network cleanup.
        return docker_client.networks.get(config.MONITORING_NETWORK_NAME)
    except Exception:
        return None
def stop():
    '''Stops the prometheus container and removes its private network.'''
    container = getContainer()
    network = getNetwork()
    # BUG FIX: getNetwork() may return None, and the original dereferenced
    # network.containers unconditionally, crashing with AttributeError.
    if network is not None and len(network.containers) > 1:
        # Other containers still attached -- do not tear anything down.
        print('Containers still connected to network, aborting...')
        return
    # Remove container (it was started with remove=True, so stopping it
    # also deletes it).
    if container is not None:
        container.stop()
        print('Stopped prometheus')
    else:
        print('No prometheus to stop')
    # Remove network.
    if network is not None:
        network.remove()
        print('Removed prometheus network')
    else:
        print('No prometheus network to remove')
|
993,307 | 0736f6c591790bfca982afa78739d6a9c84f74da | __author__ = 'PaleNeutron'
import os
import subprocess
import importlib.util
import sys
# from distutils.sysconfig import get_python_lib
# Locate the installed PyQt5 package; the uic/rcc command-line tools are
# assumed to live in the same directory. NOTE(review): the ".bat"/".exe"
# suffixes below make this Windows-only -- confirm if that is intended.
PyQt_path = os.path.dirname(importlib.util.find_spec("PyQt5").origin)
# uic_path = sys.exec_prefix + os.sep + "bin" + os.sep + "pyuic5"
uic_path = PyQt_path + os.sep + "pyuic5.bat"
rcc_path = PyQt_path + os.sep + "pyrcc5.exe"

# Walk the current tree and compile every Qt Designer .ui file into a .py
# module (pyuic5) and every .qrc resource file into a _rc.py module (pyrcc5).
for root, dirs, files in os.walk('.'):
    for file in files:
        path = root + os.sep + file
        path = os.path.abspath(path)
        if file.endswith('.ui'):
            subprocess.call(
                [uic_path, path, '-o', os.path.splitext(path)[0] + '.py'])
            print(os.path.splitext(path)[0] + '.py', "created")
        elif file.endswith('.qrc'):
            subprocess.call([rcc_path, path, '-o',
                             os.path.splitext(path)[0] + '_rc.py'])
            print(os.path.splitext(path)[0] + '_rc.py', "created")
|
993,308 | 05a9a0b83752fd7ef11f36312482023191c417fe | #!/usr/bin/env python
from lofarstation.stationdata import TBBXCData
from datetime import datetime
from casacore.measures import measures
# Pointing direction for the capture (J2000 RA/Dec via casacore measures).
zenith_f24 = measures().direction("J2000", "01h01m51s", "+57d07m52s")
# Capture start time, 2017-02-24 13:56:35.135 (timezone not stated in the
# source -- presumably UTC; confirm against the observation log).
t0_f24 = datetime(2017,2,24, 13,56,35, 135000)
# Load the averaged TBB cross-correlation data for station SE607 and
# export it as a CASA measurement set.
sd = TBBXCData("feb24_0.05s_avg.npy",
               station_name="SE607", rcu_mode=3,
               integration_time=0.5, start_time=t0_f24,
               direction=zenith_f24)
sd.write_ms("tbb1.ms")
|
993,309 | 51fc3c5ba5a068d313caf551d73d4d78e45ebd6a | print("Teste de python") |
993,310 | c75ed4f568a199762d7944e58e46a22144f5bc34 | from pysnmp.hlapi import *
import socket
import sys
import datetime
from time import sleep
crestron_ip = '192.168.0.5'
crestron_port = 505
# Create a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_address = (crestron_ip, crestron_port)

# Poll the router's interface counters over SNMP roughly once per second,
# compute in/out throughput, and push a status string to a Crestron panel
# over UDP.
while 1:
    current_time1 = datetime.datetime.now()
    # First sample: 64-bit in/out octet counters (IF-MIB ifXTable, index 3)
    # plus a route-table metric used to detect which uplink is active.
    errorIndication, errorStatus, errorIndex, varBinds = next(
        getCmd(SnmpEngine(),
               CommunityData('rb1100'),
               UdpTransportTarget(('192.168.0.19', 161)),
               ContextData(),
               ObjectType(ObjectIdentity('1.3.6.1.2.1.31.1.1.1.10.3')),
               ObjectType(ObjectIdentity('1.3.6.1.2.1.31.1.1.1.6.3')),
               ObjectType(ObjectIdentity('1.3.6.1.2.1.4.24.4.1.16.0.0.0.0.0.0.0.0.0.10.1.1.1'))
               )
    )
    bytesIn_check1 = varBinds[0][1]
    bytesOut_check1 = varBinds[1][1]
    active_route_intelcom = int(varBinds[2][1])
    sleep(1)
    # Second sample of the same octet counters, ~1 second later.
    errorIndication, errorStatus, errorIndex, varBinds = next(
        getCmd(SnmpEngine(),
               CommunityData('rb1100'),
               UdpTransportTarget(('192.168.0.19', 161)),
               ContextData(),
               ObjectType(ObjectIdentity('1.3.6.1.2.1.31.1.1.1.10.3')),
               ObjectType(ObjectIdentity('1.3.6.1.2.1.31.1.1.1.6.3'))
               )
    )
    # Elapsed wall-clock time between the two samples, in seconds.
    time_delta = datetime.datetime.now() - current_time1
    micross = float(time_delta.microseconds + time_delta.seconds*1000000) / 1000000
    #print "Time difference in microseconds is: %0.2f" % micross
    bytesIn_check2 = varBinds[0][1]
    bytesOut_check2 = varBinds[1][1]
    # Counter delta (octets) -> bits -> Mbit, divided by elapsed seconds.
    # NOTE(review): 1048576 treats "M" as 2**20 rather than 10**6 -- confirm
    # which convention the display expects.
    RX_loading = ((float(bytesIn_check2-bytesIn_check1))*8/1048576) / micross
    TX_loading = ((float(bytesOut_check2-bytesOut_check1))*8/1048576) / micross
    # NOTE(review): sendto() with a str payload (and the py2-style print
    # above) suggests Python 2; under Python 3 the payload must be bytes.
    if active_route_intelcom:
        sent = sock.sendto("|Intelcom|" + str(format(RX_loading,'.2f')) + "Mbps / " + str(format(TX_loading,'.2f')) + " Mbps|", server_address)
    else:
        sent = sock.sendto("|Megafon|" + str(format(RX_loading,'.2f')) + "Mbps / " + str(format(TX_loading,'.2f')) + " Mbps|", server_address)

# Unreachable while the loop above runs forever; kept from the original.
sock.close()
993,311 | 5a38bc2b1b417d836c3b7896e7db8255c5130166 | import sys
import numpy as np
from numpy.linalg import inv
def GetNextItem(items, used_items, l):
    """Greedily pick the next most informative row of *items*.

    Among all row indices not yet in *used_items*, returns the one that,
    appended to the already-selected rows, maximises
    ``trace((P @ P.T + l*I)^-1)`` where P stacks the selected rows.

    :param items: 2-D array, one candidate item per row
    :param used_items: iterable of already-selected row indices
    :param l: ridge regulariser added to the Gram matrix diagonal
    :returns: index of the best unused row (0 if none scores above 0)
    """
    selected = list(used_items)
    if not selected:
        # Nothing chosen yet: a single placeholder row to be overwritten below.
        stacked = np.zeros([1, items.shape[1]])
    else:
        # Chosen rows plus one spare slot for the candidate under evaluation.
        stacked = np.vstack([items[selected], np.zeros(items.shape[1])])

    regulariser = l * np.eye(stacked.shape[0])

    best_score = 0
    best_index = 0
    for candidate in range(len(items)):
        if candidate in used_items:
            continue
        # Trial: drop the candidate into the spare last row and score it.
        stacked[-1] = items[candidate]
        gram = np.dot(stacked, stacked.T) + regulariser
        score = np.matrix.trace(inv(gram))
        if score > best_score:
            best_score = score
            best_index = candidate
    return best_index
def MostInformativeItems(items, n_items):
    """Greedily select *n_items* row indices of *items*.

    Repeatedly calls :func:`GetNextItem` (ridge parameter fixed at 0.01),
    feeding the growing selection back in, and returns the indices in the
    order they were picked.
    """
    selection = []
    while len(selection) < n_items:
        selection.append(GetNextItem(items, selection, 0.01))
    return selection
993,312 | 75a8442506d2047e491215edcd74f207cfadc76f | import json
from collections import Counter

manifest_urls = []

# Read every manuscript id; `with` guarantees the handle is closed
# (the original left the file open and bound the lines to the name `list`,
# shadowing the builtin).
with open('vatican-ids.txt', 'r') as ids_file:
    id_lines = ids_file.readlines()

# Print duplicated ids. Counting once up front is O(n) overall, where the
# original called list.count() (O(n)) for every element. As before, a
# duplicated line is printed once per occurrence.
occurrences = Counter(id_lines)
for elem in id_lines:
    if occurrences[elem] > 1:
        print(elem)

# with open('vatican-manifests.txt', 'w') as out:
#     json.dump(manifest_urls, out)
|
993,313 | fe91c3456310536029fe89b39ab689dced5806f2 | from monitor import Monitor
from raton import Raton
from teclado import Teclado
class Computadora:
    """A computer assembled from a monitor, a keyboard and a mouse.

    Each instance receives a sequential id taken from a class-level counter,
    so ids reflect creation order across the whole program.
    """

    # Total instances ever created; doubles as the id sequence.
    cntComputador = 0

    def __init__(self, nombre, monitor, teclado, raton):
        """Store the components and assign the next sequential id."""
        Computadora.cntComputador = Computadora.cntComputador + 1
        self._idComputadora = Computadora.cntComputador
        self._nombre = nombre
        self._raton = raton
        self._teclado = teclado
        self._monitor = monitor

    def __str__(self):
        """Multi-line human-readable summary of this computer."""
        return f'''
{self._nombre}: {self._idComputadora}
Monitor: {self._monitor}
Teclado: {self._teclado}
Ratón: {self._raton}
'''
if __name__ == "__main__":
t1 = Teclado("HP", "Usb")
r1 = Raton("Logitech", "Bluetooth")
m1 = Monitor("MSI", 27)
c1 = Computadora("Asus", m1, t1, r1)
print(c1)
print(Computadora.cntComputador)
t2 = Teclado("Razor", "Usb")
r2 = Raton("Microsoft", "Cable")
m2 = Monitor("LG", 19)
c2 = Computadora("Asus", m2, t2, r2)
print(c2)
print(Computadora.cntComputador)
|
993,314 | 63db5d52e38f6692e0ec679871e82930a455f29a | import can
def send():
    """Send one fixed CAN frame through a PCAN-USB adapter.

    Opens channel PCAN_USBBUS1 at 250 kbit/s via the third-party `python-can`
    package and transmits a single extended-id frame (0xc0ffee) with a
    4-byte payload. Errors are reported on stdout rather than raised.
    """
    bus = can.interface.Bus(bustype='pcan', channel='PCAN_USBBUS1', bitrate=250000)
    msg = can.Message(arbitration_id=0xc0ffee,
                      data=[31, 32, 33, 34])
    try:
        bus.send(msg)
        print("channel: {}, send_msg: {}".format(bus.channel_info, msg))
    except can.CanError:
        # Best-effort diagnostic; the caller gets no failure signal.
        print("fail in sending message")

if __name__ == '__main__':
    send()
|
993,315 | d3feb319de10259b691619399500f3bb10765976 | #!/usr/bin/env python3
"""
CREATED AT: 2022-10-07
URL: https://leetcode.com/problems/maximum-ascending-subarray-sum/
GITHUB: https://github.com/Jiezhi/myleetcode
FileName: 1800-MaximumAscendingSubarraySum
Difficulty: Easy
Desc:
Tag:
See:
"""
from tool import *
class Solution:
    def maxAscendingSum(self, nums: List[int]) -> int:
        """Return the largest sum of any strictly ascending run in *nums*.

        A run is a maximal contiguous subarray where each element is strictly
        greater than the previous one. Single pass, O(n) time, O(1) space.

        Constraints (per problem): 1 <= len(nums) <= 100, 1 <= nums[i] <= 100,
        so seeding ``prev`` with 0 always starts a run at the first element.
        """
        best = 0
        run_sum = 0
        prev = 0
        for value in nums:
            # Extend the current ascending run, or start a new one here.
            run_sum = run_sum + value if value > prev else value
            prev = value
            best = max(best, run_sum)
        return best
def test():
    """Regression-check maxAscendingSum against the LeetCode examples."""
    solver = Solution()
    for nums, expected in (
        ([10, 20, 30, 5, 10, 50], 65),
        ([10, 20, 30, 40, 50], 150),
        ([12, 17, 15, 13, 10, 11, 12], 33),
    ):
        assert solver.maxAscendingSum(nums=nums) == expected

if __name__ == '__main__':
    test()
|
993,316 | 29db81d78857c11450d35a4e64184474e9f96737 | #!/usr/bin/env python3
# encoding: utf-8
# Copyright 2020 ETRI (Minkyu Lee)
import numpy
from etri_dist.libs import sigproc
from scipy.fftpack import dct
import os.path
def set_cmvn_file(path):
    """Load CMVN (cepstral mean/variance normalisation) parameters.

    If ``<path>/cmvn.ark`` exists it is read with kaldiio and converted from
    Kaldi accumulator form into per-dimension affine parameters; otherwise a
    built-in 83-dimensional table is returned.

    :param path: directory expected to contain ``cmvn.ark``
    :returns: ``(norm, dim)`` where ``norm`` is a (2, dim) array-like with
        row 0 = offset (``-mean/std``) and row 1 = scale (``1/std``), so a
        feature normalises as ``x * scale + offset`` (see ``cmvn2``).
    """
    if os.path.exists(path+'/cmvn.ark'):
        import kaldiio
        import numpy as np
        cmvn = kaldiio.load_mat(path+'/cmvn.ark')
        # Kaldi stats layout: row 0 = sum(x) with the frame count appended as
        # the last element, row 1 = sum(x^2) (last element unused here).
        count = cmvn[0][-1]
        mean =cmvn[0,:-1]/count
        var = (cmvn[1,:-1]/count)-mean*mean
        scale = 1 / np.sqrt(var)
        offset = -(mean*scale)
        norm = np.zeros((2, cmvn[0].shape[0]-1))
        norm[0,:] = offset
        norm[1,:] = scale
        print('cmvn.ark file apllied,inputdim=%d'%(cmvn[0].shape[0]-1))
        return norm,cmvn[0].shape[0]-1
    else:
        print('Default cmvn apllied')
        # Fallback table: hard-coded [offset; scale] rows for 83 dims
        # (presumably 80 fbank + 3 pitch features -- TODO confirm).
        norm = [[-3.42167211,-3.19438577,-3.38188171,-3.70518327,-3.95481634,-4.08967972,
                 -4.12971735,-4.0177989,-4.05439854,-4.11131907,-4.2040782,-4.20991182,
                 -4.25162649,-4.25907564,-4.2473011,-4.2863965,-4.3228898,-4.34782124,
                 -4.42950296,-4.39487934,-4.36633348,-4.50143957,-4.48567581,-4.5968647,
                 -4.61216831,-4.68406868,-4.68915033,-4.70958185,-4.69221592,-4.70501041,
                 -4.70832491,-4.72276783,-4.74502897,-4.77747059,-4.79214573,-4.81906843,
                 -4.84250784,-4.8643012,-4.88663578,-4.85466433,-4.90646744,-4.9041872,
                 -4.9521184,-4.97165966,-5.01090717,-5.0324893,-5.03520489,-5.03818893,
                 -5.04275227,-5.06600761,-5.08489704,-5.11085701,-5.12284422,-5.12537432,
                 -5.10954142,-5.08986282,-5.09612083,-5.12694502,-5.16363811,-5.19640732,
                 -5.22519541,-5.21797276,-5.21604729,-5.2105999,-5.21371508,-5.21609163,
                 -5.2056222,-5.19626617,-5.16277838,-5.13859081,-5.13667679,-5.15312576,
                 -5.17222881,-5.1936388,-5.22146034,-5.23832226,-5.24389744,-5.21634912,
                 -5.15253687,-5.05822802,1.25118387,0.16807194,0.02456923],
                [0.3435652,0.30806524,0.2948626,0.29855329,0.29850823,0.29500216,
                 0.2900461,0.28056651,0.28067291,0.28453702,0.28764045,0.28579083,
                 0.28413242,0.28140688,0.27958646,0.28081656,0.28304908,0.28531724,
                 0.28741103,0.28793833,0.28851834,0.293441,0.29677734,0.30205214,
                 0.30518064,0.30842769,0.31117955,0.31127203,0.31129918,0.31215218,
                 0.31162351,0.31246269,0.31293857,0.31346714,0.31359836,0.31413645,
                 0.31463048,0.31555009,0.31622899,0.31533957,0.31715053,0.31806079,
                 0.31910229,0.31948549,0.31972486,0.3182689,0.31538239,0.31367698,
                 0.31298089,0.31383485,0.31637794,0.31893483,0.320057,0.31951809,
                 0.31782046,0.31567478,0.31514621,0.31691712,0.3202112,0.32393128,
                 0.32680854,0.32837763,0.33002022,0.33165351,0.33369759,0.33539012,
                 0.33612099,0.3356232,0.33299479,0.33120826,0.3311016,0.33190542,
                 0.33274376,0.33311793,0.33442715,0.33595425,0.33788115,0.34010333,
                 0.3433814,0.34954873,2.91277742,2.19889498,4.09453058]]
        return norm,83
def cmvn(vec, variance_normalization=False):
    """ This function is aimed to perform global cepstral mean and
        variance normalization (CMVN) on input feature vector "vec".
        The code assumes that there is one observation per row.

        NOTE(review): despite the wording above, the mean and stdev are NOT
        computed from ``vec`` -- the original per-utterance computation is
        commented out below and replaced by hard-coded 80-dimensional tables,
        so ``vec`` must have exactly 80 columns.

    Args:
        vec (array): input feature matrix
            (size:(num_observation,num_features)), num_features must be 80
        variance_normalization (bool): If the variance
            normilization should be performed or not.

    Return:
          array: The mean(or mean+variance) normalized feature vector.
    """
    eps = 2**-30
    rows, cols = vec.shape

    # Mean calculation
    #norm = numpy.mean(vec, axis=0)
    # Hard-coded global mean (80 dims), replacing the per-utterance mean above.
    norm=[13.81728912,13.54220955,14.5613793,15.45506153,16.28197078,16.77583828,16.90248914,16.51130705,16.87707883,17.03003926,16.79243714,16.4319049,16.15078832,15.96410727,15.86211735,15.88430905,15.91035622,15.74871705,15.63217505,15.18196422,14.87927356,14.97845328,14.62023821,14.54376859,14.36037709,14.37890261,14.05186802,13.95491892,13.78801275,13.7417198,13.70090885,13.63907513,13.5986479,13.55647996,13.57488933,13.62006698,13.72976808,13.72190318,13.70704903,13.61857512,13.68904373,13.65855143,13.75306085,13.70118232,13.68455553,13.64148073,13.56307018,13.55783733,13.44710216,13.30385999,13.23176361,13.24240552,13.24202188,13.22154549,13.1852984,13.2220598,13.33818141,13.46509443,13.44225796,13.33508423,13.23343752,13.02002618,12.86639199,12.83257406,12.92551667,12.9394715,12.87757082,12.89940534,12.94605788,12.93834487,12.83259154,12.71292629,12.62831123,12.61561601,12.54721791,12.15011781,11.30001299,9.98615348,8.61970199,7.56689922]
    norm_vec = numpy.tile(norm, (rows, 1))

    # Mean subtraction
    mean_subtracted = vec - norm_vec

    # Variance normalization
    if variance_normalization:
        #stdev = numpy.std(mean_subtracted, axis=0)
        # Hard-coded global standard deviation (80 dims).
        stdev = [2.77170399,2.36850564,2.64998414,2.80786705,2.96376364,3.16694759,3.38130528,3.90046123,3.89960683,3.75648588,3.80324647,3.81267306,3.85492083,3.96901255,4.10301255,4.19317926,4.14094211,4.11957733,4.18141569,4.19893117,4.10962309,3.96179855,3.79471732,3.68831649,3.53129423,3.3899461,3.42984116,3.46188679,3.45592937,3.38961382,3.36126416,3.34760663,3.36526951,3.42112789,3.44533324,3.45941405,3.45994202,3.57684512,3.64303491,3.6141617,3.65694041,3.67959744,3.65586664,3.65669462,3.66385247,3.64272663,3.58584162,3.5918153,3.5033288,3.35176385,3.29142179,3.33101401,3.3453287,3.33631761,3.34699373,3.37557506,3.48191781,3.5997266,3.59247739,3.52937215,3.4241821,3.28394983,3.14243689,3.12374424,3.25172066,3.29622535,3.26740819,3.31936797,3.41201299,3.46992348,3.40404082,3.21726981,3.17939876,3.35834759,3.48727169,3.50188143,3.41396138,3.20734311,2.97026716,3.08520177]
        stdev_vec = numpy.tile(stdev, (rows, 1))
        # eps guards against division by zero in degenerate dimensions.
        output = mean_subtracted / (stdev_vec + eps)
    else:
        output = mean_subtracted
    return output
def cmvn2(vec,in_norm=None, variance_normalization=False,dim=80):
    """ This function is aimed to perform global cepstral mean and
        variance normalization (CMVN) on input feature vector "vec".
        The code assumes that there is one observation per row.

        Unlike ``cmvn``, the parameters come as [offset; scale] rows (the
        format produced by ``set_cmvn_file``) and the transform applied is
        ``vec * scale + offset``. Only the first *dim* entries of each row
        are used.

    Args:
        vec (array): input feature matrix
            (size:(num_observation,num_features))
        in_norm (array or None): (2, >=dim) [offset; scale] parameters; when
            None the built-in 83-dimensional table below is used.
        variance_normalization (bool): If the variance
            normilization should be performed or not.
            NOTE(review): this flag is never read in the body -- scale is
            always applied; confirm whether that is intended.
        dim (int): number of leading feature dimensions to normalise.

    Return:
          array: The mean(or mean+variance) normalized feature vector.
    """
    rows,cols = vec.shape
    if in_norm is None:
        # Default [offset; scale] table (83 dims), identical to the fallback
        # in set_cmvn_file.
        norm = [[-3.42167211,-3.19438577,-3.38188171,-3.70518327,-3.95481634,-4.08967972,
                 -4.12971735,-4.0177989,-4.05439854,-4.11131907,-4.2040782,-4.20991182,
                 -4.25162649,-4.25907564,-4.2473011,-4.2863965,-4.3228898,-4.34782124,
                 -4.42950296,-4.39487934,-4.36633348,-4.50143957,-4.48567581,-4.5968647,
                 -4.61216831,-4.68406868,-4.68915033,-4.70958185,-4.69221592,-4.70501041,
                 -4.70832491,-4.72276783,-4.74502897,-4.77747059,-4.79214573,-4.81906843,
                 -4.84250784,-4.8643012,-4.88663578,-4.85466433,-4.90646744,-4.9041872,
                 -4.9521184,-4.97165966,-5.01090717,-5.0324893,-5.03520489,-5.03818893,
                 -5.04275227,-5.06600761,-5.08489704,-5.11085701,-5.12284422,-5.12537432,
                 -5.10954142,-5.08986282,-5.09612083,-5.12694502,-5.16363811,-5.19640732,
                 -5.22519541,-5.21797276,-5.21604729,-5.2105999,-5.21371508,-5.21609163,
                 -5.2056222,-5.19626617,-5.16277838,-5.13859081,-5.13667679,-5.15312576,
                 -5.17222881,-5.1936388,-5.22146034,-5.23832226,-5.24389744,-5.21634912,
                 -5.15253687,-5.05822802,1.25118387,0.16807194,0.02456923],
                [0.3435652,0.30806524,0.2948626,0.29855329,0.29850823,0.29500216,
                 0.2900461,0.28056651,0.28067291,0.28453702,0.28764045,0.28579083,
                 0.28413242,0.28140688,0.27958646,0.28081656,0.28304908,0.28531724,
                 0.28741103,0.28793833,0.28851834,0.293441,0.29677734,0.30205214,
                 0.30518064,0.30842769,0.31117955,0.31127203,0.31129918,0.31215218,
                 0.31162351,0.31246269,0.31293857,0.31346714,0.31359836,0.31413645,
                 0.31463048,0.31555009,0.31622899,0.31533957,0.31715053,0.31806079,
                 0.31910229,0.31948549,0.31972486,0.3182689,0.31538239,0.31367698,
                 0.31298089,0.31383485,0.31637794,0.31893483,0.320057,0.31951809,
                 0.31782046,0.31567478,0.31514621,0.31691712,0.3202112,0.32393128,
                 0.32680854,0.32837763,0.33002022,0.33165351,0.33369759,0.33539012,
                 0.33612099,0.3356232,0.33299479,0.33120826,0.3311016,0.33190542,
                 0.33274376,0.33311793,0.33442715,0.33595425,0.33788115,0.34010333,
                 0.3433814,0.34954873,2.91277742,2.19889498,4.09453058]]
    else:
        norm = in_norm
    # Broadcast the per-dimension offset/scale across all rows.
    norm_vec = numpy.tile(norm[0][:dim],(rows,1))
    stdev_vec = numpy.tile(norm[1][:dim],(rows,1))
    # normalised = x * (1/std) + (-mean/std)
    vec = vec * stdev_vec
    vec += norm_vec
    return vec
def mfcc(signal,samplerate=16000,winlen=0.025,winstep=0.01,numcep=13,
         nfilt=23,nfft=512,lowfreq=20,highfreq=None,dither=1.0,remove_dc_offset=True,preemph=0.97,
         ceplifter=22,useEnergy=True,wintype='povey'):
    """Compute MFCC features from an audio signal.

    :param signal: audio samples as an N*1 array
    :param samplerate: sample rate of the signal in Hz
    :param winlen: analysis window length in seconds (default 25 ms)
    :param winstep: step between successive windows in seconds (default 10 ms)
    :param numcep: number of cepstral coefficients to keep
    :param nfilt: number of mel filters
    :param nfft: FFT size
    :param lowfreq: lowest mel filter edge in Hz
    :param highfreq: highest mel filter edge in Hz (default samplerate/2)
    :param dither: dithering amount passed through to framing
    :param remove_dc_offset: whether to remove per-frame DC offset
    :param preemph: pre-emphasis coefficient (0 disables)
    :param ceplifter: cepstral lifter coefficient (0 disables)
    :param useEnergy: replace C0 with the log of the raw frame energy
    :param wintype: analysis window type name
    :returns: (NUMFRAMES, numcep) array, one feature vector per row
    """
    mel_energies, frame_energy = fbank(signal, samplerate, winlen, winstep, nfilt, nfft,
                                       lowfreq, highfreq, dither, remove_dc_offset,
                                       preemph, wintype)
    # log mel energies -> DCT-II -> keep the first numcep coefficients.
    cepstra = dct(numpy.log(mel_energies), type=2, axis=1, norm='ortho')[:, :numcep]
    cepstra = lifter(cepstra, ceplifter)
    if useEnergy:
        # Kaldi-style: C0 is replaced with the log of the raw frame energy.
        cepstra[:, 0] = numpy.log(frame_energy)
    return cepstra
def fbank(signal,samplerate=16000,winlen=0.025,winstep=0.01,
          nfilt=40,nfft=512,lowfreq=0,highfreq=None,dither=1.0,remove_dc_offset=True, preemph=0.97,
          wintype='hamming'):
    """Compute mel-filterbank energies and raw per-frame energy.

    :param signal: audio samples as an N*1 array
    :param samplerate: sample rate of the signal in Hz
    :param winlen: analysis window length in seconds (default 25 ms)
    :param winstep: step between successive windows in seconds (default 10 ms)
    :param nfilt: number of mel filters
    :param nfft: FFT size
    :param lowfreq: lowest mel filter edge in Hz
    :param highfreq: highest mel filter edge in Hz (default samplerate/2)
    :param dither: dithering amount passed through to framing
    :param remove_dc_offset: whether to remove per-frame DC offset
    :param preemph: pre-emphasis coefficient (0 disables)
    :param wintype: analysis window type name
    :returns: (features, energies) -- a (NUMFRAMES, nfilt) array of filterbank
        energies and a (NUMFRAMES,) array of raw (pre-window) frame energies.
    """
    highfreq = highfreq or samplerate / 2

    # framesig returns both windowed and raw frames; the raw frames supply
    # the unwindowed energy below.
    frames, raw_frames = sigproc.framesig(signal, winlen * samplerate,
                                          winstep * samplerate, dither, preemph,
                                          remove_dc_offset, wintype)
    pspec = sigproc.powspec(frames, nfft)

    energy = numpy.sum(raw_frames ** 2, 1)
    # Clamp zeros to eps so callers can safely take a log.
    energy = numpy.where(energy == 0, numpy.finfo(float).eps, energy)

    filters = get_filterbanks(nfilt, nfft, samplerate, lowfreq, highfreq)
    feat = numpy.dot(pspec, filters.T)
    feat = numpy.where(feat == 0, numpy.finfo(float).eps, feat)
    return feat, energy
def logfbank(signal,samplerate=16000,winlen=0.025,winstep=0.01,
             nfilt=40,nfft=512,lowfreq=64,highfreq=None,dither=1.0,remove_dc_offset=True,preemph=0.97,wintype='hamming'):
    """Compute log mel-filterbank energy features.

    Thin wrapper over :func:`fbank` that discards the per-frame energy and
    returns the natural log of the filterbank energies.

    :param signal: audio samples as an N*1 array
    :param samplerate: sample rate of the signal in Hz
    :param winlen: analysis window length in seconds (default 25 ms)
    :param winstep: step between successive windows in seconds (default 10 ms)
    :param nfilt: number of mel filters
    :param nfft: FFT size
    :param lowfreq: lowest mel filter edge in Hz
    :param highfreq: highest mel filter edge in Hz (default samplerate/2)
    :param preemph: pre-emphasis coefficient (0 disables)
    :returns: (NUMFRAMES, nfilt) array of log filterbank energies
    """
    mel_energies, _energy = fbank(signal, samplerate, winlen, winstep, nfilt, nfft,
                                  lowfreq, highfreq, dither, remove_dc_offset,
                                  preemph, wintype)
    return numpy.log(mel_energies)
def hz2mel(hz):
    """Convert a frequency in Hertz to the (Kaldi-style) mel scale.

    :param hz: value in Hz; scalars and numpy arrays both work (element-wise).
    :returns: the mel value(s), same shape as the input.
    """
    ratio = 1 + hz / 700.0
    return 1127 * numpy.log(ratio)
def mel2hz(mel):
    """Convert a (Kaldi-style) mel value back to Hertz; inverse of hz2mel.

    :param mel: value in mels; scalars and numpy arrays both work (element-wise).
    :returns: the frequency value(s) in Hz, same shape as the input.
    """
    expanded = numpy.exp(mel / 1127.0) - 1
    return 700 * expanded
def get_filterbanks(nfilt=26,nfft=512,samplerate=16000,lowfreq=0,highfreq=None):
    """Build a Kaldi-style triangular mel filterbank matrix.

    Filters are stored one per row over the FFT bins, so the result has
    shape (nfilt, nfft/2 + 1). Filter centres are spaced evenly on the mel
    scale between *lowfreq* and *highfreq*.

    :param nfilt: number of triangular filters
    :param nfft: FFT size
    :param samplerate: sample rate in Hz (fixes the bin -> Hz mapping)
    :param lowfreq: lowest filter edge in Hz
    :param highfreq: highest filter edge in Hz (default samplerate/2)
    :returns: (nfilt, nfft//2 + 1) numpy array, one filter per row
    """
    highfreq = highfreq or samplerate / 2
    assert highfreq <= samplerate/2, "highfreq is greater than samplerate/2"

    lowmel = hz2mel(lowfreq)
    highmel = hz2mel(highfreq)
    # nfilt filters need nfilt + 2 evenly-spaced edge points on the mel axis.
    mel_step = (highmel - lowmel) / (nfilt + 1)

    bank = numpy.zeros([nfilt, nfft // 2 + 1])
    for filt in range(nfilt):
        left = lowmel + filt * mel_step
        center = lowmel + (filt + 1) * mel_step
        right = lowmel + (filt + 2) * mel_step
        # NOTE: the Nyquist bin (index nfft//2) is left at zero, matching the
        # original loop bound.
        for bin_idx in range(nfft // 2):
            bin_mel = hz2mel(bin_idx * samplerate / nfft)
            if left < bin_mel < right:
                if bin_mel < center:
                    bank[filt, bin_idx] = (bin_mel - left) / (center - left)
                else:
                    bank[filt, bin_idx] = (right - bin_mel) / (right - center)
    return bank
def lifter(cepstra, L=22):
    """Apply a sinusoidal lifter to a matrix of cepstra.

    Boosts the magnitude of the higher-order DCT coefficients by the window
    ``1 + (L/2) * sin(pi * n / L)`` over coefficient index ``n``.

    :param cepstra: (numframes, numcep) matrix of mel-cepstra
    :param L: liftering coefficient; L <= 0 returns the input unchanged.
    """
    if L <= 0:
        # Liftering disabled.
        return cepstra
    nframes, ncoeff = numpy.shape(cepstra)
    coeff_idx = numpy.arange(ncoeff)
    lift_window = 1 + (L / 2.) * numpy.sin(numpy.pi * coeff_idx / L)
    return lift_window * cepstra
def delta(feat, N):
    """Compute delta (regression) features from a feature matrix.

    Each output frame is a weighted difference of the *N* preceding and *N*
    following frames, with edge frames padded by repetition.

    :param feat: (NUMFRAMES, numfeatures) array, one feature vector per row
    :param N: regression window half-width (frames on each side)
    :returns: array of the same shape containing delta features
    :raises ValueError: if N < 1
    """
    if N < 1:
        raise ValueError('N must be an integer >= 1')
    num_frames = len(feat)
    # Standard regression denominator: 2 * (1^2 + 2^2 + ... + N^2).
    denom = 2 * sum(i * i for i in range(1, N + 1))
    weights = numpy.arange(-N, N + 1)
    # Repeat the first/last frame N times so every window is full.
    padded = numpy.pad(feat, ((N, N), (0, 0)), mode='edge')
    out = numpy.empty_like(feat)
    for frame in range(num_frames):
        window = padded[frame:frame + 2 * N + 1]
        out[frame] = numpy.dot(weights, window) / denom
    return out
|
993,317 | 07e1f68a864aec4d937573ea3943dddbafcafd85 | from Database_model.app_model import Program, ProgramSchema,db
from flask_restful import Resource
from flask import Flask, request, jsonify
#*********************************************************************************************#
#------------------ ProgramCURD----------------------------------------------------------------#
#*********************************************************************************************#
# Single- and multi-object schema instances shared by the resources below.
program_schema = ProgramSchema()
programs_schema=ProgramSchema(many=True)
class ProgramInfo(Resource):
    """Collection endpoint for Program records: create (POST) and list (GET)."""

    #########Post_Program#########################
    def post(self):
        """Create a Program from the JSON body's 'p_name' and return it serialized.

        NOTE(review): a missing 'p_name' key raises KeyError here -- no input
        validation is performed.
        """
        new_post = Program(
            p_name=request.json['p_name']
        )
        db.session.add(new_post)
        db.session.commit()
        return program_schema.dump(new_post)

    def get(self):
        """Return every Program row, serialized as a JSON array."""
        all_products = Program.query.all()
        result = programs_schema.dump(all_products)
        return jsonify(result)
class ProgramExtract(Resource):
    """Single-record endpoint for Program: fetch, delete and update by id.

    Every method 404s via get_or_404 when *p_id* does not exist.
    """

    ########Get_Program############
    def get(self, p_id):
        """Return the Program with the given id, serialized."""
        extract = Program.query.get_or_404(p_id)
        return program_schema.dump(extract)

    def delete(self, p_id):
        """Delete the Program with the given id; respond 204 with no body."""
        del_rec = Program.query.get_or_404(p_id)
        db.session.delete(del_rec)
        db.session.commit()
        return '', 204

    def put(self, p_id):
        """Update 'p_name' from the JSON body (if present) and return the record."""
        change = Program.query.get_or_404(p_id)
        if 'p_name' in request.json:
            change.p_name = request.json['p_name']
        # NOTE(review): commit runs even when no field changed -- harmless,
        # but confirm this placement matches the original indentation.
        db.session.commit()
        return program_schema.dump(change)
|
993,318 | cc23cb72292c0ed0f897014c8f7c162aabffffff | class Solution(object):
def findKthLargest(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
ret = self.quick_sort(nums, k, 0, len(nums) - 1)
return ret
def quick_sort(self, nums, k, left, right):
if left < right:
pivot = self.partition(nums, left, right)
print(pivot, nums)
if pivot == k - 1:
return nums[pivot]
elif pivot < k - 1:
return self.quick_sort(nums, k, pivot, right)
else:
return self.quick_sort(nums, k, left, pivot)
else:
print('--')
return nums[right]
def partition(self, nums, left, right):
tmp = nums[right]
print(tmp, left, right, nums)
while left < right:
while left < right and nums[left] > tmp:
left += 1
while left < right and nums[right] < tmp:
right -= 1
if left < right:
nums[left], nums[right] = nums[right], nums[left]
left += 1
right -= 1
return right
if __name__ == '__main__':
    # Ad-hoc driver: 4th largest of the array below is 4.
    o = Solution()
    #       1           4
    #       p           k
    # nums = [3,2,1,5,6,4]
    # k = 2
    nums = [3, 2, 3, 1, 2, 4, 5, 5, 6]
    k = 4
    ret = o.findKthLargest(nums, k)
    # Quickselect reorders nums in place, so this shows the partitioned array.
    print(nums)
    print(ret)
993,319 | df4944169a34caff5f81b1c60d94546411344baf | #!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
class AccessControlAPI(object):
    """Mixin implementing the /v3/acl/* endpoints of an HTTP API client.

    Relies on ``self.post``/``self.get`` (context managers yielding a
    response with ``.status``/``.read()``), ``self.raise_error`` and
    ``self.checked_json`` being provided by the composing client class,
    which is defined elsewhere in this package.
    """

    ####
    ## Access Control API
    ##

    def grant_access_control(self, subject, action, scope, grant_option):
        """POST /v3/acl/grant: grant *action* on *scope* to *subject*.

        ``grant_option`` is stringified before sending. Returns True on
        HTTP 200; any other status is routed through ``raise_error``.
        """
        params = {"subject": subject, "action": action, "scope": scope, "grant_option": str(grant_option)}
        with self.post("/v3/acl/grant", params) as res:
            code, body = res.status, res.read()
            if code != 200:
                self.raise_error("Granting access control failed", res, body)
            return True

    def revoke_access_control(self, subject, action, scope):
        """POST /v3/acl/revoke: revoke *action* on *scope* from *subject*.

        Returns True on HTTP 200; otherwise raises via ``raise_error``.
        """
        params = {"subject": subject, "action": action, "scope": scope}
        with self.post("/v3/acl/revoke", params) as res:
            code, body = res.status, res.read()
            if code != 200:
                self.raise_error("Revoking access control failed", res, body)
            return True

    def test_access_control(self, user, action, scope):
        """GET /v3/acl/test: check whether *user* may perform *action* on *scope*.

        Returns ``(permission, acl)`` where ``acl`` is a list of
        ``[subject, action, scope]`` triples taken from the response's
        "access_controls" array.
        """
        params = {"user": user, "action": action, "scope": scope}
        with self.get("/v3/acl/test", params) as res:
            code, body = res.status, res.read()
            if code != 200:
                self.raise_error("Testing access control failed", res, body)
            js = self.checked_json(body, ["permission", "access_controls"])
            perm = js["permission"]
            acl = [ [roleinfo["subject"], roleinfo["action"], roleinfo["scope"]] for roleinfo in js["access_controls"] ]
            return (perm, acl)

    def list_access_controls(self):
        """GET /v3/acl/list: return every ACL entry.

        Returns a list of ``[subject, action, scope, grant_option]`` rows.
        """
        with self.get("/v3/acl/list") as res:
            code, body = res.status, res.read()
            if code != 200:
                self.raise_error("Listing access control failed", res, body)
            js = self.checked_json(body, ["access_controls"])
            acl = [ [roleinfo["subject"], roleinfo["action"], roleinfo["scope"], roleinfo["grant_option"]] for roleinfo in js["access_controls"] ]
            return acl
|
993,320 | 301fa87e8a4eee6d37b1fdd66d2611fb893ffabb | from django.shortcuts import render, redirect
# Create your views here.
def home(request):
    """Render the landing page with a demo title and author name."""
    context = {'demo_title': 'Wello world', 'name': 'melardev'}
    return render(request, 'index.html', context)
def template_demo(request):
    """Render the template-features demo page."""
    context = {'demo_title': 'templates Demo'}
    return render(request, 'ui/template_example.html', context)
993,321 | 78d0669609afb4196c4f56838a59f1d1903858f2 | # Lukas Elsrode - (19/29/2020) - Completed Project for IT department changin
import pandas as pd
from math import ceil
# Init dictionday {fields: count}
# Mapping: client-category label -> CSV column holding that category's
# session count.
d_fields = {'Browser': 'Number of Browser Sessions',
            'Exchange ActiveSync': 'Number of Exchange ActiveSync Sessions',
            'Exchange Web Services': 'Number of Exchange Web Services Sessions',
            'MAPI over HTTP': 'Number of MAPI over HTTP Sessions'}


def make_data(filename='data.csv'):
    """Load the session-count export; *filename* may be a path or file-like."""
    return pd.read_csv(filename)


def fill_categories_for_rows(df=None, d_fields=d_fields):
    """Fill the 'Category' column of *df* from the per-protocol session counts.

    For each row, every protocol with a non-zero count is collected; a single
    protocol becomes the category outright, while multiple protocols are
    joined with ' & ' keeping only those with at least 20% of the busiest
    protocol's count. Rows containing a 'Zero Byte.1' column are flagged on
    stdout and get Category = None.

    :param df: DataFrame to label; when None, data.csv is loaded via
        make_data(). (Fix: the original signature used ``df=make_data()``,
        which read data.csv once at import time and shared that single
        DataFrame across every default call.)
    :param d_fields: {category label: count-column name} mapping.
    :returns: the same DataFrame, with its 'Category' column filled in place.
    """
    if df is None:
        df = make_data()
    for i, r in df.iterrows():
        row_values = r.dropna()
        cat = []
        max_c = 0
        for count_col in d_fields.values():
            try:
                count = int(row_values[count_col])
            except (KeyError, ValueError, TypeError):
                # Column absent for this row (dropped as NaN) or non-numeric.
                # (Fix: was a bare ``except:``.)
                count = None
            else:
                max_c = max(max_c, count)
            if count:  # NOTE: a zero count is ignored, as in the original.
                # 'Number of Browser Sessions' -> 'Browser', etc.
                key = ' '.join(count_col.split(' ')[2:][:-1])
                cat.append((key, count))
        if 'Zero Byte.1' in row_values.index:
            # Malformed export row: show it and leave the category empty.
            print(row_values)
            df.at[i, 'Category'] = None
        elif len(cat) == 1:
            df.at[i, 'Category'] = cat[0][0]
        elif len(cat) > 1:
            # Keep every protocol with >= 20% of the busiest one's sessions.
            significant = [name for name, c in cat if c >= max_c * 0.20]
            df.at[i, 'Category'] = ' & '.join(significant)
    return df
def save_as_xlsx(df, date='NEW'):
    """Save the labelled DataFrame as an Excel file (no index column).

    :param df: DataFrame to export
    :param date: tag embedded in the output file name
    """
    file_name = ('Basic-Auth-Users-Service-Desk-Labelled-verbose-Lukas-'
                 + date + '.xlsx')
    # Context manager replaces the explicit writer.save(), which was
    # deprecated and then removed in pandas 2.0; the writer is closed and
    # flushed even if to_excel raises.
    with pd.ExcelWriter(file_name) as writer:
        df.to_excel(writer, index=False)
    return
if __name__ == "__main__":
# Fill the rows
df_cated = fill_categories_for_rows()
# Save the File
save_as_xlsx(df_cated)
|
993,322 | 844c9dbe24834da1ac63955b5e67e98e1d1ed863 | from rest_framework import serializers
from procurements.models import Purchase, Producer, Country, ProductionType, ProductType, OKPD2ProductType, Material, \
Colour, Characteristic, Product, Region, BaseUser, ContactPerson, Customer, Contractor, PurchaseMethod, \
PurchaseType, ProductItem, Law, Offer, Contract, ContractorItem, CustomerItem
class PurchaseSerializer(serializers.ModelSerializer):
    """Write serializer for Purchase; reads back through the nested
    PurchaseReadSerializer so create/update responses match GET responses."""

    class Meta:
        fields = '__all__'
        model = Purchase

    def to_representation(self, instance):
        # Delegate output shape to the read serializer below.
        return PurchaseReadSerializer(instance).data
class PurchaseReadSerializer(serializers.ModelSerializer):
    """Read serializer for Purchase with nested related objects.

    Optional relations (contractor_item, law, type, offer) serialize to None
    when unset; participant/product items are flattened to their customer /
    product targets.
    """

    class Meta:
        fields = '__all__'
        model = Purchase

    contractor_item = serializers.SerializerMethodField()
    participant_items = serializers.SerializerMethodField()
    product_items = serializers.SerializerMethodField()
    method = serializers.SerializerMethodField()
    law = serializers.SerializerMethodField()
    type = serializers.SerializerMethodField()
    offer = serializers.SerializerMethodField()

    @staticmethod
    def get_contractor_item(instance):
        # Fix: the None check was accidentally nested twice.
        if instance.contractor_item is not None:
            return ContractorSerializer(instance.contractor_item.contractor).data

    @staticmethod
    def get_participant_items(instance):
        return CustomerSerializer(
            [elem.customer for elem in instance.participant_items.all()],
            many=True).data

    @staticmethod
    def get_product_items(instance):
        return ProductSerializer(
            [elem.product for elem in instance.product_items.all()],
            many=True).data

    @staticmethod
    def get_method(instance):
        return PurchaseMethodSerializer(instance.method).data

    @staticmethod
    def get_law(instance):
        if instance.law is not None:
            return LawSerializer(instance.law).data

    @staticmethod
    def get_type(instance):
        # Fix: guard was copy-pasted from get_law and tested instance.law,
        # so a purchase with a law but no type serialized a None type.
        if instance.type is not None:
            return PurchaseTypeSerializer(instance.type).data

    @staticmethod
    def get_offer(instance):
        if instance.offer is not None:
            return OfferSerializer(instance.offer).data
# ---------------------------------------------------------------------------
# Reference-data serializers. Each model gets a write serializer whose
# to_representation() delegates to a paired flat read serializer, so write
# responses always echo the read shape.
# ---------------------------------------------------------------------------
class ProducerSerializer(serializers.ModelSerializer):
    class Meta:
        fields = '__all__'
        model = Producer

    def to_representation(self, instance):
        return ProducerReadSerializer(instance).data


class ProducerReadSerializer(serializers.ModelSerializer):
    class Meta:
        fields = '__all__'
        model = Producer


class CountrySerializer(serializers.ModelSerializer):
    class Meta:
        fields = '__all__'
        model = Country

    def to_representation(self, instance):
        return CountryReadSerializer(instance).data


class CountryReadSerializer(serializers.ModelSerializer):
    class Meta:
        fields = '__all__'
        model = Country


class ProductionTypeSerializer(serializers.ModelSerializer):
    class Meta:
        fields = '__all__'
        model = ProductionType

    def to_representation(self, instance):
        return ProductionTypeReadSerializer(instance).data


class ProductionTypeReadSerializer(serializers.ModelSerializer):
    class Meta:
        fields = '__all__'
        model = ProductionType


class ProductTypeSerializer(serializers.ModelSerializer):
    class Meta:
        fields = '__all__'
        model = ProductType

    def to_representation(self, instance):
        return ProductTypeReadSerializer(instance).data


class ProductTypeReadSerializer(serializers.ModelSerializer):
    class Meta:
        fields = '__all__'
        model = ProductType


class OKPD2ProductTypeSerializer(serializers.ModelSerializer):
    class Meta:
        fields = '__all__'
        model = OKPD2ProductType

    def to_representation(self, instance):
        return OKPD2ProductTypeReadSerializer(instance).data


class OKPD2ProductTypeReadSerializer(serializers.ModelSerializer):
    class Meta:
        fields = '__all__'
        model = OKPD2ProductType


class MaterialSerializer(serializers.ModelSerializer):
    class Meta:
        fields = '__all__'
        model = Material

    def to_representation(self, instance):
        return MaterialReadSerializer(instance).data


class MaterialReadSerializer(serializers.ModelSerializer):
    class Meta:
        fields = '__all__'
        model = Material


class ColourSerializer(serializers.ModelSerializer):
    class Meta:
        fields = '__all__'
        model = Colour

    def to_representation(self, instance):
        return ColourReadSerializer(instance).data


class ColourReadSerializer(serializers.ModelSerializer):
    class Meta:
        fields = '__all__'
        model = Colour


class CharacteristicSerializer(serializers.ModelSerializer):
    class Meta:
        fields = '__all__'
        model = Characteristic

    def to_representation(self, instance):
        return CharacteristicReadSerializer(instance).data


class CharacteristicReadSerializer(serializers.ModelSerializer):
    class Meta:
        fields = '__all__'
        model = Characteristic


# Write serializer for Product; reads back through the nested
# ProductReadSerializer defined below.
class ProductSerializer(serializers.ModelSerializer):
    class Meta:
        fields = '__all__'
        model = Product

    def to_representation(self, instance):
        return ProductReadSerializer(instance).data
class ProductReadSerializer(serializers.ModelSerializer):
    """Read serializer for Product with nested related reference data.

    Single-valued relations serialize to None when unset (the guarded
    getters fall through without a return); characteristics and colour are
    many-valued and always serialize to a list.
    """

    class Meta:
        fields = '__all__'
        model = Product

    producer = serializers.SerializerMethodField()
    country = serializers.SerializerMethodField()
    production_type = serializers.SerializerMethodField()
    product_type = serializers.SerializerMethodField()
    okpd2_product_type = serializers.SerializerMethodField()
    characteristics = serializers.SerializerMethodField()
    material = serializers.SerializerMethodField()
    colour = serializers.SerializerMethodField()

    @staticmethod
    def get_producer(instance):
        if instance.producer is not None:
            return ProducerSerializer(instance.producer).data

    @staticmethod
    def get_country(instance):
        if instance.country is not None:
            return CountrySerializer(instance.country).data

    @staticmethod
    def get_production_type(instance):
        if instance.production_type is not None:
            return ProductionTypeSerializer(instance.production_type).data

    @staticmethod
    def get_product_type(instance):
        if instance.product_type is not None:
            return ProductTypeSerializer(instance.product_type).data

    @staticmethod
    def get_okpd2_product_type(instance):
        if instance.okpd2_product_type is not None:
            return OKPD2ProductTypeSerializer(instance.okpd2_product_type).data

    @staticmethod
    def get_characteristics(instance):
        return CharacteristicSerializer(instance.characteristics, many=True).data

    @staticmethod
    def get_material(instance):
        if instance.material is not None:
            return MaterialSerializer(instance.material).data

    @staticmethod
    def get_colour(instance):
        return ColourSerializer(instance.colour, many=True).data
class RegionSerializer(serializers.ModelSerializer):
    """Writable Region serializer; output rendered by RegionReadSerializer."""

    class Meta:
        model = Region
        fields = '__all__'

    def to_representation(self, instance):
        # Reads and writes share the read serializer's output shape.
        return RegionReadSerializer(instance).data
class RegionReadSerializer(serializers.ModelSerializer):
    """Read-only representation of Region exposing every model field."""

    class Meta:
        model = Region
        fields = '__all__'
class BaseUserSerializer(serializers.ModelSerializer):
    """Writable BaseUser serializer; output rendered by BaseUserReadSerializer."""

    class Meta:
        model = BaseUser
        fields = '__all__'

    def to_representation(self, instance):
        # Reads and writes share the read serializer's output shape.
        return BaseUserReadSerializer(instance).data
class BaseUserReadSerializer(serializers.ModelSerializer):
    """Read-only representation of BaseUser exposing every model field."""

    class Meta:
        model = BaseUser
        fields = '__all__'
class ContactPersonSerializer(serializers.ModelSerializer):
    """Writable ContactPerson serializer; output rendered by ContactPersonReadSerializer."""

    class Meta:
        model = ContactPerson
        fields = '__all__'

    def to_representation(self, instance):
        # Reads and writes share the read serializer's output shape.
        return ContactPersonReadSerializer(instance).data
class ContactPersonReadSerializer(serializers.ModelSerializer):
    """Read-only representation of ContactPerson exposing every model field."""

    class Meta:
        model = ContactPerson
        fields = '__all__'
class CustomerSerializer(serializers.ModelSerializer):
    """Writable Customer serializer; output rendered by CustomerReadSerializer."""

    class Meta:
        model = Customer
        fields = '__all__'

    def to_representation(self, instance):
        # Reads and writes share the read serializer's output shape.
        return CustomerReadSerializer(instance).data
class CustomerReadSerializer(serializers.ModelSerializer):
    """Read-only Customer representation with nested related objects."""

    class Meta:
        fields = '__all__'
        model = Customer

    contact_person = serializers.SerializerMethodField()
    registration_region = serializers.SerializerMethodField()
    production_types = serializers.SerializerMethodField()

    @staticmethod
    def get_contact_person(instance):
        """Nested ContactPerson, or None when unset."""
        if instance.contact_person is not None:
            # BUG FIX: previously serialized the Customer itself
            # (ContactPersonSerializer(instance)) instead of its related
            # contact_person, mirroring the sibling getters below.
            return ContactPersonSerializer(instance.contact_person).data

    @staticmethod
    def get_registration_region(instance):
        """Nested Region, or None when unset."""
        if instance.registration_region is not None:
            return RegionSerializer(instance.registration_region).data

    @staticmethod
    def get_production_types(instance):
        """List of the customer's production types."""
        return ProductionTypeSerializer(instance.production_types, many=True).data
class ContractorSerializer(serializers.ModelSerializer):
    """Writable Contractor serializer; output rendered by ContractorReadSerializer."""

    class Meta:
        model = Contractor
        fields = '__all__'

    def to_representation(self, instance):
        # Reads and writes share the read serializer's output shape.
        return ContractorReadSerializer(instance).data
class ContractorReadSerializer(serializers.ModelSerializer):
    """Read-only Contractor representation with nested region and production types."""

    registration_region = serializers.SerializerMethodField()
    production_types = serializers.SerializerMethodField()

    class Meta:
        model = Contractor
        fields = '__all__'

    @staticmethod
    def get_registration_region(instance):
        # None when the contractor has no region set (nullable relation).
        if instance.registration_region is None:
            return None
        return RegionSerializer(instance.registration_region).data

    @staticmethod
    def get_production_types(instance):
        return ProductionTypeSerializer(instance.production_types, many=True).data
class PurchaseMethodSerializer(serializers.ModelSerializer):
    """Writable PurchaseMethod serializer; output rendered by PurchaseMethodReadSerializer."""

    class Meta:
        model = PurchaseMethod
        fields = '__all__'

    def to_representation(self, instance):
        # Reads and writes share the read serializer's output shape.
        return PurchaseMethodReadSerializer(instance).data
class PurchaseMethodReadSerializer(serializers.ModelSerializer):
    """Read-only representation of PurchaseMethod exposing every model field."""

    class Meta:
        model = PurchaseMethod
        fields = '__all__'
class PurchaseTypeSerializer(serializers.ModelSerializer):
    """Writable PurchaseType serializer; output rendered by PurchaseTypeReadSerializer."""

    class Meta:
        model = PurchaseType
        fields = '__all__'

    def to_representation(self, instance):
        # Reads and writes share the read serializer's output shape.
        return PurchaseTypeReadSerializer(instance).data
class PurchaseTypeReadSerializer(serializers.ModelSerializer):
    """Read-only representation of PurchaseType exposing every model field."""

    class Meta:
        model = PurchaseType
        fields = '__all__'
class ProductItemSerializer(serializers.ModelSerializer):
    """Writable ProductItem serializer; output rendered by ProductItemReadSerializer."""

    class Meta:
        model = ProductItem
        fields = '__all__'

    def to_representation(self, instance):
        # Reads and writes share the read serializer's output shape.
        return ProductItemReadSerializer(instance).data
class ProductItemReadSerializer(serializers.ModelSerializer):
    """Read-only representation of ProductItem exposing every model field."""

    class Meta:
        model = ProductItem
        fields = '__all__'
class LawSerializer(serializers.ModelSerializer):
    """Writable Law serializer; output rendered by LawReadSerializer."""

    class Meta:
        model = Law
        fields = '__all__'

    def to_representation(self, instance):
        # Reads and writes share the read serializer's output shape.
        return LawReadSerializer(instance).data
class LawReadSerializer(serializers.ModelSerializer):
    """Read-only representation of Law exposing every model field."""

    class Meta:
        model = Law
        fields = '__all__'
class OfferSerializer(serializers.ModelSerializer):
    """Writable Offer serializer; output rendered by OfferReadSerializer."""

    class Meta:
        model = Offer
        fields = '__all__'

    def to_representation(self, instance):
        # Reads and writes share the read serializer's output shape.
        return OfferReadSerializer(instance).data
class OfferReadSerializer(serializers.ModelSerializer):
    """Read-only representation of Offer exposing every model field."""

    class Meta:
        model = Offer
        fields = '__all__'
class ContractSerializer(serializers.ModelSerializer):
    """Writable Contract serializer; output rendered by ContractReadSerializer."""

    class Meta:
        model = Contract
        fields = '__all__'

    def to_representation(self, instance):
        # Reads and writes share the read serializer's output shape.
        return ContractReadSerializer(instance).data
class ContractReadSerializer(serializers.ModelSerializer):
    """Read-only representation of Contract exposing every model field."""

    class Meta:
        model = Contract
        fields = '__all__'
class ContractorItemSerializer(serializers.ModelSerializer):
    """Writable ContractorItem serializer; output rendered by ContractorItemReadSerializer."""

    class Meta:
        model = ContractorItem
        fields = '__all__'

    def to_representation(self, instance):
        # Reads and writes share the read serializer's output shape.
        return ContractorItemReadSerializer(instance).data
class ContractorItemReadSerializer(serializers.ModelSerializer):
    """Read-only representation of ContractorItem exposing every model field."""

    class Meta:
        model = ContractorItem
        fields = '__all__'
class CustomerItemSerializer(serializers.ModelSerializer):
    """Writable CustomerItem serializer; output rendered by CustomerItemReadSerializer."""

    class Meta:
        model = CustomerItem
        fields = '__all__'

    def to_representation(self, instance):
        # Reads and writes share the read serializer's output shape.
        return CustomerItemReadSerializer(instance).data
class CustomerItemReadSerializer(serializers.ModelSerializer):
    """Read-only representation of CustomerItem exposing every model field."""

    class Meta:
        model = CustomerItem
        fields = '__all__'
|
993,323 | 1847702a7175304cceff4886a47d9584fe690876 | #coding=utf-8
from sklearn.datasets import load_iris # the iris dataset
from sklearn.model_selection import train_test_split # train/test split helper
from sklearn.neighbors import KNeighborsClassifier # k-nearest-neighbour (kNN) classifier
from sklearn.model_selection import cross_val_score # k-fold cross-validation helper
import matplotlib.pyplot as plt # plotting
# Load the iris dataset.
iris = load_iris()
X = iris.data
y = iris.target
# Model evaluation: plain hold-out validation
################################################
# Split the data into train and test sets.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=4)
# Build the model.
knn = KNeighborsClassifier()
# Fit the model.
knn.fit(X_train, y_train)
# Print the test-set accuracy.
print(knn.score(X_test, y_test)) # 0.973684210526
# Model evaluation: cross-validation
################################################
# Run 5-fold cross-validation.
scores = cross_val_score(knn, X, y, cv=5, scoring='accuracy')
# Print the five per-fold accuracies.
print(scores) # [ 0.96666667 1. 0.93333333 0.96666667 1. ]
# Print the mean accuracy over the five folds.
print(scores.mean()) # 0.973333333333
# Scoring by accuracy
################################################
# Accuracy is the usual criterion for judging classification models.
# Candidate values for k.
k_range = range(1, 31)
k_scores = []
# Evaluate each k by its mean 10-fold cross-validated accuracy.
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    scores = cross_val_score(knn, X, y, cv=10, scoring='accuracy')
    k_scores.append(scores.mean())
# Plot accuracy against k.
plt.plot(k_range, k_scores)
plt.xlabel('Value of K for KNN')
plt.ylabel('Cross-Validated Accuracy')
plt.show()
# Scoring by mean squared error (neg_mean_squared_error)
################################################
# Mean squared error is the usual criterion for judging regression models.
k_range = range(1, 31)
k_scores = []
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    loss = -cross_val_score(knn, X, y, cv=10, scoring='neg_mean_squared_error')
    k_scores.append(loss.mean())
plt.plot(k_range, k_scores)
plt.xlabel('Value of K for KNN')
plt.ylabel('Cross-Validated MSE')
plt.show()
|
993,324 | f5c64384b4ba5400ce3de3c2580fc6de5077b6b9 | from Queue import Queue
from Stack import Stack
class Node:
    """A graph vertex: a value, an adjacency list, and optional weights."""

    def __init__(self, val, weight=1, dist=1):
        self.val = val
        self.weight = weight
        self.dist = dist
        # Outgoing edges, stored as references to neighbouring Nodes.
        self.neighbours = []
class Graph:
    """Adjacency-list directed graph with BFS and DFS traversals."""

    def __init__(self, nodes=None):
        # BUG FIX: the original default `nodes=[]` was a mutable default
        # argument, so every Graph() created without arguments shared the
        # SAME node list. Use a None sentinel instead.
        self.nodes = nodes if nodes is not None else []

    def add_node(self, val, weight=1):
        """Wrap `val` in a Node and append it to the graph."""
        new_node = Node(val, weight)
        self.nodes.append(new_node)

    def add_edge(self, node_u, node_v):
        """Add a directed edge u -> v."""
        node_u.neighbours.append(node_v)

    def BFS(self):
        """Breadth-first traversal from the first node added.

        Returns the list of visited Node objects in visit order.
        Assumes the project Queue exposes add/remove/size -- confirmed by
        usage here, nothing else relied upon.
        """
        if not self.nodes:
            return []
        root = self.nodes[0]
        visited = set([root])
        Q = Queue()
        Q.add(root)
        BfsResult = []
        while Q.size() > 0:
            QueueHead = Q.remove()
            BfsResult.append(QueueHead)
            for neighbour in QueueHead.neighbours:
                if neighbour not in visited:
                    Q.add(neighbour)
                    visited.add(neighbour)
        return BfsResult

    def DFS(self):
        """Depth-first traversal from the first node added.

        Returns the list of visited Node objects in visit order.
        """
        if not self.nodes:
            return []
        root = self.nodes[0]
        visited = set([root])
        S = Stack()
        S.add(root)
        DfsResult = []
        while S.size() > 0:
            StackTop = S.remove()
            DfsResult.append(StackTop)
            for neighbour in StackTop.neighbours:
                if neighbour not in visited:
                    S.add(neighbour)
                    visited.add(neighbour)
        return DfsResult
993,325 | efad260eaf872b7458a492a49369d84c445b3730 | #!/usr/bin/python
# -*- coding: latin-1 -*-
from parserobjects import *
from lexer_rules import tokens
def p_root(subexpressions):
    'h : tempo compasheader constlistinit voicelist'
    # Root of the AST: tempo, time signature, constants block, then the voices.
    subexpressions[0] = Root(subexpressions[1], subexpressions[2], subexpressions[3], subexpressions[4])
def p_root_no_const(subexpressions):
    'h : tempo compasheader voicelist'
    # Variant without a constants block: None stands in for the const list.
    subexpressions[0] = Root(subexpressions[1], subexpressions[2], None, subexpressions[3])
def p_tempo(subexpression):
    'tempo : TEMPOBEGIN SHAPE NUM'
    # Tempo node built from the note shape and its integer BPM value.
    subexpression[0] = Tempo(subexpression[2], int(subexpression[3]))
def p_compasheader(subexpression):
    'compasheader : COMPASHEADERBEGIN NUM SLASH NUM'
    # Time signature: numerator / denominator.
    subexpression[0] = CompasHeader(int(subexpression[2]), int(subexpression[4]))
def p_voice(subexpression):
    'voice : VOICEBEGIN LEFTPAR value RIGHTPAR LEFTCURL voicecontent RIGHTCURL'
    # A voice: its parenthesised value plus the braced body of bars/loops.
    subexpression[0] = Voice(subexpression[3], subexpression[6])
def p_compasloop(subexpression):
    'compasloop : LOOPBEGIN LEFTPAR value RIGHTPAR LEFTCURL compaslist RIGHTCURL'
    # Repeat block: iteration count plus the list of bars to repeat.
    subexpression[0] = CompasLoop(subexpression[3], subexpression[6])
def p_note(subexpression):
    'note : NOTEBEGIN LEFTPAR NOTENAME COMMA value COMMA SHAPE RIGHTPAR SEMICOLON'
    # Plain note (no accidental); the trailing False marks it as not dotted
    # (compare the *_punto rules below).
    subexpression[0] = Note(subexpression[3], None, subexpression[5], subexpression[7], False)
def p_note_alter(subexpression):
    'note : NOTEBEGIN LEFTPAR NOTENAME ALTER COMMA value COMMA SHAPE RIGHTPAR SEMICOLON'
    # Note carrying an accidental (ALTER token), not dotted.
    subexpression[0] = Note(subexpression[3], subexpression[4], subexpression[6], subexpression[8], False)
def p_note_punto(subexpression):
    'note : NOTEBEGIN LEFTPAR NOTENAME COMMA value COMMA SHAPE PUNTO RIGHTPAR SEMICOLON'
    # Dotted note (PUNTO): dotted flag True.
    subexpression[0] = Note(subexpression[3], None, subexpression[5], subexpression[7], True)
def p_note_alter_punto(subexpression):
    'note : NOTEBEGIN LEFTPAR NOTENAME ALTER COMMA value COMMA SHAPE PUNTO RIGHTPAR SEMICOLON'
    # Dotted note with an accidental.
    subexpression[0] = Note(subexpression[3], subexpression[4], subexpression[6], subexpression[8], True)
def p_compaslist_base(subexpression):
    'compaslist : compas'
    # First bar starts the list.
    subexpression[0] = CompasList(subexpression[1], [])
def p_compaslist_rec(subexpression):
    'compaslist : compaslist compas'
    # Append the new bar to the already-accumulated list.
    subexpression[0] = CompasList(subexpression[2], subexpression[1].getList())
def p_voice_list_base(subexpression):
    'voicelist : voice'
    # First voice starts the list.
    subexpression[0] = VoiceList(subexpression[1])
def p_voice_list_rec(subexpressions):
    'voicelist : voicelist voice'
    ### Parameters intentionally inverted: the voicelist param is optional
    ### in the class constructor.
    subexpressions[0] = VoiceList(subexpressions[2], subexpressions[1].getList())
def p_compas(subexpressions):
    'compas : COMPASBEGIN LEFTCURL notelist RIGHTCURL'
    # A bar wraps its braced list of notes and silences.
    subexpressions[0] = Compas(subexpressions[3].getNoteList())
def p_note_list_base_note(subexpression):
    'notelist : note'
    # First note starts the list.
    subexpression[0] = NoteList(subexpression[1], [])
def p_note_list_base_silence(subexpression):
    'notelist : silence'
    # A silence may also start the list.
    subexpression[0] = NoteList(subexpression[1], [])
def p_note_list_rec_note(subexpressions):
    'notelist : notelist note'
    ### Parameters intentionally inverted: the notelist param is optional
    ### in the class constructor.
    subexpressions[0] = NoteList(subexpressions[2], subexpressions[1].getNoteList())
def p_note_list_rec_silence(subexpressions):
    'notelist : notelist silence'
    ### Parameters intentionally inverted: the notelist param is optional
    ### in the class constructor.
    subexpressions[0] = NoteList(subexpressions[2], subexpressions[1].getNoteList())
def p_val_num(subexpression):
    'value : NUM'
    # A literal integer value.
    subexpression[0] = int(subexpression[1])
def p_val_cname(subexpression):
    'value : CNAME'
    # A named constant: resolved through the ConstantManager singleton,
    # which p_const_list_init must have created beforehand.
    subexpression[0] = ConstantManager.getInstance().getValue(subexpression[1])
def p_const(subexpressions):
    'const : CONST CNAME EQUALS NUM SEMICOLON'
    # Constant bound to an integer literal (False = not an alias).
    subexpressions[0] = Const(subexpressions[2],int(subexpressions[4]), False)
#A constant that aliases (points to) another constant.
def p_const_cname(subexpressions):
    'const : CONST CNAME EQUALS CNAME SEMICOLON'
    subexpressions[0] = Const(subexpressions[2],subexpressions[4], True)
#Pass-through production used to initialise the ConstantManager so it can be
#used by the other productions (this assumes constants are declared first,
#in the header).
def p_const_list_init(subexpressions):
    'constlistinit : constlist'
    subexpressions[0] = subexpressions[1]
    #subexpressions[0] is known to be a constlist; initialise the
    #ConstantManager so the remaining productions can reference constants.
    #TODO: pass the lexer's reserved words.
    ConstantManager.createInstance (subexpressions[0].getList(),[] )
def p_const_list_base(subexpressions):
    'constlist : const'
    # First constant starts the list.
    subexpressions[0] = ConstList(subexpressions[1],[])
def p_const_list_rec(subexpressions):
    'constlist : constlist const'
    # Append the new constant to the already-accumulated list.
    subexpressions[0] = ConstList(subexpressions[2],subexpressions[1].getList())
def p_voice_content_base_loop(subexpressions):
    'voicecontent : compasloop'
    # A voice body may start with a repeat block.
    subexpressions[0] = VoiceContent(subexpressions[1],[])
def p_voice_content_base_compas(subexpressions):
    'voicecontent : compas'
    # ...or with a plain bar.
    subexpressions[0] = VoiceContent(subexpressions[1],[])
def p_voice_content_rec_compasloop(subexpressions):
    'voicecontent : voicecontent compasloop'
    # Append the new repeat block to the accumulated voice body.
    subexpressions[0] = VoiceContent(subexpressions[2],subexpressions[1].getList())
def p_voice_content_rec_compas(subexpressions):
    'voicecontent : voicecontent compas'
    # Append the new bar to the accumulated voice body.
    subexpressions[0] = VoiceContent(subexpressions[2],subexpressions[1].getList())
def p_silence(subexpression):
    'silence : SILENCEBEGIN LEFTPAR SHAPE RIGHTPAR SEMICOLON'
    # A rest of the given shape; None = not dotted (compare Note's False flag).
    subexpression[0] = Silence(subexpression[3],None)
def p_silence_punto(subexpression):
    'silence : SILENCEBEGIN LEFTPAR SHAPE PUNTO RIGHTPAR SEMICOLON'
    # Dotted rest (PUNTO).
    subexpression[0] = Silence(subexpression[3],True)
# Reserved words of the language; used to annotate syntax-error messages.
# NOTE: 'smicorchea' is kept for compatibility with the original list, but it
# looks like a typo for 'semicorchea' (sixteenth note), which is also included.
_RESERVED_WORDS = frozenset((
    'const',
    'do', 're', 'mi', 'fa', 'sol', 'la', 'si',
    'tempo', 'compas', 'repetir', 'voz',
    'negra', 'blanca', 'redonda', 'corchea',
    'smicorchea', 'semicorchea',
    'fusa', 'semifusa',
))

def isReserved(token):
    # Return True when `token` is a reserved word (set lookup is O(1),
    # versus the original linear tuple scan).
    return token in _RESERVED_WORDS
def p_error(subexpressions):
    # PLY error handler (looked up by name, not by docstring).
    # `subexpressions` is the offending token, or None when the input ended
    # before the grammar was satisfied.
    # Removed the commented-out debug prints that were left behind here.
    if subexpressions is None:
        raise Exception("[Parser] Archivo incompleto")
    # Annotate the message when the offending token is a reserved word.
    if isReserved(subexpressions.value):
        strReservedMsg = '(palabra reservada)'
    else:
        strReservedMsg = ''
    raise Exception("[Parser] Error de sintaxis Linea: {0}, Pos (absoluta): {1}, Token: <{2}>{3} ".format(subexpressions.lineno, subexpressions.lexpos, subexpressions.value, strReservedMsg))
|
993,326 | 9628e37556722e1317a85cfe6824a2bddf781a63 | """alter timestamps
Revision ID: 874ed61bf8d0
Revises: cc757507e996
Create Date: 2018-02-12 02:07:53.798273
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '874ed61bf8d0'
down_revision = 'cc757507e996'
branch_labels = None
depends_on = None
def upgrade():
    # Widen accounts.created_at / updated_at from naive TIMESTAMP to
    # timezone-aware TIMESTAMP (PostgreSQL timestamptz).
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('accounts', 'created_at',
               existing_type=postgresql.TIMESTAMP(),
               type_=sa.TIMESTAMP(timezone=True),
               existing_nullable=False,
               existing_server_default=sa.text('now()'))
    op.alter_column('accounts', 'updated_at',
               existing_type=postgresql.TIMESTAMP(),
               type_=sa.TIMESTAMP(timezone=True),
               existing_nullable=True)
    # ### end Alembic commands ###
def downgrade():
    # Revert both columns back to naive TIMESTAMP (inverse of upgrade()).
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('accounts', 'updated_at',
               existing_type=sa.TIMESTAMP(timezone=True),
               type_=postgresql.TIMESTAMP(),
               existing_nullable=True)
    op.alter_column('accounts', 'created_at',
               existing_type=sa.TIMESTAMP(timezone=True),
               type_=postgresql.TIMESTAMP(),
               existing_nullable=False,
               existing_server_default=sa.text('now()'))
    # ### end Alembic commands ###
|
993,327 | f048f431dd8114284a5f5081651420ba0e658ea5 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2019-02-25 14:30
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Updates only the admin-facing verbose_name options of the `cates`
    # model -- no database schema change is performed.

    dependencies = [
        ('cate', '0002_auto_20190224_1938'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='cates',
            options={'verbose_name': '案例栏目', 'verbose_name_plural': '案例栏目'},
        ),
    ]
|
993,328 | f1bf3c523356c0dba05c3457e0aadb035477792a | from django import forms
class RegistrationForm(forms.Form):
    """Sign-up form collecting personal details, credentials and contacts.

    Fixes the user-facing typo in the gender label ("Selact" -> "Select")
    and normalizes the previously inconsistent field formatting.
    """

    firstname = forms.CharField(
        label='Enter Your First Name',
        widget=forms.TextInput(
            attrs={
                'class': 'form-control',
                'placeholder': 'Your First Name',
            }
        ),
    )
    lastname = forms.CharField(
        label='Enter Your Last Name',
        widget=forms.TextInput(
            attrs={
                'class': 'form-control',
                'placeholder': 'Your Last Name',
            }
        ),
    )
    username = forms.CharField(
        label='Enter Your User Name',
        widget=forms.TextInput(
            attrs={
                'class': 'form-control',
                'placeholder': 'Your User Name',
            }
        ),
    )
    password = forms.CharField(
        label='Enter Your Password',
        widget=forms.PasswordInput(
            attrs={
                'class': 'form-control',
                'placeholder': 'Your Password',
            }
        ),
    )
    mobile = forms.IntegerField(
        label='Enter Your Mobile Number',
        widget=forms.NumberInput(
            attrs={
                'class': 'form-control',
                'placeholder': 'Your Mobile Number',
            }
        ),
    )
    email = forms.EmailField(
        label='Enter Your Email Id',
        widget=forms.EmailInput(
            attrs={
                'class': 'form-control',
                'placeholder': 'Your Email Id',
            }
        ),
    )

    # (stored value, human-readable label) pairs.
    GENDER_CHOICES = (
        ('Male', 'MALE'),
        ('Female', 'FEMALE'),
    )
    gender = forms.ChoiceField(
        widget=forms.RadioSelect(),
        choices=GENDER_CHOICES,
        # BUG FIX: label previously read "Selact Your Gender".
        label='Select Your Gender',
    )

    # Selectable birth years: 1960-2019 inclusive.
    y = range(1960, 2020)
    date_of_birth = forms.DateField(
        widget=forms.SelectDateWidget(years=y),
        label='Enter Your Date of Birth',
    )
class LoginForm(forms.Form):
    """Login form collecting a user name and password."""

    username = forms.CharField(
        label='Enter Your User Name',
        widget=forms.TextInput(
            attrs={
                'class': 'form-control',
                'placeholder': 'Your User Name',
            }
        ),
    )
    password = forms.CharField(
        label='Enter Your Password',
        # PasswordInput masks the typed characters in the browser.
        widget=forms.PasswordInput(
            attrs={
                'class': 'form-control',
                'placeholder': 'Your Password',
            }
        ),
    )
993,329 | b8dc5facc5253b3b22bfd598990ea0b2ce479412 | from typing import Literal
# Allowed values for the CSS `text-decoration-thickness` property:
# the two CSS keywords plus a fixed pixel scale (presumably Tailwind-style
# utility steps -- confirm against the consuming code).
TextDecorationThickness = Literal[
    'auto',
    'from-font',
    '0',
    '1',
    '2',
    '4',
    '8',
]
|
993,330 | 51c26fe0f7f1875ed0843295f955cfc8a920f99d | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
from logya import __version__
from logya.create import Create
from logya.generate import Generate
from logya.serve import Serve
def create(args):
    """CLI handler: scaffold a new site directory from a starter template."""
    Create(args.name, site=args.site)
def generate(args):
    """CLI handler: build the deployable static site."""
    Generate(verbose=args.verbose, dir_site=args.dir_site, keep=args.keep)
def serve(args):
    """CLI handler: serve the generated pages over HTTP."""
    Serve(host=args.host, port=args.port)
def main():
    """Parse command line arguments and dispatch to the chosen sub-command.

    Sub-commands: create, generate (alias: gen), serve. Each sets a `func`
    default that receives the parsed args.
    """
    parser = argparse.ArgumentParser(
        description='Logya a static Web site generator.')
    parser.add_argument(
        '--version', action='version', version=__version__)
    parser.add_argument(
        '--verbose', action='store_true', default=False, help='print messages')
    subparsers = parser.add_subparsers()

    # create a basic site with the given name
    p_create = subparsers.add_parser(
        'create', help='Create a starter Web site in the specified directory.')
    p_create.add_argument('name', help='name of the directory to create.')
    p_create.set_defaults(func=create)
    p_create.add_argument('--site', default='starter', help='Name one of the available sites.')

    # generate a site for deployment, generate and gen sub commands do the same
    hlp = 'Generate Web site to deploy from current directory.'
    hlp_dir_site = ('Path to Web site directory, absolute or relative to '
                    'current working directory.')
    # BUG FIX: help text previously read "deply".
    hlp_keep = ('Keep existing deploy directory, by default it is removed.')
    for command in ['generate', 'gen']:
        p_gen = subparsers.add_parser(command, help=hlp)
        p_gen.set_defaults(func=generate)
        p_gen.add_argument('--dir_site', help=hlp_dir_site)
        p_gen.add_argument('--keep', action='store_true', default=False, help=hlp_keep)

    # serve static pages
    p_serve = subparsers.add_parser(
        'serve', help='Serve static pages from deploy directory.')
    p_serve.set_defaults(func=serve)
    p_serve.add_argument('--port', type=int, help='server port to listen')
    p_serve.add_argument('--host', help='server host name or IP')

    args = parser.parse_args()
    # No sub-command given: parse_args succeeds but sets no `func`.
    if getattr(args, 'func', None):
        args.func(args)
# Allow running this module directly as the CLI entry point.
if __name__ == '__main__':
    main()
|
993,331 | 7d71b9a8acf8bdc58bacf9aece1f5d3e886463a4 | import os,re,pdb
from pprint import pprint
## Get the repos
# Scan the apt package-list cache for Release files and extract each repo's
# Origin/Suite pair, to be added to unattended-upgrades' Allowed-Origins.
path='/var/lib/apt/lists/'
files=os.listdir(path)
release_files=[file for file in files if file.endswith('Release')]
origin_pattern=re.compile('Origin: (.*)\n')
suite_pattern=re.compile('Suite: (.*)\n')
# URL matcher used to skip origins that are raw URLs rather than vendor names.
regex_url = re.compile(
        r'^(?:http|ftp)s?://' # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
        r'localhost|' #localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
        r'(?::\d+)?' # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
skipped_release_files=[]
repos_to_add=[]
for release_file in release_files:
    with open(path+release_file, 'r') as f:
        read_data = f.read()
    # parse to get origin and suite
    origin_string=re.findall(origin_pattern,read_data)
    suite_string=re.findall(suite_pattern,read_data)
    try:
        # Escape commas, which separate entries in the apt.conf list syntax.
        repo="\"%s:%s\";" %(origin_string[0].replace(',',r'\,'),
                            suite_string[0].replace(',',r'\,'))
        if re.match(regex_url,origin_string[0]):
            skipped_release_files.append(release_file)
        else:
            repos_to_add.append(repo)
    except IndexError:
        # Release file without an Origin or Suite header.
        skipped_release_files.append(release_file)
## Checking if repos_to_add not already present in /etc/apt/apt.conf.d/50unattended-upgrades
with open('/etc/apt/apt.conf.d/50unattended-upgrades','r') as f:
    read_data=f.read()
# get everything before first };
raw_data=re.findall('[.\s\S]*};',read_data)
repos_already_present=re.findall('".*:.*";',raw_data[0])
repos_to_add=[repo for repo in repos_to_add if repo not in repos_already_present]
print ("Add repos:")
print ('\n'.join(repos_to_add))
print ("\nSkipping files due to not present origin or suite. Or origin being a url.:")
print ('\n'.join(skipped_release_files))
993,332 | 2eb09cf0efc3961e0b1b7ea6e7818ef72f7f5abe | from ttt_board import Cell_Value, TTT_Board
import numpy as np
class Person:
    """A human tic-tac-toe player that reads moves from stdin."""

    def __init__(self, player_symbol):
        self.player_symbol = player_symbol

    def play_turn(self, board):
        """Prompt on stdin and return the chosen (x, y) coordinate pair."""
        col = input("X cord: ")
        row = input("Y cord: ")
        return (int(col), int(row))
class AI(Person):
    """Minimax-based tic-tac-toe player."""
    # Score bounds used as minimax sentinels (actual scores are scaled by
    # depth, so they can exceed these magnitudes at terminal positions).
    MAX_EVAL = 100
    MIN_EVAL = -100
    def __init__(self, player_symbol):
        """Create an AI playing with the given symbol."""
        super(AI, self).__init__( player_symbol )
    def eval(self, board):
        """Heuristic score: weighted sum favouring corners and the centre.

        NOTE(review): assumes board.to_num_arr() returns a 3x3 numeric
        array -- confirm against TTT_Board.
        """
        mask = np.array([[3, 2, 3], [2, 3, 2],[3, 2, 3]])
        return np.sum( board.to_num_arr() * mask )
    def play_turn(self, board):
        """Return the (x, y) move chosen by minimax for `board`."""
        rate, move = self.minimax(board, True)
        return move
    def minimax(self, board, max_player=True, max_depth=3 ):
        """Minimax search; returns (score, best (x, y) move).

        Scores are scaled by (9 - max_depth) so that outcomes reached at
        shallower remaining depth rank differently from deeper ones.
        (-1, -1) is returned as the move at terminal positions.
        """
        moves = board.get_moves()
        if max_depth == None:
            # No depth limit: search until the board is full.
            max_depth = len(moves)
        # Game is over if no moves (cat's game) or already won
        if max_depth < 0 or moves == [] or board.check_win() != Cell_Value.UNCLAIMED:
            if board.check_win() == self.player_symbol:
                return AI.MAX_EVAL * (9-max_depth), (-1,-1)
            elif board.check_win() == Cell_Value.UNCLAIMED:
                # Depth cut-off with no winner: fall back to the heuristic.
                return self.eval(board) * (9-max_depth), (-1,-1)
            else:
                return AI.MIN_EVAL * (9-max_depth), (-1,-1)
        possible_moves = board.get_moves()
        best_move = None
        if max_player:
            # Maximising player: the AI plays PLAYER_2 here.
            curr_eval = AI.MIN_EVAL
            for move in possible_moves:
                test_board = TTT_Board(board)
                test_board.state[move] = Cell_Value.PLAYER_2
                move_eval, enemy_move = self.minimax( test_board, False, max_depth-1)
                curr_eval = max( curr_eval, move_eval)
                if curr_eval == move_eval:
                    best_move = move
        else: # Minimizing player
            curr_eval = AI.MAX_EVAL
            for move in possible_moves:
                test_board = TTT_Board(board)
                test_board.state[move] = Cell_Value.PLAYER_1
                move_eval , enemy_move = self.minimax( test_board, True, max_depth-1)
                curr_eval = min( curr_eval, move_eval)
                if curr_eval == move_eval:
                    best_move = move
        return curr_eval, best_move
|
993,333 | 602e7d3f1086d131b3b3e03859e5fc4809f6690e | # Copyright CEA/DAM/DIF (2010)
# Contributors:
# Stephane THIELL <stephane.thiell@cea.fr>
# Aurelien DEGREMONT <aurelien.degremont@cea.fr>
#
# This file is part of the ClusterShell library.
#
# This software is governed by the CeCILL-C license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL-C
# license as circulated by CEA, CNRS and INRIA at the following URL
# "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-C license and that you accept its terms.
#
# $Id: NodeUtils.py 509 2011-06-07 23:13:52Z st-cea $
"""
Cluster nodes utility module
The NodeUtils module is a ClusterShell helper module that provides
supplementary services to manage nodes in a cluster. It is primarily
designed to enhance the NodeSet module providing some binding support
to external node groups sources in separate namespaces (example of
group sources are: files, jobs scheduler, custom scripts, etc.).
"""
import sys
from ConfigParser import ConfigParser, NoOptionError, NoSectionError
from string import Template
from subprocess import Popen, PIPE
class GroupSourceException(Exception):
    """Base GroupSource exception; carries the source that failed."""
    def __init__(self, message, group_source):
        Exception.__init__(self, message)
        # The GroupSource instance this error relates to.
        self.group_source = group_source
class GroupSourceNoUpcall(GroupSourceException):
    """Raised when the requested upcall is not configured for the source"""
class GroupSourceQueryFailed(GroupSourceException):
    """Raised when an upcall command exits non-zero (eg. no group found)"""
class GroupResolverError(Exception):
    """Base GroupResolver error"""
class GroupResolverSourceError(GroupResolverError):
    """Raised when the requested group source is unknown to the resolver"""
class GroupResolverConfigError(GroupResolverError):
    """Raised when a configuration error is encountered"""
class GroupSource(object):
    """
    GroupSource class managing external calls for nodegroup support.

    Each supported query (map, all, list, reverse) is backed by an
    external shell command template; results are cached per instance.
    """
    def __init__(self, name, map_upcall, all_upcall=None,
                 list_upcall=None, reverse_upcall=None):
        self.name = name
        self.verbosity = 0
        # Cache upcall data
        self._cache_map = {}
        self._cache_list = []
        self._cache_all = None
        self._cache_reverse = {}
        # Supported external upcalls
        self.map_upcall = map_upcall
        self.all_upcall = all_upcall
        self.list_upcall = list_upcall
        self.reverse_upcall = reverse_upcall
    def _verbose_print(self, msg):
        # Debug trace to stderr; enabled through GroupResolver.set_verbosity().
        if self.verbosity > 0:
            print >> sys.stderr, "%s<%s> %s" % \
                (self.__class__.__name__, self.name, msg)
    def _upcall_read(self, cmdtpl, vars=dict()):
        """
        Invoke the specified upcall command, raise an Exception if
        something goes wrong and return the command output otherwise.
        """
        # NOTE(review): `vars` is a shared mutable default -- harmless as
        # long as it is never mutated (it is not, here).
        cmdline = Template(getattr(self, "%s_upcall" % \
            cmdtpl)).safe_substitute(vars)
        self._verbose_print("EXEC '%s'" % cmdline)
        # shell=True: upcall commands come from the admin-written config file.
        proc = Popen(cmdline, stdout=PIPE, shell=True)
        output = proc.communicate()[0].strip()
        self._verbose_print("READ '%s'" % output)
        if proc.returncode != 0:
            self._verbose_print("ERROR '%s' returned %d" % (cmdline, \
                proc.returncode))
            raise GroupSourceQueryFailed(cmdline, self)
        return output
    def resolv_map(self, group):
        """
        Get nodes from group 'group', using the cached value if
        available.
        """
        if group not in self._cache_map:
            self._cache_map[group] = self._upcall_read('map', dict(GROUP=group))
        return self._cache_map[group]
    def resolv_list(self):
        """
        Return a list of all group names for this group source, using
        the cached value if available.
        """
        if not self.list_upcall:
            raise GroupSourceNoUpcall("list", self)
        if not self._cache_list:
            self._cache_list = self._upcall_read('list')
        return self._cache_list
    def resolv_all(self):
        """
        Return the content of special group ALL, using the cached value
        if available.
        """
        if not self.all_upcall:
            raise GroupSourceNoUpcall("all", self)
        if not self._cache_all:
            self._cache_all = self._upcall_read('all')
        return self._cache_all
    def resolv_reverse(self, node):
        """
        Return the group name matching the provided node, using the
        cached value if available.
        """
        if not self.reverse_upcall:
            raise GroupSourceNoUpcall("reverse", self)
        if node not in self._cache_reverse:
            self._cache_reverse[node] = self._upcall_read('reverse', \
                dict(NODE=node))
        return self._cache_reverse[node]
class GroupResolver(object):
    """
    Base class GroupResolver that aims to provide node/group resolution
    from multiple GroupSource's.

    Sources are registered by name; queries without a namespace go to the
    default source.
    """
    def __init__(self, default_source=None):
        """
        Initialize GroupResolver object.
        """
        self._sources = {}
        self._default_source = default_source
        if default_source:
            self._sources[default_source.name] = default_source
    def set_verbosity(self, value):
        """
        Set debugging verbosity value.
        """
        for source in self._sources.itervalues():
            source.verbosity = value
    def add_source(self, group_source):
        """
        Add a GroupSource to this resolver.
        """
        if group_source.name in self._sources:
            raise ValueError("GroupSource '%s': name collision" % \
                             group_source.name)
        self._sources[group_source.name] = group_source
    def sources(self):
        """
        Get the list of all resolver source names.
        """
        return self._sources.keys()
    def _list(self, source, what, *args):
        """Helper method that returns a list of result when the source
        is defined."""
        result = []
        assert source
        # Dispatch to source.resolv_map / resolv_all / resolv_list /
        # resolv_reverse, then flatten the whitespace-separated output.
        raw = getattr(source, 'resolv_%s' % what)(*args)
        for line in raw.splitlines():
            map(result.append, line.strip().split())
        return result
    def _source(self, namespace):
        """Helper method that returns the source by namespace name."""
        if not namespace:
            source = self._default_source
        else:
            source = self._sources.get(namespace)
        if not source:
            raise GroupResolverSourceError(namespace or "<default>")
        return source
    def group_nodes(self, group, namespace=None):
        """
        Find nodes for specified group name and optional namespace.
        """
        source = self._source(namespace)
        return self._list(source, 'map', group)
    def all_nodes(self, namespace=None):
        """
        Find all nodes. You may specify an optional namespace.
        """
        source = self._source(namespace)
        return self._list(source, 'all')
    def grouplist(self, namespace=None):
        """
        Get full group list. You may specify an optional
        namespace.
        """
        source = self._source(namespace)
        return self._list(source, 'list')
    def has_node_groups(self, namespace=None):
        """
        Return whether finding group list for a specified node is
        supported by the resolver (in optional namespace).
        """
        try:
            return bool(self._source(namespace).reverse_upcall)
        except GroupResolverSourceError:
            return False
    def node_groups(self, node, namespace=None):
        """
        Find group list for specified node and optional namespace.
        """
        source = self._source(namespace)
        return self._list(source, 'reverse', node)
class GroupResolverConfig(GroupResolver):
    """
    GroupResolver class that is able to automatically setup its
    GroupSource's from a configuration file. This is the default
    resolver for NodeSet.
    """
    def __init__(self, configfile):
        """Initialize the resolver from the given configuration file."""
        GroupResolver.__init__(self)
        self.default_sourcename = None
        self.config = ConfigParser()
        self.config.read(configfile)
        # Get config file sections
        group_sections = self.config.sections()
        if 'Main' in group_sections:
            group_sections.remove('Main')
        if not group_sections:
            return
        try:
            self.default_sourcename = self.config.get('Main', 'default')
            if self.default_sourcename and self.default_sourcename \
                not in group_sections:
                raise GroupResolverConfigError( \
                    "Default group source not found: \"%s\"" % \
                    self.default_sourcename)
        except (NoSectionError, NoOptionError):
            pass
        # When not specified, pick an arbitrary section (the first listed).
        if not self.default_sourcename:
            self.default_sourcename = group_sections[0]
        try:
            for section in group_sections:
                # Third positional arg of ConfigParser.get() is `raw`:
                # keep '%' characters in upcall command lines unexpanded.
                map_upcall = self.config.get(section, 'map', True)
                all_upcall = list_upcall = reverse_upcall = None
                if self.config.has_option(section, 'all'):
                    all_upcall = self.config.get(section, 'all', True)
                if self.config.has_option(section, 'list'):
                    list_upcall = self.config.get(section, 'list', True)
                if self.config.has_option(section, 'reverse'):
                    reverse_upcall = self.config.get(section, 'reverse', True)
                self.add_source(GroupSource(section, map_upcall, all_upcall,
                                            list_upcall, reverse_upcall))
        except (NoSectionError, NoOptionError), e:
            raise GroupResolverConfigError(str(e))
    def _source(self, namespace):
        # Fall back to the configured default source when no namespace given.
        return GroupResolver._source(self, namespace or self.default_sourcename)
    def sources(self):
        """
        Get the list of all resolver source names (default source is always
        first).
        """
        srcs = GroupResolver.sources(self)
        if srcs:
            srcs.remove(self.default_sourcename)
            srcs.insert(0, self.default_sourcename)
        return srcs
|
993,334 | 740fe308f2c6787ab4301f26c8979c4fa7542ead | def solution(p,v):
F=lambda v,s:F(s,v%s)if s else v
l=len(v)+1
s=[-1]*l
for i in range(l-2):
f=F(v[i],v[i+1])
if f!=v[i]:
s[i+1]=f
for j in range(i+2,l):s[j]=v[j-1]//s[j-1]
for j in range(i,-1,-1):s[j]=v[j]//s[j+1]
return''.join(chr(sorted(set(s)).index(x)+65)for x in s) |
993,335 | 2d1722e76981ec16754ba849ef56590b4323d872 | class Solution:
def uniquePathsWithObstacles(self, obstacleGrid):
if not obstacleGrid:
return 0
m, n = len(obstacleGrid), len(obstacleGrid[0])
dp = [[0 for _ in range(n)]] + obstacleGrid
dp = [[0] + row for row in dp]
dp[0][1] = 1
for i in range(1, m + 1):
for j in range(1, n + 1):
if dp[i][j] == 1:
dp[i][j] = 0
else:
dp[i][j] = dp[i-1][j] + dp[i][j-1]
return dp[m][n]
|
993,336 | 1427dd5a6abb01aeb5d3f3ea67be7f08ea66145b | s1=str(input())
s2=str(input())
n=len(s1)
m=len(s2)
a=[0]*n
b=[0]*m
# Forward pass over s1: each set bit seeds a count that is carried into
# the next two positions (Zeckendorf/Fibonacci-style weight folding).
for i in range(0,n-2):
    if s1[i]=='1':
        a[i]=a[i]+1
    a[i+1]+=a[i]
    a[i+2]+=a[i];
if(s1[n-2]=='1'):
    a[n-2]+=1
if(s1[n-1]=='1'):
    a[n-1]+=1
# Same pass over s2.  BUG FIX: the original wrote `++b[i]`, which in
# Python is two unary pluses (a no-op expression), so s2's bits were
# never counted; use an explicit increment like the s1 loop does.
for i in range(0,m-2):
    if s2[i]=='1':
        b[i]=b[i]+1
    b[i+1]+=b[i]
    b[i+2]+=b[i];
if(s2[m-2]=='1'):
    b[m-2]+=1
if(s2[m-1]=='1'):
    b[m-1]+=1
# Compare the folded values using golden-ratio arithmetic:
# sign(d2*phi + d1) via 5*d2^2 vs (2*d1 - d2)^2 style terms.
t1=5*pow(a[n-2]-b[m-2],2)
t2=pow(2*(b[m-1]-a[n-1])-(a[n-2]-b[m-2]),2)
if(t1<t2):
    print("<")
if(t1==t2):
    print("=")
if(t1>t2):
    print(">")
993,337 | 10402fd87159dce0c8e927eb897cbfdcafcd2211 | from django.shortcuts import render
from django.contrib.auth import authenticate, login
from django.http import HttpResponseRedirect
#from django.http import HttpResponse
from .forms import SignInForm
def index(request):
    """Sign-in view: show the form, or authenticate POSTed credentials.

    NOTE(review): the bound form created from request.POST is never
    validated or re-rendered on failure; credentials are read directly
    from request.POST and a failed login simply redirects back here.
    """
    if request.user.is_authenticated:
        # if user already authenticated - redirect to main page
        return HttpResponseRedirect('/main/')
    else:
        # Do something for anonymous users.
        # if this is a POST request we need to process the form data
        if request.method == 'POST':
            # create a form instance and populate it with data from the request:
            form = SignInForm(request.POST)
            username = request.POST['username']
            password = request.POST['password']
            account = authenticate(request, username=username, password=password)
            if account is not None:
                login(request, account)
                # Redirect to a success page.
                return HttpResponseRedirect('/main/')
            else:
                # Return an 'invalid login' error message.
                return HttpResponseRedirect('/signin/')
        # if a GET (or any other method, or 1st load) we'll create a blank form
        else:
            form = SignInForm()
        return render(request, 'signin/index.html', {'form': form})
993,338 | 77d704a91fc41517f4727181d9db97157477e3b0 | import pytest
from Queue.queue import queue_time
# Unit tests for queue_time(customers, n_tills): the time until the last
# customer is served given per-customer times and n parallel tills.

def test_queue_basic():
    # Single customer, more tills than customers.
    result = queue_time([10], 4)
    assert result == 10

def test_queue_basic2():
    # Empty queue takes no time.
    result = queue_time([], 4)
    assert result == 0

def test_queue1():
    # The 10-minute customer dominates one till.
    result = queue_time([10, 2, 3, 3], 2)
    assert result == 10

def test_queue2():
    # One till: total is the plain sum.
    result = queue_time([5, 3, 4], 1)
    assert result == 12

def test_queue3():
    # 2 and 3 share the first till; 10 follows the shorter one.
    result = queue_time([2, 3, 10], 2)
    assert result == 12

def test_queue4():
    # Zero-time customers finish instantly.
    result = queue_time([0, 0, 0], 7)
    assert result == 0
993,339 | 3f3d98efaff74303a9970d96ef15a265c383e140 | #!/bin/python3
from flask import Flask,render_template,flash, redirect,url_for,session,logging,request
from flask_sqlalchemy import SQLAlchemy
from flask_mail import Message, Mail
from itsdangerous import URLSafeTimedSerializer, SignatureExpired
import datetime
from validate_email import validate_email
# Application bootstrap: Flask app, SQLAlchemy ORM, SMTP mailer, and the
# serializer used to sign email-confirmation tokens.
app = Flask(__name__)
app.config.from_pyfile('config.cfg')
db = SQLAlchemy(app)
mail = Mail(app)
# Signed, timestamped tokens (see confirm_email's max_age check).
s = URLSafeTimedSerializer(app.config['SECRET_KEY'])
class Contestant(db.Model):
    """A swim-and-run registration row.

    ``active`` flips to True once the contestant clicks the signed
    confirmation link mailed by register(); ``confirmation`` stores
    that token so confirm_email() can look the row up.
    """
    id = db.Column(db.Integer, primary_key=True)
    gender = db.Column(db.String(10))
    first_name = db.Column(db.String(20))
    last_name = db.Column(db.String(80))
    email = db.Column(db.String(80), unique=True)
    year_of_birth = db.Column(db.Integer)
    telephone = db.Column(db.String(25))
    club = db.Column(db.String(50))
    contest = db.Column(db.String(25))
    confirmation = db.Column(db.String())
    # BUG FIX: pass the callable, not its result.  With
    # default=datetime.datetime.now() the timestamp was evaluated once at
    # import time, stamping every row with the server start time.
    time = db.Column(db.DateTime(), default=datetime.datetime.now)
    active = db.Column(db.Boolean(), default=False)
    ip = db.Column(db.String(16))
    est_swim_time = db.Column(db.String(5))
@app.route('/')
def index():
    """Landing page; shows the count of confirmed registrations."""
    nor = len(Contestant.query.filter_by(active=True).all())
    return render_template("index.html", nor=nor)

@app.route('/register', methods=['GET', 'POST'])
def register():
    """Registration form: validate the email, persist the contestant
    (inactive), and mail a signed confirmation link.

    NOTE(review): the confirmation mail is sent before the DB commit;
    if commit fails the user receives a link to a row that was never
    stored.
    """
    if request.method == 'POST':
        email = request.form['email']
        # The token both goes into the mail link and is stored on the
        # row so confirm_email() can find the contestant again.
        token = s.dumps(email, salt='email-confirm')
        # check_mx performs a live DNS lookup -- slow and may misreport
        # on transient DNS failure; TODO confirm this is intended.
        validation = validate_email(email, check_mx=True, debug=True)
        if validation == False:
            return '''<h1 style="text-align: center;">Email ist ungültig</h1>
                        <script>window.setTimeout(function(){window.history.back();}, 3000);</script>'''
        if bool(Contestant.query.filter_by(email=email).first()):
            return '''<h1 style="text-align: center;">Email bereits angemeldet</h1>
                        <script>window.setTimeout(function(){window.history.back();}, 3000);</script>'''
        new_contestant = Contestant(first_name = request.form['firstname'], last_name = request.form['lastname'],
                                    email = request.form['email'], telephone = request.form['telephone'],
                                    year_of_birth = request.form['yob'], contest = "Eintracht", #request.form['contest'],
                                    confirmation=token,
                                    gender = request.form['gender'],club = request.form['club'], ip = request.environ['REMOTE_ADDR'],
                                    est_swim_time="{:02d}:{:02d}".format(int(request.form['minutes']), int(request.form['seconds'])))
        msg = Message('Email confirmation', sender='swimandrun-hannover@gmx.de', recipients=[email])
        link = url_for('confirm_email', token=token, _external=True)
        msg.body = 'Please confirm your registration on this link: {}'.format(link)
        mail.send(msg)
        db.session.add(new_contestant)
        db.session.commit()
        return redirect(url_for("send_email"))
    return render_template("signup.html")
@app.route('/dashboard', methods=['GET', 'POST'])
def list_all():
    """Dashboard listing all confirmed contestants by last name."""
    # NOTE(review): `query` is unused and can be removed.
    query = db.session.query_property()
    contestants = Contestant.query.filter_by(active=True).order_by(Contestant.last_name).all()
    return render_template("dashboard.html", contestants=contestants)

@app.route('/confirm_email/<token>')
def confirm_email(token):
    """Activate the contestant whose signed token matches, unless the
    token is older than max_age seconds (22 hours)."""
    try:
        email = s.loads(token, salt='email-confirm', max_age=79200)
    except SignatureExpired:
        return '<h1>Confirmation time has expired</h1>'
    # Token is stored on the row at registration time; an unknown token
    # would make `update` None and raise here -- TODO confirm intended.
    update = Contestant.query.filter_by(confirmation=token).first()
    update.active = True
    db.session.commit()
    return redirect(url_for("confirmation"))

@app.route('/confirmation')
def confirmation():
    """Static 'registration confirmed' page."""
    return render_template("confirmation.html")

@app.route('/email_sent')
def send_email():
    """Static 'check your inbox' page shown after register()."""
    return render_template("email_sent.html")

@app.route('/infos')
def infos():
    """Static info page."""
    return render_template("infos.html")

if __name__ == "__main__":
    # Create tables on first run, then serve on all interfaces, port 80.
    db.create_all()
    app.run(debug=True,host='0.0.0.0',port=80)
|
993,340 | e4a2ff1da9c79804bd60f0a5489d70459a6c173a | '''
1. Реализовать функцию, принимающую два числа (позиционные аргументы) и
выполняющую их деление. Числа запрашивать у пользователя, предусмотреть
обработку ситуации деления на ноль.
'''
def div(arg, arg2):
    """Return arg / arg2; on a zero divisor print a warning and return None."""
    if arg2 == 0:
        # Guard clause: report division by zero instead of raising.
        print("Неправильное число, деление на ноль")
        return None
    return arg / arg2
# Task 1 driver: read dividend and divisor, print the quotient.
arg = int(input("Введите число делимое:"))
arg2 = int(input("Введите число делитель:"))
print(f'Результат деления: {div(arg, arg2)}')
'''
2. Реализовать функцию, принимающую несколько параметров, описывающих данные
пользователя: имя, фамилия, год рождения, город проживания, email, телефон.
Функция должна принимать параметры как именованные аргументы.
Реализовать вывод данных о пользователе одной строкой.
'''
def my_func(name, last_name, year, city, email, telephone):
    """Return the user's details as one space-separated line."""
    details = (name, last_name, year, city, email, telephone)
    return ' '.join(details)
# Task 2 driver: all arguments passed as keywords, printed as one line.
print(my_func(last_name='Fedorovski', name='Vladimir', year='1998',
              city='Belgorod', email='trep@mail.ru',
              telephone='8-903-300-99-87'))
'''
3. Реализовать функцию my_func(), которая принимает три позиционных аргумента,
и возвращает сумму наибольших двух аргументов.
'''
def my_func(x, y, z):
    """Return the sum of the two largest of the three arguments."""
    # Sorting ascending and dropping the minimum leaves the top two.
    return sum(sorted((x, y, z))[1:])
# Task 3 driver: expects 4 + 10 = 14.
x = 4
y = 10
z = 1
print(f"Сумма двух наибольших аргументов: {my_func(x, y, z)}")
'''
4. Программа принимает действительное положительное число x и целое
отрицательное число y. Необходимо выполнить возведение числа x в степень y.
Задание необходимо реализовать в виде функции my_func(x, y).
При решении задания необходимо обойтись без встроенной функции возведения числа
в степень.
'''
def my_func(x, y):
    """Raise x to the power y (reference one-liner for task 4)."""
    return pow(x, y)
def my_func1(x, y):
    """Compute x**y for negative integer y by repeated division.

    Starts from 1/x and divides by x another abs(y)-1 times, i.e.
    (1/x)**abs(y), without using the power operator.
    """
    value = 1 / x
    for _ in range(abs(y) - 1):
        value = value / x
    return value
# Task 4 driver: both implementations should print 0.015625.
x = 2
y = -6
print(f"Результат возведения в степень первый метод: {my_func(x, y)}")
print(f"Результат возведения в степень второй метод: {my_func1(x, y)}")
'''
5. Программа запрашивает у пользователя строку чисел, разделенных пробелом.
При нажатии Enter должна выводиться сумма чисел. Пользователь может продолжить
ввод чисел, разделенных пробелом и снова нажать Enter. Сумма вновь введенных
чисел будет добавляться к уже подсчитанной сумме. Но если вместо числа вводится
специальный символ, выполнение программы завершается. Если специальный символ
введен после нескольких чисел, то вначале нужно добавить сумму этих чисел
к полученной ранее сумме и после этого завершить программу.
'''
def sum_item():
    """Task 5: accumulate sums of space-separated numbers across input
    rounds until the user enters 'q'/'Q'.

    Numbers typed on the same line before the quit symbol are still
    added to the running total before the program finishes.
    """
    result_all = 0
    check = False
    while check == False:
        number = input('Введите числа через пробел или нажмите Q для выхода - ') \
            .split()
        result = 0
        for item in range(len(number)):
            if number[item] == 'q' or number[item] == 'Q':
                # Stop after this line; numbers before 'q' were counted.
                check = True
                break
            else:
                result += int(number[item])
        print(f'Текущая сумма: {result}')
        result_all += result
    print(f'Конечный результат: {result_all}')

sum_item()
'''
Реализовать функцию int_func(), принимающую слово из маленьких латинских букв и
возвращающую его же, но с прописной первой буквой.
Например, print(int_func(‘text’)) -> Text.
Продолжить работу над заданием. В программу должна попадать строка из слов,
разделенных пробелом. Каждое слово состоит из латинских букв в нижнем регистре.
Сделать вывод исходной строки, но каждое слово должно начинаться с заглавной
буквы. Необходимо использовать написанную ранее функцию int_func().
'''
def int_func():
    """Task 6: read words and print them title-cased.

    NOTE(review): returns print(...)'s result, i.e. None.
    """
    word = input("Введите слова: ").title()
    return print(word)

int_func()
993,341 | 2e662a0739198af528ee8e30ae8789e9fdb097dc | # https://binarysearch.com/problems/Pascal's-Triangle/submissions/4441799
class Solution:
    """Pascal's triangle: solve(n) returns the n-th row (0-indexed)."""

    def solve(self, n):
        """Return row *n* of Pascal's triangle as a list of ints.

        Builds rows iteratively from the two seed rows; for n < 2 the
        seed row itself is returned.
        """
        # Cleaned up: the original carried an unused `temp` variable and
        # a manual while/index loop.
        rows = [[1], [1, 1]]
        for i in range(2, n + 1):
            rows.append(self.calc(rows[i - 1]))
        return rows[n]

    def calc(self, lst):
        """Return the row following *lst*: 1, pairwise sums, 1."""
        res = [1]
        for left, right in zip(lst, lst[1:]):
            res.append(left + right)
        res.append(1)
        return res
|
993,342 | 491540480e8d306dabbe6534b1c1abea0520100d | import numpy as np
from scipy import linalg
import pyHPC
from itertools import izip as zip
def lu(matrix):
    """
    Compute LU decomposition of a matrix.

    Blocked, distributed right-looking LU without pivoting (Python 2 /
    pyHPC code: `zip` is itertools.izip, `/` on block indices is integer
    division).

    Parameters
    ----------
    a : array, shape (M, M)
        Array to decompose
    Returns
    -------
    p : array, shape (M, M)
        Permutation matrix
    l : array, shape (M, M)
        Lower triangular or trapezoidal matrix with unit diagonal.
    u : array, shape (M, M)
        Upper triangular or trapezoidal matrix
    """
    SIZE = matrix.shape[0]
    BS = np.BLOCKSIZE
    # BUG FIX: the original compared shape[0] with itself, so the
    # squared-matrix check could never fire.
    if matrix.shape[0] != matrix.shape[1]:
        raise Exception("LU only supports squared matricis")
    if not matrix.dist():
        raise Exception("The matrix is not distributed")
    if(SIZE % np.BLOCKSIZE != 0):
        raise Exception("The matrix dimensions must be divisible "\
                        "with np.BLOCKSIZE(%d)"%np.BLOCKSIZE)
    (prow,pcol) = matrix.pgrid()
    # Work on a distributed copy of the input; L and U accumulate the
    # factors.  (Removed two unused scratch arrays tmpL/tmpU.)
    A = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True);A += matrix
    L = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True)
    U = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True)
    for k in xrange(0,SIZE,BS):
        bs = min(BS,SIZE - k) #Current block size
        kb = k / BS # k as block index
        #Factorize the diagonal block (renamed from `slice`, which
        #shadowed the builtin).
        blk = ((kb,kb+1),(kb,kb+1))
        for a,l,u in zip(A.blocks(blk), L.blocks(blk), U.blocks(blk)):
            (p,tl,tu) = linalg.lu(a)
            if not (np.diag(p) == 1).all():#We do not support pivoting
                raise Exception("Pivoting was needed!")
            #There seems to be a transpose bug in SciPy's LU
            l[:] = tl.T
            u[:] = tu.T
        #Replicate diagonal block horizontal and vertical
        for tk in xrange(k+bs,SIZE,BS):
            tbs = min(BS,SIZE - tk) #Current block size
            L[tk:tk+tbs,k:k+bs] = U[k:k+tbs,k:k+bs]
            U[k:k+bs,tk:tk+tbs] = L[k:k+bs,k:k+tbs]
        if k+bs < SIZE:
            #Compute horizontal multiplier
            blk = ((kb,kb+1),(kb+1,SIZE/BS))
            for a,u in zip(A.blocks(blk), U.blocks(blk)):
                u[:] = np.linalg.solve(u.T,a.T).T
            #Compute vertical multiplier
            blk = ((kb+1,SIZE/BS),(kb,kb+1))
            for a,l in zip(A.blocks(blk), L.blocks(blk)):
                l[:] = np.linalg.solve(l,a)
            #Apply to remaining submatrix (trailing update A -= L*U).
            A -= pyHPC.summa(L[:,:k+bs],U[:k+bs,:], ao=(k+bs,k),
                             bo=(k,k+bs), co=(k+bs,k+bs))
    return (L, U)
|
993,343 | 6fe9192cddc19e1e618c7ad4c50640767425097b | import streamlit as st
import pickle
import nltk
import string
from nltk.stem import SnowballStemmer
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import TfidfVectorizer
def cleanupText(message):
    """Strip punctuation, drop English stopwords, and stem each word.

    Relies on the module-level `stemmer`, which is only created inside
    the submit-button branch below -- calling this earlier raises
    NameError (TODO confirm intended).
    """
    message = message.translate(str.maketrans('','',string.punctuation)) # remove basic punctuation
    words = [stemmer.stem(word) for word in message.split() if word.lower() not in stopwords.words('english')]
    return " ".join(words)
def load_model(path='models/clf.pk'):
    """Unpickle and return the object stored at *path*."""
    with open(path, 'rb') as fh:
        return pickle.load(fh)
# --- Streamlit page flow: load models, take a message, classify it. ---
st.title('Message Spam detection')
with st.spinner('loading AI model'):
    model = load_model()
    vectorizer = load_model('models/tfidfvec.pk')
    st.success("models loaded into memory")
message = st.text_area('enter your sms text',value='hi there')
btn = st.button('submit to analyse')
if btn:
    # Stemmer is created here and used globally by cleanupText().
    stemmer = SnowballStemmer('english')
    clean_msg = cleanupText(message)
    data = vectorizer.transform([clean_msg])
    data = data.toarray()
    prediction = model.predict(data)
    st.title("our prediction")
    # Label convention from training: 0 = ham, 1 = spam -- TODO confirm.
    if prediction[0] == 0:
        st.header('Normal message')
    elif prediction[0] == 1:
        st.header("Spam message")
    else:
        st.error("something fishy")
993,344 | 149b2e43fa8817396ff95a41a3751c1f6fba0395 | import json
from django.core import serializers
from .models import *
from datetime import timedelta, date, datetime
def custom_json_converter(o):
    """json.dumps `default` hook: render date/datetime via str().

    `datetime` is a subclass of `date`, so a single isinstance check
    covers both -- the original's second `elif` branch was unreachable.
    Returns None for unsupported types (preserved behavior; note that
    json then serializes them as null instead of raising TypeError).
    """
    if isinstance(o, (date, datetime)):
        return o.__str__()
def get_rank_record(domain_id, user_id, unique=False):
    """Return rank history rows for a user's domain, newest first.

    With unique=True, delegates to process_rank_record().
    NOTE(review): that helper returns a dict keyed by config_id, while
    this path otherwise returns a list of row dicts -- callers must
    handle both shapes.
    Raises Domain.DoesNotExist if the domain is not owned by the user.
    """
    domain = Domain.objects.get(id=domain_id, user_id=user_id)
    final_data = []
    if domain:
        configs = Config.objects.filter(domain_id=domain_id)
        config_ids = [config.id for config in configs]
        ranks = Rank.objects.filter(config_id__in=config_ids).order_by('-executed_ts')
        if unique:
            final_data = process_rank_record(ranks, domain)
            return final_data
        for rank in ranks:
            # Serialize the model row, then flatten `fields` and attach
            # display-friendly extras.
            rank_obj = json.loads(serializers.serialize("json", [rank]))
            row = {}
            row.update(rank_obj[0].get("fields"))
            row.update({
                "executed_ts": str(rank.executed_ts),
                "keyword": rank.config.keyword,
                "domain_name": domain.domain
            })
            final_data.append(row)
    return final_data
def process_rank_record(ranks, domain):
    """Collapse a newest-first rank queryset to one row per config,
    annotating each with 1/7/30-day deltas where available.

    Returns a dict mapping config_id -> row dict.
    NOTE(review): the delta lookups compare executed_ts for *exact*
    equality with `latest - timedelta(...)`, which only matches if runs
    happen at identical times of day -- TODO confirm intended.
    """
    config_ranks = {}
    for rank in ranks:
        if rank.config_id not in config_ranks.keys():
            # First (newest) record for this config becomes the base row.
            rank_obj = json.loads(serializers.serialize("json", [rank]))
            row = {}
            row.update(rank_obj[0].get("fields"))
            row.update({
                "executed_ts": rank.executed_ts,
                "keyword": rank.config.keyword,
                "domain_name": domain.domain
            })
            config_ranks[rank.config_id] = row
        else:
            # Older record: fill in the historical deltas on the base row.
            target_row = config_ranks.get(rank.config_id, {})
            executed_ts = target_row.get("executed_ts")
            one_day_before = executed_ts - timedelta(days=1)
            seven_day_before = executed_ts - timedelta(days=7)
            thirty_day_before = executed_ts - timedelta(days=30)
            if rank.executed_ts == one_day_before:
                target_row["day1"] = rank.page_rank-target_row.get("page_rank")
            elif rank.executed_ts == seven_day_before:
                target_row["day7"] = rank.page_rank-target_row.get("page_rank")
            elif rank.executed_ts == thirty_day_before:
                target_row["day30"] = rank.page_rank-target_row.get("page_rank")
    return config_ranks
def get_keyword_rank_record(config_id, domain_name):
    """Return the last 30 days of rank rows for one keyword config,
    newest first, shaped for tabular display."""
    last_month = datetime.today() - timedelta(days=30)
    ranks = Rank.objects.filter(config_id=config_id, executed_ts__gte=last_month).order_by('-executed_ts')
    final_data = []
    for rank in ranks:
        row = {"Domain": domain_name, "Page Rank": rank.page_rank, "Date Added": rank.executed_ts, "Keyword": rank.config.keyword}
        final_data.append(row)
    return final_data
993,345 | 2e2923a21ea334e4fc6d877a1a2aa66de5be160c | from json import dumps
from os.path import dirname
pwd = dirname(__file__)

# Read the two companion files verbatim.  ''.join(f.readlines()) is
# exactly f.read(), so read them directly.
with open(pwd + "/Dockerfile", "r") as f:
    dockerfile = f.read()

with open(pwd + "/script", "r") as f:
    script = f.read()

# Emit the job description as JSON on stdout.
print(dumps(
    {
        "id": 1,
        "testID": 1,
        "dockerfile": dockerfile,
        "script": script,
        "environmentVariables": "a=b"
    }
))
993,346 | 9e2d988efa25be96b01ae864dfb9699562271abe | # Functions goes here
# string checker function
def string_checker(question, to_check):
    """Prompt until the reply matches an entry in *to_check*.

    Accepts either a full option or its first letter (returning the
    full option).  On an invalid reply, lists the valid choices and
    asks again.

    :param question: prompt shown to the user
    :param to_check: iterable of acceptable lowercase answers
    :return: the matched item from *to_check*
    """
    while True:
        response = input(question).lower()
        for item in to_check:
            if response == item:
                return response
            elif response == item[0]:
                return item
        # Build the retry menu from to_check, one option per line.  The
        # original always printed a hard-coded shape list on one mashed
        # line, even when this checker was used for the area/perimeter
        # question.
        print("Please choose one of the following:")
        for item in to_check:
            print("* " + item)
# number checking function
def num_check(question):
    """Keep prompting until a positive number is entered.

    The sentinel "xxx" (any case) is returned as-is so callers can
    offer an early exit.
    """
    error = "It should contain a number more than zero."
    while True:
        answer = input(question)
        if answer.lower() == "xxx":
            return "xxx"
        try:
            value = float(answer)
        except ValueError:
            # Not numeric at all -- same message, ask again.
            print(error)
            continue
        if value > 0:
            return value
        print(error)
# unit checking function
def unit_checker():
    """Ask for a length unit and normalize it to 'cm', 'm' or 'mm'.

    Unrecognized input (including the empty string) is returned
    unchanged.  NOTE(review): the empty-string branch prints
    "you chose " with nothing after it -- TODO confirm intended.
    """
    unit_to_check = input("Unit? ")
    # Abbreviation listssnip
    centimeters = ["cm", "centimeters"]
    metres = ["m", "metres"]
    millimeters = ["mm", "millimeters"]
    if unit_to_check == "":
        print("you chose {}".format(unit_to_check))
        return unit_to_check
    # The explicit == "cm" test is redundant with the list membership
    # check that follows it.
    elif unit_to_check == "cm" or unit_to_check.lower() in centimeters:
        return "cm"
    elif unit_to_check.lower() in metres:
        return "m"
    elif unit_to_check.lower() in millimeters:
        return "mm"
    else:
        return unit_to_check
shape_answer = []
# *** Main Routine starts here ***
keep_going = ""
while keep_going == "":
available_shapes = ["square", "rectangle", "triangle", "parallelogram", "circle", "trapezium"]
calculator_1 = ["area", "perimeter", "area and perimeter"]
# Asks user to choose what shape they want to work out
ask_user = string_checker("Choose a shape to work out:", available_shapes)
print(ask_user)
summary_1 = []
unit_central = {
"cm": 1,
"m": 100,
"mm": 0.1
}
# If shape is chosen
if ask_user == "square":
print("*** Square Area / Perimeter ***")
# Ask user length of square
square_length = num_check("What is the length: ")
unit = unit_checker()
# makes sqaure_length a float
square_length = float(square_length)
# formula for area and perimeter of square
area = square_length * square_length
perimeter = square_length * 4
# prints length to console
print("The length is {} {}".format(square_length, unit))
# prints area and perimeter of square
print("The area of the square is {} {} squared".format(area, unit))
print("The perimeter of the square is {} {}".format(perimeter, unit))
# brief summary of information given in order for output
shape_name = "Shape : {}".format(ask_user)
display_dimensions_1 = "Area (Dimensions): {} {} x {} {}".format(square_length, unit, square_length, unit)
display_dimensions_2 = "Perimeter (Dimensions): {} {} x 4".format(square_length, unit)
display_area = "Area: {} {} squared".format(area, unit)
display_perimeter = "Perimeter: {} {}".format(perimeter, unit)
summary_1.append(shape_name)
summary_1.append(display_dimensions_1)
summary_1.append(display_dimensions_2)
summary_1.append(display_area)
summary_1.append(display_perimeter)
print("*** Square Area / Perimeter ***")
if ask_user == "rectangle":
print("*** Rectangle Area / Perimeter ***")
# Ask user for length and width of rectangle
rectangle_length = num_check("What is the length:")
rectangle_width = num_check("What is the width:")
unit = unit_checker()
# takes the decimal out and turns it into integer
rectangle_length = int(rectangle_length)
rectangle_width = int(rectangle_width)
# works out area and perimeter of rectangle
area = rectangle_width * rectangle_length
perimeter = rectangle_length + rectangle_width + rectangle_length + rectangle_width
# displays length and width of rectangle
print("length:{} {}".format(rectangle_length, unit))
print("width:{} {}".format(rectangle_width, unit))
# displays area and perimeter of rectangle
print("The area of the rectangle is {} {} squared".format(area, unit))
print("The perimeter of the rectangle is {} {}".format(perimeter, unit))
# brief summary of information given in order for output
shape_name = "Shape: {}".format(ask_user)
display_dimensions_1 = "Area(Dimensions): {} {} x {} {}".format(rectangle_width, unit, rectangle_length, unit)
display_dimensions_2 = "Perimeter(Dimensions): {} {} + {} {} + {} {} + {} {}".format(rectangle_length, unit, rectangle_width, unit, rectangle_length, unit, rectangle_width, unit)
display_area = "Area: {} {} squared".format(area, unit)
display_perimeter = "Perimeter: {} {}".format(perimeter, unit)
summary_1.append(shape_name)
summary_1.append(display_dimensions_1)
summary_1.append(display_dimensions_2)
summary_1.append(display_area)
summary_1.append(display_perimeter)
print("*** Rectangle Area / Perimeter ***")
if ask_user == "triangle":
print("*** Triangle Area / Perimeter ***")
# asks user if they want to work out A or P
ask_user_1 = string_checker("Area or perimeter or Area and perimeter ? ", calculator_1)
unit = unit_checker()
# turns it into integer
if ask_user_1 == "area":
# Ask user for the necessary sides for triangle
triangle_base = num_check("What is the base: ")
perpendicular_height = num_check("What is the perpendicular height: ")
# formula for area of triangle
area = 0.5 * triangle_base * perpendicular_height
# outputs area of triangle
print("The Area of the triangle is {} {} squared".format(area, unit))
# does not provide perimeter because never asked for it
print("The perimeter of the triangle is N/A")
# # brief summary of information given in order for output
shape_name = "Shape: {}".format(ask_user)
display_dimensions_1 = "Area(Dimensions): 0.5 x {} {} x {} {}".format(triangle_base, unit,
perpendicular_height, unit)
display_dimensions_2 = "Perimeter is N/A"
display_area = "Area: {} {} squared".format(area, unit)
display_perimeter = "Perimeter: n/a"
summary_1.append(shape_name)
summary_1.append(display_dimensions_1)
summary_1.append(display_dimensions_2)
summary_1.append(display_area)
summary_1.append(display_perimeter)
if ask_user_1 == "perimeter":
# asks user for necessary sides
triangle_base = num_check("What is the base: ")
triangle_height = num_check("What is slant height 1: ")
triangle_height_2 = num_check("What is slant height 2:")
# works out perimeter of triangle
perimeter = triangle_base + triangle_height_2 + triangle_height
# outputs perimeter of triangle
print("The perimeter of the triangle is {} {}".format(perimeter, unit))
# Area is N/A because user only asked for perimeter
print("The Area of the triangle is N/A")
# brief summary of information given in order for output
shape_name = "Shape: {}".format(ask_user)
display_dimensions_1 = "Area is N/A"
display_dimensions_2 = "Perimeter(Dimensions): {} {} + {} {} + {} {}".format(triangle_base, unit,
triangle_height_2, unit,
triangle_height, unit)
display_area = "Area: n/a"
display_perimeter = "Perimeter: {} {}".format(perimeter, unit)
summary_1.append(shape_name)
summary_1.append(display_dimensions_1)
summary_1.append(display_dimensions_2)
summary_1.append(display_area)
summary_1.append(display_perimeter)
if ask_user_1 == "area and perimeter":
# asks user for necessary sides
triangle_base = num_check("What is the base: ")
triangle_height = num_check("What is slant height 1: ")
triangle_height_2 = num_check("What is slant height 2:")
perpendicular_height = num_check("What is the perpendicular height: ")
# becomes integer
triangle_base = int(triangle_base)
triangle_height = int(triangle_height)
triangle_height_2 = int(triangle_height_2)
perpendicular_height = int(perpendicular_height)
# formula for area and perimeter of triangle
area = 0.5 * triangle_base * perpendicular_height
perimeter = triangle_base + triangle_height_2 + triangle_height
# outputs area and perimeter of triangle
print("The Area of the triangle is {} {} squared".format(area, unit))
print("The perimeter of the triangle is {} {}".format(perimeter, unit))
# brief summary of information given in order for output
shape_name = "Shape: {}".format(ask_user)
display_dimensions_1 = "Area(Dimensions): 0.5 x {} {} x {} {}".format(triangle_base, unit, perpendicular_height, unit)
display_dimensions_2 = "Perimeter(Dimensions): {} {} + {} {} + {} {}".format(triangle_base, unit, triangle_height_2, unit,
triangle_height, unit)
display_area = "Area: {} {} squared".format(area, unit)
display_perimeter = "Perimeter: {} {}".format(perimeter, unit)
summary_1.append(shape_name)
summary_1.append(display_dimensions_1)
summary_1.append(display_dimensions_2)
summary_1.append(display_area)
summary_1.append(display_perimeter)
print("*** Triangle Area / Perimeter ***")
if ask_user == "circle":
print("*** Circle Area / Circumference Solver ***")
# Asks user for radius of circle
circle_radius = num_check("What is the radius:")
unit = unit_checker()
# Turns string into integer
circle_radius = int(circle_radius)
# Works out area and circumference of circle
area = 3.14 * circle_radius ** 2
circumference = 2 * 3.14 * circle_radius
circumference = int(circumference)
# Displays area and circumference of circle
print("The area is {} {} squared".format(area, unit))
print("The circumference is {} {}".format(circumference, unit))
# Rounded off numbers
print(("Rounded off area is {}".format(round(area))))
print(("Rounded off circumference is {}".format(round(circumference))))
# brief summary of information given in order for output
shape_name = "Shape: {}".format(ask_user)
display_dimensions_1 = "Area(Dimensions): 3.14 x {} {} ^ 2".format(circle_radius, unit)
display_dimensions_2 = "Circumference(Dimensions): 2 x 3.14 x {} {}".format(circle_radius, unit)
display_area = "Area: {} {} squared".format(area, unit)
display_perimeter = "Circumference: {} {}".format(circumference, unit)
summary_1.append(shape_name)
summary_1.append(display_dimensions_1)
summary_1.append(display_dimensions_2)
summary_1.append(display_area)
summary_1.append(display_perimeter)
print("*** Circle Area / Circumference Solver ***")
if ask_user == "parallelogram":
print("*** Parallelogram Area / Perimeter Solver ***")
# asks user if they want to work out A or P
ask_user_1 = string_checker("Area or perimeter or Area and perimeter ? ", calculator_1)
# asks user for necessary length
if ask_user_1 == "area":
# Asks user for necessary lengths
parallelogram_base = num_check("What is the base: ")
parallelogram_height = num_check("what is the height:")
unit = unit_checker()
# formula for area of parallelogram
area = parallelogram_base * parallelogram_height
# outputs area of parallelogram
print("The area of the parallelogram is {} {} squared".format(area, unit))
# perimeter is N/A because only area is wanted
print("The perimeter of the parallelogram is N/A")
# brief summary of information given in order for output
shape_name = "Shape: {}".format(ask_user)
display_dimensions_1 = "Area(Dimensions): {} {} x {} {}".format(parallelogram_base, unit,
parallelogram_height, unit)
display_dimensions_2 = "Perimeter(Dimensions): n/a"
display_area = "Area: {} {} squared".format(area, unit)
display_perimeter = "Perimeter: n/a"
summary_1.append(shape_name)
summary_1.append(display_dimensions_1)
summary_1.append(display_dimensions_2)
summary_1.append(display_area)
summary_1.append(display_perimeter)
if ask_user_1 == "perimeter":
# Asks user for necessary lengths
parallelogram_height = num_check("What is the height: ")
parallelogram_side = num_check("What is the side length:")
unit = unit_checker()
# formula for perimeter of parallelogram
perimeter = (parallelogram_height + parallelogram_side) * 2
# outputs perimeter of parallelogram
print("The perimeter of the parallelogram is {} {}".format(perimeter, unit))
# area is N/A because only perimeter is wanted
print("The area of the parallelogram is N/A")
# brief summary of information given in order for output
shape_name = "Shape: {}".format(ask_user)
display_dimensions_1 = "Area(Dimensions): N/A"
display_dimensions_2 = "Perimeter(Dimensions): 2 x ({} {} + {} {})".format(parallelogram_side, unit,
parallelogram_height, unit)
display_area = "Area: N/A"
display_perimeter = "Perimeter: {} {}".format(perimeter, unit)
summary_1.append(shape_name)
summary_1.append(display_dimensions_1)
summary_1.append(display_dimensions_2)
summary_1.append(display_area)
summary_1.append(display_perimeter)
if ask_user_1 == "area and perimeter":
# Asks user for necessary lengths
parallelogram_base = num_check("What is the base: ")
parallelogram_height = num_check("what is the height:")
parallelogram_side = num_check("What is the side length:")
unit = unit_checker()
# assigns to integer
parallelogram_base = int(parallelogram_base)
parallelogram_height = int(parallelogram_height)
parallelogram_side = int(parallelogram_side)
# formula for area of parallelogram
area = parallelogram_base * parallelogram_height
# formula for perimeter of parallelogram
perimeter = 2 * (parallelogram_height + parallelogram_side)
# returns the area and perimeter of parallelogram
print("The area of the parallelogram is {} {} squared".format(area, unit))
print("The perimeter of the parallelogram is {} {}".format(perimeter, unit))
print("*** Parallelogram Area / Perimeter Solver ***")
# brief summary of information given in order for output
shape_name = "Shape: {}".format(ask_user)
display_dimensions_1 = "Area(Dimensions): {} {} x {} {}".format(parallelogram_base, unit, parallelogram_height, unit)
display_dimensions_2 = "Perimeter(Dimensions): 2 x ({} {} + {} {})".format(parallelogram_side, unit, parallelogram_base, unit)
display_area = "Area: {} {} squared".format(area, unit)
display_perimeter = "Perimeter: {} {}".format(perimeter, unit)
summary_1.append(shape_name)
summary_1.append(display_dimensions_1)
summary_1.append(display_dimensions_2)
summary_1.append(display_area)
summary_1.append(display_perimeter)
    if ask_user == "trapezium":
        # Trapezium branch: area = (b1 + b2) / 2 * h, perimeter = sum of all four sides.
        print("*** Trapezium Area / Perimeter ***")
        # asks user A or P or both
        ask_user_1 = string_checker("Area or perimeter or Area and perimeter ? ", calculator_1)
        # if user only wants to work out area
        if ask_user_1 == "area":
            # Asks user for necessary lengths
            bottom_base = num_check("What is the bottom base:")
            top_base = num_check("What is the top base:")
            height = num_check("What is the height:")
            unit = unit_checker()
            # formula for trapezium: mean of the two parallel bases times the height
            area = (bottom_base + top_base) / 2 * height
            # returns answer for area
            print("The area is {} {} squared".format(area, unit))
            # perimeter is N/A because only area is wanted
            print("The perimeter is N/A")
            # Brief summary for output
            shape_name = "Shape: {}".format(ask_user)
            display_dimensions_1 = "Area(Dimensions): {} {} + {} {} / 2 x {} {}".format(bottom_base, unit, top_base, unit,
                                                                                        height, unit)
            display_dimensions_2 = "Perimeter(Dimensions): N/A"
            display_area = "Area: {} {} squared".format(area, unit)
            display_perimeter = "Perimeter: n/a "
            summary_1.append(shape_name)
            summary_1.append(display_dimensions_1)
            summary_1.append(display_dimensions_2)
            summary_1.append(display_area)
            summary_1.append(display_perimeter)
        # if user only wants to work out perimeter
        if ask_user_1 == "perimeter":
            # asks user for necessary lengths
            bottom_base = num_check("What is the bottom base:")
            top_base = num_check("What is the top base:")
            side_1 = num_check("What is side 1:")
            side_2 = num_check("What is side 2:")
            unit = unit_checker()
            # formula for perimeter of trapezium: sum of all four sides
            perimeter = bottom_base + top_base + side_1 + side_2
            # returns answer for perimeter
            print("The perimeter is {} {}".format(perimeter, unit))
            # area is N/A because only perimeter is wanted
            print("The area is N/A")
            # brief summary for output
            shape_name = "Shape: {}".format(ask_user)
            display_dimensions_1 = "Area(Dimensions): n/a"
            display_dimensions_2 = "Perimeter(Dimensions): {} {} + {} {} + {} {} + {} {}".format(bottom_base, unit,
                                                                                                 top_base, unit,
                                                                                                 side_1, unit, side_2, unit)
            display_area = "Area: n/a"
            display_perimeter = "Perimeter: {} {}".format(perimeter, unit)
            summary_1.append(shape_name)
            summary_1.append(display_dimensions_1)
            summary_1.append(display_dimensions_2)
            summary_1.append(display_area)
            summary_1.append(display_perimeter)
        # if user wants to work out area and perimeter
        if ask_user_1 == "area and perimeter":
            # asks user for necessary lengths
            bottom_base = num_check("What is the bottom base:")
            top_base = num_check("What is the top base:")
            height = num_check("What is the height:")
            side_1 = num_check("What is side 1:")
            side_2 = num_check("What is side 2:")
            unit = unit_checker()
            # assigns to integer
            # NOTE(review): only this branch truncates inputs with int(); the
            # area-only / perimeter-only branches use the raw values — confirm
            # whether truncation is intended.
            bottom_base = int(bottom_base)
            top_base = int(top_base)
            height = int(height)
            side_1 = int(side_1)
            side_2 = int(side_2)
            # formula for area of trapezium
            area = (bottom_base + top_base) / 2 * height
            # formula for perimeter of trapezium
            perimeter = bottom_base + top_base + side_1 + side_2
            # returns area and perimeter of trapezium
            print("The area is {} {} squared".format(area, unit))
            print("The perimeter is {} {}".format(perimeter, unit))
            print("*** Trapezium Area / Perimeter ***")
            # calculation summary for shape
            shape_name = "Shape: {}".format(ask_user)
            display_dimensions_1 = "Area(Dimensions): {} {} + {} {} / 2 x {} {}".format(bottom_base, unit, top_base, unit, height, unit)
            display_dimensions_2 = "Perimeter(Dimensions): {} {} + {} {} + {} {} + {} {}".format(bottom_base, unit, top_base, unit,
                                                                                                 side_1, unit, side_2, unit)
            display_area = "Area: {} {} squared".format(area, unit)
            display_perimeter = "Perimeter: {} {}".format(perimeter, unit)
            summary_1.append(shape_name)
            summary_1.append(display_dimensions_1)
            summary_1.append(display_dimensions_2)
            summary_1.append(display_area)
            summary_1.append(display_perimeter)
# gives the option to continue or quit
keep_going = input("Press enter for another go or any key and then enter to quit")
shape_answer.append(summary_1)
# calculation summary
row = 0
# print(shape_answer)
for item in shape_answer:
print("***Calculation Summary***")
print(item[0])
print(item[1])
print(item[2])
print(item[3])
print(item[4])
print()
row += 1 |
993,347 | 686e3971a83637886361a2bdba2dcd4b284744f9 | '''
A mess needs to be reoptimized
Kyle Vonderwerth, Jenny Tang, Stephen Em
INF 141: Search Engine Milestone 1
Python 3.4
'''
from math import log2, log, sqrt
from collections import defaultdict
import json, os
class Indexer(object):
    """Builds a TF-IDF inverted index over a directory of JSON documents.

    Attributes:
        index         -- {termID: {docID: tf (later tf-idf weight)}}
        termToID      -- {term: [termID, docFreq (later idf)]}
        termIDtoTerm  -- reverse of termToID
        docIDToLength -- {docID: per-document vector length for cosine normalization}
        totalCorpus   -- number of documents indexed so far
    """
    version = '0.1'

    def __init__(self, directory):
        # Fail fast on an empty corpus instead of silently skipping setup
        # and leaving a half-initialized object (the original behaviour).
        if not os.listdir(directory):
            raise ValueError("corpus directory %r is empty" % directory)
        self.directory = directory  # directory holding json objects
        self.index = defaultdict(lambda: defaultdict(int))  # {termID : {docID : frequencyOfTermInDoc}}
        self.docID = 0  # set for every json object parsed
        self.termToID = {}  # {term : [termID, docFreq]}
        self.docIDToLength = defaultdict(list)
        self.termIDtoTerm = {}
        self.totalCorpus = 0

    def generateIndex(self):
        """Parse every document in the corpus directory, accumulate the
        index, convert counts to tf-idf, then write everything to disk."""
        for doc in os.listdir(self.directory):
            self.indexBlock(self.parseText(doc))
        self.generateTFIDF()
        self.writeIndex()
        print('Index has been generated')

    def parseText(self, doc):
        """Read one JSON document and return (token list, document '_id')."""
        def tokenizeFile(text):
            """Lower-cased alphabetic tokens of length 2..49, stop words removed."""
            ALPHANUMERICS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
            def alphaNumericMapping(token):
                # Keep only letters, then normalize to lower case.
                return ''.join(filter(lambda x: x in ALPHANUMERICS, token)).lower()
            with open("stopWords.txt") as sWF:
                stopWords = sWF.read().split("\n")
            return list(filter(lambda x: x != '',
                               filter(lambda x: x not in stopWords,
                                      map(alphaNumericMapping,
                                          filter(lambda x: 2 <= len(x) < 50, text.split(' '))))))
        # BUG FIX: open documents under the configured directory; the original
        # hard-coded 'FileDump' and ignored the constructor argument.
        with open(os.path.join(self.directory, doc)) as jsonDoc:
            self.docID = doc.replace('.txt', '')
            print(self.docID)
            parsedJson = json.loads(jsonDoc.read())
        return (tokenizeFile(parsedJson['text']), parsedJson['_id'])

    def indexBlock(self, parsedJson):
        """Fold one parsed document (tokens, _id) into the in-memory index."""
        self.totalCorpus += 1
        terms = set()
        for term in parsedJson[0]:
            if term not in self.termToID:  # idiomatic membership (was .keys())
                self.termIDtoTerm[len(self.termToID) + 1] = term  # termID -> term
                self.termToID[term] = [len(self.termToID) + 1, 0]  # term -> [termID, docFreq]
            self.index[self.termToID[term][0]][self.docID] += 1
            terms.add(term)
            self.docIDToLength[self.docID].append(self.termToID[term][0])
        # Each distinct term in this document bumps its document frequency,
        # and the raw count becomes a within-document term frequency.
        for term in terms:
            self.termToID[term][1] += 1
            self.index[self.termToID[term][0]][self.docID] = \
                self.index[self.termToID[term][0]][self.docID] / len(parsedJson[0])

    def generateTFIDF(self):
        """Turn term frequencies into tf-idf weights and precompute the
        per-document vector lengths used for cosine normalization."""
        for k, v in self.index.items():
            for doc, termFreq in v.items():
                try:
                    idf = log2(self.totalCorpus / self.termToID[self.termIDtoTerm[k]][1])
                    self.index[k][doc] = termFreq * idf
                    self.termToID[self.termIDtoTerm[k]][1] = idf
                except (KeyError, ZeroDivisionError, ValueError):
                    # Narrowed from a bare except: keep the raw term frequency
                    # when the idf cannot be computed for this term.
                    pass
        for doc, terms in self.docIDToLength.items():
            for term in range(len(terms)):
                self.docIDToLength[doc][term] = self.index[self.docIDToLength[doc][term]][doc] ** 2
        for doc in self.docIDToLength.keys():
            self.docIDToLength[doc] = sqrt(sum(self.docIDToLength[doc]))

    def writeIndex(self):
        """Write the index and both ID mappings to disk."""
        with open('index.txt', 'a') as index:  # write index, sorted by termID
            index.write(json.dumps(sorted([(k, v) for k, v in self.index.items()], key=lambda x: x[0])))
        with open('termID_mapping.txt', 'a') as index:  # term -> termID : idf, sorted by term
            for term, termID in sorted([(k, v) for k, v in self.termToID.items()], key=lambda x: x[0]):
                index.write(str(term) + ' : ' + str(termID[0]) + ' : ' + str(termID[1]) + '\n')
        with open('docID_mapping.txt', 'a') as index:  # docID -> vector length
            for doc, length in sorted([(k, v) for k, v in self.docIDToLength.items()], key=lambda x: x[0]):
                index.write(str(doc) + ' : ' + str(length) + '\n')
if __name__ == "__main__":
    # Build the index over the local 'FileDump' corpus when run as a script.
    Indexer('FileDump').generateIndex()
|
993,348 | 04ea37908069d6a807b76c3d6d3c7fc0488ffe8a |
import numpy as np
import matplotlib.pyplot as plt
from pandas import read_csv
from scipy.stats import pearsonr
import math,sys,os
import random as rnd
from scipy.misc import derivative
from keras.models import Sequential
from keras.layers import Input
from keras.models import Model
from keras.layers import Dense, regularizers
from keras.layers import LSTM
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from Functions import create_dataset, MakeNoise
#####################################################################
### run as python -W ignore main.py DataName.txt
### For example python -W ignore main.py 'deterministic_chaos.txt' 0 1
# load the dataset
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
NomeFile = sys.argv[1]
dataset = np.matrix(read_csv(NomeFile, sep=" ", header=None))
########################################################################################
train_length = 100
validation_length = 50
test_length = 30
tstart = int(sys.argv[3])
### Take the training set
ts_training = dataset[tstart:(tstart + train_length + validation_length),:]
ts_training = preprocessing.scale(ts_training)
Noise = int(sys.argv[2])
if Noise == 1:
ts_training = MakeNoise(ts_training, 0.2)
print('The time series has been contaminated with observational noise')
print('However, you check if you correctly predict the noise-free time series in the test set')
###
num_species = ts_training.shape[1]
#### Give a different representation of the training set
ts_training_original = ts_training
#ts_training = StackDAE(ts_training, train_length, validation_length, 5, dim_red = 0)
#### Reshape into X=t and Y=t+look_back
look_back = 1
### Here you create an array Ytrain with the column to predict scale by look_back points (e.g.,, 1)
ts_training_tr = ts_training[0:train_length,:]
tr_training_vl = ts_training[train_length:(train_length + validation_length),:]
trainX, trainY = create_dataset(ts_training_tr, look_back)
ValX, ValY = create_dataset(tr_training_vl, look_back)
####################################################################################
test_set = dataset[(tstart + train_length + validation_length):(tstart + train_length + validation_length + test_length), :]
test_set = preprocessing.scale(test_set)
####################################################################################
#### Take last point of the training set and start predictions from there
last_point_kept = ts_training[(np.shape(ts_training)[0] - 1), :]
#####################################################################################
###### Initialise the autoencoder
#### Some properties of the autoencoder
encoding_dim = np.shape(trainX)[1]
## This is the size of the decoder (dimension of the state space)
decoding_dim = np.shape(trainX)[1]
###########################################################################
input_ts = Input(shape = (decoding_dim,))
###########################################
#### Decide whether to use saprsity or not
#encoded = Dense(encoding_dim, activation= 'sigmoid', activity_regularizer=regularizers.l2(10e-3))(input_ts)
###########################################
encoded = Dense(encoding_dim, activation= 'sigmoid', activity_regularizer=regularizers.l2(10e-5))(input_ts)
decoded = Dense(decoding_dim, activation= 'linear', activity_regularizer=regularizers.l2(10e-5))(encoded)
#decoded = Dense(decoding_dim, activation= 'linear', activity_regularizer=regularizers.l2(10e-3))(encoded)
autoencoder = Model(input_ts, decoded)
encoder = Model(input_ts, encoded)
# create a placeholder for an encoded (d-dimensional) input
encoded_input = Input(shape=(encoding_dim,))
# retrieve the last layer of the autoencoder model
decoder_layer = autoencoder.layers[-1]
# create the decoder model
decoder = Model(encoded_input, decoder_layer(encoded_input))
# choose your loss function and otpimizer
autoencoder.compile(loss='mean_squared_error', optimizer='adam')
########################
#### Train the autoencoder but avoid writing on stdoutput
autoencoder.fit(trainX, trainY,
epochs= 400,
batch_size = 6,
shuffle = False,
validation_data=(ValX, ValY), verbose = 0)
# make predictions
length_predictions = test_length
realizations = 20
next_point = np.zeros((length_predictions,num_species))
for prd in range(realizations):
##### Last point of the training set for predictions
last_point = last_point_kept.reshape((1,num_species))
##
encoded_ts = encoder.predict(last_point)
last_point = decoder.predict(encoded_ts)
next_point[0,:] = next_point[0,:] + last_point
##
for i in range(1,length_predictions):
encoded_ts = encoder.predict(last_point)
last_point = decoder.predict(encoded_ts)
next_point[i,:] = next_point[i,:] + last_point
next_point = next_point/realizations
next_point = np.delete(next_point, (0), 0)
########### Training data
encoded_ts = encoder.predict(ts_training)
training_data = decoder.predict(encoded_ts)
training_data = np.insert(training_data, 0, np.array(np.repeat('nan',num_species)), 0)
os_rmse = np.sqrt(np.mean((next_point - test_set[1:(length_predictions),:])**2))
os_correlation = np.mean([pearsonr(next_point[:,i], test_set[1:(length_predictions), i])[0] for i in range(num_species)])
print 'RMSE of LSTM forecast = ', os_rmse
print 'correlation coefficient of LSTM forecast = ', os_correlation
########################################################################################################
plot = True
if plot == True:
all_data = np.concatenate((ts_training_original,test_set[0:(length_predictions),:]), axis = 0)
all_data_reconstructed = np.concatenate((training_data,next_point), axis = 0)
f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex='col', sharey='row')
interval_forecast = range((train_length + validation_length+1), np.shape(all_data_reconstructed)[0])
ax1.plot(all_data[:,0], color = 'b')
ax1.plot(interval_forecast, all_data_reconstructed[interval_forecast,0], lw = 2, linestyle = '--', color = 'r', label = 'Forecast')
ax1.axvline(x = (train_length + validation_length), lw = 2, ls = '--')
ax1.legend()
ax2.plot(all_data[:,1], color = 'b')
ax2.plot(interval_forecast, all_data_reconstructed[interval_forecast,1], lw = 2, linestyle = '--', color = 'r', label = 'Forecast')
ax2.axvline(x = (train_length + validation_length), lw = 2, ls = '--')
ax3.plot(all_data[:,2], color = 'b')
ax3.plot(interval_forecast, all_data_reconstructed[interval_forecast,2], lw = 2, linestyle = '--', color = 'r', label = 'Forecast')
ax3.axvline(x = (train_length + validation_length), lw = 2, ls = '--')
ax4.plot(all_data[:,3], color = 'b')
ax4.plot(interval_forecast, all_data_reconstructed[interval_forecast,3], lw = 2, linestyle = '--', color = 'r', label = 'Forecast')
ax4.axvline(x = (train_length + validation_length), lw = 2, ls = '--')
plt.show()
|
993,349 | 72029b64016fc8cfc8dd044f4614135c7e514cb0 | import os, io
import argparse
import subprocess
from time import strftime, localtime
import time
import pandas as pd
import numpy as np
import random, pickle
from tqdm import tqdm
import torch
import modeling
import Data
from pyNTCIREVAL import Labeler
from pyNTCIREVAL.metrics import MSnDCG, nERR, nDCG, AP, RR
import collections
# Fix all RNG seeds for reproducible training runs.
SEED = 42
torch.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
random.seed(SEED)
# Registry of model names accepted by the --model CLI flag.
MODEL_MAP = {
    'crossbert' : modeling.CrossBert,
}
def main(model, dataset, train_pairs, qrels, valid_run, test_run, model_out_dir, qrelDict, modelName, fold,
         metricKeys, MAX_EPOCH, data, args):
    """Train `model` on one cross-validation fold, tracking validation
    ndcg@10 and keeping the test-run evaluation of the best epoch.

    Writes per-metric results and predictions to args.out_dir and
    returns the metric dict of the best test evaluation for this fold.
    """
    LR = 0.001
    BERT_LR = 2e-5
    # Non-BERT layers train with the base LR; BERT layers get a smaller one.
    params = [(k, v) for k, v in model.named_parameters() if v.requires_grad]
    non_bert_params = {'params': [v for k, v in params if not k.startswith('bert.')]}
    bert_params = {'params': [v for k, v in params if k.startswith('bert.')], 'lr': BERT_LR}
    optimizer = torch.optim.Adam([non_bert_params, bert_params], lr=LR)
    # optimizer = torch.optim.Adam([non_bert_params], lr=LR)
    top_valid_score = None
    bestResults = {}
    bestPredictions = []
    bestQids = []
    print("Fold: %d" % fold)
    if args.model in ["unsup"]:
        # Unsupervised models are evaluated once on the test run; no training.
        test_qids, test_results, test_predictions = validate(model, dataset, test_run, qrelDict, 0,
                                                             model_out_dir, data, args, "test")
        print(test_results["ndcg@15"])
        txt = 'new top validation score, %.4f' % np.mean(test_results["ndcg@10"])
        print2file(args.out_dir, modelName, ".txt", txt, fold)
        bestResults = test_results
        bestPredictions = test_predictions
        bestQids = test_qids
        pass
    else:
        # NOTE(review): if MAX_EPOCH == 0 this loop never runs and `txt`
        # is undefined at the final print2file below — confirm callers
        # always pass a positive epoch count.
        for epoch in range(MAX_EPOCH):
            t2 = time.time()
            loss = train_iteration(model, optimizer, dataset, train_pairs, qrels, data, args)
            txt = f'train epoch={epoch} loss={loss}'
            print2file(args.out_dir, modelName, ".txt", txt, fold)
            valid_qids, valid_results, valid_predictions = validate(model, dataset, valid_run, qrelDict, epoch,
                                                                    model_out_dir, data, args, "valid")
            # valid_score = np.mean(valid_results["rp"])
            valid_score = np.mean(valid_results["ndcg@10"])
            elapsed_time = time.time() - t2
            txt = f'validation epoch={epoch} score={valid_score} : {time.strftime("%H:%M:%S", time.gmtime(elapsed_time))}'
            print2file(args.out_dir, modelName, ".txt", txt, fold)
            if top_valid_score is None or valid_score > top_valid_score:
                # New best validation score: evaluate on the test run and keep it.
                top_valid_score = valid_score
                # model.save(os.path.join(model_out_dir, 'weights.p'))
                test_qids, test_results, test_predictions = validate(model, dataset, test_run, qrelDict, epoch,
                                                                     model_out_dir, data, args, "test")
                # print(test_results["ndcg@15"])
                txt = 'new top validation score, %.4f' % np.mean(test_results["ndcg@10"])
                print2file(args.out_dir, modelName, ".txt", txt, fold)
                bestResults = test_results
                bestPredictions = test_predictions
                bestQids = test_qids
            # elif args.earlystop and epoch >=4:
            elif args.earlystop:
                # Stop as soon as validation stops improving.
                break
    # save outputs to files
    for k in metricKeys:
        result2file(args.out_dir, modelName, "." + k, bestResults[k], bestQids, fold)
    prediction2file(args.out_dir, modelName, ".out", bestPredictions, fold)
    print2file(args.out_dir, modelName, ".txt", txt, fold)
    return bestResults
def train_iteration(model, optimizer, dataset, train_pairs, qrels, data, args):
    """Run one training epoch over sampled (positive, negative) doc pairs.

    Gradients are accumulated and stepped every BATCH_SIZE pairs; training
    stops after BATCH_SIZE * BATCHES_PER_EPOCH pairs. Returns the summed
    pairwise-softmax loss for the epoch.
    """
    BATCH_SIZE = 16
    BATCHES_PER_EPOCH = 32 if "eai" in args.data else 256
    GRAD_ACC_SIZE = 2
    total = 0
    model.train()
    total_loss = 0.
    with tqdm('training', total=BATCH_SIZE * BATCHES_PER_EPOCH, ncols=80, desc='train', leave=False) as pbar:
        for record in Data.iter_train_pairs(model, dataset, train_pairs, qrels, GRAD_ACC_SIZE, data, args):
            scores = model(record['query_tok'],
                           record['query_mask'],
                           record['doc_tok'],
                           record['doc_mask'],
                           record['wiki_tok'],
                           record['wiki_mask'],
                           record['question_tok'],
                           record['question_mask'])
            count = len(record['query_id']) // 2
            scores = scores.reshape(count, 2)
            # Pairwise softmax loss: push the positive doc's score above the negative's.
            loss = torch.mean(1. - scores.softmax(dim=1)[:, 0])
            loss.backward()
            total_loss += loss.item()
            total += count
            if total % BATCH_SIZE == 0:
                optimizer.step()
                optimizer.zero_grad()
            pbar.update(count)
            if total >= BATCH_SIZE * BATCHES_PER_EPOCH:
                return total_loss
    # BUG FIX: the original fell off the end and implicitly returned None
    # whenever the pair iterator was exhausted before the per-epoch quota,
    # so callers logged 'loss=None'. Always report the accumulated loss.
    return total_loss
def validate(model, dataset, run, qrel, epoch, model_out_dir, data, args, desc):
    """Score `run` with `model` and evaluate it against `qrel`.

    Thin wrapper around run_model that derives the per-epoch run-file path.
    """
    run_file = os.path.join(model_out_dir, '{}.run'.format(epoch))
    return run_model(model, dataset, run, run_file, qrel, data, args, desc)
def run_model(model, dataset, run, runf, qrels, data, args, desc='valid'):
    """Score every (query, doc) pair in `run` and compute per-query metrics.

    Returns (qids, {metric name: per-query values}, [(qid, pid, score), ...]).
    NOTE(review): `runf` is accepted but never used — presumably a leftover
    run-file path; confirm before removing it from the signature.
    """
    BATCH_SIZE = 16
    rerank_run = {}
    with torch.no_grad(), tqdm(total=sum(len(r) for r in run.values()), ncols=80, desc=desc, leave=False) as pbar:
        model.eval()
        for records in Data.iter_valid_records(model, dataset, run, BATCH_SIZE, data, args):
            scores = model(records['query_tok'],
                           records['query_mask'],
                           records['doc_tok'],
                           records['doc_mask'],
                           records['wiki_tok'],
                           records['wiki_mask'],
                           records['question_tok'],
                           records['question_mask'])
            # Collect one scalar score per (query, doc) pair.
            for qid, did, score in zip(records['query_id'], records['doc_id'], scores):
                rerank_run.setdefault(qid, {})[did] = score.item()
            pbar.update(len(records['query_id']))
            # break
    # Metric accumulators: p/r/ndcg/nerr at cutoffs 5,10,15,20 plus map/mrr/rp.
    res = {"%s@%d" % (i, j): [] for i in ["p", "r", "ndcg", "nerr"] for j in [5, 10, 15, 20]}
    res['map'] = []
    res['mrr'] = []
    res['rp'] = []
    predictions = []
    qids = []
    for qid in rerank_run:
        # Rank this query's documents by descending model score.
        ranked_list_scores = sorted(rerank_run[qid].items(), key=lambda x: x[1], reverse=True)
        ranked_list = [i[0] for i in ranked_list_scores]
        for (pid, score) in ranked_list_scores:
            predictions.append((qid, pid, score))
        # Module-level eval() helper below, not the builtin.
        result = eval(qrels[qid], ranked_list)
        for key in res:
            res[key].append(result[key])
        qids.append(qid)
    return qids, res, predictions
def eval(qrels, ranked_list):
    """Compute ranking metrics for one query's ranked list against its qrels.

    Returns {metric name: value} with p/r/ndcg/nerr at cutoffs 5,10,15,20
    plus rp (R-precision), map and mrr.
    NOTE: this function shadows the builtin eval(); the name is kept for
    existing callers.
    """
    grades = [1, 2, 3, 4]  # gain per relevance level 1..4 (level 0 excluded)
    labeler = Labeler(qrels)
    labeled_ranked_list = labeler.label(ranked_list)
    rel_level_num = 5
    xrelnum = labeler.compute_per_level_doc_num(rel_level_num)
    result = {}
    relevant = set(qrels.keys())
    for i in [5, 10, 15, 20]:
        metric = MSnDCG(xrelnum, grades, cutoff=i)
        result["ndcg@%d" % i] = metric.compute(labeled_ranked_list)
        nerr = nERR(xrelnum, grades, cutoff=i)
        result["nerr@%d" % i] = nerr.compute(labeled_ranked_list)
        _ranked_list = ranked_list[:i]
        hits = len(relevant.intersection(_ranked_list))
        result["p@%d" % i] = hits / len(_ranked_list)
        result["r@%d" % i] = hits / len(qrels)
    # R-precision does not depend on the cutoff: computed once (the original
    # recomputed it on every loop iteration and also left a dead, unused
    # `metric = MSnDCG(...)` assignment after the loop).
    result["rp"] = len(relevant.intersection(ranked_list[:len(qrels)])) / len(qrels)
    map_metric = AP(xrelnum, grades)  # renamed: `map` shadowed the builtin
    result["map"] = map_metric.compute(labeled_ranked_list)
    mrr = RR()
    result["mrr"] = mrr.compute(labeled_ranked_list)
    return result
def write2file(path, name, format, output):
    """Echo `output` to stdout and append it (plus newline) to path+name+format.

    NOTE: `format` shadows the builtin; the name is kept for caller
    compatibility. `path` is concatenated directly, so it should end in a
    path separator (callers pass e.g. "out/").
    """
    print(output)
    # Race-free replacement for the old exists()-then-makedirs() check.
    os.makedirs(path, exist_ok=True)
    with open(path + name + format, 'a') as thefile:
        thefile.write("%s\n" % output)
def prediction2file(path, name, format, preds, fold):
    """Append one 'fold<TAB>qid<TAB>pid<TAB>score' line per prediction tuple.

    NOTE: `format` shadows the builtin; the name is kept for caller
    compatibility.
    """
    os.makedirs(path, exist_ok=True)  # race-free vs. exists()-then-makedirs()
    with open(path + name + format, 'a') as thefile:
        for (qid, pid, score) in preds:
            thefile.write("%d\t%s\t%s\t%f\n" % (fold, qid, pid, score))
def print2file(path, name, format, printout, fold):
    """Echo `printout` to stdout and append it as 'fold-printout' to the log file.

    NOTE: `format` shadows the builtin; the name is kept for caller
    compatibility.
    """
    print(printout)
    os.makedirs(path, exist_ok=True)  # race-free vs. exists()-then-makedirs()
    with open(path + name + format, 'a') as thefile:
        thefile.write("%d-%s\n" % (fold, printout))
def result2file(path, name, format, res, qids, fold):
    """Append one 'fold<TAB>qid<TAB>value' line per (qid, metric value) pair.

    `res` and `qids` are parallel sequences; zip() silently truncates to the
    shorter one, matching the original behaviour.
    NOTE: `format` shadows the builtin; the name is kept for caller
    compatibility.
    """
    os.makedirs(path, exist_ok=True)  # race-free vs. exists()-then-makedirs()
    with open(path + name + format, 'a') as thefile:
        for q, r in zip(qids, res):
            thefile.write("%d\t%s\t%f\n" % (fold, q, r))
def main_cli():
    """CLI entry point: parse flags, load data files, build the model, then
    run 5-fold training/evaluation and write averaged results to disk."""
    # argument
    parser = argparse.ArgumentParser('CEDR model training and validation')
    parser.add_argument('--model', choices=MODEL_MAP.keys(), default='crossbert')
    parser.add_argument('--data', default='akgg')
    parser.add_argument('--path', default="data/")
    parser.add_argument('--wikifile', default="wikihow")
    parser.add_argument('--questionfile', default="question-qq")
    parser.add_argument('--initial_bert_weights', type=argparse.FileType('rb'))
    parser.add_argument('--model_out_dir', default="models/vbert")
    parser.add_argument('--epoch', type=int, default=20)
    parser.add_argument('--fold', type=int, default=5)
    parser.add_argument('--out_dir', default="out/")
    parser.add_argument('--evalMode', default="all")
    parser.add_argument('--mode', type=int, default=2)
    parser.add_argument('--maxlen', type=int, default=16)
    parser.add_argument('--earlystop', type=int, default=1)
    args = parser.parse_args()
    # Open the query/doc/wiki/question TSVs; the dataset prefix is the part
    # of --data before the first '-'.
    args.queryfile = io.TextIOWrapper(io.open("%s%s-query.tsv" % (args.path, args.data.split("-")[0]),'rb'), 'UTF-8')
    args.docfile = io.TextIOWrapper(io.open("%s%s-doc.tsv" % (args.path, args.data.split("-")[0]),'rb'), 'UTF-8')
    args.wikifile = io.TextIOWrapper(io.open("%s%s-%s.tsv" % (args.path, args.data.split("-")[0], args.wikifile),'rb'), 'UTF-8')
    args.questionfile = io.TextIOWrapper(io.open("%s%s-%s.tsv" % (args.path, args.data.split("-")[0], args.questionfile),'rb'), 'UTF-8')
    args.train_pairs = "%s%s-train" % (args.path, args.data)
    args.valid_run = "%s%s-valid" % (args.path, args.data)
    args.test_run = "%s%s-test" % (args.path, args.data)
    args.qrels = io.TextIOWrapper(io.open("%s%s-qrel.tsv" % (args.path, args.data.split("-")[0]),'rb'), 'UTF-8')
    dataset = Data.read_datafiles([args.queryfile, args.docfile, args.wikifile,
                                   args.questionfile])
    args.dataset = dataset
    model = MODEL_MAP[args.model](args).cuda() if Data.device.type == 'cuda' else MODEL_MAP[args.model](args)
    # if args.model == "cedr_pacrr":
    #     args.maxlen = 16 if args.mode == 1 else args.maxlen * args.mode
    #     model = MODEL_MAP[args.model](args).cuda() if Data.device.type == 'cuda' else MODEL_MAP[args.model](
    #         args)
    pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(pytorch_total_params)
    qrels = Data.read_qrels_dict(args.qrels)
    MAX_EPOCH = args.epoch
    # Per-fold train pairs / validation runs / test runs.
    train_pairs = []
    valid_run = []
    test_run = []
    foldNum = args.fold
    for fold in range(foldNum):
        f = open(args.train_pairs + "%d.tsv" % fold, "r")
        train_pairs.append(Data.read_pairs_dict(f))
        f = open(args.valid_run + "%d.tsv" % fold, "r")
        valid_run.append(Data.read_run_dict(f))
        f = open(args.test_run + "%d.tsv" % fold, "r")
        test_run.append(Data.read_run_dict(f))
    if args.initial_bert_weights is not None:
        model.load(args.initial_bert_weights.name)
    os.makedirs(args.model_out_dir, exist_ok=True)
    if not os.path.exists(args.out_dir):
        os.makedirs(args.out_dir)
    # Output files are tagged with the full configuration plus a timestamp.
    timestamp = strftime('%Y_%m_%d_%H_%M_%S', localtime())
    if "birch" in args.model:
        wikiName = args.wikifile.name.split("/")[-1].replace(".tsv", "")
        questionName = args.questionfile.name.split("/")[-1].replace(".tsv", "")
        additionName = []
        if args.mode in [1, 3, 5, 6]:
            additionName.append(wikiName)
        if args.mode in [2, 4, 5, 6]:
            additionName.append(questionName)
        modelName = "%s_m%d_%s_%s_%s_e%d_es%d_%s" % (
            args.model, args.mode, args.data, "_".join(additionName), args.evalMode, args.epoch, args.earlystop, timestamp)
    else:
        wikipediaFile = args.wikifile.name.split("/")[-1].replace(".tsv", "")
        questionFile = args.questionfile.name.split("/")[-1].replace(".tsv", "")
        modelName = "%s_%s_m%d_ml%d_%s_%s_%s_e%d_es%d_%s" % (args.data, args.model, args.mode, args.maxlen, wikipediaFile, questionFile, args.evalMode, args.epoch, args.earlystop, timestamp)
    print(modelName)
    # Qrels as {qid: {pid: label}} plus a map from entity type to its pids.
    df = pd.read_csv("%s%s-qrel.tsv" % (args.path, args.data.split("-")[0]), sep="\t", names=["qid", "empty", "pid", "rele_label", "etype"])
    qrelDict = collections.defaultdict(dict)
    type2pids = collections.defaultdict(set)
    for qid, prop, label, etype in df[['qid', 'pid', 'rele_label', 'etype']].values:
        qrelDict[str(qid)][str(prop)] = int(label)
        type2pids[str(etype)].add(prop)
    args.type2pids = type2pids
    metricKeys = {"%s@%d" % (i, j): [] for i in ["p", "r", "ndcg", "nerr"] for j in [5, 10, 15, 20]}
    metricKeys["rp"] = []
    metricKeys["mrr"] = []
    metricKeys["map"] = []
    results = []
    t1 = time.time()
    args.isUnsupervised = True if args.model in ["sen_emb"] else False
    # Train/evaluate each fold; main() returns that fold's best test metrics.
    for fold in range(len(train_pairs)):
        results.append(
            main(model, dataset, train_pairs[fold], qrels, valid_run[fold], test_run[fold], args.model_out_dir,
                 qrelDict, modelName, fold, metricKeys, MAX_EPOCH, Data, args))
    elapsed_time = time.time() - t1
    txt = f'total : {time.strftime("%H:%M:%S", time.gmtime(elapsed_time))}'
    print2file(args.out_dir, modelName, ".txt", txt, fold)
    # average results across 5 folds
    output = []
    for k in metricKeys:
        tmp = []
        for fold in range(foldNum):
            tmp.extend(results[fold][k])
        _res = np.mean(tmp)
        output.append("%.4f" % _res)
    write2file(args.out_dir, modelName, ".res", ",".join(output))
if __name__ == '__main__':
    # Run the full training/evaluation pipeline when invoked as a script.
    main_cli()
|
993,350 | 161aabe4aabbc8b47af2f4e15267f589f797553c | #!/usr/bin/env python2
import sys
import binascii
import pyautogui
import serial
import bitarray
# One key per bit of the 16-bit serial packet (4x4 keypad layout).
# BUG FIX: the original omitted the commas at the row breaks, so adjacent
# string literals were implicitly concatenated ('d' 'e' -> 'de'), leaving
# only 12 entries and wrong key labels.
key_list = ['a', 'b', 'c', 'd',
            'e', 'f', 'g', 'h',
            'i', 'j', 'k', 'l',
            'm', 'n', 'o', 'p']
def serial_data(baudrate):
    # Generator: yield lines read from the serial port named on argv[1]
    # at the given baud rate; yields nothing if the port is missing/invalid.
    # NOTE(review): the ser.close() after the infinite loop is unreachable,
    # so the port stays open for the life of the generator.
    ser = serial.Serial()
    ser.baudrate = baudrate
    try:
        ser.port = sys.argv[1]
    except IndexError:
        print 'You did not specify a port'
        return
    ser.timeout = 1000
    try:
        ser.open()
    except serial.serialutil.SerialException:
        print 'Invalid port'
        return
    while True:
        yield ser.readline()
    ser.close()
def main():
    """Read 2-byte packets from the serial port and press the keyboard key
    mapped to each set bit (bit position n -> key_list[n])."""
    for packet in serial_data(115200):
        # First two bytes -> 16-character bit string ('0f00' -> '0000111100000000')
        packet_hex = binascii.hexlify(packet[:2])
        packet_bit = "".join(["{0:04b}".format(int(c, 16)) for c in packet_hex])
        # BUG FIX: `bitarray` is the imported module, not a constructor;
        # calling it raised TypeError. The class is bitarray.bitarray.
        b_array = bitarray.bitarray(packet_bit)
        for cnt, value in enumerate(b_array):
            if value:
                # BUG FIX: index by the bit *position* (cnt), not the bit
                # value (True == 1), and use the actual name `key_list`
                # (`keylist` was a NameError).
                pyautogui.press(key_list[cnt])
if __name__ == "__main__":
    # Run the serial-to-keypress bridge when invoked directly.
    main()
|
993,351 | 4af677e2e21ffbd151bfb2062fbe9d3a22e9cc2c | import base64
# Decode a hex string and re-encode the raw bytes as Base64.
# Renamed from `hex` so the builtin hex() is not shadowed.
hex_string = "72bca9b68fc16ac7beeb8f849dca1d8a783e8acf9679bf9269f7bf"
bytes_ = bytes.fromhex(hex_string)
base64_ = base64.b64encode(bytes_)
# Print the ASCII text rather than the b'...' bytes repr.
print(base64_.decode('ascii'))
993,352 | aefa4d031c1a554e8f985cb0a79e5b7746f15f87 | from urllib import urlopen as uReq
from bs4 import BeautifulSoup as soup
if __name__ == '__main__':
    # Scrape IT job listings (title, company, location) from the first 99
    # result pages of monster.se, then stepstone.se. Python 2 script
    # (urllib.urlopen and print statements).
    pages = []
    for i in range(1,100):
        my_url = 'https://www.monster.se/jobb/sok/Data-IT_4?intcid=swoop_BrowseJobs_Data-IT&page={0}'.format(i)
        pages.append(my_url)
    for my_url in pages:
        try:
            uClient = uReq(my_url)
            pageHtml = uClient.read()
            uClient.close()
            page_soup = soup(pageHtml,"html.parser")
            print page_soup.h1.text.strip()
            # Each result row is an <article class="js_result_row">.
            containers = page_soup.findAll("article",{"class":"js_result_row"})
            for container in containers:
                job_title = container.findAll("div",{"class":"jobTitle"})
                print job_title[0].text.strip()
                company = container.findAll("div",{"class":"company"})
                print company[0].text.strip()
                location = container.findAll("div",{"class":"location"})
                print location[0].text.strip()
                print ('-------------------------------')
        except AttributeError:
            # A page without the expected markup ends the crawl.
            break
    pages_stepstone = []
    for i in range(1,100):
        my_url_stepstone = 'https://www.stepstone.se/lediga-jobb-i-hela-sverige/data-it/sida{0}/'.format(i)
        pages_stepstone.append(my_url_stepstone)
    for my_url_stepstone in pages_stepstone:
        try:
            uClient2 = uReq(my_url_stepstone)
            pageHtml2 = uClient2.read()
            uClient2.close()
            page_soup2 = soup(pageHtml2,"html.parser")
            # Stepstone wraps each listing in a <div class="description">.
            containers2 = page_soup2.findAll("div",{"class":"description"})
            for container in containers2:
                companyName = container.span.a.text
                print companyName
                job_title = container.h5.a.text
                print job_title
                location2 = container.findAll("span",{"class":"text-opaque"})
                print location2[1].text
                print my_url_stepstone
                print ('-------------------------------')
        except AttributeError:
            break
|
993,353 | f3906c00784ebb729b4f0c3a1efe12e1947f33b7 | from random import sample
# Sorting pair of lists
def lists_sort(list1, list2):
    """Merge two individually sorted lists into one sorted list.

    Standard two-pointer merge: repeatedly take the smaller head element,
    then append whichever tail remains.
    """
    merged = []
    i, j = 0, 0
    len1, len2 = len(list1), len(list2)
    # Consume both lists in lockstep until one of them runs out.
    while i < len1 and j < len2:
        if list1[i] < list2[j]:
            merged.append(list1[i])
            i += 1
        else:
            merged.append(list2[j])
            j += 1
    # At most one of these slices is non-empty; it is already sorted.
    merged.extend(list1[i:])
    merged.extend(list2[j:])
    return merged
# Recursive function that splits list into two parts and calls sorting function for them
def recursion_sorting(numbers_list):
    """Return a sorted copy of numbers_list via recursive merge sort."""
    size = len(numbers_list)
    # A single element is sorted by definition — recursion base case.
    if size == 1:
        return numbers_list
    # The first half takes the extra element when the length is odd.
    mid = size // 2 + size % 2
    left = recursion_sorting(numbers_list[:mid])
    right = recursion_sorting(numbers_list[mid:])
    return lists_sort(left, right)
incorrect_input = True
# Getting list length from user
# NOTE(review): an input of 0 produces an empty list, and
# recursion_sorting() recurses forever on an empty list — confirm whether
# a positive-length check should be added here.
while incorrect_input:
    list_len = input("Введите размер списка (он будет наполнен числами в случайном порядке):")
    try:
        list_len = int(list_len)
        incorrect_input = False
    except ValueError:
        print("Ошибка! Пожалуйста, введите целое число")
# Generate list of random numbers with requested length
numbers_list = sample(range(list_len), k=list_len)
print("Случайный список:\n", numbers_list)
# Start recursive sorting
result = recursion_sorting(numbers_list)
print("Отсортированный список:\n", result) |
993,354 | 976a016a9489f1b001c38f3154f5d17d3515fbd5 | from flask import Blueprint, redirect, url_for, render_template, request, session, flash
from datetime import datetime
import time
from website.current import startRun, getCurrent, getImg
def getSevenDay():
    """Return a hard-coded 7-day forecast as 16 parallel lists.

    Order of the returned tuple: day names, ISO dates, condition strings,
    OpenWeatherMap icon codes, temp min/max, humidity min/max, precipitation
    volume min/max, air pressure min/max, then average wind speed MAX before
    MIN (note the swapped order, kept for caller compatibility), and cloud
    cover min/max. Each list has one entry per day.
    """
    day_names = ['Mon', 'Tues', 'Wed', 'Thurs', 'Fri', 'Sat', 'Sun']
    dates = ['2021-06-06', '2021-06-07', '2021-06-08', '2021-06-09', '2021-06-10', '2021-06-11', '2021-06-12']
    conditions = ['Rain', 'Drizzle', 'Clear', 'Rain', 'Clouds', 'Drizzle', 'Clouds']
    icons = ['10d', '09d', '01d', '10d', '02d', '09d', '02d']
    temp_min = [27, 27, 26, 27, 26, 27, 27]
    temp_max = [30, 30, 30, 30, 30, 29, 29]
    humidity_min = [65, 71, 59, 60, 63, 62, 70]
    humidity_max = [68, 75, 65, 62, 66, 70, 77]
    precip_min = [0.62, 0.55, 0.10, 0.78, 0.99, 0.44, 0.75]
    precip_max = [0.75, 0.70, 0.50, 0.90, 1, 0.70, 0.85]
    pressure_min = [1009, 1010, 1010, 1010, 1010, 1008, 1008]
    pressure_max = [1011, 1015, 1015, 1015, 1016, 1010, 1011]
    wind_min = [4.69, 5.25, 4.74, 4.15, 5.32, 4.35, 3.77]
    wind_max = [5, 6, 5, 5.5, 6.5, 5.75, 4.25]
    cloud_min = [77, 94, 99, 92, 82, 84, 80]
    cloud_max = [80, 95, 100, 100, 99, 94, 100]
    return (day_names, dates, conditions, icons, temp_min, temp_max,
            humidity_min, humidity_max, precip_min, precip_max,
            pressure_min, pressure_max, wind_max, wind_min,
            cloud_min, cloud_max)
993,355 | b0212d1943a7e7b84d02436fa0f314506f2bbb76 | # Link: https://leetcode.com/problems/two-sum/
# Approach: Add all the numbers in the dictionary with corresponding index in the array. Now again start iterating over elements in the array. Perform num = target-array[i].
# If the num is in dictionary then extract its position (let's say j) and return (i, j).
class Solution(object):
    """LeetCode 1: Two Sum, solved with a two-pass hash map."""

    def twoSum(self, nums, target):
        """Return indices [i, j] with i < j such that nums[i] + nums[j] == target.

        First pass records each value's last index; second pass looks up the
        complement of every value, skipping the trivial self-pairing.
        """
        # value -> last position where it occurs (duplicates keep the later index)
        index_of = {value: position for position, value in enumerate(nums)}
        for position, value in enumerate(nums):
            partner = index_of.get(target - value)
            if partner is not None and partner != position:
                return [min(position, partner), max(position, partner)]
993,356 | 78c51721eafe8264b16aaf960629fd4d6c9fb6b5 | # for문
# for 변수 in list/tuple/string:
# code here
# loop
# nums = [10, 20, 30]
# for num in nums:
# print(num)
# values = [100, 200, 300]
# for value in values:
# print(value + 10)
# foods = ["김밥", "라면", "튀김"]
# for food in foods:
# print("오늘의 메뉴: " + food)
# strings = ["SK하이닉스", "삼성전자", "LG전자"]
# for string in strings:
# print(len(string))
# animals = ["dog", "cat", "parrot"]
# for animal in animals:
# print(f"{animal} {len(animal)}")
# for animal in animals:
# print(animal[0])
# enumerate()
# 순서가 있는 자료형(리스트 튜플 등등)을 입력받아서 enumerate 객체를 리턴 (값과 순서를 하나의 튜플로 갖는 객체)
# numbers = [3, 4, 5]
# for index, number in enumerate(numbers):
# # (0, 3),(1, 4),(3, 5)
# print((index + 1) * number)
# hg = ["가", "나", "다", "라"]
# # for index, value in enumerate(hg):
# # if index == 0:
# # continue
# # print(value)
# for value in hg[1:]:
# print(value)
# print("----")
# ##역순 리스트 만들기
# for value in list(reversed(hg)):
# print(value)
# print("----")
# for value in hg[::-1]:
# print(value)
# print("----")
# for value in hg[0::2]: # hg[::2]
# print(value)
# numbers1 = [3, -20, -3, 44]
# for number in numbers1:
# if number < 0:
# print(number)
# numbers2 = [3, 100, 23, 44]
# for number in numbers2:
# if number % 3 == 0:
# print(number)
# numbers3 = [13, 21, 12, 14, 30, 18]
# for number in numbers3:
# if (number % 3 == 0) and (number < 20):
# print(number)
# chars = ["I", "study", "python", "language", "!"]
# for char in chars:
# if len(char) >= 3:
# print(char)
# animals = ["dog", "cat", "parrot"]
# for animal in animals:
# print(animal[0].upper() + animal[1:])
# filenames = ["hello.py", "ex01.py", "intro.hwp"]
# for filename in filenames:
# print(filename.split(".")[0])
# filenames = ["intra.h", "intra.c", "define.h", "run.py"]
# for filename in filenames:
# extension = filename.split(".")[1]
# # if extension == "h":
# # print(filename)
# if extension == "h" or extension == "c":
# print(filename)
# range()
# range(start, stop, step)
# start ~ stop - 1 까지의 연속된 숫자로 된 range객체를 만든다
# ex)
# range(10) [0,1,2,3,4,5,6,7,8,9] : 0부터 시작 9까지
# range(1,10,2) [1,3,,5,7,9] : 1부터 시작 9까지 2칸씩 건너뛴다
# range 객체는 반복가능한 객체를 말한다. 예) 문자열, 리스트, 딕셔너리, 세트
# 자세한 작동원리 : https://dojang.io/mod/page/view.php?id=2405
# 반복가능한 객체 안에는 __iter__ 라는 메소드가 존재 => __iter__를 실행하면 이터레이터가 실행되고
# 이터레이터에 의해서 __next__가 실행되면서 반복할 때마다 해당 요소를 순서대로 꺼낸다.
# 주의!! 반복가능한 객체와 이터레이터는 다르다!!
# for i in range(100):
# print(i)
# for i in range(2002, 2051, 4):
# # if i % 4 == 2: # 해줄필요없음 어차피 range에 의해서 다 걸러짐
# # print(i)
# print(i)
# for i in range(1, 31):
# if i % 3 == 0:
# print(i)
# for i in range(3, 31, 3):
# print(i)
# for i in range(99, -1, -1):
# print(i)
# print(100 - i)
# for i in range(10):
# # print("0." + str(i))
# print(i / 10)
# for i in range(1, 10, 2):
# print(f"3 x {i} = {3*i}")
# sum = 0
# for i in range(1, 11):
# sum = sum + i
# print(sum)
# odd_sum = 0
# for i in range(1, 10, 2):
# odd_sum += i
# print(odd_sum)
# mul = 1
# for i in range(1, 11):
# mul *= i
# print(mul)
# 171 ⭐️ 생소한 방법
# 리스트가 있음에도 for/range를 이용하는 방법
# price_list = [32100, 32150, 32000, 32500]
# for i in range(len(price_list)):
# print(price_list[i])
# for i in range(len(price_list)):
# print(i, price_list[i])
# for i in range(len(price_list)):
# print(len(price_list) - 1 - i, price_list[i])
# for i in range(len(price_list) - 1):
# print(100 + 10 * i, price_list[i])
# my_list = ["가", "나", "다", "라"]
# for i in range(len(my_list) - 1):
# print(my_list[i : i + 2])
# my_list = ["가", "나", "다", "라", "마"]
# for i in range(len(my_list) - 2):
# print(" ".join(my_list[i : i + 3]))
# for i in range(len(my_list) - 1, 0, -1):
# print(my_list[i], my_list[i - 1])
# my_list = [100, 200, 400, 800]
# for i in range(len(my_list) - 1):
# print(my_list[i + 1] - my_list[i])
# my_list = [100, 200, 400, 800, 1000, 1300]
# for i in range(len(my_list) - 2):
# list = my_list[i : i + 3]
# print(sum(list) / 3)
# low_prices = [100, 200, 400, 800, 1000]
# high_prices = [150, 300, 430, 880, 1000]
# volatility = []
# for i in range(len(low_prices)):
# volatility.append(high_prices[i] - low_prices[i])
# print(volatility)
# apart = [["101호", "102호"], ["201호", "202호"], ["301호", "302호"]]
# print(apart)
# # stock = [["시가", 100, 200, 300], ["종가", 80, 210, 330]]
# stock = {"시가": [100, 200, 300], "종가": [80, 210, 330]}
# print(stock)
# stock1 = {"10/10": [80, 110, 70, 90], "10/11": [210, 230, 190, 200]}
# print(stock1)
# for i in range(len(apart)):
# for j in apart[i]:
# print(j)
# for row in apart:
# for col in row:
# print(col)
# for row in reversed(apart):
# for row in apart[::-1]:
# for col in row:
# print(col)
# for row in apart[::-1]:
# for col in row[::-1]:
# print(col)
# for row in apart:
# for col in row:
# print(col)
# print("-----")
# for row in apart:
# for col in row:
# print(col)
# print("-----")
# Sample 2-D price data (three rows of four prices each); only referenced by
# the commented-out exercises below.
data = [
    [2000, 3050, 2050, 1980],
    [7500, 2050, 2050, 1980],
    [15450, 15050, 15550, 14900],
]
# 1차원 배열에 추가
# result = []
# for row in data:
# for price in row:
# print(price * 1.00014)
# result.append(price * 1.00014)
# print("-------")
# print(result)
# 2차원 배열에 추가
# result = []
# for row in data:
# r_row = []
# for price in row:
# r_row.append(price * 1.00014)
# result.append(r_row)
# print(result)
# Daily open/high/low/close table; the first row is a header.
ohlc = [
    ["open", "high", "low", "close"],
    [100, 110, 70, 100],
    [200, 210, 180, 190],
    [300, 310, 300, 310],
]
# Total profit over all days: (close - open) summed across the data rows.
total = sum(day[3] - day[0] for day in ohlc[1:])
print(total)
993,357 | 76017cd0a68fae5a8f6e9f609eb60484069dfcb4 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-09-13 09:20
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates the Dealer model (one-to-one with the
    auth user model) with a phone number and an optional manufacturer choice.
    """

    dependencies = [
        # Dealer.user references the (possibly swapped) auth user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('cars', '0012_userprofile_dob'),
    ]

    operations = [
        migrations.CreateModel(
            name='Dealer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('phone', models.IntegerField()),
                # blank=True: the manufacturer is optional in admin forms.
                ('manufacturer', models.CharField(blank=True, choices=[('Audi', 'Audi'), ('Maruti-Suzuki', 'Maruti-Suzuki'), ('Tata Motors', 'Tata Motors'), ('Hyundai', 'Hyundai'), ('Honda', 'Honda'), ('Volkswagen', 'Volkswagen'), ('Toyota', 'Toyota'), ('Mahindra', 'Mahindra'), ('Renault', 'Renault'), ('Fiat', 'Fiat'), ('Chevrolet', 'Chevrolet'), ('Ford', 'Ford'), ('KIA', 'KIA'), ('Porsche', 'Porsche'), ('Nissan', 'Nissan')], max_length=50)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
993,358 | a10dc83b9a05a0d214b5ab8c029313ae01a86a37 | from keras.layers import Conv2D, Conv2DTranspose, Input, MaxPooling2D, Dropout
from keras.layers import Concatenate, Activation, LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.layers.merge import concatenate
from keras.models import Model
from keras import backend as K
import tensorflow as tf
import numpy as np
def side_branch(x, factor):
    """Project *x* down to a single channel, then upsample it by *factor*
    with a learned transposed convolution (kernel = 2*factor)."""
    projected = Conv2D(1, (1, 1), activation=None, padding='same')(x)
    upsample_kernel = (2 * factor, 2 * factor)
    return Conv2DTranspose(1, upsample_kernel, strides=factor, padding='same',
                           use_bias=False, activation=None)(projected)
def mean_iou(y_true, y_pred):
    """Mean intersection-over-union averaged over thresholds 0.50..0.95.

    Binarizes ``y_pred`` at each threshold and evaluates TF1's streaming
    two-class mean-IoU metric, then averages the per-threshold scores.
    """
    prec = []
    for t in np.arange(0.5, 1.0, 0.05):
        # Binarize predictions at threshold t (tf.to_int32 is TF1-era API).
        y_pred_ = tf.to_int32(y_pred > t)
        score, up_opt = tf.metrics.mean_iou(y_true, y_pred_, 2)
        # The streaming metric keeps its state in local variables: they must
        # be initialized, and the update op must run before the score is read
        # (hence the control dependency on up_opt).
        K.get_session().run(tf.local_variables_initializer())
        with tf.control_dependencies([up_opt]):
            score = tf.identity(score)
        prec.append(score)
    return K.mean(K.stack(prec), axis=0)
def network():
    """Build a U-Net-style edge-detection model with deep supervision.

    Encoder: 4 conv blocks (filters fs..fs*8) with max pooling.
    Decoder: 3 upsampling blocks with skip connections; each decoder level
    emits a single-channel side output (b1..b3) upsampled to 256x256, and a
    1x1 conv fuses the three side maps.

    Returns a compiled Model with four sigmoid outputs (o1, o2, o3, ofuse),
    each trained with binary cross-entropy; mean IoU is tracked on ofuse.
    """
    inputs = Input((256,256, 3))
    fs = 16;  # base filter count, doubled at each encoder level
    # --- encoder block 1 (256x256, fs filters) ---
    c1 = Conv2D(fs, (3, 3), padding='same', kernel_initializer = 'glorot_normal', name='block1_conv1')(inputs)
    c1 = LeakyReLU()(c1)
    c1 = Dropout(0.5)(c1)
    c1 = Conv2D(fs, (3, 3), padding='same', kernel_initializer = 'glorot_normal', name='block1_conv2')(c1)
    c1 = BatchNormalization()(c1)
    c1 = LeakyReLU()(c1)
    p1 = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block1_pool')(c1) #128
    # --- encoder block 2 (128x128, fs*2 filters) ---
    c2 = Conv2D(fs*2, (3, 3), padding='same', kernel_initializer = 'glorot_normal', name='block2_conv1')(p1)
    c2 = LeakyReLU()(c2)
    c2 = Dropout(0.5)(c2)
    c2 = Conv2D(fs*2, (3, 3), padding='same', kernel_initializer = 'glorot_normal', name='block2_conv2')(c2)
    c2 = BatchNormalization()(c2)
    c2 = LeakyReLU()(c2)
    p2 = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block2_pool')(c2) #64
    # --- encoder block 3 (64x64, fs*4 filters) ---
    c3 = Conv2D(fs*4, (3, 3), padding='same', kernel_initializer = 'glorot_normal', name='block3_conv1')(p2)
    c3 = LeakyReLU()(c3)
    c3 = Dropout(0.5)(c3)
    c3 = Conv2D(fs*4, (3, 3), padding='same', kernel_initializer = 'glorot_normal', name='block3_conv2')(c3)
    c3 = BatchNormalization()(c3)
    c3 = LeakyReLU()(c3)
    p3 = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block3_pool')(c3) #32
    # --- bottleneck (32x32, fs*8 filters) ---
    c4 = Conv2D(fs*8, (3, 3), padding='same', kernel_initializer = 'glorot_normal', name='block4_conv1')(p3)
    c4 = LeakyReLU()(c4)
    c4 = Dropout(0.5)(c4)
    c4 = Conv2D(fs*8, (3, 3), padding='same', kernel_initializer = 'glorot_normal', name='block4_conv2')(c4)
    c4 = BatchNormalization()(c4)
    c4 = LeakyReLU()(c4)
    # --- decoder level 1: up to 64x64, skip from c3, side output at stride 4 ---
    u1 = Conv2DTranspose(fs*4, (2, 2), strides=(2, 2), padding='same') (c4)
    u1 = concatenate([u1, c3])
    c5 = Conv2D(fs*4, (3, 3), kernel_initializer='glorot_normal', padding='same') (u1)
    c5 = LeakyReLU()(c5)
    c5 = Dropout(0.5) (c5)
    c5 = Conv2D(fs*4, (3, 3), kernel_initializer='glorot_normal', padding='same') (c5) #64x64x64
    b1= side_branch(c5, 4)
    c5 = BatchNormalization()(c5)
    c5 = LeakyReLU()(c5)
    # --- decoder level 2: up to 128x128, skip from c2, side output at stride 2 ---
    u2 = Conv2DTranspose(fs*2, (2, 2), strides=(2, 2), padding='same') (c5)
    u2 = concatenate([u2, c2])
    c6 = Conv2D(fs*2, (3, 3), kernel_initializer='glorot_normal', padding='same') (u2)
    c6 = LeakyReLU()(c6)
    c6 = Dropout(0.5) (c6)
    c6 = Conv2D(fs*2, (3, 3), kernel_initializer='glorot_normal', padding='same') (c6) #128x128x32
    b2 = side_branch(c6, 2)
    c6 = BatchNormalization()(c6)
    c6 = LeakyReLU()(c6)
    # --- decoder level 3: up to 256x256, skip from c1, side output at stride 1 ---
    # NOTE(review): fs*4 here breaks the decoder halving pattern (u1: fs*4,
    # u2: fs*2); u3 was presumably meant to use fs — confirm before changing,
    # since it alters the parameter count of trained checkpoints.
    u3 = Conv2DTranspose(fs*4, (2, 2), strides=(2, 2), padding='same') (c6)
    u3 = concatenate([u3, c1])
    c7 = Conv2D(fs, (3, 3), kernel_initializer='glorot_normal', padding='same') (u3)
    c7 = LeakyReLU()(c7)
    c7 = Dropout(0.5) (c7)
    c7 = Conv2D(fs, (3, 3), kernel_initializer='glorot_normal', padding='same') (c7) #256x256x16
    b3 = side_branch(c7, 1)
    c7 = BatchNormalization()(c7)
    c7 = LeakyReLU()(c7)
    # fuse the three single-channel side maps with a learned 1x1 conv
    fuse = Concatenate(axis=-1)([b1, b2, b3])
    fuse = Conv2D(1, (1,1), padding='same', use_bias=False, activation=None)(fuse) # 256x256x1
    # outputs: sigmoid probability maps per side branch plus the fusion
    o1 = Activation('sigmoid', name='o1')(b1)
    o2 = Activation('sigmoid', name='o2')(b2)
    o3 = Activation('sigmoid', name='o3')(b3)
    ofuse = Activation('sigmoid', name='ofuse')(fuse)
    model = Model(inputs=[inputs], outputs=[o1, o2, o3, ofuse])
    model.compile(loss={'o1':'binary_crossentropy','o2':'binary_crossentropy','o3':'binary_crossentropy','ofuse':'binary_crossentropy'}, metrics={'ofuse': mean_iou}, optimizer='adam')
    return model
|
993,359 | d675d6bfea034ff7110602e01e5c76825765db35 | import requests
from . import DynamicDnsPlugin
class Rackspace(DynamicDnsPlugin):
    """Dynamic-DNS updater backed by the Rackspace Cloud DNS API."""

    def update(self, ip):
        """Point this plugin's A record (``self.domain``) at *ip*.

        Authenticates with the username/API key from ``self.config``, looks
        up the zone and the existing A record, then rewrites the record.
        """
        # The zone is everything after the first label of the FQDN.
        zone_name = self.domain.split('.', 1)[1]
        # Authenticate to obtain a token and the tenant id.
        auth_payload = {
            'auth': {
                'RAX-KSKEY:apiKeyCredentials': {
                    'username': self.config['username'],
                    'apiKey': self.config['api_key'],
                }
            }
        }
        auth = requests.post('https://identity.api.rackspacecloud.com/v2.0/tokens', json=auth_payload).json()
        token = auth['access']['token']['id']
        tenant = auth['access']['token']['tenant']['id']
        headers = {'X-Auth-Token': token}
        # Resolve the zone id for the domain we manage.
        zones = requests.get(f'https://dns.api.rackspacecloud.com/v1.0/{tenant}/domains?name={zone_name}', headers=headers).json()
        zone_id = zones['domains'][0]['id']
        # Find the existing A record for the subdomain.
        records = requests.get(f'https://dns.api.rackspacecloud.com/v1.0/{tenant}/domains/{zone_id}/records?type=A&name={self.domain}', headers=headers).json()
        record_id = records['records'][0]['id']
        # Rewrite the record in place with the new address.
        update_payload = {
            'records': [
                {
                    'name': self.domain,
                    'id': record_id,
                    'data': ip,
                    'ttl': 300
                }
            ]
        }
        requests.put(f'https://dns.api.rackspacecloud.com/v1.0/{tenant}/domains/{zone_id}/records', headers=headers, json=update_payload).json()
|
993,360 | 54e2eea678b07ba86b7ba3077656989142c19fb7 | from .body import MetaTexture |
993,361 | aa443653d78659bbe0ffeda3cf1a125bd1ec01ab | '''
Uses the 12x3 feature vectors from the first portion of the CNN architecture
to produce a 40-class classification.
Input: 3 images with the same label (segregated image with 2 digits and 1 alphabet)
Output: a 40-class classification problem
'''
from __future__ import print_function
import keras
import numpy as np
import os
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
from keras.models import load_model
import ipdb
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# 40 target classes (the valid label values after remapping, see below)
num_classes = 40
batch_size = 128
epochs = 150
# input image dimensions
img_rows, img_cols = 28, 28
def LoadTrainData():
    """Load the training split from train.ocv.

    Returns (images, labels): column 0 holds the label, columns 2+ the
    flattened pixel values.
    """
    data_dir = '/home/ml/ajain25/Documents/Courses/AML/Project_3/NewDataMnsit'
    raw = np.genfromtxt(os.path.join(data_dir, 'train.ocv'), dtype=np.int32, delimiter=" ", skip_header=1)
    return raw[:, 2:], raw[:, 0]
def LoadTestData():
    """Load the test split from test.ocv and return only the pixel columns
    (columns 2 onward); the test file carries no usable labels."""
    data_dir = '/home/ml/ajain25/Documents/Courses/AML/Project_3/NewDataMnsit'
    raw = np.genfromtxt(os.path.join(data_dir, 'test.ocv'), dtype=np.int32, delimiter=" ", skip_header=1)
    return raw[:, 2:]
def WriteTestLabels(predicted_y, mapping_81, file_name):
    """Map predicted class indices back to original labels and write them to
    *file_name* as a Kaggle-style "Id,Label" CSV (ids start at 1).

    :param predicted_y: 1-D array of predicted class indices.
    :param mapping_81: dict mapping a class index back to its original label.
    :param file_name: path of the CSV file to create.
    """
    total_size = predicted_y.size
    print("Total images test data: ", str(total_size))
    data_labels = [mapping_81[int(p)] for p in predicted_y]
    # Use a context manager so the file is flushed/closed even on error
    # (the original never closed the handle).
    with open(file_name, "w") as f:
        f.write("Id,Label")
        # Bug fix: the row count was hard-coded to 10000, which raised
        # IndexError for any other prediction size; write one row per label.
        for i in range(total_size):
            f.write("\n")
            f.write("{0},{1}".format(str(i + 1), str(int(data_labels[i]))))
    print("Done writing labels in Test File")
def PlotHistory(history):
    """Save the train/validation accuracy and loss curves as PNG files."""
    def _render(train_key, val_key, title, ylabel, out_path):
        # One figure per metric: training curve first, validation second.
        plt.plot(history.history[train_key])
        plt.plot(history.history[val_key])
        plt.title(title)
        plt.ylabel(ylabel)
        plt.xlabel('epoch')
        plt.legend(['train', 'val'], loc='upper left')
        plt.savefig(out_path)
        plt.close()

    _render('acc', 'val_acc', 'model accuracy', 'accuracy',
            "./accuracy_mnist_extended_nn_5_mnist_noiseAdded_v1.png")
    _render('loss', 'val_loss', 'model loss', 'loss',
            "./loss_mnist_extended_nn_5_mnist_noiseAdded_v1.png")
def SaveCombinedFeatureData(x_train, y_train, x_val, y_val, x_test):
    """Persist all feature/label arrays as .npy files in the working directory."""
    named_arrays = (('x_train', x_train), ('y_train', y_train),
                    ('x_val', x_val), ('y_val', y_val), ('x_test', x_test))
    for name, array in named_arrays:
        np.save(name, array)
def GetMappingTo40(mapping, labels):
    """Translate every entry of *labels* through *mapping* and return the
    result as an int32 numpy array."""
    translated = [mapping[label] for label in labels]
    return np.array(translated).astype('int32')
def GetPredictedFeaturesFromMNIST(data, model):
    """Split interleaved flat images into the 3 per-sample images, run each
    through *model*, and stack the three prediction vectors side by side.

    Rows 0,3,6,... are image 1; rows 1,4,7,... image 2; rows 2,5,8,... image 3
    (each sample consists of three consecutive rows with the same label).
    """
    # Target shape depends on the Keras backend's channel ordering.
    if K.image_data_format() == 'channels_first':
        target_shape = (-1, 1, img_rows, img_cols)
    else:
        target_shape = (-1, img_rows, img_cols, 1)
    per_image_predictions = []
    for offset in range(3):
        images = data[offset::3, :].reshape(target_shape)
        per_image_predictions.append(model.predict(images, batch_size=batch_size))
    # Concatenate the three prediction vectors horizontally per sample.
    return np.hstack(per_image_predictions)
#Loading the segregated images data from train and test
x_train, y_train = LoadTrainData()
x_test = LoadTestData()
#learning mapping from 81 classes to 40 labels
# labels_global lists the 40 valid final labels (appear to be products of two
# digits, up to 9*9=81 — TODO confirm); mapping_40 maps label -> dense index
# 0..39 and mapping_81 is its inverse.
labels_global = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 20, 21, 24, 25, 27, 28, 30, 32, 35, 36, 40, 42, 45, 48, 49, 54, 56, 63, 64, 72, 81]
mapping_40 = {}
mapping_81 = {}
for i,l in enumerate(labels_global):
    mapping_40[l] =i
    mapping_81[i] = l
#Loaded already trained EMNIST model (first-stage CNN; absolute path — will
#only work on the original author's machine)
model = load_model('/home/ml/ajain25/Documents/Courses/AML/Project_3/Keras/MNSIT_Data/MNIST_rotated/my_model_EMNIST_Rotated_9_v4_noise_added_all_data.h5')
y_train = GetMappingTo40(mapping_40, y_train)
#normalizing the images to [0, 1]
x_train = x_train/ 255.0
data_features = GetPredictedFeaturesFromMNIST(x_train, model)
#because label is same for all 3 images, keep one label per triple
y = y_train[0::3]
# Shuffle features and labels together before the train/validation split.
indices = np.random.permutation(y.size)
data_features = data_features[indices]
y = y[indices]
#divide into validation data (first 20% of the shuffled samples)
val_prec = 0.2
val_limits = int(val_prec * y.size)
x_val = data_features[:val_limits, :]
x_train = data_features[val_limits: , :]
y_val = y[:val_limits]
y_train = y[val_limits:]
# #test data goes through the same normalization + first-stage features
x_test = x_test/ 255.0
x_test = GetPredictedFeaturesFromMNIST(x_test, model)
SaveCombinedFeatureData(x_train, y_train, x_val, y_val, x_test)
# convert class vectors to binary class matrices (one-hot)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_val = keras.utils.to_categorical(y_val, num_classes)
input_shape = x_train.shape[1]
#Adding more layers to predict from the 12*3 feature vector to 40 classes
model = Sequential()
model.add(Dense(512, input_dim=36, activation='relu'))
model.add(Dense(512, activation='relu'))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(256, input_dim=36, activation='relu'))
model.add(Dense(256, activation='relu'))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(128, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adamax(decay= 1e-4),
              metrics=['accuracy'])
history_nn = model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_val, y_val))
model.save("CNN_second_portion.h5")
PlotHistory(history_nn)
# Predict the test set, pick the argmax class, and write the submission file
# using mapping_81 to translate indices back to original labels.
y_predicted_test = model.predict(x_test)
predicted_labels = np.argmax(y_predicted_test, axis=1)
print("size of labels: ", predicted_labels.shape)
WriteTestLabels(predicted_labels, mapping_81, "./TestPredicted.csv")
993,362 | 3641303ff15ea4a676b41efe3ccb82ac6c708f8f | -------------------------
lxml |
-------------------------
* 它仅仅只是一个第三方库,算不上框架,提供了强大的xml操作api
* from lxml import etree
-------------------------
lxml-etree 模块 函数 |
-------------------------
HTML(text, parser=None, base_url=None)
* 通过html文本构造一个 Element 对象
XML(text, parser=None, base_url=None)
* 通过xml文本构造一个 Element 对象
tostring(element_or_tree,
encoding=None, method="xml",
xml_declaration=None,
pretty_print=False,
with_tail=True,
standalone=None, doctype=None, exclusive=False, with_comments=True, inclusive_ns_prefixes=None)
* 以字符串形式输出指定节点对象
SubElement(_parent, _tag, attrib=None, nsmap=None, **_extra)
* 添加指定名称的子节点到节点对象,返回子节点对象
* 参数
_parent 父级节点对象
_tag 子节点名称(字符串)
* 关键字参数
attrib 指定子标签的属性值
XPath()
* 创建一个xpath对象,可以通过该对象来对标签文本进行检索操作
* demo
xpath = etree.XPath("//text()")
print(xpath(etree.XML('<i>Hello</i>'))) # ['Hello']
fromstring(xml_str)
* 把指定的xml文本解析为:Element 对象
parse(path)
* 读取指定的文件,解析为 Element 对象
-------------------------
lxml-etree-实例属性,方法 |
-------------------------
tag
* 返回标签名称
text
* 标签体
attrib
* 标签的属性dict
tail
* 自关闭标签后的文本
append(e)
* 添加一个Element对象到当前对象的子节点
set(k,v)
* 设置标签的属性值
get(k)
* 获取标签指定名称的属性值
items()
* 返回标签的属性[(k,v)]
iter()
* 返回子标签迭代器(递归)
* 也可以传递标签名称作为参数,来过滤要迭代的子标签
xpath()
* 根据xpath表达式检索数据,返回[]
iterfind()
* 返回满足匹配的节点列表,返回迭代器,支持xpath表达式
findall()
* 返回满足匹配的节点列表,支持xpath表达式
find()
* 返回满足匹配的第一个,支持xpath表达式
findtext()
* 返回第一个满足匹配条件的.text内容,支持xpath表达式
-------------------------
lxml-etree 基本操作 |
-------------------------
* 生成(创建)空xml节点对象
root = etree.Element("root")
print(etree.tostring(root, pretty_print=True))
* 生成子节点
from lxml import etree
root = etree.Element("root")
root.append(etree.Element("child1")) # 直接通过实例对象的append方法添加一个Element子标签对象
child2 = etree.SubElement(root, "child2") # 通过etree模块的SubElement来添加子标签
child2 = etree.SubElement(root, "child3")
print(etree.tostring(root))
* 带内容的xml节点
from lxml import etree
root = etree.Element("root")
root.text = "Hello World" # 通过节点对象的text属性来获取/设置标签体
print(etree.tostring(root))
* 属性生成
from lxml import etree
root = etree.Element("root", name = "Kevin") # 在构造函数传递关键字参数来设置属性
root.set("hello","huhu") # 通过节点对象的 set(key,value) 来设置属性
root.text = "Hello World" # 设置节点的标签体
print(etree.tostring(root))
* 获取属性
from lxml import etree
root = etree.Element("root", name = "Kevin")
print(root.get('name')) # 通过get()方法来获取指定节点对象的属性,如果属性不存在返回 None
from lxml import etree
root = etree.Element("root", name = "Kevin",age="15")
print(root.attrib) # 通过 attrib 属性来获取节点属性的dict
print(root.items()) # 通过 items() 方法返回节点属性的[(key,value),(key,value)]
* 特殊内容
from lxml import etree
html = etree.Element("html")
body = etree.Element("body")
body.text = 'Hello'
br = etree.Element("br")
br.tail = "KevinBlandy" # 在自关闭标签后添加的文本
body.append(br)
html.append(body)
print(etree.tostring(html))
# <html><body>Hello<br/>KevinBlandy</body></html>
* 节点遍历
for element in root.iter():
print(element.tag, element.text)
for element in root.iter("child"): # 指定节点名称来过滤子节点
print(element.tag, element.text)
* 节点查找
iterfind()
* 返回满足匹配的节点列表,返回迭代器
findall()
* 返回满足匹配的节点列表
find()
* 返回满足匹配的第一个
findtext()
* 返回第一个满足匹配条件的.text内容
* 他们都支持xpath表达式 |
993,363 | e8ad5f1c4d04be9c419b6c794f3bf3d580cf49c1 | from django.contrib import admin
from blog.models import *
from .actions import make_published, make_draft
from accounts.models import UserAccount
User = UserAccount
class ArticleAdmin(admin.ModelAdmin):
    """Admin configuration for blog articles: list columns, filters, search,
    auto-slug from the title, and publish/draft bulk actions."""
    list_display = ('title', 'thumbnail_tag','slug', 'author', 'jpublish', 'status') #, 'preview_url'
    list_filter = ('publish','status', 'author')
    search_fields = ('title', 'description')
    # Slug is pre-filled from the title while typing in the admin form.
    prepopulated_fields = {'slug': ('title',)}
    # Sort by status (descending) first, then by newest publish date.
    ordering = ['-status', '-publish']
    actions = [make_published, make_draft]

admin.site.register(Article, ArticleAdmin)
# admin.site.register(Comment, CommentAdmin)
993,364 | 5455de0896dd289bccb42c9bd4a801aa3d1b5f9d | # Copyright 2018 Intel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from cyborg.common import exception
from cyborg import db as db_api
LOG = logging.getLogger(__name__)
# Configuration options controlling quota reservation behavior and the
# per-project accelerator limits.
quota_opts = [
    cfg.IntOpt('reservation_expire',
               default=86400,
               help='Number of seconds until a reservation expires'),
    cfg.IntOpt('until_refresh',
               default=0,
               help='Count of reservations until usage is refreshed'),
    cfg.StrOpt('quota_driver',
               default="cyborg.quota.DbQuotaDriver",
               help='Default driver to use for quota checks'),
    cfg.IntOpt('quota_fpgas',
               default=10,
               help='Total amount of fpga allowed per project'),
    cfg.IntOpt('quota_gpus',
               default=10,
               # Fixed copy-paste error: this option limits GPUs, not storage.
               help='Total amount of gpu allowed per project'),
    cfg.IntOpt('max_age',
               default=0,
               help='Number of seconds between subsequent usage refreshes')
]

CONF = cfg.CONF
CONF.register_opts(quota_opts)
class QuotaEngine(object):
    """Represent the set of recognized quotas."""

    def __init__(self, quota_driver_class=None):
        """Initialize a Quota object.

        :param quota_driver_class: accepted for signature compatibility but
            currently ignored; see the NOTE below.
        """
        self._resources = {}
        # NOTE(review): quota_driver_class is ignored — the DB-backed driver
        # is always used. Confirm whether pluggable drivers (see the
        # 'quota_driver' config option) should be honored here.
        self._driver = DbQuotaDriver()

    def register_resource(self, resource):
        """Register a resource."""
        self._resources[resource.name] = resource

    def register_resources(self, resources):
        """Register a list of resources."""
        for resource in resources:
            self.register_resource(resource)

    def reserve(self, context, deltas, expire=None, project_id=None):
        """Check quotas and reserve resources.

        For counting quotas--those quotas for which there is a usage
        synchronization function--this method checks quotas against
        current usage and the desired deltas. The deltas are given as
        keyword arguments, and current usage and other reservations
        are factored into the quota check.

        This method will raise a QuotaResourceUnknown exception if a
        given resource is unknown or if it does not have a usage
        synchronization function.

        If any of the proposed values is over the defined quota, an
        OverQuota exception will be raised with the sorted list of the
        resources which are too high. Otherwise, the method returns a
        list of reservation UUIDs which were created.

        :param context: The request context, for access checks.
        :param expire: An optional parameter specifying an expiration
                       time for the reservations. If it is a simple
                       number, it is interpreted as a number of
                       seconds and added to the current time; if it is
                       a datetime.timedelta object, it will also be
                       added to the current time. A datetime.datetime
                       object will be interpreted as the absolute
                       expiration time. If None is specified, the
                       default expiration time set by
                       --default-reservation-expire will be used (this
                       value will be treated as a number of seconds).
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's project.
        """
        if not project_id:
            project_id = context.project_id
        reservations = self._driver.reserve(context, self._resources, deltas,
                                            expire=expire,
                                            project_id=project_id)

        LOG.debug("Created reservations %s", reservations)

        return reservations

    def commit(self, context, reservations, project_id=None):
        """Commit reservations.

        :param context: The request context, for access checks.
        :param reservations: A list of the reservation UUIDs, as
                             returned by the reserve() method.
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's project.
        """
        # Bug fix: the explicit project_id argument used to be overwritten
        # unconditionally with context.project_id, contradicting both the
        # docstring above and the behavior of reserve(); fall back to the
        # context only when no project_id was supplied.
        if not project_id:
            project_id = context.project_id
        try:
            self._driver.commit(context, reservations, project_id=project_id)
        except Exception:
            # NOTE(Vek): Ignoring exceptions here is safe, because the
            # usage resynchronization and the reservation expiration
            # mechanisms will resolve the issue. The exception is
            # logged, however, because this is less than optimal.
            LOG.exception("Failed to commit reservations %s", reservations)

    def rollback(self, context, reservations, project_id=None):
        # Intentionally a no-op here; presumably reservation expiration
        # cleans up abandoned reservations — confirm against the expiry job.
        pass
class DbQuotaDriver(object):
    """Driver to perform check to enforcement of quotas.

    Also allows to obtain quota information.
    The default driver utilizes the local database.
    """

    # Shared DB API handle, resolved once at class-definition time.
    dbapi = db_api.get_instance()

    def reserve(self, context, resources, deltas, expire=None,
                project_id=None):
        """Normalize the expiration and delegate to the DB reservation call.

        ``expire`` may be None (use the configured default), an int (seconds
        from now), a timedelta (offset from now), or an absolute datetime.

        :raises: exception.InvalidReservationExpiration if ``expire`` cannot
            be interpreted as one of the accepted types.
        """
        # Set up the reservation expiration
        if expire is None:
            expire = CONF.reservation_expire
        if isinstance(expire, int):
            expire = datetime.timedelta(seconds=expire)
        if isinstance(expire, datetime.timedelta):
            expire = timeutils.utcnow() + expire
        if not isinstance(expire, datetime.datetime):
            raise exception.InvalidReservationExpiration(expire=expire)

        # If project_id is None, then we use the project_id in context
        if project_id is None:
            project_id = context.project_id

        return self._reserve(context, resources, deltas, expire,
                             project_id)

    def _reserve(self, context, resources, deltas, expire, project_id):
        # Thin wrapper passing the configured refresh/age thresholds through
        # to the DB layer's quota_reserve implementation.
        return self.dbapi.quota_reserve(context, resources, deltas, expire,
                                        CONF.until_refresh, CONF.max_age,
                                        project_id=project_id)

    def commit(self, context, reservations, project_id=None):
        """Commit reservations.

        :param context: The request context, for access checks.
        :param reservations: A list of the reservation UUIDs, as
                             returned by the reserve() method.
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's project.
        """
        try:
            self.dbapi.reservation_commit(context, reservations,
                                          project_id=project_id)
        except Exception:
            # NOTE(Vek): Ignoring exceptions here is safe, because the
            # usage resynchronization and the reservation expiration
            # mechanisms will resolve the issue. The exception is
            # logged, however, because this is less than optimal.
            LOG.exception("Failed to commit reservations %s", reservations)
QUOTAS = QuotaEngine()
|
993,365 | 58e0befa5a8f9358b533510c75261a82cf80d2ea | from commun.constants.colors import (
color_blanc,
color_bleu_gris,
color_gris_moyen,
color_gris_fonce,
color_gris_clair,
color_orange,
color_rouge,
color_rouge_clair,
color_vert,
color_vert_fonce,
color_vert_moyen,
color_noir,
color_bleu,
color_bleu_dune,
color_jaune_dune,
color_gris_noir)
# ____________LABEL STYLESHEET____________
def create_qlabel_stylesheet(background_color=None,
                             color=color_blanc,
                             font_size="14px",
                             padding="0px 5px 0px 5px",
                             bold=None,
                             italic=None):
    """Build a QLabel stylesheet string.

    :param background_color: color object with a ``hex_string`` attribute,
        or None for a transparent background.
    :param color: text color object with a ``hex_string`` attribute.
    :param font_size: CSS font-size value, e.g. "14px".
    :param padding: CSS padding shorthand.
    :param bold: pass "bold" for bold text; None/falsy means normal weight.
    :param italic: pass "italic" for italic text; None/falsy means normal.
    """
    return """
    QLabel {{
        background-color: {background_color};
        color: {color};
        font-size: {font_size};
        padding: {padding};
        font-weight: {bold};
        font-style: {italic};
    }}
    """.format(
        background_color=background_color.hex_string if background_color else "transparent",
        color=color.hex_string,
        font_size=font_size,
        padding=padding,
        # Fixed: a None default used to be interpolated as the literal text
        # "font-weight: None" / "font-style: None", which is not a valid Qt
        # stylesheet value.  Fall back to "normal", mirroring the
        # None -> "transparent" handling for background_color above.
        bold=bold if bold else "normal",
        italic=italic if italic else "normal"
    )
white_label_stylesheet = create_qlabel_stylesheet(color=color_blanc,
background_color=color_bleu_gris)
white_12_label_stylesheet = create_qlabel_stylesheet(color=color_blanc,
background_color=color_bleu_gris,
font_size="12px")
white_12_no_bg_label_stylesheet = create_qlabel_stylesheet(color=color_blanc, font_size="12px")
gray_moyen_12_no_bg_label_stylesheet = create_qlabel_stylesheet(color=color_gris_moyen, font_size="12px")
white_14_label_no_background_stylesheet = create_qlabel_stylesheet(color=color_blanc, font_size="14px")
test_label_stylesheet = create_qlabel_stylesheet(color=color_orange,
background_color=color_vert,
font_size="14px")
orange_label_stylesheet = create_qlabel_stylesheet(color=color_orange)
red_label_stylesheet = create_qlabel_stylesheet(color=color_rouge)
white_title_label_stylesheet = create_qlabel_stylesheet(color=color_blanc,
background_color=color_bleu_gris,
font_size="16px")
white_22_label_stylesheet = create_qlabel_stylesheet(color=color_blanc,
font_size="22px")
red_title_label_stylesheet = create_qlabel_stylesheet(color=color_blanc,
background_color=color_rouge,
font_size="16px")
red_12_label_stylesheet = create_qlabel_stylesheet(color=color_blanc,
background_color=color_rouge,
font_size="12px")
white_12_no_background_label_stylesheet = create_qlabel_stylesheet(color=color_blanc, font_size="12px")
red_14_label_stylesheet = create_qlabel_stylesheet(color=color_blanc,
background_color=color_rouge,
font_size="14px")
red_16_label_stylesheet = create_qlabel_stylesheet(color=color_blanc,
background_color=color_rouge,
font_size="16px")
gray_16_label_stylesheet = create_qlabel_stylesheet(color=color_noir,
background_color=color_gris_moyen,
font_size="16px")
blue_16_label_stylesheet = create_qlabel_stylesheet(color=color_blanc,
background_color=color_bleu,
font_size="16px")
blue_14_label_stylesheet = create_qlabel_stylesheet(color=color_blanc,
background_color=color_bleu,
font_size="14px")
blue_title_label_stylesheet = create_qlabel_stylesheet(color=color_blanc,
background_color=color_bleu,
font_size="16px")
blue_12_label_stylesheet = create_qlabel_stylesheet(color=color_blanc,
background_color=color_bleu,
font_size="12px")
orange_title_label_stylesheet = create_qlabel_stylesheet(color=color_blanc,
background_color=color_orange,
font_size="16px")
green_title_label_stylesheet = create_qlabel_stylesheet(color=color_blanc,
background_color=color_vert_fonce,
font_size="16px")
green_12_label_stylesheet = create_qlabel_stylesheet(color=color_blanc,
background_color=color_vert_fonce,
font_size="12px")
green_14_label_stylesheet = create_qlabel_stylesheet(color=color_blanc,
background_color=color_vert_moyen,
font_size="14px")
gray_title_label_stylesheet = create_qlabel_stylesheet(color=color_noir,
background_color=color_gris_moyen,
font_size="16px")
gray_12_label_stylesheet = create_qlabel_stylesheet(color=color_noir,
background_color=color_gris_moyen,
font_size="12px")
white_12_bold_label_stylesheet = create_qlabel_stylesheet(color=color_blanc,
background_color=color_bleu_gris,
font_size="12px",
bold="bold")
gray_14_label_stylesheet = create_qlabel_stylesheet(color=color_noir,
background_color=color_gris_moyen,
font_size="14px")
gris_moyen_label_stylesheet = create_qlabel_stylesheet(color=color_blanc,
background_color=color_gris_moyen,
font_size="16px")
gris_fonce_label_stylesheet = create_qlabel_stylesheet(color=color_blanc,
background_color=color_gris_fonce,
font_size="16px")
vert_fonce_label_stylesheet = create_qlabel_stylesheet(color=color_blanc,
background_color=color_vert_fonce,
font_size="16px")
white_20_label_stylesheet = create_qlabel_stylesheet(color=color_blanc,
font_size="20px")
white_title_20_label_stylesheet = create_qlabel_stylesheet(color=color_blanc,
background_color=color_bleu_gris,
font_size="20px")
disable_16_label_stylesheet = create_qlabel_stylesheet(color=color_gris_moyen,
font_size="16px")
white_16_label_stylesheet = create_qlabel_stylesheet(color=color_blanc,
font_size="16px")
white_16_bold_label_stylesheet = create_qlabel_stylesheet(color=color_blanc,
font_size="16px",
bold="bold")
black_12_label_stylesheet = create_qlabel_stylesheet(color=color_noir,
font_size="12px")
black_16_label_stylesheet = create_qlabel_stylesheet(color=color_noir,
font_size="16px")
gray_18_label_stylesheet = create_qlabel_stylesheet(color=color_gris_noir,
font_size="18px")
black_14_label_stylesheet = create_qlabel_stylesheet(color=color_noir,
font_size="14px")
black_20_label_stylesheet = create_qlabel_stylesheet(color=color_noir, font_size="20px")
white_24_label_stylesheet = create_qlabel_stylesheet(color=color_blanc,
font_size="24px")
bleu_gris_20_label_stylesheet = create_qlabel_stylesheet(color=color_bleu_gris,
background_color=color_blanc,
font_size="20px")
bleu_gris_16_label_stylesheet = create_qlabel_stylesheet(color=color_bleu_gris,
background_color=color_blanc,
font_size="16px")
green_20_label_stylesheet = create_qlabel_stylesheet(color=color_blanc,
background_color=color_vert_fonce,
font_size="20px")
yellow_20_label_stylesheet = create_qlabel_stylesheet(color=color_noir,
background_color=color_jaune_dune,
font_size="20px")
green_maj_label_stylesheet = create_qlabel_stylesheet(color=color_blanc,
background_color=color_vert_fonce,
font_size="16px",
padding="0px 20px 0px 20px",
bold="bold")
black_16_italic_label_stylesheet = create_qlabel_stylesheet(color=color_noir,
font_size="16px",
italic="italic")
red_16_bold_label_stylesheet = create_qlabel_stylesheet(color=color_rouge, font_size="16px", bold="bold")
red_14_bold_label_stylesheet = create_qlabel_stylesheet(color=color_rouge, font_size="14px", bold="bold")
black_14_bold_label_stylesheet = create_qlabel_stylesheet(color=color_noir, font_size="14px", bold="bold")
red_12_bold_label_stylesheet = create_qlabel_stylesheet(color=color_rouge, font_size="12px", bold="bold")
green_16_bold_label_stylesheet = create_qlabel_stylesheet(color=color_vert_fonce, font_size="16px", bold="bold")
red_16_no_background_label_stylesheet = create_qlabel_stylesheet(color=color_rouge, font_size="16px")
green_16_label_stylesheet = create_qlabel_stylesheet(color=color_vert, font_size="16px")
orange_16_bold_label_stylesheet = create_qlabel_stylesheet(color=color_orange, font_size="16px", bold="bold")
blue_16_bold_label_stylesheet = create_qlabel_stylesheet(color=color_bleu_dune, font_size="16px", bold="bold")
blue_12_bold_label_stylesheet = create_qlabel_stylesheet(color=color_bleu_dune, font_size="12px", bold="bold")
dune_title_stylesheet = create_qlabel_stylesheet(color=color_jaune_dune,
font_size="20px",
background_color=color_bleu_dune)
gray_italic_stylesheet = create_qlabel_stylesheet(color=color_gris_fonce, font_size="12px", italic="italic")
# ____________BUTTON STYLESHEET____________
button_stylesheet = """
QPushButton {{
background-color: {color_vert_fonce};
border-radius: 5;
color: {color_blanc};
font-size: 22px;
}}
QPushButton:hover {{
background-color: {color_vert_moyen};
}}
QPushButton:pressed {{
border-style: solid;
border-width: 1px;
border-color: {color_gris_moyen}
}}
QPushButton:disabled {{
background-color: {color_gris_moyen};
}}
""".format(
color_blanc=color_blanc.hex_string,
color_vert_fonce=color_vert_fonce.hex_string,
color_vert_moyen=color_vert_moyen.hex_string,
color_vert=color_vert.hex_string,
color_gris_moyen=color_gris_moyen.hex_string,
color_rouge=color_rouge.hex_string,)
button_delete_bobine_selected_stylesheet = """
QPushButton {{
background-color: transparent;
border: none;
}}
""".format(
color_rouge=color_rouge.hex_string,
color_rouge_clair=color_rouge_clair.hex_string)
button_14_stylesheet = """
QPushButton {{
background-color: {color_vert_fonce};
border-radius: 2;
color: {color_blanc};
font-size: 14px;
}}
QPushButton:hover {{
background-color: {color_vert_moyen};
}}
QPushButton:pressed {{
border-style: solid;
border-width: 1px;
border-color: {color_gris_moyen}
}}
QPushButton:disabled {{
background-color: {color_gris_moyen};
}}
""".format(
color_blanc=color_blanc.hex_string,
color_vert_fonce=color_vert_fonce.hex_string,
color_vert_moyen=color_vert_moyen.hex_string,
color_vert=color_vert.hex_string,
color_gris_moyen=color_gris_moyen.hex_string,
color_rouge=color_rouge.hex_string,)
button_no_radius_stylesheet = """
QPushButton {{
background-color: none;
border: none;
color: {color_noir};
font-size: 14px;
}}
QPushButton:hover {{
background-color: {color_vert_moyen};
color: {color_blanc};
}}
QPushButton:pressed {{
border-style: solid;
border-width: 1px;
border-color: {color_gris_moyen}
}}
""".format(
color_noir=color_noir.hex_string,
color_blanc=color_blanc.hex_string,
color_vert_fonce=color_vert_fonce.hex_string,
color_vert_moyen=color_vert_moyen.hex_string,
color_vert=color_vert.hex_string,
color_gris_moyen=color_gris_moyen.hex_string,
color_rouge=color_rouge.hex_string,)
button_no_radius_orange_stylesheet = """
QPushButton {{
background-color: {color_orange};
border: none;
color: {color_blanc};
font-size: 14px;
}}
QPushButton:hover {{
background-color: {color_vert_moyen};
color: {color_blanc};
}}
QPushButton:pressed {{
border-style: solid;
border-width: 1px;
border-color: {color_gris_moyen}
}}
""".format(
color_noir=color_noir.hex_string,
color_blanc=color_blanc.hex_string,
color_vert_fonce=color_vert_fonce.hex_string,
color_vert_moyen=color_vert_moyen.hex_string,
color_vert=color_vert.hex_string,
color_gris_moyen=color_gris_moyen.hex_string,
color_orange=color_orange.hex_string,)
button_no_radius_no_hover_stylesheet = """
QPushButton {{
background-color: none;
border: none;
color: {color_noir};
font-size: 14px;
}}
""".format(
color_noir=color_noir.hex_string,
color_blanc=color_blanc.hex_string,
color_vert_fonce=color_vert_fonce.hex_string,
color_vert_moyen=color_vert_moyen.hex_string,
color_vert=color_vert.hex_string,
color_gris_moyen=color_gris_moyen.hex_string,
color_rouge=color_rouge.hex_string,)
button_red_stylesheet = """
QPushButton {{
background-color: {color_rouge};
border-radius: 5;
color: {color_blanc};
font-size: 22px;
}}
QPushButton:hover {{
background-color: {color_rouge_clair};
}}
QPushButton:pressed {{
border-style: solid;
border-width: 1px;
border-color: {color_gris_moyen}
}}
QPushButton:disabled {{
background-color: {color_gris_moyen};
}}
""".format(
color_blanc=color_blanc.hex_string,
color_vert_fonce=color_vert_fonce.hex_string,
color_vert_moyen=color_vert_moyen.hex_string,
color_vert=color_vert.hex_string,
color_gris_moyen=color_gris_moyen.hex_string,
color_rouge=color_rouge.hex_string,
color_rouge_clair=color_rouge_clair.hex_string)
button_white_stylesheet = """
QPushButton {{
background-color: {color_blanc};
text-align: left;
padding-left: 5px;
border-radius: 0;
color: {color_bleu_gris};
font-size: 16px;
}}
QPushButton:hover {{
background-color: {color_vert_fonce};
color: {color_blanc};
}}
QPushButton:pressed {{
border-style: solid;
border-width: 1px;
border-color: {color_gris_moyen}
}}
QPushButton:disabled {{
background-color: {color_gris_moyen};
}}
""".format(
color_blanc=color_blanc.hex_string,
color_bleu_gris=color_bleu_gris.hex_string,
color_vert_fonce=color_vert_fonce.hex_string,
color_vert_moyen=color_vert_moyen.hex_string,
color_vert=color_vert.hex_string,
color_gris_moyen=color_gris_moyen.hex_string,
color_rouge=color_rouge.hex_string,)
button_green_stylesheet = """
QPushButton {{
background-color: {color_vert_fonce};
color: {color_blanc};
text-align: left;
padding-left: 5px;
border-radius: 0;
font-size: 16px;
}}
QPushButton:hover {{
background-color: {color_vert_moyen};
color: {color_blanc};
}}
QPushButton:pressed {{
border-style: solid;
border-width: 1px;
border-color: {color_gris_moyen}
}}
QPushButton:disabled {{
background-color: {color_gris_moyen};
}}
""".format(
color_blanc=color_blanc.hex_string,
color_bleu_gris=color_bleu_gris.hex_string,
color_vert_fonce=color_vert_fonce.hex_string,
color_vert_moyen=color_vert_moyen.hex_string,
color_vert=color_vert.hex_string,
color_gris_moyen=color_gris_moyen.hex_string,
color_rouge=color_rouge.hex_string,)
button_little_stylesheet = """
QPushButton {{
background-color: {color_vert_fonce};
border-radius: 5;
color: {color_blanc};
font-size: 16px;
}}
QPushButton:hover {{
background-color: {color_vert_moyen};
}}
QPushButton:pressed {{
border-style: solid;
border-width: 1px;
border-color: {color_gris_moyen}
}}
QPushButton:disabled {{
background-color: {color_gris_moyen};
}}
""".format(
color_blanc=color_blanc.hex_string,
color_vert_fonce=color_vert_fonce.hex_string,
color_vert_moyen=color_vert_moyen.hex_string,
color_vert=color_vert.hex_string,
color_gris_moyen=color_gris_moyen.hex_string,
color_rouge=color_rouge.hex_string,)
button_little_red_stylesheet = """
QPushButton {{
background-color: {color_rouge};
border-radius: 5;
color: {color_blanc};
font-size: 16px;
}}
QPushButton:hover {{
background-color: {color_rouge_clair};
}}
QPushButton:pressed {{
border-style: solid;
border-width: 1px;
border-color: {color_gris_moyen}
}}
QPushButton:disabled {{
background-color: {color_gris_moyen};
}}
""".format(
color_blanc=color_blanc.hex_string,
color_vert_fonce=color_vert_fonce.hex_string,
color_vert_moyen=color_vert_moyen.hex_string,
color_rouge_clair=color_rouge_clair.hex_string,
color_vert=color_vert.hex_string,
color_gris_moyen=color_gris_moyen.hex_string,
color_rouge=color_rouge.hex_string,)
button_stylesheet_unselected = """
QPushButton {{
background-color: {color_gris_moyen};
border-radius: 5;
color: {color_blanc};
font-size: 22px;
}}
QPushButton:hover {{
background-color: {color_gris_fonce};
}}
QPushButton:pressed {{
border-style: solid;
border-width: 1px;
border-color: {color_gris_moyen}
}}
QPushButton:disabled {{
background-color: {color_gris_moyen};
}}
""".format(
color_blanc=color_blanc.hex_string,
color_vert_fonce=color_vert_fonce.hex_string,
color_vert_moyen=color_vert_moyen.hex_string,
color_vert=color_vert.hex_string,
color_gris_moyen=color_gris_moyen.hex_string,
color_gris_fonce=color_gris_fonce.hex_string,
color_rouge=color_rouge.hex_string,)
button_menu_stylesheet = """
QPushButton {{
padding: 0px 10px 0px 10px;
background-color: {color_vert_fonce};
border: none;
color: {color_blanc};
font-size: 16px;
}}
""".format(
color_blanc=color_blanc.hex_string,
color_vert_fonce=color_vert_fonce.hex_string,
color_vert_moyen=color_vert_moyen.hex_string,
color_vert=color_vert.hex_string,
color_gris_moyen=color_gris_moyen.hex_string,
color_gris_fonce=color_gris_fonce.hex_string,
color_rouge=color_rouge.hex_string,)
button_menu_stylesheet_unselected = """
QPushButton {{
padding: 0px 10px 0px 10px;
background-color: {color_gris_fonce};
border: none;
color: {color_blanc};
font-size: 16px;
}}
QPushButton:hover {{
background-color: {color_vert_moyen};
}}
QPushButton:pressed {{
border-style: solid;
border-width: 1px;
border-color: {color_gris_moyen}
}}
QPushButton:disabled {{
background-color: {color_gris_fonce};
}}
""".format(
color_blanc=color_blanc.hex_string,
color_vert_fonce=color_vert_fonce.hex_string,
color_vert_moyen=color_vert_moyen.hex_string,
color_vert=color_vert.hex_string,
color_gris_moyen=color_gris_moyen.hex_string,
color_gris_fonce=color_gris_fonce.hex_string,
color_rouge=color_rouge.hex_string,)
button_arrow_stylesheet = """
QPushButton {{
background-color: {color_blanc};
border: none;
}}
QPushButton:pressed {{
border-style: solid;
border-width: 1px;
border-color: {color_gris_moyen};
}}
QPushButton:disabled {{
background-color: {color_gris_moyen};
}}
""".format(
color_blanc=color_blanc.hex_string,
color_vert_fonce=color_vert_fonce.hex_string,
color_vert_moyen=color_vert_moyen.hex_string,
color_vert=color_vert.hex_string,
color_gris_moyen=color_gris_moyen.hex_string,
color_rouge=color_rouge.hex_string,)
button_red_cross_stylesheet = """
QPushButton {{
background-color: {color_rouge};
border: none;
}}
""".format(
color_blanc=color_blanc.hex_string,
color_vert_fonce=color_vert_fonce.hex_string,
color_vert_moyen=color_vert_moyen.hex_string,
color_vert=color_vert.hex_string,
color_gris_moyen=color_gris_moyen.hex_string,
color_rouge=color_rouge.hex_string,)
button_gray_cross_stylesheet = """
QPushButton {{
background-color: {color_gris_moyen};
border: none;
}}
""".format(
color_blanc=color_blanc.hex_string,
color_vert_fonce=color_vert_fonce.hex_string,
color_vert_moyen=color_vert_moyen.hex_string,
color_vert=color_vert.hex_string,
color_gris_moyen=color_gris_moyen.hex_string,
color_rouge=color_rouge.hex_string,)
button_blue_cross_stylesheet = """
QPushButton {{
background-color: {color_bleu};
border: none;
}}
""".format(
color_blanc=color_blanc.hex_string,
color_vert_fonce=color_vert_fonce.hex_string,
color_vert_moyen=color_vert_moyen.hex_string,
color_vert=color_vert.hex_string,
color_bleu=color_bleu.hex_string,
color_rouge=color_rouge.hex_string,)
button_dropdown_stylesheet = """
QPushButton {{
background-color: {color_blanc};
color: {color_noir};
padding-left: 5px;
font-size: 16px;
border-style: none;
text-align:left;
}}
QPushButton:hover {{
color: {color_vert_moyen};
}}
QPushButton:disabled {{
background-color: {color_gris_moyen};
color: {color_gris_clair};
}}
""".format(
color_blanc=color_blanc.hex_string,
color_vert_fonce=color_vert_fonce.hex_string,
color_vert_moyen=color_vert_moyen.hex_string,
color_vert=color_vert.hex_string,
color_gris_moyen=color_gris_moyen.hex_string,
color_rouge=color_rouge.hex_string,
color_noir=color_noir.hex_string,
color_gris_clair=color_gris_clair.hex_string)
button_dropdown_placeholder_stylesheet = """
QPushButton {{
background-color: {color_blanc};
color: {color_gris_moyen};
padding-left: 5px;
font-size: 16px;
border-style: none;
text-align:left;
}}
""".format(
color_blanc=color_blanc.hex_string,
color_vert_fonce=color_vert_fonce.hex_string,
color_vert_moyen=color_vert_moyen.hex_string,
color_vert=color_vert.hex_string,
color_gris_moyen=color_gris_moyen.hex_string,
color_rouge=color_rouge.hex_string,
color_noir=color_noir.hex_string,
color_gris_clair=color_gris_clair.hex_string)
button_no_stylesheet = """
QPushButton {
background-color: none;
border: none;
}
"""
# ____________CHECK BOX STYLESHEET____________
check_box_off_stylesheet = """
QPushButton {{
background-color: {color_blanc};
border-radius: 2px;
}}
QPushButton:hover {{
background-color: {color_vert_moyen};
}}
""".format(
color_blanc=color_blanc.hex_string,
color_vert_fonce=color_vert_fonce.hex_string,
color_vert_moyen=color_vert_moyen.hex_string,
color_vert=color_vert.hex_string,
color_gris_moyen=color_gris_moyen.hex_string,
color_gris_fonce=color_gris_fonce.hex_string,
color_rouge=color_rouge.hex_string,)
check_box_stylesheet_2 = """
QPushButton {{
background-color: {color_blanc};
border-color: {color_gris_fonce};
border-style: solid;
border-width: 2px;
border-radius: 2px;
}}
QPushButton:hover {{
background-color: {color_vert_moyen};
}}
""".format(
color_blanc=color_blanc.hex_string,
color_vert_fonce=color_vert_fonce.hex_string,
color_vert_moyen=color_vert_moyen.hex_string,
color_vert=color_vert.hex_string,
color_gris_moyen=color_gris_moyen.hex_string,
color_gris_fonce=color_gris_fonce.hex_string,
color_rouge=color_rouge.hex_string,)
check_box_on_stylesheet = """
QPushButton {{
background-color: {color_vert_fonce};
border-radius: 2px;
}}
QPushButton:hover {{
background-color: {color_vert_fonce};
}}
""".format(
color_blanc=color_blanc.hex_string,
color_vert_fonce=color_vert_fonce.hex_string,
color_vert_moyen=color_vert_moyen.hex_string,
color_vert=color_vert.hex_string,
color_gris_moyen=color_gris_moyen.hex_string,
color_gris_fonce=color_gris_fonce.hex_string,
color_rouge=color_rouge.hex_string,)
check_box_disabled_stylesheet = """
QPushButton {{
background-color: {color_gris_moyen};
border-radius: 2px;
}}
""".format(
color_blanc=color_blanc.hex_string,
color_vert_fonce=color_vert_fonce.hex_string,
color_vert_moyen=color_vert_moyen.hex_string,
color_vert=color_vert.hex_string,
color_gris_moyen=color_gris_moyen.hex_string,
color_gris_fonce=color_gris_fonce.hex_string,
color_rouge=color_rouge.hex_string,)
check_box_unselected_stylesheet = """
QPushButton {{
background-color: {color_gris_moyen};
border-radius: 2px;
}}
QPushButton:hover {{
background-color: {color_vert_moyen};
}}
QPushButton:pressed {{
border-style: solid;
border-width: 1px;
border-color: {color_gris_moyen}
}}
""".format(
color_blanc=color_blanc.hex_string,
color_vert_fonce=color_vert_fonce.hex_string,
color_vert_moyen=color_vert_moyen.hex_string,
color_vert=color_vert.hex_string,
color_gris_moyen=color_gris_moyen.hex_string,
color_gris_fonce=color_gris_fonce.hex_string,
color_rouge=color_rouge.hex_string,)
# ____________TEXT EDIT STYLESHEET____________
white_text_edit_stylesheet = """
QTextEdit {{
background-color: {color_blanc};
color: {color_noir};
font-size: 14px;
border: none;
selection-background-color: {color_gris_moyen};
}}
""".format(
color_gris_moyen=color_gris_moyen.hex_string,
color_vert=color_vert.hex_string,
color_blanc=color_blanc.hex_string,
color_noir=color_noir.hex_string)
red_text_edit_stylesheet = """
QTextEdit {{
background-color: {color_blanc};
color: {color_rouge};
font-size: 14px;
border: none;
}}
""".format(
color_blanc=color_blanc.hex_string,
color_rouge=color_rouge.hex_string)
# ____________LINE EDIT STYLESHEET____________
line_edit_stylesheet = """
QLineEdit {{
qproperty-frame: false;
background-color: {color_blanc};
color: {color_noir};
font-size: 16px;
border: none;
selection-background-color: {color_gris_moyen};
}}
QLineEdit:focus {{
color: {color_vert_fonce};
}}
""".format(
color_gris_moyen=color_gris_moyen.hex_string,
color_vert_fonce=color_vert_fonce.hex_string,
color_blanc=color_blanc.hex_string,
color_gris_fonce=color_gris_fonce.hex_string,
color_noir=color_noir.hex_string,)
line_edit_green_stylesheet = """
QLineEdit {{
qproperty-frame: false;
background-color: {color_vert_fonce};
color: {color_blanc};
font-size: 16px;
border: none;
selection-background-color: {color_gris_fonce};
}}
""".format(
color_vert_fonce=color_vert_fonce.hex_string,
color_blanc=color_blanc.hex_string,
color_gris_fonce=color_gris_fonce.hex_string,
color_noir=color_noir.hex_string,)
line_edit_red_stylesheet = """
QLineEdit {{
qproperty-frame: false;
background-color: {color_blanc};
color: {color_rouge};
font-size: 16px;
border: 1px solid {color_rouge};
selection-background-color: {color_gris_fonce};
}}
""".format(
color_vert_fonce=color_vert_fonce.hex_string,
color_blanc=color_blanc.hex_string,
color_gris_fonce=color_gris_fonce.hex_string,
color_noir=color_noir.hex_string,
color_rouge=color_rouge.hex_string)
# ____________SCROLLBAR STYLESHEET____________
scroll_bar_stylesheet = """
QScrollBar:vertical {{
background-color: {color_blanc};
width: 14px;
padding: 2px;
}}
QScrollBar::handle:vertical {{
background-color: {color_gris_moyen};
min-height: 20px;
border-radius: 5px;
}}
QScrollBar::handle:vertical:hover {{
background-color: {color_gris_fonce};
}}
QScrollBar::handle:vertical:pressed {{
background-color: {color_gris_fonce};
}}
QScrollBar::add-line:vertical {{
width: 0px;
}}
QScrollBar::sub-line:vertical {{
width: 0px;
}}
QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {{
background: {color_blanc};
}}
""".format(
color_vert_fonce=color_vert_fonce.hex_string,
color_vert_moyen=color_vert_moyen.hex_string,
color_gris_moyen=color_gris_moyen.hex_string,
color_gris_clair=color_gris_clair.hex_string,
color_gris_fonce=color_gris_fonce.hex_string,
color_blanc=color_blanc.hex_string)
# ____________RADIOBUTTON STYLESHEET____________
radio_button_stylesheet = """
QRadioButton {{
background: {color_vert_fonce}
}}
""".format(
color_blanc=color_blanc.hex_string,
color_vert_fonce=color_vert_fonce.hex_string,
color_vert_moyen=color_vert_moyen.hex_string,
color_vert=color_vert.hex_string,
color_gris_moyen=color_gris_moyen.hex_string,
color_gris_fonce=color_gris_fonce.hex_string,
color_rouge=color_rouge.hex_string,)
|
993,366 | 7a27c5b3af5c1da8e99ccac6ac340b0536f9273e | from enum import Enum
class SubscriptionStatus(Enum):
    """Lifecycle states a subscription can be in.

    Integer values are explicit so they stay stable if members are
    reordered (e.g. when persisted or sent over the wire).
    """
    Active = 1
    Expired = 2
    Cancelled = 3
    PendingCancellation = 4
    PendingActivation = 5
class SubscriptionEventType(Enum):
    """Kinds of events recorded against a subscription's history."""
    StatusChange = 1
    Renewal = 2
    MailingAddressChange = 3
    Cancellation = 4
    Reactivation = 5
    Creation = 6
|
993,367 | 08b1a1acd663d7bbfd437b5a98ac3875ecc5e90a | """
Two hard-coded parallel lists: STATES_LIST and CAPITALS_LIST map U.S. states to their capitals by index.
"""
STATES_LIST = ['Alabama', 'Alaska', 'Arizona', 'Arkansas', 'California', 'Colorado', 'Connecticut', 'Delaware',
'Florida', 'Georgia', 'Hawaii', 'Idaho', 'Illinois', 'Indiana', 'Iowa', 'Kansas', 'Kentucky',
'Louisiana', 'Maine', 'Maryland', 'Massachusetts', 'Michigan', 'Minnesota', 'Mississippi',
'Missouri', 'Montana', 'Nebraska', 'Nevada', 'New Hampshire', 'New Jersey', 'New Mexico',
'New York', 'North Carolina', 'North Dakota', 'Ohio', 'Oklahoma', 'Oregon', 'Pennsylvania',
'Rhode Island', 'South Carolina', 'South Dakota', 'Tennessee', 'Texas', 'Utah', 'Vermont',
'Virginia', 'Washington', 'West Virginia', 'Wisconsin', 'Wyoming']
CAPITALS_LIST = ['Montgomery', 'Juneau', 'Phoenix', 'Little Rock', 'Sacramento', 'Denver', 'Hartford', 'Dover',
'Tallahassee', 'Atlanta', 'Honolulu', 'Boise', 'Springfield', 'Indianapolis', 'Des Moines',
'Topeka', 'Frankfort', 'Baton Rouge', 'Augusta', 'Annapolis', 'Boston', 'Lansing', 'Saint Paul',
'Jackson', 'Jefferson City', 'Helena', 'Lincoln', 'Carson City', 'Concord', 'Trenton', 'Santa Fe',
'Albany', 'Raleigh', 'Bismarck', 'Columbus', 'Oklahoma City', 'Salem', 'Harrisburg', 'Providence',
'Columbia', 'Pierre', 'Nashville', 'Austin', 'Salt Lake City', 'Montpelier', 'Richmond', 'Olympia',
'Charleston', 'Madison', 'Cheyenne']
def main():
    """Prompt the user for a state name and report its capital if known.

    :return: Nothing
    """
    check_user_input(user_input_state())
def user_input_state():
    """Prompt for a U.S. state name and return it with normalized casing.

    Each word is capitalized (``"new york"`` -> ``"New York"``) so
    multi-word states can match STATES_LIST; the old first-letter-only
    approach missed them and raised IndexError on empty input.

    :return: String (empty if the user entered nothing)
    """
    user_selected_state = str(input("Please enter one of the U.S. states. If the state exist "
                                    "I'll tell you its' capital\n"))
    return user_selected_state.strip().title()
def check_user_input(user_selected_state):
    """Print the capital of *user_selected_state*, or a not-found message.

    :param user_selected_state: candidate state name to look up
    :return: Nothing
    """
    try:
        # index() raises ValueError when the state is unknown (EAFP).
        position = STATES_LIST.index(user_selected_state)
    except ValueError:
        print("The state you entered wasn't found in our list")
    else:
        print('You entered {}, the capital is {}'.format(user_selected_state, CAPITALS_LIST[position]))
if __name__ == '__main__':
main()
|
var1 = 'Selamat Belajar!'
var2 = "Bahasa Pemograman Python"
print("var1[0]: ", var1[0])
# Fixed: the label printed "var2[3:8]" but the slice taken was var2[1:8];
# the slice now matches the label.
print("var2[3:8]: ", var2[3:8])
|
def read_data():
    """Read input.txt and return its lines with surrounding whitespace stripped."""
    with open('input.txt') as handle:
        return [line.strip() for line in handle]
def write_data(data):
    """Write each item of *data* to output.txt, one item per line."""
    with open('output.txt', 'w') as handle:
        handle.writelines(str(item) + '\n' for item in data)
###
class Field(object):
    """A ticket field with two valid inclusive ranges, e.g. "1-3 or 5-7"."""

    def __init__(self, name, lower, upper):
        self.name = name
        # Each range string "a-b" becomes a [a, b] pair of ints.
        self.lower = [int(part) for part in lower.split('-')]
        self.upper = [int(part) for part in upper.split('-')]

    def in_lower(self, t):
        """Return True if *t* lies inside the first range (inclusive)."""
        return self.lower[0] <= t <= self.lower[1]

    def in_upper(self, t):
        """Return True if *t* lies inside the second range (inclusive)."""
        return self.upper[0] <= t <= self.upper[1]

    def in_range(self, t):
        """Return True if *t* is valid for this field (either range)."""
        return self.in_lower(t) or self.in_upper(t)
def parse_fields(data):
    """Parse field definitions until the first blank line.

    Returns (fields, remaining_lines) where remaining_lines starts just
    after the blank separator.  If no blank line is found, falls off the
    end and returns None (matching the original behavior).
    """
    fields = []
    for index, line in enumerate(data):
        if not line:
            return fields, data[index + 1:]
        name, ranges = line.split(":")
        lower, upper = ranges.split(" or ")
        fields.append(Field(name, lower, upper))
def parse_your_ticket(data):
    """Consume the "your ticket" section and return the ticket as ints.

    Pops three lines from *data* in place: the section header, the ticket
    line, and the trailing blank separator.

    Fixed: values are parsed with int() instead of eval() on raw file
    contents, which was an arbitrary-code-execution hazard.
    """
    data.pop(0)  # section header
    my_ticket = data.pop(0)
    data.pop(0)  # blank separator
    return [int(value) for value in my_ticket.split(',')]
def parse_nearby_tickets(data):
    """Consume the "nearby tickets" header and parse the remaining lines.

    Returns a list of tickets, each a list of ints.

    Fixed: values are parsed with int() instead of eval() on raw file
    contents, which was an arbitrary-code-execution hazard.
    """
    data.pop(0)  # section header
    return [[int(value) for value in line.split(',')] for line in data]
import numpy as np
def error_invalid_ticket(ticket, fields):
    """Sum the values in *ticket* that are valid for no field at all.

    Returns 0 when every value matches at least one field's ranges.
    (Same result as the numpy-based original, without the array scaffolding.)
    """
    return sum(value for value in ticket
               if not any(field.in_range(value) for field in fields))
def part1():
    """Solve part 1: the ticket-scanning error rate over nearby tickets."""
    data = read_data()
    fields, data = parse_fields(data)
    parse_your_ticket(data)  # own ticket is not needed for part 1
    nearby_tickets = parse_nearby_tickets(data)
    return sum(error_invalid_ticket(ticket, fields)
               for ticket in nearby_tickets)
###
def part2():
    """Solve part 2: product of the "departure" fields on my ticket.

    Determines which ticket position corresponds to each field by
    elimination over all valid tickets, then multiplies my ticket's
    values at the positions of fields whose name starts with "departure".

    Fixed: np.asscalar() was removed from NumPy (deprecated 1.16,
    removed 1.23); scalar extraction now uses int() / explicit indexing.
    """
    data = read_data()
    fields, data = parse_fields(data)
    my_ticket = parse_your_ticket(data)
    nearby_tickets = parse_nearby_tickets(data)
    valid_tickets = [t for t in nearby_tickets
                     if error_invalid_ticket(t, fields) == 0]
    valid_tickets = [my_ticket] + valid_tickets

    from collections import Counter
    # For every field, count at how many tickets each position is valid.
    field_order = {}
    for f in fields:
        order = Counter()
        for ticket in valid_tickets:
            for position, value in enumerate(ticket):
                if f.in_range(value):
                    order[position] += 1
        field_order[f.name] = order

    max_len_ticket = max(len(t) for t in valid_tickets)
    elim = np.zeros((len(fields), max_len_ticket))
    fieldnames, counters = zip(*field_order.items())
    for row, counter in enumerate(counters):
        for position, count in counter.items():
            elim[row, position] = count

    # A (field, position) cell survives only if the field was valid there
    # on every single ticket.
    useme = elim - (len(valid_tickets) - 1)
    useme[np.where(useme < 0)] = 0

    position = np.arange(max_len_ticket)
    final_orders = {}
    while len(final_orders) < len(fieldnames):
        # Fields with exactly one surviving position are resolved, their
        # columns removed, and the process repeats.
        rows = np.where(np.sum(useme, 1) == 1)[0]
        delme = []
        for row in rows:
            col = np.where(useme[row] == 1)[0]
            r = int(row)          # was np.asscalar(row)
            c = int(col[0])       # was np.asscalar(col)
            delme.append(c)
            final_orders[fieldnames[r]] = position[c]
        useme = np.delete(useme, delme, 1)
        position = np.delete(position, delme)

    p = 1
    for name, pos in final_orders.items():
        if name.startswith('departure'):
            p = p * my_ticket[pos]
    return p
print("part 1: {}".format(part1()))
print("part 2: {}".format(part2()))
|
993,370 | 94ee2e9d6f661402ea9b2716d4888b26e48a15de | import django_filters
from django.db.models import Q
from .models import *
class GoodsFilter(django_filters.rest_framework.FilterSet):
    """Filter set for Goods list endpoints.

    Supports an inclusive price range on ``shop_price``, the boolean
    flags on the Goods model, and filtering by category.
    """
    # Inclusive lower/upper bounds on Goods.shop_price.
    min_price = django_filters.NumberFilter(field_name="shop_price", lookup_expr='gte')
    max_price = django_filters.NumberFilter(field_name="shop_price", lookup_expr='lte')
    is_new = django_filters.BooleanFilter(field_name="is_new")
    is_hot = django_filters.BooleanFilter(field_name="is_hot")
    is_normal = django_filters.BooleanFilter(field_name="is_normal")
    # Filters on the related category (numeric PK match).
    category_type = django_filters.NumberFilter(field_name="category")

    class Meta:
        model = Goods
        fields = ['min_price', 'max_price', 'is_new', 'is_normal', 'is_hot', 'category_type']
993,371 | 6644390d45f717835b739fd487081b79b8d66b16 | import socket
class C2Manager:
    """Minimal TCP listener ("C2 manager") bound to 127.0.0.1:4444."""

    def __init__(self):
        # Name-mangled listening socket plus the bind address/port.
        self.__C2Sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.__Server = "127.0.0.1"
        self.__Port = 4444

    def Run(self):
        """Bind, listen, and enter the accept loop.

        Returns False if the bind fails; otherwise blocks in the main
        loop (the trailing ``return True`` is only reached if that loop
        ever exits).
        """
        try:
            self.__C2Sock.bind((self.__Server, self.__Port)) # throws
        except socket.error as error:
            print("[~] C2Manager.Run error: {}".format(error))
            return False
        # Start listening on socket
        self.__C2Sock.listen(1)
        self.__C2ManMainloop()
        return True

    def __C2ManMainloop(self):
        # Accept connections forever, logging each peer address.
        while True:
            # wait to accept a connection - blocking call
            # NOTE(review): `conn` is accepted but never used; the send/recv
            # helpers below operate on the *listening* socket, which cannot
            # send or receive data -- confirm whether they should use `conn`.
            conn, addr = self.__C2Sock.accept()
            print('Connected with ' + addr[0] + ':' + str(addr[1]))

    def __DoHandshake(self):
        # NOTE(review): recv() returns bytes, so comparing against the str
        # "client install" is always False on Python 3 -- likely needs
        # b"client install" or a decode() first.
        if self.C2ManReceive() == "client install":
            self.C2ManSend("ok")

    def C2ManReceive(self):
        # Returns up to 1024 raw bytes from the socket.
        buffer = self.__C2Sock.recv(1024) # does not throw !
        return buffer

    def C2ManSend(self, log):
        # Sends the given text ASCII-encoded.
        self.__C2Sock.sendall(log.encode("ascii")) # does not throw !

    def GetFileContent(self):
        # NOTE(review): dylib_data starts as str but C2ManReceive() yields
        # bytes, so `dylib_data += buffer` raises TypeError on Python 3 --
        # confirm the intended type.
        dylib_data = ""
        while True:
            buffer = self.C2ManReceive()
            if not buffer:
                break
            dylib_data += buffer
        return dylib_data

    def __notify(self):
        self.C2ManSend("Client Run")
|
993,372 | 7d4993e27c36eba9243be0a67f8bdd89b3f98e74 | import collections.Counter
def countCharacters(words: [str], chars: str) -> int:
    """Return the total length of the words that can be formed from *chars*.

    A word counts if every character it needs occurs in *chars* at least
    as many times as in the word (LeetCode 1160).

    :param words: candidate words
    :param chars: the pool of available characters
    :return: sum of lengths of the formable words
    """
    # Local import: the module-level `import collections.Counter` is not a
    # valid way to bring Counter into scope (it raises ImportError).
    from collections import Counter
    available = Counter(chars)
    total = 0  # renamed from `sum`, which shadowed the builtin
    for word in words:
        needed = Counter(word)
        # A word is formable iff no required character exceeds its supply.
        if all(available[ch] >= count for ch, count in needed.items()):
            total += len(word)
    return total
if __name__ == '__main__':
    # Fixed: the original guard compared two string literals
    # ('__name' == '__main__'), which is always False, so it never ran.
    print()
|
993,373 | a41a5c242edd1a75d2ae39aa0d518309bfd7305d | from temp_var import h
print(h)
# Rebinding demo: assignment copies the reference, and immutable values
# behave like value copies.
prim_int = 5
prim_str = "String"
prim_float = 5.5
prim_bool = True
prim_copy = prim_int
prim_int = 6
# prim_copy still holds 5: rebinding prim_int does not affect the copy.
print(prim_copy)
print(prim_int)
class BananaGabi:
    # Empty demo class; instances accept arbitrary attributes.
    def __init__(self):
        pass
b1 = BananaGabi()
# b2 is another reference to the SAME object, so an attribute added via
# b2 is visible through b1 as well.
b2 = b1
b2.gabrizosa = "cosas"
print("Fin") |
993,374 | 338d41613b242672d7216d4371de423936545ea1 | """cd-dot-cz-price-search
Queries cd.cz for train ticket prices and emails a summary.
AWS Lambda optimized.
Example usage:
$ python lambda_function.py
"""
import argparse
import ast
import csv
import datetime
import io
import json
import pickle
import re
import boto3
import requests
AWS_REGION = None
EMAIL_FROM = None
EMAIL_TO = None
JOURNEY_ORIGIN = None
VIA = None
JOURNEY_DESTINATION = None
DATES_TO_QUERY = None
EMAIL_SUBJECT = "cd-dot-cz-price-search results"
CSV_COLUMNS = ["date", "origin", "destination", "price"]
EUR_CZK = 25.59
H_IN_CZK = 100
REQUEST_HEADERS = {
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"
}
FIRST_REQUEST_URL = "https://www.cd.cz/de/spojeni-a-jizdenka/"
SECOND_REQUEST_URL = f"{FIRST_REQUEST_URL}spojeni-tam/"
def lambda_handler(
    event, context, make_network_requests=True
):  # pylint: disable=unused-argument
    """The main entrypoint for AWS Lambda.

    Copies the configuration from *event* into module-level globals, then
    queries one outbound and one return journey per date and emails the
    collected prices as CSV.  Returns a dict with statusCode 200 and the
    CSV body JSON-encoded.
    """
    global AWS_REGION, EMAIL_FROM  # pylint: disable=global-statement
    global EMAIL_TO, JOURNEY_ORIGIN, VIA  # pylint: disable=global-statement
    global JOURNEY_DESTINATION  # pylint: disable=global-statement
    global DATES_TO_QUERY  # pylint: disable=global-statement
    AWS_REGION = event["AWS_REGION"]
    EMAIL_FROM = event["EMAIL_FROM"]
    EMAIL_TO = event["EMAIL_TO"]
    JOURNEY_ORIGIN = event["JOURNEY_ORIGIN"]
    VIA = event["VIA"]
    JOURNEY_DESTINATION = event["JOURNEY_DESTINATION"]
    DATES_TO_QUERY = event["DATES_TO_QUERY"]
    csv_dict = []
    dates = get_dates(DATES_TO_QUERY, start_date=datetime.date.today())
    for date in dates:
        # run_query appends to the list referenced here and returns it, so
        # csv_dict keeps accumulating across both legs and all dates.
        query_data_object = {
            "date": date,
            "csv_dict": csv_dict,
            "make_network_requests": make_network_requests,
        }
        # Outbound leg ...
        csv_dict = run_query(
            query_data_object,
            origin=JOURNEY_ORIGIN,
            destination=JOURNEY_DESTINATION,
        )
        # ... and the return leg for the same date.
        csv_dict = run_query(
            query_data_object,
            origin=JOURNEY_DESTINATION,
            destination=JOURNEY_ORIGIN,
        )
    csv_email_content = get_csv_email_content(csv_dict)
    send_email(csv_email_content, make_network_requests)
    return {"statusCode": 200, "body": json.dumps(csv_email_content)}
def send_email(email_body, make_network_requests):
    """Deliver *email_body* via AWS SES, or just print it in dry-run mode."""
    if not make_network_requests:
        # Dry run: show what would have been sent instead of using SES.
        print(email_body)
        return
    ses = boto3.client("ses", region_name=AWS_REGION)
    ses.send_email(
        Source=EMAIL_FROM,
        Destination={"ToAddresses": [EMAIL_TO]},
        Message={
            "Subject": {"Data": EMAIL_SUBJECT},
            "Body": {"Text": {"Data": email_body}},
        },
    )
def get_dates(amount, start_date):
    """Return *amount* consecutive dates as 'DD.MM.YYYY' strings,
    beginning at *start_date*."""
    return [
        (start_date + datetime.timedelta(days=offset)).strftime("%d.%m.%Y")
        for offset in range(amount)
    ]
def get_api_response_string(payload, make_network_requests):
    """Chaining two POST requests to receive HTML with prices.

    The first POST registers the search and returns a dict-literal body
    whose "guid" identifies the query; the second POST fetches the result
    page for that guid.  With make_network_requests=False, previously
    pickled responses are replayed instead (offline/test mode).

    Returns the second response body decoded as UTF-8.
    """
    with requests.session() as session:
        if make_network_requests:
            first_response = session.post(
                FIRST_REQUEST_URL, data=payload, headers=REQUEST_HEADERS
            )
            # pickle.dump(first_response, open("first_response.pickle", "wb"))
            first_response_string = first_response.content.decode("UTF-8")
            # The endpoint answers with a Python-dict-shaped body rather
            # than strict JSON, hence ast.literal_eval, not json.loads.
            first_response_dict = ast.literal_eval(first_response_string)
            guid = first_response_dict["guid"]
            second_response = session.post(f"{SECOND_REQUEST_URL}{guid}")
            # pickle.dump(second_response,
            #             open("second_response.pickle", "wb"))
        else:
            # NOTE(review): pickle files are opened without being closed,
            # and unpickling is only safe for locally produced files.
            first_response = pickle.load(open("first_response.pickle", "rb"))
            second_response = pickle.load(open("second_response.pickle", "rb"))
        second_response_string = second_response.content.decode("UTF-8")
        return second_response_string
def run_query(query_data_object, origin, destination):
    """Generating payload, querying API, extracting lowest price
    and adding an entry to the results list.

    *query_data_object* carries the travel date, the shared results list
    and the network flag; VIA is read from module-level configuration.
    Returns the (mutated) results list.
    """
    csv_dict = query_data_object["csv_dict"]
    payload = get_payload(query_data_object["date"], origin, destination, VIA)
    second_response_string = get_api_response_string(
        payload, query_data_object["make_network_requests"]
    )
    lowest_price = get_lowest_price(second_response_string)
    # One CSV row per queried leg; lowest_price may be the string "Error".
    csv_dict.append(
        {
            CSV_COLUMNS[0]: query_data_object["date"],
            CSV_COLUMNS[1]: origin,
            CSV_COLUMNS[2]: destination,
            CSV_COLUMNS[3]: lowest_price,
        }
    )
    return csv_dict
def get_lowest_price(second_response_string):
    """Return the cheapest positive price (in whole EUR) found in the
    response body, or the string "Error" when none is present.

    Prices appear as `"price":<haler>,` fragments; halers are converted
    to CZK (H_IN_CZK) and then to EUR (EUR_CZK), truncated to int.
    """
    raw_prices = re.findall('(?<="price":)(.*?)(?=,)', second_response_string)
    prices_eur = []
    for raw in raw_prices:
        try:
            eur = int(int(raw) / H_IN_CZK / EUR_CZK)
        except ValueError:
            # Non-numeric match -- skip it.
            continue
        if eur > 0:
            prices_eur.append(eur)
    return min(prices_eur) if prices_eur else "Error"
def get_csv_email_content(csv_dict):
    """Render the collected result rows as CSV text (header + one row each)."""
    buffer = io.StringIO()
    writer = csv.DictWriter(buffer, fieldnames=CSV_COLUMNS)
    writer.writeheader()
    writer.writerows(csv_dict)
    return buffer.getvalue()
def get_payload(date, origin, destination, via):
    """Generating the Payload string for the API.

    Builds the URL-encoded form body for the cd.cz connection search:
    one adult passenger, 2nd class, departing at the start of *date*,
    from *origin* to *destination* via *via*.  %5B/%5D are the encoded
    [ ] of the form field names, e.g. stations[from][name].
    """
    payload = (
        "ttCombination=25&"
        "formType=1&"
        "isReturnOnly=false&"
        "stations%5Bfrom%5D%5BlistID%5D=100003&"
        f"stations%5Bfrom%5D%5Bname%5D={origin}&"
        "stations%5Bfrom%5D%5BerrorName%5D=From&"
        "stations%5Bto%5D%5BlistID%5D=100003&"
        f"stations%5Bto%5D%5Bname%5D={destination}&"
        "stations%5Bto%5D%5BerrorName%5D=To&"
        "stations%5Bvias%5D%5B0%5D%5BlistID%5D=0&"
        f"stations%5Bvias%5D%5B0%5D%5Bname%5D={via}&"
        "stations%5Bvias%5D%5B0%5D%5BerrorName%5D=Via%5B1%5D&"
        "stations%5BisViaChange%5D=false&"
        "services%5Bbike%5D=false&"
        "services%5Bchildren%5D=false&"
        "services%5BwheelChair%5D=false&"
        "services%5Brefreshment%5D=false&"
        "services%5BcarTrain%5D=false&"
        "services%5BsilentComp%5D=false&"
        "services%5BladiesComp%5D=false&"
        "services%5BpowerSupply%5D=false&"
        "services%5BwiFi%5D=false&"
        "services%5BinSenior%5D=false&"
        "services%5Bbeds%5D=false&"
        "services%5BserviceClass%5D=Class2&"
        "dateTime%5BisReturn%5D=false&"
        f"dateTime%5Bdate%5D={date}&"
        "dateTime%5Btime%5D=0%3A1&"
        "dateTime%5BisDeparture%5D=true&"
        f"dateTime%5BdateReturn%5D={date}&"
        "dateTime%5BtimeReturn%5D=19%3A33&"
        "dateTime%5BisDepartureReturn%5D=true&"
        "params%5BonlyDirectConnections%5D=false&"
        "params%5BonlyConnWithoutRes%5D=false&"
        "params%5BuseBed%5D=NoLimit&"
        "params%5BdeltaPMax%5D=-1&"
        "params%5BmaxChanges%5D=4&"
        "params%5BminChangeTime%5D=-1&"
        "params%5BmaxChangeTime%5D=240&"
        "params%5BonlyCD%5D=false&"
        "params%5BonlyCDPartners%5D=true&"
        "params%5BhistoryTrain%5D=false&"
        "params%5BpsgOwnTicket%5D=false&"
        "params%5BaddServiceReservation%5D=false&"
        "params%5BaddServiceDog%5D=false&"
        "params%5BaddServiceBike%5D=false&"
        "params%5BaddServiceSMS%5D=false&"
        "passengers%5Bpassengers%5D%5B0%5D%5Bid%5D=1&"
        "passengers%5Bpassengers%5D%5B0%5D%5BtypeID%5D=5&"
        "passengers%5Bpassengers%5D%5B0%5D%5Bcount%5D=1&"
        "passengers%5Bpassengers%5D%5B0%5D%5Bage%5D=-1&"
        "passengers%5Bpassengers%5D%5B0%5D%5BageState%5D=0&"
        "passengers%5Bpassengers%5D%5B0%5D%5BcardIDs%5D=&"
        "passengers%5Bpassengers%5D%5B0%5D%5BisFavourite%5D=false&"
        "passengers%5Bpassengers%5D%5B0%5D%5BisDefault%5D=false&"
        "passengers%5Bpassengers%5D%5B0%5D%5BisSelected%5D=true&"
        "passengers%5Bpassengers%5D%5B0%5D%5Bnickname%5D=&"
        "passengers%5Bpassengers%5D%5B0%5D%5Bphone%5D=&"
        "passengers%5Bpassengers%5D%5B0%5D%5BcardTypeID%5D=0&"
        "passengers%5Bpassengers%5D%5B0%5D%5Bfullname%5D=&"
        "passengers%5Bpassengers%5D%5B0%5D%5BcardNumber%5D=&"
        "passengers%5Bpassengers%5D%5B0%5D%5Bbirthdate%5D=&"
        "passengers%5Bpassengers%5D%5B0%5D%5Bavatar%5D=&"
        "passengers%5Bpassengers%5D%5B0%5D%5Bimage%5D=&"
        "passengers%5Bpassengers%5D%5B0%5D%5BcompanyName%5D="
    )
    return payload
def cli_entry():
    """Build an AWS-Lambda-style event dict from command-line flags and
    invoke lambda_handler with it."""
    parser = argparse.ArgumentParser()
    string_options = [
        "AWS_REGION",
        "EMAIL_FROM",
        "EMAIL_TO",
        "JOURNEY_ORIGIN",
        "VIA",
        "JOURNEY_DESTINATION",
    ]
    for option in string_options:
        parser.add_argument("--" + option, required=True, type=str)
    parser.add_argument("--DATES_TO_QUERY", required=True, type=int)
    args = parser.parse_args()
    # Event keys intentionally mirror the flag names one-to-one.
    cli_event = {
        option: getattr(args, option)
        for option in string_options + ["DATES_TO_QUERY"]
    }
    lambda_handler(cli_event, None, make_network_requests=True)
if __name__ == "__main__":
cli_entry()
|
993,375 | fc412fcaeb9642c0a29811663deff032cc0f0e9e | from django.urls import path
from dreamtours_app.views import *#UserList, UserDetail, UserByCity
# API version prefix applied to every route below.
v = 'v2'
urlpatterns = [
    path(v+'/user/', UserList.as_view(), name='User List'),
    path(v+'/user/<int:pk>', UserDetail.as_view(), name='User Detail'),
    path(v+'/user/<path:name>&<path:passwd>', VerifyUser, name='Verify user'),
    path(v+'/city/', CityList.as_view(), name='City List'),
    path(v+'/city/<int:pk>', CityDetail.as_view(), name='City Detail'),
    path(v+'/particular/', ParticularList.as_view(), name='Particular List'),
    path(v+'/particular/<int:pk>', ParticularDetail.as_view(), name='Particular Detail'),
    path(v+'/particular/city/<int:pk>', ParticularByCity.as_view(), name='Particular By City'),
    path(v+'/company/', CompanyList.as_view(), name='Company List'),
    path(v+'/company/<int:pk>', CompanyDetail.as_view(), name='Company Detail'),
    path(v+'/localtype/', LocalTypeList.as_view(), name='LocalType List'),
    path(v+'/localtype/<int:pk>', LocalTypeDetail.as_view(), name='LocalType Detail'),
    path(v+'/local/', LocalList.as_view(), name='Local List'),
    path(v+'/local/<int:pk>', LocalDetail.as_view(), name='Local Detail'),
    # NOTE(review): the next three routes all share name='Local By City';
    # reverse()/{% url %} by name will resolve only one of them.
    path(v+'/local/city/<int:pk>', LocalByCity.as_view(), name='Local By City'),
    path(v+'/local/type/<int:pk>', LocalByType.as_view(), name='Local By City'),
    path(v+'/local/company/<int:pk>', LocalByCompany.as_view(), name='Local By City'),
    path(v+'/local/distance/<path:orig>/<path:dest>', LocalDistance, name='Local Distance'),
    path(v+'/rating/', RatingList.as_view(), name='Rating List'),
    path(v+'/rating/<int:pk>', RatingDetail.as_view(), name='Rating Detail'),
    # NOTE(review): 'Rating By City' is reused for the local- and user-scoped routes.
    path(v+'/rating/local/<int:pk>', RatingByLocal.as_view(), name='Rating By City'),
    path(v+'/rating/user/<int:pk>', RatingByUser.as_view(), name='Rating By City'),
    path(v+'/rating/media/<path:id>', get_rating_media, name='Rating Media'),
    path(v+'/comment/', CommentList.as_view(), name='Comment List'),
    path(v+'/comment/<int:pk>', CommentDetail.as_view(), name='Comment Detail'),
    # NOTE(review): 'Comment By City' is likewise duplicated below.
    path(v+'/comment/local/<int:pk>', CommentByLocal.as_view(), name='Comment By City'),
    path(v+'/comment/user/<int:pk>', CommentByUser.as_view(), name='Comment By City'),
] |
993,376 | 8e7275970d394eaef7bc962ac3d1f793bc02842c | """
Create a function that determines whether four coordinates properly create a
rectangle. A rectangle has 4 sides and has 90 degrees for each angle.
Coordinates are given as strings containing an x- and a y- coordinate: `"(x,
y)"`.
For this problem, assume none of the rectangles are tilted.
is_rectangle(["(0, 0)", "(0, 1)", "(1, 0)", "(1,1)"]) ➞ True
### Examples
is_rectangle(["(-4, 3)", "(4, 3)", "(4, -3)", "(-4, -3)"]) ➞ True
is_rectangle(["(0, 0)", "(0, 1)"]) ➞ False
# A line is not a rectangle!
is_rectangle(["(0, 0)", "(0, 1)", "(1, 0)"]) ➞ False
# Neither is a triangle!
is_rectangle(["(0, 0)", "(9, 0)", "(7, 5)", "(16, 5)"]) ➞ False
# A parallelogram, but not a rectangle!
### Notes
* A square is also a rectangle!
* A parallelogram is NOT necessarily a rectangle (the rectangle is a special case of a parallelogram).
* If the input is fewer than or greater than 4 coordinates, return `False`.
"""
def is_rectangle(l):
    """Return True iff the four coordinate strings (e.g. "(0, 0)") form an
    axis-aligned rectangle (squares included).

    Fixed two defects in the original:
    - the length guard `4 > len(l) < 4` can never be True, so short input
      crashed with IndexError instead of returning False;
    - comparing only two diagonal lengths admitted false positives, e.g.
      (0,0),(3,0),(0,4),(5,0).  A non-tilted rectangle is instead exactly
      2 distinct x values, 2 distinct y values, and all four combinations.
    """
    if len(l) != 4:
        return False
    points = set()
    for coord in l:
        # Accept both "(x, y)" and "(x,y)"; int() tolerates spaces.
        x_str, y_str = coord.strip("() ").split(",")
        points.add((int(x_str), int(y_str)))
    if len(points) != 4:
        # Duplicate corners can never form a rectangle.
        return False
    xs = {p[0] for p in points}
    ys = {p[1] for p in points}
    return (len(xs) == 2 and len(ys) == 2
            and points == {(x, y) for x in xs for y in ys})
return False
|
993,377 | 96b1729477e8a111f4a25845c190b1e19da66d1f | def vol_integrand(z, fsky=0.5, fkp_weighted=False, nbar=1.e-3, P0=5.e3):
## Purely volume integral as a sanity check.
## dV / dz [(h^{-1} Mpc)^3]; Differential comoving volume per redshift per steradian.
##
## Note: cosmo.differential_comoving_volume(z) = (const.c.to('km/s') / cosmo.H(z)) * cosmo.comoving_distance(z) ** 2.
## = dV/dz [d\Omega] = chi^2 dChi/dz [d\Omega].
##
dVdz = fsky * 4. * np.pi * cosmo.differential_comoving_volume(z).value * cparams['h_100'] ** 3.
if fkp_weighted:
## FKP volume weighting.
nP = nbar * P0
fkp = nP / (1. + nP)
return fkp * fkp * dVdz
else:
return dVdz
def _vvol_integrand(x, args):
    ## Vegas wrapper of vol_integrand; input args as a list.
    ## x is the integration-point vector; only x[0] (the redshift z) is used.
    z = x[0]
    (fsky, fkp_weighted, nbar, P0) = args
    return vol_integrand(z, fsky, fkp_weighted, nbar, P0)
|
993,378 | 6ee4fe22236fff5ad8d21b1a070f9776f8c210cb | #!/usr/bin/env python
import hello
print hello.getstr('hello world')
|
993,379 | f95f4e1adc1c041e113cba2dc97b3958f466862a | # 练习1: 计算 1~100之间所有数字的总和 5050
# Exercise 1: sum of all numbers 1..100 -> 5050
# (Fixed: the original shadowed the builtin `sum` with its accumulator;
# the printed values are unchanged.)
total = sum(range(1, 101))
print(total)
# Exercise 2: sum of all even numbers in 1..100 -> 2550
total = sum(range(2, 101, 2))
print(total)
# Exercise 3: sum of numbers in 1..100 divisible by both 3 and 2 -> 816
# (divisible by both 3 and 2 is the same as divisible by 6)
total = sum(range(6, 101, 6))
print(total)
|
993,380 | 1f4d48461d6a5b6f4e0bb8a8384d4d1ec33ae0e0 | import aiodns
import asyncio
import ipaddress
from merc import feature
class ResolverFeature(feature.Feature):
  """Feature exposing a shared aiodns resolver bound to the app's event loop."""
  NAME = __name__
  def __init__(self, app):
    self.resolver = aiodns.DNSResolver(loop=app.loop)
install = ResolverFeature.install
@asyncio.coroutine
def resolve_hostname_coro(app, user, timeout):
  """Perform a forward-confirmed reverse DNS lookup for *user*.

  Resolves the peer address to a PTR name, checks that the name resolves
  back to the same address, and sets user.host to the hostname on success
  (or to the bare IP when the lookup fails).  Always releases the
  registration latch taken in resolve_hostname().
  """
  resolver_feature = app.features.get(ResolverFeature)
  peer_host, *_ = user.protocol.transport.get_extra_info("peername")
  # Drop any IPv6 zone-id suffix ("fe80::1%eth0" -> "fe80::1").
  peer_host, _, _ = peer_host.partition("%")
  app.run_hooks("server.notify", user,
                "*** Looking up your hostname...")
  ip = ipaddress.ip_address(peer_host)
  is_ipv4 = isinstance(ip, ipaddress.IPv4Address)
  if is_ipv4:
    reverse_name = ".".join(
        reversed(ip.exploded.split("."))) + ".in-addr.arpa."
  else:
    # IPv6 reverse name: one hex nibble per label, reversed (RFC 3596).
    nibbles = "".join(ip.exploded.split(":"))
    reverse_name = ".".join(reversed(nibbles)) + ".ip6.arpa."
  try:
    ptr_name, *_ = yield from asyncio.wait_for(
        resolver_feature.resolver.query(reverse_name, "PTR"), timeout)
    record_type = "A" if is_ipv4 else "AAAA"
    confirmed, *_ = yield from asyncio.wait_for(
        resolver_feature.resolver.query(ptr_name, record_type), timeout)
    # Only accept the PTR name if it maps back to the original address.
    if ip == ipaddress.ip_address(confirmed):
      app.run_hooks("server.notify", user,
                    "*** Found your hostname ({})".format(ptr_name))
      user.host = ptr_name
    else:
      app.run_hooks("server.notify", user,
                    "*** Hostname does not resolve correctly")
  except (aiodns.error.DNSError, asyncio.TimeoutError):
    app.run_hooks("server.notify", user,
                  "*** Couldn't look up your hostname")
    user.host = peer_host
  user.registration_latch.decrement()
@ResolverFeature.hook("user.connect")
def resolve_hostname(app, user):
  # Hold registration open until the reverse-DNS coroutine finishes
  # (it decrements the latch), then schedule it with a 5-second timeout.
  user.registration_latch.increment()
  # Fixed: asyncio.async() was removed in Python 3.10; ensure_future() is
  # the drop-in replacement available since 3.4.4.  (The explicit loop=
  # kwarg matches the rest of this legacy @asyncio.coroutine codebase.)
  asyncio.ensure_future(resolve_hostname_coro(app, user, 5), loop=app.loop)
|
993,381 | 7c400ed772ed4ab6f1a3416d9c3a7a779ff3d053 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Adding model 'ResumeWhitoutStaffDailyReportData'
        # ('Whitout' is a typo, but it is baked into the frozen model and
        # table name, so it must not be corrected in this migration.)
        db.create_table(u'dash_resumewhitoutstaffdailyreportdata', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('report_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
            ('resume_commends_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('resume_view_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('resume_view_proportion', self.gf('django.db.models.fields.CharField')(default='', max_length=20)),
            ('resume_fav_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('resume_down_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('resume_down_proportion', self.gf('django.db.models.fields.CharField')(max_length=20)),
            ('company_card_send_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('interviewed_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('entered_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
        ))
        db.send_create_signal('dash', ['ResumeWhitoutStaffDailyReportData'])
    def backwards(self, orm):
        # Deleting model 'ResumeWhitoutStaffDailyReportData'
        # (reverse of forwards(): drops the table created there)
        db.delete_table(u'dash_resumewhitoutstaffdailyreportdata')
models = {
'dash.coredailyreportdata': {
'Meta': {'ordering': "['-report_date']", 'object_name': 'CoreDailyReportData'},
'active_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lively_member_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'lively_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'member_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'register_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'repeat_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'report_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'dash.feeddailyreportdata': {
'Meta': {'ordering': "['-report_date']", 'object_name': 'FeedDailyReportData'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lively_feed_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'lively_feed_member_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'lively_feed_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'new_feed_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'report_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'dash.monthreportdata': {
'Meta': {'ordering': "['-report_date']", 'object_name': 'MonthReportData'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'month_lively_member_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'month_lively_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'month_repeat_visit_member_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'month_repeat_visit_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'report_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'dash.partnerdailyreportdata': {
'Meta': {'ordering': "['-report_date']", 'object_name': 'PartnerDailyReportData'},
'accept_task_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'accept_task_user_total_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'accusation_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'accusation_total_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'all_extra_reward_coin_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'all_reward_coin_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'do_task_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'do_task_total_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'entered_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'entered_total_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interviewed_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'interviewed_total_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'report_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'resume_download_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'resume_download_total_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'resume_viewed_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'resume_viewed_total_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_accedpted_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_accedpted_count_contrast': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'task_accedpted_total_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_total_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_viewed_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'today_commend_and_check_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'today_commend_and_download_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'today_extra_reward_coin_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'today_reward_coin_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'upload_resume_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'upload_resume_total_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'dash.pinbotdailyreport': {
'Meta': {'object_name': 'PinbotDailyReport'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'login_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'pay_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'pv': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'register_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'report_date': ('django.db.models.fields.DateField', [], {}),
'total_pay_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'total_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'uv': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'dash.resumedailyreportdata': {
'Meta': {'ordering': "['-report_date']", 'object_name': 'ResumeDailyReportData'},
'company_card_send_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'entered_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interviewed_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'report_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'resume_commends_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'resume_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'resume_down_proportion': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'resume_fav_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'resume_view_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'resume_view_proportion': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20'})
},
'dash.resumewhitoutstaffdailyreportdata': {
'Meta': {'ordering': "['-report_date']", 'object_name': 'ResumeWhitoutStaffDailyReportData'},
'company_card_send_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'entered_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interviewed_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'report_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'resume_commends_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'resume_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'resume_down_proportion': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'resume_fav_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'resume_view_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'resume_view_proportion': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20'})
},
'dash.tasksystemdailyreportdata': {
'Meta': {'ordering': "['-report_date']", 'object_name': 'TaskSystemDailyReportData'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'report_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'task_A10_R1_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A10_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A11_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A12_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A13_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A14_L1_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A15_R1_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A15_R2_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A15_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A16_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A17_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A18_R1_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A18_R2_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A1_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A2_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A3_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A4_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A5_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A6_L1_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A6_R1_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A6_R2_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A6_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A7_R1_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A7_R2_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A7_R3_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A7_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A8_R1_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A8_R2_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A8_R3_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A8_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A9_R1_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A9_R2_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A9_R3_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_A9_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'dash.userdailyreportdata': {
'Meta': {'ordering': "['-report_date']", 'object_name': 'UserDailyReportData'},
'all_total_active_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_experience_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'new_manual_member_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'new_member_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'new_register_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'new_self_member_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'report_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'total_experience_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'total_manual_member_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'total_member_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'total_register_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'total_self_member_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'dash.weekreportdata': {
'Meta': {'ordering': "['-report_date']", 'object_name': 'WeekReportData'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'report_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'week_lively_member_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'week_lively_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'week_repeat_visit_member_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'week_repeat_visit_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'dash.weixindailyreportdata': {
'Meta': {'ordering': "['-report_date']", 'object_name': 'WeixinDailyReportData'},
'feed_notify_send_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'feed_notify_view_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lively_member_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'lively_user_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'new_bind_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'new_feed_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'new_feed_favours_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'new_reg_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'report_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'total_bind_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
}
}
complete_apps = ['dash'] |
993,382 | 5368e768b7577130c9082e9344ab9cd22280dc1e | import timeit
import numpy as np
import random as rd
start_time = timeit.default_timer()
# code you want to evaluate
class Sudoku:
    """Generate a 9x9 grid of shuffled digit rows and pretty-print it.

    NOTE(review): each row is an independent permutation of 1-9, so the
    columns and 3x3 boxes are NOT valid sudoku constraints -- like the
    original, this only produces a row-wise permutation grid.
    """
    def __init__(self):
        self.size = 9
        self.cell = np.arange(1, self.size + 1)
        # Build exactly `size` rows.  Fixed: the original stacked an
        # initial row plus 9 more from its loop, yielding a 10x9 array
        # while printSudoku() only ever used rows 0-8.
        rows = []
        for _ in range(self.size):
            rd.shuffle(self.cell)
            rows.append(self.cell.copy())
        self.cells = np.array(rows)
        print(self.cells)
    def printSudoku(self):
        """Print the grid as 3x3 bands separated by ' | ' and dashed lines
        (same layout as the original: rows i, i+1, i+2 interleaved)."""
        for i in range(0, self.size, 3):
            for j in range(0, self.size, 3):
                print(self.cells[i][j], " ", self.cells[i][j + 1], " ", self.cells[i][j + 2], " | ",
                      self.cells[i + 1][j], " ", self.cells[i + 1][j + 1], " ", self.cells[i + 1][j + 2], " | ",
                      self.cells[i + 2][j], " ", self.cells[i + 2][j + 1], " ", self.cells[i + 2][j + 2])
            print("-------------------------------------")
lassie = Sudoku();
lassie.printSudoku()
elapsed = timeit.default_timer() - start_time
print(elapsed) |
993,383 | 9808963e34b62d6cca63ca9dcd738aeb7b7f9f80 | #!/usr/bin/env python
#
# Azure Linux extension
#
# Copyright (c) Microsoft Corporation
# All rights reserved.
# MIT License
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the ""Software""), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import xml.etree.ElementTree as ET
def setXmlValue(xml,path,property,value,selector=()):
    """Set `property` to `value` on every element matching `path`.

    - If `selector` is a (attr, expected) pair, only elements whose
      `attr` equals `expected` are touched.
    - If `property` is falsy, the element's text is set instead.
    - An existing non-empty attribute value is never overwritten.

    The mutable default `selector=[]` was replaced with an immutable
    tuple (equally falsy, so callers are unaffected), and the redundant
    `len(...) == 0` test was folded into the truthiness check
    (`get` returns None for a missing attribute and '' is falsy).
    """
    elements = xml.findall(path)
    for element in elements:
        if selector and element.get(selector[0])!=selector[1]:
            continue
        if not property:
            element.text = value
        elif not element.get(property):
            element.set(property,value)
def getXmlValue(xml,path,property):
    """Return attribute `property` of the first element matching `path`,
    or None when no element matches (or the attribute is absent)."""
    match = xml.find(path)
    if match is None:
        return None
    return match.get(property)
def addElement(xml,path,el,selector=[],addOnlyOnce=False):
    """Append `el` as a child of every element matching `path`.

    When `selector` is a (attr, expected) pair, only matching elements
    receive the child.  With `addOnlyOnce` the first append wins.
    """
    for target in xml.findall(path):
        accepted = (not selector) or target.get(selector[0]) == selector[1]
        if accepted:
            target.append(el)
            if addOnlyOnce:
                return
def createElement(schema):
    """Parse the XML snippet `schema` into a fresh Element."""
    node = ET.fromstring(schema)
    return node
def removeElement(tree, parent_path, removed_element_name):
    """Remove every direct child named `removed_element_name` from each
    element matching `parent_path` (other children are preserved)."""
    for parent in tree.findall(parent_path):
        # findall() snapshots the matches, so removing while iterating
        # the snapshot is safe and clears every occurrence.
        for child in parent.findall(removed_element_name):
            parent.remove(child)
993,384 | 9b6063216649341dd3ea19d03bd5a394ae54bf47 | """
from typing import List
def greet_all(names: List[str]) -> None:
for name in names:
print(name)
greet_all(["Guilherme", "Giovanna"])
"""
def greet_all(names: list[str]) -> None:
    """Print each name in *names* on its own line."""
    for person in names:
        print(person)
# Demo invocation.
greet_all(["Guilherme", "Giovanna"])
|
993,385 | 8347e829969ad60dbcfdb23fbf6d671c8c7f8f66 | zhweekday = ["星期日", "星期一", "星期二",
"星期三", "星期四", "星期五", "星期六"]
from datetime import datetime
from dateutil import relativedelta
import os.path
from pathlib import Path
def from_my_birthday (d):
    """
    Calculate time difference between given datetime d and 1986-4-23.
    """
    return relativedelta.relativedelta(d, datetime(1986, 4, 23))
def from_epoch (d):
    """Whole days elapsed between the (naive) Unix epoch and *d*."""
    delta = d - datetime(1970, 1, 1)
    return delta.days
def from_gp (d):
    """Whole days elapsed since the 'Globus Pallidum' epoch, 2019-09-28."""
    gp_epoch = datetime(2019, 9, 28)
    return (d - gp_epoch).days
def format_utc_en (d):
    """English UTC label, e.g. 'April 23, 1986 (UTC)'."""
    return d.strftime("%B %d, %Y") + " (UTC)"
def format_utc_zh (d):
    """Chinese UTC label: Gregorian year, ROC year in parens, month/day."""
    parts = [
        "世界協調時間{0}年".format(d.year),
        "(中華民國{0}年)".format(d.year - 1911),
        "{0}月{1}日".format(d.month, d.day),
    ]
    return "".join(parts)
def format_epoch_en (d):
    """English label for the Unix-epoch day count of *d*."""
    days = from_epoch(d)
    return "{} days since Unix Epoch".format(days)
def format_epoch_zh (d):
    """Chinese label for the Unix-epoch day count of *d*."""
    days = from_epoch(d)
    return "Unix 紀元 {} 日".format(days)
def format_weekday_en (d):
    """Full English weekday name of *d*."""
    weekday = d.strftime("%A")
    return weekday
def format_weekday_zh (d):
    """Chinese weekday name; strftime %w yields 0=Sunday .. 6=Saturday."""
    index = int(d.strftime("%w"))
    return zhweekday[index]
def format_gp_en (d):
    """English label for the GP-epoch day count of *d*."""
    days = from_gp(d)
    return "Globus Pallidum day {}".format(days)
def format_gp_zh (d):
    """Chinese label for the GP-epoch day count of *d*."""
    days = from_gp(d)
    return "蒼白球紀元第{}日".format(days)
def format_age (d):
    """Markdown '### 年齡 Age' section giving the age at *d* in English
    and Chinese (years / months / days)."""
    age = from_my_birthday(d)
    age_en = '{} years {} months {} days'.format(age.years, age.months, age.days)
    age_zh = '{} 歲 {} 個月 {} 天'.format(age.years, age.months, age.days)
    return "### 年齡 Age\n* " + age_en + "\n* " + age_zh
def format_date_information (d):
    """Markdown '### 日期 Date' section combining every date formatter,
    one Chinese bullet and one English bullet, plus a notes placeholder."""
    zh_fields = [format_utc_zh(d), format_epoch_zh(d),
                 format_weekday_zh(d), format_gp_zh(d)]
    en_fields = [format_utc_en(d), format_epoch_en(d),
                 format_weekday_en(d), format_gp_en(d)]
    body = "* " + " / ".join(zh_fields) + "\n* " + " / ".join(en_fields)
    return "### 日期 Date\n" + body + "\n* 特殊註記:"
def format_title (d):
    """Diary title, e.g. '蒼白球日誌0042_gpdiary0042_20191109'."""
    serial = "%04d" % from_gp(d)
    stamp = d.strftime("%Y%m%d")
    return "蒼白球日誌{}_gpdiary{}_{}".format(serial, serial, stamp)
def format_filename ():
    """Relative path for today's diary file,
    e.g. '../source/gpdiary0042_20191109.md'."""
    now = datetime.now()
    serial = "%04d" % from_gp(now)
    stamp = now.strftime("%Y%m%d")
    return "../source/gpdiary{}_{}.md".format(serial, stamp)
def create_filehead():
    """Title line plus the date and age sections for a new entry."""
    now = datetime.now()
    sections = [
        format_title(now) + "\n===\n",
        format_date_information(now) + "\n\n",
        format_age(now) + "\n\n",
    ]
    return "".join(sections)
def create_template_body():
    """Static Markdown skeleton: content, comment, and appendix sections."""
    sections = (
        "### 本文 Content\n1. \n\n---\n\n2. 雜記:物價與其他[2]\n\n---\n\n",
        "### 注釋 Comment\n\n[1] \n\n[2] 新台幣計價。有關新台幣可見蒼白球日誌0007。\n\n",
        "### 附錄 Appendix\n",
    )
    return "".join(sections)
def create_template():
    """Complete Markdown template for a brand-new diary entry."""
    head = create_filehead()
    body = create_template_body()
    return head + body
# Resolve the repository root relative to this script's own location.
current_path = Path(os.path.realpath(__file__))
root = current_path.parent.parent
# NOTE(review): format_filename() already begins with "../source/", so the
# joined path contains "source/../source/..." -- confirm this is intended.
newfilepath = root / "source"/ format_filename ()
print(os.path.exists(newfilepath))
# Only create the file when today's entry does not exist yet.
if os.path.exists(newfilepath):
    pass
else:
    with newfilepath.open("w", encoding="utf-8") as f:
        f.write(create_template())
|
993,386 | b6a146d8bbeb56b0299b42bfdfb77747a6af8354 | """ User Service Class"""
from organization.model.department import Department as department_model
from organization.model.user import User as user_model
from dding import Dding
class Department(object):
    """Department service.

    Keeps the local department models in sync with the remote "dding"
    API (presumably DingTalk -- confirm).  Mutating methods call the
    remote API first and only touch the local models when the remote
    call reports errcode == 0.  Every method returns a
    (data, errcode, errmsg) triple; errcode 0 means success.
    """
    def __init__(self, mongo):
        # One mongo handle is shared by every collaborator.
        self.mongo = mongo
        self.dding = Dding(self.mongo)
        self.department_model = department_model(self.mongo)
        self.user_model = user_model(self.mongo)
    def list_all(self):
        """Return every department from the local model."""
        data = self.department_model.list_all()
        return data, 0, ''
    def list_tree(self):
        """Return the department hierarchy as a tree from the local model."""
        data = self.department_model.list_tree()
        return data, 0, ''
    def create(self, name, parentid, create_dept_group):
        """Create a department remotely, then mirror it locally.

        `parentid` is a local id; its remote `did` is looked up first.
        """
        parent_result = self.department_model.find_id(parentid)
        dd_result = self.dding.create_department(name, parent_result['did'], create_dept_group)
        # errcode == 0 (falsy) signals remote success.
        if not dd_result['errcode']:
            return self.department_model.add(dd_result['id'], name, parentid), 0, ''
        else:
            return "", dd_result['errcode'], dd_result['errmsg']
    def update(self, _id, name, parentid, manager):
        """Rename / re-parent a department and optionally set a manager."""
        department = self.department_model.find_id(_id)
        parent = self.department_model.find_id(parentid)
        if manager:
            # Translate the local user id into the remote userid.
            user = self.user_model.find_id(manager)
            if user:
                manager = user['userid']
        if not department:
            # Unknown local department: report errcode 1, no remote call.
            return "", 1, ""
        dd_result = self.dding.update_department(department['did'], name, parent['did'],
                                                 manager)
        if not dd_result['errcode']:
            return self.department_model.update(_id, name, parentid, manager), 0, ''
        else:
            return "", dd_result['errcode'], dd_result['errmsg']
    def remove(self, _id):
        """Delete a department remotely, then drop the local mirror."""
        department = self.department_model.find_id(_id)
        if not department:
            return "", 1, ""
        dd_result = self.dding.delete_department(department['did'])
        if not dd_result['errcode']:
            self.department_model.remove(_id)
            return "", 0, ""
        else:
            return "", dd_result['errcode'], dd_result['errmsg']
993,387 | 02f263b288ce667a6b4c1cff20eb950ab7df56d0 | #Problem 2
# Uppercase the middle three characters of the input string.
text = input()
# Index of the middle character; for even lengths this rounds toward the
# left (the "middle 3" is then the left-biased window) -- presumably the
# exercise assumes odd-length input; confirm.
mid = int((len(text)-1)/2)
print("The old string:" ,text)
print("Middle 3 characters:" ,text[mid-1:mid+2])
print("The new string:" ,text[:mid-1] + text[mid-1:mid+2].upper() + text[mid+2:])
993,388 | f9c143360025696c26d837cb566766014c429657 | import os
from tqdm import tqdm # smart progress bar
from PIL import Image
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import rc_params
import pandas as pd
import numpy as np
from skimage.feature import daisy
from skimage.feature import hog
from skimage.color import rgb2gray
from skimage.exposure import equalize_hist
from get_image import get_image
from extract_rgb_info import extract_rgb_info
def preprocess(img, demo=False):
    """ Turn raw pixel values into features.

    Builds a flat feature vector from three sources: per-channel color
    statistics, HOG features, and flattened DAISY features.  With
    ``demo=True`` each intermediate stage is drawn into a 3x2 figure.

    Args:
        img: RGB image array (integer pixel values; converted to
            grayscale for HOG/DAISY).
        demo: when True, plot every stage (sparser DAISY params so the
            visualization is readable).

    Returns:
        1-D numpy array: color stats + HOG + flattened DAISY features.
    """
    def _demo_plot(img, stage="", is_ints=False, axes_idx=0):
        """ Utility to visualize the features we're building
        """
        if demo:
            # `axes` comes from the enclosing scope; `bees_cm` is
            # presumably a module-level colormap -- confirm.
            axes[axes_idx].imshow(img / 255. if is_ints else img,
                                  cmap=bees_cm)
            axes[axes_idx].set_title(stage)
        return axes_idx + 1

    # Track which subplot we're plotting to.  Initialized unconditionally:
    # the original only assigned this inside `if demo:`, so calling
    # preprocess(img) with demo=False raised UnboundLocalError at the
    # first _demo_plot call below.
    axes_idx = 0
    if demo:
        fig, axes = plt.subplots(3, 2, figsize=(15, 20))
        axes = axes.flatten()
    axes_idx = _demo_plot(img, stage="Raw Image", is_ints=True, axes_idx=axes_idx)
    # FEATURE 1: Raw image and color data
    if demo:
        color_info = extract_rgb_info(img, ax=axes[axes_idx])
        axes_idx += 1
    else:
        color_info = extract_rgb_info(img)
    # remove color information (hog and daisy only work on grayscale)
    gray = rgb2gray(img)
    axes_idx = _demo_plot(gray, stage="Convert to grayscale", axes_idx=axes_idx)
    # equalize the image
    gray = equalize_hist(gray)
    axes_idx = _demo_plot(gray, stage="Equalized histogram", axes_idx=axes_idx)
    # FEATURE 2: histogram of oriented gradients features
    # NOTE(review): `visualise` is the old skimage spelling; newer
    # releases use `visualize` -- confirm the pinned skimage version.
    hog_features = hog(gray,
                       orientations=12,
                       pixels_per_cell=(8, 8),
                       cells_per_block=(1, 1),
                       visualise=demo)
    # if demo, we actually got a tuple back; unpack it and plot
    if demo:
        hog_features, hog_image = hog_features
        axes_idx = _demo_plot(hog_image, stage="HOG features", axes_idx=axes_idx)
    # FEATURE 3: DAISY features - sparser for demo so can be visualized
    params = {'step': 25, 'radius': 25, 'rings': 3} if demo \
        else {'step': 10, 'radius': 15, 'rings': 4}
    daisy_features = daisy(gray,
                           histograms=4,
                           orientations=8,
                           normalization='l1',
                           visualize=demo,
                           **params)
    if demo:
        daisy_features, daisy_image = daisy_features
        axes_idx = _demo_plot(daisy_image, stage="DAISY features", axes_idx=axes_idx)
    # return a flat array of the raw, hog and daisy features
    return np.hstack([color_info, hog_features, daisy_features.flatten()])
993,389 | 2999c3beaf89f1167432eb78123dfeb525d0ea5f | from dataclasses import dataclass, field
from typing import Iterable, Optional, Callable
from rlbot.matchcomms.client import MatchcommsClient
from rlbot.matchconfig.match_config import MatchConfig
from rlbot.utils.game_state_util import GameState
from rlbot.utils.rendering.rendering_manager import RenderingManager
from rlbottraining.grading.grader import Grader, Grade
from rlbottraining.history.metric import Metric
from rlbottraining.match_configs import make_default_match_config
from rlbottraining.rng import SeededRandomNumberGenerator
@dataclass
class TrainingExercise(Metric):
    """One scriptable training scenario: a game state to set up plus a
    grader that decides pass/fail."""
    # Human-readable identifier for the exercise.
    name: str
    # Decides when the exercise is passed or failed.
    grader: Grader
    match_config: MatchConfig = field(default_factory=make_default_match_config)
    # MatchcommsClient connected to the current match
    _matchcomms: Optional[MatchcommsClient] = None
    matchcomms_factory: Callable[[], MatchcommsClient] = None  # Initialized externally.
    def get_matchcomms(self) -> MatchcommsClient:
        """Return a live matchcomms client, lazily (re)creating it when
        absent or when its background thread has died."""
        if (not self._matchcomms) or (not self._matchcomms.thread.is_alive()):
            assert self.matchcomms_factory
            self._matchcomms = self.matchcomms_factory()
        return self._matchcomms
    def on_briefing(self) -> Optional[Grade]:
        """
        This method is called before state-setting such that bots can be "briefed" on the upcoming exercise.
        The "briefing" is usually for using matchcomms to convey objectives and parameters.
        A grade can be returned in case bot responded sufficient to pass or fail the exercise
        before any on_tick() grading happens.
        """
        pass
    def make_game_state(self, rng: SeededRandomNumberGenerator) -> GameState:
        """Subclasses must build the GameState this exercise starts from."""
        raise NotImplementedError()
    def render(self, renderer: RenderingManager):
        """
        This method is called each tick to render exercise debug information.
        This method is called after on_tick().
        It is optional to override this method.
        """
        self.grader.render(renderer)
# A playlist is simply an iterable of exercises to run in order.
Playlist = Iterable[TrainingExercise]
|
993,390 | 590d90ce58a8c371cc1840246884dc0ad5ceb94c | from django.conf import settings
from scheduled_job_client.exceptions import InvalidJobConfig
def get_job_config():
    """Return the SCHEDULED_JOB_CLIENT dict from Django settings.

    Raises:
        InvalidJobConfig: when the setting is absent.  The original
        AttributeError is chained (`from ex`) so tracebacks show the
        root cause -- the original captured `ex` but never used it.
    """
    try:
        return settings.SCHEDULED_JOB_CLIENT
    except AttributeError as ex:
        raise InvalidJobConfig('Missing Scheduled Job Client Configuration') from ex
|
993,391 | deb7db09650632718e9a3fafc73ca1e970512999 | '''
this program plots frequency of top-10 tags using matplotlib reading from precreated json database named sample.json
'''
import matplotlib.pyplot as plt
import json
# Load the precomputed tag-frequency mapping.  The original opened the
# file without closing it; `with` releases the handle deterministically.
with open('sample.json') as f:
    data = json.load(f)
# Sort tags by descending frequency and keep the ten most common.
ranked = sorted(data.items(), key=lambda kv: kv[1], reverse=True)
dis = dict(ranked[:10])
plt.bar(dis.keys(), dis.values(), color='g')
plt.show()
993,392 | 6cb519eaf5b5d6073c670a307b08803b0f8a039d | import synapse.lib.stormtypes as s_stormtypes
# Registers the library with the global Storm type registry on import.
@s_stormtypes.registry.registerLib
class LibIters(s_stormtypes.Lib):
    '''
    A Storm library for providing iterator helpers.
    '''
    # Storm path: callable as $lib.iters.* inside Storm queries.
    _storm_lib_path = ('iters', )
    # Machine-readable docs consumed by the Storm help system.
    _storm_locals = (
        {
            'name': 'enum', 'desc': 'Yield (<indx>, <item>) tuples from an iterable or generator.',
            'type': {
                'type': 'function', '_funcname': 'enum',
                'args': (
                    {'type': 'iter', 'name': 'genr', 'desc': 'An iterable or generator.'},
                ),
                'returns': {'name': 'yields', 'type': 'list',
                            'desc': 'Yields (<indx>, <item>) tuples.'},
            }
        },
    )
    def __init__(self, runt, name=()):
        s_stormtypes.Lib.__init__(self, runt, name)
    def getObjLocals(self):
        # Maps Storm-visible names to their Python implementations.
        return {
            'enum': self.enum,
        }
    async def enum(self, genr):
        # Async analogue of the builtin enumerate(): counts from 0.
        indx = 0
        async for item in s_stormtypes.toiter(genr):
            yield (indx, item)
            indx += 1
|
993,393 | a23c0c376b5c1b099953c51a8096a62beff06f6e | '''
Выведите таблицу размером n×n, заполненную целыми числами от 1 до n2 по спирали, выходящей из левого верхнего угла
и закрученной по часовой стрелке, как показано в примере.
Формат ввода:
Одна строка, содержащая одно целое число n, n>0.
Формат вывода:
Таблица из n строк, значения в строках разделены пробелом.
Sample Input:
5
Sample Output:
1 2 3 4 5
16 17 18 19 6
15 24 25 20 7
14 23 22 21 8
13 12 11 10 9
'''
def spiral_matrix(n):
    """Return an n x n matrix filled with 1..n*n in a clockwise spiral
    starting from the top-left corner (see the module docstring sample).

    Decomposed from the original flat script so the algorithm is testable;
    n must be a positive integer.
    """
    matrix = [[0] * n for _ in range(n)]
    value = 1
    top, bottom, left, right = 0, n - 1, 0, n - 1
    while top <= bottom and left <= right:
        for col in range(left, right + 1):            # top edge, left -> right
            matrix[top][col] = value
            value += 1
        for row in range(top + 1, bottom + 1):        # right edge, top -> bottom
            matrix[row][right] = value
            value += 1
        if top < bottom:
            for col in range(right - 1, left - 1, -1):  # bottom edge, right -> left
                matrix[bottom][col] = value
                value += 1
        if left < right:
            for row in range(bottom - 1, top, -1):      # left edge, bottom -> top
                matrix[row][left] = value
                value += 1
        top, bottom, left, right = top + 1, bottom - 1, left + 1, right - 1
    return matrix


if __name__ == "__main__":
    # Guarded so the module can be imported without consuming stdin
    # (the original ran input() unconditionally at import time).
    n = int(input())
    for row in spiral_matrix(n):
        print(' '.join(str(cell) for cell in row))
993,394 | ab784391291b27de25b57da0ac3f2ba70a5c8eed | # 싱글톤 구현 방법들
class BaseClass:
    """Base class exposing a class-level text accessor for subclasses."""
    @classmethod
    def gettext(cls):
        message = "static method string"
        return message
def singleton(clazz):
    """Decorator: construct *clazz* at most once and hand back the cached
    instance on every later call (later constructor args are ignored)."""
    cache = {}
    def getinstance(*args, **kargs):
        if clazz not in cache:
            cache[clazz] = clazz(*args, **kargs)
        return cache[clazz]
    return getinstance
@singleton
class MainClass(BaseClass):
    # After decoration, the name `MainClass` is bound to singleton's
    # `getinstance` closure -- a function -- not to the class itself.
    pass
# Wrapping the already-decorated `MainClass` (now a function) in
# singleton() again yields yet another closure, so `instance` is a
# function object, not a MainClass instance.
instance = singleton(MainClass)
print(type(instance))
# NOTE(review): plain functions have no `gettext` attribute, so this
# raises AttributeError -- apparently a deliberate demonstration of the
# pitfall (see the trailing comment); confirm before "fixing".
print(instance.gettext()) # cannot find static method in function
993,395 | 5be603761135358c2e72e77d26e48e2d78040bfb | import scrapy
from scrapy.http import HtmlResponse
from jobparser.items import JobparserItem
class SuperjobruSpider(scrapy.Spider):
    """Crawl superjob.ru search results for Python vacancies and yield a
    JobparserItem per vacancy page.

    NOTE(review): the XPaths rely on generated CSS class fragments
    ('f-test-search-result-item', 'jNMYr') which break when the site's
    markup changes -- re-verify against the live page before relying on
    this spider.
    """
    name = 'superjobru'
    allowed_domains = ['superjob.ru']
    # Search pre-filtered to keyword "python"; geo parameter is URL-encoded.
    start_urls = ['https://www.superjob.ru/vacancy/search/?keywords=python&geo%5Bt%5D%5B0%5D=4']
    def parse(self, response: HtmlResponse):
        """Collect vacancy links from a result page and follow pagination."""
        vacancies_links = response.xpath("//div[contains(@class,'f-test-search-result-item')]//div[contains(@class,'jNMYr')]//a/@href").extract()
        # The second rel="next" anchor is presumably the bottom pager -- confirm.
        next_page = response.xpath("//a[@rel='next'][position()=2]/@href").extract_first()
        for link in vacancies_links:
            yield response.follow(link, callback=self.vacansy_parse)
        if next_page:
            yield response.follow(next_page, callback=self.parse)
    def vacansy_parse(self, response: HtmlResponse):
        """Extract one vacancy's fields from its detail page."""
        vacancy_name = response.xpath('//h1/text()').extract_first()
        # Salary is scattered over nested spans; keep all text fragments.
        salary = response.xpath('//h1/parent::div/span/span/span/text()').extract()
        link = response.url
        vacancy_company_name = response.xpath('//h2/parent::a/@href').extract_first()
        item = JobparserItem(vacancy_name=vacancy_name, salary=salary, link=link,
                             vacancy_company_name=vacancy_company_name)
        yield item
|
def EachWork():
    """Return the static list of portfolio work entries.

    Each entry is a dict with 'title', 'date' and 'content' keys.
    """
    works = []
    works.append({'title': "Geographic.", 'date': "2019-01-03", "content": "chart"})
    works.append({'title': "Four sided", 'date': "2019-01-01", "content": "chart"})
    works.append({'title': "Sparklines", 'date': "2019-01-01", "content": "chart"})
    return works
993,397 | d0031cc37bbcd1a14324f38fc9f375b3491ddaff | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-05-23 13:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ProjectPhase and
    ProjectTheme tables.  Do not edit field definitions retroactively --
    Django's migration history depends on them staying as generated."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='ProjectPhase',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('slug', models.SlugField(max_length=200, unique=True)),
                ('name', models.CharField(max_length=100, unique=True)),
                ('description', models.CharField(blank=True, max_length=400)),
                ('sequence', models.IntegerField(help_text='For ordering phases.', unique=True)),
                ('active', models.BooleanField(default=True, help_text='Whether this phase is in use or has been discarded.')),
                ('editable', models.BooleanField(default=True, help_text='Whether the project owner can change the details of theproject.')),
                ('viewable', models.BooleanField(default=True, help_text='Whether this phase, and projects in it show up at the website')),
                ('owner_editable', models.BooleanField(default=False, help_text='The owner can manually select between these phases')),
            ],
            options={
                'ordering': ['sequence'],
            },
        ),
        migrations.CreateModel(
            name='ProjectTheme',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, unique=True, verbose_name='name')),
                ('name_nl', models.CharField(max_length=100, unique=True, verbose_name='name NL')),
                ('slug', models.SlugField(max_length=100, unique=True, verbose_name='slug')),
                ('description', models.TextField(blank=True, verbose_name='description')),
                ('disabled', models.BooleanField(default=False, verbose_name='disabled')),
            ],
            options={
                'ordering': ['name'],
                'verbose_name': 'project theme',
                'verbose_name_plural': 'project themes',
            },
        ),
    ]
|
993,398 | 262c13ec682dbe0cbd7f1cf491b0eaa9911b2aaa | ITEM: TIMESTEP
1500
ITEM: NUMBER OF ATOMS
2048
ITEM: BOX BOUNDS pp pp pp
-3.2774172604533618e+00 5.0477417260445094e+01
-3.2774172604533618e+00 5.0477417260445094e+01
-3.2774172604533618e+00 5.0477417260445094e+01
ITEM: ATOMS id type xs ys zs
1611 1 0.0650907 0.151107 0.142289
180 1 0.128096 0.0699182 0.0557806
653 1 0.957262 0.117227 0.040565
889 1 0.12413 0.109392 0.132913
1045 1 0.146126 0.176592 0.125053
902 1 0.17466 0.0437869 0.134015
411 1 0.0456143 0.0430517 0.223414
268 1 0.209872 0.137671 0.0781442
1436 1 0.201483 0.213596 0.126655
1204 1 0.256995 0.0288652 0.0126715
1536 1 0.327184 0.47704 0.00809983
1873 1 0.264322 0.0825316 0.151725
1499 1 0.276741 0.162709 0.115613
1883 1 0.287109 0.0764856 0.0786463
491 1 0.201196 0.0757985 0.0282941
1037 1 0.486264 0.464279 0.0123246
298 1 0.331296 0.0486922 0.0413887
775 1 0.413363 0.136904 0.100237
1049 1 0.383082 0.141481 0.175704
1783 1 0.787399 0.460844 0.331419
412 1 0.0173623 0.0673903 0.0252637
1493 1 0.376043 0.0762089 0.111329
1903 1 0.332918 0.123788 0.122212
1702 1 0.486537 0.0897993 0.0789657
1564 1 0.559132 0.116521 0.0425034
1321 1 0.533865 0.143467 0.188142
622 1 0.500005 0.0271265 0.319352
844 1 0.400156 0.0965355 0.0400179
1466 1 0.531422 0.0836114 0.129775
1974 1 0.980655 0.0400111 0.0858813
1518 1 0.438962 0.0518408 0.137596
457 1 0.586975 0.1636 0.106019
867 1 0.655073 0.0754333 0.108085
857 1 0.624116 0.162266 0.19528
1413 1 0.732398 0.144211 0.154667
1094 1 0.581618 0.0522138 0.0739125
1776 1 0.678804 0.143832 0.107093
1949 1 0.410884 0.358351 0.0171103
476 1 0.596768 0.171408 0.0170865
1228 1 0.499443 0.0233484 0.0618704
749 1 0.733389 0.0669695 0.0963586
1660 1 0.683422 0.0323338 0.0558476
1072 1 0.812886 0.170613 0.0489733
1299 1 0.763621 0.141015 0.0924248
977 1 0.943662 0.445068 0.316042
1456 1 0.730111 0.203308 0.0533045
1268 1 0.788572 0.240334 0.0823567
546 1 0.883628 0.152763 0.0163886
42 1 0.851131 0.13344 0.117079
1897 1 0.620131 0.493687 0.264505
1363 1 0.823419 0.0841435 0.0718938
243 1 0.644656 0.488213 0.365668
1549 1 0.931128 0.0282695 0.202101
79 1 0.931095 0.0840351 0.14995
1122 1 0.883915 0.0827196 0.0243283
676 1 0.0339943 0.0730385 0.141701
823 1 0.677296 0.261269 0.0380766
949 1 0.0456015 0.113546 0.0711477
1116 1 0.870963 0.0408743 0.125246
1237 1 0.882627 0.0808128 0.204626
1464 1 0.447861 0.484204 0.228005
115 1 0.110783 0.252349 0.10202
1320 1 0.0188466 0.329663 0.0182593
1774 1 0.0438095 0.0346784 0.337084
956 1 0.0491494 0.240137 0.0430141
1577 1 0.0246261 0.155161 0.0169351
1364 1 0.10532 0.168398 0.0657845
1542 1 0.0590513 0.307909 0.089291
1734 1 0.119339 0.297677 0.0424244
1500 1 0.390057 0.496912 0.0433648
919 1 0.00186936 0.181604 0.0962534
1975 1 0.269269 0.24127 0.110985
1289 1 0.157508 0.301134 0.149207
541 1 0.266678 0.149381 0.0342957
1681 1 0.195582 0.274487 0.081957
448 1 0.752878 0.036719 0.0180522
1982 1 0.186197 0.239136 0.185621
486 1 0.292133 0.399729 0.244043
847 1 0.249296 0.331111 0.0337135
1892 1 0.355468 0.237817 0.0312745
1818 1 0.38716 0.199075 0.126184
500 1 0.34886 0.279783 0.120618
1230 1 0.316613 0.157579 0.185218
311 1 0.266014 0.336559 0.203633
620 1 0.419882 0.308216 0.154562
1026 1 0.335884 0.163915 0.0530379
671 1 0.289385 0.353831 0.103714
54 1 0.413699 0.216563 0.0543336
2003 1 0.432559 0.295288 0.0426983
1252 1 0.185245 0.497541 0.097724
908 1 0.498038 0.322293 0.101734
1655 1 0.524914 0.184323 0.0434701
1641 1 0.738353 0.103065 0.0301898
943 1 0.558148 0.233828 0.0847008
2002 1 0.561596 0.359379 0.154701
3 1 0.522874 0.313273 0.0358362
339 1 0.58338 0.348009 0.0304453
1926 1 0.972398 0.493407 0.363278
582 1 0.446487 0.362833 0.0732815
736 1 0.736537 0.317142 0.149656
195 1 0.732966 0.220896 0.143511
1005 1 0.689665 0.386299 0.113343
1977 1 0.665698 0.317581 0.108326
911 1 0.648726 0.212548 0.0941395
1195 1 0.55451 0.487046 0.301792
130 1 0.976161 0.37187 0.329575
1928 1 0.859689 0.205443 0.0923932
982 1 0.927979 0.298398 0.0043242
716 1 0.79224 0.366344 0.208108
354 1 0.829299 0.24943 0.0146356
370 1 0.889743 0.277547 0.0752377
855 1 0.736514 0.218056 0.225136
794 1 0.795045 0.242592 0.16542
211 1 0.689775 0.495229 0.288366
581 1 0.845808 0.328235 0.033474
759 1 0.923338 0.143236 0.0996329
1191 1 0.995145 0.373699 0.0755003
1540 1 0.71914 0.325505 0.0376783
978 1 0.951765 0.190857 0.0463585
1530 1 0.7259 0.283922 0.410456
93 1 0.96829 0.265646 0.0823215
313 1 0.892927 0.262579 0.153092
1806 1 0.159015 0.424147 0.0777119
179 1 0.955864 0.37084 0.427933
1608 1 0.0762579 0.375308 0.16112
2017 1 0.967901 0.44255 0.0832133
1820 1 0.0975657 0.360219 0.067742
1452 1 0.0935421 0.416728 0.106945
1427 1 0.0952513 0.0178993 0.27989
1606 1 0.165109 0.351778 0.0861976
870 1 0.235831 0.401972 0.0858975
863 1 0.1732 0.196567 0.049742
1863 1 0.590364 0.269152 0.0258335
162 1 0.210847 0.435701 0.146414
734 1 0.216116 0.358401 0.143757
1199 1 0.145133 0.462814 0.165973
1006 1 0.280991 0.4256 0.0170957
711 1 0.244209 0.010597 0.319099
2008 1 0.175269 0.47815 0.0152235
1223 1 0.379632 0.337543 0.0814872
40 1 0.370343 0.4343 0.0792046
254 1 0.330509 0.464621 0.179739
630 1 0.324429 0.368161 0.0300066
1200 1 0.362145 0.397112 0.142095
494 1 0.288138 0.403757 0.157255
396 1 0.76878 0.420569 0.405734
1701 1 0.823326 0.470606 0.391618
633 1 0.599822 0.422777 0.123274
1163 1 0.358721 0.0972625 0.4395
1287 1 0.529751 0.420485 0.106685
1792 1 0.522629 0.385263 0.0438852
286 1 0.549166 0.473422 0.0517148
1207 1 0.514422 0.340357 0.203645
472 1 0.447772 0.440599 0.0749366
2041 1 0.442024 0.384395 0.144787
1781 1 0.582508 0.29699 0.0812838
1069 1 0.623565 0.269684 0.136117
983 1 0.675503 0.396735 0.0186675
586 1 0.400554 0.0110525 0.342407
1881 1 0.647681 0.477548 0.152404
777 1 0.613998 0.434776 0.0351617
1665 1 0.460542 0.141007 0.00741114
925 1 0.697765 0.45639 0.0681553
1906 1 0.226823 0.18179 0.468682
1572 1 0.91027 0.351359 0.0550529
91 1 0.818179 0.373414 0.109068
1896 1 0.765475 0.0198735 0.265964
2039 1 0.629925 0.379727 0.0757094
1782 1 0.763009 0.37308 0.0645603
664 1 0.755092 0.499207 0.0964942
761 1 0.810804 0.448978 0.0906184
1208 1 0.76908 0.380644 0.322513
1492 1 0.0163838 0.472075 0.283774
261 1 0.0184586 0.423193 0.143099
1254 1 0.883463 0.439752 0.10887
950 1 0.836065 0.0120889 0.0670295
1743 1 0.842878 0.417178 0.174083
1066 1 0.101057 0.162853 0.277605
1472 1 0.13908 0.150773 0.192374
258 1 0.961692 0.164077 0.164386
147 1 0.0218899 0.0880331 0.269841
1081 1 0.191143 0.135466 0.240273
1169 1 0.124111 0.0430774 0.224505
1376 1 0.111127 0.0255788 0.151795
1927 1 0.200525 0.141049 0.153941
721 1 0.196122 0.0799035 0.188588
1650 1 0.263685 0.0315823 0.224184
732 1 0.080718 0.235651 0.168585
1445 1 0.264957 0.124863 0.220511
49 1 0.296103 0.137327 0.311714
312 1 0.160646 0.0649271 0.300515
1344 1 0.237857 0.0828948 0.27169
1885 1 0.857277 0.414019 0.0350345
174 1 0.398074 0.02697 0.029309
762 1 0.459837 0.133992 0.214109
1300 1 0.429788 0.0325145 0.282746
1614 1 0.332991 0.0677674 0.167373
1748 1 0.696375 0.482225 0.464263
320 1 0.544612 0.0288684 0.247289
1593 1 0.796119 0.0824258 0.473356
1538 1 0.349309 0.0793861 0.357871
837 1 0.390732 0.0950642 0.288226
1924 1 0.303216 0.0481475 0.286045
1177 1 0.336589 0.0251142 0.230352
680 1 0.33593 0.114574 0.250795
296 1 0.508498 0.067636 0.193889
157 1 0.44196 0.219444 0.187088
1829 1 0.50213 0.0866824 0.263203
393 1 0.43976 0.0360065 0.208906
1738 1 0.572376 0.217718 0.220271
1257 1 0.700583 0.219548 0.354274
1292 1 0.540616 0.0729198 0.37688
1999 1 0.634728 0.0390475 0.350966
1514 1 0.690367 0.148767 0.255505
684 1 0.304169 0.288814 0.0670629
1736 1 0.687508 0.049866 0.192399
1384 1 0.60888 0.183634 0.35864
1079 1 0.57742 0.109459 0.227776
1089 1 0.653135 0.0586792 0.256756
535 1 0.909292 0.152969 0.497324
381 1 0.603612 0.0843223 0.410257
1763 1 0.729743 0.0889159 0.284086
232 1 0.780581 0.0332545 0.135671
986 1 0.803011 0.116424 0.169914
1864 1 0.640407 0.371306 0.364498
1124 1 0.823982 0.0535164 0.3174
1573 1 0.833322 0.130093 0.248152
1484 1 0.990003 0.0217594 0.163015
549 1 0.777113 0.0827801 0.233385
1717 1 0.0699573 0.0705138 0.41165
1990 1 0.910583 0.072994 0.274613
2011 1 0.216547 0.264703 0.471948
1479 1 0.0052492 0.0129756 0.279067
435 1 0.876977 0.0135294 0.241958
1236 1 0.965975 0.0732083 0.321751
1288 1 0.837178 0.0267359 0.185259
284 1 0.09068 0.308898 0.153496
1401 1 0.970104 0.117428 0.235325
82 1 0.0534725 0.3215 0.214714
337 1 0.0827089 0.175324 0.353881
1772 1 0.0221352 0.334339 0.145122
1061 1 0.124991 0.29382 0.23065
1597 1 0.0368409 0.230924 0.21855
841 1 0.0140535 0.236599 0.304428
1515 1 0.00347257 0.146766 0.301487
975 1 0.00395267 0.300213 0.323897
1415 1 0.165321 0.132568 0.345848
1810 1 0.237218 0.196631 0.18547
2019 1 0.251015 0.2772 0.26596
1232 1 0.247009 0.352325 0.284667
647 1 0.188624 0.312055 0.218724
670 1 0.133595 0.210571 0.226842
703 1 0.310249 0.329796 0.268123
336 1 0.330365 0.227906 0.168161
963 1 0.404786 0.271857 0.225558
1661 1 0.398862 0.306927 0.294987
1417 1 0.324042 0.218482 0.280431
1400 1 0.264212 0.195949 0.257076
299 1 0.39493 0.18622 0.241109
1008 1 0.513048 0.266289 0.144859
506 1 0.44317 0.341124 0.226112
958 1 0.492506 0.229231 0.237865
537 1 0.643651 0.25956 0.383494
1487 1 0.35505 0.337132 0.182192
77 1 0.467747 0.196377 0.105753
144 1 0.487395 0.360671 0.394971
1731 1 0.439798 0.23805 0.2879
1507 1 0.471315 0.283063 0.199393
1488 1 0.533561 0.246151 0.322328
1964 1 0.654374 0.314949 0.257547
1244 1 0.600548 0.348568 0.229218
876 1 0.677479 0.304264 0.18394
329 1 0.559806 0.237772 0.394982
2018 1 0.675906 0.222056 0.277976
267 1 0.617539 0.267591 0.20818
1393 1 0.62408 0.275951 0.31397
574 1 0.691935 0.345335 0.310703
60 1 0.741705 0.308897 0.211883
700 1 0.580787 0.370205 0.307211
168 1 0.76573 0.156429 0.239188
19 1 0.91123 0.349683 0.175501
1921 1 0.817402 0.279268 0.235769
1157 1 0.814123 0.208072 0.26978
1340 1 0.905333 0.316128 0.278034
498 1 0.71464 0.154834 0.326057
1442 1 0.813641 0.269612 0.308177
1766 1 0.779354 0.327568 0.27766
1309 1 0.721857 0.27854 0.285857
1797 1 0.969681 0.233965 0.175003
357 1 0.912798 0.232542 0.309452
425 1 0.948775 0.224298 0.368612
594 1 0.921962 0.274439 0.221097
897 1 0.870294 0.156584 0.183822
1758 1 0.847642 0.268121 0.375581
1626 1 0.0323059 0.254598 0.134016
64 1 0.956216 0.298175 0.148175
1182 1 0.98384 0.189988 0.231482
1330 1 0.975713 0.325163 0.222819
1180 1 0.786659 0.483097 0.164138
879 1 0.145174 0.376012 0.165476
1901 1 0.182572 0.401584 0.316174
1631 1 0.0996235 0.379468 0.230063
990 1 0.0811469 0.4447 0.300555
81 1 0.0333847 0.423203 0.362391
269 1 0.0972046 0.337412 0.296464
571 1 0.408167 0.478228 0.490329
404 1 0.100659 0.451269 0.223706
250 1 0.224802 0.423163 0.253314
751 1 0.167903 0.378111 0.260036
1991 1 0.188986 0.483049 0.225734
1672 1 0.20856 0.480067 0.293326
148 1 0.350736 0.463646 0.338389
797 1 0.261057 0.458908 0.484535
1690 1 0.367643 0.37843 0.255749
443 1 0.260024 0.47401 0.193274
277 1 0.33124 0.453237 0.259649
1939 1 0.389774 0.494235 0.274225
345 1 0.39714 0.427269 0.30607
1402 1 0.334082 0.384463 0.315014
525 1 0.291377 0.213973 0.0183087
1823 1 0.497887 0.412079 0.188622
1876 1 0.411902 0.423977 0.217541
832 1 0.525011 0.333623 0.343038
1253 1 0.664842 0.42708 0.399574
657 1 0.513631 0.386291 0.276134
992 1 0.426054 0.495824 0.358699
1505 1 0.462961 0.294887 0.313308
757 1 0.561386 0.486534 0.139944
788 1 0.505664 0.302377 0.261864
1971 1 0.450613 0.364365 0.316892
866 1 0.713478 0.418206 0.286836
470 1 0.566302 0.424669 0.204338
776 1 0.571252 0.429877 0.345501
154 1 0.639915 0.394104 0.174543
1250 1 0.609549 0.425129 0.267098
764 1 0.665443 0.453426 0.231507
1242 1 0.861277 0.335248 0.347142
1386 1 0.716913 0.380349 0.220571
1598 1 0.76508 0.401774 0.156337
1759 1 0.886061 0.45025 0.221877
1825 1 0.778002 0.418842 0.250148
1023 1 0.670221 0.353041 0.435125
1139 1 0.840366 0.488318 0.262646
273 1 0.0526855 0.388692 0.0164744
85 1 0.7114 0.449696 0.166883
43 1 0.0240104 0.400108 0.212674
793 1 0.85078 0.377114 0.268372
18 1 0.937965 0.475708 0.184245
1256 1 0.933631 0.301622 0.347981
1255 1 0.953069 0.402921 0.200974
945 1 0.0410805 0.455538 0.035548
895 1 0.869219 0.443687 0.322735
1979 1 0.0249441 0.399828 0.279344
727 1 0.184238 0.477172 0.374439
1338 1 0.115527 0.129751 0.398923
803 1 0.147534 0.000826307 0.408887
73 1 0.954036 0.129726 0.407738
930 1 0.133306 0.0660044 0.365682
1938 1 0.107071 0.0362176 0.487019
1373 1 0.08315 0.105776 0.324225
555 1 0.33147 0.0268073 0.436904
1394 1 0.302352 0.466643 0.111397
1084 1 0.224115 0.0793968 0.365685
1125 1 0.182314 0.0137721 0.476977
1281 1 0.253545 0.0290494 0.405988
1516 1 0.284292 0.105416 0.458103
1933 1 0.178039 0.0888574 0.41807
1470 1 0.294106 0.0901871 0.392987
750 1 0.442795 0.150444 0.289804
745 1 0.397576 0.049764 0.407219
1907 1 0.757537 0.497195 0.435403
1635 1 0.41407 0.120898 0.35011
655 1 0.265638 0.208479 0.33934
349 1 0.549485 0.0456083 0.00730536
909 1 0.372463 0.299914 0.00171749
910 1 0.927958 0.399024 0.485474
149 1 0.449964 0.142591 0.405388
932 1 0.0298566 0.0270106 0.467413
2043 1 0.417777 0.217757 0.418771
593 1 0.49238 0.0259939 0.390471
1349 1 0.478658 0.0920868 0.33145
991 1 0.530881 0.142397 0.401455
1984 1 0.558958 0.175609 0.293981
1389 1 0.96837 0.410491 0.016098
1686 1 0.67789 0.0550202 0.453785
1161 1 0.758412 0.197481 0.468921
263 1 0.649037 0.119154 0.319681
898 1 0.540175 0.000663092 0.427754
340 1 0.586629 0.0464086 0.473367
281 1 0.203868 0.399826 0.478488
1315 1 0.576285 0.0664878 0.2985
1882 1 0.876349 0.422425 0.446632
145 1 0.704806 0.0475431 0.340827
1460 1 0.844932 0.118486 0.414503
1673 1 0.677003 0.127929 0.416033
78 1 0.772463 0.115451 0.41182
1709 1 0.766446 0.0849401 0.348567
251 1 0.542259 0.301192 0.487239
1306 1 0.785652 0.137824 0.304138
469 1 0.183908 0.366536 0.00821738
917 1 0.750128 0.0503667 0.434116
636 1 0.635 0.00792193 0.423684
1382 1 0.001305 0.157762 0.455463
980 1 0.866958 0.131752 0.328829
529 1 0.818558 0.0407 0.394816
1610 1 0.976344 0.0200748 0.368482
1249 1 0.383613 0.00512079 0.108836
560 1 0.938749 0.160896 0.329281
1625 1 0.929027 0.19366 0.427045
2006 1 0.890924 0.0427663 0.398168
24 1 0.0971211 0.269671 0.299984
1951 1 0.718301 0.412471 0.482605
583 1 0.845633 0.352153 0.439212
1457 1 0.0611746 0.153524 0.426531
31 1 0.203723 0.235708 0.405656
1392 1 0.0656679 0.250997 0.414025
1833 1 0.158255 0.17956 0.428587
406 1 0.0727627 0.325006 0.363236
1745 1 0.0406536 0.293587 0.455181
71 1 0.0646381 0.209039 0.485056
1685 1 0.134103 0.258795 0.451997
1623 1 0.844622 0.494757 0.135116
1960 1 0.259458 0.281282 0.363994
1109 1 0.135572 0.289949 0.37217
1791 1 0.173829 0.211552 0.341932
2022 1 0.268554 0.225264 0.420939
1958 1 0.169351 0.268282 0.274756
430 1 0.236738 0.141493 0.403292
401 1 0.333294 0.307522 0.342917
1477 1 0.378451 0.156129 0.401257
1913 1 0.334718 0.313837 0.423028
799 1 0.359765 0.220072 0.469151
1127 1 0.328797 0.22635 0.392083
467 1 0.397839 0.294052 0.425124
1969 1 0.382217 0.247814 0.336107
1189 1 0.579881 0.487621 0.397847
292 1 0.618363 0.0190625 0.014947
1849 1 0.30339 0.159201 0.415262
1590 1 0.574017 0.2432 0.500705
21 1 0.109968 0.490677 0.115289
101 1 0.473256 0.314267 0.452838
1696 1 0.517269 0.0714982 0.449499
380 1 0.552784 0.137568 0.476705
1206 1 0.43863 0.261677 0.487433
1934 1 0.374336 0.17898 0.323944
422 1 0.441565 0.19429 0.347967
1692 1 0.512761 0.240985 0.466837
1680 1 0.593419 0.3366 0.433239
1967 1 0.4879 0.278948 0.392942
1117 1 0.590102 0.309028 0.364002
1138 1 0.464182 0.452484 0.289849
1009 1 0.690916 0.224608 0.452436
1931 1 0.693496 0.138628 0.484433
1796 1 0.625189 0.170304 0.434628
512 1 0.791262 0.301013 0.385281
356 1 0.624561 0.28204 0.474829
235 1 0.735749 0.190786 0.402909
1917 1 0.801309 0.186327 0.375524
1956 1 0.834887 0.138412 0.489936
869 1 0.724225 0.357481 0.394182
50 1 0.844798 0.21219 0.462039
1909 1 0.766481 0.239009 0.353714
188 1 0.904722 0.392548 0.357114
399 1 0.768322 0.33025 0.457597
1029 1 0.914711 0.326193 0.476842
121 1 0.882816 0.194206 0.376592
334 1 0.712647 0.461348 0.358618
1348 1 0.968693 0.288598 0.432763
717 1 0.0601318 0.0222293 0.0645208
398 1 0.934389 0.385118 0.267481
1179 1 0.410513 0.161707 0.476607
530 1 0.0104798 0.424354 0.428044
665 1 0.939937 0.436703 0.410588
1136 1 0.0706527 0.418275 0.475237
429 1 0.0241081 0.358551 0.390568
386 1 0.113025 0.481877 0.406476
1327 1 0.522949 0.00342824 0.141077
468 1 0.851221 0.487189 0.458404
1360 1 0.161429 0.344143 0.454421
140 1 0.147974 0.416899 0.427201
1269 1 0.987793 0.455749 0.488058
1780 1 0.831625 0.405388 0.372718
818 1 0.141643 0.366999 0.362713
1184 1 0.191508 0.319648 0.325528
1648 1 0.632869 0.422798 0.471537
390 1 0.279057 0.43745 0.329789
163 1 0.220989 0.31161 0.415348
272 1 0.697221 0.308799 0.497741
1837 1 0.225112 0.454127 0.425034
1998 1 0.217452 0.377849 0.378366
1843 1 0.362648 0.446213 0.455298
973 1 0.417375 0.40693 0.469108
1218 1 0.297806 0.473153 0.38968
1828 1 0.334172 0.405402 0.396141
1025 1 0.284672 0.401317 0.447412
662 1 0.289502 0.334804 0.483415
155 1 0.356152 0.3706 0.4536
891 1 0.282979 0.353736 0.368582
218 1 0.424746 0.327777 0.368856
1391 1 0.467911 0.464336 0.429177
658 1 0.564376 0.424417 0.448369
953 1 0.497561 0.41744 0.362996
921 1 0.514976 0.461989 0.491261
1666 1 0.426864 0.41027 0.377477
1922 1 0.482045 0.388295 0.459326
209 1 0.988364 0.266774 0.0119447
937 1 0.191563 0.120404 0.484027
449 1 0.79324 0.416264 0.472111
437 1 0.132343 0.00274858 0.341963
1054 1 0.788513 0.498053 0.0188801
1303 1 0.816439 0.0512351 0.00811693
893 1 0.244224 0.0523698 0.476955
1946 1 0.978279 0.489731 0.00294941
441 1 0.224187 0.228411 0.00592813
1374 1 0.897923 0.228241 0.00163768
1024 1 0.091425 0.0320799 0.561318
1669 1 0.181665 0.011236 0.610868
1857 1 0.0645781 0.10066 0.654102
638 1 0.674937 0.373026 0.518959
970 1 0.15775 0.0691427 0.545352
1368 1 0.143263 0.115019 0.647341
1703 1 0.903012 0.470571 0.998591
1980 1 0.127227 0.0356406 0.664843
2010 1 0.222476 0.0891567 0.577063
1369 1 0.834057 0.339948 0.513101
526 1 0.354679 0.42431 0.52462
1602 1 0.112584 0.128963 0.577068
1737 1 0.442018 0.153465 0.544712
722 1 0.203221 0.169108 0.660807
46 1 0.340121 0.0895633 0.675203
1229 1 0.30488 0.463989 0.716771
741 1 0.271303 0.104181 0.629512
1920 1 0.290629 0.0934658 0.539735
226 1 0.424538 0.435582 0.542464
1291 1 0.453569 0.112205 0.624616
1068 1 0.418073 0.330436 0.503109
289 1 0.316848 0.0219978 0.571261
377 1 0.555652 0.0431505 0.716852
233 1 0.515554 0.0863288 0.591853
550 1 0.396076 0.0700808 0.621062
838 1 0.440143 0.0375732 0.659217
1663 1 0.66259 0.498866 0.627446
348 1 0.326017 0.159154 0.502439
649 1 0.0734764 0.180592 0.966639
228 1 0.507643 0.00286879 0.672624
1695 1 0.0418155 0.359192 0.534179
1011 1 0.67201 0.169794 0.542335
1235 1 0.754536 0.166338 0.692592
883 1 0.560771 0.0237788 0.543063
395 1 0.5812 0.458394 0.556581
1408 1 0.556822 0.159366 0.607536
1286 1 0.725107 0.140167 0.61513
994 1 0.570639 0.00321912 0.631146
221 1 0.313544 0.0240892 0.645975
1099 1 0.485892 0.0135527 0.589276
1104 1 0.909566 0.376826 0.849012
626 1 0.809913 0.0851699 0.546869
1789 1 0.658124 0.0232782 0.664865
768 1 0.366971 0.429554 0.917307
12 1 0.644057 0.0750314 0.518012
1078 1 0.422152 0.442098 0.973528
858 1 0.764742 0.0805083 0.717784
1654 1 0.879739 0.0424906 0.570531
928 1 0.21167 0.339938 0.514851
1 1 0.622455 0.497881 0.863426
493 1 0.828603 0.183051 0.648884
181 1 0.968604 0.111547 0.503941
1637 1 0.947065 0.0962507 0.583844
2009 1 0.0164086 0.250389 0.517102
1062 1 0.933311 0.0226583 0.620372
1001 1 0.00913916 0.055335 0.535617
1698 1 0.893614 0.159724 0.569142
177 1 0.0325816 0.162148 0.709499
544 1 0.191691 0.153015 0.581919
1334 1 0.094599 0.191662 0.55314
1192 1 0.0946074 0.260816 0.625983
728 1 0.0456302 0.137455 0.535078
779 1 0.0353426 0.173578 0.628402
1114 1 0.197363 0.242585 0.550764
4 1 0.249087 0.20131 0.705425
1981 1 0.239344 0.260111 0.659444
1550 1 0.287455 0.302808 0.568918
216 1 0.668443 0.453844 0.711724
23 1 0.245255 0.355095 0.604701
210 1 0.18179 0.223275 0.702029
260 1 0.128839 0.265068 0.570714
673 1 0.734951 0.483702 0.655401
220 1 0.253181 0.167264 0.549819
1511 1 0.305218 0.239656 0.667355
1684 1 0.306404 0.172698 0.61408
37 1 0.283803 0.230568 0.579928
1434 1 0.774733 0.491447 0.883854
765 1 0.368399 0.233728 0.614229
618 1 0.322394 0.0180326 0.801058
285 1 0.462888 0.17021 0.67092
197 1 0.382821 0.137379 0.598236
613 1 0.418371 0.285399 0.63519
1107 1 0.565499 0.236799 0.569618
1675 1 0.531218 0.304354 0.569629
463 1 0.46537 0.228723 0.534379
1616 1 0.453825 0.187813 0.599002
1120 1 0.387836 0.28729 0.565277
374 1 0.465071 0.298918 0.550024
605 1 0.526317 0.225953 0.64349
1808 1 0.774886 0.255704 0.620992
207 1 0.579527 0.0915276 0.539057
2025 1 0.702295 0.283118 0.595262
1404 1 0.182626 0.480708 0.930798
63 1 0.630875 0.34848 0.588966
833 1 0.71161 0.221814 0.633837
208 1 0.641267 0.224008 0.57677
1035 1 0.777388 0.264061 0.535286
1013 1 0.938895 0.245734 0.576466
371 1 0.898785 0.240136 0.51262
1485 1 0.740353 0.197945 0.572996
1513 1 0.904512 0.320628 0.552963
427 1 0.863471 0.247995 0.59231
1447 1 0.995903 0.185315 0.57541
985 1 0.0241043 0.295981 0.569013
1446 1 0.0400399 0.241863 0.6581
362 1 0.991621 0.321765 0.505502
886 1 0.893808 0.208777 0.644557
900 1 0.940806 0.301437 0.623458
265 1 0.969838 0.214534 0.638363
483 1 0.0252615 0.402897 0.601842
1366 1 0.535776 0.482939 0.726859
1592 1 0.530172 0.374973 0.510957
127 1 0.546597 0.307842 0.972841
215 1 0.154168 0.436979 0.725732
1411 1 0.139293 0.330917 0.599769
1992 1 0.101616 0.423659 0.608656
2044 1 0.539717 0.37969 0.95415
14 1 0.30335 0.318023 0.671434
278 1 0.929052 0.263865 0.945688
920 1 0.118176 0.412157 0.671307
1997 1 0.173694 0.386601 0.555785
1918 1 0.848404 0.0445189 0.500132
436 1 0.214165 0.454387 0.784579
1063 1 0.214046 0.452309 0.59681
1589 1 0.249206 0.409336 0.540662
1043 1 0.466538 0.379455 0.55592
805 1 0.93868 0.0489002 0.511309
1778 1 0.328011 0.373654 0.591771
1853 1 0.806251 0.375695 0.871795
1972 1 0.350319 0.354267 0.529269
1830 1 0.309002 0.447987 0.572553
743 1 0.379013 0.441224 0.597993
1970 1 0.757146 0.311862 0.933279
1962 1 0.424984 0.401296 0.655565
1131 1 0.785313 0.0364263 0.598312
165 1 0.507262 0.443562 0.571181
954 1 0.806633 0.174843 0.538398
1715 1 0.45039 0.341678 0.633226
355 1 0.346508 0.301812 0.615169
853 1 0.51479 0.26421 0.699449
1762 1 0.547463 0.322436 0.652109
1862 1 0.553773 0.452839 0.627379
454 1 0.512777 0.386409 0.635403
742 1 0.648942 0.387575 0.718372
1420 1 0.854518 0.463129 0.950179
611 1 0.605015 0.405475 0.634767
562 1 0.0875796 0.477417 0.749604
813 1 0.610927 0.0626436 0.596081
22 1 0.603955 0.352978 0.515012
1773 1 0.419983 0.0254963 0.545565
137 1 0.703816 0.429811 0.563597
1646 1 0.710691 0.356783 0.603654
533 1 0.603966 0.172936 0.500736
1817 1 0.748777 0.34773 0.526472
906 1 0.379556 0.208563 0.543926
1226 1 0.772644 0.336675 0.641962
421 1 0.807278 0.416838 0.551974
753 1 0.84123 0.413389 0.620577
1708 1 0.762384 0.409157 0.620299
1429 1 0.754149 0.463966 0.520386
1545 1 0.707631 0.392044 0.87365
588 1 0.969137 0.390754 0.546375
713 1 0.875669 0.426879 0.558861
1556 1 0.893763 0.415016 0.702251
1347 1 0.957312 0.365784 0.662515
1834 1 0.0119577 0.470282 0.612496
1444 1 0.905008 0.474186 0.503891
387 1 0.924305 0.419715 0.612412
575 1 0.887093 0.484985 0.605873
1132 1 0.621992 0.401451 0.971714
748 1 0.0181722 0.10183 0.751007
47 1 0.0687511 0.0549001 0.755417
200 1 0.0618269 0.149634 0.908805
807 1 0.0803093 0.102092 0.841161
1802 1 0.134423 0.112836 0.727571
1812 1 0.133159 0.106551 0.91233
1423 1 0.302318 0.245384 0.508627
1719 1 0.184321 0.068171 0.716463
568 1 0.139472 0.025075 0.739921
773 1 0.207486 0.0742103 0.641323
817 1 0.288809 0.48917 0.617819
981 1 0.242763 0.0907912 0.768291
565 1 0.111852 0.173179 0.671908
62 1 0.212546 0.153783 0.84887
353 1 0.391323 0.0634647 0.722464
1483 1 0.311558 0.082834 0.772597
1948 1 0.64911 0.0249457 0.861456
255 1 0.554407 0.387858 0.575969
1838 1 0.357738 0.123138 0.746103
446 1 0.249084 0.0398765 0.709022
1305 1 0.254223 0.0814695 0.857156
1941 1 0.477339 0.0640587 0.806085
503 1 0.574346 0.17352 0.760538
685 1 0.485529 0.156912 0.760101
2012 1 0.414959 0.126678 0.691747
2004 1 0.537072 0.101253 0.761408
1713 1 0.387888 0.0387821 0.82916
1354 1 0.199201 0.486414 0.515786
489 1 0.592965 0.0510783 0.810426
1455 1 0.478302 0.0700782 0.720324
1110 1 0.642331 0.0190369 0.760554
634 1 0.521184 0.111279 0.682228
1335 1 0.654011 0.139754 0.747057
378 1 0.91735 0.36394 0.969308
725 1 0.64087 0.132381 0.626917
834 1 0.649206 0.113524 0.81139
2028 1 0.617536 0.0825592 0.684501
306 1 0.767039 0.00994806 0.656597
733 1 0.694031 0.102071 0.708477
1314 1 0.853793 0.102837 0.79988
1539 1 0.916186 0.486305 0.674425
202 1 0.729721 0.113058 0.815666
1463 1 0.579012 0.388244 0.860763
709 1 0.470038 0.243607 0.996018
1468 1 0.795084 0.136454 0.768242
25 1 0.888391 0.121131 0.954308
94 1 0.723065 0.193614 0.87841
831 1 0.991821 0.106824 0.682358
1919 1 0.960516 0.147393 0.630408
452 1 0.862532 0.111795 0.602101
1276 1 0.940606 0.147658 0.729427
1118 1 0.789114 0.111047 0.628084
193 1 0.907784 0.073631 0.741319
203 1 0.880377 0.0603406 0.668658
1185 1 0.849386 0.129554 0.71836
801 1 0.973045 0.0279729 0.764701
17 1 0.0600556 0.153456 0.780697
1504 1 0.0391616 0.223336 0.751061
372 1 0.12807 0.340094 0.675366
1356 1 0.986557 0.266836 0.874915
132 1 0.112401 0.238714 0.755706
1993 1 0.0691847 0.217747 0.829347
939 1 0.147137 0.245326 0.866394
1835 1 0.0144853 0.316477 0.652042
359 1 0.982807 0.37074 0.78414
718 1 0.274925 0.12512 0.698442
2007 1 0.256068 0.211767 0.775094
392 1 0.138684 0.15773 0.80358
627 1 0.209703 0.324217 0.659493
601 1 0.219726 0.227769 0.85266
1869 1 0.181697 0.220123 0.772131
1383 1 0.21349 0.149588 0.737893
1156 1 0.166553 0.303741 0.75919
1175 1 0.183428 0.29378 0.824125
1622 1 0.324051 0.217423 0.750294
256 1 0.38651 0.271291 0.71589
947 1 0.231631 0.269544 0.72478
365 1 0.395292 0.198325 0.667131
815 1 0.326629 0.179691 0.689242
2016 1 0.307996 0.149152 0.84023
927 1 0.312204 0.307236 0.900164
1555 1 0.443715 0.236911 0.700368
1630 1 0.522369 0.187994 0.706295
142 1 0.4022 0.185217 0.74142
167 1 0.572235 0.343992 0.762144
83 1 0.453397 0.311499 0.70552
1251 1 0.505927 0.291142 0.761269
545 1 0.375098 0.196354 0.810397
447 1 0.593387 0.174752 0.669218
1272 1 0.644055 0.229336 0.649012
1594 1 0.554094 0.299145 0.817224
849 1 0.620643 0.268934 0.774775
1786 1 0.597016 0.256733 0.854444
871 1 0.732861 0.285042 0.697322
767 1 0.60685 0.323464 0.705724
712 1 0.586579 0.230093 0.712167
1735 1 0.615032 0.192704 0.822557
1311 1 0.790997 0.254955 0.69427
514 1 0.704 0.181052 0.796174
186 1 0.680468 0.173056 0.676118
1112 1 0.674547 0.22574 0.728499
637 1 0.754504 0.273389 0.786677
1329 1 0.747798 0.225732 0.743194
133 1 0.79546 0.341694 0.706473
1486 1 0.801272 0.0693722 0.841586
484 1 0.973814 0.208597 0.722101
1860 1 0.905425 0.12929 0.670621
846 1 0.89348 0.257436 0.725867
1632 1 0.947761 0.272848 0.683718
481 1 0.891156 0.358529 0.610432
674 1 0.820414 0.216559 0.77031
1724 1 0.993605 0.298983 0.740215
1607 1 0.0319417 0.358704 0.707107
1036 1 0.830115 0.323955 0.832034
1267 1 0.890768 0.180095 0.810256
1034 1 0.0543609 0.496886 0.806808
610 1 0.103371 0.387775 0.73677
1056 1 0.076559 0.334217 0.780586
1033 1 0.0513236 0.423046 0.773977
413 1 0.113128 0.453834 0.814315
305 1 0.975913 0.442751 0.682031
461 1 0.0575056 0.46757 0.869409
1629 1 0.0891974 0.394277 0.835426
1978 1 0.742111 0.362289 0.817832
914 1 0.260259 0.279993 0.792852
1846 1 0.186744 0.369936 0.72763
152 1 0.233416 0.359615 0.786741
415 1 0.0492069 0.424546 0.672098
253 1 0.343638 0.270813 0.814761
65 1 0.377902 0.328285 0.670044
599 1 0.346202 0.421521 0.671951
444 1 0.35294 0.323382 0.743433
212 1 0.336615 0.367794 0.883784
570 1 0.306237 0.377363 0.797799
1700 1 0.26579 0.405661 0.644912
2045 1 0.266973 0.380193 0.714912
663 1 0.49827 0.431642 0.763561
343 1 0.700293 0.354314 0.950177
1140 1 0.516325 0.368795 0.796866
1687 1 0.555365 0.402952 0.705719
1693 1 0.428438 0.329633 0.779727
884 1 0.404327 0.387537 0.720437
791 1 0.458208 0.449664 0.839006
1469 1 0.397227 0.457268 0.73527
61 1 0.487612 0.381983 0.707898
1313 1 0.698423 0.341756 0.669008
229 1 0.729484 0.4129 0.701011
2020 1 0.598613 0.426033 0.771676
1264 1 0.645275 0.423627 0.83651
1779 1 0.631405 0.362844 0.808532
1871 1 0.691547 0.397931 0.782388
86 1 0.219842 0.025096 0.544245
388 1 0.746774 0.455882 0.775961
730 1 0.843214 0.392531 0.801563
894 1 0.622072 0.431421 0.905318
310 1 0.80933 0.457672 0.668953
1426 1 0.727621 0.344997 0.732283
1679 1 0.806464 0.443036 0.832012
492 1 0.866597 0.4732 0.738068
2029 1 0.81449 0.312641 0.588793
1371 1 0.690544 0.289064 0.770171
1749 1 0.863253 0.277141 0.654391
287 1 0.545992 0.438296 0.822134
1396 1 0.690921 0.464249 0.884687
72 1 0.92213 0.422355 0.781593
15 1 0.815823 0.412606 0.715218
1431 1 0.698274 0.243637 0.528056
1799 1 0.0823223 0.269791 0.516496
614 1 0.0136613 0.123887 0.955889
1866 1 0.0433829 0.0757663 0.915235
1580 1 0.684177 0.482951 0.789393
1052 1 0.0610861 0.46495 0.537516
67 1 0.115175 0.0228118 0.896827
1618 1 0.933396 0.0423966 0.99734
1312 1 0.0227369 0.0535491 0.831068
1337 1 0.211708 0.148621 0.975462
146 1 0.637845 0.324463 0.978202
1077 1 0.177799 0.0606799 0.86757
1585 1 0.14942 0.181528 0.924112
1096 1 0.14294 0.0746073 0.801022
1905 1 0.223345 0.0907206 0.92153
1765 1 0.271414 0.032064 0.940708
315 1 0.266632 0.153377 0.906541
1283 1 0.125765 0.0457087 0.973184
166 1 0.357144 0.139401 0.978768
1868 1 0.284276 0.115917 0.980753
150 1 0.634874 0.359828 0.910845
34 1 0.0513288 0.000725999 0.871044
475 1 0.49245 0.44409 0.664139
1683 1 0.435146 0.0370025 0.878574
1296 1 0.346188 0.0819353 0.870761
660 1 0.337411 0.0662583 0.963674
507 1 0.37938 0.0148051 0.928597
2046 1 0.254215 0.0300868 0.613213
1048 1 0.538931 0.0609735 0.869546
1653 1 0.649741 0.296482 0.547615
1126 1 0.401057 0.0834605 0.947092
301 1 0.452093 0.121287 0.920016
1752 1 0.487927 0.143354 0.839422
124 1 0.78062 0.309533 1.00004
547 1 0.569716 0.0782456 0.938336
1667 1 0.585198 0.129663 0.858958
252 1 0.526824 0.149123 0.902817
1889 1 0.513801 0.0457465 0.938905
1093 1 0.67575 0.0763219 0.999505
214 1 0.642616 0.0333119 0.940884
335 1 0.71636 0.0732151 0.880604
1699 1 0.647071 0.192595 0.892298
524 1 0.64623 0.21963 0.986087
1174 1 0.403007 0.00165629 0.772191
1899 1 0.686868 0.130773 0.907854
1225 1 0.405507 0.485104 0.883696
1350 1 0.686741 0.155674 0.989083
964 1 0.131053 0.441602 0.51506
1865 1 0.82591 0.0303257 0.791245
1894 1 0.761936 0.165108 0.975274
961 1 0.72031 0.0456498 0.79816
573 1 0.760385 0.103753 0.929004
419 1 0.765106 0.0117586 0.87503
641 1 0.96998 0.469887 0.932174
606 1 0.850066 0.0285818 0.91905
1803 1 0.904435 0.188428 0.938516
1134 1 0.912434 0.0367564 0.824924
1890 1 0.996842 0.123534 0.867331
1664 1 0.889089 0.0807344 0.868349
2032 1 0.835079 0.00386187 0.852793
578 1 0.91124 0.00678425 0.905509
508 1 0.88474 0.154206 0.883765
597 1 0.924619 0.100163 0.806087
1065 1 0.817602 0.118367 0.998549
1106 1 0.978525 0.152124 0.79121
1085 1 0.088183 0.326816 0.974443
951 1 0.102085 0.286308 0.831118
1723 1 0.169642 0.252171 0.952103
1961 1 0.0480991 0.274587 0.942647
1409 1 0.0851497 0.10465 0.975662
1645 1 0.608236 0.100795 0.990684
295 1 0.962218 0.169826 0.976243
1621 1 0.107955 0.23619 0.924732
1526 1 0.982338 0.206898 0.912149
1227 1 0.743768 0.231113 0.986877
39 1 0.233936 0.218581 0.923229
691 1 0.217195 0.295415 0.935774
206 1 0.225261 0.0195883 0.776719
1935 1 0.317109 0.21759 0.938241
1932 1 0.702719 0.00933071 0.917649
656 1 0.375799 0.26953 0.929756
629 1 0.068694 0.0184447 0.954902
405 1 0.295179 0.246217 0.86941
738 1 0.378876 0.145649 0.900814
1604 1 0.52007 0.278748 0.913831
288 1 0.532462 0.204182 0.845136
1751 1 0.473322 0.298269 0.837318
585 1 0.406553 0.254004 0.781561
819 1 0.491858 0.197883 0.937778
318 1 0.664465 0.471335 0.950996
1000 1 0.440152 0.242935 0.879643
965 1 0.553395 0.237727 0.96621
1145 1 0.510192 0.392657 0.869327
1410 1 0.611039 0.271687 0.933097
199 1 0.60547 0.136463 0.927539
915 1 0.697912 0.325941 0.864524
131 1 0.581248 0.205365 0.907393
714 1 0.700466 0.279527 0.972188
903 1 0.570828 0.33445 0.905961
109 1 0.228932 0.0119856 0.890111
1959 1 0.846068 0.307602 0.955361
1458 1 0.788884 0.223566 0.91365
1097 1 0.838144 0.210484 0.852978
128 1 0.815121 0.160129 0.905875
1221 1 0.964578 0.202579 0.513885
1357 1 0.676127 0.264386 0.868307
563 1 0.789142 0.335055 0.775092
1187 1 0.916866 0.321988 0.73302
1768 1 0.788451 0.162733 0.83449
136 1 0.733114 0.427704 0.93554
2001 1 0.939233 0.0762245 0.92561
480 1 0.903814 0.256725 0.803824
1965 1 0.855369 0.238419 0.919742
409 1 0.830374 0.374225 0.975208
1197 1 0.478011 0.497246 0.941142
1711 1 0.976604 0.400095 0.92862
1278 1 0.871986 0.400503 0.910206
1461 1 0.865465 0.305283 0.886488
1381 1 0.791774 0.433474 0.973962
944 1 0.0973499 0.425246 0.904846
1149 1 0.00992009 0.411275 0.855549
1275 1 0.153247 0.320194 0.892199
182 1 0.0359764 0.451173 0.952405
172 1 0.99208 0.460616 0.769855
885 1 0.15642 0.294286 0.512691
678 1 0.16659 0.39576 0.789031
222 1 0.461958 0.0211286 0.983147
247 1 0.303752 0.352621 0.951348
330 1 0.25385 0.375691 0.903317
126 1 0.299999 0.424664 0.893375
27 1 0.875979 0.448554 0.861573
1243 1 0.990275 0.482953 0.849653
1727 1 0.475783 0.412182 0.928749
1042 1 0.377903 0.354136 0.818839
874 1 0.98341 0.0204149 0.685929
1326 1 0.297033 0.285005 0.993853
1721 1 0.00220526 0.042359 0.612893
820 1 0.395569 0.359466 0.944012
1671 1 0.389621 0.313117 0.876242
1851 1 0.375316 0.442275 0.823857
316 1 0.769591 0.284447 0.870814
1216 1 0.588018 0.489847 0.960524
789 1 0.441327 0.403881 0.788511
1952 1 0.467905 0.334502 0.984954
1385 1 0.110181 0.491071 0.929651
196 1 0.411493 0.391479 0.87621
1017 1 0.529335 0.455495 0.928186
1628 1 0.367742 0.28018 0.499664
1481 1 0.155148 0.482192 0.578837
1816 1 0.450077 0.458551 0.607458
1668 1 0.832422 0.188194 0.971008
1953 1 0.523683 0.120968 0.967975
1092 1 0.448827 0.484247 0.766425
1467 1 0.355771 0.0970479 0.539141
217 1 0.416183 0.181967 0.960201
455 1 0.504885 0.0623082 0.520927
236 1 0.318008 0.0259022 0.713601
1531 1 0.430425 0.0838289 0.501161
1785 1 0.88956 0.00183956 0.735455
314 1 0.516788 0.00779214 0.76049
812 1 0.864628 0.00689207 0.99466
1196 1 0.117575 0.215602 0.999005
938 1 0.153475 0.124148 0.999225
1878 1 0.309627 0.0371102 0.504389
719 1 0.0876053 0.495332 0.995504
1271 1 0.723379 0.450369 0.997956
1443 1 0.157532 0.205004 0.503486
648 1 0.10998 0.619793 0.0762015
802 1 0.956647 0.693811 0.0813478
1341 1 0.712585 0.998417 0.397888
735 1 0.618938 0.915044 0.315229
151 1 0.105147 0.668845 0.131853
604 1 0.990843 0.602941 0.18017
434 1 0.135525 0.742692 0.110393
934 1 4.94222e-05 0.649713 0.103683
1904 1 0.116399 0.547019 0.35468
1565 1 0.0397879 0.510223 0.129586
651 1 0.732046 0.500029 0.217884
1100 1 0.174263 0.988932 0.184106
99 1 0.112425 0.551705 0.0598329
282 1 0.175183 0.624684 0.00976193
120 1 0.243604 0.631132 0.192653
516 1 0.30539 0.594264 0.0640914
1262 1 0.333144 0.970797 0.0392256
1351 1 0.214468 0.693859 0.0911505
1082 1 0.139968 0.649389 0.216444
1658 1 0.12983 0.68407 0.0456613
1523 1 0.252041 0.589271 0.124108
542 1 0.582697 0.527496 0.204382
1929 1 0.491685 0.568771 0.0485876
690 1 0.36339 0.613844 0.00563713
1847 1 0.498955 0.507783 0.359715
948 1 0.371159 0.588942 0.0762452
1407 1 0.853019 0.894678 0.435627
1915 1 0.294101 0.527917 0.0730368
332 1 0.794986 0.545285 0.23472
1336 1 0.442757 0.530954 0.125126
338 1 0.607943 0.60492 0.0884194
864 1 0.384155 0.763244 0.0228245
1886 1 0.488628 0.627928 0.122297
1527 1 0.678881 0.984003 0.146608
352 1 0.490029 0.50049 0.0786224
438 1 0.468864 0.527279 0.488135
1814 1 0.632118 0.66259 0.0565798
1534 1 0.676833 0.622258 0.125008
1168 1 0.199854 0.992105 0.084232
699 1 0.515108 0.559542 0.140961
478 1 0.634797 0.570591 0.0197083
861 1 0.685416 0.523489 0.0912625
808 1 0.507464 0.75264 0.015282
1245 1 0.853314 0.631734 0.118805
1583 1 0.829988 0.588198 0.0339008
1277 1 0.0096365 0.524789 0.445577
687 1 0.766066 0.651231 0.0848121
935 1 0.730535 0.688244 0.0371589
621 1 0.256945 0.993248 0.157018
1474 1 0.821114 0.576915 0.175405
1398 1 0.76086 0.577245 0.0423833
495 1 0.506129 0.67304 0.428595
697 1 0.790372 0.652309 0.0148815
1478 1 0.64463 0.532863 0.420756
1087 1 0.717258 0.931931 0.374093
1044 1 0.0458609 0.592367 0.0877372
505 1 0.867643 0.520241 0.193129
259 1 0.87903 0.630424 0.0577866
1412 1 0.954116 0.643612 0.0216282
1567 1 0.885094 0.562105 0.0912168
852 1 0.925888 0.526792 0.140684
385 1 0.831798 0.816069 0.439756
84 1 0.585016 0.992575 0.117256
1730 1 0.936031 0.620906 0.121017
1546 1 0.892551 0.597006 0.179087
26 1 0.952987 0.74622 0.133202
809 1 0.0246661 0.626321 0.0168025
1627 1 0.0522909 0.692396 0.0571957
1083 1 0.017818 0.756664 0.0901462
1318 1 0.102981 0.747516 0.0518602
702 1 0.0559184 0.77188 0.247977
176 1 0.701869 0.545683 0.0260459
1317 1 0.159945 0.70588 0.166456
795 1 0.607741 0.946958 0.474151
1167 1 0.6741 0.948679 0.447036
946 1 0.794863 0.551514 0.100686
1379 1 0.218374 0.768081 0.129243
1395 1 0.904193 0.541917 0.0285648
873 1 0.273728 0.789677 0.0861477
1205 1 0.24017 0.729644 0.0340439
929 1 0.225301 0.85316 0.142949
810 1 0.229819 0.878668 0.0307407
59 1 0.320344 0.721858 0.0530039
1800 1 0.433071 0.633755 0.0655223
1801 1 0.340668 0.813585 0.071026
1203 1 0.421959 0.784352 0.0806144
1584 1 0.485862 0.805124 0.0681983
1241 1 0.381707 0.880451 0.0390414
984 1 0.37479 0.792874 0.133807
1600 1 0.438949 0.706408 0.0299757
1307 1 0.474987 0.747275 0.105464
551 1 0.563854 0.701111 0.226086
1559 1 0.565094 0.785821 0.230173
859 1 0.514098 0.869483 0.188974
2023 1 0.841939 0.514243 0.340583
74 1 0.584469 0.745234 0.0678025
1533 1 0.218665 0.959384 0.404435
240 1 0.573355 0.81414 0.0574659
642 1 0.675369 0.749153 0.0303418
646 1 0.59313 0.741129 0.145778
1966 1 0.812188 0.715125 0.049935
113 1 0.566202 0.540298 0.0483641
519 1 0.85675 0.763815 0.0379939
740 1 0.733076 0.839559 0.0265262
1568 1 0.687587 0.624435 0.0224368
737 1 0.84358 0.847469 0.102365
1293 1 0.91957 0.961687 0.225597
496 1 0.751532 0.756147 0.0449751
189 1 0.679054 0.72822 0.191979
600 1 0.923071 0.755054 0.0661999
117 1 0.342793 0.53488 0.129867
1058 1 0.890715 0.70103 0.104813
2024 1 0.0178774 0.800015 0.155368
1153 1 0.882051 0.781339 0.118173
1624 1 0.907418 0.812906 0.0265372
1144 1 0.80933 0.709965 0.120578
816 1 0.861193 0.742255 0.193977
107 1 0.973544 0.801976 0.0485778
445 1 0.0289333 0.936558 0.13517
1551 1 0.0300224 0.845625 0.0635209
781 1 0.120402 0.80798 0.0163133
1004 1 0.0869664 0.909072 0.066388
55 1 0.737611 0.544057 0.496033
201 1 0.196395 0.935588 0.140498
326 1 0.956478 0.987903 0.0477881
1612 1 0.0935243 0.877052 0.146319
1490 1 0.209266 0.817133 0.0612405
1176 1 0.222203 0.919965 0.262545
1473 1 0.104238 0.825422 0.0816714
532 1 0.156611 0.870588 0.0681037
1358 1 0.240112 0.936716 0.0816934
1051 1 0.268458 0.943869 0.0122097
784 1 0.241882 0.911818 0.18852
553 1 0.0277622 0.766778 0.0172764
1688 1 0.940265 0.535844 0.454939
417 1 0.825697 0.839255 0.301728
615 1 0.768181 0.852088 0.4774
490 1 0.298305 0.84662 0.120743
1747 1 0.310434 0.901202 0.0652803
1075 1 0.394908 0.947506 0.0197986
579 1 0.325992 0.914836 0.144073
366 1 0.322153 0.998691 0.347184
112 1 0.534837 0.900028 0.0102482
2034 1 0.368293 0.863196 0.112633
650 1 0.45032 0.978549 0.0843209
66 1 0.46744 0.915648 0.0802167
16 1 0.955942 0.967866 0.417657
1575 1 0.63176 0.964746 0.0674896
1809 1 0.905732 0.774234 0.483051
912 1 0.53746 0.861949 0.111062
967 1 0.588333 0.888181 0.0689714
2014 1 0.562905 0.976881 0.00233542
309 1 0.668334 0.882444 0.100964
521 1 0.650073 0.877597 0.0150265
1214 1 0.672314 0.814896 0.051443
786 1 0.803464 0.965073 0.00823633
2033 1 0.690986 0.926845 0.0350874
1378 1 0.943655 0.88729 0.377495
1450 1 0.288383 0.953913 0.21298
1211 1 0.821345 0.89059 0.160534
1674 1 0.0659012 0.967317 0.476783
423 1 0.773966 0.853597 0.099759
1323 1 0.310797 0.522033 0.222099
979 1 0.898006 0.968997 0.0822187
517 1 0.171231 0.54036 0.177328
373 1 0.963716 0.879882 0.00694711
1325 1 0.778034 0.92556 0.0619766
341 1 0.896199 0.883825 0.0709567
1295 1 0.887056 0.917065 0.143191
554 1 0.122305 0.948345 0.129119
1957 1 0.951118 0.843826 0.100621
304 1 0.926738 0.985243 0.142209
499 1 0.848043 0.956147 0.198092
1067 1 0.581499 0.630466 0.412272
1529 1 0.410986 0.993203 0.456805
1032 1 0.07465 0.6808 0.263414
1159 1 0.11641 0.758805 0.277302
1697 1 0.0656098 0.62131 0.193813
100 1 0.0646255 0.545625 0.229026
1123 1 0.223631 0.552777 0.298854
887 1 0.18236 0.555368 0.380176
262 1 0.261589 0.609953 0.270079
1891 1 0.175042 0.59369 0.240942
1164 1 0.185744 0.611982 0.330316
1055 1 0.164628 0.606866 0.124122
1274 1 0.326937 0.526192 0.495283
829 1 0.372081 0.555757 0.196792
576 1 0.311346 0.596389 0.174902
1578 1 0.231418 0.540055 0.213684
800 1 0.185138 0.511613 0.445827
1342 1 0.332417 0.71677 0.271337
1552 1 0.289405 0.506739 0.290774
1290 1 0.403498 0.574904 0.287068
1777 1 0.384396 0.637051 0.314675
397 1 0.416802 0.600624 0.164838
1855 1 0.421533 0.648328 0.247569
868 1 0.364277 0.612506 0.23175
123 1 0.458443 0.592957 0.328653
825 1 0.568706 0.973422 0.347395
164 1 0.595153 0.607867 0.174861
223 1 0.562973 0.661764 0.116168
640 1 0.46353 0.538323 0.284132
619 1 0.26018 0.96448 0.462418
543 1 0.466831 0.607638 0.218938
1744 1 0.505106 0.534287 0.207141
1046 1 0.533493 0.61359 0.332145
1879 1 0.414006 0.532426 0.421468
7 1 0.591908 0.551718 0.1287
787 1 0.731493 0.694833 0.160474
993 1 0.62981 0.662513 0.222187
822 1 0.656 0.529314 0.205975
1502 1 0.683217 0.625143 0.193211
482 1 0.645389 0.622721 0.293281
266 1 0.675441 0.569861 0.25339
1520 1 0.709795 0.618317 0.318707
1453 1 0.565287 0.646557 0.273797
843 1 0.508891 0.806572 0.46539
1770 1 0.649225 0.553936 0.322612
1261 1 0.772222 0.923803 0.414013
1406 1 0.306105 0.972086 0.283803
485 1 0.7379 0.608394 0.133418
955 1 0.73668 0.53458 0.151031
1544 1 0.806526 0.611055 0.253228
1822 1 0.811925 0.656435 0.180153
2047 1 0.768648 0.54303 0.328489
431 1 0.745386 0.583556 0.20008
830 1 0.0163308 0.618357 0.330332
2042 1 0.854149 0.564925 0.286095
368 1 0.94742 0.538412 0.212821
1091 1 0.826006 0.595652 0.339616
1925 1 0.794189 0.924341 0.497612
683 1 0.934965 0.59263 0.271621
1135 1 0.0145589 0.772645 0.371675
896 1 0.0881821 0.711843 0.340178
80 1 0.0481269 0.723196 0.141996
1826 1 0.0942181 0.795617 0.168459
1031 1 0.0891635 0.72647 0.197353
959 1 0.20274 0.80798 0.257467
1569 1 0.161359 0.828833 0.13613
424 1 0.0166462 0.634757 0.251534
1525 1 0.0644725 0.926764 0.267113
623 1 0.0222458 0.685037 0.322861
962 1 0.104375 0.782149 0.34328
1716 1 0.0443374 0.892533 0.200298
1644 1 0.285728 0.707571 0.2015
1150 1 0.182239 0.707417 0.252557
704 1 0.15837 0.674343 0.318354
760 1 0.228345 0.676011 0.298461
1754 1 0.290239 0.791872 0.170543
827 1 0.149495 0.780573 0.205723
1015 1 0.241203 0.748698 0.251873
158 1 0.152571 0.854337 0.21384
1725 1 0.362383 0.684942 0.384526
1022 1 0.42388 0.700197 0.119233
995 1 0.44358 0.667565 0.181304
504 1 0.366932 0.738902 0.17435
239 1 0.288504 0.714951 0.129934
1421 1 0.333265 0.783262 0.229171
1566 1 0.307399 0.745588 0.339937
350 1 0.374502 0.835154 0.202992
1521 1 0.448067 0.663856 0.323269
590 1 0.401142 0.718681 0.242877
746 1 0.482694 0.683973 0.265419
1377 1 0.538686 0.597354 0.225058
1007 1 0.650249 0.950241 0.25273
1372 1 0.488697 0.757225 0.322989
119 1 0.452809 0.839201 0.215471
497 1 0.517763 0.843352 0.253082
635 1 0.496828 0.790123 0.178507
659 1 0.570838 0.728552 0.311834
1963 1 0.724373 0.801409 0.1898
1322 1 0.660754 0.696914 0.282295
548 1 0.613281 0.805143 0.184831
204 1 0.637711 0.761743 0.268757
1705 1 0.730565 0.754255 0.24334
466 1 0.712107 0.789234 0.300966
639 1 0.697223 0.716043 0.097878
87 1 0.733954 0.860144 0.265036
1930 1 0.824378 0.787341 0.142722
564 1 0.806715 0.826308 0.20151
20 1 0.93133 0.732366 0.268533
509 1 0.785149 0.797427 0.263282
70 1 0.794565 0.710856 0.261016
1497 1 0.780973 0.740158 0.187776
384 1 0.863775 0.709688 0.26326
1794 1 0.883176 0.66636 0.169186
942 1 0.938637 0.659397 0.295452
1787 1 0.00667604 0.716119 0.244509
1146 1 0.860147 0.770565 0.291442
1599 1 0.958187 0.681139 0.196908
1947 1 0.932395 0.814686 0.27118
1040 1 0.84284 0.943722 0.339199
1824 1 0.123745 0.924484 0.197935
11 1 0.149761 0.960061 0.248315
1181 1 0.00294959 0.824935 0.243998
1345 1 0.0683765 0.970807 0.203576
854 1 0.949921 0.933222 0.29992
1750 1 0.118571 0.884662 0.0100414
44 1 0.996022 0.937339 0.233338
1509 1 0.449228 0.789325 0.00849177
960 1 0.247066 0.924549 0.347255
391 1 0.150593 0.898303 0.271411
501 1 0.309215 0.868345 0.271227
931 1 0.0936746 0.83263 0.245268
996 1 0.211563 0.989419 0.249242
561 1 0.192625 0.965621 0.323828
1270 1 0.386996 0.974654 0.272423
271 1 0.956152 0.858749 0.447636
1845 1 0.413591 0.891489 0.159098
1832 1 0.368929 0.93531 0.209881
974 1 0.301767 0.862951 0.200191
904 1 0.385729 0.904449 0.27359
1018 1 0.672289 0.993757 0.497323
1129 1 0.51768 0.929443 0.221107
1430 1 0.384126 0.825396 0.272544
723 1 0.461401 0.962053 0.163195
624 1 0.508254 0.922653 0.140602
1459 1 0.838429 0.790071 0.355065
1900 1 0.702476 0.53353 0.380823
1839 1 0.449049 0.961412 0.393066
1706 1 0.580643 0.99444 0.190638
347 1 0.447154 0.976867 0.31484
905 1 0.577402 0.94837 0.248972
628 1 0.646217 0.920858 0.173059
1501 1 0.690113 0.849787 0.162171
274 1 0.645434 0.876764 0.222996
138 1 0.833338 0.835475 0.0294464
1496 1 0.586429 0.862575 0.266214
1064 1 0.68062 0.981988 0.337284
1596 1 0.0408129 0.549877 0.0251401
625 1 0.754385 0.941733 0.163927
1704 1 0.784915 0.928889 0.221804
102 1 0.806378 0.900286 0.285252
1201 1 0.74587 0.948304 0.278026
1874 1 0.723242 0.900487 0.208777
1535 1 0.821691 0.979592 0.128321
400 1 0.754995 0.892289 0.336533
76 1 0.926237 0.885255 0.193912
321 1 0.248076 0.519729 0.135443
1073 1 0.980462 0.876112 0.157301
860 1 0.876921 0.831883 0.169134
724 1 0.861518 0.843001 0.236278
1842 1 0.826577 0.977222 0.447511
1070 1 0.871963 0.915213 0.26136
1582 1 0.900344 0.981906 0.358426
559 1 0.903197 0.84055 0.328256
459 1 0.758392 0.810369 0.417401
1397 1 0.015761 0.656247 0.388838
969 1 0.0778284 0.623925 0.38036
1246 1 0.0225699 0.607423 0.439618
1726 1 0.0593534 0.723852 0.400956
364 1 0.971053 0.513974 0.071179
88 1 0.981698 0.58247 0.376232
1587 1 0.776592 0.99587 0.200726
1986 1 0.141065 0.680692 0.385738
323 1 0.736125 0.991752 0.0776834
557 1 0.160593 0.636081 0.443837
1437 1 0.104088 0.678993 0.457193
968 1 0.0894867 0.588571 0.442578
976 1 0.258058 0.697095 0.400343
363 1 0.231486 0.629365 0.491877
1581 1 0.701574 0.82039 0.461761
592 1 0.264101 0.524749 0.439163
1558 1 0.853412 0.502944 0.0542254
1178 1 0.25047 0.523959 0.361734
875 1 0.374576 0.591475 0.440777
972 1 0.371063 0.650471 0.477843
1510 1 0.320446 0.579513 0.390259
477 1 0.245118 0.561424 0.495629
1613 1 0.260596 0.601793 0.417945
1471 1 0.0608253 0.499586 0.350748
192 1 0.376613 0.930872 0.0897047
433 1 0.48785 0.534988 0.424507
839 1 0.680167 0.892096 0.308139
609 1 0.696112 0.863954 0.403228
1543 1 0.0877378 0.505981 0.476302
587 1 0.409785 0.631332 0.388943
1273 1 0.809902 0.870457 0.38622
1859 1 0.440369 0.622106 0.480117
1636 1 0.644035 0.867474 0.472829
90 1 0.569991 0.537454 0.354958
785 1 0.448834 0.693068 0.393559
835 1 0.515075 0.599772 0.454069
1433 1 0.586187 0.583648 0.463364
324 1 0.648768 0.621149 0.432762
1574 1 0.660043 0.597882 0.375328
572 1 0.770873 0.661318 0.348941
135 1 0.606334 0.648751 0.347921
689 1 0.582435 0.561326 0.279452
1591 1 0.690856 0.669619 0.380917
780 1 0.48157 0.991972 0.252608
327 1 0.795158 0.629347 0.421477
198 1 0.791311 0.572669 0.473503
1805 1 0.803978 0.546734 0.41334
1422 1 0.833671 0.666785 0.312275
2000 1 0.104362 0.565136 0.133708
1884 1 0.751027 0.655049 0.230027
308 1 0.722213 0.592365 0.410859
1462 1 0.73111 0.637904 0.454071
325 1 0.869965 0.569515 0.430874
824 1 0.020451 0.987795 0.00401336
1328 1 0.906081 0.733366 0.340886
1108 1 0.920953 0.585416 0.35049
1324 1 0.899387 0.657983 0.357411
1870 1 0.9307 0.604698 0.445326
92 1 0.970595 0.528254 0.289789
1633 1 0.942114 0.674811 0.420503
1212 1 0.771932 0.992339 0.347362
1050 1 0.872667 0.649676 0.437582
1652 1 0.0704866 0.736625 0.472229
531 1 0.115079 0.768783 0.408539
1989 1 0.919879 0.740256 0.413682
1038 1 0.0634796 0.84266 0.385041
1088 1 0.015571 0.790433 0.450297
75 1 0.187063 0.705359 0.441691
1495 1 0.206641 0.658497 0.373761
2031 1 0.24443 0.781891 0.313829
1258 1 0.176901 0.768142 0.316643
369 1 0.1627 0.828716 0.398821
2 1 0.136035 0.507532 0.278641
1682 1 0.197085 0.736519 0.381272
1858 1 0.18231 0.776271 0.438615
1601 1 0.253304 0.775429 0.388967
1166 1 0.289234 0.762875 0.468607
1074 1 0.426359 0.776562 0.372166
772 1 0.391761 0.824554 0.401776
426 1 0.348999 0.712778 0.447653
129 1 0.377127 0.78889 0.328619
1712 1 0.3109 0.634056 0.336744
971 1 0.441339 0.785594 0.46376
283 1 0.280934 0.674811 0.460001
872 1 0.419198 0.715145 0.468262
926 1 0.616559 0.775172 0.384999
654 1 0.487103 0.608361 0.392367
1767 1 0.462369 0.836493 0.310192
408 1 0.00163381 0.91658 0.468127
1722 1 0.606217 0.812313 0.450821
696 1 0.475181 0.902575 0.344444
1388 1 0.521643 0.768534 0.387328
1517 1 0.51745 0.689346 0.35183
403 1 0.531368 0.826931 0.333424
1016 1 0.546357 0.891352 0.340864
899 1 0.648665 0.749739 0.481158
1639 1 0.711567 0.976783 0.217056
1940 1 0.519073 0.945686 0.301272
51 1 0.71396 0.762254 0.381462
108 1 0.117197 0.99178 0.0184564
747 1 0.582397 0.708528 0.396905
134 1 0.655376 0.715527 0.346911
171 1 0.64945 0.70786 0.422418
1172 1 0.61141 0.840552 0.33772
966 1 0.805224 0.721069 0.344593
1795 1 0.796402 0.747228 0.435423
28 1 0.753623 0.978397 0.499252
1936 1 0.00726726 0.503902 0.205223
836 1 0.717795 0.733796 0.449297
1570 1 0.762757 0.811771 0.343961
1057 1 0.850533 0.702994 0.395689
1945 1 0.822383 0.686093 0.468329
234 1 0.97212 0.744933 0.474291
1595 1 0.961854 0.788243 0.332529
1733 1 0.909614 0.700061 0.486362
1950 1 0.0183708 0.857394 0.319182
1761 1 0.0112724 0.692976 0.446287
758 1 0.0652663 0.979927 0.384877
185 1 0.0336204 0.913047 0.391347
1416 1 0.119202 0.828687 0.472446
1260 1 0.379498 0.543123 0.345062
48 1 0.589046 0.911981 0.396376
686 1 0.112233 0.933967 0.3565
1548 1 0.998189 0.835663 0.392994
159 1 0.140421 0.93557 0.464777
1111 1 0.115201 0.886739 0.414116
677 1 0.103002 0.86384 0.326421
804 1 0.902021 0.81543 0.414027
1387 1 0.216229 0.848715 0.433528
1418 1 0.210809 0.854313 0.312562
1343 1 0.898613 0.992591 0.474085
291 1 0.179529 0.899985 0.365032
173 1 0.277795 0.85036 0.349073
580 1 0.291888 0.958125 0.394835
1638 1 0.325718 0.92588 0.328289
1308 1 0.620039 0.512663 0.489066
1831 1 0.968206 0.923065 0.0962181
2027 1 0.294149 0.83857 0.447334
89 1 0.876278 0.696331 0.0198484
1173 1 0.356026 0.990476 0.166238
1482 1 0.387892 0.867113 0.344247
1030 1 0.380534 0.942668 0.389513
161 1 0.411589 0.88426 0.448303
1888 1 0.572786 0.751084 0.461084
850 1 0.54902 0.828958 0.408133
10 1 0.523233 0.937519 0.395274
584 1 0.832861 0.768346 0.49666
383 1 0.489996 0.86368 0.398919
518 1 0.455578 0.938481 0.470935
1707 1 0.641248 0.500947 0.0114902
1213 1 0.521463 0.902597 0.484111
1718 1 0.0370445 0.853872 0.475951
322 1 0.829887 0.97416 0.267868
1302 1 0.897802 0.506506 0.397382
52 1 0.866848 0.933648 0.00685861
1728 1 0.39939 0.547326 0.494823
69 1 0.446122 0.863294 0.0119375
1449 1 0.931593 0.912106 0.494309
458 1 0.302908 0.993626 0.0958942
892 1 0.320455 0.600481 0.492753
862 1 0.873361 0.551836 0.497258
98 1 0.679307 0.641467 0.494141
194 1 0.96648 0.809736 0.494876
798 1 0.0838997 0.590096 0.619524
1362 1 0.0218354 0.552712 0.532131
1390 1 0.0220664 0.702118 0.52688
923 1 0.119764 0.655479 0.545356
752 1 0.996897 0.589789 0.57984
1827 1 0.0599693 0.680156 0.588834
1807 1 0.173655 0.715981 0.958129
1137 1 0.23504 0.722913 0.528595
1760 1 0.173513 0.566596 0.636595
230 1 0.19555 0.557977 0.552692
1756 1 0.178113 0.632574 0.533169
1155 1 0.144498 0.61137 0.59156
1491 1 0.197591 0.649686 0.70512
1041 1 0.345815 0.64134 0.573573
407 1 0.0348025 0.532225 0.873983
1119 1 0.266527 0.590671 0.570481
1346 1 0.281269 0.659335 0.529753
382 1 0.40586 0.666717 0.613563
379 1 0.834147 0.653631 0.563532
30 1 0.954882 0.573245 0.513419
184 1 0.500092 0.59487 0.520259
473 1 0.994191 0.999481 0.914103
539 1 0.401597 0.601585 0.533142
1265 1 0.360324 0.589819 0.619591
744 1 0.319034 0.99859 0.880428
1248 1 0.534661 0.705603 0.567091
104 1 0.744833 0.618053 0.521043
755 1 0.615363 0.51992 0.576747
1102 1 0.555828 0.525901 0.534157
1113 1 0.502867 0.615269 0.637621
1190 1 0.551133 0.630419 0.579054
170 1 0.610948 0.938577 0.973833
1976 1 0.692013 0.526066 0.548012
913 1 0.769235 0.624831 0.588344
375 1 0.775243 0.684412 0.559362
729 1 0.642014 0.578701 0.608548
1586 1 0.880904 0.857386 0.975864
1793 1 0.761949 0.51003 0.565772
1332 1 0.25561 0.516332 0.91208
1263 1 0.954216 0.544675 0.8812
1448 1 0.799733 0.573515 0.547834
1912 1 0.71431 0.503749 0.723311
1298 1 0.83104 0.645148 0.63087
38 1 0.141272 0.711902 0.505846
442 1 0.898058 0.649517 0.5988
1902 1 0.508023 0.502266 0.849894
877 1 0.963833 0.683687 0.699957
1985 1 0.650847 0.714086 0.551019
826 1 0.884804 0.667893 0.690105
1282 1 0.883612 0.553341 0.575143
790 1 0.923105 0.971494 0.97636
1848 1 0.974539 0.546131 0.63661
1188 1 0.949443 0.630587 0.651143
510 1 0.196515 0.503292 0.653793
1193 1 0.10665 0.735873 0.634546
1506 1 0.15135 0.7991 0.551655
175 1 0.982085 0.694497 0.608105
231 1 0.0404034 0.778299 0.608019
1914 1 0.102581 0.737246 0.561328
110 1 0.627154 0.83246 0.895063
566 1 0.175502 0.727817 0.666691
32 1 0.0424378 0.772369 0.529128
1908 1 0.575092 0.561159 0.953517
456 1 0.211901 0.616065 0.599603
1841 1 0.217121 0.825307 0.591628
290 1 0.186665 0.712434 0.591516
1047 1 0.134357 0.646466 0.657156
1512 1 0.268056 0.785498 0.642947
1480 1 0.328049 0.792075 0.693711
922 1 0.910164 0.517779 0.923942
2040 1 0.0548196 0.852779 0.994214
205 1 0.382991 0.767346 0.505662
1152 1 0.256021 0.679946 0.597167
840 1 0.379597 0.740239 0.653345
333 1 0.386634 0.758217 0.583931
754 1 0.326548 0.721213 0.525397
1435 1 0.350878 0.827002 0.63244
596 1 0.488264 0.741531 0.675738
1528 1 0.279876 0.957715 0.542295
1923 1 0.549905 0.672179 0.644049
1170 1 0.46899 0.763828 0.612556
303 1 0.532908 0.783324 0.564793
2037 1 0.453262 0.669331 0.676571
1911 1 0.59314 0.699149 0.50215
1691 1 0.554088 0.748157 0.654461
227 1 0.474683 0.812033 0.687493
851 1 0.467222 0.671583 0.576825
57 1 0.578279 0.695428 0.772626
346 1 0.58673 0.741382 0.589978
771 1 0.584696 0.6076 0.636445
1105 1 0.675355 0.638002 0.577209
1732 1 0.714406 0.734622 0.595102
675 1 0.651523 0.78557 0.623544
428 1 0.601432 0.784705 0.524268
1154 1 0.772304 0.965529 0.929851
1994 1 0.678497 0.776225 0.541349
1617 1 0.690426 0.800919 0.703772
2013 1 0.763428 0.799422 0.530425
1937 1 0.758904 0.671885 0.633849
342 1 0.72903 0.741924 0.664196
1996 1 0.71636 0.720172 0.527457
1746 1 0.800466 0.752103 0.560967
1331 1 0.979128 0.755582 0.55593
1954 1 0.712775 0.573659 0.962996
821 1 0.845531 0.717363 0.610284
1476 1 0.84228 0.729001 0.683101
643 1 0.868534 0.778194 0.617897
1647 1 0.913911 0.71674 0.623188
1012 1 0.938179 0.6854 0.554151
1775 1 0.997982 0.899139 0.659403
1217 1 0.0318105 0.85249 0.570095
1165 1 0.0562688 0.989836 0.625666
536 1 0.907698 0.777766 0.557431
1215 1 0.0872171 0.895794 0.530186
213 1 0.0783455 0.921037 0.639667
317 1 0.215291 0.956484 0.573149
567 1 0.0369216 0.82271 0.834587
1942 1 0.138199 0.902532 0.592404
1576 1 0.201575 0.885526 0.680162
1130 1 0.254072 0.891132 0.725481
1186 1 0.174781 0.961003 0.665774
513 1 0.136648 0.801493 0.628949
307 1 0.0206547 0.896503 0.861814
6 1 0.323467 0.946534 0.684792
782 1 0.318145 0.88725 0.561421
1875 1 0.31612 0.801281 0.520605
878 1 0.275688 0.871376 0.633101
1840 1 0.383738 0.992572 0.703284
300 1 0.903311 0.847836 0.535635
2038 1 0.526386 0.860034 0.556005
811 1 0.502614 0.822491 0.623222
474 1 0.538946 0.86312 0.689775
1279 1 0.473377 0.805479 0.53572
1588 1 0.412455 0.980135 0.633183
1656 1 0.418161 0.834029 0.573767
918 1 0.724509 0.892318 0.594067
9 1 0.703347 0.988919 0.724398
1854 1 0.588001 0.906442 0.554552
1059 1 0.993364 0.540396 0.947657
1916 1 0.738479 0.52761 0.785942
1720 1 0.656381 0.913407 0.587453
1852 1 0.694867 0.856229 0.538952
1642 1 0.663131 0.853679 0.642382
1233 1 0.585839 0.821793 0.654252
1039 1 0.853358 0.540074 0.751005
1634 1 0.394244 0.699875 0.540163
515 1 0.721679 0.815182 0.601204
1060 1 0.172795 0.845184 0.973294
219 1 0.840544 0.82013 0.559037
242 1 0.835278 0.501764 0.541046
270 1 0.690851 0.964095 0.633257
1128 1 0.784749 0.877717 0.669153
527 1 0.784801 0.851755 0.59166
440 1 0.168125 0.543754 0.98234
679 1 0.853235 0.850473 0.631552
997 1 0.635214 0.576568 0.522562
952 1 0.917236 0.976962 0.56289
770 1 0.046271 0.92793 0.578313
603 1 0.444244 0.875769 0.524197
1294 1 0.982603 0.870701 0.524111
540 1 0.824875 0.506371 0.804676
1850 1 0.260736 0.51688 0.554481
1798 1 0.00213367 0.563223 0.805551
556 1 0.0731141 0.651322 0.77038
523 1 0.08518 0.582504 0.815576
1219 1 0.131819 0.610849 0.714222
225 1 0.2491 0.588138 0.653258
294 1 0.209815 0.619083 0.763578
1316 1 0.356779 0.577211 0.739787
41 1 0.115624 0.529274 0.844366
1844 1 0.175172 0.564501 0.738732
464 1 0.443923 0.537009 0.997152
118 1 0.333966 0.534537 0.664718
778 1 0.381508 0.518562 0.781147
1355 1 0.39853 0.569836 0.675578
96 1 0.434018 0.528634 0.625252
607 1 0.444368 0.603996 0.603805
1438 1 0.368885 0.658182 0.869836
710 1 0.392855 0.596786 0.836454
367 1 0.532694 0.560038 0.59119
1103 1 0.498474 0.53339 0.754195
1432 1 0.510045 0.52163 0.658213
881 1 0.537758 0.623641 0.704348
631 1 0.427112 0.557711 0.742905
1609 1 0.471712 0.577584 0.692158
418 1 0.406677 0.628641 0.716811
410 1 0.44913 0.511283 0.698407
1643 1 0.444089 0.538735 0.82078
414 1 0.993948 0.980031 0.53009
402 1 0.567598 0.546575 0.701024
796 1 0.703642 0.659678 0.787099
246 1 0.877982 0.633144 0.516674
1440 1 0.636695 0.640462 0.769609
125 1 0.605143 0.614386 0.705474
1943 1 0.621867 0.510003 0.731523
1603 1 0.748499 0.598958 0.803818
453 1 0.659344 0.624632 0.653299
538 1 0.7507 0.606785 0.728279
1297 1 0.910344 0.931211 0.886989
1115 1 0.471374 0.515702 0.557782
183 1 0.782401 0.538216 0.661926
845 1 0.825557 0.606556 0.745417
814 1 0.681171 0.626894 0.724559
522 1 0.0473706 0.643357 0.865153
1670 1 0.881317 0.585228 0.661123
1238 1 0.902242 0.59602 0.72969
1620 1 0.0651997 0.596714 0.701941
1944 1 0.887259 0.617951 0.803867
2030 1 0.0342266 0.718599 0.681923
1224 1 0.998867 0.646526 0.7744
1895 1 0.101186 0.824585 0.793153
1160 1 0.0780923 0.850118 0.618342
998 1 0.120193 0.736456 0.802744
1319 1 0.980566 0.819363 0.781203
987 1 0.306904 0.648811 0.730713
2035 1 0.285619 0.816378 0.883226
1147 1 0.140066 0.69001 0.706295
351 1 0.176317 0.833782 0.724645
1503 1 0.0802322 0.832801 0.712561
907 1 0.233971 0.764562 0.745238
1819 1 0.249166 0.684964 0.747544
1419 1 0.308586 0.71932 0.672788
1010 1 0.207721 0.809525 0.665833
880 1 0.237173 0.8176 0.800175
1553 1 0.174321 0.67758 0.797898
1877 1 0.10093 0.677335 0.833453
1222 1 0.174643 0.836515 0.830716
644 1 0.345158 0.646491 0.657232
667 1 0.359719 0.657609 0.793728
595 1 0.362163 0.831048 0.837676
297 1 0.400648 0.806872 0.673438
97 1 0.39406 0.857895 0.739516
1143 1 0.391728 0.764639 0.815972
358 1 0.308856 0.721372 0.781056
394 1 0.370252 0.771473 0.747298
33 1 0.364492 0.699917 0.7195
1753 1 0.442205 0.654566 0.820053
1465 1 0.466445 0.627699 0.752956
999 1 0.512193 0.68523 0.726064
1428 1 0.456293 0.782298 0.758967
450 1 0.536282 0.815931 0.809926
1804 1 0.675538 0.69944 0.639926
1554 1 0.57445 0.776441 0.762782
1508 1 0.699026 0.907799 0.687319
1194 1 0.604543 0.77655 0.698536
688 1 0.645883 0.704755 0.810701
1771 1 0.607168 0.696642 0.672056
792 1 0.527955 0.664126 0.84728
739 1 0.649757 0.793029 0.765364
1141 1 0.64918 0.847498 0.812303
828 1 0.662102 0.704971 0.726697
706 1 0.774374 0.735491 0.804202
1519 1 0.779382 0.753112 0.730528
479 1 0.738086 0.686755 0.720203
275 1 0.860725 0.852695 0.852581
1560 1 0.807105 0.670412 0.720869
890 1 0.799075 0.785247 0.630797
1973 1 0.816633 0.802115 0.788369
460 1 0.971382 0.753347 0.65125
1532 1 0.862902 0.699276 0.770299
1151 1 0.973286 0.768204 0.841193
672 1 0.906223 0.746449 0.725917
806 1 0.839518 0.732416 0.832412
1162 1 0.917412 0.784231 0.79147
1662 1 0.0335275 0.788839 0.755419
1541 1 0.918886 0.667271 0.754481
141 1 0.923871 0.713808 0.812137
783 1 0.0950856 0.979498 0.711471
56 1 0.99512 0.833745 0.622975
1742 1 0.96486 0.87496 0.724316
376 1 0.114501 0.77343 0.735067
957 1 0.133452 0.835837 0.918475
988 1 0.95966 0.899539 0.596412
1399 1 0.158616 0.942269 0.840372
1757 1 0.0247924 0.98023 0.808648
1080 1 0.931711 0.529679 0.735272
1266 1 0.109896 0.73343 0.959412
666 1 0.123139 0.895461 0.702694
191 1 0.251268 0.977455 0.823462
1815 1 0.617639 0.736032 0.948945
1788 1 0.27889 0.980818 0.756116
2005 1 0.260107 0.823762 0.709257
726 1 0.307489 0.794296 0.798016
1983 1 0.215215 0.918046 0.501392
139 1 0.376425 0.938385 0.751435
187 1 0.343957 0.956477 0.831497
1202 1 0.298985 0.915371 0.779366
2026 1 0.424685 0.865655 0.640597
1158 1 0.527274 0.923012 0.767778
1475 1 0.393274 0.920653 0.678161
2015 1 0.580284 0.86863 0.752382
1784 1 0.467566 0.900884 0.699212
1367 1 0.496995 0.85889 0.757529
1247 1 0.456132 0.921672 0.784158
1142 1 0.863471 0.712443 0.543875
328 1 0.549499 0.942515 0.694511
360 1 0.667877 0.962966 0.816917
1121 1 0.740899 0.839615 0.766138
668 1 0.571214 0.926207 0.619528
933 1 0.564199 0.867492 0.830966
924 1 0.550911 0.974609 0.933796
1403 1 0.646086 0.929929 0.727872
591 1 0.609452 0.890807 0.681169
698 1 0.593571 0.969828 0.773735
1689 1 0.829662 0.87695 0.751053
528 1 0.830076 0.7917 0.713832
608 1 0.781441 0.868406 0.82246
1240 1 0.900636 0.916574 0.738483
36 1 0.817593 0.949339 0.676289
1537 1 0.889514 0.836842 0.74817
361 1 0.890778 0.894547 0.675465
1414 1 0.205249 0.786887 0.508039
1677 1 0.929242 0.959174 0.668711
602 1 0.933518 0.868442 0.790048
111 1 0.943163 0.806062 0.677195
1729 1 0.963397 0.942669 0.765623
1910 1 0.86957 1.00148 0.624096
888 1 0.0856127 0.558094 0.940549
1375 1 0.100385 0.617266 0.905126
569 1 0.0291967 0.605838 0.947159
1887 1 0.0694628 0.785287 0.887886
248 1 0.155348 0.645437 0.935686
694 1 0.300271 0.762491 0.993026
1571 1 0.157003 0.544682 0.912125
1615 1 0.2644 0.520775 0.832407
1339 1 0.143597 0.608754 0.794713
1053 1 0.242796 0.585925 0.820736
293 1 0.237738 0.666019 0.964494
1259 1 0.101287 0.54258 0.732405
1310 1 0.178166 0.572453 0.842449
487 1 0.193009 0.638649 0.85825
465 1 0.328285 0.501253 0.882502
1524 1 0.300154 0.577095 0.88889
245 1 0.25354 0.5204 0.985183
1280 1 0.384882 0.675329 0.970392
534 1 0.416488 0.553706 0.900898
682 1 0.320234 0.564817 0.81457
1021 1 0.311932 0.664297 0.941528
114 1 0.36839 0.597814 0.94029
13 1 0.260115 0.856276 0.526237
1811 1 0.126079 0.572038 0.531078
1740 1 0.943511 0.526459 0.818556
652 1 0.442157 0.61136 0.982148
1867 1 0.53634 0.57952 0.83374
153 1 0.510201 0.554547 0.905757
731 1 0.455867 0.708466 0.925653
1183 1 0.618084 0.577306 0.882084
35 1 0.686229 0.569953 0.758474
8 1 0.729334 0.619357 0.909817
1710 1 0.610716 0.662267 0.931246
916 1 0.623566 0.639708 0.840798
95 1 0.689927 0.531669 0.912075
1014 1 0.988288 0.966633 0.607242
224 1 0.562379 0.61498 0.906661
1028 1 0.660227 0.613707 0.955155
105 1 0.844378 0.969295 0.516706
1424 1 0.772083 0.555566 0.850208
1880 1 0.192467 0.786229 0.991832
577 1 0.690759 0.609588 0.85335
552 1 0.19755 0.920829 0.978367
1893 1 0.823182 0.584485 0.810358
1090 1 0.771047 0.651669 0.856034
669 1 0.844634 0.513389 0.881679
1741 1 0.856256 0.654301 0.960945
1234 1 0.750181 0.709425 0.958212
156 1 0.914542 0.580676 0.96225
1790 1 0.963383 0.648073 0.9151
502 1 0.420578 0.969217 0.851998
244 1 0.681784 0.549782 0.823443
122 1 0.940286 0.640109 0.845179
1171 1 0.891864 0.567961 0.865
190 1 0.559948 0.991654 0.864824
1562 1 0.849077 0.630396 0.866366
1133 1 0.999414 0.59438 0.872286
68 1 0.223979 0.905978 0.823533
2021 1 0.164127 0.703802 0.861501
1210 1 0.0825423 0.662638 0.963023
511 1 0.0780172 0.794314 0.957171
1284 1 0.00837291 0.719387 0.954242
1563 1 0.995888 0.821769 0.909794
1076 1 0.141985 0.77819 0.862503
1095 1 0.0662869 0.701735 0.90453
616 1 0.0757637 0.525037 0.574149
1301 1 0.255653 0.75218 0.825793
940 1 0.226025 0.704188 0.895328
701 1 0.186863 0.747839 0.814378
707 1 0.406361 0.905037 0.828498
882 1 0.486054 0.993431 0.833404
143 1 0.402962 0.791193 0.884441
842 1 0.323279 0.761145 0.851302
1494 1 0.378724 0.760859 0.948248
1198 1 0.352212 0.837793 0.926411
1619 1 0.48008 0.833946 0.880904
865 1 0.436525 0.827894 0.942006
720 1 0.358847 0.976304 0.535751
2048 1 0.479633 0.770103 0.945767
705 1 0.47011 0.605291 0.881488
589 1 0.51361 0.669447 0.915378
1352 1 0.846095 0.553128 0.977257
901 1 0.501837 0.719351 0.787563
53 1 0.47479 0.780375 0.83471
389 1 0.337618 0.968172 0.619608
1836 1 0.499363 0.87477 0.826718
520 1 0.582124 0.769935 0.844169
848 1 0.545918 0.740776 0.906283
774 1 0.686608 0.748613 0.914961
160 1 0.69405 0.676358 0.921512
763 1 0.768986 0.779843 0.940612
612 1 0.212535 0.60687 0.918294
1365 1 0.706817 0.745406 0.762898
1755 1 0.773964 0.73077 0.873769
1239 1 0.716717 0.70382 0.837014
756 1 0.785518 0.64726 0.932916
1764 1 0.780935 0.661663 0.779704
1861 1 0.769788 0.860154 0.901063
766 1 0.720465 0.784852 0.825082
1678 1 0.814053 0.740563 0.98177
1489 1 0.941299 0.692746 0.974095
5 1 0.956088 0.757221 0.905941
1209 1 0.491958 0.945541 0.607775
1605 1 0.849585 0.734114 0.902649
1547 1 0.939262 0.834989 0.848232
319 1 0.921711 0.699444 0.88369
488 1 0.889323 0.797513 0.900911
1002 1 0.00374304 0.700332 0.88588
1361 1 0.954266 0.815092 0.973025
420 1 0.976063 0.889964 0.93013
1370 1 0.824515 0.541287 0.603248
1898 1 0.482784 0.715028 0.519189
331 1 0.0311955 0.804338 0.682475
1353 1 0.0595635 0.85517 0.914676
661 1 0.162462 0.892258 0.880832
58 1 0.0331862 0.855967 0.755933
103 1 0.0429236 0.936423 0.913594
2036 1 0.724088 0.998218 0.583451
1995 1 0.601165 0.625009 0.996296
1872 1 0.260223 0.939197 0.90476
1769 1 0.306145 0.895325 0.85579
1148 1 0.0854691 0.910489 0.819208
1020 1 0.115299 0.917602 0.939351
1027 1 0.275526 0.83114 0.98673
1454 1 0.144608 0.884942 0.772526
1856 1 0.379157 0.828432 0.989622
1019 1 0.328204 0.551169 0.990379
1086 1 0.0766213 0.939068 0.987697
1821 1 0.125785 0.513977 0.678364
416 1 0.471382 0.915195 0.871812
344 1 0.421699 0.903763 0.948923
237 1 0.431064 0.836232 0.804132
471 1 0.34335 0.901356 0.971506
462 1 0.356241 0.948785 0.912876
1955 1 0.3141 0.981376 0.955753
941 1 0.467404 0.971777 0.904977
692 1 0.716355 0.859453 0.837646
257 1 0.514943 0.82111 0.986272
238 1 0.613514 0.951131 0.864661
302 1 0.499167 0.896094 0.934452
280 1 0.542986 0.934662 0.83357
645 1 0.551721 0.811224 0.912991
1557 1 0.286525 0.620408 0.999011
1425 1 0.660309 0.958412 0.922072
249 1 0.609614 0.909129 0.799996
1968 1 0.722949 0.93384 0.871073
1714 1 0.594772 0.864291 0.967121
1987 1 0.653908 0.76986 0.84625
106 1 0.701849 0.907796 0.7828
1333 1 0.70561 0.825907 0.901052
1498 1 0.745948 0.9311 0.983011
1359 1 0.185702 0.858148 0.506798
1739 1 0.836775 0.899961 0.93161
1285 1 0.795889 0.931815 0.874759
1813 1 0.680527 0.887142 0.948661
681 1 0.753483 0.975766 0.793088
1439 1 0.827336 0.816249 0.947935
1676 1 0.812625 0.801903 0.859073
1071 1 0.911514 0.868989 0.90925
279 1 0.972585 0.955232 0.856225
598 1 0.862423 0.960824 0.942396
1003 1 0.715237 0.923706 0.530512
439 1 0.807096 0.932998 0.599988
1657 1 0.791716 0.873232 0.98664
29 1 0.852768 0.524596 0.68181
632 1 0.417363 0.972465 0.95898
432 1 0.195204 0.951889 0.749536
45 1 0.402248 0.511782 0.564843
1988 1 0.725671 0.570122 0.610385
1694 1 0.0410453 0.923384 0.718798
1451 1 0.763024 0.930953 0.739115
1522 1 0.749736 0.501317 0.946306
1220 1 0.24444 0.805603 0.9292
1579 1 0.911856 0.758511 0.959268
1101 1 0.520145 0.531945 0.991912
264 1 0.470053 0.973397 0.721582
856 1 0.231859 0.53239 0.764681
558 1 0.883122 0.924265 0.817964
989 1 0.5135 0.610629 0.982416
1441 1 0.639784 0.813187 0.968824
695 1 0.591837 0.509944 0.799125
1231 1 0.104196 0.986784 0.792677
1561 1 0.275367 0.586893 0.724544
1651 1 0.574759 0.770736 0.99082
1304 1 0.465801 0.942319 0.99115
1405 1 0.869925 0.915164 0.567779
693 1 0.422122 0.942924 0.551112
769 1 0.144842 0.962463 0.527573
276 1 0.544842 0.698031 0.975647
936 1 0.679516 0.98329 0.983842
1380 1 0.61332 0.970654 0.679825
169 1 0.7902 0.995864 0.719813
708 1 0.986603 0.942814 0.968667
241 1 0.804638 0.579422 0.916885
116 1 0.613401 0.980035 0.572018
617 1 0.953119 0.507157 0.556165
1659 1 0.791574 0.503337 0.727006
1640 1 0.146839 0.506965 0.776721
715 1 0.107813 0.59659 0.998175
451 1 0.568773 0.847015 0.50057
1098 1 0.961215 0.648829 0.507087
178 1 0.513262 0.979215 0.501688
1649 1 0.838685 0.868309 0.499669
|
def count_racks(clothes, rack_capacity):
    """Return the number of racks needed to hold all *clothes*.

    Items are placed in reverse input order (the original script popped
    from the end of the list); a new rack is started whenever the next
    item would push the current rack past *rack_capacity*.

    Args:
        clothes: sequence of item weights (ints).
        rack_capacity: maximum total weight per rack.

    Returns:
        The rack count (at least 1, even for an empty sequence — matching
        the original script's behavior).

    Raises:
        ValueError: if any single item exceeds rack_capacity (the original
            loop would spin forever in that case).
    """
    racks_count = 1
    curr_sum = 0
    # reversed() reproduces the original pop-from-the-end consumption order.
    for weight in reversed(clothes):
        if weight > rack_capacity:
            raise ValueError(
                "item of weight %d cannot fit on any rack (capacity %d)"
                % (weight, rack_capacity)
            )
        if curr_sum + weight <= rack_capacity:
            curr_sum += weight
        else:
            # Start a fresh rack; the item that did not fit opens it.
            racks_count += 1
            curr_sum = weight
    return racks_count


if __name__ == "__main__":
    clothes = [int(x) for x in input().split()]
    rack_capacity = int(input())
    print(count_racks(clothes, rack_capacity))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.