index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
6,900 | f25d86e857970854b2239ce0ab5280132b89280e | # -*- coding: utf-8 -*-
'''
一球从100米高度自由落下
每次落地后反跳回原高度的一半;再落下,求它在第10次落地时,共经过多少米?第10次反弹多高?
求两个东西, 1是经过了多少米, 2是反弹多高
1: 100 100+50+50 100+50+50+25+25
2: 100 100/2=50 50/2=25 25/2=2
'''
import math
# Initial drop height, in meters.
start_height = 100
# Each bounce reaches this fraction of the previous fall height.
rebound_rate = 0.5
# Cumulative distance travelled at each landing; seeded with the first 100 m drop.
meter_list = [100]
def rebound(time):
    """Return the rebound height (meters) after the given landing number."""
    return start_height * rebound_rate ** time
'''
1.第一次落地, 经过了100米
2.第二次落地, 经过了100+50+50米
3.第三次落地, 经过了100+50+50+25+25米
'''
def get_all_meter(time):
    """Fill meter_list with the cumulative distance at each landing.

    meter_list[k] is the total distance covered when the ball hits the
    ground for the (k+1)-th time: bounce k adds twice the k-th rebound
    height (up plus down) to the previous total.
    """
    for k in range(1, time):
        # Bug fix: accumulate on the previous total and use the loop index k.
        # The original appended the constant start_height + rebound(time-1)*2
        # on every iteration, never building the running sum.
        meter_list.append(meter_list[-1] + rebound(k) * 2)
def main():
    #print rebound(10)
    # Python 2 print statement.
    # NOTE(review): get_all_meter returns None, so this prints "None";
    # presumably meter_list is the intended output -- confirm.
    print get_all_meter(11)
if __name__ == '__main__':
main() |
6,901 | 5bb894feaf9293bf70b3f831e33be555f74efde8 | from django import forms
from .models import File, Sample, Plate, Well, Machine, Project
class MachineForm(forms.ModelForm):
    # ModelForm exposing the editable fields of a Machine record.
    class Meta:
        model = Machine
        fields = ['name', 'author', 'status', 'comments']
class ProjectForm(forms.ModelForm):
    # ModelForm exposing the editable fields of a Project record.
    class Meta:
        model = Project
        fields = ['name', 'author', 'collaborators', 'status', 'comments']
class FileForm(forms.ModelForm):
    # ModelForm for uploading a File attached to a script/author.
    class Meta:
        model = File
        fields = ['name', 'script', 'author', 'file']
class SampleForm(forms.ModelForm):
    # ModelForm covering the full Sample schema (sequence, cloning and
    # ordering metadata alike).
    class Meta:
        model = Sample
        fields = ['name', 'alias', 'sample_type', 'description', 'project', 'author', 'sequence',
                  'length', 'genbank', 'source_reference', 'comments', 'parent_id',
                  'organism', 'genus_specie', 'marker', 'application', 'strategy', 'seq_verified', 'origin_rep',
                  'cloning_system', 'strand', 'order_number', 'part_type', 'moclo_type', 'sub_sample_id', 'primer_id',
                  'end', 'direction', 'tm']
class PlateForm(forms.ModelForm):
    # ModelForm exposing the editable fields of a Plate record.
    class Meta:
        model = Plate
        fields = [
            'name', 'barcode', 'type', 'contents', 'location', 'num_cols', 'num_rows', 'num_well', 'function',
            'project', 'active', 'status']
class WellForm(forms.ModelForm):
    # ModelForm exposing the editable fields of a Well record.
    class Meta:
        model = Well
        fields = ['name', 'volume', 'concentration', 'plate', 'samples', 'active', 'status']
|
6,902 | fc06d8a26a99c16a4b38ad0b4bbb28a1dc522991 | #This script reads through a Voyager import log and outputs duplicate bib IDs as well as the IDs of bibs, mfhds, and items created.
#import regular expressions and openpyxl
import re
import openpyxl
# prompt for file names
fname = input("Enter input file, including extension: ")
fout = input("Enter output file, without extension: ")
fh = open(fname, "r")
# set up lists
# Each list starts with a one-element header row for the spreadsheet.
duplicates = [["Duplicate Bib ID"]]
bibs = [["Bib ID"]]
mfhds = [["MFHD ID"]]
items = [["Item ID"]]
# create and open workbook with two sheets
wb1=openpyxl.Workbook()
ws1=wb1.active
ws1.title = "Duplicate Bibs"
ws2 = wb1.create_sheet(index=1, title="IDs Added")
# read through file, extract the line after the line starting with BibID & rank and write to lists
with fh as f:
    lines = f.readlines()
    n_lines = len(lines)
    for i, line in enumerate (lines) :
        line = line.rstrip()
        # NOTE(review): startswith("") is always True, so the second half of
        # this condition only guards against reading past the end of the
        # file -- confirm whether a real prefix test was intended here.
        if line.startswith(" BibID & rank") and \
           n_lines > i + 2 and lines[i + 2].startswith("") :
            # The line after the header holds "<id> - <rank>"; pull the digits.
            bibline = re.findall(r'\d+\s-\s', lines[i+1])
            dupeid = re.findall(r'\d+', str(bibline))
            duplicates.append(dupeid)
        elif line.startswith(" Adding Bib") :
            line = re.findall(r'\d+',str(line))
            bibs.append(line)
        elif line.startswith("MFHD_ID ") :
            line = re.findall(r'\d+',str(line))
            mfhds.append(line)
        elif line.startswith("ITEM_ID ") :
            line = re.findall(r'\d+',str(line))
            items.append(line)
        else :
            continue
# write the lists to columns in the spreadsheet and save
for row in duplicates:
    ws1.append(row)
for r in range(0,len(bibs)):
    ws2.cell(row=r+1,column=1).value=bibs[r][0]
for r in range(0,len(mfhds)):
    ws2.cell(row=r+1,column=2).value=mfhds[r][0]
for r in range(0,len(items)):
    ws2.cell(row=r+1,column=3).value=items[r][0]
wb1.save(fout + ".xlsx")
|
6,903 | fca46c095972e8190ee9c93f3bddbb2a49363a7f | # coding: utf-8
"""
Meme Meister
API to create memes # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.default_api import DefaultApi # noqa: E501
from swagger_client.rest import ApiException
class TestDefaultApi(unittest.TestCase):
    """DefaultApi unit test stubs (swagger-codegen generated skeletons)."""
    def setUp(self):
        # Fresh generated client per test; no extra configuration is wired in.
        self.api = swagger_client.api.default_api.DefaultApi() # noqa: E501
    def tearDown(self):
        # The client holds no external resources; nothing to release.
        pass
    def test_meme_get(self):
        """Test case for meme_get
        Get meme(s) # noqa: E501
        """
        # TODO: generated stub -- add assertions against a live or mocked API.
        pass
    def test_meme_meme_id_delete(self):
        """Test case for meme_meme_id_delete
        Delete meme by ID # noqa: E501
        """
        # TODO: generated stub -- add assertions against a live or mocked API.
        pass
    def test_meme_meme_id_get(self):
        """Test case for meme_meme_id_get
        Get meme by ID # noqa: E501
        """
        # TODO: generated stub -- add assertions against a live or mocked API.
        pass
    def test_meme_post(self):
        """Test case for meme_post
        Post meme # noqa: E501
        """
        # TODO: generated stub -- add assertions against a live or mocked API.
        pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
6,904 | 0b7523035fdad74454e51dc9da9fc4e9bea2f6bf | import typing
from rest_framework.exceptions import ValidationError
from rest_framework.request import Request
def extract_organization_id_from_request_query(request):
    """Return the organization id from the query string.

    Accepts either the 'organization' or the 'organization_id' query
    parameter; a truthy 'organization' value wins when both are present.
    """
    params = request.query_params
    organization = params.get('organization')
    if organization:
        return organization
    return params.get('organization_id')
def extract_organization_id_from_request_data(request) -> (int, bool):
    """
    Returns the organization id from the request.data and a bool indicating if the key
    was present in the data (to distinguish between missing data and empty input value)

    request.data is checked before request.GET; within each source the
    'organization' key takes precedence over 'organization_id'.
    :param request:
    :return: (value, found) tuple; (None, False) when neither key exists
    """
    for source in (request.data, request.GET):
        if 'organization' in source:
            return source.get('organization'), True
        # Bug fix: the original tested `'organization_id' in request.data`
        # here instead of the current source, so an 'organization_id'
        # passed only via GET was never found (and a data-only key could
        # make it read the value from the wrong source).
        if 'organization_id' in source:
            return source.get('organization_id'), True
    return None, False
def extract_field_from_request(request: Request, field_name: str) -> typing.Optional[int]:
    """
    Return the integer value of *field_name* from the request, or None.

    The request body (request.data) takes precedence over the query
    string; a falsy body value falls back to the query parameters.
    :raises ValidationError: when the request has no usable data mapping,
        or when the located value is not a valid integer.
    """
    try:
        payload_value = request.data.get(field_name)
    except AttributeError:
        # request.data was not a mapping at all.
        raise ValidationError('Malformed request')
    value = payload_value if payload_value else request.query_params.get(field_name)
    if not value:
        return None
    try:
        return int(value)
    except ValueError:
        raise ValidationError(f"Value of field '{field_name}' is not a valid integer ({value})")
|
6,905 | 378c07c512425cb6ac6c998eaaa86892b02a37b8 | """
Like Places but possibly script based and temporary.
Like a whisper command where it keeps track of participants.
""" |
6,906 | ee161ff66a6fc651a03f725427c3731bdf4243eb | from django.shortcuts import render
from django.http import HttpResponse
# # Create your views here.
# def Login_Form(request):
# return render(request, 'Login.html') |
6,907 | 83a92c0b645b9a2a483a01c19a47ab5c296ccbd9 | import numpy as np
import sys
import os
import os.path
import json
import optparse
import time
import pandas as pd
#Randomize and split the inference set according to hor_pred
#Generate .npy file for each hp selected
#Coge valores aleatorios de la columna de etiquetas en función del horizonte de predicción.
#Coge los índices de las muestras seleccionadas y los usa para seleccionar las imágenes que
##tienen asociadas.
#Tenemos que tener pandas para la seleccion primera de las etiquetas, luego solo generamos un
##.npy con ese hor_pred y con la cantidad que queramos en función del valor del split
####PARSEAR CON EL JSON
###################
# PARSE CONNFIG #####
##################
def addOptions(parser):
    """Register this script's command-line options on *parser*."""
    help_text = "Config json file for the data to pass to the model"
    parser.add_option("--NNfile", default="", help=help_text)
parser = optparse.OptionParser()
addOptions(parser)
(options, args) = parser.parse_args()
if not options.NNfile:
    # NOTE(review): this prints the stderr object itself instead of writing
    # to it; likely meant print("...", file=sys.stderr) -- confirm.
    print(sys.stderr, "No configuration file specified\n")
    sys.exit(1)
# Load the JSON configuration driving the split.
with open(options.NNfile, 'r') as cfg_file:
    cfg_data = json.load(cfg_file)
days_info_file = cfg_data['days_info']
days_info = pd.read_csv(days_info_file)
day_length = days_info['length_day'][0]
days = days_info['number_train_days'][0]
# Sampling period in seconds.
tg = cfg_data['time_granularity']
# List of prediction horizons, e.g. ["5min", "30s"].
hor_pred = cfg_data['hor_pred']
forecast_prediction = []
cut_1 = cfg_data['cut']
img_rows = cfg_data['img_rows']
img_cols = cfg_data['img_cols']
orig_folder = cfg_data['orig_folder']
dest_folder = cfg_data['dest_folder']
##################
# DATA LOAD ######
###################
print('Loading images...\n')
load_start = time.time()
# assumes x_train.npy sits in the working directory -- TODO confirm
x_original = np.load("x_train.npy")
print(x_original.shape)
print(len(x_original))
print('Loading tags...\n')
y_original = pd.read_csv(orig_folder + '/Y_tr_val.csv')
load_end = time.time()
load_time = load_end - load_start
load_min = int(load_time / 60)
load_sec = load_time % 60
print('Dataframes loaded in {} minutes {} seconds! Splitting for train and validation...\n'.format(load_min, load_sec))
#################
# RANDOMIZATION##
#################
# Since we configured our matrices with an offset we have to adjust to "jump" to the sample we want to actually predict
for hp in hor_pred:
    # Convert the horizon string into a number of samples.
    if hp.endswith("min"):
        hor_pred_indices = int(int(hp.replace('min', '')) * 60 / tg)
    if hp.endswith("s"):
        hor_pred_indices = int(int(hp.replace('s', '')) / tg)
    forecast_prediction.append(hp)
    y_t = y_original # y_train and y are equal
    y_t_index = y_t.index # returns an array of indices
    # Don't get values for the previous or next day:
    y_t_index_valid = y_t_index[(y_t_index % day_length) < (day_length - hor_pred_indices)]
    y_t_indices_lost = len(y_t_index) - len(y_t_index_valid)
    print('Indices computed. {} indices lost \n.'.format(y_t_indices_lost))
    print('Building randomized y matrix with valid indices...\n')
    # Shift each valid index forward by the horizon to select the target sample.
    y_t = np.ravel(y_original.iloc[y_t_index_valid + hor_pred_indices])
    print('Building y matrix removing invalid indices for persistence model...\n')
    y_pred_persistence = np.ravel(y_original.iloc[y_t_index_valid]) # DataFrame row converted to a numpy array
    print('Building X matrix...Same thing as before...\n')
    # like our randomization, just picking the same indices
    x_t = x_original[y_t_index_valid]
    x_t = x_t.reshape(x_t.shape[0], img_rows, img_cols, 1)
    #Split:
    cut = int(cut_1*len(x_t))
    x_train, x_test = x_t[:cut,:], x_t[cut:,:]
    y_train, y_test = y_t[:cut], y_t[cut:]
    #print(x_train.shape, x_test.shape)
    #print(y_train.shape, y_test.shape) # tags (the real values each sample should predict)
    name = "set_hp_" + str(hp) + "_" + str (cut_1) + "total" + ".npy"
    name2 = "tags_hp_" + str(hp) + "_" + str (cut_1) + "total" + ".npy"
    # For each prediction horizon, generate an array for inference.
    np.save(name, x_train)
    np.save(name2, y_train)
    print('Generated {} images array \n.'.format(x_train.shape))
|
6,908 | 3d854c83488eeafa035ccf5d333eeeae63505255 | thisdict = {"brand": "ford", "model": "Mustang", "year": 1964}
# Bug fix: the dictionary key is "model", not "modal" (the original raised
# KeyError here).
module = thisdict["model"]
print("model:", module)
thisdict = {"brand": "ford", "model": "Mustang", "year": 1964}
# Bug fix: dict.get is a method, so it must be called with parentheses;
# thisdict.get["modal"] raised TypeError (and used the misspelled key).
module = thisdict.get("model")
print("model:", module)
6,909 | c024e12fe06e47187c25a9f384ceed566bf94645 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Module that defines a controller for database's operations over business rules
"""
# built-in dependencies
import functools
import typing
# external dependencies
import sqlalchemy
from sqlalchemy.orm import sessionmaker
# project dependencies
from database.table import ResourceTable
__authors__ = ["Gabriel Castro", "Gustavo Possebon", "Henrique Kops"]
__date__ = "24/10/2020"
class _DatabaseResourceTableController:
    """
    Controller for resource table access
    """
    def __init__(self):
        # sqlalchemy
        # Engine over a local SQLite file; self.session is a factory that
        # yields a fresh Session per operation.
        self.engine = sqlalchemy.create_engine("sqlite:///db.sqlite3")
        self.session = sessionmaker(bind=self.engine)
    def register_peer(self, peer_id: str, peer_ip: str, peer_port: int,
                      resource_name: str, resource_path: str, resource_hash: str) -> None:
        """
        Register 'peer x resource' relationship at database
        :param peer_id: Peer's id
        :param peer_ip: Peer's ip
        :param peer_port: Peer's listen port
        :param resource_name: Resource's name
        :param resource_path: Resource's path
        :param resource_hash: Resource's MD5
        """
        session = self.session()
        try:
            new_resource = ResourceTable()
            new_resource.peerId = peer_id
            new_resource.peerIp = peer_ip
            new_resource.peerPort = peer_port
            new_resource.resourceName = resource_name
            new_resource.resourcePath = resource_path
            new_resource.resourceHash = resource_hash
            session.add(new_resource)
            session.commit()
        finally:
            # Always release the session, even if the commit fails.
            session.close()
    def get_available_peer(self, resource_name: str) -> typing.List:
        """
        Get peer's ip and port and resource's path, name and hash
        that contains same resource name
        :param resource_name: Name of the resource to be searched at database
        :return: List containing matching peer's and resource's info
        """
        session = self.session()
        try:
            # Group by peer so each peer appears once; only the first
            # matching row is returned to the caller.
            available_peers = session\
                .query(
                    ResourceTable.peerIp,
                    ResourceTable.peerPort,
                    ResourceTable.resourcePath,
                    ResourceTable.resourceName,
                    ResourceTable.resourceHash
                )\
                .filter(ResourceTable.resourceName == resource_name)\
                .group_by(ResourceTable.peerId)\
                .all()
            if available_peers:
                return available_peers[0]
            else:
                return []
        finally:
            session.close()
    def get_all_resources(self) -> typing.List:
        """
        Get every register of peer's ip and port and resource's path, name and hash
        :return: List of every 'peer x resource' info
        """
        session = self.session()
        try:
            available_peers = session\
                .query(
                    ResourceTable.peerIp,
                    ResourceTable.peerPort,
                    ResourceTable.resourcePath,
                    ResourceTable.resourceName,
                    ResourceTable.resourceHash
                )\
                .group_by(ResourceTable.peerId, ResourceTable.resourceHash)\
                .all()
            return available_peers
        finally:
            session.close()
    def drop_peer(self, peer_id: str) -> None:
        """
        Delete every record that contains same peer's id
        :param peer_id: Peer's ip to be used as filter
        """
        session = self.session()
        try:
            session\
                .query(ResourceTable)\
                .filter(ResourceTable.peerId == peer_id)\
                .delete()
            session.commit()
        finally:
            session.close()
@functools.lru_cache()
def get_database_resource_table_controller() -> "_DatabaseResourceTableController":
    """
    Singleton for DatabaseResourceTableController class

    The lru_cache on a zero-argument function means every call returns
    the same cached instance.
    :return: Same instance for DatabaseResourceTableController class
    """
    # Bug fix: the return annotation was the list literal
    # [_DatabaseResourceTableController], which is not a valid type;
    # the function returns a single controller instance.
    return _DatabaseResourceTableController()
|
6,910 | d82b68d5c83ae538d7a8b5ae5547b43ac4e8a3d4 | from models.readingtip import ReadingTip
from database import db
class ReadingTipRepository:
    """Data-access layer for ReadingTip rows, backed by the shared db session."""

    def __init__(self):
        pass

    def get_tips(self, user, tag="all"):
        """Return the user's tips, optionally filtered by tag name."""
        if tag == "all":
            return ReadingTip.query.filter_by(user=user).all()
        else:
            return ReadingTip.query.filter_by(user=user).filter(ReadingTip.tags.any(name=tag)).all()

    def update_tip(self, tip_id, title, link, tags):
        """Overwrite the title, link and tags of an existing tip."""
        tip = self.get_tip(tip_id)
        # Bug fix: removed a leftover debug print(tags) that polluted stdout.
        tip.title = title
        tip.link = link
        tip.tags = tags
        db.session.commit()

    def create_tip(self, tip):
        """Persist a new tip and return it."""
        db.session.add(tip)
        db.session.commit()
        return tip

    def get_tip(self, tip_id):
        """Fetch a single tip by primary key (None when not found)."""
        return ReadingTip.query.get(tip_id)

    def delete_tip(self, tip):
        """Remove the given tip."""
        db.session.delete(tip)
        db.session.commit()

    def contains_title(self, user, title):
        """Return True when the user already has a tip with this exact title."""
        amount = ReadingTip.query.filter_by(user=user, title=title).count()
        return amount > 0

    def read_tip(self, tip, date):
        """Mark the tip as read on the given date."""
        ReadingTip.query.filter_by(id=tip.id).update({"read":date})
        db.session.commit()
# Module-level singleton shared by the rest of the application.
readingtip_repository = ReadingTipRepository()
|
6,911 | 8535020e7157699310b3412fe6c5a28ee8e61f49 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from ._enums import *
# Public surface of this generated module.
__all__ = [
    'ApplicationCredential',
    'ApplicationTag',
]
@pulumi.output_type
class ApplicationCredential(dict):
    # Generated output type (do not edit by hand): dict-backed credential
    # whose camelCase keys are exposed through snake_case property getters.
    @staticmethod
    def __key_warning(key: str):
        # Warn when callers use a raw camelCase key instead of the
        # corresponding snake_case property.
        suggest = None
        if key == "credentialType":
            suggest = "credential_type"
        elif key == "databaseName":
            suggest = "database_name"
        elif key == "secretId":
            suggest = "secret_id"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ApplicationCredential. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ApplicationCredential.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ApplicationCredential.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 credential_type: Optional['ApplicationCredentialCredentialType'] = None,
                 database_name: Optional[str] = None,
                 secret_id: Optional[str] = None):
        # Only set keys that were actually provided.
        if credential_type is not None:
            pulumi.set(__self__, "credential_type", credential_type)
        if database_name is not None:
            pulumi.set(__self__, "database_name", database_name)
        if secret_id is not None:
            pulumi.set(__self__, "secret_id", secret_id)
    @property
    @pulumi.getter(name="credentialType")
    def credential_type(self) -> Optional['ApplicationCredentialCredentialType']:
        return pulumi.get(self, "credential_type")
    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> Optional[str]:
        return pulumi.get(self, "database_name")
    @property
    @pulumi.getter(name="secretId")
    def secret_id(self) -> Optional[str]:
        return pulumi.get(self, "secret_id")
@pulumi.output_type
class ApplicationTag(dict):
    """
    A key-value pair to associate with a resource.
    """
    # Generated output type (do not edit by hand).
    def __init__(__self__, *,
                 key: str,
                 value: str):
        """
        A key-value pair to associate with a resource.
        :param str key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        :param str value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def value(self) -> str:
        """
        The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        return pulumi.get(self, "value")
|
6,912 | 8474205d49aef2d18755fc1a25a82718962f4120 | times = np.linspace(0.0, 10.0, 100)
# QuTiP example fragment: evolve the system and plot <sigma_z> / <sigma_y>.
# Relies on names defined elsewhere (times, H, psi0, np, plt and the qutip
# operators) -- presumably `from qutip import *`; confirm in the full example.
result = mesolve(H, psi0, times, [np.sqrt(0.05) * sigmax()], [sigmaz(), sigmay()])
fig, ax = plt.subplots()
ax.plot(times, result.expect[0]) # doctest: +SKIP
ax.plot(times, result.expect[1]) # doctest: +SKIP
ax.set_xlabel('Time') # doctest: +SKIP
ax.set_ylabel('Expectation values') # doctest: +SKIP
ax.legend(("Sigma-Z", "Sigma-Y")) # doctest: +SKIP
plt.show() # doctest: +SKIP
|
6,913 | dfe79d2f4bf4abc1d04035cf4556237a53c01122 | import bisect
import sys
# Fast line-based stdin reader.
input = sys.stdin.readline
N = int(input())
A = [int(input()) for _ in range(N)]
# Patience-sorting style DP: dp stays sorted; each element of the reversed
# sequence overwrites the first slot strictly greater than it.
dp = [float('inf')]*(N+1)
for a in A[::-1]:
    idx = bisect.bisect_right(dp, a)
    dp[idx] = a
# Count the dp slots that were ever filled; that count is the answer.
# NOTE(review): this equals the length of a longest increasing run over the
# reversed input -- confirm against the original problem statement.
ans = 0
for n in dp:
    if n != float('inf'):
        ans += 1
print(ans) |
6,914 | aa0a69e3286934fcfdf31bd713eca1e8dd90aeaa | from omt.gui.abstract_panel import AbstractPanel
class SourcePanel(AbstractPanel):
    # GUI panel aggregating the available signal-source alternative panels.
    def __init__(self):
        super(SourcePanel, self).__init__()
    def packagePath(self):
        """
        Map of alternative source panels: the key is the panel class name
        and the value is the module path where that class is defined.
        """
        altenatives = {
            'Agilent':'omt.gui.sourcepanel.alternatives.agilent',
            'RodeSchwartz':'omt.gui.sourcepanel.alternatives.rodeschwartz',
            'BeamScanner':'omt.gui.sourcepanel.alternatives.beam_scanner',
            'PCG':'omt.gui.sourcepanel.alternatives.pcg',
            'Anritsu':'omt.gui.sourcepanel.alternatives.anritsu',
        }
        return altenatives
    def get_name(self):
        # Display name of this panel.
        return "Source"
    def get_configurations(self):
        """
        Collect the configuration of every active source panel.
        Returns a dict with at most one 'sweep' entry and a 'tone' list;
        raises when more than one active source requests a sweep.
        """
        return_dic = {}
        for source in self.pannels_instants:
            if source.is_active():
                if source.do_sweep():
                    if 'sweep' in return_dic:
                        raise Exception('Only one sweep')
                    return_dic['sweep'] = source.get_source_config()
                else:
                    # First tone creates the list; later ones append to it.
                    try:
                        return_dic['tone'].append(source.get_source_config())
                    except KeyError as e:
                        return_dic['tone'] = [source.get_source_config(),]
        return return_dic
def pass_sources(self):
return self.pannels_instants |
6,915 | 44c04cf79d02823318b06f02af13973960413bea | #!/usr/bin/env python
import os, glob, sys, math, time, argparse
import ROOT
from ROOT import TFile, TTree, TH2D
def main():
    # Merge the b-tagging MC efficiency histograms of many LEAF ROOT files
    # and write passing/total ratios as '*_efficiency' histograms.
    # Python 2 script (bare print statements below).
    parser = argparse.ArgumentParser(description='Program that takes as an argument a pattern of LEAF rootfiles (* wildcards work) enclosed by quotation marks ("<pattern>") and creates a rootfile with the MC b-tagging efficiencies. Assumes the b-tagging MC efficiency histogram folder inside the root files is called "BTaggingMCEfficiencies".')
    parser.add_argument('--input', '-i', required=True, type=str, help='Name of the json converted to a .txt file')
    parser.add_argument('--output', '-o', type=str, help='Name of the output .root file. Default is "BTaggingMCEfficiencies.root"')
    args = parser.parse_args()
    infilepattern = os.path.abspath(args.input)
    outfilename = os.path.abspath(args.output) if args.output is not None else 'BTaggingMCEfficiencies.root'
    infilenames = glob.glob(infilepattern)
    foldername = 'BTaggingMCEfficiencies'
    btag_histo_names = ['b_passing', 'b_total', 'c_passing', 'c_total', 'udsg_passing', 'udsg_total']
    merged_histograms = []
    for idx, infilename in enumerate(infilenames):
        infile = TFile(infilename, 'READ')
        for idx_hist, name in enumerate(btag_histo_names):
            histname = foldername + '/' + name
            if idx == 0:
                # First file seeds the merged histograms; SetDirectory(0)
                # detaches them so closing the TFile does not delete them.
                hist = infile.Get(histname)
                merged_histograms.append(hist)
                hist.SetDirectory(0)
                # if idx_hist == 5: print 'number of new entries:', hist.GetEntries()
            else:
                merged_histograms[idx_hist].Add(infile.Get(histname))
                thishist = infile.Get(histname)
                # if idx_hist == 5: print 'number of new entries:', thishist.GetEntries()
    # print 'number of entries merged:', merged_histograms[5].GetEntries()
    # print merged_histograms
    outfile = TFile(outfilename, 'RECREATE')
    # outhists = []
    for idx, name in enumerate(btag_histo_names):
        if 'passing' in name:
            # Pair each '<flavour>_passing' with its '<flavour>_total'
            # histogram and divide to get the efficiency.
            num = merged_histograms[idx]
            for hist in merged_histograms:
                if hist.GetName() == num.GetName().replace('passing', 'total'):
                    den = hist
            print num.GetBinContent(4,1), den.GetBinContent(4,1)
            num.Divide(den)
            print num.GetBinContent(4,1)
            num.SetName(num.GetName().replace('passing', 'efficiency'))
            num.Write()
    outfile.Close()
# Script entry point.
if __name__ == '__main__':
    main()
|
6,916 | a9876c61578a53f29865062c0915db622aaaba72 | from PIL import Image
from pdf2image import convert_from_path
import glob
from pathlib import Path
import shutil, os
from docx import Document
import fnmatch
import re
import shutil
def find_files_ignore_case(which, where='.'):
    '''Returns list of filenames from `where` path matched by 'which'
    shell pattern. Matching is case-insensitive.'''
    # TODO: recursive param with walk() filtering
    pattern = re.compile(fnmatch.translate(which), re.IGNORECASE)
    matches = []
    for entry in os.listdir(where):
        if pattern.match(entry):
            matches.append(entry)
    return matches
def crop_image_center(file, crop_left, crop_right, crop_top, crop_bottom):
    # Crop the image file in place by the given pixel margins.
    img = Image.open(file)
    x, y = img.size
    # PIL box is (left, upper, right, lower) in absolute coordinates.
    # NOTE(review): the right edge is x - crop_left - crop_right, so
    # crop_left is subtracted twice (and crop_top likewise on the lower
    # edge); confirm whether x - crop_right / y - crop_bottom was intended.
    box = (crop_left, crop_top, x - crop_left - crop_right, y - crop_top - crop_bottom)
    crop = img.crop(box)
    crop.save(file)
def create_empty_folder(path):
    '''Create a folder. Delete content if exists'''
    Path(path).mkdir(parents=True, exist_ok=True)
    # Remove existing files
    # Bug fix: the original passed os.path.join(path, '*') as the match
    # pattern while leaving `where` at its default '.', so it scanned the
    # current working directory and never removed anything inside `path`.
    for entry in os.listdir(path):
        full_name = os.path.join(path, entry)
        if os.path.isfile(full_name):
            os.remove(full_name)
def convert_pdf_to_images(file):
    '''Convert a PDF file into images and save to folder of same name
    Return folder which contains the images
    '''
    # Create directory for each file
    folder = os.path.splitext(file)[0]
    create_empty_folder(folder)
    # Convert PDF to images into the directory
    # One JPEG per page, named Z00001.jpg, Z00002.jpg, ... so that a plain
    # lexicographic sort preserves page order.
    images = convert_from_path(file)
    for i, image in enumerate(images):
        file_name = 'Z{:05}.jpg'.format(i+1)
        image.save(os.path.join(folder, file_name), 'JPEG')
    return folder
def get_file_name_prefix(filename):
    '''Return the first prefix from file_name_prefixes.txt that *filename*
    starts with (case-insensitive), or None when nothing matches.'''
    lowered = filename.lower()
    with open('file_name_prefixes.txt') as prefix_file:
        for raw_line in prefix_file:
            candidate = raw_line.strip()
            if lowered.startswith(candidate.lower()):
                return candidate
    return None
import errno, os, stat, shutil
def handleRemoveReadonly(func, path, exc):
    '''shutil.rmtree onerror hook: clear read-only bits and retry the
    failed rmdir/remove; re-raise anything else.'''
    error = exc[1]
    retryable = func in (os.rmdir, os.remove) and error.errno == errno.EACCES
    if not retryable:
        raise
    os.chmod(path, stat.S_IRWXU| stat.S_IRWXG| stat.S_IRWXO) # 0777
    func(path)
if __name__ == '__main__':
    # Pipeline: PDFs -> page JPEGs -> cropped -> merged with reference
    # images -> embedded in a .docx per PDF -> temp folders deleted.
    cur_folder = os.path.abspath('')
    # Convert PDFs to Images
    print('Convert PDFs to images...')
    files = find_files_ignore_case('*.pdf')
    for pdf_file in files:
        pdf_file = os.path.join(cur_folder, pdf_file)
        print(pdf_file)
        folder = convert_pdf_to_images(pdf_file)
    # Crop images
    print('Crop images...')
    files = find_files_ignore_case('*.pdf')
    for file in files:
        folder = os.path.splitext(file)[0]
        print(folder)
        images = find_files_ignore_case('*.jpg', folder)
        images.sort()
        print(images)
        for image_file in images:
            try:
                image_file = os.path.join(folder, image_file)
                # NOTE(review): crop_right=-40 widens the crop box to the
                # right -- confirm this is intentional.
                crop_image_center(image_file, crop_left=160,
                    crop_right=-40, crop_top=100, crop_bottom=20)
            except:
                # Best-effort: skip images that fail to crop.
                pass
    # Copy Image *.jpg From Reference to Folder
    files = find_files_ignore_case('*.pdf')
    for file in files:
        print(file)
        folder = os.path.splitext(file)[0]
        file_prefix = get_file_name_prefix(file)
        print(file_prefix)
        # Copy Image *.jpg From Reference to Folder
        source_files = find_files_ignore_case('{}*.jpg'.format(file_prefix), 'Reference')
        for f in source_files:
            f = os.path.join('Reference', f)
            shutil.copy(f, folder)
    # Insert Images to Word
    files = find_files_ignore_case('*.pdf')
    for file in files:
        folder = os.path.splitext(file)[0]
        word_file = folder+".docx"
        # Copy from template docx
        file_prefix = get_file_name_prefix(file)
        # NOTE(review): rebinding `files` here shadows the loop's source
        # list name; the for-loop already holds its own iterator, but the
        # reuse is confusing.
        files = find_files_ignore_case('{}*.docx'.format(file_prefix), 'Reference')
        print(file, file_prefix, files)
        if files:
            document = Document(os.path.join('Reference', files[0]))
            document.add_section()
        else:
            document = Document()
        document.save(word_file)
        section = document.sections[0]
        # width = section.page_width - section.left_margin - section.right_margin
        height = section.page_height - section.top_margin - section.bottom_margin
        images = find_files_ignore_case('*.jpg', folder)
        for image_file in images:
            image_file = os.path.join(folder, image_file)
            # document.add_picture(image_file, width=width)
            document.add_picture(image_file, height=height)
        document.save(word_file)
    # Delete folders including its images
    files = find_files_ignore_case('*.pdf')
    for file in files:
        folder = os.path.splitext(file)[0]
        print('Deleting', folder, os.path.isdir(folder))
        try:
            files_in_dir = os.listdir(folder)
            for file in files_in_dir: # loop to delete each file in folder
                os.remove(os.path.join(folder,file))
            #os.rmdir(folder)
            shutil.rmtree(folder, ignore_errors=False, onerror=handleRemoveReadonly)
        except Exception as ex:
            print('Error deleting', folder, ex)
|
6,917 | 6a8007e44d2c4b56426cd49772cbc23df2eca49c | #program_skeleton.py
#import load_json_files as bm
import write
import merge as m
import load_df as ldf
import load_vars as lv
import log as log
import clean_df as clean
import download as dl
import gc
import confirm_drcts as cfs
import fix_files as ff
import readwrite as rw
import df_filter as df_f
import realtor_scraper_sheets_3 as scraper
import get_creds as creds
import goog_sheets as sheets
from pprint import pprint
import google_drive as drive
import batch_download as download
import rew_scraper as rew_scraper
import rew_scraper3 as rew3
def program_skeleton(dictionary: dict):
## Batch Merge creates a back_up of contacts from csv in batches no greater than 500 contacts per document. Can be expanded. Keeps files from getting to large
if dictionary['tasks']['environmental_vars']['run'] == True:
dictionary['tasks']['environmental_vars']['log']['environmental_vars_set'] = lv.set_environmental_vars(dictionary['tasks'])
dictionary['tasks']['environmental_vars']['goog_creds'] = creds.get_creds()
dictionary['tasks']['environmental_vars']['sheets_service'] = sheets.get_sheet_service(dictionary['tasks']['environmental_vars']['goog_creds'])
dictionary['tasks']['environmental_vars']['drive_service'] = drive.get_drive_service(dictionary['tasks']['environmental_vars']['goog_creds'])
dictionary['tasks']['environmental_vars']['criteria_sheet_meta'] = sheets.confirm_sheet_ids(dictionary['tasks']['environmental_vars']['criteria_sheet_ids'],dictionary['tasks']['environmental_vars']['sheets_service'])
#dictionary['tasks']['environmental_vars']['output_sheet_meta'] = drive.add_spreadsheet_to_folder(dictionary['tasks']['environmental_vars']['drive_service'],dictionary['tasks']['environmental_vars']['output_folder_id'],dictionary['tasks']['environmental_vars']['date']['datetime'])
#dictionary['tasks']['environmental_vars']['dfs']['cities_search'] = goog_sheets.
#pprint(dictionary['tasks']['environmental_vars']['sheet_meta'])
lv.batchify(dictionary['tasks']['environmental_vars']['criteria_sheet_meta'],dictionary['tasks']['environmental_vars']['batch_size'])
dictionary['tasks']['environmental_vars']['dnn'] = sheets.batch_download(dictionary['tasks']['environmental_vars']['criteria_sheet_meta']['dnn'],dictionary['tasks']['environmental_vars']['sheets_service'],True)
#sheets.batch_download(dictionary['tasks']['environmental_vars'])
#print(dictionary['tasks']['environmental_vars']['directories']['log_directory'])
#log.json_dump(dictionary['tasks'])
#log.csv_dump(dictionary['tasks'])
#print(dictionary)
if dictionary['tasks']['scrape_web_data_rew']['run'] == True:
#if dictionary['tasks']['scrape_web_data_sheets']['input_list']['run'] == True:
#pprint(dictionary['tasks']['environmental_vars']['criteria_sheet_meta'])
#input_df = sheets.batch_download(dictionary['tasks']['environmental_vars']['criteria_sheet_meta']['input_list'],dictionary['tasks']['environmental_vars']['sheets_service'],True)
dictionary['tasks']['environmental_vars']['input_list'] = sheets.batch_download(dictionary['tasks']['environmental_vars']['criteria_sheet_meta']['input_list'],dictionary['tasks']['environmental_vars']['sheets_service'],True)
#pprint(dictionary['tasks']['environmental_vars']['sheets_service'])
rew3.initial(dictionary['tasks']['environmental_vars']['input_list'],dictionary['tasks']['environmental_vars']['sheets_service'])
#rew_scraper.scrape("agents/areas/toronto-on",dictionary['tasks']['environmental_vars']['sheets_service'],2,2)
#print('true')
if dictionary['tasks']['scrape_web_data_sheets']['run'] == True:
if dictionary['tasks']['scrape_web_data_sheets']['input_list']['run'] == True:
#pprint(dictionary['tasks']['environmental_vars']['criteria_sheet_meta'])
#input_df = sheets.batch_download(dictionary['tasks']['environmental_vars']['criteria_sheet_meta']['input_list'],dictionary['tasks']['environmental_vars']['sheets_service'],True)
dictionary['tasks']['environmental_vars']['input_list'] = sheets.batch_download(dictionary['tasks']['environmental_vars']['criteria_sheet_meta']['input_list'],dictionary['tasks']['environmental_vars']['sheets_service'],True)
scraper.scrape(dictionary['tasks']['environmental_vars']['input_list'],dictionary['tasks']['environmental_vars']['sheets_service'],dictionary['tasks']['environmental_vars']['drive_service'],dictionary['tasks']['environmental_vars']['output_folder_id'])
#print('true')
#download.batch_download(dictionary['tasks']['environmental_vars'])
if dictionary['tasks']['confirm_folder_structure']['run'] == True:
dictionary['tasks']['confirm_folder_structure']['log']['folder_structure_confirmed'] = cfs.confirm_folder_structure(dictionary)
#ff.fix_files(dictionary) # fix files if necessary. This is a fuck up on my end...
if dictionary['tasks']['scrape_web_data']['run'] == True:
dictionary['tasks']['scrape_web_data']['log']['cities'] = rw.file_list(dictionary['tasks']['environmental_vars']['directories']['cities'])
df = dictionary['tasks']['environmental_vars']['dfs']['cities'] = m.merge_zip_data(dictionary['tasks']['scrape_web_data']['log']['cities'])
df_f.filter_state_data(df,'ct')
#dictionary['tasks']['environmental_vars']['dfs']['cities']['directory'] = df. apply dictionary['tasks']['environmental_vars']['sep'].join((dictionary['tasks']['environmental_vars']['directories']['to_merge'], dictionary['tasks']['environmental_vars']['dfs']['cities'].state_name,dictionary['tasks']['environmental_vars']['dfs']['cities'].city))
df['to_merge'] = dictionary['tasks']['environmental_vars']['directories']['to_merge']
df['directory'] = df[['to_merge','state_name', 'city']].apply(lambda x: dictionary['tasks']['environmental_vars']['sep'].join(x), axis=1)
#df['period'] = df[['Year', 'quarter']].apply(lambda x: ''.join(x), axis=1)
#print(dictionary['tasks']['environmental_vars']['dfs']['cities'].directory)
scraper.scrape(df)
#dictionary['tasks']['environmental_vars']['dfs'][''] = m.merge_zip_data(dictionary['tasks']['scrape_web_data']['log']['zip_codes'])
#dictionary['tasks']['environmental_vars']['dfs']['zip_codes'] = rw.file_list(dictionary['tasks']['environmental_vars']['files']['zip_database'])
if dictionary['tasks']['merge_data']['run'] == True:
dictionary['tasks']['merge_data']['log']['files_to_merge'] = rw.file_list_walk(dictionary['tasks']['environmental_vars']['directories']['to_merge'])
dictionary['tasks']['environmental_vars']['dfs']['master_merge'] = m.merge_agent_data(dictionary['tasks']['merge_data']['log']['files_to_merge'])
#rw.df_toJson(dictionary['tasks'],dictionary['tasks']['environmental_vars']['file_names']['master_merge'],dictionary['tasks']['environmental_vars']['dfs']['master_merge'],dictionary['tasks']['environmental_vars']['directories']['merged_data'])
rw.df_toCsv(dictionary['tasks'],dictionary['tasks']['environmental_vars']['file_names']['agent_data_raw'],dictionary['tasks']['environmental_vars']['dfs']['master_merge'],dictionary['tasks']['environmental_vars']['directories']['merged_data'])
rw.df_toJson(dictionary['tasks'],dictionary['tasks']['environmental_vars']['file_names']['agent_data_raw'],dictionary['tasks']['environmental_vars']['dfs']['master_merge'],dictionary['tasks']['environmental_vars']['directories']['merged_data'])
#print(dictionary['tasks']['environmental_vars']['dfs']['master_merge'])
if dictionary['tasks']['filter_data']['run'] == True:
print('filtering_data')
dictionary['tasks']['filter_data']['log']['files_to_filter'] = rw.file_list(dictionary['tasks']['environmental_vars']['directories']['merged_data'])
dictionary['tasks']['filter_data']['log']['dnn_filter'] = rw.file_list(dictionary['tasks']['environmental_vars']['directories']['dnn'])
df = dictionary['tasks']['environmental_vars']['dfs']['dnn'] = m.merge_csv(dictionary['tasks']['filter_data']['log']['dnn_filter'])
df["first_name"] = df["first_name"].str.lower()
df["last_name"] = df["last_name"].str.lower()
## checks to see if the df is already in memory. If not the pass
try:
if dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'].empty:
#if try succeeds and if is true then fill it anyways
dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'] = m.merge_json(dictionary['tasks']['filter_data']['log']['files_to_filter'])
else:
#if alrady exists move on
print('The Df already exists')
pass
#do something
except:
#if exception is raised then the df does not exist. Create it
print('The Df no exists')
dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'] = m.merge_json(dictionary['tasks']['filter_data']['log']['files_to_filter'])
df_f.clean_realtor_data(dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'])
df_f.filter_realtor_data(dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'],df,800000,3)
rw.df_toCsv(dictionary['tasks'],dictionary['tasks']['environmental_vars']['file_names']['agent_data_mapped'],dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'],dictionary['tasks']['environmental_vars']['directories']['mapped_data'])
rw.df_toJson(dictionary['tasks'],dictionary['tasks']['environmental_vars']['file_names']['agent_data_mapped'],dictionary['tasks']['environmental_vars']['dfs']['merged_agent_data'],dictionary['tasks']['environmental_vars']['directories']['mapped_data'])
#if dictionary['tasks']['extract_agent_data']['run'] == True:
# dictionary['tasks']['environmental_vars']['dfs']['agent_data'] = m.merge_agent_data(dictionary['tasks'])
|
6,918 | 78ddae64cc576ebaf7f2cfaa4553bddbabe474b7 | from django.db import models
from orders.constants import OrderStatus
from subscriptions.models import Subscription
class Order(models.Model):
    """Payment order attached to exactly one subscription."""

    # One order per subscription; deleting the subscription deletes the order.
    subscription = models.OneToOneField(
        Subscription,
        on_delete=models.CASCADE,
        related_name='order',
    )
    # Workflow state; choices come from orders.constants.OrderStatus.
    order_status = models.CharField(
        max_length=50,
        choices=OrderStatus.Choices,
        default=OrderStatus.IN_PROGRESS,
    )
    created = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated = models.DateTimeField(auto_now=True)  # refreshed on every save()
    email = models.EmailField()
    price = models.DecimalField(max_digits=10, decimal_places=2)
    # def get_email(self):
    #     if self.email is None:
    #         self.email = Subscription.objects.get(client__email=...)
|
6,919 | 5cd767564e8a261561e141abeebb5221cb3ef2c2 | # Generated by Django 2.2.1 on 2019-05-23 14:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``descripcion`` and ``email`` fields to the Organizacion model."""

    dependencies = [
        ('presentes', '0015_caso_lugar_del_hecho'),
    ]

    operations = [
        migrations.AddField(
            model_name='organizacion',
            name='descripcion',
            field=models.TextField(default=''),
        ),
        migrations.AddField(
            model_name='organizacion',
            name='email',
            field=models.CharField(default='', max_length=200),
        ),
    ]
|
6,920 | 4d722975b4ffc1bbfe7591e6ceccc758f67a5599 | # Multiple Linear Regression
# To set the working directory save this .py file where we have the Data.csv file
# and then press the Run button. This will automatically set the working directory.
# Importing the data from preprocessing data
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

dataset = pd.read_csv('50_Startups.csv')
# iloc is integer-location based [rows, columns]; ':' = all rows,
# ':-1' = every column except the last one (the Profit target).
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 4].values

# Encoding the categorical 'State' column (index 3) as dummy variables.
# Fix: OneHotEncoder(categorical_features=[3]) was removed from sklearn;
# ColumnTransformer is the supported replacement, and OneHotEncoder now
# accepts string categories directly, so LabelEncoder is no longer needed.
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
ct = ColumnTransformer([('state', OneHotEncoder(), [3])],
                       remainder='passthrough')
X = np.array(ct.fit_transform(X), dtype=float)

# Avoiding the dummy variable trap: drop the first dummy column.
X = X[:, 1:]

# Splitting the dataset into Training set and Test set.
# Fix: sklearn.cross_validation was removed in 0.20; use model_selection.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# Feature Scaling is not required: LinearRegression handles raw features.
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)"""

# Fitting Multiple Linear Regression to the Training set.
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)

# Predicting the Test set results.
y_pred = regressor.predict(X_test)

# Building the model using Backward Elimination.
# Fix: the exog-style OLS lives in statsmodels.api (statsmodels.formula.api
# is for R-style formulas only).
import statsmodels.api as sm
# statsmodels OLS does not add an intercept; prepend a column of ones.
X = np.append(arr=np.ones((50, 1)).astype(int), values=X, axis=1)
X_opt = X[:, [0, 1, 2, 3, 4, 5]]
regressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()
regressor_OLS.summary()
# Repeatedly omit the predictor with the highest p-value until all p < SL.
X_opt = X[:, [0, 1, 3, 4, 5]]
regressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()
regressor_OLS.summary()
X_opt = X[:, [0, 3, 4, 5]]
regressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()
regressor_OLS.summary()
X_opt = X[:, [0, 3, 5]]
regressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()
regressor_OLS.summary()
X_opt = X[:, [0, 3]]
regressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()
regressor_OLS.summary()
# End of Backward Elimination Algorithm
# I would like to visualize the performance of R&D vs Profit scale
|
6,921 | 253d37f29e33f61d7e1a5ec2f9a1d6307a2ae108 | """
Tests for parsers.py
@author Kevin Wilson <khwilson@gmail.com>
"""
import crisis.parsers as undertest
import datetime
import unittest
class TestParsers(unittest.TestCase):
    """Unit tests for the crisis.parsers date/line parsing helpers."""

    def test_parse_date(self):
        # Four-digit-year timestamp format.
        date = '8/5/2013 16:14'
        self.assertEqual(datetime.datetime(2013, 8, 5, 16, 14),
                         undertest.parse_date(date))

    def test_part_date_short(self):
        # Two-digit-year variant, handled by parse_date_short.
        date = '8/5/13 16:14'
        self.assertEqual(datetime.datetime(2013, 8, 5, 16, 14),
                         undertest.parse_date_short(date))

    def test_parse_line(self):
        # parse_line converts a list of numeric strings element-wise to ints.
        line = ["1","2","3"]
        actual = undertest.parse_line(line)
        expected = [1,2,3]
        self.assertTrue(all(x == y for x, y in zip(expected, actual)))

if __name__ == '__main__':
    unittest.main()
|
6,922 | 23ba9e498dd153be408e973253d5f2a858d4771b | """Module just for fun game"""
# -*- coding: utf-8 -*-
from __future__ import print_function
from itertools import chain
import tabulate
import numpy
class Game(object):
    """Console tic-tac-toe on a 3x3 board.

    Cell encoding in ``self.field``: -10 = empty, 1 = 'X', 2 = 'O'.
    The -10 sentinel guarantees any line containing an empty cell sums to a
    negative number, which is what ``check_win`` relies on.
    """

    def __init__(self):
        # Numeric board used for win detection.
        self.field = numpy.array([(-10, -10, -10), (-10, -10, -10), (-10, -10, -10)])
        # Human-readable board used only for display.
        self.rendered_field = [['-', '-', '-'], ['-', '-', '-'], ['-', '-', '-']]

    def render_field(self):
        """Print the current board as an ASCII grid."""
        print(tabulate.tabulate(self.rendered_field, tablefmt='grid'))

    def check_free_place(self, i, j):
        """Return True while cell (i, j) is still empty."""
        return self.field[i][j] == -10

    def check_win(self):
        """Return True when any row, column or diagonal belongs to one player.

        A line of three 'X' sums to 3, three 'O' to 6 -- the only reachable
        positive multiples of 3, since any line with an empty (-10) cell is
        negative.
        """
        line_sums = chain(
            self.field.sum(axis=0),  # column sums
            self.field.sum(axis=1),  # row sums
            (self.field.diagonal().sum(),
             numpy.fliplr(self.field).diagonal().sum()),
        )
        return any(summa > 0 and summa % 3 == 0 for summa in line_sums)

    @staticmethod
    def validate_number(number):
        """Validate a cell number entered by the user.

        Returns the int 1..9 on success, or the string 'Error' otherwise.
        Bug fix: the original returned 'TypeError' for non-string input,
        which the caller never checked for and then crashed on ``number - 1``.
        """
        if not isinstance(number, str):
            return 'Error'
        try:
            value = int(number)
        except (ValueError, TypeError):
            return 'Error'
        if value < 1 or value > 9:
            return 'Error'
        return value

    def main(self):
        """Top-level loop: play games until the user declines another round."""
        print("Приветствую вас в игре 'крестики-нолики'")
        answer = True
        while answer:
            self.render_field()
            self.game_logic()
            answer = None
            while answer is None:
                print("Хотите ли продолжить игровой сеанс? 'y(д)' - да, 'n(н)' - нет")
                str_answer = input().lower()
                answer = True if str_answer == 'y' or str_answer == 'д' else \
                    (False if str_answer == 'n' or str_answer == 'н' else None)

    def game_logic(self):
        """Run one game: alternate 'X'/'O' moves until a win or a full board."""
        symbols = ('X', 'O')
        move_counter = 0
        start = True
        while not self.check_win() and move_counter != 9 or start:
            start = False
            num_for_validation = input('Введите номер клеточки, куда поставить {}\n' \
                .format(symbols[move_counter % 2]))
            number = Game.validate_number(num_for_validation)
            if number == 'Error':
                print('Да введите число от 1 до 9, сложно что ли?')
            else:
                our_index = number - 1  # user enters 1..9, indices are 0-based
                index = (our_index // 3, our_index % 3)
                if not self.check_free_place(index[0], index[1]):
                    print('Эта клеточка уже занята, пожалуйста, посмотрите другие варианты')
                else:
                    self.field[index[0]][index[1]] = move_counter % 2 + 1  # 'X' -> 1, 'O' -> 2
                    self.rendered_field[index[0]][index[1]] = symbols[move_counter % 2]
                    move_counter += 1
                    print('Ситуация на поле боя: (ходов произведено {})'.format(move_counter))
                    self.render_field()
        print('Окончание игры')
        if not self.check_win() and move_counter == 9:
            print('Это ничья. Но это ожидаемый результат')
if __name__ == '__main__':
    # Entry point: start an interactive game session.
    a = Game()
    a.main()
|
6,923 | e60fcf19560b4826577797c8ae8b626ff984dcfd | from pynput import keyboard
# Buffer of key objects entered by the user.
list = []  # NOTE(review): shadows the builtin ``list``
number_of_chars = 0
# Once the buffer reaches MAX_LENGTH keys it is flushed to the file.
MAX_LENGTH = 300
def on_press(key):
    """pynput callback: buffer each pressed key and flush the buffer to disk
    once it reaches MAX_LENGTH keys."""
    global number_of_chars
    global list
    list.append(key)
    number_of_chars+=1
    if number_of_chars>=MAX_LENGTH:
        # Flush and reset the in-memory buffer.
        write_in_file()
        list.clear()
        number_of_chars = 0
def on_release(key):
    """pynput callback: on Esc, flush remaining keys and stop the listener."""
    if key == keyboard.Key.esc:
        # If the user exits, write all buffered contents to the file.
        write_in_file()
        # Returning False tells pynput to stop the listener loop.
        return False
def write_in_file():
    """Append the buffered keystrokes to strokes.txt, one repr per line."""
    # ``with`` guarantees the handle is flushed and closed even on error
    # (the original opened/closed manually and misused writelines on a
    # single string, which works only because a str is iterable).
    with open("strokes.txt", "a") as file:
        for k in list:
            # NOTE(review): ``list`` is the module-level key buffer.
            file.write("{}\n".format(str(k)))
# Truncate strokes.txt each time the program starts.
open("strokes.txt","w").close()

# Block until the listener thread exits (Esc pressed in on_release).
with keyboard.Listener(on_press = on_press,on_release=on_release) as listener:
    listener.join()
6,924 | ffd11d49f8499b4bfec8f17d07b66d899dd23d2e | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-26 20:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a default profile-picture URL (``dp``) and a ``gpa`` integer
    field to the Student model."""

    dependencies = [
        ('Cbrowser', '0002_links_l_title'),
    ]

    operations = [
        migrations.AddField(
            model_name='student',
            name='dp',
            field=models.CharField(default='https://thebenclark.files.wordpress.com/2014/03/facebook-default-no-profile-pic.jpg', max_length=1000),
        ),
        migrations.AddField(
            model_name='student',
            name='gpa',
            field=models.IntegerField(default=0),
        ),
    ]
|
6,925 | 5bcfb0d4fd371a0882dd47814935700eed7885ec | import sys
def main(stream=sys.stdin):
    """Read Code Jam style input from *stream*; print one verdict per case.

    Format: number of cases, then per case a "rows cols" line followed by
    ``rows`` lines of space-separated integers.

    Fix: made the function run identically on Python 2 and 3 -- ``xrange``
    and the ``print`` statement were Python-2-only, and a bare ``map`` is a
    lazy, non-indexable iterator on Python 3.
    """
    num_cases = int(stream.readline().strip())
    for i in range(num_cases):
        rows, cols = map(int, stream.readline().strip().split())
        board = []
        for r in range(rows):
            # list(...) keeps each row indexable on Python 3.
            board = board + [list(map(int, stream.readline().strip().split()))]
        if is_board_valid(board, rows, cols):
            print("Case #%d: YES" % (i + 1,))
        else:
            print("Case #%d: NO" % (i + 1,))
def is_board_valid(board, rows, cols):
    """Return True when every cell is valid (row- or column-maximal).

    Fix: ``xrange`` replaced with ``range`` (identical semantics here on
    Python 2, and valid on Python 3).

    >>> is_board_valid([[1,2,1]], 1, 3)
    True
    """
    return all(all(is_cell_valid(board, r, c) for c in range(cols)) for r in range(rows))
def is_cell_valid(board, r, c):
    """A cell is valid when it is a maximum of its row or of its column.

    >>> is_cell_valid([ [2, 2, 2, 2, 2], [2, 1, 1, 1, 2], [2, 1, 2, 1, 2], [2, 1, 1, 1, 2], [2, 2, 2, 2, 2] ], 0, 0)
    True
    >>> is_cell_valid([ [2, 2, 2, 2, 2], [2, 1, 1, 1, 2], [2, 1, 2, 1, 2], [2, 1, 1, 1, 2], [2, 2, 2, 2, 2] ], 1, 1)
    False
    """
    # Short-circuit exactly like the original ``or`` expression.
    if is_cell_row_valid(board, r, c):
        return True
    return is_cell_col_valid(board, r, c)
def is_cell_row_valid(board, r, c):
    """True when board[r][c] is >= every value in its row.

    Fix: ``xrange`` replaced with ``range`` (same semantics on Python 2,
    valid on Python 3).

    >>> is_cell_row_valid([[2,1,2],[1,1,1],[2,1,2]], 1, 1)
    True
    >>> is_cell_row_valid([[2,1,2],[1,1,1],[2,1,2]], 0, 1)
    False
    """
    return all(board[r][i] <= board[r][c] for i in range(len(board[r])))
def is_cell_col_valid(board, r, c):
    """True when board[r][c] is >= every value in its column.

    Fix: ``xrange`` replaced with ``range`` (same semantics on Python 2,
    valid on Python 3).

    >>> is_cell_col_valid([[1,2,1]], 0, 1)
    True
    """
    return all(board[i][c] <= board[r][c] for i in range(len(board)))
if __name__ == '__main__':
    import doctest
    # Bug fix: doctest.testmod() returns TestResults(failed, attempted), a
    # non-empty namedtuple that is ALWAYS truthy, so the original ran main()
    # unconditionally.  Run main() only when no doctest failed.
    if not doctest.testmod().failed:
        main()
|
6,926 | f8bb2851192a53e94e503c0c63b17477878ad9a7 | import os
import pandas as pd
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt

# Headerless signal/background training CSVs with 16 unnamed columns.
name="/home/t3cms/thessel/Workflow1.5/stop_data/stop_train_sig_wc.csv"
name_bkg="/home/t3cms/thessel/Workflow1.5/stop_data/stop_train_bkg_wc.csv"
# Columns excluded from the PCA transform (presumably ids/weights --
# TODO confirm against the producer of these files).
drop_cols=[0,1,2,15]
names = [i for i in range(16)]
file_df_sig=pd.read_csv(name, sep=",",names=names)
tmp_df_sig = file_df_sig.drop(drop_cols, axis=1)
file_df_bkg = pd.read_csv(name_bkg, sep=",",names=names)
tmp_df_bkg = file_df_bkg.drop(drop_cols, axis=1)
# Fit a single PCA on signal+background combined so both get the same rotation.
tmp_df = pd.concat([tmp_df_sig , tmp_df_bkg] , ignore_index=True)
#fig , ax = plt.subplots()
#tmp_df.hist(bins=10,ax=ax)
#fig.savefig("before_pca.pdf")
# Keep every component (n_components == number of input features).
pca=PCA(n_components=len(tmp_df.columns)).fit_transform(tmp_df)
pca_df = pd.DataFrame(data=pca, columns=tmp_df.columns)
#fig , ax = plt.subplots()
#df.hist(bins=10,ax=ax)
#fig.savefig("after_pca.pdf")
# Rebuild the full frames, overwriting the transformed columns in place
# while the dropped columns keep their original values.
final_df= pd.concat([file_df_sig , file_df_bkg] , ignore_index=True)
print("Before PCA" , final_df)
for i in pca_df.columns :
    final_df[i]=pca_df[i]
print("After PCA" , final_df)
# Split back: the first len(sig) rows are signal, the rest background.
cut=len(file_df_sig.index)
final_df.iloc[:cut].to_csv("pca_stop_train_sig_wc.csv",header= False,index=False)
final_df.iloc[cut:].to_csv("pca_stop_train_bkg_wc.csv",header= False , index =False)
|
6,927 | 46aa795bb72db0fcd588b1747e3559b8828be17c | #!/usr/bin/env python3.7
import Adafruit_GPIO
import Adafruit_GPIO.I2C as I2C
import time
import sys
import argparse
import os

# Select (or scan) the downstream I2C channels behind a TCA9548A
# 8-channel I2C multiplexer.
argparser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    description="Select I2C channel multiplexed by TCA9548A")
argparser.add_argument('ch', nargs='?', help="channel", type=int)
args = argparser.parse_args()

# The mux is addressed at 0x70 here (its default address).
TCA9548A = I2C.get_i2c_device(0x70)

if args.ch is None:
    # No channel given: select each of the 8 channels in turn (the control
    # register is a one-hot channel mask) and scan bus 1 for devices.
    for channel in range(0,8):
        print(f"== CHANNEL {channel} ==")
        TCA9548A.write8(0, 1<<channel)
        os.system("i2cdetect -y 1")
else:
    # Channel given: leave exactly that channel selected.
    TCA9548A.write8(0, 1<<args.ch)
|
6,928 | d211594a034489d36a5648bf0b926fbd734fd0df | import xdrlib,sys
import xlrd
def open_excel(file='D:\基金公司\数据库-制表符\资产组合-基金公司维度.xlsx'):
    """Open an Excel workbook with xlrd and return the Book object.

    Returns None (after printing the error) when the file cannot be opened.
    """
    try:
        # Bug fix: the original ignored the ``file`` argument and always
        # opened the hard-coded default path.
        data = xlrd.open_workbook(file)
        return data
    except Exception as e:
        print(str(e))
def excel_table_byindex(file='D:\基金公司\数据库-制表符\资产组合-基金公司维度.xlsx',colnameindex=0,by_index=0):
    """Read one sheet into a list of {column name: cell value} dicts.

    colnameindex: row holding the column names; by_index: sheet index.
    Returns [] when the workbook cannot be opened.
    """
    # Bug fix: the original passed a hard-coded path, ignoring ``file``.
    data = open_excel(file)
    if data is None:
        # open_excel already printed the error.
        return []
    table = data.sheets()[by_index]
    nrows = table.nrows
    colnames = table.row_values(colnameindex)
    # Renamed from ``list`` (shadowed the builtin).
    rows = []
    for rownum in range(1, nrows):
        row = table.row_values(rownum)
        if row:
            record = {}
            for i in range(len(colnames)):
                record[colnames[i]] = row[i]
            # Bug fix: the original called ``list.apend`` (AttributeError).
            rows.append(record)
    return rows
|
6,929 | f37d016dc49820239eb42198ca922e8681a2e0a6 | import simplejson as json
# Pretty-print each JSON file in the list in place (4-space indent,
# sorted keys).
json_list = [ "/content/squash-generation/squash/final/Custom.json",
              "/content/squash-generation/squash/temp/Custom/final_qa_set.json",
              "/content/squash-generation/squash/temp/Custom/generated_questions.json",
              "/content/squash-generation/squash/temp/Custom/nbest_predictions.json",
              "/content/squash-generation/squash/temp/Custom/null_odds.json",
              "/content/squash-generation/squash/temp/Custom/predictions.json" ]

for path in json_list:
    # Context managers replace the manual open/close pairs of the original,
    # which leaked the read handle if json.load raised.
    with open(path) as infile:
        obj = json.load(infile)
    with open(path, "w") as outfile:
        json.dump(obj, outfile, indent=4, sort_keys=True)
|
6,930 | c700af6d44cd036212c9e4ae4932bc60630f961e | #!/usr/bin/env python3
import os
import fileinput

# Interactive bootstrapper: creates <project>/ containing a virtualenv,
# requirements.txt, a git repository, and a freshly started Django project.
project = input("Enter short project name: ")
if os.path.isdir(project):
    print("ERROR: Project exists")
    exit()

os.mkdir(project)
os.chdir(project)
cmd = "virtualenv env -p `which python3` --prompt=[django-" + project + "]"
os.system(cmd)

# Install django with default packages
requirements = """django
flake8
autopep8
pytz
django-debug-toolbar
django-autofixture
"""
with open('requirements.txt', 'w+') as ouf:
    ouf.write(requirements)
os.system("env/bin/pip install -r requirements.txt")

# Initiate git repository
gitignore = """env
*.sqlite3
*_local*
*.pyc
__pycache__
*.rdb
*.log
log
static
"""
with open('.gitignore', 'w+') as ouf:
    ouf.write(gitignore)
os.system("git init && git add .gitignore && git commit -m 'Initial commit.'")

cmd = "env/bin/django-admin startproject " + project
os.system(cmd)
# Flatten the nested <project>/<project> layout created by startproject.
cmd = "mv " + project + " tmp && mv tmp/* . && rm -rf tmp"
os.system(cmd)

# Lines inserted into INSTALLED_APPS right after django.contrib.staticfiles.
# NOTE(review): the leading indentation of these literals was lost in
# transit; confirm it matches settings.py's INSTALLED_APPS indentation.
settings_new_lines = """    'autofixture',
    'debug_toolbar',
"""
settings_path = project + '/settings.py'
# fileinput with inplace=1 redirects print() back into the file.
for line in fileinput.FileInput(settings_path, inplace=1):
    if " 'django.contrib.staticfiles'," in line:
        line = line.replace(line, line + settings_new_lines)
    print(line, end='')
os.system("git add . && git commit -m 'Install Django project.'")

# Output message
message = """
You can now type:
cd {0}
activate
"""
print(message.format(project))
|
class Solution(object):
    def rotate(self, nums, k):
        """Rotate *nums* right by k positions, in place.

        :type nums: List[int]
        :type k: int
        :rtype: void -- the list is modified in place, nothing is returned.
        """
        n = len(nums)
        k %= n
        # Slice-assign so the caller's list object is mutated, not rebound.
        nums[:] = nums[n - k:] + nums[:n - k]
6,932 | 475cc5130e847b1a74a33bfa5cbc202a6bf31621 | from codar.cheetah import Campaign
from codar.cheetah import parameters as p
from codar.savanna.machines import SummitNode
import copy
def get_shared_node_layout (n_writers, n_readers):
    """Single-node layout: writer ranks on cores [0, n_writers), reader
    ranks packed immediately after them on the same SummitNode."""
    node = SummitNode()
    for core in range(n_writers):
        node.cpu[core] = "writer:{}".format(core)
    for core in range(n_readers):
        node.cpu[n_writers + core] = "reader:{}".format(core)
    return [node]
def get_separate_node_layout (n_writers, n_readers):
    """Two-node layout: writer ranks fill cores on one SummitNode and
    reader ranks fill cores on a second one."""
    writer_node = SummitNode()
    for core in range(n_writers):
        writer_node.cpu[core] = "writer:{}".format(core)
    reader_node = SummitNode()
    for core in range(n_readers):
        reader_node.cpu[core] = "reader:{}".format(core)
    return [writer_node, reader_node]
def get_sweeps(ref_params_d, n_writers):
    """Build the list of cheetah Sweep objects for ``n_writers`` writer ranks.

    ref_params_d: template parameter dict ({'writer': {...}, 'reader': {...}})
    copied per combination of reader ratio, data size and engine.
    """
    params_d = copy.deepcopy(ref_params_d)
    params_d['writer']['nprocs'].values=[n_writers]
    params_d['writer']['decomposition'].values=[n_writers]
    all_dicts = []
    all_sweeps = []
    # Loop over ratio of the no. of writer ranks to reader ranks
    for r in [8]:
        par_r = copy.deepcopy(params_d)
        par_r['reader']['nprocs'].values = [n_writers//r]
        par_r['reader']['decomposition'].values = [n_writers//r]
        # Loop over data size per process
        for d in ['512MB']:
            par_r_d = copy.deepcopy(par_r)
            par_r_d['writer']['configfile'].values = ['staging-perf-test-{}-{}to1.txt'.format(d,r)]
            par_r_d['reader']['configfile'].values = ['staging-perf-test-{}-{}to1.txt'.format(d,r)]
            # Loop over ADIOS engines
            for e in ["bp4","sst-rdma","sst-tcp","ssc","insitumpi"]:
                par_r_d_e = copy.deepcopy(par_r_d)
                par_r_d_e['writer']['xmlfile'].values = ['staging-perf-test-{}.xml'.format(e)]
                par_r_d_e['reader']['xmlfile'].values = ['staging-perf-test-{}.xml'.format(e)]
                all_dicts.append(par_r_d_e)
    # Turn every parameter combination into (up to) two Sweeps:
    # one with writers/readers on separate nodes, one node-shared.
    for d in all_dicts:
        sweep_params = []
        sweep_params.extend(list(d['writer'].values()))
        sweep_params.extend(list(d['reader'].values()))
        sep_node_layout = get_separate_node_layout(32, 32)
        shared_node_layout = None
        if d['writer']['nprocs'].values[0] // d['reader']['nprocs'].values[0] == 8:
            # 8:1 ratio -> 32 writers + 4 readers share a node.
            shared_node_layout = get_shared_node_layout(32,4)
        elif n_writers//32 < 4096:
            shared_node_layout = get_shared_node_layout(16,16)
        rc_dependency = None
        if 'bp4' in d['writer']['xmlfile'].values[0]:
            # File-based engine: the reader run starts after the writer run.
            rc_dependency = {'reader': 'writer'}
        sweep_sep = p.Sweep(parameters = sweep_params, node_layout = {'summit':sep_node_layout}, rc_dependency=rc_dependency)
        # insitumpi and ssc require writer+reader in one MPMD launch.
        if 'insitumpi' in d['writer']['xmlfile'].values[0]:
            sweep_sep.launch_mode='mpmd'
        if 'ssc' in d['writer']['xmlfile'].values[0]:
            sweep_sep.launch_mode='mpmd'
        sweep_shared = None
        if shared_node_layout:
            sweep_shared = p.Sweep(parameters = sweep_params, node_layout = {'summit':shared_node_layout}, rc_dependency=rc_dependency)
        if n_writers//32 < 4096:
            all_sweeps.append(sweep_sep)
        if sweep_shared:
            all_sweeps.append(sweep_shared)
    return all_sweeps
class Adios_iotest(Campaign):
    """Cheetah campaign running adios_iotest writer/reader staging benchmarks."""

    # A name for the campaign
    name = "ADIOS_IOTEST"

    # The codes that make up the workflow: one writer and one reader
    # instance of the adios_iotest executable.
    codes = [ ("writer", dict(exe="adios_iotest")),
              ("reader", dict(exe="adios_iotest"))
            ]

    # A list of machines that this campaign must be supported on
    supported_machines = ['local', 'theta', 'summit']

    # Kill an experiment (just one experiment, not the full sweep or
    # campaign) if one of its codes fails.
    kill_on_partial_failure = True

    # Pre-processing script run when the campaign directory is created
    # (before the campaign is launched).
    run_dir_setup_script = None

    # Post-processing script run in the experiment directory after the
    # experiment completes (e.g. removing large files).
    run_post_process_script = 'cleanup.sh'

    # umask applied to the campaign directory so colleagues can view files.
    umask = '027'

    # Scheduler information: job queue, account-id etc.  None when running
    # on a local machine.
    scheduler_options = {'theta': {'project':'CSC249ADCD01', 'queue': 'batch'},
                         'summit': {'project':'csc303'}}

    # Environment setup (module loads, LD_LIBRARY_PATH, ...).
    # Ensure these scripts are executable.
    app_config_scripts = {'local': 'env_setup.sh', 'theta': 'env_setup.sh', 'summit':'env_setup.sh'}

    # Config and ADIOS XML files staged into each run directory.
    input_files = [
        'staging-perf-test-16MB-2to1.txt',
        'staging-perf-test-16MB-8to1.txt',
        'staging-perf-test-1MB-2to1.txt',
        'staging-perf-test-1MB-8to1.txt',
        'staging-perf-test-512MB-2to1.txt',
        'staging-perf-test-512MB-8to1.txt',
        'staging-perf-test-bp4.xml',
        'staging-perf-test-insitumpi.xml',
        'staging-perf-test-ssc.xml',
        'staging-perf-test-sst-rdma.xml',
        'staging-perf-test-sst-tcp.xml'
    ]

    # Sweep parameter templates; concrete values are filled in by get_sweeps().
    params = {}
    params['writer'] = {}
    params['reader'] = {}
    params['writer']['nprocs'] = p.ParamRunner ('writer', 'nprocs', [])
    params['writer']['appid'] = p.ParamCmdLineOption ('writer', 'appid', '-a', [1])
    params['writer']['configfile'] = p.ParamCmdLineOption ('writer', 'configFile', '-c', [])
    params['writer']['scaling'] = p.ParamCmdLineOption ('writer', 'scaling', '-w', [None])
    params['writer']['xmlfile'] = p.ParamCmdLineOption ('writer', 'xmlfile', '-x', [])
    params['writer']['decomposition'] = p.ParamCmdLineOption ('writer', 'decomposition', '-d', [])
    params['reader']['nprocs'] = p.ParamRunner ('reader', 'nprocs', [])
    params['reader']['appid'] = p.ParamCmdLineOption ('reader', 'appid', '-a', [2])
    params['reader']['configfile'] = p.ParamCmdLineOption ('reader', 'configFile', '-c', [])
    params['reader']['scaling'] = p.ParamCmdLineOption ('reader', 'scaling', '-w', [None])
    params['reader']['xmlfile'] = p.ParamCmdLineOption ('reader', 'xmlfile', '-x', [])
    params['reader']['decomposition'] = p.ParamCmdLineOption ('reader', 'decomposition', '-d', [])

    # One SweepGroup per node count (32 writer ranks per node).
    sweeps = []
    for n in [8]:
        group_sweeps = get_sweeps (params, n*32)
        # pdb.set_trace()
        s_group = p.SweepGroup("{}-nodes".format(n),
                               walltime=7200,
                               per_run_timeout=600,
                               component_inputs={'writer':input_files},
                               #nodes=128,
                               parameter_groups=group_sweeps,)
        sweeps.append(s_group)
|
6,933 | 4c927f14065d0557dbe7b371002e133c351d3478 | import collections
import itertools
from . import stats
__all__ = [
'Party',
'HoR',
'Coalition'
]
Party = collections.namedtuple('Party', 'name,votes,seats')
class HoR(object):
    """House of Representatives: an immutable, sorted collection of Party
    namedtuples with seat/vote accessors and voting-power statistics."""

    def __init__(self, parties, name='HoR'):
        self.name = name
        # Parties kept largest-first, ordered by (seats, votes).
        self._parties = tuple(sorted(parties, key=lambda p: (p.seats, p.votes), reverse=True))
        self._party_mapping = {p.name: p for p in self._parties}

    def __getitem__(self, item):
        # Look a party up by its name.
        return self._party_mapping[item]

    @property
    def parties(self):
        return self._parties

    def seats_list(self):
        return [p.seats for p in self._parties]

    def votes_list(self):
        return [p.votes for p in self._parties]

    def names_list(self):
        return [p.name for p in self._parties]

    def vote_shares_list(self):
        # Per-party fraction of total votes, in party order.
        v = self.votes
        return [vi / v for vi in self.votes_list()]

    def seat_shares_list(self):
        # Per-party fraction of total seats, in party order.
        s = self.seats
        return [si / s for si in self.seats_list()]

    @property
    def seats(self):
        return sum(self.seats_list())

    @property
    def votes(self):
        return sum(self.votes_list())

    def top(self, n=1):
        # Coalition of the n largest parties.
        return Coalition(self, self._parties[:n])

    def as_coalition(self):
        return Coalition(self, self._parties)

    def __contains__(self, item):
        return item in self._parties

    def __iter__(self):
        return iter(self._parties)

    def iter_coalitions(self):
        # All non-empty proper coalitions (every size < full membership).
        for n in range(1, len(self)):
            for coalition in itertools.combinations(self._parties, n):
                yield Coalition(self, coalition)

    def __len__(self):
        return len(self._parties)

    def __hash__(self):
        return hash(self._parties)

    def same_as(self, hor):
        # Structural identity: same party tuple.  ``==`` compares seat
        # totals only (see below).
        return self.parties == hor.parties

    def __eq__(self, other):
        # NOTE(review): equality/ordering compare seat TOTALS, not
        # composition; use same_as() for structural comparison.
        return self.seats == other.seats

    def __gt__(self, other):
        return self.seats > other.seats

    def __ge__(self, other):
        return self.seats >= other.seats

    def __le__(self, other):
        return self.seats <= other.seats

    def __lt__(self, other):
        return self.seats < other.seats

    # Statistic functions from the stats module, exposed as methods.
    haar = stats.haar
    dev = stats.dev
    ens = stats.ens
    env = stats.env
    rrp = stats.rrp
    bantsaf_influence = stats.bantsaf_influence
    shepli_shubic = stats.shepli_shubic
    jonson_general = stats.jonson_general
    jonson_influence = stats.jonson_influence
    digen_pakel_general = stats.digen_pakel_general
    digen_pakel_influence = stats.digen_pakel_influence
    holer_pakel = stats.holer_pakel
    describe = stats.describe

    def map_stat(self, stat):
        """Return {party name: value} for *stat*, given as an attribute
        name ('seats'/'votes'/a power-index method name) or as one of the
        stats-module functions themselves."""
        if stat in ('seats', 'votes'):
            return {party.name: getattr(party, stat)
                    for party in self._parties}
        elif stat in (
            stats.bantsaf_influence,
            stats.shepli_shubic,
            stats.jonson_general,
            stats.jonson_influence,
            stats.digen_pakel_general,
            stats.digen_pakel_influence,
            stats.holer_pakel,
        ):
            # stat is the function itself: call it as stat(hor, party).
            return {party.name: stat(self, party)
                    for party in self._parties}
        elif stat not in (
            'bantsaf_influence',
            'shepli_shubic',
            'jonson_general',
            'jonson_influence',
            'digen_pakel_general',
            'digen_pakel_influence',
            'holer_pakel',
        ):
            raise ValueError('Stat {} cannot be computed'.format(stat))
        # stat is the name of a per-party method on self.
        return {party.name: getattr(self, stat)(party)
                for party in self._parties}
class Coalition(HoR):
    """A subset of a HoR's parties; behaves like a small parliament itself."""

    def __init__(self, hor, parties, name='Coalition', *, _opposition=None):
        super().__init__(parties, name=name)
        self._hor = hor
        # Cached complement coalition; built lazily by ``opposition``.
        self._opposition = _opposition

    @property
    def opposition(self):
        # Lazily build the complement coalition, linking it back to self
        # so the pair share one another as opposition.
        if self._opposition is None:
            others = [p for p in self._hor if p not in self]
            self._opposition = Coalition(self._hor, others, _opposition=self)
        return self._opposition

    @property
    def hor(self):
        return self._hor

    def __add__(self, other):
        """Coalition + Party, or + Coalition from the same HoR; raises on
        duplicates or foreign members."""
        if isinstance(other, Party):
            if other in self:
                raise ValueError('{} is already present in HoR'.format(other))
            new = self._parties + (other, )
        elif isinstance(other, Coalition) and other.hor.same_as(self.hor):
            intercept = set(other) & set(self._parties)
            if intercept:
                raise ValueError('{} are already present in HoR'.format(intercept))
            new = self._parties + tuple(other)
        else:
            raise TypeError('Wrong type for {}'.format(other))
        return self.__class__(self.hor, new)

    def __sub__(self, other):
        """Coalition - Party, or - Coalition from the same HoR."""
        if isinstance(other, Party):
            if other not in self:
                raise ValueError('{} is not present in HoR'.format(other))
            new = set(self._parties) - {other}
        elif isinstance(other, Coalition) and other.hor.same_as(self.hor):
            intercept = set(other) & set(self._parties)
            if not intercept:
                # NOTE(review): ``intercept`` is empty here, so this message
                # formats an empty set -- looks like it should name ``other``.
                raise ValueError('{} are not present in HoR'.format(intercept))
            new = set(self._parties) - set(other.parties)
        else:
            raise TypeError('Wrong type for {}'.format(other))
        return self.__class__(self.hor, new)

    def has_key_party(self, party):
        """True when the coalition beats its opposition but would no longer
        do so after *party* defects to the opposition."""
        if party not in self:
            return False
        else:
            opposition = self.opposition
            return (
                (self > opposition)
                and
                ((self - party) <= (opposition + party))
            )

    def key_parties(self):
        return list(filter(self.has_key_party, self.parties))

    def is_minimum_winning(self):
        # Winning, and every member is a key party.
        return all(map(self.has_key_party, self.parties))
|
6,934 | 44274446673225c769f63191d43e4747d8ddfbf7 | # ===================================================================
# Setup
# ===================================================================
from time import sleep
import sys, termios, tty, os, pygame, threading
# ===================================================================
# Functions
# ===================================================================
def play_emergency_sound():
    """Loop the emergency sound on mixer channel 0 until the owning thread's
    ``do_run`` attribute is set to False (checked once per playback)."""
    print("Playing emergency sound. There are " + str( threading.active_count() ) + " threads active")
    while getattr(emergency_sound_thread, "do_run", True):
        pygame.mixer.init()
        pygame.mixer.Channel(0).play( pygame.mixer.Sound('audio/alien_danger.wav') )
        # Wait for the current playback to finish before looping.
        while pygame.mixer.Channel(0).get_busy() == True:
            sleep(.25)
    print( "Stopping emergency sound" )
def play_background_sound():
    """Loop the background buzzer on mixer channel 1 until the owning
    thread's ``do_run`` attribute is set to False."""
    print("Playing background sound. There are " + str( threading.active_count() ) + " threads active")
    while getattr(background_sound_thread, "do_run", True):
        pygame.mixer.init()
        pygame.mixer.Channel(1).play( pygame.mixer.Sound('audio/buzzer.wav') )
        # Wait for the current playback to finish before looping.
        while pygame.mixer.Channel(1).get_busy() == True:
            sleep(.25)
    print( "Stopping background sound" )
def get_keypress():
    """Read a single raw keypress from stdin (terminal put into raw mode
    for the duration, then restored)."""
    fd = sys.stdin.fileno()
    saved_settings = termios.tcgetattr(fd)
    try:
        tty.setraw(fd)
        ch = sys.stdin.read(1)
    finally:
        # Always restore the terminal, even if the read fails.
        termios.tcsetattr(fd, termios.TCSADRAIN, saved_settings)
    return ch
# ===================================================================
# Main program
# ===================================================================
# Keyboard-driven sound board:
#   1 -> start background sound thread    z -> stop it
#   2 -> start emergency sound thread     x -> stop it
#   0 -> exit
while True:
    key = get_keypress()
    if (key == "0"):
        print("Exiting!")
        exit(0)
    if (key == "1"):
        print("1 pressed")
        background_sound_thread = threading.Thread( target=play_background_sound, args=() )
        background_sound_thread.start()
    if (key == "2"):
        # Bug fix: this branch printed "1 pressed".
        print("2 pressed")
        emergency_sound_thread = threading.Thread( target=play_emergency_sound, args=() )
        emergency_sound_thread.start()
    if (key == "z"):
        print("z pressed")
        # The play loop polls this flag once per playback cycle.
        background_sound_thread.do_run = False
    if (key == "x"):
        print("x pressed")
        emergency_sound_thread.do_run = False
|
6,935 | e47e614c88c78fb6e8ff4098ea2b89d21bfa9684 | import numpy as np
from .metrics import r2_score
class LinearRegression:
    """Ordinary least-squares linear regression.

    Three fitting strategies are provided: the closed-form normal equation
    (`fit_normal`), batch gradient descent (`fit_gd`) and stochastic gradient
    descent (`fit_sgd`).  All of them populate ``coef_`` / ``interception_``
    and return ``self`` so calls can be chained.
    """

    def __init__(self):
        self.coef_ = None          # feature coefficients (theta[1:])
        self.interception_ = None  # intercept term (theta[0])
        self._theta = None         # full parameter vector [intercept, coefs]

    def fit_normal(self, X_train, y_train):
        """Fit by solving the normal equation theta = (X^T X)^-1 X^T y."""
        assert X_train.shape[0] == y_train.shape[0], ""
        # Prepend a column of ones so theta[0] acts as the intercept.
        X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
        self._theta = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y_train)
        self.interception_ = self._theta[0]
        self.coef_ = self._theta[1:]
        return self

    def fit_gd(self, X_train, y_train, eta=0.01, n_iter=1e4):
        """Fit by batch gradient descent.

        eta: learning rate; n_iter: maximum number of gradient steps.
        """
        assert X_train.shape[0] == y_train.shape[0], ""

        def J(theta, X_b, y):
            # Mean squared error; inf on numerical overflow so the
            # convergence test still works.
            try:
                return np.sum((y - X_b.dot(theta)) ** 2) / len(X_b)
            except:
                return float('inf')

        def dJ(theta, X_b, y):
            # Vectorized gradient of the MSE loss.
            return X_b.T.dot(X_b.dot(theta) - y) * 2 / len(X_b)

        def gradient_descent(X_b, y, initial_theta, eta, n_iter, epsilon=1e-8):
            theta = initial_theta
            i_iter = 0
            while i_iter < n_iter:
                gradient = dJ(theta, X_b, y)
                last_theta = theta
                theta = theta - eta * gradient
                # Stop once the loss change falls below epsilon.
                if (abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon):
                    break
                i_iter += 1
            return theta

        X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
        initial_theta = np.zeros(X_b.shape[1])
        self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iter)
        self.interception_ = self._theta[0]
        self.coef_ = self._theta[1:]
        return self

    def fit_sgd(self, X_train, y_train, n_iter=5, t0=5, t1=50):
        """Fit by stochastic gradient descent.

        n_iter: number of full passes over the data; t0/t1 control the
        decaying learning rate t0 / (t + t1).
        """
        assert X_train.shape[0] == y_train.shape[0], ""

        def dJ_sgd(theta, X_b_i, y_i):
            # Gradient contribution of a single sample.
            return X_b_i.T.dot(X_b_i.dot(theta) - y_i) * 2

        def sgd(X_b, y, initial_theta, n_iter, t0=5, t1=50):
            def learning_rate(t):
                return t0 / (t + t1)

            theta = initial_theta
            m = len(X_b)
            for curr_iter in range(n_iter):
                # Shuffle each epoch so samples are visited in random order.
                indexes = np.random.permutation(m)
                X_b_new = X_b[indexes]
                y_new = y[indexes]
                for i in range(m):
                    gradient = dJ_sgd(theta, X_b_new[i], y_new[i])
                    theta = theta - learning_rate(curr_iter * m + i) * gradient
            return theta

        X_b = np.hstack([np.ones([len(X_train), 1]), X_train])
        initial_theta = np.zeros(X_b.shape[1])
        self._theta = sgd(X_b, y_train, initial_theta, n_iter)
        self.interception_ = self._theta[0]
        self.coef_ = self._theta[1:]
        # FIX: return self for consistency with fit_normal / fit_gd.
        return self

    def predict(self, X_predict):
        """Return predictions for X_predict; requires a prior fit."""
        assert self.interception_ is not None and self.coef_ is not None,\
            "must fit before predict"
        assert X_predict.shape[1] == len(self.coef_),\
            "the feature number of X_predict must be equal to X_train"
        X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])
        y_predict = X_b.dot(self._theta)
        return y_predict

    def score(self, X_test, y_test):
        """Return the R^2 score of the model on the given test set."""
        y_predict = self.predict(X_test)
        return r2_score(y_test, y_predict)

    def __repr__(self):
        return "LinearRegression()"
6,936 | c70681f5ff8d49a243b7d26164aa5430739354f4 | # Uses python3
from decimal import Decimal
def gcd_naive(a, b):
    """Return the greatest common divisor of a and b (Euclid's algorithm).

    b must be non-zero, as in the original implementation.  The previous
    version emulated this loop with an `x = 5` sentinel flag; this is the
    direct form with identical results for positive integers.
    """
    while a % b != 0:
        a, b = b, a % b
    return b
# Read two whitespace-separated integers and print their least common
# multiple: lcm(a, b) = a * b / gcd(a, b).
there = input()
store = there.split()
# NOTE(review): max/min operate on *strings* here, so the larger/smaller
# labels are lexicographic, not numeric (e.g. "9" > "10").  Euclid's gcd is
# order-insensitive so the final answer is still correct — verify intent.
a = int(max(store))
b = int(min(store))
factor = gcd_naive(a,b)
if factor > 1:
    # Decimal keeps exact integer arithmetic for very large products.
    multiple = (Decimal(a) * Decimal(b)) / Decimal(factor)
else:
    multiple = Decimal(a * b)
print(int(multiple))
|
6,937 | 7539042b92a5188a11f625cdfc0f341941f751f0 | # -*- coding:utf-8 -*-
import requests
from lxml import etree
import codecs
from transfrom import del_extra
import re
MODIFIED_TEXT = [r'一秒记住.*?。', r'(看书.*?)', r'纯文字.*?问', r'热门.*?>', r'最新章节.*?新',
r'は防§.*?e', r'&.*?>', r'r.*?>', r'c.*?>',
r'复制.*?>', r'字-符.*?>', r'最新最快,无.*?。',
r' .Shumilou.Co M.Shumilou.Co<br /><br />', r'[Ww]{3}.*[mM]',
r'&nbsp; &nbsp; &nbsp; &nbsp; ']
HEADER = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0 '}
URL = 'http://www.xxbiquge.com/5_5422/'
def crawl_urls(u):
    """Yield (chapter_index, absolute_chapter_url) pairs scraped from the
    book's table-of-contents page at ``u``."""
    response = requests.get(u, headers=HEADER)
    body = etree.HTML(response.content)
    # Chapter links live under the table-of-contents <dl> list.
    content_urls = body.xpath('//div[@class="box_con"]/div/dl//dd/a/@href')
    for pk_id, u in enumerate(content_urls):
        # hrefs are site-relative; prefix the host to get an absolute URL.
        content_url = 'http://www.xxbiquge.com' + u
        yield pk_id, content_url
def crwal(content_url):
    """Fetch one chapter page, extract its title and body, and run the body
    through the markup/ad filters.

    Returns (chapter_title, filtered_text, need_confirm) where need_confirm
    flags output that should be manually reviewed.
    """
    content_response = requests.get(content_url, headers=HEADER)
    content_body = etree.HTML(content_response.content)
    try:
        chapter = content_body.xpath('//div[@class="bookname"]/h1/text()')[0]
        content = content_body.xpath('//div[@id="content"]')[0]
    except IndexError:
        # An empty xpath result means the site layout changed.
        raise IndexError('rules haved change in %s' % content_url)
    final_content, need_confirm = transform_content(etree.tounicode(content))
    final_content = content_filter(final_content)
    return chapter, final_content, need_confirm
def transform_content(txt):
    """Strip the wrapping ``<div id="content">`` markup and leading junk from
    a scraped chapter body.

    Returns (text, need_confirm); need_confirm is 1 when the result looks
    suspect (contains backslashes or is under 100 characters).
    """
    need_confirm = 0
    if 'div' in txt:
        txt = txt.split('<div id="content">')[-1].split('</div>')[0]
    # Trim leading characters until an indent marker or a CJK character.
    # FIX: `while txt` replaces the original unguarded `while True`, which
    # raised IndexError on txt[0] once the string was fully consumed.
    while txt:
        if txt.startswith(' ') or txt.startswith(' '):
            break
        if '\u4e00' <= txt[0] <= '\u9fff':
            break
        txt = txt[1:]
    txt = del_extra(txt)
    if '\\' in txt or len(txt) < 100:
        need_confirm = 1
    return txt, need_confirm
def content_filter(content):
    """Remove ad fragments and mojibake from a chapter body by applying every
    regex in MODIFIED_TEXT in turn."""
    cleaned = content
    for pattern in MODIFIED_TEXT:
        cleaned = re.sub(pattern, '', cleaned)
    return cleaned
if __name__ == '__main__':
pass
|
6,938 | 3fdf67c3e0e4c3aa8a3fed09102aca0272b5ff4f | from django.db.models import Exists
from django.db.models import OuterRef
from django.db.models import QuerySet
from django.utils import timezone
class ProductQuerySet(QuerySet):
    """Query helpers for Product rows."""

    def available(self):
        # A product is available when `now` falls inside its `available_in`
        # range and its category is public.
        return self.filter(available_in__contains=timezone.now(), category__public=True)

    def annotate_subproducts(self):
        # Imported here to avoid a circular import with the models module.
        from .models import SubProductRelation
        subproducts = SubProductRelation.objects.filter(
            bundle_product=OuterRef("pk"),
        )
        # has_subproducts is True when at least one SubProductRelation
        # references this product as its bundle.
        return self.annotate(
            has_subproducts=Exists(subproducts),
        )
class OrderQuerySet(QuerySet):
    """Query helpers for Order rows, one per lifecycle state."""

    def not_cancelled(self):
        return self.filter(cancelled=False)

    def open(self):
        # "open" is a nullable marker field; non-null means still open.
        return self.filter(open__isnull=False)

    def paid(self):
        return self.filter(paid=True)

    def unpaid(self):
        return self.filter(paid=False)

    def cancelled(self):
        return self.filter(cancelled=True)
|
6,939 | 47476fbb78ca8ce14d30bf226795bbd85b5bae45 | import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import distance
with open('input.txt', 'r') as f:
data = f.read()
res = [i for i in data.splitlines()]
print(res)
newHold = []
for line in res:
newHold.append((tuple(int(i) for i in line.split(', '))))
print(newHold)
mapper = np.zeros((400,400))
#plt.scatter(*zip(*newHold))
#plt.show()
for i, tup in enumerate(newHold):
x = tup[0]
y = tup[1]
if mapper[y][x] == 0:
mapper[y][x] = i
rows = mapper.shape[0]
cols = mapper.shape[1]
for num, top in enumerate(newHold):
first = list(newHold[num])
for i in range(0, rows):
for j in range(0, cols):
if ((mapper[i][j] > distance.cityblock(first, [i,j])) or (mapper[i][j] == 0)):
mapper[i][j] = distance.cityblock(first, [i,j])
elif mapper[i][j] == distance.cityblock(first, [i,j]):
mapper[i][j] = -1000
print(num)
plt.imshow(mapper, cmap="viridis")
plt.show()
plt.imshow(mapper, cmap="viridis")
plt.show() |
6,940 | dc2c429bae10ee14737583a3726eff8fde8306c7 | from src import npyscreen
from src.MainForm import MainForm
from src.ContactsForm import ContactsForm
from src.SendFileForm import SendFileForm
from src.MessageInfoForm import MessageInfoForm
from src.ForwardMessageForm import ForwardMessageForm
from src.RemoveMessageForm import RemoveMessageForm
class App(npyscreen.StandardApp):
    """Top-level npyscreen application: registers every TUI form at startup."""

    def onStart(self):
        # "MAIN" is the entry form; the others are reachable by name.
        self.MainForm = self.addForm("MAIN", MainForm)
        self.ContactsForm = self.addForm("CONTACTS", ContactsForm)
        self.SendFileForm = self.addForm("SEND_FILE", SendFileForm, lines=15)
        self.MessageInfoForm = self.addForm("MESSAGE_INFO", MessageInfoForm)
        self.ForwardMessageForm = self.addForm("FORWARD_MESSAGE", ForwardMessageForm)
        self.RemoveMessageForm = self.addForm("REMOVE_MESSAGE", RemoveMessageForm, lines=5, columns=20)
|
6,941 | 98841630964dd9513e51c3f13bfdb0719600712d | from flask import Flask, render_template, request, jsonify, make_response
app = Flask(__name__)
@app.route("/")
def hello():
    """Serve the chat page with an explicit 200 response."""
    # return render_template('chat.html')
    return make_response(render_template('chat.html'),200)
if __name__ == "__main__":
app.run(debug=True) |
6,942 | 32c62bb8b6e4559bb7dfc67f4311bc8e71e549c9 | s = 'Daum KaKao'
# s_split = s.split()
# s = s_split[1] + ' ' + s_split[0]
s = s[5:] + ' ' + s[:4]
print(s) |
6,943 | 3346ca7cdcfe9d9627bfe08be2b282897b3c319c | import os
from pathlib import Path
from sphinx_testing import with_app
@with_app(buildername="html", srcdir="doc_test/doc_role_need_max_title_length_unlimited")
def test_max_title_length_unlimited(app, status, warning):
    """MAX_TITLE_LENGTH=-1 must leave need titles fully untruncated."""
    os.environ["MAX_TITLE_LENGTH"] = "-1"
    app.build()
    html = Path(app.outdir, "index.html").read_text()
    assert "ROLE NEED TEMPLATE" in html
    # Full title present, no ellipsis.
    assert (
        "[SP_TOO_001] Command line interface (implemented) Specification/spec - test;test2 - SP_TOO_002 - - "
        "The Tool awesome shall have a command line interface." in html
    )
@with_app(buildername="html", srcdir="doc_test/doc_role_need_max_title_length")
def test_max_title_length_10(app, status, warning):
    """MAX_TITLE_LENGTH=10 must truncate need titles with an ellipsis."""
    os.environ["MAX_TITLE_LENGTH"] = "10"
    app.build()
    html = Path(app.outdir, "index.html").read_text()
    assert "ROLE NEED TEMPLATE" in html
    # Title is cut to 10 chars ("Command...") in the rendered role.
    assert (
        "[SP_TOO_001] Command... (implemented) Specification/spec - test;test2 - SP_TOO_002 - - "
        "The Tool awesome shall have a command line interface." in html
    )
|
6,944 | d3425017d4e604a8940997afd0c35a4f7eac1170 | from django import forms
from .models import Appointment, Prescription
from account.models import User
class AppointmentForm(forms.ModelForm):
    """Appointment form with HTML5 date/time inputs and user-type filtering."""

    class Meta:
        model = Appointment
        fields = '__all__'
        widgets = {
            'date': forms.DateInput(attrs={'type': 'date'}),
            'time': forms.TimeInput(attrs={'type': 'time'})
        }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Restrict the dropdowns: patients ('P') and doctors ('D') only.
        self.fields['patient'].queryset = User.objects.filter(usertype='P')
        # self.fields['patient'].empty_label = 'select patient'
        self.fields['doctor'].queryset = User.objects.filter(usertype='D')
        # self.fields['doctor'].empty_label = 'select doctor'
class PrescriptionForm(forms.ModelForm):
    """Prescription form; the doctor is excluded and set by the view."""

    class Meta:
        model = Prescription
        exclude = ['doctor']
        widgets = {
            'prescription': forms.Textarea(attrs={'rows': 4}),
        }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Only patient accounts ('P') may be prescribed to.
        self.fields['patient'].queryset = User.objects.filter(usertype='P')
|
6,945 | fd564d09d7320fd444ed6eec7e51afa4d065ec4d | import os, time
def counter(count): # run in new process
    """Count to ``count``, sleeping 1s per step; output is tagged with the
    current pid so parent/child interleaving is visible."""
    for i in range(count):
        time.sleep(1) # simulate real work
        print('[%s] => %s' % (os.getpid(), i))
# Fork five children; each runs counter(5) and exits with os._exit so it
# never falls through into the parent's loop.  (A leftover
# `import pdb;pdb.set_trace()` debugger breakpoint was removed here.)
for i in range(5):
    pid = os.fork()
    if pid != 0:
        print('Process %d spawned' % pid) # in parent: continue
    else:
        counter(5) # else in child/new process
        os._exit(0) # run function and exit
print('Main process exiting.')
|
6,946 | 63d9aa55463123f32fd608ada83e555be4b5fe2c | from tkinter import *
import psycopg2
import sys
import pprint
import Base_de_datos
import MergeSort
class Cliente:
    """Tk window for creating, listing and sorting rows of the `cliente` table."""

    def __init__(self, id=None, nombre=None):
        self.id = id          # id entry value (StringVar once the window opens)
        self.nombre = nombre  # nombre entry value

    def ingresar(self):
        """Open the data-entry window with its inputs and action buttons."""
        self.ventanaIngresar = Toplevel()
        self.ventanaIngresar.geometry("570x400")
        self.ventanaIngresar.title("Cliente")
        # FIX: keep the PhotoImage on self — Tk does not hold a strong
        # reference, so a local would be garbage-collected and the image
        # would render blank.
        self.img = PhotoImage(file="C:/Users/checo/Desktop/41-INVERSION-MEDIOS-DIGITALES.png")
        imagen = Label(self.ventanaIngresar, image=self.img)
        imagen.pack()
        Label(self.ventanaIngresar, text="Cliente",font=("Cambria",14)).place(x=5,y=0)
        Label(self.ventanaIngresar, text="Id: ",font=("Cambria",11)).place(x=0,y=30)
        Label(self.ventanaIngresar, text="Nombre: ",font=("Cambria",11)).place(x=0,y=60)
        self.id=StringVar()
        Entry(self.ventanaIngresar, textvariable=self.id).place(x=30,y=30)
        self.nombre=StringVar()
        Entry(self.ventanaIngresar, textvariable=self.nombre).place(x=65,y=60)
        Button(self.ventanaIngresar,text="Guardar",font=("Cambria",11),
               width=15,command=self.BD).place(x=420,y=5)
        #Button(self.ventanaIngresar,text="Modificar",font=("Cambria",11),
        #      width=15).place(x=420,y=365)
        Button(self.ventanaIngresar,text="Mostrar",font=("Cambria",11),
               width=15,command=self.Mostrar).place(x=0,y=365)
        Button(self.ventanaIngresar,text="Ordenar",font=("Cambria",11),
               width=15, command=self.ordenamiento).place(x=220,y=365)
        self.ventanaIngresar.mainloop()

    def BD(self):
        """Insert the form values into public.cliente.

        FIX: uses a parameterized query; the previous version built the SQL
        by string concatenation, which was open to SQL injection through the
        form fields.
        """
        conectar = Base_de_datos.BaseDeDatos()
        comando = "INSERT INTO public.cliente(id, nombre) VALUES(%s, %s)"
        valores = (self.id.get(), self.nombre.get())
        print(comando, valores)
        conectar.cursor.execute(comando, valores)

    def Mostrar(self):
        """List every row of `cliente` in a scrollable listbox."""
        comando="SELECT * FROM cliente;"
        conectar=Base_de_datos.BaseDeDatos()
        conectar.cursor.execute(comando)
        Scroll=Scrollbar(self.ventanaIngresar, orient=VERTICAL)
        self.listbox=Listbox(self.ventanaIngresar, font=("Cambria",9), borderwidth=0, yscrollcommand=Scroll.set,height=15,relief="sunken",width=60)
        self.listbox.place(x=5, y=90)
        Scroll.config(command=self.listbox.yview)
        Scroll.pack(side=RIGHT, fill=Y)
        # Insert at index 0 as in the original, so rows appear newest-first.
        for dato1, dato2 in enumerate(conectar.cursor.fetchall()):
            self.listbox.insert(0, "Id: {}".format(dato2[0]))
            self.listbox.insert(1, "Nombre: {}".format(dato2[1]))
            self.listbox.insert(2, " ")

    def ordenamiento(self):
        """Fetch all ids and print them sorted via the MergeSort module."""
        comando="SELECT id FROM cliente;"
        conectar=Base_de_datos.BaseDeDatos()
        conectar.cursor.execute(comando)
        rows= conectar.cursor.fetchall()
        ordenar=MergeSort.merge_sort(rows)
        print(ordenar)
|
6,947 | a25fb9b59d86de5a3180e4257c4e398f22cdbb05 | #!/usr/bin/python
import os
from nao.tactics import Tactic
from nao.inspector import Inspector
def test_file():
    """Run the `file` binary under the inspector and collect results for the
    condition derived near the chosen address inside libmagic."""
    print("\n[*] === file ===")
    name_libmagic_so = 'libmagic.so.1'
    inspector = Inspector("./sample/file", debug=True)
    # find_addr = 0x1742D # ret block of is_tar
    # Offsets are relative to libmagic's load base — presumably taken from a
    # specific libmagic build; verify against the shipped sample.
    find_addr = 0x173F8 # return 3 at is_tar
    # find_addr = 0x17293
    cond = inspector.get_condition_at(Tactic.near_path_constraint, object_name=name_libmagic_so, relative_addr=find_addr)
    print("post condition = {}".format(cond))
    # LD_LIBRARY_PATH must point at the instrumented libmagic.
    inspector.run(args=["./sample.tar"], env={'LD_LIBRARY_PATH': os.environ['LD_LIBRARY_PATH']})
    return inspector.collect(cond)
if __name__ == "__main__":
res = test_file()
print(res)
assert len(res) > 0
|
6,948 | d73832d3f0adf22085a207ab223854e11fffa2e8 | """
"""
import json
import logging
import re
import asyncio
from typing import Optional
import discord
from discord.ext import commands
import utils
logging.basicConfig(level=logging.INFO, format="[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s")
log = logging.getLogger("YTEmbedFixer")
client = commands.Bot(command_prefix="yt!",
max_messages=5000,
description="A bot for fixing what Discord can't.\n",
owner_id=389590659335716867,
case_insensitive=True)
@client.event
async def on_ready():
    """Log connection details and set the bot's presence once the gateway is ready."""
    log.info('Connected using discord.py {}!'.format(discord.__version__))
    log.info('Username: {0.name}, ID: {0.id}'.format(client.user))
    log.info("Connected to {} servers.".format(len(client.guilds)))
    # FIX: the status text is static; the original called
    # .format(client.command_prefix) on a string with no placeholder (a no-op).
    activity = discord.Game("Fixing what Discord can't since 12/5/2019.")
    await client.change_presence(status=discord.Status.online, activity=activity)
    log.info('------')
async def fix_yt_embed(message: discord.Message) -> Optional[discord.Embed]:
    """Detect a broken YouTube embed on ``message`` and, if found, scrape the
    video metadata and repost a repaired embed via webhook.

    Always returns None; the repaired embed is sent as a side effect.
    """
    # Captures the video id from both youtu.be and youtube.com/watch URLs.
    regex_search_string = r'(?:https?://)?(?:www[.])?youtu(?:[.]be/|be[.]com/watch[?]v=)([^ ]*)'
    if len(message.embeds) == 1:
        matches = re.findall(regex_search_string, message.content)
        if len(matches) > 0:
            # We have a valid youtube link with Embed! Check if it broken.
            # Only the first embed is considered.
            if message.embeds[0].type == "link":  # a working video embed has type "video"; "link" indicates Discord failed to resolve it
                # We have a broken embed!
                await asyncio.sleep(2)  # Give PluralKit time to delete the message if it is a proxy message
                msg_check = discord.utils.get(client.cached_messages, id=message.id)  # Check if message was deleted by PK.
                if msg_check is not None:
                    html = await utils.get_video_webpage(matches[0])
                    video_url = "https://www.youtube.com/watch?v={}".format(matches[0])
                    video_image = await utils.get_video_image_url(html)
                    video_title = await utils.get_video_title(html)
                    author_name = await utils.get_author_name(html)
                    author_url = await utils.get_author_url(html)
                    if video_title is None and video_image is None and author_name is None and author_url is None:
                        # No metadata scraped at all — video may be dead or the
                        # page DOM changed; don't post an empty embed.
                        return None  # Don't post empty embed.
                    embed = build_embed(video_url, video_image, video_title, author_name, author_url)
                    await send_new_embed(message, embed)
    return None
async def send_new_embed(original_msg: discord.Message, embed: discord.Embed):
    """Post the repaired embed through a channel webhook.

    If the bot can delete the original message, it reposts content + embed
    impersonating the original author; otherwise it posts the embed alone
    under the bot's own identity.
    """
    webhook: discord.Webhook = await utils.get_webhook(client, original_msg.channel)
    try:
        if original_msg.guild.me.permissions_in(original_msg.channel).manage_messages:
            await original_msg.delete()
            await webhook.send(content=original_msg.content, embed=embed, username=original_msg.author.display_name,
                               avatar_url=original_msg.author.avatar_url)
        else:
            await webhook.send(embed=embed, username=client.user.display_name,
                               avatar_url=client.user.avatar_url)
    except discord.errors.NotFound:
        pass # SHOULD never get here because we check before deleting, but just in case... Don't post replacement.
def build_embed(_video_url: str, _video_image_url: Optional[str], _video_title: Optional[str],
                _author_name: Optional[str], _author_url: Optional[str]) -> discord.Embed:
    """Assemble a replacement YouTube embed from whatever metadata was scraped.

    Every field except the video URL is optional and simply omitted when None.
    """
    fixed = discord.Embed(type="video", colour=discord.Colour.from_rgb(255, 0, 0))
    if _video_image_url is not None:
        fixed.set_image(url=_video_image_url)
    if _author_name is not None:
        author_kwargs = {"name": _author_name}
        if _author_url is not None:
            author_kwargs["url"] = _author_url
        fixed.set_author(**author_kwargs)
    if _video_title is not None:
        fixed.title = _video_title
    fixed.url = _video_url
    return fixed
# ---- Command Error Handling ----- #
@client.event
async def on_command_error(ctx, error):
if type(error) == discord.ext.commands.NoPrivateMessage:
await ctx.send("⚠ This command can not be used in DMs!!!")
return
elif type(error) == discord.ext.commands.CommandNotFound:
await ctx.send("⚠ Invalid Command!!!")
return
elif type(error) == discord.ext.commands.MissingPermissions:
await ctx.send("⚠ You need the **Manage Messages** permission to use this command".format(error.missing_perms))
return
elif type(error) == discord.ext.commands.MissingRequiredArgument:
await ctx.send("⚠ {}".format(error))
elif type(error) == discord.ext.commands.BadArgument:
await ctx.send("⚠ {}".format(error))
else:
await ctx.send("⚠ {}".format(error))
raise error
@client.event
async def on_message(message: discord.Message):
    # Try to repair any broken YouTube embed, then let commands run as usual.
    await fix_yt_embed(message)
    await client.process_commands(message)


@client.event
async def on_message_edit(before: discord.Message, after: discord.Message):
    # Edits can introduce a YouTube link; re-check the edited message.
    await fix_yt_embed(after)
@client.command(name="invite", brief="Sends the invite link")
async def send_invite_link(ctx: commands.Context):
    """Reply with an OAuth2 invite URL built from the bot's own client id."""
    # link = "https://discordapp.com/oauth2/authorize?client_id=500711320497160199&scope=bot&permissions=536882176"
    link = "https://discordapp.com/oauth2/authorize?client_id={}&scope=bot&permissions=536882176".format(client.user.id)
    await ctx.send(link)
if __name__ == '__main__':
    # Load prefix and token from config.json, then block until shutdown.
    with open('config.json') as json_data_file:
        config = json.load(json_data_file)
    client.command_prefix = config['bot_prefix']
    client.run(config['token'])
    log.info("cleaning Up and shutting down")
|
6,949 | 56a681015ea27e2c8e00ab8bcc8019d5987c4ee1 | import os
# Parameter sweeps for SKs_model.py.  CLI argument order appears to be:
# <s> <train_begin> <train_end> <seed> <activation> — TODO confirm against
# SKs_model.py.  Each loop varies one parameter while holding the rest at
# the baseline (0.2, 0, 10000, 0, relu).
f_s_list = [2, 1.5, 1, 0.5, 0.2]
g_end_list = [500, 1000, 2000, 5000, 10000, 20000, 60000]
h_i_list = [(10000 * i, 10000 * (i + 1)) for i in range(6)]
i_seed_list = [1, 12, 123, 1234, 12345, 123456]
for s in f_s_list:
    os.system("python SKs_model.py " + str(s) + " 0 10000 0 relu")
for train_end in g_end_list:
    os.system("python SKs_model.py 0.2 0 " + str(train_end) + " 0 relu")
for train_begin, train_end in h_i_list:
    os.system("python SKs_model.py 0.2 " + str(train_begin) + " " + str(train_end) + " 0 relu")
for seed in i_seed_list:
    os.system("python SKs_model.py 0.2 0 10000 " + str(seed) + " relu")
for activation in ["sigmoid", "relu"]:
    os.system("python SKs_model.py 0.2 0 10000 0 " + activation)
6,950 | f8f538773693b9d9530775094d9948626247a3bb | import cv2
import numpy as np
import os
from tqdm import tqdm
DIR = '/home/nghiatruong/Desktop'
INPUT_1 = os.path.join(DIR, 'GOPR1806.MP4')
INPUT_2 = os.path.join(DIR, '20190715_180940.mp4')
INPUT_3 = os.path.join(DIR, '20190715_181200.mp4')
RIGHT_SYNC_1 = 1965
LEFT_SYNC_1 = 1700
RIGHT_SYNC_2 = 5765
LEFT_SYNC_2 = 1282
def add_frame_id(video, output_dir):
    """Dump every frame of ``video`` as ``<frame_id>.jpg`` in ``output_dir``.

    Returns 0 on success, -1 if the video cannot be opened.
    """
    reader = cv2.VideoCapture(video)
    if not reader.isOpened():
        return -1
    os.makedirs(output_dir, exist_ok=True)
    frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))
    for frame_id in tqdm(range(frame_count)):
        has_frame, frame = reader.read()
        if not has_frame:
            # The container's frame count can overestimate; stop at EOF.
            break
        cv2.imwrite(os.path.join(output_dir, f'{frame_id}.jpg'), frame)
    reader.release()
    return 0
def get_meta(video):
    """Return (width, height, frame_count) for ``video``, or a (None, None,
    None) triple if the file cannot be opened."""
    reader = cv2.VideoCapture(video)
    if not reader.isOpened():
        return None, None, None
    width = int(reader.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
    frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))
    return width, height, frame_count
w1, h1, fc1 = get_meta(INPUT_1)
h2, w2, fc2 = get_meta(INPUT_2)
ratio = h1 / h2
w2 = int(w2*ratio)+1
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
writer = cv2.VideoWriter(os.path.join(DIR, 'output.avi'), fourcc, 29.97, (w1+w2+10, h1))
border = np.zeros((h1, 10, 3), dtype='uint8')
filler = np.zeros((h1, w2, 3), dtype='uint8')
reader1 = cv2.VideoCapture(INPUT_1)
reader2 = cv2.VideoCapture(INPUT_2)
reader3 = cv2.VideoCapture(INPUT_3)
last_shape = (h1, w1+w2+10, 3)
for fid in tqdm(range(fc2+RIGHT_SYNC_1-LEFT_SYNC_1)):
_, right_frame = reader1.read()
if fid < RIGHT_SYNC_1-LEFT_SYNC_1:
left_frame = filler
else:
_, left_frame = reader2.read()
left_frame = cv2.transpose(left_frame)
left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)
left_frame = cv2.flip(left_frame, 1)
new_frame = np.concatenate([left_frame, border, right_frame], axis=1)
# cv2.imshow('out', new_frame)
writer.write(new_frame)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
for fid in tqdm(range(fc2+RIGHT_SYNC_1-LEFT_SYNC_1, RIGHT_SYNC_2-LEFT_SYNC_2)):
_, right_frame = reader1.read()
new_frame = np.concatenate([filler, border, right_frame], axis=1)
# cv2.imshow('out', new_frame)
writer.write(new_frame)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
for fid in tqdm(range(RIGHT_SYNC_2-LEFT_SYNC_2, fc1)):
r1, right_frame = reader1.read()
if not r1:
break
r3, left_frame = reader3.read()
if not r3:
left_frame = filler
else:
left_frame = cv2.transpose(left_frame)
left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)
left_frame = cv2.flip(left_frame, 1)
new_frame = np.concatenate([left_frame, border, right_frame], axis=1)
# cv2.imshow('out', new_frame)
writer.write(new_frame)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
reader1.release()
reader2.release()
writer.release()
cv2.destroyAllWindows()
|
6,951 | 20722cf82371d176942e068e91b8fb38b4db61fd | from scipy.optimize import newton
from math import sqrt
import time
def GetRadius(Ri, DV, mu):
    """Solve for the final orbit radius reachable from circular radius ``Ri``
    with a total Hohmann-transfer delta-v budget ``DV`` around a body of
    gravitational parameter ``mu``, via Newton's method seeded at Ri."""
    def residual(Rf):
        departure_dv = sqrt(mu / Ri) * (sqrt(2 * Rf / (Rf + Ri)) - 1)
        arrival_dv = sqrt(mu / Rf) * (1 - sqrt(2 * Ri / (Rf + Ri)))
        return departure_dv + arrival_dv - DV
    return newton(residual, Ri)
if __name__ == '__main__':
    # Example run: small initial radius with Earth's mu (3.986e14 m^3/s^2).
    starttime = time.time()
    print(GetRadius(10000.0,23546.214671053374,(398600.*10**9)))
    # time = time.time()-starttime
    # print(time)
|
6,952 | 616ff35f818130ebf54bd33f67df79857cd45965 | ../testing.py |
6,953 | 77884dd72f5efe91fccad27e6328c4ad34378be2 | import os
import logging
from datetime import datetime
import torch
from naruto_skills.training_checker import TrainingChecker
from data_for_train import is_question as my_dataset
from model_def.lstm_attention import LSTMAttention
from utils import pytorch_utils
from train.new_trainer import TrainingLoop, TrainingLogger, EvaluateLogger, Evaluator
def input2_text(first_input, *params):
    # Decode a batch of token-index sequences back into text through the
    # dataset vocabulary; extra positional arguments are ignored.
    return my_dataset.voc.idx2docs(first_input)
def target2_text(first_input, *params):
    """Identity transform: targets are already human-readable.  Extra
    positional arguments are accepted for interface parity and ignored."""
    return first_input
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
BATCH_SIZE = 128
NUM_EPOCHS = 500
NUM_WORKERS = 0
PRINT_EVERY = 100
PREDICT_EVERY = 500
EVAL_EVERY = 500
PRE_TRAINED_MODEL = ''
my_dataset.bootstrap()
train_loader = my_dataset.get_dl_train(batch_size=BATCH_SIZE, size=None)
eval_loader = my_dataset.get_dl_eval(batch_size=BATCH_SIZE, size=None)
logging.info('There will be %s steps for training', NUM_EPOCHS * len(train_loader))
model = LSTMAttention(vocab_size=len(my_dataset.voc.index2word), no_class=2)
model.train()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
logging.info('Model architecture: \n%s', model)
logging.info('Total trainable parameters: %s', pytorch_utils.count_parameters(model))
init_step = 0
# Restore model
if PRE_TRAINED_MODEL != '':
checkpoint = torch.load(PRE_TRAINED_MODEL, map_location=device)
model.load_state_dict(checkpoint['model_state_dict'])
model.optimizer.load_state_dict(checkpoint['optimizer'])
init_step = checkpoint.get('step', 0)
logging.info('Load pre-trained model from %s successfully', PRE_TRAINED_MODEL)
root_dir = '/source/main/train/output/'
exp_id = datetime.strftime(datetime.now(), '%Y-%m-%dT%H:%M:%S')
path_checkpoints = os.path.join(root_dir, 'saved_models', model.__class__.__name__, exp_id)
training_checker = TrainingChecker(model, root_dir=path_checkpoints, init_score=-10000)
path_logging = os.path.join(root_dir, 'logging', model.__class__.__name__, exp_id)
train_logger = TrainingLogger(model, measure_interval=PRINT_EVERY, predict_interval=PREDICT_EVERY,
path_to_file=path_logging + '_train', input_transform=input2_text,
output_transform=target2_text)
eval_logger = EvaluateLogger(path_logging + '_validate')
evaluator = Evaluator(model, eval_loader, device, EVAL_EVERY, eval_logger, training_checker)
training_loop = TrainingLoop(model, train_loader, device, NUM_EPOCHS, train_logger, evaluator)
training_loop.run()
|
6,954 | 328c483bf59c6b84090e6bef8814e829398c5a56 | #!/usr/bin/env python
from lemonpie import lemonpie
from flask_debugtoolbar import DebugToolbarExtension
def main():
    """Run the lemonpie Flask app in debug mode with the debug toolbar."""
    lemonpie.debug = True
    # Let redirects pass through instead of the toolbar intercept page.
    lemonpie.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
    # Instantiating the extension registers it on the app; the variable
    # itself is otherwise unused.
    toolbar = DebugToolbarExtension(lemonpie)
    lemonpie.run('0.0.0.0')


if __name__ == '__main__':
    main()
|
6,955 | 28978bc75cb8c5585fd0d145fe0d0c0c5456ad2e | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
class Tweet(models.Model):
    """A short text message owned by a Django auth User."""

    # Owning user; reverse accessor is user.tweets.
    owner = models.ForeignKey(User, related_name='tweets')
    content = models.CharField(max_length=255)
    # Set once at insert time.
    when_created = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return '{} by {}'.format(self.content, self.owner.username)
6,956 | de4c31ad474b7ce75631214aceafbe4d7334f14b | import testTemplate
def getTests():
tests = []
suite=testTemplate.testSuite("Sample Test Cases")
testcase = testTemplate.testInstance("3\n1 1 1\n1 1 1\n1 1 1" , "6" , "Sample #1")
suite.add(testcase)
testcase = testTemplate.testInstance("11\n1 0 0 1 0 0 0 0 0 1 1 \n1 1 1 1 1 0 1 0 1 0 0 \n1 0 0 1 0 0 1 1 0 1 0 \n1 0 1 1 1 0 1 1 0 1 1 \n0 1 1 1 0 1 0 0 1 1 1 \n1 1 1 0 0 1 0 0 0 0 0 \n0 0 0 0 1 0 1 0 0 0 1 \n1 0 1 1 0 0 0 0 0 0 1 \n0 0 1 0 1 1 0 0 0 1 1 \n1 1 1 0 0 0 1 0 1 0 1 \n1 0 0 0 1 1 1 1 0 0 0" , "7588" , "Sample #2")
suite.add(testcase)
testcase = testTemplate.testInstance("11\n0 1 1 1 0 1 0 0 0 1 0 \n0 0 1 1 1 1 1 1 1 1 1 \n1 1 0 1 0 0 0 0 0 1 0 \n0 1 0 1 0 1 0 1 0 1 1 \n1 0 0 1 0 0 0 0 1 0 1 \n0 0 1 0 1 1 0 0 0 0 1 \n1 0 1 0 1 1 1 0 1 1 0 \n1 0 1 1 0 1 1 0 0 1 0 \n0 0 1 1 0 1 1 1 1 1 1 \n0 1 0 0 0 0 0 0 0 1 1 \n0 1 1 0 0 0 0 0 1 0 1 " , "7426" , "Sample #3")
suite.add(testcase)
tests.append(suite)
return tests
|
6,957 | 84b98ebf6e44d03d16f792f3586be1248c1d0221 | # Copyright (c) 2015, the Fletch project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE.md file.
{
'variables': {
'mac_asan_dylib': '<(PRODUCT_DIR)/libclang_rt.asan_osx_dynamic.dylib',
},
'targets': [
{
'target_name': 'fletch-vm',
'type': 'none',
'dependencies': [
'src/vm/vm.gyp:fletch-vm',
],
},
{
'target_name': 'c_test_library',
'type': 'none',
'dependencies': [
'src/vm/vm.gyp:ffi_test_library',
],
},
{
'target_name': 'natives_json',
'type': 'none',
'toolsets': ['host'],
'dependencies': [
'src/shared/shared.gyp:natives_json',
],
},
{
'target_name': 'toplevel_fletch',
'type': 'none',
'toolsets': ['target'],
'dependencies': [
'src/tools/driver/driver.gyp:fletch',
'copy_dart#host',
],
},
{
# C based test executables. See also tests/cc_tests/README.md.
'target_name': 'cc_tests',
'type': 'none',
'toolsets': ['target'],
'dependencies': [
'src/shared/shared.gyp:shared_cc_tests',
'src/vm/vm.gyp:vm_cc_tests',
'copy_asan',
],
},
{
# The actual snapshots used in these tests are generated at test time.
# TODO(zerny): Compile these programs at test time and remove this target.
'target_name': 'snapshot_tests',
'type': 'none',
'toolsets': ['target'],
'dependencies': [
'src/vm/vm.gyp:fletch-vm',
'copy_dart#host',
'tests/service_tests/service_tests.gyp:service_performance_test',
'tests/service_tests/service_tests.gyp:service_conformance_test',
'samples/todomvc/todomvc.gyp:todomvc_sample',
'copy_asan',
],
},
{
'target_name': 'copy_asan',
'type': 'none',
'conditions': [
[ 'OS=="mac"', {
'copies': [
{
# The asan dylib file sets its install name as
# @executable_path/..., and by copying to PRODUCT_DIR, we avoid
# having to set DYLD_LIBRARY_PATH.
'destination': '<(PRODUCT_DIR)',
'files': [
'third_party/clang/mac/lib/clang/3.7.0/'
'lib/darwin/libclang_rt.asan_osx_dynamic.dylib',
],
},
],
}, { # OS!="mac"
'actions': [
{
'action_name': 'touch_asan_dylib',
'inputs': [
],
'outputs': [
'<(mac_asan_dylib)',
],
'action': [
'touch', '<@(_outputs)'
],
},
],
}],
],
},
{
'target_name': 'copy_dart',
'type': 'none',
'toolsets': ['host'],
'copies': [
{
'destination': '<(PRODUCT_DIR)',
'files': [
'third_party/bin/<(OS)/dart',
],
},
],
},
],
}
|
6,958 | 24f3284a7a994951a1f0a4ef64c951499bbba1b4 | """
pytest.mark.parametrize(“变量参数名称”,变量数据列表[‘123’,‘34’,‘567’,‘78’])
上面的变量个数有4个,测试用例传入变量名称后,会依序4次使用变量的数据,执行4次测试用例
def test001(self,"变量参数名称")
assert 变量名称
""" |
6,959 | cd564ebb51cf91993d2ed1810707aead44c19a6b | #! /usr/bin/env python
# -*- conding:utf-8 -*-
import MySQLdb
import os
import commands
from common import logger_init
from logging import getLogger
import re
from db import VlanInfo,Session,WafBridge
def getVlan(): # get vlan data from t_vlan
session=Session()
vlanport=[]
for info in session.query(VlanInfo):
a=[]
a.append(info.nets)
a.append(info.vlan_id)
vlanport.append(a)
interface=[]
for i in range(len(vlanport)):
nic=vlanport[i]
a=nic[0].split(',')
interface.append( a[0]+'.'+nic[1])
interface.append(a[1]+'.'+nic[1])
return interface
def getBridgeInfo(): #get data from t_bridge
session=Session()
brgport=[]
for info in session.query(WafBridge.nics):
info=list(tuple(info))
info=''.join(info)
brgport.append(info)
brgport=' '.join(brgport)
return brgport
def getSysInterface(): #Gets the configured interface
info=os.popen('ifconfig').read()
f=open('ifconfig_info.txt','w')
print >>f,info
f.close()
match=re.compile(r'(.+?)\s*?Link')
f=open('ifconfig_info.txt','r')
interface=[]
for line in f:
if 'Link encap' in line:
info=match.match(line).groups()
interface.append(info)
f.close()
b=[]
for i in range(len(interface)):
a=list(tuple(interface[i]))
a=''.join(a)
b.append(a)
strinfo=' '.join(b)
listinfo=strinfo.split()
port=[]
nic=[]
for i in range(len(listinfo)):
if '.'in listinfo[i]:
port.append(listinfo[i])
else:
nic.append(listinfo[i])
all_port=[]
all_port.append(port)
all_port.append(nic)
return all_port
def VlanConfig(): #config vlan(add and delete)
    """Reconcile the system's VLAN sub-interfaces with the VLAN table.

    Pass 1 creates (vconfig add + ifconfig up) every DB-declared interface
    the system lacks; pass 2 removes (vconfig rem) any system VLAN
    sub-interface no longer in the DB, unless a bridge still references it.
    """
    logger_init('main','log/vlanconfig.log','INFO')
    config_interface=getVlan()
    configured_port=getSysInterface()
    vlan_port=' '.join(configured_port[0])
    configured_nic=' '.join(configured_port[1])
    # Pass 1: add every DB-declared VLAN interface missing from the system.
    for i in range(len(config_interface)):
        if config_interface[i] in vlan_port:
            continue
        else:
            a=config_interface[i].split('.')
            # Bring the parent NIC up first if it was never configured.
            if a[0] not in configured_nic:
                (status,output)=commands.getstatusoutput('ifconfig %s up'%a[0])
                if status!=0:
                    return
            (status,output)=commands.getstatusoutput('vconfig add %s %s'%(a[0],a[1]))
            getLogger('main').info(output)
            (status,output)=commands.getstatusoutput('ifconfig %s up'%config_interface[i])
            if status==0:
                getLogger('main').info('ifconfig %s up OK'%config_interface[i])
    # Pass 2: remove stale system VLAN interfaces not present in the DB,
    # skipping any that a configured bridge still references.
    config_interface=' '.join(config_interface)
    vlan_port=configured_port[0]
    brgport=getBridgeInfo()
    for i in range(len(vlan_port)):
        if vlan_port[i] not in config_interface:
            if vlan_port[i] not in brgport:
                (status,output)=commands.getstatusoutput('vconfig rem %s'%vlan_port[i])
                if status==0:
                    getLogger('main').info('vconfig rem %s ok'%vlan_port[i])
# Entry point: reconcile VLAN interfaces when executed as a script.
# The commented calls below are manual debugging hooks.
if __name__=='__main__':
    VlanConfig()
#    getVlan()
#    getSysInterface()
#    getBridgeInfo()
|
6,960 | 5eee3953193e0fc9f44b81059ce66997c22bc8f1 | # Make an array of dictionaries. Each dictionary should have keys:
#
# lat: the latitude
# lon: the longitude
# name: the waypoint name
#
# Make up three entries of various values.
# Each waypoint now carries all three required keys (lat, lon, name);
# the original gave each dict only one of the three.
waypoints = [
    {'lat': 106.72888, 'lon': 0.69622, 'name': 'Kepulauan Riau'},
    {'lat': 40.71278, 'lon': -74.00594, 'name': 'New York'},
    {'lat': -33.86882, 'lon': 151.20930, 'name': 'Sydney'},
]
# Write a loop that prints out all the field values for all the waypoints
# (loop variable renamed so it no longer shadows the builtin `dict`).
for waypoint in waypoints:
    for field, value in waypoint.items():
        print(field, '=', value)
6,961 | b1573f80395d31017ceacbb998e421daf20ab75f | # class Mob:
# def __init__(self, name, health=10):
# self.name = name
# self.health = health
# def get_hit(self, power):
# self.health -= power
# print(
# f"I, {self.name} was hit for {power} points. {self.health} pts remaining")
# hero = Mob("Sir Barks-alot", 30)
# hero.get_hit(6)
class Vehicle:
    """A vehicle that accelerates in fixed steps up to a capped top speed.

    Interface unchanged: same constructor parameters/defaults, and both
    methods keep their original prints.
    """

    def __init__(self, category, top_speed, acceleration, position=0, speed=0, wheels=4):
        self.category = category          # label used in the move() message
        self.speed = speed                # current speed, defaults to 0
        self.top_speed = top_speed        # hard cap enforced by accelerate()
        self.position = position          # advanced by move()
        self.acceleration = acceleration  # speed gained per accelerate() call
        self.wheels = wheels

    def move(self):
        """Advance position by the current speed and report the new position."""
        self.position += self.speed
        print(f"{self.category} is moving. New position is {self.position}")

    def accelerate(self):
        """Increase speed by one acceleration step, clamped to top_speed."""
        # min() replaces the original if/else that duplicated the print call
        # in both branches; the observable output is identical.
        self.speed = min(self.speed + self.acceleration, self.top_speed)
        print(self.speed)
# Demo driver: 21 accelerate/move cycles (i = 0..20). Speed ramps 3, 6, 9,
# then stays clamped at top_speed=12 while position keeps advancing.
i = 0
motorcycle = Vehicle("Ducati", 12, 3)
while i <= 20:
    motorcycle.accelerate()
    motorcycle.move()
    i += 1
# motorcycle.accelerate()
# motorcycle.move()
# motorcycle.accelerate()
# motorcycle.move()
# motorcycle.accelerate()
# motorcycle.move()
# motorcycle.accelerate()
# motorcycle.move()
|
6,962 | fa511411e59880fd80fba0ccc49c95d42cb4b78d | import requests
from requests.auth import HTTPBasicAuth
def __run_query(self, query):
    """POST a GraphQL payload dict to the GitHub API and return parsed JSON.

    Raises Exception when the HTTP status is anything other than 200.
    """
    # SECURITY: credentials are hard-coded in plain text below. Move them to
    # environment variables or a secrets store, and rotate this password.
    URL = 'https://api.github.com/graphql'
    request = requests.post(URL, json=query,auth=HTTPBasicAuth('gleisonbt', 'Aleister93'))
    if request.status_code == 200:
        return request.json()
    else:
        raise Exception("Query failed to run by returning code of {}. {}".format(request.status_code, query))
def user_get_starred(self, username):
    """Fetch (up to) the first 100 starred repos of *username*, plus the
    starred repos of the first 100 accounts they follow, in one GraphQL call.
    """
    query = """
        query userGetStarred($username: String!){
            user(login: $username){
                starredRepositories(first:100){
                    nodes{
                        nameWithOwner
                        description
                        stargazers{
                            totalCount
                        }
                    }
                }
                following(first:100){
                    nodes{
                        starredRepositories(first:100){
                            nodes{
                                nameWithOwner
                                description
                                stargazers{
                                    totalCount
                                }
                            }
                        }
                    }
                }
            }
        }
    """
    # Renamed from `json` to avoid confusion with the stdlib module name.
    payload = {"query": query, "variables": {"username": username}}
    return __run_query(self, payload)
def repos_for_query(self, query):
    """Search GitHub for repositories matching *query* (first 100 results)."""
    gql = """
        query queryByItems($queryString: String!){
            search(query:$queryString, type:REPOSITORY, first: 100){
                nodes{
                    ... on Repository{
                        nameWithOwner
                        description
                        stargazers{
                            totalCount
                        }
                    }
                }
            }
        }
    """
    # Renamed from `json` to avoid confusion with the stdlib module name.
    payload = {"query": gql, "variables": {"queryString": query}}
    return __run_query(self, payload)
|
6,963 | 4d5b2ed016cfc6740c3ee5397c894fabc1bec73f | class CustomPrinter(object):
    def __init__(self, val):
        # val: the value to pretty-print; to_string() indexes it 64 times and
        # converts each element with int(), so it is presumably a gdb.Value
        # for an `unsigned char [64]` array (see lookup_type) — TODO confirm.
        self.val = val
def to_string(self):
res = "{"
for m in xrange(64):
res += hex(int(self.val[m]))
if m != 63:
res += ", "
res += " }"
return res
def lookup_type(val):
    """Pretty-printer matcher: claim values whose type prints as
    `unsigned char [64]`; decline (return None) for everything else."""
    if str(val.type) != 'unsigned char [64]':
        return None
    return CustomPrinter(val)
# Register this matcher with gdb's global pretty-printer hook list.
gdb.pretty_printers.append(lookup_type)
|
6,964 | fd52379d125d6215fe12b6e01aa568949511549d | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    """Add close-tracking columns (in_close flag, time_close stamp) to
    Customer and to its HistoricalCustomer shadow model.

    NOTE(review): the time_close defaults are naive datetimes frozen at the
    moment makemigrations ran (2015-11-26) — they backfill existing rows
    only. Confirm a timezone-aware value was not intended if USE_TZ is on.
    Migration files are history: do not edit the operations after applying.
    """

    dependencies = [
        ('products', '0007_auto_20150904_1320'),
    ]

    operations = [
        migrations.AddField(
            model_name='customer',
            name='in_close',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='customer',
            name='time_close',
            field=models.DateTimeField(default=datetime.datetime(2015, 11, 26, 23, 25, 34, 205639)),
        ),
        migrations.AddField(
            model_name='historicalcustomer',
            name='in_close',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='historicalcustomer',
            name='time_close',
            field=models.DateTimeField(default=datetime.datetime(2015, 11, 26, 23, 25, 34, 205639)),
        ),
    ]
|
6,965 | b63221af86748241fdce34052819569a06d37afe | # this is for the 12/30/2015 experiments
# varied over 1, 10, 25, 50, 100 repeat particles per particle
# 10000 particles total per filter
# bias is at 0.8 in both the "real" world (realWorld.cpp)
files = ['data0Tue_Dec_30_20_37_34_2014.txt',
'data0Tue_Dec_30_20_37_49_2014.txt',
'data0Tue_Dec_30_20_38_04_2014.txt',
'data0Tue_Dec_30_20_38_19_2014.txt',
'data0Tue_Dec_30_20_38_34_2014.txt',
'data0Tue_Dec_30_20_38_49_2014.txt',
'data0Tue_Dec_30_20_39_04_2014.txt',
'data0Tue_Dec_30_20_39_19_2014.txt',
'data0Tue_Dec_30_20_39_34_2014.txt',
'data0Tue_Dec_30_20_39_49_2014.txt',
'data0Tue_Dec_30_20_40_04_2014.txt',
'data0Tue_Dec_30_20_40_19_2014.txt',
'data0Tue_Dec_30_20_40_34_2014.txt',
'data0Tue_Dec_30_20_40_49_2014.txt',
'data0Tue_Dec_30_20_41_04_2014.txt',
'data0Tue_Dec_30_20_41_18_2014.txt',
'data0Tue_Dec_30_20_41_34_2014.txt',
'data0Tue_Dec_30_20_41_49_2014.txt',
'data0Tue_Dec_30_20_42_04_2014.txt',
'data0Tue_Dec_30_20_42_19_2014.txt',
'data0Tue_Dec_30_20_42_34_2014.txt',
'data0Tue_Dec_30_20_42_49_2014.txt',
'data0Tue_Dec_30_20_43_04_2014.txt',
'data0Tue_Dec_30_20_43_19_2014.txt',
'data0Tue_Dec_30_20_43_34_2014.txt',
'data0Tue_Dec_30_20_43_49_2014.txt',
'data0Tue_Dec_30_20_44_04_2014.txt',
'data0Tue_Dec_30_20_44_19_2014.txt',
'data0Tue_Dec_30_20_44_34_2014.txt',
'data0Tue_Dec_30_20_44_49_2014.txt',
'data0Tue_Dec_30_20_45_04_2014.txt',
'data0Tue_Dec_30_20_45_19_2014.txt',
'data0Tue_Dec_30_20_45_34_2014.txt',
'data0Tue_Dec_30_20_45_49_2014.txt',
'data0Tue_Dec_30_20_46_04_2014.txt',
'data0Tue_Dec_30_20_46_19_2014.txt',
'data0Tue_Dec_30_20_46_34_2014.txt',
'data0Tue_Dec_30_20_46_49_2014.txt',
'data0Tue_Dec_30_20_47_04_2014.txt',
'data0Tue_Dec_30_20_47_19_2014.txt',
'data0Tue_Dec_30_20_47_34_2014.txt',
'data0Tue_Dec_30_20_47_50_2014.txt',
'data0Tue_Dec_30_20_48_05_2014.txt',
'data0Tue_Dec_30_20_48_20_2014.txt',
'data0Tue_Dec_30_20_48_35_2014.txt',
'data0Tue_Dec_30_20_48_50_2014.txt',
'data0Tue_Dec_30_20_49_05_2014.txt',
'data0Tue_Dec_30_20_49_20_2014.txt',
'data0Tue_Dec_30_20_49_35_2014.txt',
'data0Tue_Dec_30_20_49_50_2014.txt',
'data1Tue_Dec_30_20_50_05_2014.txt',
'data1Tue_Dec_30_20_50_20_2014.txt',
'data1Tue_Dec_30_20_50_35_2014.txt',
'data1Tue_Dec_30_20_50_50_2014.txt',
'data1Tue_Dec_30_20_51_05_2014.txt',
'data1Tue_Dec_30_20_51_20_2014.txt',
'data1Tue_Dec_30_20_51_35_2014.txt',
'data1Tue_Dec_30_20_51_50_2014.txt',
'data1Tue_Dec_30_20_52_05_2014.txt',
'data1Tue_Dec_30_20_52_20_2014.txt',
'data1Tue_Dec_30_20_52_35_2014.txt',
'data1Tue_Dec_30_20_52_50_2014.txt',
'data1Tue_Dec_30_20_53_05_2014.txt',
'data1Tue_Dec_30_20_53_20_2014.txt',
'data1Tue_Dec_30_20_53_35_2014.txt',
'data1Tue_Dec_30_20_53_50_2014.txt',
'data1Tue_Dec_30_20_54_04_2014.txt',
'data1Tue_Dec_30_20_54_19_2014.txt',
'data1Tue_Dec_30_20_54_34_2014.txt',
'data1Tue_Dec_30_20_54_49_2014.txt',
'data1Tue_Dec_30_20_55_04_2014.txt',
'data1Tue_Dec_30_20_55_19_2014.txt',
'data1Tue_Dec_30_20_55_34_2014.txt',
'data1Tue_Dec_30_20_55_49_2014.txt',
'data1Tue_Dec_30_20_56_04_2014.txt',
'data1Tue_Dec_30_20_56_19_2014.txt',
'data1Tue_Dec_30_20_56_34_2014.txt',
'data1Tue_Dec_30_20_56_49_2014.txt',
'data1Tue_Dec_30_20_57_04_2014.txt',
'data1Tue_Dec_30_20_57_19_2014.txt',
'data1Tue_Dec_30_20_57_33_2014.txt',
'data1Tue_Dec_30_20_57_48_2014.txt',
'data1Tue_Dec_30_20_58_03_2014.txt',
'data1Tue_Dec_30_20_58_18_2014.txt',
'data1Tue_Dec_30_20_58_33_2014.txt',
'data1Tue_Dec_30_20_58_48_2014.txt',
'data1Tue_Dec_30_20_59_03_2014.txt',
'data1Tue_Dec_30_20_59_18_2014.txt',
'data1Tue_Dec_30_20_59_33_2014.txt',
'data1Tue_Dec_30_20_59_48_2014.txt',
'data1Tue_Dec_30_21_00_03_2014.txt',
'data1Tue_Dec_30_21_00_17_2014.txt',
'data1Tue_Dec_30_21_00_32_2014.txt',
'data1Tue_Dec_30_21_00_47_2014.txt',
'data1Tue_Dec_30_21_01_02_2014.txt',
'data1Tue_Dec_30_21_01_17_2014.txt',
'data1Tue_Dec_30_21_01_32_2014.txt',
'data1Tue_Dec_30_21_01_47_2014.txt',
'data1Tue_Dec_30_21_02_03_2014.txt',
'data1Tue_Dec_30_21_02_17_2014.txt',
'data2Tue_Dec_30_21_02_32_2014.txt',
'data2Tue_Dec_30_21_02_47_2014.txt',
'data2Tue_Dec_30_21_03_02_2014.txt',
'data2Tue_Dec_30_21_03_17_2014.txt',
'data2Tue_Dec_30_21_03_32_2014.txt',
'data2Tue_Dec_30_21_03_47_2014.txt',
'data2Tue_Dec_30_21_04_02_2014.txt',
'data2Tue_Dec_30_21_04_17_2014.txt',
'data2Tue_Dec_30_21_04_31_2014.txt',
'data2Tue_Dec_30_21_04_46_2014.txt',
'data2Tue_Dec_30_21_05_01_2014.txt',
'data2Tue_Dec_30_21_05_16_2014.txt',
'data2Tue_Dec_30_21_05_31_2014.txt',
'data2Tue_Dec_30_21_05_45_2014.txt',
'data2Tue_Dec_30_21_06_00_2014.txt',
'data2Tue_Dec_30_21_06_16_2014.txt',
'data2Tue_Dec_30_21_06_31_2014.txt',
'data2Tue_Dec_30_21_06_46_2014.txt',
'data2Tue_Dec_30_21_07_01_2014.txt',
'data2Tue_Dec_30_21_07_16_2014.txt',
'data2Tue_Dec_30_21_07_31_2014.txt',
'data2Tue_Dec_30_21_07_46_2014.txt',
'data2Tue_Dec_30_21_08_01_2014.txt',
'data2Tue_Dec_30_21_08_16_2014.txt',
'data2Tue_Dec_30_21_08_30_2014.txt',
'data2Tue_Dec_30_21_08_45_2014.txt',
'data2Tue_Dec_30_21_09_01_2014.txt',
'data2Tue_Dec_30_21_09_16_2014.txt',
'data2Tue_Dec_30_21_09_31_2014.txt',
'data2Tue_Dec_30_21_09_46_2014.txt',
'data2Tue_Dec_30_21_10_00_2014.txt',
'data2Tue_Dec_30_21_10_16_2014.txt',
'data2Tue_Dec_30_21_10_31_2014.txt',
'data2Tue_Dec_30_21_10_45_2014.txt',
'data2Tue_Dec_30_21_11_00_2014.txt',
'data2Tue_Dec_30_21_11_16_2014.txt',
'data2Tue_Dec_30_21_11_31_2014.txt',
'data2Tue_Dec_30_21_11_45_2014.txt',
'data2Tue_Dec_30_21_12_01_2014.txt',
'data2Tue_Dec_30_21_12_16_2014.txt',
'data2Tue_Dec_30_21_12_31_2014.txt',
'data2Tue_Dec_30_21_12_46_2014.txt',
'data2Tue_Dec_30_21_13_00_2014.txt',
'data2Tue_Dec_30_21_13_15_2014.txt',
'data2Tue_Dec_30_21_13_31_2014.txt',
'data2Tue_Dec_30_21_13_46_2014.txt',
'data2Tue_Dec_30_21_14_00_2014.txt',
'data2Tue_Dec_30_21_14_15_2014.txt',
'data2Tue_Dec_30_21_14_30_2014.txt',
'data2Tue_Dec_30_21_14_45_2014.txt',
'data3Tue_Dec_30_21_15_00_2014.txt',
'data3Tue_Dec_30_21_15_15_2014.txt',
'data3Tue_Dec_30_21_15_29_2014.txt',
'data3Tue_Dec_30_21_15_44_2014.txt',
'data3Tue_Dec_30_21_15_59_2014.txt',
'data3Tue_Dec_30_21_16_15_2014.txt',
'data3Tue_Dec_30_21_16_30_2014.txt',
'data3Tue_Dec_30_21_16_44_2014.txt',
'data3Tue_Dec_30_21_16_59_2014.txt',
'data3Tue_Dec_30_21_17_15_2014.txt',
'data3Tue_Dec_30_21_17_29_2014.txt',
'data3Tue_Dec_30_21_17_45_2014.txt',
'data3Tue_Dec_30_21_18_00_2014.txt',
'data3Tue_Dec_30_21_18_15_2014.txt',
'data3Tue_Dec_30_21_18_29_2014.txt',
'data3Tue_Dec_30_21_18_44_2014.txt',
'data3Tue_Dec_30_21_18_59_2014.txt',
'data3Tue_Dec_30_21_19_14_2014.txt',
'data3Tue_Dec_30_21_19_29_2014.txt',
'data3Tue_Dec_30_21_19_44_2014.txt',
'data3Tue_Dec_30_21_19_59_2014.txt',
'data3Tue_Dec_30_21_20_14_2014.txt',
'data3Tue_Dec_30_21_20_29_2014.txt',
'data3Tue_Dec_30_21_20_45_2014.txt',
'data3Tue_Dec_30_21_21_00_2014.txt',
'data3Tue_Dec_30_21_21_15_2014.txt',
'data3Tue_Dec_30_21_21_30_2014.txt',
'data3Tue_Dec_30_21_21_45_2014.txt',
'data3Tue_Dec_30_21_21_59_2014.txt',
'data3Tue_Dec_30_21_22_14_2014.txt',
'data3Tue_Dec_30_21_22_29_2014.txt',
'data3Tue_Dec_30_21_22_44_2014.txt',
'data3Tue_Dec_30_21_22_58_2014.txt',
'data3Tue_Dec_30_21_23_14_2014.txt',
'data3Tue_Dec_30_21_23_28_2014.txt',
'data3Tue_Dec_30_21_23_43_2014.txt',
'data3Tue_Dec_30_21_23_58_2014.txt',
'data3Tue_Dec_30_21_24_13_2014.txt',
'data3Tue_Dec_30_21_24_28_2014.txt',
'data3Tue_Dec_30_21_24_43_2014.txt',
'data3Tue_Dec_30_21_24_58_2014.txt',
'data3Tue_Dec_30_21_25_12_2014.txt',
'data3Tue_Dec_30_21_25_28_2014.txt',
'data3Tue_Dec_30_21_25_43_2014.txt',
'data3Tue_Dec_30_21_25_58_2014.txt',
'data3Tue_Dec_30_21_26_12_2014.txt',
'data3Tue_Dec_30_21_26_27_2014.txt',
'data3Tue_Dec_30_21_26_42_2014.txt',
'data3Tue_Dec_30_21_26_57_2014.txt',
'data3Tue_Dec_30_21_27_12_2014.txt',
'data0Tue_Dec_30_21_27_52_2014.txt',
'data0Tue_Dec_30_21_28_07_2014.txt',
'data0Tue_Dec_30_21_28_22_2014.txt',
'data0Tue_Dec_30_21_28_37_2014.txt',
'data0Tue_Dec_30_21_28_51_2014.txt',
'data0Tue_Dec_30_21_29_06_2014.txt',
'data0Tue_Dec_30_21_29_21_2014.txt',
'data0Tue_Dec_30_21_29_36_2014.txt',
'data0Tue_Dec_30_21_29_51_2014.txt',
'data0Tue_Dec_30_21_30_06_2014.txt',
'data0Tue_Dec_30_21_30_21_2014.txt',
'data0Tue_Dec_30_21_30_36_2014.txt',
'data0Tue_Dec_30_21_30_50_2014.txt',
'data0Tue_Dec_30_21_31_06_2014.txt',
'data0Tue_Dec_30_21_31_21_2014.txt',
'data0Tue_Dec_30_21_31_36_2014.txt',
'data0Tue_Dec_30_21_31_51_2014.txt',
'data0Tue_Dec_30_21_32_06_2014.txt',
'data0Tue_Dec_30_21_32_21_2014.txt',
'data0Tue_Dec_30_21_32_36_2014.txt',
'data0Tue_Dec_30_21_32_51_2014.txt',
'data0Tue_Dec_30_21_33_05_2014.txt',
'data0Tue_Dec_30_21_33_20_2014.txt',
'data0Tue_Dec_30_21_33_35_2014.txt',
'data0Tue_Dec_30_21_33_50_2014.txt',
'data0Tue_Dec_30_21_34_05_2014.txt',
'data0Tue_Dec_30_21_34_20_2014.txt',
'data0Tue_Dec_30_21_34_34_2014.txt',
'data0Tue_Dec_30_21_34_49_2014.txt',
'data0Tue_Dec_30_21_35_04_2014.txt',
'data0Tue_Dec_30_21_35_20_2014.txt',
'data0Tue_Dec_30_21_35_35_2014.txt',
'data0Tue_Dec_30_21_35_49_2014.txt',
'data0Tue_Dec_30_21_36_04_2014.txt',
'data0Tue_Dec_30_21_36_19_2014.txt',
'data0Tue_Dec_30_21_36_34_2014.txt',
'data0Tue_Dec_30_21_36_49_2014.txt',
'data0Tue_Dec_30_21_37_04_2014.txt',
'data0Tue_Dec_30_21_37_19_2014.txt',
'data0Tue_Dec_30_21_37_34_2014.txt',
'data0Tue_Dec_30_21_37_49_2014.txt',
'data0Tue_Dec_30_21_38_04_2014.txt',
'data0Tue_Dec_30_21_38_18_2014.txt',
'data0Tue_Dec_30_21_38_33_2014.txt',
'data0Tue_Dec_30_21_38_48_2014.txt',
'data0Tue_Dec_30_21_39_03_2014.txt',
'data0Tue_Dec_30_21_39_18_2014.txt',
'data0Tue_Dec_30_21_39_33_2014.txt',
'data0Tue_Dec_30_21_39_48_2014.txt',
'data0Tue_Dec_30_21_40_02_2014.txt',
'data1Tue_Dec_30_21_40_18_2014.txt',
'data1Tue_Dec_30_21_40_33_2014.txt',
'data1Tue_Dec_30_21_40_48_2014.txt',
'data1Tue_Dec_30_21_41_02_2014.txt',
'data1Tue_Dec_30_21_41_17_2014.txt',
'data1Tue_Dec_30_21_41_31_2014.txt',
'data1Tue_Dec_30_21_41_46_2014.txt',
'data1Tue_Dec_30_21_42_01_2014.txt',
'data1Tue_Dec_30_21_42_16_2014.txt',
'data1Tue_Dec_30_21_42_31_2014.txt',
'data1Tue_Dec_30_21_42_46_2014.txt',
'data1Tue_Dec_30_21_43_01_2014.txt',
'data1Tue_Dec_30_21_43_16_2014.txt',
'data1Tue_Dec_30_21_43_31_2014.txt',
'data1Tue_Dec_30_21_43_46_2014.txt',
'data1Tue_Dec_30_21_44_01_2014.txt',
'data1Tue_Dec_30_21_44_15_2014.txt',
'data1Tue_Dec_30_21_44_30_2014.txt',
'data1Tue_Dec_30_21_44_46_2014.txt',
'data1Tue_Dec_30_21_45_01_2014.txt',
'data1Tue_Dec_30_21_45_15_2014.txt',
'data1Tue_Dec_30_21_45_30_2014.txt',
'data1Tue_Dec_30_21_45_45_2014.txt',
'data1Tue_Dec_30_21_46_00_2014.txt',
'data1Tue_Dec_30_21_46_15_2014.txt',
'data1Tue_Dec_30_21_46_29_2014.txt',
'data1Tue_Dec_30_21_46_44_2014.txt',
'data1Tue_Dec_30_21_46_59_2014.txt',
'data1Tue_Dec_30_21_47_14_2014.txt',
'data1Tue_Dec_30_21_47_29_2014.txt',
'data1Tue_Dec_30_21_47_44_2014.txt',
'data1Tue_Dec_30_21_47_59_2014.txt',
'data1Tue_Dec_30_21_48_13_2014.txt',
'data1Tue_Dec_30_21_48_28_2014.txt',
'data1Tue_Dec_30_21_48_43_2014.txt',
'data1Tue_Dec_30_21_48_58_2014.txt',
'data1Tue_Dec_30_21_49_13_2014.txt',
'data1Tue_Dec_30_21_49_28_2014.txt',
'data1Tue_Dec_30_21_49_43_2014.txt',
'data1Tue_Dec_30_21_49_57_2014.txt',
'data1Tue_Dec_30_21_50_13_2014.txt',
'data1Tue_Dec_30_21_50_27_2014.txt',
'data1Tue_Dec_30_21_50_42_2014.txt',
'data1Tue_Dec_30_21_50_57_2014.txt',
'data1Tue_Dec_30_21_51_12_2014.txt',
'data1Tue_Dec_30_21_51_27_2014.txt',
'data1Tue_Dec_30_21_51_42_2014.txt',
'data1Tue_Dec_30_21_51_56_2014.txt',
'data1Tue_Dec_30_21_52_11_2014.txt',
'data1Tue_Dec_30_21_52_26_2014.txt',
'data2Tue_Dec_30_21_52_40_2014.txt',
'data2Tue_Dec_30_21_52_55_2014.txt',
'data2Tue_Dec_30_21_53_10_2014.txt',
'data2Tue_Dec_30_21_53_25_2014.txt',
'data2Tue_Dec_30_21_53_40_2014.txt',
'data2Tue_Dec_30_21_53_54_2014.txt',
'data2Tue_Dec_30_21_54_09_2014.txt',
'data2Tue_Dec_30_21_54_24_2014.txt',
'data2Tue_Dec_30_21_54_39_2014.txt',
'data2Tue_Dec_30_21_54_53_2014.txt',
'data2Tue_Dec_30_21_55_08_2014.txt',
'data2Tue_Dec_30_21_55_23_2014.txt',
'data2Tue_Dec_30_21_55_38_2014.txt',
'data2Tue_Dec_30_21_55_53_2014.txt',
'data2Tue_Dec_30_21_56_08_2014.txt',
'data2Tue_Dec_30_21_56_23_2014.txt',
'data2Tue_Dec_30_21_56_37_2014.txt',
'data2Tue_Dec_30_21_56_52_2014.txt',
'data2Tue_Dec_30_21_57_07_2014.txt',
'data2Tue_Dec_30_21_57_22_2014.txt',
'data2Tue_Dec_30_21_57_37_2014.txt',
'data2Tue_Dec_30_21_57_51_2014.txt',
'data2Tue_Dec_30_21_58_06_2014.txt',
'data2Tue_Dec_30_21_58_21_2014.txt',
'data2Tue_Dec_30_21_58_35_2014.txt',
'data2Tue_Dec_30_21_58_50_2014.txt',
'data2Tue_Dec_30_21_59_05_2014.txt',
'data2Tue_Dec_30_21_59_20_2014.txt',
'data2Tue_Dec_30_21_59_34_2014.txt',
'data2Tue_Dec_30_21_59_50_2014.txt',
'data2Tue_Dec_30_22_00_05_2014.txt',
'data2Tue_Dec_30_22_00_19_2014.txt',
'data2Tue_Dec_30_22_00_34_2014.txt',
'data2Tue_Dec_30_22_00_49_2014.txt',
'data2Tue_Dec_30_22_01_03_2014.txt',
'data2Tue_Dec_30_22_01_18_2014.txt',
'data2Tue_Dec_30_22_01_33_2014.txt',
'data2Tue_Dec_30_22_01_48_2014.txt',
'data2Tue_Dec_30_22_02_03_2014.txt',
'data2Tue_Dec_30_22_02_18_2014.txt',
'data2Tue_Dec_30_22_02_32_2014.txt',
'data2Tue_Dec_30_22_02_47_2014.txt',
'data2Tue_Dec_30_22_03_02_2014.txt',
'data2Tue_Dec_30_22_03_17_2014.txt',
'data2Tue_Dec_30_22_03_31_2014.txt',
'data2Tue_Dec_30_22_03_46_2014.txt',
'data2Tue_Dec_30_22_04_01_2014.txt',
'data2Tue_Dec_30_22_04_15_2014.txt',
'data2Tue_Dec_30_22_04_30_2014.txt',
'data2Tue_Dec_30_22_04_45_2014.txt',
'data3Tue_Dec_30_22_05_00_2014.txt',
'data3Tue_Dec_30_22_05_15_2014.txt',
'data3Tue_Dec_30_22_05_30_2014.txt',
'data3Tue_Dec_30_22_05_44_2014.txt',
'data3Tue_Dec_30_22_06_00_2014.txt',
'data3Tue_Dec_30_22_06_14_2014.txt',
'data3Tue_Dec_30_22_06_29_2014.txt',
'data3Tue_Dec_30_22_06_44_2014.txt',
'data3Tue_Dec_30_22_06_59_2014.txt',
'data3Tue_Dec_30_22_07_14_2014.txt',
'data3Tue_Dec_30_22_07_29_2014.txt',
'data3Tue_Dec_30_22_07_43_2014.txt',
'data3Tue_Dec_30_22_07_58_2014.txt',
'data3Tue_Dec_30_22_08_13_2014.txt',
'data3Tue_Dec_30_22_08_28_2014.txt',
'data3Tue_Dec_30_22_08_43_2014.txt',
'data3Tue_Dec_30_22_08_57_2014.txt',
'data3Tue_Dec_30_22_09_12_2014.txt',
'data3Tue_Dec_30_22_09_27_2014.txt',
'data3Tue_Dec_30_22_09_42_2014.txt',
'data3Tue_Dec_30_22_09_57_2014.txt',
'data3Tue_Dec_30_22_10_12_2014.txt',
'data3Tue_Dec_30_22_10_26_2014.txt',
'data3Tue_Dec_30_22_10_41_2014.txt',
'data3Tue_Dec_30_22_10_56_2014.txt',
'data3Tue_Dec_30_22_11_11_2014.txt',
'data3Tue_Dec_30_22_11_25_2014.txt',
'data3Tue_Dec_30_22_11_41_2014.txt',
'data3Tue_Dec_30_22_11_56_2014.txt',
'data3Tue_Dec_30_22_12_11_2014.txt',
'data3Tue_Dec_30_22_12_26_2014.txt',
'data3Tue_Dec_30_22_12_40_2014.txt',
'data3Tue_Dec_30_22_12_55_2014.txt',
'data3Tue_Dec_30_22_13_10_2014.txt',
'data3Tue_Dec_30_22_13_25_2014.txt',
'data3Tue_Dec_30_22_13_40_2014.txt',
'data3Tue_Dec_30_22_13_55_2014.txt',
'data3Tue_Dec_30_22_14_09_2014.txt',
'data3Tue_Dec_30_22_14_24_2014.txt',
'data3Tue_Dec_30_22_14_39_2014.txt',
'data3Tue_Dec_30_22_14_53_2014.txt',
'data3Tue_Dec_30_22_15_08_2014.txt',
'data3Tue_Dec_30_22_15_23_2014.txt',
'data3Tue_Dec_30_22_15_37_2014.txt',
'data3Tue_Dec_30_22_15_52_2014.txt',
'data3Tue_Dec_30_22_16_07_2014.txt',
'data3Tue_Dec_30_22_16_22_2014.txt',
'data3Tue_Dec_30_22_16_36_2014.txt',
'data3Tue_Dec_30_22_16_51_2014.txt',
'data3Tue_Dec_30_22_17_06_2014.txt',
'data0Tue_Dec_30_22_17_47_2014.txt',
'data0Tue_Dec_30_22_18_01_2014.txt',
'data0Tue_Dec_30_22_18_16_2014.txt',
'data0Tue_Dec_30_22_18_31_2014.txt',
'data0Tue_Dec_30_22_18_46_2014.txt',
'data0Tue_Dec_30_22_19_01_2014.txt',
'data0Tue_Dec_30_22_19_15_2014.txt',
'data0Tue_Dec_30_22_19_30_2014.txt',
'data0Tue_Dec_30_22_19_45_2014.txt',
'data0Tue_Dec_30_22_20_00_2014.txt',
'data0Tue_Dec_30_22_20_15_2014.txt',
'data0Tue_Dec_30_22_20_30_2014.txt',
'data0Tue_Dec_30_22_20_44_2014.txt',
'data0Tue_Dec_30_22_20_59_2014.txt',
'data0Tue_Dec_30_22_21_14_2014.txt',
'data0Tue_Dec_30_22_21_29_2014.txt',
'data0Tue_Dec_30_22_21_44_2014.txt',
'data0Tue_Dec_30_22_21_58_2014.txt',
'data0Tue_Dec_30_22_22_13_2014.txt',
'data0Tue_Dec_30_22_22_28_2014.txt',
'data0Tue_Dec_30_22_22_43_2014.txt',
'data0Tue_Dec_30_22_22_58_2014.txt',
'data0Tue_Dec_30_22_23_12_2014.txt',
'data0Tue_Dec_30_22_23_27_2014.txt',
'data0Tue_Dec_30_22_23_42_2014.txt',
'data0Tue_Dec_30_22_23_57_2014.txt',
'data0Tue_Dec_30_22_24_12_2014.txt',
'data0Tue_Dec_30_22_24_26_2014.txt',
'data0Tue_Dec_30_22_24_41_2014.txt',
'data0Tue_Dec_30_22_24_56_2014.txt',
'data0Tue_Dec_30_22_25_11_2014.txt',
'data0Tue_Dec_30_22_25_25_2014.txt',
'data0Tue_Dec_30_22_25_41_2014.txt',
'data0Tue_Dec_30_22_25_55_2014.txt',
'data0Tue_Dec_30_22_26_10_2014.txt',
'data0Tue_Dec_30_22_26_25_2014.txt',
'data0Tue_Dec_30_22_26_39_2014.txt',
'data0Tue_Dec_30_22_26_54_2014.txt',
'data0Tue_Dec_30_22_27_09_2014.txt',
'data0Tue_Dec_30_22_27_24_2014.txt',
'data0Tue_Dec_30_22_27_39_2014.txt',
'data0Tue_Dec_30_22_27_54_2014.txt',
'data0Tue_Dec_30_22_28_09_2014.txt',
'data0Tue_Dec_30_22_28_23_2014.txt',
'data0Tue_Dec_30_22_28_38_2014.txt',
'data0Tue_Dec_30_22_28_53_2014.txt',
'data0Tue_Dec_30_22_29_08_2014.txt',
'data0Tue_Dec_30_22_29_23_2014.txt',
'data0Tue_Dec_30_22_29_37_2014.txt',
'data0Tue_Dec_30_22_29_52_2014.txt',
'data1Tue_Dec_30_22_30_07_2014.txt',
'data1Tue_Dec_30_22_30_21_2014.txt',
'data1Tue_Dec_30_22_30_36_2014.txt',
'data1Tue_Dec_30_22_30_51_2014.txt',
'data1Tue_Dec_30_22_31_06_2014.txt',
'data1Tue_Dec_30_22_31_20_2014.txt',
'data1Tue_Dec_30_22_31_35_2014.txt',
'data1Tue_Dec_30_22_31_49_2014.txt',
'data1Tue_Dec_30_22_32_04_2014.txt',
'data1Tue_Dec_30_22_32_19_2014.txt',
'data1Tue_Dec_30_22_32_34_2014.txt',
'data1Tue_Dec_30_22_32_48_2014.txt',
'data1Tue_Dec_30_22_33_03_2014.txt',
'data1Tue_Dec_30_22_33_18_2014.txt',
'data1Tue_Dec_30_22_33_33_2014.txt',
'data1Tue_Dec_30_22_33_48_2014.txt',
'data1Tue_Dec_30_22_34_03_2014.txt',
'data1Tue_Dec_30_22_34_17_2014.txt',
'data1Tue_Dec_30_22_34_32_2014.txt',
'data1Tue_Dec_30_22_34_47_2014.txt',
'data1Tue_Dec_30_22_35_01_2014.txt',
'data1Tue_Dec_30_22_35_16_2014.txt',
'data1Tue_Dec_30_22_35_31_2014.txt',
'data1Tue_Dec_30_22_35_46_2014.txt',
'data1Tue_Dec_30_22_36_01_2014.txt',
'data1Tue_Dec_30_22_36_16_2014.txt',
'data1Tue_Dec_30_22_36_30_2014.txt',
'data1Tue_Dec_30_22_36_45_2014.txt',
'data1Tue_Dec_30_22_37_00_2014.txt',
'data1Tue_Dec_30_22_37_15_2014.txt',
'data1Tue_Dec_30_22_37_30_2014.txt',
'data1Tue_Dec_30_22_37_44_2014.txt',
'data1Tue_Dec_30_22_37_59_2014.txt',
'data1Tue_Dec_30_22_38_14_2014.txt',
'data1Tue_Dec_30_22_38_28_2014.txt',
'data1Tue_Dec_30_22_38_44_2014.txt',
'data1Tue_Dec_30_22_38_58_2014.txt',
'data1Tue_Dec_30_22_39_13_2014.txt',
'data1Tue_Dec_30_22_39_28_2014.txt',
'data1Tue_Dec_30_22_39_42_2014.txt',
'data1Tue_Dec_30_22_39_57_2014.txt',
'data1Tue_Dec_30_22_40_13_2014.txt',
'data1Tue_Dec_30_22_40_27_2014.txt',
'data1Tue_Dec_30_22_40_41_2014.txt',
'data1Tue_Dec_30_22_40_56_2014.txt',
'data1Tue_Dec_30_22_41_11_2014.txt',
'data1Tue_Dec_30_22_41_26_2014.txt',
'data1Tue_Dec_30_22_41_41_2014.txt',
'data1Tue_Dec_30_22_41_56_2014.txt',
'data1Tue_Dec_30_22_42_10_2014.txt',
'data2Tue_Dec_30_22_42_25_2014.txt',
'data2Tue_Dec_30_22_42_40_2014.txt',
'data2Tue_Dec_30_22_42_54_2014.txt',
'data2Tue_Dec_30_22_43_09_2014.txt',
'data2Tue_Dec_30_22_43_24_2014.txt',
'data2Tue_Dec_30_22_43_39_2014.txt',
'data2Tue_Dec_30_22_43_53_2014.txt',
'data2Tue_Dec_30_22_44_08_2014.txt',
'data2Tue_Dec_30_22_44_23_2014.txt',
'data2Tue_Dec_30_22_44_37_2014.txt',
'data2Tue_Dec_30_22_44_52_2014.txt',
'data2Tue_Dec_30_22_45_06_2014.txt',
'data2Tue_Dec_30_22_45_21_2014.txt',
'data2Tue_Dec_30_22_45_36_2014.txt',
'data2Tue_Dec_30_22_45_50_2014.txt',
'data2Tue_Dec_30_22_46_05_2014.txt',
'data2Tue_Dec_30_22_46_20_2014.txt',
'data2Tue_Dec_30_22_46_35_2014.txt',
'data2Tue_Dec_30_22_46_50_2014.txt',
'data2Tue_Dec_30_22_47_05_2014.txt',
'data2Tue_Dec_30_22_47_20_2014.txt',
'data2Tue_Dec_30_22_47_35_2014.txt',
'data2Tue_Dec_30_22_47_49_2014.txt',
'data2Tue_Dec_30_22_48_04_2014.txt',
'data2Tue_Dec_30_22_48_19_2014.txt',
'data2Tue_Dec_30_22_48_34_2014.txt',
'data2Tue_Dec_30_22_48_49_2014.txt',
'data2Tue_Dec_30_22_49_04_2014.txt',
'data2Tue_Dec_30_22_49_19_2014.txt',
'data2Tue_Dec_30_22_49_34_2014.txt',
'data2Tue_Dec_30_22_49_49_2014.txt',
'data2Tue_Dec_30_22_50_04_2014.txt',
'data2Tue_Dec_30_22_50_19_2014.txt',
'data2Tue_Dec_30_22_50_33_2014.txt',
'data2Tue_Dec_30_22_50_48_2014.txt',
'data2Tue_Dec_30_22_51_03_2014.txt',
'data2Tue_Dec_30_22_51_18_2014.txt',
'data2Tue_Dec_30_22_51_32_2014.txt',
'data2Tue_Dec_30_22_51_47_2014.txt',
'data2Tue_Dec_30_22_52_02_2014.txt',
'data2Tue_Dec_30_22_52_16_2014.txt',
'data2Tue_Dec_30_22_52_31_2014.txt',
'data2Tue_Dec_30_22_52_46_2014.txt',
'data2Tue_Dec_30_22_53_01_2014.txt',
'data2Tue_Dec_30_22_53_16_2014.txt',
'data2Tue_Dec_30_22_53_31_2014.txt',
'data2Tue_Dec_30_22_53_45_2014.txt',
'data2Tue_Dec_30_22_54_00_2014.txt',
'data2Tue_Dec_30_22_54_15_2014.txt',
'data2Tue_Dec_30_22_54_29_2014.txt',
'data3Tue_Dec_30_22_54_44_2014.txt',
'data3Tue_Dec_30_22_54_59_2014.txt',
'data3Tue_Dec_30_22_55_13_2014.txt',
'data3Tue_Dec_30_22_55_28_2014.txt',
'data3Tue_Dec_30_22_55_43_2014.txt',
'data3Tue_Dec_30_22_55_58_2014.txt',
'data3Tue_Dec_30_22_56_13_2014.txt',
'data3Tue_Dec_30_22_56_28_2014.txt',
'data3Tue_Dec_30_22_56_43_2014.txt',
'data3Tue_Dec_30_22_56_57_2014.txt',
'data3Tue_Dec_30_22_57_12_2014.txt',
'data3Tue_Dec_30_22_57_27_2014.txt',
'data3Tue_Dec_30_22_57_42_2014.txt',
'data3Tue_Dec_30_22_57_56_2014.txt',
'data3Tue_Dec_30_22_58_12_2014.txt',
'data3Tue_Dec_30_22_58_26_2014.txt',
'data3Tue_Dec_30_22_58_41_2014.txt',
'data3Tue_Dec_30_22_58_56_2014.txt',
'data3Tue_Dec_30_22_59_10_2014.txt',
'data3Tue_Dec_30_22_59_25_2014.txt',
'data3Tue_Dec_30_22_59_40_2014.txt',
'data3Tue_Dec_30_22_59_54_2014.txt',
'data3Tue_Dec_30_23_00_10_2014.txt',
'data3Tue_Dec_30_23_00_25_2014.txt',
'data3Tue_Dec_30_23_00_39_2014.txt',
'data3Tue_Dec_30_23_00_54_2014.txt',
'data3Tue_Dec_30_23_01_09_2014.txt',
'data3Tue_Dec_30_23_01_23_2014.txt',
'data3Tue_Dec_30_23_01_38_2014.txt',
'data3Tue_Dec_30_23_01_53_2014.txt',
'data3Tue_Dec_30_23_02_07_2014.txt',
'data3Tue_Dec_30_23_02_22_2014.txt',
'data3Tue_Dec_30_23_02_37_2014.txt',
'data3Tue_Dec_30_23_02_52_2014.txt',
'data3Tue_Dec_30_23_03_06_2014.txt',
'data3Tue_Dec_30_23_03_21_2014.txt',
'data3Tue_Dec_30_23_03_36_2014.txt',
'data3Tue_Dec_30_23_03_51_2014.txt',
'data3Tue_Dec_30_23_04_05_2014.txt',
'data3Tue_Dec_30_23_04_20_2014.txt',
'data3Tue_Dec_30_23_04_34_2014.txt',
'data3Tue_Dec_30_23_04_49_2014.txt',
'data3Tue_Dec_30_23_05_04_2014.txt',
'data3Tue_Dec_30_23_05_19_2014.txt',
'data3Tue_Dec_30_23_05_34_2014.txt',
'data3Tue_Dec_30_23_05_49_2014.txt',
'data3Tue_Dec_30_23_06_04_2014.txt',
'data3Tue_Dec_30_23_06_18_2014.txt',
'data3Tue_Dec_30_23_06_33_2014.txt',
'data3Tue_Dec_30_23_06_48_2014.txt',
'data0Tue_Dec_30_23_07_28_2014.txt',
'data0Tue_Dec_30_23_07_42_2014.txt',
'data0Tue_Dec_30_23_07_58_2014.txt',
'data0Tue_Dec_30_23_08_12_2014.txt',
'data0Tue_Dec_30_23_08_27_2014.txt',
'data0Tue_Dec_30_23_08_42_2014.txt',
'data0Tue_Dec_30_23_08_57_2014.txt',
'data0Tue_Dec_30_23_09_12_2014.txt',
'data0Tue_Dec_30_23_09_27_2014.txt',
'data0Tue_Dec_30_23_09_42_2014.txt',
'data0Tue_Dec_30_23_09_57_2014.txt',
'data0Tue_Dec_30_23_10_12_2014.txt',
'data0Tue_Dec_30_23_10_26_2014.txt',
'data0Tue_Dec_30_23_10_42_2014.txt',
'data0Tue_Dec_30_23_10_57_2014.txt',
'data0Tue_Dec_30_23_11_12_2014.txt',
'data0Tue_Dec_30_23_11_27_2014.txt',
'data0Tue_Dec_30_23_11_42_2014.txt',
'data0Tue_Dec_30_23_11_56_2014.txt',
'data0Tue_Dec_30_23_12_11_2014.txt',
'data0Tue_Dec_30_23_12_26_2014.txt',
'data0Tue_Dec_30_23_12_40_2014.txt',
'data0Tue_Dec_30_23_12_55_2014.txt',
'data0Tue_Dec_30_23_13_10_2014.txt',
'data0Tue_Dec_30_23_13_25_2014.txt',
'data0Tue_Dec_30_23_13_40_2014.txt',
'data0Tue_Dec_30_23_13_55_2014.txt',
'data0Tue_Dec_30_23_14_11_2014.txt',
'data0Tue_Dec_30_23_14_26_2014.txt',
'data0Tue_Dec_30_23_14_40_2014.txt',
'data0Tue_Dec_30_23_14_55_2014.txt',
'data0Tue_Dec_30_23_15_09_2014.txt',
'data0Tue_Dec_30_23_15_24_2014.txt',
'data0Tue_Dec_30_23_15_39_2014.txt',
'data0Tue_Dec_30_23_15_54_2014.txt',
'data0Tue_Dec_30_23_16_08_2014.txt',
'data0Tue_Dec_30_23_16_23_2014.txt',
'data0Tue_Dec_30_23_16_37_2014.txt',
'data0Tue_Dec_30_23_16_52_2014.txt',
'data0Tue_Dec_30_23_17_08_2014.txt',
'data0Tue_Dec_30_23_17_23_2014.txt',
'data0Tue_Dec_30_23_17_37_2014.txt',
'data0Tue_Dec_30_23_17_52_2014.txt',
'data0Tue_Dec_30_23_18_07_2014.txt',
'data0Tue_Dec_30_23_18_22_2014.txt',
'data0Tue_Dec_30_23_18_36_2014.txt',
'data0Tue_Dec_30_23_18_51_2014.txt',
'data0Tue_Dec_30_23_19_06_2014.txt',
'data0Tue_Dec_30_23_19_21_2014.txt',
'data0Tue_Dec_30_23_19_36_2014.txt',
'data1Tue_Dec_30_23_19_50_2014.txt',
'data1Tue_Dec_30_23_20_05_2014.txt',
'data1Tue_Dec_30_23_20_20_2014.txt',
'data1Tue_Dec_30_23_20_34_2014.txt',
'data1Tue_Dec_30_23_20_49_2014.txt',
'data1Tue_Dec_30_23_21_04_2014.txt',
'data1Tue_Dec_30_23_21_19_2014.txt',
'data1Tue_Dec_30_23_21_33_2014.txt',
'data1Tue_Dec_30_23_21_48_2014.txt',
'data1Tue_Dec_30_23_22_03_2014.txt',
'data1Tue_Dec_30_23_22_18_2014.txt',
'data1Tue_Dec_30_23_22_33_2014.txt',
'data1Tue_Dec_30_23_22_48_2014.txt',
'data1Tue_Dec_30_23_23_03_2014.txt',
'data1Tue_Dec_30_23_23_17_2014.txt',
'data1Tue_Dec_30_23_23_32_2014.txt',
'data1Tue_Dec_30_23_23_47_2014.txt',
'data1Tue_Dec_30_23_24_02_2014.txt',
'data1Tue_Dec_30_23_24_16_2014.txt',
'data1Tue_Dec_30_23_24_31_2014.txt',
'data1Tue_Dec_30_23_24_45_2014.txt',
'data1Tue_Dec_30_23_25_00_2014.txt',
'data1Tue_Dec_30_23_25_15_2014.txt',
'data1Tue_Dec_30_23_25_29_2014.txt',
'data1Tue_Dec_30_23_25_44_2014.txt',
'data1Tue_Dec_30_23_25_59_2014.txt',
'data1Tue_Dec_30_23_26_13_2014.txt',
'data1Tue_Dec_30_23_26_28_2014.txt',
'data1Tue_Dec_30_23_26_43_2014.txt',
'data1Tue_Dec_30_23_26_58_2014.txt',
'data1Tue_Dec_30_23_27_13_2014.txt',
'data1Tue_Dec_30_23_27_27_2014.txt',
'data1Tue_Dec_30_23_27_42_2014.txt',
'data1Tue_Dec_30_23_27_57_2014.txt',
'data1Tue_Dec_30_23_28_11_2014.txt',
'data1Tue_Dec_30_23_28_26_2014.txt',
'data1Tue_Dec_30_23_28_42_2014.txt',
'data1Tue_Dec_30_23_28_56_2014.txt',
'data1Tue_Dec_30_23_29_11_2014.txt',
'data1Tue_Dec_30_23_29_26_2014.txt',
'data1Tue_Dec_30_23_29_41_2014.txt',
'data1Tue_Dec_30_23_29_56_2014.txt',
'data1Tue_Dec_30_23_30_10_2014.txt',
'data1Tue_Dec_30_23_30_25_2014.txt',
'data1Tue_Dec_30_23_30_40_2014.txt',
'data1Tue_Dec_30_23_30_55_2014.txt',
'data1Tue_Dec_30_23_31_10_2014.txt',
'data1Tue_Dec_30_23_31_25_2014.txt',
'data1Tue_Dec_30_23_31_39_2014.txt',
'data1Tue_Dec_30_23_31_54_2014.txt',
'data2Tue_Dec_30_23_32_09_2014.txt',
'data2Tue_Dec_30_23_32_24_2014.txt',
'data2Tue_Dec_30_23_32_39_2014.txt',
'data2Tue_Dec_30_23_32_53_2014.txt',
'data2Tue_Dec_30_23_33_08_2014.txt',
'data2Tue_Dec_30_23_33_23_2014.txt',
'data2Tue_Dec_30_23_33_38_2014.txt',
'data2Tue_Dec_30_23_33_53_2014.txt',
'data2Tue_Dec_30_23_34_08_2014.txt',
'data2Tue_Dec_30_23_34_23_2014.txt',
'data2Tue_Dec_30_23_34_37_2014.txt',
'data2Tue_Dec_30_23_34_52_2014.txt',
'data2Tue_Dec_30_23_35_07_2014.txt',
'data2Tue_Dec_30_23_35_22_2014.txt',
'data2Tue_Dec_30_23_35_37_2014.txt',
'data2Tue_Dec_30_23_35_52_2014.txt',
'data2Tue_Dec_30_23_36_07_2014.txt',
'data2Tue_Dec_30_23_36_22_2014.txt',
'data2Tue_Dec_30_23_36_36_2014.txt',
'data2Tue_Dec_30_23_36_51_2014.txt',
'data2Tue_Dec_30_23_37_06_2014.txt',
'data2Tue_Dec_30_23_37_20_2014.txt',
'data2Tue_Dec_30_23_37_35_2014.txt',
'data2Tue_Dec_30_23_37_50_2014.txt',
'data2Tue_Dec_30_23_38_05_2014.txt',
'data2Tue_Dec_30_23_38_20_2014.txt',
'data2Tue_Dec_30_23_38_35_2014.txt',
'data2Tue_Dec_30_23_38_50_2014.txt',
'data2Tue_Dec_30_23_39_05_2014.txt',
'data2Tue_Dec_30_23_39_19_2014.txt',
'data2Tue_Dec_30_23_39_34_2014.txt',
'data2Tue_Dec_30_23_39_49_2014.txt',
'data2Tue_Dec_30_23_40_04_2014.txt',
'data2Tue_Dec_30_23_40_18_2014.txt',
'data2Tue_Dec_30_23_40_33_2014.txt',
'data2Tue_Dec_30_23_40_48_2014.txt',
'data2Tue_Dec_30_23_41_03_2014.txt',
'data2Tue_Dec_30_23_41_18_2014.txt',
'data2Tue_Dec_30_23_41_33_2014.txt',
'data2Tue_Dec_30_23_41_48_2014.txt',
'data2Tue_Dec_30_23_42_03_2014.txt',
'data2Tue_Dec_30_23_42_18_2014.txt',
'data2Tue_Dec_30_23_42_33_2014.txt',
'data2Tue_Dec_30_23_42_47_2014.txt',
'data2Tue_Dec_30_23_43_02_2014.txt',
'data2Tue_Dec_30_23_43_18_2014.txt',
'data2Tue_Dec_30_23_43_33_2014.txt',
'data2Tue_Dec_30_23_43_47_2014.txt',
'data2Tue_Dec_30_23_44_02_2014.txt',
'data2Tue_Dec_30_23_44_17_2014.txt',
'data3Tue_Dec_30_23_44_32_2014.txt',
'data3Tue_Dec_30_23_44_46_2014.txt',
'data3Tue_Dec_30_23_45_01_2014.txt',
'data3Tue_Dec_30_23_45_16_2014.txt',
'data3Tue_Dec_30_23_45_31_2014.txt',
'data3Tue_Dec_30_23_45_46_2014.txt',
'data3Tue_Dec_30_23_46_00_2014.txt',
'data3Tue_Dec_30_23_46_16_2014.txt',
'data3Tue_Dec_30_23_46_31_2014.txt',
'data3Tue_Dec_30_23_46_46_2014.txt',
'data3Tue_Dec_30_23_47_01_2014.txt',
'data3Tue_Dec_30_23_47_16_2014.txt',
'data3Tue_Dec_30_23_47_31_2014.txt',
'data3Tue_Dec_30_23_47_46_2014.txt',
'data3Tue_Dec_30_23_48_01_2014.txt',
'data3Tue_Dec_30_23_48_16_2014.txt',
'data3Tue_Dec_30_23_48_31_2014.txt',
'data3Tue_Dec_30_23_48_45_2014.txt',
'data3Tue_Dec_30_23_49_00_2014.txt',
'data3Tue_Dec_30_23_49_15_2014.txt',
'data3Tue_Dec_30_23_49_30_2014.txt',
'data3Tue_Dec_30_23_49_45_2014.txt',
'data3Tue_Dec_30_23_49_59_2014.txt',
'data3Tue_Dec_30_23_50_14_2014.txt',
'data3Tue_Dec_30_23_50_29_2014.txt',
'data3Tue_Dec_30_23_50_44_2014.txt',
'data3Tue_Dec_30_23_50_59_2014.txt',
'data3Tue_Dec_30_23_51_13_2014.txt',
'data3Tue_Dec_30_23_51_28_2014.txt',
'data3Tue_Dec_30_23_51_43_2014.txt',
'data3Tue_Dec_30_23_51_57_2014.txt',
'data3Tue_Dec_30_23_52_13_2014.txt',
'data3Tue_Dec_30_23_52_28_2014.txt',
'data3Tue_Dec_30_23_52_43_2014.txt',
'data3Tue_Dec_30_23_52_58_2014.txt',
'data3Tue_Dec_30_23_53_12_2014.txt',
'data3Tue_Dec_30_23_53_27_2014.txt',
'data3Tue_Dec_30_23_53_42_2014.txt',
'data3Tue_Dec_30_23_53_56_2014.txt',
'data3Tue_Dec_30_23_54_11_2014.txt',
'data3Tue_Dec_30_23_54_26_2014.txt',
'data3Tue_Dec_30_23_54_41_2014.txt',
'data3Tue_Dec_30_23_54_56_2014.txt',
'data3Tue_Dec_30_23_55_11_2014.txt',
'data3Tue_Dec_30_23_55_26_2014.txt',
'data3Tue_Dec_30_23_55_41_2014.txt',
'data3Tue_Dec_30_23_55_55_2014.txt',
'data3Tue_Dec_30_23_56_10_2014.txt',
'data3Tue_Dec_30_23_56_25_2014.txt',
'data3Tue_Dec_30_23_56_40_2014.txt',
'data0Tue_Dec_30_23_57_21_2014.txt',
'data0Tue_Dec_30_23_57_36_2014.txt',
'data0Tue_Dec_30_23_57_51_2014.txt',
'data0Tue_Dec_30_23_58_06_2014.txt',
'data0Tue_Dec_30_23_58_20_2014.txt',
'data0Tue_Dec_30_23_58_35_2014.txt',
'data0Tue_Dec_30_23_58_50_2014.txt',
'data0Tue_Dec_30_23_59_05_2014.txt',
'data0Tue_Dec_30_23_59_20_2014.txt',
'data0Tue_Dec_30_23_59_35_2014.txt',
'data0Tue_Dec_30_23_59_49_2014.txt',
'data0Wed_Dec_31_00_00_04_2014.txt',
'data0Wed_Dec_31_00_00_18_2014.txt',
'data0Wed_Dec_31_00_00_33_2014.txt',
'data0Wed_Dec_31_00_00_48_2014.txt',
'data0Wed_Dec_31_00_01_02_2014.txt',
'data0Wed_Dec_31_00_01_17_2014.txt',
'data0Wed_Dec_31_00_01_32_2014.txt',
'data0Wed_Dec_31_00_01_48_2014.txt',
'data0Wed_Dec_31_00_02_02_2014.txt',
'data0Wed_Dec_31_00_02_18_2014.txt',
'data0Wed_Dec_31_00_02_32_2014.txt',
'data0Wed_Dec_31_00_02_47_2014.txt',
'data0Wed_Dec_31_00_03_01_2014.txt',
'data0Wed_Dec_31_00_03_17_2014.txt',
'data0Wed_Dec_31_00_03_32_2014.txt',
'data0Wed_Dec_31_00_03_46_2014.txt',
'data0Wed_Dec_31_00_04_01_2014.txt',
'data0Wed_Dec_31_00_04_16_2014.txt',
'data0Wed_Dec_31_00_04_31_2014.txt',
'data0Wed_Dec_31_00_04_46_2014.txt',
'data0Wed_Dec_31_00_05_00_2014.txt',
'data0Wed_Dec_31_00_05_15_2014.txt',
'data0Wed_Dec_31_00_05_31_2014.txt',
'data0Wed_Dec_31_00_05_46_2014.txt',
'data0Wed_Dec_31_00_06_01_2014.txt',
'data0Wed_Dec_31_00_06_15_2014.txt',
'data0Wed_Dec_31_00_06_30_2014.txt',
'data0Wed_Dec_31_00_06_45_2014.txt',
'data0Wed_Dec_31_00_07_00_2014.txt',
'data0Wed_Dec_31_00_07_14_2014.txt',
'data0Wed_Dec_31_00_07_29_2014.txt',
'data0Wed_Dec_31_00_07_44_2014.txt',
'data0Wed_Dec_31_00_07_59_2014.txt',
'data0Wed_Dec_31_00_08_13_2014.txt',
'data0Wed_Dec_31_00_08_28_2014.txt',
'data0Wed_Dec_31_00_08_43_2014.txt',
'data0Wed_Dec_31_00_08_57_2014.txt',
'data0Wed_Dec_31_00_09_12_2014.txt',
'data0Wed_Dec_31_00_09_27_2014.txt',
'data1Wed_Dec_31_00_09_42_2014.txt',
'data1Wed_Dec_31_00_09_57_2014.txt',
'data1Wed_Dec_31_00_10_11_2014.txt',
'data1Wed_Dec_31_00_10_26_2014.txt',
'data1Wed_Dec_31_00_10_41_2014.txt',
'data1Wed_Dec_31_00_10_56_2014.txt',
'data1Wed_Dec_31_00_11_11_2014.txt',
'data1Wed_Dec_31_00_11_26_2014.txt',
'data1Wed_Dec_31_00_11_40_2014.txt',
'data1Wed_Dec_31_00_11_55_2014.txt',
'data1Wed_Dec_31_00_12_10_2014.txt',
'data1Wed_Dec_31_00_12_25_2014.txt',
'data1Wed_Dec_31_00_12_40_2014.txt',
'data1Wed_Dec_31_00_12_54_2014.txt',
'data1Wed_Dec_31_00_13_09_2014.txt',
'data1Wed_Dec_31_00_13_24_2014.txt',
'data1Wed_Dec_31_00_13_39_2014.txt',
'data1Wed_Dec_31_00_13_54_2014.txt',
'data1Wed_Dec_31_00_14_09_2014.txt',
'data1Wed_Dec_31_00_14_24_2014.txt',
'data1Wed_Dec_31_00_14_38_2014.txt',
'data1Wed_Dec_31_00_14_53_2014.txt',
'data1Wed_Dec_31_00_15_07_2014.txt',
'data1Wed_Dec_31_00_15_22_2014.txt',
'data1Wed_Dec_31_00_15_37_2014.txt',
'data1Wed_Dec_31_00_15_52_2014.txt',
'data1Wed_Dec_31_00_16_06_2014.txt',
'data1Wed_Dec_31_00_16_22_2014.txt',
'data1Wed_Dec_31_00_16_38_2014.txt',
'data1Wed_Dec_31_00_16_52_2014.txt',
'data1Wed_Dec_31_00_17_07_2014.txt',
'data1Wed_Dec_31_00_17_22_2014.txt',
'data1Wed_Dec_31_00_17_37_2014.txt',
'data1Wed_Dec_31_00_17_51_2014.txt',
'data1Wed_Dec_31_00_18_06_2014.txt',
'data1Wed_Dec_31_00_18_20_2014.txt',
'data1Wed_Dec_31_00_18_35_2014.txt',
'data1Wed_Dec_31_00_18_50_2014.txt',
'data1Wed_Dec_31_00_19_04_2014.txt',
'data1Wed_Dec_31_00_19_19_2014.txt',
'data1Wed_Dec_31_00_19_34_2014.txt',
'data1Wed_Dec_31_00_19_48_2014.txt',
'data1Wed_Dec_31_00_20_03_2014.txt',
'data1Wed_Dec_31_00_20_18_2014.txt',
'data1Wed_Dec_31_00_20_33_2014.txt',
'data1Wed_Dec_31_00_20_48_2014.txt',
'data1Wed_Dec_31_00_21_03_2014.txt',
'data1Wed_Dec_31_00_21_18_2014.txt',
'data1Wed_Dec_31_00_21_32_2014.txt',
'data1Wed_Dec_31_00_21_47_2014.txt',
'data2Wed_Dec_31_00_22_02_2014.txt',
'data2Wed_Dec_31_00_22_17_2014.txt',
'data2Wed_Dec_31_00_22_32_2014.txt',
'data2Wed_Dec_31_00_22_47_2014.txt',
'data2Wed_Dec_31_00_23_01_2014.txt',
'data2Wed_Dec_31_00_23_16_2014.txt',
'data2Wed_Dec_31_00_23_31_2014.txt',
'data2Wed_Dec_31_00_23_46_2014.txt',
'data2Wed_Dec_31_00_24_01_2014.txt',
'data2Wed_Dec_31_00_24_16_2014.txt',
'data2Wed_Dec_31_00_24_30_2014.txt',
'data2Wed_Dec_31_00_24_45_2014.txt',
'data2Wed_Dec_31_00_25_00_2014.txt',
'data2Wed_Dec_31_00_25_15_2014.txt',
'data2Wed_Dec_31_00_25_29_2014.txt',
'data2Wed_Dec_31_00_25_44_2014.txt',
'data2Wed_Dec_31_00_25_59_2014.txt',
'data2Wed_Dec_31_00_26_14_2014.txt',
'data2Wed_Dec_31_00_26_29_2014.txt',
'data2Wed_Dec_31_00_26_43_2014.txt',
'data2Wed_Dec_31_00_26_59_2014.txt',
'data2Wed_Dec_31_00_27_13_2014.txt',
'data2Wed_Dec_31_00_27_28_2014.txt',
'data2Wed_Dec_31_00_27_43_2014.txt',
'data2Wed_Dec_31_00_27_58_2014.txt',
'data2Wed_Dec_31_00_28_13_2014.txt',
'data2Wed_Dec_31_00_28_28_2014.txt',
'data2Wed_Dec_31_00_28_43_2014.txt',
'data2Wed_Dec_31_00_28_57_2014.txt',
'data2Wed_Dec_31_00_29_12_2014.txt',
'data2Wed_Dec_31_00_29_27_2014.txt',
'data2Wed_Dec_31_00_29_42_2014.txt',
'data2Wed_Dec_31_00_29_57_2014.txt',
'data2Wed_Dec_31_00_30_12_2014.txt',
'data2Wed_Dec_31_00_30_27_2014.txt',
'data2Wed_Dec_31_00_30_42_2014.txt',
'data2Wed_Dec_31_00_30_57_2014.txt',
'data2Wed_Dec_31_00_31_12_2014.txt',
'data2Wed_Dec_31_00_31_27_2014.txt',
'data2Wed_Dec_31_00_31_41_2014.txt',
'data2Wed_Dec_31_00_31_56_2014.txt',
'data2Wed_Dec_31_00_32_11_2014.txt',
'data2Wed_Dec_31_00_32_26_2014.txt',
'data2Wed_Dec_31_00_32_40_2014.txt',
'data2Wed_Dec_31_00_32_55_2014.txt',
'data2Wed_Dec_31_00_33_10_2014.txt',
'data2Wed_Dec_31_00_33_24_2014.txt',
'data2Wed_Dec_31_00_33_39_2014.txt',
'data2Wed_Dec_31_00_33_54_2014.txt',
'data2Wed_Dec_31_00_34_09_2014.txt',
'data3Wed_Dec_31_00_34_24_2014.txt',
'data3Wed_Dec_31_00_34_39_2014.txt',
'data3Wed_Dec_31_00_34_54_2014.txt',
'data3Wed_Dec_31_00_35_09_2014.txt',
'data3Wed_Dec_31_00_35_24_2014.txt',
'data3Wed_Dec_31_00_35_39_2014.txt',
'data3Wed_Dec_31_00_35_54_2014.txt',
'data3Wed_Dec_31_00_36_08_2014.txt',
'data3Wed_Dec_31_00_36_23_2014.txt',
'data3Wed_Dec_31_00_36_38_2014.txt',
'data3Wed_Dec_31_00_36_53_2014.txt',
'data3Wed_Dec_31_00_37_08_2014.txt',
'data3Wed_Dec_31_00_37_22_2014.txt',
'data3Wed_Dec_31_00_37_38_2014.txt',
'data3Wed_Dec_31_00_37_53_2014.txt',
'data3Wed_Dec_31_00_38_08_2014.txt',
'data3Wed_Dec_31_00_38_22_2014.txt',
'data3Wed_Dec_31_00_38_37_2014.txt',
'data3Wed_Dec_31_00_38_52_2014.txt',
'data3Wed_Dec_31_00_39_07_2014.txt',
'data3Wed_Dec_31_00_39_22_2014.txt',
'data3Wed_Dec_31_00_39_36_2014.txt',
'data3Wed_Dec_31_00_39_51_2014.txt',
'data3Wed_Dec_31_00_40_06_2014.txt',
'data3Wed_Dec_31_00_40_21_2014.txt',
'data3Wed_Dec_31_00_40_36_2014.txt',
'data3Wed_Dec_31_00_40_50_2014.txt',
'data3Wed_Dec_31_00_41_05_2014.txt',
'data3Wed_Dec_31_00_41_20_2014.txt',
'data3Wed_Dec_31_00_41_34_2014.txt',
'data3Wed_Dec_31_00_41_50_2014.txt',
'data3Wed_Dec_31_00_42_04_2014.txt',
'data3Wed_Dec_31_00_42_19_2014.txt',
'data3Wed_Dec_31_00_42_33_2014.txt',
'data3Wed_Dec_31_00_42_48_2014.txt',
'data3Wed_Dec_31_00_43_03_2014.txt',
'data3Wed_Dec_31_00_43_18_2014.txt',
'data3Wed_Dec_31_00_43_33_2014.txt',
'data3Wed_Dec_31_00_43_48_2014.txt',
'data3Wed_Dec_31_00_44_03_2014.txt',
'data3Wed_Dec_31_00_44_18_2014.txt',
'data3Wed_Dec_31_00_44_33_2014.txt',
'data3Wed_Dec_31_00_44_48_2014.txt',
'data3Wed_Dec_31_00_45_03_2014.txt',
'data3Wed_Dec_31_00_45_18_2014.txt',
'data3Wed_Dec_31_00_45_33_2014.txt',
'data3Wed_Dec_31_00_45_48_2014.txt',
'data3Wed_Dec_31_00_46_03_2014.txt',
'data3Wed_Dec_31_00_46_18_2014.txt',
'data3Wed_Dec_31_00_46_32_2014.txt']
|
6,966 | 778ee9a0ea7f57535b4de88a38cd741f2d46e092 | txt = './KF_neko.txt.mecab'
mapData = {}
listData = []
with open('./KF31.txt', 'w') as writeFile:
with open(txt, 'r') as readFile:
for text in readFile:
# print(text)
# \tで区切って先頭だけ見る
listData = text.split('\t')
# 表層形
surface = listData[0]
# EOSが入ってたら消す
if surface == 'EOS\n':
surface = ''
# print(surface)
# 表層形以外をバラす
splitted = listData[-1].split(',')
# EOSが入ってたら消す
if splitted == 'EOS\n':
continue
else:
# 品詞
pos = splitted[0]
if pos in ('動詞'):
dousiSurface = surface
writeFile.write(dousiSurface+'\n')
|
6,967 | c0216dbd52be134eb417c20ed80b398b22e5d844 | from sklearn.cluster import MeanShift
from sklearn.datasets.samples_generator import make_blobs
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import style
style.use('ggplot')
# Demo: mean-shift clustering of synthetic 3-D blobs.

# Generate sample points scattered around four known centres.
blob_centers = [[20, 0, 0], [0, 20, 0], [0, 0, 20], [0, 0, 0]]
X, _ = make_blobs(n_samples=200, centers=blob_centers, cluster_std=2)

# Cluster with a flat kernel of search bandwidth 10.
clf = MeanShift(bandwidth=10)
clf.fit(X)

# Per-point cluster assignments and the discovered cluster centres.
labels = clf.labels_
cluster_centers = clf.cluster_centers_
print(cluster_centers)
n_clusters = len(cluster_centers)
print('Number of clusters found:', n_clusters)

# Scatter the points coloured by cluster, plus the centres as black crosses.
colors = 10 * ['r', 'g', 'b', 'y', 'c']
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for point, label in zip(X, labels):
    ax.scatter(point[0], point[1], point[2], c=colors[label], marker='o')
ax.scatter(cluster_centers[:, 0], cluster_centers[:, 1], cluster_centers[:, 2],
           marker='x', s=150, linewidth=5, zorder=10, color='k')
plt.show()
|
6,968 | d13b402b90bb948e5722f45096a8c0a33e4cac67 | import cv2
# Stream frames from the SDP source and display them until the stream
# ends or the user presses 'q'.
cam = cv2.VideoCapture("./bebop.sdp")
while True:
    ret, frame = cam.read()
    # BUG FIX: the original ignored `ret`; cv2.imshow(None) raises once
    # the stream ends or a frame is dropped.
    if not ret:
        break
    cv2.imshow("frame", frame)
    # waitKey refreshes the window; allow a clean exit on 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# BUG FIX: release the capture and window resources on exit.
cam.release()
cv2.destroyAllWindows()
|
6,969 | c3bfcb971a6b08cdf98200bd2b2a8fe6ac2dd083 | from partyparrot import convert_with_alphabet_emojis, convert
def test_convert_char_to_alphabet():
    """convert_with_alphabet_emojis maps letters to :alphabet-white-*: emoji,
    passes whitespace through unchanged, and renders unsupported characters
    as question/exclamation glyphs."""
    assert convert_with_alphabet_emojis("") == ""
    assert convert_with_alphabet_emojis(" ") == " "
    assert convert_with_alphabet_emojis("\n") == "\n"
    # NOTE(review): the oddly indented string literal below is *implicitly
    # concatenated* with the line above it — the expected value is one string.
    assert (
        convert_with_alphabet_emojis(" one two")
        == " :alphabet-white-o::alphabet-white-n::alphabet-white-e: "
    ":alphabet-white-t::alphabet-white-w::alphabet-white-o:"
    )
    # Characters with no glyph ('1', '_', "'") all map to the question mark.
    assert convert_with_alphabet_emojis("1_'") == ":alphabet-white-question:" * 3
    assert (
        convert_with_alphabet_emojis("?!")
        == ":alphabet-white-question::alphabet-white-exclamation:"
    )
def test_convert():
    """convert() renders "Hello world" as multi-row banner art built from the
    given icon/space tokens; the exact expected banner is pinned below."""
    assert (
        convert("Hello world", ":icon:", ":nbsp")
        == ":icon::nbsp:nbsp:icon::nbsp:icon::icon::icon::icon::nbsp:icon::nbsp:nbsp:nbsp:nbsp:icon::nbsp:nbsp:nbsp:nbsp:nbsp:icon::icon::nbsp:nbsp:nbsp:nbsp:nbsp:icon::nbsp:nbsp:nbsp:icon::nbsp:nbsp:icon::icon::nbsp:nbsp:icon::icon::icon::nbsp:nbsp:icon::nbsp:nbsp:nbsp:nbsp:icon::icon::icon:\n:icon::nbsp:nbsp:icon::nbsp:icon::nbsp:nbsp:nbsp:nbsp:icon::nbsp:nbsp:nbsp:nbsp:icon::nbsp:nbsp:nbsp:nbsp:icon::nbsp:nbsp:icon::nbsp:nbsp:nbsp:nbsp:icon::nbsp:nbsp:nbsp:icon::nbsp:icon::nbsp:nbsp:icon::nbsp:icon::nbsp:nbsp:icon::nbsp:icon::nbsp:nbsp:nbsp:nbsp:icon::nbsp:nbsp:icon:\n:icon::icon::icon::icon::nbsp:icon::icon::icon::nbsp:nbsp:icon::nbsp:nbsp:nbsp:nbsp:icon::nbsp:nbsp:nbsp:nbsp:icon::nbsp:nbsp:icon::nbsp:nbsp:nbsp:nbsp:icon::nbsp:icon::nbsp:icon::nbsp:icon::nbsp:nbsp:icon::nbsp:icon::icon::icon::nbsp:nbsp:icon::nbsp:nbsp:nbsp:nbsp:icon::nbsp:nbsp:icon:\n:icon::nbsp:nbsp:icon::nbsp:icon::nbsp:nbsp:nbsp:nbsp:icon::nbsp:nbsp:nbsp:nbsp:icon::nbsp:nbsp:nbsp:nbsp:icon::nbsp:nbsp:icon::nbsp:nbsp:nbsp:nbsp:icon::nbsp:icon::nbsp:icon::nbsp:icon::nbsp:nbsp:icon::nbsp:icon::nbsp:nbsp:icon::nbsp:icon::nbsp:nbsp:nbsp:nbsp:icon::nbsp:nbsp:icon:\n:icon::nbsp:nbsp:icon::nbsp:icon::icon::icon::icon::nbsp:icon::icon::icon::icon::nbsp:icon::icon::icon::icon::nbsp:nbsp:icon::icon::nbsp:nbsp:nbsp:nbsp:nbsp:nbsp:icon::nbsp:icon::nbsp:nbsp:nbsp:icon::icon::nbsp:nbsp:icon::nbsp:nbsp:icon::nbsp:icon::icon::icon::icon::nbsp:icon::icon::icon:"
    )
def test_convert_wrong_char():
    """Characters outside the banner font ('@', '!', '*') fall back to a
    question-mark-style glyph rather than raising."""
    txt = convert("@!*", ":icon:", ":nbsp")
    assert (
        txt
        == ":icon::icon::icon::nbsp:nbsp:icon::icon::icon::nbsp:nbsp:icon::icon::icon:\n:nbsp:nbsp:icon::nbsp:nbsp:nbsp:nbsp:icon::nbsp:nbsp:nbsp:nbsp:icon:\n:nbsp:icon::nbsp:nbsp:nbsp:nbsp:icon::nbsp:nbsp:nbsp:nbsp:icon:\n\n:nbsp:icon::nbsp:nbsp:nbsp:nbsp:icon::nbsp:nbsp:nbsp:nbsp:icon:"
    )
|
6,970 | 7245d4db6440d38b9302907a6203c1507c373112 |
from django.http import HttpResponse
from django.views.decorators.http import require_http_methods
from django.shortcuts import render, redirect
from app.models import PaidTimeOff, Schedule
from django.utils import timezone
from django.contrib import messages
from app.decorators import user_is_authenticated
from app.views import utils
@require_http_methods(["GET", "POST"])
@user_is_authenticated
def index(request, user_id):
    """Show (GET) or update (POST) the paid-time-off page for a user."""
    user = utils.current_user(request)
    if not user:
        return HttpResponse("User " + str(user_id) + " NOT FOUND")
    pto = PaidTimeOff.objects.filter(user=user).first()
    if not pto:
        return HttpResponse("PTO " + str(user_id) + " NOT FOUND")
    # Dispatch on the HTTP verb; anything else is rejected.
    handlers = {"GET": index_get, "POST": index_post}
    handler = handlers.get(request.method)
    if handler is None:
        return HttpResponse("Invalid HTTP method")
    return handler(request, user_id, user, pto)
def index_get(request, user_id, user, pto):  # pylint: disable=unused-argument
    """Render the paid-time-off page with the user's PTO schedule calendar."""
    schedules = Schedule.to_calendar((Schedule.objects.filter(pto=pto)))
    # NOTE(review): pto.__dict__ exposes internal model attributes (e.g.
    # Django's _state) to the template context — confirm the template only
    # reads the PTO model fields.
    context = pto.__dict__
    context.update({"schedules": schedules, "current_user": user})
    return render(request, "users/paid_time_off.html",
                  context=context)
def index_post(request, user_id, user, pto):
    """Handle the PTO form submission: validate, create a Schedule, redirect.

    Validation failures and creation errors are reported to the user via
    Django messages rather than HTTP error codes, and the user is always
    redirected back to their PTO page.
    """
    form = request.POST
    if not form:
        return HttpResponse("No form found")
    err_msg = PaidTimeOff.validate_PTO_form(form)
    if len(err_msg) > 0:
        messages.add_message(request, messages.INFO, err_msg)
    else:
        try:
            # Normalize the submitted date strings to the model's format.
            date_begin = Schedule.reformat(form['date_begin'])
            date_end = Schedule.reformat(form['date_end'])
            Schedule.objects.create(
                user=user, pto=pto, date_begin=date_begin,
                date_end=date_end, event_name=form['event_name'],
                event_type='PTO', event_desc=form['event_description'],
                created_at=timezone.now(), updated_at=timezone.now())
            messages.add_message(request, messages.INFO,
                                 "Information successfully updated")
        # NOTE(review): the broad Exception catch deliberately surfaces any
        # failure text to the user as a flash message — confirm nothing
        # sensitive can leak through str(e).
        except Exception as e:
            messages.add_message(request, messages.INFO, str(e))
    url = "/users/%s/paid_time_off/" % user_id
    return redirect(url, permanent=False)
|
6,971 | 2a5c6f442e6e6cec6c4663b764c8a9a15aec8c40 | import hashlib
import json
#import logger
import Login.loger as logger
#configurations
import Configurations.config as config
def generate_data(*args):
    """Build a dict mapping each station parameter name to a hashed id.

    Expects the first positional argument to be a dict containing at least
    'station_id'.  Returns None (after logging) when no argument is supplied
    or the station id is missing.
    """
    # BUG FIX: args[0] raises IndexError (not KeyError) when no argument is
    # passed; the missing-'station_id' case is the actual KeyError, so catch
    # both and log as before.
    try:
        station_data = args[0]
        station_id = station_data['station_id']
    except (IndexError, KeyError) as e:
        logger.log(log_type=config.log_error, params=e)
        return None

    # Weather parameters: dailyrainMM, rain, tempc, winddir, windspeedkmh,
    # humidity, baromMM.
    # Technical parameters: BAT, network, RSSI, action, softwareType, version.
    # ('dateist' was intentionally disabled in the original code.)
    parameters = (
        'dailyrainMM', 'rain', 'tempc', 'winddir', 'windspeedkmh',
        'humidity', 'baromMM', 'BAT', 'network', 'RSSI', 'action',
        'softwareType', 'version',
    )
    # Insertion order matches the original hand-written dict.
    return {name: generate_id(name, station_id) for name in parameters}
def generate_id(parameter, station_id):
    """Return a hex SHA-256 id derived from the parameter name + station id."""
    # NOTE(review): hashlib.sha256() requires bytes; this assumes
    # config.encryption_key is a bytes object — confirm.
    hasher = hashlib.sha256(config.encryption_key)
    hasher.update(json.dumps(parameter + station_id).encode())
    return hasher.hexdigest()
|
6,972 | 179a9cf0713001e361f39aa30192618b392c78c7 | pal = []
for i in range(100, 1000):
for j in range( 100, 1000):
s = str(i*j)
if s[::-1] == s:
pal.append(int(s))
print(max(pal)) |
6,973 | 88a469eba61fb6968db8cc5e1f93f12093b7f128 | from api.decidim_connector import DecidimConnector
from api.participatory_processes_reader import ParticipatoryProcessesReader
from api.version_reader import VersionReader
# Decidim GraphQL API endpoint used for all queries below.
API_URL = "https://meta.decidim.org/api"
# Shared connector instance reused by every reader.
decidim_connector = DecidimConnector(API_URL)
# Query and print the API version as a connectivity check.
version_reader = VersionReader(decidim_connector)
version = version_reader.process_query()
print(version)
# Fetch the list of participatory processes from the same endpoint.
participatory_processes_reader = ParticipatoryProcessesReader(decidim_connector)
participatory_processes = participatory_processes_reader.process_query()
|
6,974 | 1cca94040cdd8db9d98f587c62eff7c58eae7535 | from mathmodule import *
import sys
print("Welcome to my basic \'Calculator\'")
print("Please choose your best option (+, -, *, /) ")


def _read_int(prompt):
    """Keep prompting until the user enters a valid integer."""
    while True:
        try:
            return int(input(prompt))
        # BUG FIX: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit, making the loop impossible to abort.
        except ValueError:
            print("Oops!", sys.exc_info()[0], "occurred.")


def _read_operator(prompt):
    """Keep prompting until the user enters one of + - * /."""
    while True:
        op = input(prompt)
        if op in ('+', '-', '*', '/'):
            return op
        # No need to raise/catch an exception just to repeat the loop.
        print("Opp, Enter Math again")


# Same prompt order as before: first value, operator, second value.
A = _read_int("Now Enter your first Value=")
mathoparetor = _read_operator("Enter your Math oparetor=")
B = _read_int("Now Enter your second Value=")

# Dispatch on the chosen operator (helpers come from mathmodule).
if mathoparetor == '+':
    print('The addition number is', add(A, B))
elif mathoparetor == '-':
    print('The subtraction number is', sub(A, B))
elif mathoparetor == '*':
    print('The multiaplication number is', mull(A, B))
elif mathoparetor == '/':
    print('The division number is', divi(A, B))
6,975 | 427d3d386d4b8a998a0b61b8c59984c6003f5d7b | import subprocess as sp
from .dummy_qsub import dummy_qsub
from os.path import exists
from os import makedirs
from os import remove
from os.path import dirname
QUEUE_NAME = 'fact_medium'
def qsub(job, exe_path, queue=QUEUE_NAME):
    """Submit *exe_path* to the grid-engine queue described by *job*.

    Expected job keys: 'name', 'o_path'/'e_path' (stdout/stderr targets,
    /dev/null when None) and any '--flag'-style keys, which are forwarded
    to the executable as extra CLI arguments.
    """
    stdout_path = '/dev/null' if job['o_path'] is None else job['o_path']
    stderr_path = '/dev/null' if job['e_path'] is None else job['e_path']

    # For real log targets: drop stale files and make sure the parent
    # directory exists.
    for path in (stdout_path, stderr_path):
        if path == '/dev/null':
            continue
        if exists(path):
            remove(path)
        else:
            makedirs(dirname(path), exist_ok=True)

    cmd = ['qsub',
           '-q', queue,
           '-o', stdout_path,
           '-e', stderr_path,
           '-N', job['name'],
           exe_path]
    # Forward '--option'-style job entries to the executable.
    for key in job:
        if '--' in key:
            cmd += [key, job[key]]

    if 'test_dummy' in queue:
        # Test hook: record the command instead of submitting it.
        dummy_qsub(cmd)
        return
    try:
        sp.check_output(cmd, stderr=sp.STDOUT)
    except sp.CalledProcessError as e:
        print('returncode', e.returncode)
        print('output', e.output)
        raise
|
6,976 | 6ab5ac0caa44366268bb8b70ac044376d9c062f0 | # Code By it4min
# t.me/it4min
# t.me/LinuxArmy
# -- Combo List Maker v1 --
import time, os
os.system("clear")
banner = '''
\033[92m .o88b. .d88b. .88b d88. d8888b. .d88b.
d8P Y8 .8P Y8. 88'YbdP`88 88 `8D .8P Y8.
8P 88 88 88 88 88 88oooY' 88 88
8b 88 88 88 88 88 88~~~b. 88 88
Y8b d8 `8b d8' 88 88 88 88 8D `8b d8'
`Y88P' `Y88P' YP YP YP Y8888P' `Y88P'
t.me/LinuxArmy
---------------
.88b d88. .d8b. db dD d88888b d8888b. Code by it4min
88'YbdP`88 d8' `8b 88 ,8P' 88' 88 `8D
88 88 88 88ooo88 88,8P 88ooooo 88oobY'
88 88 88 88~~~88 88`8b 88~~~~~ 88`8b
88 88 88 88 88 88 `88. 88. 88 `88.
YP YP YP YP YP YP YD Y88888P 88 YD
'''
print(banner)
userf = input("\033[91m>>> \033[93mEnter the username address: ")
passf = input("\033[91m>>> \033[93mEnter the password address: ")

# Read both word lists.  splitlines() already strips the newlines, so the
# original per-line replace('\n', '') was a no-op.  Use `with` so the
# files are actually closed (the original leaked both handles).
with open(userf, "r") as user_file:
    userlist = user_file.read().splitlines()
with open(passf, "r") as pass_file:
    passlist = pass_file.read().splitlines()

os.system("clear")
print ('\n'+"\033[94m - Loading Data ...")
time.sleep(3)
os.system("clear")
print ('\n'+" - Combo List Makeing ...")
time.sleep(3)

# BUG FIX (deduplication): the original had three copy-pasted branches for
# len(userlist) greater/less/equal that all just paired entries up to the
# shorter list — which is exactly what zip() does.
with open("ComboList.txt", "a") as combof:
    for username, password in zip(userlist, passlist):
        combo = username + ":" + password
        combof.write(combo + '\n')
        print (combo)

os.system("clear")
print ('\n'+" - Combo List Maked ;")
|
6,977 | 9d8d8e97f7d3dbbb47dc6d4105f0f1ffb358fd2f | from enum import Enum
import os
from pathlib import Path
from typing import Optional
from loguru import logger
import pandas as pd
from pydantic.class_validators import root_validator, validator
from tqdm import tqdm
from zamba.data.video import VideoLoaderConfig
from zamba.models.config import (
ZambaBaseModel,
check_files_exist_and_load,
get_filepaths,
validate_model_cache_dir,
)
from zamba.models.densepose.densepose_manager import MODELS, DensePoseManager
from zamba.models.utils import RegionEnum
class DensePoseOutputEnum(Enum):
    """Supported DensePose output flavours."""

    # Raw segmentation output.
    segmentation = "segmentation"
    # Per-frame chimpanzee anatomy labelling.
    chimp_anatomy = "chimp_anatomy"
class DensePoseConfig(ZambaBaseModel):
    """Configuration for running dense pose on videos.

    Args:
        video_loader_config (VideoLoaderConfig): Configuration for loading videos
        output_type (str): one of DensePoseOutputEnum (currently "segmentation" or "chimp_anatomy").
        render_output (bool): Whether to save a version of the video with the output overlaid on top.
            Defaults to False.
        embeddings_in_json (bool): Whether to save the embeddings matrices in the json of the
            DensePose result. Setting to True can result in large json files. Defaults to False.
        data_dir (Path): Where to find the files listed in filepaths (or where to look if
            filepaths is not provided).
        filepaths (Path, optional): Path to a CSV file with a list of filepaths to process.
        save_dir (Path, optional): Directory for where to save the output files;
            defaults to os.getcwd().
        cache_dir (Path, optional): Path for downloading and saving model weights. Defaults
            to env var `MODEL_CACHE_DIR` or the OS app cache dir.
        weight_download_region (RegionEnum, optional): region where to download weights; should
            be one of RegionEnum (currently 'us', 'asia', and 'eu'). Defaults to 'us'.
    """

    video_loader_config: VideoLoaderConfig
    output_type: DensePoseOutputEnum
    render_output: bool = False
    embeddings_in_json: bool = False
    data_dir: Path
    filepaths: Optional[Path] = None
    save_dir: Optional[Path] = None
    cache_dir: Optional[Path] = None
    weight_download_region: RegionEnum = RegionEnum("us")

    _validate_cache_dir = validator("cache_dir", allow_reuse=True, always=True)(
        validate_model_cache_dir
    )

    def run_model(self):
        """Use this configuration to execute DensePose via the DensePoseManager"""
        # Normalize to an enum member so the comparisons below are uniform.
        if not isinstance(self.output_type, DensePoseOutputEnum):
            self.output_type = DensePoseOutputEnum(self.output_type)

        # BUG FIX: the original compared the (already normalized) Enum member
        # against DensePoseOutputEnum.<member>.value, i.e. a plain string.
        # A plain Enum member never equals its value, so every call fell
        # through to the `raise` branch (and the anatomy CSV below was never
        # written).  Compare member to member instead.
        if self.output_type == DensePoseOutputEnum.segmentation:
            model = MODELS["animals"]
        elif self.output_type == DensePoseOutputEnum.chimp_anatomy:
            model = MODELS["chimps"]
        else:
            raise Exception(f"invalid {self.output_type}")

        output_dir = Path(os.getcwd()) if self.save_dir is None else self.save_dir

        dpm = DensePoseManager(
            model, model_cache_dir=self.cache_dir, download_region=self.weight_download_region
        )

        for fp in tqdm(self.filepaths.filepath, desc="Videos"):
            fp = Path(fp)

            vid_arr, labels = dpm.predict_video(fp, video_loader_config=self.video_loader_config)

            # serialize the labels generated by densepose to json
            output_path = output_dir / f"{fp.stem}_denspose_labels.json"
            dpm.serialize_video_output(
                labels, filename=output_path, write_embeddings=self.embeddings_in_json
            )

            # re-render the video with the densepose labels visualized on top of the video
            if self.render_output:
                output_path = output_dir / f"{fp.stem}_denspose_video{''.join(fp.suffixes)}"
                visualized_video = dpm.visualize_video(
                    vid_arr, labels, output_path=output_path, fps=self.video_loader_config.fps
                )

            # write out the anatomy present in each frame to a csv for later analysis
            # NOTE(review): visualized_video is only bound when render_output is
            # True, so chimp_anatomy without render_output would raise
            # NameError here — confirm the intended coupling of these options.
            if self.output_type == DensePoseOutputEnum.chimp_anatomy:
                output_path = output_dir / f"{fp.stem}_denspose_anatomy.csv"
                dpm.anatomize_video(
                    visualized_video,
                    labels,
                    output_path=output_path,
                    fps=self.video_loader_config.fps,
                )

    _get_filepaths = root_validator(allow_reuse=True, pre=False, skip_on_failure=True)(
        get_filepaths
    )

    @root_validator(skip_on_failure=True)
    def validate_files(cls, values):
        """Ensure filepaths is a deduplicated DataFrame of existing files."""
        # if globbing from data directory, already have valid dataframe
        if isinstance(values["filepaths"], pd.DataFrame):
            files_df = values["filepaths"]
        else:
            # make into dataframe even if only one column for clearer indexing
            files_df = pd.DataFrame(pd.read_csv(values["filepaths"]))

        if "filepath" not in files_df.columns:
            raise ValueError(f"{values['filepaths']} must contain a `filepath` column.")

        # can only contain one row per filepath
        duplicated = files_df.filepath.duplicated()
        if duplicated.sum() > 0:
            logger.warning(
                f"Found {duplicated.sum():,} duplicate row(s) in filepaths csv. Dropping duplicates so predictions will have one row per video."
            )
            files_df = files_df[["filepath"]].drop_duplicates()

        values["filepaths"] = check_files_exist_and_load(
            df=files_df,
            data_dir=values["data_dir"],
            skip_load_validation=True,
        )
        return values
|
6,978 | 210fcb497334ad8bf5433b917fc199c3e22f0f6e | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
p1=float(input('digite o p1:'))
c1=float(input('digite o c1:'))
p2=float(input('digite o p2:'))
c2=float(input('digite o c2:'))
if p1*c1=p2*c2:
print('O')
if pi*c1>p2*c2:
print('-1')
else:
print('1') |
6,979 | 7b2ad0b4eca7b31b314e32ad57d51be82f0eaf61 | from bs4 import BeautifulSoup
from aiounfurl.parsers import oembed
def test_oembed_not_match(oembed_providers):
    """A URL matching no provider yields no oEmbed endpoint."""
    extractor = oembed.OEmbedURLExtractor(oembed_providers)
    assert extractor.get_oembed_url('http://test.com') is None
def test_oembed_founded(oembed_providers):
    """An Instagram post URL resolves to a provider oEmbed endpoint string."""
    extractor = oembed.OEmbedURLExtractor(oembed_providers)
    result = extractor.get_oembed_url('https://www.instagram.com/p/BNHh2YJDdcY/')
    assert isinstance(result, str)
def test_oembed_discovery(oembed_providers, files_dir):
    """The oEmbed endpoint is discoverable from <link> tags in page HTML."""
    markup = (files_dir / 'oembed_json.html').read_text()
    extractor = oembed.OEmbedURLExtractor(oembed_providers)
    discovered = extractor.get_oembed_url_from_html(BeautifulSoup(markup))
    assert isinstance(discovered, str)
def test_oembed_params(oembed_providers):
    """Extra request params are propagated into the generated oEmbed URL."""
    extractor = oembed.OEmbedURLExtractor(
        oembed_providers, params={'maxwidth': 200})
    result = extractor.get_oembed_url('https://www.instagram.com/p/BNHh2YJDdcY/')
    assert isinstance(result, str)
    assert 'maxwidth=200' in result
|
6,980 | 5ee2a51ea981f0feab688d9c571620a95d89a422 | __author__ = 'anderson'
from pyramid.security import Everyone, Allow, ALL_PERMISSIONS
class Root(object):
    """Pyramid root resource carrying the application's ACL."""

    # Access Control List: everyone may view, admins may do anything,
    # ordinary users get the 'comum' permission.
    __acl__ = [
        (Allow, Everyone, 'view'),
        (Allow, 'role_admin', ALL_PERMISSIONS),
        (Allow, 'role_usuario', 'comum'),
    ]

    def __init__(self, request):
        # The request is unused; Pyramid just requires this signature.
        pass
|
6,981 | 36a7d3ed28348e56e54ce4bfa937363a64ee718f | A, B = map(int, input().split())
# From |K - A| = |K - B| with squared distances: K = (B^2 - A^2) / (2A - 2B).
K = (B ** 2 - A ** 2) / (2 * A - 2 * B)
# Only an integral K is a valid answer; report its magnitude.
if K.is_integer():
    print(int(abs(K)))
else:
    print('IMPOSSIBLE')
|
6,982 | 5186400c9b3463d6be19e73de665f8792d8d68c7 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import tornado.web
from sqlalchemy import desc
from sqlalchemy.orm import contains_eager
from main_app.models.post import Post
from main_app.models.thread import PostThread, User2Thread
from main_app.handlers.base_handler import BaseHandler
class API_Comments(BaseHandler):
    """REST handler for adding comments to posts."""

    def post(self):
        '''
        add comment to a post
        example:
            POST /comment
            body: post_id, text
        returns:
            200 - the comment created
            406 - incorrect data
        '''
        arg_comment = self.get_argument('comment')
        # NOTE(review): Tornado's get_argument normally returns a string;
        # indexing it with 'post_id' assumes a dict — confirm BaseHandler
        # overrides get_argument to decode JSON.
        try:
            post_id = int(arg_comment['post_id'])
            text = str(arg_comment['text'])
        # BUG FIX: `except KeyError, ValueError:` is Python-2 syntax (and even
        # there it binds the exception to the name ValueError); in Python 3 it
        # is a SyntaxError.  A tuple catches both exception types.
        except (KeyError, ValueError):
            raise tornado.web.HTTPError(406)
        if not text:
            # the comment text is empty
            raise tornado.web.HTTPError(406)
        # get post + thread + User2Thread
        post = self.db.query(Post).\
            join(
                PostThread, Post.thread_id == PostThread.id
            ).join(
                User2Thread
            ).options(
                contains_eager(PostThread.user2thread)
            ).filter(
                Post.id == post_id
            ).filter(
                # BUG FIX: Column.in_() takes a single iterable, not two
                # positional arguments.
                # NOTE(review): DEFAULT_USER_ID is not imported in this
                # module — confirm where it is defined.
                User2Thread.user_id.in_([DEFAULT_USER_ID, self.current_user]),
            ).filter(
                User2Thread.is_current()
            ).filter(
                User2Thread.allow_add_posts == True
            ).order_by(
                desc(User2Thread.user_id)
            ).first()
        # NOTE(review): `post` is never used and no response is written —
        # the handler appears truncated/incomplete.
6,983 | f93b7f2939bbee9b0cb5402d3e5f5d6c482d37c4 | import pandas as pd
import sweetviz as sv
# Load the raw player data, blank out placeholder values, drop the
# auto-generated "Unnamed" index columns, then save and profile the result.
b = pd.read_csv("final_cricket_players.csv", low_memory=False)
for placeholder in ("-", "[]", "{}"):
    b = b.replace(to_replace=placeholder, value="")
b.drop(b.columns[b.columns.str.contains('unnamed', case=False)],
       axis=1, inplace=True)
b.to_csv('Cleaned_dataset.csv', index=False)
# Generate the sweetviz EDA report (pairwise analysis disabled for speed).
report = sv.analyze(b, pairwise_analysis='off')
report.show_html()
6,984 | 3b613ec75088d6d9a645443df2bbc2f33b80000b | #!/usr/bin/env python
# Creates a new task from a given task definition json and starts on
# all instances in the given cluster name
# USAGE:
# python ecs-tasker.py <task_definition_json_filename> <cluster_name>
# EXAMPLE:
# python ecs-tasker.py ecs-task-stage.json cops-cluster
import boto3
import json
import sys
import time
from pprint import pprint
# Command-line arguments: the task-definition JSON file and the target cluster.
fname = sys.argv[1]
cluster_name = sys.argv[2]
# Deployment constants for the fhid production service.
service_name = 'fhid-service-prod'
target_group_arn = 'arn:aws:elasticloadbalancing:us-east-1:188894168332:targetgroup/tg-fhid-prod/97843ffd9cf6b6c0'
container_name = 'fhid-prod'
container_port = 8090
desired_count = 2
# Seconds to wait after start_task before polling for task state.
sleeptime = 10
role_arn = 'arn:aws:iam::188894168332:role/ecrAccess'
# CloudWatch log-viewer URL template; formatted with the task GUID.
fmt_logs_uri = "https://us-east-1.console.aws.amazon.com/cloudwatch/home?region=us-east-1#logEventViewer:group=awslogs-ecs;stream=awslogs-fhid-prod/fhid-prod/{0}"
# Load the task definition JSON supplied on the command line.
with open(fname,'rb') as f:
    task = json.load(f)
s = boto3.session.Session()
c = s.client('ecs', region_name='us-east-1')
def create_service(task_definition):
    """Create the ECS service for *task_definition*, retrying on failure.

    Retries up to ``max_tries`` times with a 5 second sleep between attempts
    and exits the process with status 1 if the service cannot be created.
    Uses the module-level ECS client ``c`` and deployment constants.
    """
    tries = 0
    max_tries = 3
    while 1:
        if tries > max_tries:
            print("Max tries exceeded, exiting with failure....")
            sys.exit(1)
        # Bug fix: the attempt counter was printed once *before* the loop, so
        # it always read "Attempt 0 of 3"; report it on every iteration.
        print("Attempt %d of %d..." % (tries, max_tries))
        try:
            response = c.create_service(
                cluster=cluster_name,
                serviceName=service_name,
                taskDefinition=task_definition,
                loadBalancers=[
                    {
                        'targetGroupArn': target_group_arn,
                        'containerName': container_name,
                        'containerPort': container_port
                    },
                ],
                desiredCount=desired_count,
                role=role_arn,
                deploymentConfiguration={
                    'maximumPercent': 200,
                    'minimumHealthyPercent': 100
                },
                placementConstraints=[],
                placementStrategy=[{
                    "field": "memory",
                    "type": "binpack"
                }]
            )
            # Bug fix: `print response` is Python 2-only syntax; the call form
            # works on both Python 2 and 3.
            print(response)
            break
        except Exception as e:
            print("Exception creating service: '%s'" % str(e))
            tries += 1
            print("Sleeping...")
            time.sleep(5)
# All container instances currently registered in the cluster.
container_instances = c.list_container_instances(cluster=cluster_name).get('containerInstanceArns')
# Register a new revision of the task definition loaded from the JSON file.
response = c.register_task_definition(containerDefinitions=task.get('containerDefinitions'),
                                      networkMode=task.get('networkMode'),
                                      taskRoleArn=task.get('taskRoleArn'),
                                      family=task.get('family'))
# ARN of the newly registered revision; used to start and identify test tasks.
definition = response.get('taskDefinition').get('taskDefinitionArn')
def task_tester():
    """Start the registered task on one container instance and return the
    ARNs of the tasks created from this task definition.

    Retries (up to ``max_retries``) only when a start failure reports
    RESOURCE:MEMORY; exits the process when too many attempts fail.
    Relies on the module-level ``c``, ``definition``, ``cluster_name``,
    ``container_instances`` and ``sleeptime``.
    """
    retries = 1
    max_retries = 5
    tasks = []
    while 1:
        print("Attempt %d of %d..." % (retries, max_retries))
        if retries > max_retries:
            print("Too many task start failures")
            sys.exit(1)
        # start_task accepts at most 10 instances; only the first is used here.
        tasker = c.start_task(taskDefinition=definition,
                              cluster=cluster_name,
                              containerInstances=[container_instances[0]]) # max of 10 instances
        print("Sleeping %d seconds to wait for tasks to start..." % sleeptime)
        time.sleep(sleeptime)
        print("Number of tasks started: %d" % len(tasker.get('tasks')))
        if len(tasker.get('failures')) > 0:
            print("Number of failed tasks: %d" % len(tasker.get('failures')))
            for failure in tasker.get('failures'):
                print(failure)
                # Only memory-pressure failures are retried; any other failure
                # reason leaves the loop on the next pass with no retry bump.
                if failure.get('reason') == "RESOURCE:MEMORY":
                    retries += 1
        else:
            break
    # Collect the ARNs of cluster tasks that came from our task definition.
    all_tasks = c.list_tasks(cluster=cluster_name)
    all_tasks_arns = all_tasks.get('taskArns')
    for task_arn in c.describe_tasks(cluster=cluster_name, tasks=all_tasks_arns).get('tasks'):
        if task_arn.get('taskDefinitionArn') == definition:
            tasks.append(task_arn.get('taskArn'))
    # NOTE(review): `status` is computed but unused - confirm it can be removed.
    status = c.describe_tasks(cluster=cluster_name,
                              tasks=tasks)
    return tasks
tasks = task_tester()
# check on status of tasks and exit with failure if
# containers don't stay running
count = 0
maxCount = 10
FAILED = False
RUNNING = False
runningCount = 0
task_definition_arn = ""
task_arn = ""
while 1:
    count += 1
    status = c.describe_tasks(cluster=cluster_name,
                              tasks=tasks)
    for task in status.get('tasks'):
        if task.get('lastStatus') == "STOPPED":
            # Container died: dump the full status and a CloudWatch logs link.
            print("CONTAINER FAILED:")
            pprint(status)
            FAILED = True
            try:
                guid = task.get('taskArn').split('/')[-1]
                print("LOGS URL: %s" % fmt_logs_uri.format(guid))
            except:
                # Best-effort: taskArn may be missing/None; the log link is
                # informational only.
                pass
            break
        if task.get('lastStatus') == "PENDING":
            print("Task still PENDING...sleeping")
        else:
            # Task is RUNNING; remember its ARNs for service creation/cleanup.
            pprint(status)
            task_definition_arn = task.get('taskDefinitionArn')
            task_arn = task.get("taskArn")
            RUNNING = True
            break
    if count > maxCount:
        print("Too many iterations, exiting status failed.")
        FAILED = True
    if FAILED:
        break
    if RUNNING:
        runningCount += 1
        # After ~3 healthy polls, promote the definition to a real service and
        # stop the temporary smoke-test task.
        if runningCount > 3:
            create_service(task_definition_arn)
            c.stop_task(cluster=cluster_name,
                        task=task_arn,
                        reason="Temporary task for pipeline build")
            break
    time.sleep(5)
# Exit code reflects whether the smoke-test container stayed running.
if FAILED:
    sys.exit(1)
else:
    sys.exit(0)
|
6,985 | 4ae24d1e39bdcde3313a8a0c8029a331864ba40e | from tkinter import *
# Minimal tkinter demo: a red frame holding a label.
janela = Tk()
janela.title("Teste de frame")
janela.geometry("800x600")
# Bug fix: `Frame(...).grid(...)` returns None (grid() has no return value),
# so `frame` was None and the Label silently attached to the root window
# instead of the frame. Create the widget first, then place it.
frame = Frame(janela, width=300, height=300, bg='red')
frame.grid(row=0, column=0)
# Frames exist so that labels and buttons can be placed inside a specific
# area: pass the frame as the first (parent) argument of the child widget.
Label(frame, text='lsdakçasd').grid(row=0, column=0)
janela.mainloop()
6,986 | 388772386f25d6c2f9cc8778b7ce1b2ad0920851 | # Generated by Django 2.2 on 2021-01-31 14:11
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: drops Payment.PA_id, adds a required
    # Payment.buyer CharField, and gives Payment.PA_type a 'credit' default.

    dependencies = [
        ('app', '0004_product_pr_number'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='payment',
            name='PA_id',
        ),
        migrations.AddField(
            model_name='payment',
            name='buyer',
            # default=0 only backfills existing rows; preserve_default=False
            # removes it from the field definition afterwards.
            field=models.CharField(default=0, max_length=32),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='payment',
            name='PA_type',
            field=models.CharField(default='credit', max_length=32),
        ),
    ]
|
6,987 | b47f15a79f7a82304c2be6af00a5854ff0f6ad3e | import csv
import json
from urllib import request
from urllib.error import HTTPError
from urllib.parse import urljoin, urlparse, quote_plus
from optparse import OptionParser
HEADER = ["id", "module", "channel", "type", "value", "datetime"]
def parse_options():
    """Parse command-line options: API host, token, and recursive paging flag."""
    opt_parser = OptionParser()
    option_specs = (
        ("-H", "--host", {}),
        ("-t", "--token", {}),
        ("-r", "--recursive", {"action": "store_true", "default": False}),
    )
    for short_flag, long_flag, extra in option_specs:
        opt_parser.add_option(short_flag, long_flag, **extra)
    return opt_parser.parse_args()
def write_csv(url, recursive=False, writer=None, token=""):
    """Fetch *url* and write its messages to *writer* as CSV rows.

    When *recursive* is true, follow the pagination cursor until exhausted.
    Robustness fix: the original recursed once per page, so a long paginated
    result could exceed Python's recursion limit; this iterative version
    behaves identically page-for-page.
    """
    response = fetch(url)
    write_rows(writer, response)
    while recursive:
        cursor = next_cursor(response)
        if cursor is None:
            break
        print(f"next cursor exists...{cursor}")
        # Rebuild the page URL on the same scheme/host/path with the cursor.
        ret = urlparse(url)
        url = f"{ret.scheme}://{ret.netloc}{ret.path}?cursor={quote_plus(cursor)}&token={token}"
        response = fetch(url)
        write_rows(writer, response)
def fetch(url):
    """GET *url* and return its JSON-decoded body.

    The charset from the Content-Type header is used for decoding,
    defaulting to UTF-8.
    """
    print(f"url...{url}\n")
    # Resource fix: close the HTTP response even if read/decoding fails
    # (the original never closed it).
    with request.urlopen(url) as url_data:
        data = url_data.read()
        encoding = url_data.info().get_content_charset("utf-8")
    return json.loads(data.decode(encoding))
def write_rows(writer, response):
    """Write one CSV row per message in *response*, columns ordered by HEADER."""
    for message in response["results"]:
        writer.writerow([message[column] for column in HEADER])
def next_cursor(response):
    """Return the pagination cursor from *response*, or None when absent.

    Robustness fix: the caller (write_csv) treats a missing cursor as "no
    more pages", so a response without a meta/cursor entry must yield None
    instead of raising KeyError.
    """
    return response.get("meta", {}).get("cursor")
if __name__ == "__main__":
    opt, args = parse_options()
    # First-page URL: a custom host if given, otherwise the public API.
    if opt.host is not None:
        url = urljoin(f"https://{opt.host}",
                      f"datastore/v1/channels?token={opt.token}")
    else:
        url = f"https://api.sakura.io/datastore/v1/channels?token={opt.token}"
    f = open('./datastore.csv', 'w')
    writer = csv.writer(f, lineterminator="\n")
    # write header
    writer.writerow(HEADER)
    write_csv(url, writer=writer, recursive=opt.recursive, token=opt.token)
    f.close()
class Config(object):
    # Base Flask-style configuration; environment subclasses override below.
    DEBUG = False
    TESTING = False
class ProductionConfig(Config):
    # Production: debugging stays off (inherited); CORS locked to prod origin.
    CORS_ALLOWED_ORIGINS = "productionexample.com"
class DevelopmentConfig(Config):
    # Development: verbose errors and the development CORS origin.
    DEBUG = True
    CORS_ALLOWED_ORIGINS = "developmentexample.com"
class TestingConfig(Config):
    # Test runs set TESTING so extensions can adapt their behaviour.
    TESTING = True
|
6,989 | a38a5010c9edbed0929da225b4288396bb0d814e | #
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
__all__ = ['lenet_mnist']
class Lenet(nn.Module):
    """LeNet-style MNIST classifier that perturbs fc2's weights with Gaussian
    noise on every forward pass (sigma scaled by the weights' own std), to
    study noise robustness.
    """

    def __init__(self):
        super(Lenet, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(20, 50, 5)
        self.fc1 = nn.Linear(800, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        """Forward a batch of shape (N, 1, 28, 28); returns (N, 10) activations.

        Note: the weight noise is written back into self.fc2 on every call,
        so perturbations accumulate across forward passes.
        """
        layer_w = self.fc2.weight
        sigma = layer_w.std().data.cpu().numpy()
        layer_w_numpy = layer_w.data.cpu().numpy()
        scale = 0.17
        noise = np.random.normal(0, scale * sigma, layer_w.size())
        w_noise = np.add(layer_w_numpy, noise)
        w_noise_tensor = torch.tensor(w_noise)
        # Bug fix: keep the noisy weights on the layer's own device instead
        # of hard-coding 'cuda', so the model also runs on CPU-only machines.
        w_noise_tensor = w_noise_tensor.to(layer_w.device)
        self.fc2.weight = torch.nn.Parameter(w_noise_tensor.float())
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 800)  # 50 channels * 4 * 4 spatial = 800 features
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return x
def lenet_mnist():
    """Factory: return a freshly initialised Lenet model for MNIST."""
    return Lenet()
|
6,990 | 9dd59fee46bd4bec87cc8c40099110b483ad0496 | import ambulance_game as abg
import numpy as np
import sympy as sym
from sympy.abc import a, b, c, d, e, f, g, h, i, j
def get_symbolic_pi(num_of_servers, threshold, system_capacity, buffer_capacity):
    """Solve the symbolic steady-state equations pi @ Q = 0 with sum(pi) = 1.

    Returns a sympy solution mapping the symbols a..g to the state
    probabilities, in the state order of the transition matrix.
    """
    Q_sym = abg.markov.get_symbolic_transition_matrix(
        num_of_servers=num_of_servers,
        threshold=threshold,
        system_capacity=system_capacity,
        buffer_capacity=buffer_capacity,
    )
    dimension = Q_sym.shape[0]
    # Only the symbols a..g are handed to the solver, so at most 7 states
    # are supported.
    # NOTE(review): the message says "Capacity of 6" but the limit checked
    # is 7 states - confirm the intended wording.
    if dimension > 7:
        return "Capacity of 6 exceeded"
    # Replace the last (redundant) balance equation with normalisation.
    M_sym = sym.Matrix([Q_sym.transpose()[:-1, :], sym.ones(1, dimension)])
    b_sym = sym.Matrix([sym.zeros(dimension - 1, 1), [1]])
    system = M_sym.col_insert(dimension, b_sym)
    sol = sym.solve_linear_system_LU(system, [a, b, c, d, e, f, g])
    return sol
def get_symbolic_state_probabilities_1222():
    """Symbolic state probabilities and recursive ratios for the model with
    1 server, threshold 2, system capacity 2, buffer capacity 2.

    Returns the list of state probabilities (in build_states order) and a
    matrix of ratios between probabilities of neighbouring states.
    """
    num_of_servers = 1
    threshold = 2
    system_capacity = 2
    buffer_capacity = 2
    sym_pi_1222 = get_symbolic_pi(
        num_of_servers=num_of_servers,
        threshold=threshold,
        system_capacity=system_capacity,
        buffer_capacity=buffer_capacity,
    )
    all_states_1222 = abg.markov.build_states(
        threshold=threshold,
        system_capacity=system_capacity,
        buffer_capacity=buffer_capacity,
    )
    sym_state_probs_1222 = [0 for _ in range(len(all_states_1222))]

    # NOTE(review): with threshold=2 the buffer should only fill once the
    # system holds 2 individuals, so the state labels below (e.g. "(1,1)")
    # look inconsistent with build_states' ordering - verify against
    # abg.markov.build_states before relying on these comments.
    sym_state_probs_1222[0] = sym.factor(sym_pi_1222[a])  # (0,0)
    sym_state_probs_1222[1] = sym.factor(sym_pi_1222[b])  # (0,1)
    sym_state_probs_1222[2] = sym.factor(sym_pi_1222[c])  # (1,1)
    sym_state_probs_1222[3] = sym.factor(sym_pi_1222[d])  # (0,2)
    sym_state_probs_1222[4] = sym.factor(sym_pi_1222[e])  # (1,2)

    sym_state_recursive_ratios_1222 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_1222[0, 0] = 1
    sym_state_recursive_ratios_1222[0, 1] = sym.factor(
        sym_state_probs_1222[1] / sym_state_probs_1222[0]
    )  # (0,0) -> (0,1)
    sym_state_recursive_ratios_1222[0, 2] = sym.factor(
        sym_state_probs_1222[2] / sym_state_probs_1222[1]
    )  # (0,1) -> (1,1)
    sym_state_recursive_ratios_1222[1, 2] = sym.factor(
        sym_state_probs_1222[3] / sym_state_probs_1222[2]
    )  # (0,1) -> (0,2)
    sym_state_recursive_ratios_1222[2, 2] = sym.factor(
        sym_state_probs_1222[4] / sym_state_probs_1222[3]
    )  # (0,2) -> (1,2)

    return sym_state_probs_1222, sym_state_recursive_ratios_1222
def get_symbolic_state_probabilities_1121():
    """Symbolic state probabilities and recursive ratios for the model with
    1 server, threshold 1, system capacity 2, buffer capacity 1.

    Returns four objects: the state probabilities (build_states order), the
    ratio matrix following the "down then right" path, the variant following
    the "right" path, and the ratios of every state relative to P(0,0).
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 2
    buffer_capacity = 1
    all_states_1121 = abg.markov.build_states(
        threshold=threshold,
        system_capacity=system_capacity,
        buffer_capacity=buffer_capacity,
    )
    sym_pi_1121 = get_symbolic_pi(
        num_of_servers=num_of_servers,
        threshold=threshold,
        system_capacity=system_capacity,
        buffer_capacity=buffer_capacity,
    )
    sym_state_probs_1121 = [0 for _ in range(len(all_states_1121))]

    sym_state_probs_1121[0] = sym.factor(sym_pi_1121[a])  # (0,0)
    sym_state_probs_1121[1] = sym.factor(sym_pi_1121[b])  # (0,1)
    sym_state_probs_1121[2] = sym.factor(sym_pi_1121[c])  # (1,1)
    sym_state_probs_1121[3] = sym.factor(sym_pi_1121[d])  # (0,2)
    sym_state_probs_1121[4] = sym.factor(sym_pi_1121[e])  # (1,2)

    # Ratios between adjacent states along the "down then right" path.
    sym_state_recursive_ratios_1121 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_1121[0, 0] = 1
    sym_state_recursive_ratios_1121[0, 1] = sym.factor(
        sym_state_probs_1121[1] / sym_state_probs_1121[0]
    )  # (0,0) -> (0,1)
    sym_state_recursive_ratios_1121[1, 1] = sym.factor(
        sym_state_probs_1121[2] / sym_state_probs_1121[1]
    )  # (0,1) -> (1,1)
    sym_state_recursive_ratios_1121[0, 2] = sym.factor(
        sym_state_probs_1121[3] / sym_state_probs_1121[1]
    )  # (0,1) -> (0,2)
    sym_state_recursive_ratios_1121[1, 2] = sym.factor(
        sym_state_probs_1121[4] / sym_state_probs_1121[3]
    )  # (0,2) -> (1,2)

    # Variant where the final ratio follows the horizontal ("right") arc.
    sym_state_recursive_ratios_right_1121 = sym_state_recursive_ratios_1121.copy()
    sym_state_recursive_ratios_right_1121[1, 2] = sym.factor(
        sym_state_probs_1121[4] / sym_state_probs_1121[2]
    )  # (1,1) -> (1,2)

    # Every state expressed relative to the empty state P(0,0).
    sym_state_recursive_ratios_P0_1121 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_P0_1121[0, 0] = 1
    sym_state_recursive_ratios_P0_1121[0, 1] = sym.factor(
        sym_state_probs_1121[1] / sym_state_probs_1121[0]
    )  # (0,0) -> (0,1)
    sym_state_recursive_ratios_P0_1121[1, 1] = sym.factor(
        sym_state_probs_1121[2] / sym_state_probs_1121[0]
    )  # (0,0) -> (1,1)
    sym_state_recursive_ratios_P0_1121[0, 2] = sym.factor(
        sym_state_probs_1121[3] / sym_state_probs_1121[0]
    )  # (0,0) -> (0,2)
    sym_state_recursive_ratios_P0_1121[1, 2] = sym.factor(
        sym_state_probs_1121[4] / sym_state_probs_1121[0]
    )  # (0,0) -> (1,2)

    return (
        sym_state_probs_1121,
        sym_state_recursive_ratios_1121,
        sym_state_recursive_ratios_right_1121,
        sym_state_recursive_ratios_P0_1121,
    )
def get_symbolic_state_probabilities_1122():
    """Closed-form symbolic state probabilities and recursive ratios for the
    model with 1 server, threshold 1, system capacity 2, buffer capacity 2.

    Unlike the solver-based variants, the unnormalised probabilities are
    written out explicitly in Lambda, lambda_1, lambda_2 and mu, then
    normalised by their sum. Returns the probabilities plus three ratio
    matrices ("down then right" path, "right" path, and relative to P(0,0)).
    """
    # num_of_servers = 1
    threshold = 1
    system_capacity = 2
    buffer_capacity = 2
    all_states_1122 = abg.markov.build_states(
        threshold=threshold,
        system_capacity=system_capacity,
        buffer_capacity=buffer_capacity,
    )
    sym_state_probs_1122 = [0 for _ in range(len(all_states_1122))]

    sym_Lambda = sym.symbols("Lambda")
    sym_lambda_1 = sym.symbols("lambda_1")
    sym_lambda_2 = sym.symbols("lambda_2")
    sym_mu = sym.symbols("mu")

    # Unnormalised probabilities, one polynomial per state (u, v).
    sym_state_probs_1122[0] = (
        (sym_mu**6)
        + 2 * (sym_lambda_2) * (sym_mu**5)
        + (sym_lambda_2**2) * (sym_mu**4)
    )  # (0,0)
    sym_state_probs_1122[1] = (sym_Lambda * sym_mu**3) * (
        sym_mu**2 + 2 * sym_mu * sym_lambda_2 + sym_lambda_2**2
    )  # (0,1)
    sym_state_probs_1122[2] = (sym_Lambda * sym_lambda_2 * sym_mu**2) * (
        sym_lambda_2**2
        + sym_lambda_2 * sym_lambda_1
        + sym_lambda_1 * sym_mu
        + sym_mu**2
        + 2 * sym_lambda_2 * sym_mu
    )  # (1,1)
    sym_state_probs_1122[3] = (sym_Lambda * sym_lambda_2**2 * sym_mu) * (
        sym_lambda_2**2
        + 2 * sym_lambda_1 * sym_lambda_2
        + 3 * sym_lambda_1 * sym_mu
        + sym_mu**2
        + 2 * sym_lambda_2 * sym_mu
        + sym_lambda_1**2
    )  # (2,1)
    sym_state_probs_1122[4] = (sym_Lambda * sym_lambda_1 * sym_mu**3) * (
        sym_lambda_2 + sym_mu
    )  # (0,2)
    sym_state_probs_1122[5] = (
        sym_Lambda * sym_lambda_1 * sym_lambda_2 * sym_mu**2
    ) * (
        2 * sym_mu + sym_lambda_1 + sym_lambda_2
    )  # (1,2)
    sym_state_probs_1122[6] = (sym_Lambda * sym_lambda_1 * sym_lambda_2**2) * (
        sym_lambda_1**2
        + 4 * sym_lambda_1 * sym_mu
        + 2 * sym_lambda_1 * sym_lambda_2
        + 3 * sym_mu**2
        + sym_lambda_2**2
        + 3 * sym_lambda_2 * sym_mu
    )  # (2,2)

    # Normalise so the probabilities sum to one.
    total_1122 = np.sum(sym_state_probs_1122)
    sym_state_probs_1122 = [i / total_1122 for i in sym_state_probs_1122]

    # Ratios between adjacent states along the "down then right" path.
    sym_state_recursive_ratios_1122 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_1122[0, 0] = 1
    sym_state_recursive_ratios_1122[0, 1] = sym.factor(
        sym_state_probs_1122[1] / sym_state_probs_1122[0]
    )  # (0,0) -> (0,1)
    sym_state_recursive_ratios_1122[1, 1] = sym.factor(
        sym_state_probs_1122[2] / sym_state_probs_1122[1]
    )  # (0,1) -> (1,1)
    sym_state_recursive_ratios_1122[2, 1] = sym.factor(
        sym_state_probs_1122[3] / sym_state_probs_1122[2]
    )  # (1,1) -> (2,1)
    sym_state_recursive_ratios_1122[0, 2] = sym.factor(
        sym_state_probs_1122[4] / sym_state_probs_1122[1]
    )  # (0,1) -> (0,2)
    sym_state_recursive_ratios_1122[1, 2] = sym.factor(
        sym_state_probs_1122[5] / sym_state_probs_1122[4]
    )  # (0,2) -> (1,2)
    sym_state_recursive_ratios_1122[2, 2] = sym.factor(
        sym_state_probs_1122[6] / sym_state_probs_1122[5]
    )  # (1,2) -> (2,2)

    # Variant where the last column's ratios follow the horizontal arcs.
    sym_state_recursive_ratios_right_1122 = sym_state_recursive_ratios_1122.copy()
    sym_state_recursive_ratios_right_1122[1, 2] = sym.factor(
        sym_state_probs_1122[5] / sym_state_probs_1122[2]
    )  # (1,1) -> (1,2)
    sym_state_recursive_ratios_right_1122[2, 2] = sym.factor(
        sym_state_probs_1122[6] / sym_state_probs_1122[3]
    )  # (2,1) -> (2,2)

    # Every state expressed relative to the empty state P(0,0).
    sym_state_recursive_ratios_P0_1122 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_P0_1122[0, 0] = 1
    sym_state_recursive_ratios_P0_1122[0, 1] = sym.factor(
        sym_state_probs_1122[1] / sym_state_probs_1122[0]
    )  # (0,0) -> (0,1)
    sym_state_recursive_ratios_P0_1122[1, 1] = sym.factor(
        sym_state_probs_1122[2] / sym_state_probs_1122[0]
    )  # (0,0) -> (1,1)
    sym_state_recursive_ratios_P0_1122[2, 1] = sym.factor(
        sym_state_probs_1122[3] / sym_state_probs_1122[0]
    )  # (0,0) -> (2,1)
    sym_state_recursive_ratios_P0_1122[0, 2] = sym.factor(
        sym_state_probs_1122[4] / sym_state_probs_1122[0]
    )  # (0,0) -> (0,2)
    sym_state_recursive_ratios_P0_1122[1, 2] = sym.factor(
        sym_state_probs_1122[5] / sym_state_probs_1122[0]
    )  # (0,0) -> (1,2)
    sym_state_recursive_ratios_P0_1122[2, 2] = sym.factor(
        sym_state_probs_1122[6] / sym_state_probs_1122[0]
    )  # (0,0) -> (2,2)

    return (
        sym_state_probs_1122,
        sym_state_recursive_ratios_1122,
        sym_state_recursive_ratios_right_1122,
        sym_state_recursive_ratios_P0_1122,
    )
def get_symbolic_state_probabilities_1123():
    """Symbolic state probabilities and recursive ratios for the model with
    1 server, threshold 1, system capacity 2, buffer capacity 3.

    Builds the symbolic balance equations from the transition matrix, solves
    them with sympy, and returns the probabilities plus three ratio matrices
    ("down then right" path, "right" path, and relative to P(0,0)).
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 2
    buffer_capacity = 3

    Q_sym_1123 = abg.markov.get_symbolic_transition_matrix(
        num_of_servers, threshold, system_capacity, buffer_capacity
    )

    p00, p01, p11, p21, p31, p02, p12, p22, p32 = sym.symbols(
        "p00, p01, p11, p21, p31, p02, p12, p22, p32"
    )
    pi_1123 = sym.Matrix([p00, p01, p11, p21, p31, p02, p12, p22, p32])
    dimension_1123 = Q_sym_1123.shape[0]

    # Replace the last balance equation with the normalisation constraint.
    M_sym_1123 = sym.Matrix(
        [Q_sym_1123.transpose()[:-1, :], sym.ones(1, dimension_1123)]
    )
    sym_diff_equations_1123 = M_sym_1123 @ pi_1123

    b_sym_1123 = sym.Matrix([sym.zeros(dimension_1123 - 1, 1), [1]])

    eq0_1123 = sym.Eq(sym_diff_equations_1123[0], b_sym_1123[0])
    eq1_1123 = sym.Eq(sym_diff_equations_1123[1], b_sym_1123[1])
    eq2_1123 = sym.Eq(sym_diff_equations_1123[2], b_sym_1123[2])
    eq3_1123 = sym.Eq(sym_diff_equations_1123[3], b_sym_1123[3])
    eq4_1123 = sym.Eq(sym_diff_equations_1123[4], b_sym_1123[4])
    eq5_1123 = sym.Eq(sym_diff_equations_1123[5], b_sym_1123[5])
    eq6_1123 = sym.Eq(sym_diff_equations_1123[6], b_sym_1123[6])
    eq7_1123 = sym.Eq(sym_diff_equations_1123[7], b_sym_1123[7])
    eq8_1123 = sym.Eq(sym_diff_equations_1123[8], b_sym_1123[8])

    sym_state_probs_1123 = sym.solve(
        [
            eq0_1123,
            eq1_1123,
            eq2_1123,
            eq3_1123,
            eq4_1123,
            eq5_1123,
            eq6_1123,
            eq7_1123,
            eq8_1123,
        ],
        (p00, p01, p11, p21, p31, p02, p12, p22, p32),
    )

    # Ratios between adjacent states along the "down then right" path.
    sym_state_recursive_ratios_1123 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_1123[0, 0] = 1
    sym_state_recursive_ratios_1123[0, 1] = sym.factor(
        sym_state_probs_1123[p01] / sym_state_probs_1123[p00]
    )  # (0,0) -> (0,1)
    sym_state_recursive_ratios_1123[1, 1] = sym.factor(
        sym_state_probs_1123[p11] / sym_state_probs_1123[p01]
    )  # (0,1) -> (1,1)
    sym_state_recursive_ratios_1123[2, 1] = sym.factor(
        sym_state_probs_1123[p21] / sym_state_probs_1123[p11]
    )  # (1,1) -> (2,1)
    sym_state_recursive_ratios_1123[3, 1] = sym.factor(
        sym_state_probs_1123[p31] / sym_state_probs_1123[p21]
    )  # (2,1) -> (3,1)
    sym_state_recursive_ratios_1123[0, 2] = sym.factor(
        sym_state_probs_1123[p02] / sym_state_probs_1123[p01]
    )  # (0,1) -> (0,2)
    sym_state_recursive_ratios_1123[1, 2] = sym.factor(
        sym_state_probs_1123[p12] / sym_state_probs_1123[p02]
    )  # (0,2) -> (1,2)
    sym_state_recursive_ratios_1123[2, 2] = sym.factor(
        sym_state_probs_1123[p22] / sym_state_probs_1123[p12]
    )  # (1,2) -> (2,2)
    # Bug fix: this ratio (p32/p22, the (2,2) -> (3,2) arc) was written into
    # [2, 2] a second time, overwriting the (1,2) -> (2,2) entry above and
    # leaving [3, 2] permanently zero.
    sym_state_recursive_ratios_1123[3, 2] = sym.factor(
        sym_state_probs_1123[p32] / sym_state_probs_1123[p22]
    )  # (2,2) -> (3,2)

    # Variant where the last column's ratios follow the horizontal arcs.
    sym_state_recursive_ratios_right_1123 = sym_state_recursive_ratios_1123.copy()
    sym_state_recursive_ratios_right_1123[1, 2] = sym.factor(
        sym_state_probs_1123[p12] / sym_state_probs_1123[p11]
    )  # (1,1) -> (1,2)
    sym_state_recursive_ratios_right_1123[2, 2] = sym.factor(
        sym_state_probs_1123[p22] / sym_state_probs_1123[p21]
    )  # (2,1) -> (2,2)
    sym_state_recursive_ratios_right_1123[3, 2] = sym.factor(
        sym_state_probs_1123[p32] / sym_state_probs_1123[p22]
    )  # (2,2) -> (3,2)

    # Every state expressed relative to the empty state P(0,0).
    sym_state_recursive_ratios_P0_1123 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_P0_1123[0, 0] = 1
    sym_state_recursive_ratios_P0_1123[0, 1] = sym.factor(
        sym_state_probs_1123[p01] / sym_state_probs_1123[p00]
    )  # (0,0) -> (0,1)
    sym_state_recursive_ratios_P0_1123[1, 1] = sym.factor(
        sym_state_probs_1123[p11] / sym_state_probs_1123[p00]
    )  # (0,0) -> (1,1)
    sym_state_recursive_ratios_P0_1123[2, 1] = sym.factor(
        sym_state_probs_1123[p21] / sym_state_probs_1123[p00]
    )  # (0,0) -> (2,1)
    sym_state_recursive_ratios_P0_1123[3, 1] = sym.factor(
        sym_state_probs_1123[p31] / sym_state_probs_1123[p00]
    )  # (0,0) -> (3,1)
    sym_state_recursive_ratios_P0_1123[0, 2] = sym.factor(
        sym_state_probs_1123[p02] / sym_state_probs_1123[p00]
    )  # (0,0) -> (0,2)
    sym_state_recursive_ratios_P0_1123[1, 2] = sym.factor(
        sym_state_probs_1123[p12] / sym_state_probs_1123[p00]
    )  # (0,0) -> (1,2)
    sym_state_recursive_ratios_P0_1123[2, 2] = sym.factor(
        sym_state_probs_1123[p22] / sym_state_probs_1123[p00]
    )  # (0,0) -> (2,2)
    sym_state_recursive_ratios_P0_1123[3, 2] = sym.factor(
        sym_state_probs_1123[p32] / sym_state_probs_1123[p00]
    )  # (0,0) -> (3,2)

    return (
        sym_state_probs_1123,
        sym_state_recursive_ratios_1123,
        sym_state_recursive_ratios_right_1123,
        sym_state_recursive_ratios_P0_1123,
    )
def get_symbolic_state_probabilities_1341():
    """Closed-form symbolic state probabilities and recursive ratios for the
    model with 1 server, threshold 3, system capacity 4, buffer capacity 1.

    The unnormalised probabilities are written out explicitly in Lambda,
    lambda_1, lambda_2 and mu, then normalised by their sum. Returns the
    probabilities plus three ratio matrices ("down then right" path,
    "right" path, and relative to P(0,0)).
    """
    # num_of_servers = 1
    threshold = 3
    system_capacity = 4
    buffer_capacity = 1
    all_states_1341 = abg.markov.build_states(
        threshold=threshold,
        system_capacity=system_capacity,
        buffer_capacity=buffer_capacity,
    )
    sym_state_probs_1341 = [0 for _ in range(len(all_states_1341))]

    sym_Lambda = sym.symbols("Lambda")
    sym_lambda_1 = sym.symbols("lambda_1")
    sym_lambda_2 = sym.symbols("lambda_2")
    sym_mu = sym.symbols("mu")

    # Unnormalised probabilities, one polynomial per state (u, v).
    sym_state_probs_1341[0] = (sym_lambda_2) * (sym_mu**5) + (sym_mu**6)  # (0,0)
    sym_state_probs_1341[1] = sym_Lambda * sym_lambda_2 * (sym_mu**4) + sym_Lambda * (
        sym_mu**5
    )  # (0,1)
    sym_state_probs_1341[2] = (sym_Lambda**2) * sym_lambda_2 * (sym_mu**3) + (
        sym_Lambda**2
    ) * (
        sym_mu**4
    )  # (0,2)
    sym_state_probs_1341[3] = (sym_Lambda**3) * sym_lambda_2 * (sym_mu**2) + (
        sym_Lambda**3
    ) * (
        sym_mu**3
    )  # (0,3)
    sym_state_probs_1341[4] = (
        (sym_Lambda**3) * sym_lambda_1 * sym_lambda_2 * sym_mu
        + (sym_Lambda**3) * sym_lambda_2 * (sym_mu**2)
        + (sym_Lambda**3) * sym_lambda_2 * sym_lambda_2 * sym_mu
    )  # (1,3)
    sym_state_probs_1341[5] = (sym_Lambda**3) * sym_lambda_1 * (sym_mu**2)  # (0,4)
    sym_state_probs_1341[6] = (
        (sym_Lambda**3) * (sym_lambda_1**2) * sym_lambda_2
        + (sym_Lambda**3) * sym_lambda_1 * (sym_lambda_2**2)
        + 2 * (sym_Lambda**3) * sym_lambda_1 * sym_lambda_2 * sym_mu
    )  # (1,4)

    # Normalise so the probabilities sum to one.
    total_1341 = np.sum(sym_state_probs_1341)
    sym_state_probs_1341 = [i / total_1341 for i in sym_state_probs_1341]

    # Ratios between adjacent states along the "down then right" path.
    sym_state_recursive_ratios_1341 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_1341[0, 0] = 1
    sym_state_recursive_ratios_1341[0, 1] = sym.factor(
        sym_state_probs_1341[1] / sym_state_probs_1341[0]
    )  # (0,0) -> (0,1)
    sym_state_recursive_ratios_1341[0, 2] = sym.factor(
        sym_state_probs_1341[2] / sym_state_probs_1341[1]
    )  # (0,1) -> (0,2)
    sym_state_recursive_ratios_1341[0, 3] = sym.factor(
        sym_state_probs_1341[3] / sym_state_probs_1341[2]
    )  # (0,2) -> (0,3)
    sym_state_recursive_ratios_1341[0, 4] = sym.factor(
        sym_state_probs_1341[5] / sym_state_probs_1341[3]
    )  # (0,3) -> (0,4)
    sym_state_recursive_ratios_1341[1, 3] = sym.factor(
        sym_state_probs_1341[4] / sym_state_probs_1341[3]
    )  # (0,3) -> (1,3)
    sym_state_recursive_ratios_1341[1, 4] = sym.factor(
        sym_state_probs_1341[6] / sym_state_probs_1341[5]
    )  # (0,4) -> (1,4)

    # Variant where the final ratio follows the horizontal ("right") arc.
    sym_state_recursive_ratios_right_1341 = sym_state_recursive_ratios_1341.copy()
    sym_state_recursive_ratios_right_1341[1, 4] = sym.factor(
        sym_state_probs_1341[6] / sym_state_probs_1341[4]
    )  # (1,3) -> (1,4)

    # Every state expressed relative to the empty state P(0,0).
    sym_state_recursive_ratios_P0_1341 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_P0_1341[0, 0] = 1
    sym_state_recursive_ratios_P0_1341[0, 1] = sym.factor(
        sym_state_probs_1341[1] / sym_state_probs_1341[0]
    )  # (0,0) -> (0,1)
    sym_state_recursive_ratios_P0_1341[0, 2] = sym.factor(
        sym_state_probs_1341[2] / sym_state_probs_1341[0]
    )  # (0,0) -> (0,2)
    sym_state_recursive_ratios_P0_1341[0, 3] = sym.factor(
        sym_state_probs_1341[3] / sym_state_probs_1341[0]
    )  # (0,0) -> (0,3)
    sym_state_recursive_ratios_P0_1341[1, 3] = sym.factor(
        sym_state_probs_1341[4] / sym_state_probs_1341[0]
    )  # (0,0) -> (1,3)
    sym_state_recursive_ratios_P0_1341[0, 4] = sym.factor(
        sym_state_probs_1341[5] / sym_state_probs_1341[0]
    )  # (0,0) -> (0,4)
    sym_state_recursive_ratios_P0_1341[1, 4] = sym.factor(
        sym_state_probs_1341[6] / sym_state_probs_1341[0]
    )  # (0,0) -> (1,4)

    return (
        sym_state_probs_1341,
        sym_state_recursive_ratios_1341,
        sym_state_recursive_ratios_right_1341,
        sym_state_recursive_ratios_P0_1341,
    )
def get_symbolic_state_probabilities_1131():
    """Closed-form symbolic state probabilities and recursive ratios for the
    model with 1 server, threshold 1, system capacity 3, buffer capacity 1.

    Numerators are written out explicitly in Lambda, lambda_1, lambda_2 and
    mu; the common denominator is given as one expanded polynomial.
    Returns the probabilities plus three ratio matrices ("down then right"
    path, "right" path, and relative to P(0,0)).
    """
    # num_of_servers = 1
    threshold = 1
    system_capacity = 3
    buffer_capacity = 1

    all_states_1131 = abg.markov.build_states(
        threshold=threshold,
        system_capacity=system_capacity,
        buffer_capacity=buffer_capacity,
    )
    sym_state_probs_1131 = [0 for _ in range(len(all_states_1131))]

    sym_Lambda = sym.symbols("Lambda")
    sym_lambda_1 = sym.symbols("lambda_1")
    sym_lambda_2 = sym.symbols("lambda_2")
    sym_mu = sym.symbols("mu")

    # (0,0)
    sym_state_probs_1131[0] = (
        (sym_mu**6)
        + 2 * (sym_lambda_2 * (sym_mu**5))
        + ((sym_lambda_2**2) * (sym_mu**4))
        + (sym_lambda_1 * sym_lambda_2 * (sym_mu**4))
    )
    # (0,1)
    sym_state_probs_1131[1] = sym_state_probs_1131[0] * sym_Lambda / sym_mu
    # (1,1)
    sym_state_probs_1131[2] = (
        (sym_Lambda * (sym_lambda_1**2) * sym_lambda_2 * (sym_mu**2))
        + (sym_Lambda * sym_lambda_2 * sym_lambda_1 * (sym_mu**3))
        + 2 * (sym_Lambda * sym_lambda_1 * (sym_lambda_2**2) * (sym_mu**2))
        + 2 * (sym_Lambda * (sym_lambda_2**2) * (sym_mu**3))
        + (sym_Lambda * (sym_lambda_2**3) * (sym_mu**2))
        + (sym_Lambda * sym_lambda_2 * (sym_mu**4))
    )
    # (0,2)
    sym_state_probs_1131[3] = (
        sym_Lambda * sym_lambda_1 * sym_mu**3 * (sym_lambda_2 + sym_mu)
    )
    # (1,2)
    sym_state_probs_1131[4] = (sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu) * (
        (sym_lambda_2**2)
        + 2 * sym_lambda_2 * sym_lambda_1
        + 3 * sym_lambda_2 * sym_mu
        + (sym_lambda_1**2)
        + 2 * sym_lambda_1 * sym_mu
        + 2 * (sym_mu**2)
    )
    # (0,3)
    sym_state_probs_1131[5] = sym_Lambda * (sym_lambda_1**2) * (sym_mu**3)
    # (1,3)
    sym_state_probs_1131[6] = (sym_Lambda * sym_lambda_2 * (sym_lambda_1**2)) * (
        (sym_lambda_2**2)
        + 2 * sym_lambda_2 * sym_lambda_1
        + 3 * sym_lambda_2 * sym_mu
        + (sym_lambda_1**2)
        + 2 * sym_lambda_1 * sym_mu
        + 3 * (sym_mu**2)
    )

    # Common denominator: the sum of all unnormalised state probabilities,
    # fully expanded.
    denominator = (
        sym_Lambda * sym_lambda_2**3 * sym_lambda_1**2
        + sym_Lambda * sym_lambda_2**3 * sym_lambda_1 * sym_mu
        + sym_Lambda * sym_lambda_2**3 * sym_mu**2
        + 2 * sym_Lambda * sym_lambda_2**2 * sym_lambda_1**3
        + 5 * sym_Lambda * sym_lambda_2**2 * sym_lambda_1**2 * sym_mu
        + 5 * sym_Lambda * sym_lambda_2**2 * sym_lambda_1 * sym_mu**2
        + 3 * sym_Lambda * sym_lambda_2**2 * sym_mu**3
        + sym_Lambda * sym_lambda_2 * sym_lambda_1**4
        + 3 * sym_Lambda * sym_lambda_2 * sym_lambda_1**3 * sym_mu
        + 6 * sym_Lambda * sym_lambda_2 * sym_lambda_1**2 * sym_mu**2
        + 5 * sym_Lambda * sym_lambda_2 * sym_lambda_1 * sym_mu**3
        + 3 * sym_Lambda * sym_lambda_2 * sym_mu**4
        + sym_Lambda * sym_lambda_1**2 * sym_mu**3
        + sym_Lambda * sym_lambda_1 * sym_mu**4
        + sym_Lambda * sym_mu**5
        + sym_lambda_2**2 * sym_mu**4
        + sym_lambda_2 * sym_lambda_1 * sym_mu**4
        + 2 * sym_lambda_2 * sym_mu**5
        + sym_mu**6
    )

    sym_state_probs_1131 = [i / denominator for i in sym_state_probs_1131]

    # Ratios between adjacent states along the "down then right" path.
    sym_state_recursive_ratios_1131 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_1131[0, 0] = 1
    sym_state_recursive_ratios_1131[0, 1] = sym.factor(
        sym_state_probs_1131[1] / sym_state_probs_1131[0]
    )  # (0,0) -> (0,1)
    sym_state_recursive_ratios_1131[1, 1] = sym.factor(
        sym_state_probs_1131[2] / sym_state_probs_1131[1]
    )  # (0,1) -> (1,1)
    sym_state_recursive_ratios_1131[0, 2] = sym.factor(
        sym_state_probs_1131[3] / sym_state_probs_1131[1]
    )  # (0,1) -> (0,2)
    sym_state_recursive_ratios_1131[1, 2] = sym.factor(
        sym_state_probs_1131[4] / sym_state_probs_1131[3]
    )  # (0,2) -> (1,2)
    sym_state_recursive_ratios_1131[0, 3] = sym.factor(
        sym_state_probs_1131[5] / sym_state_probs_1131[3]
    )  # (0,2) -> (0,3)
    sym_state_recursive_ratios_1131[1, 3] = sym.factor(
        sym_state_probs_1131[6] / sym_state_probs_1131[5]
    )  # (0,3) -> (1,3)

    # Variant where the bottom row's ratios follow the horizontal arcs.
    sym_state_recursive_ratios_right_1131 = sym_state_recursive_ratios_1131.copy()
    sym_state_recursive_ratios_right_1131[1, 2] = sym.factor(
        sym_state_probs_1131[4] / sym_state_probs_1131[2]
    )  # (1,1) -> (1,2)
    sym_state_recursive_ratios_right_1131[1, 3] = sym.factor(
        sym_state_probs_1131[6] / sym_state_probs_1131[4]
    )  # (1,2) -> (1,3)

    # Every state expressed relative to the empty state P(0,0).
    sym_state_recursive_ratios_P0_1131 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_P0_1131[0, 0] = 1
    sym_state_recursive_ratios_P0_1131[0, 1] = sym.factor(
        sym_state_probs_1131[1] / sym_state_probs_1131[0]
    )  # (0,0) -> (0,1)
    sym_state_recursive_ratios_P0_1131[1, 1] = sym.factor(
        sym_state_probs_1131[2] / sym_state_probs_1131[0]
    )  # (0,0) -> (1,1)
    sym_state_recursive_ratios_P0_1131[0, 2] = sym.factor(
        sym_state_probs_1131[3] / sym_state_probs_1131[0]
    )  # (0,0) -> (0,2)
    sym_state_recursive_ratios_P0_1131[1, 2] = sym.factor(
        sym_state_probs_1131[4] / sym_state_probs_1131[0]
    )  # (0,0) -> (1,2)
    sym_state_recursive_ratios_P0_1131[0, 3] = sym.factor(
        sym_state_probs_1131[5] / sym_state_probs_1131[0]
    )  # (0,0) -> (0,3)
    sym_state_recursive_ratios_P0_1131[1, 3] = sym.factor(
        sym_state_probs_1131[6] / sym_state_probs_1131[0]
    )  # (0,0) -> (1,3)

    return (
        sym_state_probs_1131,
        sym_state_recursive_ratios_1131,
        sym_state_recursive_ratios_right_1131,
        sym_state_recursive_ratios_P0_1131,
    )
def get_symbolic_state_probabilities_1132():
    """Symbolically solve the steady-state equations of the Markov model
    with 1 server, threshold 1, system capacity 3 and buffer capacity 2.

    Returns a tuple of four objects:
      * a dict mapping each state symbol ``p_uv`` to its steady-state
        probability,
      * a matrix of recursive ratios between neighbouring states,
      * a variant of that matrix where buffered rows use their left-hand
        (same-row) neighbour,
      * a matrix of every state's probability relative to ``p00``.
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 3
    buffer_capacity = 2
    Q_sym_1132 = abg.markov.get_symbolic_transition_matrix(
        num_of_servers, threshold, system_capacity, buffer_capacity
    )

    # States in the chain's ordering: (0,0) first, then column by column
    # (v = 1..system_capacity), top to bottom (u = 0..buffer_capacity).
    all_states = [(0, 0)] + [
        (u, v)
        for v in range(1, system_capacity + 1)
        for u in range(buffer_capacity + 1)
    ]
    state_symbols = [sym.Symbol(f"p{u}{v}") for u, v in all_states]
    pi_1132 = sym.Matrix(state_symbols)
    dimension_1132 = Q_sym_1132.shape[0]

    # Drop the last balance equation in favour of the normalisation row.
    M_sym_1132 = sym.Matrix(
        [Q_sym_1132.transpose()[:-1, :], sym.ones(1, dimension_1132)]
    )
    sym_diff_equations_1132 = M_sym_1132 @ pi_1132
    b_sym_1132 = sym.Matrix([sym.zeros(dimension_1132 - 1, 1), [1]])
    sym_state_probs_1132 = sym.solve(
        [
            sym.Eq(sym_diff_equations_1132[i], b_sym_1132[i])
            for i in range(dimension_1132)
        ],
        tuple(state_symbols),
    )

    def prob(u, v):
        # Shorthand for the solved probability of state (u, v).
        return sym_state_probs_1132[sym.Symbol(f"p{u}{v}")]

    # Ratio of each state to its predecessor: horizontal along the top
    # row, vertical within every column.
    sym_state_recursive_ratios_1132 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_1132[0, 0] = 1
    sym_state_recursive_ratios_1132[0, 1] = sym.factor(prob(0, 1) / prob(0, 0))
    for v in range(1, system_capacity + 1):
        if v > 1:
            sym_state_recursive_ratios_1132[0, v] = sym.factor(
                prob(0, v) / prob(0, v - 1)
            )
        for u in range(1, buffer_capacity + 1):
            sym_state_recursive_ratios_1132[u, v] = sym.factor(
                prob(u, v) / prob(u - 1, v)
            )

    # Same matrix, but buffered rows (u >= 1) take the ratio against the
    # state to their left instead of the state above.
    sym_state_recursive_ratios_right_1132 = sym_state_recursive_ratios_1132.copy()
    for u in range(1, buffer_capacity + 1):
        for v in range(2, system_capacity + 1):
            sym_state_recursive_ratios_right_1132[u, v] = sym.factor(
                prob(u, v) / prob(u, v - 1)
            )

    # Every state's probability expressed relative to the empty state.
    sym_state_recursive_ratios_P0_1132 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_P0_1132[0, 0] = 1
    for u, v in all_states[1:]:
        sym_state_recursive_ratios_P0_1132[u, v] = sym.factor(
            prob(u, v) / prob(0, 0)
        )

    return (
        sym_state_probs_1132,
        sym_state_recursive_ratios_1132,
        sym_state_recursive_ratios_right_1132,
        sym_state_recursive_ratios_P0_1132,
    )
def get_symbolic_state_probabilities_1141():
    """Symbolically solve the steady-state equations of the Markov model
    with 1 server, threshold 1, system capacity 4 and buffer capacity 1.

    Returns a tuple of four objects:
      * a dict mapping each state symbol ``p_uv`` to its steady-state
        probability,
      * a matrix of recursive ratios between neighbouring states,
      * a variant of that matrix where the buffered row uses its
        left-hand (same-row) neighbour,
      * a matrix of every state's probability relative to ``p00``.
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 4
    buffer_capacity = 1
    Q_sym_1141 = abg.markov.get_symbolic_transition_matrix(
        num_of_servers, threshold, system_capacity, buffer_capacity
    )

    # States in the chain's ordering: (0,0) first, then column by column
    # (v = 1..system_capacity), top to bottom (u = 0..buffer_capacity).
    all_states = [(0, 0)] + [
        (u, v)
        for v in range(1, system_capacity + 1)
        for u in range(buffer_capacity + 1)
    ]
    state_symbols = [sym.Symbol(f"p{u}{v}") for u, v in all_states]
    pi_1141 = sym.Matrix(state_symbols)
    dimension_1141 = Q_sym_1141.shape[0]

    # Drop the last balance equation in favour of the normalisation row.
    M_sym_1141 = sym.Matrix(
        [Q_sym_1141.transpose()[:-1, :], sym.ones(1, dimension_1141)]
    )
    sym_diff_equations_1141 = M_sym_1141 @ pi_1141
    b_sym_1141 = sym.Matrix([sym.zeros(dimension_1141 - 1, 1), [1]])
    sym_state_probs_1141 = sym.solve(
        [
            sym.Eq(sym_diff_equations_1141[i], b_sym_1141[i])
            for i in range(dimension_1141)
        ],
        tuple(state_symbols),
    )

    def prob(u, v):
        # Shorthand for the solved probability of state (u, v).
        return sym_state_probs_1141[sym.Symbol(f"p{u}{v}")]

    # Ratio of each state to its predecessor: horizontal along the top
    # row, vertical within every column.
    sym_state_recursive_ratios_1141 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_1141[0, 0] = 1
    sym_state_recursive_ratios_1141[0, 1] = sym.factor(prob(0, 1) / prob(0, 0))
    for v in range(1, system_capacity + 1):
        if v > 1:
            sym_state_recursive_ratios_1141[0, v] = sym.factor(
                prob(0, v) / prob(0, v - 1)
            )
        for u in range(1, buffer_capacity + 1):
            sym_state_recursive_ratios_1141[u, v] = sym.factor(
                prob(u, v) / prob(u - 1, v)
            )

    # Same matrix, but the buffered row (u >= 1) takes the ratio against
    # the state to its left instead of the state above.
    sym_state_recursive_ratios_right_1141 = sym_state_recursive_ratios_1141.copy()
    for u in range(1, buffer_capacity + 1):
        for v in range(2, system_capacity + 1):
            sym_state_recursive_ratios_right_1141[u, v] = sym.factor(
                prob(u, v) / prob(u, v - 1)
            )

    # Every state's probability expressed relative to the empty state.
    sym_state_recursive_ratios_P0_1141 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_P0_1141[0, 0] = 1
    for u, v in all_states[1:]:
        sym_state_recursive_ratios_P0_1141[u, v] = sym.factor(
            prob(u, v) / prob(0, 0)
        )

    return (
        sym_state_probs_1141,
        sym_state_recursive_ratios_1141,
        sym_state_recursive_ratios_right_1141,
        sym_state_recursive_ratios_P0_1141,
    )
def get_symbolic_state_probabilities_1142():
    """Symbolically solve the steady-state equations of the Markov model
    with 1 server, threshold 1, system capacity 4 and buffer capacity 2.

    Returns a tuple of four objects:
      * a dict mapping each state symbol ``p_uv`` to its steady-state
        probability,
      * a matrix of recursive ratios between neighbouring states,
      * a variant of that matrix where buffered rows use their left-hand
        (same-row) neighbour,
      * a matrix of every state's probability relative to ``p00``.
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 4
    buffer_capacity = 2
    Q_sym_1142 = abg.markov.get_symbolic_transition_matrix(
        num_of_servers=num_of_servers,
        threshold=threshold,
        system_capacity=system_capacity,
        buffer_capacity=buffer_capacity,
    )

    # States in the chain's ordering: (0,0) first, then column by column
    # (v = 1..system_capacity), top to bottom (u = 0..buffer_capacity).
    all_states = [(0, 0)] + [
        (u, v)
        for v in range(1, system_capacity + 1)
        for u in range(buffer_capacity + 1)
    ]
    state_symbols = [sym.Symbol(f"p{u}{v}") for u, v in all_states]
    pi_1142 = sym.Matrix(state_symbols)
    dimension_1142 = Q_sym_1142.shape[0]

    # Drop the last balance equation in favour of the normalisation row.
    M_sym_1142 = sym.Matrix(
        [Q_sym_1142.transpose()[:-1, :], sym.ones(1, dimension_1142)]
    )
    sym_diff_equations_1142 = M_sym_1142 @ pi_1142
    b_sym_1142 = sym.Matrix([sym.zeros(dimension_1142 - 1, 1), [1]])
    sym_state_probs_1142 = sym.solve(
        [
            sym.Eq(sym_diff_equations_1142[i], b_sym_1142[i])
            for i in range(dimension_1142)
        ],
        tuple(state_symbols),
    )

    def prob(u, v):
        # Shorthand for the solved probability of state (u, v).
        return sym_state_probs_1142[sym.Symbol(f"p{u}{v}")]

    # Ratio of each state to its predecessor: horizontal along the top
    # row, vertical within every column.
    sym_state_recursive_ratios_1142 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_1142[0, 0] = 1
    sym_state_recursive_ratios_1142[0, 1] = sym.factor(prob(0, 1) / prob(0, 0))
    for v in range(1, system_capacity + 1):
        if v > 1:
            sym_state_recursive_ratios_1142[0, v] = sym.factor(
                prob(0, v) / prob(0, v - 1)
            )
        for u in range(1, buffer_capacity + 1):
            sym_state_recursive_ratios_1142[u, v] = sym.factor(
                prob(u, v) / prob(u - 1, v)
            )

    # Same matrix, but buffered rows (u >= 1) take the ratio against the
    # state to their left instead of the state above.
    sym_state_recursive_ratios_right_1142 = sym_state_recursive_ratios_1142.copy()
    for u in range(1, buffer_capacity + 1):
        for v in range(2, system_capacity + 1):
            sym_state_recursive_ratios_right_1142[u, v] = sym.factor(
                prob(u, v) / prob(u, v - 1)
            )

    # Every state's probability expressed relative to the empty state.
    sym_state_recursive_ratios_P0_1142 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_P0_1142[0, 0] = 1
    for u, v in all_states[1:]:
        sym_state_recursive_ratios_P0_1142[u, v] = sym.factor(
            prob(u, v) / prob(0, 0)
        )

    return (
        sym_state_probs_1142,
        sym_state_recursive_ratios_1142,
        sym_state_recursive_ratios_right_1142,
        sym_state_recursive_ratios_P0_1142,
    )
def get_symbolic_state_probabilities_1151():
    """Symbolically solve the steady-state equations of the Markov model
    with 1 server, threshold 1, system capacity 5 and buffer capacity 1.

    Returns a tuple of four objects:
      * a dict mapping each state symbol ``p_uv`` to its steady-state
        probability,
      * a matrix of recursive ratios between neighbouring states,
      * a variant of that matrix where the buffered row uses its
        left-hand (same-row) neighbour,
      * a matrix of every state's probability relative to ``p00``.
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 5
    buffer_capacity = 1
    Q_sym_1151 = abg.markov.get_symbolic_transition_matrix(
        num_of_servers, threshold, system_capacity, buffer_capacity
    )

    # States in the chain's ordering: (0,0) first, then column by column
    # (v = 1..system_capacity), top to bottom (u = 0..buffer_capacity).
    all_states = [(0, 0)] + [
        (u, v)
        for v in range(1, system_capacity + 1)
        for u in range(buffer_capacity + 1)
    ]
    state_symbols = [sym.Symbol(f"p{u}{v}") for u, v in all_states]
    pi_1151 = sym.Matrix(state_symbols)
    dimension_1151 = Q_sym_1151.shape[0]

    # Drop the last balance equation in favour of the normalisation row.
    M_sym_1151 = sym.Matrix(
        [Q_sym_1151.transpose()[:-1, :], sym.ones(1, dimension_1151)]
    )
    sym_diff_equations_1151 = M_sym_1151 @ pi_1151
    b_sym_1151 = sym.Matrix([sym.zeros(dimension_1151 - 1, 1), [1]])
    sym_state_probs_1151 = sym.solve(
        [
            sym.Eq(sym_diff_equations_1151[i], b_sym_1151[i])
            for i in range(dimension_1151)
        ],
        tuple(state_symbols),
    )

    def prob(u, v):
        # Shorthand for the solved probability of state (u, v).
        return sym_state_probs_1151[sym.Symbol(f"p{u}{v}")]

    # Ratio of each state to its predecessor: horizontal along the top
    # row, vertical within every column.
    sym_state_recursive_ratios_1151 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_1151[0, 0] = 1
    sym_state_recursive_ratios_1151[0, 1] = sym.factor(prob(0, 1) / prob(0, 0))
    for v in range(1, system_capacity + 1):
        if v > 1:
            sym_state_recursive_ratios_1151[0, v] = sym.factor(
                prob(0, v) / prob(0, v - 1)
            )
        for u in range(1, buffer_capacity + 1):
            sym_state_recursive_ratios_1151[u, v] = sym.factor(
                prob(u, v) / prob(u - 1, v)
            )

    # Same matrix, but the buffered row (u >= 1) takes the ratio against
    # the state to its left instead of the state above.
    sym_state_recursive_ratios_right_1151 = sym_state_recursive_ratios_1151.copy()
    for u in range(1, buffer_capacity + 1):
        for v in range(2, system_capacity + 1):
            sym_state_recursive_ratios_right_1151[u, v] = sym.factor(
                prob(u, v) / prob(u, v - 1)
            )

    # Every state's probability expressed relative to the empty state.
    sym_state_recursive_ratios_P0_1151 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_P0_1151[0, 0] = 1
    for u, v in all_states[1:]:
        sym_state_recursive_ratios_P0_1151[u, v] = sym.factor(
            prob(u, v) / prob(0, 0)
        )

    return (
        sym_state_probs_1151,
        sym_state_recursive_ratios_1151,
        sym_state_recursive_ratios_right_1151,
        sym_state_recursive_ratios_P0_1151,
    )
def get_symbolic_state_probabilities_1161():
    """Symbolically solve the steady-state equations of the Markov model
    with 1 server, threshold 1, system capacity 6 and buffer capacity 1.

    Returns a tuple of four objects:
      * a dict mapping each state symbol ``p_uv`` to its steady-state
        probability,
      * a matrix of recursive ratios between neighbouring states,
      * a variant of that matrix where the buffered row uses its
        left-hand (same-row) neighbour,
      * a matrix of every state's probability relative to ``p00``.
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 6
    buffer_capacity = 1
    Q_sym_1161 = abg.markov.get_symbolic_transition_matrix(
        num_of_servers, threshold, system_capacity, buffer_capacity
    )

    # States in the chain's ordering: (0,0) first, then column by column
    # (v = 1..system_capacity), top to bottom (u = 0..buffer_capacity).
    all_states = [(0, 0)] + [
        (u, v)
        for v in range(1, system_capacity + 1)
        for u in range(buffer_capacity + 1)
    ]
    state_symbols = [sym.Symbol(f"p{u}{v}") for u, v in all_states]
    pi_1161 = sym.Matrix(state_symbols)
    dimension_1161 = Q_sym_1161.shape[0]

    # Drop the last balance equation in favour of the normalisation row.
    M_sym_1161 = sym.Matrix(
        [Q_sym_1161.transpose()[:-1, :], sym.ones(1, dimension_1161)]
    )
    sym_diff_equations_1161 = M_sym_1161 @ pi_1161
    b_sym_1161 = sym.Matrix([sym.zeros(dimension_1161 - 1, 1), [1]])
    sym_state_probs_1161 = sym.solve(
        [
            sym.Eq(sym_diff_equations_1161[i], b_sym_1161[i])
            for i in range(dimension_1161)
        ],
        tuple(state_symbols),
    )

    def prob(u, v):
        # Shorthand for the solved probability of state (u, v).
        return sym_state_probs_1161[sym.Symbol(f"p{u}{v}")]

    # Ratio of each state to its predecessor: horizontal along the top
    # row, vertical within every column.
    sym_state_recursive_ratios_1161 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_1161[0, 0] = 1
    sym_state_recursive_ratios_1161[0, 1] = sym.factor(prob(0, 1) / prob(0, 0))
    for v in range(1, system_capacity + 1):
        if v > 1:
            sym_state_recursive_ratios_1161[0, v] = sym.factor(
                prob(0, v) / prob(0, v - 1)
            )
        for u in range(1, buffer_capacity + 1):
            sym_state_recursive_ratios_1161[u, v] = sym.factor(
                prob(u, v) / prob(u - 1, v)
            )

    # Same matrix, but the buffered row (u >= 1) takes the ratio against
    # the state to its left instead of the state above.
    sym_state_recursive_ratios_right_1161 = sym_state_recursive_ratios_1161.copy()
    for u in range(1, buffer_capacity + 1):
        for v in range(2, system_capacity + 1):
            sym_state_recursive_ratios_right_1161[u, v] = sym.factor(
                prob(u, v) / prob(u, v - 1)
            )

    # Every state's probability expressed relative to the empty state.
    sym_state_recursive_ratios_P0_1161 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_P0_1161[0, 0] = 1
    for u, v in all_states[1:]:
        sym_state_recursive_ratios_P0_1161[u, v] = sym.factor(
            prob(u, v) / prob(0, 0)
        )

    return (
        sym_state_probs_1161,
        sym_state_recursive_ratios_1161,
        sym_state_recursive_ratios_right_1161,
        sym_state_recursive_ratios_P0_1161,
    )
def get_symbolic_state_probabilities_1171():
    """Symbolically solve the steady-state equations of the Markov model
    with 1 server, threshold 1, system capacity 7 and buffer capacity 1.

    Returns a tuple of four objects:
      * a dict mapping each state symbol ``p_uv`` to its steady-state
        probability,
      * a matrix of recursive ratios between neighbouring states,
      * a variant of that matrix where the buffered row uses its
        left-hand (same-row) neighbour,
      * a matrix of every state's probability relative to ``p00``.
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 7
    buffer_capacity = 1
    Q_sym_1171 = abg.markov.get_symbolic_transition_matrix(
        num_of_servers, threshold, system_capacity, buffer_capacity
    )

    # States in the chain's ordering: (0,0) first, then column by column
    # (v = 1..system_capacity), top to bottom (u = 0..buffer_capacity).
    all_states = [(0, 0)] + [
        (u, v)
        for v in range(1, system_capacity + 1)
        for u in range(buffer_capacity + 1)
    ]
    state_symbols = [sym.Symbol(f"p{u}{v}") for u, v in all_states]
    pi_1171 = sym.Matrix(state_symbols)
    dimension_1171 = Q_sym_1171.shape[0]

    # Drop the last balance equation in favour of the normalisation row.
    M_sym_1171 = sym.Matrix(
        [Q_sym_1171.transpose()[:-1, :], sym.ones(1, dimension_1171)]
    )
    sym_diff_equations_1171 = M_sym_1171 @ pi_1171
    b_sym_1171 = sym.Matrix([sym.zeros(dimension_1171 - 1, 1), [1]])
    sym_state_probs_1171 = sym.solve(
        [
            sym.Eq(sym_diff_equations_1171[i], b_sym_1171[i])
            for i in range(dimension_1171)
        ],
        tuple(state_symbols),
    )

    def prob(u, v):
        # Shorthand for the solved probability of state (u, v).
        return sym_state_probs_1171[sym.Symbol(f"p{u}{v}")]

    # Ratio of each state to its predecessor: horizontal along the top
    # row, vertical within every column.
    sym_state_recursive_ratios_1171 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_1171[0, 0] = 1
    sym_state_recursive_ratios_1171[0, 1] = sym.factor(prob(0, 1) / prob(0, 0))
    for v in range(1, system_capacity + 1):
        if v > 1:
            sym_state_recursive_ratios_1171[0, v] = sym.factor(
                prob(0, v) / prob(0, v - 1)
            )
        for u in range(1, buffer_capacity + 1):
            sym_state_recursive_ratios_1171[u, v] = sym.factor(
                prob(u, v) / prob(u - 1, v)
            )

    # Same matrix, but the buffered row (u >= 1) takes the ratio against
    # the state to its left instead of the state above.
    sym_state_recursive_ratios_right_1171 = sym_state_recursive_ratios_1171.copy()
    for u in range(1, buffer_capacity + 1):
        for v in range(2, system_capacity + 1):
            sym_state_recursive_ratios_right_1171[u, v] = sym.factor(
                prob(u, v) / prob(u, v - 1)
            )

    # Every state's probability expressed relative to the empty state.
    sym_state_recursive_ratios_P0_1171 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_P0_1171[0, 0] = 1
    for u, v in all_states[1:]:
        sym_state_recursive_ratios_P0_1171[u, v] = sym.factor(
            prob(u, v) / prob(0, 0)
        )

    return (
        sym_state_probs_1171,
        sym_state_recursive_ratios_1171,
        sym_state_recursive_ratios_right_1171,
        sym_state_recursive_ratios_P0_1171,
    )
def get_symbolic_state_probabilities_1181():
    """Symbolically solve the Markov model with 1 server, threshold 1,
    system capacity 8 and buffer capacity 1.

    Returns a tuple of:
      * dict mapping each state symbol (p00, p01, p11, ..., p18) to its
        symbolic steady-state probability,
      * matrix of each state's ratio against its neighbouring state,
      * the same matrix with the buffer row chained left-to-right,
      * matrix of each state's probability relative to state (0, 0).
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 8
    buffer_capacity = 1
    Q_sym_1181 = abg.markov.get_symbolic_transition_matrix(
        num_of_servers, threshold, system_capacity, buffer_capacity
    )
    # States in transition-matrix order: (0,0), then (0,v),(1,v) for v = 1..8.
    all_states = [(0, 0)] + [
        (u, v)
        for v in range(threshold, system_capacity + 1)
        for u in range(buffer_capacity + 1)
    ]
    state_symbols = sym.symbols([f"p{u}{v}" for u, v in all_states])
    symbol_of = dict(zip(all_states, state_symbols))
    pi_1181 = sym.Matrix(state_symbols)
    dimension_1181 = Q_sym_1181.shape[0]
    # Drop the last (redundant) balance equation, append normalisation row.
    M_sym_1181 = sym.Matrix(
        [Q_sym_1181.transpose()[:-1, :], sym.ones(1, dimension_1181)]
    )
    sym_diff_equations_1181 = M_sym_1181 @ pi_1181
    b_sym_1181 = sym.Matrix([sym.zeros(dimension_1181 - 1, 1), [1]])
    balance_equations = [
        sym.Eq(sym_diff_equations_1181[row], b_sym_1181[row])
        for row in range(dimension_1181)
    ]
    sym_state_probs_1181 = sym.solve(balance_equations, state_symbols)
    def ratio(numerator_state, denominator_state):
        # Factorised ratio of two steady-state probabilities.
        return sym.factor(
            sym_state_probs_1181[symbol_of[numerator_state]]
            / sym_state_probs_1181[symbol_of[denominator_state]]
        )
    # Column-to-column ratio (0,v-1)->(0,v) and in-column ratio (0,v)->(1,v).
    sym_state_recursive_ratios_1181 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_1181[0, 0] = 1
    for v in range(threshold, system_capacity + 1):
        sym_state_recursive_ratios_1181[0, v] = ratio((0, v), (0, v - 1))
        sym_state_recursive_ratios_1181[1, v] = ratio((1, v), (0, v))
    # Same matrix, but the buffer row is chained left-to-right: (1,v-1)->(1,v).
    sym_state_recursive_ratios_right_1181 = sym_state_recursive_ratios_1181.copy()
    for v in range(threshold + 1, system_capacity + 1):
        sym_state_recursive_ratios_right_1181[1, v] = ratio((1, v), (1, v - 1))
    # Every state's probability relative to the empty state (0,0).
    sym_state_recursive_ratios_P0_1181 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_P0_1181[0, 0] = 1
    for u, v in all_states[1:]:
        sym_state_recursive_ratios_P0_1181[u, v] = ratio((u, v), (0, 0))
    return (
        sym_state_probs_1181,
        sym_state_recursive_ratios_1181,
        sym_state_recursive_ratios_right_1181,
        sym_state_recursive_ratios_P0_1181,
    )
def get_symbolic_state_probabilities_1191():
    """Symbolically solve the Markov model with 1 server, threshold 1,
    system capacity 9 and buffer capacity 1.

    Returns a tuple of:
      * dict mapping each state symbol (p00, p01, p11, ..., p19) to its
        symbolic steady-state probability,
      * matrix of each state's ratio against its neighbouring state,
      * the same matrix with the buffer row chained left-to-right,
      * matrix of each state's probability relative to state (0, 0).

    Bug fixed relative to the original implementation: entry [1, 8] of
    the "right" ratio matrix was assigned twice (with p18 / p17) and
    entry [1, 9] — the (1,8) -> (1,9) ratio p19 / p18 — was never set.
    """
    num_of_servers = 1
    threshold = 1
    system_capacity = 9
    buffer_capacity = 1
    Q_sym_1191 = abg.markov.get_symbolic_transition_matrix(
        num_of_servers, threshold, system_capacity, buffer_capacity
    )
    # States in transition-matrix order: (0,0), then (0,v),(1,v) for v = 1..9.
    all_states = [(0, 0)] + [
        (u, v)
        for v in range(threshold, system_capacity + 1)
        for u in range(buffer_capacity + 1)
    ]
    state_symbols = sym.symbols([f"p{u}{v}" for u, v in all_states])
    symbol_of = dict(zip(all_states, state_symbols))
    pi_1191 = sym.Matrix(state_symbols)
    dimension_1191 = Q_sym_1191.shape[0]
    # Drop the last (redundant) balance equation, append normalisation row.
    M_sym_1191 = sym.Matrix(
        [Q_sym_1191.transpose()[:-1, :], sym.ones(1, dimension_1191)]
    )
    sym_diff_equations_1191 = M_sym_1191 @ pi_1191
    b_sym_1191 = sym.Matrix([sym.zeros(dimension_1191 - 1, 1), [1]])
    balance_equations = [
        sym.Eq(sym_diff_equations_1191[row], b_sym_1191[row])
        for row in range(dimension_1191)
    ]
    sym_state_probs_1191 = sym.solve(balance_equations, state_symbols)
    def ratio(numerator_state, denominator_state):
        # Factorised ratio of two steady-state probabilities.
        return sym.factor(
            sym_state_probs_1191[symbol_of[numerator_state]]
            / sym_state_probs_1191[symbol_of[denominator_state]]
        )
    # Column-to-column ratio (0,v-1)->(0,v) and in-column ratio (0,v)->(1,v).
    sym_state_recursive_ratios_1191 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_1191[0, 0] = 1
    for v in range(threshold, system_capacity + 1):
        sym_state_recursive_ratios_1191[0, v] = ratio((0, v), (0, v - 1))
        sym_state_recursive_ratios_1191[1, v] = ratio((1, v), (0, v))
    # Same matrix, but the buffer row is chained left-to-right: (1,v-1)->(1,v).
    # Looping through every column also fixes the original bug where the
    # [1, 9] entry was never populated.
    sym_state_recursive_ratios_right_1191 = sym_state_recursive_ratios_1191.copy()
    for v in range(threshold + 1, system_capacity + 1):
        sym_state_recursive_ratios_right_1191[1, v] = ratio((1, v), (1, v - 1))
    # Every state's probability relative to the empty state (0,0).
    sym_state_recursive_ratios_P0_1191 = sym.zeros(
        buffer_capacity + 1, system_capacity + 1
    )
    sym_state_recursive_ratios_P0_1191[0, 0] = 1
    for u, v in all_states[1:]:
        sym_state_recursive_ratios_P0_1191[u, v] = ratio((u, v), (0, 0))
    return (
        sym_state_probs_1191,
        sym_state_recursive_ratios_1191,
        sym_state_recursive_ratios_right_1191,
        sym_state_recursive_ratios_P0_1191,
    )
|
6,991 | b58cc08f8f10220373fa78f5d7249bc883b447bf | from mathgraph3D.core.plot import *
from mathgraph3D.core.functions import *
|
6,992 | 43eb221758ebcf1f01851fc6cda67b72f32a73c7 | #!/usr/bin/python
if __name__ == '__main__':
    import os
    import sys

    # Make PETSc's bundled configure module importable before loading it.
    sys.path.insert(0, os.path.abspath('config'))
    import configure

    # Build with the Intel 13 update 5 toolchain and MKL (BLAS/LAPACK and
    # Pardiso); download and build MPICH automatically.
    configure_options = [
        'CC=icc',
        'CXX=icpc',
        'FC=ifort',
        '--with-blas-lapack-dir=/soft/com/packages/intel/13/update5/mkl/',
        '--with-mkl_pardiso-dir=/soft/com/packages/intel/13/update5/mkl/',
        '--download-mpich=1',
    ]
    configure.petsc_configure(configure_options)
6,993 | 11259c92b005a66e5f3c9592875f478df199c813 | # Name: Calvin Liew
# Date: 2021-01-29
# Purpose: Video game final project, Tic-Tac-Toe 15 by Calvin Liew.
import random
# Function that reminds the users of the game rules and other instructions.
def intro():
    """Print the title banner and the rules/instructions of Tic-Tac-Toe 15.

    Explains the win condition (three in a line summing to 15), which
    player uses odd vs even numbers, and the 1-9 board position layout.
    """
    print("""\n####### ####### ####### # #######
# #### # ## #### # #### ###### ## #
# # # # # # # # # # # # # #
# # # ##### # # # # ##### # # # ##### # ######
# # # # ###### # # # # # #
# # # # # # # # # # # # # # # #
# # #### # # # #### # #### ###### ##### #####
How to play Tic-Tac-Toe 15:
To win, you must get three numbers in a row/column/diagonal that add up to the sum of 15! The first player enters odd numbers and the second player enters even numbers.
Board Instructions: Tell the program the position of which you would like to enter by entering the number position of
the boxes as shown below. Players can can only enter from numbers from 1-9.
 | |
 1 | 2 | 3
_____|_____|_____
 | |
 4 | 5 | 6
_____|_____|_____
 | |
 7 | 8 | 9
 | |
 """)
# Function that prints the tic-tac-toe board.
def print_board(board):
    """Render the 3x3 grid, showing the nine values of *board* in order."""
    rows = (board[0:3], board[3:6], board[6:9])
    print("\n\t | |")
    for row_number, (left, middle, right) in enumerate(rows):
        print("\t {} | {} | {}".format(left, middle, right))
        if row_number < len(rows) - 1:
            # Horizontal divider between rows (not after the last row).
            print('\t_____|_____|_____')
        print("\t | |")
# Function that chooses who goes first and assigns the player order.
def choose_who_first(player1, player2, player_order):
    """Randomly decide which player moves first.

    Appends the first mover then the second mover to *player_order*
    (the first mover plays odd numbers, the second plays even numbers),
    announces the result, and returns the name of the first mover.
    """
    flip = random.randint(1, 2)
    if flip == 1:
        print("\n" + player1, "goes first.", player1, "can only play odd numbers and", player2,
              "can only play even numbers from 1-9. ")
        print()
        player_order.append(player1)
        player_order.append(player2)
        return player1
    # BUG FIX: this branch previously interpolated the module-level global
    # `name1` instead of the `player1` parameter, printing the wrong name
    # (or raising NameError when called before the __main__ guard runs).
    print("\n" + player2, "goes first.", player2, "can only play odd numbers and", player1,
          "can only play even numbers from 1-9. ")
    print()
    player_order.append(player2)
    player_order.append(player1)
    return player2
# Function that calls the print_board() function as well as makes the moves that the players provide while checking if the moves are legal or not.
def make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order):
    """Prompt the current player for a board position and a number, then
    place the number on the board if both inputs are legal.

    A legal move requires: position 1-9 on an unoccupied cell, and a
    number of the player's parity (the first entry of player_order plays
    odd numbers, the second plays even) that the player has not used yet.
    Any invalid input prints a message and re-prompts by recursing into
    this function with unchanged arguments. Mutates the_board and the
    player's unavailable-moves list in place; returns None.
    """
    odd_moves = [1, 3, 5, 7, 9]
    even_moves = [2, 4, 6, 8]
    try:
        if turn == player1:
            print("\nIts your turn", player1 + ": ")
            print()
            p1_move_input = int(input("Move to which space? (1-9): "))
            # player_order[0] went first and therefore plays odd numbers.
            if player_order[0] == player1:
                if 1 <= p1_move_input <= 9 and the_board[p1_move_input - 1] == 0:
                    print()
                    p1_num_input = int(input("Enter an ODD NUMBER from 1-9: "))
                    if p1_num_input in odd_moves and p1_num_input not in unavailable_moves_p1:
                        the_board[p1_move_input - 1] = p1_num_input
                        # Each number can only be played once per round.
                        unavailable_moves_p1.append(p1_num_input)
                    elif p1_num_input in unavailable_moves_p1:
                        print("\nINVALID INPUT, Please try again and enter a number that you haven't used. ")
                        make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
                    else:
                        print("\nINVALID INPUT, Please try again and enter an ODD number. ")
                        make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
                elif p1_move_input < 1 or p1_move_input > 9:
                    print("\nINVALID INPUT, Please try again and enter a number between 1-9. ")
                    make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
                else:
                    print("\nINVALID INPUT, Please try again and enter an unoccupied spot. ")
                    make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
            # player_order[1] went second and therefore plays even numbers.
            elif player_order[1] == player1:
                if 1 <= p1_move_input <= 9 and the_board[p1_move_input - 1] == 0:
                    print()
                    p1_num_input = int(input("Enter a EVEN NUMBER from 1-9: "))
                    if p1_num_input in even_moves and p1_num_input not in unavailable_moves_p1:
                        the_board[p1_move_input - 1] = p1_num_input
                        unavailable_moves_p1.append(p1_num_input)
                    elif p1_num_input in unavailable_moves_p1:
                        print("\nINVALID INPUT, Please try again and enter a number that you haven't used. ")
                        make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
                    else:
                        print("\nINVALID INPUT, Please try again and enter a EVEN number. ")
                        make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
                elif p1_move_input < 1 or p1_move_input > 9:
                    print("\nINVALID INPUT, Please try again and enter a number between 1-9. ")
                    make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
                else:
                    print("\nINVALID INPUT, Please try again and enter an unoccupied spot. ")
                    make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
        # Mirrored logic for player2's turn.
        if turn == player2:
            print("\nIts your turn", player2 + ": ")
            print()
            p2_move_input = int(input("Move to which space? (1-9): "))
            if player_order[0] == player2:
                if 1 <= p2_move_input <= 9 and the_board[p2_move_input - 1] == 0:
                    print()
                    p2_num_input = int(input("Enter an ODD NUMBER from 1-9: "))
                    if p2_num_input in odd_moves and p2_num_input not in unavailable_moves_p2:
                        the_board[p2_move_input - 1] = p2_num_input
                        unavailable_moves_p2.append(p2_num_input)
                    elif p2_num_input in unavailable_moves_p2:
                        print("\nINVALID INPUT, Please try again and enter a number that you haven't used. ")
                        make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
                    else:
                        print("\nINVALID INPUT, Please try again and enter an ODD number. ")
                        make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
                elif p2_move_input < 1 or p2_move_input > 9:
                    print("\nINVALID INPUT, Please try again and enter a number between 1-9. ")
                    make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
                else:
                    print("\nINVALID INPUT, Please try again and enter an unoccupied spot. ")
                    make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
            elif player_order[1] == player2:
                if 1 <= p2_move_input <= 9 and the_board[p2_move_input - 1] == 0:
                    print()
                    p2_num_input = int(input("Enter a EVEN NUMBER from 1-9: "))
                    if p2_num_input in even_moves and p2_num_input not in unavailable_moves_p2:
                        the_board[p2_move_input - 1] = p2_num_input
                        unavailable_moves_p2.append(p2_num_input)
                    elif p2_num_input in unavailable_moves_p2:
                        print("\nINVALID INPUT, Please try again and enter a number that you haven't used. ")
                        make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
                    else:
                        print("\nINVALID INPUT, Please try again and enter a EVEN number. ")
                        make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
                elif p2_move_input < 1 or p2_move_input > 9:
                    print("\nINVALID INPUT, Please try again and enter a number between 1-9. ")
                    make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
                else:
                    print("\nINVALID, Please try again and enter an unoccupied spot. ")
                    make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
    except ValueError:
        # Non-integer input to either int(input(...)) call lands here.
        print("\nINVALID INPUT, Please try again and enter only in integers. ")
        make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
# Function that checks if any three numbers in a row/column/diagonal add up to 15. If there is, the function returns is_game_over and the game ends.
def check_game(board, winner):
    """Return True (and announce the win) if any fully-occupied row,
    column or diagonal of *board* sums to 15; otherwise return "" (falsy).
    """
    winning_lines = (
        (0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
        (0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
        (6, 4, 2), (0, 4, 8),              # diagonals
    )
    for a, b, c in winning_lines:
        occupied = board[a] != 0 and board[b] != 0 and board[c] != 0
        if board[a] + board[b] + board[c] == 15 and occupied:
            print_board(board)
            print("\n"+str(board[a])+",", str(board[b])+",", "and", str(board[c]), "add up to 15! ")
            print("\n"+winner, "wins! ")
            return True
    return ""
# Function that prints the scoreboard and the scores of the two players. Prints after a round has ended.
def score(score1, score2, player1, player2):
    """Display the scoreboard with each player's number of round wins."""
    border = "\t------------------"
    print("\n" + border)
    print("\t SCOREBOARD")
    print(border)
    for player_name, points in ((player1, score1), (player2, score2)):
        print("\t" + " " + player_name + ":", points)
    print(border)
    print()
# Function that is where most of the game takes place. Function calls other functions such as make_move_and_update, choose_who_first, score and other code that make up the game.
# Function keeps track of the player order, the board, unavailable moves, amount of rounds and other variables. The game ends in a draw when count reaches 9. At the end of the round, it asks the users if they want to play again.
def play_game(score1, score2, player1, player2):
    """Run one round of Tic-Tac-Toe 15, then offer a rematch.

    Picks the first mover, alternates turns until someone wins (a line
    sums to 15) or all nine cells are filled (draw), updates the score,
    shows the scoreboard, and prompts whether to play again.  "yes"
    recurses into a new round with the running scores; "no" announces
    the overall winner and returns.

    Fixes vs. original: the rematch loop condition
    `restart != "yes" or restart != "y" or ...` was a tautology (always
    True) and the loop relied solely on break — replaced with an honest
    `while True`; fixed the user-facing typos "Do want to play" and
    "have one the same amount".
    """
    unavailable_moves_p1 = []
    unavailable_moves_p2 = []
    player_order = []
    the_board = [0, 0, 0, 0, 0, 0, 0, 0, 0]
    count = 0
    turn = choose_who_first(player1, player2, player_order)
    input("Enter anything to start the round: ")
    for _ in range(10):
        print_board(the_board)
        make_move_and_update(the_board, turn, player1, player2, unavailable_moves_p1, unavailable_moves_p2, player_order)
        count += 1
        if check_game(the_board, turn):
            # The player who just moved is the winner.
            if turn == player1:
                score1 += 1
            elif turn == player2:
                score2 += 1
            break
        if count == 9:
            # Board is full and nobody reached 15.
            print("No numbers added up to 15, it's a DRAW! ")
            break
        turn = player2 if turn == player1 else player1
    input("\nEnter anything to continue: ")
    score(score1, score2, player1, player2)
    # Ask until the players give a recognisable yes/no answer.
    while True:
        restart = input("Do you want to play Again? (y/n) ").lower()
        if restart in ("y", "yes"):
            print("\nLoading new round...")
            play_game(score1, score2, player1, player2)
        elif restart in ("n", "no"):
            if score1 > score2:
                print("\n"+player1, "is the overall winner! Congratulations!")
            elif score2 > score1:
                print("\n"+player2, "is the overall winner! Congratulations!")
            else:
                print("\nBoth players have won the same amount of rounds. It's a draw! ")
            print("\nThanks for playing! ")
            break
        else:
            print("\nPlease enter YES or NO ")
            print()
# This code manages the important things before the actual game starts such as the instructions, usernames, etc. Calls the play_game function.
if __name__ == "__main__":
    # Entry point: show the instructions, read both usernames, and launch
    # the first round with both scores starting at zero.
    intro()
    input("Enter anything to continue: ")
    print("\nEnter usernames: ")
    name1 = input("\nPlayer 1, Enter your name: ").title()
    name2 = input("\nPlayer 2, Enter your name: ").title()
    starting_score = 0
    play_game(starting_score, starting_score, name1, name2)
|
6,994 | 007caece16f641947043faa94b8a074efe8ebadb | #!/usr/bin/env python
import rospy
from std_msgs.msg import *
# Rolling multi-line display buffer; the __main__ guard below pre-fills it
# with one "null\n\n" slot (two lines) per target.
__print__ = ''
def Print(msg):
    """ROS subscriber callback: update one target's slot in the display
    buffer and reprint the whole buffer.

    msg.data is a ';'-separated string whose first field is the target
    index and whose last field is the text to show; each target owns two
    consecutive lines (hence the * 2).
    """
    global __print__
    target = int(msg.data.split(';')[0]) * 2
    if target == 0:
        # First slot: replace line 0, keep the rest of the buffer.
        __print__ = msg.data.split(';')[-1] + '\n' + '\n'.join(__print__.split('\n')[1:])
    else:
        # Splice the new text in place of the line at index `target`.
        __print__ = '\n'.join(__print__.split('\n')[0:target]) + '\n' + msg.data.split(';')[-1] + '\n' + '\n'.join(__print__.split('\n')[target+1:])
    print __print__
if __name__ == '__main__':
    rospy.init_node('negomo_print')
    # Number of tracked targets; each gets a two-line slot in the buffer.
    num = rospy.get_param('~max_targets')
    rospy.Subscriber('/negomo/observation_sequence', String, Print)
    # Pre-fill one empty "null" slot per target, then process callbacks.
    __print__ = 'null\n\n' * num
    rospy.spin()
|
6,995 | 227b71cb6d4cde8f498ad19c1c5f95f7fc572752 | from collections import defaultdict, Counter
import numpy as np
import sys
import re
def parseFile(file, frequency_tree):
    """Parse a SAM alignment file and compute summary statistics.

    Args:
        file: path of the SAM file to read.
        frequency_tree: mapping of chromosome -> interval tree supporting
            find_overlap(start, end); used to measure how many mapped
            reads overlap an annotated region.

    Returns:
        A 12-element list: [gene_annotation_percent, read_lengths_average,
        read_frequency, std of position differences, mean of position
        differences, num_chromosomes, max position difference, min
        position difference, fraction A, fraction C, fraction G,
        fraction T] — or None when there are no position differences
        (e.g. an empty file or no mapped positions).

    Fixes vs. original: removed unused locals (readnumber, colon_spliter,
    position_differences_stdv_list, total_position_diffs); replaced the
    bare `except:` with the specific ValueError that np.amax/np.amin
    raise on an empty sequence; replaced the manual readline loop with
    direct file iteration.
    """
    line_spliter = re.compile('\t+')
    forward_reads = 0
    reverse_reads = 0
    unmatched_reads = 0
    read_positions = defaultdict(list)  # chromosome -> mapped positions
    position_differences = []           # gaps between consecutive sorted positions
    read_lengths_count = 0
    read_lengths_total = 0
    read_frequency = 0
    read_lengths_average = 0
    num_chromosomes = 0
    num_a = 0
    num_c = 0
    num_g = 0
    num_t = 0
    print("############# OPENING SAM FILE", file=sys.stderr)
    with open(file, 'rt') as fp:
        for line in fp:
            subline = line_spliter.split(line)
            flags = int(subline[1])
            # SAM FLAG bit 4 = unmapped, bit 16 = reverse strand.
            if flags & 4:
                unmatched_reads += 1
            elif flags & 16:
                reverse_reads += 1
            else:
                forward_reads += 1
            read = subline[9]
            read_lengths_count += 1
            read_lengths_total += len(read)
            bases_count = Counter(read)
            num_a += bases_count["A"]
            num_c += bases_count["C"]
            num_g += bases_count["G"]
            num_t += bases_count["T"]
            chromosome = getChromosome(subline[2])
            if chromosome != -1:
                read_positions[chromosome].append(int(subline[3]))
    if read_lengths_count != 0:
        read_lengths_average = read_lengths_total / read_lengths_count
    total_reads = forward_reads + reverse_reads + unmatched_reads
    if total_reads != 0:
        read_frequency = (forward_reads + reverse_reads) / total_reads
    gene_annotation_match = 0
    gene_annotation_total = 0
    gene_annotation_percent = 0
    for key in read_positions.keys():
        for position in read_positions[key]:
            # Count the read once if it overlaps any annotated interval.
            if any(True for _ in frequency_tree[key].find_overlap(position, position)):
                gene_annotation_match += 1
            gene_annotation_total += 1
    if gene_annotation_total != 0:
        gene_annotation_percent = gene_annotation_match / gene_annotation_total
    print("gene_annotation_percent = " + str(gene_annotation_percent))
    for _, position_list in read_positions.items():
        position_list.sort()
        num_chromosomes += 1
        for i in range(len(position_list) - 1):
            position_differences.append(position_list[i + 1] - position_list[i])
    try:
        std_of_pos_diff = np.std(position_differences)
        mean_of_pos_diffs = np.nanmean(position_differences)
        max_position_difference = np.amax(position_differences)
        min_position_difference = np.amin(position_differences)
    except ValueError:
        # np.amax/np.amin raise ValueError on an empty sequence.
        return None
    return [gene_annotation_percent, read_lengths_average, read_frequency,
            std_of_pos_diff, mean_of_pos_diffs, num_chromosomes,
            max_position_difference, min_position_difference,
            num_a / read_lengths_total, num_c / read_lengths_total,
            num_g / read_lengths_total, num_t / read_lengths_total]
def parseString(txt, frequency_tree):
    """Compute read-alignment summary statistics from SAM-formatted text.

    Parameters
    ----------
    txt : str
        SAM alignment records, one per line, with tab-separated fields.
    frequency_tree : mapping
        Maps a chromosome number to an interval index exposing
        ``find_overlap(start, end)`` (presumably gene annotations —
        TODO confirm against the caller).

    Returns
    -------
    list or None
        ``[gene_annotation_percent, read_lengths_average, read_frequency,
        std_of_pos_diff, mean_of_pos_diffs, num_chromosomes,
        max_position_difference, min_position_difference]``, or ``None``
        when no position differences could be collected.
    """
    line_spliter = re.compile(r'\t+')
    forward_reads = 0
    reverse_reads = 0
    unmatched_reads = 0
    read_positions = defaultdict(list)
    position_differences = []
    read_lengths_count = 0
    read_lengths_total = 0
    read_frequency = 0
    read_lengths_average = 0
    num_chromosomes = 0
    lines = re.split(r'\n+', txt)
    # The final element after splitting is the empty trailer -> skip it.
    for line in lines[:-1]:
        subline = line_spliter.split(line)
        flag = int(subline[1])
        if flag & 4:        # SAM FLAG 0x4: segment unmapped
            unmatched_reads += 1
        elif flag & 16:     # SAM FLAG 0x10: reverse strand
            reverse_reads += 1
        else:
            forward_reads += 1
        read = subline[9]   # SEQ field
        read_lengths_count += 1
        read_lengths_total += len(read)
        chromosome = getChromosome(subline[2])  # RNAME field
        if chromosome != -1:
            read_positions[chromosome].append(int(subline[3]))  # POS field
    if read_lengths_count != 0:
        read_lengths_average = read_lengths_total / read_lengths_count
    total_reads = forward_reads + reverse_reads + unmatched_reads
    if total_reads != 0:
        # Fraction of reads that mapped (forward or reverse).
        read_frequency = (forward_reads + reverse_reads) / total_reads
    gene_annotation_match = 0
    gene_annotation_total = 0
    gene_annotation_percent = 0
    for key, positions in read_positions.items():
        for position in positions:
            # Only need to know whether *any* annotation overlaps, so stop
            # at the first hit (replaces the old for/break idiom).
            if any(True for _ in frequency_tree[key].find_overlap(position, position)):
                gene_annotation_match += 1
            gene_annotation_total += 1
    if gene_annotation_total != 0:
        gene_annotation_percent = gene_annotation_match / gene_annotation_total
    print("gene_annotation_percent = " + str(gene_annotation_percent))
    for position_list in read_positions.values():
        position_list.sort()
        num_chromosomes += 1
        for i in range(len(position_list) - 1):
            position_differences.append(position_list[i + 1] - position_list[i])
    # The original wrapped the numpy calls in a bare except; the only real
    # failure mode is an empty sequence (np.amax raises), so test for it
    # explicitly instead of swallowing every error.
    if not position_differences:
        return None
    std_of_pos_diff = np.std(position_differences)
    mean_of_pos_diffs = np.nanmean(position_differences)
    max_position_difference = np.amax(position_differences)
    min_position_difference = np.amin(position_differences)
    return [gene_annotation_percent, read_lengths_average, read_frequency,
            std_of_pos_diff, mean_of_pos_diffs, num_chromosomes,
            max_position_difference, min_position_difference]
def getChromosome(str):
    """Map a SAM RNAME value (e.g. "chr7") to an integer chromosome number.

    Returns -1 for unmapped reads ("*"), the X chromosome, or any name
    whose suffix after the first three characters is not an integer
    (e.g. "chrY", "chrMT").
    """
    # NOTE(review): the parameter shadows the builtin `str`; kept as-is so
    # the public signature (including keyword calls) is unchanged.
    if str == "*" or str[3:] == 'X':
        return -1
    try:
        return int(str[3:])
    except ValueError:
        # Non-numeric suffix — was a bare except, narrowed to the only
        # exception int() raises here.
        return -1
|
6,996 | ea0a59953f2571f36e65f8f958774074b39a9ae5 | '''
'''
import numpy as np
from scipy.spatial import distance
def synonym_filter(WordVectors_npArray, WordLabels_npArray):
    '''Placeholder: intended to filter word vectors by their labels.

    Not yet implemented — currently a no-op that returns None.
    '''
    pass
def synonym_alternatives_range(WordVectors_npArray,
                               AlternativesVectorOne_npArray,
                               AlternativesVectorTwo_npArray,
                               AlternativesVectorThree_npArray,
                               AlternativesVectorFour_npArray):
    '''Spread of cosine distances from each word to its four alternatives.

    For row ``i``, computes the cosine distance from
    ``WordVectors_npArray[i]`` to row ``i`` of each alternatives array and
    returns ``max(distances) - min(distances)`` per word.

    All five arrays are assumed to share the same number of rows —
    TODO confirm with callers.

    Returns
    -------
    numpy.ndarray of shape (n_words,)
    '''
    alternatives = (AlternativesVectorOne_npArray,
                    AlternativesVectorTwo_npArray,
                    AlternativesVectorThree_npArray,
                    AlternativesVectorFour_npArray)
    result = np.zeros(len(WordVectors_npArray))
    for word_int in range(len(WordVectors_npArray)):
        # The original printed each distance; the leftover debug prints
        # have been removed and the four copy-pasted blocks collapsed.
        dists = [distance.cosine(WordVectors_npArray[word_int, :],
                                 alt[word_int, :])
                 for alt in alternatives]
        result[word_int] = max(dists) - min(dists)
    return result
def synonym_alternatives_average(WordVectors_npArray,
                                 AlternativesVectorOne_npArray,
                                 AlternativesVectorTwo_npArray,
                                 AlternativesVectorThree_npArray,
                                 AlternativesVectorFour_npArray):
    '''Mean cosine distance from each word to its four alternatives.

    For row ``i``, computes the cosine distance from
    ``WordVectors_npArray[i]`` to row ``i`` of each alternatives array and
    returns the arithmetic mean of the four distances per word.

    All five arrays are assumed to share the same number of rows —
    TODO confirm with callers.

    Returns
    -------
    numpy.ndarray of shape (n_words,)
    '''
    alternatives = (AlternativesVectorOne_npArray,
                    AlternativesVectorTwo_npArray,
                    AlternativesVectorThree_npArray,
                    AlternativesVectorFour_npArray)
    result = np.zeros(len(WordVectors_npArray))
    for word_int in range(len(WordVectors_npArray)):
        # The original printed each distance; the leftover debug prints
        # have been removed and the four copy-pasted blocks collapsed.
        dists = [distance.cosine(WordVectors_npArray[word_int, :],
                                 alt[word_int, :])
                 for alt in alternatives]
        result[word_int] = sum(dists) / 4
    return result
def nth_neighbor_filter():
    ''' Maybe we won't have this.

    Placeholder for a filter based on the n-th nearest neighbor;
    currently unimplemented (no-op returning None).
    '''
    pass
|
6,997 | 50b2b9d1edc8eaa44050e2b3b2375e966f16e10c | '''
-Medium-
*BFS*
You are given a 0-indexed integer array nums containing distinct numbers, an integer start, and an integer goal. There is an integer x that is initially set to start, and you want to perform operations on x such that it is converted to goal. You can perform the following operation repeatedly on the number x:
If 0 <= x <= 1000, then for any index i in the array (0 <= i < nums.length), you can set x to any of the following:
x + nums[i]
x - nums[i]
x ^ nums[i] (bitwise-XOR)
Note that you can use each nums[i] any number of times in any order. Operations that set x to be out of the range 0 <= x <= 1000 are valid, but no more operations can be done afterward.
Return the minimum number of operations needed to convert x = start into goal, and -1 if it is not possible.
Example 1:
Input: nums = [2,4,12], start = 2, goal = 12
Output: 2
Explanation: We can go from 2 → 14 → 12 with the following 2 operations.
- 2 + 12 = 14
- 14 - 2 = 12
Example 2:
Input: nums = [3,5,7], start = 0, goal = -4
Output: 2
Explanation: We can go from 0 → 3 → -4 with the following 2 operations.
- 0 + 3 = 3
- 3 - 7 = -4
Note that the last operation sets x out of the range 0 <= x <= 1000, which is valid.
Example 3:
Input: nums = [2,8,16], start = 0, goal = 1
Output: -1
Explanation: There is no way to convert 0 into 1.
Constraints:
1 <= nums.length <= 1000
-109 <= nums[i], goal <= 109
0 <= start <= 1000
start != goal
All the integers in nums are distinct.
'''
from typing import List
from collections import deque
class Solution:
    def minimumOperations(self, nums: List[int], start: int, goal: int) -> int:
        """Minimum number of +, -, ^ operations turning ``start`` into ``goal``.

        Level-order BFS over reachable values. Only values within
        [0, 1000] may be expanded further, but ``goal`` itself may lie
        outside that range. Returns -1 when ``goal`` is unreachable.
        """
        seen = set()
        frontier = deque([start])
        depth = 0
        while frontier:
            depth += 1
            # Expand exactly one BFS layer per pass.
            for _ in range(len(frontier)):
                value = frontier.popleft()
                for num in nums:
                    for nxt in (value + num, value - num, value ^ num):
                        if nxt == goal:
                            return depth
                        if 0 <= nxt <= 1000 and nxt not in seen:
                            seen.add(nxt)
                            frontier.append(nxt)
        return -1
if __name__ == "__main__":
    # Examples 1-3 from the problem statement; expected outputs: 2, 2, -1.
    print(Solution().minimumOperations(nums = [2,4,12], start = 2, goal = 12))
    print(Solution().minimumOperations(nums = [3,5,7], start = 0, goal = -4))
    print(Solution().minimumOperations(nums = [2,8,16], start = 0, goal = 1))
nums = [-574083075,-393928592,-508025046,942818778,355796909,515245901,40297943,106087952,112856312,-516143616,363801856,431681353,726373078,947630603,357311001,594181298,-797268217,-741740009,310972287,588107527,-535699426,56324906,-77958073,739798122,-839472160,439902753,-599749231,-378067373,-466272504,-668036170,404827976,805486978,-762507067,726001618,-761047930,574054980,365793614,112020312,612806855,-256862366,174046424,646109365,263765015,952305939,864217737,-236873371,-991807014,365730786,-908194963,-778205177,-949314048,-636570500,-883257881,316313456,-846577965,132287864,-143230736,425542510,-99852882,-845180792,-329895545,402782707,-52191127,-470380017,-788836785,-655887976,-899430590,481923982,45348738,-595401481,-470990760,-417390352,-570278840,-873871723,-905595403,276201114,-733014032,126018863,452235438,-512574658,-172220362,845468743,-743189114,597319839,-584451932,410604481,-508885990,-670396751,-765996786,345814977,-920014372,-826696704,640912714,119494504,745808962,-503060001,-677959595,-831428592,282855843,150678167,-467803553,-503929808,636431692,-235369757,-964826080,93942566,-65314422,-385277528,-379647659,601981747,-724269861,-516713072,-487487495,655771565,406499531,-943540581,-290169291,438686645,-227355533,-822612523,218329747,-800810927,-944724740,-978181517,274815523,296317841,56043572,-712672386,-374759873,86973233,-246165119,73819230,-801140338,414767806,883318746,-822063159,-705772942,-674915800,710520717,-97115365,599549847,115344568,53002314,242487774,-665998906,-986068895,-844909606,-515222297,-500827406,317865850,-50395059,522417393,51184184,241544846,-996297136,-227251827,924359619,822815774,149467545,523511343,252991991,450254984,-393459583,617410075,197030479,-234418418,-256650708,872334551,779068346,216294504,-708680875,-171498970,-970211466,-176493993,729939373,-658054782,-342680218,75508900,-377139149,392008859,121412250,-163586626,-468148273,624248706,50004864,-862378428,-849927586,33598413,-157654824,-229712613,149116317
,183820138,378717707,-995563605,777654910,511275580,-157964872,-718605034,-764316227,-225837302,-166208500,-587688677,78982205,-488693575,667205793,419165994,731543316,97551954,-387317666,-580873271,533504431,-31624036,-356035140,-849089082,-767376392,-625237600,940717947,-337709497,915255567,727274007,-879463448,-363148174,-854892492,110472344,-466194659,-146843198,-454944217,-365338018,-349424052,994474446,-554968068,-883734951,-697723265,583756420,-5696410,-413731452,-278706136,-399245668,83345207,-227231270,618384545,846514423,-556667092,590460194,-686116067,-509669269,-510065093,77094171,270317951,166095128,-918526061,-766370855,-20861321,478791777,663673443,-152055285,224745414,123998803,66824877,-85117337,212126175,-718523523,615359230,-212148589,620733736,-81197397,51814471,709312024,562145805,-770811828,321230393,-611636320,-421337549,-804527290,-416739656,-886764000,170695026,414273830,-449987380,-56782953,772039002,-961265403,-896009751,-524231358,497253209,-507048459,-308522246,-508249054,-53240581,-241704483,-974133571,232897679,-152365934,-861310248,-305766289,340680726,844612779,-180227470,40798478,729446447,395975250,-142447074,-606021375,47555730,294446347,452346091,-409427076,-845574381,-838995437,45787728,714700474,-315824001,694717388,502723269,119244099,-538412679,-207297135,-189078560,-812610469,-350061253,-73975237,-119323509,791863263,741180208,740488891,-475394166,-191585617,-441527154,767292531,201222965,-150196525,588513813,245328283,396662663,100705864,126789247,487161165,-460512081,-469521559,-998848254,-917609155,314537168,418002454,-926920818,-628671538,179971032,-105401559,449618919,823404672,178494651,-773108884,10686795,-506642993,-60172121,-510142552,651623281,-163851428,158562600,-782456228,-336697076,-571952851,849878818,-456510759,-65997243,-506043404,-558981572,186946604,124948039,954065944,707437320,-224056616,-319237038,512138196,742466011,-49725596,-784781640,-753413026,-331602365,-246166733,-658650959,-4888181,-547553549,78
6689548,-866846384,-212028209,-98029403,-325422497,-409855095,320083382,-491251215,-471713326,890922019,-766590943,-481641953,-227197451,-709166930,-965945544,407688175,-78385698,-372800469,389036825,79885300,-858488452,-390177477,233839191,-518116358,420408256,872470025,241770824,-106901417,-328631191,548580365,-88408815,-647601013,658880218,-870455388,277154380,370022702,-381519264,-800726224,183685380,208169777,925905330,732494840,251754641,-681988029,593628349,153852085,353590607,242118102,-788094641,-242801844,474214244,579450364,580046580,-269927114,249739292,295331955,-544556236,-814569172,808895922,707421114,305101587,621173158,-248896453,988552702,-375313331,-87289858,-796466539,-529411285,-197315984,33984203,-122839651,-90735568,277265491,762059774,-628018119,-406508643,-856856769,364613737,59319066,614382155,-614620718,-133957131,-394985422,-29943491,154443077,-72727846,392096990,562681453,364248049,-156700958,717335155,-343408748,77301840,-155372684,-432114609,414752267,-485732822,876096548,842614035,-614245110,-872219121,291509502,334817026,214330487,405297459,-449582485,789314834,936409758,452350380,-146649749,898255045,116506422,671728835,280507922,-189039799,-565803074,-439924663,-14345985,-98428526,57303809,424685389,-84977856,-9251973,998935249,229402894,-405424548,448394272,182149207,-728030940,347577568,567511928,-27655302,400866779,-509269521,-580602375,405956020,-855173313,258091129,909162200,-315251598,-236890006,-531780379,342955474,-65890269,-111521851,-139906773,34939329,927781348,300458386,-603518159,341287362,-234266006,634183737,454833275,79631354,-954691672,102295826,688738167,-958428411,-293858940,480440548,590037773,-365477625,-425165732,170388756,164258145,-507355122,44132561,982798160,-101120201,-920959602,-239250887,534862084,-834736952,-123162323,389682556,656996523,864481760,381156936,129520066,-995551618,106129054,-471580461,856850511,653020333,531769579,-190375506,-992983956,73867968,-931909584,403329114,-945055546,627782991,-6
66011011,214665550,505169020,210703185,-591690068,11218620,790987020,561646751,-33552011,-407054835,-850936697,-838201457,-878394038,-759131062,-857347819,531582062,941614352,-743754869,650338718,178603580,-834368178,-976933957,138667533,746471721,551579035,-173400777,-1191455,320121832,-756997945,402594806,934711944,970489131,-193223639,276816990,842959026,-799673669,-367385466,681433973,468892554,-455199860,393993101,905435993,218314965,284795080,913357885,-652530417,743455659,869345718,808902357,829820413,7206928,544900359,225903242,-507688526,750219353,-663810717,-643969173,-269151675,348252329,-144351998,693995296,-692546103,869432378,650161259,568234384,710782517,179157604,-446849233,-922615096,-61183498,30945194,819052356,467911324,119876349,46908453,-420671619,344944591,889080726,-619477633,174882730,553799129,-941691933,146036558,-116064711,222282163,-272996845,-147041859,-381977096,-786757040,229096334,712541239,326039628,-952490563,-362214129,-680530864,421358212,-472290821,-331398150,-42297937,-393141325,-467541333,655524006,452908624,-626562356,-758303565,338224482,312047704,599445442,-328430584,259549134,838272865,-755896597,-151000710,607787908,11870257,-680877184,528161590,769242561,-447486537,-127579653,135915595,-271181270,12536315,693445551,900639800,-692327759,-671179999,977783490,935798407,659688020,-478438023,-852131846,-900332354,-71029072,888095095,924175448,430392829,391195112,399460998,-173259008,-168543477,-495967896,-697314804,591126097,301126906,946273416,-772817341,-996445410,466876435,-92937212,-226599286,43831927,-588596503,-55759661,212885530,-805455693,572269060,415773175,-320900489,-651775079,5276363,91615150,-882588415,502210147,-401039810,26713405,-723806893,125439289,472777644,869504248,967552969,-268043646,-146710780,-511973692,-803204681,-146827180,-453201623,-878534466,631307563,507752930,-63646026,-348120807,222898965,-410732708,617953050,-478244422,877782569,-507956686,-196516478,-477074335,329039585,-480651334,-890030740,4
61391919,-977815738,-943937849,321402466,-588396975,-945139052,871313567,-484830305,365305963,891985414,466048577,880607400,-245705654,359506342,-612177301,840415132,693541406,707348310,971762025,-871678269,897143169,625100531,743908163,-315815019,-63211252,-962051459,510469141,566817231,-186207711,309838979,101194721,-127111899,-109107404,-702499174,918781433,34041307,927374088,-67369303,-680339659,202481166,-218771120,329951816,-280782626,-423403505,619779171,-567310903,-660420942,756801677,996208091,822990010,940351540,1331227,382201579,891956260,-894584436,346600029,805733487,-691767750,859030444,1]
print(Solution().minimumOperations(nums, 938, 80)) |
6,998 | 8240e6483f47abbe12e7bef02493bd147ad3fec6 | from flask import Flask, render_template, flash, request
import pandas as pd
from wtforms import Form, TextField, TextAreaField, validators, StringField, SubmitField
# Per-district case counts; expected to contain 'District' and
# 'count(district)' columns -- TODO confirm against data1.csv.
df = pd.read_csv('data1.csv')
# Startup sanity check that the 'District' column is present and queryable.
try:
    row = df[df['District'] == 'Delhi'].index[0]
except (KeyError, IndexError):
    # KeyError: 'District' column missing; IndexError: no matching row.
    # (Was a bare except; message also fixed from "now city found".)
    print("no city found")
DEBUG = True
app = Flask(__name__)
app.config.from_object(__name__)
class ReusableForm(FlaskForm):
    """Single-field form used to submit a district name."""
    # Fixed: the class header was missing its trailing colon (SyntaxError).
    # NOTE(review): FlaskForm is never imported in this file; running this
    # requires `from flask_wtf import FlaskForm` (flask-wtf package).
    name = StringField('name', validators=[validators.required()])
    submit = SubmitField('Enter')
@app.route("/", methods=['GET', 'POST'])
def hello():
    """Render the district-lookup form; on submit, flash the case count
    for the requested district (-1 when the district is unknown)."""
    form = ReusableForm( )
    if form.is_submitted():
        # Districts in the CSV are capitalized, so normalize the input.
        city = request.form['name'].capitalize()
        try:
            row = df[df['District'] == city].index[0]
            cases = df.at[row, 'count(district)']
        except (KeyError, IndexError):
            # IndexError: no row matches `city`; KeyError: column/label
            # missing. (Was a bare except; debug prints removed.)
            cases = -1
        flash("cases are " + str(cases))
    return render_template('data.html', form=form)
if __name__ == "__main__":
app.run()
|
6,999 | b8f9633ab3110d00b2f0b82c78ad047fca0d3eee | import discord
from app.vars.client import client
from app.helpers import delete, getUser, getGuild
@client.command()
async def inviteInfo(ctx, link):
    """Look up a Discord invite link and post an embed summarizing the
    target guild (and the inviter, when the invite exposes one).

    :param ctx: command invocation context
    :param link: invite URL or code to resolve
    """
    # Best-effort removal of the triggering message; ignore failures
    # (e.g. missing permissions).
    try:
        await delete.byContext(ctx)
    except:
        pass
    linkData = await client.fetch_invite(url=link)
    # Resolve the inviter only when the invite carries one; `inviterData`
    # is used further down under the same condition.
    if (linkData.inviter):
        inviterData = await getUser.byID(linkData.inviter.id)
    # Prefer the fully resolved guild object; fall back to the partial
    # guild attached to the invite when the lookup fails.
    try:
        guildData = await getGuild.byID(linkData.guild.id)
    except:
        guildData = linkData.guild
    embed = discord.Embed(title="Invite information", colour=discord.Color.purple())
    embed.set_thumbnail(url=guildData.icon_url)
    # (name, value, inline) triples; values are wrapped in code fences for
    # monospace rendering in the embed.
    fields = [
        ("ID", f"```{guildData.id}```", True),
        ("Name::", f"```{guildData.name}```", True),
        ("Description", f"```{guildData.description}```", True),
        ("Created in:", f'```{guildData.created_at.strftime("%d/%m/%Y")}```', True),
        ("Member Count:", f"```{int(linkData.approximate_member_count)}```", True),
        ("Link", f"```{linkData.url}```", True),
        ("\u200b", "\u200b", True),
    ]
    for name, value, inline in fields:
        embed.add_field(name=name, value=value, inline=inline)
    # Inviter details are appended only when the invite exposes an inviter
    # (same condition that bound `inviterData` above).
    if (linkData.inviter):
        embed.add_field(name="Inviter ID:", value=f"```{inviterData.id}```", inline=True)
        embed.add_field(name="Inviter:", value=f"```{inviterData.name + '#' + inviterData.discriminator}```", inline=True)
    embed.set_footer(text='Selfium (◔‿◔)')
    await ctx.send(embed=embed)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.