content stringlengths 1 1.05M | input_ids listlengths 1 883k | ratio_char_token float64 1 22.9 | token_count int64 1 883k |
|---|---|---|---|
from django import forms
from django.core.exceptions import ValidationError
from secret.models import Secret
| [
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
3254,
24765,
12331,
198,
198,
6738,
3200,
13,
27530,
1330,
3943,
628
] | 4.111111 | 27 |
#Subistitua xxxxxx pelo seu token!! | [
2,
7004,
396,
270,
6413,
2124,
12343,
87,
16176,
78,
384,
84,
11241,
3228
] | 2.5 | 14 |
import xgboost as xgb
import datetime
import real_estate_analysis.models.functions as func
import real_estate_analysis.models.xgb_model.utils as XGB_utils
import real_estate_analysis.Model.utils as model_utils
if __name__ == '__main__':
main()
| [
11748,
2124,
70,
39521,
355,
2124,
22296,
198,
11748,
4818,
8079,
198,
198,
11748,
1103,
62,
44146,
62,
20930,
13,
27530,
13,
12543,
2733,
355,
25439,
198,
11748,
1103,
62,
44146,
62,
20930,
13,
27530,
13,
87,
22296,
62,
19849,
13,
26... | 3.073171 | 82 |
from datetime import date
from os import environ
PARAMS_LESSON_PLAN = [
(
date(2018, 9, 4),
[
{"IdPrzedmiot": 173, "IdPracownik": 99},
{"IdPrzedmiot": 123, "IdPracownik": 101},
{"IdPrzedmiot": 172, "IdPracownik": 92},
{"IdPrzedmiot": 189, "IdPracownik": 91},
{"IdPrzedmiot": 119, "IdPracownik": 100},
{"IdPrzedmiot": 175, "IdPracownik": 97},
{"IdPrzedmiot": 118, "IdPracownik": 89},
],
)
]
PARAMS_TESTS = [
(date(2018, 10, 5), [{"Id": 661, "IdPrzedmiot": 177, "IdPracownik": 87}]),
(
date(2018, 10, 23),
[
{"Id": 798, "IdPrzedmiot": 173, "IdPracownik": 99},
{"Id": 838, "IdPrzedmiot": 172, "IdPracownik": 92},
],
),
]
PARAMS_HOMEWORKS = [
(
date(2018, 10, 23),
[
{"Id": 305, "IdPracownik": 100, "IdPrzedmiot": 119},
{"Id": 306, "IdPracownik": 100, "IdPrzedmiot": 119},
],
)
]
| [
6738,
4818,
8079,
1330,
3128,
198,
6738,
28686,
1330,
551,
2268,
198,
198,
27082,
40834,
62,
48481,
1340,
62,
6489,
1565,
796,
685,
198,
220,
220,
220,
357,
198,
220,
220,
220,
220,
220,
220,
220,
3128,
7,
7908,
11,
860,
11,
604,
... | 1.76 | 575 |
# vim:fileencoding=utf8
from distutils.core import setup
import os
README = os.path.join(os.path.dirname(__file__),'PKG-INFO')
long_description = open(README).read() + "\n"
setup(name="vbcode",
version='0.2.0',
py_modules=['vbcode'],
description="Variable byte codes",
author="utahta",
author_email = "labs.ninxit@gmail.com",
long_description=long_description,
classifiers=["Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Python Software Foundation License",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Natural Language :: Japanese"
],
url="https://github.com/utahta/pyvbcode",
license="MIT"
)
| [
2,
43907,
25,
7753,
12685,
7656,
28,
40477,
23,
198,
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
11748,
28686,
198,
15675,
11682,
796,
28686,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
828,
6,
40492... | 2.307292 | 384 |
"""
Extension desined to test bot functionality, just for testing
"""
# Library includes
from discord.ext import commands
# App includes
from app.client import BotClient
def setup(client):
"""
Setup function for testing_cog extension
Args:
client (app.client.BotClient): Client that connects to discord API
"""
client.add_cog(TestCog(client))
| [
37811,
198,
11627,
3004,
748,
1389,
284,
1332,
10214,
11244,
11,
655,
329,
4856,
198,
37811,
198,
2,
10074,
3407,
198,
198,
6738,
36446,
13,
2302,
1330,
9729,
628,
198,
2,
2034,
3407,
198,
198,
6738,
598,
13,
16366,
1330,
18579,
11792... | 3.158333 | 120 |
import urllib3
from bs4 import BeautifulSoup
import shutil
import re
import os
if __name__ == '__main__':
#https://www.minsal.cl/nuevo-coronavirus-2019-ncov/informe-epidemiologico-covid-19/
obtenerInformeEpidemiologico('https://www.gob.cl/coronavirus/cifrasoficiales/', '../input/InformeEpidemiologico/')
obtenerReporteDiario('https://www.gob.cl/coronavirus/cifrasoficiales/', '../input/ReporteDiario/')
obtenerSituacionCOVID19('http://epi.minsal.cl/informes-covid-19/', '../input/InformeSituacionCOVID19/')
| [
11748,
2956,
297,
571,
18,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
4423,
346,
198,
11748,
302,
198,
11748,
28686,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1303,
5450,
... | 2.346667 | 225 |
## This script set up classes for 4 bus and 2 bus environment
import pandapower as pp
import pandapower.networks as nw
import pandapower.plotting as plot
import enlopy as el
import numpy as np
import pandas as pd
import pickle
import copy
import math
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import pandapower.control as ct
import statistics as stat
from FACTScontrol import SeriesFACTS, ShuntFACTS
pd.options.display.float_format = '{:.4g}'.format
### This 4-bus class is not complete as of handover to ABB PG and Magnus Tarle.
# The 2-bus class further below is however complete.
#The class for the 2-bus test network used in the Master Thesis by Joakim Oldeen & Vishnu Sharma.
#The class also include several methods used by different RL algorithms such as taking action, calculating reward, recieving states and more
##Load Profile data has been pickled already, do not run this function for now
def createLoadProfile():
ML = (np.cos(2 * np.pi/12 * np.linspace(0,11,12)) * 50 + 100 ) * 1000 # monthly load
ML = el.make_timeseries(ML) #convenience wrapper around pd.DataFrame with pd.DateTimeindex
#print(ML)
DWL = el.gen_daily_stoch_el() #daily load working
DNWL = el.gen_daily_stoch_el() #daily load non working
#print(sum(DNWL))
Weight = .60 # i.e energy will be split 55% in working day 45% non working day
Load1 = el.gen_load_from_daily_monthly(ML, DWL, DNWL, Weight)
Load1.name = 'L1'
Load1=Load1.round();
#print(Load1)
disag_profile = np.random.rand(60)
JanLoadEveryMinute=el.generate.disag_upsample(Load1[0:744],disag_profile, to_offset='min');
JanLoadEvery5mins=[];
l=0;
for i in range(0,JanLoadEveryMinute.shape[0]):
l=l+JanLoadEveryMinute[i];
if np.mod(i+1,5) == 0:
JanLoadEvery5mins.append(l);
l=0;
windDataDF = pd.read_excel('Data/WindEnergyData.xlsx');
generatorValuesEvery5mins=[];
for i in range(1,windDataDF.shape[0]):
randomValue=np.random.choice(100, 1)[0]
randomValue_prob = np.random.random();
if randomValue > windDataDF.iloc[i]['DE_50hertz_wind_generation_actual'] or randomValue_prob < 0.4:
generatorValuesEvery5mins.append(windDataDF.iloc[i]['DE_50hertz_wind_generation_actual'])
generatorValuesEvery5mins.append(windDataDF.iloc[i]['DE_50hertz_wind_generation_actual'])
else :
generatorValuesEvery5mins.append(windDataDF.iloc[i]['DE_50hertz_wind_generation_actual'] - randomValue)
generatorValuesEvery5mins.append(windDataDF.iloc[i]['DE_50hertz_wind_generation_actual'] + randomValue)
generatorValuesEvery5mins.append(windDataDF.iloc[i]['DE_50hertz_wind_generation_actual'])
print(len(generatorValuesEvery5mins))
print(len(JanLoadEvery5mins))
pickle.dump(generatorValuesEvery5mins, open("Data/generatorValuesEvery5mins.pkl", "wb"))
pickle.dump(JanLoadEvery5mins, open("Data/JanLoadEvery5mins.pkl", "wb"))
def trainTestSplit():
with open('Data/JanLoadEvery5mins.pkl', 'rb') as pickle_file:
loadProfile = pickle.load(pickle_file)
numOFTrainingIndices = int(np.round(0.8*len(loadProfile)))
trainIndices=np.random.choice(range(0,len(loadProfile)),numOFTrainingIndices,replace=False)
trainIndicesSet=set(trainIndices)
testIndices=[x for x in range(0,len(loadProfile)) if x not in trainIndicesSet]
pickle.dump(trainIndices, open("Data/trainIndices.pkl", "wb"))
pickle.dump(testIndices, open("Data/testIndices.pkl", "wb"))
#print(len(loadProfile))
#print(len(trainIndicesSet))
#print(len(trainIndices))
#print(len(testIndices))
#createLoadProfile()
#trainTestSplit()
| [
2235,
770,
4226,
900,
510,
6097,
329,
604,
1323,
290,
362,
1323,
2858,
198,
11748,
19798,
499,
789,
355,
9788,
198,
11748,
19798,
499,
789,
13,
3262,
5225,
355,
299,
86,
198,
11748,
19798,
499,
789,
13,
29487,
889,
355,
7110,
198,
1... | 2.600141 | 1,423 |
import json
from unittest import TestCase
from flask import Flask
from flask_controllers.GameServerController import GameServerController
from flask_helpers.VersionHelpers import VersionHelpers
from python_cowbull_server import app
from python_cowbull_server.Configurator import Configurator
from flask_helpers.ErrorHandler import ErrorHandler
from Persistence.PersistenceEngine import PersistenceEngine
| [
11748,
33918,
198,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
42903,
1330,
46947,
198,
6738,
42903,
62,
3642,
36667,
13,
8777,
10697,
22130,
1330,
3776,
10697,
22130,
198,
6738,
42903,
62,
16794,
364,
13,
14815,
12621,
19276,... | 4.273684 | 95 |
import logging
import time
from datetime import datetime, timedelta
from itertools import product
from typing import List
import requests
from python_flights.itinerary import Itinerary
from python_flights.pods import Country, Currency, Airport, Place, Agent, Carrier, Direction, Trip, Segment, Price, \
CabinClass, SortType, SortOrder
PARAM_DATE_FORMATTING = "%Y-%m-%d"
JSON_DATE_FORMATTING = "%Y-%m-%dT%H:%M:%S"
API_ADDRESS = "https://skyscanner-skyscanner-flight-search-v1.p.rapidapi.com/apiservices"
LOCALES = [
'de-DE', 'el-GR', 'en-GB', 'en-US', 'es-ES', 'es-MX', 'et-EE', 'fi-FI', 'fr-FR', 'hr-HR', 'hu-HU', 'id-ID', 'it-IT',
'ja-JP', 'ko-KR', 'lt-LT', 'lv-LV', 'ms-MY', 'nb-NO', 'nl-NL', 'pl-PL', 'pt-BR', 'pt-PT', 'ro-RO', 'ru-RU', 'sk-SK',
'sv-SE', 'th-TH', 'tr-TR', 'uk-UA', 'vi-VN', 'zh-CN', 'zh-HK', 'zh-SG', 'zh-TW'
]
| [
11748,
18931,
198,
11748,
640,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
6738,
340,
861,
10141,
1330,
1720,
198,
6738,
19720,
1330,
7343,
198,
198,
11748,
7007,
198,
198,
6738,
21015,
62,
2704,
2337,
13,
270,
7274,... | 2.244737 | 380 |
#!/usr/bin/python
# coding: utf-8
# Copyright 2018 AstroLab Software
# Author: Chris Arnault
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Dataset monitor
This is the client part.
The monitor.py script has to be present on the <host> machine
where the minimal HTML server has been activated as
> python server.py
Then, call in a web navigator the URL
http://<host>:24701/monitor.py
"""
import cgi
from pylivy.session import *
from pylivy.client import *
from variables import HTMLVariableSet
# ======================================================
LIVY_URL = "http://vm-75222.lal.in2p3.fr:21111"
form = cgi.FieldStorage()
print("Content-type: text/html; charset=utf-8\n")
client = LivyClient(LIVY_URL)
# init data
html = HTMLVariableSet(["started",
"simul",
"change_simul",
"livy_session",
"waiting_session",
"waiting_statement",
"livy_statement",
"kill_session"],
["new_statement", "result"])
url = "/monitor.py"
method = "POST"
# ======================================================
def html_header():
"""
Global & common html header. SHould be used everywhere
Returns:
--------
out: str
"""
return """
<!DOCTYPE html>
<head>
<link rel="stylesheet" type="text/css" href="css/finkstyle.css">
<title>Mon programme test</title>
</head>
<body>
<div class="hero-image">
<div class="hero-text">
<h1 style="font-size:50px">Fink</h1>
<h3>Alert dataset monitor</h3>
<div class="topnav"> """
def html_trailer():
"""
Global & common html trailer. SHould be used everywhere
Returns:
--------
out: str
"""
return """
</div>
<p>© AstroLab Software 2018-2019</p>
</div>
</div>
</body>
</html>
"""
# Read all HTML POST variables
html.read(form)
if not html.started.is_set():
# Handle the very first launch to set the default
html.simul.set(1)
html.started.set(1)
# ======================================================
# the start of the WEB page
# ======================================================
out = html_header()
out = html_manage_simulation_mode(out)
# out += html.debug()
# Manage Livy session & Spark statements
out += """<form action="{}" method="{}">""".format(url, method)
if html.simul.is_set():
if html.waiting_session.above(5):
print("<br> session is now idle")
html.waiting_session.reset()
html.waiting_statement.reset()
html.livy_statement.reset()
html.livy_session.set(1)
if html.waiting_statement.above(5):
print("<br> statement just finished")
html.waiting_session.reset()
html.waiting_statement.reset()
html.livy_statement.incr()
# debugging
# print("<br>")
# print("Keys = [", ",".join(form.keys()), "]")
# print(html.debug())
"""
Command interface
- select Livy simulation
- open session & wait for idle
- start statement & wait for completion
"""
if html.kill_session.is_set():
session_id = html.livy_session.value
try:
client.delete_session(session_id)
except:
print("error killing session ", session_id)
html.livy_session.reset()
html.waiting_session.reset()
html.kill_session.reset()
if html.livy_session.is_set():
# statement management
if not html.waiting_statement.is_set():
out += """<br>session is idle: we may start a statement<br>"""
html.waiting_statement.set(0)
out += html.to_form()
out += """
Enter a Spark statement
<input type="text" name="new_statement" value="{}" />
<input type="text" name="result" value="{}" />
<button type="submit">Run</button>
""".format(html.new_statement.value, html.result.value)
else:
out += """<br>session is idle, we do wait a statement to complete<br>"""
html.waiting_statement.incr()
s = client.get_session(html.livy_session.value)
if not html.livy_statement.is_set():
st = client.create_statement(s.session_id, html.new_statement.value)
html.livy_statement.set(st.statement_id)
else:
st = client.get_statement(s.session_id, html.livy_statement.value)
if st.state == StatementState.AVAILABLE:
html.waiting_statement.reset()
html.result.set(st.output.text)
print("<br>", html.result.value)
html.livy_statement.reset()
out += html.to_form()
out += """<button type="submit">waiting statement to complete</button>"""
else:
# session management
if not html.waiting_session.is_set():
out += """<br>No session<br>"""
html.waiting_session.set(0)
# print(html.waiting_session.debug())
html.waiting_statement.reset()
out += html.to_form()
out += """<button type="submit">Open a session</button>"""
else:
# we have requested a new session thus waiting_session is set
if html.simul.is_set():
html.waiting_session.incr()
else:
if not html.livy_session.is_set():
print("Create a session ")
s = client.create_session(SessionKind.PYSPARK)
print("<br> session {} <br>".format(s.session_id))
html.livy_session.set(s.session_id)
# we test if the session is already idle
s = client.get_session(html.livy_session.value)
if s.state == SessionState.IDLE:
print("<br> session is now idle")
html.waiting_session.reset()
html.waiting_statement.reset()
html.livy_statement.reset()
html.new_statement.reset()
out += """<br>Waiting session to become idle<br>"""
out += html.to_form()
out += """<button type="submit">waiting session</button>"""
out += """</form>"""
if html.livy_session.is_set():
out += """<form action="{}" method="{}">""".format(url, method)
html.kill_session.set(1)
out += html.to_form()
out += """
<button type="submit">Delete the session</button>
</form>
"""
out += html_trailer()
print(out)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
15069,
2864,
35167,
17822,
10442,
198,
2,
6434,
25,
5180,
16644,
1721,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,... | 2.393887 | 2,879 |
import click
import json
import os
import re
from tqdm import tqdm
from utils.imutil import *
import numpy as np
import math
PROCESSED_SCAN_FOLDER = 'processedScan'
# rows, cols = camHeight, camWidth
# confidence.shape: rows, cols (float)
# cam_binary_map.shape: rows, cols, 2 (int)
# cam_xyz_map.shape: rows, cols, 3 (float)
# cam_index: int
def pack_maps(confidence, cam_binary_map, cam_xyz_map, cam_index, proj_size):
""" Pack camera confidence, cam binary projector map and camera xyz map """
# prepare confidence_flat
confidence_flat = confidence.reshape(-1, 1)
# prepare cam_binary_mapFlat
cam_binary_map_flat = cam_binary_map.reshape((-1, 2))
overflow_fix(cam_binary_map_flat, proj_size)
cam_binary_map_flat = np.ravel_multi_index(cam_binary_map_flat.transpose()[
::-1], (proj_size[1], proj_size[0])).reshape(-1, 1)
# prepare cam_xyz_map_flat
# scale = len(cam_binary_map) / len(cam_xyz_map)
cam_xyz_map_flat = cam_xyz_map.reshape(-1, 3)
# DEBUG STUFF
# Pack camera index into array
cam_index_flat = np.full((cam_xyz_map_flat.shape[0], 1), cam_index)
# Cam Pixel Index
cam_pixel_index = np.arange(cam_xyz_map_flat.shape[0])[:, np.newaxis]
# stack and return everything in shape: (rows x cols), 7
return np.hstack((confidence_flat, cam_binary_map_flat, cam_xyz_map_flat, cam_index_flat, cam_pixel_index))
def unpack_maps(packed, proj_size):
""" Unpack projector xyz map and projector confidence """
proj_width = proj_size[0]
proj_height = proj_size[1]
projector_xyz = np.zeros((proj_height, proj_width, 3))
projector_confidence = np.zeros((proj_height, proj_width, 1))
cam_index = np.full((proj_height, proj_width, 1), -1)
cam_pixel_index = np.zeros((proj_height, proj_width, 1))
# assign xyzMap values use proMapFlat indices
# packed[:,0] contains confidence value
# packed[:,1] contains binary code (projector pixel coordinate)
# packed[:,2:5] contains xyz coordinate
# packed[:,5] contains camera index (debug)
# packed[:,6] contains camera pixel index (debug)
proMapFlat = packed[:, 1].astype(np.int32)
projector_confidence.reshape(-1)[proMapFlat] = packed[:, 0]
projector_xyz.reshape(-1, 3)[proMapFlat] = packed[:, 2:5]
# DEBUG STUFF
cam_index.reshape(-1)[proMapFlat] = packed[:, 5]
cam_pixel_index.reshape(-1)[proMapFlat] = packed[:, 6].astype(np.uint64)
return projector_xyz, projector_confidence, cam_index, cam_pixel_index
| [
11748,
3904,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
302,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
6738,
3384,
4487,
13,
320,
22602,
1330,
1635,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
198,
480... | 2.491211 | 1,024 |
# Copyright 2015 Planet Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import simplejson as json
import base64
| [
2,
15069,
1853,
11397,
23500,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
407,
198,
2,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
74... | 3.763314 | 169 |
from django.urls import path, include
from . import views
urlpatterns = [
path('', views.home, name="home"),
path('signup', views.signup, name="signup"),
path('activate/<uidb64>/<token>/', views.activate_account, name='activate'),
path('sell-book', views.sell_book, name='sell_book'),
path('book/<int:id>/detail', views.book_detail, name='book_detail'),
path('add-balance', views.add_balance, name='add_balance'),
path('books-for-sale', views.books_for_sale, name='books_for_sale'),
path('purchased-books', views.purchased_books, name='purchased_books'),
path('profile/<str:username>', views.profile, name='profile'),
path('cart-items', views.cart_items, name='cart_items'),
path('add-items-to-cart/<int:book_item>', views.add_items_to_cart, name="add_items_to_cart"),
path('cancel-items', views.cancel_items, name="cancel_items"),
path('checkout', views.checkout, name='checkout')
] | [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
2291,
198,
6738,
764,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
3256,
5009,
13,
11195,
11,
1438,
2625,
11195,
12340,
198,
220,
220,
220,
3108,
... | 2.713043 | 345 |
import pytest
from pathlib import Path
import json
if __name__ == "__main__":
pytest.main([__file__])
| [
11748,
12972,
9288,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
33918,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
12972,
9288,
13,
12417,
26933,
834,
7753,
834,
12962,
628,
628,
628
] | 2.846154 | 39 |
import boto3
import base64
import hmac
import hashlib
from .automl import AWS_ACC_KEY_ID, AWS_SEC_ACC_KEY, USER_POOL_ID, CLIENT_ID, CLIENT_SECRET, AWS_REGION_NAME
client_cognito = boto3.client('cognito-idp',
aws_access_key_id=AWS_ACC_KEY_ID,
aws_secret_access_key=AWS_SEC_ACC_KEY,
region_name=AWS_REGION_NAME)
| [
11748,
275,
2069,
18,
198,
11748,
2779,
2414,
198,
11748,
289,
20285,
198,
11748,
12234,
8019,
198,
6738,
764,
2306,
296,
75,
1330,
30865,
62,
26861,
62,
20373,
62,
2389,
11,
30865,
62,
23683,
62,
26861,
62,
20373,
11,
1294,
1137,
62,... | 2.18543 | 151 |
#!/usr/bin/env python3
from LIPM_with_dsupport import *
import random
import subprocess
from mono_define import *
from nav_msgs.msg import Odometry
from std_srvs.srv import Empty
i = 0
# while True:
# # initiate_time = random.choice([x / 100 for x in range(40, 71)])
# # T_dbl = random.choice([0.09, 0.1])
# # zc = random.choice([x / 100 for x in range(40, 71)])
# # i+=1
# # print(i)
# print(walk_test(0.48, 0.08, 0.41,0.05))
# # print(walk_test(initiate_time,T_dbl, zc))
# #
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
406,
4061,
44,
62,
4480,
62,
67,
11284,
1330,
1635,
198,
11748,
4738,
198,
11748,
850,
14681,
198,
6738,
33361,
62,
13086,
1330,
1635,
198,
6738,
6812,
62,
907,
14542,
... | 2.232456 | 228 |
from BCBio import GFF
from Bio import SeqIO
import csv
import sys
in_gff_file = sys.argv[1]
out_file = sys.argv[2]
#Add annotations to sequences
print("Parsing .gff file...")
in_handle = open(in_gff_file)
limit_info = dict(gff_type = ["mRNA"])
protnames = []
protanno = []
for rec in GFF.parse(in_handle, limit_info = limit_info, target_lines = 1):
feat = rec.features[0]
protnames.append(feat.qualifiers["Name"][0])
protanno.append(feat.qualifiers["Note"][0])
in_handle.close()
#Write lists of sequences and annotations to .tsv file
print("Writing annotations to %s ..." % out_file)
with open(out_file, "w") as f:
for protname, protan in zip(protnames, protanno):
entry = [protname, protan]
f.write("\t".join(entry) + "\n")
f.close()
print("Extraction complete.")
| [
6738,
11843,
42787,
1330,
402,
5777,
198,
6738,
16024,
1330,
1001,
80,
9399,
198,
11748,
269,
21370,
198,
11748,
25064,
198,
198,
259,
62,
70,
487,
62,
7753,
796,
25064,
13,
853,
85,
58,
16,
60,
198,
448,
62,
7753,
796,
25064,
13,
... | 2.647458 | 295 |
"""
In an alien language, surprisingly they also use english lowercase letters, but possibly in a different order. The order of the alphabet is some permutation of lowercase letters.
Given a sequence of words written in the alien language, and the order of the alphabet, return true if and only if the given words are sorted lexicographicaly in this alien language.
Example 1:
Input: words = ["hello","luther"], order = "hlabcdefgijkmnopqrstuvwxyz"
Output: true
Explanation: As 'h' comes before 'l' in this language, then the sequence is sorted.
"""
words1=["hello","luther"]
order1="hlabcdefgijkmnopqrstuvwxyz"
print(Solution().isAlienSorted(words1, order1))
words2=["word","world","row"]
order2="worldabcefghijkmnpqstuvxyz"
print(Solution().isAlienSorted(words2, order2))
words2=["apple","app"]
order2="abcdefghijklmnopqrstuvwxyz"
print(Solution().isAlienSorted(words2, order2)) | [
37811,
198,
818,
281,
8756,
3303,
11,
12362,
484,
635,
779,
46932,
2793,
7442,
7475,
11,
475,
5457,
287,
257,
1180,
1502,
13,
383,
1502,
286,
262,
24830,
318,
617,
9943,
7094,
286,
2793,
7442,
7475,
13,
198,
198,
15056,
257,
8379,
2... | 3.163701 | 281 |
import urizen.core
from urizen.core import *
import urizen.generators
from urizen.generators import *
import urizen.visualizers
from urizen.visualizers import *
| [
11748,
2956,
33977,
13,
7295,
198,
6738,
2956,
33977,
13,
7295,
1330,
1635,
198,
198,
11748,
2956,
33977,
13,
8612,
2024,
198,
6738,
2956,
33977,
13,
8612,
2024,
1330,
1635,
198,
198,
11748,
2956,
33977,
13,
41464,
11341,
198,
6738,
295... | 3.395833 | 48 |
import pandas as pd
from collections import Counter
import re
if __name__=='__main__':
Mystats(directory)
| [
11748,
19798,
292,
355,
279,
67,
198,
6738,
17268,
1330,
15034,
198,
11748,
302,
628,
628,
198,
198,
361,
11593,
3672,
834,
855,
6,
834,
12417,
834,
10354,
198,
220,
220,
220,
32347,
1381,
7,
34945,
8,
198
] | 3.026316 | 38 |
import codecs
import re
from collections import namedtuple
import unittest
from typing import Collection, Iterable, Sequence, Tuple, Type
import io
from pathlib import Path
from styler import decode
import json
import logging
from itertools import islice
logger = logging.getLogger(__name__)
CSS_PARSING_TESTS_DIR = Path(__file__).parent / "css-parsing-tests"
JSONCase = namedtuple("JSONCase", "case, expectation")
def pairs(iterable):
"s -> (s0,s1), (s2,s3), (s4, s5), ..."
return zip(
islice(iterable, 0, None, 2),
islice(iterable, 1, None, 2),
)
| [
11748,
40481,
82,
198,
11748,
302,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
11748,
555,
715,
395,
198,
198,
6738,
19720,
1330,
12251,
11,
40806,
540,
11,
45835,
11,
309,
29291,
11,
5994,
198,
198,
11748,
33245,
198,
6738,
3108,
8... | 2.651584 | 221 |
from collections import Counter
from functools import reduce
with open("./input.txt", "r") as inputFile:
readingsStr = inputFile.read().splitlines()
columnsRange = range(len(readingsStr[0]))
columns = map(lambda columnIndex : map(lambda row : row[columnIndex], readingsStr), columnsRange)
multiModes = map(lambda column: Counter(column).most_common(), columns)
multiModesWithoutCount = map(lambda mm: (mm[0][0], mm[1][0]), multiModes)
rates = reduce(lambda multiModeX, multiModeY: [multiModeX[0] + multiModeY[0], multiModeX[1] + multiModeY[1]], multiModesWithoutCount)
gamma = int(rates[0], 2)
epsilon = int(rates[1], 2)
print(f'Gamma: {gamma}, Epsilon: {epsilon}, Power: {gamma * epsilon}')
# Part 2
oxygenFilteredReadings = readingsStr.copy()
co2FilteredReadings = readingsStr.copy()
for columnIndex in range(len(readingsStr[0])):
oxygenColumns = map(lambda row : row[columnIndex], oxygenFilteredReadings)
oxygenCounter = Counter(oxygenColumns)
oxygenMostCommon = oxygenCounter.most_common()[0]
oxygenMostCommonVal = oxygenMostCommon[0]
if oxygenMostCommon[1] == oxygenCounter.total() / 2:
oxygenMostCommonVal = '1'
oxygenFilteredReadings = list(filter(lambda row : row[columnIndex] == oxygenMostCommonVal, oxygenFilteredReadings))
co2Columns = map(lambda row : row[columnIndex], co2FilteredReadings)
co2Counter = Counter(co2Columns)
co2MostCommon = co2Counter.most_common()
co2LeastCommon = co2MostCommon[len(co2MostCommon)-1]
co2LeastCommonVal = co2LeastCommon[0]
if co2LeastCommon[1] == co2Counter.total() / 2:
co2LeastCommonVal = '0'
co2FilteredReadings = list(filter(lambda row : row[columnIndex] == co2LeastCommonVal, co2FilteredReadings))
oxygen = int(oxygenFilteredReadings[0], 2)
co2 = int(co2FilteredReadings[0], 2)
print(f'Oxygen: {oxygen}, CO2: {co2}, Life Support Rating: {oxygen * co2}') | [
6738,
17268,
1330,
15034,
198,
6738,
1257,
310,
10141,
1330,
4646,
198,
198,
4480,
1280,
7,
1911,
14,
15414,
13,
14116,
1600,
366,
81,
4943,
355,
5128,
8979,
25,
198,
220,
220,
220,
24654,
13290,
796,
5128,
8979,
13,
961,
22446,
35312... | 2.769457 | 681 |
import os, sys, re
while True:
path = os.getcwd() + " $"
# User input
os.write(1, path.encode())
args = os.read(0, 1000).decode().split()
# Exit
if args[0] == "exit":
if len(args) > 1:
print("Program terminated with exit code", args[1])
sys.exit(int(args[1]))
print("Program terminated without exit code")
sys.exit(1)
# Change Directory
if args[0] == "cd":
try:
if len(args) < 2:
os.chdir(os.path.expanduser("~"))
else:
os.chdir(args[1])
except FileNotFoundError:
print("File not found!")
pass
continue
# Forking
rc = os.fork()
if rc < 0:
os.write(1, "Fork failure :( !")
sys.exit(1)
# Child process for redirect & piping
elif rc == 0:
# Redirect output
if '>' in args:
i = args.index('>')
os.close(1)
os.open(args[i+1], os.O_CREAT | os.O_WRONLY)
os.set_inheritable(1, True)
child_command = args[:i]
# Redirect output
elif '<' in args:
i = args.index('<')
os.close(1)
os.open(args[i-1], os.O_CREAT | os.O_WRONLY)
os.set_inheritable(1, True)
child_command = args[i:]
# Piping
elif '|' in args:
i = args.index('|')
pipe1 = args[:i]
pipe2 = args[(i + 1):]
pr, pw = os.pipe()
os.set_inheritable(pr, True)
os.set_inheritable(pw, True)
pipe_child = os.fork()
if pipe_child < 0:
sys.exit(1)
if pipe_child == 0:
os.close(1)
os.dup(pw)
os.set_inheritable(1, True)
os.close(pr)
os.close(pw)
child_command = pipe1
else:
os.close(0)
os.dup(pr)
os.set_inheritable(0, True)
os.close(pr)
os.close(pw)
child_command = pipe2
# Command not found
else:
print("Command not found")
sys.exit(1)
# Try each directory in path
for directory in re.split(":", os.environ['PATH']):
program = "%s/%s" % (directory, args[0])
try:
os.execve(program, child_command, os.environ)
except FileNotFoundError:
pass
sys.exit(1)
# Check for background processes
else:
childPidCode = os.wait()
| [
11748,
28686,
11,
25064,
11,
302,
198,
198,
4514,
6407,
25,
198,
220,
220,
220,
3108,
796,
28686,
13,
1136,
66,
16993,
3419,
1343,
366,
720,
1,
628,
220,
220,
220,
1303,
11787,
5128,
198,
220,
220,
220,
28686,
13,
13564,
7,
16,
11... | 1.734868 | 1,520 |
def lexicographic_order(w_list):
"""
"""
w_list = sorted(w_list)
#
# w_list.sort()
print(w_list)
if __name__ == '__main__':
w_list = ["", "", "", "?", "", "japan", "!", "", "", \
"", "01", "25", "012", "", "", "", "", "", "", \
"", "", "abc", "def"]
lexicographic_order(w_list) | [
4299,
31191,
291,
6826,
62,
2875,
7,
86,
62,
4868,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
220,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
198,
220,
220,
220,
220,
198,
220,
220,
220,... | 1.785366 | 205 |
#####################################################
##radar kitti ##
#####################################################
import json
import math
import os
import numpy as np
import utils
| [
29113,
14468,
4242,
2,
198,
2235,
6335,
283,
479,
715,
72,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
22492,
198,
29113,
14468,
4242,
2,
198,
11748,... | 3.47619 | 63 |
"""
OpenVINO DL Workbench
Class for creating per tensor scripts job
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from contextlib import closing
from pathlib import Path
from sqlalchemy.orm import Session
from config.constants import (ACCURACY_ARTIFACTS_FOLDER, JOBS_SCRIPTS_FOLDER_NAME, JOB_SCRIPT_NAME)
from wb.extensions_factories.database import get_db_session_for_celery
from wb.main.enumerates import JobTypesEnum, StatusEnum
from wb.main.jobs.interfaces.ijob import IJob
from wb.main.models import (PerTensorReportJobsModel, CreatePerTensorScriptsJobModel)
from wb.main.scripts.job_scripts_generators.tensor_distance_job_script_generator import \
get_tensor_distance_job_script_generator
from wb.main.utils.utils import create_empty_dir
| [
37811,
198,
4946,
53,
46016,
23641,
5521,
26968,
198,
5016,
329,
4441,
583,
11192,
273,
14750,
1693,
628,
15069,
357,
66,
8,
33448,
8180,
10501,
628,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
... | 3.476965 | 369 |
# Generated by Django 3.1.2 on 2020-11-30 22:19
import django.core.validators
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
17,
319,
12131,
12,
1157,
12,
1270,
2534,
25,
1129,
198,
198,
11748,
42625,
14208,
13,
7295,
13,
12102,
2024,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.95122 | 41 |
"""
enqueue
dequeue
size
traverse
Queue Implementation using SLL
"""
obj = Queue()
obj.enqueue(1)
obj.enqueue(2)
obj.enqueue(3)
obj.enqueue(4)
obj.enqueue(5)
obj.traverse()
obj.dequeue()
obj.traverse() | [
37811,
198,
268,
36560,
198,
2934,
36560,
198,
7857,
198,
9535,
4399,
198,
198,
34991,
46333,
1262,
311,
3069,
198,
198,
37811,
198,
198,
26801,
796,
4670,
518,
3419,
198,
26801,
13,
268,
36560,
7,
16,
8,
198,
26801,
13,
268,
36560,
... | 2.329545 | 88 |
from spotify import values
from spotify.page import Page
from spotify.resource import Resource, UpgradableInstance
| [
6738,
4136,
1958,
1330,
3815,
198,
6738,
4136,
1958,
13,
7700,
1330,
7873,
198,
6738,
4136,
1958,
13,
31092,
1330,
20857,
11,
3205,
9744,
540,
33384,
628,
628,
198
] | 4.103448 | 29 |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Wordcount exercise
Google's Python class
The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.
1. For the --count flag, implement a-- print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...
Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.
2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.
Use str.split() (no arguments) to split on all whitespac
Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.
Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
"""
import sys
# +++your code here+++
# Define print_words(filename) and print_top(filename) functions.
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.
###
# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def print_words(filename):
"""Analyse text file. Print words and their counts
Args:
Return:
"""
dic = make_dic_from_wds(filename)
print("Word Count")
print("=======================")
for k, v in dic.items():
print(k," " ,v)
def print_top(filename):
"""Print 20 most common words sorted. So the most common word is first, so on..."""
dic = make_dic_from_wds(filename)
print("=======================")
print("20 most common words")
n= 0
for key, value in sorted(dic.items(), key=lambda kv:kv[1], reverse=True):
print(key," ", value)
n += 1
if n>= 20:
break
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
532,
926,
198,
2,
15069,
3050,
3012,
3457,
13,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
198,
2,
2638,
1378,
2503,
13,
43073,
13,
2398,
14,
677,
4541,
14,
43,
2149,
24290,... | 3.252336 | 749 |
from lrasm.multicollinearity_tst import multicollinearity_test
import numpy as np
import pandas as pd
from statsmodels.stats.outliers_influence import variance_inflation_factor
import pytest
def test_multicollinearity_test():
"""Test multicollinearity test outputs from dataset"""
X_proper = pd.DataFrame({"head": [1,2,3,3,5,8,7],"Feet": [7,6,5,4,3,2,1], 'Random': [12,24,25,26,29,55,99]})
X_str_df = pd.DataFrame({"head": ["str",2,3,4,5,6,7]})
X_series = pd.Series([1,2,3,4,5,6,7])
with pytest.raises(TypeError):
multicollinearity_test(X_str_df, 10)
multicollinearity_test(X_series, 10)
assert round(multicollinearity_test(X_proper, 10)['VIF'][0], 2) == 9.04
assert round(multicollinearity_test(X_proper, 10)['VIF'][2], 2) == 8.37
assert isinstance(multicollinearity_test(X_proper, 10), pd.DataFrame) | [
6738,
300,
81,
8597,
13,
16680,
291,
692,
259,
451,
414,
62,
83,
301,
1330,
47368,
692,
259,
451,
414,
62,
9288,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
9756,
27530,
13,
34242,
13,
448,
... | 2.166667 | 402 |
# References
# https://docs.aws.amazon.com/sagemaker/latest/dg/adapt-inference-container.html
import logging
import numpy as np
import PIL
from numpy import ndarray as NDArray
from PIL.Image import Image
from six import BytesIO
from torch.nn import Module
from facenet_pytorch import MTCNN
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
| [
2,
31458,
198,
2,
3740,
1378,
31628,
13,
8356,
13,
33103,
13,
785,
14,
82,
363,
32174,
14,
42861,
14,
67,
70,
14,
42552,
12,
259,
4288,
12,
34924,
13,
6494,
198,
198,
11748,
18931,
198,
198,
11748,
299,
32152,
355,
45941,
198,
117... | 2.98374 | 123 |
import itertools
import random
NUM_CANS = 1
filename = "namo_probs/sort_prob_{0}.prob".format(NUM_CANS)
GOAL = "(RobotAt pr2 robot_end_pose)"
HEIGHT = 5
WIDTH = 5
if __name__ == "__main__":
main()
| [
11748,
340,
861,
10141,
198,
11748,
4738,
198,
198,
41359,
62,
34,
15037,
796,
352,
198,
198,
34345,
796,
366,
7402,
78,
62,
1676,
1443,
14,
30619,
62,
1676,
65,
23330,
15,
27422,
1676,
65,
1911,
18982,
7,
41359,
62,
34,
15037,
8,
... | 2.3 | 90 |
# -*- coding: utf-8 -*-
from setuptools import setup
with open('README.rst', 'rb') as f:
long_desc = f.read().decode('utf-8')
setup(name='pygeon',
version='0.1.0',
description='IP Geolocation in Python',
long_description=long_desc,
author='Alastair Houghton',
author_email='alastair@alastairs-place.net',
url='http://bitbucket.org/al45tair/pygeon',
license='MIT License',
packages=['pygeon'],
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Libraries',
'Topic :: System :: Networking'
],
scripts=['scripts/pygeon'],
install_requires=[
'sqlalchemy >= 0.9.8',
'IPy >= 0.82',
'bintrees >= 2.0.1'
],
provides=['pygeon']
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
4480,
1280,
10786,
15675,
11682,
13,
81,
301,
3256,
705,
26145,
11537,
355,
277,
25,
198,
220,
220,
220,
890,
62,
20147,
... | 2.160401 | 399 |
from flask import Blueprint, render_template, send_file
from flask_app import app
static_api = Blueprint('static_api', __name__)
# @static_api.route('/', methods=['GET'])
# def index():
# return render_template('index.html') | [
6738,
42903,
1330,
39932,
11,
8543,
62,
28243,
11,
3758,
62,
7753,
198,
198,
6738,
42903,
62,
1324,
1330,
598,
198,
12708,
62,
15042,
796,
39932,
10786,
12708,
62,
15042,
3256,
11593,
3672,
834,
8,
198,
198,
2,
2488,
12708,
62,
15042,... | 3.150685 | 73 |
from cluster.preprocess.pre_node_feed import PreNodeFeed
| [
6738,
13946,
13,
3866,
14681,
13,
3866,
62,
17440,
62,
12363,
1330,
3771,
19667,
18332,
628,
198
] | 3.470588 | 17 |
#!/usr/bin/env python
from datetime import datetime
import pika
import os
import sys
import steps # noqa: F401
import json
from climate_simulation_platform.db import step_parameters, save_step, step_seen
from climate_simulation_platform import create_app
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print("Interrupted")
try:
sys.exit(0)
except SystemExit:
os._exit(0)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
279,
9232,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
4831,
220,
1303,
645,
20402,
25,
376,
21844,
198,
11748,
33918,
198,
198,
673... | 2.494565 | 184 |
import os
import sys
import random
import json
import tqdm
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import numpy as np
from transformers import BertTokenizer, BertModel, AdamW, get_linear_schedule_with_warmup
from tool.data_process import *
from tool.inference_utils import write_predictions
MIN_FLOAT = -1e30
import argparse
parser = argparse.ArgumentParser(description="CQA")
### Arguments for Traning
parser.add_argument("--batch-size", type=int)
### Directories
parser.add_argument("--output-dir", type=str)
parser.add_argument("--result-dir", type=str)
### Arguments for Dataset
parser.add_argument("--num-turn", type=int, default=3)
parser.add_argument("--max-seq-length", type=int, default=512)
parser.add_argument("--max-history-length", type=int, default=128)
parser.add_argument("--doc-stride", type=int, default=192)
parser.add_argument("--model-name", type=str, default="bert-cased-large")
### Inference Setting
parser.add_argument("--n-best-size", type=int, default=5)
parser.add_argument("--max-answer-length", type=int, default=30)
args = parser.parse_args()
exp_dir = os.path.join(args.output_dir, args.result_dir)
model_file=exp_dir+"/model/model.pth"
tokenizer_dir=exp_dir+"/tokenizer"
config = exp_dir+"/config.json"
with open(config, "r") as f:
config_items = json.load(f)
model_name = config_items["model_name"]
max_seq_length = config_items["max_seq_length"]
max_history_length = config_items["max_history_length"]
doc_stride = config_items["doc_stride"]
num_turn = config_items["num_turn"]
test_data = f"data/coqa/coqa-dev-v1.0.json"
test_example = f"data/coqa/dev_{args.num_turn}_examples.pkl"
test_feature = f"data/coqa/dev_{args.num_turn}_features.pkl"
seed = 2022
seed_everything(seed)
def prediction(model, test_dataset, device):
progress_bar = tqdm.tqdm
model = model.to(device)
test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False)
test_pbar = progress_bar(test_loader, total=len(test_loader))
RawResult = collections.namedtuple("RawResult",
["unique_id", "start_logits", "end_logits"])
all_results = []
print("Predicting answers...")
for input_ids, attention_mask, p_mask, segment_ids, history_ids, unique_id in test_pbar:
start_logits, end_logits = model(input_ids=input_ids.to(device),
segment_ids=segment_ids.to(device),
attention_mask=attention_mask.to(device))
batch_num = start_logits.size(0)
for idx in range(batch_num):
start_logit = [float(x) for x in start_logits[idx].tolist()]
end_logit = [float(x) for x in end_logits[idx].tolist()]
all_results.append(RawResult(unique_id=int(unique_id[idx]),
start_logits=start_logit,
end_logits=end_logit))
return all_results
print(f"Loading tokenizer from {tokenizer_dir}...")
tokenizer = BertTokenizer.from_pretrained(tokenizer_dir)
print(f"Loading trained model from {model_file}...")
device = torch.device("cuda")
model = CQA(model_name, tokenizer, args.batch_size, device)
model.load_state_dict(torch.load(model_file))
test_dataset = Dataset(data_file=test_data,
example_file=test_example,
feature_file=test_feature,
tokenizer=tokenizer,
mode="test")
all_results = prediction(model, test_dataset, device)
output_prediction_file = os.path.join(exp_dir, "predictions.json")
output_nbest_file = os.path.join(exp_dir, "nbest_predictions.json")
print("Writing predictions...")
write_predictions(all_examples=test_dataset.examples,
features_dict=test_dataset.features,
all_results=all_results,
n_best_size=args.n_best_size,
max_answer_length=args.max_answer_length,
do_lower_case=True,
tokenizer=tokenizer,
output_prediction_file=output_prediction_file,
output_nbest_file=output_nbest_file)
print("Done") | [
11748,
28686,
201,
198,
11748,
25064,
201,
198,
11748,
4738,
201,
198,
11748,
33918,
201,
198,
11748,
256,
80,
36020,
201,
198,
11748,
2298,
293,
201,
198,
201,
198,
11748,
28034,
201,
198,
11748,
28034,
13,
20471,
355,
299,
77,
201,
... | 2.186024 | 2,032 |
import webapp2
from models import *
from webapp2_extras import sessions
| [
11748,
3992,
1324,
17,
201,
198,
6738,
4981,
1330,
1635,
201,
198,
6738,
3992,
1324,
17,
62,
2302,
8847,
1330,
10991,
201,
198,
201,
198,
201,
198
] | 2.925926 | 27 |
# Basic libs
import os, time, glob, random, pickle, copy, torch
import numpy as np
import open3d
from scipy.spatial.transform import Rotation
# Dataset parent class
from torch.utils.data import Dataset
from lib.benchmark_utils import to_tsfm, to_o3d_pcd, get_correspondences
| [
2,
14392,
9195,
82,
198,
11748,
28686,
11,
640,
11,
15095,
11,
4738,
11,
2298,
293,
11,
4866,
11,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
1280,
18,
67,
198,
6738,
629,
541,
88,
13,
2777,
34961,
13,
35636,
1330,
371,
... | 3.01087 | 92 |
#!/usr/bin/env python
"""
Make plots to compare two different versions of desimodel
Stephen Bailey, LBL
July 2014
"""
import os, sys
import numpy as np
import pylab as P
import matplotlib.pyplot as plt
import fitsio
camcolor = dict(b='b', r='r', z='k')
#-------------------------------------------------------------------------
dir1, dir2 = sys.argv[1:3]
compare_throughput(dir1, dir2)
plt.show()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
12050,
21528,
284,
8996,
734,
1180,
6300,
286,
748,
320,
375,
417,
198,
198,
24920,
20330,
11,
406,
9148,
198,
16157,
1946,
198,
37811,
198,
198,
11748,
28686,
11,
250... | 3.060606 | 132 |
import matplotlib.pyplot as plt
import numpy as np
import os
import json
import seaborn as sns
import re
sns.set(style="darkgrid")
def natural_keys(text):
'''
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
'''
return [ atoi(c) for c in re.split(r'(\d+)', text) ]
our_times = convert_files_to_lists("./tests/results/grad/json/parallel/parallel_results_good.json")
print(our_times)
generate_two_graph(our_times, range(1, 48))
speedup_list = get_speedup_list(our_times)
generate_two_graph(speedup_list, range(1, 47), suffix="-speedup", ylabel="Speedup (Time Single Thread / Time X Threads)")
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
28686,
201,
198,
11748,
33918,
201,
198,
11748,
384,
397,
1211,
355,
3013,
82,
201,
198,
11748,
302,
201,
198,
201,
198... | 2.503333 | 300 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This is a tool developed for analysing transposon insertions for experiments using SAturated Transposon Analysis in Yeast (SATAY).
This python code contains one function called transposonmapper().
For more information about this code and the project, see https://satay-ll.github.io/SATAY-jupyter-book/Introduction.html
This code is based on the Matlab code created by the Kornmann lab which is available at: sites.google.com/site/satayusers/
__Author__ = Gregory van Beek. LaanLab, department of Bionanoscience, Delft University of Technology
__version__ = 1.5
__Date last update__ = 2021-01-11
Version history:
1.1; Added code for creating two text files for storing insertion locations per gene and per essential gene [2020-07-27]
1.2; Improved searching algorithm for essential genes [2020-08-06]
1.3; Load file containing all essential genes so that a search for essential genes in multiple file is not needed anymore. This file is created using Create_EssentialGenes_list.py located in the same directory as this code [2020-08-07]
1.4; Fixed bug where the gene position and transposon insertion location did not start at zero for each chromosome, causing confusing values to be stored in the _pergene_insertions.txt and _peressential_insertions.txt files [2020-08-09]
1.5; Added functionality to handle all possible sam flags in the alignment file (bam-file) instead of only flag=0 or flag=16. This is needed for the function to handle paired-end sequencing data [2021-01-11]
"""
# Local imports
from transposonmapper.properties import (
get_chromosome_names,
get_sequence_length,
)
from transposonmapper.mapping import (
get_reads,
add_chromosome_length,
add_chromosome_length_inserts,
get_insertions_and_reads,
)
from transposonmapper.utils import chromosomename_roman_to_arabic
from transposonmapper.importing import (
load_default_files,
read_genes,
)
from transposonmapper.exporting import (
save_as_bed,
save_per_gene,
save_per_gene_insertions,
save_per_essential_insertions,
save_as_wig
)
import sys
def transposonmapper(bamfile, gff_file=None, essential_file=None, gene_name_file=None):
"""This function is created for analysis of SATAY data using the species Saccharomyces Cerevisiae.
The function assumes that the reads are already aligned to a reference genome.
The input data should be a .bam-file and the location where the .bam-file is stored should also contain an index file (.bam.bai-file, which for example can be created using sambamba).
The function uses the pysam package for handling bam files (see pysam.readthedocs.io/en/latest/index.html) and therefore this function only runs on Linux systems with SAMTools installed.
Parameters
----------
bamfile : str, required
Path to the bamfile. This location should also contain the .bam.bai index file (does not need to be input in this function).
gff_file : str, optional
Path to a .gff-file including all gene information (e.g. downloaded from SGD).
Default file is 'Saccharomyces_cerevisiae.R64-1-1.99.gff3'., by default None
essential_file : str, optional
Path to a .txt file containing a list all essential genes. Every line should consist of a single essential gene and the file should have one header line.
Ideally this file is created using 'Create_EssentialGenes_list.py'. Default file is 'Cerevisiae_AllEssentialGenes_List.txt'., by default None
gene_name_file : str, optional
Path to text file that includes aliases for all genes. Default file is 'Yeast_Protein_Names.txt', by default None
Returns
-------
A set of files
It outputs the following files that store information regarding the location of all insertions:
- .bed-file: Includes all individual basepair locations of the whole genome where at least one transposon has been mapped and the number of insertions for each locations (the number of reads) according to the Browser Extensible Data (bed) format.
A distinction is made between reads that had a different reading orientation during sequencing. The number of reads are stored using the equation #reads*20+100 (e.g. 2 reads is stored as 140).
- .wig-file: Includes all individual basepair locations of the whole genome where at least one transposon has been mapped and the number of insertions for each locations (the number of reads) according to the Wiggle (wig) format.
In this file no distinction is made between reads that had a different reading orientation during sequencing. The number of reads are stored as the absolute count.
- _pergene.txt-file: Includes all genes (currently 6600) with the total number of insertions and number of reads within the genomic region of the gene.
- _peressential.txt-file: Includes all annotated essential genes (currently 1186) with the total number of insertions and number of reads within the genomic region of the gene.
- _pergene_insertions.txt-file: Includes all genes with their genomic location (i.e. chromosome number, start and end position) and the locations of all insertions within the gene location. It also include the number number of reads per insertions.
- _peressential_insertions.txt-file: Includes all essential genes with their genomic location (i.e. chromosome number, start and end position) and the locations of all insertions within the gene location. It also include the number number of reads per insertions.
(note that in the latter two files, the genomic locations are continous, for example chromosome II does not start at 0, but at 'length chromosome I + 1' etc.).
The output files are saved at the location of the input file using the same name as the input file, but with the corresponding extension.
"""
# If necessary, load default files
gff_file, essential_file, gene_name_file = load_default_files(
gff_file, essential_file, gene_name_file
)
# Verify presence of files
data_files = {
"bam": bamfile,
"gff3": gff_file,
"essentials": essential_file,
"gene_names": gene_name_file,
}
for filetype, file_path in data_files.items():
assert file_path, f"{filetype} not found at {file_path}"
# Read files for all genes and all essential genes
print("Getting coordinates of all genes ...")
gene_coordinates, essential_coordinates, aliases_designation = read_genes(
gff_file, essential_file, gene_name_file
)
try:
import pysam
except ImportError:
print("Failed to import pysam")
sys.exit(1)
# Read bam file
bam = pysam.AlignmentFile(bamfile, "rb")
# Get names of all chromosomes as stored in the bam file
ref_tid = get_chromosome_names(bam)
ref_names = list(ref_tid.keys())
# Convert chromosome names in data file to roman numerals
ref_romannums = chromosomename_roman_to_arabic()[1]
ref_tid_roman = {key: value for key, value in zip(ref_romannums, ref_tid)}
# Get sequence lengths of all chromosomes
chr_lengths, chr_lengths_cumsum = get_sequence_length(bam)
# Get all reads within a specified genomic region
readnumb_array, tncoordinates_array, tncoordinatescopy_array = get_reads(bam)
#%% CONCATENATE ALL CHROMOSOMES
# For each insertion location, add the length of all previous chromosomes
tncoordinatescopy_array = add_chromosome_length_inserts(
tncoordinatescopy_array, ref_names, chr_lengths
)
# For each gene location, add the length of all previous chromosomes
gene_coordinates = add_chromosome_length(
gene_coordinates, chr_lengths_cumsum, ref_tid_roman
)
# For each essential gene location, add the length of all previous chromosomes
essential_coordinates = add_chromosome_length(
essential_coordinates, chr_lengths_cumsum, ref_tid_roman
)
# GET NUMBER OF TRANSPOSONS AND READS PER GENE
print("Get number of insertions and reads per gene ...")
# All genes
tn_per_gene, reads_per_gene, tn_coordinates_per_gene = get_insertions_and_reads(
gene_coordinates, tncoordinatescopy_array, readnumb_array
)
# Only essential genes
(
tn_per_essential,
reads_per_essential,
tn_coordinates_per_essential,
) = get_insertions_and_reads(
essential_coordinates, tncoordinatescopy_array, readnumb_array
)
# CREATE BED FILE
bedfile = bamfile + ".bed"
print("Writing bed file at: ", bedfile)
print("")
save_as_bed(bedfile, tncoordinates_array, ref_tid, readnumb_array)
# CREATE TEXT FILE WITH TRANSPOSONS AND READS PER GENE
# NOTE THAT THE TRANSPOSON WITH THE HIGHEST READ COUNT IS IGNORED.
# E.G. IF THIS FILE IS COMPARED WITH THE _PERGENE_INSERTIONS.TXT FILE THE READS DON'T ADD UP (SEE https://groups.google.com/forum/#!category-topic/satayusers/bioinformatics/uaTpKsmgU6Q)
# TOO REMOVE THIS HACK, CHANGE THE INITIALIZATION OF THE VARIABLE readpergene
per_gene_file = bamfile + "_pergene.txt"
print("Writing pergene.txt file at: ", per_gene_file)
print("")
save_per_gene(per_gene_file, tn_per_gene, reads_per_gene, aliases_designation)
# CREATE TEXT FILE TRANSPOSONS AND READS PER ESSENTIAL GENE
per_essential_file = bamfile + "_peressential.txt"
print("Writing peressential.txt file at: ", per_essential_file)
print("")
save_per_gene(
per_essential_file, tn_per_essential, reads_per_essential, aliases_designation
)
# CREATE TEXT FILE WITH LOCATION OF INSERTIONS AND READS PER GENE
per_gene_insertions_file = bamfile + "_pergene_insertions.txt"
print("Witing pergene_insertions.txt file at: ", per_gene_insertions_file)
print("")
save_per_gene_insertions(
per_gene_insertions_file,
tn_coordinates_per_gene,
gene_coordinates,
chr_lengths_cumsum,
ref_tid_roman,
aliases_designation,
)
# CREATE TEXT FILE WITH LOCATION OF INSERTIONS AND READS PER ESSENTIAL GENE
per_essential_insertions_file = bamfile + "_peressential_insertions.txt"
print(
"Writing peressential_insertions.txt file at: ", per_essential_insertions_file
)
print("")
save_per_essential_insertions(
per_essential_insertions_file,
tn_coordinates_per_essential,
gene_coordinates,
chr_lengths_cumsum,
ref_tid_roman,
aliases_designation,
)
# ADD INSERTIONS AT SAME LOCATION BUT WITH DIFFERENT ORIENTATIONS TOGETHER (FOR STORING IN WIG-FILE)
wigfile = bamfile + ".wig"
print("Writing wig file at: ", wigfile)
print("")
save_as_wig(wigfile, tncoordinates_array, ref_tid, readnumb_array)
#%%
if __name__ == "__main__":
bamfile = "transposonmapper/data_files/files4test/SRR062634.filt_trimmed.sorted.bam"
transposonmapper(bamfile=bamfile)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
1212,
318,
257,
2891,
4166,
329,
11090,
278,
1007,
1930,
261,
7550,
507,
329,
10256,
1262,
311,
2953,
... | 2.90732 | 3,798 |
# -*- coding: UTF-8 -*-
import sys
from datatable import datatable
if __name__ == "__main__":
if datatable.is_config_file_valid():
if len(sys.argv) > 1:
if sys.argv[1] == "all":
datatable.process_all_file()
elif sys.argv[1] == "export":
datatable.export_data_all()
exit(0)
#
print("Init Success!")
print("-------------")
file_dict = datatable.get_file_dict()
for file_key in file_dict:
print(str(file_key) + "." + file_dict[file_key][3:])
print("0.All")
print("-------------")
file_choose = input("Choose File : ")
if file_choose == "0":
process_all_file_choose()
else:
file_id = datatable.select_file_id(file_choose)
if file_id < 0:
print("not valid!")
else:
process_file(file_id)
else:
print("not valid!")
| [
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
198,
11748,
25064,
198,
198,
6738,
4818,
21156,
1330,
4818,
21156,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
628,
220,
220,
220,
611,
4818,
21... | 1.886538 | 520 |
# Copyright 2015, Ansible, Inc.
# Luke Sneeringer <lsneeringer@ansible.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Import OrderedDict from the standard library if possible, and from
# the ordereddict library (required on Python 2.6) otherwise.
try:
from collections import OrderedDict # NOQA
except ImportError: # Python < 2.7
from ordereddict import OrderedDict # NOQA
# Import simplejson if we have it (Python 2.6), and use json from the
# standard library otherwise.
#
# Note: Python 2.6 does have a JSON library, but it lacks `object_pairs_hook`
# as a keyword argument to `json.loads`, so we still need simplejson on
# Python 2.6.
import sys
if sys.version_info < (2, 7):
import simplejson as json # NOQA
else:
import json # NOQA
| [
2,
15069,
1853,
11,
28038,
856,
11,
3457,
13,
198,
2,
11336,
27065,
1586,
263,
1279,
7278,
710,
1586,
263,
31,
504,
856,
13,
785,
29,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
... | 3.390374 | 374 |
import json
import pytest
from buildpg import Values
from pytest import fixture
from pytest_toolbox.comparison import CloseToNow, RegexStr
from shared.actions import ActionTypes
from shared.donorfy import DonorfyActor
from shared.utils import RequestError
from web.utils import encrypt_json
from .conftest import Factory
| [
11748,
33918,
198,
198,
11748,
12972,
9288,
198,
6738,
1382,
6024,
1330,
27068,
198,
6738,
12972,
9288,
1330,
29220,
198,
6738,
12972,
9288,
62,
25981,
3524,
13,
785,
1845,
1653,
1330,
13872,
2514,
3844,
11,
797,
25636,
13290,
198,
198,
... | 3.57732 | 97 |
import colorama
from lxman.cli import cli
if __name__ == "__main__":
cli()
| [
11748,
3124,
1689,
201,
198,
6738,
300,
87,
805,
13,
44506,
1330,
537,
72,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
201,
198,
220,
220,
220,
537,
72,
3419,
201,
198
] | 2.236842 | 38 |
###########################################
# EXERCICIO 047 #
###########################################
'''CRIE UM PROGRAMA QUE MOSTRE NA TELA TODOS OS NUMEROS
PARES DE 1 E 50'''
for c in range(1, 51):
if c % 2 == 0:
print(c, end=' ') | [
29113,
7804,
21017,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
7788,
47691,
2149,
9399,
657,
2857,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1303,
198,
29113,
7804,
21017,
198,
... | 2.433628 | 113 |
def handle(automata, result):
"""
This is a simple handler
:param automata: the automata which yielded the result
:type automata: :class:`Automata`
:param result: the result of the automata
:type result: bool
"""
print(result)
if not result:
automata.switch("ask m: try again: f: handle")
| [
4299,
5412,
7,
2306,
296,
1045,
11,
1255,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
770,
318,
257,
2829,
21360,
198,
220,
220,
220,
1058,
17143,
3557,
1045,
25,
262,
3557,
1045,
543,
26403,
262,
1255,
198,
220,
220,
220,
... | 2.707317 | 123 |
"""
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from mo.graph.graph import Node, Graph
from mo.ops.op import Op
| [
37811,
198,
15069,
357,
34,
8,
2864,
12,
42334,
8180,
10501,
628,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
9... | 3.952941 | 170 |
import pygame
from app.view.animations import Delay, FadeIn, FadeOut, ChooseRandom, FrameAnimate, MovePosition, DelayCallBack, MoveValue, SequenceAnimation, ParallelAnimation, Timeout
from app.resources.event_handler import SET_GAME_STATE
from app.resources import text_renderer, colours
from app.resources.music import MusicManager
from app.resources.images import ImageManager
from app.conway.game_of_life import GameOfLife
from app.resources.event_handler import SOUND_EFFECT
| [
11748,
12972,
6057,
198,
6738,
598,
13,
1177,
13,
11227,
602,
1330,
42698,
11,
376,
671,
818,
11,
376,
671,
7975,
11,
17489,
29531,
11,
25184,
2025,
1920,
11,
10028,
26545,
11,
42698,
14134,
7282,
11,
10028,
11395,
11,
45835,
39520,
1... | 3.801587 | 126 |
import numpy as np
import math
import functools as fu
import cv2
import random as rand
def transform_points(m, points):
""" It transforms the given point/points using the given transformation matrix.
:param points: numpy array, list
The point/points to be transformed given the transformation matrix.
:param m: An 3x3 matrix
The transformation matrix which will be used for the transformation.
:return: The transformed point/points.
"""
ph = make_homogeneous(points).T
ph = m @ ph
return make_euclidean(ph.T)
def transform_image(image, m):
    """Warp an image with the homogeneous transformation ``m``.

    :param image: A 3-channel image (H x W x C numpy array).
    :param m: A 3x3 transformation matrix in homogeneous coordinates.
    :return: The warped image, same size as the input.
    """
    rows, cols, _channels = image.shape
    # cv2 expects the destination size as (width, height).
    return cv2.warpPerspective(image, m, (cols, rows))
def make_homogeneous(points):
    """Convert euclidean 2D point(s) into homogeneous coordinates.

    :param points: numpy array of shape (n, 2), or a flat list for one point.
    :return: numpy array of shape (n, 3) with a trailing column of ones.
    """
    # The original if/else had two byte-identical branches; only the
    # list-to-array promotion actually depends on the input type.
    if isinstance(points, list):
        points = np.asarray([points], dtype=np.float64)
    return np.hstack((points, np.ones((points.shape[0], 1), dtype=points.dtype)))
def make_euclidean(points):
    """Convert homogeneous 2D point(s) back into euclidean coordinates.

    :param points: numpy array of shape (n, 3) in homogeneous coordinates.
    :return: A view of the points with the homogeneous column dropped, (n, 2).
    """
    trimmed = points[:, :-1]
    return trimmed
def identity():
    """Return the 3x3 identity transformation in homogeneous coordinates."""
    return np.eye(3, dtype=np.float64)
def rotating(theta=0):
    """Build a rotation matrix for rotating 2D point(s) or an image
    clockwise about the origin. Pass a negative angle to rotate
    counterclockwise.

    BUG FIX: the parameter identifier had been stripped from the source
    (``def rotating(=0):`` is invalid syntax — most likely a non-ASCII name
    such as a Greek theta lost in an encoding pass); restored as ``theta``.

    :param theta: int
        The rotation angle in degrees. Defaults to 0 (identity rotation).
    :return: The 3x3 rotation matrix in homogeneous coordinates.
    """
    rad = np.radians(theta)
    cos = math.cos(rad)
    sin = math.sin(rad)
    return np.array([[cos, sin, 0],
                     [-sin, cos, 0],
                     [0, 0, 1]],
                    dtype=np.float64)
def translating(t_x=0, t_y=0):
    """Build a translation matrix that shifts 2D point(s) or an image by
    ``t_x`` along the x-axis and ``t_y`` along the y-axis.

    :param t_x: int
        Shift along the x-axis (default 0 — no translation).
    :param t_y: int
        Shift along the y-axis (default 0 — no translation).
    :return: The 3x3 translation matrix in homogeneous coordinates.
    """
    matrix = np.eye(3, dtype=np.float64)
    matrix[0, 2] = t_x
    matrix[1, 2] = t_y
    return matrix
def scaling(scale_x=1, scale_y=1):
    """Build a scale matrix that enlarges/shrinks 2D point(s) or an image by
    ``scale_x`` along the x-axis and ``scale_y`` along the y-axis.

    :param scale_x: int
        Scale factor for the x-axis (default 1 — no scaling).
    :param scale_y: int
        Scale factor for the y-axis (default 1 — no scaling).
    :return: The 3x3 scaling matrix in homogeneous coordinates.
    """
    matrix = np.eye(3, dtype=np.float64)
    matrix[0, 0] = scale_x
    matrix[1, 1] = scale_y
    return matrix
def arbitrary():
    """Return a random 3x3 transformation matrix composed from random
    rotation, scaling and translation factors.

    BUG FIX: the angle identifier had been stripped from the source
    (``= rand.randint(...)`` and ``rotating()`` with no argument — most
    likely a non-ASCII name lost in an encoding pass); restored as ``theta``.

    :return: A 3x3 arbitrary transformation matrix in homogeneous coordinates.
    """
    theta = rand.randint(-360, 361)  # NOTE(review): inclusive upper bound 361 looks odd — confirm intent
    r = rotating(theta)
    sx, sy = rand.sample(range(-10, 11), 2)
    s = scaling(sx, sy)
    tx, ty = rand.sample(range(-400, 401), 2)
    t = translating(tx, ty)
    I = identity()
    # Composition order deliberately varies with the translation draw.
    if 0 <= tx <= 200:
        return s @ t @ r @ I
    else:
        return r @ s @ I @ t
def invert(m):
    """Return the inverse of the given transformation matrix.

    :param m: A 3x3 matrix.
    :return: The inverse matrix as float64.
    :raises Exception: If the matrix is singular (determinant is zero).
    """
    if np.linalg.det(m) == 0:
        raise Exception("It is a non-invertible matrix")
    return np.linalg.inv(m).astype(dtype=np.float64)
def combine(*transformations):
    """Compose the given transformation matrices into one.

    The matrices are applied in the order they are passed, i.e. the first
    argument acts on the points first (so the product is built right-to-left).

    :param transformations: Any number of 3x3 transformation matrices.
    :return: The combined 3x3 matrix.
    """
    chained = reversed(transformations)
    compose = lambda left, right: left @ right
    return fu.reduce(compose, chained)
def learn_affine(srs, tar):
    """Find the affine transformation mapping one triangle onto another.

    Solves ``A x = b`` via ``x = inv(A) b`` where the six unknowns are the
    four linear coefficients plus the two translation components.

    :param srs: Three 2D source points (rows of an (3, 2) array).
    :param tar: Three 2D target points (rows of an (3, 2) array).
    :return: The 3x3 affine transformation matrix.
    :raises Exception: If the system matrix is singular.
    """
    # Two equations per point correspondence:
    #   a1*x + a2*y + tx = x'   and   a3*x + a4*y + ty = y'
    coeff_rows = []
    for px, py in zip(srs[:3, 0], srs[:3, 1]):
        coeff_rows.append([px, py, 1, 0, 0, 0])
        coeff_rows.append([0, 0, 0, px, py, 1])
    a = np.array(coeff_rows, dtype=np.float64)
    b = tar.flatten()
    if np.linalg.det(a) == 0:
        raise Exception("It is a non-invertible matrix")
    x = (np.linalg.inv(a) @ b).flatten()
    a1, a2, tx = x[0], x[1], x[2]
    a3, a4, ty = x[3], x[4], x[5]
    return np.array([[a1, a2, tx],
                     [a3, a4, ty],
                     [0, 0, 1]],
                    dtype=np.float64)
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
11748,
1257,
310,
10141,
355,
14035,
198,
11748,
269,
85,
17,
198,
11748,
4738,
355,
43720,
628,
198,
4299,
6121,
62,
13033,
7,
76,
11,
2173,
2599,
198,
220,
220,
220,
37227,
632,... | 2.425703 | 2,988 |
#sys.argv[1] = bin_dir, sys.argv[2] = flye_info, sys.argv[3] = output_dir, sys.argv[3] = output_dir
# Extract large circular contigs from a Flye assembly and write each one to
# its own FASTA file via seqkit.
# NOTE(review): the usage comment above disagrees with the code below —
# argv[1] is read as the bin NAME and argv[2] as the Flye output dir; confirm.
import os, sys
bin_name=sys.argv[1]
bin_dir = sys.argv[2]
output_dir = sys.argv[3]
# Names of contigs judged large and circular.
large_circular = []
flye_info = open(bin_dir + '/assembly_info.txt','r')
read_info = True
# readline() returns '' at EOF, which ends the loop.
while read_info:
    read_info = flye_info.readline()
    entry = read_info.split('\t')
    if len(entry) > 3:
        # assembly_info.txt columns: presumably 0=seq_name, 1=length,
        # 3=circular flag ("Y"/"N") — TODO confirm for the Flye version used.
        # Keep contigs that are circular AND longer than 2 Mbp.
        if (entry[3] == "Y") and (int(entry[1]) > 2000000):
            large_circular.append(entry[0])
for i in large_circular:
    # seqkit grep -n -p <name>: pull the contig by full name into its own
    # (unpolished) FASTA under output_dir.
    os.system('seqkit grep -n -p '+ i + ' ' + bin_dir + '/assembly.fasta -o ' +output_dir + '/' + bin_name + '_'+ i + '_unpolished_rf.fasta' )
| [
2,
17597,
13,
853,
85,
58,
16,
60,
796,
9874,
62,
15908,
11,
25064,
13,
853,
85,
58,
17,
60,
796,
6129,
68,
62,
10951,
11,
25064,
13,
853,
85,
58,
18,
60,
796,
5072,
62,
15908,
11,
25064,
13,
853,
85,
58,
18,
60,
796,
5072,
... | 2.111111 | 315 |
# User-facing status / error message constants shared by the auth code.
# NOTE(review): the message VALUES below were spell-corrected; if any caller
# compares against the literal old strings instead of these names, update it.
LOGGEDOUT_SCSS_MSG = "User Logged out successfully"
LOGIN_SCSS_MSG = "User Logged in successfully"
INVALID_PASS = "Password not valid"  # fixed typo: was "Passowrd not valid"
INVALID_USER = "User does not exist"  # fixed typos: was "User dose not exsists"
INVALID_SESSION = "Session Invalid"
INVALID_REQUEST = "Not a valid request"
BAD_REQUEST = "Bad request"
PASSWORD_EXPIERD = "Password Expired"  # fixed typo in value: was "Password Expierd"
# Correctly-spelled alias; the misspelled constant name above is kept so
# existing callers keep working.
PASSWORD_EXPIRED = PASSWORD_EXPIERD
25294,
38,
1961,
12425,
62,
6173,
5432,
62,
5653,
38,
796,
366,
12982,
50098,
503,
7675,
1,
201,
198,
25294,
1268,
62,
6173,
5432,
62,
5653,
38,
796,
366,
12982,
50098,
287,
7675,
1,
201,
198,
1268,
23428,
2389,
62,
47924,
796,
366,... | 2.639344 | 122 |
#!/usr/bin/env/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
| [
2,
48443,
14629,
14,
8800,
14,
24330,
14,
29412,
198,
198,
2,
198,
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
... | 3.942584 | 209 |
#!/usr/bin/env python
# -*- coding: utf-8-unix -*-
"""
FOO = Register("A:4 B:4", 0x12)
BAR = Register("B:4 C:4", 0x23)
# evals as int which is a register address
print FOO == 0x12
# each field attribute returns a mask for that field
print FOO.B == 0b00001111
print BAR.B == 0b11110000
# ERROR: Register definition is readonly
FOO.B = 0b10101010
# creates register instance with initial value
foo = FOO(0xAC)
print foo.A == 0xA
print foo.B == 0xC
print foo == 0xAC
foo.B = 0
print foo == 0xA0
"""
import sys, os
from bitstring import Bits, BitArray
"""
Convert various typed values into BitArray value.
"""
"""
Installs filter function to limit access to non-existing attribute.
NOTE:
This replaces belonging class of passed object to dynamically
generated subclass of the original class.
"""
"""
Generic class to wrap built-in types with custom attributes.
"""
    def unsubscribe(self, func):
        # Remove func from the monitor-callback registry; unknown callbacks
        # are silently ignored.
        # NOTE(review): dict.has_key() exists only in Python 2 — confirm the
        # target interpreter, or this line raises AttributeError on Python 3.
        if self.__mon.has_key(func):
            del self.__mon[func]
"""
Returns a new register value instance with the same initial value.
"""
if __name__ == "__main__":
from IPython import embed
sys.excepthook = handle_exception
REG = Register("FOO:3 :1 BAR:4", 0x12)
print(REG)
print(REG.FOO)
print(REG.BAR)
reg = REG(0xAC)
print(reg)
print(reg.FOO)
print(reg.BAR)
embed()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
12,
403,
844,
532,
9,
12,
198,
198,
37811,
198,
6080,
46,
796,
17296,
7203,
32,
25,
19,
347,
25,
19,
1600,
657,
87,
1065,
8,
198,
... | 2.678643 | 501 |
#!/usr/bin/env python
# coding=utf-8
#
# Copyright 2015-2016 VMware, Inc. All Rights Reserved.
#
# Licensed under the X11 (MIT) (the License) set forth below;
#
# you may not use this file except in compliance with the License. Unless required by applicable law or agreed to in
# writing, software distributed under the License is distributed on an AS IS BASIS, without warranties or conditions
# of any kind, EITHER EXPRESS OR IMPLIED. See the License for the specific language governing permissions and
# limitations under the License. Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# "THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import argparse
import ConfigParser
from argparse import RawTextHelpFormatter
from tabulate import tabulate
from nsxramlclient.client import NsxClient
from pkg_resources import resource_filename
from libutils import dfw_rule_list_helper
from libutils import connect_to_vc
from libutils import nametovalue
__author__ = 'Emanuele Mazza'
def dfw_section_list(client_session):
    """
    This function returns all the sections of the NSX distributed firewall
    :param client_session: An instance of an NsxClient Session
    :return returns
            - for each of the three available sections types (L2, L3Redirect, L3) a list with item 0 containing the
              section name as string, item 1 containing the section id as string, item 2 containing the section type
              as a string
            - a dictionary containing all sections' details, including dfw rules
    """
    all_dfw_sections = client_session.read('dfwConfig')['body']['firewallConfiguration']
    if str(all_dfw_sections['layer2Sections']) != 'None':
        l2_dfw_sections = all_dfw_sections['layer2Sections']['section']
    else:
        l2_dfw_sections = list()
    # BUG FIX: this guard previously re-tested 'layer2Sections' (copy-paste),
    # so a config with L2 sections but no L3-redirect sections crashed with a
    # TypeError on None['section']. Test the key that is actually read, as
    # the sibling dfw_rule_list() already does.
    if str(all_dfw_sections['layer3RedirectSections']) != 'None':
        l3r_dfw_sections = all_dfw_sections['layer3RedirectSections']['section']
    else:
        l3r_dfw_sections = list()
    if str(all_dfw_sections['layer3Sections']) != 'None':
        l3_dfw_sections = all_dfw_sections['layer3Sections']['section']
    else:
        l3_dfw_sections = list()

    # Placeholder rows returned when a section type has no sections at all.
    l2_section_list = [['---', '---', '---']]
    l3r_section_list = [['---', '---', '---']]
    l3_section_list = [['---', '---', '---']]

    # The REST client returns a bare dict when there is a single section;
    # normalize each group to a list of dicts.
    if type(l2_dfw_sections) is not list:
        keys_and_values = zip(dict.keys(l2_dfw_sections), dict.values(l2_dfw_sections))
        l2_dfw_sections = list()
        l2_dfw_sections.append(dict(keys_and_values))
    if type(l3_dfw_sections) is not list:
        keys_and_values = zip(dict.keys(l3_dfw_sections), dict.values(l3_dfw_sections))
        l3_dfw_sections = list()
        l3_dfw_sections.append(dict(keys_and_values))
    if type(l3r_dfw_sections) is not list:
        keys_and_values = zip(dict.keys(l3r_dfw_sections), dict.values(l3r_dfw_sections))
        l3r_dfw_sections = list()
        l3r_dfw_sections.append(dict(keys_and_values))

    # Build (name, id, type) rows per group; sections may lack a '@name'.
    if len(l2_dfw_sections) != 0:
        l2_section_list = list()
        for sl in l2_dfw_sections:
            try:
                section_name = sl['@name']
            except KeyError:
                section_name = '<empty name>'
            l2_section_list.append((section_name, sl['@id'], sl['@type']))
    if len(l3r_dfw_sections) != 0:
        l3r_section_list = list()
        for sl in l3r_dfw_sections:
            try:
                section_name = sl['@name']
            except KeyError:
                section_name = '<empty name>'
            l3r_section_list.append((section_name, sl['@id'], sl['@type']))
    if len(l3_dfw_sections) != 0:
        l3_section_list = list()
        for sl in l3_dfw_sections:
            try:
                section_name = sl['@name']
            except KeyError:
                section_name = '<empty name>'
            l3_section_list.append((section_name, sl['@id'], sl['@type']))
    return l2_section_list, l3r_section_list, l3_section_list, all_dfw_sections
def dfw_section_delete(client_session, section_id):
    """
    This function delete a section given its id
    :param client_session: An instance of an NsxClient Session
    :param section_id: The id of the section that must be deleted
    :return returns
            - A table containing these information: Return Code (True/False), Section ID, Section Name, Section Type
            - ( verbose option ) A list containing a single list which elements are Return Code (True/False),
              Section ID, Section Name, Section Type
    If there is no matching list
            - Return Code is set to False
            - Section ID is set to the value passed as input parameter
            - Section Name is set to "---"
            - Section Type is set to "---"
    """
    l2_section_list, l3r_section_list, l3_section_list, detailed_dfw_sections = dfw_section_list(client_session)
    dfw_section_id = str(section_id)
    # Each group is scanned with two separate tests: delete on a match unless
    # it is that group's protected default section, in which case refuse.
    for i, val in enumerate(l3_section_list):
        if dfw_section_id == str(val[1]) and str(val[0]) != 'Default Section Layer3':
            client_session.delete('dfwL3SectionId', uri_parameters={'sectionId': dfw_section_id})
            result = [["True", dfw_section_id, str(val[0]), str(val[-1])]]
            return result
        if dfw_section_id == str(val[1]) and str(val[0]) == 'Default Section Layer3':
            result = [["False-Delete Default Section is not allowed", dfw_section_id, "---", "---"]]
            return result
    for i, val in enumerate(l2_section_list):
        if dfw_section_id == str(val[1]) and str(val[0]) != 'Default Section Layer2':
            client_session.delete('dfwL2SectionId', uri_parameters={'sectionId': dfw_section_id})
            result = [["True", dfw_section_id, str(val[0]), str(val[-1])]]
            return result
        if dfw_section_id == str(val[1]) and str(val[0]) == 'Default Section Layer2':
            result = [["False-Delete Default Section is not allowed", dfw_section_id, "---", "---"]]
            return result
    # L3-redirect sections use a different endpoint and parameter name.
    for i, val in enumerate(l3r_section_list):
        if dfw_section_id == str(val[1]) and str(val[0]) != 'Default Section':
            client_session.delete('section', uri_parameters={'section': dfw_section_id})
            result = [["True", dfw_section_id, str(val[0]), str(val[-1])]]
            return result
        if dfw_section_id == str(val[1]) and str(val[0]) == 'Default Section':
            result = [["False-Delete Default Section is not allowed", dfw_section_id, "---", "---"]]
            return result
    # No section with that id in any group.
    result = [["False", dfw_section_id, "---", "---"]]
    return result
def dfw_rule_delete(client_session, rule_id):
    """
    This function delete a dfw rule given its id
    :param client_session: An instance of an NsxClient Session
    :param rule_id: The id of the rule that must be deleted
    :return returns
            - A table containing these information: Return Code (True/False), Rule ID, Rule Name, Applied-To, Section ID
            - ( verbose option ) A list containing a single list which elements are Return Code (True/False),
              Rule ID, Rule Name, Applied-To, Section ID
    If there is no matching rule
            - Return Code is set to False
            - Rule ID is set to the value passed as input parameter
            - All other returned parameters are set to "---"
    """
    l2_rule_list, l3_rule_list, l3r_rule_list = dfw_rule_list(client_session)
    dfw_rule_id = str(rule_id)

    # BUG FIX: the original attached an 'else: return' to the match test
    # inside each loop, so the function bailed out on the FIRST L3 rule
    # whenever it was not the one searched for, and the L2/L3R lists were
    # never scanned. Mirror dfw_section_delete: only short-circuit when the
    # rule id actually matches (delete, or refuse for the default rule).
    for val in l3_rule_list:
        if dfw_rule_id == str(val[0]) and str(val[1]) != 'Default Rule':
            dfw_section_id = str(val[-1])
            # Item [0][3] of dfw_section_read's first return value carries the
            # section Etag needed for the If-match header.
            section_list, dfwL3_section_details = dfw_section_read(client_session, dfw_section_id)
            etag = str(section_list[0][3])
            client_session.delete('dfwL3Rule', uri_parameters={'ruleId': dfw_rule_id, 'sectionId': dfw_section_id},
                                  additional_headers={'If-match': etag})
            return [["True", dfw_rule_id, str(val[1]), str(val[-2]), str(val[-1])]]
        if dfw_rule_id == str(val[0]) and str(val[1]) == 'Default Rule':
            return [["False-Delete Default Rule is not allowed", dfw_rule_id, "---", "---", "---"]]
    for val in l2_rule_list:
        if dfw_rule_id == str(val[0]) and str(val[1]) != 'Default Rule':
            dfw_section_id = str(val[-1])
            section_list, dfwL2_section_details = dfw_section_read(client_session, dfw_section_id)
            etag = str(section_list[0][3])
            client_session.delete('dfwL2Rule', uri_parameters={'ruleId': dfw_rule_id, 'sectionId': dfw_section_id},
                                  additional_headers={'If-match': etag})
            return [["True", dfw_rule_id, str(val[1]), str(val[-2]), str(val[-1])]]
        if dfw_rule_id == str(val[0]) and str(val[1]) == 'Default Rule':
            return [["False-Delete Default Rule is not allowed", dfw_rule_id, "---", "---", "---"]]
    for val in l3r_rule_list:
        if dfw_rule_id == str(val[0]) and str(val[1]) != 'Default Rule':
            dfw_section_id = str(val[-1])
            # The redirect-rule endpoint takes different parameter names and
            # no If-match header (matching the original behavior).
            section_list, dfwL3r_section_details = dfw_section_read(client_session, dfw_section_id)
            etag = str(section_list[0][3])
            client_session.delete('rule', uri_parameters={'ruleID': dfw_rule_id, 'section': dfw_section_id})
            return [["True", dfw_rule_id, str(val[1]), str(val[-2]), str(val[-1])]]
        if dfw_rule_id == str(val[0]) and str(val[1]) == 'Default Rule':
            return [["False-Delete Default Rule is not allowed", dfw_rule_id, "---", "---", "---"]]
    # No rule with that id in any list.
    return [["False", dfw_rule_id, "---", "---", "---"]]
def dfw_section_id_read(client_session, dfw_section_name):
    """
    This function returns the section(s) ID(s) given a section name
    :param client_session: An instance of an NsxClient Session
    :param dfw_section_name: The name ( case sensitive ) of the section for which the ID is wanted
    :return returns
            - A list of dictionaries. Each dictionary contains the type and the id of each section named as
              specified by the input parameter. If no such section exists, the list contains a single dictionary
              with {'Type': 0, 'Id': 0}
    """
    l2_section_list, l3r_section_list, l3_section_list, detailed_dfw_sections = dfw_section_list(client_session)
    wanted = str(dfw_section_name)
    matches = list()
    # Scan the groups in the same order as before: L3, L3-redirect, L2.
    for section in (l3_section_list + l3r_section_list + l2_section_list):
        if str(section[0]) == wanted:
            matches.append({'Type': str(section[2]), 'Id': int(section[1])})
    if len(matches) == 0:
        matches.append({'Type': 0, 'Id': 0})
    return matches
def dfw_rule_id_read(client_session, dfw_section_id, dfw_rule_name):
    """
    This function returns the rule(s) ID(s) given a section id and a rule name
    :param client_session: An instance of an NsxClient Session
    :param dfw_rule_name: The name ( case sensitive ) of the rule for which the ID is/are wanted. If the name
                          includes spaces, enclose it between ""
    :param dfw_section_id: The id of the section where the rule must be searched
    :return returns
            - A dictionary with the rule name as the key and a list as a value. The list contains all the matching
              rules id(s). For example {'RULE_ONE': [1013, 1012]}. If no matching rule exists, an empty dictionary
              is returned
    """
    l2_rules, l3_rules, l3r_rules = dfw_rule_list(client_session)
    wanted_name = str(dfw_rule_name)
    wanted_section = str(dfw_section_id)
    names = list()
    ids = list()
    # Scan the lists in the same order as before: L2, L3, L3-redirect.
    for rule in (l2_rules + l3_rules + l3r_rules):
        if (wanted_name == rule[1]) and (wanted_section == rule[-1]):
            names.append(wanted_name)
            ids.append(int(rule[0]))
    # All keys share the one id list (at most one distinct name can match).
    return dict.fromkeys(names, ids)
def _dfw_sections_as_list(raw_sections):
    """Normalize one sections node ('layer2Sections' etc.) to a list of
    section dicts: a 'None' node becomes [], a single bare dict is wrapped."""
    if str(raw_sections) == 'None':
        return list()
    sections = raw_sections['section']
    if type(sections) is not list:
        sections = [dict(zip(dict.keys(sections), dict.values(sections)))]
    return sections


def _dfw_rules_of(client_session, sections):
    """Collect the rules of the given section dicts (sections without a
    'rule' key are skipped) into one list via dfw_rule_list_helper."""
    sections = [sptr for sptr in sections if 'rule' in sptr]
    collected = list()
    result = list()
    for sptr in sections:
        section_rules = client_session.normalize_list_return(sptr['rule'])
        # The helper appends into 'collected' and returns it.
        result = dfw_rule_list_helper(client_session, section_rules, collected)
    return result


def dfw_rule_list(client_session):
    """
    This function returns all the rules of the NSX distributed firewall
    :param client_session: An instance of an NsxClient Session
    :return returns
            - a tabular view of all the dfw rules defined across L2, L3, L3Redirect
            - ( verbose option ) a list containing as many list as the number of dfw rules defined across
              L2, L3, L3Redirect (in this order). For each rule, these fields are returned:
              "ID", "Name", "Source", "Destination", "Service", "Action", "Direction", "Packet Type", "Applied-To",
              "ID (Section)"
    """
    # IMPROVEMENT: the per-layer normalization/collection logic was
    # copy-pasted three times; it is now factored into the two private
    # helpers above with identical behavior.
    all_dfw_sections_response = client_session.read('dfwConfig')
    all_dfw_sections = client_session.normalize_list_return(all_dfw_sections_response['body']['firewallConfiguration'])
    config = all_dfw_sections[0]

    l2_dfw_sections = _dfw_sections_as_list(config['layer2Sections'])
    l3_dfw_sections = _dfw_sections_as_list(config['layer3Sections'])
    l3r_dfw_sections = _dfw_sections_as_list(config['layer3RedirectSections'])

    l2_rule_list = _dfw_rules_of(client_session, l2_dfw_sections)
    l3_rule_list = _dfw_rules_of(client_session, l3_dfw_sections)
    l3r_rule_list = _dfw_rules_of(client_session, l3r_dfw_sections)

    return l2_rule_list, l3_rule_list, l3r_rule_list
def dfw_rule_read(client_session, rule_id):
    """
    This function retrieves details of a dfw rule given its id
    :param client_session: An instance of an NsxClient Session
    :param rule_id: The ID of the dfw rule to retrieve
    :return: returns
            - tabular view of the dfw rule
            - ( verbose option ) a list containing the dfw rule information: ID(Rule)- Name(Rule)- Source- Destination-
              Services- Action - Direction- Pktytpe- AppliedTo- ID(section)
    """
    wanted = str(rule_id)
    matches = list()
    # dfw_rule_list returns one list per section type; flatten the scan.
    for section_rules in dfw_rule_list(client_session):
        for candidate in section_rules:
            if candidate[0] == wanted:
                matches.append(candidate)
    return matches
def dfw_rule_source_delete(client_session, rule_id, source):
    """
    This function delete one of the sources of a dfw rule given the rule id and the source to be deleted
    If two or more sources have the same name, the function will delete all of them
    :param client_session: An instance of an NsxClient Session
    :param rule_id: The ID of the dfw rule to retrieve
    :param source: The source of the dfw rule to be deleted. If the source name contains any space, then it must be
                   enclosed in double quotes (like "VM Network")
    :return: returns
            - tabular view of the dfw rule after the deletion process has been performed
            - ( verbose option ) a list containing a list with the following dfw rule informations after the deletion
              process has been performed: ID(Rule)- Name(Rule)- Source- Destination- Services- Action - Direction-
              Pktytpe- AppliedTo- ID(section)
    """
    source = str(source)
    rule = dfw_rule_read(client_session, rule_id)
    if len(rule) == 0:
        # It means a rule with id = rule_id does not exist
        result = [[rule_id, "---", source, "---", "---", "---", "---", "---", "---", "---"]]
        return result
    # Get the rule data structure that will be modified and then piped into the update function
    section_list = dfw_section_list(client_session)
    sections = [section_list[0], section_list[1], section_list[2]]
    section_id = rule[0][-1]
    # Find the section's type (last match wins) to pick the right endpoint.
    rule_type_selector = ''
    for scan in sections:
        for val in scan:
            if val[1] == section_id:
                rule_type_selector = val[2]
    if rule_type_selector == '':
        # NOTE(review): Python 2 print statement — this module is Python 2 only.
        print 'ERROR: RULE TYPE SELECTOR CANNOT BE EMPTY - ABORT !'
        return
    if rule_type_selector == 'LAYER2':
        rule_type = 'dfwL2Rule'
    elif rule_type_selector == 'LAYER3':
        rule_type = 'dfwL3Rule'
    else:
        rule_type = 'rule'
    rule_schema = client_session.read(rule_type, uri_parameters={'ruleId': rule_id, 'sectionId': section_id})
    # NOTE(review): the positional .items()[n][1] indexing below assumes the
    # exact ordered-dict layout produced by NsxClient (last item = Etag,
    # second item = body) — brittle; confirm against the client version.
    rule_etag = rule_schema.items()[-1][1]
    if 'sources' not in rule_schema.items()[1][1]['rule']:
        # It means the only source is "any" and it cannot be deleted short of deleting the whole rule
        rule = dfw_rule_read(client_session, rule_id)
        return rule
    if type(rule_schema.items()[1][1]['rule']['sources']['source']) == list:
        # It means there are more than one sources, each one with his own dict
        sources_list = rule_schema.items()[1][1]['rule']['sources']['source']
        # NOTE(review): deleting from the list while enumerating it skips the
        # element that follows each match — duplicate adjacent sources may
        # survive a single call.
        for i, val in enumerate(sources_list):
            if val['type'] == 'Ipv4Address' and val['value'] == source or 'name' in val and val['name'] == source:
                del rule_schema.items()[1][1]['rule']['sources']['source'][i]
        # The order dict "rule_schema" must be parsed to find the dict that will be piped into the update function
        rule = client_session.update(rule_type, uri_parameters={'ruleId': rule_id, 'sectionId': section_id},
                                     request_body_dict=rule_schema.items()[1][1],
                                     additional_headers={'If-match': rule_etag})
        rule = dfw_rule_read(client_session, rule_id)
        return rule
    if type(rule_schema.items()[1][1]['rule']['sources']['source']) == dict:
        # It means there is just one explicit source with his dict
        source_dict = rule_schema.items()[1][1]['rule']['sources']['source']
        if source_dict['type'] == 'Ipv4Address' and source_dict['value'] == source or \
                        'name' in dict.keys(source_dict) and source_dict['name'] == source:
            del rule_schema.items()[1][1]['rule']['sources']
            rule = client_session.update(rule_type, uri_parameters={'ruleId': rule_id, 'sectionId': section_id},
                                         request_body_dict=rule_schema.items()[1][1],
                                         additional_headers={'If-match': rule_etag})
        rule = dfw_rule_read(client_session, rule_id)
        return rule
def dfw_rule_destination_delete(client_session, rule_id, destination):
    """
    This function delete one of the destinations of a dfw rule given the rule id and the destination to be deleted.
    If two or more destinations have the same name, the function will delete all of them
    :param client_session: An instance of an NsxClient Session
    :param rule_id: The ID of the dfw rule to retrieve
    :param destination: The destination of the dfw rule to be deleted. If the destination name contains any space, then
                        it must be enclosed in double quotes (like "VM Network")
    :return: returns
            - tabular view of the dfw rule after the deletion process has been performed
            - ( verbose option ) a list containing a list with the following dfw rule informations after the deletion
              process has been performed: ID(Rule)- Name(Rule)- Source- Destination- Services- Action - Direction-
              Pktytpe- AppliedTo- ID(section)
    """
    destination = str(destination)
    rule = dfw_rule_read(client_session, rule_id)
    if len(rule) == 0:
        # It means a rule with id = rule_id does not exist
        result = [[rule_id, "---", "---", destination, "---", "---", "---", "---", "---", "---"]]
        return result
    # Get the rule data structure that will be modified and then piped into the update function
    section_list = dfw_section_list(client_session)
    sections = [section_list[0], section_list[1], section_list[2]]
    section_id = rule[0][-1]
    # Find the section's type (last match wins) to pick the right endpoint.
    rule_type_selector = ''
    for scan in sections:
        for val in scan:
            if val[1] == section_id:
                rule_type_selector = val[2]
    if rule_type_selector == '':
        # NOTE(review): Python 2 print statement — this module is Python 2 only.
        print 'ERROR: RULE TYPE SELECTOR CANNOT BE EMPTY - ABORT !'
        return
    if rule_type_selector == 'LAYER2':
        rule_type = 'dfwL2Rule'
    elif rule_type_selector == 'LAYER3':
        rule_type = 'dfwL3Rule'
    else:
        rule_type = 'rule'
    rule_schema = client_session.read(rule_type, uri_parameters={'ruleId': rule_id, 'sectionId': section_id})
    # NOTE(review): the positional .items()[n][1] indexing below assumes the
    # exact ordered-dict layout produced by NsxClient (last item = Etag,
    # second item = body) — brittle; confirm against the client version.
    rule_etag = rule_schema.items()[-1][1]
    if 'destinations' not in rule_schema.items()[1][1]['rule']:
        # It means the only destination is "any" and it cannot be deleted short of deleting the whole rule
        rule = dfw_rule_read(client_session, rule_id)
        return rule
    if type(rule_schema.items()[1][1]['rule']['destinations']['destination']) == list:
        # It means there are more than one destinations, each one with his own dict
        destination_list = rule_schema.items()[1][1]['rule']['destinations']['destination']
        # NOTE(review): deleting from the list while enumerating it skips the
        # element that follows each match — duplicate adjacent destinations
        # may survive a single call.
        for i, val in enumerate(destination_list):
            if val['type'] == 'Ipv4Address' and val['value'] == destination or \
                            'name' in val and val['name'] == destination:
                del rule_schema.items()[1][1]['rule']['destinations']['destination'][i]
        # The order dict "rule_schema" must be parsed to find the dict that will be piped into the update function
        rule = client_session.update(rule_type, uri_parameters={'ruleId': rule_id, 'sectionId': section_id},
                                     request_body_dict=rule_schema.items()[1][1],
                                     additional_headers={'If-match': rule_etag})
        rule = dfw_rule_read(client_session, rule_id)
        return rule
    if type(rule_schema.items()[1][1]['rule']['destinations']['destination']) == dict:
        # It means there is just one explicit destination with his dict
        destination_dict = rule_schema.items()[1][1]['rule']['destinations']['destination']
        if destination_dict['type'] == 'Ipv4Address' and destination_dict['value'] == destination or \
                        'name' in dict.keys(destination_dict) and \
                        destination_dict['name'] == destination:
            del rule_schema.items()[1][1]['rule']['destinations']
        # NOTE(review): unlike the source variant, the update here runs even
        # when nothing matched (outside the if) — confirm this is intended.
        rule = client_session.update(rule_type, uri_parameters={'ruleId': rule_id, 'sectionId': section_id},
                                     request_body_dict=rule_schema.items()[1][1],
                                     additional_headers={'If-match': rule_etag})
        rule = dfw_rule_read(client_session, rule_id)
        return rule
#def dfw_rule_create(client_session, vccontent, section_id, rule_name, rule_direction, rule_pktype, rule_disabled,
#rule_action, rule_applyto, rule_source_type, rule_source_name, rule_source_value,
#rule_source_excluded, rule_destination_type, rule_destination_name, rule_destination_value,
#rule_destination_excluded, rule_service_protocolname, rule_service_destport,
#rule_service_srcport, rule_service_name, rule_note, rule_tag, rule_logged):
def _dfw_service_matches(val, service):
    """Return True when the rule's service entry *val* matches the parsed *service* triple.

    *service* is [name_or_protocol, source_port, destination_port]. A port key
    that is absent from *val* matches only the literal string 'any' (this mirrors
    the original four-way boolean condition exactly).
    """
    # Named service: match on the service name alone.
    if 'name' in val and val['name'] == service[0]:
        return True
    # Protocol-based service: protocol name plus both ports must match.
    if 'protocolName' not in val or val['protocolName'] != service[0]:
        return False
    src_ok = val['sourcePort'] == service[1] if 'sourcePort' in val else service[1] == 'any'
    dst_ok = val['destinationPort'] == service[2] if 'destinationPort' in val else service[2] == 'any'
    return src_ok and dst_ok


def dfw_rule_service_delete(client_session, rule_id, service):
    """
    This function deletes one of the services of a dfw rule given the rule id and the service to be deleted.
    If two or more services match, all of them are deleted.
    :param client_session: An instance of an NsxClient Session
    :param rule_id: The ID of the dfw rule to retrieve
    :param service: The service of the dfw rule to be deleted. If the service name contains any space, then
                    it must be enclosed in double quotes (like "VM Network"). For TCP/UDP services the syntax is as
                    follows: Proto:SourcePort:DestinationPort ( example TCP:9090:any )
    :return: returns
            - tabular view of the dfw rule after the deletion process has been performed
            - ( verbose option ) a list containing a list with the following dfw rule informations after the deletion
              process has been performed: ID(Rule)- Name(Rule)- Source- Destination- Services- Action - Direction-
              Pktytpe- AppliedTo- ID(section)
    """
    # Parse "Proto:SrcPort:DstPort" and pad to three entries with '' so the
    # triple is always addressable.
    service = str(service).split(':', 3)
    while len(service) < 3:
        service.append('')
    rule = dfw_rule_read(client_session, rule_id)
    if len(rule) == 0:
        # A rule with id = rule_id does not exist: return a placeholder row.
        result = [[rule_id, "---", "---", "---", service, "---", "---", "---", "---", "---"]]
        return result
    # Locate the section owning the rule to pick the right API resource type.
    section_list = dfw_section_list(client_session)
    sections = [section_list[0], section_list[1], section_list[2]]
    section_id = rule[0][-1]
    rule_type_selector = ''
    for scan in sections:
        for val in scan:
            if val[1] == section_id:
                rule_type_selector = val[2]
    if rule_type_selector == '':
        print ('ERROR: RULE TYPE SELECTOR CANNOT BE EMPTY - ABORT !')
        return
    if rule_type_selector == 'LAYER2':
        rule_type = 'dfwL2Rule'
    elif rule_type_selector == 'LAYER3':
        rule_type = 'dfwL3Rule'
    else:
        rule_type = 'rule'
    rule_schema = client_session.read(rule_type, uri_parameters={'ruleId': rule_id, 'sectionId': section_id})
    rule_etag = rule_schema.items()[-1][1]
    if 'services' not in rule_schema.items()[1][1]['rule']:
        # The only service is "any"; it cannot be deleted short of deleting the whole rule.
        rule = dfw_rule_read(client_session, rule_id)
        return rule
    services = rule_schema.items()[1][1]['rule']['services']['service']
    if type(services) == list:
        # More than one service, each with its own dict.
        # BUG FIX: the original code deleted entries while enumerating the same
        # list, which skips the element following every match; rebuild the list
        # instead so that every matching service is removed.
        rule_schema.items()[1][1]['rule']['services']['service'] = \
            [val for val in services if not _dfw_service_matches(val, service)]
        rule = client_session.update(rule_type, uri_parameters={'ruleId': rule_id, 'sectionId': section_id},
                                     request_body_dict=rule_schema.items()[1][1],
                                     additional_headers={'If-match': rule_etag})
        rule = dfw_rule_read(client_session, rule_id)
        return rule
    if type(services) == dict:
        # Just one explicit service with its own dict: drop the whole 'services'
        # node when it matches, then push the update.
        if _dfw_service_matches(services, service):
            del rule_schema.items()[1][1]['rule']['services']
            rule = client_session.update(rule_type, uri_parameters={'ruleId': rule_id, 'sectionId': section_id},
                                         request_body_dict=rule_schema.items()[1][1],
                                         additional_headers={'If-match': rule_etag})
            rule = dfw_rule_read(client_session, rule_id)
            return rule
def dfw_rule_applyto_delete(client_session, rule_id, applyto):
    """
    This function deletes one of the applyto clauses of a dfw rule given the rule id and the clause to be deleted.
    If two or more clauses match the name, all of them are deleted.
    :param client_session: An instance of an NsxClient Session
    :param rule_id: The ID of the dfw rule to retrieve
    :param applyto: The name of the applyto clause of the dfw rule to be deleted. If it contains any space, then
                    it must be enclosed in double quotes (like "VM Network").
    :return: returns
            - tabular view of the dfw rule after the deletion process has been performed
            - ( verbose option ) a list containing a list with the following dfw rule information after the deletion
              process has been performed: ID(Rule)- Name(Rule)- Source- Destination- Services- Action - Direction-
              Pktytpe- AppliedTo- ID(section)
    """
    apply_to = str(applyto)
    rule = dfw_rule_read(client_session, rule_id)
    if len(rule) == 0:
        # A rule with id = rule_id does not exist: return a placeholder row.
        result = [[rule_id, "---", "---", "---", "---", "---", "---", "---", apply_to, "---"]]
        return result
    # Locate the section owning the rule to pick the right API resource type.
    section_list = dfw_section_list(client_session)
    sections = [section_list[0], section_list[1], section_list[2]]
    section_id = rule[0][-1]
    rule_type_selector = ''
    for scan in sections:
        for val in scan:
            if val[1] == section_id:
                rule_type_selector = val[2]
    if rule_type_selector == '':
        print ('ERROR: RULE TYPE SELECTOR CANNOT BE EMPTY - ABORT !')
        return
    if rule_type_selector == 'LAYER2':
        rule_type = 'dfwL2Rule'
    elif rule_type_selector == 'LAYER3':
        rule_type = 'dfwL3Rule'
    else:
        rule_type = 'rule'
    rule_schema = client_session.read(rule_type, uri_parameters={'ruleId': rule_id, 'sectionId': section_id})
    rule_etag = rule_schema.items()[-1][1]
    applied_to = rule_schema.items()[1][1]['rule']['appliedToList']['appliedTo']
    if type(applied_to) == list:
        # More than one applyto clause, each with its own dict.
        # BUG FIX: the original code deleted entries while enumerating the same
        # list, which skips the element following every match; rebuild the list
        # instead so that every matching clause is removed.
        rule_schema.items()[1][1]['rule']['appliedToList']['appliedTo'] = \
            [val for val in applied_to if not ('name' in val and val['name'] == apply_to)]
        rule = client_session.update(rule_type, uri_parameters={'ruleId': rule_id, 'sectionId': section_id},
                                     request_body_dict=rule_schema.items()[1][1],
                                     additional_headers={'If-match': rule_etag})
        rule = dfw_rule_read(client_session, rule_id)
        return rule
    if type(applied_to) == dict:
        # Just one explicit applyto clause with its own dict.
        val = applied_to
        if 'name' in val and val['name'] == "DISTRIBUTED_FIREWALL":
            # "DISTRIBUTED_FIREWALL" cannot be deleted short of deleting the whole rule.
            rule = dfw_rule_read(client_session, rule_id)
            return rule
        if 'name' in val and val['name'] == apply_to:
            del rule_schema.items()[1][1]['rule']['appliedToList']
            rule = client_session.update(rule_type, uri_parameters={'ruleId': rule_id, 'sectionId': section_id},
                                         request_body_dict=rule_schema.items()[1][1],
                                         additional_headers={'If-match': rule_etag})
            rule = dfw_rule_read(client_session, rule_id)
            return rule
def dfw_section_read(client_session, dfw_section_id):
    """
    Retrieve the details of a dfw section given its id.
    :param client_session: An instance of an NsxClient Session
    :param dfw_section_id: The ID of the dfw section to retrieve details from
    :return: returns
            - a tabular view of the section with the following information: Name, Section id, Section type, Etag
            - ( verbose option ) a dictionary containing all the section's details
    """
    uri_parameters = {'sectionId': str(dfw_section_id)}
    dfwL3_section_details = dict(client_session.read('dfwL3SectionId', uri_parameters))
    # Pull the summary fields out of the response body and pair them with the Etag.
    section = dfwL3_section_details['body']['section']
    section_list = [(section['@name'], section['@id'], section['@type'], dfwL3_section_details['Etag'])]
    return section_list, dfwL3_section_details
def dfw_section_create(client_session, dfw_section_name, dfw_section_type):
    """
    This function creates a new dfw section given its name and its type.
    The new section is created on top of all other existing sections and with no rules.
    If a section of the same type and with the same name already exists, nothing is done.
    :param client_session: An instance of an NsxClient Session
    :param dfw_section_name: The name of the dfw section to be created
    :param dfw_section_type: The type of the section. Allowed values are L2/L3/L3R
    :return: returns
            - a tabular view of all the sections of the same type of the one just created. The table contains the
              following information: Name, Section id, Section type
            - ( verbose option ) a dictionary containing for each possible type all sections' details, including
              dfw rules
    """
    dfw_section_name = str(dfw_section_name)
    dfw_section_selector = str(dfw_section_type)
    # Map the user-facing selector to the API resource name and to the index of
    # the matching list in the dfw_section_list() result (l2, l3r, l3, detailed).
    type_map = {'L2': ('dfwL2Section', 0),
                'L3': ('dfwL3Section', 2),
                'L3R': ('layer3RedirectSections', 1)}
    if dfw_section_selector not in type_map:
        print ('Section Type Unknown - Allowed values are L2/L3/L3R -- Aborting')
        return
    dfw_section_type, list_idx = type_map[dfw_section_selector]
    # Regardless of the final section type this is the correct way to get the empty schema.
    section_schema = client_session.extract_resource_body_example('dfwL3Section', 'create')
    section_schema['section']['@name'] = dfw_section_name
    # Delete the rule sub-schema to create an empty section.
    del section_schema['section']['rule']
    # Duplicate check: a section of the same type with the same name must not be recreated.
    sections = dfw_section_list(client_session)
    for val in sections[list_idx]:
        if dfw_section_name in val:
            # Section with the same name already exists.
            return sections[list_idx], sections[3]
    client_session.create(dfw_section_type, request_body_dict=section_schema)
    # Re-read so the returned view includes the newly created section.
    sections = dfw_section_list(client_session)
    return sections[list_idx], sections[3]
# Script entry point: run the CLI when executed directly.
if __name__ == "__main__":
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
28,
40477,
12,
23,
198,
2,
198,
2,
15069,
220,
1853,
12,
5304,
37754,
11,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
1395,
1157,
357,
36393,
8,
3... | 2.380041 | 17,656 |
import pathlib
from django.conf import settings
from django.core import mail
from django.core.mail import EmailMessage
from django.test import TestCase
| [
11748,
3108,
8019,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
7295,
1330,
6920,
198,
6738,
42625,
14208,
13,
7295,
13,
4529,
1330,
9570,
12837,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
... | 3.756098 | 41 |
#! encoding = utf-8
""" Practice French conjugaison """
import sys
from os.path import isfile
from time import sleep
from sqlite3 import Error as dbError
from PyQt5 import QtWidgets, QtCore
from PyQt5.QtGui import QTextOption, QKeySequence
from dictionary import TENSE_MOODS, PERSONS
from dictionary import conjug, conjug_all
from config import Config, from_json_, to_json
from lang import LANG_PKG
from db import AppDB
class Box1(QtWidgets.QGroupBox):
    """'Forward' practice panel: shows a verb plus tense/mood/person and checks
    the conjugated form typed by the user.

    NOTE(review): no __init__ is visible in this copy, so the widgets referenced
    below (lblCk, editInput, lblVerb, ...) are presumably created elsewhere —
    confirm against the full source.
    """
    # Emitted after an answer has been checked and recorded in the database.
    sig_checked = QtCore.pyqtSignal()
    def _gen(self):
        """ Generate a verb & a conjugaison """
        # clear previous result
        self.lblCk.clear()
        self.editInput.clear()
        # draw random verb until there is a valid conjugation
        # this is to avoid those few special verbs that do not have full conjug.
        try:
            while True:
                # every <retry_intvl> practices, retrieve the verb with
                # maximum incorrect number and try again
                if not (self.config.nft % self.config.retry_intvl):
                    entry_id, verb, explanation, tm_idx, pers_idx = self.db.choose_verb(
                        'practice_forward', self.config.enabled_tm_idx,
                        order='correct_num ASC')
                else:    # randomly select a verb
                    entry_id, verb, explanation, tm_idx, pers_idx = self.db.choose_verb(
                        'practice_forward', self.config.enabled_tm_idx)
                tense, mood = TENSE_MOODS[tm_idx]
                answer = conjug(verb, tense, mood, pers_idx)
                if answer:
                    self.lblVerb.setText(verb)
                    self.lblExp.setText(explanation)
                    self.lblPerson.setText(PERSONS[pers_idx])
                    # NOTE(review): 'impratif' looks like 'impératif' with the
                    # non-ASCII char stripped in this copy — verify the literal.
                    if mood == 'impratif':
                        pass
                    else:
                        self.editInput.setText(PERSONS[pers_idx])
                    self.lblTense.setText(tense)
                    self.lblMood.setText(mood)
                    self.editInput.setFocus()
                    self._answer = answer
                    self._entry_id = entry_id
                    self._tm_idx = tm_idx
                    self.config.nft += 1    # add 1 to n total forward
                    self.btnCheck.setDisabled(False)
                    break
        except ValueError as err:
            d = QtWidgets.QMessageBox(
                QtWidgets.QMessageBox.Critical,
                LANG_PKG[self.config.lang]['msg_error_title'], str(err))
            d.exec_()
        except TypeError:
            d = QtWidgets.QMessageBox(
                QtWidgets.QMessageBox.Warning,
                LANG_PKG[self.config.lang]['msg_warning_title'],
                LANG_PKG[self.config.lang]['msg_warning_no_entry'])
            d.exec_()
        except KeyError as err:
            d = QtWidgets.QMessageBox(
                QtWidgets.QMessageBox.Warning,
                LANG_PKG[self.config.lang]['msg_warning_title'],
                LANG_PKG[self.config.lang]['msg_warning_no_config'].format(str(err))
            )
            d.exec_()
    def _ck(self):
        """ Check the answer """
        txt = self.editInput.text()
        # remove extra spaces and only put 1
        txt_striped = ' '.join(txt.split())
        # NOTE(review): both branches call setText('') — the feedback glyphs
        # (check/cross marks) appear stripped from this copy; verify.
        if txt_striped == self._answer:
            self.lblCk.setText('')
            self.lblCk.setStyleSheet('font-size: 14pt; font: bold; color: #009933')
            self.config.nfc += 1
            self._timer.start()
        else:
            self.lblCk.setText('')
            self.lblCk.setStyleSheet('font-size: 14pt; font: bold; color: #D63333')
        try:
            # Record the result (correct/incorrect) and notify listeners.
            self.db.update_res('practice_forward', self._entry_id, txt_striped == self._answer)
            self.sig_checked.emit()
        except TypeError:
            d = QtWidgets.QMessageBox(
                QtWidgets.QMessageBox.Warning,
                LANG_PKG[self.config.lang]['msg_warning_title'],
                LANG_PKG[self.config.lang]['msg_warning_no_entry']
            )
            d.exec_()
class Box2(QtWidgets.QGroupBox):
    """'Backward' practice panel: shows a conjugated form and checks the
    infinitive + tense/mood the user identifies.

    NOTE(review): no __init__ is visible in this copy, so the widgets referenced
    below (lblCk, editVerb, lblConjug, ...) are presumably created elsewhere —
    confirm against the full source.
    """
    # Emitted after an answer has been checked and recorded in the database.
    sig_checked = QtCore.pyqtSignal()
    def set_tm(self, checked_tm_idx):
        """ set tense mood options """
        self.comboTenseMood.clear()
        self.comboTenseMood.addItems([', '.join(TENSE_MOODS[i]) for i in checked_tm_idx])
        self.comboTenseMood.adjustSize()
    def _gen(self):
        """ Generate a conjugaison """
        # clear previous result
        self.lblCk.clear()
        self.editVerb.clear()
        # draw random verb until there is a valid conjugation
        # this is to avoid those few special verbs that do not have full conjug.
        try:
            while True:
                # every <retry_intvl> practices, retrieve the verb with
                # maximum incorrect number and try again
                if not (self.config.nbt % self.config.retry_intvl):
                    entry_id, verb, explanation, tm_idx, pers_idx = self.db.choose_verb(
                        'practice_backward', self.config.enabled_tm_idx,
                        order='correct_num ASC')
                else:    # randomly select a verb
                    entry_id, verb, explanation, tm_idx, pers_idx = self.db.choose_verb(
                        'practice_backward', self.config.enabled_tm_idx)
                tense, mood = TENSE_MOODS[tm_idx]
                conjug_str = conjug(verb, tense, mood, pers_idx)
                if conjug_str:
                    self.lblConjug.setText(conjug_str)
                    self.lblAns.clear()
                    self.editVerb.setFocus()
                    self._answer = verb
                    self._entry_id = entry_id
                    self._tm_idx = tm_idx
                    self.config.nbt += 1    # add 1 to n total backward
                    self.btnCheck.setDisabled(False)
                    break
        except ValueError as err:
            d = QtWidgets.QMessageBox(
                QtWidgets.QMessageBox.Critical,
                LANG_PKG[self.config.lang]['msg_error_title'], str(err))
            d.exec_()
        except TypeError:
            d = QtWidgets.QMessageBox(
                QtWidgets.QMessageBox.Warning,
                LANG_PKG[self.config.lang]['msg_warning_title'],
                LANG_PKG[self.config.lang]['msg_warning_no_entry'])
            d.exec_()
        except KeyError as err:
            d = QtWidgets.QMessageBox(
                QtWidgets.QMessageBox.Warning,
                LANG_PKG[self.config.lang]['msg_warning_title'],
                LANG_PKG[self.config.lang]['msg_warning_no_config'].format(str(err))
            )
            d.exec_()
    def _ck(self):
        """ Check the answer """
        # Both the infinitive (case-insensitive) and the tense/mood choice must match.
        is_correct = self.editVerb.text().lower() == self._answer and \
                     self.comboTenseMood.currentText() == ', '.join(TENSE_MOODS[self._tm_idx])
        # NOTE(review): both branches call setText('') — the feedback glyphs
        # (check/cross marks) appear stripped from this copy; verify.
        if is_correct:
            self.lblCk.setText('')
            self.lblCk.setStyleSheet('font-size: 14pt; color: #009933')
            self.config.nbc += 1
            self._timer.start(1000)
        else:
            self.lblCk.setText('')
            self.lblCk.setStyleSheet('font-size: 14pt; color: #D63333')
            # Show the expected answer longer before moving on.
            self.lblAns.setText(' '.join((self._answer,) + TENSE_MOODS[self._tm_idx]))
            self.btnCheck.setDisabled(True)
            self._timer.start(5000)
        try:
            # Record the result (correct/incorrect) and notify listeners.
            self.db.update_res('practice_backward', self._entry_id, is_correct)
            self.sig_checked.emit()
        except TypeError:
            d = QtWidgets.QMessageBox(
                QtWidgets.QMessageBox.Warning,
                LANG_PKG[self.config.lang]['msg_warning_title'],
                LANG_PKG[self.config.lang]['msg_warning_no_entry']
            )
            d.exec_()
class Box3(QtWidgets.QGroupBox):
    """Third practice panel.

    NOTE(review): only set_tm survives in this copy — confirm the remaining
    members against the full source.
    """
    def set_tm(self, checked_tm_idx):
        """ set tense mood options """
        self.comboTenseMood.clear()
        self.comboTenseMood.addItems([', '.join(TENSE_MOODS[i]) for i in checked_tm_idx])
class DialogConfig(QtWidgets.QDialog):
    """Configuration dialog. NOTE(review): class body is missing from this excerpt."""
class DialogPref(QtWidgets.QDialog):
    """Preferences dialog. NOTE(review): class body is missing from this excerpt."""
class DialogAddVoc(QtWidgets.QDialog):
    """Add-vocabulary dialog. NOTE(review): class body is missing from this excerpt."""
class DialogBrowse(QtWidgets.QDialog):
    """Vocabulary browser dialog. NOTE(review): class body is missing from this excerpt."""
class DialogStats(QtWidgets.QDialog):
    """Statistics dialog. NOTE(review): class body is missing from this excerpt."""
class MenuBar(QtWidgets.QMenuBar):
    """Application menu bar. NOTE(review): class body is missing from this excerpt."""
class StatusBar(QtWidgets.QStatusBar):
    """Application status bar. NOTE(review): class body is missing from this excerpt."""
# Script entry point: start the Qt application.
if __name__ == '__main__':
    launch()
| [
2,
0,
21004,
796,
3384,
69,
12,
23,
198,
198,
37811,
19939,
4141,
11644,
30302,
1653,
37227,
198,
198,
11748,
25064,
198,
6738,
28686,
13,
6978,
1330,
318,
7753,
198,
6738,
640,
1330,
3993,
198,
6738,
44161,
578,
18,
1330,
13047,
355,... | 1.867958 | 4,544 |
import matplotlib.pyplot as plt, numpy as np, pandas as pd
# general functions for plotting
# Tim Tyree
# 7.23.2021
def format_plot(ax=None,xlabel=None,ylabel=None,fontsize=20,use_loglog=False,xlim=None,ylim=None,use_bigticks=True,**kwargs):
    '''format plot formats the matplotlib axis instance, ax,
    performing routine formatting to the plot,
    labeling the x axis by the string, xlabel and
    labeling the y axis by the string, ylabel.

    Optionally switches both axes to log scale (use_loglog), enlarges tick
    labels (use_bigticks), and applies x/y limits (xlim, ylim), each given
    as a (min, max) pair. Extra kwargs are forwarded to the label/tick calls.
    Returns True.
    '''
    if not ax:
        # Default to the current axes when none is given.
        ax=plt.gca()
    if use_loglog:
        ax.set_xscale('log')
        ax.set_yscale('log')
    if xlabel:
        ax.set_xlabel(xlabel,fontsize=fontsize,**kwargs)
    if ylabel:
        ax.set_ylabel(ylabel,fontsize=fontsize,**kwargs)
    if use_bigticks:
        ax.tick_params(axis='both', which='major', labelsize=fontsize,**kwargs)
        ax.tick_params(axis='both', which='minor', labelsize=0,**kwargs)
    if xlim:
        ax.set_xlim(xlim)
    if ylim:
        # BUG FIX: previously called ax.set_xlim(ylim), which clobbered the
        # x limits instead of setting the y limits.
        ax.set_ylim(ylim)
    return True
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
11,
299,
32152,
355,
45941,
11,
19798,
292,
355,
279,
67,
198,
2,
2276,
5499,
329,
29353,
198,
2,
5045,
7039,
631,
198,
2,
767,
13,
1954,
13,
1238,
2481,
198,
198,
4299,
579... | 2.263279 | 433 |
from lambdatest import Operations
from unicode1 import convert
# Load the OCR results and index endpoint results by path.
file_name = "ocr.json"
k = Operations()
jsonObj = k.load_Json(file_name)
dictData = convert(jsonObj)
endpoints_list = dictData["endpoints"]
enp_path = []   # every endpoint path, in file order
dfp = {}        # path -> result, for endpoints that have one
for i in endpoints_list:
    enp_path.append(i["path"])
    # Not every endpoint carries a 'result'; skip those explicitly instead of
    # swallowing every exception with a bare except.
    if "result" in i:
        dfp[i["path"]] = i["result"]
| [
6738,
19343,
19608,
395,
1330,
16205,
198,
6738,
28000,
1098,
16,
1330,
10385,
198,
7753,
62,
3672,
796,
366,
1696,
13,
17752,
1,
198,
74,
796,
16205,
3419,
198,
17752,
49201,
796,
479,
13,
2220,
62,
41,
1559,
7,
7753,
62,
3672,
8,
... | 2.626984 | 126 |
import requests
from decouple import config
import datetime
from calendar import monthrange
import psycopg2
import time
# Script entry point: seed the database once, then refresh it daily.
if __name__ == "__main__":
    # Check db table; if it doesn't exist then create tables and pull last month's data into the db.
    check_db_table_exits()
    # Endless loop: sleep until the next run time, then fetch again.
    # NOTE(review): the comment says "9 am" but the actual wake-up time is
    # whatever get_remaining_time() computes — confirm there.
    while True:
        remain = get_remaining_time()
        print("Sleeping: " + str(remain))
        time.sleep(remain)
        # Run the daily api request and insert fresh data into the db.
        insert_into_db()
# https://fixer.io/quickstart
# https://fixer.io/documentation
# https://www.dataquest.io/blog/python-api-tutorial/
# python get time --> https://tecadmin.net/get-current-date-time-python/
# python postgresql --> https://stackabuse.com/working-with-postgresql-in-python/
# check table if exists --> https://stackoverflow.com/questions/1874113/checking-if-a-postgresql-table-exists-under-python-and-probably-psycopg2
# postgres data types (postgres float) --> https://www.postgresqltutorial.com/postgresql-data-types/
# python get number of days in month --> https://stackoverflow.com/questions/4938429/how-do-we-determine-the-number-of-days-for-a-given-month-in-python | [
11748,
7007,
198,
6738,
875,
43846,
1330,
4566,
198,
11748,
4818,
8079,
198,
6738,
11845,
1330,
1227,
9521,
198,
11748,
17331,
22163,
70,
17,
198,
11748,
640,
628,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
... | 2.916865 | 421 |
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
3127,
87,
355,
299,
87,
198,
11748,
2603,
29487,
8019,
355,
285,
489,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
285,
489,
... | 2.868852 | 61 |
import readExcel
import modifyFile
import sys
import time
import sendEmail
if __name__ == "__main__":
    # Use the workbook named on the command line, falling back to the default config.
    config_file = sys.argv[1] if len(sys.argv) >= 2 else "HeraldConfig.xlsx"
    main(config_file)
11748,
1100,
3109,
5276,
198,
11748,
13096,
8979,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
3758,
15333,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
611,
18896,
357,
17597,
13,
... | 2.340909 | 88 |
# type: ignore
import cv2
import numpy as np
TWO_PI = 2 * np.pi
def kmeans_periodic(columns, intervals, data, *args, **kwargs):
    """Runs kmeans with periodicity in a subset of dimensions.

    Transforms columns with periodicity on the specified intervals into two
    columns with coordinates on the unit circle for kmeans. After running
    through kmeans, the centers are transformed back to the range specified
    by the intervals.

    NOTE(review): the periodic columns of *data* are rewritten in place before
    clustering (the caller's array is mutated), and the circle coordinates are
    scaled by the interval width rather than being unit-length — confirm both
    are intended.

    Arguments
    ---------
    columns : sequence
        Sequence of indexes specifying the columns that have periodic data
    intervals : sequence of length-2 sequences
        Sequence of (min, max) intervals, one interval per column
    See help(cv2.kmeans) for all other arguments, which are passed through.

    Returns
    -------
    See help(cv2.kmeans) for outputs, which are passed through; except centers,
    which is modified so that it returns centers corresponding to the input
    data, instead of the transformed data.

    Raises
    ------
    cv2.error
        If len(columns) != len(intervals)
    """
    # Check each periodic column has an associated interval
    if len(columns) != len(intervals):
        raise cv2.error("number of intervals must be equal to number of columns")
    ndims = data.shape[1]
    ys = []
    # transform each periodic column into two columns with the x and y coordinate
    # of the angles for kmeans; x coord at original column, ys are appended
    for col, interval in zip(columns, intervals):
        a, b = min(interval), max(interval)
        width = b - a
        # Map the interval to [0, 2*pi) angles (in place, on the caller's array).
        data[:, col] = TWO_PI * (data[:, col] - a) / width % TWO_PI
        ys.append(width * np.sin(data[:, col]))
        data[:, col] = width * np.cos(data[:, col])
    # append the ys to the end
    ys = np.array(ys).transpose()
    data = np.hstack((data, ys)).astype(np.float32)
    # run kmeans
    retval, bestLabels, centers = cv2.kmeans(data, *args, **kwargs)
    # transform the centers back to range they came from
    for i, (col, interval) in enumerate(zip(columns, intervals)):
        a, b = min(interval), max(interval)
        # Recover the angle from the (x, y) center pair, then map back to [a, b).
        angles = np.arctan2(centers[:, ndims + i], centers[:, col]) % TWO_PI
        centers[:, col] = a + (b - a) * angles / TWO_PI
    # Drop the appended y columns so centers match the input dimensionality.
    centers = centers[:, :ndims]
    return retval, bestLabels, centers
| [
2,
2099,
25,
8856,
198,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
628,
198,
34551,
46,
62,
11901,
796,
362,
1635,
45941,
13,
14415,
628,
198,
4299,
479,
1326,
504,
62,
41007,
291,
7,
28665,
82,
11,
20016,
11,
13... | 2.801453 | 826 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.embedding import TokenEmbedding, PositionalEncoding, TransformerEmbedding
from models.attention import ScaledDotProductAttention, MultiHeadAttention, FeedForward
from models.layers import EncoderLayer, DecoderLayer
def build_model(src_pad_idx, tgt_pad_idx,
                enc_vocab_size, dec_vocab_size,
                model_dim, key_dim, value_dim, hidden_dim,
                num_head, num_layer, enc_max_len, dec_max_len, drop_prob):
    """Construct a TransformersModel and move it to the GPU when one is available."""
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")
    model = TransformersModel(src_pad_idx, tgt_pad_idx,
                              enc_vocab_size, dec_vocab_size,
                              model_dim, key_dim, value_dim, hidden_dim,
                              num_head, num_layer, enc_max_len, dec_max_len, drop_prob,
                              device)
    if use_cuda:
        model = model.cuda()
    return model
11748,
28034,
201,
198,
11748,
28034,
13,
20471,
355,
299,
77,
201,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
201,
198,
6738,
4981,
13,
20521,
12083,
1330,
29130,
31567,
6048,
278,
11,
18574,
1859,
27195,
7656,
11,
3602,
16354,... | 2.258454 | 414 |
# _*_ coding:UTF-8 _*_
"""
"""
from django.db import models
import uuid
from decimal import Decimal
import time
from .clients import BaseClient
from .sim_market import SimMarket
from .sim_clients import SimHoldingElem, SimCommissionElem, SimTransactionElem
from .sim_stocks import SimStock, SimOrderBookEntry, SimOrderBookElem
from .config import *
def sim_instant_trade(msg):
    """
    Settle an instantly matched trade for the initiating client: record the
    transaction, then adjust the initiator's holding and cash balances.
    :param msg: TradeMsg describing the matched trade
    :return: True on completion
    """
    initiator = msg.initiator
    stock_symbol = msg.stock_symbol
    initiator_object = BaseClient.objects.get(id=initiator)
    stock_object = SimStock.objects.get(symbol=stock_symbol)
    # Record the transaction for both parties.
    SimTransactionElem.objects.create(one_side=initiator, the_other_side=msg.acceptor,
                                      stock_symbol=stock_symbol, price_traded=msg.trade_price, vol_traded=msg.trade_vol,
                                      date_traded=msg.trade_date, operation=msg.trade_direction)
    if msg.trade_direction == 'a':
        # Sell ('a'): reduce the initiator's holding and credit the proceeds.
        hold_element = SimHoldingElem.objects.get(owner=initiator, stock_symbol=stock_symbol)
        available_shares = hold_element.available_vol
        assert available_shares >= msg.trade_vol
        hold_element.available_vol -= msg.trade_vol
        hold_element.vol -= msg.trade_vol
        if hold_element.vol == 0:
            # Position fully closed: drop the holding row.
            hold_element.delete()
        else:
            hold_element.save(update_fields=['vol', 'available_vol'])
        earning = float(msg.trade_price * msg.trade_vol - msg.tax_charged)
        initiator_object.cash += earning
        initiator_object.flexible_cash += earning
    elif msg.trade_direction == 'b':
        # Buy ('b'): add to (or create) the initiator's holding and debit the cost.
        holding = SimHoldingElem.objects.filter(owner=initiator, stock_symbol=stock_symbol)
        if holding.exists():
            # Existing position: recompute the average cost over old + new shares.
            assert holding.count() == 1
            new_holding = holding[0]
            new_holding.cost = Decimal((new_holding.cost * new_holding.vol + msg.trade_price * msg.trade_vol) /
                                       (new_holding.vol + msg.trade_vol))
            new_holding.price_guaranteed = new_holding.cost
            new_holding.last_price = stock_object.last_price
            new_holding.vol += msg.trade_vol
            new_holding.available_vol += msg.trade_vol
            new_holding.profit -= msg.tax_charged
            new_holding.value = float(stock_object.last_price) * new_holding.vol
            new_holding.save()
        else:
            # No position yet: create a fresh holding row for this purchase.
            SimHoldingElem.objects.create(owner=initiator, stock_symbol=stock_symbol,
                                          vol=msg.trade_vol, frozen_vol=0, available_vol=msg.trade_vol,
                                          cost=msg.trade_price, price_guaranteed=msg.trade_price,
                                          last_price=stock_object.last_price, profit=- msg.tax_charged,
                                          value=stock_object.last_price * msg.trade_vol, date_bought=msg.trade_date)
        spending = float(msg.trade_price * msg.trade_vol + msg.tax_charged)
        initiator_object.cash -= spending
        initiator_object.flexible_cash -= spending
    initiator_object.save(update_fields=['cash', 'flexible_cash'])
    return True
def sim_delayed_trade(msg):
    """
    Settle a delayed (order-book matched) trade for the accepting client:
    update the outstanding commission, then adjust the acceptor's holding and
    cash balances. The acceptor trades in the direction opposite the initiator.
    :param msg: TradeMsg describing the matched trade
    :return: True on completion
    """
    assert isinstance(msg, SimTradeMsg)
    acceptor = msg.acceptor
    stock_symbol = msg.stock_symbol
    # The acceptor's side is the opposite of the initiator's direction.
    if msg.trade_direction == 'a':
        acceptor_direction = 'b'
    else:
        acceptor_direction = 'a'
    acceptor_object = BaseClient.objects.get(id=acceptor)
    stock_object = SimStock.objects.get(symbol=stock_symbol)
    # Fold the newly traded volume into the acceptor's outstanding commission,
    # keeping a volume-weighted average traded price.
    commission_element = SimCommissionElem.objects.get(unique_id=msg.commission_id)
    assert commission_element.stock_symbol == stock_symbol
    assert commission_element.operation == acceptor_direction
    assert commission_element.vol_traded + msg.trade_vol <= commission_element.vol_committed
    new_avg_price = (commission_element.price_traded * commission_element.vol_traded +
                     msg.trade_price * msg.trade_vol) / (commission_element.vol_traded + msg.trade_vol)
    commission_element.price_traded = new_avg_price
    commission_element.vol_traded += msg.trade_vol
    # A fully filled commission is removed; a partial fill is saved back.
    if commission_element.vol_traded == commission_element.vol_committed:
        commission_element.delete()
    else:
        commission_element.save(update_fields=['price_traded', 'vol_traded'])
    if acceptor_direction == 'a':
        # Sell side: release the frozen shares and credit the proceeds.
        hold_element = SimHoldingElem.objects.get(owner=acceptor, stock_symbol=stock_symbol)
        frozen_shares = hold_element.frozen_vol
        assert frozen_shares >= msg.trade_vol
        hold_element.frozen_vol -= msg.trade_vol
        hold_element.vol -= msg.trade_vol
        if hold_element.vol == 0:
            # Position fully closed: drop the holding row.
            hold_element.delete()
        else:
            hold_element.save(update_fields=['vol', 'frozen_vol'])
        # Credit the sale proceeds net of tax.
        earning = float(msg.trade_price * msg.trade_vol - msg.tax_charged)
        acceptor_object.cash += earning
        acceptor_object.flexible_cash += earning
    elif acceptor_direction == 'b':
        # Buy side: add to (or create) the acceptor's holding.
        holding = SimHoldingElem.objects.filter(owner=acceptor, stock_symbol=stock_symbol)
        if holding.exists():
            # Existing position: recompute the average cost over old + new shares.
            assert holding.count() == 1
            new_holding = holding[0]
            new_holding.cost = Decimal((new_holding.cost * new_holding.vol + msg.trade_price * msg.trade_vol) /
                                       (new_holding.vol + msg.trade_vol))
            new_holding.price_guaranteed = new_holding.cost
            new_holding.last_price = stock_object.last_price
            new_holding.vol += msg.trade_vol
            new_holding.available_vol += msg.trade_vol
            new_holding.profit -= msg.tax_charged
            new_holding.value = float(stock_object.last_price) * new_holding.vol
            new_holding.save()
        else:
            # No position yet: create a fresh holding row for this purchase.
            SimHoldingElem.objects.create(owner=acceptor, stock_symbol=stock_symbol,
                                          vol=msg.trade_vol, frozen_vol=0, available_vol=msg.trade_vol,
                                          cost=msg.trade_price, price_guaranteed=msg.trade_price,
                                          last_price=stock_object.last_price, profit=- msg.tax_charged,
                                          value=stock_object.last_price * msg.trade_vol, date_bought=msg.trade_date)
        # Debit the purchase cost (plus tax) from the cash frozen at commit time.
        spending = float(msg.trade_price * msg.trade_vol + msg.tax_charged)
        acceptor_object.cash -= spending
        acceptor_object.frozen_cash -= spending
    acceptor_object.save(update_fields=['cash', 'frozen_cash', 'flexible_cash'])
    return True
def sim_add_commission(msg):
"""
clientclientorder book
:param msg:CommissionMsg
"""
assert isinstance(msg, SimCommissionMsg)
assert msg.confirmed is True
principle = msg.commit_client
stock_symbol = msg.stock_symbol
market = SimMarket.objects.get(id=1)
order_book_entry, created = SimOrderBookEntry.objects.get_or_create(stock_symbol=stock_symbol,
entry_price=msg.commit_price,
entry_direction=msg.commit_direction)
order_book_entry.total_vol += msg.commit_vol
order_book_entry.save(update_fields=['total_vol'])
new_order_book_element = SimOrderBookElem.objects.create(entry_belonged=order_book_entry.id,
client=principle,
direction_committed=msg.commit_direction,
price_committed=msg.commit_price,
vol_committed=msg.commit_vol,
date_committed=market.datetime)
SimCommissionElem.objects.create(owner=principle, stock_symbol=stock_symbol, operation=msg.commit_direction,
price_committed=msg.commit_price, vol_committed=msg.commit_vol,
date_committed=market.datetime, unique_id=new_order_book_element.unique_id)
if msg.commit_direction == 'a':
#
holding = SimHoldingElem.objects.get(owner=principle, stock_symbol=stock_symbol)
assert msg.commit_vol <= holding.available_vol
holding.frozen_vol += msg.commit_vol
holding.available_vol -= msg.commit_vol
holding.save(update_fields=['frozen_vol', 'available_vol'])
elif msg.commit_direction == 'b':
#
principle_object = BaseClient.objects.get(id=principle)
freeze = float(msg.commit_price * msg.commit_vol)
assert freeze <= principle_object.flexible_cash
principle_object.frozen_cash += freeze
principle_object.flexible_cash -= freeze
principle_object.save(update_fields=['frozen_cash', 'flexible_cash'])
return True
def sim_order_book_matching(commission):
"""
clientorder bookorder
"""
assert isinstance(commission, SimCommissionMsg)
assert commission.confirmed is False
stock_symbol = commission.stock_symbol
stock_object = SimStock.objects.get(symbol=stock_symbol)
direction = commission.commit_direction
remaining_vol = commission.commit_vol
market = SimMarket.objects.get(id=1)
if direction == 'a':
#
matching_direction = 'b'
while not stock_object.is_order_book_empty(matching_direction):
best_element = stock_object.get_best_element(matching_direction)
if best_element.price_committed < commission.commit_price:
#
break
if remaining_vol == 0:
#
break
if remaining_vol >= best_element.vol_committed:
# order book
trade_message = SimTradeMsg(stock_symbol=stock_symbol, initiator=commission.commit_client,
trade_direction=direction, trade_price=best_element.price_committed,
trade_vol=best_element.vol_committed, acceptor=best_element.client,
commission_id=best_element.unique_id, tax_charged=0,
trade_date=market.datetime, trade_tick=market.tick)
#
sim_instant_trade(trade_message)
sim_delayed_trade(trade_message)
# order book
stock_object.trading_behaviour(direction, best_element.price_committed, best_element.vol_committed,
trade_message.trade_date, trade_message.trade_tick)
remaining_vol -= best_element.vol_committed
best_entry = SimOrderBookEntry.objects.get(id=best_element.entry_belonged)
best_entry.total_vol -= best_element.vol_committed
if best_entry.total_vol == 0:
best_entry.delete()
else:
best_entry.save(update_fields=['total_vol'])
best_element.delete()
else:
# order book
trade_message = SimTradeMsg(stock_symbol=stock_symbol, initiator=commission.commit_client,
trade_direction=direction, trade_price=best_element.price_committed,
trade_vol=remaining_vol, acceptor=best_element.client,
commission_id=best_element.unique_id, tax_charged=0,
trade_date=market.datetime, trade_tick=market.tick)
#
sim_instant_trade(trade_message)
sim_delayed_trade(trade_message)
# order book
stock_object.trading_behaviour(direction, best_element.price_committed, remaining_vol,
trade_message.trade_date, trade_message.trade_tick)
best_element.vol_committed -= remaining_vol
best_entry = SimOrderBookEntry.objects.get(id=best_element.entry_belonged)
best_entry.total_vol -= remaining_vol
remaining_vol = 0
best_element.save(update_fields=['vol_committed'])
best_entry.save(update_fields=['total_vol'])
elif direction == 'b':
#
matching_direction = 'a'
while not stock_object.is_order_book_empty(matching_direction):
best_element = stock_object.get_best_element(matching_direction)
if best_element.price_committed > commission.commit_price:
#
break
if remaining_vol == 0:
#
break
if remaining_vol >= best_element.vol_committed:
# order book
trade_message = SimTradeMsg(stock_symbol=stock_symbol, initiator=commission.commit_client,
trade_direction=direction, trade_price=best_element.price_committed,
trade_vol=best_element.vol_committed, acceptor=best_element.client,
commission_id=best_element.unique_id, tax_charged=0,
trade_date=market.datetime, trade_tick=market.tick)
#
sim_instant_trade(trade_message)
sim_delayed_trade(trade_message)
# order book
stock_object.trading_behaviour(direction, best_element.price_committed, best_element.vol_committed,
trade_message.trade_date, trade_message.trade_tick)
remaining_vol -= best_element.vol_committed
best_entry = SimOrderBookEntry.objects.get(id=best_element.entry_belonged)
best_entry.total_vol -= best_element.vol_committed
if best_entry.total_vol == 0:
best_entry.delete()
else:
best_entry.save(update_fields=['total_vol'])
best_element.delete()
else:
# order book
trade_message = SimTradeMsg(stock_symbol=stock_object.symbol, initiator=commission.commit_client,
trade_direction=direction, trade_price=best_element.price_committed,
trade_vol=remaining_vol, acceptor=best_element.client,
commission_id=best_element.unique_id, tax_charged=0,
trade_date=market.datetime, trade_tick=market.tick)
#
sim_instant_trade(trade_message)
sim_delayed_trade(trade_message)
# order book
stock_object.trading_behaviour(direction, best_element.price_committed, remaining_vol,
trade_message.trade_date, trade_message.trade_tick)
best_element.vol_committed -= remaining_vol
best_entry = SimOrderBookEntry.objects.get(id=best_element.entry_belonged)
best_entry.total_vol -= remaining_vol
remaining_vol = 0
best_element.save(update_fields=['vol_committed'])
best_entry.save(update_fields=['total_vol'])
elif direction == 'c':
#
assert commission.commission_to_cancel is not None
order_book_element_corr = SimOrderBookElem.objects.get(unique_id=commission.commission_to_cancel)
try:
assert commission.commit_client == order_book_element_corr.client
assert commission.commit_vol <= order_book_element_corr.vol_committed #
order_book_entry = SimOrderBookEntry.objects.get(id=order_book_element_corr.entry_belonged)
order_book_entry.total_vol -= commission.commit_vol
order_book_element_corr.vol_committed -= commission.commit_vol
if order_book_element_corr.vol_committed == 0:
order_book_element_corr.delete()
else:
order_book_element_corr.save(update_fields=['vol_committed'])
if order_book_entry.total_vol == 0:
order_book_entry.delete()
else:
order_book_entry.save(update_fields=['total_vol'])
#
origin_commission = SimCommissionElem.objects.get(unique_id=commission.commission_to_cancel)
if origin_commission.operation == 'a':
holding = SimHoldingElem.objects.get(owner=commission.commit_client, stock_symbol=stock_symbol)
holding.frozen_vol -= commission.commit_vol
holding.available_vol += commission.commit_vol
holding.save(update_fields=['frozen_vol', 'available_vol'])
else:
assert origin_commission.operation == 'b'
freeze = float(commission.commit_price * commission.commit_vol)
client_object = BaseClient.objects.get(id=commission.commit_client)
client_object.frozen_cash -= freeze
client_object.flexible_cash += freeze
client_object.save(update_fields=['frozen_cash', 'flexible_cash'])
origin_commission.vol_committed -= commission.commit_vol
if origin_commission.vol_traded == origin_commission.vol_committed:
origin_commission.delete()
else:
origin_commission.save(update_fields=['vol_committed'])
except AssertionError:
print("")
commission.confirmed = True
commission.save()
return True
else:
raise ValueError
if remaining_vol > 0:
# /
commission.commit_vol = remaining_vol
commission.confirmed = True
ok = sim_add_commission(commission)
assert ok
else:
commission.confirmed = True
return True
def sim_commission_handler(new_commission, handle_info=False):
"""
message/order book/
:param new_commission:
:param handle_info:
"""
time0 = time.time()
assert isinstance(new_commission, SimCommissionMsg)
if not new_commission.is_valid():
return False
sim_order_book_matching(new_commission)
assert new_commission.confirmed
time1 = time.time()
if handle_info:
print('Commission Handled: symbol-{} {} price-{} vol-{}, Cost {} s.'.format(new_commission.stock_symbol,
new_commission.commit_direction,
new_commission.commit_price,
new_commission.commit_vol,
time1 - time0))
return True
| [
2,
4808,
9,
62,
19617,
25,
48504,
12,
23,
4808,
9,
62,
198,
198,
37811,
628,
198,
37811,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
11748,
334,
27112,
198,
6738,
32465,
1330,
4280,
4402,
198,
11748,
640,
198,
198,
6738... | 2.013853 | 9,601 |
test = { 'name': 'q3_2_3',
'points': 1,
'suites': [ { 'cases': [ { 'code': '>>> # this test just checks that your classify_feature_row works correctly.;\n'
'>>> def check(r):\n'
'... t = test_my_features.row(r)\n'
"... return classify(t, train_my_features, train_movies.column('Genre'), 13) == classify_feature_row(t);\n"
'>>> all([check(i) for i in np.arange(13)])\n'
'True',
'hidden': False,
'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
| [
9288,
796,
1391,
220,
220,
705,
3672,
10354,
705,
80,
18,
62,
17,
62,
18,
3256,
198,
220,
220,
220,
705,
13033,
10354,
352,
11,
198,
220,
220,
220,
705,
2385,
2737,
10354,
685,
220,
220,
1391,
220,
220,
705,
33964,
10354,
685,
220... | 1.492586 | 607 |
# Copyright (C) 2002-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Base klasa dla MIME type messages that are nie multipart."""
__all__ = ['MIMENonMultipart']
z email zaimportuj errors
z email.mime.base zaimportuj MIMEBase
klasa MIMENonMultipart(MIMEBase):
"""Base klasa dla MIME non-multipart type messages."""
| [
2,
15069,
357,
34,
8,
6244,
12,
13330,
11361,
10442,
5693,
198,
2,
6434,
25,
14488,
32955,
198,
2,
14039,
25,
3053,
12,
82,
328,
31,
29412,
13,
2398,
198,
198,
37811,
14881,
479,
75,
15462,
288,
5031,
337,
12789,
2099,
6218,
326,
... | 2.823077 | 130 |
from notifications import ImproperlyInstalledNotificationProvider
try:
from slack_sdk import WebClient
except ImportError as err:
raise ImproperlyInstalledNotificationProvider(
missing_package='slack_sdk', provider='slack'
) from err
from notifications import default_settings as settings
from . import BaseNotificationProvider
| [
6738,
19605,
1330,
12205,
525,
306,
6310,
4262,
3673,
2649,
29495,
198,
198,
28311,
25,
198,
220,
220,
220,
422,
30740,
62,
21282,
74,
1330,
5313,
11792,
198,
16341,
17267,
12331,
355,
11454,
25,
198,
220,
220,
220,
5298,
12205,
525,
... | 3.591837 | 98 |
from django.dispatch import Signal
points_awarded = Signal(providing_args=["target", "key", "points", "source"])
| [
6738,
42625,
14208,
13,
6381,
17147,
1330,
26484,
198,
198,
13033,
62,
707,
10676,
796,
26484,
7,
15234,
2530,
62,
22046,
28,
14692,
16793,
1600,
366,
2539,
1600,
366,
13033,
1600,
366,
10459,
8973,
8,
198
] | 3.166667 | 36 |
from sympy.functions import exp
from symplyphysics import (
symbols, Eq, pretty, solve, Quantity, units, S,
Probability, validate_input, expr_to_quantity, convert_to
)
# Description
## Ptnl (fast non-leakage factor) is the ratio of the number of fast neutrons that do not leak from the reactor
## core during the slowing down process to the number of fast neutrons produced by fissions at all energies.
## Law: Pfnl e^(-Bg^2 * th)
## Where:
## e - exponent.
## Bg^2 - geometric buckling.
## See [geometric buckling](./buckling/geometric_buckling_from_neutron_flux.py) implementation.
## th - neutron Fermi age.
## The Fermi age is related to the distance traveled during moderation, just as the diffusion length is for
## thermal neutrons. The Fermi age is the same quantity as the slowing-down length squared (Ls^2).
## Pfnl - fast non-leakage probability.
geometric_buckling = symbols('geometric_buckling')
neutron_fermi_age = symbols('neutron_fermi_age')
fast_non_leakage_probability = symbols('thermal_non_leakage_probability')
law = Eq(fast_non_leakage_probability, exp(-1 * geometric_buckling * neutron_fermi_age))
| [
6738,
10558,
88,
13,
12543,
2733,
1330,
1033,
198,
6738,
5659,
2145,
746,
23154,
1330,
357,
198,
220,
220,
220,
14354,
11,
412,
80,
11,
2495,
11,
8494,
11,
39789,
11,
4991,
11,
311,
11,
198,
220,
220,
220,
30873,
1799,
11,
26571,
... | 3.100543 | 368 |
import re
lines = []
with open("text.txt", "r") as f:
content = f.readlines()
for index, line in enumerate(content):
letters_count = count_letters(line)
punctuation = count_punctuation(line)
line = line.strip("\n")
lines.append(f"Line {index + 1}: {line} ({letters_count})({punctuation})\n")
with open("output.txt", "w") as f:
for line in lines:
f.write(line)
| [
11748,
302,
628,
628,
198,
6615,
796,
17635,
198,
198,
4480,
1280,
7203,
5239,
13,
14116,
1600,
366,
81,
4943,
355,
277,
25,
198,
220,
220,
220,
2695,
796,
277,
13,
961,
6615,
3419,
198,
220,
220,
220,
329,
6376,
11,
1627,
287,
27... | 2.348315 | 178 |
from flask_wtf import FlaskForm
from wtforms import StringField,SubmitField,BooleanField,PasswordField
from wtforms.validators import Required,DataRequired,Length | [
6738,
42903,
62,
86,
27110,
1330,
46947,
8479,
198,
6738,
266,
83,
23914,
1330,
10903,
15878,
11,
45135,
15878,
11,
46120,
13087,
15878,
11,
35215,
15878,
198,
6738,
266,
83,
23914,
13,
12102,
2024,
1330,
20906,
11,
6601,
37374,
11,
245... | 3.95122 | 41 |
from django.db import models
from . import TimestampedModel
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
764,
1330,
5045,
395,
13322,
17633,
628
] | 3.8125 | 16 |
nums = [12,26,77,22,88,1]
print(pigeonHoleSort(nums))
print(nums)
| [
198,
77,
5700,
796,
685,
1065,
11,
2075,
11,
3324,
11,
1828,
11,
3459,
11,
16,
60,
198,
4798,
7,
79,
10045,
261,
39,
2305,
42758,
7,
77,
5700,
4008,
198,
4798,
7,
77,
5700,
8,
198
] | 1.810811 | 37 |
# parsers.py
from werkzeug.datastructures import FileStorage
from flask_restplus import reqparse
file_upload = reqparse.RequestParser()
file_upload.add_argument('resource_csv',
type=FileStorage,
location='files',
required=True,
help='CSV file') | [
2,
13544,
364,
13,
9078,
198,
6738,
266,
9587,
2736,
1018,
13,
19608,
459,
1356,
942,
1330,
9220,
31425,
198,
6738,
42903,
62,
2118,
9541,
1330,
43089,
29572,
198,
198,
7753,
62,
25850,
796,
43089,
29572,
13,
18453,
46677,
3419,
198,
... | 2.005814 | 172 |
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for kfp.components.yaml_component."""
import os
import tempfile
import textwrap
import unittest
from unittest import mock
import requests
from kfp.components import structures
from kfp.components import yaml_component
SAMPLE_YAML = textwrap.dedent("""\
components:
comp-component-1:
executorLabel: exec-component-1
inputDefinitions:
parameters:
input1:
parameterType: STRING
outputDefinitions:
parameters:
output1:
parameterType: STRING
deploymentSpec:
executors:
exec-component-1:
container:
command:
- sh
- -c
- 'set -ex
echo "$0" > "$1"'
- '{{$.inputs.parameters[''input1'']}}'
- '{{$.outputs.parameters[''output1''].output_file}}'
image: alpine
pipelineInfo:
name: component-1
root:
dag:
tasks:
component-1:
cachingOptions:
enableCache: true
componentRef:
name: comp-component-1
inputs:
parameters:
input1:
componentInputParameter: input1
taskInfo:
name: component-1
inputDefinitions:
parameters:
input1:
parameterType: STRING
schemaVersion: 2.1.0
sdkVersion: kfp-2.0.0-alpha.3
""")
if __name__ == '__main__':
unittest.main()
| [
2,
15069,
33448,
383,
24921,
891,
9319,
46665,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
... | 2.5 | 766 |
#
# Constant Price Market Making Simulator
#
# simulate different liquidity provision and trading strategies
#
from typing import Tuple
import csv
import numpy as np
import pandas as pd
from numpy.random import binomial, default_rng
# TODO: switch to decimal type and control quantization. numeric errors will kill us quickly
if __name__ == "__main__":
main()
| [
2,
198,
2,
20217,
7886,
5991,
16427,
13942,
198,
2,
198,
2,
29308,
1180,
31061,
8287,
290,
7313,
10064,
198,
2,
198,
198,
6738,
19720,
1330,
309,
29291,
198,
198,
11748,
269,
21370,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
1979... | 3.673267 | 101 |
"""
rave input backend using the SDL2 library.
"""
import sys
import sdl2
import rave.log
import rave.events
import rave.backends
from ..common import events_for
from . import keyboard, mouse, touch, controller
BACKEND_PRIORITY = 50
## Module API.
## Backend API.
## Internal API.
_log = rave.log.get(__name__)
| [
37811,
198,
5758,
5128,
30203,
1262,
262,
45417,
17,
5888,
13,
198,
37811,
198,
11748,
25064,
198,
11748,
264,
25404,
17,
198,
198,
11748,
46242,
13,
6404,
198,
11748,
46242,
13,
31534,
198,
11748,
46242,
13,
1891,
2412,
198,
6738,
1148... | 3.137255 | 102 |
try:
from setuptools import setup, find_packages
except ImportError as e:
from distutils.core import setup
dependencies = ['docopt', 'termcolor', 'requests']
setup(
name = 'pyDownload',
version = '1.0.2',
description = 'CLI based download utility',
url = 'https://github.com/Dhruv-Jauhar/pyDownload',
author = 'Dhruv Jauhar',
author_email = 'dhruv.jhr@gmail.com',
license = 'MIT',
install_requires = dependencies,
packages = find_packages(),
entry_points = {
'console_scripts': ['pyd = pyDownload.main:start'],
},
classifiers=(
'Development Status :: 4 - Beta',
'Intended Audience :: End Users/Desktop',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.4',
#'Programming Language :: Python :: 3 :: Only',
'Topic :: Utilities'
)
)
| [
198,
28311,
25,
198,
197,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
16341,
17267,
12331,
355,
304,
25,
198,
197,
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
198,
45841,
3976,
796,
37250,
15390,
8738,
3256,
705,
... | 2.922034 | 295 |
from django.db import models
from django.contrib.auth.models import User
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
628
] | 3.52381 | 21 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import time
import json
with open("coverage.json", "r") as file:
timestamp = str(int(time.time()))
data = json.load(file)["data"][0]
lines_covered = str(data["totals"]["lines"]["covered"])
lines_valid = str(data["totals"]["lines"]["count"])
with open("perf_summary.csv", "a") as f:
f.write(
timestamp
+ ","
+ lines_valid
+ ",Unit_Test_Coverage,0,0,0,"
+ lines_covered
+ ",0,0,0,0"
)
| [
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
24843,
362,
13,
15,
13789,
13,
198,
11748,
640,
198,
11748,
33918,
198,
198,
4480,
1280,
7203,
1073,
1857,
13,
17752,
1600,
366,
81,
4943,
355,
... | 2.277551 | 245 |
import pytest
import allure
from data.parameters import data_parameters
| [
11748,
12972,
9288,
198,
11748,
477,
495,
198,
6738,
1366,
13,
17143,
7307,
1330,
1366,
62,
17143,
7307,
628
] | 3.842105 | 19 |
import asyncio
import json
import time
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey
from diem import AuthKey, testnet, utils
from indy import anoncreds, wallet
from indy import pool
from get_schema import get_schema
from diem_txn import create_diem_script, create_diem_raw_txn, sign_and_wait_diem_txn
from compress_decompress_cred_def import compress_cred_def, clean_up_cred_def_res, decompress_cred_def
from async_calls import create_master_secret, create_credential_offer, \
create_credential_req, create_credential, store_credential
PROTOCOL_VERSION = 2
CURRENCY = "XUS"
issuer = {
'did': 'NcYxiDXkpYi6ov5FcYDi1e',
'wallet_config': json.dumps({'id': 'issuer_wallet'}),
'wallet_credentials': json.dumps({'key': 'issuer_wallet_key'})
}
prover = {
'did': 'VsKV7grR1BUE29mG2Fm2kX',
'wallet_config': json.dumps({"id": "prover_wallet"}),
'wallet_credentials': json.dumps({"key": "issuer_wallet_key"})
}
verifier = {}
store = {}
loop = asyncio.get_event_loop()
schema_and_cred_def = loop.run_until_complete(create_schema())
# connect to testnet
client = testnet.create_client()
# generate private key for sender account
sender_private_key = Ed25519PrivateKey.generate()
# generate auth key for sender account
sender_auth_key = AuthKey.from_public_key(sender_private_key.public_key())
print(f"Generated sender address: {utils.account_address_hex(sender_auth_key.account_address())}")
# create sender account
faucet = testnet.Faucet(client)
testnet.Faucet.mint(faucet, sender_auth_key.hex(), 100000000, "XUS")
# get sender account
sender_account = client.get_account(sender_auth_key.account_address())
# generate private key for receiver account
receiver_private_key = Ed25519PrivateKey.generate()
# generate auth key for receiver account
receiver_auth_key = AuthKey.from_public_key(receiver_private_key.public_key())
print(f"Generated receiver address: {utils.account_address_hex(receiver_auth_key.account_address())}")
# create receiver account
faucet = testnet.Faucet(client)
faucet.mint(receiver_auth_key.hex(), 10000000, CURRENCY)
METADATA = str.encode(schema_and_cred_def[0])
# create script
script = create_diem_script(CURRENCY, receiver_auth_key, METADATA)
# create transaction
raw_transaction = create_diem_raw_txn(sender_auth_key, sender_account, script, CURRENCY)
sign_and_wait_diem_txn(sender_private_key, raw_transaction, client)
print("\nRetrieving SCHEMA from Diem ledger:\n")
schema = get_schema(utils.account_address_hex(sender_auth_key.account_address()), sender_account.sequence_number,
"https://testnet.diem.com/v1")
cred_def_dict = compress_cred_def(schema_and_cred_def)
METADATA_CRED_DEF = str.encode(str(cred_def_dict))
# create script
script = create_diem_script(CURRENCY, receiver_auth_key, METADATA_CRED_DEF)
# create transaction
raw_transaction = create_diem_raw_txn(sender_auth_key, sender_account, script, CURRENCY, 1)
sign_and_wait_diem_txn(sender_private_key, raw_transaction, client)
print("\nRetrieving CRE_DEF from Diem ledger:\n")
cred_def_res = get_schema(utils.account_address_hex(sender_auth_key.account_address()),
sender_account.sequence_number + 1,
"https://testnet.diem.com/v1")
filtered_cred_def = clean_up_cred_def_res(cred_def_res)
decomp_comp = decompress_cred_def(filtered_cred_def)
master_secret_id = loop.run_until_complete(create_master_secret(prover))
prover['master_secret_id'] = master_secret_id
print("\nmaster sectet id:" + master_secret_id)
cred_offer = loop.run_until_complete(create_credential_offer(issuer['wallet'], decomp_comp['id']))
# set some values
issuer['cred_offer'] = cred_offer
prover['cred_offer'] = issuer['cred_offer']
cred_offer = json.loads(prover['cred_offer'])
prover['cred_def_id'] = cred_offer['cred_def_id']
prover['schema_id'] = cred_offer['schema_id']
prover['cred_def'] = store[prover['cred_def_id']]
prover['schema'] = store[prover['schema_id']]
# create the credential request
prover['cred_req'], prover['cred_req_metadata'] = loop.run_until_complete(create_credential_req(prover))
prover['cred_values'] = json.dumps({
"sex": {"raw": "male", "encoded": "5944657099558967239210949258394887428692050081607692519917050011144233"},
"age": {"raw": "28", "encoded": "28"}
})
issuer['cred_values'] = prover['cred_values']
issuer['cred_req'] = prover['cred_req']
print("wallet:")
print(issuer['wallet'])
print("\ncred_offer:")
print(issuer['cred_offer'])
print("\ncred_req:")
print(issuer['cred_req'])
print("\ncred_values:")
print(issuer['cred_values'])
(cred_json, _, _) = loop.run_until_complete(create_credential(issuer))
prover['cred'] = cred_json
loop.run_until_complete(store_credential(prover)) | [
11748,
30351,
952,
198,
11748,
33918,
198,
11748,
640,
198,
198,
6738,
45898,
13,
71,
1031,
6759,
13,
19795,
20288,
13,
4107,
3020,
19482,
13,
276,
13381,
1129,
1330,
1717,
13381,
1129,
29067,
9218,
198,
6738,
2566,
368,
1330,
26828,
92... | 2.563206 | 1,859 |
import pytest
from GoogleCloudFunctions import resolve_default_project_id, functions_list_command
def test_no_functions():
"""
Given:
- Google client without functions
When:
- Running functions-list command
Then:
- Ensure expected human readable response is returned
"""
client = GoogleClientMock()
hr, _, _ = functions_list_command(client, {})
assert hr == 'No functions found.'
| [
11748,
12972,
9288,
198,
6738,
3012,
18839,
24629,
2733,
1330,
10568,
62,
12286,
62,
16302,
62,
312,
11,
5499,
62,
4868,
62,
21812,
628,
628,
198,
198,
4299,
1332,
62,
3919,
62,
12543,
2733,
33529,
198,
220,
220,
220,
37227,
198,
220,... | 2.953333 | 150 |
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
BATCH_SIZE = 128
MAX_WORDS_IN_REVIEW = 200 # Maximum length of a review to consider
EMBEDDING_SIZE = 50 # Dimensions for each word vector
stop_words = set({'ourselves', 'hers', 'between', 'yourself', 'again',
'there', 'about', 'once', 'during', 'out', 'very', 'having',
'with', 'they', 'own', 'an', 'be', 'some', 'for', 'do', 'its',
'yours', 'such', 'into', 'of', 'most', 'itself', 'other',
'off', 'is', 's', 'am', 'or', 'who', 'as', 'from', 'him',
'each', 'the', 'themselves', 'below', 'are', 'we',
'these', 'your', 'his', 'through', 'don', 'me', 'were',
'her', 'more', 'himself', 'this', 'down', 'should', 'our',
'their', 'while', 'above', 'both', 'up', 'to', 'ours', 'had',
'she', 'all', 'no', 'when', 'at', 'any', 'before', 'them',
'same', 'and', 'been', 'have', 'in', 'will', 'on', 'does',
'yourselves', 'then', 'that', 'because', 'what', 'over',
'why', 'so', 'can', 'did', 'not', 'now', 'under', 'he', 'you',
'herself', 'has', 'just', 'where', 'too', 'only', 'myself',
'which', 'those', 'i', 'after', 'few', 'whom', 't', 'being',
'if', 'theirs', 'my', 'against', 'a', 'by', 'doing', 'it',
'how', 'further', 'was', 'here', 'than',
'wouldn', 'shouldn', 'll', 'aren', 'isn', 'get'})
def preprocess(review):
"""
Apply preprocessing to a single review. You can do anything here that is manipulation
at a string level, e.g.
- removing stop words
- stripping/adding punctuation
- changing case
- word find/replace
RETURN: the preprocessed review in string form.
"""
"""
input: the content of each training file. type : string("\n" means return)
used in the runner file line 58
output: word list form.
Note: remeber choice 100 words at random
"""
import re
page = r"<.*?>"
pieces_nopara = re.compile(page).sub("", review)
patten = r"\W+"
pieces = re.compile(patten).split(pieces_nopara)
piece = [p.lower() for p in pieces if p != '' and p.lower() not in stop_words and len(p) > 2]
processed_review = piece
return processed_review
def define_graph():
    """
    Build the TF-1.x graph for sentiment classification over embedded reviews.

    The input is not strings of words but the result of an embedding lookup:
    a float tensor of shape (BATCH_SIZE, MAX_WORDS_IN_REVIEW, EMBEDDING_SIZE).
    This function is called by an unaltered runner.py, so the order of the
    returned placeholders/tensors must not change.

    RETURNS: input_data, labels, dropout_keep_prob, optimizer, Accuracy,
    loss, training
    """
    lstm_hidden_unit = 256    # units per LSTM direction
    learning_rate = 0.00023
    # Flag consumed by the batch-norm layers; defaults to inference mode.
    training = tf.placeholder_with_default(False, shape = (), name="IsTraining")
    # NOTE(review): this single value feeds both tf.nn.dropout (keep
    # probability) and tf.layers.dropout (drop rate) below -- the two APIs
    # interpret it oppositely; confirm which semantics is intended.
    dropout_keep_prob = tf.placeholder_with_default(0.6, shape=(), name='drop_rate')
    with tf.name_scope("InputData"):
        input_data = tf.placeholder(
            tf.float32,
            shape=(BATCH_SIZE, MAX_WORDS_IN_REVIEW, EMBEDDING_SIZE),
            name="input_data"
        )
    with tf.name_scope("Labels"):
        # One-hot sentiment labels, two classes per example.
        labels = tf.placeholder(tf.float32, shape=(BATCH_SIZE, 2), name="labels")
    with tf.name_scope("BiRNN"):
        # One LSTM per direction, each wrapped with output dropout.
        LSTM_cell_fw = tf.contrib.rnn.BasicLSTMCell(lstm_hidden_unit)
        LSTM_cell_bw = tf.contrib.rnn.BasicLSTMCell(lstm_hidden_unit)
        LSTM_drop_fw = tf.nn.rnn_cell.DropoutWrapper(
            cell = LSTM_cell_fw,
            output_keep_prob = dropout_keep_prob
        )
        LSTM_drop_bw = tf.nn.rnn_cell.DropoutWrapper(
            cell = LSTM_cell_bw,
            output_keep_prob = dropout_keep_prob
        )
        (RNNout_fw, RNNout_bw), _ = tf.nn.bidirectional_dynamic_rnn(
            cell_fw = LSTM_drop_fw,
            cell_bw = LSTM_drop_bw,
            inputs = input_data,
            initial_state_fw=LSTM_cell_fw.zero_state(BATCH_SIZE, dtype=tf.float32),
            initial_state_bw=LSTM_cell_bw.zero_state(BATCH_SIZE, dtype=tf.float32),
            parallel_iterations = 64
        )
        # Concatenate the last time step of both directions:
        # shape (BATCH_SIZE, 2 * lstm_hidden_unit).
        lastoutput = tf.concat(values = [RNNout_fw[:, -1, :], RNNout_bw[:, -1, :]], axis = 1)
    with tf.name_scope("FC"):
        # Stack of batch-norm -> dense -> dropout blocks.
        # pred = tf.layers.batch_normalization(lastoutput, axis=1, training = training)
        pred = tf.layers.batch_normalization(lastoutput, training = training)
        pred = tf.layers.dense(pred, 128, activation = tf.nn.relu)
        pred = tf.nn.dropout(pred, dropout_keep_prob)
        # pred = tf.layers.batch_normalization(pred, axis=1, training = training)
        pred = tf.layers.batch_normalization(pred, training = training)
        pred = tf.layers.dense(pred, 128, activation = tf.nn.relu)
        pred = tf.nn.dropout(pred, dropout_keep_prob)
        # pred = tf.layers.batch_normalization(pred, axis=1, training = training)
        pred = tf.layers.batch_normalization(pred, training = training)
        pred = tf.layers.dense(pred, 128, activation = tf.nn.relu)
        pred = tf.nn.dropout(pred, dropout_keep_prob)
        # pred = tf.layers.batch_normalization(pred, axis=1, training = training)
        pred = tf.layers.batch_normalization(pred, training = training)
        pred = tf.layers.dense(pred, 64, activation = tf.nn.relu)
        # NOTE(review): tf.layers.dropout takes a *drop* rate, unlike
        # tf.nn.dropout above which takes a *keep* probability -- confirm.
        pred = tf.layers.dropout(pred, rate = dropout_keep_prob)
        # pred = tf.layers.batch_normalization(pred, axis=1, training = training)
        pred = tf.layers.batch_normalization(pred, training = training)
        # NOTE(review): this layer already applies softmax, yet
        # softmax_cross_entropy_with_logits_v2 below expects unscaled
        # logits -- softmax is effectively applied twice; confirm intended.
        pred = tf.layers.dense(pred, 2, activation = tf.nn.softmax)
    with tf.name_scope("CrossEntropy"):
        cross_entropy = \
            tf.nn.softmax_cross_entropy_with_logits_v2(
                logits = pred,
                labels = labels
            )
        loss = tf.reduce_mean(cross_entropy, name = "loss")
    with tf.name_scope("Accuracy"):
        # Fraction of examples whose argmax prediction matches the label.
        Accuracy = tf.reduce_mean(
            tf.cast(
                tf.equal(
                    tf.argmax(pred, 1),
                    tf.argmax(labels, 1)
                ),
                dtype = tf.float32
            ),
            name = "accuracy"
        )
    with tf.name_scope("Optimizer"):
        # Batch-norm moving statistics (UPDATE_OPS) must run each step, so
        # they are grouped with the Adam training op.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
        optimizer = tf.group([optimizer, update_ops])
    return input_data, labels, dropout_keep_prob, optimizer, Accuracy, loss, training
# def define_graph():
# """
# Implement your model here. You will need to define placeholders, for the input and labels,
# Note that the input is not strings of words, but the strings after the embedding lookup
# has been applied (i.e. arrays of floats).
# In all cases this code will be called by an unaltered runner.py. You should read this
# file and ensure your code here is compatible.
# Consult the assignment specification for details of which parts of the TF API are
# permitted for use in this function.
# You must return, in the following order, the placeholders/tensors for;
# RETURNS: input, labels, optimizer, accuracy and loss
# """
# """
# training_data_embedded[exampleNum., ]
# input data is placeholder, size NUM_SAMPLES x MAX_WORDS_IN_REVIEW x EMBEDDING_SIZE
# labels placeholder,
# dropout_keep_prob placeholder,
# optimizer is function with placeholder input_data, labels, dropout_keep_prob
# Accuracy, loss is function with placeholder input_data, labels
# """
# lstm_hidden_unit = 256
# learning_rate = 0.001
# dropout_keep_prob = tf.placeholder_with_default(0.6, shape=(), name='drop_rate')
# input_data = tf.placeholder(
# tf.float32,
# shape=(BATCH_SIZE, MAX_WORDS_IN_REVIEW, EMBEDDING_SIZE),
# name="input_data"
# )
# input_data_norm = tf.layers.batch_normalization(input_data, axis=1)
# input_data_norm = input_data
# labels = tf.placeholder(tf.float32, shape=(BATCH_SIZE, 2), name="labels")
# LSTM_cell_fw = tf.contrib.rnn.BasicLSTMCell(lstm_hidden_unit)
# LSTM_cell_bw = tf.contrib.rnn.BasicLSTMCell(lstm_hidden_unit)
# LSTM_drop_fw = tf.nn.rnn_cell.DropoutWrapper(
# cell = LSTM_cell_fw,
# output_keep_prob = dropout_keep_prob
# )
# LSTM_drop_bw = tf.nn.rnn_cell.DropoutWrapper(
# cell = LSTM_cell_bw,
# output_keep_prob = dropout_keep_prob
# )
# (RNNout_fw, RNNout_bw), _ = tf.nn.bidirectional_dynamic_rnn(
# cell_fw = LSTM_drop_fw,
# cell_bw = LSTM_drop_bw,
# inputs = input_data_norm,
# initial_state_fw=LSTM_cell_fw.zero_state(BATCH_SIZE, dtype=tf.float32),
# initial_state_bw=LSTM_cell_bw.zero_state(BATCH_SIZE, dtype=tf.float32),
# parallel_iterations = 16
# )
# last_output = []
# for i in range(1):
# last_output.append(RNNout_fw[:, -i, :])
# last_output.append(RNNout_bw[:, -i, :])
# lastoutput = tf.concat(last_output, 1)
# with tf.name_scope("fc_layer"):
# lastoutput_norm = tf.layers.batch_normalization(lastoutput, axis=1)
# # lastoutput_norm = lastoutput
# pred = tf.layers.dense(lastoutput_norm, 128, activation = tf.nn.relu)
# pred = tf.layers.batch_normalization(pred, axis=1)
# pred = tf.layers.dropout(pred, rate = dropout_keep_prob)
# pred = tf.layers.dense(pred, 128, activation = tf.nn.relu)
# pred = tf.layers.batch_normalization(pred, axis=1)
# pred = tf.layers.dropout(pred, rate = dropout_keep_prob)
# pred = tf.layers.dense(pred, 128, activation = tf.nn.relu)
# pred = tf.layers.batch_normalization(pred, axis=1)
# pred = tf.layers.dropout(pred, rate = dropout_keep_prob)
# pred = tf.layers.dense(pred, 64, activation = tf.nn.relu)
# pred = tf.layers.batch_normalization(pred, axis=1)
# pred = tf.layers.dropout(pred, rate = dropout_keep_prob)
# pred = tf.layers.dense(pred, 2, activation = tf.nn.softmax)
# cross_entropy = \
# tf.nn.softmax_cross_entropy_with_logits_v2(
# logits = pred,
# labels = labels
# )
# Accuracy = tf.reduce_mean(
# tf.cast(
# tf.equal(
# tf.argmax(pred, 1),
# tf.argmax(labels, 1)
# ),
# dtype = tf.float32
# ),
# name = "accuracy"
# )
# loss = tf.reduce_mean(cross_entropy, name = "loss")
# optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
# # optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9).minimize(loss)
# return input_data, labels, dropout_keep_prob, optimizer, Accuracy, loss | [
11748,
11192,
273,
11125,
355,
48700,
628,
198,
11748,
28686,
198,
418,
13,
268,
2268,
17816,
10234,
62,
8697,
47,
62,
23678,
62,
25294,
62,
2538,
18697,
20520,
796,
705,
18,
6,
628,
198,
33,
11417,
62,
33489,
796,
13108,
198,
22921,
... | 2.108442 | 5,662 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------
# cssqc/__init__.py
#
# css quality control
# ----------------------------------------------------------------
# copyright (c) 2014 - Domen Ipavec
# Distributed under The MIT License, see LICENSE
# ----------------------------------------------------------------
import importlib
import csslex, cssyacc
from cssyacc.ruleset import Ruleset
from cssqc.statistics import Statistics
# Token types and AST node class names that QC rule objects can subscribe
# to.  NOTE(review): the upper-case names mirror csslex token names and the
# capitalised names mirror the cssyacc AST classes -- confirm against those
# modules.
EVENTS = (
    'IDENT',
    'ATKEYWORD',
    'ATBRACES',
    'STRING',
    'HASH',
    'NUMBER',
    'PERCENTAGE',
    'DIMENSION',
    'URI',
    'UNICODE_RANGE',
    'CDO',
    'CDC',
    'COLON',
    'SEMICOLON',
    'BRACES_R',
    'BRACES_L',
    'PARENTHESES_R',
    'PARENTHESES_L',
    'BRACKETS_R',
    'BRACKETS_L',
    'COMMENT',
    'WS',
    'FUNCTION',
    'INCLUDES',
    'DASHMATCH',
    'DELIM',
    'Block',
    'Brackets',
    'Comment',
    'Function',
    'Parentheses',
    'Ruleset',
    'Selector',
    'Statement',
    'Whitespace'
)

# Module-level singleton holding the active QC object; presumably assigned
# elsewhere by the cssqc driver -- TODO confirm.
instance = None
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
16529,
198,
2,
269,
824,
80,
66,
14,
834,
15003,
834,
13,
9078,
198,
2,
220,
198,
2,
269,
824,
3081,
1630,... | 2.402685 | 447 |
from abstract.instruccion import *
from tools.console_text import *
from tools.tabla_tipos import *
from storage import jsonMode as funciones
from error.errores import *
from tools.tabla_simbolos import * | [
6738,
12531,
13,
8625,
622,
535,
295,
1330,
1635,
198,
6738,
4899,
13,
41947,
62,
5239,
1330,
1635,
198,
6738,
4899,
13,
8658,
5031,
62,
22504,
418,
1330,
1635,
198,
6738,
6143,
1330,
33918,
19076,
355,
25439,
295,
274,
198,
6738,
404... | 3.344262 | 61 |
# import sys
# sys.path.append('/usr/local/lib/python2.7/site-packages')
import dlib
import scipy
import scipy.io  # scipy.savemat/loadmat do not exist; MATLAB I/O lives in scipy.io
import skimage as io
import numpy as np

# Root directory of the ImageNet validation images and the text file
# listing the image ids to process (one id per line).
imagenet_path = 'path/to/imagenet/val.ta/Images'
names = 'path/to/imagenet/val.ta/ImageSets/train.txt'

count = 0
all_proposals = []  # one selective-search proposal array per image
imagenms = []       # matching image filenames, stored next to the proposals

# NOTE(review): dlib_selective_search is not defined in this file --
# presumably a helper wrapping dlib.find_candidate_object_locations;
# confirm it is in scope before running.
nameFile = open(names)
try:
    for line in nameFile.readlines():  # was: nameFile.reaval.ines() (garbled)
        filename = imagenet_path + line.split('\n')[0] + '.png'
        single_proposal = dlib_selective_search(filename)
        all_proposals.append(single_proposal)  # was: .apped() -> AttributeError
        imagenms.append(filename)  # was never filled, so 'images' was always saved empty
        count += 1
        print(count)
finally:
    # Close the listing file even if proposal generation fails.
    nameFile.close()

scipy.io.savemat('train.mat', mdict={'all_boxes': all_proposals,
                                     'images': imagenms})

# Round-trip sanity check: reload what was just written.
obj_proposals = scipy.io.loadmat('train.mat')
print(obj_proposals)
| [
2,
1330,
25064,
198,
2,
25064,
13,
6978,
13,
1324,
268,
2100,
2637,
14,
14629,
14,
12001,
14,
8019,
14,
29412,
17,
13,
22,
14,
15654,
12,
43789,
11537,
198,
11748,
288,
8019,
198,
11748,
629,
541,
88,
198,
11748,
1341,
9060,
355,
... | 2.327922 | 308 |
# each JSON has: {instructions}, {opt}, {compiler}
# MODEL SETTINGS: please set these before running the main #
mode = "opt" # Labels of the model: [opt] or [compiler]
samples = 3000 # Number of the blind set samples
fav_instrs_in = ["mov"] # Set of instructions of which DEST register should be extracted [IN]
fav_instrs_eq = ["lea"] # Set of instructions of which DEST register should be extracted [EQ]
# -------------- #
# import warnings filter
from warnings import simplefilter
# ignore all future warnings
simplefilter(action='ignore', category=FutureWarning)
import json
import csv
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import *
from sklearn.metrics import confusion_matrix, classification_report, log_loss
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
import scikitplot as skplt
import matplotlib.pyplot as plt
# Function that parses the input file
# Dataset can be 1 (train) or 2 (blind test)
# Function that deals with the csv file
# Index can be: 1 (opt) or 2 (compiler)
if __name__ == "__main__":
    # Column index written into the output CSV: 1 for [opt], 0 for [compiler].
    # NOTE(review): the header comment above says the csv index is 1 (opt)
    # or 2 (compiler), but the code uses 0 -- confirm which is correct.
    index = 1 if mode == "opt" else 0

    # Parallel lists filled by processFile: instruction strings plus the
    # [opt] and [compiler] labels for each sample.
    # NOTE(review): processFile and produceOutput are not defined in the
    # visible part of this file -- confirm they exist before running.
    instrs = list()
    opt = list()
    comp = list()
    processFile("train_dataset.jsonl", instrs, opt, comp, 1)

    # Bag-of-words features; a token must appear in at least 5 documents.
    vectorizer = CountVectorizer(min_df=5)
    #vectorizer = TfidfVectorizer(min_df=5)
    X_all = vectorizer.fit_transform(instrs)
    y_all = opt if mode == "opt" else comp

    # Hold out 20% for evaluation, with a fixed seed for reproducibility.
    X_train, X_test, y_train, y_test = train_test_split(X_all, y_all, test_size=0.2, random_state=15)

    #model = RandomForestClassifier(n_estimators=200).fit(X_train, y_train)
    model = GradientBoostingClassifier(n_estimators=200, max_depth=7).fit(X_train, y_train)

    # Evaluation on the held-out split.
    print("Outcomes on test set")
    pred = model.predict(X_test)
    print(confusion_matrix(y_test, pred))
    print(classification_report(y_test, pred))
    ll = log_loss(y_test, model.predict_proba(X_test))
    print("Log Loss: {}".format(ll))
    #skplt.metrics.plot_precision_recall_curve(y_test, model.predict_proba(X_test), title="MOGB")
    #skplt.metrics.plot_confusion_matrix(y_test, pred, normalize=True, title="MOGB")
    #plt.show()

    # Calculating the overfitting: the same metrics on the training split.
    print("Outcomes on training set")
    pred2 = model.predict(X_train)
    print(confusion_matrix(y_train, pred2))
    print(classification_report(y_train, pred2))

    # Predicting the blind dataset and writing the submission file.
    b_instrs = list()
    processFile("test_dataset_blind.jsonl", b_instrs, list(), list(), 2)
    b_X_all = vectorizer.transform(b_instrs)
    b_pred = model.predict(b_X_all)
    produceOutput("1743168.csv", b_pred, index)
| [
2,
1123,
19449,
468,
25,
1391,
259,
7249,
507,
5512,
1391,
8738,
5512,
1391,
5589,
5329,
92,
198,
198,
2,
19164,
3698,
25823,
51,
20754,
25,
3387,
900,
777,
878,
2491,
262,
1388,
1303,
198,
14171,
796,
366,
8738,
1,
220,
1303,
3498,... | 2.726895 | 963 |
from django.contrib import admin
from django.conf.urls import url, include
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
# NOTE(review): no routes are registered yet -- urlpatterns is empty, so
# this app currently exposes no URLs.
urlpatterns = [
]
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
11,
2291,
198,
198,
2,
14712,
510,
674,
7824,
1262,
11353,
10289,
28166,
13,
198,
2,
12032,
11,
356,
2291,
17594,
32336,
32... | 3.377049 | 61 |
from jsonlink import JsonLink
from globals import read_json_file
# Build a Task from the example JSON definition on disk and echo it.
# NOTE(review): Task is not imported in this module (only JsonLink and
# read_json_file are) -- confirm where the name is expected to come from.
task = Task()
task.update_from_dict(read_json_file("exampletask.json"))
print(task)
| [
6738,
33918,
8726,
1330,
449,
1559,
11280,
198,
6738,
15095,
874,
1330,
1100,
62,
17752,
62,
7753,
628,
628,
198,
35943,
796,
15941,
3419,
198,
35943,
13,
19119,
62,
6738,
62,
11600,
7,
961,
62,
17752,
62,
7753,
7203,
20688,
35943,
13... | 3.039216 | 51 |
# Copyright 2020 Siftrics
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__version__ = '1.2.0'
import base64
import requests
import time
| [
2,
15069,
220,
12131,
311,
2135,
10466,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
2,
286,
428,
3788,
290,
3917,
10314,
3696,
357,
1169,
366,
25423,
12340,
284,
1730... | 3.782895 | 304 |
from django.contrib import admin
from .models import Attachment, EmailHeaders, Newsletter
# Expose the newsletter models in the Django admin with default options.
for model in (EmailHeaders, Attachment, Newsletter):
    admin.site.register(model)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
764,
27530,
1330,
3460,
15520,
11,
9570,
13847,
364,
11,
35798,
198,
198,
28482,
13,
15654,
13,
30238,
7,
15333,
13847,
364,
8,
198,
28482,
13,
15654,
13,
30238,
7,
8086,... | 3.518519 | 54 |
# Build an acronym from the first letter of every space-separated word
# the user types in.
fullform = input("Enter a full form: ")
words = fullform.split(" ")
acro = "".join(word[0] for word in words)
print("Acronym is ", acro)
12853,
687,
796,
5128,
7203,
17469,
257,
1336,
1296,
25,
366,
8,
198,
10879,
796,
1336,
687,
13,
35312,
7203,
366,
8,
198,
330,
305,
796,
13538,
198,
1640,
1573,
287,
2456,
25,
198,
220,
220,
220,
936,
305,
47932,
4775,
58,
15,
60... | 2.457627 | 59 |
# Compare OpenCV's built-in Sobel operator against manually convolving a
# 3x3 Sobel kernel (and its transpose) over a grayscale image.
from skimage import img_as_int
import cv2
import numpy as np
from pylab import *
import scipy.ndimage.filters as filters

# Load the test image as grayscale (imread flag 0).
#img = cv2.imread('images/profile.jpg', 0)
img = cv2.imread('images/moon.jpg',0)

# 3x3 Sobel kernel for vertical edges; its transpose detects horizontal ones.
sobel_operator_v = np.array([
    [-1, 0, 1],
    [-2, 0 ,2],
    [-1, 0, 1]
])
# cv2.Sobel(src, ddepth=-1 keeps input depth, dx, dy, aperture size 5).
sobelX = cv2.Sobel(img, -1, 1, 0, ksize=5)
sobelY = cv2.Sobel(img, -1, 0, 1, ksize=5)

# 2x2 figure grid: OpenCV results on top, manual convolutions below.
# NOTE(review): `subplot` and `plt` are assumed to come from the wildcard
# pylab import -- confirm.
subplot(2,2,1)
plt.imshow(sobelX, cmap='gray')
plt.title('(-1, 1, 0)')
subplot(2,2,2)
plt.imshow(sobelY, cmap='gray')
plt.title('(-1, 0, 1)')
subplot(2,2,3)
plt.imshow(filters.convolve(img_as_int(img), sobel_operator_v), cmap='gray')
plt.title('sobel vertical')
subplot(2,2,4)
plt.imshow(filters.convolve(img_as_int(img), sobel_operator_v.T), cmap='gray')
plt.title('sobel horizontal')
plt.show()
| [
6738,
1341,
9060,
1330,
33705,
62,
292,
62,
600,
201,
198,
11748,
269,
85,
17,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
6738,
279,
2645,
397,
1330,
1635,
201,
198,
11748,
629,
541,
88,
13,
358,
9060,
13,
10379,
1010,
355,
... | 1.956098 | 410 |
# Relative imports
from ._estimator import ProphetEstimator
from ._predictor import ProphetPredictor, PROPHET_IS_INSTALLED
__all__ = ['ProphetEstimator', 'ProphetPredictor', 'PROPHET_IS_INSTALLED']
| [
2,
45344,
17944,
198,
6738,
47540,
395,
320,
1352,
1330,
13583,
22362,
320,
1352,
198,
6738,
47540,
79,
17407,
273,
1330,
13583,
47,
17407,
273,
11,
4810,
3185,
39,
2767,
62,
1797,
62,
38604,
7036,
1961,
198,
198,
834,
439,
834,
796,
... | 2.884058 | 69 |
"""
<Filename>
factoids/__init__.py
<Purpose>
Used to print saesh factoids.
It implements the following command:
show factoids [number of factoids]/all
"""
import seash_exceptions
import random
import os
# List which will contain all factoids after fatching from a file.
factoids = []
def initialize():
  """
  <Purpose>
    Used to print random seash factoid when user runs seash.

  <Arguments>
    None

  <Side Effects>
    Fills the module-level 'factoids' list and prints one random factoid
    onto the screen.

  <Exceptions>
    InitializeError: Error during generating path to "factoid.txt" file or
    error while opening, reading or closing "factoid.txt" file.

  <Return>
    None
  """
  # NOTE: this module uses Python 2 syntax ("except OSError, error" and
  # print statements) and will not run under Python 3.

  # Global 'factoids' list will be used to store factoids, fetched from a file.
  global factoids

  # Path to "factoid.txt" file is created, relative to the current
  # working directory.
  try:
    current_path = os.getcwd()
    file_path = os.path.join(current_path, "modules", "factoids", "factoid.txt")
  except OSError, error:
    raise seash_exceptions.InitializeError("Error during initializing factoids module: '" + str(error) + "'.")

  # We have to fetch the list of factoids from the "factoid.txt" file.
  try:
    file_object = open(file_path, 'r')
    factoids_temp = file_object.readlines()
    file_object.close()
  except IOError, error:
    raise seash_exceptions.InitializeError("Error during initializing factoids module: '" + str(error) + "'.")

  # Newline characters in a list, read from a file are removed.
  for factoid in factoids_temp:
    factoids.append(factoid.strip('\n'))

  # A random factoid is printed every time user runs seash.
  print random.choice(factoids)+"\n"
def cleanup():
  """
  <Purpose>
    Reset the module-level 'factoids' cache when this module is disabled.

  <Arguments>
    None

  <Side Effects>
    Rebinds the global 'factoids' list to a fresh empty list.

  <Exceptions>
    None

  <Return>
    None
  """
  global factoids
  # Rebind (rather than mutate) so previously fetched factoids are dropped.
  factoids = list()
def print_factoids(input_dict, environment_dict):
  """
  <Purpose>
    Used to print seash factoids when user uses 'show factoids'
    command.

  <Arguments>
    input_dict: Input dictionary generated by seash_dictionary.parse_command().
    environment_dict: Dictionary describing the current seash environment.
      For more information, see command_callbacks.py's module docstring.

  <Side Effects>
    Prints factoids onto the screen and shuffles the module-level
    'factoids' list in place.

  <Exceptions>
    UserError: If user does not type appropriate command, or the argument
    is not 'all' or a positive integer.

  <Return>
    None
  """
  # Walk the parsed-command tree until the 'args' node is reached; its key
  # is the user-supplied argument (a positive number, or 'all').
  # NOTE: dict.keys()[0] only works in Python 2, where keys() is a list.
  dict_mark = input_dict
  try:
    command = dict_mark.keys()[0]
    while dict_mark[command]['name'] != 'args':
      dict_mark = dict_mark[command]['children']
      command = dict_mark.keys()[0]
    args = command
  except IndexError:
    raise seash_exceptions.UserError("\nError, Syntax of the command is: show factoids [number of factoids]/all \n")

  # User decided to print all factoids
  if args == 'all':
    print
    for factoid in factoids:
      print factoid
    print
    return

  # User can not insert other than integer number.
  try:
    no_of_factoids = int(args)
  except ValueError:
    raise seash_exceptions.UserError("\nYou have to enter number only.\n")

  # If number of factoids decided by user is greater than total number of
  # available factoids than whole factoids list is printed.
  if (no_of_factoids > (len(factoids))):
    print "\nWe have only %d factoids. Here is the list of factoids:" % (len(factoids))
    no_of_factoids = len(factoids)
  elif (no_of_factoids <= 0):
    raise seash_exceptions.UserError("\nYou have to enter positive number only.\n")

  # 'factoids' list will be shuffled every time for printing random factoids.
  random.shuffle(factoids)

  # Factoids will be printed (a prefix of the shuffled list).
  for factoid in factoids[:no_of_factoids]:
    print factoid
  print
# Command tree merged into seash's dispatcher: registers
# 'show factoids [ARGUMENT]' with print_factoids as its callback.
command_dict = {
  'show': {'name':'show', 'callback': None, 'children':{
      'factoids':{'name':'factoids', 'callback': print_factoids,
                 'summary': "Displays available seash factoids.",
                 'help_text': '','children':{
                     '[ARGUMENT]':{'name':'args', 'callback': None, 'children':{}}
}},}}
}

help_text = """
Factoids Module
This module includes command that prints seash factoids.
'show factoids [number of factoids]/all' is used to print
available seash factoids.
You can type 'show factoids [number of factoids]' to print
that much number of factoids.
You can type 'show factoids all' to see all available factoids.
"""

# This is where the module importer loads the module from.
moduledata = {
  'command_dict': command_dict,
  'help_text': help_text,
  'url': None,
  'initialize': initialize,
  'cleanup': cleanup
}
37811,
198,
27,
35063,
29,
198,
220,
1109,
10994,
14,
834,
15003,
834,
13,
9078,
198,
198,
27,
30026,
3455,
29,
198,
220,
16718,
284,
3601,
473,
5069,
1109,
10994,
13,
198,
220,
220,
198,
220,
632,
23986,
262,
1708,
3141,
25,
220,
... | 2.836457 | 1,761 |