content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from unittest import mock
import pytest
from django.contrib.auth.models import AnonymousUser
from olympia import amo
from olympia.access.models import Group, GroupUser
from olympia.addons.models import Addon, AddonUser
from olympia.amo.tests import addon_factory, TestCase, req_factory_factory
from olympia.users.models import UserProfile
from .acl import (
action_allowed, check_addon_ownership, check_addons_reviewer,
check_ownership, check_static_theme_reviewer,
check_unlisted_addons_reviewer,
is_reviewer, is_user_any_kind_of_reviewer, match_rules)
pytestmark = pytest.mark.django_db
def test_match_rules():
"""
Unit tests for the match_rules method.
"""
rules = (
'*:*',
'Editors:*,Admin:EditAnyAddon,Admin:flagged,Admin:addons,'
'Admin:EditAnyCollection',
'Tests:*,Admin:serverstatus,Admin:users',
'Admin:EditAnyAddon,Admin:EditAnyLocale,Editors:*,'
'Admin:lists,Admin:applications,Admin:addons',
'Admin:EditAnyAddon',
'Admin:ViewAnyStats,Admin:ViewAnyCollectionStats',
'Admin:ViewAnyStats',
'Editors:*,Admin:features',
'Admin:Statistics',
'Admin:Features,Editors:*',
'Admin:%',
'Admin:*',
'Admin:Foo',
'Admin:Bar',
)
for rule in rules:
assert match_rules(rule, 'Admin', '%'), "%s != Admin:%%" % rule
rules = (
'Doctors:*',
'Stats:View',
'CollectionStats:View',
'Addons:Review',
'Users:Edit',
'None:None',
)
for rule in rules:
assert not match_rules(rule, 'Admin', '%'), \
"%s == Admin:%% and shouldn't" % rule
class ACLTestCase(TestCase):
"""Test some basic ACLs by going to various locked pages on AMO."""
fixtures = ['access/login.json']
| [
6738,
555,
715,
395,
1330,
15290,
198,
11748,
12972,
9288,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
19200,
12982,
198,
198,
6738,
267,
6760,
544,
1330,
716,
78,
198,
6738,
267,
6760,
544,
13,
15526,
13,... | 2.39267 | 764 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 Judit Acs <judit@sch.bme.hu>
#
# Distributed under terms of the MIT license.
from __future__ import unicode_literals
from argparse import ArgumentParser
from sys import stdin
from experiment import Seq2seqExperiment
from data import DataSet
if __name__ == '__main__':
import logging
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
main()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
43907,
25,
69,
12685,
28,
40477,
12,
23,
198,
2,
198,
2,
15069,
10673,
2177,
4794,
270,
4013,
82,
1279,
10456,... | 2.637755 | 196 |
#!/usr/bin/python
import argparse
from xdg import BaseDirectory
import requests
import json
import sys
KOALA_LOGIN_FILE='login'
KOALA_SERVER="http://localhost:5000/"
KOALA_API_PATH=""
X_KOALA_USERNAME='x-koala-username'
X_KOALA_KEY='x-koala-key'
parser = argparse.ArgumentParser()
subparser = parser.add_subparsers()
loginparser = subparser.add_parser('login')
registerparser = subparser.add_parser('register')
addparser = subparser.add_parser('add')
rmparser = subparser.add_parser('rm')
listparser = subparser.add_parser('list')
genkeyparser = subparser.add_parser('genkey')
readparser = subparser.add_parser('read')
unreadparser = subparser.add_parser('unread')
favoriteparser = subparser.add_parser('favorite')
unfavoriteparser = subparser.add_parser('unfavorite')
registerparser.add_argument('-u', '--username', required=True)
registerparser.add_argument('-p', '--password', required=True)
registerparser.set_defaults(which='register')
loginparser.add_argument('-u', '--username')
loginparser.add_argument('-k', '--key')
loginparser.set_defaults(which='login')
addparser.add_argument('-u', '--url', required=True)
addparser.add_argument('-t', '--title')
addparser.set_defaults(which='add')
rmparser.add_argument('-a', '--article', required=True)
rmparser.set_defaults(which='rm')
listparser.add_argument('-l', '--limit')
listparser.add_argument('-v', '--verbose', action='store_true')
listparser.set_defaults(which='list')
genkeyparser.add_argument('-u', '--username')
genkeyparser.add_argument('-p', '--password')
genkeyparser.set_defaults(which='genkey')
readparser.add_argument('-a', '--article', required=True)
readparser.set_defaults(which='read')
unreadparser.add_argument('-a', '--article', required=True)
unreadparser.set_defaults(which='unread')
favoriteparser.add_argument('-a', '--article', required=True)
favoriteparser.set_defaults(which='favorite')
unfavoriteparser.add_argument('-a', '--article', required=True)
unfavoriteparser.set_defaults(which='unfavorite')
args = parser.parse_args()
try:
if args.which == 'register':
register(args)
elif args.which == 'list':
listing(args)
elif args.which == 'add':
add(args)
elif args.which == 'genkey':
genkey(args)
elif args.which == 'login':
login(args)
elif args.which == 'rm':
rm(args)
elif args.which == 'read':
read(args, True)
elif args.which == 'unread':
read(args, False)
elif args.which == 'favorite':
favorite(args, True)
elif args.which == 'unfavorite':
favorite(args, False)
except:
print("Unexpected error:", sys.exc_info()[0])
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
1822,
29572,
198,
6738,
2124,
67,
70,
1330,
7308,
43055,
198,
11748,
7007,
198,
11748,
33918,
198,
11748,
25064,
198,
198,
22328,
1847,
32,
62,
25294,
1268,
62,
25664,
11639,
38235,
6,
... | 2.661631 | 993 |
#------------------------------------------------------------------------------
# Copyright (c) 2012, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
import datetime
from dateutil import parser as isoparser
import wx
from .wx_control import WxControl
def as_wx_date(iso_date):
""" Convert an iso date string to a wxDateTime.
"""
# wx doesn't have iso date parsing until version 2.9
py_date = isoparser.parse(iso_date)
day = py_date.day
month = py_date.month - 1 # wx peculiarity!
year = py_date.year
return wx.DateTimeFromDMY(day, month, year)
def as_iso_date(wx_date):
""" Convert a QDate object into and iso date string.
"""
day = wx_date.GetDay()
month = wx_date.GetMonth() + 1 # wx peculiarity!
year = wx_date.GetYear()
return datetime.date(year, month, day).isoformat()
class WxBoundedDate(WxControl):
""" A base class for use with Wx widgets implementing behavior
for subclasses of BoundedDate.
"""
#--------------------------------------------------------------------------
# Setup Methods
#--------------------------------------------------------------------------
def create(self, tree):
""" Create and initialize the bounded date widget.
"""
super(WxBoundedDate, self).create(tree)
self.set_min_date(as_wx_date(tree['minimum']))
self.set_max_date(as_wx_date(tree['maximum']))
self.set_date(as_wx_date(tree['date']))
#--------------------------------------------------------------------------
# Message Handlers
#--------------------------------------------------------------------------
def on_action_set_date(self, content):
""" Handle the 'set_date' action from the Enaml widget.
"""
self.set_date(as_wx_date(content['date']))
def on_action_set_minimum(self, content):
""" Hanlde the 'set_minimum' action from the Enaml widget.
"""
self.set_min_date(as_wx_date(content['minimum']))
def on_action_set_maximum(self, content):
""" Handle the 'set_maximum' action from the Enaml widget.
"""
self.set_max_date(as_wx_date(content['maximum']))
#--------------------------------------------------------------------------
# Event Handlers
#--------------------------------------------------------------------------
def on_date_changed(self, event):
""" An event handler to connect to the date changed signal of
the underlying widget.
This will convert the wxDateTime to iso format and send the Enaml
widget the 'date_changed' action.
"""
wx_date = self.get_date()
content = {'date': as_iso_date(wx_date)}
self.send_action('date_changed', content)
#--------------------------------------------------------------------------
# Abstract Methods
#--------------------------------------------------------------------------
def get_date(self):
""" Return the current date in the control.
Returns
-------
result : wxDateTime
The current control date as a wxDateTime object.
"""
raise NotImplementedError
def set_date(self, date):
""" Set the widget's current date.
Parameters
----------
date : wxDateTime
The wxDateTime object to use for setting the date.
"""
raise NotImplementedError
def set_max_date(self, date):
""" Set the widget's maximum date.
Parameters
----------
date : wxDateTime
The wxDateTime object to use for setting the maximum date.
"""
raise NotImplementedError
def set_min_date(self, date):
""" Set the widget's minimum date.
Parameters
----------
date : wxDateTime
The wxDateTime object to use for setting the minimum date.
"""
raise NotImplementedError
| [
2,
10097,
26171,
198,
2,
220,
15069,
357,
66,
8,
2321,
11,
2039,
28895,
11,
3457,
13,
198,
2,
220,
1439,
2489,
10395,
13,
198,
2,
10097,
26171,
198,
11748,
4818,
8079,
198,
198,
6738,
3128,
22602,
1330,
30751,
355,
318,
404,
28198,
... | 2.899214 | 1,399 |
from django.conf.urls import url, patterns
from django.contrib.auth.decorators import login_required, permission_required
from django.views.generic import TemplateView
from cbsapp import views as v
urlpatterns = patterns(
'cbsapp.views',
url(r'success/', v.MyView1.as_view()),
url(r'func/', 'my_view'),
url(r'form/', 'my_form_view'),
url(r'protect_form/', v.ProtectedView.as_view()),
url(r'secret/', login_required(TemplateView.as_view(template_name='secret.html'))),
url(r'permission/', permission_required('is_superuser')(TemplateView.as_view(template_name='secret.html'))),
url(r'publishers/', v.PublisherList.as_view()),
url(r'books/([\w-]+)/$', v.PubBookList.as_view()),
url(r'author/$', v.AuthView.as_view(), name='author'),
url(r'thanks/', TemplateView.as_view(template_name='thanks.html')),
url(r'authors/$', v.AuthorList.as_view(), name='author-list'),
url(r'authors/(?P<pk>[0-9]+)/$', v.AuthorDetailView.as_view(), name='author-detail'),
url(r'authors/add/$', v.AuthorCreate.as_view(), name='author-add'),
url(r'authors/create/$', v.AuthorJsonCreate.as_view(), name='author-create'),
url(r'authors/update/(?P<pk>[0-9]+)/$', v.AuthUpdate.as_view(), name='author-update'),
url(r'authors/delete/(?P<pk>[0-9]+)/$', v.AuthDelete.as_view(), name='author-delete'),
)
urlpatterns += [
# url(r'about/cls/', v.MyView.as_view()),
url(r'about/cls/', v.MyView.as_view(greeting='as_view')),
# url(r'about/cls/', v.MyView.as_view(non_exist='as_view')),
# url(r'^about/func/', 'cbsapp.views.my_view')
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
11,
7572,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
12501,
273,
2024,
1330,
17594,
62,
35827,
11,
7170,
62,
35827,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
1... | 2.394856 | 661 |
import cv2
import numpy as np
# Load an image in grayscale
img = cv2.imread('football.jpg', cv2.IMREAD_GRAYSCALE)
# Displaying an Image
cv2.imshow("Football Picha", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
198,
2,
8778,
281,
2939,
287,
1036,
592,
38765,
198,
9600,
796,
269,
85,
17,
13,
320,
961,
10786,
15914,
13,
9479,
3256,
269,
85,
17,
13,
3955,
15675,
62,
38,
30631,
617... | 2.482353 | 85 |
from .delete_nth import *
from .flatten import *
from .garage import *
from .josephus import *
from .longest_non_repeat import *
from .max_ones_index import *
from .merge_intervals import *
from .missing_ranges import *
from .move_zeros import *
from .plus_one import *
from .rotate import *
from .summarize_ranges import *
from .three_sum import *
from .trimmean import *
from .top_1 import *
from .two_sum import *
from .limit import *
from .n_sum import *
| [
6738,
764,
33678,
62,
77,
400,
1330,
1635,
198,
6738,
764,
2704,
41769,
1330,
1635,
198,
6738,
764,
4563,
496,
1330,
1635,
198,
6738,
764,
73,
577,
746,
385,
1330,
1635,
198,
6738,
764,
6511,
395,
62,
13159,
62,
44754,
1330,
1635,
1... | 3 | 153 |
"""
Inforalgo control panel
Written by Alexandre Almosni alexandre.almosni@gmail.com
(C) 2016 Alexandre Almosni
Released under Apache 2.0 license. More info at http://www.apache.org/licenses/LICENSE-2.0
"""
import wx
#import datetime
import wx.grid as gridlib
import inforalgo
from wx.lib.scrolledpanel import ScrolledPanel
# def wxdate2pydate(date):
# """Function to convert wx.datetime to datetime.datetime format
# """
# assert isinstance(date, wx.DateTime)
# if date.IsValid():
# ymd = map(int, date.FormatISODate().split('-'))
# return datetime.datetime(*ymd)
# else:
# return None
###BELOW USED FOR DEBUGGING SO FILE CAN BE SELF-CONTAINED###
if __name__ == "__main__":
app = wx.App()
frame = InforalgoControlFrame().Show()
app.MainLoop()
| [
37811,
198,
818,
1640,
282,
2188,
1630,
6103,
198,
25354,
416,
21000,
260,
978,
16785,
8461,
220,
220,
257,
2588,
49078,
13,
282,
16785,
8461,
31,
14816,
13,
785,
198,
7,
34,
8,
1584,
21000,
260,
978,
16785,
8461,
198,
45037,
739,
2... | 2.523364 | 321 |
from django.conf.urls import include, url
from django.views.generic import TemplateView
from .models import Strategy, TradingBot
from . import views
urlpatterns = [
# Landing page, etc
url(r'^$', views.landing, name="landing"),
url(r'^faq$', TemplateView.as_view(template_name="landing/faq.html"), name="faq"),
url(r'^pricing$', views.PlanList.as_view(), name="pricing"),
# Control Panels
url(r'^home$', views.index, name="index"),
url(r'^strategy/([0-9]+)$', views.strategy_page, name=Strategy.EDIT_URL),
url(r'^bot/([0-9]+)$', views.bot_page, name=TradingBot.EDIT_URL),
url(r'^graph_data$', views.graph_data, name="graph_data"),
# Legal Info
url(r'^legal/terms$', TemplateView.as_view(template_name="main/terms.html"), name="terms"),
url(r'^legal/privacy$', TemplateView.as_view(template_name="main/privacy.html"), name="privacy"),
# API Methods
# - CRUD for strats & bots
url(r'^api/strategies$', views.StrategyList.as_view(), name="strategies_list"),
url(r'^api/strategy/new$', views.StrategyNew.as_view(), name="strategy_new"),
url(r'^api/strategy$', views.StrategyDetail.as_view(), name="strategy_detail"),
url(r'^api/strategy/delete$', views.StrategyDelete.as_view(), name="strategy_delete"),
url(r'^api/bots$', views.BotList.as_view(), name="bot_list"),
url(r'^api/bot/new$', views.BotNew.as_view(), name="bot_new"),
url(r'^api/bot$', views.BotDetail.as_view(), name="bot_detail"),
url(r'^api/bot/delete$', views.BotDelete.as_view(), name="bot_delete"),
# - Backtest method test
url(r'^api/backtest$', views.BacktestDetail.as_view(), name="backtest_detail"),
] | [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
2291,
11,
19016,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
37350,
7680,
198,
198,
6738,
764,
27530,
1330,
20561,
11,
25469,
20630,
198,
6738,
764,
1330,
5009,
198,
198,
6371,... | 2.450725 | 690 |
dna = open('dna.txt','r')
date = dna.readline()
rna = ""
for i in date:
if i == 'T':
rna+="A"
elif i == 'G':
rna +="C"
elif i == 'C':
rna+="G"
elif i == "A":
rna+="U"
saveRna = open('rna.txt', 'w')
saveRna.write(rna)
saveRna.close()
"""
No DNA temos as bases
T - A
C - G
no RNA temos que
U - A
C - G
Então para transformar uma fita de DNA em RNA fazemos
a troca do T pelo U somente
""" | [
67,
2616,
796,
1280,
10786,
67,
2616,
13,
14116,
41707,
81,
11537,
198,
198,
4475,
796,
288,
2616,
13,
961,
1370,
3419,
198,
81,
2616,
796,
13538,
198,
198,
1640,
1312,
287,
3128,
25,
628,
220,
220,
220,
611,
1312,
6624,
705,
51,
... | 1.901709 | 234 |
"""
Ball Class (based on Gameobject).
"""
import pygame
from game_object import GameObject
| [
37811,
198,
23410,
5016,
357,
3106,
319,
3776,
15252,
737,
198,
37811,
198,
198,
11748,
12972,
6057,
198,
198,
6738,
983,
62,
15252,
1330,
3776,
10267,
198
] | 3.444444 | 27 |
from .sentence import Sentence
from .sentences import Sentences
from .section import Section
from .sections import Sections
from .document import Document
| [
6738,
764,
34086,
594,
1330,
11352,
594,
198,
6738,
764,
34086,
3007,
1330,
11352,
3007,
198,
6738,
764,
5458,
1330,
7275,
198,
6738,
764,
23946,
1330,
37703,
198,
6738,
764,
22897,
1330,
16854,
198
] | 4.558824 | 34 |
'''A substring is a contiguous (non-empty) sequence of characters within a string.
A vowel substring is a substring that only consists of vowels ('a', 'e', 'i', 'o', and 'u') and has all five vowels present in it.
Given a string word, return the number of vowel substrings in word.
Example 1:
Input: word = "aeiouu"
Output: 2
Explanation: The vowel substrings of word are as follows (underlined):
- "aeiouu"
- "aeiouu"'''
# word = "aeiouu"
# substrings = []
# c = 0
# for i in range(len(word)):
# for j in range(i + 1, len(word) + 1):
# substrings.append(word[i:j])
# vowels_set = {"a", "e", "i", "o", "u"}
# for i in range(len(substrings)):
# if set(substrings[i]) == vowels_set:
# c += 1
# print(c)
word = "0110111"
substrings = []
c = 0
for i in range(len(word)):
for j in range(i + 1, len(word) + 1):
substrings.append(word[i:j])
vowels_set = {"1"}
for i in range(len(substrings)):
if set(substrings[i]) == vowels_set:
c += 1
print(c)
| [
7061,
6,
32,
3293,
1806,
318,
257,
48627,
357,
13159,
12,
28920,
8,
8379,
286,
3435,
1626,
257,
4731,
13,
201,
198,
201,
198,
32,
48617,
3293,
1806,
318,
257,
3293,
1806,
326,
691,
10874,
286,
23268,
1424,
19203,
64,
3256,
705,
68,
... | 2.274123 | 456 |
import os
import logging
from logging.handlers import RotatingFileHandler
from .Statics import *
# create format of the log message
logFormatter = logging.Formatter(LOG_FORMAT)
# Get the path of the running script (api command)
appPath = os.path.dirname(os.path.abspath(__file__))
# add subdirectory "log" to the path
logPath = os.path.join(appPath, 'logs')
if not os.path.isdir(logPath):
os.makedirs(logPath)
# create full logfilename
logFullPathName = os.path.join(logPath, 'ExportIt.log')
# create log handler
logHandler = RotatingFileHandler(logFullPathName, mode='a', maxBytes=200000, backupCount=2, encoding=None, delay=0)
logHandler.setFormatter(logFormatter)
logHandler.setLevel(LOG_LEVEL)
# create logger
logger = logging.getLogger('ExportIt')
logger.setLevel(LOG_LEVEL)
# add logger no logger is available
if not len(logger.handlers):
logger.addHandler(logHandler)
logger.debug("Logging started")
| [
11748,
28686,
198,
11748,
18931,
198,
6738,
18931,
13,
4993,
8116,
1330,
18481,
803,
8979,
25060,
198,
6738,
764,
17126,
873,
1330,
1635,
198,
198,
2,
2251,
5794,
286,
262,
2604,
3275,
198,
6404,
8479,
1436,
796,
18931,
13,
8479,
1436,
... | 3 | 308 |
import numpy as np
# from IPython import embed;embed()
from make_densebox_target import \
make_densebox_target as make_densebox_target_old
from make_densebox_target_dev import \
make_densebox_target as make_densebox_target_new
gt_boxes = np.asarray([[150, 250, 130, 60, 1]])
config_dict = dict(
x_size=303,
score_size=17,
total_stride=8,
score_offset=(303 - 1 - (17 - 1) * 8) // 2,
)
target_old = make_densebox_target_old(gt_boxes, config_dict)
target_new = make_densebox_target_new(gt_boxes, config_dict)
for v_old, v_new in zip(target_old, target_new):
v_new = v_new.numpy()
# uncomment the next line to inspect tensors in detail
# from IPython import embed;embed()
np.testing.assert_allclose(v_new, v_old, atol=1e-6, verbose=True)
print("Values closed.")
| [
11748,
299,
32152,
355,
45941,
198,
2,
422,
6101,
7535,
1330,
11525,
26,
20521,
3419,
198,
6738,
787,
62,
67,
1072,
3524,
62,
16793,
1330,
3467,
198,
220,
220,
220,
787,
62,
67,
1072,
3524,
62,
16793,
355,
787,
62,
67,
1072,
3524,
... | 2.49226 | 323 |
import numpy as np
import os
import pandas as pd
LEARNING_RATE = 0.15
REGULARISATION = 0.005
MAX_ITERATION = 15000
TARGET_COLUMN_NAME = 53
FOLD_COUNT = 5
# x^j+1 = x^j - lambda^j * grad(F(x^j))
# block of function for features
# normalise values in selected column
if __name__ == "__main__":
main() | [
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
2538,
1503,
15871,
62,
49,
6158,
796,
657,
13,
1314,
198,
31553,
37232,
1797,
6234,
796,
657,
13,
22544,
198,
22921,
62,
2043,
1137,
623... | 2.483871 | 124 |
from datetime import date
from behave import *
from core.model import to_dict
from core.model.membership import Membership
from core.model.membership_type import MembershipType
from core.utils.serialization import serialize
use_step_matcher("re")
@when("a serialized membership is requested")
def step_impl(context):
"""
:type context: behave.runner.Context
"""
context.dict = serialize(context.membership).for_api()
@given("a membership is created with date (?P<date_string>.+)")
def step_impl(context, date_string: str):
"""
:type context: behave.runner.Context
:type date: str
"""
date_obj = date.fromisoformat(date_string)
context.membership = Membership(id=1, user_id=1,start_date=date_obj, end_date=date_obj, type=MembershipType.Smart)
@then("the dates are converted to ISO date format (?P<date_string>.+)")
def step_impl(context, date_string: str):
"""
:type context: behave.runner.Context
:type date: str
"""
assert context.dict["start_date"] == date_string
| [
6738,
4818,
8079,
1330,
3128,
198,
198,
6738,
17438,
1330,
1635,
198,
198,
6738,
4755,
13,
19849,
1330,
284,
62,
11600,
198,
6738,
4755,
13,
19849,
13,
30814,
1056,
1330,
37939,
198,
6738,
4755,
13,
19849,
13,
30814,
1056,
62,
4906,
1... | 2.991329 | 346 |
import glob
import os
from conans import ConanFile, CMake, tools
| [
11748,
15095,
198,
11748,
28686,
198,
198,
6738,
369,
504,
1330,
31634,
8979,
11,
327,
12050,
11,
4899,
198
] | 3.473684 | 19 |
"""Role testing files using testinfra."""
testinfra_hosts = ["node1.osgiliath.test"]
| [
37811,
47445,
4856,
3696,
1262,
1332,
10745,
430,
526,
15931,
198,
9288,
10745,
430,
62,
4774,
82,
796,
14631,
17440,
16,
13,
418,
70,
2403,
776,
13,
9288,
8973,
628
] | 2.866667 | 30 |
#Electric potential energy, the easy way to solve physics problems.
#This energy is converted from potential to kinetic just like in any other situation.
#The calculations based upon Energy in general can be used to design and build anything.
#This application will attempt to quickly formulate Energy quantities and interpret them,
# calculate electric potential or voltage and electric field strength, which is - the derivative of Voltage.
#Quantities that would be nice not to have to type out every time:
#Charge of an electron
ech = -1.6e-19
#Charge of a proton
pch = 1.6e-19
#Mass of an electron
m_elec = 9.11e-31
#Mass of a proton
m_prot = 1.67e-27
#First there are several variable quantities needed.
#import math functions
import math
#Permittivity constant 'epsilon naught using alt 238'
ε = 8.854187817e-12
#Store the value of π for ease of use later 'alt 227' for PC 'option P' for mac
π = math.pi
#Electric constant ¼πε
k = 1/(4*π*ε)
#Define formulas for use
#Usube will be used for the Electric Potential Energy formula
#▲K = -▲Usube 'delta = alt 30'
#Formula for change in potential energy
#****▲U = k*(Qsource * qtest/rf - Qsource * qtest/ri)****
#Later we shall add a function that incorporates the use of this formula while taking inputs from the user.
#We first need a way to determine whether we want to calculate a potential energy
# value alone, that can be used later, rather than a change in potential energy.
| [
2,
44132,
2785,
2568,
11,
262,
2562,
835,
284,
8494,
11887,
2761,
13,
198,
2,
1212,
2568,
318,
11513,
422,
2785,
284,
37892,
655,
588,
287,
597,
584,
3074,
13,
198,
2,
464,
16765,
1912,
2402,
6682,
287,
2276,
460,
307,
973,
284,
1... | 3.536765 | 408 |
#! /usr/bin/env python
import sys
import os
sys.path.insert(0, os.environ["QUEX_PATH"])
import quex.input.regular_expression.engine as regex
import quex.engine.state_machine.algebra.union as union
import quex.engine.state_machine.algebra.intersection as intersection
import quex.engine.state_machine.check.identity as identity
#import quex.engine.state_machine.TEST_help.lexeme_set as lexeme_set
if "--hwut-info" in sys.argv:
print "Union"
print "CHOICES: Sequences, SequenceAndOptional, SequenceAndLoop, Loops;"
sys.exit(0)
if "Sequences" in sys.argv:
test('abc', 'abc') # same
test('abc', 'def') # different
test('Xbc', 'abc') # partly same I
test('aXc', 'abc') # partly same II
test('abX', 'abc') # partly same III
elif "SequenceAndOptional" in sys.argv:
test('ab(c?)', 'abc') # same
test('ab(c?)', 'def') # different
test('Xb(c?)', 'abc') # partly same I
test('aX(c?)', 'abc') # partly same II
test('ab(X?)', 'abc') # partly same III
test('(a?)b(c?)', 'abc') # same
test('(a?)b(c?)', 'def') # different
test('(X?)b(c?)', 'abc') # partly same I
test('(a?)X(c?)', 'abc') # partly same II
test('(a?)b(X?)', 'abc') # partly same III
elif "SequenceAndLoop" in sys.argv:
test('(abc)+', 'abc') # same
test('(abc)+', 'def') # different
test('(Xbc)+', 'abc') # partly same I
test('(aXc)+', 'abc') # partly same II
test('(abX)+', 'abc') # partly same III
test('ab(c*)', 'abc') # same
test('ab(c*)', 'def') # different
test('Xb(c*)', 'abc') # partly same I
test('aX(c*)', 'abc') # partly same II
test('ab(X*)', 'abc') # partly same III
test('(a*)b(c*)', 'abc') # same
test('(a*)b(c*)', 'def') # different
test('(X*)b(c*)', 'abc') # partly same I
test('(a*)X(c*)', 'abc') # partly same II
test('(a*)b(X*)', 'abc') # partly same III
if "Loops" in sys.argv:
test('(abc)+', '(abc)+') # same
test('(abc)+', '(def)+') # different
test('(Xbc)+', '(abc)+') # partly same I
test('(aXc)+', '(abc)+') # partly same II
test('(abX)+', '(abc)+') # partly same III
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
25064,
198,
11748,
28686,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
28686,
13,
268,
2268,
14692,
10917,
6369,
62,
34219,
8973,
8,
198,
198,
11748,
627,
1069,
13,
15414,
13... | 2.145911 | 1,076 |
from django.test import TestCase
from django.contrib.auth.models import User
from .models import UserProfile,Post,Comment,Location,Category,Neighborhood,Company
# Create your tests here.
# Set up method
#Testing instance
# Set up method
#Testing instance
# Set up method
#Testing instance
#Setup method
#Testing instance
#Setup method
#Testing instance
#Setup method
#Testing instance
#Setup method
#Testing instance
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
764,
27530,
1330,
11787,
37046,
11,
6307,
11,
21357,
11,
14749,
11,
27313,
11,
46445,
2865,
2894,
11,... | 3.164474 | 152 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from .discriminative_listener import DiscriminativeListener
from ..networks import choose_architecture, layer_init, BetaVAE, reg_nan, hasnan
from ..utils import gumbel_softmax
use_decision_head = True
nbr_head_outputs = 2
not_always_argmax = True ; always_categorical_sampling = True
LogSoftmaxAfterLogOnSigmoid = False
bmm = False
normalize = True
inner_model = True
use_one_minus_max_prob = True
whole_sentence = False
packpadding = False
assume_padding_with_eos = True
stability_eps = 1e-8
scale_negative_logit = -1e4
use_stop_word_in_compute_sentence = False
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
198,
6738,
764,
15410,
3036,
259,
876,
62,
4868,
877,
1330,
8444,
3036,
259,
876,
33252,
198,
6738,
11485,
3262,
5225,
1... | 2.793103 | 232 |
""" Set of tools to stop continuously running script from a different process
via RPyC link.
Steps:
- the script class should have public method pause(),
which will be called by client through RPyC link
- once the script is instantiated, assign it as the module to
PauseService instance:
pause_service_instance.assign_module(script_instance)
- instantiate PauseClient in a different process
- call of PauseClient.pause() will call pause() method of the script
"""
from pylabnet.network.core.service_base import ServiceBase
from pylabnet.network.core.client_base import ClientBase
class PauseService(ServiceBase):
"""Makes PauseFlag instance visible on pylabnet network.
Once the script is instantiated, assign it as the module to
PauseService instance:
pause_service_instance.assign_module(script_instance)
"""
class PauseClient(ClientBase):
"""Client to send stop request to the script.
Call of client_instance.pause() will call pause() method of the script
"""
| [
37811,
5345,
286,
4899,
284,
2245,
17282,
2491,
4226,
422,
257,
1180,
1429,
198,
8869,
371,
20519,
34,
2792,
13,
198,
198,
8600,
82,
25,
198,
12,
262,
4226,
1398,
815,
423,
1171,
2446,
14985,
22784,
198,
220,
543,
481,
307,
1444,
41... | 3.494845 | 291 |
from django.urls import path, re_path
from . import views
app_name = "sales"
urlpatterns = [
re_path('^sales/$', views.SalesView.as_view(), name="sales"),
re_path('^sales/search/product/$', views.SearchProductView.as_view(), name="search"),
re_path('^sales/search/customer/$', views.SearchCustomerView.as_view(), name="customer-search"),
re_path(r'^sales/order-session/((?P<oid>\d+)/)?$', views.OrderSessionView.as_view(), name="order-session"),
path('sales/cart/<int:oid>/', views.CartView.as_view(), name="cart"),
path('sales/cart/<int:oid>/customer/', views.OrderCustomerView.as_view(), name="order-customer"),
path('sales/payment/<int:iid>/', views.PaymentView.as_view(), name="payment"),
re_path(r'^sales/order/((?P<oid>\d+)/)?$', views.OrderView.as_view(), name="order"),
re_path(r'^invoices/$', views.InvoiceListView.as_view(), name="list-invoices"),
path('invoices/invoice/<int:pk>/', views.InvoiceDetailUpdateView.as_view(), name="invoice"),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
302,
62,
6978,
198,
6738,
764,
1330,
5009,
198,
198,
1324,
62,
3672,
796,
366,
82,
2040,
1,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
302,
62,
6978,
10786,
61,
82,
2040,
... | 2.522843 | 394 |
# -*- coding: utf-8 -*-
"""Project schema."""
from datetime import datetime
from typing import List, Optional
from pydantic import BaseModel
from projects.schemas.deployment import Deployment
from projects.schemas.experiment import Experiment
from projects.utils import to_camel_case
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
16775,
32815,
526,
15931,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
19720,
1330,
7343,
11,
32233,
198,
198,
6738,
279,
5173,
5109,
1330,
7308,
17633,
198,... | 3.464286 | 84 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from utils.data_reader import read_data ,get_data_for_bert
from utils import constant
from utils.utils import getMetrics
from models.transformer import Encoder
from models.lstm_model import HLstmModel
from models.common_layer import NoamOpt, Attention
import argparse
import collections
import logging
import json
import re
from tqdm import tqdm, trange
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler, RandomSampler
from torch.optim import Adam
from pytorch_pretrained_bert.tokenization import convert_to_unicode, BertTokenizer
from pytorch_pretrained_bert.modeling import BertModel
from pytorch_pretrained_bert.optimization import BertAdam
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
class InputFeatures(object):
"""A single set of features of data."""
def read_examples(data, no_label = False):
"""Read a list of `InputExample`s from an input file."""
examples = []
if no_label:
for id, sent in zip(*data):
examples.append(
InputExample(unique_id=convert_to_unicode(str(id)),
text_a=convert_to_unicode(sent[0]),
text_b=convert_to_unicode(sent[1]),
text_c=convert_to_unicode(sent[2]),
label = convert_to_unicode('others')))
else:
for id, sent, lab in zip(*data):
examples.append(
InputExample(unique_id=convert_to_unicode(str(id)),
text_a=convert_to_unicode(sent[0]),
text_b=convert_to_unicode(sent[1]),
text_c=convert_to_unicode(sent[2]),
label = convert_to_unicode(lab)))
return examples
def _truncate_seq_pair(tokens, max_length):
"""Truncates a sequence pair in place to the maximum length."""
while True:
total_length = len(tokens)
if total_length <= max_length:
break
tokens.pop()
def convert_examples_to_features(examples, seq_length, tokenizer, hier=True):
"""Loads a data file into a list of `InputBatch`s."""
features = []
label_map = {"others":0, "happy":1, "sad":2, "angry":3}
total_tokens = 0
unk_tokens = 0
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambigiously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
if hier:
for (ex_index, example) in enumerate(examples):
tokens_a, input_ids_a, input_mask_a, input_type_ids_a, total_tokens, unk_tokens = _convert_one(ex_index, example.text_a, seq_length, tokenizer, total_tokens, unk_tokens)
tokens_b, input_ids_b, input_mask_b, input_type_ids_b, total_tokens, unk_tokens = _convert_one(ex_index, example.text_b, seq_length, tokenizer, total_tokens, unk_tokens)
tokens_c, input_ids_c, input_mask_c, input_type_ids_c, total_tokens, unk_tokens = _convert_one(ex_index, example.text_c, seq_length, tokenizer, total_tokens, unk_tokens)
features.append(
InputFeatures(
unique_id=example.unique_id,
tokens=[tokens_a,tokens_b,tokens_c],
input_ids=[input_ids_a,input_ids_b,input_ids_c],
input_mask=[input_mask_a,input_mask_b,input_mask_c],
input_type_ids=[input_type_ids_a,input_type_ids_b,input_type_ids_c],
label_id = label_map[example.label]
))
print("============================================================")
print('unkonw tokens percentage:{}'.format(unk_tokens/total_tokens))
print("============================================================")
return features
else:
for (ex_index, example) in enumerate(examples):
tokens, input_ids, input_mask, input_type_ids = _convert_one(ex_index, example.text_a+example.text_b+example.text_c, seq_length, tokenizer)
features.append(
InputFeatures(
unique_id=example.unique_id,
tokens=tokens,
input_ids=input_ids,
input_mask=input_mask,
input_type_ids=input_type_ids,
label_id = label_map[example.label]
))
return features
if __name__ == "__main__":
if constant.test:
predict()
else:
main() | [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
28686,
198,
6738,
3384,
4487,
13,
7890,
62,
46862,
1330,
1100,
62,
7890,
837,
1136... | 2.181506 | 2,617 |
import gym
import pickle
from mani_skill import env
env = gym.make('OpenCabinetDoor-v0')
# full environment list can be found in available_environments.txt
env.set_env_mode(obs_mode='pointcloud', reward_type='sparse')
# obs_mode can be 'state', 'pointcloud' or 'rgbd'
# reward_type can be 'sparse' or 'dense'
print(env.observation_space) # this shows the structure of the observation, openai gym's format
print(env.action_space) # this shows the action space, openai gym's format
# for level_idx in range(0, 5): # level_idx is a random seed
# level_idx = 1002
num = 10
for level_idx in range(1003, 1010):
obs = env.reset(level=level_idx)
print('#### Level {:d}'.format(level_idx))
for i_step in range(1000000):
env.render('human') # a display is required to use this function, rendering will slower the running speed
# action = env.action_space.sample()
action = [0 for _ in range(13)]
action[0] = 1
action[1] = -1
# print("action")
# print(action)
obs, reward, done, info = env.step(action) # take a random action
with open(f'temp/{level_idx}_{i_step}.pkl', 'wb') as handle:
pickle.dump(obs, handle, protocol=pickle.HIGHEST_PROTOCOL)
if i_step >= num:
break
env.close()
| [
11748,
11550,
198,
11748,
2298,
293,
198,
6738,
582,
72,
62,
42401,
1330,
17365,
198,
198,
24330,
796,
11550,
13,
15883,
10786,
11505,
34,
6014,
316,
35,
2675,
12,
85,
15,
11537,
198,
2,
1336,
2858,
1351,
460,
307,
1043,
287,
1695,
... | 2.521318 | 516 |
#===----------------------------------------------------------------------===#
#
# Peloton
#
# model_generator.py
#
# Identification: src/brain/modelgen/model_generator.py
#
# Copyright (c) 2015-2018, Carnegie Mellon University Database Group
#
#===----------------------------------------------------------------------===#
import json
from LSTM_Model import LSTM_Model
import sys
AVAILABLE_MODELS = {
"LSTM": LSTM_Model
}
REL_SETTINGS_PATH = sys.argv[1]
WRITE_GRAPH_PATH = sys.argv[2]
if __name__ == '__main__':
generate_models() | [
2,
18604,
10097,
23031,
18604,
2,
198,
2,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
12903,
18970,
198,
2,
198,
2,
2746,
62,
8612,
1352,
13,
... | 2.968586 | 191 |
from cffi import FFI
# On macOS dynamic libraries have the .dylib extension. Windows has .dll and linux has .so
# You can obtain libcuckoofilter_c.xxx.{dylib, so, dll} either by downloading it from the latest Release on GitHub
# or by compiling the library using the Zig compiler:
#
# $ zig build-lib -dynamic --release-fast src/cuckoofilter_c.zig
#
# TODO: change the next line to the correct path wher libcuckoofilter_c is stored
dynamic_library_path = 'path/to/libcuckoofilter_c.0.0.0.dylib'
# NOTE: Python's CFFI module also supports compile-time linking to a shared object file,
# which is the preferred method over what is shown in this brief example.
ffi = FFI()
# pycparser unfortunately does nor support directives so
# we can't just .read() the headerfile directly, and must
# instead copypaste all definitions manually.
ffi.cdef("""
typedef int8_t int_least8_t;
typedef int16_t int_least16_t;
typedef int32_t int_least32_t;
typedef int64_t int_least64_t;
typedef uint8_t uint_least8_t;
typedef uint16_t uint_least16_t;
typedef uint32_t uint_least32_t;
typedef uint64_t uint_least64_t;
typedef long long intmax_t;
typedef unsigned long long uintmax_t;
struct Filter8 {
uint8_t cf[56];
};
struct Filter16 {
uint8_t cf[56];
};
struct Filter32 {
uint8_t cf[56];
};
void seed_default_prng(uint64_t seed);
uintptr_t cf_size_for8(uintptr_t min_capacity);
uintptr_t cf_size_for16(uintptr_t min_capacity);
uintptr_t cf_size_for32(uintptr_t min_capacity);
uintptr_t cf_size_for_exactly8(uintptr_t min_capacity);
uintptr_t cf_size_for_exactly16(uintptr_t min_capacity);
uintptr_t cf_size_for_exactly32(uintptr_t min_capacity);
uintptr_t cf_capacity8(uintptr_t size);
uintptr_t cf_capacity16(uintptr_t size);
uintptr_t cf_capacity32(uintptr_t size);
int cf_init8(uint8_t * memory, uintptr_t size, struct Filter8 * cf);
int cf_init16(uint8_t * memory, uintptr_t size, struct Filter16 * cf);
int cf_init32(uint8_t * memory, uintptr_t size, struct Filter32 * cf);
int cf_count8(struct Filter8 * cf, uintptr_t * res);
int cf_count16(struct Filter16 * cf, uintptr_t * res);
int cf_count32(struct Filter32 * cf, uintptr_t * res);
int cf_maybe_contains8(struct Filter8 * cf, uint64_t hash, uint8_t fp, int * res);
int cf_maybe_contains16(struct Filter16 * cf, uint64_t hash, uint16_t fp, int * res);
int cf_maybe_contains32(struct Filter32 * cf, uint64_t hash, uint32_t fp, int * res);
int cf_remove8(struct Filter8 * cf, uint64_t hash, uint8_t fp);
int cf_remove16(struct Filter16 * cf, uint64_t hash, uint16_t fp);
int cf_remove32(struct Filter32 * cf, uint64_t hash, uint32_t fp);
int cf_add8(struct Filter8 * cf, uint64_t hash, uint8_t fp);
int cf_add16(struct Filter16 * cf, uint64_t hash, uint16_t fp);
int cf_add32(struct Filter32 * cf, uint64_t hash, uint32_t fp);
int cf_is_broken8(struct Filter8 * cf);
int cf_is_broken16(struct Filter16 * cf);
int cf_is_broken32(struct Filter32 * cf);
int cf_is_toofull8(struct Filter8 * cf);
int cf_is_toofull16(struct Filter16 * cf);
int cf_is_toofull32(struct Filter32 * cf);
int cf_fix_toofull8(struct Filter8 * cf);
int cf_fix_toofull16(struct Filter16 * cf);
int cf_fix_toofull32(struct Filter32 * cf);
int cf_restore_memory8(struct Filter8 * cf, uint8_t * memory, uintptr_t memory_len);
int cf_restore_memory16(struct Filter16 * cf, uint8_t * memory, uintptr_t memory_len);
int cf_restore_memory32(struct Filter32 * cf, uint8_t * memory, uintptr_t memory_len);
""")
cuckoo = ffi.dlopen(dynamic_library_path)
# Instantiate memory for a new filter:
cf8 = ffi.new("struct Filter8 *")
# Instantiate memory for the filter's buckets:
memory = ffi.new("uint8_t[]", 1024)
# Initialize the filter:
err = cuckoo.cf_init8(memory, 1024, cf8)
assert err == 0
# Add a fingerprint:
err = cuckoo.cf_add8(cf8, 0, ord('a'))
assert err == 0
# Check its presence:
found = ffi.new("int *")
err = cuckoo.cf_maybe_contains8(cf8, 0, ord('a'), found)
assert err == 0
print("Found?", found[0]) # => 1
# Non existing item
err = cuckoo.cf_maybe_contains8(cf8, 0, 0, found)
assert err == 0
print("Found?", found[0]) # => 0
| [
6738,
269,
487,
72,
1330,
376,
11674,
628,
198,
2,
1550,
40017,
8925,
12782,
423,
262,
764,
31739,
7552,
13,
3964,
468,
764,
12736,
290,
32639,
468,
764,
568,
198,
2,
921,
460,
7330,
9195,
66,
1347,
37711,
346,
353,
62,
66,
13,
31... | 2.514886 | 1,713 |
#!/usr/bin/env python
# Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import argparse
import datetime
import math
from benchmark import run_benchmark
import cunumeric as np
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-b", "--batch", type=int, default=32, dest="batch", help="batch size"
)
parser.add_argument(
"--hidden", type=int, default=10, dest="hidden", help="hidden size"
)
parser.add_argument(
"-s",
"--sentence",
type=int,
default=4,
dest="sentence",
help="sentence length",
)
parser.add_argument(
"-w", "--word", type=int, default=10, dest="word", help="word size"
)
parser.add_argument(
"-t",
"--time",
dest="timing",
action="store_true",
help="perform timing",
)
parser.add_argument(
"--benchmark",
type=int,
default=1,
dest="benchmark",
help="number of times to benchmark this application (default 1 - "
"normal execution)",
)
args = parser.parse_args()
run_benchmark(
run_lstm,
args.benchmark,
"LSTM Forward",
(args.batch, args.hidden, args.sentence, args.word, args.timing),
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
15069,
33448,
15127,
10501,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
... | 2.563786 | 729 |
"""
Build a dummy index, update it, and run tests on it.
"""
import os
import shutil
import tempfile
from subprocess import check_call, CalledProcessError
import captions
from lib.common import get_docs_and_lexicon
TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'test-small.tar.gz')
BUILD_INDEX_SCRIPT = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'..', 'scripts', 'build_index.py')
UPDATE_INDEX_SCRIPT = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'..', 'scripts', 'update_index.py')
| [
37811,
198,
15580,
257,
31548,
6376,
11,
4296,
340,
11,
290,
1057,
5254,
319,
340,
13,
198,
37811,
198,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
20218,
7753,
198,
6738,
850,
14681,
1330,
2198,
62,
13345,
11,
34099,
18709,
... | 2.392713 | 247 |
import pygame
from pygame.locals import *
import time
import random
size = 40
# place the block at a certain position on the screen
if __name__ == "__main__":
game = Game()
game.run()
| [
11748,
12972,
6057,
198,
6738,
12972,
6057,
13,
17946,
874,
1330,
1635,
198,
11748,
640,
198,
11748,
4738,
198,
198,
7857,
796,
2319,
628,
220,
220,
220,
1303,
1295,
262,
2512,
379,
257,
1728,
2292,
319,
262,
3159,
628,
198,
361,
1159... | 2.985075 | 67 |
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from info import create_app, db
app = create_app("development")
# class Config(object):
# #DEBUG = True
# SQLALCHEMY_DATABASE_URI = "mysql://root:mysql@127.0.0.1:3306/python22"
# SQLALCHEMY_TRACK_MODIFICATIONS = False
# SECRET_KEY = "ASJFJKSA1465879"
# # redis数据库配置信息
# REDIS_HOST = "127.0.0.1"
# REDIS_PORT = 6379
#
# # 将session 存储的数据从内存转移到redis中存储的配置信息中去
# SESSION_TYPE = "redis"
# SESSION_REDIS = StrictRedis(REDIS_HOST, REDIS_PORT)
# SESSION_USE_SIGNER = True
# # 设置数据不需要永久保存, 而是根据我们设置的过期时长进行调整
# SESSION_PERMANENT = False
#
# PERMANENT_SESSION_LIFETIME = 86400 # 24小时
# app = Flask(__name__)
# config_class = config_dict["development"]
# app.config.from_object(config_class)
#
#
# db = SQLAlchemy(app)
#
# redis_store = StrictRedis(host=config_class.REDIS_HOST, port=config_class.REDIS_PORT)
# # 给项目添加防护机制
# CSRFProtect(app)
#
# Session(app)
# 创建管理类
manger = Manager(app)
# 7. 创建数据库迁移对象
Migrate(app, db)
# 8 添加迁移命令
manger.add_command("db", MigrateCommand)
if __name__ == '__main__':
# print(app.url_map)
# app.run(debug=True)
manger.run()
| [
6738,
42903,
62,
12048,
1330,
9142,
198,
6738,
42903,
62,
76,
42175,
1330,
337,
42175,
11,
337,
42175,
21575,
198,
6738,
7508,
1330,
2251,
62,
1324,
11,
20613,
628,
198,
1324,
796,
2251,
62,
1324,
7203,
31267,
4943,
628,
198,
2,
1398,... | 1.676389 | 720 |
import logging
from collections import OrderedDict
from copy import deepcopy
from datetime import datetime, timedelta
from pathlib import Path
from time import sleep
import cv2
import numpy as np
import pandas as pd
from PyQt5.QtCore import Qt, QTimer, pyqtSlot
from PyQt5.QtGui import QColor, QImage, QPixmap
from PyQt5.QtWidgets import QMessageBox, QStyle, QWidget
from .view import VideoAppViewer
| [
11748,
18931,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
6738,
4866,
1330,
2769,
30073,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
640,
1330,
3993,
198,
198,
11748,
... | 3.1 | 130 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 1 12:07:15 2021
@author: xavieraguas
"""
import sqlalchemy as db
DBUSER = 'postgres'
DBPASS = 'ivan'
DBHOST = 'localhost'
DBPORT = '5432'
DBNAME = 'test'
engine = db.create_engine('postgresql+psycopg2://{user}:{passwd}@{host}:{port}/{db}'.format(user=DBUSER, passwd=DBPASS, host=DBHOST, port=DBPORT, db=DBNAME))
connection = engine.connect()
metadata = db.MetaData()
# print(engine.tables_name())
innovation_table = db.Table('iniciativas', metadata, autoload=True, autoload_with=engine)
print(innovation_table.columns.keys())
print(repr(metadata.tables['iniciativas'])) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3825,
2447,
220,
352,
1105,
25,
2998,
25,
1314,
33448,
198,
198,
31,
9800,
25,
2124,
1949... | 2.579365 | 252 |
from flask import current_app, request
from typing import Optional
from zeus.constants import Result, Status
from zeus.models import Build, FileCoverage, Repository
from zeus.utils.trees import build_tree
from zeus.vcs import vcs_client
from .base_repository import BaseRepositoryResource
from ..schemas import FileCoverageSchema
SEPERATOR = "/"
filecoverage_schema = FileCoverageSchema(many=False)
| [
6738,
42903,
1330,
1459,
62,
1324,
11,
2581,
198,
6738,
19720,
1330,
32233,
198,
198,
6738,
1976,
27650,
13,
9979,
1187,
1330,
25414,
11,
12678,
198,
6738,
1976,
27650,
13,
27530,
1330,
10934,
11,
9220,
7222,
1857,
11,
1432,
13264,
198,... | 3.394958 | 119 |
str= ' this | is | a string '
s1, s2, s3 = str.split('|', 2)
print('>%s<' % s1.strip())
print('>%s<' % s2)
print('>%s<' % s3)
s = str.split('|', 2)
print(s)
| [
2536,
28,
705,
428,
930,
318,
930,
257,
4731,
220,
705,
198,
198,
82,
16,
11,
264,
17,
11,
264,
18,
796,
965,
13,
35312,
10786,
91,
3256,
362,
8,
198,
198,
4798,
10786,
29,
4,
82,
27,
6,
4064,
264,
16,
13,
36311,
28955,
198,
... | 1.862069 | 87 |
# Bamboo website specific fields
BAMBOO_LOGIN = "https://your_organization.bamboohr.com/login.php"
CLOCK_OUT_BTN = "Clock Out"
CLOCK_IN_BTN = "Clock In"
STANDARD_LOGIN = "Log in with Email and Password"
LOGIN_BTN = "Log In"
LOGIN_ID = "#lemail"
PWD_ID = "#password"
# user specific constants
MAX_SLEEP = 1200 # 20 minutes
BROWSER = "chrome"
# env variables in which usr/pwd are stored
BAMBOO_USR = "BAMBOO_USR"
BAMBOO_ENV = "BAMBOO_PWD"
| [
2,
347,
27708,
3052,
2176,
7032,
198,
198,
33,
2390,
33,
6684,
62,
25294,
1268,
796,
366,
5450,
1378,
14108,
62,
9971,
1634,
13,
65,
22651,
1219,
81,
13,
785,
14,
38235,
13,
10121,
1,
198,
198,
5097,
11290,
62,
12425,
62,
19313,
4... | 2.352632 | 190 |
# ballot/urls.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.conf.urls import url
from . import views_admin
urlpatterns = [
# views_admin
url(r'^import_ballot_items/$',
views_admin.ballot_items_import_from_master_server_view,
name='ballot_items_import_from_master_server'),
url(r'^import_ballot_returned/$',
views_admin.ballot_returned_import_from_master_server_view,
name='ballot_returned_import_from_master_server'),
url(r'^(?P<ballot_returned_id>[0-9]+)/list_edit/$', views_admin.ballot_item_list_edit_view,
name='ballot_item_list_edit'),
url(r'^list_edit_process/$', views_admin.ballot_item_list_edit_process_view, name='ballot_item_list_edit_process'),
]
| [
2,
11100,
14,
6371,
82,
13,
9078,
198,
2,
347,
2909,
284,
345,
416,
775,
19175,
13,
1355,
922,
13,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
1... | 2.360502 | 319 |
import os
import time
import datetime
import random
from bs4 import BeautifulSoup
from openpyxl import load_workbook
from openpyxl import Workbook
from selenium import webdriver
data_xls_poz_map = {
1: {'name': '用户名', 'code': 'user_name'},
2: {'name': '信用等级', 'code': 'credit_level'},
3: {'name': '贷款金额', 'code': 'amount'},
4: {'name': '还款期限', 'code': 'term'}
}
TYPE_KPT = 4 # LoanCategoryId 4:平衡型,8:保守型,5:进取型
TYPE_SORT = 1 # 0不排序,1降序,2升序
file_name = 'data_history.xlsx' # 存储数据文件名
today = datetime.date.today() # 启动date
now = datetime.datetime.now() # 启动datetime
TYPE_KPT_MAP = {4: '平衡型', 8: '保守型', 5: '进取型'} # 类型映射Map
row = 2 # 表格数据开始行
login_url = 'https://ac.ppdai.com/User/Login' # 登陆链接
base_url = 'https://invest.ppdai.com' # 基础链接前缀
browser = webdriver.Firefox() # 使用火狐浏览器
# 人工登陆
# html转换
# url构造器 获得爬取链接
# 获取链接页面所包含的详情页链接并封装到List
# 获取总页数 [用于修正]
# 详情页信息提取
# 爬取逻辑
# 历史成功借款链接
# 历史成功借款信息
# 输出数据到excel
# Main method
if __name__ == '__main__':
try:
while login():
print('等待登陆')
while True:
data_spider()
finally:
browser.close()
| [
11748,
28686,
198,
11748,
640,
198,
11748,
4818,
8079,
198,
11748,
4738,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
6738,
1280,
9078,
87,
75,
1330,
3440,
62,
1818,
2070,
198,
6738,
1280,
9078,
87,
75,
1330,
5521,
2070,
198,
... | 1.375758 | 825 |
from flask import Flask
from flask_cors import CORS
from app.config import properties
from app.api.issues import issues
app = Flask(__name__)
app.register_blueprint(issues, url_prefix=f'/api/issues')
CORS(app, resources={r"/*": {"origins": properties.AUTHORIZED_ORIGIN}}, supports_credentials=True)
if __name__ == '__main__':
app.run(debug=properties.DEBUG) | [
6738,
42903,
1330,
46947,
198,
6738,
42903,
62,
66,
669,
1330,
327,
20673,
198,
6738,
598,
13,
11250,
1330,
6608,
198,
6738,
598,
13,
15042,
13,
37165,
1330,
2428,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
1324,
13,
302... | 2.952 | 125 |
from flask_restful import Resource, Api
# Importamos el contenido de Service
from service.service_sql.service import Service
# Importamos el make_response
from flask import make_response, jsonify
| [
6738,
42903,
62,
2118,
913,
1330,
20857,
11,
5949,
72,
198,
198,
2,
17267,
321,
418,
1288,
542,
268,
17305,
390,
4809,
198,
6738,
2139,
13,
15271,
62,
25410,
13,
15271,
1330,
4809,
198,
198,
2,
17267,
321,
418,
1288,
787,
62,
26209,... | 3.685185 | 54 |
__all__ = [
'dependency_graph',
'discoverable_module',
'module_initialization_container',
'module_manager'
] | [
834,
439,
834,
796,
685,
198,
220,
220,
220,
705,
45841,
1387,
62,
34960,
3256,
198,
220,
220,
220,
705,
67,
29392,
540,
62,
21412,
3256,
198,
220,
220,
220,
705,
21412,
62,
36733,
1634,
62,
34924,
3256,
198,
220,
220,
220,
705,
2... | 2.530612 | 49 |
from sys import argv
if __name__ == '__main__':
with open(argv[1], 'r') as f:
lines = f.readlines()
main(lines)
| [
6738,
25064,
1330,
1822,
85,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
351,
1280,
7,
853,
85,
58,
16,
4357,
705,
81,
11537,
355,
277,
25,
198,
220,
220,
220,
220,
220,
220,
220,
3951,
... | 2.186441 | 59 |
import subprocess
subprocess.call(['pyinstaller', '/home/nbroyles/PycharmProjects/PyinstallerWineCompile/main.py', '--onefile', '-n', 'pycompile']) | [
11748,
850,
14681,
198,
198,
7266,
14681,
13,
13345,
7,
17816,
9078,
17350,
263,
3256,
31051,
11195,
14,
46803,
3287,
829,
14,
20519,
354,
1670,
16775,
82,
14,
20519,
17350,
263,
54,
500,
7293,
576,
14,
12417,
13,
9078,
3256,
705,
438... | 2.690909 | 55 |
# Created by Egor Kostan.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
from tests.config import Config
from utils.driver import Driver
from element_object_models.element import Element
from page_object_models.base_page_model import BasePageModel
from page_locators.admin_page_locator import AdminPageLocator
from expected_results.page_content.admin_page_content import AdminPageContent
class AdminPageModel(BasePageModel):
'''
The page object pattern intends creating an object for each web page.
By following this technique a layer of separation between the test code and technical implementation is created.
'''
def hit_initialize_button(self):
'''
Hit Initialize button
:return:
'''
element = Element(self.driver, self.explicit_wait_time, AdminPageLocator.INITIALIZE_BUTTON)
element.click_on()
return None
def hit_clean_button(self):
'''
Hit Clean button
:return:
'''
element = Element(self.driver, self.explicit_wait_time, AdminPageLocator.CLEAN_BUTTON)
element.click_on()
return None
| [
2,
220,
15622,
416,
412,
7053,
509,
455,
272,
13,
198,
2,
220,
21722,
25,
3740,
1378,
12567,
13,
785,
14,
1134,
455,
272,
198,
2,
220,
27133,
25,
3740,
1378,
2503,
13,
25614,
259,
13,
785,
14,
259,
14,
1533,
273,
12,
74,
455,
... | 3.130814 | 344 |
'''
python -m baselines.run
--alg=deepq
--env=BreakoutNoFrameskip-v4
--num_timesteps=0
--load_path=models/Breakout_1e7/baseline
--play
'''
import os
game_name = 'Tennis'
model_timesteps = '1e6'
methods = os.listdir('models/{}_{}'.format(game_name, model_timesteps))
f = open('test_cmd_{}_{}.txt'.format(game_name, model_timesteps), 'w')
# for method in methods:
# f.write('python -m baselines.run ' +
# '--alg=deepq ' +
# '--env=KungFuMasterNoFrameskip-v4 ' +
# '--num_timesteps=0 ' +
# '--load_path=models/KungFuMaster_1e6/' + method + ' ' +
# '--play & ')
for i in range(len(methods)):
f.write('python -m baselines.run2 sos ' + str(i) + '&')
f.close()
| [
7061,
6,
198,
29412,
532,
76,
1615,
20655,
13,
5143,
198,
438,
14016,
28,
22089,
80,
198,
438,
24330,
28,
31737,
448,
2949,
35439,
74,
541,
12,
85,
19,
198,
438,
22510,
62,
16514,
395,
25386,
28,
15,
198,
438,
2220,
62,
6978,
28,
... | 2.062857 | 350 |
"""**Defines models, including transition, observation, reward, policy; Also
includes additional components such as the sensor model and grid map.**"""
| [
37811,
1174,
7469,
1127,
4981,
11,
1390,
6801,
11,
13432,
11,
6721,
11,
2450,
26,
4418,
198,
42813,
3224,
6805,
884,
355,
262,
12694,
2746,
290,
10706,
3975,
13,
1174,
37811,
198
] | 4.75 | 32 |
import numpy as np
import matplotlib.pyplot as plt
from enum import Enum
import pycwt
import os
import json
import pandas
import tensorflow as tf
# use Bay Walker at the beginning of the training?
#def is_state_vars_set_to_zero():
# return False
# updates the buffer so it contains the latest data
# returns only valid memory
# saves data to hidden section
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
33829,
1330,
2039,
388,
198,
11748,
12972,
66,
46569,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
19798,
292,
198,
11748,
11192,... | 3.142857 | 126 |
import collections | [
11748,
17268
] | 9 | 2 |
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file")
| [
2220,
7203,
31,
65,
41319,
62,
31391,
1003,
31391,
14,
11249,
62,
4299,
82,
14,
260,
7501,
25,
4023,
13,
65,
48274,
1600,
366,
4023,
62,
17474,
1600,
366,
4023,
62,
7753,
4943,
198
] | 2.411765 | 34 |
from .__about__ import *
from .compare import *
from .constants import *
from .filter import *
from .metadata import *
from .misc import *
__all__ = [
*__about__.__all__,
*compare.__all__,
*constants.__all__,
*filter.__all__,
*metadata.__all__,
*misc.__all__,
]
| [
6738,
764,
834,
10755,
834,
1330,
1635,
198,
6738,
764,
5589,
533,
1330,
1635,
198,
6738,
764,
9979,
1187,
1330,
1635,
198,
6738,
764,
24455,
1330,
1635,
198,
6738,
764,
38993,
1330,
1635,
198,
6738,
764,
44374,
1330,
1635,
198,
198,
... | 2.514019 | 107 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.db import models
| [
2,
15069,
1853,
3012,
3457,
13,
1439,
6923,
33876,
13,
201,
198,
2,
201,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
201,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
35... | 3.592179 | 179 |
import requests
try:
response = requests.get("https://raw.githubusercontent.com/danhab05/bacfrancais-ozar/master/msg.txt")
msg = response.text
except Exception as e:
print(e) | [
11748,
7007,
198,
28311,
25,
198,
220,
220,
220,
2882,
796,
7007,
13,
1136,
7203,
5450,
1378,
1831,
13,
12567,
43667,
13,
785,
14,
25604,
5976,
2713,
14,
65,
330,
8310,
1192,
15152,
12,
8590,
283,
14,
9866,
14,
19662,
13,
14116,
494... | 2.818182 | 66 |
import GameManager
if __name__ == '__main__':
GameManager.GameManager().game_loop()
| [
11748,
3776,
13511,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
3776,
13511,
13,
8777,
13511,
22446,
6057,
62,
26268,
3419,
628,
198
] | 2.935484 | 31 |
from plex_metadata.agents.main import Agents
| [
6738,
279,
2588,
62,
38993,
13,
49638,
13,
12417,
1330,
28295,
198
] | 3.75 | 12 |
pytest_plugins = "_pytest.pytester"
| [
9078,
9288,
62,
37390,
796,
45434,
9078,
9288,
13,
9078,
4879,
353,
1,
198
] | 2.571429 | 14 |
import subprocess as sp
import sys
try:
res_path = sys.argv[1]
except:
print "Usage: {} karonte_result_file".format(sys.argv[0])
sys.exit(1)
o, e = exec_cmd("grep -r 'Sink' {} | sort | cut -d\" \" -f1-5 |sort|uniq ".format(res_path))
counter = 1
alerts = [x for x in o.split('\n') if x]
for alert in alerts:
cmd = "grep -B8 -A10 -r \'{}\' {}".format(alert, res_path)
o, e = exec_cmd(cmd)
print "Alert " + str(counter)
print "=="
counter += 1
print o.split('===================== Start Info path =====================')[1].split(
'===================== End Info path =====================')[0]
print '==\n\n' | [
11748,
850,
14681,
355,
599,
198,
11748,
25064,
628,
198,
28311,
25,
198,
220,
220,
220,
581,
62,
6978,
796,
25064,
13,
853,
85,
58,
16,
60,
198,
16341,
25,
198,
220,
220,
220,
3601,
366,
28350,
25,
23884,
220,
479,
8045,
660,
62,... | 2.444444 | 270 |
from django.conf.urls import url
from views import request_certificate
from views import update_certificate
urlpatterns = [
url(r'^request_certificate$', request_certificate, name='request_certificate'),
url(r'^update_certificate$', update_certificate, name='update_certificate')
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
6738,
5009,
1330,
2581,
62,
22583,
22460,
198,
6738,
5009,
1330,
4296,
62,
22583,
22460,
628,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
19016,
7,
81,
6,
61,
25... | 3.244444 | 90 |
"""
LSTM regression on Actuator data.
"""
from __future__ import print_function
import numpy as np
np.random.seed(42)
# Keras
from keras.optimizers import Adagrad, Adam, SGD, RMSprop
from keras.callbacks import EarlyStopping
# Model assembling and executing
from kgp.utils.assemble import load_NN_configs, assemble
from kgp.utils.experiment import train
# Metrics
from kgp.metrics import root_mean_squared_error as RMSE
if __name__ == '__main__':
main()
| [
37811,
198,
43,
2257,
44,
20683,
319,
2191,
84,
1352,
1366,
13,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
299,
32152,
355,
45941,
198,
37659,
13,
25120,
13,
28826,
7,
3682,
8,
198,
198,
2,
1733... | 3.006452 | 155 |
from brownie import (
Registry,
accounts,
)
RINKEBY_RESOLVER_ADDRESS = "0x5713a9cCdB31fBa207Fc4Fac7ee398eab3ecB3A6"
| [
6738,
7586,
494,
1330,
357,
198,
220,
220,
220,
33432,
11,
198,
220,
220,
220,
5504,
11,
198,
8,
198,
198,
49,
1268,
7336,
17513,
62,
19535,
3535,
5959,
62,
2885,
7707,
7597,
796,
366,
15,
87,
3553,
1485,
64,
24,
66,
34,
36077,
... | 1.923077 | 65 |
import inspect
import unittest
import numpy as np
from ringity.ring_scores import (
gap_ring_score,
geometric_ring_score,
linear_ring_score,
amplitude_ring_score,
entropy_ring_score)
all_ring_scores = {
gap_ring_score,
geometric_ring_score,
linear_ring_score,
amplitude_ring_score,
entropy_ring_score
}
inf_ring_scores = {
geometric_ring_score,
amplitude_ring_score,
entropy_ring_score
}
if __name__ == '__main__':
unittest.main()
| [
11748,
10104,
198,
11748,
555,
715,
395,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
5858,
414,
13,
1806,
62,
1416,
2850,
1330,
357,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 1.803279 | 366 |
#!/usr/bin/env python
import cleanup_management
import argparse
import datetime
import os
import re
import sys
import time
try:
from management_tools import loggers
from management_tools import fs_analysis as fsa
# Check for MT version 1.8.1
if not "bytes" in dir(fsa.Filesystem):
raise ImportError
except ImportError as e:
print("You need version 1.8.1 or greater of the 'Management Tools' module to be installed first.")
print("https://github.com/univ-of-utah-marriott-library-apple/management_tools")
raise e
def query_yes_no(question):
"""
Asks a user a yes/no question and expects a valid response.
:param question: The prompt to give to the user.
:return: A boolean; True for 'yes', False for 'no'.
"""
valid = {
'yes': True, 'ye': True, 'y': True,
'no': False, 'n': False
}
# Until they give valid input, loop and keep asking the question.
while True:
# Note the comma at the end. We don't want a newline.
print("{} [y/N] ".format(question)),
choice = raw_input().lower()
if choice == '':
return False
elif choice in valid:
return valid[choice]
else:
print("Please respond with 'yes' or 'no'.")
def version():
    """
    :return: The version information for this program.
    """
    return "%s, version %s\n" % ('cleanup_manager', cleanup_management.__version__)
def usage():
    """
    Print the program's version banner followed by the full usage/help
    text to stdout.
    """
    # Version banner first, then the static help text (formatted only to
    # inject the program name).
    print(version())
    print('''\
usage: {name} [-hvnV] [-l log] [-k date] [-f format] target
Delete old items from a specific directory, but only at a top-level granularity.
    -h, --help
        Print this help message and quit.
    -v, --version
        Print the version information and quit.
    -n, --no-log
        Do not output log events to file.
    -V, --verbose
        Increase verbosity to see more information. Two levels of verbosity are
        supported.
    --skip-prompt
        Skips the confirmation prompt. Warning: this will lead to lots of
        deletion.
    -l log, --log-dest log
        Redirect log file output to 'log'.
    -k date, --keep-after date
        The date to compare file modification times to. Anything before this
        date will be removed.
        default: seven days prior to today, rounded down to midnight
    -d format, --date-format format
        The date format, given as a Python datetime.datetime.strptime()-
        compatible format.
        default: '%Y-%m-%d'
    -f size, --freeup size
        The amount of space to attempt to free up.
    -t trigger, --dir-trigger trigger
        Sets a specific file to look for in the top-level of directories inside
        the specified target directory. If this file exists, its timestamp will
        be used in place of the directory's timestamp to determine removal. If
        the file does not exist, the timestamp for the directory will be found
        through the usual method.
        (Only has an effect with date-based deletion.)
    --delete-oldest-first
        When deleting by size, older items are deleted first to free up the
        designated `--freeup` space.
        This is the default action when using `--freeup`.
    --delete-largest-first
        When deleting by size, larger items are deleted first to free up the
        designated `--freeup` space.
    --overflow
        When deleting by size, this flag will ensure that at the very least the
        amount designated will be deleted. (The default action is to delete up
        to - but not more than - the amount.) This is useful when your top-level
        directory only contains items that are greater in size than the target
        free space amount.
    target
        The top-level directory to delete from within.
Cleanup Manager is a simple script to help you delete items from folders en
masse.
Originally conceived to delete user home directories in student labs at a
university, Cleanup Manager takes a look at a directory's contents and checks
them recursively for the most recently-modified timestamp. This timestamp is
compared against the keep-after date, and any item with a timestamp older than
that date is deleted.
KEEP-AFTER DATE
    The date can be either absolute or relative. Absolute dates can be given
    with a format to indicate how you want it parsed.
    Relative dates can be given as:
        NNNXr
    where "N" is an integer number, "X" represents a shorthand form of
    the time scale, i.e.:
            M - minutes
            H - hours
            d - days
            m - months
            y - years
    and "r" or "R" indicates that the date should be rounded back to
    the previous midnight.
    Note: When deleting directories, Cleanup Manager will search the full
    contents of a directory to find the file with the most recent timestamp.
    This ensures that folders aren't deleted whose contents were modified after
    the 'keep-after' date even if the folder's own modification timestamp is
    from before that date.
    Example
        To delete everything older than four days ago:
            cleanup_manager.py -k 4d /path/to/target
FREEUP SPACE
    You can specify an amount of space to attempt to free up in the target
    directory. This size can be specified in one of three ways:
        1. A number of bytes to free up on the drive.
        2. A number of bytes to have free on the drive (this is different).
        3. A percentage representing the amount of free space you want on the
           drive.
    These can be inputted as (for example):
        1. 10g - will attempt to remote 10 gigabytes of data
        2. 10gf - 10 gigabytes will be free after cleanup runs
        3. 10 - 10% of the drive will be free after cleanup
    There are five supported byte modifiers to specify explicit sizes:
        b: bytes
        k: kilobytes - 1024 bytes
        m: megabytes - 1024 kilobytes
        g: gigabytes - 1024 megabytes
        t: terabytes - 1024 gigabytes
    Example
        To delete up to 15 gigabytes of data within the target directory with
        preference given to older items:
            cleanup_manager.py -f 15g /path/to/target
        To attempt to have 500 megabytes free on your old hard drive with preference
        given to larger items:
            cleanup_manager.py -f 500mf --delete-largest-first /path/to/target
        To clear up 30% of the drive where 'target' exists by deleting items inside
        of 'target' (with preference to older items):
            cleanup_manager.py -f 30 /path/to/target
LINKS
    All links existing within the directory structure are checked for whether
    they point internally; that is, if a link points to a file or folder that is
    going to be deleted, or if it is in a folder that is going to be deleted,
    the link is unmade. However, this program does not check the rest of the
    system to ensure that external links do not point inside a deleted
    directory.\
'''.format(name='cleanup_manager'))
def date_to_unix(date, date_format):
    """
    Convert a date string to a local Unix timestamp (non-UTC).

    The date can be either absolute or relative. Absolute dates are parsed
    with 'date_format' (a datetime.datetime.strptime() format). Relative
    dates take the form:

        NNNXr

    where "NNN" is an integer amount, "X" is an optional time-scale
    letter:

        M - minutes
        H - hours
        d - days (assumed when omitted)
        m - months (30 days)
        y - years (365 days)

    and a trailing "r" or "R" rounds the result back to the previous
    midnight.

    :param date: the date string, absolute or relative
    :param date_format: the strptime() format used for absolute dates
    :return: The Unix timestamp of the given date as a float.
    :raises ValueError: if 'date' matches neither an absolute date in
        'date_format' nor a valid relative specification.
    """
    try:
        # Attempt to pull the time out directly based on the format.
        target_date = datetime.datetime.strptime(date, date_format)
    except ValueError:
        # If that didn't work, try to parse the string as a relative date
        # according to the specification above.
        relative_match = re.match(r"\A-?(\d+)([a-zA-Z]?)([rR]?)\Z", date)
        if not relative_match:
            # Neither form is valid.
            raise ValueError("{date} is not a valid date specification".format(date=date))
        # If no scale is given, days are assumed.
        scale = relative_match.group(2) or 'd'
        # Round back to midnight only when the 'r'/'R' suffix is present.
        rounding = bool(relative_match.group(3))
        # Amount of change, in units of 'scale'.
        amount = int(relative_match.group(1))
        # Seconds per unit for each supported scale.
        scale_seconds = {
            'M': 60,                  # minutes
            'H': 60 * 60,             # hours
            'd': 60 * 60 * 24,        # days
            'm': 60 * 60 * 24 * 30,   # months
            'y': 60 * 60 * 24 * 365,  # years
        }
        if scale not in scale_seconds:
            # Invalid scale specification.
            raise ValueError("{date} is not a valid relative date value".format(date=date))
        seconds = amount * scale_seconds[scale]
        # BUG FIX: the previous implementation split 'seconds' into days
        # using 86399 seconds per day (off by one), skewing the result by
        # one second per day-multiple. timedelta handles arbitrary second
        # counts, so no manual day/second split is needed.
        time_difference = datetime.timedelta(seconds=seconds)
        # Calculate the target date.
        target_date = datetime.datetime.now() - time_difference
        # If rounding was specified, round to the previous midnight.
        if rounding:
            target_date = target_date.replace(hour=0, minute=0, second=0, microsecond=0)
    # Build up the local time tuple for Unix time conversion; isdst=-1
    # lets mktime() decide whether DST applies.
    time_tuple = time.struct_time((
        target_date.year, target_date.month, target_date.day,
        target_date.hour, target_date.minute, target_date.second, -1, -1, -1
    ))
    unix_time = time.mktime(time_tuple) + (target_date.microsecond / 1e6)
    return unix_time
def volume_size_target(size, target, logger=None):
    """
    Convert a size specification into a number of bytes to clear up on the
    filesystem where 'target' exists.

    The size can be given in one of three ways:
      1. A number of bytes to free up on the drive.
      2. A number of bytes to have free on the drive (this is different).
      3. A percentage representing the amount of free space you want on
         the drive.

    These can be inputted as (for example):
      1. 10g  - will attempt to remove 10 gigabytes of data
      2. 10gf - 10 gigabytes will be free after cleanup runs
      3. 10   - 10% of the drive will be free after cleanup

    There are five supported byte modifiers to specify explicit sizes:
        b: bytes
        k: kilobytes - 1024 bytes
        m: megabytes - 1024 kilobytes
        g: gigabytes - 1024 megabytes
        t: terabytes - 1024 gigabytes

    :param size: the amount to clear up on the volume where 'target' exists
    :param target: the location in the system where cleanup will occur
    :param logger: a Management Tools logger (if you want some things logged)
    :return: the number of bytes that should be freed up if possible
    :return type: int
    :raises ValueError: if 'size' is malformed, or if the volume already
        has at least the requested free space (negative deletion target)
    :raises RuntimeError: if no deletion target could be computed at all
    """
    # Get the filesystem information for 'target'.
    # NOTE(review): volume.bytes / volume.bytes_free are assumed to be the
    # filesystem's total/free sizes in bytes (fs_analysis API) — confirm.
    volume = fsa.Filesystem(fsa.get_responsible_fs(target))
    delete_target = None
    try:
        # Is it just the percentage?  (int() raises ValueError otherwise.)
        percentage = int(size)
        # Convert to a decimal percentage for math.
        percentage = float(percentage) / 100.0
        # Take the ceiling of the product of the percentage with the number of
        # blocks available.
        from math import ceil
        free_target = int(ceil(volume.bytes * percentage))
        # Bytes to delete = desired free bytes minus what is already free.
        delete_target = free_target - volume.bytes_free
    except ValueError:
        # 'size' is not just a percentage. Parse it for values!
        size_match = re.match(r"^(\d+)([bkmgt])([f]?)$", size.lower())
        if not size_match:
            raise ValueError("{size} is not a valid size-deletion target".format(size=size))
        # amount: digits; indicator: unit letter; total_free: trailing 'f'.
        amount, indicator, total_free = size_match.groups()
        total_free = True if total_free else False
        amount = int(amount)
        # Concordance between letters and their "byte powers"!
        size_indicators = {
            'b': 0,  # bytes = amount * (1024^0)
            'k': 1,  # kilobytes (1024 bytes) = amount * (1024^1)
            'm': 2,  # megabytes (1024 kilobytes) = amount * (1024^2)
            'g': 3,  # gigabytes (1024 megabytes) = amount * (1024^3)
            't': 4   # terabytes (1024 gigabytes) = amount * (1024^4)
        }
        # Convert the 'amount' into a target amount of bytes to delete.
        from math import pow
        byte_multiplier = int(pow(1024, size_indicators[indicator]))
        free_target = amount * byte_multiplier
        if total_free:
            # 'f' suffix: ensure that many bytes end up free on the volume.
            delete_target = free_target - volume.bytes_free
        else:
            # No suffix: delete exactly that many bytes.
            delete_target = free_target
    # Check if anything happened. If not... problems.
    if delete_target is None:
        raise RuntimeError("No target deletion size could be found.")
    # Check that the volume can actually have that much space deleted.
    if delete_target < 0:
        raise ValueError("Negative target deletion size encountered - is there already enough free space?")
    if delete_target > volume.bytes:
        # More than the whole volume was requested: warn and proceed.
        if logger:
            logger.warn("Too many bytes to delete; will delete as much as possible.")
    # Return the amount of bytes to delete.
    return delete_target
##------------------------------------------------------------------------------
## Program entry point.
##------------------------------------------------------------------------------
if __name__ == '__main__':
    # Build the argument parser.  add_help=False because '-h' is handled
    # manually (so usage() can print the long help text).
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('-h', '--help', action='store_true')
    parser.add_argument('-v', '--version', action='store_true')
    parser.add_argument('-n', '--no-log', action='store_true')
    parser.add_argument('-V', '--verbose', action='count')
    parser.add_argument('--skip-prompt', action='store_true')
    parser.add_argument('-l', '--log-dest')
    parser.add_argument('-k', '--keep-after', default=None)
    parser.add_argument('-d', '--date-format', default='%Y-%m-%d')
    parser.add_argument('-f', '--freeup', default=None)
    parser.add_argument('-t', '--dir-trigger', default=None)
    # NOTE(review): store_true with default=True makes --delete-oldest-first
    # a no-op flag; only --delete-largest-first (store_false on the same
    # dest) changes the value.  Presumably intentional for symmetry.
    parser.add_argument('--delete-oldest-first', action='store_true', default=True)
    parser.add_argument('--delete-largest-first', action='store_false', dest='delete_oldest_first')
    parser.add_argument('--overflow', action='store_true')
    parser.add_argument('target', nargs='?', default=os.getcwd())
    # Parse the arguments.
    args = parser.parse_args()
    # --keep-after and --freeup are mutually exclusive deletion modes.
    if args.keep_after and args.freeup:
        parser.error("You may only specify one of --keep-after and --freeup.")
    # Default mode: date-based deletion, seven days back, rounded to midnight.
    if not args.keep_after and not args.freeup:
        args.keep_after = '-7dr'
    if args.help:
        usage()
        sys.exit(0)
    if args.version:
        print(version())
        sys.exit(0)
    # Set the logging level. There's the regular level, the verbose level,
    # and the super-verbose level.
    if args.verbose is None:
        log_level = 20
    elif args.verbose == 1:
        log_level = 10
    else:
        log_level = 5
    # Build the logger.
    logger = loggers.get_logger(
        name = 'cleanup_manager',
        log = not args.no_log,
        level = log_level,
        path = args.log_dest
    )
    # Set output logging prompts (blank prefix for INFO and below).
    for logging_level in [x for x in logger.prompts.keys() if x <= loggers.INFO]:
        logger.set_prompt(logging_level, '')
    # Get the necessary information to perform cleanup. Either calculate the
    # unix date of the time to delete before, or find the amount of space to
    # delete off the given volume.
    if args.keep_after:
        free_space = None
        keep_after = date_to_unix(args.keep_after, args.date_format)
    elif args.freeup:
        keep_after = None
        free_space = volume_size_target(args.freeup, args.target, logger)
    # Run it!
    # NOTE(review): main() is not defined or imported anywhere in this file
    # as shown — this would raise NameError at runtime.  Confirm where the
    # entry point is supposed to come from (cleanup_management?).
    try:
        main(
            target = args.target,
            keep_after = keep_after,
            free_space = free_space,
            oldest_first = args.delete_oldest_first,
            skip_prompt = args.skip_prompt,
            overflow = args.overflow,
            dir_trigger = args.dir_trigger,
            logger = logger,
        )
    except:
        # Output the exception with the error name and its message. Suppresses the stack trace.
        logger.error("{errname}: {error}".format(errname=sys.exc_info()[0].__name__, error=' '.join([str(x) for x in sys.exc_info()[1]])))
        raise
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
27425,
62,
27604,
198,
198,
11748,
1822,
29572,
198,
11748,
4818,
8079,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
25064,
198,
11748,
640,
628,
198,
28311,
25,
198,
220,... | 2.646037 | 6,447 |
# Libraries
import copy
import numpy as np
import pandas as pd
# DataBlend Library
from datablend.core.widgets.base import BaseWidget
from datablend.core.consistency import report_unique_value
from datablend.utils.pandas import str2func
# --------------------------------------------
# Methods
# --------------------------------------------
def fillna_levels(data):
    """Replace missing values with 0 in every column whose name contains
    the substring 'level'.

    .. note: mutates ``data`` in place and also returns it.
    """
    level_columns = [name for name in data.columns if 'level' in name]
    data[level_columns] = data[level_columns].fillna(0)
    return data
def fillna_events(data):
    """Replace missing values with 0 in every column whose name contains
    the substring 'event'.

    .. note: mutates ``data`` in place and also returns it.
    """
    event_columns = [name for name in data.columns if 'event' in name]
    data[event_columns] = data[event_columns].fillna(0)
    return data
class StaticTidyWidget(BaseWidget):
    """Widget that collapses per-group 'static' columns.

    Columns declared as 'static' in the blender template are expected to
    hold a single value per group (e.g. per patient); each such column is
    replaced group-wise by the result of its configured aggregation
    function.

    raise:
        More than one value found!
    """
    # Template columns this widget requires.
    subset = ['to_name', 'static']

    def __init__(self, by, **kwargs):
        """Constructor.

        :param by: column name (or list of names) defining the groups.
        :param kwargs: forwarded to BaseWidget.
        """
        super().__init__(**kwargs)
        # Normalise to a list so it can be passed straight to groupby.
        self.by = [by] if isinstance(by, str) else by

    def report(self, data, trans, report, columns, verbose=10):
        """This method constructs and prints the report.

        .. note: Use textwrap at some point?
        """
        if verbose > 0:
            print('\n' + '=' * 80)
        if verbose > 0:
            # Summary of unique values per group for the static columns.
            msg = report_unique_value(data=data[self.by + columns],
                                      groupby=self.by,
                                      verbose=verbose)
            print(msg)
        if verbose > 1:
            print('The STATIC transformations report:\n\n')
            print('\t\t%s\n' % report.sort_index().to_string().replace('\n', '\n\t\t'))
        if verbose > 0:
            print("=" * 80)

    def transform(self, data, l=None):
        """Apply the static transformations group-wise.

        :param data: input DataFrame.
        :param l: unused; kept for backwards compatibility.
        :return: the transformed DataFrame.

        .. todo: What if not name in data?
        .. todo: Use raise, warn, coerce?
        """
        # Work on a copy so the caller's frame is untouched.
        trans = data.copy(deep=True)
        # Static transformations declared in the template (to_name -> spec).
        mapping = self.bt.map_kv(key='to_name', value='static')
        # Keep only columns that actually exist in the data.
        mapping = {k: v for k, v in mapping.items() if k in trans}
        # Apply transformations, recording per-column success/failure.
        status = {}
        for name, tf in str2func(mapping).items():
            try:
                trans[name] = trans.groupby(by=self.by)[name].transform(tf)
                status[name] = 'completed'
            except Exception as e:
                status[name] = e
        # -------------
        # Create report
        # -------------
        report = {
            'name': mapping.keys(),
            'transform': mapping.values(),
            'dtypes.1': trans[mapping.keys()].dtypes,
            'dtypes.2': trans[mapping.keys()].convert_dtypes().dtypes,
            'status': status.values()
        }
        report = pd.DataFrame.from_dict(report).set_index('name')
        # Show report
        self.report(data, trans, report, list(mapping.keys()), verbose=self.verbose)
        # BUG FIX: previously returned the untouched 'data', silently
        # discarding the transformed frame (sibling DefaultTidyWidget
        # returns its result); return the transformed copy instead.
        return trans
class DefaultTidyWidget(BaseWidget):
    """Class to set default values from BlenderTemplate.

    Missing values are filled per column with the defaults declared in
    the template ('to_name' defaults, with 'event' defaults on top).

    raise:
        More than one value found!
    """
    # Required columns
    subset = ['to_name', 'default']

    def get_map(self):
        """Build the {column: default} mapping from the template.

        Event defaults are applied on top of the 'to_name' defaults.

        :return: dict mapping column name to its default value.
        """
        # Create empty map
        d = {}
        # Update with to_name defaults
        d.update(self.bt.map_kv(key='to_name', value='default'))
        # Update with event defaults
        d.update(self.bt.map_kv(key='event', value='default'))
        return d

    def report(self, data, trans, report, columns, verbose=10):
        """Construct (but do not print) the textual report.

        .. note: 'StudyNo' is hard-coded as the grouping column here —
           TODO confirm this is intended for every dataset.
        .. note: Use textwrap at some point?
        """
        # Create message
        msg = ""
        if verbose > 0:
            msg += '\n{0}\n'.format('=' * 80)
        if verbose > 0:
            msg += report_unique_value(data=data[['StudyNo'] + columns],
                                       groupby='StudyNo',
                                       verbose=verbose)
        if verbose > 1:
            msg += 'The {0} report:\n\n'.format(self.__class__.__name__)
            msg += '\t\t%s\n' % report.sort_index() \
                .to_string() \
                .replace('\n', '\n\t\t')
        if verbose > 0:
            msg += '\n{0}\n'.format('=' * 80)
        return msg

    def transform(self, data, map=None):
        """Fill missing values using the template defaults.

        :param data: input DataFrame.
        :param map: optional {column: default} mapping; when None the
            mapping is built from the template via get_map().
        :return: a new DataFrame with the defaults applied.

        .. note: Check default value has same type as column?
        .. todo: What if people want to fill with medians.
        """
        # Build the mapping if none was supplied ('map' name kept for
        # backwards compatibility; use a non-shadowing local afterwards).
        mapping = self.get_map() if map is None else map
        # Keep only columns that actually exist in the data.
        mapping = {k: v for k, v in mapping.items() if k in data}
        # Fill nan.  fillna() returns a new frame, so no copy is needed —
        # the deep copy that used to be here was dead code and was removed.
        trans = data.fillna(str2func(mapping))
        # -------------
        # Create report
        # -------------
        report = {
            'name': mapping.keys(),
            'transform': mapping.values(),
            'dtypes.1': trans[mapping.keys()].dtypes,
            'dtypes.2': trans[mapping.keys()].convert_dtypes().dtypes,
        }
        report['status'] = 'completed'
        report = pd.DataFrame.from_dict(report).set_index('name')
        # Show report
        r = self.report(data, trans, report, list(mapping.keys()), verbose=self.verbose)
        print(r)
        return trans
class DTypesTidyWidget(BaseWidget):
    """Widget that casts each column to the dtype declared in the template.

    .. note: When reading with csv pandas infers types properly, but with
       xls types are inferred from the first value, which causes lots of
       issues — hence the explicit casting done here.
    .. note: check that bools are just True, False, NaN.
    """
    # Required columns
    subset = ['to_name', 'dtype']

    def transform(self, data):
        """Return a copy of ``data`` with the template dtypes applied.

        Also prints a per-column report of the casts performed.

        :param data: input DataFrame.
        :return: a new DataFrame with the declared dtypes.
        """
        # Work on a copy so the caller's frame is untouched.
        data = data.copy(deep=True)
        # Get dtypes declared in the template (to_name -> dtype string).
        dtypes = self.bt.map_kv('to_name', 'dtype')
        # Keep only existing columns (not needed)
        dtypes = {k: v for k, v in dtypes.items() if k in data}
        # For columns that will be boolean, map numeric truth values onto
        # real booleans first so astype('boolean') does not choke.
        rp_bool = {1.0: True, 0.0: False, 1: True, 0: False}
        for k, v in dtypes.items():
            if v != 'boolean':
                continue
            data[k] = data[k].replace(rp_bool)
        # Convert dtypes
        data = data.astype(dtypes)
        # -------------
        # Create report
        # -------------
        # Unique values
        # unique = {k:data[k].unique() for k,v in dtypes.items() if v=='boolean'}
        # current dtype
        # target dtype
        # Create report
        report = pd.DataFrame()
        report['name'] = dtypes.keys()
        report['type'] = dtypes.values()
        report = report.set_index('name')
        report['unique'] = data.nunique()
        report['status'] = 'completed'
        # miss = data.columns.difference(set(dtypes.keys()))
        # missing = pd.DataFrame()
        # missing['name'] = miss
        # missing['status'] = 'missing'
        # report = report.append(missing)
        report = report.sort_index()
        # Show the cast report on stdout.
        print("\n" + "=" * 80)
        print('Setting DTYPES ... \n\n\t%s\n' %
              report.to_string().replace('\n', '\n\t\t'))
        print("=" * 80)
        # Return
        return data
class LevelTidyWidget(BaseWidget):
    """Widget that fills missing values in 'level' columns with 0.

    Unlike the module-level ``fillna_levels`` helper, this widget works
    on a deep copy and leaves the input frame untouched.
    """

    def transform(self, data):
        """Return a copy of ``data`` where NaNs in every column whose
        name contains 'level' have been replaced by 0.

        .. note: Check that column has ints.
        .. note: If strings as levels (low, medium, high)?
        """
        frame = data.copy(deep=True)
        level_columns = [name for name in frame.columns if 'level' in name]
        frame[level_columns] = frame[level_columns].fillna(0)
        return frame
# ---------------------------------------------------------
# TidyWidget
# ---------------------------------------------------------
# Libraries
from pandas.api.types import is_bool_dtype
# Helper methods.
def duplicated_combine_set(x):
    """Combine the values of duplicated rows into a single scalar.

    The series is first converted to its best possible dtypes, then:
      * boolean values are reduced with max() (so any True wins),
      * numeric values are reduced to their mean,
      * anything else (e.g. strings) falls back to a deterministic,
        sorted, comma-separated join.

    :param x: pd.Series holding the duplicated values.
    :return: the combined scalar value.
    """
    try:
        # Normalise to the best available dtypes first.
        x = x.convert_dtypes()
        # Booleans: keep True if any duplicate was True.
        if is_bool_dtype(x.dtype):
            return x.max()
        # Numerics: average the duplicates (raises for non-numeric data).
        return pd.to_numeric(x).mean()
    except Exception:
        # Fallback for non-numeric data.  Sorting makes the result
        # deterministic (a plain set's repr would not be).
        return ','.join(sorted(x))
    # BUG FIX: the old trailing `return set(x)` was unreachable (both the
    # try and except paths return) and has been removed.
class TidyWidget:
"""This widget creates data in tidy structured.
It receives data in the so-called stack structure and returns
the data transformed in tidy structure.
.. note: When combining duplicates it computes the mean
for numeric dtypes and creates a set for other
dtypes such as string or boolean.
.. todo: Check units before transformation.
Examples
--------
# Create widget
widget = TidyWidget(index=index, value=value)
# Transform (keep all)
transform, duplicated = \
widget.transform(data, report_duplicated=True)
# Transform (keep first)
transform_first = \
widget.transform(data, keep='first')
Parameters
----------
index: str or list, default ['id', 'date', 'column']
The column names with the index. It will be used to
identify duplicates within the data,.
value: str, default, result
The column name with the values
convert_dtypes: boolean, default True
Whether convert dtypes.
reset_index: boolean, default True
Whether reset index
Returns
-------
"""
errors = {
'True': True,
'False': False
}
    def __init__(self, index=['id', 'date', 'column'],
                 value='result', convert_dtypes=True,
                 reset_index=True, replace_errors=True):
        """Constructor.

        .. note:: ``replace_errors`` is accepted but never stored or used
           anywhere in this class — TODO confirm whether it should gate
           the string->bool replacement performed in ``transform``.

        .. warning:: the mutable default for ``index`` is shared across
           calls; safe only as long as callers never mutate it.
        """
        # Add attributes
        self.index = index
        self.value = value
        self.convert_dtypes = convert_dtypes
        self.reset_index = reset_index
def fit(self):
"""Does nothing."""
return self
def transform(self, data, report_duplicated=False, keep=False):
"""Transform stack data to tidy data.
.. note: data = data.sort_values(by=['StudyNo', 'date', 'column'])
.. todo: Review whether pd.pivot_table could be used?
Old code
--------
# Basic formatting
#replace = {'result': {'False': False, 'True': True}}
#tidy.date = pd.to_datetime(tidy.date)
#tidy.date = tidy.date.dt.date
#tidy = tidy.replace(replace) # Quick fix str to bool
#tidy = tidy.drop_duplicates() # Drop duplicates
#tidy = tidy.set_index(self.index)
Parameters
----------
data: pd.DataFrame
The data in stacked format. It usually has the
columns ['patient_id', 'date', 'column', 'result'].
The first three are usually the index and the
results used as values.
report_duplicated: boolean, default False
Whether to return a DataFrame with the duplicates.
keep: str, default False
Strategy to remove duplicates. The possible values are
to keep 'first' appearance, to keep 'last' or to keep
all appearances combining them in a set using 'False'
Returns
-------
tidy: pd.DataFrame
The tidy DataFrame
report: pd.DataFrame
The report with the duplicate rows.
"""
# Copy data
aux = data.copy(deep=True)
# Remove columns that are not in index
subset = self.index + [self.value]
# Keep only interesting
aux = aux[subset]
aux = aux.drop_duplicates()
aux = aux.set_index(self.index)
# Replace errors
aux.result = aux.result.replace(self.errors)
# Look for index duplicates
duplicated = \
aux.index.duplicated(keep=keep)
# Create duplicates
combination = pd.DataFrame()
if not keep:
# Combine duplicates
combination = aux[duplicated] \
.groupby(self.index) \
.result.apply(duplicated_combine_set) \
.to_frame()
# Create stack without duplicates
tidy = pd.concat([aux[~duplicated], combination])
tidy = tidy.sort_values(by=self.index)
# Create tidy (pivot)
tidy = tidy.unstack() \
.droplevel(level=0, axis=1)
if self.reset_index:
tidy = tidy.reset_index()
if self.convert_dtypes:
tidy = tidy.convert_dtypes()
# Return
if report_duplicated:
return tidy, aux[duplicated]
return tidy
def fit_transform(self, **kwargs):
"""Fit transform (just calls transform)"""
self.fit()
self.transform(**kwargs) | [
2,
46267,
198,
11748,
4866,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
2,
6060,
3629,
437,
10074,
198,
6738,
4818,
23117,
437,
13,
7295,
13,
28029,
11407,
13,
8692,
1330,
7308,
38300,
198,
6738... | 2.254414 | 6,344 |
from django import template
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe
from django.core.urlresolvers import reverse
from main.models import Post
from utils.templatetags.post_color import post_color
import markdown
# Template-tag registry for this module.
register = template.Library()
# Markdown extension instances used by the 'markdown' filter below.
# NOTE(review): none of these Extension classes are imported or defined in
# this part of the file — presumably they live elsewhere in the module;
# confirm, otherwise these lines raise NameError at import time.
disable_images = DisableImagesExtension()
more_style = MoreStyleExtension()
post_references = PostReferencesExtension()
dottags = DottagsExtension()
@register.filter('markdown', is_safe=True)
@stringfilter
| [
6738,
42625,
14208,
1330,
11055,
198,
6738,
42625,
14208,
13,
28243,
13,
12286,
10379,
1010,
1330,
4731,
24455,
198,
6738,
42625,
14208,
13,
26791,
13,
49585,
395,
1806,
1330,
1317,
62,
21230,
198,
6738,
42625,
14208,
13,
7295,
13,
6371,
... | 3.443709 | 151 |
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Module that contains the test TestLanguage.'''
from __future__ import absolute_import
from harness.test_base import TestBaseNoTargetProcess
class TestLanguage(TestBaseNoTargetProcess):
'''
Tests the "language" command and "language renderscript" subcommand.
'''
| [
2,
15069,
357,
34,
8,
1584,
383,
5565,
4946,
8090,
4935,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789... | 3.757447 | 235 |
# coding: utf-8
# @Author: oliver
# @Date: 2019-07-14 18:50:35
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
2,
2488,
13838,
25,
25776,
1428,
198,
2,
2488,
10430,
25,
220,
220,
13130,
12,
2998,
12,
1415,
1248,
25,
1120,
25,
2327,
198
] | 2.03125 | 32 |
import pytest
import cmakeutils
# findexe(cmd)
# run(*args, path=findexe("cmake"), **runargs)
# validate(cmakePath=findexe("cmake"))
# configured(buildDir)
# clear(buildDir)
# configure(
# root_dir,
# build_dir,
# *args,
# build_type="Release",
# cmakePath=findexe("cmake"),
# need_msvc=False,
# **kwargs,
# )
# build(
# build_dir,
# *args,
# build_type=None,
# parallel=None,
# cmakePath=findexe("cmake"),
# **kwargs,
# ):
# install(
# build_dir,
# install_dir,
# *args,
# build_type=None,
# cmakePath=findexe("cmake"),
# **kwargs,
# ):
# ctest(build_dir, ctestPath=findexe("ctest"), **kwargs):
# read_cache(build_dir, vars=None):
# delete_cache(build_dir):
# get_generators(cmakePath=findexe("cmake"), as_list=False):
# get_generator_names(cmakePath=findexe("cmake")):
# generator_changed(generator, build_dir="build", cmakePath=findexe("cmake")):
| [
11748,
12972,
9288,
198,
11748,
12067,
539,
26791,
198,
198,
2,
1064,
13499,
7,
28758,
8,
198,
2,
1057,
46491,
22046,
11,
3108,
28,
69,
9630,
68,
7203,
11215,
539,
12340,
12429,
5143,
22046,
8,
198,
2,
26571,
7,
11215,
539,
15235,
2... | 2.186321 | 424 |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import statsmodels.formula.api as smf
from scipy import stats
# Load the raw datasets.
# NOTE(review): `data(...)` is not defined or imported in this chunk —
# presumably a loader defined elsewhere in the project; confirm.
pcr=np.array(data('pcr.txt'))  # pcr columns: date, total confirmed, domestic cases, imported cases, deaths
vaccine=np.array(data('vaccine.txt'))  # vaccine columns: date, total 1st/2nd/3rd dose, then per-brand doses (AZ1 AZ2 F1 F2 Y1 M1 M2 F3 M3 Y3)
lmp='local maximum point'  # shared legend label for marked local maxima
# Trend of total confirmed cases over the whole period.
x=np.linspace(pcr.T[0].min(),pcr.T[0].max(),len(pcr.T[0]))  # x axis: day index
y=pcr.T[1]  # y axis: daily confirmed cases
plt.plot(x,y,'.')
plt.plot(x[220],y[220],'o','red',label=lmp)  # mark local maxima
plt.plot(x[340],y[340],'o','red',label=lmp)
plt.plot(x[569],y[569],'o','red',label=lmp)
plt.title("positive scatter for whole range")
plt.xlabel('day after debut of covid19')
plt.ylabel('number of positive')
plt.legend()
plt.show()
# Death-rate trend over the whole period.
plt.title("death rate for whole range")
plt.ylabel('death rate')
plt.xlabel('day after debut of covid19')
plt.plot(x,pcr.T[4]/pcr.T[1],'-')  # deaths / confirmed per elapsed day => death rate
plt.show()
# Confirmed-case trend before the "With Corona" policy took effect.
plt.plot(x[:len(x)-50],y[:len(y)-50],'.')
plt.plot(x[220],y[220],'o','red')
plt.plot(x[340],y[340],'o','red')  # 120
plt.plot(x[569],y[569],'o','red')  # 220
plt.xlabel('day after debut of covid and before WithCorona')
plt.ylabel('number of positive')
plt.title("positive rate before WithCorona")
plt.show()
###############################################################################################################
# Polynomial line fitting of the confirmed-case curve.
x1=x[200:len(x)-50]  # from the start of the main outbreak up to the "With Corona" policy
y2=y[200:len(y)-50]
poly_fit=np.polyfit(x1,y2,7)  # fit the case counts with a degree-7 polynomial
poly_1d=np.poly1d(poly_fit)
xs=np.linspace(x1.min(),x1.max()+50)  # day variable extended 50 days past the policy start
ys=poly_1d(xs)  # fitted case counts at xs
plt.plot(xs,np.abs(ys),'k-',label='line pitting by polyfit(opensource)')  # fitted curve (black)
##############################################################################################################
### Hand-rolled confirmed-case curve fit ###
# NOTE(review): `cof` (below) and `pcrline` are not defined in this chunk —
# presumably helpers defined elsewhere in the file; confirm.
y1=np.zeros(x1.shape)  # column holding the trigonometric feature values
i=0
for node in x1:
    y1[i]=np.abs((np.cos(cof(node)))*((((np.abs(node)/200))//1)))# trig feature for each day value
    i+=1
coefs=np.vstack((x1,y1,np.ones(x1.shape)))  # assemble design matrix A
coefs=np.matmul(np.linalg.pinv(coefs.T),y[:len(x1)])  # least-squares coefficients: pseudo-inverse of A times actual case counts
plt.plot(x1,pcrline(x1,coefs),'r-',label='line pitting by myself')  # my hand-fitted curve
###############################################################################################################
### Straight-line fit for the period after "With Corona" ###
x3=x[len(x)-50:]  # post-policy data only
y3=y[len(y)-50:]  # post-policy data only
plt.plot(x3,y3,'y.',alpha=1,label='actual pcr positive after WithCorona')  # actual cases after the policy
ploy_fit1=np.polyfit(x3,y3,1)# linear fit of post-policy case counts
poly_1d=np.poly1d(ploy_fit1)# linear fit of post-policy case counts
xs1=np.linspace(x3.min(), x3.max())
ys1=poly_1d(xs1)# predicted cases from the fitted line
plt.plot(xs1,ys1,'y-',label='line pitting after withCorona')  # draw the fitted post-policy line
###############################################################################################################
### Actual confirmed-case distribution ###
plt.plot(x[:len(x)-50],y[:len(y)-50],'b.',alpha=0.3,label='actual pcr positive')# actual case distribution
###############################################################################################################
### Plot metadata (annotations, labels, title) ###
plt.plot(x[220],y[220],'o','red')
plt.plot(x[340],y[340],'o','red')
plt.plot(x[569],y[569],'o','red')
plt.annotate('local max',xy=(x[220],y[220]),arrowprops=dict(facecolor='black',shrink=0.0005,alpha=0.7))
plt.annotate('local max',xy=(x[340],y[340]),arrowprops=dict(facecolor='black',shrink=0.0005,alpha=0.7))
plt.annotate('local max',xy=(x[569],y[569]),arrowprops=dict(facecolor='black',shrink=0.0005,alpha=0.7))
plt.xlabel('day after debut of covid and before WithCorona')
plt.ylabel('number of positive')
plt.title('predicting pcr positives with line pitting')
plt.legend()
plt.show()
###############################################################################################################
# The sine-based function used for the hand-rolled line fit.
plt.title('the sine wave used self line pitting')
x4=np.linspace(200,100000)
plt.plot(x4,100*np.cos(cof(x4)),'k-')
plt.legend()
plt.show()
# 백신과 사망률 관계 추이
pop=51821669 #대한민국 총 인구 FOR 백신 접종률
###############################################################################################################
#데이터 가공
data={
'positive':pcr[403:len(pcr)-2,1],
'deathRate':((pcr[403:len(pcr)-2,4]/pcr[403:len(pcr)-2,1])*10e6)//1,
'vaccine':vaccine[:,1]/pop,
'AZ':vaccine[:,5]/pop,
'Fizer':vaccine[:,7]/pop, #데이터에 쓰일 확진자,총 백신 접종상황, 백신별 접종 현황
'Y':vaccine[:,8]/pop,
'Modern':vaccine[:,10]/pop
}
data=pd.Series(data)
###############################################################################################################
###############################################################################################################
#백신과 확진자수 분포에대한 연관성 3D 시각화
ax=plt.axes(projection='3d')
ax.set_xlabel('days')
ax.set_zlabel('number of positive')
ax.set_ylabel('vaccination of the day')
ax.view_init(10,-10) #3차원 자료 시점 변경
ax.scatter3D(np.linspace(403,len(pcr)-1,len(data['positive'])),data['vaccine'],data['positive'])#(날짜,확진자수,백신접종률)
plt.show() #해당 백신 접종률에 대응하는 확진자 수
###############################################################################################################
#경과일에 따른 백신접종률 시각화
plt.plot(np.linspace(0,len(data['vaccine'])-1,len(data['vaccine']-1)),data['vaccine'])
plt.xlabel('days')
plt.ylabel('vaccination')
plt.show()
###############################################################################################################
#백신 접종률에 따른 확진자수 2차원 시각화
plt.plot(data['vaccine'],data['positive'])
plt.xlabel('vaccination')
plt.ylabel('number of positive')
plt.show()
###############################################################################################################
#백신접종률과 사망률의 관계
x=np.array(data['vaccine']) #백신접종률과 사망률 변수 생성
y=np.array(data['deathRate']) #백신접종률과 사망률 변수 생성
plt.scatter(x,y, label='actual deathRate') #실제 백신 접종률에 따른 사망률 분포
###############################################################################################################
#백신접종률 대 사망률에 대한 라인 피팅
poly_fit=np.polyfit(x,y,4) #np의 poly_fit을 사용한 라인피팅
poly_1d=np.poly1d(poly_fit) #np의 poly_fit을 사용한 라인피팅
xs=np.linspace(x.min(),x.max()) #np의 poly_fit을 사용한 라인피팅
ys=poly_1d(xs) #np의 poly_fit을 사용한 라인피팅
plt.plot(xs,ys,color='red',label='line pitting by poly_fit')#피팅한 곡선 그리기
###############################################################################################################
#백신 접종률 대 사망률에 대한 회귀분석
formular = 'deathRate ~ vaccine' #vaccine 변수를 이용해 사망률을 학습
result=smf.ols(formular,data).fit() #statsmodels를 이용한 선형 분석
print('백신 접종률과 사망률에 대한 분석','\n',result.summary())
xs1=np.linspace(xs.min(),xs.max())
ys1=6.23e-05*xs1+5.296e+4 #학습결과에 나온 계수를 이용해 y값 입력
plt.plot(xs1,ys1,'green',label='regression by overall vaccine') #1차원에는 잘 맞지 않는다.
###############################################################################################################
#백신 별 접종률에 대한 사망률 회귀분석
formula2='deathRate~AZ+Fizer+Y+Modern' #백신별로 변수를 만들어 학습
result2=smf.ols(formula2,data).fit() #학습
print('백신별 사망률에 대한 분석','\n',result2.summary())
plt.plot(x,deathForVaccine(data['AZ'],data['Fizer'],data['Y'],data['Modern']),'k-',label='regression by sum of each vaccine')
#학습결과 곡선 플로팅
I=np.eye(4)
for i in range(4):
now=x[-1]*I[i]
print(deathForVaccine(now[0],now[1],now[2],now[3]))
#한 종류의 백신으로만 맞았을 때의 사망률
#AZ: 1퍼센트, Fizer 1.5퍼센트, Y:음수값, Modern: 3퍼센트가 나온다
###############################################################################################################
#보정 진행
for item in ('AZ','Fizer','Modern'):
for i in range(91,len(data[item])):
data[item][i]=data[item][i]-data[item][i-90] #항체감소 보정, 90일 전의 접종인원을 제외한다.
fomular3='deathRate~AZ+Fizer+Modern' #AZ,화이자,모더나로만 변수를 구성,
result3=smf.ols(fomular3,data).fit() #학습
print("보정 후 결과\n",result3.summary()) #결과 출력
plt.plot(x,deathForVaccine2(data['AZ'], data['Fizer'], data['Modern']),'y-',label='result after correction')
#보정후 학습의 결과 플로팅
I2=np.eye(3)
for i in range(3):
vaccines=('AZ','Fizer','Modern')
now=x[-1]*I2[i]
print(vaccines[i],deathForVaccine2(*I2[i]))
#보정학습 후, 한 가지 백신으로 90퍼센트의 백신접종률을 달성하였을 때 사망률 예측
#AZ: 1퍼센트 미만, Fizer: 음수값, Modern: 2퍼센트 미만 (약 1.7)
###############################################################################################################
#그래프 메타데이터 셋
plt.legend()
plt.xlabel('vaccination rate')
plt.ylabel('death rate')
plt.title('deathRate with vaccination rate')
plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
9756,
27530,
13,
687,
4712,
13,
15042,
355,
895,
69,
198,
6738,
629,
541,
88,
1330,
9756,
... | 1.580495 | 5,373 |
#!/usr/bin/env python3
"""Tests for ``amici.pandas``"""
import sys
import amici
import unittest
import os
import copy
import numpy as np
import itertools
class TestAmiciPandasImportExport(unittest.TestCase):
"""
TestCase class for testing csv import using pandas
"""
if __name__ == '__main__':
suite = unittest.TestSuite()
suite.addTest(TestAmiciPandasImportExport())
unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
37811,
51,
3558,
329,
7559,
18127,
72,
13,
79,
392,
292,
15506,
37811,
198,
198,
11748,
25064,
198,
11748,
716,
44070,
198,
11748,
555,
715,
395,
198,
11748,
28686,
198,
11748... | 2.741722 | 151 |
import html
from typing import Optional, List
import telegram.ext as tg
from telegram import Message, Chat, Update, Bot, ParseMode, User, MessageEntity
from telegram import TelegramError
from telegram.error import BadRequest
from telegram.ext import CommandHandler, MessageHandler, Filters
from telegram.ext.dispatcher import run_async
from telegram.utils.helpers import mention_html, mention_markdown
import tg_bot.modules.sql.blsticker_sql as sql
from tg_bot import dispatcher, SUDO_USERS, LOGGER, OWNER_ID
from tg_bot.modules.disable import DisableAbleCommandHandler
from tg_bot.modules.helper_funcs.chat_status import can_delete, is_user_admin, user_not_admin, user_admin, \
bot_can_delete, is_bot_admin
from tg_bot.modules.helper_funcs.filters import CustomFilters
from tg_bot.modules.helper_funcs.misc import split_message
from tg_bot.modules.warns import warn
from tg_bot.modules.log_channel import loggable
from tg_bot.modules.sql import users_sql
from tg_bot.modules.connection import connected
from tg_bot.modules.helper_funcs.alternate import send_message
@run_async
@run_async
@user_admin
@run_async
@user_admin
@run_async
@loggable
@user_admin
@run_async
@user_not_admin
__help__ = """
Blacklist sticker is used to stop certain stickers. Whenever a sticker is sent, the message will be deleted immediately.
*NOTE:* Blacklist stickers do not affect the group admin.
- /blsticker: See current blacklisted sticker.
*Only admin:*
- /addblsticker <sticker link>: Add the sticker trigger to the black list. Can be added via reply sticker.
- /unblsticker <sticker link>: Remove triggers from blacklist. The same newline logic applies here, so you can delete multiple triggers at once.
- /rmblsticker <sticker link>: Same as above.
- /blstickermode ban/tban/mute/tmute .
Note:
- `<sticker link>` can be `https://t.me/addstickers/<sticker>` or just `<sticker>` or reply to the sticker message.
"""
__mod_name__ = "S Blacklist"
BLACKLIST_STICKER_HANDLER = DisableAbleCommandHandler("blsticker", blackliststicker, pass_args=True, admin_ok=True)
ADDBLACKLIST_STICKER_HANDLER = DisableAbleCommandHandler("addblsticker", add_blackliststicker)
UNBLACKLIST_STICKER_HANDLER = CommandHandler(["unblsticker", "rmblsticker"], unblackliststicker)
BLACKLISTMODE_HANDLER = CommandHandler("blstickermode", blacklist_mode, pass_args=True)
BLACKLIST_STICKER_DEL_HANDLER = MessageHandler(Filters.sticker & Filters.group, del_blackliststicker)
dispatcher.add_handler(BLACKLIST_STICKER_HANDLER)
dispatcher.add_handler(ADDBLACKLIST_STICKER_HANDLER)
dispatcher.add_handler(UNBLACKLIST_STICKER_HANDLER)
dispatcher.add_handler(BLACKLISTMODE_HANDLER)
dispatcher.add_handler(BLACKLIST_STICKER_DEL_HANDLER)
| [
11748,
27711,
198,
6738,
19720,
1330,
32233,
11,
7343,
198,
198,
11748,
573,
30536,
13,
2302,
355,
256,
70,
198,
6738,
573,
30536,
1330,
16000,
11,
24101,
11,
10133,
11,
18579,
11,
2547,
325,
19076,
11,
11787,
11,
16000,
32398,
198,
6... | 3.006659 | 901 |
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from google.appengine.ext import ndb
from rogerthat.dal import parent_ndb_key
from rogerthat.models import NdbModel
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
12131,
3469,
6916,
15664,
23973,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
... | 3.518519 | 216 |
import datetime
import logging
import random
import secrets
from fastapi import FastAPI, Depends, APIRouter, HTTPException
from fastapi.security import HTTPBasicCredentials, HTTPBasic
from starlette import status
from uvicorn import Server
from uvicorn.config import Config
from pyctuator.pyctuator import Pyctuator
my_logger = logging.getLogger("example")
username = "u1"
password = "p2"
security = SimplisticBasicAuth(username, password)
app = FastAPI(
title="FastAPI Example Server",
description="Demonstrate Spring Boot Admin Integration with FastAPI",
docs_url="/api",
)
@app.get("/")
example_app_address = "172.18.0.1"
example_sba_address = "localhost"
pyctuator = Pyctuator(
app,
"Example FastAPI",
app_url=f"http://{example_app_address}:8000",
pyctuator_endpoint_url=f"http://{example_app_address}:8000/pyctuator",
registration_url=f"http://{example_sba_address}:8080/instances",
app_description=app.description,
customizer=add_authentication_to_pyctuator, # Customize Pyctuator's API router to require authentication
metadata={
"user.name": username, # Include the credentials in the registration request sent to SBA
"user.password": password,
}
)
# Keep the console clear - configure uvicorn (FastAPI's WSGI web app) not to log the detail of every incoming request
uvicorn_logger = logging.getLogger("uvicorn")
uvicorn_logger.setLevel(logging.WARNING)
server = Server(config=(Config(
app=app,
loop="asyncio",
host="0.0.0.0",
logger=uvicorn_logger,
)))
server.run()
| [
11748,
4818,
8079,
198,
11748,
18931,
198,
11748,
4738,
198,
11748,
13141,
198,
198,
6738,
3049,
15042,
1330,
12549,
17614,
11,
2129,
2412,
11,
3486,
4663,
39605,
11,
14626,
16922,
198,
6738,
3049,
15042,
13,
12961,
1330,
14626,
26416,
34... | 2.886239 | 545 |
import os
import random
import numpy as np
import zipfile
import collections
from mxnet import nd, gluon
from mxnet.gluon import utils as gutils, data as gdata
def data_iter_consecutive(corpus_indices, batch_size, num_steps, ctx=None):
"""Sample mini-batches in a consecutive order from sequential data."""
# Offset for the iterator over the data for uniform starts
offset = int(random.uniform(0, num_steps))
# Slice out data - ignore num_steps and just wrap around
num_indices = ((len(corpus_indices) - offset) // batch_size) * batch_size
indices = nd.array(corpus_indices[offset:(offset + num_indices)], ctx=ctx)
indices = indices.reshape((batch_size, -1))
# Need to leave one last token since targets are shifted by 1
num_epochs = ((num_indices // batch_size) - 1) // num_steps
for i in range(0, num_epochs * num_steps, num_steps):
X = indices[:, i:(i+num_steps)]
Y = indices[:, (i+1):(i+1+num_steps)]
yield X, Y
def data_iter_random(corpus_indices, batch_size, num_steps, ctx=None):
"""Sample mini-batches in a random order from sequential data."""
# Offset for the iterator over the data
offset = int(random.uniform(0, num_steps))
# Subtract 1 extra since we need to account for the sequence length
num_examples = ((len(corpus_indices) - offset - 1) // num_steps) - 1
# Discard half empty batches
num_batches = num_examples // batch_size
example_indices = list(
range(offset, offset + num_examples * num_steps, num_steps))
random.shuffle(example_indices)
# This returns a sequence of the length num_steps starting from pos.
for i in range(0, batch_size * num_batches, batch_size):
# batch_size indicates the random examples read each time.
batch_indices = example_indices[i:(i+batch_size)]
X = [_data(j) for j in batch_indices]
Y = [_data(j + 1) for j in batch_indices]
yield nd.array(X, ctx), nd.array(Y, ctx)
def load_data_time_machine(num_examples=10000):
"""Load the time machine data set (available in the English book)."""
with open('../data/timemachine.txt') as f:
raw_text = f.read()
lines = raw_text.split('\n')
text = ' '.join(' '.join(lines).lower().split())[:num_examples]
vocab = Vocab(text)
corpus_indices = [vocab[char] for char in text]
return corpus_indices, vocab
def mkdir_if_not_exist(path):
"""Make a directory if it does not exist."""
if not os.path.exists(os.path.join(*path)):
os.makedirs(os.path.join(*path))
| [
11748,
28686,
198,
11748,
4738,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19974,
7753,
198,
11748,
17268,
198,
6738,
285,
87,
3262,
1330,
299,
67,
11,
1278,
84,
261,
198,
6738,
285,
87,
3262,
13,
70,
2290,
261,
1330,
3384,
4487,... | 2.628601 | 972 |
from ..imports import *
# loading txt/strings
# def to_lineparts(linetxt,seps=set(',:;–—()[].!?'),min_len=1,max_len=25):
# o=[]
# for sent in to_sents_str(linetxt):
# toks=tokenize_agnostic(sent)
# ophrase=[]
# for tok in toks:
# ophrase+=[tok]
# # print(tok,ophrases)
# ophrase_len=len([x for x in ophrase if x[0].isalpha()])
# if ophrase_len>=min_len and (tok in seps or ophrase_len>=max_len):
# o+=[''.join(ophrase)]
# ophrase=[]
# # if ophrase:
# # if o and not any(x.isalpha() for x in ophrase):
# # o[-1]+=''.join(ophrase)
# # else:
# # o+=[''.join(ophrase)]
# # if ophrase and any(x.isalpha() for x in ophrase): o+=[''.join(ophrase)]
# return o
# sonnet="""
# How heavy do I journey on the way,
# When what I seek, my weary travel's end,
# Doth teach that ease and that repose to say
# 'Thus far the miles are measured from thy friend!'
# The beast that bears me, tired with my woe,
# Plods dully on, to bear that weight in me,
# As if by some instinct the wretch did know
# His rider loved not speed, being made from thee:
# The bloody spur cannot provoke him on
# That sometimes anger thrusts into his hide;
# Which heavily he answers with a groan,
# More sharp to me than spurring to his side;
# For that same groan doth put this in my mind;
# My grief lies onward and my joy behind.
# """ | [
6738,
11485,
320,
3742,
1330,
1635,
628,
628,
628,
628,
198,
198,
2,
11046,
256,
742,
14,
37336,
628,
220,
220,
220,
220,
198,
198,
2,
825,
284,
62,
1370,
42632,
7,
2815,
316,
742,
11,
325,
862,
28,
2617,
7,
3256,
25,
26,
1906,
... | 2.22449 | 686 |
"""
This module implements a multi-layer perceptron (MLP) in NumPy.
You should fill in code into indicated sections.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from modules import *
from functools import reduce
class MLP(object):
"""
This class implements a Multi-layer Perceptron in NumPy.
It handles the different layers and parameters of the model.
Once initialized an MLP object can perform forward and backward.
"""
def __init__(self, n_inputs, n_hidden, n_classes):
"""
Initializes MLP object.
Args:
n_inputs: number of inputs.
n_hidden: list of ints, specifies the number of units
in each linear layer. If the list is empty, the MLP
will not have any linear layers, and the model
will simply perform a multinomial logistic regression.
n_classes: number of classes of the classification problem.
This number is required in order to specify the
output dimensions of the MLP
TODO:
Implement initialization of the network.
"""
########################
# PUT YOUR CODE HERE #
#######################
# Check there is at least one node in the input layer
if n_inputs < 1:
raise ValueError(
"Number of units in the input layer is incorrect. There should be at least one unit.")
# Check there is at least one node in each of the hidden layers.
# Using `any` instead of all to speed up the check by using short circuit evaluation.
if len(n_hidden) > 0 and any(n_layer < 0 for n_layer in n_hidden):
raise ValueError(
"Number of units in one of the hidden layer is incorrect. There should be at least one unit.")
# Check there is at least one node in the output layer
if n_classes < 1:
raise ValueError(
"Number of units in the output layer is incorrect. There should be at least one unit.")
# Create list with sizes of all the layers.
sizes = [n_inputs] + n_hidden + [n_classes]
self.layers = []
# Go over all the layers, excluding the last one
for idx in range(len(sizes) - 1):
input_size, output_size = sizes[idx], sizes[idx + 1]
self.layers.append(LinearModule(input_size, output_size))
# avoid adding ReLU activation in the very end, instead add softmax
if idx < len(sizes) - 2:
self.layers.append(ReLUModule())
else:
self.layers.append(SoftMaxModule())
########################
# END OF YOUR CODE #
#######################
def forward(self, x):
"""
Performs forward pass of the input. Here an input tensor x is transformed through
several layer transformations.
Args:
x: input to the network
Returns:
out: outputs of the network
TODO:
Implement forward pass of the network.
"""
########################
# PUT YOUR CODE HERE #
#######################
out = reduce(lambda res, f: f.forward(res), self.layers, x)
########################
# END OF YOUR CODE #
#######################
return out
def backward(self, dout):
"""
Performs backward pass given the gradients of the loss.
Args:
dout: gradients of the loss
TODO:
Implement backward pass of the network.
"""
########################
# PUT YOUR CODE HERE #
#######################
reduce(lambda res, f: f.backward(res), self.layers[::-1], dout)
########################
# END OF YOUR CODE #
#######################
return
| [
37811,
198,
1212,
8265,
23986,
257,
5021,
12,
29289,
34953,
1313,
357,
5805,
47,
8,
287,
31835,
20519,
13,
198,
1639,
815,
6070,
287,
2438,
656,
8203,
9004,
13,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
673... | 2.790751 | 1,319 |
from django.contrib import auth
from backend.accounts import User
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
6284,
198,
6738,
30203,
13,
23317,
82,
1330,
11787,
628,
628
] | 3.833333 | 18 |
#!/usr/bin/env python
import socket
from .decorator import timeout
@timeout
@timeout
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
17802,
198,
198,
6738,
764,
12501,
273,
1352,
1330,
26827,
628,
198,
31,
48678,
628,
198,
31,
48678,
628,
198,
2,
43907,
25,
7400,
11338,
28,
19,
4292,
8658,
6482,
10394,
... | 2.94 | 50 |
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfparser import PDFParser
from io import StringIO
import sys
import mlcrate as mlc
import string
import json
parse_pages = [int(i) for i in sys.argv[2].split(',')]
pages = {}
with open(sys.argv[1], 'rb') as in_file:
parser = PDFParser(in_file)
doc = PDFDocument(parser)
rsrcmgr = PDFResourceManager()
for i, page in enumerate(PDFPage.create_pages(doc)):
if i not in parse_pages:
continue
if i > max(parse_pages):
break
#print(i)
output_string = StringIO()
device = TextConverter(rsrcmgr, output_string, laparams=LAParams())
interpreter = PDFPageInterpreter(rsrcmgr, device)
interpreter.process_page(page)
output = ''.join(s for s in output_string.getvalue() if s in string.printable)
pages[i] = output
#print(output)
# mlc.save(pages, sys.argv[3])
if sys.argv[3] == '-':
print(json.dumps(pages))
else:
json.dump(pages, open(sys.argv[3], 'w'))
# print(output_string.getvalue())
| [
6738,
37124,
1084,
263,
13,
1102,
332,
353,
1330,
8255,
3103,
332,
353,
201,
198,
6738,
37124,
1084,
263,
13,
39786,
1330,
406,
2969,
283,
4105,
201,
198,
6738,
37124,
1084,
263,
13,
12315,
22897,
1330,
12960,
24941,
201,
198,
6738,
3... | 2.374074 | 540 |
# test_clean_up_standard_json.py
""" test clean_up_standard_json """
import _set_path # noqa
import unittest
# import json
import os
from pathlib import Path
from clean_up_standard_json import _fix_level, _fix_ids
from pipelineutilities.pipeline_config import setup_pipeline_config # noqa: E402
local_folder = os.path.dirname(os.path.realpath(__file__)) + "/"
class Test(unittest.TestCase):
""" Class for test fixtures """
def suite():
""" define test suite """
return unittest.TestLoader().loadTestsFromTestCase(Test)
if __name__ == '__main__':
suite()
unittest.main()
| [
2,
1332,
62,
27773,
62,
929,
62,
20307,
62,
17752,
13,
9078,
198,
37811,
1332,
3424,
62,
929,
62,
20307,
62,
17752,
37227,
198,
11748,
4808,
2617,
62,
6978,
220,
1303,
645,
20402,
198,
11748,
555,
715,
395,
198,
2,
1330,
33918,
198,... | 2.870813 | 209 |
from rest_framework.serializers import (
ModelSerializer,
SerializerMethodField,
)
from .models import Post
| [
6738,
1334,
62,
30604,
13,
46911,
11341,
1330,
357,
198,
220,
220,
220,
9104,
32634,
7509,
11,
198,
220,
220,
220,
23283,
7509,
17410,
15878,
11,
198,
8,
198,
198,
6738,
764,
27530,
1330,
2947,
628
] | 3.277778 | 36 |
import pytest
from manim.renderer.opengl_renderer import OpenGLRenderer
from .simple_scenes import *
@pytest.mark.parametrize("enable_preview", [False])
def testGetFrameWithPreviewDisabled(use_opengl_renderer):
"""Get frame is able to fetch frame with the correct dimensions when preview is disabled"""
scene = SquareToCircle()
assert isinstance(scene.renderer, OpenGLRenderer)
assert not config.preview
renderer = scene.renderer
renderer.update_frame(scene)
frame = renderer.get_frame()
# height and width are flipped
assert renderer.get_pixel_shape()[0] == frame.shape[1]
assert renderer.get_pixel_shape()[1] == frame.shape[0]
@pytest.mark.slow
@pytest.mark.parametrize("enable_preview", [True])
def testGetFrameWithPreviewEnabled(use_opengl_renderer):
"""Get frame is able to fetch frame with the correct dimensions when preview is enabled"""
scene = SquareToCircle()
assert isinstance(scene.renderer, OpenGLRenderer)
assert config.preview is True
renderer = scene.renderer
renderer.update_frame(scene)
frame = renderer.get_frame()
# height and width are flipped
assert renderer.get_pixel_shape()[0] == frame.shape[1]
assert renderer.get_pixel_shape()[1] == frame.shape[0]
| [
11748,
12972,
9288,
198,
198,
6738,
582,
320,
13,
10920,
11882,
13,
404,
1516,
75,
62,
10920,
11882,
1330,
30672,
49,
437,
11882,
198,
198,
6738,
764,
36439,
62,
28123,
1330,
1635,
628,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
... | 2.937355 | 431 |
S = str(input())
flag = True
for i in range(len(S)):
if i%2==0 and S[i]=="L":
flag = False
elif i%2!=0 and S[i]=="R":
flag = False
if flag:
print("Yes")
else:
print("No") | [
50,
796,
965,
7,
15414,
28955,
198,
198,
32109,
796,
6407,
198,
1640,
1312,
287,
2837,
7,
11925,
7,
50,
8,
2599,
198,
220,
611,
1312,
4,
17,
855,
15,
290,
311,
58,
72,
60,
855,
1,
43,
1298,
198,
220,
220,
220,
6056,
796,
10352... | 2.054945 | 91 |
palavra=input("Digite uma palavra: ")
x=0
y=len(palavra)-1
while x != len(palavra):
if palavra[x]==palavra[y]:
resp='É palíndromo'
else:
resp='Não é palíndromo'
y=y-1
x=x+1
print(resp)
| [
198,
18596,
615,
430,
28,
15414,
7203,
19511,
578,
334,
2611,
6340,
615,
430,
25,
366,
8,
198,
198,
87,
28,
15,
198,
88,
28,
11925,
7,
18596,
615,
430,
13219,
16,
198,
4514,
2124,
14512,
18896,
7,
18596,
615,
430,
2599,
198,
220,
... | 1.740157 | 127 |
import inspect
import os
import sys
import time
import traceback
from getgauge.messages.messages_pb2 import Message
from getgauge.messages.spec_pb2 import ProtoExecutionResult
from getgauge.registry import registry, MessagesStore, ScreenshotsStore
| [
11748,
10104,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
12854,
1891,
198,
198,
6738,
651,
70,
559,
469,
13,
37348,
1095,
13,
37348,
1095,
62,
40842,
17,
1330,
16000,
198,
6738,
651,
70,
559,
469,
13,
37348,
10... | 3.459459 | 74 |
from mock import call
from nose.tools import istest
from provy.more.debian import AptitudeRole, RedisRole
from tests.unit.tools.helpers import ProvyTestCase
| [
6738,
15290,
1330,
869,
198,
6738,
9686,
13,
31391,
1330,
318,
9288,
198,
198,
6738,
899,
88,
13,
3549,
13,
24689,
1330,
317,
457,
3984,
47445,
11,
2297,
271,
47445,
198,
6738,
5254,
13,
20850,
13,
31391,
13,
16794,
364,
1330,
7518,
... | 3.456522 | 46 |
from argparse import ArgumentParser
from os import listdir
from os.path import isfile, join
from shutil import copyfile
from progress.bar import Bar
from io_operations.load_files import load_stats_overall, load_texts, load_stats
from outputs.create_html_output import create_output
from helpers.dir_exist import create_if_not_exist, create_new_dir
if __name__ == '__main__':
parser = ArgumentParser(
description='Module for creating html documents providing stats about each audio file. '
'Generated plots could be found in subdir \plots.')
parser.add_argument('src', type=str,
help='source path of pickle files')
parser.add_argument('dest', type=str,
help='destination path for pickle files')
parser.add_argument('template', type=str,
help='html document template')
parser.add_argument('overall_stats', type=str,
help='path of file containing overall stats')
parser.add_argument('text', type=str,
help='path of file containing template texts')
parser.add_argument('--hide_emotions', action="store_true", help='remove emotions from html document')
parser.add_argument('--hide_interruptions', action="store_true", help='remove interruptions from html document')
args = parser.parse_args()
create_if_not_exist(args.dest)
files = [f for f in listdir(args.src) if isfile(join(args.src, f)) and f.endswith(f'.pkl')]
if len(files) < 1:
raise FileNotFoundError(f'No pkl files found in {args.src}')
# Create new subdir for plots
create_new_dir(join(args.dest, 'plots'), 'output plots')
# Copy stylesheet file to destination
copyfile(f'{args.template.split(".html")[0]}.css', join(args.dest, 'style.css'))
print(f'File {join(args.dest, "style.css")} was created')
with Bar(f'Processing files in {args.src}', max=len(files)) as bar:
for file in files:
# Load source files
file_name = file.split('.pkl')[0]
texts = load_texts(args.text)
stats = load_stats(join(args.src, file))
stats_overall = load_stats_overall(args.overall_stats)
# Create output
create_output(stats, stats_overall, texts, args.template, args.dest, file_name, args.hide_emotions, args.hide_interruptions)
bar.next()
| [
6738,
1822,
29572,
1330,
45751,
46677,
198,
6738,
28686,
1330,
1351,
15908,
198,
6738,
28686,
13,
6978,
1330,
318,
7753,
11,
4654,
198,
6738,
4423,
346,
1330,
4866,
7753,
198,
6738,
4371,
13,
5657,
1330,
2409,
198,
6738,
33245,
62,
3575... | 2.543158 | 950 |
from data import initail
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
data = np.load('./data/Orbits.npy')
Names = initail.Names
N = Names.shape[0]
plotxy()
plotxz() | [
6738,
1366,
1330,
2315,
603,
198,
11748,
299,
32152,
355,
45941,
220,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
2603,
29487,
8019,
13,
10331,
62,
37153,
1330,
24060,
2964,
18200,
198,
198,
7890,
796,
45941... | 2.72619 | 84 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
工具类
利用决策树进行分类处理
贝叶斯公式
p(xy)=p(x|y)p(y)=p(y|x)p(x)
p(x|y)=p(y|x)p(x)/p(y)
"""
import numpy as np
def train_naive_bayes(train_mat, train_category):
"""
朴素贝叶斯分类
:param train_mat: 训练文本
:param train_category: 对应的文本类别
:return:
"""
train_doc_num = len(train_mat)
words_num = len(train_mat[0])
# 侮辱性文件的出现概率
pos_abusive = np.sum(train_category) / train_doc_num
# 单词出现的次数
p0num = np.ones(words_num)
p1num = np.ones(words_num)
# 整个数据集单词出现的次数
p0num_all = 2.0
p1num_all = 2.0
# 遍历所有的文件,分别计算此文件中出现的单词出现的频率
for i in range(train_doc_num):
if train_category[i] == 1:
p1num += train_mat[i]
p1num_all += np.sum(train_mat[i])
else:
p0num += train_mat[i]
p0num_all += np.sum(train_mat[i])
p1vec = np.log(p1num / p1num_all)
p0vec = np.log(p0num / p0num_all)
return p0vec, p1vec, pos_abusive
def classify_naive_bayes(vec2classify, p0vec, p1vec, p_class1):
"""
朴素贝叶斯分类
:param vec2classify: 待分类的向量[0,1,1,1,1...]
:param p0vec: 类别0向量,[log(P(F1|C0)),log(P(F2|C0)),log(P(F3|C0)),log(P(F4|C0)),log(P(F5|C0))....]
:param p1vec: 类别1向量,[log(P(F1|C1)),log(P(F2|C1)),log(P(F3|C1)),log(P(F4|C1)),log(P(F5|C1))....]
:param p_class1: 判断类别1出现概率
:return: 类别1 or 0
"""
# 计算公式 log(P(F1|C))+log(P(F2|C))+....+log(P(Fn|C))+log(P(C))
# 使用 NumPy 数组来计算两个向量相乘的结果,这里的相乘是指对应元素相乘,即先将两个向量中的第一个元素相乘,然后将第2个元素相乘,以此类推。
p1 = np.sum(vec2classify * p1vec) + np.log(p_class1)
p0 = np.sum(vec2classify * p0vec) + np.log(1 - p_class1)
if p1 > p0:
return 1
else:
return 0
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
32432,
98,
17739,
115,
163,
109,
119,
198,
26344,
102,
18796,
101,
37863,
111,
163,
255,
244,
43718,
239,
32573,... | 1.322127 | 1,279 |
# example of using continue in a loop
k = 1
while k <= 10:
if k == 7:
k += 1
continue
print(k)
k = k+1
| [
2,
1672,
286,
1262,
2555,
287,
257,
9052,
198,
74,
796,
352,
198,
4514,
479,
19841,
838,
25,
198,
220,
220,
220,
611,
479,
6624,
767,
25,
198,
220,
220,
220,
220,
220,
220,
220,
479,
15853,
352,
198,
220,
220,
220,
220,
220,
220... | 2.015385 | 65 |
import stripe
from stripe import api_requestor, six, util
from stripe.stripe_object import StripeObject
from stripe.six.moves.urllib.parse import quote_plus
stripe.ListObject.list = list_patch
stripe.ListObject.create = create_patch
stripe.ListObject.retrieve = retrieve_patch
stripe.ListObject._request = _request_patch
stripe.ListObject.auto_paging_iter = auto_paging_iter_patch
stripe.ListObject.previous_page = previous_page_patch
stripe.ListObject.next_page = next_page_patch | [
11748,
39858,
198,
6738,
39858,
1330,
40391,
62,
25927,
273,
11,
2237,
11,
7736,
198,
6738,
39858,
13,
33565,
431,
62,
15252,
1330,
26137,
431,
10267,
198,
198,
6738,
39858,
13,
19412,
13,
76,
5241,
13,
333,
297,
571,
13,
29572,
1330,... | 3.184211 | 152 |
import pickle
if __name__ == "__main__":
main()
| [
11748,
2298,
293,
628,
628,
628,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
198
] | 2.333333 | 27 |
from datetime import datetime
import numpy as np
import pandas as pd
from course_lib.Base.Evaluation.Evaluator import EvaluatorHoldout
from src.data_management.New_DataSplitter_leave_k_out import New_DataSplitter_leave_k_out
from src.data_management.RecSys2019Reader import RecSys2019Reader
from src.data_management.data_reader import get_ignore_users
from src.model.Ensemble.Boosting.boosting_preprocessing import get_label_array, preprocess_dataframe_after_reading
from src.tuning.holdout_validation.run_parameter_search_lightgbm import run_parameter_search_lightgbm
from src.utils.general_utility_functions import get_split_seed
if __name__ == '__main__':
    # Data loading
    root_data_path = "../../data/"
    data_reader = RecSys2019Reader(root_data_path)
    # Hold out k=3 interactions per user; the fixed seed makes the split
    # reproducible across runs.
    data_reader = New_DataSplitter_leave_k_out(data_reader, k_out_value=3, use_validation_set=False,
                                               force_new_split=True, seed=get_split_seed())
    data_reader.load_data()
    URM_train, URM_test = data_reader.get_holdout_split()
    # Reading the dataframe
    dataframe_path = "../../resources/boosting_dataframe/"
    train_df = pd.read_csv(dataframe_path + "train_df_100_advanced_lt_20.csv")
    valid_df = pd.read_csv(dataframe_path + "valid_df_30_advanced_lt_20.csv")
    train_df = preprocess_dataframe_after_reading(train_df)
    # Labels shifted by +1 -- presumably to make relevance grades
    # non-negative for the lambdarank objective; confirm upstream.
    y_train = train_df['label'].values + 1
    train_df = train_df.drop(columns=["label"],
                             inplace=False)
    valid_df = preprocess_dataframe_after_reading(valid_df)
    valid_df = valid_df.drop(columns=[],
                             inplace=False)
    # NOTE(review): non_zero_count and total are never used below.
    _, non_zero_count, total = get_label_array(data_frame=train_df, URM_train=URM_train)
    # NOTE(review): validation labels are built from URM_test passed as the
    # URM_train argument, and are NOT shifted by +1 like y_train -- verify
    # both are intentional.
    y_valid, _, _ = get_label_array(data_frame=valid_df, URM_train=URM_test)
    # Setting evaluator
    mapper = data_reader.get_original_user_id_to_index_mapper()
    # Evaluate only users with profile length in [20, 2**16 - 1] that are
    # also target users.
    ignore_users = get_ignore_users(URM_train, mapper, lower_threshold=20, upper_threshold=2 ** 16 - 1,
                                    ignore_non_target_users=True)
    evaluator = EvaluatorHoldout(URM_test, cutoff_list=[10], ignore_users=ignore_users)
    total_users = np.arange(URM_train.shape[0])
    mask = np.in1d(total_users, ignore_users, invert=True)
    # NOTE(review): users_to_validate is computed but never used below.
    users_to_validate = total_users[mask]
    # HP tuning
    print("Start tuning...")
    # Each run writes its results into a timestamped report folder.
    version_path = "../../report/hp_tuning/light_gbm/"
    now = datetime.now().strftime('%b%d_%H-%M-%S')
    now = now + "_k_out_value_3_eval/"
    version_path = version_path + now
    run_parameter_search_lightgbm(URM_train, train_df, y_train, valid_df, y_valid, cutoff_test=30,
                                  categorical_features=[],
                                  num_iteration=10000, early_stopping_iteration=150,
                                  objective="lambdarank", verbose=True,
                                  output_folder_path=version_path, evaluator_validation=evaluator,
                                  n_cases=100, n_random_starts=40, metric_to_optimize="MAP")
    print("...tuning ended")
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
1781,
62,
8019,
13,
14881,
13,
36,
2100,
2288,
13,
36,
2100,
84,
1352,
1330,
26439,
84,
1352,
26807,
... | 2.263858 | 1,353 |
# Maps a streaming host domain to the name of the resolver that handles
# links from that domain.  Many mirror domains share a single resolver
# (e.g. the numerous dinozap front-ends).
# BUG FIX: the original literal contained exact duplicate entries for
# 'ustream.tv' and 'direct-stream.org' (same value each time); the
# duplicates were removed -- the resulting dict is unchanged.
resolver_dict = {
    'sawlive.tv': 'sawlive',
    'streamking.co': 'streamking',
    'streamking.cc': 'streamking',
    'castalba.tv': 'castalba',
    'p2pcast.tv': 'p2pcast',
    'p2pcast.tech': 'p2pcast',
    'finecast.tv': 'finecast',
    'filmon.com': 'filmon',
    'miplayer.net': 'miplayer',
    'lshstreams.com': 'lshunter',
    'cdn.lshstream.com': 'lshunter',
    'castamp.com': 'castamp',
    'yocast.tv': 'yocast',
    'streamlive.to': 'streamlive',
    '04stream.com': 'o4stream',
    'ustream.tv': 'ustream',
    'playwire.com': 'playwire',
    'leton.tv': 'leton',
    'yotv.co': 'yotv',
    'hdcast.me': 'hdcast',
    'zerocast.tv': 'zerocast',
    'castup.tv': 'castup',
    'mybeststream.xyz': 'mybeststream',
    'sunhd.info': 'dinozap',
    'ponlatv.com': 'dinozap',
    'youtube.com': 'youtube',
    'livestream.com': 'livestream',
    'new.livestream.com': 'livestream',
    'privatestream.tv': 'privatestream',
    'airq.tv': 'airq',
    'aliez.me': 'aliez',
    'emb.aliez.me': 'aliez',
    'emb.aliez.tv': 'aliez',
    'p3g.tv': 'p3g',
    'liveflashplayer.net': 'liveflashplayer',
    'laola1.tv': 'laola1',
    'ehftv.com': 'laola1',
    'zoomtv.me': 'zoomtv',  # not implemented
    'iguide.to': 'iguide',
    'letgo.tv': 'letgo',
    'streamup.com': 'streamup',
    'cast3d.tv': 'cast3d',
    'xvtr.pw': 'dinozap',
    'turbocast.tv': 'turbocast',  # not implemented
    'direct-stream.org': 'directstream',
    'pxstream.tv': 'pxstream',
    'pushpublish.tv': 'publishpublish',
    'embeducaster.com': 'mips',
    'rocktv.co': 'rocktv',
    'embedezcast.com': 'ezcast',
    'micast.tv': 'micast',
    'openlive.org': 'openlive',
    'sostart.org': 'sostart',
    'sostart.pw': 'sostart2',
    'scity.tv': 'sostart2',
    'hqstream.tv': 'hqstream',
    'tutelehd.com': 'tutele',
    'janjua.tv': 'janjuaf',
    'abcast.net': 'abcast',
    'castfree.me': 'castfree',
    'playerapp1.pw': 'dinozap',
    'playerapp2.pw': 'dinozap',
    'playerhd1.pw': 'dinozap',
    'playerhd2.pw': 'dinozap',
    'dinostream.pw': 'dinozap',
    'dinozap.info': 'dinozap',
    'tv.verdirectotv.org': 'dinozap',
    'serverhd.eu': 'dinozap',
    'sitenow.me': 'dinozap',
    'sstream.pw': 'dinozap',
    'streamcasttv.biz': 'streamcasttv',
    'vaughnlive.tv': 'vaughnlive',
    'vapers.tv': 'vaughnlive',
    'breakers.tv': 'vaughnlive',
    'instagib.tv': 'vaughnlive',
    'hdcast.org': 'hdcast_org',
    'cast4u.tv': 'cast4u',
    'youtu.be': 'youtube',
    'veetle.com': 'veetle',
    'tvope.com': 'tvope',
    'static.castto.me': 'castto',
    'castasap.pw': 'castasap',
    'flashlive.pw': 'castasap',
    'castflash.pw': 'castasap',
    'fastflash.pw': 'castasap',
    'deltatv.xyz': 'deltatv',
    'deltatv.pw': 'deltatv',
    'deltatv.site': 'deltatv',
    'hdcast.info': 'hdcastinfo',
    'theactionlive.com': 'theactionlive',
    'ustvnow.com': 'ustvnow',
    'hornos.moy.su': 'kolstg',
    'mipsplayer.com': 'mips',
    'bro.adcast.tech': 'broadcast',
    'bro.adca.st': 'broadcast',
    'sportstream365.com': 'sportstream365',
    'navixsport.com': 'navix',
    'cndhlsstream.pw': 'cndhls',
    'streamifyplayer.com': 'streamify',
    'zoptv.com': 'zoptv',
    'streamp2p.com': 'streamp2p',
    'akamaistreaming.com': 'akamaistreaming',
    'cast3d.me': 'dinozap',
}
| [
198,
198,
411,
14375,
62,
11600,
34758,
705,
43439,
12583,
13,
14981,
10354,
705,
43439,
12583,
3256,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
705,
5532,
3364,
13,
1073,
10354,
705,
5532,
3364,
3256,
198,
220,
220,
... | 1.624481 | 2,647 |
"""
oktopus algorithm related utils
"""
import random, statistics
from multiprocessing import Pool
from collections import defaultdict, deque
from sys import maxint
import networkx as nx
from cytoolz import merge, partial
from nx_disjoint_paths import edge_disjoint_paths
from ...multicast.session import Session
############################
# Graph contraction
############################
############################
# End - Graph contraction
############################
| [
37811,
198,
482,
4852,
385,
11862,
3519,
3384,
4487,
198,
37811,
198,
11748,
4738,
11,
7869,
198,
6738,
18540,
305,
919,
278,
1330,
19850,
198,
6738,
17268,
1330,
4277,
11600,
11,
390,
4188,
198,
6738,
25064,
1330,
3509,
600,
198,
198,
... | 3.869231 | 130 |
# Check the various features of the ShTest format.
#
# RUN: rm -f %t.xml
# RUN: not %{lit} -j 1 -v %{inputs}/shtest-format --xunit-xml-output %t.xml > %t.out
# RUN: FileCheck < %t.out %s
# RUN: FileCheck --check-prefix=XUNIT < %t.xml %s
# END.
# CHECK: -- Testing:
# CHECK: PASS: shtest-format :: argv0.txt
# CHECK: FAIL: shtest-format :: external_shell/fail.txt
# CHECK-NEXT: *** TEST 'shtest-format :: external_shell/fail.txt' FAILED ***
# CHECK: Command Output (stdout):
# CHECK-NEXT: --
# CHECK-NEXT: line 1: failed test output on stdout
# CHECK-NEXT: line 2: failed test output on stdout
# CHECK: Command Output (stderr):
# CHECK-NEXT: --
# CHECK-NEXT: cat{{(\.exe)?}}: {{cannot open does-not-exist|does-not-exist: No such file or directory}}
# CHECK: --
# CHECK: FAIL: shtest-format :: external_shell/fail_with_bad_encoding.txt
# CHECK-NEXT: *** TEST 'shtest-format :: external_shell/fail_with_bad_encoding.txt' FAILED ***
# CHECK: Command Output (stdout):
# CHECK-NEXT: --
# CHECK-NEXT: a line with bad encoding:
# CHECK: --
# CHECK: PASS: shtest-format :: external_shell/pass.txt
# CHECK: FAIL: shtest-format :: fail.txt
# CHECK-NEXT: *** TEST 'shtest-format :: fail.txt' FAILED ***
# CHECK-NEXT: Script:
# CHECK-NEXT: --
# CHECK-NEXT: printf "line 1
# CHECK-NEXT: false
# CHECK-NEXT: --
# CHECK-NEXT: Exit Code: 1
#
# CHECK: Command Output (stdout):
# CHECK-NEXT: --
# CHECK-NEXT: $ ":" "RUN: at line 1"
# CHECK-NEXT: $ "printf"
# CHECK-NEXT: # command output:
# CHECK-NEXT: line 1: failed test output on stdout
# CHECK-NEXT: line 2: failed test output on stdout
# CHECK: UNRESOLVED: shtest-format :: no-test-line.txt
# CHECK: PASS: shtest-format :: pass.txt
# CHECK: UNSUPPORTED: shtest-format :: requires-missing.txt
# CHECK: PASS: shtest-format :: requires-present.txt
# CHECK: UNRESOLVED: shtest-format :: requires-star.txt
# CHECK: UNSUPPORTED: shtest-format :: requires-triple.txt
# CHECK: PASS: shtest-format :: unsupported-expr-false.txt
# CHECK: UNSUPPORTED: shtest-format :: unsupported-expr-true.txt
# CHECK: UNRESOLVED: shtest-format :: unsupported-star.txt
# CHECK: UNSUPPORTED: shtest-format :: unsupported_dir/some-test.txt
# CHECK: PASS: shtest-format :: xfail-expr-false.txt
# CHECK: XFAIL: shtest-format :: xfail-expr-true.txt
# CHECK: XFAIL: shtest-format :: xfail-feature.txt
# CHECK: XFAIL: shtest-format :: xfail-target.txt
# CHECK: XFAIL: shtest-format :: xfail.txt
# CHECK: XPASS: shtest-format :: xpass.txt
# CHECK-NEXT: *** TEST 'shtest-format :: xpass.txt' FAILED ***
# CHECK-NEXT: Script
# CHECK-NEXT: --
# CHECK-NEXT: true
# CHECK-NEXT: --
# CHECK: Testing Time
# CHECK: Unexpected Passing Tests (1)
# CHECK: shtest-format :: xpass.txt
# CHECK: Failing Tests (3)
# CHECK: shtest-format :: external_shell/fail.txt
# CHECK: shtest-format :: external_shell/fail_with_bad_encoding.txt
# CHECK: shtest-format :: fail.txt
# CHECK: Expected Passes : 7
# CHECK: Expected Failures : 4
# CHECK: Unsupported Tests : 4
# CHECK: Unresolved Tests : 3
# CHECK: Unexpected Passes : 1
# CHECK: Unexpected Failures: 3
# XUNIT: <?xml version="1.0" encoding="UTF-8" ?>
# XUNIT-NEXT: <testsuites>
# XUNIT-NEXT: <testsuite name="shtest-format" tests="22" failures="7" skipped="4">
# XUNIT: <testcase classname="shtest-format.shtest-format" name="argv0.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.external_shell" name="fail.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT: <failure{{[ ]*}}>
# XUNIT: </failure>
# XUNIT-NEXT: </testcase>
# XUNIT: <testcase classname="shtest-format.external_shell" name="fail_with_bad_encoding.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT: <failure{{[ ]*}}>
# XUNIT: </failure>
# XUNIT-NEXT: </testcase>
# XUNIT: <testcase classname="shtest-format.external_shell" name="pass.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="fail.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT: <failure{{[ ]*}}>
# XUNIT: </failure>
# XUNIT-NEXT: </testcase>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="no-test-line.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT: <failure{{[ ]*}}>
# XUNIT: </failure>
# XUNIT-NEXT: </testcase>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="pass.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="requires-missing.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT:<skipped message="Skipping because of: a-missing-feature" />
# XUNIT: <testcase classname="shtest-format.shtest-format" name="requires-present.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="requires-star.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT: <failure{{[ ]*}}>
# XUNIT: </failure>
# XUNIT-NEXT: </testcase>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="requires-triple.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT:<skipped message="Skipping because of: x86_64" />
# XUNIT: <testcase classname="shtest-format.shtest-format" name="unsupported-expr-false.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="unsupported-expr-true.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT:<skipped message="Skipping because of configuration." />
# XUNIT: <testcase classname="shtest-format.shtest-format" name="unsupported-star.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT: <failure{{[ ]*}}>
# XUNIT: </failure>
# XUNIT-NEXT: </testcase>
# XUNIT: <testcase classname="shtest-format.unsupported_dir" name="some-test.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT:<skipped message="Skipping because of configuration." />
# XUNIT: <testcase classname="shtest-format.shtest-format" name="xfail-expr-false.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="xfail-expr-true.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="xfail-feature.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="xfail-target.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="xfail.txt" time="{{[0-9]+\.[0-9]+}}"/>
# XUNIT: <testcase classname="shtest-format.shtest-format" name="xpass.txt" time="{{[0-9]+\.[0-9]+}}">
# XUNIT-NEXT: <failure{{[ ]*}}>
# XUNIT: </failure>
# XUNIT-NEXT: </testcase>
# XUNIT: </testsuite>
# XUNIT-NEXT: </testsuites>
| [
2,
6822,
262,
2972,
3033,
286,
262,
911,
14402,
5794,
13,
198,
2,
198,
2,
32494,
25,
42721,
532,
69,
4064,
83,
13,
19875,
198,
2,
32494,
25,
407,
4064,
90,
18250,
92,
532,
73,
352,
532,
85,
4064,
90,
15414,
82,
92,
14,
1477,
9... | 2.424105 | 2,655 |
# -*- coding: utf-8 -*-
"""
Program for generating a GeoJSON-file from provinces.bmp.
This is the main file responsible for running the show.
Author: Erkki Mattila, 2014-2015
"""
from PIL import Image
import time, csv
from marcher import Marcher
def read_definition(definition):
    """Read valid land-province rows from definition.csv.

    Seas, lakes and rivers are skipped as uninteresting, as are a few
    hand-picked province ids and any row whose id/colour fields are not
    integers (e.g. the header row or empty/reserved colour rows).

    Returns a list of the remaining csv rows (lists of strings).

    Arguments:
    definition -- path to definition.csv
    """
    start = time.time()
    print("Reading definition.csv")
    provs = []
    # Name substrings identifying seas, lakes and rivers to skip.
    seas_and_rivers = ["Gulf", "Sea", "SEA", "Golf", "Biscay", "Donegal", "Galway", "Coast", "Cabo",
                       "Strait", "Cote", "Atlantic", "Faroya", "Hav", "bukten", "Occe", "Channel",
                       "Firth", "Lake", "Saimaa", "The ", "bank", "Manche", "Islas", "Ponant",
                       "Indus", "Ganges", "Brahmaputra", "Seine", "Loire", "Garonne", "Danube",
                       "Iskender", "Western", "East", "Nile Delta", "Levant", "Elbe", "Rhine",
                       "Vistula", "Kattegat", "Waddenzee", "Daugava", "Volga1", "Volga2", "Volga3",
                       "Volga4", "Volga5", "Volga6", "Volga7", "Volga8", "Volga9", "Svir", "Neva", "Don1",
                       "Don2", "Don3", "Don4", "Desna", "Oka", "Lovat", "Volkhov", "Dniester",
                       "Dnieper1", "Dnieper2", "Dnieper3", "Dnipro", "Dny", "Dne", "Pripyat", "Dwina", "Kallavesi", "Bodensee"]
    # Näsijärvi, Oulujärvi, Mälaren, Hjälmaren, Vättern, Vänern, Onega, Päijänne,
    # some unnamed lake in Siberia, unnamed spots in the Indian Ocean.
    # Their names contain unicode characters, so they are skipped by id instead.
    skip_id = [943, 957, 959, 961, 962, 963, 997, 1018, 1293, 1305, 1412]
    csv.register_dialect('ckii', delimiter=';', quoting=csv.QUOTE_NONE)
    with open(definition, 'r') as f:
        reader = csv.reader(f, 'ckii')
        for row in reader:
            if len(row) > 3 and row[0] != '':
                try:
                    prov_id = int(row[0])
                    prov_name = row[4]
                    # Skip water provinces, either by id or by name substring.
                    if prov_id in skip_id or any(name in prov_name for name in seas_and_rivers):
                        continue
                    # Validate that the colour fields are integers; a
                    # non-integer field raises ValueError and skips the row.
                    int(row[1])
                    int(row[2])
                    int(row[3])
                    provs.append(row)
                except ValueError:
                    pass
    delta = time.time() - start
    print("Reading definition.csv took %.3f seconds" % delta)
    return provs
def find_starting_points(width, height, pixels):
    """Map each colour in the pixel map to the first coordinate it appears at.

    "First" follows the scan order used here: column by column (x outer,
    y inner).  Returns a dict {colour: (x, y)}.

    Arguments:
    width and height -- dimensions of the target image
    pixels -- pixel map of the image, indexable as pixels[x, y]
    """
    start = time.time()
    print ("Begun searching for starting points")
    output = {}
    # BUG FIX: removed an unused `stop = False` flag from the original.
    for x in range(width):
        for y in range(height):
            colour = pixels[x, y]
            if colour not in output:
                output[colour] = (x, y)
    delta = time.time() - start
    print ("Finding starting points took %.3f seconds" % delta)
    return output
def generate():
    """Function for running the show.

    Loads provinces.bmp, initializes marcher and calls subfunctions.
    Outputs a GeoJSON-file (ckii_provdata.js) containing land provinces as
    polygons, and prints perimeter statistics to stdout.
    """
    print ("Begun generating image")
    # Benchmarking
    start = time.time()
    img = Image.open("provinces.bmp")
    pix = img.load()
    marcher = Marcher("provinces.bmp")
    provs = read_definition("definition.csv")
    starting_points = find_starting_points(img.size[0], img.size[1], pix)
    # Counter for statistics and GeoJSON-formatting.
    # Features in the list are delimited with a comma, but the first item
    # isn't preceded with one.
    i = 0
    # Statistics
    prov_count = len(provs)
    max_perimeter = 0
    min_perimeter = 0
    pix_count = 0
    # Open writer and the beginning of the file
    f = open("ckii_provdata.js", 'w')
    f.write('var ckii_provdata = {"type":"FeatureCollection","features":[')
    for prov in provs:
        i = i + 1
        colour = (int(prov[1]), int(prov[2]), int(prov[3]))
        prov_id = int(prov[0])
        prov_name = prov[4]
        print ("{}/{} {} {} {}".format(i, prov_count, colour, prov_name, prov_id))
        marcher.colour = colour
        if colour in starting_points:
            sp = starting_points[colour]
            points = marcher.do_march(sp)
            perimeter = 0
            points_string = ""
            for p in points:
                # y is negated -- presumably because image y grows downward
                # while the GeoJSON viewer's y grows upward.
                x = p[0]
                y = -p[1]
                if points_string == "":
                    points_string = points_string + '[{},{}]'.format(x,y)
                else:
                    points_string = points_string + ',[{},{}]'.format(x,y)
                # Statistics
                pix_count = pix_count + 1
                perimeter = perimeter + 1
            # Format string and write it to file
            prov_string = '{"type":"Feature","id":"' + str(prov_id) + '","properties":{"name":"' + prov_name + '"},"geometry":{"type":"Polygon","coordinates":[[' + points_string + ']]}}'
            if i == 1:
                f.write(prov_string)
            else:
                f.write(',' + prov_string)
        # Statistics
        # NOTE(review): if the first province's colour is missing from
        # starting_points, `perimeter` is unbound here and this raises
        # NameError -- confirm whether that can happen in practice.
        if perimeter < min_perimeter:
            min_perimeter = perimeter
        if perimeter > max_perimeter:
            max_perimeter = perimeter
        if i == 1:
            min_perimeter = max_perimeter
    # Close the file with closing brackets of features and var ckii_provdata
    f.write(']};')
    # BUG FIX: was `f.close` (a bare attribute access), so the file was
    # never actually closed.
    f.close()
    # Output statistics
    print("Pixels in outlines: {}\nLongest perimeter: {}\nShortest perimeter: {}".format(pix_count, max_perimeter, min_perimeter))
if __name__ == "__main__":
    # NOTE(review): time.clock() was deprecated in Python 3.3 and removed in
    # Python 3.8 -- on modern Python 3 this should be time.perf_counter().
    # Left unchanged because the file may still target older interpreters.
    start = time.clock()
    generate()
    delta = time.clock() - start
    print ("Generating GeoJSON took %.3f seconds" %delta)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
15167,
329,
15453,
257,
32960,
40386,
12,
7753,
422,
17812,
13,
65,
3149,
13,
198,
198,
1212,
318,
262,
1388,
2393,
4497,
329,
2491,
262,
905,
13,
198... | 2.116121 | 3,083 |
import json
import logging
from typing import Any, Dict, List, Optional
import requests
from . import utils
class HEROFetch:
    """
    Handler to search and retrieve literature from US EPA's HERO database.
    Given a list of HERO IDs, fetch the content for each one and return a
    list of dictionaries of citation information. Note that this citation
    includes the PubMed ID, if available in HERO.
    """
    # Default query parameters for HERO requests; "recordsperpage"
    # presumably caps the page size at 100 records per request -- confirm
    # against the HERO API documentation.
    default_settings = {"recordsperpage": 100}
| [
11748,
33918,
198,
11748,
18931,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
7343,
11,
32233,
198,
198,
11748,
7007,
198,
198,
6738,
764,
1330,
3384,
4487,
628,
628,
198,
198,
4871,
24906,
19238,
7569,
25,
198,
220,
220,
220,
3722... | 3.587786 | 131 |
# Repeatedly collapse doubled lowercase letters ("aa" -> "a") until the
# string stops changing, then print the result.
s = input()
previous = None
while previous != s:
    previous = s
    for code in range(ord("a"), ord("z") + 1):
        letter = chr(code)
        s = s.replace(letter + letter, letter)
print(s)
| [
82,
796,
5128,
3419,
198,
7857,
796,
657,
198,
4514,
2546,
14512,
18896,
7,
82,
2599,
198,
220,
220,
220,
2546,
796,
18896,
7,
82,
8,
198,
220,
220,
220,
329,
1312,
287,
2837,
7,
2075,
2599,
198,
220,
220,
220,
220,
220,
220,
22... | 2.060241 | 83 |